-rw-r--r--  drivers/net/bnx2.c | 48
-rw-r--r--  drivers/net/e1000e/netdev.c | 3
-rw-r--r--  drivers/net/gianfar.c | 6
-rw-r--r--  drivers/net/ixgbe/ixgbe.h | 4
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 17
-rw-r--r--  drivers/net/macvtap.c | 13
-rw-r--r--  drivers/net/pcmcia/smc91c92_cs.c | 31
-rw-r--r--  drivers/net/phy/Kconfig | 5
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/micrel.c | 113
-rw-r--r--  drivers/net/ppp_generic.c | 34
-rw-r--r--  drivers/net/sb1250-mac.c | 67
-rw-r--r--  drivers/net/sfc/efx.c | 4
-rw-r--r--  drivers/net/sfc/falcon.c | 4
-rw-r--r--  drivers/net/sfc/falcon_boards.c | 13
-rw-r--r--  drivers/net/sfc/nic.h | 2
-rw-r--r--  drivers/net/sfc/siena.c | 13
-rw-r--r--  drivers/net/tun.c | 21
-rw-r--r--  drivers/net/usb/Kconfig | 9
-rw-r--r--  drivers/net/usb/Makefile | 1
-rw-r--r--  drivers/net/usb/cdc_ether.c | 1
-rw-r--r--  drivers/net/usb/dm9601.c | 2
-rw-r--r--  drivers/net/usb/ipheth.c | 15
-rw-r--r--  drivers/net/usb/kaweth.c | 1
-rw-r--r--  drivers/net/usb/sierra_net.c | 1004
-rw-r--r--  drivers/net/wireless/p54/p54pci.c | 2
-rw-r--r--  include/linux/net.h | 14
-rw-r--r--  include/linux/netdevice.h | 19
-rw-r--r--  include/linux/rculist.h | 29
-rw-r--r--  include/linux/skbuff.h | 5
-rw-r--r--  include/net/af_unix.h | 20
-rw-r--r--  include/net/sctp/command.h | 1
-rw-r--r--  include/net/sctp/sctp.h | 1
-rw-r--r--  include/net/sctp/structs.h | 1
-rw-r--r--  include/net/sock.h | 40
-rw-r--r--  net/atm/common.c | 22
-rw-r--r--  net/bluetooth/l2cap.c | 5
-rw-r--r--  net/core/dev.c | 26
-rw-r--r--  net/core/skbuff.c | 2
-rw-r--r--  net/core/sock.c | 50
-rw-r--r--  net/core/stream.c | 10
-rw-r--r--  net/dccp/output.c | 10
-rw-r--r--  net/ethernet/eth.c | 2
-rw-r--r--  net/ipv4/inet_connection_sock.c | 16
-rw-r--r--  net/ipv6/addrconf.c | 16
-rw-r--r--  net/ipv6/inet6_connection_sock.c | 15
-rw-r--r--  net/iucv/af_iucv.c | 11
-rw-r--r--  net/phonet/pep.c | 8
-rw-r--r--  net/phonet/socket.c | 2
-rw-r--r--  net/rxrpc/af_rxrpc.c | 10
-rw-r--r--  net/sched/sch_generic.c | 2
-rw-r--r--  net/sctp/associola.c | 6
-rw-r--r--  net/sctp/endpointola.c | 1
-rw-r--r--  net/sctp/probe.c | 1
-rw-r--r--  net/sctp/sm_make_chunk.c | 94
-rw-r--r--  net/sctp/sm_sideeffect.c | 26
-rw-r--r--  net/sctp/sm_statefuns.c | 8
-rw-r--r--  net/sctp/socket.c | 19
-rw-r--r--  net/socket.c | 47
-rw-r--r--  net/unix/af_unix.c | 17
-rw-r--r--  net/unix/garbage.c | 13
61 files changed, 1662 insertions, 341 deletions
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 53326fed6c81..ab26bbc2a1d3 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -58,8 +58,8 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.0.8"
-#define DRV_MODULE_RELDATE "Feb 15, 2010"
+#define DRV_MODULE_VERSION "2.0.9"
+#define DRV_MODULE_RELDATE "April 27, 2010"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw"
@@ -651,9 +651,10 @@ bnx2_napi_enable(struct bnx2 *bp)
}
static void
-bnx2_netif_stop(struct bnx2 *bp)
+bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
- bnx2_cnic_stop(bp);
+ if (stop_cnic)
+ bnx2_cnic_stop(bp);
if (netif_running(bp->dev)) {
int i;
@@ -671,14 +672,15 @@ bnx2_netif_stop(struct bnx2 *bp)
}
static void
-bnx2_netif_start(struct bnx2 *bp)
+bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
if (atomic_dec_and_test(&bp->intr_sem)) {
if (netif_running(bp->dev)) {
netif_tx_wake_all_queues(bp->dev);
bnx2_napi_enable(bp);
bnx2_enable_int(bp);
- bnx2_cnic_start(bp);
+ if (start_cnic)
+ bnx2_cnic_start(bp);
}
}
}
@@ -4758,8 +4760,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
rc = bnx2_alloc_bad_rbuf(bp);
}
- if (bp->flags & BNX2_FLAG_USING_MSIX)
+ if (bp->flags & BNX2_FLAG_USING_MSIX) {
bnx2_setup_msix_tbl(bp);
+ /* Prevent MSIX table reads and writes from timing out */
+ REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
+ BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
+ }
return rc;
}
@@ -6272,12 +6278,12 @@ bnx2_reset_task(struct work_struct *work)
return;
}
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, true);
bnx2_init_nic(bp, 1);
atomic_set(&bp->intr_sem, 1);
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, true);
rtnl_unlock();
}
@@ -6319,7 +6325,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
struct bnx2 *bp = netdev_priv(dev);
if (netif_running(dev))
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, false);
bp->vlgrp = vlgrp;
@@ -6330,7 +6336,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, false);
}
#endif
@@ -7050,9 +7056,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
if (netif_running(bp->dev)) {
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, true);
bnx2_init_nic(bp, 0);
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, true);
}
return 0;
@@ -7082,7 +7088,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
/* Reset will erase chipset stats; save them */
bnx2_save_stats(bp);
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, true);
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
bnx2_free_skbs(bp);
bnx2_free_mem(bp);
@@ -7110,7 +7116,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
bnx2_setup_cnic_irq_info(bp);
mutex_unlock(&bp->cnic_lock);
#endif
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, true);
}
return 0;
}
@@ -7363,7 +7369,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
if (etest->flags & ETH_TEST_FL_OFFLINE) {
int i;
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, true);
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
bnx2_free_skbs(bp);
@@ -7382,7 +7388,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
bnx2_shutdown_chip(bp);
else {
bnx2_init_nic(bp, 1);
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, true);
}
/* wait for link up */
@@ -8376,7 +8382,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
flush_scheduled_work();
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, true);
netif_device_detach(dev);
del_timer_sync(&bp->timer);
bnx2_shutdown_chip(bp);
@@ -8398,7 +8404,7 @@ bnx2_resume(struct pci_dev *pdev)
bnx2_set_power_state(bp, PCI_D0);
netif_device_attach(dev);
bnx2_init_nic(bp, 1);
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, true);
return 0;
}
@@ -8425,7 +8431,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
}
if (netif_running(dev)) {
- bnx2_netif_stop(bp);
+ bnx2_netif_stop(bp, true);
del_timer_sync(&bp->timer);
bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
}
@@ -8482,7 +8488,7 @@ static void bnx2_io_resume(struct pci_dev *pdev)
rtnl_lock();
if (netif_running(dev))
- bnx2_netif_start(bp);
+ bnx2_netif_start(bp, true);
netif_device_attach(dev);
rtnl_unlock();
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 904bd6bf3199..d13760dc27f8 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -5020,6 +5020,9 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
reg16 &= ~state;
pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+ if (!pdev->bus->self)
+ return;
+
pos = pci_pcie_cap(pdev->bus->self);
pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
reg16 &= ~state;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 0cef967499d3..5267c27e3174 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1567,9 +1567,9 @@ static void gfar_halt_nodisable(struct net_device *dev)
tempval |= (DMACTRL_GRS | DMACTRL_GTS);
gfar_write(&regs->dmactrl, tempval);
- while (!(gfar_read(&regs->ievent) &
- (IEVENT_GRSC | IEVENT_GTSC)))
- cpu_relax();
+ spin_event_timeout(((gfar_read(&regs->ievent) &
+ (IEVENT_GRSC | IEVENT_GTSC)) ==
+ (IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
}
}
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index ec6bcc0660c6..79c35ae3718c 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -104,10 +104,6 @@
#define MAX_EMULATION_MAC_ADDRS 16
#define VMDQ_P(p) ((p) + adapter->num_vfs)
-#define IXGBE_SUBDEV_ID_82598AF_MEZZ 0x0049
-#define IXGBE_SUBDEV_ID_82598AF_MENLO_Q_MEZZ 0x004a
-#define IXGBE_SUBDEV_ID_82598AF_MENLO_E_MEZZ 0x004b
-
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index ff59f88dc7a1..2ae5a5159ce4 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -4314,9 +4314,6 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
int err = 0;
int vector, v_budget;
- if (!(adapter->flags & IXGBE_FLAG_MSIX_CAPABLE))
- goto try_msi;
-
/*
* It's easy to be greedy for MSI-X vectors, but it really
* doesn't do us much good if we have a lot more vectors
@@ -4348,7 +4345,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
goto out;
}
-try_msi:
+
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
@@ -4629,18 +4626,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->ring_feature[RING_F_RSS].indices = rss;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
- adapter->flags |= IXGBE_FLAG_MSIX_CAPABLE;
- if (adapter->hw.device_id == IXGBE_DEV_ID_82598AF_DUAL_PORT) {
- switch (adapter->hw.subsystem_device_id) {
- case IXGBE_SUBDEV_ID_82598AF_MEZZ:
- case IXGBE_SUBDEV_ID_82598AF_MENLO_Q_MEZZ:
- case IXGBE_SUBDEV_ID_82598AF_MENLO_E_MEZZ:
- adapter->flags &= ~IXGBE_FLAG_MSIX_CAPABLE;
- break;
- default:
- break;
- }
- }
if (hw->mac.type == ixgbe_mac_82598EB) {
if (hw->device_id == IXGBE_DEV_ID_82598AT)
adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index d97e1fd234ba..1c4110df343e 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -37,6 +37,7 @@
struct macvtap_queue {
struct sock sk;
struct socket sock;
+ struct socket_wq wq;
struct macvlan_dev *vlan;
struct file *file;
unsigned int flags;
@@ -242,12 +243,15 @@ static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
static void macvtap_sock_write_space(struct sock *sk)
{
+ wait_queue_head_t *wqueue;
+
if (!sock_writeable(sk) ||
!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
return;
- if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
- wake_up_interruptible_poll(sk_sleep(sk), POLLOUT | POLLWRNORM | POLLWRBAND);
+ wqueue = sk_sleep(sk);
+ if (wqueue && waitqueue_active(wqueue))
+ wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}
static int macvtap_open(struct inode *inode, struct file *file)
@@ -272,7 +276,8 @@ static int macvtap_open(struct inode *inode, struct file *file)
if (!q)
goto out;
- init_waitqueue_head(&q->sock.wait);
+ q->sock.wq = &q->wq;
+ init_waitqueue_head(&q->wq.wait);
q->sock.type = SOCK_RAW;
q->sock.state = SS_CONNECTED;
q->sock.file = file;
@@ -308,7 +313,7 @@ static unsigned int macvtap_poll(struct file *file, poll_table * wait)
goto out;
mask = 0;
- poll_wait(file, &q->sock.wait, wait);
+ poll_wait(file, &q->wq.wait, wait);
if (!skb_queue_empty(&q->sk.sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 408f3d7b1545..949ac1a12537 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1804,23 +1804,30 @@ static void media_check(u_long arg)
SMC_SELECT_BANK(1);
media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1;
+ SMC_SELECT_BANK(saved_bank);
+ spin_unlock_irqrestore(&smc->lock, flags);
+
/* Check for pending interrupt with watchdog flag set: with
this, we can limp along even if the interrupt is blocked */
if (smc->watchdog++ && ((i>>8) & i)) {
if (!smc->fast_poll)
printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ local_irq_save(flags);
smc_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
smc->fast_poll = HZ;
}
if (smc->fast_poll) {
smc->fast_poll--;
smc->media.expires = jiffies + HZ/100;
add_timer(&smc->media);
- SMC_SELECT_BANK(saved_bank);
- spin_unlock_irqrestore(&smc->lock, flags);
return;
}
+ spin_lock_irqsave(&smc->lock, flags);
+
+ saved_bank = inw(ioaddr + BANK_SELECT);
+
if (smc->cfg & CFG_MII_SELECT) {
if (smc->mii_if.phy_id < 0)
goto reschedule;
@@ -1978,15 +1985,16 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
int ret;
+ unsigned long flags;
- spin_lock_irq(&smc->lock);
+ spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
if (smc->cfg & CFG_MII_SELECT)
ret = mii_ethtool_gset(&smc->mii_if, ecmd);
else
ret = smc_netdev_get_ecmd(dev, ecmd);
SMC_SELECT_BANK(saved_bank);
- spin_unlock_irq(&smc->lock);
+ spin_unlock_irqrestore(&smc->lock, flags);
return ret;
}
@@ -1996,15 +2004,16 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
int ret;
+ unsigned long flags;
- spin_lock_irq(&smc->lock);
+ spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
if (smc->cfg & CFG_MII_SELECT)
ret = mii_ethtool_sset(&smc->mii_if, ecmd);
else
ret = smc_netdev_set_ecmd(dev, ecmd);
SMC_SELECT_BANK(saved_bank);
- spin_unlock_irq(&smc->lock);
+ spin_unlock_irqrestore(&smc->lock, flags);
return ret;
}
@@ -2014,12 +2023,13 @@ static u32 smc_get_link(struct net_device *dev)
unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
u32 ret;
+ unsigned long flags;
- spin_lock_irq(&smc->lock);
+ spin_lock_irqsave(&smc->lock, flags);
SMC_SELECT_BANK(3);
ret = smc_link_ok(dev);
SMC_SELECT_BANK(saved_bank);
- spin_unlock_irq(&smc->lock);
+ spin_unlock_irqrestore(&smc->lock, flags);
return ret;
}
@@ -2056,16 +2066,17 @@ static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
int rc = 0;
u16 saved_bank;
unsigned int ioaddr = dev->base_addr;
+ unsigned long flags;
if (!netif_running(dev))
return -EINVAL;
- spin_lock_irq(&smc->lock);
+ spin_lock_irqsave(&smc->lock, flags);
saved_bank = inw(ioaddr + BANK_SELECT);
SMC_SELECT_BANK(3);
rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL);
SMC_SELECT_BANK(saved_bank);
- spin_unlock_irq(&smc->lock);
+ spin_unlock_irqrestore(&smc->lock, flags);
return rc;
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index fc5938ba3d78..a527e37728cd 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -88,6 +88,11 @@ config LSI_ET1011C_PHY
---help---
Supports the LSI ET1011C PHY.
+config MICREL_PHY
+ tristate "Driver for Micrel PHYs"
+ ---help---
+ Supports the KSZ9021, VSC8201, KS8001 PHYs.
+
config FIXED_PHY
bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
depends on PHYLIB=y
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 1342585af381..13bebab65d02 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -20,4 +20,5 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
obj-$(CONFIG_STE10XP) += ste10Xp.o
+obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
new file mode 100644
index 000000000000..68dd107bad24
--- /dev/null
+++ b/drivers/net/phy/micrel.c
@@ -0,0 +1,113 @@
+/*
+ * drivers/net/phy/micrel.c
+ *
+ * Driver for Micrel PHYs
+ *
+ * Author: David J. Choi
+ *
+ * Copyright (c) 2010 Micrel, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Support : ksz9021 , vsc8201, ks8001
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#define PHY_ID_KSZ9021 0x00221611
+#define PHY_ID_VSC8201 0x000FC413
+#define PHY_ID_KS8001 0x0022161A
+
+
+static int kszphy_config_init(struct phy_device *phydev)
+{
+ return 0;
+}
+
+
+static struct phy_driver ks8001_driver = {
+ .phy_id = PHY_ID_KS8001,
+ .phy_id_mask = 0x00fffff0,
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_POLL,
+ .config_init = kszphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static struct phy_driver vsc8201_driver = {
+ .phy_id = PHY_ID_VSC8201,
+ .name = "Micrel VSC8201",
+ .phy_id_mask = 0x00fffff0,
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_POLL,
+ .config_init = kszphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static struct phy_driver ksz9021_driver = {
+ .phy_id = PHY_ID_KSZ9021,
+ .phy_id_mask = 0x000fff10,
+ .name = "Micrel KSZ9021 Gigabit PHY",
+ .features = PHY_GBIT_FEATURES | SUPPORTED_Pause,
+ .flags = PHY_POLL,
+ .config_init = kszphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .driver = { .owner = THIS_MODULE, },
+};
+
+static int __init ksphy_init(void)
+{
+ int ret;
+
+ ret = phy_driver_register(&ks8001_driver);
+ if (ret)
+ goto err1;
+ ret = phy_driver_register(&vsc8201_driver);
+ if (ret)
+ goto err2;
+
+ ret = phy_driver_register(&ksz9021_driver);
+ if (ret)
+ goto err3;
+ return 0;
+
+err3:
+ phy_driver_unregister(&vsc8201_driver);
+err2:
+ phy_driver_unregister(&ks8001_driver);
+err1:
+ return ret;
+}
+
+static void __exit ksphy_exit(void)
+{
+ phy_driver_unregister(&ks8001_driver);
+ phy_driver_unregister(&vsc8201_driver);
+ phy_driver_unregister(&ksz9021_driver);
+}
+
+module_init(ksphy_init);
+module_exit(ksphy_exit);
+
+MODULE_DESCRIPTION("Micrel PHY driver");
+MODULE_AUTHOR("David J. Choi");
+MODULE_LICENSE("GPL");
+
+static struct mdio_device_id micrel_tbl[] = {
+ { PHY_ID_KSZ9021, 0x000fff10 },
+ { PHY_ID_VSC8201, 0x00fffff0 },
+ { PHY_ID_KS8001, 0x00fffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, micrel_tbl);
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 35f195329fdd..5441688daba7 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -405,6 +405,7 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
DECLARE_WAITQUEUE(wait, current);
ssize_t ret;
struct sk_buff *skb = NULL;
+ struct iovec iov;
ret = count;
@@ -448,7 +449,9 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
if (skb->len > count)
goto outf;
ret = -EFAULT;
- if (copy_to_user(buf, skb->data, skb->len))
+ iov.iov_base = buf;
+ iov.iov_len = count;
+ if (skb_copy_datagram_iovec(skb, 0, &iov, skb->len))
goto outf;
ret = skb->len;
@@ -1567,13 +1570,22 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
struct channel *pch = chan->ppp;
int proto;
- if (!pch || skb->len == 0) {
+ if (!pch) {
kfree_skb(skb);
return;
}
- proto = PPP_PROTO(skb);
read_lock_bh(&pch->upl);
+ if (!pskb_may_pull(skb, 2)) {
+ kfree_skb(skb);
+ if (pch->ppp) {
+ ++pch->ppp->dev->stats.rx_length_errors;
+ ppp_receive_error(pch->ppp);
+ }
+ goto done;
+ }
+
+ proto = PPP_PROTO(skb);
if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
/* put it on the channel queue */
skb_queue_tail(&pch->file.rq, skb);
@@ -1585,6 +1597,8 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
} else {
ppp_do_recv(pch->ppp, skb, pch);
}
+
+done:
read_unlock_bh(&pch->upl);
}
@@ -1617,7 +1631,8 @@ ppp_input_error(struct ppp_channel *chan, int code)
static void
ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
- if (pskb_may_pull(skb, 2)) {
+ /* note: a 0-length skb is used as an error indication */
+ if (skb->len > 0) {
#ifdef CONFIG_PPP_MULTILINK
/* XXX do channel-level decompression here */
if (PPP_PROTO(skb) == PPP_MP)
@@ -1625,15 +1640,10 @@ ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
else
#endif /* CONFIG_PPP_MULTILINK */
ppp_receive_nonmp_frame(ppp, skb);
- return;
+ } else {
+ kfree_skb(skb);
+ ppp_receive_error(ppp);
}
-
- if (skb->len > 0)
- /* note: a 0-length skb is used as an error indication */
- ++ppp->dev->stats.rx_length_errors;
-
- kfree_skb(skb);
- ppp_receive_error(ppp);
}
static void
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index ba4770a6c2e4..fec3c29b2ea8 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2256,17 +2256,36 @@ static int sbmac_init(struct platform_device *pldev, long long base)
sc->mii_bus = mdiobus_alloc();
if (sc->mii_bus == NULL) {
- sbmac_uninitctx(sc);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto uninit_ctx;
}
+ sc->mii_bus->name = sbmac_mdio_string;
+ snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
+ sc->mii_bus->priv = sc;
+ sc->mii_bus->read = sbmac_mii_read;
+ sc->mii_bus->write = sbmac_mii_write;
+ sc->mii_bus->irq = sc->phy_irq;
+ for (i = 0; i < PHY_MAX_ADDR; ++i)
+ sc->mii_bus->irq[i] = SBMAC_PHY_INT;
+
+ sc->mii_bus->parent = &pldev->dev;
+ /*
+ * Probe PHY address
+ */
+ err = mdiobus_register(sc->mii_bus);
+ if (err) {
+ printk(KERN_ERR "%s: unable to register MDIO bus\n",
+ dev->name);
+ goto free_mdio;
+ }
+ dev_set_drvdata(&pldev->dev, sc->mii_bus);
+
err = register_netdev(dev);
if (err) {
printk(KERN_ERR "%s.%d: unable to register netdev\n",
sbmac_string, idx);
- mdiobus_free(sc->mii_bus);
- sbmac_uninitctx(sc);
- return err;
+ goto unreg_mdio;
}
pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name);
@@ -2282,19 +2301,15 @@ static int sbmac_init(struct platform_device *pldev, long long base)
pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n",
dev->name, base, eaddr);
- sc->mii_bus->name = sbmac_mdio_string;
- snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
- sc->mii_bus->priv = sc;
- sc->mii_bus->read = sbmac_mii_read;
- sc->mii_bus->write = sbmac_mii_write;
- sc->mii_bus->irq = sc->phy_irq;
- for (i = 0; i < PHY_MAX_ADDR; ++i)
- sc->mii_bus->irq[i] = SBMAC_PHY_INT;
-
- sc->mii_bus->parent = &pldev->dev;
- dev_set_drvdata(&pldev->dev, sc->mii_bus);
-
return 0;
+unreg_mdio:
+ mdiobus_unregister(sc->mii_bus);
+ dev_set_drvdata(&pldev->dev, NULL);
+free_mdio:
+ mdiobus_free(sc->mii_bus);
+uninit_ctx:
+ sbmac_uninitctx(sc);
+ return err;
}
@@ -2320,16 +2335,6 @@ static int sbmac_open(struct net_device *dev)
goto out_err;
}
- /*
- * Probe PHY address
- */
- err = mdiobus_register(sc->mii_bus);
- if (err) {
- printk(KERN_ERR "%s: unable to register MDIO bus\n",
- dev->name);
- goto out_unirq;
- }
-
sc->sbm_speed = sbmac_speed_none;
sc->sbm_duplex = sbmac_duplex_none;
sc->sbm_fc = sbmac_fc_none;
@@ -2360,11 +2365,7 @@ static int sbmac_open(struct net_device *dev)
return 0;
out_unregister:
- mdiobus_unregister(sc->mii_bus);
-
-out_unirq:
free_irq(dev->irq, dev);
-
out_err:
return err;
}
@@ -2553,9 +2554,6 @@ static int sbmac_close(struct net_device *dev)
phy_disconnect(sc->phy_dev);
sc->phy_dev = NULL;
-
- mdiobus_unregister(sc->mii_bus);
-
free_irq(dev->irq, dev);
sbdma_emptyring(&(sc->sbm_txdma));
@@ -2662,6 +2660,7 @@ static int __exit sbmac_remove(struct platform_device *pldev)
unregister_netdev(dev);
sbmac_uninitctx(sc);
+ mdiobus_unregister(sc->mii_bus);
mdiobus_free(sc->mii_bus);
iounmap(sc->sbm_base);
free_netdev(dev);
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index bc75ef683c9f..156460527231 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1870,6 +1870,7 @@ out:
}
if (disabled) {
+ dev_close(efx->net_dev);
EFX_ERR(efx, "has been disabled\n");
efx->state = STATE_DISABLED;
} else {
@@ -1893,8 +1894,7 @@ static void efx_reset_work(struct work_struct *data)
}
rtnl_lock();
- if (efx_reset(efx, efx->reset_pending))
- dev_close(efx->net_dev);
+ (void)efx_reset(efx, efx->reset_pending);
rtnl_unlock();
}
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index f7df24dce38a..655b697b45b2 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1326,7 +1326,9 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
- falcon_probe_board(efx, board_rev);
+ rc = falcon_probe_board(efx, board_rev);
+ if (rc)
+ goto fail2;
kfree(nvconfig);
return 0;
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index 5712fddd72f2..c7a933a3292e 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -728,15 +728,7 @@ static const struct falcon_board_type board_types[] = {
},
};
-static const struct falcon_board_type falcon_dummy_board = {
- .init = efx_port_dummy_op_int,
- .init_phy = efx_port_dummy_op_void,
- .fini = efx_port_dummy_op_void,
- .set_id_led = efx_port_dummy_op_set_id_led,
- .monitor = efx_port_dummy_op_int,
-};
-
-void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
+int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
{
struct falcon_board *board = falcon_board(efx);
u8 type_id = FALCON_BOARD_TYPE(revision_info);
@@ -754,8 +746,9 @@ void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
(efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
? board->type->ref_model : board->type->gen_type,
'A' + board->major, board->minor);
+ return 0;
} else {
EFX_ERR(efx, "unknown board type %d\n", type_id);
- board->type = &falcon_dummy_board;
+ return -ENODEV;
}
}
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 5825f37b51bd..bbc2c0c2f843 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -158,7 +158,7 @@ extern struct efx_nic_type siena_a0_nic_type;
**************************************************************************
*/
-extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
/* TX data path */
extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 7bf93faff3ab..727b4228e081 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -475,8 +475,17 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
static void siena_update_nic_stats(struct efx_nic *efx)
{
- while (siena_try_update_nic_stats(efx) == -EAGAIN)
- cpu_relax();
+ int retry;
+
+ /* If we're unlucky enough to read statistics during the DMA, wait
+ * up to 10ms for it to finish (typically takes <500us) */
+ for (retry = 0; retry < 100; ++retry) {
+ if (siena_try_update_nic_stats(efx) == 0)
+ return;
+ udelay(100);
+ }
+
+ /* Use the old values instead */
}
static void siena_start_nic_stats(struct efx_nic *efx)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 20a17938c62b..e525a6cf5587 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -109,7 +109,7 @@ struct tun_struct {
struct tap_filter txflt;
struct socket socket;
-
+ struct socket_wq wq;
#ifdef TUN_DEBUG
int debug;
#endif
@@ -323,7 +323,7 @@ static void tun_net_uninit(struct net_device *dev)
/* Inform the methods they need to stop using the dev.
*/
if (tfile) {
- wake_up_all(&tun->socket.wait);
+ wake_up_all(&tun->wq.wait);
if (atomic_dec_and_test(&tfile->count))
__tun_detach(tun);
}
@@ -398,7 +398,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Notify and wake up reader process */
if (tun->flags & TUN_FASYNC)
kill_fasync(&tun->fasync, SIGIO, POLL_IN);
- wake_up_interruptible_poll(&tun->socket.wait, POLLIN |
+ wake_up_interruptible_poll(&tun->wq.wait, POLLIN |
POLLRDNORM | POLLRDBAND);
return NETDEV_TX_OK;
@@ -498,7 +498,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
- poll_wait(file, &tun->socket.wait, wait);
+ poll_wait(file, &tun->wq.wait, wait);
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
@@ -773,7 +773,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
- add_wait_queue(&tun->socket.wait, &wait);
+ add_wait_queue(&tun->wq.wait, &wait);
while (len) {
current->state = TASK_INTERRUPTIBLE;
@@ -804,7 +804,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
}
current->state = TASK_RUNNING;
- remove_wait_queue(&tun->socket.wait, &wait);
+ remove_wait_queue(&tun->wq.wait, &wait);
return ret;
}
@@ -861,6 +861,7 @@ static struct rtnl_link_ops tun_link_ops __read_mostly = {
static void tun_sock_write_space(struct sock *sk)
{
struct tun_struct *tun;
+ wait_queue_head_t *wqueue;
if (!sock_writeable(sk))
return;
@@ -868,8 +869,9 @@ static void tun_sock_write_space(struct sock *sk)
if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
return;
- if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
- wake_up_interruptible_sync_poll(sk_sleep(sk), POLLOUT |
+ wqueue = sk_sleep(sk);
+ if (wqueue && waitqueue_active(wqueue))
+ wake_up_interruptible_sync_poll(wqueue, POLLOUT |
POLLWRNORM | POLLWRBAND);
tun = tun_sk(sk)->tun;
@@ -1039,7 +1041,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (!sk)
goto err_free_dev;
- init_waitqueue_head(&tun->socket.wait);
+ tun->socket.wq = &tun->wq;
+ init_waitqueue_head(&tun->wq.wait);
tun->socket.ops = &tun_socket_ops;
sock_init_data(&tun->socket, sk);
sk->sk_write_space = tun_sock_write_space;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 63be4caec70e..d7b7018a1de1 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -397,4 +397,13 @@ config USB_IPHETH
For more information: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver
+config USB_SIERRA_NET
+ tristate "USB-to-WWAN Driver for Sierra Wireless modems"
+ depends on USB_USBNET
+ help
+ Choose this option if you have a Sierra Wireless USB-to-WWAN device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sierra_net.
+
endmenu
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index edb09c0ddf8e..b13a279663ba 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -24,4 +24,5 @@ obj-$(CONFIG_USB_USBNET) += usbnet.o
obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
obj-$(CONFIG_USB_IPHETH) += ipheth.o
+obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 811b2dc423d1..b3fe0de40469 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -466,6 +466,7 @@ static const struct driver_info mbm_info = {
.bind = cdc_bind,
.unbind = usbnet_cdc_unbind,
.status = cdc_status,
+ .manage_power = cdc_manage_power,
};
/*-------------------------------------------------------------------------*/
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 291add255246..47634b617107 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -240,7 +240,7 @@ static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 valu
goto out;
dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg);
- dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1c : 0x14);
+ dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1a : 0x12);
for (i = 0; i < DM_TIMEOUT; i++) {
u8 tmp;
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index fd1033130a81..418825d26f90 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -122,25 +122,25 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (tx_urb == NULL)
- goto error;
+ goto error_nomem;
rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (rx_urb == NULL)
- goto error;
+ goto free_tx_urb;
tx_buf = usb_buffer_alloc(iphone->udev,
IPHETH_BUF_SIZE,
GFP_KERNEL,
&tx_urb->transfer_dma);
if (tx_buf == NULL)
- goto error;
+ goto free_rx_urb;
rx_buf = usb_buffer_alloc(iphone->udev,
IPHETH_BUF_SIZE,
GFP_KERNEL,
&rx_urb->transfer_dma);
if (rx_buf == NULL)
- goto error;
+ goto free_tx_buf;
iphone->tx_urb = tx_urb;
@@ -149,13 +149,14 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
iphone->rx_buf = rx_buf;
return 0;
-error:
- usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, rx_buf,
- rx_urb->transfer_dma);
+free_tx_buf:
usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, tx_buf,
tx_urb->transfer_dma);
+free_rx_urb:
usb_free_urb(rx_urb);
+free_tx_urb:
usb_free_urb(tx_urb);
+error_nomem:
return -ENOMEM;
}
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 52671ea043a7..c4c334d9770f 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -145,6 +145,7 @@ static struct usb_device_id usb_klsi_table[] = {
{ USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */
{ USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */
{ USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */
+ { USB_DEVICE(0x07c9, 0xb010) }, /* Allied Telesyn AT-USB10 USB Ethernet Adapter */
{ USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */
{ USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */
{ USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
new file mode 100644
index 000000000000..f1942d69a0d5
--- /dev/null
+++ b/drivers/net/usb/sierra_net.c
@@ -0,0 +1,1004 @@
+/*
+ * USB-to-WWAN Driver for Sierra Wireless modems
+ *
+ * Copyright (C) 2008, 2009, 2010 Paxton Smith, Matthew Safar, Rory Filer
+ * <linux@sierrawireless.com>
+ *
+ * Portions of this based on the cdc_ether driver by David Brownell (2003-2005)
+ * and Ole Andre Vadla Ravnas (ActiveSync) (2006).
+ *
+ * IMPORTANT DISCLAIMER: This driver is not commercially supported by
+ * Sierra Wireless. Use at your own risk.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define DRIVER_VERSION "v.2.0"
+#define DRIVER_AUTHOR "Paxton Smith, Matthew Safar, Rory Filer"
+#define DRIVER_DESC "USB-to-WWAN Driver for Sierra Wireless modems"
+static const char driver_name[] = "sierra_net";
+
+/* define DEBUG to enable debug messages */
+/*#define DEBUG*/
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <asm/unaligned.h>
+#include <linux/usb/usbnet.h>
+
+#define SWI_USB_REQUEST_GET_FW_ATTR 0x06
+#define SWI_GET_FW_ATTR_MASK 0x08
+
+/* atomic counter partially included in MAC address to make sure 2 devices
+ * do not end up with the same MAC - concept breaks in case of > 255 ifaces
+ */
+static atomic_t iface_counter = ATOMIC_INIT(0);
+
+/*
+ * SYNC Timer Delay definition used to set the expiry time
+ */
+#define SIERRA_NET_SYNCDELAY (2*HZ)
+
+/* Max. MTU supported. The modem buffers are limited to 1500 */
+#define SIERRA_NET_MAX_SUPPORTED_MTU 1500
+
+/* The SIERRA_NET_USBCTL_BUF_LEN defines a buffer size allocated for control
+ * message reception ... and thus the max. received packet.
+ * (May be the cause for parse_hip returning -EINVAL)
+ */
+#define SIERRA_NET_USBCTL_BUF_LEN 1024
+
+/* list of interface numbers - used for constructing interface lists */
+struct sierra_net_iface_info {
+ const u32 infolen; /* number of interface numbers on list */
+ const u8 *ifaceinfo; /* pointer to the array holding the numbers */
+};
+
+struct sierra_net_info_data {
+ u16 rx_urb_size;
+ struct sierra_net_iface_info whitelist;
+};
+
+/* Private data structure */
+struct sierra_net_data {
+
+ u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */
+
+ u16 link_up; /* air link up or down */
+ u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */
+
+ u8 sync_msg[4]; /* SYNC message */
+ u8 shdwn_msg[4]; /* Shutdown message */
+
+ /* Backpointer to the container */
+ struct usbnet *usbnet;
+
+ u8 ifnum; /* interface number */
+
+/* Bit masks, must be a power of 2 */
+#define SIERRA_NET_EVENT_RESP_AVAIL 0x01
+#define SIERRA_NET_TIMER_EXPIRY 0x02
+ unsigned long kevent_flags;
+ struct work_struct sierra_net_kevent;
+ struct timer_list sync_timer; /* For retrying SYNC sequence */
+};
+
+struct param {
+ int is_present;
+ union {
+ void *ptr;
+ u32 dword;
+ u16 word;
+ u8 byte;
+ };
+};
+
+/* HIP message type */
+#define SIERRA_NET_HIP_EXTENDEDID 0x7F
+#define SIERRA_NET_HIP_HSYNC_ID 0x60 /* Modem -> host */
+#define SIERRA_NET_HIP_RESTART_ID 0x62 /* Modem -> host */
+#define SIERRA_NET_HIP_MSYNC_ID 0x20 /* Host -> modem */
+#define SIERRA_NET_HIP_SHUTD_ID 0x26 /* Host -> modem */
+
+#define SIERRA_NET_HIP_EXT_IP_IN_ID 0x0202
+#define SIERRA_NET_HIP_EXT_IP_OUT_ID 0x0002
+
+/* 3G UMTS Link Sense Indication definitions */
+#define SIERRA_NET_HIP_LSI_UMTSID 0x78
+
+/* Reverse Channel Grant Indication HIP message */
+#define SIERRA_NET_HIP_RCGI 0x64
+
+/* LSI Protocol types */
+#define SIERRA_NET_PROTOCOL_UMTS 0x01
+/* LSI Coverage */
+#define SIERRA_NET_COVERAGE_NONE 0x00
+#define SIERRA_NET_COVERAGE_NOPACKET 0x01
+
+/* LSI Session */
+#define SIERRA_NET_SESSION_IDLE 0x00
+/* LSI Link types */
+#define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00
+
+struct lsi_umts {
+ u8 protocol;
+ u8 unused1;
+ __be16 length;
+ /* eventually use a union for the rest - assume umts for now */
+ u8 coverage;
+ u8 unused2[41];
+ u8 session_state;
+ u8 unused3[33];
+ u8 link_type;
+ u8 pdp_addr_len; /* NW-supplied PDP address len */
+ u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian) */
+ u8 unused4[23];
+ u8 dns1_addr_len; /* NW-supplied 1st DNS address len (bigendian) */
+ u8 dns1_addr[16]; /* NW-supplied 1st DNS address */
+ u8 dns2_addr_len; /* NW-supplied 2nd DNS address len */
+ u8 dns2_addr[16]; /* NW-supplied 2nd DNS address (bigendian)*/
+ u8 wins1_addr_len; /* NW-supplied 1st Wins address len */
+ u8 wins1_addr[16]; /* NW-supplied 1st Wins address (bigendian)*/
+ u8 wins2_addr_len; /* NW-supplied 2nd Wins address len */
+ u8 wins2_addr[16]; /* NW-supplied 2nd Wins address (bigendian) */
+ u8 unused5[4];
+ u8 gw_addr_len; /* NW-supplied GW address len */
+ u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */
+ u8 reserved[8];
+} __attribute__ ((packed));
+
+#define SIERRA_NET_LSI_COMMON_LEN 4
+#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts))
+#define SIERRA_NET_LSI_UMTS_STATUS_LEN \
+ (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
+
+/* Forward definitions */
+static void sierra_sync_timer(unsigned long syncdata);
+static int sierra_net_change_mtu(struct net_device *net, int new_mtu);
+
+/* Our own net device operations structure */
+static const struct net_device_ops sierra_net_device_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = sierra_net_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/* get private data associated with passed in usbnet device */
+static inline struct sierra_net_data *sierra_net_get_private(struct usbnet *dev)
+{
+ return (struct sierra_net_data *)dev->data[0];
+}
+
+/* set private data associated with passed in usbnet device */
+static inline void sierra_net_set_private(struct usbnet *dev,
+ struct sierra_net_data *priv)
+{
+ dev->data[0] = (unsigned long)priv;
+}
+
+/* is packet IPv4 */
+static inline int is_ip(struct sk_buff *skb)
+{
+ return (skb->protocol == cpu_to_be16(ETH_P_IP));
+}
+
+/*
+ * check passed in packet and make sure that:
+ * - it is linear (no scatter/gather)
+ * - it is ethernet (mac_header properly set)
+ */
+static int check_ethip_packet(struct sk_buff *skb, struct usbnet *dev)
+{
+ skb_reset_mac_header(skb); /* ethernet header */
+
+ if (skb_is_nonlinear(skb)) {
+ netdev_err(dev->net, "Non linear buffer-dropping\n");
+ return 0;
+ }
+
+ if (!pskb_may_pull(skb, ETH_HLEN))
+ return 0;
+ skb->protocol = eth_hdr(skb)->h_proto;
+
+ return 1;
+}
+
+static const u8 *save16bit(struct param *p, const u8 *datap)
+{
+ p->is_present = 1;
+ p->word = get_unaligned_be16(datap);
+ return datap + sizeof(p->word);
+}
+
+static const u8 *save8bit(struct param *p, const u8 *datap)
+{
+ p->is_present = 1;
+ p->byte = *datap;
+ return datap + sizeof(p->byte);
+}
+
+/*----------------------------------------------------------------------------*
+ * BEGIN HIP *
+ *----------------------------------------------------------------------------*/
+/* HIP header */
+#define SIERRA_NET_HIP_HDR_LEN 4
+/* Extended HIP header */
+#define SIERRA_NET_HIP_EXT_HDR_LEN 6
+
+struct hip_hdr {
+ int hdrlen;
+ struct param payload_len;
+ struct param msgid;
+ struct param msgspecific;
+ struct param extmsgid;
+};
+
+static int parse_hip(const u8 *buf, const u32 buflen, struct hip_hdr *hh)
+{
+ const u8 *curp = buf;
+ int padded;
+
+ if (buflen < SIERRA_NET_HIP_HDR_LEN)
+ return -EPROTO;
+
+ curp = save16bit(&hh->payload_len, curp);
+ curp = save8bit(&hh->msgid, curp);
+ curp = save8bit(&hh->msgspecific, curp);
+
+ padded = hh->msgid.byte & 0x80;
+ hh->msgid.byte &= 0x7F; /* 7 bits */
+
+ hh->extmsgid.is_present = (hh->msgid.byte == SIERRA_NET_HIP_EXTENDEDID);
+ if (hh->extmsgid.is_present) {
+ if (buflen < SIERRA_NET_HIP_EXT_HDR_LEN)
+ return -EPROTO;
+
+ hh->payload_len.word &= 0x3FFF; /* 14 bits */
+
+ curp = save16bit(&hh->extmsgid, curp);
+ hh->extmsgid.word &= 0x03FF; /* 10 bits */
+
+ hh->hdrlen = SIERRA_NET_HIP_EXT_HDR_LEN;
+ } else {
+ hh->payload_len.word &= 0x07FF; /* 11 bits */
+ hh->hdrlen = SIERRA_NET_HIP_HDR_LEN;
+ }
+
+ if (padded) {
+ hh->hdrlen++;
+ hh->payload_len.word--;
+ }
+
+ /* if real packet shorter than the claimed length */
+ if (buflen < (hh->hdrlen + hh->payload_len.word))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void build_hip(u8 *buf, const u16 payloadlen,
+ struct sierra_net_data *priv)
+{
+ /* the following doesn't have the full functionality. We
+ * currently build only one kind of header, so it is faster this way
+ */
+ put_unaligned_be16(payloadlen, buf);
+ memcpy(buf+2, priv->tx_hdr_template, sizeof(priv->tx_hdr_template));
+}
+/*----------------------------------------------------------------------------*
+ * END HIP *
+ *----------------------------------------------------------------------------*/
+
+static int sierra_net_send_cmd(struct usbnet *dev,
+ u8 *cmd, int cmdlen, const char * cmd_name)
+{
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+ int status;
+
+ status = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ USB_CDC_SEND_ENCAPSULATED_COMMAND,
+ USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE, 0,
+ priv->ifnum, cmd, cmdlen, USB_CTRL_SET_TIMEOUT);
+
+ if (status != cmdlen && status != -ENODEV)
+ netdev_err(dev->net, "Submit %s failed %d\n", cmd_name, status);
+
+ return status;
+}
+
+static int sierra_net_send_sync(struct usbnet *dev)
+{
+ int status;
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+
+ status = sierra_net_send_cmd(dev, priv->sync_msg,
+ sizeof(priv->sync_msg), "SYNC");
+
+ return status;
+}
+
+static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix)
+{
+ dev_dbg(&(priv->usbnet->udev->dev), "%s %d", __func__, ctx_ix);
+ priv->tx_hdr_template[0] = 0x3F;
+ priv->tx_hdr_template[1] = ctx_ix;
+ *((u16 *)&priv->tx_hdr_template[2]) =
+ cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID);
+}
+
+static inline int sierra_net_is_valid_addrlen(u8 len)
+{
+ return (len == sizeof(struct in_addr));
+}
+
+static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
+{
+ struct lsi_umts *lsi = (struct lsi_umts *)data;
+
+ if (datalen < sizeof(struct lsi_umts)) {
+ netdev_err(dev->net, "%s: Data length %d, exp %Zu\n",
+ __func__, datalen,
+ sizeof(struct lsi_umts));
+ return -1;
+ }
+
+ if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) {
+ netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
+ __func__, be16_to_cpu(lsi->length),
+ (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN);
+ return -1;
+ }
+
+ /* Validate the protocol - only support UMTS for now */
+ if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) {
+ netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
+ lsi->protocol);
+ return -1;
+ }
+
+ /* Validate the link type */
+ if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) {
+ netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
+ lsi->link_type);
+ return -1;
+ }
+
+ /* Validate the coverage */
+ if (lsi->coverage == SIERRA_NET_COVERAGE_NONE
+ || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
+ netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
+ return 0;
+ }
+
+ /* Validate the session state */
+ if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
+ netdev_err(dev->net, "Session idle, 0x%02x\n",
+ lsi->session_state);
+ return 0;
+ }
+
+ /* Set link_sense true */
+ return 1;
+}
+
+static void sierra_net_handle_lsi(struct usbnet *dev, char *data,
+ struct hip_hdr *hh)
+{
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+ int link_up;
+
+ link_up = sierra_net_parse_lsi(dev, data + hh->hdrlen,
+ hh->payload_len.word);
+ if (link_up < 0) {
+ netdev_err(dev->net, "Invalid LSI\n");
+ return;
+ }
+ if (link_up) {
+ sierra_net_set_ctx_index(priv, hh->msgspecific.byte);
+ priv->link_up = 1;
+ netif_carrier_on(dev->net);
+ } else {
+ priv->link_up = 0;
+ netif_carrier_off(dev->net);
+ }
+}
+
+static void sierra_net_dosync(struct usbnet *dev)
+{
+ int status;
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+
+ /* tell modem we are ready */
+ status = sierra_net_send_sync(dev);
+ if (status < 0)
+ netdev_err(dev->net,
+ "Send SYNC failed, status %d\n", status);
+ status = sierra_net_send_sync(dev);
+ if (status < 0)
+ netdev_err(dev->net,
+ "Send SYNC failed, status %d\n", status);
+
+ /* Now, start a timer and make sure we get the Restart Indication */
+ priv->sync_timer.function = sierra_sync_timer;
+ priv->sync_timer.data = (unsigned long) dev;
+ priv->sync_timer.expires = jiffies + SIERRA_NET_SYNCDELAY;
+ add_timer(&priv->sync_timer);
+}
+
+static void sierra_net_kevent(struct work_struct *work)
+{
+ struct sierra_net_data *priv =
+ container_of(work, struct sierra_net_data, sierra_net_kevent);
+ struct usbnet *dev = priv->usbnet;
+ int len;
+ int err;
+ u8 *buf;
+ u8 ifnum;
+
+ if (test_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags)) {
+ clear_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags);
+
+ /* Query the modem for the LSI message */
+ buf = kzalloc(SIERRA_NET_USBCTL_BUF_LEN, GFP_KERNEL);
+ if (!buf) {
+ netdev_err(dev->net,
+ "failed to allocate buf for LS msg\n");
+ return;
+ }
+ ifnum = priv->ifnum;
+ len = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+ USB_CDC_GET_ENCAPSULATED_RESPONSE,
+ USB_DIR_IN|USB_TYPE_CLASS|USB_RECIP_INTERFACE,
+ 0, ifnum, buf, SIERRA_NET_USBCTL_BUF_LEN,
+ USB_CTRL_SET_TIMEOUT);
+
+ if (len < 0) {
+ netdev_err(dev->net,
+ "usb_control_msg failed, status %d\n", len);
+ } else {
+ struct hip_hdr hh;
+
+ dev_dbg(&dev->udev->dev, "%s: Received status message,"
+ " %04x bytes", __func__, len);
+
+ err = parse_hip(buf, len, &hh);
+ if (err) {
+ netdev_err(dev->net, "%s: Bad packet,"
+ " parse result %d\n", __func__, err);
+ kfree(buf);
+ return;
+ }
+
+ /* Validate packet length */
+ if (len != hh.hdrlen + hh.payload_len.word) {
+ netdev_err(dev->net, "%s: Bad packet, received"
+ " %d, expected %d\n", __func__, len,
+ hh.hdrlen + hh.payload_len.word);
+ kfree(buf);
+ return;
+ }
+
+ /* Switch on received message types */
+ switch (hh.msgid.byte) {
+ case SIERRA_NET_HIP_LSI_UMTSID:
+ dev_dbg(&dev->udev->dev, "LSI for ctx:%d",
+ hh.msgspecific.byte);
+ sierra_net_handle_lsi(dev, buf, &hh);
+ break;
+ case SIERRA_NET_HIP_RESTART_ID:
+ dev_dbg(&dev->udev->dev, "Restart reported: %d,"
+ " stopping sync timer",
+ hh.msgspecific.byte);
+ /* Got sync resp - stop timer & clear mask */
+ del_timer_sync(&priv->sync_timer);
+ clear_bit(SIERRA_NET_TIMER_EXPIRY,
+ &priv->kevent_flags);
+ break;
+ case SIERRA_NET_HIP_HSYNC_ID:
+ dev_dbg(&dev->udev->dev, "SYNC received");
+ err = sierra_net_send_sync(dev);
+ if (err < 0)
+ netdev_err(dev->net,
+ "Send SYNC failed %d\n", err);
+ break;
+ case SIERRA_NET_HIP_EXTENDEDID:
+ netdev_err(dev->net, "Unrecognized HIP msg, "
+ "extmsgid 0x%04x\n", hh.extmsgid.word);
+ break;
+ case SIERRA_NET_HIP_RCGI:
+ /* Ignored */
+ break;
+ default:
+ netdev_err(dev->net, "Unrecognized HIP msg, "
+ "msgid 0x%02x\n", hh.msgid.byte);
+ break;
+ }
+ }
+ kfree(buf);
+ }
+ /* The sync timer bit might be set */
+ if (test_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags)) {
+ clear_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags);
+ dev_dbg(&dev->udev->dev, "Deferred sync timer expiry");
+ sierra_net_dosync(priv->usbnet);
+ }
+
+ if (priv->kevent_flags)
+ dev_dbg(&dev->udev->dev, "sierra_net_kevent done, "
+ "kevent_flags = 0x%lx", priv->kevent_flags);
+}
+
+static void sierra_net_defer_kevent(struct usbnet *dev, int work)
+{
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+
+ set_bit(work, &priv->kevent_flags);
+ schedule_work(&priv->sierra_net_kevent);
+}
+
+/*
+ * Sync Retransmit Timer Handler. On expiry, kick the work queue
+ */
+void sierra_sync_timer(unsigned long syncdata)
+{
+ struct usbnet *dev = (struct usbnet *)syncdata;
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+ /* Kick the tasklet */
+ sierra_net_defer_kevent(dev, SIERRA_NET_TIMER_EXPIRY);
+}
+
+static void sierra_net_status(struct usbnet *dev, struct urb *urb)
+{
+ struct usb_cdc_notification *event;
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+
+ if (urb->actual_length < sizeof *event)
+ return;
+
+ /* Add cases to handle other standard notifications. */
+ event = urb->transfer_buffer;
+ switch (event->bNotificationType) {
+ case USB_CDC_NOTIFY_NETWORK_CONNECTION:
+ case USB_CDC_NOTIFY_SPEED_CHANGE:
+ /* USB 305 sends those */
+ break;
+ case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
+ sierra_net_defer_kevent(dev, SIERRA_NET_EVENT_RESP_AVAIL);
+ break;
+ default:
+ netdev_err(dev->net, ": unexpected notification %02x!\n",
+ event->bNotificationType);
+ break;
+ }
+}
+
+static void sierra_net_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *info)
+{
+ /* Inherit standard device info */
+ usbnet_get_drvinfo(net, info);
+ strncpy(info->driver, driver_name, sizeof info->driver);
+ strncpy(info->version, DRIVER_VERSION, sizeof info->version);
+}
+
+static u32 sierra_net_get_link(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+ /* Report link is down whenever the interface is down */
+ return sierra_net_get_private(dev)->link_up && netif_running(net);
+}
+
+static struct ethtool_ops sierra_net_ethtool_ops = {
+ .get_drvinfo = sierra_net_get_drvinfo,
+ .get_link = sierra_net_get_link,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_settings = usbnet_get_settings,
+ .set_settings = usbnet_set_settings,
+ .nway_reset = usbnet_nway_reset,
+};
+
+/* MTU can not be more than 1500 bytes, enforce it. */
+static int sierra_net_change_mtu(struct net_device *net, int new_mtu)
+{
+ if (new_mtu > SIERRA_NET_MAX_SUPPORTED_MTU)
+ return -EINVAL;
+
+ return usbnet_change_mtu(net, new_mtu);
+}
+
+static int is_whitelisted(const u8 ifnum,
+ const struct sierra_net_iface_info *whitelist)
+{
+ if (whitelist) {
+ const u8 *list = whitelist->ifaceinfo;
+ int i;
+
+ for (i = 0; i < whitelist->infolen; i++) {
+ if (list[i] == ifnum)
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
+{
+ int result = 0;
+ u16 *attrdata;
+
+ attrdata = kmalloc(sizeof(*attrdata), GFP_KERNEL);
+ if (!attrdata)
+ return -ENOMEM;
+
+ result = usb_control_msg(
+ dev->udev,
+ usb_rcvctrlpipe(dev->udev, 0),
+ /* _u8 vendor specific request */
+ SWI_USB_REQUEST_GET_FW_ATTR,
+ USB_DIR_IN | USB_TYPE_VENDOR, /* __u8 request type */
+ 0x0000, /* __u16 value not used */
+ 0x0000, /* __u16 index not used */
+ attrdata, /* char *data */
+ sizeof(*attrdata), /* __u16 size */
+ USB_CTRL_SET_TIMEOUT); /* int timeout */
+
+ if (result < 0) {
+ kfree(attrdata);
+ return -EIO;
+ }
+
+ *datap = *attrdata;
+
+ kfree(attrdata);
+ return result;
+}
+
+/*
+ * collects the bulk endpoints, the status endpoint.
+ */
+static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ u8 ifacenum;
+ u8 numendpoints;
+ u16 fwattr = 0;
+ int status;
+ struct ethhdr *eth;
+ struct sierra_net_data *priv;
+ static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
+ 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
+ static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = {
+ 0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00};
+
+ struct sierra_net_info_data *data =
+ (struct sierra_net_info_data *)dev->driver_info->data;
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+
+ ifacenum = intf->cur_altsetting->desc.bInterfaceNumber;
+ /* We only accept certain interfaces */
+ if (!is_whitelisted(ifacenum, &data->whitelist)) {
+ dev_dbg(&dev->udev->dev, "Ignoring interface: %d", ifacenum);
+ return -ENODEV;
+ }
+ numendpoints = intf->cur_altsetting->desc.bNumEndpoints;
+ /* We have three endpoints, bulk in and out, and a status */
+ if (numendpoints != 3) {
+ dev_err(&dev->udev->dev, "Expected 3 endpoints, found: %d",
+ numendpoints);
+ return -ENODEV;
+ }
+ /* Status endpoint set in usbnet_get_endpoints() */
+ dev->status = NULL;
+ status = usbnet_get_endpoints(dev, intf);
+ if (status < 0) {
+ dev_err(&dev->udev->dev, "Error in usbnet_get_endpoints (%d)",
+ status);
+ return -ENODEV;
+ }
+ /* Initialize sierra private data */
+ priv = kzalloc(sizeof *priv, GFP_KERNEL);
+ if (!priv) {
+ dev_err(&dev->udev->dev, "No memory");
+ return -ENOMEM;
+ }
+
+ priv->usbnet = dev;
+ priv->ifnum = ifacenum;
+ dev->net->netdev_ops = &sierra_net_device_ops;
+
+ /* change MAC addr to include ifacenum, and to be unique */
+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
+ dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
+
+ /* we will have to manufacture ethernet headers, prepare template */
+ eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
+ memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN);
+ eth->h_proto = cpu_to_be16(ETH_P_IP);
+
+ /* prepare shutdown message template */
+ memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
+ /* set context index initially to 0 - prepares tx hdr template */
+ sierra_net_set_ctx_index(priv, 0);
+
+ /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */
+ dev->rx_urb_size = data->rx_urb_size;
+ if (dev->udev->speed != USB_SPEED_HIGH)
+ dev->rx_urb_size = min_t(size_t, 4096, data->rx_urb_size);
+
+ dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN;
+ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+ /* Set up the netdev */
+ dev->net->flags |= IFF_NOARP;
+ dev->net->ethtool_ops = &sierra_net_ethtool_ops;
+ netif_carrier_off(dev->net);
+
+ sierra_net_set_private(dev, priv);
+
+ priv->kevent_flags = 0;
+
+ /* Use the shared workqueue */
+ INIT_WORK(&priv->sierra_net_kevent, sierra_net_kevent);
+
+ /* Only need to do this once */
+ init_timer(&priv->sync_timer);
+
+ /* verify fw attributes */
+ status = sierra_net_get_fw_attr(dev, &fwattr);
+ dev_dbg(&dev->udev->dev, "Fw attr: %x\n", fwattr);
+
+ /* test whether firmware supports DHCP */
+ if (!(status == sizeof(fwattr) && (fwattr & SWI_GET_FW_ATTR_MASK))) {
+ /* found incompatible firmware version */
+ dev_err(&dev->udev->dev, "Incompatible driver and firmware"
+ " versions\n");
+ kfree(priv);
+ return -ENODEV;
+ }
+ /* prepare sync message from template */
+ memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg));
+
+ /* initiate the sync sequence */
+ sierra_net_dosync(dev);
+
+ return 0;
+}
+
+static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int status;
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+
+ /* Kill the timer then flush the work queue */
+ del_timer_sync(&priv->sync_timer);
+
+ flush_scheduled_work();
+
+ /* tell modem we are going away */
+ status = sierra_net_send_cmd(dev, priv->shdwn_msg,
+ sizeof(priv->shdwn_msg), "Shutdown");
+ if (status < 0)
+ netdev_err(dev->net,
+ "usb_control_msg failed, status %d\n", status);
+
+ sierra_net_set_private(dev, NULL);
+
+ kfree(priv);
+}
+
+static struct sk_buff *sierra_net_skb_clone(struct usbnet *dev,
+ struct sk_buff *skb, int len)
+{
+ struct sk_buff *new_skb;
+
+ /* clone skb */
+ new_skb = skb_clone(skb, GFP_ATOMIC);
+
+ /* remove len bytes from original */
+ skb_pull(skb, len);
+
+ /* trim next packet to its length */
+ if (new_skb) {
+ skb_trim(new_skb, len);
+ } else {
+ if (netif_msg_rx_err(dev))
+ netdev_err(dev->net, "failed to get skb\n");
+ dev->net->stats.rx_dropped++;
+ }
+
+ return new_skb;
+}
+
+/* ---------------------------- Receive data path ----------------------*/
+static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ int err;
+ struct hip_hdr hh;
+ struct sk_buff *new_skb;
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+
+ /* could contain multiple packets */
+ while (likely(skb->len)) {
+ err = parse_hip(skb->data, skb->len, &hh);
+ if (err) {
+ if (netif_msg_rx_err(dev))
+ netdev_err(dev->net, "Invalid HIP header %d\n",
+ err);
+ /* dev->net->stats.rx_errors incremented by caller */
+ dev->net->stats.rx_length_errors++;
+ return 0;
+ }
+
+ /* Validate Extended HIP header */
+ if (!hh.extmsgid.is_present
+ || hh.extmsgid.word != SIERRA_NET_HIP_EXT_IP_IN_ID) {
+ if (netif_msg_rx_err(dev))
+ netdev_err(dev->net, "HIP/ETH: Invalid pkt\n");
+
+ dev->net->stats.rx_frame_errors++;
+ /* dev->net->stats.rx_errors incremented by caller */
+ return 0;
+ }
+
+ skb_pull(skb, hh.hdrlen);
+
+ /* We are going to accept this packet, prepare it */
+ memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl,
+ ETH_HLEN);
+
+ /* Last packet in batch handled by usbnet */
+ if (hh.payload_len.word == skb->len)
+ return 1;
+
+ new_skb = sierra_net_skb_clone(dev, skb, hh.payload_len.word);
+ if (new_skb)
+ usbnet_skb_return(dev, new_skb);
+
+ } /* while */
+
+ return 0;
+}
+
+/* ---------------------------- Transmit data path ----------------------*/
+struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+{
+ struct sierra_net_data *priv = sierra_net_get_private(dev);
+ u16 len;
+ bool need_tail;
+
+ dev_dbg(&dev->udev->dev, "%s", __func__);
+ if (priv->link_up && check_ethip_packet(skb, dev) && is_ip(skb)) {
+ /* enough head room as is? */
+ if (SIERRA_NET_HIP_EXT_HDR_LEN <= skb_headroom(skb)) {
+ /* Save the Eth/IP length and set up HIP hdr */
+ len = skb->len;
+ skb_push(skb, SIERRA_NET_HIP_EXT_HDR_LEN);
+ /* Handle ZLP issue */
+ need_tail = ((len + SIERRA_NET_HIP_EXT_HDR_LEN)
+ % dev->maxpacket == 0);
+ if (need_tail) {
+ if (unlikely(skb_tailroom(skb) == 0)) {
+ netdev_err(dev->net, "tx_fixup: "
+ "no room for packet\n");
+ dev_kfree_skb_any(skb);
+ return NULL;
+ } else {
+ skb->data[skb->len] = 0;
+ __skb_put(skb, 1);
+ len = len + 1;
+ }
+ }
+ build_hip(skb->data, len, priv);
+ return skb;
+ } else {
+ /*
+ * compensate in the future if necessary
+ */
+ netdev_err(dev->net, "tx_fixup: no room for HIP\n");
+ } /* headroom */
+ }
+
+ if (!priv->link_up)
+ dev->net->stats.tx_carrier_errors++;
+
+ /* tx_dropped incremented by usbnet */
+
+ /* filter the packet out, release it */
+ dev_kfree_skb_any(skb);
+ return NULL;
+}
+
+static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
+static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
+ .rx_urb_size = 8 * 1024,
+ .whitelist = {
+ .infolen = ARRAY_SIZE(sierra_net_ifnum_list),
+ .ifaceinfo = sierra_net_ifnum_list
+ }
+};
+
+static const struct driver_info sierra_net_info_68A3 = {
+ .description = "Sierra Wireless USB-to-WWAN Modem",
+ .flags = FLAG_WWAN | FLAG_SEND_ZLP,
+ .bind = sierra_net_bind,
+ .unbind = sierra_net_unbind,
+ .status = sierra_net_status,
+ .rx_fixup = sierra_net_rx_fixup,
+ .tx_fixup = sierra_net_tx_fixup,
+ .data = (unsigned long)&sierra_net_info_data_68A3,
+};
+
+static const struct usb_device_id products[] = {
+ {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
+ .driver_info = (unsigned long) &sierra_net_info_68A3},
+
+ {}, /* last item */
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+/* We are based on usbnet, so let it handle the USB driver specifics */
+static struct usb_driver sierra_net_driver = {
+ .name = "sierra_net",
+ .id_table = products,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+ .no_dynamic_id = 1,
+};
+
+static int __init sierra_net_init(void)
+{
+ BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data)
+ < sizeof(struct cdc_state));
+
+ return usb_register(&sierra_net_driver);
+}
+
+static void __exit sierra_net_exit(void)
+{
+ usb_deregister(&sierra_net_driver);
+}
+
+module_exit(sierra_net_exit);
+module_init(sierra_net_init);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
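
Note on the tx path above: a bulk-OUT transfer whose total length is an exact multiple of the endpoint's maxpacket does not terminate on its own, so tx_fixup() appends one pad byte rather than relying on a zero-length packet. A minimal standalone sketch of that check (plain C, not part of the patch; the 12-byte header and 512-byte maxpacket are illustrative assumptions):

	#include <stdbool.h>
	#include <stddef.h>

	/* Sketch: a bulk-OUT transfer that lands exactly on a maxpacket
	 * boundary needs one extra byte (or a ZLP) to mark its end. */
	static bool needs_pad_byte(size_t eth_len, size_t hip_hdr_len, size_t maxpacket)
	{
		return (eth_len + hip_hdr_len) % maxpacket == 0;
	}

	/* Example: eth_len = 500, hip_hdr_len = 12, maxpacket = 512
	 * gives 512 % 512 == 0, so one pad byte is appended. */

The padded length is what the driver then records in the HIP header, which is why len is bumped to len + 1 before build_hip() runs.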
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 679da7e7522e..ca42ccb23d76 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -246,7 +246,7 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
u32 idx, i;
i = (*index) % ring_limit;
- (*index) = idx = le32_to_cpu(ring_control->device_idx[1]);
+ (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
idx %= ring_limit;
while (i != idx) {
diff --git a/include/linux/net.h b/include/linux/net.h
index 4157b5d42bd6..2b4deeeb8646 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -59,6 +59,7 @@ typedef enum {
#include <linux/wait.h>
#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */
#include <linux/kmemcheck.h>
+#include <linux/rcupdate.h>
struct poll_table_struct;
struct pipe_inode_info;
@@ -116,6 +117,12 @@ enum sock_shutdown_cmd {
SHUT_RDWR = 2,
};
+struct socket_wq {
+ wait_queue_head_t wait;
+ struct fasync_struct *fasync_list;
+ struct rcu_head rcu;
+} ____cacheline_aligned_in_smp;
+
/**
* struct socket - general BSD socket
* @state: socket state (%SS_CONNECTED, etc)
@@ -135,11 +142,8 @@ struct socket {
kmemcheck_bitfield_end(type);
unsigned long flags;
- /*
- * Please keep fasync_list & wait fields in the same cache line
- */
- struct fasync_struct *fasync_list;
- wait_queue_head_t wait;
+
+ struct socket_wq *wq;
struct file *file;
struct sock *sk;
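
struct socket_wq gathers the wait queue head and the fasync list into one cacheline-aligned object with its own rcu_head, so wakeup paths can reach both under rcu_read_lock() instead of sk_callback_lock, and the object can outlive the socket until a grace period has passed. A rough lifecycle sketch in the spirit of the net/socket.c changes later in this patch (kernel-style C; sock_wq_alloc, sock_wq_free_rcu and sock_wq_release are hypothetical helper names, error handling trimmed):

	static struct socket_wq *sock_wq_alloc(struct socket *sock)
	{
		struct socket_wq *wq = kmalloc(sizeof(*wq), GFP_KERNEL);

		if (!wq)
			return NULL;
		init_waitqueue_head(&wq->wait);
		wq->fasync_list = NULL;
		rcu_assign_pointer(sock->wq, wq);
		return wq;
	}

	static void sock_wq_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct socket_wq, rcu));
	}

	/* teardown: readers may still be dereferencing sock->wq under
	 * rcu_read_lock(), so free only after a grace period */
	static void sock_wq_release(struct socket *sock)
	{
		call_rcu(&sock->wq->rcu, sock_wq_free_rcu);
	}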
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 40d4c20d034b..98112fbddefd 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -218,16 +218,6 @@ struct neighbour;
struct neigh_parms;
struct sk_buff;
-struct netif_rx_stats {
- unsigned total;
- unsigned dropped;
- unsigned time_squeeze;
- unsigned cpu_collision;
- unsigned received_rps;
-};
-
-DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
-
struct netdev_hw_addr {
struct list_head list;
unsigned char addr[MAX_ADDR_LEN];
@@ -888,7 +878,7 @@ struct net_device {
unsigned char operstate; /* RFC2863 operstate */
unsigned char link_mode; /* mapping policy to operstate */
- unsigned mtu; /* interface MTU value */
+ unsigned int mtu; /* interface MTU value */
unsigned short type; /* interface hardware type */
unsigned short hard_header_len; /* hardware hdr length */
@@ -1390,6 +1380,12 @@ struct softnet_data {
struct sk_buff *completion_queue;
struct sk_buff_head process_queue;
+ /* stats */
+ unsigned int processed;
+ unsigned int time_squeeze;
+ unsigned int cpu_collision;
+ unsigned int received_rps;
+
#ifdef CONFIG_RPS
struct softnet_data *rps_ipi_list;
@@ -1399,6 +1395,7 @@ struct softnet_data {
unsigned int cpu;
unsigned int input_queue_head;
#endif
+ unsigned dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
};
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 004908b104d5..4ec3b38ce9c5 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -429,6 +429,23 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
pos = rcu_dereference_raw(pos->next))
/**
+ * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_node to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define hlist_for_each_entry_rcu_bh(tpos, pos, head, member) \
+ for (pos = rcu_dereference_bh((head)->first); \
+ pos && ({ prefetch(pos->next); 1; }) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
+ pos = rcu_dereference_bh(pos->next))
+
+/**
* hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_node to use as a loop cursor.
@@ -440,6 +457,18 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
pos = rcu_dereference(pos->next))
+/**
+ * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_node to use as a loop cursor.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member) \
+ for (pos = rcu_dereference_bh((pos)->next); \
+ pos && ({ prefetch(pos->next); 1; }) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
+ pos = rcu_dereference_bh(pos->next))
+
#endif /* __KERNEL__ */
#endif
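
The _bh iterators mirror hlist_for_each_entry_rcu() but dereference with rcu_dereference_bh(), matching readers that run under rcu_read_lock_bh() the way the converted addrconf.c hash walks below do. A minimal usage sketch (kernel-style C; struct item and item_lookup() are made-up names, and updaters are assumed to use hlist_add_head_rcu()/hlist_del_rcu() under their own lock):

	struct item {
		struct hlist_node node;
		int key;
	};

	/* Sketch: lookup under a BH-disabling RCU read-side section. */
	static struct item *item_lookup(struct hlist_head *head, int key)
	{
		struct item *it;
		struct hlist_node *pos;

		rcu_read_lock_bh();
		hlist_for_each_entry_rcu_bh(it, pos, head, node) {
			if (it->key == key) {
				rcu_read_unlock_bh();
				return it;
			}
		}
		rcu_read_unlock_bh();
		return NULL;
	}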
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 82f5116a89e4..746a652b9f6f 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1128,6 +1128,11 @@ static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
return skb->data += len;
}
+static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
+{
+ return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
+}
+
extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 1614d78c60ed..20725e213aee 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -30,7 +30,7 @@ struct unix_skb_parms {
#endif
};
-#define UNIXCB(skb) (*(struct unix_skb_parms*)&((skb)->cb))
+#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
#define UNIXCREDS(skb) (&UNIXCB((skb)).creds)
#define UNIXSID(skb) (&UNIXCB((skb)).secid)
@@ -45,21 +45,23 @@ struct unix_skb_parms {
struct unix_sock {
/* WARNING: sk has to be the first member */
struct sock sk;
- struct unix_address *addr;
- struct dentry *dentry;
- struct vfsmount *mnt;
+ struct unix_address *addr;
+ struct dentry *dentry;
+ struct vfsmount *mnt;
struct mutex readlock;
- struct sock *peer;
- struct sock *other;
+ struct sock *peer;
+ struct sock *other;
struct list_head link;
- atomic_long_t inflight;
- spinlock_t lock;
+ atomic_long_t inflight;
+ spinlock_t lock;
unsigned int gc_candidate : 1;
unsigned int gc_maybe_cycle : 1;
- wait_queue_head_t peer_wait;
+ struct socket_wq peer_wq;
};
#define unix_sk(__sk) ((struct unix_sock *)__sk)
+#define peer_wait peer_wq.wait
+
#ifdef CONFIG_SYSCTL
extern int unix_sysctl_register(struct net *net);
extern void unix_sysctl_unregister(struct net *net);
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 8be5135ff7aa..2c55a7ea20af 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -107,6 +107,7 @@ typedef enum {
SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
SCTP_CMD_SEND_MSG, /* Send the whole user message */
+ SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
SCTP_CMD_LAST
} sctp_verb_t;
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 289241d31cc1..65946bc43d00 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -128,6 +128,7 @@ extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
void sctp_write_space(struct sock *sk);
+void sctp_data_ready(struct sock *sk, int len);
unsigned int sctp_poll(struct file *file, struct socket *sock,
poll_table *wait);
void sctp_sock_rfree(struct sk_buff *skb);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 9d44aef365da..43257b903c82 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -775,6 +775,7 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
struct iovec *data);
void sctp_chunk_free(struct sctp_chunk *);
void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
+void *sctp_addto_chunk_fixed(struct sctp_chunk *, int len, const void *data);
struct sctp_chunk *sctp_chunkify(struct sk_buff *,
const struct sctp_association *,
struct sock *);
diff --git a/include/net/sock.h b/include/net/sock.h
index e1777db5b9ab..328e03f47dd1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -74,7 +74,7 @@
printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
-static void inline int __attribute__ ((format (printf, 2, 3)))
+static inline void __attribute__ ((format (printf, 2, 3)))
SOCK_DEBUG(struct sock *sk, const char *msg, ...)
{
}
@@ -159,7 +159,7 @@ struct sock_common {
* @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
* @sk_lock: synchronizer
* @sk_rcvbuf: size of receive buffer in bytes
- * @sk_sleep: sock wait queue
+ * @sk_wq: sock wait queue and async head
* @sk_dst_cache: destination cache
* @sk_dst_lock: destination cache lock
* @sk_policy: flow policy
@@ -257,7 +257,7 @@ struct sock {
struct sk_buff *tail;
int len;
} sk_backlog;
- wait_queue_head_t *sk_sleep;
+ struct socket_wq *sk_wq;
struct dst_entry *sk_dst_cache;
#ifdef CONFIG_XFRM
struct xfrm_policy *sk_policy[2];
@@ -1219,7 +1219,7 @@ static inline void sk_set_socket(struct sock *sk, struct socket *sock)
static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
- return sk->sk_sleep;
+ return &sk->sk_wq->wait;
}
/* Detach socket from process context.
* Announce socket dead, detach it from wait queue and inode.
@@ -1233,14 +1233,14 @@ static inline void sock_orphan(struct sock *sk)
write_lock_bh(&sk->sk_callback_lock);
sock_set_flag(sk, SOCK_DEAD);
sk_set_socket(sk, NULL);
- sk->sk_sleep = NULL;
+ sk->sk_wq = NULL;
write_unlock_bh(&sk->sk_callback_lock);
}
static inline void sock_graft(struct sock *sk, struct socket *parent)
{
write_lock_bh(&sk->sk_callback_lock);
- sk->sk_sleep = &parent->wait;
+ rcu_assign_pointer(sk->sk_wq, parent->wq);
parent->sk = sk;
sk_set_socket(sk, parent);
security_sock_graft(sk, parent);
@@ -1392,12 +1392,12 @@ static inline int sk_has_allocations(const struct sock *sk)
}
/**
- * sk_has_sleeper - check if there are any waiting processes
- * @sk: socket
+ * wq_has_sleeper - check if there are any waiting processes
+ * @wq: struct socket_wq
*
- * Returns true if socket has waiting processes
+ * Returns true if socket_wq has waiting processes
*
- * The purpose of the sk_has_sleeper and sock_poll_wait is to wrap the memory
+ * The purpose of the wq_has_sleeper and sock_poll_wait is to wrap the memory
* barrier call. They were added due to the race found within the tcp code.
*
* Consider following tcp code paths:
@@ -1410,9 +1410,10 @@ static inline int sk_has_allocations(const struct sock *sk)
* ... ...
* tp->rcv_nxt check sock_def_readable
* ... {
- * schedule ...
- * if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
- * wake_up_interruptible(sk_sleep(sk))
+ * schedule rcu_read_lock();
+ * wq = rcu_dereference(sk->sk_wq);
+ * if (wq && waitqueue_active(&wq->wait))
+ * wake_up_interruptible(&wq->wait)
* ...
* }
*
@@ -1421,19 +1422,18 @@ static inline int sk_has_allocations(const struct sock *sk)
* could then end up calling schedule and sleep forever if there are no more
* data on the socket.
*
- * The sk_has_sleeper is always called right after a call to read_lock, so we
- * can use smp_mb__after_lock barrier.
*/
-static inline int sk_has_sleeper(struct sock *sk)
+static inline bool wq_has_sleeper(struct socket_wq *wq)
{
+
/*
* We need to be sure we are in sync with the
* add_wait_queue modifications to the wait queue.
*
* This memory barrier is paired in the sock_poll_wait.
*/
- smp_mb__after_lock();
- return sk_sleep(sk) && waitqueue_active(sk_sleep(sk));
+ smp_mb();
+ return wq && waitqueue_active(&wq->wait);
}
/**
@@ -1442,7 +1442,7 @@ static inline int sk_has_sleeper(struct sock *sk)
* @wait_address: socket wait queue
* @p: poll_table
*
- * See the comments in the sk_has_sleeper function.
+ * See the comments in the wq_has_sleeper function.
*/
static inline void sock_poll_wait(struct file *filp,
wait_queue_head_t *wait_address, poll_table *p)
@@ -1453,7 +1453,7 @@ static inline void sock_poll_wait(struct file *filp,
* We need to be sure we are in sync with the
* socket flags modification.
*
- * This memory barrier is paired in the sk_has_sleeper.
+ * This memory barrier is paired in the wq_has_sleeper.
*/
smp_mb();
}
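
The comment above is the heart of the conversion: the sleeper queues itself and then re-checks socket state, the waker updates state and then checks for sleepers, and the smp_mb() in wq_has_sleeper() pairs with the one in sock_poll_wait() so neither side can observe a stale view of the other. A minimal wakeup-callback sketch using the new helpers (kernel-style C; example_sk_data_ready is a hypothetical name, patterned on the converted callbacks below):

	static void example_sk_data_ready(struct sock *sk, int len)
	{
		struct socket_wq *wq;

		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);	/* may be NULL for orphaned socks */
		if (wq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait, POLLIN | POLLRDNORM);
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
		rcu_read_unlock();
	}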
diff --git a/net/atm/common.c b/net/atm/common.c
index e3e10e6f8628..b43feb1a3995 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -90,10 +90,13 @@ static void vcc_sock_destruct(struct sock *sk)
static void vcc_def_wakeup(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up(sk_sleep(sk));
- read_unlock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up(&wq->wait);
+ rcu_read_unlock();
}
static inline int vcc_writable(struct sock *sk)
@@ -106,16 +109,19 @@ static inline int vcc_writable(struct sock *sk)
static void vcc_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
if (vcc_writable(sk)) {
- if (sk_has_sleeper(sk))
- wake_up_interruptible(sk_sleep(sk));
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
static struct proto vcc_proto = {
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index c1e60eed5a97..864c76f4a678 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1626,7 +1626,10 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
/* Connectionless channel */
if (sk->sk_type == SOCK_DGRAM) {
skb = l2cap_create_connless_pdu(sk, msg, len);
- err = l2cap_do_send(sk, skb);
+ if (IS_ERR(skb))
+ err = PTR_ERR(skb);
+ else
+ err = l2cap_do_send(sk, skb);
goto done;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 100dcbd29739..36d53be4fca6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2205,8 +2205,6 @@ int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64; /* old backlog weight */
-DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
-
#ifdef CONFIG_RPS
/* One global table that all flow-based protocols share. */
@@ -2366,7 +2364,7 @@ static void rps_trigger_softirq(void *data)
struct softnet_data *sd = data;
__napi_schedule(&sd->backlog);
- __get_cpu_var(netdev_rx_stat).received_rps++;
+ sd->received_rps++;
}
#endif /* CONFIG_RPS */
@@ -2405,7 +2403,6 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
sd = &per_cpu(softnet_data, cpu);
local_irq_save(flags);
- __get_cpu_var(netdev_rx_stat).total++;
rps_lock(sd);
if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
@@ -2429,9 +2426,9 @@ enqueue:
goto enqueue;
}
+ sd->dropped++;
rps_unlock(sd);
- __get_cpu_var(netdev_rx_stat).dropped++;
local_irq_restore(flags);
kfree_skb(skb);
@@ -2806,7 +2803,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
skb->dev = master;
}
- __get_cpu_var(netdev_rx_stat).total++;
+ __get_cpu_var(softnet_data).processed++;
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
@@ -3490,7 +3487,7 @@ out:
return;
softnet_break:
- __get_cpu_var(netdev_rx_stat).time_squeeze++;
+ sd->time_squeeze++;
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
goto out;
}
@@ -3691,17 +3688,17 @@ static int dev_seq_show(struct seq_file *seq, void *v)
return 0;
}
-static struct netif_rx_stats *softnet_get_online(loff_t *pos)
+static struct softnet_data *softnet_get_online(loff_t *pos)
{
- struct netif_rx_stats *rc = NULL;
+ struct softnet_data *sd = NULL;
while (*pos < nr_cpu_ids)
if (cpu_online(*pos)) {
- rc = &per_cpu(netdev_rx_stat, *pos);
+ sd = &per_cpu(softnet_data, *pos);
break;
} else
++*pos;
- return rc;
+ return sd;
}
static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
@@ -3721,12 +3718,12 @@ static void softnet_seq_stop(struct seq_file *seq, void *v)
static int softnet_seq_show(struct seq_file *seq, void *v)
{
- struct netif_rx_stats *s = v;
+ struct softnet_data *sd = v;
seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
- s->total, s->dropped, s->time_squeeze, 0,
+ sd->processed, sd->dropped, sd->time_squeeze, 0,
0, 0, 0, 0, /* was fastroute */
- s->cpu_collision, s->received_rps);
+ sd->cpu_collision, sd->received_rps);
return 0;
}
@@ -5869,6 +5866,7 @@ static int __init net_dev_init(void)
for_each_possible_cpu(i) {
struct softnet_data *sd = &per_cpu(softnet_data, i);
+ memset(sd, 0, sizeof(*sd));
skb_queue_head_init(&sd->input_pkt_queue);
skb_queue_head_init(&sd->process_queue);
sd->completion_queue = NULL;
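
With netif_rx_stats folded into softnet_data, /proc/net/softnet_stat keeps its one-line-per-CPU layout of ten %08x fields; per softnet_seq_show() above, field 0 is now sd->processed, field 1 dropped, field 2 time_squeeze, fields 3-7 remain zero, field 8 is cpu_collision and field 9 received_rps. A small userspace reader sketch under those assumptions (plain C, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned int f[10];
		FILE *fp = fopen("/proc/net/softnet_stat", "r");
		int cpu = 0;

		if (!fp)
			return 1;
		while (fscanf(fp, "%x %x %x %x %x %x %x %x %x %x",
			      &f[0], &f[1], &f[2], &f[3], &f[4],
			      &f[5], &f[6], &f[7], &f[8], &f[9]) == 10)
			printf("cpu%d: processed=%u dropped=%u squeezed=%u rps=%u\n",
			       cpu++, f[0], f[1], f[2], f[9]);
		fclose(fp);
		return 0;
	}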
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4218ff49bf13..8b9c109166a7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1051,7 +1051,7 @@ EXPORT_SYMBOL(skb_push);
*/
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
- return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
+ return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);
diff --git a/net/core/sock.c b/net/core/sock.c
index 51041759517e..94c4affdda9b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1211,7 +1211,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
*/
sk_refcnt_debug_inc(newsk);
sk_set_socket(newsk, NULL);
- newsk->sk_sleep = NULL;
+ newsk->sk_wq = NULL;
if (newsk->sk_prot->sockets_allocated)
percpu_counter_inc(newsk->sk_prot->sockets_allocated);
@@ -1800,41 +1800,53 @@ EXPORT_SYMBOL(sock_no_sendpage);
static void sock_def_wakeup(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up_interruptible_all(sk_sleep(sk));
- read_unlock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_all(&wq->wait);
+ rcu_read_unlock();
}
static void sock_def_error_report(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up_interruptible_poll(sk_sleep(sk), POLLERR);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_poll(&wq->wait, POLLERR);
sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
static void sock_def_readable(struct sock *sk, int len)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up_interruptible_sync_poll(sk_sleep(sk), POLLIN |
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
POLLRDNORM | POLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
static void sock_def_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
/* Do not wake up a writer until he can make "significant"
* progress. --DaveM
*/
if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
- if (sk_has_sleeper(sk))
- wake_up_interruptible_sync_poll(sk_sleep(sk), POLLOUT |
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
POLLWRNORM | POLLWRBAND);
/* Should agree with poll, otherwise some programs break */
@@ -1842,7 +1854,7 @@ static void sock_def_write_space(struct sock *sk)
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
static void sock_def_destruct(struct sock *sk)
@@ -1896,10 +1908,10 @@ void sock_init_data(struct socket *sock, struct sock *sk)
if (sock) {
sk->sk_type = sock->type;
- sk->sk_sleep = &sock->wait;
+ sk->sk_wq = sock->wq;
sock->sk = sk;
} else
- sk->sk_sleep = NULL;
+ sk->sk_wq = NULL;
spin_lock_init(&sk->sk_dst_lock);
rwlock_init(&sk->sk_callback_lock);
diff --git a/net/core/stream.c b/net/core/stream.c
index 7b3c3f30b107..cc196f42b8d8 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -28,15 +28,19 @@
void sk_stream_write_space(struct sock *sk)
{
struct socket *sock = sk->sk_socket;
+ struct socket_wq *wq;
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) {
clear_bit(SOCK_NOSPACE, &sock->flags);
- if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
- wake_up_interruptible_poll(sk_sleep(sk), POLLOUT |
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_poll(&wq->wait, POLLOUT |
POLLWRNORM | POLLWRBAND);
- if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
+ if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT);
+ rcu_read_unlock();
}
}
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 2d3dcb39851f..aadbdb58758b 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -195,15 +195,17 @@ EXPORT_SYMBOL_GPL(dccp_sync_mss);
void dccp_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
- if (sk_has_sleeper(sk))
- wake_up_interruptible(sk_sleep(sk));
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible(&wq->wait);
/* Should agree with poll, otherwise some programs break */
if (sock_writeable(sk))
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
/**
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 0c0d272a9888..61ec0329316c 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -162,7 +162,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
skb->dev = dev;
skb_reset_mac_header(skb);
- skb_pull(skb, ETH_HLEN);
+ skb_pull_inline(skb, ETH_HLEN);
eth = eth_hdr(skb);
if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 78cbc39f56c4..e0a3e3537b14 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -70,17 +70,13 @@ int inet_csk_bind_conflict(const struct sock *sk,
(!sk->sk_bound_dev_if ||
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
- const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
-
if (!reuse || !sk2->sk_reuse ||
sk2->sk_state == TCP_LISTEN) {
+ const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
if (!sk2_rcv_saddr || !sk_rcv_saddr ||
sk2_rcv_saddr == sk_rcv_saddr)
break;
- } else if (reuse && sk2->sk_reuse &&
- sk2_rcv_saddr &&
- sk2_rcv_saddr == sk_rcv_saddr)
- break;
+ }
}
}
return node != NULL;
@@ -124,11 +120,9 @@ again:
smallest_size = tb->num_owners;
smallest_rover = rover;
if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
- if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
- spin_unlock(&head->lock);
- snum = smallest_rover;
- goto have_snum;
- }
+ spin_unlock(&head->lock);
+ snum = smallest_rover;
+ goto have_snum;
}
}
goto next;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 34d2d649e396..3984f52181f4 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1346,7 +1346,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
struct hlist_node *node;
rcu_read_lock_bh();
- hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
+ hlist_for_each_entry_rcu_bh(ifp, node, &inet6_addr_lst[hash], addr_lst) {
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr)) {
@@ -2959,7 +2959,7 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq)
for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
struct hlist_node *n;
- hlist_for_each_entry_rcu(ifa, n, &inet6_addr_lst[state->bucket],
+ hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
addr_lst)
if (net_eq(dev_net(ifa->idev->dev), net))
return ifa;
@@ -2974,12 +2974,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
struct net *net = seq_file_net(seq);
struct hlist_node *n = &ifa->addr_lst;
- hlist_for_each_entry_continue_rcu(ifa, n, addr_lst)
+ hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst)
if (net_eq(dev_net(ifa->idev->dev), net))
return ifa;
while (++state->bucket < IN6_ADDR_HSIZE) {
- hlist_for_each_entry(ifa, n,
+ hlist_for_each_entry_rcu_bh(ifa, n,
&inet6_addr_lst[state->bucket], addr_lst) {
if (net_eq(dev_net(ifa->idev->dev), net))
return ifa;
@@ -3000,7 +3000,7 @@ static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos)
}
static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(rcu)
+ __acquires(rcu_bh)
{
rcu_read_lock_bh();
return if6_get_idx(seq, *pos);
@@ -3016,7 +3016,7 @@ static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
static void if6_seq_stop(struct seq_file *seq, void *v)
- __releases(rcu)
+ __releases(rcu_bh)
{
rcu_read_unlock_bh();
}
@@ -3093,7 +3093,7 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
unsigned int hash = ipv6_addr_hash(addr);
rcu_read_lock_bh();
- hlist_for_each_entry_rcu(ifp, n, &inet6_addr_lst[hash], addr_lst) {
+ hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) {
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -3127,7 +3127,7 @@ static void addrconf_verify(unsigned long foo)
for (i = 0; i < IN6_ADDR_HSIZE; i++) {
restart:
- hlist_for_each_entry_rcu(ifp, node,
+ hlist_for_each_entry_rcu_bh(ifp, node,
&inet6_addr_lst[i], addr_lst) {
unsigned long age;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 9ca1efc923a1..0c5e3c3b7fd5 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -42,16 +42,11 @@ int inet6_csk_bind_conflict(const struct sock *sk,
if (sk != sk2 &&
(!sk->sk_bound_dev_if ||
!sk2->sk_bound_dev_if ||
- sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
- if ((!sk->sk_reuse || !sk2->sk_reuse ||
- sk2->sk_state == TCP_LISTEN) &&
- ipv6_rcv_saddr_equal(sk, sk2))
- break;
- else if (sk->sk_reuse && sk2->sk_reuse &&
- !ipv6_addr_any(inet6_rcv_saddr(sk)) &&
- ipv6_rcv_saddr_equal(sk, sk2))
- break;
- }
+ sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
+ (!sk->sk_reuse || !sk2->sk_reuse ||
+ sk2->sk_state == TCP_LISTEN) &&
+ ipv6_rcv_saddr_equal(sk, sk2))
+ break;
}
return node != NULL;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 9636b7d27b48..8be324fe08b9 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -305,11 +305,14 @@ static inline int iucv_below_msglim(struct sock *sk)
*/
static void iucv_sock_wake_msglim(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up_interruptible_all(sk_sleep(sk));
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_all(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
/* Timers */
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index e2a95762abd3..af4d38bc3b22 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -664,12 +664,12 @@ static int pep_wait_connreq(struct sock *sk, int noblock)
if (signal_pending(tsk))
return sock_intr_errno(timeo);
- prepare_to_wait_exclusive(&sk->sk_socket->wait, &wait,
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
- finish_wait(&sk->sk_socket->wait, &wait);
+ finish_wait(sk_sleep(sk), &wait);
}
return 0;
@@ -910,10 +910,10 @@ disabled:
goto out;
}
- prepare_to_wait(&sk->sk_socket->wait, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits));
- finish_wait(&sk->sk_socket->wait, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (sk->sk_state != TCP_ESTABLISHED)
goto disabled;
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index c785bfd0744f..6e9848bf0370 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -265,7 +265,7 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
struct pep_sock *pn = pep_sk(sk);
unsigned int mask = 0;
- poll_wait(file, &sock->wait, wait);
+ poll_wait(file, sk_sleep(sk), wait);
switch (sk->sk_state) {
case TCP_LISTEN:
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index c432d76f415e..0b9bb2085ce4 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -62,13 +62,15 @@ static inline int rxrpc_writable(struct sock *sk)
static void rxrpc_write_space(struct sock *sk)
{
_enter("%p", sk);
- read_lock(&sk->sk_callback_lock);
+ rcu_read_lock();
if (rxrpc_writable(sk)) {
- if (sk_has_sleeper(sk))
- wake_up_interruptible(sk_sleep(sk));
+ struct socket_wq *wq = rcu_dereference(sk->sk_wq);
+
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
/*
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index aeddabfb8e4e..a969b111bd76 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -94,7 +94,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
* Another cpu is holding lock, requeue & delay xmits for
* some time.
*/
- __get_cpu_var(netdev_rx_stat).cpu_collision++;
+ __get_cpu_var(softnet_data).cpu_collision++;
ret = dev_requeue_skb(skb, q);
}
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 65f9a7cdf466..3912420cedcc 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1192,8 +1192,10 @@ void sctp_assoc_update(struct sctp_association *asoc,
/* Remove any peer addresses not present in the new association. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
trans = list_entry(pos, struct sctp_transport, transports);
- if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr))
- sctp_assoc_del_peer(asoc, &trans->ipaddr);
+ if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
+ sctp_assoc_rm_peer(asoc, trans);
+ continue;
+ }
if (asoc->state >= SCTP_STATE_ESTABLISHED)
sctp_transport_reset(trans);
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 2f8763bae9ed..e10acc01c75f 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -142,6 +142,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
/* Use SCTP specific send buffer space queues. */
ep->sndbuf_policy = sctp_sndbuf_policy;
+ sk->sk_data_ready = sctp_data_ready;
sk->sk_write_space = sctp_write_space;
sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index 8f025d5831aa..db3a42b8b349 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -27,6 +27,7 @@
#include <linux/socket.h>
#include <linux/sctp.h>
#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kfifo.h>
#include <linux/time.h>
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 24effdf471eb..d8261f3d7715 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -108,7 +108,7 @@ static const struct sctp_paramhdr prsctp_param = {
cpu_to_be16(sizeof(struct sctp_paramhdr)),
};
-/* A helper to initialize to initialize an op error inside a
+/* A helper to initialize an op error inside a
* provided chunk, as most cause codes will be embedded inside an
* abort chunk.
*/
@@ -125,6 +125,29 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
}
+/* A helper to initialize an op error inside a
+ * provided chunk, as most cause codes will be embedded inside an
+ * abort chunk. Differs from sctp_init_cause in that it won't oops
+ * if there isn't enough space in the op error chunk
+ */
+int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
+ size_t paylen)
+{
+ sctp_errhdr_t err;
+ __u16 len;
+
+ /* Cause code constants are now defined in network order. */
+ err.cause = cause_code;
+ len = sizeof(sctp_errhdr_t) + paylen;
+ err.length = htons(len);
+
+ if (skb_tailroom(chunk->skb) < len)
+ return -ENOSPC;
+ chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk,
+ sizeof(sctp_errhdr_t),
+ &err);
+ return 0;
+}
/* 3.3.2 Initiation (INIT) (1)
*
* This chunk is used to initiate a SCTP association between two
@@ -208,7 +231,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
sp = sctp_sk(asoc->base.sk);
num_types = sp->pf->supported_addrs(sp, types);
- chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types);
+ chunksize = sizeof(init) + addrs_len;
+ chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
chunksize += sizeof(ecap_param);
if (sctp_prsctp_enable)
@@ -238,14 +262,14 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
/* Add HMACS parameter length if any were defined */
auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
if (auth_hmacs->length)
- chunksize += ntohs(auth_hmacs->length);
+ chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
else
auth_hmacs = NULL;
/* Add CHUNKS parameter length */
auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
if (auth_chunks->length)
- chunksize += ntohs(auth_chunks->length);
+ chunksize += WORD_ROUND(ntohs(auth_chunks->length));
else
auth_chunks = NULL;
@@ -255,7 +279,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
/* If we have any extensions to report, account for that */
if (num_ext)
- chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
+ chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
+ num_ext);
/* RFC 2960 3.3.2 Initiation (INIT) (1)
*
@@ -397,13 +422,13 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
if (auth_hmacs->length)
- chunksize += ntohs(auth_hmacs->length);
+ chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
else
auth_hmacs = NULL;
auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
if (auth_chunks->length)
- chunksize += ntohs(auth_chunks->length);
+ chunksize += WORD_ROUND(ntohs(auth_chunks->length));
else
auth_chunks = NULL;
@@ -412,7 +437,8 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
}
if (num_ext)
- chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
+ chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
+ num_ext);
/* Now allocate and fill out the chunk. */
retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
@@ -1124,6 +1150,24 @@ nodata:
return retval;
}
+/* Create an Operation Error chunk of a fixed size,
+ * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT)
+ * This is a helper function to allocate an error chunk for
+ * those invalid parameter codes in which we may not want
+ * to report all the errors, if the incoming chunk is large
+ */
+static inline struct sctp_chunk *sctp_make_op_error_fixed(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk)
+{
+ size_t size = asoc ? asoc->pathmtu : 0;
+
+ if (!size)
+ size = SCTP_DEFAULT_MAXSEGMENT;
+
+ return sctp_make_op_error_space(asoc, chunk, size);
+}
+
/* Create an Operation Error chunk. */
struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
@@ -1365,6 +1409,18 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
return target;
}
+/* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient
+ * space in the chunk
+ */
+void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk,
+ int len, const void *data)
+{
+ if (skb_tailroom(chunk->skb) > len)
+ return sctp_addto_chunk(chunk, len, data);
+ else
+ return NULL;
+}
+
/* Append bytes from user space to the end of a chunk. Will panic if
* chunk is not big enough.
* Returns a kernel err value.
@@ -1968,13 +2024,12 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
* returning multiple unknown parameters.
*/
if (NULL == *errp)
- *errp = sctp_make_op_error_space(asoc, chunk,
- ntohs(chunk->chunk_hdr->length));
+ *errp = sctp_make_op_error_fixed(asoc, chunk);
if (*errp) {
- sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+ sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
WORD_ROUND(ntohs(param.p->length)));
- sctp_addto_chunk(*errp,
+ sctp_addto_chunk_fixed(*errp,
WORD_ROUND(ntohs(param.p->length)),
param.v);
} else {
@@ -3309,21 +3364,6 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
sctp_chunk_free(asconf);
asoc->addip_last_asconf = NULL;
- /* Send the next asconf chunk from the addip chunk queue. */
- if (!list_empty(&asoc->addip_chunk_list)) {
- struct list_head *entry = asoc->addip_chunk_list.next;
- asconf = list_entry(entry, struct sctp_chunk, list);
-
- list_del_init(entry);
-
- /* Hold the chunk until an ASCONF_ACK is received. */
- sctp_chunk_hold(asconf);
- if (sctp_primitive_ASCONF(asoc, asconf))
- sctp_chunk_free(asconf);
- else
- asoc->addip_last_asconf = asconf;
- }
-
return retval;
}
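
Two themes run through the sm_make_chunk.c hunks above: variable-length parameters (HMACS, CHUNKS, supported extensions) are now rounded up with WORD_ROUND() before being added to chunksize, and unknown-parameter errors are reported through a chunk capped at the association's path MTU so a large incoming INIT cannot overflow the reply. A tiny sketch of the 4-byte rounding, assuming WORD_ROUND is the usual round-up-to-a-multiple-of-four as in the SCTP headers (plain C, not part of the patch):

	#include <assert.h>

	#define WORD_ROUND(s) (((s) + 3) & ~3)	/* round up to a 4-byte boundary */

	int main(void)
	{
		/* e.g. a 6-byte parameter occupies 8 bytes on the wire */
		assert(WORD_ROUND(6) == 8);
		assert(WORD_ROUND(8) == 8);	/* already aligned: unchanged */
		return 0;
	}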
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 49fb9acece63..3b7230ef77c2 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -966,6 +966,29 @@ static int sctp_cmd_send_msg(struct sctp_association *asoc,
}
+/* Send the next ASCONF packet currently stored in the association.
+ * This happens after the ASCONF_ACK was successfully processed.
+ */
+static void sctp_cmd_send_asconf(struct sctp_association *asoc)
+{
+ /* Send the next asconf chunk from the addip chunk
+ * queue.
+ */
+ if (!list_empty(&asoc->addip_chunk_list)) {
+ struct list_head *entry = asoc->addip_chunk_list.next;
+ struct sctp_chunk *asconf = list_entry(entry,
+ struct sctp_chunk, list);
+ list_del_init(entry);
+
+ /* Hold the chunk until an ASCONF_ACK is received. */
+ sctp_chunk_hold(asconf);
+ if (sctp_primitive_ASCONF(asoc, asconf))
+ sctp_chunk_free(asconf);
+ else
+ asoc->addip_last_asconf = asconf;
+ }
+}
+
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
@@ -1621,6 +1644,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
}
error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
break;
+ case SCTP_CMD_SEND_NEXT_ASCONF:
+ sctp_cmd_send_asconf(asoc);
+ break;
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index abf601a1b847..24b2cd555637 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3676,8 +3676,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
- asconf_ack))
+ asconf_ack)) {
+ /* Successfully processed ASCONF_ACK. We can
+ * release the next asconf if we have one.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
+ SCTP_NULL());
return SCTP_DISPOSITION_CONSUME;
+ }
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 1282a0ed855e..ba1add0b13c3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3719,9 +3719,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
sp->hmac = NULL;
SCTP_DBG_OBJCNT_INC(sock);
- percpu_counter_inc(&sctp_sockets_allocated);
local_bh_disable();
+ percpu_counter_inc(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
local_bh_enable();
@@ -3738,8 +3738,8 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
/* Release our hold on the endpoint. */
ep = sctp_sk(sk)->ep;
sctp_endpoint_free(ep);
- percpu_counter_dec(&sctp_sockets_allocated);
local_bh_disable();
+ percpu_counter_dec(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
local_bh_enable();
}
@@ -6065,7 +6065,7 @@ static void __sctp_write_space(struct sctp_association *asoc)
* here by modeling from the current TCP/UDP code.
* We have not tested with it yet.
*/
- if (sock->fasync_list &&
+ if (sock->wq->fasync_list &&
!(sk->sk_shutdown & SEND_SHUTDOWN))
sock_wake_async(sock,
SOCK_WAKE_SPACE, POLL_OUT);
@@ -6185,6 +6185,19 @@ do_nonblock:
goto out;
}
+void sctp_data_ready(struct sock *sk, int len)
+{
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
+ POLLRDNORM | POLLRDBAND);
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ rcu_read_unlock();
+}
+
/* If socket sndbuf has changed, wake up all per association waiters. */
void sctp_write_space(struct sock *sk)
{
diff --git a/net/socket.c b/net/socket.c
index cb7c1f6c0d6e..dae8c6b84a09 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -252,9 +252,14 @@ static struct inode *sock_alloc_inode(struct super_block *sb)
ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
- init_waitqueue_head(&ei->socket.wait);
+ ei->socket.wq = kmalloc(sizeof(struct socket_wq), GFP_KERNEL);
+ if (!ei->socket.wq) {
+ kmem_cache_free(sock_inode_cachep, ei);
+ return NULL;
+ }
+ init_waitqueue_head(&ei->socket.wq->wait);
+ ei->socket.wq->fasync_list = NULL;
- ei->socket.fasync_list = NULL;
ei->socket.state = SS_UNCONNECTED;
ei->socket.flags = 0;
ei->socket.ops = NULL;
@@ -264,10 +269,21 @@ static struct inode *sock_alloc_inode(struct super_block *sb)
return &ei->vfs_inode;
}
+
+static void wq_free_rcu(struct rcu_head *head)
+{
+ struct socket_wq *wq = container_of(head, struct socket_wq, rcu);
+
+ kfree(wq);
+}
+
static void sock_destroy_inode(struct inode *inode)
{
- kmem_cache_free(sock_inode_cachep,
- container_of(inode, struct socket_alloc, vfs_inode));
+ struct socket_alloc *ei;
+
+ ei = container_of(inode, struct socket_alloc, vfs_inode);
+ call_rcu(&ei->socket.wq->rcu, wq_free_rcu);
+ kmem_cache_free(sock_inode_cachep, ei);
}
static void init_once(void *foo)
@@ -513,7 +529,7 @@ void sock_release(struct socket *sock)
module_put(owner);
}
- if (sock->fasync_list)
+ if (sock->wq->fasync_list)
printk(KERN_ERR "sock_release: fasync list not empty!\n");
percpu_sub(sockets_in_use, 1);
@@ -1080,9 +1096,9 @@ static int sock_fasync(int fd, struct file *filp, int on)
lock_sock(sk);
- fasync_helper(fd, filp, on, &sock->fasync_list);
+ fasync_helper(fd, filp, on, &sock->wq->fasync_list);
- if (!sock->fasync_list)
+ if (!sock->wq->fasync_list)
sock_reset_flag(sk, SOCK_FASYNC);
else
sock_set_flag(sk, SOCK_FASYNC);
@@ -1091,12 +1107,20 @@ static int sock_fasync(int fd, struct file *filp, int on)
return 0;
}
-/* This function may be called only under socket lock or callback_lock */
+/* This function may be called only under socket lock or callback_lock or rcu_lock */
int sock_wake_async(struct socket *sock, int how, int band)
{
- if (!sock || !sock->fasync_list)
+ struct socket_wq *wq;
+
+ if (!sock)
return -1;
+ rcu_read_lock();
+ wq = rcu_dereference(sock->wq);
+ if (!wq || !wq->fasync_list) {
+ rcu_read_unlock();
+ return -1;
+ }
switch (how) {
case SOCK_WAKE_WAITD:
if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
@@ -1108,11 +1132,12 @@ int sock_wake_async(struct socket *sock, int how, int band)
/* fall through */
case SOCK_WAKE_IO:
call_kill:
- kill_fasync(&sock->fasync_list, SIGIO, band);
+ kill_fasync(&wq->fasync_list, SIGIO, band);
break;
case SOCK_WAKE_URG:
- kill_fasync(&sock->fasync_list, SIGURG, band);
+ kill_fasync(&wq->fasync_list, SIGURG, band);
}
+ rcu_read_unlock();
return 0;
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 87c0360eaa25..fef2cc5e9d2b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -313,13 +313,16 @@ static inline int unix_writable(struct sock *sk)
static void unix_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
if (unix_writable(sk)) {
- if (sk_has_sleeper(sk))
- wake_up_interruptible_sync(sk_sleep(sk));
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
/* When dgram socket disconnects (or changes its peer), we clear its receive
@@ -406,9 +409,7 @@ static int unix_release_sock(struct sock *sk, int embrion)
skpair->sk_err = ECONNRESET;
unix_state_unlock(skpair);
skpair->sk_state_change(skpair);
- read_lock(&skpair->sk_callback_lock);
sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
- read_unlock(&skpair->sk_callback_lock);
}
sock_put(skpair); /* It may now die */
unix_peer(sk) = NULL;
@@ -1142,7 +1143,7 @@ restart:
newsk->sk_peercred.pid = task_tgid_vnr(current);
current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid);
newu = unix_sk(newsk);
- newsk->sk_sleep = &newu->peer_wait;
+ newsk->sk_wq = &newu->peer_wq;
otheru = unix_sk(other);
/* copy address information from listening to new sock*/
@@ -1931,12 +1932,10 @@ static int unix_shutdown(struct socket *sock, int mode)
other->sk_shutdown |= peer_mode;
unix_state_unlock(other);
other->sk_state_change(other);
- read_lock(&other->sk_callback_lock);
if (peer_mode == SHUTDOWN_MASK)
sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
else if (peer_mode & RCV_SHUTDOWN)
sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
- read_unlock(&other->sk_callback_lock);
}
if (other)
sock_put(other);
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 14c22c3768da..c8df6fda0b1f 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -153,15 +153,6 @@ void unix_notinflight(struct file *fp)
}
}
-static inline struct sk_buff *sock_queue_head(struct sock *sk)
-{
- return (struct sk_buff *)&sk->sk_receive_queue;
-}
-
-#define receive_queue_for_each_skb(sk, next, skb) \
- for (skb = sock_queue_head(sk)->next, next = skb->next; \
- skb != sock_queue_head(sk); skb = next, next = skb->next)
-
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
{
@@ -169,7 +160,7 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff *next;
spin_lock(&x->sk_receive_queue.lock);
- receive_queue_for_each_skb(x, next, skb) {
+ skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
/*
* Do we have file descriptors ?
*/
@@ -225,7 +216,7 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
* and perform a scan on them as well.
*/
spin_lock(&x->sk_receive_queue.lock);
- receive_queue_for_each_skb(x, next, skb) {
+ skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
u = unix_sk(skb->sk);
/*