-rw-r--r--  arch/arm/net/bpf_jit_32.c                        |  1
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c                  |  1
-rw-r--r--  arch/s390/net/bpf_jit_comp.c                     |  4
-rw-r--r--  arch/sparc/net/bpf_jit_comp.c                    |  1
-rw-r--r--  arch/x86/net/bpf_jit_comp.c                      | 18
-rw-r--r--  drivers/net/can/dev.c                            | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c       | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h     |  4
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c         | 22
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c |  5
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c            |  4
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c                   | 10
-rw-r--r--  drivers/net/ieee802154/mrf24j40.c                | 31
-rw-r--r--  drivers/net/tun.c                                |  8
-rw-r--r--  drivers/net/xen-netback/xenbus.c                 |  4
-rw-r--r--  include/linux/filter.h                           | 15
-rw-r--r--  include/linux/netdevice.h                        |  5
-rw-r--r--  include/net/sock.h                               |  6
-rw-r--r--  net/8021q/vlan_netlink.c                         |  2
-rw-r--r--  net/core/dev.c                                   |  3
-rw-r--r--  net/core/filter.c                                |  8
-rw-r--r--  net/core/sock.c                                  |  1
-rw-r--r--  net/ieee802154/6lowpan.c                         |  5
-rw-r--r--  net/ipv4/route.c                                 |  2
-rw-r--r--  net/ipv6/ip6_gre.c                               |  3
-rw-r--r--  net/ipv6/ip6_tunnel.c                            | 12
-rw-r--r--  net/l2tp/l2tp_core.c                             | 13
-rw-r--r--  net/sched/sch_fq.c                               | 22
-rw-r--r--  net/sysctl_net.c                                 |  4
29 files changed, 160 insertions(+), 105 deletions(-)
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index f50d223a0bd3..99b44e0e8d86 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -930,4 +930,5 @@ void bpf_jit_free(struct sk_filter *fp)
{
if (fp->bpf_func != sk_run_filter)
module_free(NULL, fp->bpf_func);
+ kfree(fp);
}
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index bf56e33f8257..2345bdb4d917 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -691,4 +691,5 @@ void bpf_jit_free(struct sk_filter *fp)
{
if (fp->bpf_func != sk_run_filter)
module_free(NULL, fp->bpf_func);
+ kfree(fp);
}
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 709239285869..a5df511e27a2 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -881,7 +881,9 @@ void bpf_jit_free(struct sk_filter *fp)
struct bpf_binary_header *header = (void *)addr;
if (fp->bpf_func == sk_run_filter)
- return;
+ goto free_filter;
set_memory_rw(addr, header->pages);
module_free(NULL, header);
+free_filter:
+ kfree(fp);
}
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 9c7be59e6f5a..218b6b23c378 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -808,4 +808,5 @@ void bpf_jit_free(struct sk_filter *fp)
{
if (fp->bpf_func != sk_run_filter)
module_free(NULL, fp->bpf_func);
+ kfree(fp);
}
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 79c216aa0e2b..516593e1ce33 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -772,13 +772,21 @@ out:
return;
}
+static void bpf_jit_free_deferred(struct work_struct *work)
+{
+ struct sk_filter *fp = container_of(work, struct sk_filter, work);
+ unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+ struct bpf_binary_header *header = (void *)addr;
+
+ set_memory_rw(addr, header->pages);
+ module_free(NULL, header);
+ kfree(fp);
+}
+
void bpf_jit_free(struct sk_filter *fp)
{
if (fp->bpf_func != sk_run_filter) {
- unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
- struct bpf_binary_header *header = (void *)addr;
-
- set_memory_rw(addr, header->pages);
- module_free(NULL, header);
+ INIT_WORK(&fp->work, bpf_jit_free_deferred);
+ schedule_work(&fp->work);
}
}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index f9cba4123c66..1870c4731a57 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -705,14 +705,14 @@ static size_t can_get_size(const struct net_device *dev)
size_t size;
size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
- size += sizeof(struct can_ctrlmode); /* IFLA_CAN_CTRLMODE */
+ size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
- size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */
- size += sizeof(struct can_clock); /* IFLA_CAN_CLOCK */
+ size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
+ size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
- size += sizeof(struct can_berr_counter);
+ size += nla_total_size(sizeof(struct can_berr_counter));
if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
- size += sizeof(struct can_bittiming_const);
+ size += nla_total_size(sizeof(struct can_bittiming_const));
return size;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index dec455c8f627..afe2efa69c86 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -70,14 +70,15 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
put_page(page);
return -ENOMEM;
}
- page_alloc->size = PAGE_SIZE << order;
+ page_alloc->page_size = PAGE_SIZE << order;
page_alloc->page = page;
page_alloc->dma = dma;
- page_alloc->offset = frag_info->frag_align;
+ page_alloc->page_offset = frag_info->frag_align;
/* Not doing get_page() for each frag is a big win
* on asymetric workloads.
*/
- atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride);
+ atomic_set(&page->_count,
+ page_alloc->page_size / frag_info->frag_stride);
return 0;
}
@@ -96,16 +97,19 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
for (i = 0; i < priv->num_frags; i++) {
frag_info = &priv->frag_info[i];
page_alloc[i] = ring_alloc[i];
- page_alloc[i].offset += frag_info->frag_stride;
- if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size)
+ page_alloc[i].page_offset += frag_info->frag_stride;
+
+ if (page_alloc[i].page_offset + frag_info->frag_stride <=
+ ring_alloc[i].page_size)
continue;
+
if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
goto out;
}
for (i = 0; i < priv->num_frags; i++) {
frags[i] = ring_alloc[i];
- dma = ring_alloc[i].dma + ring_alloc[i].offset;
+ dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
ring_alloc[i] = page_alloc[i];
rx_desc->data[i].addr = cpu_to_be64(dma);
}
@@ -117,7 +121,7 @@ out:
frag_info = &priv->frag_info[i];
if (page_alloc[i].page != ring_alloc[i].page) {
dma_unmap_page(priv->ddev, page_alloc[i].dma,
- page_alloc[i].size, PCI_DMA_FROMDEVICE);
+ page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
page = page_alloc[i].page;
atomic_set(&page->_count, 1);
put_page(page);
@@ -131,10 +135,12 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
int i)
{
const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+ u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
+
- if (frags[i].offset + frag_info->frag_stride > frags[i].size)
- dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size,
- PCI_DMA_FROMDEVICE);
+ if (next_frag_end > frags[i].page_size)
+ dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
+ PCI_DMA_FROMDEVICE);
if (frags[i].page)
put_page(frags[i].page);
@@ -161,7 +167,7 @@ out:
page_alloc = &ring->page_alloc[i];
dma_unmap_page(priv->ddev, page_alloc->dma,
- page_alloc->size, PCI_DMA_FROMDEVICE);
+ page_alloc->page_size, PCI_DMA_FROMDEVICE);
page = page_alloc->page;
atomic_set(&page->_count, 1);
put_page(page);
@@ -184,10 +190,11 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
i, page_count(page_alloc->page));
dma_unmap_page(priv->ddev, page_alloc->dma,
- page_alloc->size, PCI_DMA_FROMDEVICE);
- while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) {
+ page_alloc->page_size, PCI_DMA_FROMDEVICE);
+ while (page_alloc->page_offset + frag_info->frag_stride <
+ page_alloc->page_size) {
put_page(page_alloc->page);
- page_alloc->offset += frag_info->frag_stride;
+ page_alloc->page_offset += frag_info->frag_stride;
}
page_alloc->page = NULL;
}
@@ -478,7 +485,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
/* Save page reference in skb */
__skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
- skb_frags_rx[nr].page_offset = frags[nr].offset;
+ skb_frags_rx[nr].page_offset = frags[nr].page_offset;
skb->truesize += frag_info->frag_stride;
frags[nr].page = NULL;
}
@@ -517,7 +524,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
/* Get pointer to first fragment so we could copy the headers into the
* (linear part of the) skb */
- va = page_address(frags[0].page) + frags[0].offset;
+ va = page_address(frags[0].page) + frags[0].page_offset;
if (length <= SMALL_PACKET_SIZE) {
/* We are copying all relevant data to the skb - temporarily
@@ -645,7 +652,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
DMA_FROM_DEVICE);
ethh = (struct ethhdr *)(page_address(frags[0].page) +
- frags[0].offset);
+ frags[0].page_offset);
if (is_multicast_ether_addr(ethh->h_dest)) {
struct mlx4_mac_entry *entry;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 5e0aa569306a..bf06e3610d27 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -237,8 +237,8 @@ struct mlx4_en_tx_desc {
struct mlx4_en_rx_alloc {
struct page *page;
dma_addr_t dma;
- u32 offset;
- u32 size;
+ u32 page_offset;
+ u32 page_size;
};
struct mlx4_en_tx_ring {
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index bd1a2d2bc2ae..ea54d95e5b9f 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -448,7 +448,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
irq = irq_of_parse_and_map(node, 0);
if (irq <= 0) {
netdev_err(ndev, "irq_of_parse_and_map failed\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto irq_map_fail;
}
priv = netdev_priv(ndev);
@@ -472,24 +473,32 @@ static int moxart_mac_probe(struct platform_device *pdev)
priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
TX_DESC_NUM, &priv->tx_base,
GFP_DMA | GFP_KERNEL);
- if (priv->tx_desc_base == NULL)
+ if (priv->tx_desc_base == NULL) {
+ ret = -ENOMEM;
goto init_fail;
+ }
priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
RX_DESC_NUM, &priv->rx_base,
GFP_DMA | GFP_KERNEL);
- if (priv->rx_desc_base == NULL)
+ if (priv->rx_desc_base == NULL) {
+ ret = -ENOMEM;
goto init_fail;
+ }
priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
GFP_ATOMIC);
- if (!priv->tx_buf_base)
+ if (!priv->tx_buf_base) {
+ ret = -ENOMEM;
goto init_fail;
+ }
priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
GFP_ATOMIC);
- if (!priv->rx_buf_base)
+ if (!priv->rx_buf_base) {
+ ret = -ENOMEM;
goto init_fail;
+ }
platform_set_drvdata(pdev, ndev);
@@ -522,7 +531,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
init_fail:
netdev_err(ndev, "init failed\n");
moxart_mac_free_memory(ndev);
-
+irq_map_fail:
+ free_netdev(ndev);
return ret;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 21d00a0449a1..f07f2b0fefa0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2257,7 +2257,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = qlcnic_alloc_adapter_resources(adapter);
if (err)
- goto err_out_free_netdev;
+ goto err_out_free_wq;
adapter->dev_rst_time = jiffies;
adapter->ahw->revision_id = pdev->revision;
@@ -2396,6 +2396,9 @@ err_out_disable_msi:
err_out_free_hw:
qlcnic_free_adapter_resources(adapter);
+err_out_free_wq:
+ destroy_workqueue(adapter->qlcnic_wq);
+
err_out_free_netdev:
free_netdev(netdev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 5cd831ebfa83..c7e1b8f333ed 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -620,12 +620,16 @@ static struct sh_eth_cpu_data sh7734_data = {
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE | EESR_ECI,
+ .fdr_value = 0x0000070f,
+ .rmcr_value = 0x00000001,
.apr = 1,
.mpr = 1,
.tpauser = 1,
.bculr = 1,
.hw_swap = 1,
+ .rpadir = 1,
+ .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.tsu = 1,
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 79974e31187a..804846eb5fc2 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1771,8 +1771,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->mac_control = prop;
- if (!of_property_read_u32(node, "dual_emac", &prop))
- data->dual_emac = prop;
+ if (of_property_read_bool(node, "dual_emac"))
+ data->dual_emac = 1;
/*
* Populate all the child nodes here...
@@ -1782,7 +1782,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (ret)
pr_warn("Doesn't have any child node\n");
- for_each_node_by_name(slave_node, "slave") {
+ for_each_child_of_node(node, slave_node) {
struct cpsw_slave_data *slave_data = data->slave_data + i;
const void *mac_addr = NULL;
u32 phyid;
@@ -1791,6 +1791,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
struct device_node *mdio_node;
struct platform_device *mdio;
+ /* This is not a slave child node, continue */
+ if (strcmp(slave_node->name, "slave"))
+ continue;
+
parp = of_get_property(slave_node, "phy_id", &lenp);
if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
pr_err("Missing slave[%d] phy_id property\n", i);
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 42e6deee6db5..0632d34905c7 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -82,7 +82,6 @@ struct mrf24j40 {
struct mutex buffer_mutex; /* only used to protect buf */
struct completion tx_complete;
- struct work_struct irqwork;
u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */
};
@@ -344,6 +343,8 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
if (ret)
goto err;
+ INIT_COMPLETION(devrec->tx_complete);
+
/* Set TXNTRIG bit of TXNCON to send packet */
ret = read_short_reg(devrec, REG_TXNCON, &val);
if (ret)
@@ -354,8 +355,6 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
val |= 0x4;
write_short_reg(devrec, REG_TXNCON, val);
- INIT_COMPLETION(devrec->tx_complete);
-
/* Wait for the device to send the TX complete interrupt. */
ret = wait_for_completion_interruptible_timeout(
&devrec->tx_complete,
@@ -590,17 +589,6 @@ static struct ieee802154_ops mrf24j40_ops = {
static irqreturn_t mrf24j40_isr(int irq, void *data)
{
struct mrf24j40 *devrec = data;
-
- disable_irq_nosync(irq);
-
- schedule_work(&devrec->irqwork);
-
- return IRQ_HANDLED;
-}
-
-static void mrf24j40_isrwork(struct work_struct *work)
-{
- struct mrf24j40 *devrec = container_of(work, struct mrf24j40, irqwork);
u8 intstat;
int ret;
@@ -618,7 +606,7 @@ static void mrf24j40_isrwork(struct work_struct *work)
mrf24j40_handle_rx(devrec);
out:
- enable_irq(devrec->spi->irq);
+ return IRQ_HANDLED;
}
static int mrf24j40_probe(struct spi_device *spi)
@@ -642,7 +630,6 @@ static int mrf24j40_probe(struct spi_device *spi)
mutex_init(&devrec->buffer_mutex);
init_completion(&devrec->tx_complete);
- INIT_WORK(&devrec->irqwork, mrf24j40_isrwork);
devrec->spi = spi;
spi_set_drvdata(spi, devrec);
@@ -688,11 +675,12 @@ static int mrf24j40_probe(struct spi_device *spi)
val &= ~0x3; /* Clear RX mode (normal) */
write_short_reg(devrec, REG_RXMCR, val);
- ret = request_irq(spi->irq,
- mrf24j40_isr,
- IRQF_TRIGGER_FALLING,
- dev_name(&spi->dev),
- devrec);
+ ret = request_threaded_irq(spi->irq,
+ NULL,
+ mrf24j40_isr,
+ IRQF_TRIGGER_LOW|IRQF_ONESHOT,
+ dev_name(&spi->dev),
+ devrec);
if (ret) {
dev_err(printdev(devrec), "Unable to get IRQ");
@@ -721,7 +709,6 @@ static int mrf24j40_remove(struct spi_device *spi)
dev_dbg(printdev(devrec), "remove\n");
free_irq(spi->irq, devrec);
- flush_work(&devrec->irqwork); /* TODO: Is this the right call? */
ieee802154_unregister_device(devrec->dev);
ieee802154_free_device(devrec->dev);
/* TODO: Will ieee802154_free_device() wait until ->xmit() is
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 807815fc9968..7cb105c103fe 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1293,7 +1293,8 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
if (unlikely(!noblock))
add_wait_queue(&tfile->wq.wait, &wait);
while (len) {
- current->state = TASK_INTERRUPTIBLE;
+ if (unlikely(!noblock))
+ current->state = TASK_INTERRUPTIBLE;
/* Read frames from the queue */
if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
@@ -1320,9 +1321,10 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
break;
}
- current->state = TASK_RUNNING;
- if (unlikely(!noblock))
+ if (unlikely(!noblock)) {
+ current->state = TASK_RUNNING;
remove_wait_queue(&tfile->wq.wait, &wait);
+ }
return ret;
}
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index b45bce20ad76..1b08d8798372 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -39,11 +39,15 @@ static int connect_rings(struct backend_info *);
static void connect(struct backend_info *);
static void backend_create_xenvif(struct backend_info *be);
static void unregister_hotplug_status_watch(struct backend_info *be);
+static void set_backend_state(struct backend_info *be,
+ enum xenbus_state state);
static int netback_remove(struct xenbus_device *dev)
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
+ set_backend_state(be, XenbusStateClosed);
+
unregister_hotplug_status_watch(be);
if (be->vif) {
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a6ac84871d6d..ff4e40cd45b1 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -6,6 +6,7 @@
#include <linux/atomic.h>
#include <linux/compat.h>
+#include <linux/workqueue.h>
#include <uapi/linux/filter.h>
#ifdef CONFIG_COMPAT
@@ -25,15 +26,19 @@ struct sk_filter
{
atomic_t refcnt;
unsigned int len; /* Number of filter blocks */
+ struct rcu_head rcu;
unsigned int (*bpf_func)(const struct sk_buff *skb,
const struct sock_filter *filter);
- struct rcu_head rcu;
- struct sock_filter insns[0];
+ union {
+ struct sock_filter insns[0];
+ struct work_struct work;
+ };
};
-static inline unsigned int sk_filter_len(const struct sk_filter *fp)
+static inline unsigned int sk_filter_size(unsigned int proglen)
{
- return fp->len * sizeof(struct sock_filter) + sizeof(*fp);
+ return max(sizeof(struct sk_filter),
+ offsetof(struct sk_filter, insns[proglen]));
}
extern int sk_filter(struct sock *sk, struct sk_buff *skb);
@@ -67,11 +72,13 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
}
#define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
#else
+#include <linux/slab.h>
static inline void bpf_jit_compile(struct sk_filter *fp)
{
}
static inline void bpf_jit_free(struct sk_filter *fp)
{
+ kfree(fp);
}
#define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns)
#endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3de49aca4519..25f5d2d11e7c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2264,11 +2264,12 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
}
#ifdef CONFIG_XPS
-extern int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask,
+extern int netif_set_xps_queue(struct net_device *dev,
+ const struct cpumask *mask,
u16 index);
#else
static inline int netif_set_xps_queue(struct net_device *dev,
- struct cpumask *mask,
+ const struct cpumask *mask,
u16 index)
{
return 0;
diff --git a/include/net/sock.h b/include/net/sock.h
index 1d37a8086bed..808cbc2ec6c1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1630,16 +1630,14 @@ static inline void sk_filter_release(struct sk_filter *fp)
static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
- unsigned int size = sk_filter_len(fp);
-
- atomic_sub(size, &sk->sk_omem_alloc);
+ atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
sk_filter_release(fp);
}
static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
atomic_inc(&fp->refcnt);
- atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
+ atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
}
/*
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 309129732285..c7e634af8516 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -171,7 +171,7 @@ static size_t vlan_get_size(const struct net_device *dev)
return nla_total_size(2) + /* IFLA_VLAN_PROTOCOL */
nla_total_size(2) + /* IFLA_VLAN_ID */
- sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
+ nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
vlan_qos_map_size(vlan->nr_ingress_mappings) +
vlan_qos_map_size(vlan->nr_egress_mappings);
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 65f829cfd928..3430b1ed12e5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1917,7 +1917,8 @@ static struct xps_map *expand_xps_map(struct xps_map *map,
return new_map;
}
-int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
+int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
+ u16 index)
{
struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
struct xps_map *map, *new_map;
diff --git a/net/core/filter.c b/net/core/filter.c
index 6438f29ff266..01b780856db2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -644,7 +644,6 @@ void sk_filter_release_rcu(struct rcu_head *rcu)
struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
bpf_jit_free(fp);
- kfree(fp);
}
EXPORT_SYMBOL(sk_filter_release_rcu);
@@ -683,7 +682,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
if (fprog->filter == NULL)
return -EINVAL;
- fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL);
+ fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
if (!fp)
return -ENOMEM;
memcpy(fp->insns, fprog->filter, fsize);
@@ -723,6 +722,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
struct sk_filter *fp, *old_fp;
unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
+ unsigned int sk_fsize = sk_filter_size(fprog->len);
int err;
if (sock_flag(sk, SOCK_FILTER_LOCKED))
@@ -732,11 +732,11 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
if (fprog->filter == NULL)
return -EINVAL;
- fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
+ fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
if (!fp)
return -ENOMEM;
if (copy_from_user(fp->insns, fprog->filter, fsize)) {
- sock_kfree_s(sk, fp, fsize+sizeof(*fp));
+ sock_kfree_s(sk, fp, sk_fsize);
return -EFAULT;
}
diff --git a/net/core/sock.c b/net/core/sock.c
index 5b6beba494a3..0b39e7ae4383 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2319,6 +2319,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_ll_usec = sysctl_net_busy_read;
#endif
+ sk->sk_pacing_rate = ~0U;
/*
* Before updating sk_refcnt, we must commit prior changes to memory
* (Documentation/RCU/rculist_nulls.txt for details)
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index c85e71e0c7ff..ff41b4d60d30 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -1372,6 +1372,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev)
return -ENODEV;
+ if (real_dev->type != ARPHRD_IEEE802154)
+ return -EINVAL;
lowpan_dev_info(dev)->real_dev = real_dev;
lowpan_dev_info(dev)->fragment_tag = 0;
@@ -1386,6 +1388,9 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
entry->ldev = dev;
+ /* Set the lowpan hardware address to the wpan hardware address. */
+ memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
+
mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
INIT_LIST_HEAD(&entry->list);
list_add_tail(&entry->list, &lowpan_devices);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 727f4365bcdf..6011615e810d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2072,7 +2072,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
RT_SCOPE_LINK);
goto make_route;
}
- if (fl4->saddr) {
+ if (!fl4->saddr) {
if (ipv4_is_multicast(fl4->daddr))
fl4->saddr = inet_select_addr(dev_out, 0,
fl4->flowi4_scope);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 7bb5446b9d73..1ef1fa2b22a6 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1173,9 +1173,8 @@ done:
static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
- struct ip6_tnl *tunnel = netdev_priv(dev);
if (new_mtu < 68 ||
- new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
+ new_mtu > 0xFFF8 - dev->hard_header_len)
return -EINVAL;
dev->mtu = new_mtu;
return 0;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index a791552e0422..583b77e2f69b 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1430,9 +1430,17 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static int
ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
- if (new_mtu < IPV6_MIN_MTU) {
- return -EINVAL;
+ struct ip6_tnl *tnl = netdev_priv(dev);
+
+ if (tnl->parms.proto == IPPROTO_IPIP) {
+ if (new_mtu < 68)
+ return -EINVAL;
+ } else {
+ if (new_mtu < IPV6_MIN_MTU)
+ return -EINVAL;
}
+ if (new_mtu > 0xFFF8 - dev->hard_header_len)
+ return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index aedaa2cd4237..b076e8309bc2 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -115,6 +115,11 @@ struct l2tp_net {
static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
+static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
+{
+ return sk->sk_user_data;
+}
+
static inline struct l2tp_net *l2tp_pernet(struct net *net)
{
BUG_ON(!net);
@@ -496,7 +501,6 @@ out:
static inline int l2tp_verify_udp_checksum(struct sock *sk,
struct sk_buff *skb)
{
- struct l2tp_tunnel *tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
struct udphdr *uh = udp_hdr(skb);
u16 ulen = ntohs(uh->len);
__wsum psum;
@@ -505,7 +509,7 @@ static inline int l2tp_verify_udp_checksum(struct sock *sk,
return 0;
#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == PF_INET6 && !tunnel->v4mapped) {
+ if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) {
if (!uh->check) {
LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
return 1;
@@ -1305,10 +1309,9 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
*/
static void l2tp_tunnel_destruct(struct sock *sk)
{
- struct l2tp_tunnel *tunnel;
+ struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
struct l2tp_net *pn;
- tunnel = sk->sk_user_data;
if (tunnel == NULL)
goto end;
@@ -1676,7 +1679,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
}
/* Check if this socket has already been prepped */
- tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
+ tunnel = l2tp_tunnel(sk);
if (tunnel != NULL) {
/* This socket has already been prepped */
err = -EBUSY;
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index a2fef8b10b96..a9dfdda9ed1d 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -472,20 +472,16 @@ begin:
if (f->credit > 0 || !q->rate_enable)
goto out;
- if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) {
- rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate;
+ rate = q->flow_max_rate;
+ if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT)
+ rate = min(skb->sk->sk_pacing_rate, rate);
- rate = min(rate, q->flow_max_rate);
- } else {
- rate = q->flow_max_rate;
- if (rate == ~0U)
- goto out;
- }
- if (rate) {
+ if (rate != ~0U) {
u32 plen = max(qdisc_pkt_len(skb), q->quantum);
u64 len = (u64)plen * NSEC_PER_SEC;
- do_div(len, rate);
+ if (likely(rate))
+ do_div(len, rate);
/* Since socket rate can change later,
* clamp the delay to 125 ms.
* TODO: maybe segment the too big skb, as in commit
@@ -656,7 +652,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
if (tb[TCA_FQ_INITIAL_QUANTUM])
- q->quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
+ q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]);
@@ -735,12 +731,14 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
if (opts == NULL)
goto nla_put_failure;
+ /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore,
+ * do not bother giving its value
+ */
if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
- nla_put_u32(skb, TCA_FQ_FLOW_DEFAULT_RATE, q->flow_default_rate) ||
nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
goto nla_put_failure;
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 9bc6db04be3e..e7000be321b0 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -47,12 +47,12 @@ static int net_ctl_permissions(struct ctl_table_header *head,
/* Allow network administrator to have same access as root. */
if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
- uid_eq(root_uid, current_uid())) {
+ uid_eq(root_uid, current_euid())) {
int mode = (table->mode >> 6) & 7;
return (mode << 6) | (mode << 3) | mode;
}
/* Allow netns root group to have the same access as the root group */
- if (gid_eq(root_gid, current_gid())) {
+ if (in_egroup_p(root_gid)) {
int mode = (table->mode >> 3) & 7;
return (mode << 3) | mode;
}