author:    Dmitry Torokhov <dmitry.torokhov@gmail.com>  2018-08-16 20:10:56 +0200
committer: Dmitry Torokhov <dmitry.torokhov@gmail.com>  2018-08-16 20:10:56 +0200
commit:    13fe7056bebb4015c6231a07a1be4d3aebbfe979 (patch)
tree:      8aefa59a61c081c402bc85f2b47c17e1374eabdd /drivers/net/ethernet
parent:    MAINTAINERS: Add PhoenixRC Flight Controller Adapter (diff)
parent:    Input: do not use WARN() in input_alloc_absinfo() (diff)
download:  linux-13fe7056bebb4015c6231a07a1be4d3aebbfe979.tar.xz / linux-13fe7056bebb4015c6231a07a1be4d3aebbfe979.zip
Merge branch 'next' into for-linus
Prepare input updates for 4.19 merge window.
Diffstat (limited to 'drivers/net/ethernet')
26 files changed, 281 insertions(+), 222 deletions(-)
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 36c8950dbd2d..176861bd2252 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1212,9 +1212,9 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
 	vp->mii.reg_num_mask = 0x1f;
 
 	/* Makes sure rings are at least 16 byte aligned. */
-	vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
+	vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
 					   + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
-					   &vp->rx_ring_dma);
+					   &vp->rx_ring_dma, GFP_KERNEL);
 	retval = -ENOMEM;
 	if (!vp->rx_ring)
 		goto free_device;
@@ -1476,11 +1476,10 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
 	return 0;
 
 free_ring:
-	pci_free_consistent(pdev,
-			    sizeof(struct boom_rx_desc) * RX_RING_SIZE
-			    + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
-			    vp->rx_ring,
-			    vp->rx_ring_dma);
+	dma_free_coherent(&pdev->dev,
+			  sizeof(struct boom_rx_desc) * RX_RING_SIZE +
+			  sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+			  vp->rx_ring, vp->rx_ring_dma);
 free_device:
 	free_netdev(dev);
 	pr_err(PFX "vortex_probe1 fails.  Returns %d\n", retval);
@@ -1751,9 +1750,9 @@ vortex_open(struct net_device *dev)
 			break;			/* Bad news!  */
 
 		skb_reserve(skb, NET_IP_ALIGN);	/* Align IP on 16 byte boundaries */
-		dma = pci_map_single(VORTEX_PCI(vp), skb->data,
-				     PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
-		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
+		dma = dma_map_single(vp->gendev, skb->data,
+				     PKT_BUF_SZ, DMA_FROM_DEVICE);
+		if (dma_mapping_error(vp->gendev, dma))
 			break;
 		vp->rx_ring[i].addr = cpu_to_le32(dma);
 	}
@@ -2067,9 +2066,9 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (vp->bus_master) {
 		/* Set the bus-master controller to transfer the packet. */
 		int len = (skb->len + 3) & ~3;
-		vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
-						PCI_DMA_TODEVICE);
-		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
+		vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len,
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) {
 			dev_kfree_skb_any(skb);
 			dev->stats.tx_dropped++;
 			return NETDEV_TX_OK;
@@ -2168,9 +2167,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
 
 	if (!skb_shinfo(skb)->nr_frags) {
-		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
-					  PCI_DMA_TODEVICE);
-		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+		dma_addr = dma_map_single(vp->gendev, skb->data, skb->len,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(vp->gendev, dma_addr))
 			goto out_dma_err;
 
 		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
@@ -2178,9 +2177,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	} else {
 		int i;
 
-		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
-					  skb_headlen(skb), PCI_DMA_TODEVICE);
-		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+		dma_addr = dma_map_single(vp->gendev, skb->data,
+					  skb_headlen(skb), DMA_TO_DEVICE);
+		if (dma_mapping_error(vp->gendev, dma_addr))
 			goto out_dma_err;
 
 		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
@@ -2189,21 +2188,21 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-			dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
+			dma_addr = skb_frag_dma_map(vp->gendev, frag,
 						    0,
 						    frag->size,
 						    DMA_TO_DEVICE);
-			if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
+			if (dma_mapping_error(vp->gendev, dma_addr)) {
 				for(i = i-1; i >= 0; i--)
-					dma_unmap_page(&VORTEX_PCI(vp)->dev,
+					dma_unmap_page(vp->gendev,
 						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
 						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
 						       DMA_TO_DEVICE);
 
-				pci_unmap_single(VORTEX_PCI(vp),
+				dma_unmap_single(vp->gendev,
 						 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
 						 le32_to_cpu(vp->tx_ring[entry].frag[0].length),
-						 PCI_DMA_TODEVICE);
+						 DMA_TO_DEVICE);
 
 				goto out_dma_err;
 			}
@@ -2218,8 +2217,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 #else
-	dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE);
-	if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+	dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE);
+	if (dma_mapping_error(vp->gendev, dma_addr))
 		goto out_dma_err;
 	vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
 	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
@@ -2254,7 +2253,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 out:
 	return NETDEV_TX_OK;
 out_dma_err:
-	dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
+	dev_err(vp->gendev, "Error mapping dma buffer\n");
 	goto out;
 }
@@ -2322,7 +2321,7 @@ vortex_interrupt(int irq, void *dev_id)
 		if (status & DMADone) {
 			if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
 				iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
-				pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
+				dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE);
 				pkts_compl++;
 				bytes_compl += vp->tx_skb->len;
 				dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
@@ -2459,19 +2458,19 @@ boomerang_interrupt(int irq, void *dev_id)
 			struct sk_buff *skb = vp->tx_skbuff[entry];
 #if DO_ZEROCOPY
 			int i;
-			pci_unmap_single(VORTEX_PCI(vp),
+			dma_unmap_single(vp->gendev,
 					le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
 					le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
-					PCI_DMA_TODEVICE);
+					DMA_TO_DEVICE);
 
 			for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
-					pci_unmap_page(VORTEX_PCI(vp),
+					dma_unmap_page(vp->gendev,
 							 le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
 							 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
-							 PCI_DMA_TODEVICE);
+							 DMA_TO_DEVICE);
 #else
-			pci_unmap_single(VORTEX_PCI(vp),
-				le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
+			dma_unmap_single(vp->gendev,
+				le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE);
 #endif
 			pkts_compl++;
 			bytes_compl += skb->len;
@@ -2561,14 +2560,14 @@ static int vortex_rx(struct net_device *dev)
 				/* 'skb_put()' points to the start of sk_buff data area. */
 				if (vp->bus_master &&
 					! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
-					dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
-									   pkt_len, PCI_DMA_FROMDEVICE);
+					dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len),
+									   pkt_len, DMA_FROM_DEVICE);
 					iowrite32(dma, ioaddr + Wn7_MasterAddr);
 					iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
 					iowrite16(StartDMAUp, ioaddr + EL3_CMD);
 					while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
 						;
-					pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
+					dma_unmap_single(vp->gendev, dma, pkt_len, DMA_FROM_DEVICE);
 				} else {
 					ioread32_rep(ioaddr + RX_FIFO,
 						     skb_put(skb, pkt_len),
@@ -2635,11 +2634,11 @@ boomerang_rx(struct net_device *dev)
 			if (pkt_len < rx_copybreak &&
 			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
-				pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
 				/* 'skb_put()' points to the start of sk_buff data area. */
 				skb_put_data(skb, vp->rx_skbuff[entry]->data,
 					     pkt_len);
-				pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
 				vp->rx_copy++;
 			} else {
 				/* Pre-allocate the replacement skb.  If it or its
@@ -2651,9 +2650,9 @@ boomerang_rx(struct net_device *dev)
 					dev->stats.rx_dropped++;
 					goto clear_complete;
 				}
-				newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
-							PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
-				if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
+				newdma = dma_map_single(vp->gendev, newskb->data,
+							PKT_BUF_SZ, DMA_FROM_DEVICE);
+				if (dma_mapping_error(vp->gendev, newdma)) {
 					dev->stats.rx_dropped++;
 					consume_skb(newskb);
 					goto clear_complete;
@@ -2664,7 +2663,7 @@ boomerang_rx(struct net_device *dev)
 				vp->rx_skbuff[entry] = newskb;
 				vp->rx_ring[entry].addr = cpu_to_le32(newdma);
 				skb_put(skb, pkt_len);
-				pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+				dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
 				vp->rx_nocopy++;
 			}
 			skb->protocol = eth_type_trans(skb, dev);
@@ -2761,8 +2760,8 @@ vortex_close(struct net_device *dev)
 	if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
 		for (i = 0; i < RX_RING_SIZE; i++)
 			if (vp->rx_skbuff[i]) {
-				pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
-									PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+				dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr),
+									PKT_BUF_SZ, DMA_FROM_DEVICE);
 				dev_kfree_skb(vp->rx_skbuff[i]);
 				vp->rx_skbuff[i] = NULL;
 			}
@@ -2775,12 +2774,12 @@ vortex_close(struct net_device *dev)
 				int k;
 
 				for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
-						pci_unmap_single(VORTEX_PCI(vp),
+						dma_unmap_single(vp->gendev,
 								 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
 								 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
-								 PCI_DMA_TODEVICE);
+								 DMA_TO_DEVICE);
 #else
-				pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
+				dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE);
 #endif
 				dev_kfree_skb(skb);
 				vp->tx_skbuff[i] = NULL;
@@ -3288,11 +3287,10 @@ static void vortex_remove_one(struct pci_dev *pdev)
 	pci_iounmap(pdev, vp->ioaddr);
 
-	pci_free_consistent(pdev,
-			    sizeof(struct boom_rx_desc) * RX_RING_SIZE
-			    + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
-			    vp->rx_ring,
-			    vp->rx_ring_dma);
+	dma_free_coherent(&pdev->dev,
+			sizeof(struct boom_rx_desc) * RX_RING_SIZE +
+			sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+			vp->rx_ring, vp->rx_ring_dma);
 
 	pci_release_regions(pdev);
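The whole 3c59x conversion above is the mechanical legacy-PCI-to-generic-DMA translation: each pci_* wrapper becomes the dma_* equivalent operating on vp->gendev, PCI_DMA_* directions become DMA_*, and coherent allocation gains an explicit GFP flag. A minimal sketch of the mapping pattern, with illustrative names (dev, buf, len are not from the driver):

#include <linux/dma-mapping.h>

/* was: dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 * now: operate on the generic struct device with DMA_* directions */
static int example_map_tx(struct device *dev, void *buf, size_t len,
			  dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))	/* check the handle, never compare to 0 */
		return -ENOMEM;
	return 0;
}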
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index ac99d089ac72..1c97e39b478e 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -164,7 +164,9 @@ bad_clone_list[] __initdata = {
 #define NESM_START_PG	0x40	/* First page of TX buffer */
 #define NESM_STOP_PG	0x80	/* Last page +1 of RX ring */
 
-#if defined(CONFIG_ATARI)	/* 8-bit mode on Atari, normal on Q40 */
+#if defined(CONFIG_MACH_TX49XX)
+#  define DCR_VAL 0x48		/* 8-bit mode */
+#elif defined(CONFIG_ATARI)	/* 8-bit mode on Atari, normal on Q40 */
 #  define DCR_VAL (MACH_IS_ATARI ? 0x48 : 0x49)
 #else
 #  define DCR_VAL 0x49
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index a561705f232c..be198cc0b10c 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1552,22 +1552,26 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!ioaddr) {
 		if (pcnet32_debug & NETIF_MSG_PROBE)
 			pr_err("card has no PCI IO resources, aborting\n");
-		return -ENODEV;
+		err = -ENODEV;
+		goto err_disable_dev;
 	}
 
 	err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
 	if (err) {
 		if (pcnet32_debug & NETIF_MSG_PROBE)
 			pr_err("architecture does not support 32bit PCI busmaster DMA\n");
-		return err;
+		goto err_disable_dev;
 	}
 	if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
 		if (pcnet32_debug & NETIF_MSG_PROBE)
 			pr_err("io address range already allocated\n");
-		return -EBUSY;
+		err = -EBUSY;
+		goto err_disable_dev;
 	}
 
 	err = pcnet32_probe1(ioaddr, 1, pdev);
+
+err_disable_dev:
 	if (err < 0)
 		pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index b57acb8dc35b..dc25066c59a1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -419,15 +419,15 @@ static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
 	{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
 	{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
 	{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
-	{0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
-	{0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
-	{0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
-	{0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
-	{0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
-	{0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */
-	{0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */
-	{0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
-	{0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
+	{0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */
+	{0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */
+	{0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */
+	{0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */
+	{0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */
+	{0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */
+	{0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */
+	{0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */
+	{0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */
 };
 
 static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
@@ -444,16 +444,6 @@ static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
 	{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
 	{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
 	{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
-	{0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
-	{0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
-	{0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
-	{0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
-	{0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
-	{0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */
-	{0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */
-	{0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 */
-	{0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
-	{0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
 };
 
 static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index db92f1858060..b76447baccaf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -836,7 +836,7 @@ bool is_filter_exact_match(struct adapter *adap,
 {
 	struct tp_params *tp = &adap->params.tp;
 	u64 hash_filter_mask = tp->hash_filter_mask;
-	u32 mask;
+	u64 ntuple_mask = 0;
 
 	if (!is_hashfilter(adap))
 		return false;
@@ -865,73 +865,45 @@ bool is_filter_exact_match(struct adapter *adap,
 	if (!fs->val.fport || fs->mask.fport != 0xffff)
 		return false;
 
-	if (tp->fcoe_shift >= 0) {
-		mask = (hash_filter_mask >> tp->fcoe_shift) & FT_FCOE_W;
-		if (mask && !fs->mask.fcoe)
-			return false;
-	}
+	/* calculate tuple mask and compare with mask configured in hw */
+	if (tp->fcoe_shift >= 0)
+		ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift;
 
-	if (tp->port_shift >= 0) {
-		mask = (hash_filter_mask >> tp->port_shift) & FT_PORT_W;
-		if (mask && !fs->mask.iport)
-			return false;
-	}
+	if (tp->port_shift >= 0)
+		ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
 
 	if (tp->vnic_shift >= 0) {
-		mask = (hash_filter_mask >> tp->vnic_shift) & FT_VNIC_ID_W;
-
-		if ((adap->params.tp.ingress_config & VNIC_F)) {
-			if (mask && !fs->mask.pfvf_vld)
-				return false;
-		} else {
-			if (mask && !fs->mask.ovlan_vld)
-				return false;
-		}
+		if ((adap->params.tp.ingress_config & VNIC_F))
+			ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift;
+		else
+			ntuple_mask |= (u64)fs->mask.ovlan_vld <<
+				       tp->vnic_shift;
 	}
 
-	if (tp->vlan_shift >= 0) {
-		mask = (hash_filter_mask >> tp->vlan_shift) & FT_VLAN_W;
-		if (mask && !fs->mask.ivlan)
-			return false;
-	}
+	if (tp->vlan_shift >= 0)
+		ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift;
 
-	if (tp->tos_shift >= 0) {
-		mask = (hash_filter_mask >> tp->tos_shift) & FT_TOS_W;
-		if (mask && !fs->mask.tos)
-			return false;
-	}
+	if (tp->tos_shift >= 0)
+		ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
 
-	if (tp->protocol_shift >= 0) {
-		mask = (hash_filter_mask >> tp->protocol_shift) & FT_PROTOCOL_W;
-		if (mask && !fs->mask.proto)
-			return false;
-	}
+	if (tp->protocol_shift >= 0)
+		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
 
-	if (tp->ethertype_shift >= 0) {
-		mask = (hash_filter_mask >> tp->ethertype_shift) &
-		       FT_ETHERTYPE_W;
-		if (mask && !fs->mask.ethtype)
-			return false;
-	}
+	if (tp->ethertype_shift >= 0)
+		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
 
-	if (tp->macmatch_shift >= 0) {
-		mask = (hash_filter_mask >> tp->macmatch_shift) & FT_MACMATCH_W;
-		if (mask && !fs->mask.macidx)
-			return false;
-	}
+	if (tp->macmatch_shift >= 0)
+		ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
+
+	if (tp->matchtype_shift >= 0)
+		ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift;
+
+	if (tp->frag_shift >= 0)
+		ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift;
+
+	if (ntuple_mask != hash_filter_mask)
+		return false;
 
-	if (tp->matchtype_shift >= 0) {
-		mask = (hash_filter_mask >> tp->matchtype_shift) &
-		       FT_MPSHITTYPE_W;
-		if (mask && !fs->mask.matchtype)
-			return false;
-	}
-	if (tp->frag_shift >= 0) {
-		mask = (hash_filter_mask >> tp->frag_shift) &
-		       FT_FRAGMENTATION_W;
-		if (mask && !fs->mask.frag)
-			return false;
-	}
 	return true;
 }
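The cxgb4 rewrite above inverts the check: instead of testing each field's hardware mask bit and bailing out early, it composes the filter's own ntuple mask field by field and requires it to equal the configured hash_filter_mask exactly, which also rejects filters that leave a hardware-required field unmasked. The accumulate-then-compare shape in isolation (a sketch; field_masks and shifts are hypothetical stand-ins for the per-field values):

/* Build the ntuple mask the same way the hardware composes it,
 * then require an exact match against the configured mask. */
static bool mask_is_exact(u64 hw_mask, const u64 *field_masks,
			  const int *shifts, int nfields)
{
	u64 ntuple_mask = 0;
	int i;

	for (i = 0; i < nfields; i++)
		if (shifts[i] >= 0)	/* shift < 0: field not part of the tuple */
			ntuple_mask |= field_masks[i] << shifts[i];

	return ntuple_mask == hw_mask;
}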
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 81684acf52af..8a8b12b720ef 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2747,11 +2747,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pci_set_master(pdev);
 
 	/* Query PCI controller on system for DMA addressing
-	 * limitation for the device.  Try 64-bit first, and
+	 * limitation for the device.  Try 47-bit first, and
 	 * fail to 32-bit.
 	 */
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
 	if (err) {
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
@@ -2765,10 +2765,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			goto err_out_release_regions;
 		}
 	} else {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
 		if (err) {
 			dev_err(dev, "Unable to obtain %u-bit DMA "
-				"for consistent allocations, aborting\n", 64);
+				"for consistent allocations, aborting\n", 47);
 			goto err_out_release_regions;
 		}
 		using_dac = 1;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c697e79e491e..8f755009ff38 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3309,7 +3309,9 @@ void be_detect_error(struct be_adapter *adapter)
 			if ((val & POST_STAGE_FAT_LOG_START)
 			     != POST_STAGE_FAT_LOG_START &&
 			    (val & POST_STAGE_ARMFW_UE)
-			     != POST_STAGE_ARMFW_UE)
+			     != POST_STAGE_ARMFW_UE &&
+			    (val & POST_STAGE_RECOVERABLE_ERR)
+			     != POST_STAGE_RECOVERABLE_ERR)
 				return;
 		}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d4604bc8eb5b..9d3eed46830d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
  * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index f81439796ac7..43d973215040 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Fast Ethernet Controller (ENET) PTP driver for MX6x.
  *
  * Copyright (C) 2012 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6e8d6a6f6aaf..5ec1185808e5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -192,6 +192,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
 	if (adapter->fw_done_rc) {
 		dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
 			adapter->fw_done_rc);
+		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
 		return -1;
 	}
 	return 0;
@@ -795,9 +796,11 @@ static int ibmvnic_login(struct net_device *netdev)
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 	unsigned long timeout = msecs_to_jiffies(30000);
 	int retry_count = 0;
+	bool retry;
 	int rc;
 
 	do {
+		retry = false;
 		if (retry_count > IBMVNIC_MAX_QUEUES) {
 			netdev_warn(netdev, "Login attempts exceeded\n");
 			return -1;
@@ -821,6 +824,9 @@ static int ibmvnic_login(struct net_device *netdev)
 			retry_count++;
 			release_sub_crqs(adapter, 1);
 
+			retry = true;
+			netdev_dbg(netdev,
+				   "Received partial success, retrying...\n");
 			adapter->init_done_rc = 0;
 			reinit_completion(&adapter->init_done);
 			send_cap_queries(adapter);
@@ -848,7 +854,7 @@ static int ibmvnic_login(struct net_device *netdev)
 			netdev_warn(netdev, "Adapter login failed\n");
 			return -1;
 		}
-	} while (adapter->init_done_rc == PARTIALSUCCESS);
+	} while (retry);
 
 	/* handle pending MAC address changes after successful login */
 	if (adapter->mac_change_pending) {
@@ -1821,9 +1827,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 			if (rc)
 				return rc;
 		}
+		ibmvnic_disable_irqs(adapter);
 	}
-
-	ibmvnic_disable_irqs(adapter);
 	adapter->state = VNIC_CLOSED;
 
 	if (reset_state == VNIC_CLOSED)
@@ -2617,18 +2622,21 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
 {
 	struct device *dev = &adapter->vdev->dev;
 	unsigned long rc;
-	u64 val;
 
 	if (scrq->hw_irq > 0x100000000ULL) {
 		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
 		return 1;
 	}
 
-	val = (0xff000000) | scrq->hw_irq;
-	rc = plpar_hcall_norets(H_EOI, val);
-	if (rc)
-		dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
-			val, rc);
+	if (adapter->resetting &&
+	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
+		u64 val = (0xff000000) | scrq->hw_irq;
+
+		rc = plpar_hcall_norets(H_EOI, val);
+		if (rc)
+			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
+				val, rc);
+	}
 
 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
 				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
@@ -4586,14 +4594,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
 		release_crq_queue(adapter);
 	}
 
-	rc = init_stats_buffers(adapter);
-	if (rc)
-		return rc;
-
-	rc = init_stats_token(adapter);
-	if (rc)
-		return rc;
-
 	return rc;
 }
@@ -4662,13 +4662,21 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 			goto ibmvnic_init_fail;
 	} while (rc == EAGAIN);
 
+	rc = init_stats_buffers(adapter);
+	if (rc)
+		goto ibmvnic_init_fail;
+
+	rc = init_stats_token(adapter);
+	if (rc)
+		goto ibmvnic_stats_fail;
+
 	netdev->mtu = adapter->req_mtu - ETH_HLEN;
 	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
 	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
 
 	rc = device_create_file(&dev->dev, &dev_attr_failover);
 	if (rc)
-		goto ibmvnic_init_fail;
+		goto ibmvnic_dev_file_err;
 
 	netif_carrier_off(netdev);
 
 	rc = register_netdev(netdev);
@@ -4687,6 +4695,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 ibmvnic_register_fail:
 	device_remove_file(&dev->dev, &dev_attr_failover);
 
+ibmvnic_dev_file_err:
+	release_stats_token(adapter);
+
+ibmvnic_stats_fail:
+	release_stats_buffers(adapter);
+
 ibmvnic_init_fail:
 	release_sub_crqs(adapter, 1);
 	release_crq_queue(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index afadba99f7b8..2ecd55856c50 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -9054,7 +9054,6 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
 {
 	const struct tc_action *a;
 	LIST_HEAD(actions);
-	int err;
 
 	if (!tcf_exts_has_actions(exts))
 		return -EINVAL;
@@ -9075,11 +9074,11 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
 			if (!dev)
 				return -EINVAL;
 
-			err = handle_redirect_action(adapter, dev->ifindex, queue,
-						     action);
-			if (err == 0)
-				return err;
+			return handle_redirect_action(adapter, dev->ifindex,
+						      queue, action);
 		}
+
+		return -EINVAL;
 	}
 
 	return -EINVAL;
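In the ibmvnic hunks, the login loop is now driven by a bool that only the partial-success path sets, rather than by re-reading init_done_rc, which that same path resets to 0 before retrying. A minimal sketch of the loop shape (do_login, PARTIAL_SUCCESS, and MAX_RETRIES are hypothetical names, not the driver's):

static int login_with_retry(void)
{
	int retry_count = 0;
	bool retry;

	do {
		retry = false;
		if (retry_count++ > MAX_RETRIES)
			return -1;
		if (do_login() == PARTIAL_SUCCESS) {
			retry = true;	/* only this path loops again */
			/* re-negotiate capabilities before the next attempt */
		}
	} while (retry);
	return 0;
}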
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index a822f7a56bc5..685337d58276 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -43,12 +43,12 @@
 #include "fw.h"
 
 /*
- * We allocate in as big chunks as we can, up to a maximum of 256 KB
- * per chunk.
+ * We allocate in page size (default 4KB on many archs) chunks to avoid high
+ * order memory allocations in fragmented/high usage memory situation.
  */
 enum {
-	MLX4_ICM_ALLOC_SIZE	= 1 << 18,
-	MLX4_TABLE_CHUNK_SIZE	= 1 << 18
+	MLX4_ICM_ALLOC_SIZE	= PAGE_SIZE,
+	MLX4_TABLE_CHUNK_SIZE	= PAGE_SIZE,
 };
 
 static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
@@ -398,9 +398,11 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 	u64 size;
 
 	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
+	if (WARN_ON(!obj_per_chunk))
+		return -EINVAL;
 	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
 
-	table->icm      = kcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL);
+	table->icm      = kvzalloc(num_icm * sizeof(*table->icm), GFP_KERNEL);
 	if (!table->icm)
 		return -ENOMEM;
 	table->virt     = virt;
@@ -446,7 +448,7 @@ err:
 			mlx4_free_icm(dev, table->icm[i], use_coherent);
 	}
 
-	kfree(table->icm);
+	kvfree(table->icm);
 
 	return -ENOMEM;
 }
@@ -462,5 +464,5 @@ void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
 			mlx4_free_icm(dev, table->icm[i], table->coherent);
 	}
 
-	kfree(table->icm);
+	kvfree(table->icm);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 2edcce98ab2d..65482f004e50 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -172,7 +172,7 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
 		list_add_tail(&dev_ctx->list, &priv->ctx_list);
 		spin_unlock_irqrestore(&priv->ctx_lock, flags);
 
-		mlx4_dbg(dev, "Inrerface for protocol %d restarted with when bonded mode is %s\n",
+		mlx4_dbg(dev, "Interface for protocol %d restarted with bonded mode %s\n",
 			 dev_ctx->intf->protocol, enable ?
 			 "enabled" : "disabled");
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 211578ffc70d..60172a38c4a4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2929,6 +2929,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
 		mlx4_err(dev, "Failed to create file for port %d\n", port);
 		devlink_port_unregister(&info->devlink_port);
 		info->port = -1;
+		return err;
 	}
 
 	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
@@ -2950,9 +2951,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
 				   &info->port_attr);
 		devlink_port_unregister(&info->devlink_port);
 		info->port = -1;
+		return err;
 	}
 
-	return err;
+	return 0;
 }
 
 static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 3aaf4bad6c5a..427e7a31862c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -393,11 +393,11 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
 	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
 	struct mlx4_qp *qp;
 
-	spin_lock(&qp_table->lock);
+	spin_lock_irq(&qp_table->lock);
 
 	qp = __mlx4_qp_lookup(dev, qpn);
 
-	spin_unlock(&qp_table->lock);
+	spin_unlock_irq(&qp_table->lock);
 	return qp;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 176645762e49..1ff0b0e93804 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -615,6 +615,45 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
 	return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
 }
 
+static __be32 mlx5e_get_fcs(struct sk_buff *skb)
+{
+	int last_frag_sz, bytes_in_prev, nr_frags;
+	u8 *fcs_p1, *fcs_p2;
+	skb_frag_t *last_frag;
+	__be32 fcs_bytes;
+
+	if (!skb_is_nonlinear(skb))
+		return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
+	last_frag_sz = skb_frag_size(last_frag);
+
+	/* If all FCS data is in last frag */
+	if (last_frag_sz >= ETH_FCS_LEN)
+		return *(__be32 *)(skb_frag_address(last_frag) +
+				   last_frag_sz - ETH_FCS_LEN);
+
+	fcs_p2 = (u8 *)skb_frag_address(last_frag);
+	bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
+
+	/* Find where the other part of the FCS is - Linear or another frag */
+	if (nr_frags == 1) {
+		fcs_p1 = skb_tail_pointer(skb);
+	} else {
+		skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
+
+		fcs_p1 = skb_frag_address(prev_frag) +
+			 skb_frag_size(prev_frag);
+	}
+	fcs_p1 -= bytes_in_prev;
+
+	memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
+	memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
+
+	return fcs_bytes;
+}
+
 static inline void mlx5e_handle_csum(struct net_device *netdev,
 				     struct mlx5_cqe64 *cqe,
 				     struct mlx5e_rq *rq,
@@ -643,6 +682,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 		skb->csum = csum_partial(skb->data + ETH_HLEN,
 					 network_depth - ETH_HLEN,
 					 skb->csum);
+		if (unlikely(netdev->features & NETIF_F_RXFCS))
+			skb->csum = csum_add(skb->csum,
+					     (__force __wsum)mlx5e_get_fcs(skb));
 		rq->stats.csum_complete++;
 		return;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 0f5da499a223..fad8c2e3804e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -237,19 +237,17 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
 	context->buf.sg[0].data = &context->command;
 
 	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
-	list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
+	res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
+	if (!res)
+		list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
 	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
 
-	res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
 	if (res) {
-		mlx5_fpga_warn(fdev, "Failure sending IPSec command: %d\n",
-			       res);
-		spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
-		list_del(&context->list);
-		spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
+		mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
 		kfree(context);
 		return ERR_PTR(res);
 	}
 
+	/* Context will be freed by wait func after completion */
 	return context;
 }
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index ca38a30fbe91..adc6ab2cf429 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4433,6 +4433,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
 			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
 			return -EINVAL;
 		}
+		if (is_vlan_dev(upper_dev) &&
+		    vlan_dev_vlan_id(upper_dev) == 1) {
+			NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic");
+			return -EINVAL;
+		}
 		break;
 	case NETDEV_CHANGEUPPER:
 		upper_dev = info->upper_dev;
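The mlx5 FPGA IPSec hunk reorders send and bookkeeping: the command is added to pending_cmds only after the send succeeds, with both steps under the same lock, so a failed send can no longer leave a stale list entry for the completion path to find. A sketch of that ordering (the my_* names are hypothetical, not the driver's):

/* Queue the context only once the send has succeeded, under one lock,
 * so the completion handler can never see a command that was not sent. */
static void *submit_cmd(struct my_ipsec *ipsec, struct my_ctx *ctx)
{
	unsigned long flags;
	int res;

	spin_lock_irqsave(&ipsec->pending_lock, flags);
	res = my_sendmsg(ipsec->conn, &ctx->buf);
	if (!res)
		list_add_tail(&ctx->list, &ipsec->pending_cmds);
	spin_unlock_irqrestore(&ipsec->pending_lock, flags);

	if (res) {
		kfree(ctx);	/* never queued; safe to free immediately */
		return ERR_PTR(res);
	}
	return ctx;		/* freed by the wait path after completion */
}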
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 7ed08486ae23..c805dcbebd02 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -84,7 +84,7 @@ static int sonic_open(struct net_device *dev)
 	for (i = 0; i < SONIC_NUM_RRS; i++) {
 		dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
 						  SONIC_RBSIZE, DMA_FROM_DEVICE);
-		if (!laddr) {
+		if (dma_mapping_error(lp->device, laddr)) {
 			while(i > 0) { /* free any that were mapped successfully */
 				i--;
 				dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 1dc424685f4e..35fb31f682af 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -335,7 +335,7 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
 		return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem);
 
 	start = mem;
-	while (mem - start + 8 < nfp_cpp_area_size(area)) {
+	while (mem - start + 8 <= nfp_cpp_area_size(area)) {
 		u8 __iomem *value;
 		u32 type, length;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 00f41c145d4d..820b226d6ff8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -77,7 +77,7 @@
 #define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
 
 /* ILT entry structure */
-#define ILT_ENTRY_PHY_ADDR_MASK		0x000FFFFFFFFFFFULL
+#define ILT_ENTRY_PHY_ADDR_MASK		(~0ULL >> 12)
 #define ILT_ENTRY_PHY_ADDR_SHIFT	0
 #define ILT_ENTRY_VALID_MASK		0x1ULL
 #define ILT_ENTRY_VALID_SHIFT		52
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 38502815d681..468c59d2e491 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -292,6 +292,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 	struct qed_ll2_tx_packet *p_pkt = NULL;
 	struct qed_ll2_info *p_ll2_conn;
 	struct qed_ll2_tx_queue *p_tx;
+	unsigned long flags = 0;
 	dma_addr_t tx_frag;
 
 	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
@@ -300,6 +301,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 
 	p_tx = &p_ll2_conn->tx_queue;
 
+	spin_lock_irqsave(&p_tx->lock, flags);
 	while (!list_empty(&p_tx->active_descq)) {
 		p_pkt = list_first_entry(&p_tx->active_descq,
 					 struct qed_ll2_tx_packet, list_entry);
@@ -309,6 +311,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 		list_del(&p_pkt->list_entry);
 		b_last_packet = list_empty(&p_tx->active_descq);
 		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+		spin_unlock_irqrestore(&p_tx->lock, flags);
 		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
 			struct qed_ooo_buffer *p_buffer;
@@ -328,7 +331,9 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 						      b_last_frag,
 						      b_last_packet);
 		}
+		spin_lock_irqsave(&p_tx->lock, flags);
 	}
+	spin_unlock_irqrestore(&p_tx->lock, flags);
 }
 
 static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
@@ -556,6 +561,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 	struct qed_ll2_info *p_ll2_conn = NULL;
 	struct qed_ll2_rx_packet *p_pkt = NULL;
 	struct qed_ll2_rx_queue *p_rx;
+	unsigned long flags = 0;
 
 	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
 	if (!p_ll2_conn)
@@ -563,13 +569,14 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 
 	p_rx = &p_ll2_conn->rx_queue;
 
+	spin_lock_irqsave(&p_rx->lock, flags);
 	while (!list_empty(&p_rx->active_descq)) {
 		p_pkt = list_first_entry(&p_rx->active_descq,
 					 struct qed_ll2_rx_packet, list_entry);
 		if (!p_pkt)
 			break;
-
 		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
+		spin_unlock_irqrestore(&p_rx->lock, flags);
 
 		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
 			struct qed_ooo_buffer *p_buffer;
@@ -588,7 +595,30 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 						      cookie,
 						      rx_buf_addr, b_last);
 		}
+		spin_lock_irqsave(&p_rx->lock, flags);
 	}
+	spin_unlock_irqrestore(&p_rx->lock, flags);
+}
+
+static bool
+qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
+				struct core_rx_slow_path_cqe *p_cqe)
+{
+	struct ooo_opaque *iscsi_ooo;
+	u32 cid;
+
+	if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
+		return false;
+
+	iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
+	if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
+		return false;
+
+	/* Need to make a flush */
+	cid = le32_to_cpu(iscsi_ooo->cid);
+	qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
+
+	return true;
 }
 
 static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
@@ -617,6 +647,11 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
 		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
 		cqe_type = cqe->rx_cqe_sp.type;
 
+		if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
+			if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
+							    &cqe->rx_cqe_sp))
+				continue;
+
 		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
 			DP_NOTICE(p_hwfn,
 				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
@@ -794,6 +829,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
 	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
 	int rc;
 
+	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
+		return 0;
+
 	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
 	if (rc)
 		return rc;
@@ -814,6 +852,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
 	u16 new_idx = 0, num_bds = 0;
 	int rc;
 
+	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
+		return 0;
+
 	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
 	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
@@ -1867,17 +1908,25 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
 	/* Stop Tx & Rx of connection, if needed */
 	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
+		p_ll2_conn->tx_queue.b_cb_registred = false;
+		smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
 		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
 		if (rc)
 			goto out;
+
 		qed_ll2_txq_flush(p_hwfn, connection_handle);
+		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
 	}
 
 	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+		p_ll2_conn->rx_queue.b_cb_registred = false;
+		smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
 		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
 		if (rc)
 			goto out;
+
 		qed_ll2_rxq_flush(p_hwfn, connection_handle);
+		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
 	}
 
 	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
@@ -1925,16 +1974,6 @@ void qed_ll2_release_connection(void *cxt, u8 connection_handle)
 	if (!p_ll2_conn)
 		return;
 
-	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
-		p_ll2_conn->rx_queue.b_cb_registred = false;
-		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
-	}
-
-	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
-		p_ll2_conn->tx_queue.b_cb_registred = false;
-		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
-	}
-
 	kfree(p_ll2_conn->tx_queue.descq_mem);
 	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
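The qed_ll2 flush loops above now hold the queue lock while walking the descriptor lists but drop it around the per-packet release callbacks, which can be heavyweight, then reacquire before the next iteration. The skeleton of that pattern (my_queue, my_pkt, and release_cb are hypothetical names):

#include <linux/list.h>
#include <linux/spinlock.h>

static void flush_queue(struct my_queue *q)
{
	struct my_pkt *pkt;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	while (!list_empty(&q->active)) {
		pkt = list_first_entry(&q->active, struct my_pkt, list);
		list_move_tail(&pkt->list, &q->free);

		spin_unlock_irqrestore(&q->lock, flags);
		release_cb(pkt);	/* may re-enter the driver; runs unlocked */
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
}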
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index a01e7d6e5442..f6655e251bbd 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1066,13 +1066,12 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
 
 	DP_INFO(edev, "Starting qede_remove\n");
 
+	qede_rdma_dev_remove(edev);
 	unregister_netdev(ndev);
 	cancel_delayed_work_sync(&edev->sp_task);
 
 	qede_ptp_disable(edev);
 
-	qede_rdma_dev_remove(edev);
-
 	edev->ops->common->set_power_state(cdev, PCI_D0);
 
 	pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index a5b792ce2ae7..1bf930d4a1e5 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -163,7 +163,7 @@ enum {
 };
 
 /* Driver's parameters */
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_RENESAS)
 #define SH_ETH_RX_ALIGN		32
 #else
 #define SH_ETH_RX_ALIGN		2
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index f4c0b02ddad8..59fbf74dcada 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1674,8 +1674,8 @@ static int netsec_probe(struct platform_device *pdev)
 	if (ret)
 		goto unreg_napi;
 
-	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
-		dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n");
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
+		dev_warn(&pdev->dev, "Failed to set DMA mask\n");
 
 	ret = register_netdev(ndev);
 	if (ret) {
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index abceea802ea1..38828ab77eb9 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1873,7 +1873,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->txchan)) {
 		dev_err(&pdev->dev, "error initializing tx dma channel\n");
 		rc = PTR_ERR(priv->txchan);
-		goto no_cpdma_chan;
+		goto err_free_dma;
 	}
 
 	priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
@@ -1881,14 +1881,14 @@ static int davinci_emac_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->rxchan)) {
 		dev_err(&pdev->dev, "error initializing rx dma channel\n");
 		rc = PTR_ERR(priv->rxchan);
-		goto no_cpdma_chan;
+		goto err_free_txchan;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (!res) {
 		dev_err(&pdev->dev, "error getting irq res\n");
 		rc = -ENOENT;
-		goto no_cpdma_chan;
+		goto err_free_rxchan;
 	}
 	ndev->irq = res->start;
@@ -1914,7 +1914,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
 		pm_runtime_put_noidle(&pdev->dev);
 		dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
 			__func__, rc);
-		goto no_cpdma_chan;
+		goto err_napi_del;
 	}
 
 	/* register the network device */
@@ -1924,7 +1924,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "error in register_netdev\n");
 		rc = -ENODEV;
 		pm_runtime_put(&pdev->dev);
-		goto no_cpdma_chan;
+		goto err_napi_del;
 	}
@@ -1937,11 +1937,13 @@ static int davinci_emac_probe(struct platform_device *pdev)
 
 	return 0;
 
-no_cpdma_chan:
-	if (priv->txchan)
-		cpdma_chan_destroy(priv->txchan);
-	if (priv->rxchan)
-		cpdma_chan_destroy(priv->rxchan);
+err_napi_del:
+	netif_napi_del(&priv->napi);
+err_free_rxchan:
+	cpdma_chan_destroy(priv->rxchan);
+err_free_txchan:
+	cpdma_chan_destroy(priv->txchan);
+err_free_dma:
 	cpdma_ctlr_destroy(priv->dma);
 no_pdata:
 	if (of_phy_is_fixed_link(np))
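The davinci_emac hunks replace a single catch-all label that null-checked each resource with a ladder of labels, where each label frees exactly what had been set up by the time the jump was taken. The canonical shape of that idiom (setup_* and teardown_* are hypothetical):

static int probe(void)
{
	int rc;

	rc = setup_a();
	if (rc)
		goto err_out;
	rc = setup_b();
	if (rc)
		goto err_free_a;	/* only a succeeded so far */
	rc = setup_c();
	if (rc)
		goto err_free_b;
	return 0;

err_free_b:
	teardown_b();
err_free_a:
	teardown_a();
err_out:
	return rc;
}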