Diffstat (limited to 'drivers/net/vxge/vxge-main.c')
-rw-r--r--	drivers/net/vxge/vxge-main.c | 173
1 file changed, 98 insertions(+), 75 deletions(-)
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 6034497536a4..b378037a29b5 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -87,22 +87,25 @@ static inline int is_vxge_card_up(struct vxgedev *vdev)
 static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
 {
 	unsigned long flags = 0;
-	struct sk_buff *skb_ptr = NULL;
-	struct sk_buff **temp, *head, *skb;
-
-	if (spin_trylock_irqsave(&fifo->tx_lock, flags)) {
-		vxge_hw_vpath_poll_tx(fifo->handle, (void **)&skb_ptr);
-		spin_unlock_irqrestore(&fifo->tx_lock, flags);
-	}
-	/* free SKBs */
-	head = skb_ptr;
-	while (head) {
-		skb = head;
-		temp = (struct sk_buff **)&skb->cb;
-		head = *temp;
-		*temp = NULL;
-		dev_kfree_skb_irq(skb);
-	}
+	struct sk_buff **skb_ptr = NULL;
+	struct sk_buff **temp;
+#define NR_SKB_COMPLETED 128
+	struct sk_buff *completed[NR_SKB_COMPLETED];
+	int more;
+
+	do {
+		more = 0;
+		skb_ptr = completed;
+
+		if (spin_trylock_irqsave(&fifo->tx_lock, flags)) {
+			vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
+						NR_SKB_COMPLETED, &more);
+			spin_unlock_irqrestore(&fifo->tx_lock, flags);
+		}
+		/* free SKBs */
+		for (temp = completed; temp != skb_ptr; temp++)
+			dev_kfree_skb_irq(*temp);
+	} while (more) ;
 }
 
 static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
@@ -283,6 +286,7 @@ vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
 	skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
 
 	rx_priv->skb = skb;
+	rx_priv->skb_data = NULL;
 	rx_priv->data_size = skb_size;
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
@@ -302,7 +306,8 @@ static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
 		ring->ndev->name, __func__, __LINE__);
 	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
 
-	dma_addr = pci_map_single(ring->pdev, rx_priv->skb->data,
+	rx_priv->skb_data = rx_priv->skb->data;
+	dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
 				rx_priv->data_size, PCI_DMA_FROMDEVICE);
 
 	if (dma_addr == 0) {
@@ -374,10 +379,10 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
 		if (ring->vlgrp && ext_info->vlan &&
 			(ring->vlan_tag_strip ==
 				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
-			vlan_gro_receive(&ring->napi, ring->vlgrp,
+			vlan_gro_receive(ring->napi_p, ring->vlgrp,
 					ext_info->vlan, skb);
 		else
-			napi_gro_receive(&ring->napi, skb);
+			napi_gro_receive(ring->napi_p, skb);
 	} else {
 		if (ring->vlgrp && vlan &&
 			(ring->vlan_tag_strip ==
@@ -442,10 +447,12 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 		vxge_hw_ring_replenish(ringh, 0);
 
 	do {
+		prefetch((char *)dtr + L1_CACHE_BYTES);
 		rx_priv = vxge_hw_ring_rxd_private_get(dtr);
 		skb = rx_priv->skb;
 		data_size = rx_priv->data_size;
 		data_dma = rx_priv->data_dma;
+		prefetch(rx_priv->skb_data);
 
 		vxge_debug_rx(VXGE_TRACE,
 			"%s: %s:%d  skb = 0x%p",
@@ -454,6 +461,8 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 		vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
 		pkt_length = dma_sizes;
 
+		pkt_length -= ETH_FCS_LEN;
+
 		vxge_debug_rx(VXGE_TRACE,
 			"%s: %s:%d  Packet Length = %d",
 			ring->ndev->name, __func__, __LINE__, pkt_length);
@@ -579,8 +588,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 	if (first_dtr)
 		vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
 
-	dev->last_rx = jiffies;
-
 	vxge_debug_entryexit(VXGE_TRACE,
 				"%s:%d  Exiting...",
 				__func__, __LINE__);
@@ -598,11 +605,10 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 enum vxge_hw_status
 vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
 		enum vxge_hw_fifo_tcode t_code, void *userdata,
-		void **skb_ptr)
+		struct sk_buff ***skb_ptr, int nr_skb, int *more)
 {
 	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
-	struct sk_buff *skb, *head = NULL;
-	struct sk_buff **temp;
+	struct sk_buff *skb, **done_skb = *skb_ptr;
 	int pkt_cnt = 0;
 
 	vxge_debug_entryexit(VXGE_TRACE,
@@ -655,9 +661,12 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
 		fifo->stats.tx_frms++;
 		fifo->stats.tx_bytes += skb->len;
 
-		temp = (struct sk_buff **)&skb->cb;
-		*temp = head;
-		head = skb;
+		*done_skb++ = skb;
+
+		if (--nr_skb <= 0) {
+			*more = 1;
+			break;
+		}
 
 		pkt_cnt++;
 		if (pkt_cnt > fifo->indicate_max_pkts)
@@ -666,11 +675,9 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
 	} while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
 				&dtr, &t_code) == VXGE_HW_OK);
 
+	*skb_ptr = done_skb;
 	vxge_wake_tx_queue(fifo, skb);
 
-	if (skb_ptr)
-		*skb_ptr = (void *) head;
-
 	vxge_debug_entryexit(VXGE_TRACE,
 				"%s: %s:%d  Exiting...",
 				fifo->ndev->name, __func__, __LINE__);
@@ -803,7 +810,7 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
  * NOTE: when device cant queue the pkt, just the trans_start variable will
  * not be upadted.
  */
-static int
+static netdev_tx_t
 vxge_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct vxge_fifo *fifo = NULL;
@@ -817,7 +824,6 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
 	u64 dma_pointer;
 	struct vxge_tx_priv *txdl_priv = NULL;
 	struct __vxge_hw_fifo *fifo_hw;
-	u32 max_mss = 0x0;
 	int offload_type;
 	unsigned long flags = 0;
 	int vpath_no = 0;
@@ -894,6 +900,12 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto _exit2;
 	}
 
+	/* Last TXD?  Stop tx queue to avoid dropping packets.  TX
+	 * completion will resume the queue.
+	 */
+	if (avail == 1)
+		vxge_stop_tx_queue(fifo);
+
 	status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
 	if (unlikely(status != VXGE_HW_OK)) {
 		vxge_debug_tx(VXGE_ERR,
@@ -969,10 +981,6 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
 		int mss = vxge_tcp_mss(skb);
 		if (mss) {
-			max_mss = dev->mtu + ETH_HLEN -
-				VXGE_HW_TCPIP_HEADER_MAX_SIZE;
-			if (mss > max_mss)
-				mss = max_mss;
 			vxge_debug_tx(VXGE_TRACE,
 				"%s: %s:%d mss = %d",
 				dev->name, __func__, __LINE__, mss);
@@ -1000,7 +1008,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
 	VXGE_COMPLETE_VPATH_TX(fifo);
 	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d  Exiting...",
 		dev->name, __func__, __LINE__);
-	return 0;
+	return NETDEV_TX_OK;
 
 _exit0:
 	vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
@@ -1024,7 +1032,7 @@ _exit2:
 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
 	VXGE_COMPLETE_VPATH_TX(fifo);
 
-	return 0;
+	return NETDEV_TX_OK;
 }
 
 /*
@@ -1049,6 +1057,7 @@ vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
 			rx_priv->data_size, PCI_DMA_FROMDEVICE);
 
 	dev_kfree_skb(rx_priv->skb);
+	rx_priv->skb_data = NULL;
 
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s: %s:%d  Exiting...",
@@ -2137,16 +2146,16 @@ int vxge_open_vpaths(struct vxgedev *vdev)
  */
 static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
 {
-	struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)dev_id;
-	struct vxgedev *vdev;
 	struct net_device *dev;
+	struct __vxge_hw_device *hldev;
 	u64 reason;
 	enum vxge_hw_status status;
+	struct vxgedev *vdev = (struct vxgedev *) dev_id;;
 
 	vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
-	dev = hldev->ndev;
-	vdev = netdev_priv(dev);
+	dev = vdev->ndev;
+	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
 
 	if (pci_channel_offline(vdev->pdev))
 		return IRQ_NONE;
@@ -2417,15 +2426,13 @@ static void vxge_rem_isr(struct vxgedev *vdev)
 #endif
 	if (vdev->config.intr_type == INTA) {
 			synchronize_irq(vdev->pdev->irq);
-			free_irq(vdev->pdev->irq, hldev);
+			free_irq(vdev->pdev->irq, vdev);
 	}
 }
 
 static int vxge_add_isr(struct vxgedev *vdev)
 {
 	int ret = 0;
-	struct __vxge_hw_device *hldev =
-		(struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
 #ifdef CONFIG_PCI_MSI
 	int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
 	u64 function_mode = vdev->config.device_hw_info.function_mode;
@@ -2579,7 +2586,7 @@ INTA_MODE:
 	if (vdev->config.intr_type == INTA) {
 		ret = request_irq((int) vdev->pdev->irq,
 			vxge_isr_napi,
-			IRQF_SHARED, vdev->desc[0], hldev);
+			IRQF_SHARED, vdev->desc[0], vdev);
 		if (ret) {
 			vxge_debug_init(VXGE_ERR,
 				"%s %s-%d: ISR registration failed",
@@ -2712,11 +2719,15 @@ vxge_open(struct net_device *dev)
 		netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
 			vdev->config.napi_weight);
 		napi_enable(&vdev->napi);
+		for (i = 0; i < vdev->no_of_vpath; i++)
+			vdev->vpaths[i].ring.napi_p = &vdev->napi;
 	} else {
 		for (i = 0; i < vdev->no_of_vpath; i++) {
 			netif_napi_add(dev, &vdev->vpaths[i].ring.napi,
 			    vxge_poll_msix, vdev->config.napi_weight);
 			napi_enable(&vdev->vpaths[i].ring.napi);
+			vdev->vpaths[i].ring.napi_p =
+				&vdev->vpaths[i].ring.napi;
 		}
 	}
@@ -2890,6 +2901,9 @@ int do_vxge_close(struct net_device *dev, int do_io)
 	vdev = (struct vxgedev *)netdev_priv(dev);
 	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
 
+	if (unlikely(!is_vxge_card_up(vdev)))
+		return 0;
+
 	/* If vxge_handle_crit_err task is executing,
 	 * wait till it completes. */
 	while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
@@ -3954,6 +3968,9 @@ static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
 
 	netif_device_detach(netdev);
 
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
 	if (netif_running(netdev)) {
 		/* Bring down the card, while avoiding PCI I/O */
 		do_vxge_close(netdev, 0);
@@ -4152,18 +4169,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 		attr.bar0,
 		(unsigned long long)pci_resource_start(pdev, 0));
 
-	attr.bar1 = pci_ioremap_bar(pdev, 2);
-	if (!attr.bar1) {
-		vxge_debug_init(VXGE_ERR,
-			"%s : cannot remap io memory bar2", __func__);
-		ret = -ENODEV;
-		goto _exit3;
-	}
-	vxge_debug_ll_config(VXGE_TRACE,
-		"pci ioremap bar1: %p:0x%llx",
-		attr.bar1,
-		(unsigned long long)pci_resource_start(pdev, 2));
-
 	status = vxge_hw_device_hw_info_get(attr.bar0,
 			&ll_config.device_hw_info);
 	if (status != VXGE_HW_OK) {
@@ -4171,17 +4176,17 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 			"%s: Reading of hardware info failed."
 			"Please try upgrading the firmware.", VXGE_DRIVER_NAME);
 		ret = -EINVAL;
-		goto _exit4;
+		goto _exit3;
 	}
 
 	if (ll_config.device_hw_info.fw_version.major !=
-			VXGE_DRIVER_VERSION_MAJOR) {
+		VXGE_DRIVER_FW_VERSION_MAJOR) {
 		vxge_debug_init(VXGE_ERR,
-			"FW Ver.(maj): %d not driver's expected version: %d",
-			ll_config.device_hw_info.fw_version.major,
-			VXGE_DRIVER_VERSION_MAJOR);
+			"%s: Incorrect firmware version."
+			"Please upgrade the firmware to version 1.x.x",
+			VXGE_DRIVER_NAME);
 		ret = -EINVAL;
-		goto _exit4;
+		goto _exit3;
 	}
 
 	vpath_mask = ll_config.device_hw_info.vpath_mask;
@@ -4189,7 +4194,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 		vxge_debug_ll_config(VXGE_TRACE,
 			"%s: No vpaths available in device", VXGE_DRIVER_NAME);
 		ret = -EINVAL;
-		goto _exit4;
+		goto _exit3;
 	}
 
 	vxge_debug_ll_config(VXGE_TRACE,
@@ -4222,7 +4227,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 		vxge_debug_ll_config(VXGE_ERR,
 			"%s: No more vpaths to configure", VXGE_DRIVER_NAME);
 		ret = 0;
-		goto _exit4;
+		goto _exit3;
 	}
 
 	/* Setting driver callbacks */
@@ -4235,7 +4240,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 		vxge_debug_init(VXGE_ERR,
 			"Failed to initialize device (%d)", status);
 		ret = -EINVAL;
-		goto _exit4;
+		goto _exit3;
 	}
 
 	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
@@ -4260,7 +4265,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 	if (vxge_device_register(hldev, &ll_config, high_dma, no_of_vpath,
 		&vdev)) {
 		ret = -EINVAL;
-		goto _exit5;
+		goto _exit4;
 	}
 
 	vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
@@ -4271,7 +4276,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 	hldev->ndev = vdev->ndev;
 	vdev->mtu = VXGE_HW_DEFAULT_MTU;
 	vdev->bar0 = attr.bar0;
-	vdev->bar1 = attr.bar1;
 	vdev->max_vpath_supported = max_vpath_supported;
 	vdev->no_of_vpath = no_of_vpath;
@@ -4336,6 +4340,27 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 		ll_config.device_hw_info.fw_version.version,
 		ll_config.device_hw_info.fw_date.date);
 
+	if (new_device) {
+		switch (ll_config.device_hw_info.function_mode) {
+		case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
+			vxge_debug_init(VXGE_TRACE,
+			"%s: Single Function Mode Enabled", vdev->ndev->name);
+			break;
+		case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
+			vxge_debug_init(VXGE_TRACE,
+			"%s: Multi Function Mode Enabled", vdev->ndev->name);
+			break;
+		case VXGE_HW_FUNCTION_MODE_SRIOV:
+			vxge_debug_init(VXGE_TRACE,
+			"%s: Single Root IOV Mode Enabled", vdev->ndev->name);
+			break;
+		case VXGE_HW_FUNCTION_MODE_MRIOV:
+			vxge_debug_init(VXGE_TRACE,
+			"%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
+			break;
+		}
+	}
+
 	vxge_print_parm(vdev, vpath_mask);
 
 	/* Store the fw version for ethttool option */
@@ -4353,7 +4378,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 			"%s: mac_addr_list : memory allocation failed",
 			vdev->ndev->name);
 		ret = -EPERM;
-		goto _exit6;
+		goto _exit5;
 	}
 	macaddr = (u8 *)&entry->macaddr;
 	memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
@@ -4361,6 +4386,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 		vdev->vpaths[i].mac_addr_cnt = 1;
 	}
 
+	kfree(device_config);
 	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d  Exiting...",
 		vdev->ndev->name, __func__, __LINE__);
@@ -4370,16 +4396,14 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 
 	return 0;
 
-_exit6:
+_exit5:
 	for (i = 0; i < vdev->no_of_vpath; i++)
 		vxge_free_mac_add_list(&vdev->vpaths[i]);
 
 	vxge_device_unregister(hldev);
-_exit5:
+_exit4:
 	pci_disable_sriov(pdev);
 	vxge_hw_device_terminate(hldev);
-_exit4:
-	iounmap(attr.bar1);
 _exit3:
 	iounmap(attr.bar0);
 _exit2:
@@ -4438,7 +4462,6 @@ vxge_remove(struct pci_dev *pdev)
 	kfree(vdev->vpaths);
 
 	iounmap(vdev->bar0);
-	iounmap(vdev->bar1);
 
 	pci_disable_sriov(pdev);