author    Dan Williams <dan.j.williams@intel.com>  2009-09-08 23:32:24 +0200
committer Dan Williams <dan.j.williams@intel.com>  2009-09-08 23:32:24 +0200
commit    a348a7e6fdbcd2d5192a09719a479bb238fde727 (patch)
tree      5ff94185f4e5a810777469d7fe7832a8ec2d3430 /drivers/net/cxgb3/sge.c
parent    dmaengine: at_hdmac: add DMA slave transfers (diff)
parent    Linux 2.6.31-rc1 (diff)
Merge commit 'v2.6.31-rc1' into dmaengine
Diffstat (limited to 'drivers/net/cxgb3/sge.c')
-rw-r--r-- drivers/net/cxgb3/sge.c | 71
1 file changed, 39 insertions(+), 32 deletions(-)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index b3ee2bc1a005..29c79eb43beb 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -653,7 +653,8 @@ static void t3_reset_qset(struct sge_qset *q)
q->txq_stopped = 0;
q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
q->rx_reclaim_timer.function = NULL;
- q->lro_frag_tbl.nr_frags = q->lro_frag_tbl.len = 0;
+ q->nomem = 0;
+ napi_free_frags(&q->napi);
}
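
[Editor's note: this hunk replaces the hand-rolled lro_frag_tbl reset with the core GRO frag API. napi_free_frags() returns any skb that napi_get_frags() handed out but that was never flushed via napi_gro_frags(). A minimal sketch of the reset idea, assuming the qset fields shown in the hunk; the helper name is hypothetical:]

        /* Sketch: napi_free_frags() is a no-op when no frags skb is
         * pending, so the reset path may call it unconditionally.
         */
        static void example_reset_gro_state(struct sge_qset *q)
        {
                q->nomem = 0;                   /* clear the allocation-failure latch */
                napi_free_frags(&q->napi);      /* drop any half-built GRO skb */
        }
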
@@ -1239,7 +1240,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
q = &qs->txq[TXQ_ETH];
txq = netdev_get_tx_queue(dev, qidx);
- spin_lock(&q->lock);
reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
credits = q->size - q->in_use;
@@ -1250,7 +1250,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
dev_err(&adap->pdev->dev,
"%s: Tx ring %u full while queue awake!\n",
dev->name, q->cntxt_id & 7);
- spin_unlock(&q->lock);
return NETDEV_TX_BUSY;
}
@@ -1284,9 +1283,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (vlan_tx_tag_present(skb) && pi->vlan_grp)
qs->port_stats[SGE_PSTAT_VLANINS]++;
- dev->trans_start = jiffies;
- spin_unlock(&q->lock);
-
/*
* We do not use Tx completion interrupts to free DMAd Tx packets.
This is good for performance but means that we rely on new Tx
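
[Editor's note: the three t3_eth_xmit hunks above drop the driver-private q->lock and the manual dev->trans_start update. On a multiqueue netdev the core already holds the per-queue tx lock around ndo_start_xmit, so any other user of the ring must synchronize on that same lock rather than a private spinlock. A hedged sketch of the pattern, assuming timer/softirq context; example_reclaim is illustrative:]

        /* Sketch: a non-xmit ring user serializes against the stack's
         * xmit path via the netdev_queue lock.  Assumes softirq context,
         * where smp_processor_id() is stable.
         */
        static void example_reclaim(struct net_device *dev, int qidx)
        {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, qidx);

                __netif_tx_lock(txq, smp_processor_id());
                /* ... free completed Tx descriptors here ... */
                __netif_tx_unlock(txq);
        }
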
@@ -2073,20 +2069,19 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
struct sge_fl *fl, int len, int complete)
{
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+ struct sk_buff *skb = NULL;
struct cpl_rx_pkt *cpl;
- struct skb_frag_struct *rx_frag = qs->lro_frag_tbl.frags;
- int nr_frags = qs->lro_frag_tbl.nr_frags;
- int frag_len = qs->lro_frag_tbl.len;
+ struct skb_frag_struct *rx_frag;
+ int nr_frags;
int offset = 0;
- if (!nr_frags) {
- offset = 2 + sizeof(struct cpl_rx_pkt);
- qs->lro_va = cpl = sd->pg_chunk.va + 2;
+ if (!qs->nomem) {
+ skb = napi_get_frags(&qs->napi);
+ qs->nomem = !skb;
}
fl->credits--;
- len -= offset;
pci_dma_sync_single_for_cpu(adap->pdev,
pci_unmap_addr(sd, dma_addr),
fl->buf_size - SGE_PG_RSVD,
@@ -2099,21 +2094,38 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
fl->alloc_size,
PCI_DMA_FROMDEVICE);
+ if (!skb) {
+ put_page(sd->pg_chunk.page);
+ if (complete)
+ qs->nomem = 0;
+ return;
+ }
+
+ rx_frag = skb_shinfo(skb)->frags;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (!nr_frags) {
+ offset = 2 + sizeof(struct cpl_rx_pkt);
+ qs->lro_va = sd->pg_chunk.va + 2;
+ }
+ len -= offset;
+
prefetch(qs->lro_va);
rx_frag += nr_frags;
rx_frag->page = sd->pg_chunk.page;
rx_frag->page_offset = sd->pg_chunk.offset + offset;
rx_frag->size = len;
- frag_len += len;
- qs->lro_frag_tbl.nr_frags++;
- qs->lro_frag_tbl.len = frag_len;
+ skb->len += len;
+ skb->data_len += len;
+ skb->truesize += len;
+ skb_shinfo(skb)->nr_frags++;
if (!complete)
return;
- qs->lro_frag_tbl.ip_summed = CHECKSUM_UNNECESSARY;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
cpl = qs->lro_va;
if (unlikely(cpl->vlan_valid)) {
@@ -2122,15 +2134,11 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
struct vlan_group *grp = pi->vlan_grp;
if (likely(grp != NULL)) {
- vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan),
- &qs->lro_frag_tbl);
- goto out;
+ vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan));
+ return;
}
}
- napi_gro_frags(&qs->napi, &qs->lro_frag_tbl);
-
-out:
- qs->lro_frag_tbl.nr_frags = qs->lro_frag_tbl.len = 0;
+ napi_gro_frags(&qs->napi);
}
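
[Editor's note: the rewritten lro_add_page() follows the standard napi_get_frags()/napi_gro_frags() lifecycle: borrow an skb from the NAPI context, attach page fragments while keeping the skb length accounting in step, and flush into GRO once the packet completes. A condensed, hedged sketch of that lifecycle, with error handling and driver specifics elided; gro_add_page_frag() is a hypothetical wrapper, and the frag field names match the 2.6.31-era struct skb_frag_struct used in this diff:]

        /* Sketch of the core GRO-frags receive pattern used above.
         * napi_get_frags() returns the same cached skb until it is
         * consumed by napi_gro_frags(), so it may be called per fragment.
         */
        static void gro_add_page_frag(struct napi_struct *napi, struct page *page,
                                      unsigned int off, unsigned int len,
                                      int complete)
        {
                struct sk_buff *skb = napi_get_frags(napi);
                struct skb_frag_struct *frag;

                if (!skb) {
                        put_page(page);         /* no skb: drop the fragment */
                        return;
                }

                frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags++];
                frag->page = page;
                frag->page_offset = off;
                frag->size = len;

                /* Grow the skb's length fields to match the new frag. */
                skb->len += len;
                skb->data_len += len;
                skb->truesize += len;

                if (complete) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;  /* hw validated */
                        napi_gro_frags(napi);   /* hand the skb to GRO */
                }
        }
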
/**
@@ -2299,8 +2307,6 @@ no_mem:
if (fl->use_pages) {
void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
- prefetch(&qs->lro_frag_tbl);
-
prefetch(addr);
#if L1_CACHE_BYTES < 128
prefetch(addr + L1_CACHE_BYTES);
@@ -2846,11 +2852,12 @@ static void sge_timer_tx(unsigned long data)
unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
unsigned long next_period;
- if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
- tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
- TX_RECLAIM_TIMER_CHUNK);
- spin_unlock(&qs->txq[TXQ_ETH].lock);
+ if (__netif_tx_trylock(qs->tx_q)) {
+ tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
+ TX_RECLAIM_TIMER_CHUNK);
+ __netif_tx_unlock(qs->tx_q);
}
+
if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
TX_RECLAIM_TIMER_CHUNK);
@@ -2858,8 +2865,8 @@ static void sge_timer_tx(unsigned long data)
}
next_period = TX_RECLAIM_PERIOD >>
- (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
- TX_RECLAIM_TIMER_CHUNK);
+ (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
+ TX_RECLAIM_TIMER_CHUNK);
mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
}
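
[Editor's note: the sge_timer_tx hunks swap the private spin_trylock for __netif_tx_trylock(), so the periodic reclaim contends on the same per-queue lock the xmit path runs under. A minimal hedged sketch of that timer pattern; example_timer_fn and the elided reclaim body are illustrative, while qs->tx_q, qs->tx_reclaim_timer, and TX_RECLAIM_PERIOD are taken from the diff:]

        /* Sketch: opportunistic Tx reclaim from a timer, taking the
         * netdev tx queue lock only if it is uncontended.  Runs in
         * softirq context, as __netif_tx_trylock() expects.
         */
        static void example_timer_fn(unsigned long data)
        {
                struct sge_qset *qs = (struct sge_qset *)data;

                if (__netif_tx_trylock(qs->tx_q)) {
                        /* ... reclaim completed descriptors ... */
                        __netif_tx_unlock(qs->tx_q);
                }
                mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
        }
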