author	Divy Le Ray <divy@chelsio.com>	2009-05-28 13:23:02 +0200
committer	David S. Miller <davem@davemloft.net>	2009-05-29 10:54:37 +0200
commit	10b6d95612672f89deb39b5a60fb677c78ba4844
tree	5b853a9db086d4c0894d93f54e5bf666f0b8fa70	/drivers/net/cxgb3/sge.c
parent	Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/...
cxgb3: fix dma mapping regression
Commit 5e68b772e6efd189d6aca76f6872fb75d51ace60 ("cxgb3: map entire Rx page, feed map+offset to Rx ring.") introduced a regression on platforms that define DECLARE_PCI_UNMAP_ADDR() and the related macros as no-ops. Rx descriptors are fed with the page buffer bus address plus the page chunk offset. That bus address is set and retrieved through pci_unmap_addr_set()/pci_unmap_addr(), which are meaningless on x86 (when CONFIG_DMA_API_DEBUG is not set), so the HW ends up with a bogus bus address. This patch saves the page buffer bus address on all platforms.

Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
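The failure mode is easy to reproduce outside the kernel. The sketch below mimics the no-op macro definitions (as on x86 without CONFIG_DMA_API_DEBUG) together with a simplified page-chunk layout; the struct fields, the dma_addr_t typedef and the sample addresses are illustrative assumptions, not the driver's actual definitions.

#include <stdio.h>

typedef unsigned long long dma_addr_t;		/* assumption for this sketch */

/* No-op variants, as on platforms that keep no DMA unmap state. */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)

struct pg_chunk_old {				/* before the fix */
	unsigned int offset;
	DECLARE_PCI_UNMAP_ADDR(mapping);	/* expands to nothing here */
};

struct pg_chunk_new {				/* after the fix */
	unsigned int offset;
	dma_addr_t mapping;			/* real field on every platform */
};

int main(void)
{
	dma_addr_t bus = 0xfeed0000ULL;		/* pretend pci_map_page() result */
	struct pg_chunk_old old = { .offset = 64 };
	struct pg_chunk_new fix = { .offset = 64 };

	pci_unmap_addr_set(&old, mapping, bus);	/* silently discarded */
	fix.mapping = bus;

	/* Old scheme: the Rx descriptor is fed 0 + offset, a bogus address. */
	printf("old: 0x%llx\n",
	       (dma_addr_t)pci_unmap_addr(&old, mapping) + old.offset);
	/* Fixed scheme: the real bus address + chunk offset reaches the HW. */
	printf("fix: 0x%llx\n", fix.mapping + fix.offset);
	return 0;
}

With the no-op macros the stored address reads back as zero, which is exactly the bogus bus address described above; storing it in a plain field sidesteps the per-platform macro behaviour.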
Diffstat (limited to 'drivers/net/cxgb3/sge.c')
-rw-r--r--	drivers/net/cxgb3/sge.c	11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 26d3587f3399..b3ee2bc1a005 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -355,7 +355,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
 		(*d->pg_chunk.p_cnt)--;
 		if (!*d->pg_chunk.p_cnt)
 			pci_unmap_page(pdev,
-				       pci_unmap_addr(&d->pg_chunk, mapping),
+				       d->pg_chunk.mapping,
 				       q->alloc_size, PCI_DMA_FROMDEVICE);
 
 		put_page(d->pg_chunk.page);
@@ -454,7 +454,7 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
 		q->pg_chunk.offset = 0;
 		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
 				       0, q->alloc_size, PCI_DMA_FROMDEVICE);
-		pci_unmap_addr_set(&q->pg_chunk, mapping, mapping);
+		q->pg_chunk.mapping = mapping;
 	}
 	sd->pg_chunk = q->pg_chunk;
@@ -511,8 +511,7 @@ static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
 nomem:				q->alloc_failed++;
 				break;
 			}
-			mapping = pci_unmap_addr(&sd->pg_chunk, mapping) +
-				  sd->pg_chunk.offset;
+			mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
 			pci_unmap_addr_set(sd, dma_addr, mapping);
 
 			add_one_rx_chunk(mapping, d, q->gen);
@@ -881,7 +880,7 @@ recycle:
 	(*sd->pg_chunk.p_cnt)--;
 	if (!*sd->pg_chunk.p_cnt)
 		pci_unmap_page(adap->pdev,
-			       pci_unmap_addr(&sd->pg_chunk, mapping),
+			       sd->pg_chunk.mapping,
 			       fl->alloc_size,
 			       PCI_DMA_FROMDEVICE);
 	if (!skb) {
@@ -2096,7 +2095,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 	(*sd->pg_chunk.p_cnt)--;
 	if (!*sd->pg_chunk.p_cnt)
 		pci_unmap_page(adap->pdev,
-			       pci_unmap_addr(&sd->pg_chunk, mapping),
+			       sd->pg_chunk.mapping,
 			       fl->alloc_size,
 			       PCI_DMA_FROMDEVICE);
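One detail worth noting in the refill_fl() hunk above: pci_unmap_addr_set(sd, dma_addr, mapping) is left in place, because that value is only read back to unmap or sync the buffer, operations that are themselves no-ops on the affected platforms; only the page-chunk address is handed to the hardware and therefore has to survive everywhere. The sge.c changes rely on struct fl_pg_chunk (declared in drivers/net/cxgb3/adapter.h, outside this filtered diffstat) carrying a plain field for the bus address. A sketch of what that declaration presumably looks like after the fix; the exact layout is an assumption:

/* Sketch only: the real declaration lives in drivers/net/cxgb3/adapter.h. */
struct fl_pg_chunk {
	struct page	*page;		/* backing page shared by several Rx buffers */
	void		*va;		/* kernel virtual address of the page */
	unsigned int	offset;		/* offset of the next free chunk in the page */
	unsigned long	*p_cnt;		/* in-page reference count */
	dma_addr_t	mapping;	/* bus address from pci_map_page(); a real
					 * field on every platform, unlike the
					 * DECLARE_PCI_UNMAP_ADDR() slot it replaces */
};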