author	Vipul Pandya <vipul@chelsio.com>	2013-03-14 06:09:02 +0100
committer	David S. Miller <davem@davemloft.net>	2013-03-14 16:35:59 +0100
commit	0e5eca791c8d8dd7622a58785947f1cab92e595c (patch)
tree	2f7651ca1646b1d50d6b9d27333ce94bd966f165 /drivers/infiniband
parent	RDMA/cxgb4: Use DSGLs for fastreg and adapter memory writes for T5. (diff)
RDMA/cxgb4: Map pbl buffers for dma if using DSGL.
Signed-off-by: Vipul Pandya <vipul@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
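
The change follows the standard streaming-DMA pattern: map the buffer once with dma_map_single(), advance a dma_addr_t cursor through it chunk by chunk, and unmap with the handle saved from the mapping call (the cursor no longer points at the start of the mapping by then). A minimal sketch of that pattern, independent of the cxgb4 structures; CHUNK_MAX and write_chunk() are hypothetical stand-ins for T4_ULPTX_MAX_DMA and the aligned-write helper:

#include <linux/kernel.h>
#include <linux/dma-mapping.h>

#define CHUNK_MAX 1024	/* hypothetical stand-in for T4_ULPTX_MAX_DMA */

int write_chunk(struct device *dev, dma_addr_t addr, size_t len);	/* hypothetical */

static int write_buffer_dma(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle, cur;
	size_t remain = len, chunk;
	int ret = 0;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -1;	/* mirrors the patch: report the failure, let the caller decide */

	/* Walk the mapped region; 'cur' advances, 'handle' stays for unmap. */
	for (cur = handle; remain; cur += chunk, remain -= chunk) {
		chunk = min_t(size_t, remain, CHUNK_MAX);
		ret = write_chunk(dev, cur, chunk);
		if (ret)
			break;
	}

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return ret;
}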
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/cxgb4/mem.c	29	+++++++++++++++++++++++------
1 file changed, 23 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 33db9ee307dc..4cb8eb24497c 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -51,7 +51,7 @@ module_param(inline_threshold, int, 0644);
 MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");
 
 static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
-				       u32 len, void *data, int wait)
+				       u32 len, dma_addr_t data, int wait)
 {
 	struct sk_buff *skb;
 	struct ulp_mem_io *req;
@@ -88,7 +88,7 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
 	sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) |
 				    ULPTX_NSGE(1));
 	sgl->len0 = cpu_to_be32(len);
-	sgl->addr0 = cpu_to_be64(virt_to_phys(data));
+	sgl->addr0 = cpu_to_be64(data);
 
 	ret = c4iw_ofld_send(rdev, skb);
 	if (ret)
@@ -178,6 +178,13 @@ int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
 	u32 remain = len;
 	u32 dmalen;
 	int ret = 0;
+	dma_addr_t daddr;
+	dma_addr_t save;
+
+	daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
+		return -1;
+	save = daddr;
 
 	while (remain > inline_threshold) {
 		if (remain < T4_ULPTX_MAX_DMA) {
@@ -188,16 +195,18 @@ int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
 		} else
 			dmalen = T4_ULPTX_MAX_DMA;
 		remain -= dmalen;
-		ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, data,
+		ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
 						 !remain);
 		if (ret)
			goto out;
 		addr += dmalen >> 5;
 		data += dmalen;
+		daddr += dmalen;
 	}
 	if (remain)
 		ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
 out:
+	dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
 	return ret;
 }
@@ -209,9 +218,17 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
 			     void *data)
 {
 	if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
-		if (len > inline_threshold)
-			return _c4iw_write_mem_dma(rdev, addr, len, data);
-		else
+		if (len > inline_threshold) {
+			if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
+				printk_ratelimited(KERN_WARNING
+						   "%s: dma map"
+						   " failure (non fatal)\n",
+						   pci_name(rdev->lldi.pdev));
+				return _c4iw_write_mem_inline(rdev, addr, len,
+							      data);
+			} else
+				return 0;
+		} else
 			return _c4iw_write_mem_inline(rdev, addr, len, data);
 	} else
 		return _c4iw_write_mem_inline(rdev, addr, len, data);
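
Two details worth noting. First, dma_unmap_single() must be passed the handle originally returned by dma_map_single() together with the full mapped length, which is why the patch stashes daddr in save before the loop advances it. Second, a mapping failure is deliberately non-fatal: write_adapter_mem() just logs a ratelimited warning and retries through the slower inline ULP_TX path. Condensed to its decision logic (a schematic only; the patch itself uses printk_ratelimited(KERN_WARNING ...) as shown above):

	if (is_t5(rdev->lldi.adapter_type) && use_dsgl && len > inline_threshold) {
		if (!_c4iw_write_mem_dma(rdev, addr, len, data))
			return 0;	/* DSGL write succeeded */
		pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
				    pci_name(rdev->lldi.pdev));
	}
	return _c4iw_write_mem_inline(rdev, addr, len, data);	/* fallback */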