-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c        12
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c         4
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c  17
3 files changed, 19 insertions, 14 deletions
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 818cf1aee8c7..f5e9aeec6f6e 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -498,9 +498,9 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
u64 sge_cmd, ctx0, ctx1;
u64 base_addr;
struct t3_modify_qp_wr *wqe;
- struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
-
+ struct sk_buff *skb;
+ skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
if (!skb) {
PDBG("%s alloc_skb failed\n", __FUNCTION__);
return -ENOMEM;
@@ -508,7 +508,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
err = cxio_hal_init_ctrl_cq(rdev_p);
if (err) {
PDBG("%s err %d initializing ctrl_cq\n", __FUNCTION__, err);
- return err;
+ goto err;
}
rdev_p->ctrl_qp.workq = dma_alloc_coherent(
&(rdev_p->rnic_info.pdev->dev),
@@ -518,7 +518,8 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
GFP_KERNEL);
if (!rdev_p->ctrl_qp.workq) {
PDBG("%s dma_alloc_coherent failed\n", __FUNCTION__);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err;
}
pci_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
rdev_p->ctrl_qp.dma_addr);
@@ -556,6 +557,9 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
rdev_p->ctrl_qp.workq, 1 << T3_CTRL_QP_SIZE_LOG2);
skb->priority = CPL_PRIORITY_CONTROL;
return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
+err:
+ kfree_skb(skb);
+ return err;
}
static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
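
The cxio_hal.c hunks above convert early returns into a single unwind path so the already-allocated skb is not leaked when a later initialization step fails. A minimal user-space sketch of that goto-based cleanup pattern, with hypothetical names and sizes standing in for the driver's skb and work queue:

#include <stdio.h>
#include <stdlib.h>

/*
 * Sketch only: "skb" and "workq" are hypothetical stand-ins for the
 * driver's allocations; the point is the single error label that frees
 * whatever was allocated before the failing step.
 */
static int init_ctrl_qp_sketch(void)
{
        int err = 0;
        char *workq = NULL;
        char *skb = malloc(64);         /* first allocation */

        if (!skb)
                return -1;              /* nothing else to undo yet */

        workq = malloc(128);            /* later allocation */
        if (!workq) {
                err = -1;
                goto err;               /* unwind the earlier allocation */
        }

        /* ... set-up work that hands off both buffers would go here ... */
        free(workq);
        free(skb);
        return 0;

err:
        free(skb);                      /* this cleanup is what the patch adds */
        return err;
}

int main(void)
{
        return init_ctrl_qp_sketch() ? EXIT_FAILURE : EXIT_SUCCESS;
}
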
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 8e4846b5c641..fdb576dcfaa8 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -881,8 +881,8 @@ int mthca_init_mr_table(struct mthca_dev *dev)
}
mpts = mtts = 1 << i;
} else {
- mpts = dev->limits.num_mtt_segs;
- mtts = dev->limits.num_mpts;
+ mtts = dev->limits.num_mtt_segs;
+ mpts = dev->limits.num_mpts;
}
if (!mthca_is_memfree(dev) &&
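
The mthca_mr.c hunk swaps two assignments so that each limit sizes its own table: num_mpts counts MPT (memory protection table) entries and num_mtt_segs counts MTT (memory translation table) segments. A trivial user-space sketch of the corrected pairing; the limit values and entry sizes below are made up for illustration:

#include <assert.h>
#include <stdlib.h>

/* Hypothetical limits; only the pairing matters: the MPT count sizes the
 * MPT table and the MTT segment count sizes the MTT table. */
struct limits_sketch {
        int num_mpts;
        int num_mtt_segs;
};

int main(void)
{
        struct limits_sketch lim = { .num_mpts = 1 << 17, .num_mtt_segs = 1 << 19 };
        int mpts = lim.num_mpts;        /* was assigned num_mtt_segs before the fix */
        int mtts = lim.num_mtt_segs;    /* was assigned num_mpts before the fix */

        void *mpt_table = calloc(mpts, 64);     /* entry sizes are illustrative */
        void *mtt_table = calloc(mtts, 8);

        assert(mpt_table && mtt_table);
        free(mpt_table);
        free(mtt_table);
        return 0;
}
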
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 89e37283c836..278fcbccc2d9 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -658,6 +658,7 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
{
int deferred;
int is_rdma_aligned = 1;
+ struct iser_regd_buf *regd;
/* if we were reading, copy back to unaligned sglist,
* anyway dma_unmap and free the copy
@@ -672,20 +673,20 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
}
if (iser_ctask->dir[ISER_DIR_IN]) {
- deferred = iser_regd_buff_release
- (&iser_ctask->rdma_regd[ISER_DIR_IN]);
+ regd = &iser_ctask->rdma_regd[ISER_DIR_IN];
+ deferred = iser_regd_buff_release(regd);
if (deferred) {
- iser_err("References remain for BUF-IN rdma reg\n");
- BUG();
+ iser_err("%d references remain for BUF-IN rdma reg\n",
+ atomic_read(&regd->ref_count));
}
}
if (iser_ctask->dir[ISER_DIR_OUT]) {
- deferred = iser_regd_buff_release
- (&iser_ctask->rdma_regd[ISER_DIR_OUT]);
+ regd = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+ deferred = iser_regd_buff_release(regd);
if (deferred) {
- iser_err("References remain for BUF-OUT rdma reg\n");
- BUG();
+ iser_err("%d references remain for BUF-OUT rdma reg\n",
+ atomic_read(&regd->ref_count));
}
}
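
The iser_initiator.c hunks replace a hard BUG() with a diagnostic that reports how many references are still held on the registered buffer. A small user-space sketch of that release-and-report pattern, with a plain int standing in for the driver's atomic ref_count (names here are hypothetical):

#include <stdio.h>

/*
 * Hypothetical stand-in for the driver's registered-buffer descriptor;
 * the real ref_count is an atomic_t, a plain int is enough for the sketch.
 */
struct regd_buf_sketch {
        int ref_count;
};

/* Returns nonzero when references remain and the free must be deferred,
 * mirroring the "deferred" result checked in the hunks above. */
static int regd_buf_release_sketch(struct regd_buf_sketch *regd)
{
        if (--regd->ref_count > 0)
                return 1;
        /* ... the real code would unregister and free the buffer here ... */
        return 0;
}

int main(void)
{
        struct regd_buf_sketch buf = { .ref_count = 2 };

        if (regd_buf_release_sketch(&buf))
                fprintf(stderr, "%d references remain for rdma reg\n",
                        buf.ref_count);         /* report instead of BUG() */
        return 0;
}
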