author     Somnath Kotur <somnath.kotur@broadcom.com>  2017-05-22 12:15:36 +0200
committer  Doug Ledford <dledford@redhat.com>          2017-06-14 19:01:58 +0200
commit     3fb755b3d58084001c89e5f0fd558552bdef9051
tree       a4ee02d3eb52e5a813181e2dbe5ea7b7f3e0ab93 /drivers
parent     RDMA/bnxt_re: Dereg MR in FW before freeing the fast_reg_page_list
RDMA/bnxt_re: Add HW workaround for avoiding stall for UD QPs
HW stalls out after 0x800000 WQEs are posted for UD QPs. To work around this problem, the driver sends a modify_qp command to the HW at around the halfway mark (0x400000) so that FW can modify the QP context in the HW accordingly and prevent the stall. This workaround is needed for UD, QP1, and raw-ethertype packets. A counter is added to keep track of WQEs posted during post_send.

Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
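The hunks below implement this in bnxt_ud_qp_hw_stall_workaround(). For illustration only, here is a condensed, self-contained sketch of the same counter-and-reset pattern; the type and helper names (ud_qp, issue_modify_qp, post_send_one) are hypothetical stand-ins, not the driver's real symbols.

/*
 * Sketch of the workaround: count WQEs per QP and, at half the HW limit,
 * issue a modify_qp so FW can refresh the QP context, then restart the count.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define UD_QP_HW_STALL_THRESHOLD 0x400000ULL  /* half of the 0x800000 HW limit */

struct ud_qp {
	uint64_t wqe_cnt;          /* WQEs posted since the last reset */
	bool     needs_workaround; /* UD, QP1 (GSI) or raw-ethertype QP */
};

/* Stand-in for the modify_qp command sent to FW. */
static void issue_modify_qp(struct ud_qp *qp)
{
	printf("modify_qp issued at wqe_cnt=0x%llx\n",
	       (unsigned long long)qp->wqe_cnt);
}

/* Called once per posted send WQE, after ringing the doorbell. */
static void post_send_one(struct ud_qp *qp)
{
	qp->wqe_cnt++;
	if (qp->needs_workaround &&
	    qp->wqe_cnt == UD_QP_HW_STALL_THRESHOLD) {
		issue_modify_qp(qp);
		qp->wqe_cnt = 0; /* restart counting well below the HW limit */
	}
}

int main(void)
{
	struct ud_qp qp = { .wqe_cnt = 0, .needs_workaround = true };
	uint64_t i;

	for (i = 0; i <= UD_QP_HW_STALL_THRESHOLD; i++)
		post_send_one(&qp);
	return 0;
}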
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/hw/bnxt_re/bnxt_re.h   |  2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c  | 18
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c  |  3
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h  |  1
4 files changed, 24 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index ebf7be8d4139..d5e457ee7e7b 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -56,6 +56,8 @@
#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
+#define BNXT_RE_UD_QP_HW_STALL 0x400000
+
struct bnxt_re_work {
struct work_struct work;
unsigned long event;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index d94b1b304135..08e7e59df28c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -2075,6 +2075,22 @@ static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
return payload_sz;
}
+static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
+{
+	if ((qp->ib_qp.qp_type == IB_QPT_UD ||
+	     qp->ib_qp.qp_type == IB_QPT_GSI ||
+	     qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
+	    qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
+		int qp_attr_mask;
+		struct ib_qp_attr qp_attr;
+
+		qp_attr_mask = IB_QP_STATE;
+		qp_attr.qp_state = IB_QPS_RTS;
+		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
+		qp->qplib_qp.wqe_cnt = 0;
+	}
+}
+
static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp,
struct ib_send_wr *wr)
@@ -2120,6 +2136,7 @@ bad:
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
+ bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
}
@@ -2216,6 +2233,7 @@ bad:
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
+ bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 31593fdf6365..f05500bcdcf1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1298,6 +1298,9 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
}
sq->hwq.prod++;
+
+ qp->wqe_cnt++;
+
done:
return rc;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 71539ea7f421..36b7b7db0e3f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -250,6 +250,7 @@ struct bnxt_qplib_qp {
u8 timeout;
u8 retry_cnt;
u8 rnr_retry;
+ u64 wqe_cnt;
u32 min_rnr_timer;
u32 max_rd_atomic;
u32 max_dest_rd_atomic;