author     Mike Marciniszyn <mike.marciniszyn@intel.com>    2016-02-04 20:03:28 +0100
committer  Doug Ledford <dledford@redhat.com>               2016-03-11 02:38:04 +0100
commit     ee84541ad11e70d372670160e727680051801517 (patch)
tree       abbdb47297d51ea17a4c2a10323d2ef6325651c1 /drivers/infiniband/hw/qib/qib_rc.c
parent     staging/rdma/hfi1: Insure last cursor is updated prior to complete (diff)
IB/qib: Insure last cursor is updated prior to complete
This patch is a prerequisite for adding a separate lock
for post send.

s_last must be updated before any send completion is
returned, to avoid a race between a poll cq seeing the
completion and the post send checking for a full queue.
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
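In ring-buffer terms, qp->s_last is the consumer index of the send work queue, and post send treats the queue as full when advancing qp->s_head would catch up with qp->s_last. A minimal sketch of the ordering the patch enforces on the completion side (field names follow the qib/rdmavt structures, but post_send_cqe() is a hypothetical helper and this is an illustration, not the kernel code):

	/* Completion side: retire the swqe by advancing the consumer
	 * index s_last BEFORE the completion becomes visible.  Once a
	 * poller sees the CQE, post send is then guaranteed to see the
	 * freed slot and cannot report a spuriously full queue.
	 */
	u32 s_last = qp->s_last;

	if (++s_last >= qp->s_size)    /* wrap the circular send queue */
		s_last = 0;
	qp->s_last = s_last;           /* step 1: free the slot */
	barrier();                     /* compiler barrier: keep 1 before 2 */
	post_send_cqe(qp, wqe);        /* step 2: publish the completion
					* (hypothetical helper)
					*/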
Diffstat (limited to 'drivers/infiniband/hw/qib/qib_rc.c')
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c  20
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 044525d9c8b8..ce886b2ade74 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1008,10 +1008,18 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr)
 		start_timer(qp);
 
 	while (qp->s_last != qp->s_acked) {
+		u32 s_last;
+
 		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
 		if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
 		    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
 			break;
+		s_last = qp->s_last;
+		if (++s_last >= qp->s_size)
+			s_last = 0;
+		qp->s_last = s_last;
+		/* see post_send() */
+		barrier();
 		for (i = 0; i < wqe->wr.num_sge; i++) {
 			struct rvt_sge *sge = &wqe->sg_list[i];
 
@@ -1028,8 +1036,6 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr)
 			wc.qp = &qp->ibqp;
 			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
 		}
-		if (++qp->s_last >= qp->s_size)
-			qp->s_last = 0;
 	}
 	/*
 	 * If we were waiting for sends to complete before resending,
@@ -1068,11 +1074,19 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 	 */
 	if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
 	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+		u32 s_last;
+
 		for (i = 0; i < wqe->wr.num_sge; i++) {
 			struct rvt_sge *sge = &wqe->sg_list[i];
 
 			rvt_put_mr(sge->mr);
 		}
+		s_last = qp->s_last;
+		if (++s_last >= qp->s_size)
+			s_last = 0;
+		qp->s_last = s_last;
+		/* see post_send() */
+		barrier();
 		/* Post a send completion queue entry if requested. */
 		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
 		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
@@ -1084,8 +1098,6 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 			wc.qp = &qp->ibqp;
 			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
 		}
-		if (++qp->s_last >= qp->s_size)
-			qp->s_last = 0;
 	} else
 		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
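For context, the producer-side check that the barrier() above pairs with is the classic ring-buffer full test; roughly (a sketch under the same naming assumptions, not the verbatim post-send path):

	/* post send side: the queue is full when the slot after s_head
	 * is still owned by the completer, i.e. equals s_last.  Because
	 * the completer now advances s_last before posting the CQE, an
	 * application that polls a completion and immediately posts a
	 * new request cannot hit -ENOMEM for the slot it just reaped.
	 */
	u32 next = qp->s_head + 1;

	if (next >= qp->s_size)        /* wrap the circular send queue */
		next = 0;
	if (next == qp->s_last)
		return -ENOMEM;        /* send queue full */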