author | Eli Cohen <eli@mellanox.co.il> | 2008-01-24 15:38:06 +0100 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2008-02-05 05:20:44 +0100 |
commit | 1d368c546566e249da8181e933c53788093965cf (patch) | |
tree | 08ea2e0280f125f2981ab7ada0d335b5928cc670 /drivers/infiniband/hw/mthca | |
parent | IB/mthca: Remove checks for srq->first_free < 0 (diff) | |
download | linux-1d368c546566e249da8181e933c53788093965cf.tar.xz linux-1d368c546566e249da8181e933c53788093965cf.zip |
IB/mthca: Pre-link receive WQEs in Tavor mode
We have recently discovered that Tavor mode requires each WQE in a
posted list of receive WQEs to have a valid NDA field at all times.
This requirement holds true for regular QPs as well as for SRQs. This
patch pre-links the receive queue in a regular QP and keeps the free
list in an SRQ properly linked at all times.
Signed-off-by: Eli Cohen <eli@mellanox.co.il>
Reviewed-by: Jack Morgenstein <jackm@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
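For illustration only (not part of the commit): a minimal, self-contained C sketch of the pre-linking idea described in the message above. The names struct next_seg and prelink_recv_queue are hypothetical stand-ins for the driver's struct mthca_next_seg and its QP allocation path; the nda_op encoding mirrors the htonl((((i + 1) % qp->rq.max) << qp->rq.wqe_shift) | 1) expression in the diff below.

#include <stdint.h>
#include <arpa/inet.h>

/* Hypothetical stand-in for struct mthca_next_seg: only the fields the
 * sketch touches. */
struct next_seg {
	uint32_t nda_op;	/* big-endian: next WQE offset | valid bit */
	uint32_t ee_nds;
};

/*
 * Pre-link 'max' receive WQEs laid out back to back in 'buf', each
 * (1 << wqe_shift) bytes long, into a circular list so every WQE's NDA
 * field always points at the next WQE, as Tavor mode requires.
 */
static void prelink_recv_queue(void *buf, int max, int wqe_shift)
{
	for (int i = 0; i < max; ++i) {
		struct next_seg *next =
			(struct next_seg *) ((char *) buf + (i << wqe_shift));

		/* NDA points at WQE (i + 1) % max; the low bit marks it valid. */
		next->nda_op = htonl((uint32_t) ((((i + 1) % max) << wqe_shift) | 1));
	}
}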
Diffstat (limited to 'drivers/infiniband/hw/mthca')
-rw-r--r-- | drivers/infiniband/hw/mthca/mthca_qp.c | 13
-rw-r--r-- | drivers/infiniband/hw/mthca/mthca_srq.c | 23 |
2 files changed, 22 insertions, 14 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 0e5461c65731..db5595bbf7f0 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1175,6 +1175,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 {
 	int ret;
 	int i;
+	struct mthca_next_seg *next;
 
 	qp->refcount = 1;
 	init_waitqueue_head(&qp->wait);
@@ -1217,7 +1218,6 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 	}
 
 	if (mthca_is_memfree(dev)) {
-		struct mthca_next_seg *next;
 		struct mthca_data_seg *scatter;
 		int size = (sizeof (struct mthca_next_seg) +
 			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
@@ -1240,6 +1240,13 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 					    qp->sq.wqe_shift) +
 					   qp->send_wqe_offset);
 		}
+	} else {
+		for (i = 0; i < qp->rq.max; ++i) {
+			next = get_recv_wqe(qp, i);
+			next->nda_op = htonl((((i + 1) % qp->rq.max) <<
+					     qp->rq.wqe_shift) | 1);
+		}
+
 	}
 
 	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
@@ -1863,7 +1870,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		prev_wqe = qp->rq.last;
 		qp->rq.last = wqe;
 
-		((struct mthca_next_seg *) wqe)->nda_op = 0;
 		((struct mthca_next_seg *) wqe)->ee_nds =
 			cpu_to_be32(MTHCA_NEXT_DBD);
 		((struct mthca_next_seg *) wqe)->flags = 0;
@@ -1885,9 +1891,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 		qp->wrid[ind] = wr->wr_id;
 
-		((struct mthca_next_seg *) prev_wqe)->nda_op =
-			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
-
 		wmb();
 		((struct mthca_next_seg *) prev_wqe)->ee_nds =
 			cpu_to_be32(MTHCA_NEXT_DBD | size);
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index ec63adc1099c..a5ffff6e1026 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -175,9 +175,17 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
 	 * scatter list L_Keys to the sentry value of 0x100.
 	 */
 	for (i = 0; i < srq->max; ++i) {
-		wqe = get_wqe(srq, i);
+		struct mthca_next_seg *next;
 
-		*wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;
+		next = wqe = get_wqe(srq, i);
+
+		if (i < srq->max - 1) {
+			*wqe_to_link(wqe) = i + 1;
+			next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
+		} else {
+			*wqe_to_link(wqe) = -1;
+			next->nda_op = 0;
+		}
 
 		for (scatter = wqe + sizeof (struct mthca_next_seg);
 		     (void *) scatter < wqe + (1 << srq->wqe_shift);
@@ -470,12 +478,15 @@ out:
 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
 {
 	int ind;
+	struct mthca_next_seg *last_free;
 
 	ind = wqe_addr >> srq->wqe_shift;
 
 	spin_lock(&srq->lock);
 
-	*wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
+	last_free = get_wqe(srq, srq->last_free);
+	*wqe_to_link(last_free) = ind;
+	last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
 	*wqe_to_link(get_wqe(srq, ind)) = -1;
 	srq->last_free = ind;
 
@@ -516,7 +527,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 		prev_wqe = srq->last;
 		srq->last = wqe;
 
-		((struct mthca_next_seg *) wqe)->nda_op = 0;
 		((struct mthca_next_seg *) wqe)->ee_nds = 0;
 		/* flags field will always remain 0 */
 
@@ -537,9 +547,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 		if (i < srq->max_gs)
 			mthca_set_data_seg_inval(wqe);
 
-		((struct mthca_next_seg *) prev_wqe)->nda_op =
-			cpu_to_be32((ind << srq->wqe_shift) | 1);
-
 		wmb();
 		((struct mthca_next_seg *) prev_wqe)->ee_nds =
 			cpu_to_be32(MTHCA_NEXT_DBD);
@@ -613,8 +620,6 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			break;
 		}
 
-		((struct mthca_next_seg *) wqe)->nda_op =
-			cpu_to_be32((next_ind << srq->wqe_shift) | 1);
 		((struct mthca_next_seg *) wqe)->ee_nds = 0;
 		/* flags field will always remain 0 */