author | Steve Wise <swise@opengridcomputing.com> | 2007-07-29 22:12:29 +0200 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2007-08-03 19:45:18 +0200 |
commit | 699924b1e1ea3c9307eb582b9cc386e4af88aaae | |
tree | f425c5343d2d170032df5a1501a2a18e1bad81d9 /drivers/infiniband | |
parent | IB: Move the macro IB_UMEM_MAX_PAGE_CHUNK() to umem.c | |
RDMA/cxgb3: Always call low level send function via cxgb3_ofld_send()
This avoids deadlocks.
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
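For context on the deadlock claim: the patch funnels every CPL send in iwch_cm.c through cxgb3_ofld_send(), which the cxgb3 offload driver exports, instead of invoking the t3cdev->send() hook directly. The sketch below is illustrative, not the verbatim cxgb3 source; it assumes the wrapper of that era amounts to the same low-level hook called with bottom halves disabled, the struct is reduced to the one member used here, and the function is renamed cxgb3_ofld_send_sketch to mark it as hypothetical.

```c
/*
 * Illustrative sketch only -- not the verbatim cxgb3 driver source.
 * Assumption: cxgb3_ofld_send() boils down to the t3cdev->send() hook
 * wrapped with bottom halves disabled, so every caller sends in the
 * same locking context.
 */
#include <linux/interrupt.h>
#include <linux/skbuff.h>

/* Reduced view of struct t3cdev; the real definition lives in the cxgb3 driver. */
struct t3cdev {
	int (*send)(struct t3cdev *dev, struct sk_buff *skb);
	/* ... other members elided ... */
};

static int cxgb3_ofld_send_sketch(struct t3cdev *dev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();		/* keep softirq paths off this CPU while we send */
	ret = dev->send(dev, skb);	/* same low-level hook the old code called directly */
	local_bh_enable();
	return ret;
}
```

With bottom halves disabled for the duration of the send, a message queued from process context cannot be preempted on the same CPU by the softirq path that also drives the offload device, which is one common way direct calls to tdev->send() can deadlock.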
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- | drivers/infiniband/hw/cxgb3/iwch_cm.c | 16 |
1 file changed, 8 insertions, 8 deletions
```diff
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 9574088f0d4e..1cdfcd43b0bc 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -139,7 +139,7 @@ static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
 	skb->priority = CPL_PRIORITY_SETUP;
-	tdev->send(tdev, skb);
+	cxgb3_ofld_send(tdev, skb);
 	return;
 }
@@ -161,7 +161,7 @@ int iwch_quiesce_tid(struct iwch_ep *ep)
 	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);
 	skb->priority = CPL_PRIORITY_DATA;
-	ep->com.tdev->send(ep->com.tdev, skb);
+	cxgb3_ofld_send(ep->com.tdev, skb);
 	return 0;
 }
@@ -183,7 +183,7 @@ int iwch_resume_tid(struct iwch_ep *ep)
 	req->val = 0;
 	skb->priority = CPL_PRIORITY_DATA;
-	ep->com.tdev->send(ep->com.tdev, skb);
+	cxgb3_ofld_send(ep->com.tdev, skb);
 	return 0;
 }
@@ -784,7 +784,7 @@ static int update_rx_credits(struct iwch_ep *ep, u32 credits)
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
 	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
 	skb->priority = CPL_PRIORITY_ACK;
-	ep->com.tdev->send(ep->com.tdev, skb);
+	cxgb3_ofld_send(ep->com.tdev, skb);
 	return credits;
 }
@@ -1152,7 +1152,7 @@ static int listen_start(struct iwch_listen_ep *ep)
 	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));
 	skb->priority = 1;
-	ep->com.tdev->send(ep->com.tdev, skb);
+	cxgb3_ofld_send(ep->com.tdev, skb);
 	return 0;
 }
@@ -1186,7 +1186,7 @@ static int listen_stop(struct iwch_listen_ep *ep)
 	req->cpu_idx = 0;
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
 	skb->priority = 1;
-	ep->com.tdev->send(ep->com.tdev, skb);
+	cxgb3_ofld_send(ep->com.tdev, skb);
 	return 0;
 }
@@ -1264,7 +1264,7 @@ static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
 		rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
 		rpl->opt2 = 0;
 		rpl->rsvd = rpl->opt2;
-		tdev->send(tdev, skb);
+		cxgb3_ofld_send(tdev, skb);
 	}
 }
@@ -1557,7 +1557,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
 	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
 	rpl->cmd = CPL_ABORT_NO_RST;
-	ep->com.tdev->send(ep->com.tdev, rpl_skb);
+	cxgb3_ofld_send(ep->com.tdev, rpl_skb);
 	if (state != ABORTING) {
 		state_set(&ep->com, DEAD);
 		release_ep_resources(ep);
```