author     Kaike Wan <kaike.wan@intel.com>          2019-01-24 06:50:24 +0100
committer  Doug Ledford <dledford@redhat.com>       2019-02-06 00:07:43 +0100
commit     829eaee5d09a7500bdce9ed0bc6ec6861f8ae45b (patch)
tree       9e554996b7e2ad37be06444e90a58e0a7a58d771 /drivers/infiniband/hw/hfi1
parent     IB/hfi1: Add a function to receive TID RDMA ACK packet (diff)
IB/hfi1: Add TID RDMA retry timer
This patch adds the TID RDMA retry timer to make sure that TID RDMA
WRITE DATA packets for a segment are received successfully by the
responder. The timer is generally armed when the last TID RDMA
WRITE DATA packet for a segment is sent out and stopped when all
outstanding TID RDMA WRITE DATA packets are acknowledged.
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Mitko Haralanov <mitko.haralanov@intel.com>
Signed-off-by: Kaike Wan <kaike.wan@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
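The mechanism the patch adds boils down to a small per-request state machine: the retry timer is armed when the last TID RDMA WRITE DATA packet of a segment goes out, re-armed on an ACK that still leaves segments outstanding (req->ack_seg < req->cur_seg), stopped once everything is acknowledged, and on expiry it either spends one retry by sending a RESYNC (halting new requests until it completes) or, with the retry budget exhausted, completes the work request in error. The sketch below is a minimal userspace model of just those decisions; every name in it is invented for illustration and none of it is driver code.

/*
 * Simplified, userspace-only model of the retry-timer decisions introduced by
 * this patch.  All identifiers here (segment_state, on_last_data_sent, ...)
 * are illustrative; only the arm/re-arm/stop/expire logic mirrors the patch.
 * Build: cc -std=c99 -Wall retry_model.c
 */
#include <stdbool.h>
#include <stdio.h>

struct segment_state {
        bool timer_armed;   /* models the HFI1_S_TID_RETRY_TIMER flag */
        int  cur_seg;       /* segments whose last WRITE DATA packet was sent */
        int  ack_seg;       /* segments fully acknowledged by the responder */
        int  s_retry;       /* remaining retry budget */
};

/* Armed once the last TID RDMA WRITE DATA packet of a segment has been sent. */
static void on_last_data_sent(struct segment_state *s)
{
        s->cur_seg++;
        s->timer_armed = true;
}

/* On a normal TID RDMA ACK: re-arm while segments remain unacked, else stop. */
static void on_ack(struct segment_state *s, int acked_segs)
{
        s->ack_seg = acked_segs;
        s->timer_armed = (s->ack_seg < s->cur_seg);
}

/* On expiry: give up when the retry budget is exhausted, else retry via RESYNC. */
static void on_timeout(struct segment_state *s)
{
        s->timer_armed = false;
        if (!s->s_retry) {
                printf("retries exhausted: complete the WQE with an error\n");
        } else {
                s->s_retry--;
                printf("send one RESYNC and halt new requests until it completes\n");
        }
}

int main(void)
{
        struct segment_state s = { .s_retry = 2 };

        on_last_data_sent(&s);   /* segment 1 sent, timer armed */
        on_ack(&s, 0);           /* partial progress: timer stays armed */
        printf("armed after partial ACK: %d\n", s.timer_armed);
        on_timeout(&s);          /* no full ACK in time: RESYNC path */
        on_ack(&s, 1);           /* segment fully ACKed: timer stopped */
        printf("armed after full ACK: %d\n", s.timer_armed);
        return 0;
}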
Diffstat (limited to 'drivers/infiniband/hw/hfi1')
-rw-r--r--  drivers/infiniband/hw/hfi1/opfn.c     |  2
-rw-r--r--  drivers/infiniband/hw/hfi1/tid_rdma.c | 93
-rw-r--r--  drivers/infiniband/hw/hfi1/tid_rdma.h |  4
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.h    |  2
4 files changed, 101 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/hfi1/opfn.c b/drivers/infiniband/hw/hfi1/opfn.c
index 82e1889ca969..370a5a8eaa71 100644
--- a/drivers/infiniband/hw/hfi1/opfn.c
+++ b/drivers/infiniband/hw/hfi1/opfn.c
@@ -252,6 +252,8 @@ void opfn_qp_init(struct rvt_qp *qp, struct ib_qp_attr *attr, int attr_mask)
         if (ibqp->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
                 struct tid_rdma_params *local = &priv->tid_rdma.local;
 
+                if (attr_mask & IB_QP_TIMEOUT)
+                        priv->tid_retry_timeout_jiffies = qp->timeout_jiffies;
                 if (qp->pmtu == enum_to_mtu(OPA_MTU_4096) ||
                     qp->pmtu == enum_to_mtu(OPA_MTU_8192)) {
                         tid_rdma_opfn_init(qp, local);
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 5eb8453a719e..a4faf7d6c224 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -121,6 +121,9 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx);
 static void hfi1_tid_timeout(struct timer_list *t);
 static void hfi1_add_tid_reap_timer(struct rvt_qp *qp);
 static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp);
+static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp);
+static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp);
+static void hfi1_tid_retry_timeout(struct timer_list *t);
 
 static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
 {
@@ -330,6 +333,7 @@ int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp,
         qpriv->r_tid_alloc = HFI1_QP_WQE_INVALID;
         atomic_set(&qpriv->n_tid_requests, 0);
         timer_setup(&qpriv->s_tid_timer, hfi1_tid_timeout, 0);
+        timer_setup(&qpriv->s_tid_retry_timer, hfi1_tid_retry_timeout, 0);
         INIT_LIST_HEAD(&qpriv->tid_wait);
 
         if (init_attr->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
@@ -4396,11 +4400,19 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
         if (qpriv->s_flags & RVT_S_WAIT_ACK)
                 qpriv->s_flags &= ~RVT_S_WAIT_ACK;
         if (!hfi1_tid_rdma_is_resync_psn(psn)) {
+                /* Check if there is any pending TID ACK */
+                if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
+                    req->ack_seg < req->cur_seg)
+                        hfi1_mod_tid_retry_timer(qp);
+                else
+                        hfi1_stop_tid_retry_timer(qp);
                 hfi1_schedule_send(qp);
         } else {
                 u32 spsn, fpsn, last_acked, generation;
                 struct tid_rdma_request *rptr;
 
+                /* ACK(RESYNC) */
+                hfi1_stop_tid_retry_timer(qp);
                 /* Allow new requests (see hfi1_make_tid_rdma_pkt) */
                 qp->s_flags &= ~HFI1_S_WAIT_HALT;
                 /*
@@ -4506,6 +4518,7 @@ done:
                 break;
 
         case 3:         /* NAK */
+                hfi1_stop_tid_retry_timer(qp);
                 switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
                         IB_AETH_CREDIT_MASK) {
                 case 0: /* PSN sequence error */
@@ -4530,3 +4543,83 @@ done:
 ack_op_err:
         spin_unlock_irqrestore(&qp->s_lock, flags);
 }
+
+void hfi1_add_tid_retry_timer(struct rvt_qp *qp)
+{
+        struct hfi1_qp_priv *priv = qp->priv;
+        struct ib_qp *ibqp = &qp->ibqp;
+        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+        lockdep_assert_held(&qp->s_lock);
+        if (!(priv->s_flags & HFI1_S_TID_RETRY_TIMER)) {
+                priv->s_flags |= HFI1_S_TID_RETRY_TIMER;
+                priv->s_tid_retry_timer.expires = jiffies +
+                        priv->tid_retry_timeout_jiffies + rdi->busy_jiffies;
+                add_timer(&priv->s_tid_retry_timer);
+        }
+}
+
+static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp)
+{
+        struct hfi1_qp_priv *priv = qp->priv;
+        struct ib_qp *ibqp = &qp->ibqp;
+        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+        lockdep_assert_held(&qp->s_lock);
+        priv->s_flags |= HFI1_S_TID_RETRY_TIMER;
+        mod_timer(&priv->s_tid_retry_timer, jiffies +
+                  priv->tid_retry_timeout_jiffies + rdi->busy_jiffies);
+}
+
+static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp)
+{
+        struct hfi1_qp_priv *priv = qp->priv;
+        int rval = 0;
+
+        lockdep_assert_held(&qp->s_lock);
+        if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) {
+                rval = del_timer(&priv->s_tid_retry_timer);
+                priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER;
+        }
+        return rval;
+}
+
+void hfi1_del_tid_retry_timer(struct rvt_qp *qp)
+{
+        struct hfi1_qp_priv *priv = qp->priv;
+
+        del_timer_sync(&priv->s_tid_retry_timer);
+        priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER;
+}
+
+static void hfi1_tid_retry_timeout(struct timer_list *t)
+{
+        struct hfi1_qp_priv *priv = from_timer(priv, t, s_tid_retry_timer);
+        struct rvt_qp *qp = priv->owner;
+        struct rvt_swqe *wqe;
+        unsigned long flags;
+
+        spin_lock_irqsave(&qp->r_lock, flags);
+        spin_lock(&qp->s_lock);
+        if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) {
+                hfi1_stop_tid_retry_timer(qp);
+                if (!priv->s_retry) {
+                        wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+                        hfi1_trdma_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+                        rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+                } else {
+                        priv->s_flags &= ~RVT_S_WAIT_ACK;
+                        /* Only send one packet (the RESYNC) */
+                        priv->s_flags |= RVT_S_SEND_ONE;
+                        /*
+                         * No additional request shall be made by this QP until
+                         * the RESYNC has been complete.
+                         */
+                        qp->s_flags |= HFI1_S_WAIT_HALT;
+                        priv->s_state = TID_OP(RESYNC);
+                        priv->s_retry--;
+                }
+        }
+        spin_unlock(&qp->s_lock);
+        spin_unlock_irqrestore(&qp->r_lock, flags);
+}
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.h b/drivers/infiniband/hw/hfi1/tid_rdma.h
index 499036e7a3e8..3be5f79ed1fb 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.h
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.h
@@ -29,6 +29,7 @@
 #define HFI1_R_TID_RSC_TIMER      BIT(2)
 /* BIT(4) reserved for RVT_S_ACK_PENDING. */
 #define HFI1_S_TID_WAIT_INTERLCK  BIT(5)
+#define HFI1_S_TID_RETRY_TIMER    BIT(17)
 #define HFI1_R_TID_SW_PSN         BIT(19)
 
 /*
@@ -288,4 +289,7 @@ u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e,
 
 void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet);
 
+void hfi1_add_tid_retry_timer(struct rvt_qp *qp);
+void hfi1_del_tid_retry_timer(struct rvt_qp *qp);
+
 #endif /* HFI1_TID_RDMA_H */
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 30e3f5af5cf1..bfd642e831f7 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -164,6 +164,7 @@ struct hfi1_qp_priv {
         u8 s_sc;                        /* SC[0..4] for next packet */
         struct iowait s_iowait;
         struct timer_list s_tid_timer;  /* for timing tid wait */
+        struct timer_list s_tid_retry_timer; /* for timing tid ack */
         struct list_head tid_wait;      /* for queueing tid space */
         struct hfi1_opfn_data opfn;
         struct tid_flow_state flow_state;
@@ -172,6 +173,7 @@ struct hfi1_qp_priv {
         u8 hdr_type; /* 9B or 16B */
         atomic_t n_tid_requests;        /* # of sent TID RDMA requests */
         unsigned long tid_timer_timeout_jiffies;
+        unsigned long tid_retry_timeout_jiffies;
 
         /* variables for the TID RDMA SE state machine */
         u8 s_state;
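A detail worth noting in the diff above is the flag check inside hfi1_tid_retry_timeout(): the handler takes qp->r_lock and qp->s_lock and only acts if HFI1_S_TID_RETRY_TIMER is still set. A likely reason for that pattern is that the timer can fire concurrently with hfi1_stop_tid_retry_timer() running on another CPU; because the flag is only changed under s_lock, a late callback that finds it cleared simply backs out. The fragment below is a minimal pthread model of that check-under-lock idiom with invented names; it is not driver code.

/*
 * Userspace model of "re-check the armed flag under the lock" as used by the
 * retry-timer callback.  qp_model, stop_retry_timer and retry_timeout are
 * invented names for illustration only.  Build: cc -Wall timer_race.c -pthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct qp_model {
        pthread_mutex_t s_lock;
        bool retry_timer_armed;   /* stands in for HFI1_S_TID_RETRY_TIMER */
};

/* ACK path: stop the timer and clear the flag while holding the lock. */
static void stop_retry_timer(struct qp_model *qp)
{
        pthread_mutex_lock(&qp->s_lock);
        qp->retry_timer_armed = false;
        pthread_mutex_unlock(&qp->s_lock);
}

/* Timer callback: act only if the flag is still set once the lock is held. */
static void retry_timeout(struct qp_model *qp)
{
        pthread_mutex_lock(&qp->s_lock);
        if (qp->retry_timer_armed) {
                qp->retry_timer_armed = false;
                printf("expiry still valid: schedule a RESYNC retry\n");
        } else {
                printf("timer was stopped concurrently: nothing to do\n");
        }
        pthread_mutex_unlock(&qp->s_lock);
}

int main(void)
{
        struct qp_model qp;

        pthread_mutex_init(&qp.s_lock, NULL);
        qp.retry_timer_armed = true;    /* last WRITE DATA packet went out */

        stop_retry_timer(&qp);          /* a full TID RDMA ACK arrived first */
        retry_timeout(&qp);             /* late callback finds the flag cleared */

        pthread_mutex_destroy(&qp.s_lock);
        return 0;
}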