Diffstat (limited to 'drivers/infiniband/ulp/rtrs')
 drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c |   4
 drivers/infiniband/ulp/rtrs/rtrs-clt.c       | 157
 drivers/infiniband/ulp/rtrs/rtrs-clt.h       |   7
 drivers/infiniband/ulp/rtrs/rtrs-pri.h       |   6
 drivers/infiniband/ulp/rtrs/rtrs-srv.c       |  95
 drivers/infiniband/ulp/rtrs/rtrs-srv.h       |   6
 drivers/infiniband/ulp/rtrs/rtrs.c           |  23
 7 files changed, 156 insertions(+), 142 deletions(-)
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c index 26bbe5d6dff5..5e780bdd763d 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c @@ -20,7 +20,7 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con) cpu = raw_smp_processor_id(); s = this_cpu_ptr(stats->pcpu_stats); - if (unlikely(con->cpu != cpu)) { + if (con->cpu != cpu) { s->cpu_migr.to++; /* Careful here, override s pointer */ @@ -180,7 +180,7 @@ void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir) len = req->usr_len + req->data_len; rtrs_clt_update_rdma_stats(stats, len, dir); - if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT) + if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) atomic_inc(&stats->inflight); } diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index f2c40e50f25e..bc8824b4ee0d 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -75,9 +75,9 @@ __rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type) */ do { bit = find_first_zero_bit(clt->permits_map, max_depth); - if (unlikely(bit >= max_depth)) + if (bit >= max_depth) return NULL; - } while (unlikely(test_and_set_bit_lock(bit, clt->permits_map))); + } while (test_and_set_bit_lock(bit, clt->permits_map)); permit = get_permit(clt, bit); WARN_ON(permit->mem_id != bit); @@ -115,14 +115,14 @@ struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *clt, DEFINE_WAIT(wait); permit = __rtrs_get_permit(clt, con_type); - if (likely(permit) || !can_wait) + if (permit || !can_wait) return permit; do { prepare_to_wait(&clt->permits_wait, &wait, TASK_UNINTERRUPTIBLE); permit = __rtrs_get_permit(clt, con_type); - if (likely(permit)) + if (permit) break; io_schedule(); @@ -175,7 +175,7 @@ struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess, { int id = 0; - if (likely(permit->con_type == RTRS_IO_CON)) + if (permit->con_type == RTRS_IO_CON) id = (permit->cpu_id % (sess->s.irq_con_num - 1)) + 1; return to_clt_con(sess->s.con[id]); @@ -329,7 +329,7 @@ static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc) { struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); - if (unlikely(wc->status != IB_WC_SUCCESS)) { + if (wc->status != IB_WC_SUCCESS) { rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n", ib_wc_status_msg(wc->status)); rtrs_rdma_error_recovery(con); @@ -349,13 +349,13 @@ static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) container_of(wc->wr_cqe, typeof(*req), inv_cqe); struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); - if (unlikely(wc->status != IB_WC_SUCCESS)) { + if (wc->status != IB_WC_SUCCESS) { rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n", ib_wc_status_msg(wc->status)); rtrs_rdma_error_recovery(con); } req->need_inv = false; - if (likely(req->need_inv_comp)) + if (req->need_inv_comp) complete(&req->inv_comp); else /* Complete request from INV callback */ @@ -390,7 +390,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno, sess = to_clt_sess(con->c.sess); if (req->sg_cnt) { - if (unlikely(req->dir == DMA_FROM_DEVICE && req->need_inv)) { + if (req->dir == DMA_FROM_DEVICE && req->need_inv) { /* * We are here to invalidate read requests * ourselves. In normal scenario server should @@ -405,7 +405,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno, * should do that ourselves. 
*/ - if (likely(can_wait)) { + if (can_wait) { req->need_inv_comp = true; } else { /* This should be IO path, so always notify */ @@ -416,10 +416,10 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno, refcount_inc(&req->ref); err = rtrs_inv_rkey(req); - if (unlikely(err)) { + if (err) { rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n", req->mr->rkey, err); - } else if (likely(can_wait)) { + } else if (can_wait) { wait_for_completion(&req->inv_comp); } else { /* @@ -438,7 +438,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno, } if (!refcount_dec_and_test(&req->ref)) return; - if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT) + if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) atomic_dec(&sess->stats->inflight); req->in_use = false; @@ -463,7 +463,7 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con, enum ib_send_flags flags; struct ib_sge sge; - if (unlikely(!req->sg_size)) { + if (!req->sg_size) { rtrs_wrn(con->c.sess, "Doing RDMA Write failed, no data supplied\n"); return -EINVAL; @@ -478,7 +478,7 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con, * From time to time we have to post signalled sends, * or send queue will fill up and only QP reset can help. */ - flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ? + flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ? 0 : IB_SEND_SIGNALED; ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr, @@ -513,7 +513,7 @@ static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc) iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); err = rtrs_iu_post_recv(&con->c, iu); - if (unlikely(err)) { + if (err) { rtrs_err(con->c.sess, "post iu failed %d\n", err); rtrs_rdma_error_recovery(con); } @@ -533,7 +533,7 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc) iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); - if (unlikely(wc->byte_len < sizeof(*msg))) { + if (wc->byte_len < sizeof(*msg)) { rtrs_err(con->c.sess, "rkey response is malformed: size %d\n", wc->byte_len); goto out; @@ -541,7 +541,7 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc) ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr, iu->size, DMA_FROM_DEVICE); msg = iu->buf; - if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP)) { + if (le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP) { rtrs_err(sess->clt, "rkey response is malformed: type %d\n", le16_to_cpu(msg->type)); goto out; @@ -551,8 +551,8 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc) goto out; rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload); - if (likely(imm_type == RTRS_IO_RSP_IMM || - imm_type == RTRS_IO_RSP_W_INV_IMM)) { + if (imm_type == RTRS_IO_RSP_IMM || + imm_type == RTRS_IO_RSP_W_INV_IMM) { u32 msg_id; w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM); @@ -605,7 +605,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) bool w_inval = false; int err; - if (unlikely(wc->status != IB_WC_SUCCESS)) { + if (wc->status != IB_WC_SUCCESS) { if (wc->status != IB_WC_WR_FLUSH_ERR) { rtrs_err(sess->clt, "RDMA failed: %s\n", ib_wc_status_msg(wc->status)); @@ -625,8 +625,8 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) return; rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload); - if (likely(imm_type == RTRS_IO_RSP_IMM || - imm_type == RTRS_IO_RSP_W_INV_IMM)) { + if (imm_type == RTRS_IO_RSP_IMM || + imm_type == RTRS_IO_RSP_W_INV_IMM) { u32 
msg_id; w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM); @@ -657,7 +657,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe); else err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); - if (unlikely(err)) { + if (err) { rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n", err); rtrs_rdma_error_recovery(con); @@ -680,6 +680,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) case IB_WC_RDMA_WRITE: /* * post_send() RDMA write completions of IO reqs (read/write) + * and hb. */ break; @@ -702,7 +703,7 @@ static int post_recv_io(struct rtrs_clt_con *con, size_t q_size) } else { err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); } - if (unlikely(err)) + if (err) return err; } @@ -727,7 +728,7 @@ static int post_recv_sess(struct rtrs_clt_sess *sess) q_size *= 2; err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size); - if (unlikely(err)) { + if (err) { rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err); return err; } @@ -788,7 +789,7 @@ static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it) ppcpu_path = this_cpu_ptr(clt->pcpu_path); path = rcu_dereference(*ppcpu_path); - if (unlikely(!path)) + if (!path) path = list_first_or_null_rcu(&clt->paths_list, typeof(*path), s.entry); else @@ -819,10 +820,10 @@ static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it) int inflight; list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) { - if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) + if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) continue; - if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry)))) + if (!list_empty(raw_cpu_ptr(sess->mp_skip_entry))) continue; inflight = atomic_read(&sess->stats->inflight); @@ -870,10 +871,10 @@ static struct rtrs_clt_sess *get_next_path_min_latency(struct path_it *it) ktime_t latency; list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) { - if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) + if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) continue; - if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry)))) + if (!list_empty(raw_cpu_ptr(sess->mp_skip_entry))) continue; latency = sess->s.hb_cur_latency; @@ -963,6 +964,7 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req, req->need_inv_comp = false; req->inv_errno = 0; refcount_set(&req->ref, 1); + req->mp_policy = sess->clt->mp_policy; iov_iter_kvec(&iter, READ, vec, 1, usr_len); len = _copy_from_iter(req->iu->buf, usr_len, &iter); @@ -1043,7 +1045,7 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con, * From time to time we have to post signalled sends, * or send queue will fill up and only QP reset can help. */ - flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ? + flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ? 
0 : IB_SEND_SIGNALED; ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr, @@ -1062,7 +1064,7 @@ static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count) nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K); if (nr < 0) return nr; - if (unlikely(nr < req->sg_cnt)) + if (nr < req->sg_cnt) return -EINVAL; ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); @@ -1086,7 +1088,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len; - if (unlikely(tsize > sess->chunk_size)) { + if (tsize > sess->chunk_size) { rtrs_wrn(s, "Write request failed, size too big %zu > %d\n", tsize, sess->chunk_size); return -EMSGSIZE; @@ -1094,7 +1096,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) if (req->sg_cnt) { count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist, req->sg_cnt, req->dir); - if (unlikely(!count)) { + if (!count) { rtrs_wrn(s, "Write request failed, map failed\n"); return -EINVAL; } @@ -1148,12 +1150,12 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, req->usr_len + sizeof(*msg), imm, wr, &inv_wr); - if (unlikely(ret)) { + if (ret) { rtrs_err_rl(s, "Write request failed: error=%d path=%s [%s:%u]\n", ret, kobject_name(&sess->kobj), sess->hca_name, sess->hca_port); - if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT) + if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) atomic_dec(&sess->stats->inflight); if (req->sg_cnt) ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist, @@ -1179,7 +1181,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req) const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len; - if (unlikely(tsize > sess->chunk_size)) { + if (tsize > sess->chunk_size) { rtrs_wrn(s, "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n", tsize, sess->chunk_size); @@ -1189,7 +1191,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req) if (req->sg_cnt) { count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt, req->dir); - if (unlikely(!count)) { + if (!count) { rtrs_wrn(s, "Read request failed, dma map failed\n"); return -EINVAL; @@ -1254,12 +1256,12 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req) ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id], req->data_len, imm, wr); - if (unlikely(ret)) { + if (ret) { rtrs_err_rl(s, "Read request failed: error=%d path=%s [%s:%u]\n", ret, kobject_name(&sess->kobj), sess->hca_name, sess->hca_port); - if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT) + if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) atomic_dec(&sess->stats->inflight); req->need_inv = false; if (req->sg_cnt) @@ -1287,15 +1289,14 @@ static int rtrs_clt_failover_req(struct rtrs_clt *clt, for (path_it_init(&it, clt); (alive_sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { - if (unlikely(READ_ONCE(alive_sess->state) != - RTRS_CLT_CONNECTED)) + if (READ_ONCE(alive_sess->state) != RTRS_CLT_CONNECTED) continue; req = rtrs_clt_get_copy_req(alive_sess, fail_req); if (req->dir == DMA_TO_DEVICE) err = rtrs_clt_write_req(req); else err = rtrs_clt_read_req(req); - if (unlikely(err)) { + if (err) { req->in_use = false; continue; } @@ -1330,7 +1331,7 @@ static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess) complete_rdma_req(req, -ECONNABORTED, false, true); err = rtrs_clt_failover_req(clt, req); - if (unlikely(err)) + if (err) /* Failover failed, notify anyway */ req->conf(req->priv, err); } @@ -1601,7 +1602,8 @@ 
static int create_con(struct rtrs_clt_sess *sess, unsigned int cid) con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids; con->c.cid = cid; con->c.sess = &sess->s; - atomic_set(&con->io_cnt, 0); + /* Align with srv, init as 1 */ + atomic_set(&con->c.wr_cnt, 1); mutex_init(&con->con_mutex); sess->s.con[cid] = &con->c; @@ -1678,6 +1680,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con) sess->queue_depth * 3 + 1); max_send_sge = 2; } + atomic_set(&con->c.sq_wr_avail, max_send_wr); cq_num = max_send_wr + max_recv_wr; /* alloc iu to recv new rkey reply when server reports flags set */ if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) { @@ -1841,13 +1844,14 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con, } if (!sess->rbufs) { - kfree(sess->rbufs); sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs), GFP_KERNEL); if (!sess->rbufs) return -ENOMEM; } sess->queue_depth = queue_depth; + sess->s.signal_interval = min_not_zero(queue_depth, + (unsigned short) SERVICE_CON_QUEUE_DEPTH); sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size); sess->max_io_size = le32_to_cpu(msg->max_io_size); sess->flags = le32_to_cpu(msg->flags); @@ -1958,7 +1962,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id, break; case RDMA_CM_EVENT_ESTABLISHED: cm_err = rtrs_rdma_conn_established(con, ev); - if (likely(!cm_err)) { + if (!cm_err) { /* * Report success and wake up. Here we abuse state_wq, * i.e. wake up without state change, but we set cm_err. @@ -2377,7 +2381,7 @@ static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc) iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); rtrs_iu_free(iu, sess->s.dev->ib_dev, 1); - if (unlikely(wc->status != IB_WC_SUCCESS)) { + if (wc->status != IB_WC_SUCCESS) { rtrs_err(sess->clt, "Sess info request send failed: %s\n", ib_wc_status_msg(wc->status)); rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL); @@ -2394,7 +2398,7 @@ static int process_info_rsp(struct rtrs_clt_sess *sess, int i, sgi; sg_cnt = le16_to_cpu(msg->sg_cnt); - if (unlikely(!sg_cnt || (sess->queue_depth % sg_cnt))) { + if (!sg_cnt || (sess->queue_depth % sg_cnt)) { rtrs_err(sess->clt, "Incorrect sg_cnt %d, is not multiple\n", sg_cnt); return -EINVAL; @@ -2404,9 +2408,8 @@ static int process_info_rsp(struct rtrs_clt_sess *sess, * Check if IB immediate data size is enough to hold the mem_id and * the offset inside the memory chunk. 
*/ - if (unlikely((ilog2(sg_cnt - 1) + 1) + - (ilog2(sess->chunk_size - 1) + 1) > - MAX_IMM_PAYL_BITS)) { + if ((ilog2(sg_cnt - 1) + 1) + (ilog2(sess->chunk_size - 1) + 1) > + MAX_IMM_PAYL_BITS) { rtrs_err(sess->clt, "RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n", MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size); @@ -2424,7 +2427,7 @@ static int process_info_rsp(struct rtrs_clt_sess *sess, total_len += len; - if (unlikely(!len || (len % sess->chunk_size))) { + if (!len || (len % sess->chunk_size)) { rtrs_err(sess->clt, "Incorrect [%d].len %d\n", sgi, len); return -EINVAL; @@ -2438,11 +2441,11 @@ static int process_info_rsp(struct rtrs_clt_sess *sess, } } /* Sanity check */ - if (unlikely(sgi != sg_cnt || i != sess->queue_depth)) { + if (sgi != sg_cnt || i != sess->queue_depth) { rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n"); return -EINVAL; } - if (unlikely(total_len != sess->chunk_size * sess->queue_depth)) { + if (total_len != sess->chunk_size * sess->queue_depth) { rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len); return -EINVAL; } @@ -2464,14 +2467,14 @@ static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc) WARN_ON(con->c.cid); iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); - if (unlikely(wc->status != IB_WC_SUCCESS)) { + if (wc->status != IB_WC_SUCCESS) { rtrs_err(sess->clt, "Sess info response recv failed: %s\n", ib_wc_status_msg(wc->status)); goto out; } WARN_ON(wc->opcode != IB_WC_RECV); - if (unlikely(wc->byte_len < sizeof(*msg))) { + if (wc->byte_len < sizeof(*msg)) { rtrs_err(sess->clt, "Sess info response is malformed: size %d\n", wc->byte_len); goto out; @@ -2479,24 +2482,24 @@ static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc) ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr, iu->size, DMA_FROM_DEVICE); msg = iu->buf; - if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP)) { + if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP) { rtrs_err(sess->clt, "Sess info response is malformed: type %d\n", le16_to_cpu(msg->type)); goto out; } rx_sz = sizeof(*msg); rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt); - if (unlikely(wc->byte_len < rx_sz)) { + if (wc->byte_len < rx_sz) { rtrs_err(sess->clt, "Sess info response is malformed: size %d\n", wc->byte_len); goto out; } err = process_info_rsp(sess, msg); - if (unlikely(err)) + if (err) goto out; err = post_recv_sess(sess); - if (unlikely(err)) + if (err) goto out; state = RTRS_CLT_CONNECTED; @@ -2523,13 +2526,13 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess) rtrs_clt_info_req_done); rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev, DMA_FROM_DEVICE, rtrs_clt_info_rsp_done); - if (unlikely(!tx_iu || !rx_iu)) { + if (!tx_iu || !rx_iu) { err = -ENOMEM; goto out; } /* Prepare for getting info response */ err = rtrs_iu_post_recv(&usr_con->c, rx_iu); - if (unlikely(err)) { + if (err) { rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err); goto out; } @@ -2544,7 +2547,7 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess) /* Send info request */ err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL); - if (unlikely(err)) { + if (err) { rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err); goto out; } @@ -2555,7 +2558,7 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess) sess->state != RTRS_CLT_CONNECTING, msecs_to_jiffies( RTRS_CONNECT_TIMEOUT_MS)); - if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) { + if (READ_ONCE(sess->state) != 
RTRS_CLT_CONNECTED) { if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR) err = -ECONNRESET; else @@ -2567,7 +2570,7 @@ out: rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1); if (rx_iu) rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1); - if (unlikely(err)) + if (err) /* If we've never taken async path because of malloc problems */ rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL); @@ -2915,7 +2918,7 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess, &old_state); } while (!changed && old_state != RTRS_CLT_DEAD); - if (likely(changed)) { + if (changed) { rtrs_clt_remove_path_from_arr(sess); rtrs_clt_destroy_sess_files(sess, sysfs_self); kobject_put(&sess->kobj); @@ -2987,10 +2990,10 @@ int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops, rcu_read_lock(); for (path_it_init(&it, clt); (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { - if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) + if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) continue; - if (unlikely(usr_len + hdr_len > sess->max_hdr_size)) { + if (usr_len + hdr_len > sess->max_hdr_size) { rtrs_wrn_rl(sess->clt, "%s request failed, user message size is %zu and header length %zu, but max size is %u\n", dir == READ ? "Read" : "Write", @@ -3005,7 +3008,7 @@ int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops, err = rtrs_clt_read_req(req); else err = rtrs_clt_write_req(req); - if (unlikely(err)) { + if (err) { req->in_use = false; continue; } @@ -3078,6 +3081,18 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt, if (IS_ERR(sess)) return PTR_ERR(sess); + mutex_lock(&clt->paths_mutex); + if (clt->paths_num == 0) { + /* + * When all the paths are removed for a session, + * the addition of the first path is like a new session for + * the storage server + */ + sess->for_new_clt = 1; + } + + mutex_unlock(&clt->paths_mutex); + /* * It is totally safe to add path in CONNECTING state: coming * IO will never grab it. 
Also it is very important to add diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h index e276a2dfcf7c..9dc819885ec7 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h @@ -74,7 +74,6 @@ struct rtrs_clt_con { u32 queue_num; unsigned int cpu; struct mutex con_mutex; - atomic_t io_cnt; int cm_err; }; @@ -102,6 +101,7 @@ struct rtrs_clt_io_req { unsigned int usr_len; void *priv; bool in_use; + enum rtrs_mp_policy mp_policy; struct rtrs_clt_con *con; struct rtrs_sg_desc *desc; struct ib_sge *sge; @@ -230,10 +230,7 @@ int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats, char *buf, size_t len); int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable); int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf, - size_t len); -int rtrs_clt_reset_wc_comp_stats(struct rtrs_clt_stats *stats, bool enable); -int rtrs_clt_stats_wc_completion_to_str(struct rtrs_clt_stats *stats, char *buf, - size_t len); + size_t len); int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable); ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats, char *page, size_t len); diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h index 36f184a3b676..d12ddfa50747 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h +++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h @@ -96,6 +96,8 @@ struct rtrs_con { struct rdma_cm_id *cm_id; unsigned int cid; int nr_cqe; + atomic_t wr_cnt; + atomic_t sq_wr_avail; }; struct rtrs_sess { @@ -108,6 +110,7 @@ struct rtrs_sess { unsigned int con_num; unsigned int irq_con_num; unsigned int recon_cnt; + unsigned int signal_interval; struct rtrs_ib_dev *dev; int dev_ref; struct ib_cqe *hb_cqe; @@ -309,9 +312,6 @@ int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu, struct ib_send_wr *tail); int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe); -int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe, - u32 imm_data, enum ib_send_flags flags, - struct ib_send_wr *head); int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con, u32 max_send_sge, int cq_vector, int nr_cqe, diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c index 3df290086169..716ef7b23558 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c @@ -183,7 +183,7 @@ static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc) struct rtrs_sess *s = con->c.sess; struct rtrs_srv_sess *sess = to_srv_sess(s); - if (unlikely(wc->status != IB_WC_SUCCESS)) { + if (wc->status != IB_WC_SUCCESS) { rtrs_err(s, "REG MR failed: %s\n", ib_wc_status_msg(wc->status)); close_sess(sess); @@ -201,7 +201,6 @@ static int rdma_write_sg(struct rtrs_srv_op *id) struct rtrs_srv_sess *sess = to_srv_sess(s); dma_addr_t dma_addr = sess->dma_addr[id->msg_id]; struct rtrs_srv_mr *srv_mr; - struct rtrs_srv *srv = sess->srv; struct ib_send_wr inv_wr; struct ib_rdma_wr imm_wr; struct ib_rdma_wr *wr = NULL; @@ -216,7 +215,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id) sg_cnt = le16_to_cpu(id->rd_msg->sg_cnt); need_inval = le16_to_cpu(id->rd_msg->flags) & RTRS_MSG_NEED_INVAL_F; - if (unlikely(sg_cnt != 1)) + if (sg_cnt != 1) return -EINVAL; offset = 0; @@ -229,7 +228,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id) /* WR will fail with length error * if this is 0 */ - if (unlikely(plist->length == 0)) { + if 
(plist->length == 0) { rtrs_err(s, "Invalid RDMA-Write sg list length 0\n"); return -EINVAL; } @@ -269,7 +268,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id) * From time to time we have to post signaled sends, * or send queue will fill up and only QP reset can help. */ - flags = (atomic_inc_return(&id->con->wr_cnt) % srv->queue_depth) ? + flags = (atomic_inc_return(&id->con->c.wr_cnt) % s->signal_interval) ? 0 : IB_SEND_SIGNALED; if (need_inval) { @@ -322,7 +321,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id) offset, DMA_BIDIRECTIONAL); err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL); - if (unlikely(err)) + if (err) rtrs_err(s, "Posting RDMA-Write-Request to QP failed, err: %d\n", err); @@ -347,7 +346,6 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id, struct ib_send_wr inv_wr, *wr = NULL; struct ib_rdma_wr imm_wr; struct ib_reg_wr rwr; - struct rtrs_srv *srv = sess->srv; struct rtrs_srv_mr *srv_mr; bool need_inval = false; enum ib_send_flags flags; @@ -363,7 +361,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id, sg_cnt = le16_to_cpu(rd_msg->sg_cnt); if (need_inval) { - if (likely(sg_cnt)) { + if (sg_cnt) { inv_wr.wr_cqe = &io_comp_cqe; inv_wr.sg_list = NULL; inv_wr.num_sge = 0; @@ -396,7 +394,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id, * From time to time we have to post signalled sends, * or send queue will fill up and only QP reset can help. */ - flags = (atomic_inc_return(&con->wr_cnt) % srv->queue_depth) ? + flags = (atomic_inc_return(&con->c.wr_cnt) % s->signal_interval) ? 0 : IB_SEND_SIGNALED; imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval); imm_wr.wr.next = NULL; @@ -439,7 +437,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id, imm_wr.wr.ex.imm_data = cpu_to_be32(imm); err = ib_post_send(id->con->c.qp, wr, NULL); - if (unlikely(err)) + if (err) rtrs_err_rl(s, "Posting RDMA-Reply to QP failed, err: %d\n", err); @@ -496,7 +494,7 @@ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status) id->status = status; - if (unlikely(sess->state != RTRS_SRV_CONNECTED)) { + if (sess->state != RTRS_SRV_CONNECTED) { rtrs_err_rl(s, "Sending I/O response failed, session %s is disconnected, sess state %s\n", kobject_name(&sess->kobj), @@ -508,12 +506,11 @@ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status) ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey)); } - if (unlikely(atomic_sub_return(1, - &con->sq_wr_avail) < 0)) { + if (atomic_sub_return(1, &con->c.sq_wr_avail) < 0) { rtrs_err(s, "IB send queue full: sess=%s cid=%d\n", kobject_name(&sess->kobj), con->c.cid); - atomic_add(1, &con->sq_wr_avail); + atomic_add(1, &con->c.sq_wr_avail); spin_lock(&con->rsp_wr_wait_lock); list_add_tail(&id->wait_list, &con->rsp_wr_wait_list); spin_unlock(&con->rsp_wr_wait_lock); @@ -525,7 +522,7 @@ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status) else err = rdma_write_sg(id); - if (unlikely(err)) { + if (err) { rtrs_err_rl(s, "IO response failed: %d: sess=%s\n", err, kobject_name(&sess->kobj)); close_sess(sess); @@ -712,7 +709,7 @@ static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc) iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); rtrs_iu_free(iu, sess->s.dev->ib_dev, 1); - if (unlikely(wc->status != IB_WC_SUCCESS)) { + if (wc->status != IB_WC_SUCCESS) { rtrs_err(s, "Sess info response send failed: %s\n", ib_wc_status_msg(wc->status)); close_sess(sess); @@ -801,7 +798,7 @@ static int 
process_info_req(struct rtrs_srv_con *con, size_t tx_sz; err = post_recv_sess(sess); - if (unlikely(err)) { + if (err) { rtrs_err(s, "post_recv_sess(), err: %d\n", err); return err; } @@ -814,14 +811,14 @@ static int process_info_req(struct rtrs_srv_con *con, strscpy(sess->s.sessname, msg->sessname, sizeof(sess->s.sessname)); rwr = kcalloc(sess->mrs_num, sizeof(*rwr), GFP_KERNEL); - if (unlikely(!rwr)) + if (!rwr) return -ENOMEM; tx_sz = sizeof(*rsp); tx_sz += sizeof(rsp->desc[0]) * sess->mrs_num; tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, sess->s.dev->ib_dev, DMA_TO_DEVICE, rtrs_srv_info_rsp_done); - if (unlikely(!tx_iu)) { + if (!tx_iu) { err = -ENOMEM; goto rwr_free; } @@ -853,7 +850,7 @@ static int process_info_req(struct rtrs_srv_con *con, } err = rtrs_srv_create_sess_files(sess); - if (unlikely(err)) + if (err) goto iu_free; kobject_get(&sess->kobj); get_device(&sess->srv->dev); @@ -873,7 +870,7 @@ static int process_info_req(struct rtrs_srv_con *con, /* Send info response */ err = rtrs_iu_post_send(&con->c, tx_iu, tx_sz, reg_wr); - if (unlikely(err)) { + if (err) { rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err); iu_free: rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1); @@ -896,14 +893,14 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc) WARN_ON(con->c.cid); iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); - if (unlikely(wc->status != IB_WC_SUCCESS)) { + if (wc->status != IB_WC_SUCCESS) { rtrs_err(s, "Sess info request receive failed: %s\n", ib_wc_status_msg(wc->status)); goto close; } WARN_ON(wc->opcode != IB_WC_RECV); - if (unlikely(wc->byte_len < sizeof(*msg))) { + if (wc->byte_len < sizeof(*msg)) { rtrs_err(s, "Sess info request is malformed: size %d\n", wc->byte_len); goto close; @@ -911,13 +908,13 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc) ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr, iu->size, DMA_FROM_DEVICE); msg = iu->buf; - if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ)) { + if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ) { rtrs_err(s, "Sess info request is malformed: type %d\n", le16_to_cpu(msg->type)); goto close; } err = process_info_req(con, msg); - if (unlikely(err)) + if (err) goto close; out: @@ -938,11 +935,11 @@ static int post_recv_info_req(struct rtrs_srv_con *con) rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL, sess->s.dev->ib_dev, DMA_FROM_DEVICE, rtrs_srv_info_req_done); - if (unlikely(!rx_iu)) + if (!rx_iu) return -ENOMEM; /* Prepare for getting info response */ err = rtrs_iu_post_recv(&con->c, rx_iu); - if (unlikely(err)) { + if (err) { rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err); rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1); return err; @@ -957,7 +954,7 @@ static int post_recv_io(struct rtrs_srv_con *con, size_t q_size) for (i = 0; i < q_size; i++) { err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); - if (unlikely(err)) + if (err) return err; } @@ -978,7 +975,7 @@ static int post_recv_sess(struct rtrs_srv_sess *sess) q_size = srv->queue_depth; err = post_recv_io(to_srv_con(sess->s.con[cid]), q_size); - if (unlikely(err)) { + if (err) { rtrs_err(s, "post_recv_io(), err: %d\n", err); return err; } @@ -1001,13 +998,13 @@ static void process_read(struct rtrs_srv_con *con, void *data; int ret; - if (unlikely(sess->state != RTRS_SRV_CONNECTED)) { + if (sess->state != RTRS_SRV_CONNECTED) { rtrs_err_rl(s, "Processing read request failed, session is disconnected, sess state %s\n", rtrs_srv_state_str(sess->state)); return; } - if (unlikely(msg->sg_cnt 
!= 1 && msg->sg_cnt != 0)) { + if (msg->sg_cnt != 1 && msg->sg_cnt != 0) { rtrs_err_rl(s, "Processing read request failed, invalid message\n"); return; @@ -1025,7 +1022,7 @@ static void process_read(struct rtrs_srv_con *con, ret = ctx->ops.rdma_ev(srv->priv, id, READ, data, data_len, data + data_len, usr_len); - if (unlikely(ret)) { + if (ret) { rtrs_err_rl(s, "Processing read request failed, user module cb reported for msg_id %d, err: %d\n", buf_id, ret); @@ -1059,7 +1056,7 @@ static void process_write(struct rtrs_srv_con *con, void *data; int ret; - if (unlikely(sess->state != RTRS_SRV_CONNECTED)) { + if (sess->state != RTRS_SRV_CONNECTED) { rtrs_err_rl(s, "Processing write request failed, session is disconnected, sess state %s\n", rtrs_srv_state_str(sess->state)); @@ -1076,8 +1073,8 @@ static void process_write(struct rtrs_srv_con *con, data_len = off - usr_len; data = page_address(srv->chunks[buf_id]); ret = ctx->ops.rdma_ev(srv->priv, id, WRITE, data, data_len, - data + data_len, usr_len); - if (unlikely(ret)) { + data + data_len, usr_len); + if (ret) { rtrs_err_rl(s, "Processing write request failed, user module callback reports err: %d\n", ret); @@ -1141,7 +1138,7 @@ static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) u32 msg_id, off; void *data; - if (unlikely(wc->status != IB_WC_SUCCESS)) { + if (wc->status != IB_WC_SUCCESS) { rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n", ib_wc_status_msg(wc->status)); close_sess(sess); @@ -1198,7 +1195,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) u32 imm_type, imm_payload; int err; - if (unlikely(wc->status != IB_WC_SUCCESS)) { + if (wc->status != IB_WC_SUCCESS) { if (wc->status != IB_WC_WR_FLUSH_ERR) { rtrs_err(s, "%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n", @@ -1218,21 +1215,20 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) if (WARN_ON(wc->wr_cqe != &io_comp_cqe)) return; err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); - if (unlikely(err)) { + if (err) { rtrs_err(s, "rtrs_post_recv(), err: %d\n", err); close_sess(sess); break; } rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload); - if (likely(imm_type == RTRS_IO_REQ_IMM)) { + if (imm_type == RTRS_IO_REQ_IMM) { u32 msg_id, off; void *data; msg_id = imm_payload >> sess->mem_bits; off = imm_payload & ((1 << sess->mem_bits) - 1); - if (unlikely(msg_id >= srv->queue_depth || - off >= max_chunk_size)) { + if (msg_id >= srv->queue_depth || off >= max_chunk_size) { rtrs_err(s, "Wrong msg_id %u, off %u\n", msg_id, off); close_sess(sess); @@ -1244,7 +1240,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) mr->msg_off = off; mr->msg_id = msg_id; err = rtrs_srv_inv_rkey(con, mr); - if (unlikely(err)) { + if (err) { rtrs_err(s, "rtrs_post_recv(), err: %d\n", err); close_sess(sess); @@ -1268,10 +1264,11 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) case IB_WC_SEND: /* * post_send() RDMA write completions of IO reqs (read/write) + * and hb. 
*/ - atomic_add(srv->queue_depth, &con->sq_wr_avail); + atomic_add(s->signal_interval, &con->c.sq_wr_avail); - if (unlikely(!list_empty_careful(&con->rsp_wr_wait_list))) + if (!list_empty_careful(&con->rsp_wr_wait_list)) rtrs_rdma_process_wr_wait_list(con); break; @@ -1648,7 +1645,7 @@ static int create_con(struct rtrs_srv_sess *sess, con->c.cm_id = cm_id; con->c.sess = &sess->s; con->c.cid = cid; - atomic_set(&con->wr_cnt, 1); + atomic_set(&con->c.wr_cnt, 1); wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr; if (con->c.cid == 0) { @@ -1659,6 +1656,8 @@ static int create_con(struct rtrs_srv_sess *sess, max_send_wr = min_t(int, wr_limit, SERVICE_CON_QUEUE_DEPTH * 2 + 2); max_recv_wr = max_send_wr; + s->signal_interval = min_not_zero(srv->queue_depth, + (size_t)SERVICE_CON_QUEUE_DEPTH); } else { /* when always_invlaidate enalbed, we need linv+rinv+mr+imm */ if (always_invalidate) @@ -1679,7 +1678,7 @@ static int create_con(struct rtrs_srv_sess *sess, */ } cq_num = max_send_wr + max_recv_wr; - atomic_set(&con->sq_wr_avail, max_send_wr); + atomic_set(&con->c.sq_wr_avail, max_send_wr); cq_vector = rtrs_srv_get_next_cq_vector(sess); /* TODO: SOFTIRQ can be faster, but be careful with softirq context */ @@ -1894,7 +1893,7 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id, err = create_con(sess, cm_id, cid); if (err) { rtrs_err((&sess->s), "create_con(), error %d\n", err); - (void)rtrs_rdma_do_reject(cm_id, err); + rtrs_rdma_do_reject(cm_id, err); /* * Since session has other connections we follow normal way * through workqueue, but still return an error to tell cma.c @@ -1905,7 +1904,7 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id, err = rtrs_rdma_do_accept(sess, cm_id); if (err) { rtrs_err((&sess->s), "rtrs_rdma_do_accept(), error %d\n", err); - (void)rtrs_rdma_do_reject(cm_id, err); + rtrs_rdma_do_reject(cm_id, err); /* * Since current connection was successfully added to the * session we follow normal way through workqueue to close the diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h index f8da2e3f0bda..9d8d2a91a235 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h +++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h @@ -42,8 +42,6 @@ struct rtrs_srv_stats { struct rtrs_srv_con { struct rtrs_con c; - atomic_t wr_cnt; - atomic_t sq_wr_avail; struct list_head rsp_wr_wait_list; spinlock_t rsp_wr_wait_lock; }; @@ -140,10 +138,6 @@ static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s, int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable); ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats, char *page, size_t len); -int rtrs_srv_reset_wc_completion_stats(struct rtrs_srv_stats *stats, - bool enable); -int rtrs_srv_stats_wc_completion_to_str(struct rtrs_srv_stats *stats, char *buf, - size_t len); int rtrs_srv_reset_all_stats(struct rtrs_srv_stats *stats, bool enable); ssize_t rtrs_srv_reset_all_help(struct rtrs_srv_stats *stats, char *page, size_t len); diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c index 61919ebd92b2..ca542e477d38 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs.c +++ b/drivers/infiniband/ulp/rtrs/rtrs.c @@ -182,22 +182,28 @@ int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu, } EXPORT_SYMBOL_GPL(rtrs_iu_post_rdma_write_imm); -int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe, - u32 imm_data, enum ib_send_flags flags, - struct ib_send_wr *head) +static int rtrs_post_rdma_write_imm_empty(struct 
rtrs_con *con, + struct ib_cqe *cqe, + u32 imm_data, + struct ib_send_wr *head) { struct ib_rdma_wr wr; + struct rtrs_sess *sess = con->sess; + enum ib_send_flags sflags; + + atomic_dec_if_positive(&con->sq_wr_avail); + sflags = (atomic_inc_return(&con->wr_cnt) % sess->signal_interval) ? + 0 : IB_SEND_SIGNALED; wr = (struct ib_rdma_wr) { .wr.wr_cqe = cqe, - .wr.send_flags = flags, + .wr.send_flags = sflags, .wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM, .wr.ex.imm_data = cpu_to_be32(imm_data), }; return rtrs_post_send(con->qp, head, &wr.wr, NULL); } -EXPORT_SYMBOL_GPL(rtrs_post_rdma_write_imm_empty); static void qp_event_handler(struct ib_event *ev, void *ctx) { @@ -314,8 +320,9 @@ void rtrs_send_hb_ack(struct rtrs_sess *sess) imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0); err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm, - 0, NULL); + NULL); if (err) { + rtrs_err(sess, "send HB ACK failed, errno: %d\n", err); sess->hb_err_handler(usr_con); return; } @@ -333,6 +340,7 @@ static void hb_work(struct work_struct *work) usr_con = sess->con[0]; if (sess->hb_missed_cnt > sess->hb_missed_max) { + rtrs_err(sess, "HB missed max reached.\n"); sess->hb_err_handler(usr_con); return; } @@ -346,8 +354,9 @@ static void hb_work(struct work_struct *work) imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0); err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm, - 0, NULL); + NULL); if (err) { + rtrs_err(sess, "HB send failed, errno: %d\n", err); sess->hb_err_handler(usr_con); return; } |
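
For readers skimming the diff above: the bulk of the change moves the send-queue accounting (wr_cnt, sq_wr_avail) out of the client- and server-private connection structs into the common struct rtrs_con, and replaces "signal every queue_depth-th send" with a per-session signal_interval = min_not_zero(queue_depth, SERVICE_CON_QUEUE_DEPTH). As the in-tree comments note, a signalled send must be posted from time to time or the send queue fills up and only a QP reset can help. The standalone userspace sketch below models that accounting with C11 atomics; struct con, post_send() and on_send_completion() are illustrative stand-ins, not the driver's API. Every signal_interval-th post is signalled, and each signalled completion returns a batch of slots, mirroring the server's atomic_add(s->signal_interval, &con->c.sq_wr_avail).

/* Standalone model of the periodic-signalling scheme; not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

struct con {
	atomic_int wr_cnt;       /* posted sends; the patch initialises it to 1 */
	atomic_int sq_wr_avail;  /* free send-queue slots */
	int signal_interval;     /* min_not_zero(queue_depth, SERVICE_CON_QUEUE_DEPTH) */
};

/*
 * Returns -1 when the send queue is exhausted, 1 when this post must carry
 * IB_SEND_SIGNALED, 0 for an ordinary unsignalled post.
 */
static int post_send(struct con *c)
{
	if (atomic_fetch_sub(&c->sq_wr_avail, 1) <= 0) {
		/* The real driver parks the request on a wait list instead. */
		atomic_fetch_add(&c->sq_wr_avail, 1);
		return -1;
	}
	/* Same test as atomic_inc_return() % signal_interval in the driver. */
	return (atomic_fetch_add(&c->wr_cnt, 1) + 1) % c->signal_interval == 0;
}

/* A signalled completion gives a whole batch of slots back at once. */
static void on_send_completion(struct con *c)
{
	atomic_fetch_add(&c->sq_wr_avail, c->signal_interval);
}

int main(void)
{
	struct con c = { .signal_interval = 4 };

	atomic_init(&c.wr_cnt, 1);
	atomic_init(&c.sq_wr_avail, 8);

	for (int i = 0; i < 12; i++) {
		int sig = post_send(&c);

		if (sig < 0) {
			printf("post %2d: send queue full\n", i);
			continue;
		}
		printf("post %2d: %s\n", i, sig ? "IB_SEND_SIGNALED" : "unsignalled");
		if (sig)
			on_send_completion(&c);
	}
	return 0;
}

Compiling and running this prints which posts would carry IB_SEND_SIGNALED; the point of signalling only every Nth work request is that unsignalled WRs are only retired when a later signalled completion arrives, so the counter has to be per connection and the periodic signalled send is what keeps the queue draining.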
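A second recurring change on the client side is that the multipath policy is now sampled once per request (req->mp_policy = sess->clt->mp_policy in rtrs_clt_init_req), and the MIN_INFLIGHT inflight counter is incremented and decremented against that snapshot rather than against the live clt->mp_policy. The sketch below is again a standalone illustration with made-up names (in the driver the counter lives in per-path stats, not on the clt): if the policy flips while a request is in flight, the submit-time decision still governs the completion-time decrement, so the counter stays balanced.

/* Standalone model of the per-request mp_policy snapshot; not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

enum mp_policy { MP_POLICY_RR, MP_POLICY_MIN_INFLIGHT };

struct clt {
	enum mp_policy mp_policy;  /* can be changed via sysfs at any time */
	atomic_int inflight;       /* in the driver this lives in per-path stats */
};

struct io_req {
	enum mp_policy mp_policy;  /* copied from clt when the request is set up */
};

static void req_init(struct io_req *req, const struct clt *clt)
{
	req->mp_policy = clt->mp_policy;  /* decide once, reuse for inc and dec */
}

static void req_submit(struct io_req *req, struct clt *clt)
{
	if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
		atomic_fetch_add(&clt->inflight, 1);
}

static void req_complete(struct io_req *req, struct clt *clt)
{
	if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
		atomic_fetch_sub(&clt->inflight, 1);
}

int main(void)
{
	struct clt clt = { .mp_policy = MP_POLICY_MIN_INFLIGHT };
	struct io_req req;

	atomic_init(&clt.inflight, 0);
	req_init(&req, &clt);
	req_submit(&req, &clt);

	clt.mp_policy = MP_POLICY_RR;      /* policy flips mid-flight */

	req_complete(&req, &clt);          /* still pairs with the increment */
	printf("inflight = %d\n", atomic_load(&clt.inflight));  /* prints 0: balanced */
	return 0;
}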