diff options
author | Amrani, Ram <Ram.Amrani@cavium.com> | 2017-04-27 12:35:35 +0200 |
---|---|---|
committer | Doug Ledford <dledford@redhat.com> | 2017-04-28 18:47:57 +0200 |
commit | b6acd71fefc92d13ac9a0f117101d1aab1102d18 (patch) | |
tree | a13f6411c42a244e75e8434383f4f2fb18cd92ed /drivers/infiniband/hw/qedr | |
parent | RDMA/qedr: destroy CQ only after HW releases it (diff) | |
download | linux-b6acd71fefc92d13ac9a0f117101d1aab1102d18.tar.xz linux-b6acd71fefc92d13ac9a0f117101d1aab1102d18.zip |
RDMA/qedr: add support for send+invalidate in poll CQ
Split the poll responder CQ into two functions.
Add support for send+invalidate in poll CQ.
Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Diffstat (limited to 'drivers/infiniband/hw/qedr')
-rw-r--r-- | drivers/infiniband/hw/qedr/qedr.h | 3 | ||||
-rw-r--r-- | drivers/infiniband/hw/qedr/verbs.c | 98 |
2 files changed, 63 insertions(+), 38 deletions(-)
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index fc9e2797ac5a..bf02ae4c8891 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -431,7 +431,8 @@ struct qedr_mr { RDMA_CQE_RESPONDER_IMM_FLG_SHIFT) #define QEDR_RESP_RDMA (RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \ RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT) -#define QEDR_RESP_RDMA_IMM (QEDR_RESP_IMM | QEDR_RESP_RDMA) +#define QEDR_RESP_INV (RDMA_CQE_RESPONDER_INV_FLG_MASK << \ + RDMA_CQE_RESPONDER_INV_FLG_SHIFT) static inline void qedr_inc_sw_cons(struct qedr_qp_hwq_info *info) { diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 9f76c46b058e..a3afd2b21cc5 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3331,57 +3331,81 @@ static int qedr_poll_cq_req(struct qedr_dev *dev, return cnt; } -static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp, - struct qedr_cq *cq, struct ib_wc *wc, - struct rdma_cqe_responder *resp, u64 wr_id) +static inline int qedr_cqe_resp_status_to_ib(u8 status) { - enum ib_wc_status wc_status = IB_WC_SUCCESS; - u8 flags; - - wc->opcode = IB_WC_RECV; - wc->wc_flags = 0; - - switch (resp->status) { + switch (status) { case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR: - wc_status = IB_WC_LOC_ACCESS_ERR; - break; + return IB_WC_LOC_ACCESS_ERR; case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR: - wc_status = IB_WC_LOC_LEN_ERR; - break; + return IB_WC_LOC_LEN_ERR; case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR: - wc_status = IB_WC_LOC_QP_OP_ERR; - break; + return IB_WC_LOC_QP_OP_ERR; case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR: - wc_status = IB_WC_LOC_PROT_ERR; - break; + return IB_WC_LOC_PROT_ERR; case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR: - wc_status = IB_WC_MW_BIND_ERR; - break; + return IB_WC_MW_BIND_ERR; case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR: - wc_status = IB_WC_REM_INV_RD_REQ_ERR; - break; + return IB_WC_REM_INV_RD_REQ_ERR; case 
RDMA_CQE_RESP_STS_OK: - wc_status = IB_WC_SUCCESS; - wc->byte_len = le32_to_cpu(resp->length); + return IB_WC_SUCCESS; + default: + return IB_WC_GENERAL_ERR; + } +} - flags = resp->flags & QEDR_RESP_RDMA_IMM; +static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp, + struct ib_wc *wc) +{ + wc->status = IB_WC_SUCCESS; + wc->byte_len = le32_to_cpu(resp->length); - if (flags == QEDR_RESP_RDMA_IMM) + if (resp->flags & QEDR_RESP_IMM) { + wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key); + wc->wc_flags |= IB_WC_WITH_IMM; + + if (resp->flags & QEDR_RESP_RDMA) wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; - if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) { - wc->ex.imm_data = - le32_to_cpu(resp->imm_data_or_inv_r_Key); - wc->wc_flags |= IB_WC_WITH_IMM; - } - break; - default: - wc->status = IB_WC_GENERAL_ERR; - DP_ERR(dev, "Invalid CQE status detected\n"); + if (resp->flags & QEDR_RESP_INV) + return -EINVAL; + + } else if (resp->flags & QEDR_RESP_INV) { + wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key); + wc->wc_flags |= IB_WC_WITH_INVALIDATE; + + if (resp->flags & QEDR_RESP_RDMA) + return -EINVAL; + + } else if (resp->flags & QEDR_RESP_RDMA) { + return -EINVAL; + } + + return 0; +} + +static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp, + struct qedr_cq *cq, struct ib_wc *wc, + struct rdma_cqe_responder *resp, u64 wr_id) +{ + /* Must fill fields before qedr_set_ok_cqe_resp_wc() */ + wc->opcode = IB_WC_RECV; + wc->wc_flags = 0; + + if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) { + if (qedr_set_ok_cqe_resp_wc(resp, wc)) + DP_ERR(dev, + "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n", + cq, cq->icid, resp->flags); + + } else { + wc->status = qedr_cqe_resp_status_to_ib(resp->status); + if (wc->status == IB_WC_GENERAL_ERR) + DP_ERR(dev, + "CQ %p (icid=%d) contains an invalid CQE status %d\n", + cq, cq->icid, resp->status); } - /* fill WC */ - wc->status = wc_status; + /* Fill the rest of the WC */ 
wc->vendor_err = 0; wc->src_qp = qp->id; wc->qp = &qp->ibqp; |