author | Roland Dreier <rolandd@cisco.com> | 2005-11-15 09:24:23 +0100
---|---|---
committer | Roland Dreier <rolandd@cisco.com> | 2005-11-15 09:24:23 +0100
commit | cbc5b2bb9e226c2b2b981836d2289912e2ef3c1c (patch) |
tree | 9bb777025b4237dad46d7e9235c80f2e85749a10 /drivers/infiniband |
parent | [IB] srp: don't post receive if no send buf available (diff) |
[IB] mthca: don't disable RDMA writes if no responder resources
Responder resources are only required to handle RDMA reads and atomic
operations, not RDMA writes. So the driver should allow RDMA writes
even if responder resources are set to 0. This is especially
important for the UC transport -- with the old code, it was impossible
to enable RDMA writes for UC QPs.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
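The UC case is the easiest way to see the problem from user space: a UC QP has no responder resources (RDMA reads and atomics are not supported on UC, so max_dest_rd_atomic is never set), which means the driver's resp_depth stays at zero and the old code never set the RWE bit no matter which access flags the consumer requested. Below is a minimal libibverbs sketch of such a request; the helper name and the RESET->INIT attribute choice are illustrative, not part of this patch.

```c
/*
 * Illustrative sketch only: request remote RDMA write access on an
 * existing IBV_QPT_UC queue pair during the RESET->INIT transition.
 * Error handling and the rest of the connection setup are omitted.
 */
#include <stdint.h>
#include <infiniband/verbs.h>

static int enable_remote_write(struct ibv_qp *qp, uint8_t port_num)
{
	struct ibv_qp_attr attr = {
		.qp_state        = IBV_QPS_INIT,
		.pkey_index      = 0,
		.port_num        = port_num,
		/* UC supports RDMA writes but not reads/atomics, and it
		 * never gets a max_dest_rd_atomic value, so the driver's
		 * resp_depth stays zero for the lifetime of the QP. */
		.qp_access_flags = IBV_ACCESS_REMOTE_WRITE,
	};

	return ibv_modify_qp(qp, &attr,
			     IBV_QP_STATE      |
			     IBV_QP_PKEY_INDEX |
			     IBV_QP_PORT       |
			     IBV_QP_ACCESS_FLAGS);
}
```

With the patch applied, mthca derives the RWE bit from qp_access_flags alone, so a request like this takes effect even though resp_depth is still zero.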
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- | drivers/infiniband/hw/mthca/mthca_qp.c | 27 |
1 file changed, 12 insertions, 15 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 760c418d5bc9..5899f0c765be 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -730,15 +730,16 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	}
 
 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
+		qp_context->params2 |=
+			cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
+				    MTHCA_QP_BIT_RWE : 0);
+
 		/*
-		 * Only enable RDMA/atomics if we have responder
-		 * resources set to a non-zero value.
+		 * Only enable RDMA reads and atomics if we have
+		 * responder resources set to a non-zero value.
 		 */
 		if (qp->resp_depth) {
 			qp_context->params2 |=
-				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
-					    MTHCA_QP_BIT_RWE : 0);
-			qp_context->params2 |=
 				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
 					    MTHCA_QP_BIT_RRE : 0);
 			qp_context->params2 |=
@@ -759,31 +760,27 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		if (qp->resp_depth && !attr->max_dest_rd_atomic) {
 			/*
 			 * Lowering our responder resources to zero.
-			 * Turn off RDMA/atomics as responder.
-			 * (RWE/RRE/RAE in params2 already zero)
+			 * Turn off reads RDMA and atomics as responder.
+			 * (RRE/RAE in params2 already zero)
 			 */
-			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
-								MTHCA_QP_OPTPAR_RRE |
+			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
 								MTHCA_QP_OPTPAR_RAE);
 		}
 
 		if (!qp->resp_depth && attr->max_dest_rd_atomic) {
 			/*
 			 * Increasing our responder resources from
-			 * zero. Turn on RDMA/atomics as appropriate.
+			 * zero. Turn on RDMA reads and atomics as
+			 * appropriate.
 			 */
 			qp_context->params2 |=
-				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_WRITE ?
-					    MTHCA_QP_BIT_RWE : 0);
-			qp_context->params2 |=
 				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
 					    MTHCA_QP_BIT_RRE : 0);
 			qp_context->params2 |=
 				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
 					    MTHCA_QP_BIT_RAE : 0);
-			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
-								MTHCA_QP_OPTPAR_RRE |
+			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
 								MTHCA_QP_OPTPAR_RAE);
 		}
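The net effect of the first hunk is a simpler rule for the access-flags path: RWE follows the remote-write access flag unconditionally, while RRE and RAE are additionally gated on non-zero responder resources. A stand-alone sketch of that rule is below; the helper and the HW_BIT_* values are illustrative placeholders, not the real MTHCA_QP_BIT_* encodings or driver code.

```c
/*
 * Stand-alone sketch of the flag selection rule after this patch.
 * The HW_BIT_* and ACCESS_* values are illustrative placeholders,
 * not the encodings used by mthca_qp.c or ib_verbs.h.
 */
#include <stdint.h>

enum {				/* placeholder hardware enable bits */
	HW_BIT_RWE = 1 << 0,	/* remote write enable  */
	HW_BIT_RRE = 1 << 1,	/* remote read enable   */
	HW_BIT_RAE = 1 << 2,	/* remote atomic enable */
};

enum {				/* placeholder consumer access flags */
	ACCESS_REMOTE_WRITE  = 1 << 1,
	ACCESS_REMOTE_READ   = 1 << 2,
	ACCESS_REMOTE_ATOMIC = 1 << 3,
};

/*
 * RDMA writes follow the access flags unconditionally; RDMA reads and
 * atomics additionally require non-zero responder resources.
 */
static uint32_t access_to_hw_bits(int qp_access_flags, int resp_depth)
{
	uint32_t bits = 0;

	if (qp_access_flags & ACCESS_REMOTE_WRITE)
		bits |= HW_BIT_RWE;

	if (resp_depth) {
		if (qp_access_flags & ACCESS_REMOTE_READ)
			bits |= HW_BIT_RRE;
		if (qp_access_flags & ACCESS_REMOTE_ATOMIC)
			bits |= HW_BIT_RAE;
	}

	return bits;
}
```

The second hunk applies the same split to the responder-resource transitions: only RRE and RAE are turned on or masked off when max_dest_rd_atomic changes, leaving RWE untouched.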