author     Linus Torvalds <torvalds@linux-foundation.org>  2017-08-25 00:48:38 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-08-25 00:48:38 +0200
commit     90a6cd503982bfd33ce8c70eb49bd2dd33bc6325
tree       982ae0d6653b7a4b5020b52173cb989bd6e3aab0
parent     Merge tag 'acpi-4.13-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/ra...
parent     IB/mlx5: Always return success for RoCE modify port
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull more rdma fixes from Doug Ledford:
"Well, I thought we were going to be done for this -rc cycle. I should
have known better than to say so though.
We have four additional items that trickled in.
One was a simple mistake on my part. I took a patch into my for-next
thinking that the issue was less severe than it was. I was then
notified that it needed to be in my -rc area instead.
The other three were just found late in testing.
Summary:
- One core fix accidentally applied first to for-next and then cherry
picked back because it needed to be in the -rc cycles instead
- Another core fix
- Two mlx5 fixes"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
IB/mlx5: Always return success for RoCE modify port
IB/mlx5: Fix Raw Packet QP event handler assignment
IB/core: Avoid accessing non-allocated memory when inferring port type
RDMA/uverbs: Initialize cq_context appropriately
 drivers/infiniband/core/uverbs_cmd.c | 13
 drivers/infiniband/core/verbs.c      |  7
 drivers/infiniband/hw/mlx5/main.c    |  6
 drivers/infiniband/hw/mlx5/qp.c      |  1
 include/rdma/ib_verbs.h              |  1
 5 files changed, 22 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index c551d2b275fd..739bd69ef1d4 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1015,7 +1015,7 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
 	cq->uobject = &obj->uobject;
 	cq->comp_handler = ib_uverbs_comp_handler;
 	cq->event_handler = ib_uverbs_cq_event_handler;
-	cq->cq_context = &ev_file->ev_queue;
+	cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
 	atomic_set(&cq->usecnt, 0);
 
 	obj->uobject.object = cq;
@@ -1522,6 +1522,7 @@ static int create_qp(struct ib_uverbs_file *file,
 	qp->qp_type = attr.qp_type;
 	atomic_set(&qp->usecnt, 0);
 	atomic_inc(&pd->usecnt);
+	qp->port = 0;
 	if (attr.send_cq)
 		atomic_inc(&attr.send_cq->usecnt);
 	if (attr.recv_cq)
@@ -1962,8 +1963,9 @@ static int modify_qp(struct ib_uverbs_file *file,
 	attr->alt_timeout = cmd->base.alt_timeout;
 	attr->rate_limit = cmd->rate_limit;
 
-	attr->ah_attr.type = rdma_ah_find_type(qp->device,
-					       cmd->base.dest.port_num);
+	if (cmd->base.attr_mask & IB_QP_AV)
+		attr->ah_attr.type = rdma_ah_find_type(qp->device,
+						       cmd->base.dest.port_num);
 	if (cmd->base.dest.is_global) {
 		rdma_ah_set_grh(&attr->ah_attr, NULL,
 				cmd->base.dest.flow_label,
@@ -1981,8 +1983,9 @@ static int modify_qp(struct ib_uverbs_file *file,
 		rdma_ah_set_port_num(&attr->ah_attr,
 				     cmd->base.dest.port_num);
 
-	attr->alt_ah_attr.type = rdma_ah_find_type(qp->device,
-					      cmd->base.dest.port_num);
+	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
+		attr->alt_ah_attr.type =
+			rdma_ah_find_type(qp->device, cmd->base.dest.port_num);
 	if (cmd->base.alt_dest.is_global) {
 		rdma_ah_set_grh(&attr->alt_ah_attr, NULL,
 				cmd->base.alt_dest.flow_label,
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 7f8fe443df46..b456e3ca1876 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -838,6 +838,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 	spin_lock_init(&qp->mr_lock);
 	INIT_LIST_HEAD(&qp->rdma_mrs);
 	INIT_LIST_HEAD(&qp->sig_mrs);
+	qp->port = 0;
 
 	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
 		return ib_create_xrc_qp(qp, qp_init_attr);
@@ -1297,7 +1298,11 @@ int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
 		if (ret)
 			return ret;
 	}
-	return ib_security_modify_qp(qp, attr, attr_mask, udata);
+	ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
+	if (!ret && (attr_mask & IB_QP_PORT))
+		qp->port = attr->port_num;
+
+	return ret;
 }
 EXPORT_SYMBOL(ib_modify_qp_with_udata);
 
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index a7f2e60085c4..f7fcde1ff0aa 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1085,6 +1085,12 @@ static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
 	bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
 		      IB_LINK_LAYER_INFINIBAND);
 
+	/* CM layer calls ib_modify_port() regardless of the link layer. For
+	 * Ethernet ports, qkey violation and Port capabilities are meaningless.
+	 */
+	if (!is_ib)
+		return 0;
+
 	if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
 		change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
 		value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 0889ff367c86..f58f8f5f3ebe 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1238,6 +1238,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 			goto err_destroy_tis;
 
 		sq->base.container_mibqp = qp;
+		sq->base.mqp.event = mlx5_ib_qp_event;
 	}
 
 	if (qp->rq.wqe_cnt) {
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index b5732432bb29..88c32aba32f7 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1683,6 +1683,7 @@ struct ib_qp {
 	enum ib_qp_type		qp_type;
 	struct ib_rwq_ind_table *rwq_ind_tbl;
 	struct ib_qp_security  *qp_sec;
+	u8			port;
 };
 
 struct ib_mr {
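The uverbs_cmd.c change guards the case of a CQ created without a completion channel, where ev_file is NULL. Below is a minimal, self-contained sketch of that guarded assignment; the struct names are hypothetical stand-ins, not the kernel's ib_uverbs structures.

/*
 * Stand-in types only; not the kernel's ib_uverbs structures. Without the
 * ternary guard, &ev_file->ev_queue with a NULL ev_file would not evaluate
 * to NULL, so later "is there a completion channel?" checks would be fooled.
 */
#include <stddef.h>
#include <stdio.h>

struct ev_file {
	int pad;
	int ev_queue;			/* stand-in for the real event queue */
};

struct cq {
	void *cq_context;
};

static void set_cq_context(struct cq *cq, struct ev_file *ev_file)
{
	/* the fixed pattern: NULL when no completion channel exists */
	cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
}

int main(void)
{
	struct ev_file ev = { 0, 0 };
	struct cq cq;

	set_cq_context(&cq, NULL);
	printf("no completion channel: cq_context = %p\n", cq.cq_context);

	set_cq_context(&cq, &ev);
	printf("with completion channel: cq_context = %p\n", cq.cq_context);
	return 0;
}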
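The modify_qp() hunks only derive the AH type when the matching attr_mask bit (IB_QP_AV or IB_QP_ALT_PATH) is set, since cmd->base.dest.port_num is unspecified otherwise and rdma_ah_find_type() indexes per-port data with it; the verbs.c and ib_verbs.h hunks additionally zero the new qp->port field at creation and cache the active port once an IB_QP_PORT modify succeeds. A rough, self-contained sketch of the guarded lookup follows, under made-up names (none of these are the real ib_core symbols).

#include <stdint.h>
#include <stdio.h>

#define QP_AV		(1u << 0)	/* hypothetical stand-in for IB_QP_AV */

enum ah_type { AH_TYPE_IB, AH_TYPE_ROCE };

struct dev_ports {
	unsigned int num_ports;
	enum ah_type port_type[2];	/* per-port type derived from the link layer */
};

/* bounds-checked stand-in for the port-indexed type lookup */
static int find_type(const struct dev_ports *dev, unsigned int port,
		     enum ah_type *out)
{
	if (port < 1 || port > dev->num_ports)
		return -1;		/* the unguarded path could read out of range here */
	*out = dev->port_type[port - 1];
	return 0;
}

int main(void)
{
	struct dev_ports dev = {
		.num_ports = 2,
		.port_type = { AH_TYPE_IB, AH_TYPE_ROCE },
	};
	uint32_t attr_mask = 0;		/* caller did not request an AV change */
	unsigned int port_num = 77;	/* never filled in by the caller */
	enum ah_type type = AH_TYPE_IB;

	/* mirrors the fix: only consult port_num when the mask says it is valid */
	if (attr_mask & QP_AV) {
		if (find_type(&dev, port_num, &type))
			return 1;
	}
	printf("ah type left at its default: %d\n", type);
	return 0;
}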
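The mlx5/qp.c hunk wires the driver's asynchronous event callback into the Raw Packet QP's send queue object so events on that SQ reach mlx5_ib_qp_event(). A toy sketch of why a missing callback assignment matters, again with made-up types rather than the mlx5 structures:

#include <stdio.h>

struct core_qp {
	void (*event)(struct core_qp *qp, int type);	/* async event hook */
	const char *name;
};

static void qp_event_handler(struct core_qp *qp, int type)
{
	printf("%s: delivering async event %d\n", qp->name, type);
}

static void dispatch_event(struct core_qp *qp, int type)
{
	/* events are lost if the creation path forgot to assign ->event */
	if (qp->event)
		qp->event(qp, type);
	else
		printf("%s: event %d dropped, no handler installed\n",
		       qp->name, type);
}

int main(void)
{
	struct core_qp regular = { qp_event_handler, "regular qp" };
	struct core_qp raw     = { NULL, "raw packet qp (before the fix)" };

	dispatch_event(&regular, 1);
	dispatch_event(&raw, 1);
	return 0;
}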