path: root/drivers/infiniband/sw/rxe/rxe_qp.c
author		Bob Pearson <rpearsonhpe@gmail.com>	2021-05-27 21:47:48 +0200
committer	Jason Gunthorpe <jgg@nvidia.com>	2021-06-03 20:53:01 +0200
commit		5bcf5a59c41e19141783c7305d420a5e36c937b2 (patch)
tree		899a03f565ca4445b39db55d7a04132cacab9ad3 /drivers/infiniband/sw/rxe/rxe_qp.c
parent		RDMA/rxe: Protect user space index loads/stores (diff)
RDMA/rxe: Protect kernel index from user space
In order to prevent user space from modifying the index that belongs to the kernel for shared queues, let the kernel use a local copy of the index and copy any new values of that index to the shared rxe_queue_buf struct.

This adds more switch statements, which decrease the performance of the queue API. Move the type into the parameter list for these functions so that the compiler can optimize out the switch statements when the explicit type is known. Modify all the calls in the driver on performance paths to pass in the explicit queue type.

Link: https://lore.kernel.org/r/20210527194748.662636-4-rpearsonhpe@gmail.com
Link: https://lore.kernel.org/linux-rdma/20210526165239.GP1002214@nvidia.com/
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
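For context, here is a minimal user-space sketch of the pattern the commit message describes; it is not the driver's actual rxe_queue.h code. The queue keeps a kernel-private index next to the index stored in the buffer shared with user space, and the queue type is an explicit parameter, so a call site that passes a constant type lets the compiler fold the switch away after inlining. The struct and field names (example_queue, example_queue_buf) and the QUEUE_TYPE_TO_USER case are illustrative assumptions; only QUEUE_TYPE_FROM_USER and QUEUE_TYPE_KERNEL appear in the diff below.

/*
 * Simplified illustration only -- not the rxe driver's implementation.
 * A "from user" queue is produced by user space, so the kernel must read
 * the producer index from the shared buffer; in the other cases the
 * kernel owns the index and can use its private copy.
 */
#include <stdint.h>

enum queue_type {
	QUEUE_TYPE_KERNEL,	/* queue used only by the kernel */
	QUEUE_TYPE_TO_USER,	/* kernel produces, user space consumes (assumed name) */
	QUEUE_TYPE_FROM_USER,	/* user space produces, kernel consumes */
};

struct example_queue_buf {	/* stands in for the buffer mapped to user space */
	uint32_t producer_index;
	uint32_t consumer_index;
};

struct example_queue {
	struct example_queue_buf *buf;	/* shared, potentially user-writable */
	uint32_t index;			/* kernel-private copy of the index */
};

static inline uint32_t producer_index(struct example_queue *q,
				      enum queue_type type)
{
	switch (type) {
	case QUEUE_TYPE_FROM_USER:
		/* user space owns this index: load the shared value once */
		return __atomic_load_n(&q->buf->producer_index,
				       __ATOMIC_ACQUIRE);
	case QUEUE_TYPE_TO_USER:
		/* kernel owns this index: the private copy is authoritative */
		return q->index;
	default:
		/* kernel-only queue: the buffer is not exposed to user space */
		return q->buf->producer_index;
	}
}

Passing the type explicitly, rather than looking it up at run time, is what allows constant propagation to remove the switch on the hot paths the commit message refers to.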
Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_qp.c')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 9bd6bf8f9bd9..a9256862464b 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -248,7 +248,13 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 		return err;
 	}
 
-	qp->req.wqe_index = producer_index(qp->sq.queue);
+	if (qp->is_user)
+		qp->req.wqe_index = producer_index(qp->sq.queue,
+						QUEUE_TYPE_FROM_USER);
+	else
+		qp->req.wqe_index = producer_index(qp->sq.queue,
+						QUEUE_TYPE_KERNEL);
+
 	qp->req.state = QP_STATE_RESET;
 	qp->req.opcode = -1;
 	qp->comp.opcode = -1;
@@ -306,6 +312,8 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
 	spin_lock_init(&qp->rq.producer_lock);
 	spin_lock_init(&qp->rq.consumer_lock);
 
+	qp->rq.is_user = qp->is_user;
+
 	skb_queue_head_init(&qp->resp_pkts);
 
 	rxe_init_task(rxe, &qp->resp.task, qp,
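A hypothetical call site in the style of the rxe_qp_init_req() hunk above, reusing the sketch types from earlier: each branch passes a compile-time-constant queue type, so after inlining the switch in the sketch's producer_index() collapses to a single load.

/* Hypothetical helper, mirroring the if/else added in the first hunk. */
static uint32_t init_wqe_index(struct example_queue *sq, int is_user)
{
	if (is_user)
		return producer_index(sq, QUEUE_TYPE_FROM_USER);

	return producer_index(sq, QUEUE_TYPE_KERNEL);
}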