Diffstat (limited to 'drivers/infiniband/sw/rxe')
-rw-r--r--  drivers/infiniband/sw/rxe/Kconfig      |   1
-rw-r--r--  drivers/infiniband/sw/rxe/Makefile     |   1
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.c        |   2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_comp.c   |  91
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_cq.c     |   4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_dma.c    | 183
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_hdr.h    |  12
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_loc.h    |  31
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mcast.c  |   8
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c     |  18
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c    |  53
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_pool.c   |  14
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_pool.h   |   8
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c     |  13
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_recv.c   |   2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_req.c    |  34
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c   |  66
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c  |  23
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.h  |  24
19 files changed, 192 insertions(+), 396 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index 1e4e628fe7b0..7d1ac27ed251 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -2,6 +2,7 @@ config RDMA_RXE
tristate "Software RDMA over Ethernet (RoCE) driver"
depends on INET && PCI && INFINIBAND
depends on NET_UDP_TUNNEL
+ select DMA_VIRT_OPS
---help---
This driver implements the InfiniBand RDMA transport over
the Linux network stack. It enables a system with a
diff --git a/drivers/infiniband/sw/rxe/Makefile b/drivers/infiniband/sw/rxe/Makefile
index 3b3fb9d1c470..ec35ff022a42 100644
--- a/drivers/infiniband/sw/rxe/Makefile
+++ b/drivers/infiniband/sw/rxe/Makefile
@@ -14,7 +14,6 @@ rdma_rxe-y := \
rxe_qp.o \
rxe_cq.o \
rxe_mr.o \
- rxe_dma.o \
rxe_opcode.o \
rxe_mmap.o \
rxe_icrc.o \
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index ab6c3c25d7ff..b12dd9b5a89d 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -178,7 +178,7 @@ static int rxe_init_ports(struct rxe_dev *rxe)
return -ENOMEM;
port->pkey_tbl[0] = 0xffff;
- port->port_guid = rxe->ifc_ops->port_guid(rxe);
+ port->port_guid = rxe_port_guid(rxe);
spin_lock_init(&port->port_lock);
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index d369f24425f9..4cd55d5617f7 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -254,7 +254,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
}
break;
default:
- WARN_ON(1);
+ WARN_ON_ONCE(1);
}
/* Check operation validity. */
@@ -412,13 +412,21 @@ static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
}
}
+/*
+ * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
+ * ---------8<---------8<-------------
+ * ...Note that if a completion error occurs, a Work Completion
+ * will always be generated, even if the signaling
+ * indicator requests an Unsignaled Completion.
+ * ---------8<---------8<-------------
+ */
static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
struct rxe_cqe cqe;
if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED) ||
- (qp->req.state == QP_STATE_ERROR)) {
+ wqe->status != IB_WC_SUCCESS) {
make_send_cqe(qp, wqe, &cqe);
advance_consumer(qp->sq.queue);
rxe_cq_post(qp->scq, &cqe, 0);
@@ -503,57 +511,40 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
return COMPST_GET_WQE;
}
-int rxe_completer(void *arg)
+static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
{
- struct rxe_qp *qp = (struct rxe_qp *)arg;
- struct rxe_send_wqe *wqe = wqe;
- struct sk_buff *skb = NULL;
- struct rxe_pkt_info *pkt = NULL;
- enum comp_state state;
-
- rxe_add_ref(qp);
-
- if (!qp->valid) {
- while ((skb = skb_dequeue(&qp->resp_pkts))) {
- rxe_drop_ref(qp);
- kfree_skb(skb);
- }
- skb = NULL;
- pkt = NULL;
-
- while (queue_head(qp->sq.queue))
- advance_consumer(qp->sq.queue);
+ struct sk_buff *skb;
+ struct rxe_send_wqe *wqe;
- goto exit;
+ while ((skb = skb_dequeue(&qp->resp_pkts))) {
+ rxe_drop_ref(qp);
+ kfree_skb(skb);
}
- if (qp->req.state == QP_STATE_ERROR) {
- while ((skb = skb_dequeue(&qp->resp_pkts))) {
- rxe_drop_ref(qp);
- kfree_skb(skb);
- }
- skb = NULL;
- pkt = NULL;
-
- while ((wqe = queue_head(qp->sq.queue))) {
+ while ((wqe = queue_head(qp->sq.queue))) {
+ if (notify) {
wqe->status = IB_WC_WR_FLUSH_ERR;
do_complete(qp, wqe);
+ } else {
+ advance_consumer(qp->sq.queue);
}
-
- goto exit;
}
+}
- if (qp->req.state == QP_STATE_RESET) {
- while ((skb = skb_dequeue(&qp->resp_pkts))) {
- rxe_drop_ref(qp);
- kfree_skb(skb);
- }
- skb = NULL;
- pkt = NULL;
+int rxe_completer(void *arg)
+{
+ struct rxe_qp *qp = (struct rxe_qp *)arg;
+ struct rxe_send_wqe *wqe = wqe;
+ struct sk_buff *skb = NULL;
+ struct rxe_pkt_info *pkt = NULL;
+ enum comp_state state;
- while (queue_head(qp->sq.queue))
- advance_consumer(qp->sq.queue);
+ rxe_add_ref(qp);
+ if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
+ qp->req.state == QP_STATE_RESET) {
+ rxe_drain_resp_pkts(qp, qp->valid &&
+ qp->req.state == QP_STATE_ERROR);
goto exit;
}
@@ -639,6 +630,7 @@ int rxe_completer(void *arg)
if (pkt) {
rxe_drop_ref(pkt->qp);
kfree_skb(skb);
+ skb = NULL;
}
goto done;
@@ -662,6 +654,7 @@ int rxe_completer(void *arg)
qp->qp_timeout_jiffies)
mod_timer(&qp->retrans_timer,
jiffies + qp->qp_timeout_jiffies);
+ WARN_ON_ONCE(skb);
goto exit;
case COMPST_ERROR_RETRY:
@@ -674,8 +667,10 @@ int rxe_completer(void *arg)
*/
/* there is nothing to retry in this case */
- if (!wqe || (wqe->state == wqe_state_posted))
+ if (!wqe || (wqe->state == wqe_state_posted)) {
+ WARN_ON_ONCE(skb);
goto exit;
+ }
if (qp->comp.retry_cnt > 0) {
if (qp->comp.retry_cnt != 7)
@@ -697,8 +692,10 @@ int rxe_completer(void *arg)
if (pkt) {
rxe_drop_ref(pkt->qp);
kfree_skb(skb);
+ skb = NULL;
}
+ WARN_ON_ONCE(skb);
goto exit;
} else {
@@ -718,6 +715,9 @@ int rxe_completer(void *arg)
mod_timer(&qp->rnr_nak_timer,
jiffies + rnrnak_jiffies(aeth_syn(pkt)
& ~AETH_TYPE_MASK));
+ rxe_drop_ref(pkt->qp);
+ kfree_skb(skb);
+ skb = NULL;
goto exit;
} else {
wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
@@ -726,14 +726,17 @@ int rxe_completer(void *arg)
break;
case COMPST_ERROR:
+ WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
do_complete(qp, wqe);
rxe_qp_error(qp);
if (pkt) {
rxe_drop_ref(pkt->qp);
kfree_skb(skb);
+ skb = NULL;
}
+ WARN_ON_ONCE(skb);
goto exit;
}
}
@@ -742,6 +745,7 @@ exit:
/* we come here if we are done with processing and want the task to
* exit from the loop calling us
*/
+ WARN_ON_ONCE(skb);
rxe_drop_ref(qp);
return -EAGAIN;
@@ -749,6 +753,7 @@ done:
/* we come here if we have processed a packet we want the task to call
* us again to see if there is anything else to do
*/
+ WARN_ON_ONCE(skb);
rxe_drop_ref(qp);
return 0;
}
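
The requester previously forced IB_SEND_SIGNALED on error so a completion would reach the CQ; this patch instead has do_complete() always generate a CQE whenever wqe->status is not IB_WC_SUCCESS, matching the IBA rule quoted above. A minimal sketch of the resulting predicate, using the field names from the hunk (rxe_wqe_needs_cqe is a hypothetical helper name, not part of the tree):

static bool rxe_wqe_needs_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	/* signal-all QPs, explicitly signaled WQEs, and any errored WQE */
	return (qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
	       (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	       wqe->status != IB_WC_SUCCESS;
}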
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index e5e6a5e7dee9..49fe42c23f4d 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -156,9 +156,9 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
return 0;
}
-void rxe_cq_cleanup(void *arg)
+void rxe_cq_cleanup(struct rxe_pool_entry *arg)
{
- struct rxe_cq *cq = arg;
+ struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);
if (cq->queue)
rxe_queue_cleanup(cq->queue);
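
Every pool cleanup callback in this patch moves from a bare void * to a typed struct rxe_pool_entry *, and each object recovers itself with container_of() through its embedded pelem member. A sketch of the pattern with a hypothetical pool object (rxe_foo and rxe_foo_cleanup are illustrative names, not in the tree):

struct rxe_foo {
	struct rxe_pool_entry	pelem;	/* embedded pool bookkeeping */
	struct rxe_queue	*queue;
};

static void rxe_foo_cleanup(struct rxe_pool_entry *arg)
{
	/* recover the containing object from the embedded entry */
	struct rxe_foo *foo = container_of(arg, struct rxe_foo, pelem);

	if (foo->queue)
		rxe_queue_cleanup(foo->queue);
}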
diff --git a/drivers/infiniband/sw/rxe/rxe_dma.c b/drivers/infiniband/sw/rxe/rxe_dma.c
deleted file mode 100644
index a0f8af5851ae..000000000000
--- a/drivers/infiniband/sw/rxe/rxe_dma.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
- * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "rxe.h"
-#include "rxe_loc.h"
-
-#define DMA_BAD_ADDER ((u64)0)
-
-static int rxe_mapping_error(struct ib_device *dev, u64 dma_addr)
-{
- return dma_addr == DMA_BAD_ADDER;
-}
-
-static u64 rxe_dma_map_single(struct ib_device *dev,
- void *cpu_addr, size_t size,
- enum dma_data_direction direction)
-{
- WARN_ON(!valid_dma_direction(direction));
- return (uintptr_t)cpu_addr;
-}
-
-static void rxe_dma_unmap_single(struct ib_device *dev,
- u64 addr, size_t size,
- enum dma_data_direction direction)
-{
- WARN_ON(!valid_dma_direction(direction));
-}
-
-static u64 rxe_dma_map_page(struct ib_device *dev,
- struct page *page,
- unsigned long offset,
- size_t size, enum dma_data_direction direction)
-{
- u64 addr;
-
- WARN_ON(!valid_dma_direction(direction));
-
- if (offset + size > PAGE_SIZE) {
- addr = DMA_BAD_ADDER;
- goto done;
- }
-
- addr = (uintptr_t)page_address(page);
- if (addr)
- addr += offset;
-
-done:
- return addr;
-}
-
-static void rxe_dma_unmap_page(struct ib_device *dev,
- u64 addr, size_t size,
- enum dma_data_direction direction)
-{
- WARN_ON(!valid_dma_direction(direction));
-}
-
-static int rxe_map_sg(struct ib_device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- u64 addr;
- int i;
- int ret = nents;
-
- WARN_ON(!valid_dma_direction(direction));
-
- for_each_sg(sgl, sg, nents, i) {
- addr = (uintptr_t)page_address(sg_page(sg));
- if (!addr) {
- ret = 0;
- break;
- }
- sg->dma_address = addr + sg->offset;
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- sg->dma_length = sg->length;
-#endif
- }
-
- return ret;
-}
-
-static void rxe_unmap_sg(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
-{
- WARN_ON(!valid_dma_direction(direction));
-}
-
-static int rxe_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
-{
- return rxe_map_sg(dev, sgl, nents, direction);
-}
-
-static void rxe_unmap_sg_attrs(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- rxe_unmap_sg(dev, sg, nents, direction);
-}
-
-static void rxe_sync_single_for_cpu(struct ib_device *dev,
- u64 addr,
- size_t size, enum dma_data_direction dir)
-{
-}
-
-static void rxe_sync_single_for_device(struct ib_device *dev,
- u64 addr,
- size_t size, enum dma_data_direction dir)
-{
-}
-
-static void *rxe_dma_alloc_coherent(struct ib_device *dev, size_t size,
- u64 *dma_handle, gfp_t flag)
-{
- struct page *p;
- void *addr = NULL;
-
- p = alloc_pages(flag, get_order(size));
- if (p)
- addr = page_address(p);
-
- if (dma_handle)
- *dma_handle = (uintptr_t)addr;
-
- return addr;
-}
-
-static void rxe_dma_free_coherent(struct ib_device *dev, size_t size,
- void *cpu_addr, u64 dma_handle)
-{
- free_pages((unsigned long)cpu_addr, get_order(size));
-}
-
-struct ib_dma_mapping_ops rxe_dma_mapping_ops = {
- .mapping_error = rxe_mapping_error,
- .map_single = rxe_dma_map_single,
- .unmap_single = rxe_dma_unmap_single,
- .map_page = rxe_dma_map_page,
- .unmap_page = rxe_dma_unmap_page,
- .map_sg = rxe_map_sg,
- .unmap_sg = rxe_unmap_sg,
- .map_sg_attrs = rxe_map_sg_attrs,
- .unmap_sg_attrs = rxe_unmap_sg_attrs,
- .sync_single_for_cpu = rxe_sync_single_for_cpu,
- .sync_single_for_device = rxe_sync_single_for_device,
- .alloc_coherent = rxe_dma_alloc_coherent,
- .free_coherent = rxe_dma_free_coherent
-};
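
The deleted file implemented a hand-rolled identity "DMA" mapping for this software device; the Kconfig hunk above selects DMA_VIRT_OPS, and rxe_verbs.c below points dev->dev.dma_ops at the generic dma_virt_ops instead. A rough sketch of what the deleted mapping did (and what the generic ops provide equivalently), assuming this reading of rxe_dma_map_page():

static u64 identity_map_page(struct page *page, unsigned long offset,
			     size_t size)
{
	u64 addr;

	/* a mapping may not cross the page it was given */
	if (offset + size > PAGE_SIZE)
		return 0;			/* the old DMA_BAD_ADDER sentinel */

	addr = (uintptr_t)page_address(page);	/* NULL for unmapped highmem */
	if (addr)
		addr += offset;

	return addr;	/* the "DMA address" is just a kernel virtual address */
}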
diff --git a/drivers/infiniband/sw/rxe/rxe_hdr.h b/drivers/infiniband/sw/rxe/rxe_hdr.h
index d57b5e956ceb..6cb18406f5b8 100644
--- a/drivers/infiniband/sw/rxe/rxe_hdr.h
+++ b/drivers/infiniband/sw/rxe/rxe_hdr.h
@@ -53,8 +53,16 @@ struct rxe_pkt_info {
};
/* Macros should be used only for received skb */
-#define SKB_TO_PKT(skb) ((struct rxe_pkt_info *)(skb)->cb)
-#define PKT_TO_SKB(pkt) container_of((void *)(pkt), struct sk_buff, cb)
+static inline struct rxe_pkt_info *SKB_TO_PKT(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct rxe_pkt_info) > sizeof(skb->cb));
+ return (void *)skb->cb;
+}
+
+static inline struct sk_buff *PKT_TO_SKB(struct rxe_pkt_info *pkt)
+{
+ return container_of((void *)pkt, struct sk_buff, cb);
+}
/*
* IBA header types and methods
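
Turning SKB_TO_PKT()/PKT_TO_SKB() into inline functions buys type checking and lets BUILD_BUG_ON() verify at compile time that the per-packet state fits in the 48-byte skb->cb control block. The same pattern in isolation, with a hypothetical state structure for illustration:

struct example_pkt_state {		/* illustrative only, not in the tree */
	u64	wr_id;
	u32	psn;
};

static inline struct example_pkt_state *skb_to_example(struct sk_buff *skb)
{
	/* build fails if the state ever outgrows skb->cb */
	BUILD_BUG_ON(sizeof(struct example_pkt_state) > sizeof(skb->cb));
	return (void *)skb->cb;
}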
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index efe4c6a35442..183a9d379b41 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -64,7 +64,7 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe, struct ib_udata *udata);
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
-void rxe_cq_cleanup(void *arg);
+void rxe_cq_cleanup(struct rxe_pool_entry *arg);
/* rxe_mcast.c */
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
@@ -78,7 +78,7 @@ int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
void rxe_drop_all_mcast_groups(struct rxe_qp *qp);
-void rxe_mc_cleanup(void *arg);
+void rxe_mc_cleanup(struct rxe_pool_entry *arg);
/* rxe_mmap.c */
struct rxe_mmap_info {
@@ -137,10 +137,26 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);
int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
u64 *page, int num_pages, u64 iova);
-void rxe_mem_cleanup(void *arg);
+void rxe_mem_cleanup(struct rxe_pool_entry *arg);
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
+/* rxe_net.c */
+int rxe_loopback(struct sk_buff *skb);
+int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb);
+__be64 rxe_port_guid(struct rxe_dev *rxe);
+struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
+ int paylen, struct rxe_pkt_info *pkt);
+int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb, u32 *crc);
+enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num);
+const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
+struct device *rxe_dma_device(struct rxe_dev *rxe);
+__be64 rxe_node_guid(struct rxe_dev *rxe);
+int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);
+int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);
+
/* rxe_qp.c */
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);
@@ -162,7 +178,7 @@ void rxe_qp_error(struct rxe_qp *qp);
void rxe_qp_destroy(struct rxe_qp *qp);
-void rxe_qp_cleanup(void *arg);
+void rxe_qp_cleanup(struct rxe_pool_entry *arg);
static inline int qp_num(struct rxe_qp *qp)
{
@@ -221,10 +237,9 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
struct ib_udata *udata);
-extern struct ib_dma_mapping_ops rxe_dma_mapping_ops;
-
void rxe_release(struct kref *kref);
+void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify);
int rxe_completer(void *arg);
int rxe_requester(void *arg);
int rxe_responder(void *arg);
@@ -256,9 +271,9 @@ static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp,
if (pkt->mask & RXE_LOOPBACK_MASK) {
memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
- err = rxe->ifc_ops->loopback(skb);
+ err = rxe_loopback(skb);
} else {
- err = rxe->ifc_ops->send(rxe, pkt, skb);
+ err = rxe_send(rxe, pkt, skb);
}
if (err) {
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
index fa95544ca7e0..522a7942c56c 100644
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -61,7 +61,7 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
rxe_add_key(grp, mgid);
- err = rxe->ifc_ops->mcast_add(rxe, mgid);
+ err = rxe_mcast_add(rxe, mgid);
if (err)
goto err2;
@@ -180,11 +180,11 @@ void rxe_drop_all_mcast_groups(struct rxe_qp *qp)
}
}
-void rxe_mc_cleanup(void *arg)
+void rxe_mc_cleanup(struct rxe_pool_entry *arg)
{
- struct rxe_mc_grp *grp = arg;
+ struct rxe_mc_grp *grp = container_of(arg, typeof(*grp), pelem);
struct rxe_dev *rxe = grp->rxe;
rxe_drop_key(grp);
- rxe->ifc_ops->mcast_delete(rxe, &grp->mgid);
+ rxe_mcast_delete(rxe, &grp->mgid);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index d0faca294006..37eea7441ca4 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -59,9 +59,11 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
case RXE_MEM_TYPE_MR:
case RXE_MEM_TYPE_FMR:
- return ((iova < mem->iova) ||
- ((iova + length) > (mem->iova + mem->length))) ?
- -EFAULT : 0;
+ if (iova < mem->iova ||
+ length > mem->length ||
+ iova > mem->iova + mem->length - length)
+ return -EFAULT;
+ return 0;
default:
return -EFAULT;
@@ -89,9 +91,9 @@ static void rxe_mem_init(int access, struct rxe_mem *mem)
mem->map_shift = ilog2(RXE_BUF_PER_MAP);
}
-void rxe_mem_cleanup(void *arg)
+void rxe_mem_cleanup(struct rxe_pool_entry *arg)
{
- struct rxe_mem *mem = arg;
+ struct rxe_mem *mem = container_of(arg, typeof(*mem), pelem);
int i;
if (mem->umem)
@@ -123,7 +125,7 @@ static int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf)
goto err2;
}
- WARN_ON(!is_power_of_2(RXE_BUF_PER_MAP));
+ BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
mem->map_shift = ilog2(RXE_BUF_PER_MAP);
mem->map_mask = RXE_BUF_PER_MAP - 1;
@@ -189,7 +191,7 @@ int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
goto err1;
}
- WARN_ON(!is_power_of_2(umem->page_size));
+ WARN_ON_ONCE(!is_power_of_2(umem->page_size));
mem->page_shift = ilog2(umem->page_size);
mem->page_mask = umem->page_size - 1;
@@ -375,7 +377,7 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
return 0;
}
- WARN_ON(!mem->map);
+ WARN_ON_ONCE(!mem->map);
err = mem_check_range(mem, iova, length);
if (err) {
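
The rewritten mem_check_range() avoids forming the untrusted sum iova + length, which could wrap a 64-bit value and slip past the old upper-bound test. A standalone sketch of the reordered checks, under that reading of the hunk:

static int check_range(u64 region_iova, size_t region_len,
		       u64 iova, size_t length)
{
	/*
	 * Reject, in order: a start below the region, a length longer than
	 * the region, and a start past the last offset that can still hold
	 * 'length' bytes. The caller-controlled iova + length is never
	 * added together, so it cannot overflow past the bound.
	 */
	if (iova < region_iova ||
	    length > region_len ||
	    iova > region_iova + region_len - length)
		return -EFAULT;

	return 0;
}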
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 4abdeb359fb4..d8610960630a 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -102,29 +102,29 @@ static __be64 rxe_mac_to_eui64(struct net_device *ndev)
return eui64;
}
-static __be64 node_guid(struct rxe_dev *rxe)
+__be64 rxe_node_guid(struct rxe_dev *rxe)
{
return rxe_mac_to_eui64(rxe->ndev);
}
-static __be64 port_guid(struct rxe_dev *rxe)
+__be64 rxe_port_guid(struct rxe_dev *rxe)
{
return rxe_mac_to_eui64(rxe->ndev);
}
-static struct device *dma_device(struct rxe_dev *rxe)
+struct device *rxe_dma_device(struct rxe_dev *rxe)
{
struct net_device *ndev;
ndev = rxe->ndev;
- if (ndev->priv_flags & IFF_802_1Q_VLAN)
+ if (is_vlan_dev(ndev))
ndev = vlan_dev_real_dev(ndev);
return ndev->dev.parent;
}
-static int mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
+int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
int err;
unsigned char ll_addr[ETH_ALEN];
@@ -135,7 +135,7 @@ static int mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
return err;
}
-static int mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
+int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
{
int err;
unsigned char ll_addr[ETH_ALEN];
@@ -243,8 +243,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
{
int err;
struct socket *sock;
- struct udp_port_cfg udp_cfg = {0};
- struct udp_tunnel_sock_cfg tnl_cfg = {0};
+ struct udp_port_cfg udp_cfg = { };
+ struct udp_tunnel_sock_cfg tnl_cfg = { };
if (ipv6) {
udp_cfg.family = AF_INET6;
@@ -397,8 +397,8 @@ static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
return 0;
}
-static int prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
- struct sk_buff *skb, u32 *crc)
+int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb, u32 *crc)
{
int err = 0;
struct rxe_av *av = rxe_get_av(pkt);
@@ -424,8 +424,7 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
rxe_run_task(&qp->req.task, 1);
}
-static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
- struct sk_buff *skb)
+int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
struct sk_buff *nskb;
struct rxe_av *av;
@@ -461,7 +460,7 @@ static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
return 0;
}
-static int loopback(struct sk_buff *skb)
+int rxe_loopback(struct sk_buff *skb)
{
return rxe_rcv(skb);
}
@@ -471,8 +470,8 @@ static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av)
return rxe->port.port_guid == av->grh.dgid.global.interface_id;
}
-static struct sk_buff *init_packet(struct rxe_dev *rxe, struct rxe_av *av,
- int paylen, struct rxe_pkt_info *pkt)
+struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
+ int paylen, struct rxe_pkt_info *pkt)
{
unsigned int hdr_len;
struct sk_buff *skb;
@@ -511,31 +510,16 @@ static struct sk_buff *init_packet(struct rxe_dev *rxe, struct rxe_av *av,
* this is required by rxe_cfg to match rxe devices in
* /sys/class/infiniband up with their underlying ethernet devices
*/
-static char *parent_name(struct rxe_dev *rxe, unsigned int port_num)
+const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
{
return rxe->ndev->name;
}
-static enum rdma_link_layer link_layer(struct rxe_dev *rxe,
- unsigned int port_num)
+enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num)
{
return IB_LINK_LAYER_ETHERNET;
}
-static struct rxe_ifc_ops ifc_ops = {
- .node_guid = node_guid,
- .port_guid = port_guid,
- .dma_device = dma_device,
- .mcast_add = mcast_add,
- .mcast_delete = mcast_delete,
- .prepare = prepare,
- .send = send,
- .loopback = loopback,
- .init_packet = init_packet,
- .parent_name = parent_name,
- .link_layer = link_layer,
-};
-
struct rxe_dev *rxe_net_add(struct net_device *ndev)
{
int err;
@@ -545,7 +529,6 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev)
if (!rxe)
return NULL;
- rxe->ifc_ops = &ifc_ops;
rxe->ndev = ndev;
err = rxe_add(rxe, ndev->mtu);
@@ -658,7 +641,7 @@ struct notifier_block rxe_net_notifier = {
.notifier_call = rxe_notify,
};
-int rxe_net_ipv4_init(void)
+static int rxe_net_ipv4_init(void)
{
recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
htons(ROCE_V2_UDP_DPORT), false);
@@ -671,7 +654,7 @@ int rxe_net_ipv4_init(void)
return 0;
}
-int rxe_net_ipv6_init(void)
+static int rxe_net_ipv6_init(void)
{
#if IS_ENABLED(CONFIG_IPV6)
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index d723947a8542..75d11ee635ec 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -102,7 +102,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
},
};
-static inline char *pool_name(struct rxe_pool *pool)
+static inline const char *pool_name(struct rxe_pool *pool)
{
return rxe_type_info[pool->type].name;
}
@@ -112,13 +112,6 @@ static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
return rxe_type_info[pool->type].cache;
}
-static inline enum rxe_elem_type rxe_type(void *arg)
-{
- struct rxe_pool_entry *elem = arg;
-
- return elem->pool->type;
-}
-
int rxe_cache_init(void)
{
int err;
@@ -273,6 +266,7 @@ static u32 alloc_index(struct rxe_pool *pool)
if (index >= range)
index = find_first_zero_bit(pool->table, range);
+ WARN_ON_ONCE(index >= range);
set_bit(index, pool->table);
pool->last = index;
return index + pool->min_index;
@@ -461,7 +455,7 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
out:
spin_unlock_irqrestore(&pool->pool_lock, flags);
- return node ? (void *)elem : NULL;
+ return node ? elem : NULL;
}
void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
@@ -497,5 +491,5 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
out:
spin_unlock_irqrestore(&pool->pool_lock, flags);
- return node ? ((void *)elem) : NULL;
+ return node ? elem : NULL;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 4d04830adcae..47df28e43acf 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -57,10 +57,12 @@ enum rxe_elem_type {
RXE_NUM_TYPES, /* keep me last */
};
+struct rxe_pool_entry;
+
struct rxe_type_info {
- char *name;
+ const char *name;
size_t size;
- void (*cleanup)(void *obj);
+ void (*cleanup)(struct rxe_pool_entry *obj);
enum rxe_pool_flags flags;
u32 max_index;
u32 min_index;
@@ -91,7 +93,7 @@ struct rxe_pool {
spinlock_t pool_lock; /* pool spinlock */
size_t elem_size;
struct kref ref_cnt;
- void (*cleanup)(void *obj);
+ void (*cleanup)(struct rxe_pool_entry *obj);
enum rxe_pool_state state;
enum rxe_pool_flags flags;
enum rxe_elem_type type;
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 44b2108253bd..f98a19e61a3d 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -273,13 +273,8 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
rxe_init_task(rxe, &qp->comp.task, qp,
rxe_completer, "comp");
- init_timer(&qp->rnr_nak_timer);
- qp->rnr_nak_timer.function = rnr_nak_timer;
- qp->rnr_nak_timer.data = (unsigned long)qp;
-
- init_timer(&qp->retrans_timer);
- qp->retrans_timer.function = retransmit_timer;
- qp->retrans_timer.data = (unsigned long)qp;
+ setup_timer(&qp->rnr_nak_timer, rnr_nak_timer, (unsigned long)qp);
+ setup_timer(&qp->retrans_timer, retransmit_timer, (unsigned long)qp);
qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
return 0;
@@ -824,9 +819,9 @@ void rxe_qp_destroy(struct rxe_qp *qp)
}
/* called when the last reference to the qp is dropped */
-void rxe_qp_cleanup(void *arg)
+void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
- struct rxe_qp *qp = arg;
+ struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
rxe_drop_all_mcast_groups(qp);
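
The open-coded init_timer() plus manual .function/.data assignments collapse into setup_timer(), a direct equivalent in this generation of the timer API (before timer_setup()). The two forms side by side, taken from the hunk:

	/* before */
	init_timer(&qp->rnr_nak_timer);
	qp->rnr_nak_timer.function = rnr_nak_timer;
	qp->rnr_nak_timer.data = (unsigned long)qp;

	/* after */
	setup_timer(&qp->rnr_nak_timer, rnr_nak_timer, (unsigned long)qp);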
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 252b4d637d45..50886031096f 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -389,7 +389,7 @@ int rxe_rcv(struct sk_buff *skb)
calc_icrc = rxe_icrc_hdr(pkt, skb);
calc_icrc = crc32_le(calc_icrc, (u8 *)payload_addr(pkt),
payload_size(pkt));
- calc_icrc = cpu_to_be32(~calc_icrc);
+ calc_icrc = (__force u32)cpu_to_be32(~calc_icrc);
if (unlikely(calc_icrc != pack_icrc)) {
if (skb->protocol == htons(ETH_P_IPV6))
pr_warn_ratelimited("bad ICRC from %pI6c\n",
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 73d4a97603a1..dbfde0dc6ff7 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -361,19 +361,14 @@ static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
return -EAGAIN;
}
-static inline int get_mtu(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+static inline int get_mtu(struct rxe_qp *qp)
{
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
- struct rxe_port *port;
- struct rxe_av *av;
if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
return qp->mtu;
- av = &wqe->av;
- port = &rxe->port;
-
- return port->mtu_cap;
+ return rxe->port.mtu_cap;
}
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
@@ -409,7 +404,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
/* init skb */
av = rxe_get_av(pkt);
- skb = rxe->ifc_ops->init_packet(rxe, av, paylen, pkt);
+ skb = rxe_init_packet(rxe, av, paylen, pkt);
if (unlikely(!skb))
return NULL;
@@ -480,7 +475,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
u32 *p;
int err;
- err = rxe->ifc_ops->prepare(rxe, pkt, skb, &crc);
+ err = rxe_prepare(rxe, pkt, skb, &crc);
if (err)
return err;
@@ -599,8 +594,13 @@ int rxe_requester(void *arg)
rxe_add_ref(qp);
next_wqe:
- if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
+ if (unlikely(!qp->valid))
+ goto exit;
+
+ if (unlikely(qp->req.state == QP_STATE_ERROR)) {
+ rxe_drain_req_pkts(qp, true);
goto exit;
+ }
if (unlikely(qp->req.state == QP_STATE_RESET)) {
qp->req.wqe_index = consumer_index(qp->sq.queue);
@@ -635,6 +635,7 @@ next_wqe:
goto exit;
}
rmr->state = RXE_MEM_STATE_FREE;
+ rxe_drop_ref(rmr);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
} else if (wqe->wr.opcode == IB_WR_REG_MR) {
@@ -679,7 +680,7 @@ next_wqe:
goto exit;
}
- mtu = get_mtu(qp, wqe);
+ mtu = get_mtu(qp);
payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
if (payload > mtu) {
if (qp_type(qp) == IB_QPT_UD) {
@@ -748,17 +749,8 @@ err:
kfree_skb(skb);
wqe->status = IB_WC_LOC_PROT_ERR;
wqe->state = wqe_state_error;
-
- /*
- * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
- * ---------8<---------8<-------------
- * ...Note that if a completion error occurs, a Work Completion
- * will always be generated, even if the signaling
- * indicator requests an Unsignaled Completion.
- * ---------8<---------8<-------------
- */
- wqe->wr.send_flags |= IB_SEND_SIGNALED;
__rxe_do_task(&qp->comp.task);
+
exit:
rxe_drop_ref(qp);
return -EAGAIN;
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 3435efff8799..d404a8aba7af 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -307,7 +307,7 @@ static enum resp_states check_op_valid(struct rxe_qp *qp,
break;
default:
- WARN_ON(1);
+ WARN_ON_ONCE(1);
break;
}
@@ -418,7 +418,7 @@ static enum resp_states check_length(struct rxe_qp *qp,
static enum resp_states check_rkey(struct rxe_qp *qp,
struct rxe_pkt_info *pkt)
{
- struct rxe_mem *mem;
+ struct rxe_mem *mem = NULL;
u64 va;
u32 rkey;
u32 resid;
@@ -459,50 +459,50 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
if (!mem) {
state = RESPST_ERR_RKEY_VIOLATION;
- goto err1;
+ goto err;
}
if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
state = RESPST_ERR_RKEY_VIOLATION;
- goto err1;
+ goto err;
}
if (mem_check_range(mem, va, resid)) {
state = RESPST_ERR_RKEY_VIOLATION;
- goto err2;
+ goto err;
}
if (pkt->mask & RXE_WRITE_MASK) {
if (resid > mtu) {
if (pktlen != mtu || bth_pad(pkt)) {
state = RESPST_ERR_LENGTH;
- goto err2;
+ goto err;
}
- resid = mtu;
+ qp->resp.resid = mtu;
} else {
if (pktlen != resid) {
state = RESPST_ERR_LENGTH;
- goto err2;
+ goto err;
}
if ((bth_pad(pkt) != (0x3 & (-resid)))) {
/* This case may not be exactly that
* but nothing else fits.
*/
state = RESPST_ERR_LENGTH;
- goto err2;
+ goto err;
}
}
}
- WARN_ON(qp->resp.mr);
+ WARN_ON_ONCE(qp->resp.mr);
qp->resp.mr = mem;
return RESPST_EXECUTE;
-err2:
- rxe_drop_ref(mem);
-err1:
+err:
+ if (mem)
+ rxe_drop_ref(mem);
return state;
}
@@ -608,7 +608,7 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
pad = (-payload) & 0x3;
paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
- skb = rxe->ifc_ops->init_packet(rxe, &qp->pri_av, paylen, ack);
+ skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
if (!skb)
return NULL;
@@ -637,7 +637,7 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
if (ack->mask & RXE_ATMACK_MASK)
atmack_set_orig(ack, qp->resp.atomic_orig);
- err = rxe->ifc_ops->prepare(rxe, ack, skb, &crc);
+ err = rxe_prepare(rxe, ack, skb, &crc);
if (err) {
kfree_skb(skb);
return NULL;
@@ -808,9 +808,10 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
err = process_atomic(qp, pkt);
if (err)
return err;
- } else
+ } else {
/* Unreachable */
- WARN_ON(1);
+ WARN_ON_ONCE(1);
+ }
/* We successfully processed this new request. */
qp->resp.msn++;
@@ -906,6 +907,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
return RESPST_ERROR;
}
rmr->state = RXE_MEM_STATE_FREE;
+ rxe_drop_ref(rmr);
}
wc->qp = &qp->ibqp;
@@ -1206,6 +1208,19 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
}
}
+void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&qp->req_pkts))) {
+ rxe_drop_ref(qp);
+ kfree_skb(skb);
+ }
+
+ while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
+ advance_consumer(qp->rq.queue);
+}
+
int rxe_responder(void *arg)
{
struct rxe_qp *qp = (struct rxe_qp *)arg;
@@ -1373,21 +1388,10 @@ int rxe_responder(void *arg)
goto exit;
- case RESPST_RESET: {
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&qp->req_pkts))) {
- rxe_drop_ref(qp);
- kfree_skb(skb);
- }
-
- while (!qp->srq && qp->rq.queue &&
- queue_head(qp->rq.queue))
- advance_consumer(qp->rq.queue);
-
+ case RESPST_RESET:
+ rxe_drain_req_pkts(qp, false);
qp->resp.wqe = NULL;
goto exit;
- }
case RESPST_ERROR:
qp->resp.goto_error = 0;
@@ -1396,7 +1400,7 @@ int rxe_responder(void *arg)
goto exit;
default:
- WARN_ON(1);
+ WARN_ON_ONCE(1);
}
}
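
check_rkey() now funnels every failure through a single err: label; initialising mem to NULL lets one conditional rxe_drop_ref() cover both the failed-lookup and failed-validation paths. The shape of that exit, condensed from the hunk (a fragment, not the full function):

	struct rxe_mem *mem = NULL;

	mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
	if (!mem) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}
	/* ... range and length checks also jump to err on failure ... */

err:
	if (mem)	/* only drop the reference a successful lookup took */
		rxe_drop_ref(mem);
	return state;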
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index beb7021ff18a..5113e502f6f9 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/dma-mapping.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
@@ -86,6 +87,7 @@ static int rxe_query_port(struct ib_device *dev,
port = &rxe->port;
+ /* *attr being zeroed by the caller, avoid zeroing it here */
*attr = port->attr;
mutex_lock(&rxe->usdev_lock);
@@ -168,7 +170,7 @@ static int rxe_query_pkey(struct ib_device *device,
struct rxe_port *port;
if (unlikely(port_num != 1)) {
- dev_warn(device->dma_device, "invalid port_num = %d\n",
+ dev_warn(device->dev.parent, "invalid port_num = %d\n",
port_num);
goto err1;
}
@@ -176,7 +178,7 @@ static int rxe_query_pkey(struct ib_device *device,
port = &rxe->port;
if (unlikely(index >= port->attr.pkey_tbl_len)) {
- dev_warn(device->dma_device, "invalid index = %d\n",
+ dev_warn(device->dev.parent, "invalid index = %d\n",
index);
goto err1;
}
@@ -234,7 +236,7 @@ static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
{
struct rxe_dev *rxe = to_rdev(dev);
- return rxe->ifc_ops->link_layer(rxe, port_num);
+ return rxe_link_layer(rxe, port_num);
}
static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
@@ -261,13 +263,14 @@ static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
int err;
struct ib_port_attr attr;
- err = rxe_query_port(dev, port_num, &attr);
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+
+ err = ib_query_port(dev, port_num, &attr);
if (err)
return err;
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
return 0;
@@ -1209,10 +1212,8 @@ static ssize_t rxe_show_parent(struct device *device,
{
struct rxe_dev *rxe = container_of(device, struct rxe_dev,
ib_dev.dev);
- char *name;
- name = rxe->ifc_ops->parent_name(rxe, 1);
- return snprintf(buf, 16, "%s\n", name);
+ return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}
static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL);
@@ -1234,10 +1235,10 @@ int rxe_register_device(struct rxe_dev *rxe)
dev->node_type = RDMA_NODE_IB_CA;
dev->phys_port_cnt = 1;
dev->num_comp_vectors = RXE_NUM_COMP_VECTORS;
- dev->dma_device = rxe->ifc_ops->dma_device(rxe);
+ dev->dev.parent = rxe_dma_device(rxe);
dev->local_dma_lkey = 0;
- dev->node_guid = rxe->ifc_ops->node_guid(rxe);
- dev->dma_ops = &rxe_dma_mapping_ops;
+ dev->node_guid = rxe_node_guid(rxe);
+ dev->dev.dma_ops = &dma_virt_ops;
dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index cac1d52a08f0..e100c500ae85 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -372,26 +372,6 @@ struct rxe_port {
u32 qp_gsi_index;
};
-/* callbacks from rdma_rxe to network interface layer */
-struct rxe_ifc_ops {
- void (*release)(struct rxe_dev *rxe);
- __be64 (*node_guid)(struct rxe_dev *rxe);
- __be64 (*port_guid)(struct rxe_dev *rxe);
- struct device *(*dma_device)(struct rxe_dev *rxe);
- int (*mcast_add)(struct rxe_dev *rxe, union ib_gid *mgid);
- int (*mcast_delete)(struct rxe_dev *rxe, union ib_gid *mgid);
- int (*prepare)(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
- struct sk_buff *skb, u32 *crc);
- int (*send)(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
- struct sk_buff *skb);
- int (*loopback)(struct sk_buff *skb);
- struct sk_buff *(*init_packet)(struct rxe_dev *rxe, struct rxe_av *av,
- int paylen, struct rxe_pkt_info *pkt);
- char *(*parent_name)(struct rxe_dev *rxe, unsigned int port_num);
- enum rdma_link_layer (*link_layer)(struct rxe_dev *rxe,
- unsigned int port_num);
-};
-
struct rxe_dev {
struct ib_device ib_dev;
struct ib_device_attr attr;
@@ -400,8 +380,6 @@ struct rxe_dev {
struct kref ref_cnt;
struct mutex usdev_lock;
- struct rxe_ifc_ops *ifc_ops;
-
struct net_device *ndev;
int xmit_errors;
@@ -475,6 +453,6 @@ static inline struct rxe_mem *to_rmw(struct ib_mw *mw)
int rxe_register_device(struct rxe_dev *rxe);
int rxe_unregister_device(struct rxe_dev *rxe);
-void rxe_mc_cleanup(void *arg);
+void rxe_mc_cleanup(struct rxe_pool_entry *arg);
#endif /* RXE_VERBS_H */
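
With struct rxe_ifc_ops gone, every former ops-table hook is an ordinary function exported from rxe_net.c and declared in rxe_loc.h, so call sites lose one pointer indirection. A typical before/after, taken from the rxe_xmit_packet() hunk:

	/* before: indirect call through the per-device ops table */
	err = rxe->ifc_ops->send(rxe, pkt, skb);

	/* after: direct call into rxe_net.c */
	err = rxe_send(rxe, pkt, skb);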