Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/addr.c                    |  8
-rw-r--r--  drivers/infiniband/core/iwpm_util.c               |  8
-rw-r--r--  drivers/infiniband/core/nldev.c                   | 45
-rw-r--r--  drivers/infiniband/core/sa_query.c                |  8
-rw-r--r--  drivers/infiniband/core/uverbs_main.c             |  3
-rw-r--r--  drivers/infiniband/hw/cxgb4/restrack.c            |  8
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c                 | 26
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.c                   |  4
-rw-r--r--  drivers/infiniband/hw/hfi1/rc.c                   |  4
-rw-r--r--  drivers/infiniband/hw/hfi1/tid_rdma.c             | 31
-rw-r--r--  drivers/infiniband/hw/hfi1/vnic_main.c            |  3
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.c          |  6
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_mr.c           |  4
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c           |  3
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_utils.c         | 12
-rw-r--r--  drivers/infiniband/hw/mlx4/alias_GUID.c           |  2
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c                 | 34
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c                 |  7
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c                  |  3
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c                   |  4
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c                |  2
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c    |  2
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c |  6
23 files changed, 136 insertions(+), 97 deletions(-)
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 0dce94e3c495..744b6ec0acb0 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -42,7 +42,7 @@
#include <net/neighbour.h>
#include <net/route.h>
#include <net/netevent.h>
-#include <net/addrconf.h>
+#include <net/ipv6_stubs.h>
#include <net/ip6_route.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_sa.h>
@@ -86,8 +86,8 @@ static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
return false;
- ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
- nlmsg_len(nlh), ib_nl_addr_policy, NULL);
+ ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+ nlmsg_len(nlh), ib_nl_addr_policy, NULL);
if (ret)
return false;
@@ -351,7 +351,7 @@ static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
if (family == AF_INET) {
rt = container_of(dst, struct rtable, dst);
- return rt->rt_uses_gateway;
+ return rt->rt_gw_family == AF_INET;
}
rt6 = container_of(dst, struct rt6_info, dst);
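
Note on the nla_parse() conversion above: the 5.2 netlink rework made nla_parse() and friends strictly validating, and existing callers were mechanically switched to the *_deprecated variants, which keep the old lenient behaviour (unknown attribute types and trailing data are tolerated). A minimal sketch of the converted pattern — the EX_ATTR_* names and policy are invented for illustration:

#include <net/netlink.h>

enum { EX_ATTR_UNSPEC, EX_ATTR_TIMEOUT, __EX_ATTR_MAX };
#define EX_ATTR_MAX (__EX_ATTR_MAX - 1)

static const struct nla_policy ex_policy[EX_ATTR_MAX + 1] = {
	[EX_ATTR_TIMEOUT] = { .type = NLA_U32 },
};

static int ex_parse(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[EX_ATTR_MAX + 1];
	int ret;

	/* Lenient parse: unknown attributes and trailing data are
	 * accepted, matching the pre-5.2 nla_parse() behaviour. */
	ret = nla_parse_deprecated(tb, EX_ATTR_MAX, nlmsg_data(nlh),
				   nlmsg_len(nlh), ex_policy, NULL);
	if (ret)
		return ret;
	return tb[EX_ATTR_TIMEOUT] ? 0 : -EINVAL;
}
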
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index a5d2a20ee697..41929bb83739 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -506,14 +506,14 @@ int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max,
int ret;
const char *err_str = "";
- ret = nlmsg_validate(cb->nlh, nlh_len, policy_max - 1, nlmsg_policy,
- NULL);
+ ret = nlmsg_validate_deprecated(cb->nlh, nlh_len, policy_max - 1,
+ nlmsg_policy, NULL);
if (ret) {
err_str = "Invalid attribute";
goto parse_nlmsg_error;
}
- ret = nlmsg_parse(cb->nlh, nlh_len, nltb, policy_max - 1,
- nlmsg_policy, NULL);
+ ret = nlmsg_parse_deprecated(cb->nlh, nlh_len, nltb, policy_max - 1,
+ nlmsg_policy, NULL);
if (ret) {
err_str = "Unable to parse the nlmsg";
goto parse_nlmsg_error;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 11ed58d3fce5..85324012bf07 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -292,7 +292,8 @@ static int fill_res_info_entry(struct sk_buff *msg,
{
struct nlattr *entry_attr;
- entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
+ entry_attr = nla_nest_start_noflag(msg,
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
if (!entry_attr)
return -EMSGSIZE;
@@ -327,7 +328,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
if (fill_nldev_handle(msg, device))
return -EMSGSIZE;
- table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
if (!table_attr)
return -EMSGSIZE;
@@ -607,8 +608,8 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
@@ -652,8 +653,8 @@ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
- extack);
+ err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
@@ -721,8 +722,8 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 port;
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err ||
!tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
!tb[RDMA_NLDEV_ATTR_PORT_INDEX])
@@ -777,8 +778,8 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
int err;
unsigned int p;
- err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, NULL);
+ err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NULL);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
@@ -832,8 +833,8 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int ret;
- ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
@@ -981,8 +982,8 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct sk_buff *msg;
int ret;
- ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
return -EINVAL;
@@ -1070,8 +1071,8 @@ static int res_get_common_dumpit(struct sk_buff *skb,
u32 index, port = 0;
bool filled = false;
- err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, NULL);
+ err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NULL);
/*
* Right now, we are expecting the device index to get res information,
* but it is possible to extend this code to return all devices in
@@ -1108,7 +1109,7 @@ static int res_get_common_dumpit(struct sk_buff *skb,
goto err;
}
- table_attr = nla_nest_start(skb, fe->nldev_attr);
+ table_attr = nla_nest_start_noflag(skb, fe->nldev_attr);
if (!table_attr) {
ret = -EMSGSIZE;
goto err;
@@ -1134,7 +1135,7 @@ static int res_get_common_dumpit(struct sk_buff *skb,
filled = true;
- entry_attr = nla_nest_start(skb, fe->entry);
+ entry_attr = nla_nest_start_noflag(skb, fe->entry);
if (!entry_attr) {
ret = -EMSGSIZE;
rdma_restrack_put(res);
@@ -1249,8 +1250,8 @@ static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
char type[IFNAMSIZ];
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
!tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
return -EINVAL;
@@ -1293,8 +1294,8 @@ static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
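
The nla_nest_start() renames in this file come from the same series: nla_nest_start() now sets NLA_F_NESTED on the container attribute, so callers whose userspace ABI predates the flag were converted to nla_nest_start_noflag(). A sketch of the dump-side pattern, using the real RDMA_NLDEV_* attribute IDs but with a simplified payload:

static int ex_fill_summary(struct sk_buff *msg)
{
	struct nlattr *nest;

	/* _noflag keeps the unflagged wire format old rdma-core expects */
	nest = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
	if (!nest)
		return -EMSGSIZE;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME,
			   "qp")) {
		nla_nest_cancel(msg, nest);
		return -EMSGSIZE;
	}
	nla_nest_end(msg, nest);
	return 0;
}
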
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7925e45ea88a..bb534959abf0 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1028,8 +1028,8 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
!(NETLINK_CB(skb).sk))
return -EPERM;
- ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
- nlmsg_len(nlh), ib_nl_policy, NULL);
+ ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+ nlmsg_len(nlh), ib_nl_policy, NULL);
attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
if (ret || !attr)
goto settimeout_out;
@@ -1080,8 +1080,8 @@ static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
return 0;
- ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
- nlmsg_len(nlh), ib_nl_policy, NULL);
+ ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+ nlmsg_len(nlh), ib_nl_policy, NULL);
if (ret)
return 0;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 70b7d80431a9..f2e7ffe6fc54 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -993,6 +993,8 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
* will only be one mm, so no big deal.
*/
down_write(&mm->mmap_sem);
+ if (!mmget_still_valid(mm))
+ goto skip_mm;
mutex_lock(&ufile->umap_lock);
list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
list) {
@@ -1007,6 +1009,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
}
mutex_unlock(&ufile->umap_lock);
+ skip_mm:
up_write(&mm->mmap_sem);
mmput(mm);
}
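
The mmget_still_valid() check above guards against a racing core dump: once the dumper has snapshotted the mm, it assumes nobody else rewrites VMA flags, so the zap must be skipped. A condensed sketch of the pattern (ex_zap_user_mappings is a hypothetical name, not from this patch):

#include <linux/mm.h>
#include <linux/sched/mm.h>

static void ex_zap_user_mappings(struct mm_struct *mm)
{
	down_write(&mm->mmap_sem);
	/* false once a coredump has snapshotted this mm */
	if (mmget_still_valid(mm)) {
		/* ... zap_vma_ptes()/vm_flags surgery goes here ... */
	}
	up_write(&mm->mmap_sem);
}
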
diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c
index 9a7520ee41e0..f82d46ed969d 100644
--- a/drivers/infiniband/hw/cxgb4/restrack.c
+++ b/drivers/infiniband/hw/cxgb4/restrack.c
@@ -149,7 +149,7 @@ static int fill_res_qp_entry(struct sk_buff *msg,
if (qhp->ucontext)
return 0;
- table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err;
@@ -216,7 +216,7 @@ static int fill_res_ep_entry(struct sk_buff *msg,
if (!uep)
return 0;
- table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err_free_uep;
@@ -387,7 +387,7 @@ static int fill_res_cq_entry(struct sk_buff *msg,
if (ibcq->uobject)
return 0;
- table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err;
@@ -447,7 +447,7 @@ static int fill_res_mr_entry(struct sk_buff *msg,
if (!stag)
return 0;
- table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err;
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 612f04190ed8..9784c6c0d2ec 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -13232,7 +13232,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
int total_contexts;
int ret;
unsigned ngroups;
- int qos_rmt_count;
+ int rmt_count;
int user_rmt_reduced;
u32 n_usr_ctxts;
u32 send_contexts = chip_send_contexts(dd);
@@ -13294,10 +13294,20 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
n_usr_ctxts = rcv_contexts - total_contexts;
}
- /* each user context requires an entry in the RMT */
- qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
- if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
- user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
+ /*
+ * The RMT entries are currently allocated as shown below:
+ * 1. QOS (0 to 128 entries);
+ * 2. FECN for PSM (num_user_contexts + num_vnic_contexts);
+ * 3. VNIC (num_vnic_contexts).
+ * It should be noted that PSM FECN oversubscribes num_vnic_contexts
+ * entries of RMT because both VNIC and PSM could allocate any receive
+ * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
+ * and PSM FECN must reserve an RMT entry for each possible PSM receive
+ * context.
+ */
+ rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
+ if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
+ user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
dd_dev_err(dd,
"RMT size is reducing the number of user receive contexts from %u to %d\n",
n_usr_ctxts,
@@ -14285,9 +14295,11 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
u64 reg;
int i, idx, regoff, regidx;
u8 offset;
+ u32 total_cnt;
/* there needs to be enough room in the map table */
- if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
+ total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
+ if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
return;
}
@@ -14341,7 +14353,7 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
/* add rule 1 */
add_rsm_rule(dd, RSM_INS_FECN, &rrd);
- rmt->used += dd->num_user_contexts;
+ rmt->used += total_cnt;
}
/* Initialize RSM for VNIC */
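
A worked instance of the new RMT accounting (all numbers are assumed for illustration, not real chip parameters):

/* qos_rmt_entries()  = 128
 * num_vnic_contexts  = 8
 * rmt_count          = 128 + (8 * 2) = 144
 *
 * Assuming NUM_MAP_ENTRIES = 256, n_usr_ctxts is now capped at
 * 256 - 144 = 112.  The old qos-only check would have allowed
 * 256 - 128 = 128 and let the FECN and VNIC rules overflow the map.
 */
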
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 9b643c2409cf..eba300330a02 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -898,7 +898,9 @@ void notify_error_qp(struct rvt_qp *qp)
if (!list_empty(&priv->s_iowait.list) &&
!(qp->s_flags & RVT_S_BUSY) &&
!(priv->s_flags & RVT_S_BUSY)) {
- qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
+ qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
+ iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
+ iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
list_del_init(&priv->s_iowait.list);
priv->s_iowait.lock = NULL;
rvt_put_qp(qp);
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index e6726c1ab866..5991211d72bd 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -3088,7 +3088,7 @@ send_last:
update_ack_queue(qp, next);
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+ if (e->rdma_sge.mr) {
rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
@@ -3166,7 +3166,7 @@ send_last:
update_ack_queue(qp, next);
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+ if (e->rdma_sge.mr) {
rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index fdda33aca77f..43cbce7a19ea 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -5017,24 +5017,14 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
make_tid_rdma_ack(qp, ohdr, ps))
return 1;
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
- if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
- goto bail;
- /* We are in the error state, flush the work request. */
- if (qp->s_last == READ_ONCE(qp->s_head))
- goto bail;
- /* If DMAs are in progress, we can't flush immediately. */
- if (iowait_sdma_pending(&priv->s_iowait)) {
- qp->s_flags |= RVT_S_WAIT_DMA;
- goto bail;
- }
- clear_ahg(qp);
- wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
- IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
- /* will get called again */
- goto done_free_tx;
- }
+ /*
+ * Bail out if we can't send data.
+ * Be reminded that this check must be done after the call to
+ * make_tid_rdma_ack() because the responding QP could be in
+ * RTR state where it can send TID RDMA ACK, not TID RDMA WRITE DATA.
+ */
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK))
+ goto bail;
if (priv->s_flags & RVT_S_WAIT_ACK)
goto bail;
@@ -5144,11 +5134,6 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2,
middle, ps);
return 1;
-done_free_tx:
- hfi1_put_txreq(ps->s_txreq);
- ps->s_txreq = NULL;
- return 1;
-
bail:
hfi1_put_txreq(ps->s_txreq);
bail_no_tx:
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index a922db58be14..2b07032dbdda 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -423,8 +423,7 @@ tx_finish:
static u16 hfi1_vnic_select_queue(struct net_device *netdev,
struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
struct opa_vnic_skb_mdata *mdata;
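
The dropped argument reflects the 5.2 removal of select_queue_fallback_t from struct net_device_ops. Drivers that relied on the fallback now call the core helper directly; a hypothetical callback as a sketch:

#include <linux/netdevice.h>

static u16 ex_select_queue(struct net_device *dev, struct sk_buff *skb,
			   struct net_device *sb_dev)
{
	/* netdev_pick_tx() is the core XPS/hash logic that the removed
	 * 'fallback' pointer used to reach. */
	return netdev_pick_tx(dev, skb, sb_dev);
}
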
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index f1fec56f3ff4..8e29dbb5b5fb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -792,6 +792,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
dma_offset = offset = idx_offset * table->obj_size;
} else {
+ u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
+
hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
/* mtt mhop */
i = mhop.l0_idx;
@@ -803,8 +805,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
hem_idx = i;
hem = table->hem[hem_idx];
- dma_offset = offset = (obj & (table->num_obj - 1)) *
- table->obj_size % mhop.bt_chunk_size;
+ dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size %
+ mhop.bt_chunk_size;
if (mhop.hop_num == 2)
dma_offset = offset = 0;
}
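
A plausibility check of the new offset math, with assumed values:

/* Example (values assumed): obj = 10, table->num_obj = 4096,
 * mhop.bt_chunk_size = 4096, seg_size = 64.
 *
 * offset = (10 & 4095) * 64 % 4096 = 640
 *
 * i.e. mtt offsets are computed in whole 64-byte segments (8 BAs of
 * 8 bytes each) rather than in table->obj_size units.
 */
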
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index b09f1cde2ff5..08be0e4eabcd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -746,7 +746,6 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table;
dma_addr_t dma_handle;
__le64 *mtts;
- u32 s = start_index * sizeof(u64);
u32 bt_page_size;
u32 i;
@@ -780,7 +779,8 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
return -EINVAL;
mtts = hns_roce_table_find(hr_dev, table,
- mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
+ mtt->first_seg +
+ start_index / HNS_ROCE_MTT_ENTRY_PER_SEG,
&dma_handle);
if (!mtts)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 57c76eafef2f..66cdf625534f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -274,9 +274,6 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
wait_for_completion(&hr_qp->free);
if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
- if (hr_dev->caps.sccc_entry_sz)
- hns_roce_table_put(hr_dev, &qp_table->sccc_table,
- hr_qp->qpn);
if (hr_dev->caps.trrl_entry_sz)
hns_roce_table_put(hr_dev, &qp_table->trrl_table,
hr_qp->qpn);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index c5a881172524..337410f40860 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -173,7 +173,12 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
rcu_read_lock();
in = __in_dev_get_rcu(upper_dev);
- local_ipaddr = ntohl(in->ifa_list->ifa_address);
+
+ if (!in->ifa_list)
+ local_ipaddr = 0;
+ else
+ local_ipaddr = ntohl(in->ifa_list->ifa_address);
+
rcu_read_unlock();
} else {
local_ipaddr = ntohl(ifa->ifa_address);
@@ -185,6 +190,11 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
case NETDEV_UP:
/* Fall through */
case NETDEV_CHANGEADDR:
+
+ /* No local address was resolved; nothing to update in the ARP cache */
+ if (!local_ipaddr)
+ break;
+
i40iw_manage_arp_cache(iwdev,
netdev->dev_addr,
&local_ipaddr,
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 782499abcd98..2a0b59a4b6eb 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -804,8 +804,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
unsigned long flags;
for (i = 0 ; i < dev->num_ports; i++) {
- cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
det = &sriov->alias_guid.ports_guid[i];
+ cancel_delayed_work_sync(&det->alias_guid_work);
spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
while (!list_empty(&det->cb_list)) {
cb_ctx = list_entry(det->cb_list.next,
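
The _sync conversion matters because the work item can still be running, and even re-arming itself, when plain cancel_delayed_work() returns. A comment sketch of the distinction:

/* cancel_delayed_work(&det->alias_guid_work);
 *	returns once the pending timer is gone, but a handler already
 *	executing on another CPU keeps running and may requeue itself,
 *	racing with the list teardown that follows.
 *
 * cancel_delayed_work_sync(&det->alias_guid_work);
 *	additionally waits for a running handler to finish, so it can
 *	no longer touch 'det' after this call returns.
 */
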
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index eaa055007f28..9e08df7914aa 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -20,6 +20,7 @@
enum devx_obj_flags {
DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
+ DEVX_OBJ_FLAGS_DCT = 1 << 1,
};
struct devx_async_data {
@@ -39,7 +40,10 @@ struct devx_obj {
u32 dinlen; /* destroy inbox length */
u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
u32 flags;
- struct mlx5_ib_devx_mr devx_mr;
+ union {
+ struct mlx5_ib_devx_mr devx_mr;
+ struct mlx5_core_dct core_dct;
+ };
};
struct devx_umem {
@@ -347,7 +351,6 @@ static u64 devx_get_obj_id(const void *in)
obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
MLX5_GET(arm_rq_in, in, srq_number));
break;
- case MLX5_CMD_OP_DRAIN_DCT:
case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
MLX5_GET(drain_dct_in, in, dctn));
@@ -618,7 +621,6 @@ static bool devx_is_obj_modify_cmd(const void *in)
case MLX5_CMD_OP_2RST_QP:
case MLX5_CMD_OP_ARM_XRC_SRQ:
case MLX5_CMD_OP_ARM_RQ:
- case MLX5_CMD_OP_DRAIN_DCT:
case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
case MLX5_CMD_OP_ARM_XRQ:
case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
@@ -1124,7 +1126,11 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
devx_cleanup_mkey(obj);
- ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
+ if (obj->flags & DEVX_OBJ_FLAGS_DCT)
+ ret = mlx5_core_destroy_dct(obj->mdev, &obj->core_dct);
+ else
+ ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
+ sizeof(out));
if (ib_is_destroy_retryable(ret, why, uobject))
return ret;
@@ -1185,9 +1191,17 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
devx_set_umem_valid(cmd_in);
}
- err = mlx5_cmd_exec(dev->mdev, cmd_in,
- cmd_in_len,
- cmd_out, cmd_out_len);
+ if (opcode == MLX5_CMD_OP_CREATE_DCT) {
+ obj->flags |= DEVX_OBJ_FLAGS_DCT;
+ err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
+ cmd_in, cmd_in_len,
+ cmd_out, cmd_out_len);
+ } else {
+ err = mlx5_cmd_exec(dev->mdev, cmd_in,
+ cmd_in_len,
+ cmd_out, cmd_out_len);
+ }
+
if (err)
goto obj_free;
@@ -1214,7 +1228,11 @@ err_copy:
if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
devx_cleanup_mkey(obj);
obj_destroy:
- mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
+ if (obj->flags & DEVX_OBJ_FLAGS_DCT)
+ mlx5_core_destroy_dct(obj->mdev, &obj->core_dct);
+ else
+ mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
+ sizeof(out));
obj_free:
kfree(obj);
return err;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index fae6a6a1fbea..b0b502c7055e 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -415,10 +415,17 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
*active_speed = IB_SPEED_EDR;
break;
case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
+ *active_width = IB_WIDTH_2X;
+ *active_speed = IB_SPEED_EDR;
+ break;
case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_HDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_EDR;
+ break;
case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
*active_width = IB_WIDTH_2X;
*active_speed = IB_SPEED_HDR;
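
As a sanity check on the new (width, speed) pairs, lanes times per-lane rate matches the Ethernet protocol rate:

/*  50GAUI-2 / 50GBASE-CR2/KR2    -> 2X x EDR (~25 Gb/s/lane) =  50G
 *  50GAUI-1 / 50GBASE-CR/KR      -> 1X x HDR (~50 Gb/s/lane) =  50G
 *  CAUI-4 / 100GBASE-CR4/KR4     -> 4X x EDR                 = 100G
 *  100GAUI-2 / 100GBASE-CR2/KR2  -> 2X x HDR                 = 100G
 */
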
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index c20bfc41ecf1..0aa10ebda5d9 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -585,7 +585,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
- u64 access_mask = ODP_READ_ALLOWED_BIT;
+ u64 access_mask;
u64 start_idx, page_mask;
struct ib_umem_odp *odp;
size_t size;
@@ -607,6 +607,7 @@ next_mr:
page_shift = mr->umem->page_shift;
page_mask = ~(BIT(page_shift) - 1);
start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
+ access_mask = ODP_READ_ALLOWED_BIT;
if (prefetch && !downgrade && !mr->umem->writable) {
/* prefetch with write-access must
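
Moving the initializer below the next_mr label makes it per-iteration. A condensed sketch of the retry path this fixes (names follow the patch; surrounding logic trimmed):

next_mr:
	/* recomputed for every MR: a writable MR earlier in the loop
	 * must not leak ODP_WRITE_ALLOWED_BIT into a read-only one */
	access_mask = ODP_READ_ALLOWED_BIT;
	if (mr->umem->writable && !downgrade)
		access_mask |= ODP_WRITE_ALLOWED_BIT;
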
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 19ae2b018f01..ef7d69269a88 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3729,6 +3729,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
struct mlx5_ib_modify_qp_resp resp = {};
+ u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
u32 min_resp_len = offsetof(typeof(resp), dctn) +
sizeof(resp.dctn);
@@ -3747,7 +3748,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
- MLX5_ST_SZ_BYTES(create_dct_in));
+ MLX5_ST_SZ_BYTES(create_dct_in), out,
+ sizeof(out));
if (err)
return err;
resp.dctn = qp->dct.mdct.mqp.qpn;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 032883180f65..0010a3ed64f1 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1407,7 +1407,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
if (neigh->nud_state & NUD_VALID) {
nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
" is %pM, Gateway is 0x%08X \n", dst_ip,
- neigh->ha, ntohl(rt->rt_gateway));
+ neigh->ha, ntohl(rt->rt_gw4));
if (arpindex >= 0) {
if (ether_addr_equal(nesadapter->arp_table[arpindex].mac_addr, neigh->ha)) {
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 6d8b3e0de57a..ec41400fec0c 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -1131,6 +1131,8 @@ static void pvrdma_pci_remove(struct pci_dev *pdev)
pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
pvrdma_free_slots(dev);
+ dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
+ dev->dsrbase);
iounmap(dev->regs);
kfree(dev->sgid_tbl);
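
The added dma_free_coherent() plugs a leak of the device shared region on remove: teardown must mirror the probe-time allocation with the same size, CPU pointer, and DMA handle. An illustrative pairing (the probe-side line is paraphrased from the driver, not part of this patch):

/* probe: */
dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
			      &dev->dsrbase, GFP_KERNEL);

/* remove: must mirror the allocation exactly */
dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr, dev->dsrbase);
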
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index ae70cd18903e..aeff68f582d3 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -95,8 +95,7 @@ static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb,
}
static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
struct opa_vnic_skb_mdata *mdata;
@@ -106,8 +105,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
mdata = skb_push(skb, sizeof(*mdata));
mdata->entropy = opa_vnic_calc_entropy(skb);
mdata->vl = opa_vnic_get_vl(adapter, skb);
- rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
- sb_dev, fallback);
+ rc = adapter->rn_ops->ndo_select_queue(netdev, skb, sb_dev);
skb_pull(skb, sizeof(*mdata));
return rc;
}