author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-03-10 17:38:01 +0100
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-03-10 17:38:01 +0100
commit | 2f64e70cd0fc5db1d2e41dac1fc668951840f9ed (patch)
tree | 472cacc28f76e24b9e5318003bef53f9ed8cac6f /drivers
parent | Merge tag 'platform-drivers-x86-v4.16-6' of git://git.infradead.org/linux-pla... (diff)
parent | RDMA/mlx5: Fix integer overflow while resizing CQ (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma fixes from Doug Ledford:
- Various driver bug fixes in mlx5, mlx4, bnxt_re and qedr, ranging
from bugs under load to bad error case handling
- There is one largish patch fixing the locking in bnxt_re to avoid a
machine hard-lockup situation
- A few core bugs on error paths
- A patch to reduce stack usage in the new CQ API (see the sketch just
after this list)
- One mlx5 regression introduced in this merge window
- There were new syzkaller scripts written for the RDMA subsystem and
we are fixing issues found by the bot
- One of the commits (aa0de36a40f4 "RDMA/mlx5: Fix integer overflow
while resizing CQ") is missing part of the commit log message and one
of the SOB lines. The original patch was from Leon Romanovsky, and a
cut-n-paste separator in the commit message confused patchworks which
then put the end of message separator in the wrong place in the
downloaded patch, and I didn't notice in time. The patch made it into
the official branch, and the only way to fix it in-place was to
rebase. Given the pain that a rebase causes, and the fact that the
patch has relevant tags for stable and syzkaller, a revert of the
munged patch and a reapplication of the original patch with the log
message intact was done.
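
A note on the stack-usage fix (commit "RDMA/core: Reduce poll batch for
direct cq polling" in the list below): ib_process_cq_direct() used to keep
a 16-entry struct ib_wc array on the stack; the fix shrinks that to 8
entries and passes the batch size into the shared poll loop instead of
hard-coding IB_POLL_BATCH. What follows is a minimal userspace C sketch of
that pattern, not the kernel code itself: poll_cq() stands in for
ib_poll_cq(), the simulated completion count is invented, and the
unbounded (budget == -1) mode of the real __ib_process_cq() is omitted.

    #include <stdio.h>

    #define POLL_BATCH        16 /* batch for the IRQ/workqueue paths (per-CQ buffer) */
    #define POLL_BATCH_DIRECT  8 /* smaller batch bounds the on-stack array */

    struct wc { int id; };

    static int pending = 20; /* simulated completions outstanding on the CQ */

    /* Stand-in for ib_poll_cq(): hand out up to 'max' completions. */
    static int poll_cq(struct wc *wcs, int max)
    {
        int n = pending < max ? pending : max;
        for (int i = 0; i < n; i++)
            wcs[i].id = pending - i;
        pending -= n;
        return n;
    }

    /* Mirrors the reworked __ib_process_cq(): the batch size is now a
     * parameter, so direct polling can use a smaller stack array.
     * budget is assumed positive here. */
    static int process_cq(int budget, struct wc *wcs, int batch)
    {
        int n, completed = 0;

        while ((n = poll_cq(wcs, batch < budget - completed ?
                                 batch : budget - completed)) > 0) {
            completed += n;
            /* a short read means the CQ drained; stop early */
            if (n != batch || completed >= budget)
                break;
        }
        return completed;
    }

    static int process_cq_direct(int budget)
    {
        struct wc wcs[POLL_BATCH_DIRECT]; /* was 16 entries; now 8 */

        return process_cq(budget, wcs, POLL_BATCH_DIRECT);
    }

    int main(void)
    {
        printf("completed %d\n", process_cq_direct(32));
        return 0;
    }

The n != batch test is the early-out: a short read means the CQ is
drained, so the smaller direct batch costs at most a few extra polls when
completions are plentiful.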
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (25 commits)
RDMA/mlx5: Fix integer overflow while resizing CQ
Revert "RDMA/mlx5: Fix integer overflow while resizing CQ"
RDMA/ucma: Check that user doesn't overflow QP state
RDMA/mlx5: Fix integer overflow while resizing CQ
RDMA/ucma: Limit possible option size
IB/core: Fix possible crash to access NULL netdev
RDMA/bnxt_re: Avoid Hard lockup during error CQE processing
RDMA/core: Reduce poll batch for direct cq polling
IB/mlx5: Fix an error code in __mlx5_ib_modify_qp()
IB/mlx5: When not in dual port RoCE mode, use provided port as native
IB/mlx4: Include GID type when deleting GIDs from HW table under RoCE
IB/mlx4: Fix corruption of RoCEv2 IPv4 GIDs
RDMA/qedr: Fix iWARP write and send with immediate
RDMA/qedr: Fix kernel panic when running fio over NFSoRDMA
RDMA/qedr: Fix iWARP connect with port mapper
RDMA/qedr: Fix ipv6 destination address resolution
IB/core : Add null pointer check in addr_resolve
RDMA/bnxt_re: Fix the ib_reg failure cleanup
RDMA/bnxt_re: Fix incorrect DB offset calculation
RDMA/bnxt_re: Unconditionly fence non wire memory operations
...
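
For reference, the CQ-resize fix that appears twice in the list above
guards the user-controlled product entries * ucmd.cqe_size before passing
it to ib_umem_get(). A hedged standalone C sketch of the same guard
(cq_buf_size() is a hypothetical name, not the kernel function; the check
is equivalent to the kernel's form for entries >= 1):

    #include <stdint.h>
    #include <stddef.h>
    #include <errno.h>

    /* Equivalent (for entries >= 1) to the kernel's
     *   if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
     * guard: refuse any product that would wrap SIZE_MAX. */
    int cq_buf_size(size_t cqe_size, size_t entries, size_t *out)
    {
        if (cqe_size && entries > SIZE_MAX / cqe_size)
            return -EINVAL;
        *out = cqe_size * entries;
        return 0;
    }

With cqe_size == 64 and entries == SIZE_MAX / 32, this returns -EINVAL
instead of pinning a buffer whose length silently wrapped to a small
value.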
Diffstat (limited to 'drivers')
23 files changed, 192 insertions, 156 deletions
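
The largest change in the diff below is the bnxt_re locking rework, which
retires the "buddy CQ" hwq.lock nesting in favor of a dedicated per-CQ
flush_lock pair. As a rough userspace analogue of the paired
acquire/release (pthread mutexes standing in for kernel spinlocks; the
names are illustrative, not the driver's API):

    #include <pthread.h>

    struct cq {
        pthread_mutex_t flush_lock;
    };

    /* Take the flush locks of both CQs a QP feeds completions into,
     * send CQ first, mirroring the fixed order in the driver. */
    void acquire_flush_locks(struct cq *scq, struct cq *rcq)
    {
        pthread_mutex_lock(&scq->flush_lock);
        if (scq != rcq)
            pthread_mutex_lock(&rcq->flush_lock);
    }

    void release_flush_locks(struct cq *scq, struct cq *rcq)
    {
        if (scq != rcq)
            pthread_mutex_unlock(&rcq->flush_lock);
        pthread_mutex_unlock(&scq->flush_lock);
    }

The scq == rcq special case matters because a QP whose send and receive
completions share one CQ has only one lock; taking it twice would
self-deadlock. The kernel version expresses the same skip with sparse
__acquire()/__release() annotations.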
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index a5b4cf030c11..9183d148d644 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -550,18 +550,13 @@ static int addr_resolve(struct sockaddr *src_in,
 		dst_release(dst);
 	}

-	if (ndev->flags & IFF_LOOPBACK) {
-		ret = rdma_translate_ip(dst_in, addr);
-		/*
-		 * Put the loopback device and get the translated
-		 * device instead.
-		 */
+	if (ndev) {
+		if (ndev->flags & IFF_LOOPBACK)
+			ret = rdma_translate_ip(dst_in, addr);
+		else
+			addr->bound_dev_if = ndev->ifindex;
 		dev_put(ndev);
-		ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
-	} else {
-		addr->bound_dev_if = ndev->ifindex;
 	}
-	dev_put(ndev);

 	return ret;
 }
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index bc79ca8215d7..af5ad6a56ae4 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -17,6 +17,7 @@

 /* # of WCs to poll for with a single call to ib_poll_cq */
 #define IB_POLL_BATCH			16
+#define IB_POLL_BATCH_DIRECT		8

 /* # of WCs to iterate over before yielding */
 #define IB_POLL_BUDGET_IRQ		256
@@ -25,18 +26,18 @@
 #define IB_POLL_FLAGS \
 	(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)

-static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
+static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
+			   int batch)
 {
 	int i, n, completed = 0;
-	struct ib_wc *wcs = poll_wc ? : cq->wc;

 	/*
 	 * budget might be (-1) if the caller does not
 	 * want to bound this call, thus we need unsigned
 	 * minimum here.
 	 */
-	while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
-			budget - completed), wcs)) > 0) {
+	while ((n = ib_poll_cq(cq, min_t(u32, batch,
+					 budget - completed), wcs)) > 0) {
 		for (i = 0; i < n; i++) {
 			struct ib_wc *wc = &wcs[i];

@@ -48,8 +49,7 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)

 		completed += n;

-		if (n != IB_POLL_BATCH ||
-		    (budget != -1 && completed >= budget))
+		if (n != batch || (budget != -1 && completed >= budget))
 			break;
 	}

@@ -72,9 +72,9 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
  */
 int ib_process_cq_direct(struct ib_cq *cq, int budget)
 {
-	struct ib_wc wcs[IB_POLL_BATCH];
+	struct ib_wc wcs[IB_POLL_BATCH_DIRECT];

-	return __ib_process_cq(cq, budget, wcs);
+	return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
 }
 EXPORT_SYMBOL(ib_process_cq_direct);

@@ -88,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
 	struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
 	int completed;

-	completed = __ib_process_cq(cq, budget, NULL);
+	completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
 	if (completed < budget) {
 		irq_poll_complete(&cq->iop);
 		if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
@@ -108,7 +108,8 @@ static void ib_cq_poll_work(struct work_struct *work)
 	struct ib_cq *cq = container_of(work, struct ib_cq, work);
 	int completed;

-	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL);
+	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
+				    IB_POLL_BATCH);
 	if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
 	    ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
 		queue_work(ib_comp_wq, &cq->work);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index e8010e73a1cf..bb065c9449be 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -536,14 +536,14 @@ int ib_register_device(struct ib_device *device,
 	ret = device->query_device(device, &device->attrs, &uhw);
 	if (ret) {
 		pr_warn("Couldn't query the device attributes\n");
-		goto cache_cleanup;
+		goto cg_cleanup;
 	}

 	ret = ib_device_register_sysfs(device, port_callback);
 	if (ret) {
 		pr_warn("Couldn't register device %s with driver model\n",
 			device->name);
-		goto cache_cleanup;
+		goto cg_cleanup;
 	}

 	device->reg_state = IB_DEV_REGISTERED;
@@ -559,6 +559,8 @@ int ib_register_device(struct ib_device *device,
 	mutex_unlock(&device_mutex);
 	return 0;

+cg_cleanup:
+	ib_device_unregister_rdmacg(device);
 cache_cleanup:
 	ib_cache_cleanup_one(device);
 	ib_cache_release_one(device);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 8cf15d4a8ac4..9f029a1ca5ea 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1291,10 +1291,9 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
 		resolved_dev = dev_get_by_index(dev_addr.net,
 						dev_addr.bound_dev_if);
-		if (resolved_dev->flags & IFF_LOOPBACK) {
-			dev_put(resolved_dev);
-			resolved_dev = idev;
-			dev_hold(resolved_dev);
+		if (!resolved_dev) {
+			dev_put(idev);
+			return -ENODEV;
 		}
 		ndev = ib_get_ndev_from_path(rec);
 		rcu_read_lock();
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index f015f1bf88c9..3a9d0f5b5881 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1149,6 +1149,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;

+	if (cmd.qp_state > IB_QPS_ERR)
+		return -EINVAL;
+
 	ctx = ucma_get_ctx(file, cmd.id);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
@@ -1294,6 +1297,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);

+	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
+		return -EINVAL;
+
 	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
 			     cmd.optlen);
 	if (IS_ERR(optval)) {
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 643174d949a8..0dd75f449872 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -785,7 +785,7 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
 	return 0;
 }

-static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
+unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
 	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
 {
 	unsigned long flags;
@@ -799,8 +799,8 @@ static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
 	return flags;
 }

-static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
-			       unsigned long flags)
+void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
+			unsigned long flags)
 	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
 {
 	if (qp->rcq != qp->scq)
@@ -1606,6 +1606,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 	int status;
 	union ib_gid sgid;
 	struct ib_gid_attr sgid_attr;
+	unsigned int flags;
 	u8 nw_type;

 	qp->qplib_qp.modify_flags = 0;
@@ -1634,14 +1635,18 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 			dev_dbg(rdev_to_dev(rdev),
 				"Move QP = %p to flush list\n", qp);
+			flags = bnxt_re_lock_cqs(qp);
 			bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+			bnxt_re_unlock_cqs(qp, flags);
 		}
 		if (!qp->sumem &&
 		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
 			dev_dbg(rdev_to_dev(rdev),
 				"Move QP = %p out of flush list\n", qp);
+			flags = bnxt_re_lock_cqs(qp);
 			bnxt_qplib_clean_qp(&qp->qplib_qp);
+			bnxt_re_unlock_cqs(qp, flags);
 		}
 	}
 	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
@@ -2227,10 +2232,13 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
 	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
 	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;

+	/* Need unconditional fence for local invalidate
+	 * opcode to work as expected.
+	 */
+	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+
 	if (wr->send_flags & IB_SEND_SIGNALED)
 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
-	if (wr->send_flags & IB_SEND_FENCE)
-		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
 	if (wr->send_flags & IB_SEND_SOLICITED)
 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;

@@ -2251,8 +2259,12 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
 	wqe->frmr.levels = qplib_frpl->hwq.level + 1;
 	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;

-	if (wr->wr.send_flags & IB_SEND_FENCE)
-		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+	/* Need unconditional fence for reg_mr
+	 * opcode to function as expected.
+	 */
+
+	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+
 	if (wr->wr.send_flags & IB_SEND_SIGNALED)
 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index b88a48d43a9d..e62b7c2c7da6 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -222,4 +222,7 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
 					   struct ib_udata *udata);
 int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
 int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
+unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
+void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
 #endif /* __BNXT_RE_IB_VERBS_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 33a448036c2e..f6e361750466 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -730,6 +730,13 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
 					 struct bnxt_re_qp *qp)
 {
 	struct ib_event event;
+	unsigned int flags;
+
+	if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+		flags = bnxt_re_lock_cqs(qp);
+		bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+		bnxt_re_unlock_cqs(qp, flags);
+	}

 	memset(&event, 0, sizeof(event));
 	if (qp->qplib_qp.srq) {
@@ -1416,9 +1423,12 @@ static void bnxt_re_task(struct work_struct *work)
 	switch (re_work->event) {
 	case NETDEV_REGISTER:
 		rc = bnxt_re_ib_reg(rdev);
-		if (rc)
+		if (rc) {
 			dev_err(rdev_to_dev(rdev),
 				"Failed to register with IB: %#x", rc);
+			bnxt_re_remove_one(rdev);
+			bnxt_re_dev_unreg(rdev);
+		}
 		break;
 	case NETDEV_UP:
 		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 3ea5b9624f6b..06b42c880fd4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -88,75 +88,35 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
 	}
 }

-void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
-				 unsigned long *flags)
-	__acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock)
+static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
+					      unsigned long *flags)
+	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
 {
-	spin_lock_irqsave(&qp->scq->hwq.lock, *flags);
+	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
 	if (qp->scq == qp->rcq)
-		__acquire(&qp->rcq->hwq.lock);
+		__acquire(&qp->rcq->flush_lock);
 	else
-		spin_lock(&qp->rcq->hwq.lock);
+		spin_lock(&qp->rcq->flush_lock);
 }

-void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
-				 unsigned long *flags)
-	__releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock)
+static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
+					      unsigned long *flags)
+	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
 {
 	if (qp->scq == qp->rcq)
-		__release(&qp->rcq->hwq.lock);
+		__release(&qp->rcq->flush_lock);
 	else
-		spin_unlock(&qp->rcq->hwq.lock);
-	spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags);
-}
-
-static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
-						      struct bnxt_qplib_cq *cq)
-{
-	struct bnxt_qplib_cq *buddy_cq = NULL;
-
-	if (qp->scq == qp->rcq)
-		buddy_cq = NULL;
-	else if (qp->scq == cq)
-		buddy_cq = qp->rcq;
-	else
-		buddy_cq = qp->scq;
-	return buddy_cq;
-}
-
-static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
-				     struct bnxt_qplib_cq *cq)
-	__acquires(&buddy_cq->hwq.lock)
-{
-	struct bnxt_qplib_cq *buddy_cq = NULL;
-
-	buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
-	if (!buddy_cq)
-		__acquire(&cq->hwq.lock);
-	else
-		spin_lock(&buddy_cq->hwq.lock);
-}
-
-static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
-				       struct bnxt_qplib_cq *cq)
-	__releases(&buddy_cq->hwq.lock)
-{
-	struct bnxt_qplib_cq *buddy_cq = NULL;
-
-	buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
-	if (!buddy_cq)
-		__release(&cq->hwq.lock);
-	else
-		spin_unlock(&buddy_cq->hwq.lock);
+		spin_unlock(&qp->rcq->flush_lock);
+	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
 }

 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
 {
 	unsigned long flags;

-	bnxt_qplib_acquire_cq_locks(qp, &flags);
+	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
 	__bnxt_qplib_add_flush_qp(qp);
-	bnxt_qplib_release_cq_locks(qp, &flags);
+	bnxt_qplib_release_cq_flush_locks(qp, &flags);
 }

 static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
@@ -177,7 +137,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
 {
 	unsigned long flags;

-	bnxt_qplib_acquire_cq_locks(qp, &flags);
+	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
 	__clean_cq(qp->scq, (u64)(unsigned long)qp);
 	qp->sq.hwq.prod = 0;
 	qp->sq.hwq.cons = 0;
@@ -186,7 +146,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
 	qp->rq.hwq.cons = 0;

 	__bnxt_qplib_del_flush_qp(qp);
-	bnxt_qplib_release_cq_locks(qp, &flags);
+	bnxt_qplib_release_cq_flush_locks(qp, &flags);
 }

 static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
@@ -2107,9 +2067,6 @@ void bnxt_qplib_mark_qp_error(void *qp_handle)
 	/* Must block new posting of SQ and RQ */
 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
 	bnxt_qplib_cancel_phantom_processing(qp);
-
-	/* Add qp to flush list of the CQ */
-	__bnxt_qplib_add_flush_qp(qp);
 }

 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
@@ -2285,9 +2242,9 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
 					 sw_sq_cons, cqe->wr_id, cqe->status);
 			cqe++;
 			(*budget)--;
-			bnxt_qplib_lock_buddy_cq(qp, cq);
 			bnxt_qplib_mark_qp_error(qp);
-			bnxt_qplib_unlock_buddy_cq(qp, cq);
+			/* Add qp to flush list of the CQ */
+			bnxt_qplib_add_flush_qp(qp);
 		} else {
 			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
 				/* Before we complete, do WA 9060 */
@@ -2403,9 +2360,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
 			/* Add qp to flush list of the CQ */
-			bnxt_qplib_lock_buddy_cq(qp, cq);
-			__bnxt_qplib_add_flush_qp(qp);
-			bnxt_qplib_unlock_buddy_cq(qp, cq);
+			bnxt_qplib_add_flush_qp(qp);
 		}
 	}

@@ -2489,9 +2444,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
 			/* Add qp to flush list of the CQ */
-			bnxt_qplib_lock_buddy_cq(qp, cq);
-			__bnxt_qplib_add_flush_qp(qp);
-			bnxt_qplib_unlock_buddy_cq(qp, cq);
+			bnxt_qplib_add_flush_qp(qp);
 		}
 	}
 done:
@@ -2501,11 +2454,9 @@ done:
 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
 {
 	struct cq_base *hw_cqe, **hw_cqe_ptr;
-	unsigned long flags;
 	u32 sw_cons, raw_cons;
 	bool rc = true;

-	spin_lock_irqsave(&cq->hwq.lock, flags);
 	raw_cons = cq->hwq.cons;
 	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
 	hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
@@ -2513,7 +2464,6 @@ bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)

 	/* Check for Valid bit. If the CQE is valid, return false */
 	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
-	spin_unlock_irqrestore(&cq->hwq.lock, flags);
 	return rc;
 }

@@ -2602,9 +2552,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
 			/* Add qp to flush list of the CQ */
-			bnxt_qplib_lock_buddy_cq(qp, cq);
-			__bnxt_qplib_add_flush_qp(qp);
-			bnxt_qplib_unlock_buddy_cq(qp, cq);
+			bnxt_qplib_add_flush_qp(qp);
 		}
 	}

@@ -2719,9 +2667,7 @@ do_rq:
 	 */

 	/* Add qp to flush list of the CQ */
-	bnxt_qplib_lock_buddy_cq(qp, cq);
-	__bnxt_qplib_add_flush_qp(qp);
-	bnxt_qplib_unlock_buddy_cq(qp, cq);
+	bnxt_qplib_add_flush_qp(qp);
 done:
 	return rc;
 }
@@ -2750,7 +2696,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
 	u32 budget = num_cqes;
 	unsigned long flags;

-	spin_lock_irqsave(&cq->hwq.lock, flags);
+	spin_lock_irqsave(&cq->flush_lock, flags);
 	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
 		dev_dbg(&cq->hwq.pdev->dev,
 			"QPLIB: FP: Flushing SQ QP= %p",
@@ -2764,7 +2710,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
 			qp);
 		__flush_rq(&qp->rq, qp, &cqe, &budget);
 	}
-	spin_unlock_irqrestore(&cq->hwq.lock, flags);
+	spin_unlock_irqrestore(&cq->flush_lock, flags);

 	return num_cqes - budget;
 }
@@ -2773,11 +2719,9 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
 		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
 {
 	struct cq_base *hw_cqe, **hw_cqe_ptr;
-	unsigned long flags;
 	u32 sw_cons, raw_cons;
 	int budget, rc = 0;

-	spin_lock_irqsave(&cq->hwq.lock, flags);
 	raw_cons = cq->hwq.cons;
 	budget = num_cqes;

@@ -2853,20 +2797,15 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
 		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
 	}
 exit:
-	spin_unlock_irqrestore(&cq->hwq.lock, flags);
 	return num_cqes - budget;
 }

 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&cq->hwq.lock, flags);
 	if (arm_type)
 		bnxt_qplib_arm_cq(cq, arm_type);
 	/* Using cq->arm_state variable to track whether to issue cq handler */
 	atomic_set(&cq->arm_state, 1);
-	spin_unlock_irqrestore(&cq->hwq.lock, flags);
 }

 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index ca0a2ffa3509..ade9f13c0fd1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -389,6 +389,18 @@ struct bnxt_qplib_cq {
 	struct list_head		sqf_head, rqf_head;
 	atomic_t			arm_state;
 	spinlock_t			compl_lock; /* synch CQ handlers */
+/* Locking Notes:
+ * QP can move to error state from modify_qp, async error event or error
+ * CQE as part of poll_cq. When QP is moved to error state, it gets added
+ * to two flush lists, one each for SQ and RQ.
+ * Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq
+ * flush_locks should be acquired when QP is moved to error. The control path
+ * operations(modify_qp and async error events) are synchronized with poll_cq
+ * using upper level CQ locks (bnxt_re_cq->cq_lock) of both SCQ and RCQ.
+ * The qplib_cq->flush_lock is required to synchronize two instances of poll_cq
+ * of the same QP while manipulating the flush list.
+ */
+	spinlock_t			flush_lock; /* QP flush management */
 };

 #define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE	sizeof(struct xrrq_irrq)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 8329ec6a7946..80027a494730 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -305,9 +305,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
 			err_event->res_err_state_reason);
 		if (!qp)
 			break;
-		bnxt_qplib_acquire_cq_locks(qp, &flags);
 		bnxt_qplib_mark_qp_error(qp);
-		bnxt_qplib_release_cq_locks(qp, &flags);
+		rcfw->aeq_handler(rcfw, qp_event, qp);
 		break;
 	default:
 		/* Command Response */
@@ -460,7 +459,11 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
 	int rc;

 	RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
-
+	/* Supply (log-base-2-of-host-page-size - base-page-shift)
+	 * to bono to adjust the doorbell page sizes.
+	 */
+	req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
+					   RCFW_DBR_BASE_PAGE_SHIFT);
 	/*
 	 * VFs need not setup the HW context area, PF
 	 * shall setup this area for VF. Skipping the
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 6bee6e3636ea..c7cce2e4185e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -49,6 +49,7 @@
 #define RCFW_COMM_SIZE			0x104

 #define RCFW_DBR_PCI_BAR_REGION		2
+#define RCFW_DBR_BASE_PAGE_SHIFT	12

 #define RCFW_CMD_PREP(req, CMD, cmd_flags)				\
 	do {								\
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 03057983341f..ee98e5efef84 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -139,7 +139,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 	attr->max_pkey = le32_to_cpu(sb->max_pkeys);

 	attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
-	attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE;
+	attr->l2_db_size = (sb->l2_db_space_size + 1) *
+			    (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
 	attr->max_sgid = le32_to_cpu(sb->max_gid);

 	bnxt_qplib_query_version(rcfw, attr->fw_ver);
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 2d7ea096a247..3e5a4f760d0e 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -1761,7 +1761,30 @@ struct cmdq_initialize_fw {
 	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M		(0x3UL << 4)
 	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M		(0x4UL << 4)
 	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G		(0x5UL << 4)
-	__le16 reserved16;
+	/* This value is (log-base-2-of-DBR-page-size - 12).
+	 * 0 for 4KB. HW supported values are enumerated below.
+	 */
+	__le16	log2_dbr_pg_size;
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK	0xfUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT		0
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K	0x0UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K	0x1UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K	0x2UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K	0x3UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K	0x4UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K	0x5UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K	0x6UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K	0x7UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M	0x8UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M	0x9UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M	0xaUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M	0xbUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M	0xcUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M	0xdUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M	0xeUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M	0xfUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST	\
+			CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M
 	__le64 qpc_page_dir;
 	__le64 mrw_page_dir;
 	__le64 srq_page_dir;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 9a566ee3ceff..82adc0d1d30e 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -601,6 +601,7 @@ static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
 	wc->dlid_path_bits = 0;

 	if (is_eth) {
+		wc->slid = 0;
 		wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
 		memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
 		memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
@@ -851,7 +852,6 @@ repoll:
 		}
 	}

-	wc->slid = be16_to_cpu(cqe->rlid);
 	g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
 	wc->src_qp = g_mlpath_rqpn & 0xffffff;
 	wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
@@ -860,6 +860,7 @@ repoll:
 	wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
 					cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
 	if (is_eth) {
+		wc->slid = 0;
 		wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
 		if (be32_to_cpu(cqe->vlan_my_qpn) &
 		    MLX4_CQE_CVLAN_PRESENT_MASK) {
@@ -871,6 +872,7 @@ repoll:
 			memcpy(wc->smac, cqe->smac, ETH_ALEN);
 			wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
 		} else {
+			wc->slid = be16_to_cpu(cqe->rlid);
 			wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
 			wc->vlan_id = 0xffff;
 		}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8d2ee9322f2e..5a0e4fc4785a 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -219,8 +219,6 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
 			gid_tbl[i].version = 2;
 			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
 				gid_tbl[i].type = 1;
-			else
-				memset(&gid_tbl[i].gid, 0, 12);
 		}
 	}

@@ -366,8 +364,13 @@ static int mlx4_ib_del_gid(struct ib_device *device,
 		if (!gids) {
 			ret = -ENOMEM;
 		} else {
-			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
-				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
+			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
+				memcpy(&gids[i].gid,
+				       &port_gid_table->gids[i].gid,
+				       sizeof(union ib_gid));
+				gids[i].gid_type =
+				    port_gid_table->gids[i].gid_type;
+			}
 		}
 	}
 	spin_unlock_bh(&iboe->lock);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 5b974fb97611..15457c9569a7 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -226,7 +226,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 		wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
 		break;
 	}
-	wc->slid = be16_to_cpu(cqe->slid);
 	wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
 	wc->dlid_path_bits = cqe->ml_path;
 	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
@@ -241,10 +240,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 	}

 	if (ll != IB_LINK_LAYER_ETHERNET) {
+		wc->slid = be16_to_cpu(cqe->slid);
 		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
 		return;
 	}

+	wc->slid = 0;
 	vlan_present = cqe->l4_l3_hdr_type & 0x1;
 	roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
 	if (vlan_present) {
@@ -1177,7 +1178,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	if (ucmd.reserved0 || ucmd.reserved1)
 		return -EINVAL;

-	umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
+	/* check multiplication overflow */
+	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
+		return -EINVAL;
+
+	umem = ib_umem_get(context, ucmd.buf_addr,
+			   (size_t)ucmd.cqe_size * entries,
 			   IB_ACCESS_LOCAL_WRITE, 1);
 	if (IS_ERR(umem)) {
 		err = PTR_ERR(umem);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 4236c8086820..033b6af90de9 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -245,12 +245,16 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
 	struct mlx5_ib_multiport_info *mpi;
 	struct mlx5_ib_port *port;

+	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
+	    ll != IB_LINK_LAYER_ETHERNET) {
+		if (native_port_num)
+			*native_port_num = ib_port_num;
+		return ibdev->mdev;
+	}
+
 	if (native_port_num)
 		*native_port_num = 1;

-	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
-		return ibdev->mdev;
-
 	port = &ibdev->port[ib_port_num - 1];
 	if (!port)
 		return NULL;
@@ -3263,7 +3267,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 	struct mlx5_ib_dev *ibdev;
 	struct ib_event ibev;
 	bool fatal = false;
-	u8 port = 0;
+	u8 port = (u8)work->param;

 	if (mlx5_core_is_mp_slave(work->dev)) {
 		ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
@@ -3283,8 +3287,6 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 	case MLX5_DEV_EVENT_PORT_UP:
 	case MLX5_DEV_EVENT_PORT_DOWN:
 	case MLX5_DEV_EVENT_PORT_INITIALIZED:
-		port = (u8)work->param;
-
 		/* In RoCE, port up/down events are handled in
 		 * mlx5_netdev_event().
 		 */
@@ -3298,24 +3300,19 @@ static void mlx5_ib_handle_event(struct work_struct *_work)

 	case MLX5_DEV_EVENT_LID_CHANGE:
 		ibev.event = IB_EVENT_LID_CHANGE;
-		port = (u8)work->param;
 		break;

 	case MLX5_DEV_EVENT_PKEY_CHANGE:
 		ibev.event = IB_EVENT_PKEY_CHANGE;
-		port = (u8)work->param;
-
 		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
 		break;

 	case MLX5_DEV_EVENT_GUID_CHANGE:
 		ibev.event = IB_EVENT_GID_CHANGE;
-		port = (u8)work->param;
 		break;

 	case MLX5_DEV_EVENT_CLIENT_REREG:
 		ibev.event = IB_EVENT_CLIENT_REREGISTER;
-		port = (u8)work->param;
 		break;
 	case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
 		schedule_work(&ibdev->delay_drop.delay_drop_work);
@@ -3327,7 +3324,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 	ibev.device = &ibdev->ib_dev;
 	ibev.element.port_num = port;

-	if (port < 1 || port > ibdev->num_ports) {
+	if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {
 		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
 		goto out;
 	}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 556e015678de..1961c6a45437 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1816,7 +1816,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
 	mr->ibmr.length = 0;
-	mr->ndescs = sg_nents;

 	for_each_sg(sgl, sg, sg_nents, i) {
 		if (unlikely(i >= mr->max_descs))
@@ -1828,6 +1827,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 		sg_offset = 0;
 	}
+	mr->ndescs = i;

 	if (sg_offset_p)
 		*sg_offset_p = sg_offset;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 39d24bf694a8..36197fbac63a 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1584,6 +1584,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	u32 uidx = MLX5_IB_DEFAULT_UIDX;
 	struct mlx5_ib_create_qp ucmd;
 	struct mlx5_ib_qp_base *base;
+	int mlx5_st;
 	void *qpc;
 	u32 *in;
 	int err;
@@ -1592,6 +1593,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	spin_lock_init(&qp->sq.lock);
 	spin_lock_init(&qp->rq.lock);

+	mlx5_st = to_mlx5_st(init_attr->qp_type);
+	if (mlx5_st < 0)
+		return -EINVAL;
+
 	if (init_attr->rwq_ind_tbl) {
 		if (!udata)
 			return -ENOSYS;
@@ -1753,7 +1758,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,

 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);

-	MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type));
+	MLX5_SET(qpc, qpc, st, mlx5_st);
 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);

 	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
@@ -3095,8 +3100,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 		goto out;

 	if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
-	    !optab[mlx5_cur][mlx5_new])
+	    !optab[mlx5_cur][mlx5_new]) {
+		err = -EINVAL;
 		goto out;
+	}

 	op = optab[mlx5_cur][mlx5_new];
 	optpar = ib_mask_to_mlx5_opt(attr_mask);
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 478b7317b80a..26dc374787f7 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -458,8 +458,7 @@ qedr_addr6_resolve(struct qedr_dev *dev,
 		}
 		return -EINVAL;
 	}
-	neigh = dst_neigh_lookup(dst, &dst_in);
-
+	neigh = dst_neigh_lookup(dst, &fl6.daddr);
 	if (neigh) {
 		rcu_read_lock();
 		if (neigh->nud_state & NUD_VALID) {
@@ -494,10 +493,14 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)

 	qp = idr_find(&dev->qpidr, conn_param->qpn);

-	laddr = (struct sockaddr_in *)&cm_id->local_addr;
-	raddr = (struct sockaddr_in *)&cm_id->remote_addr;
-	laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
-	raddr6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
+	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+	raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+	raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
+
+	DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n",
+		 ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port),
+		 ntohs(raddr->sin_port));

 	DP_DEBUG(dev, QEDR_MSG_IWARP,
 		 "Connect source address: %pISpc, remote address: %pISpc\n",
@@ -599,8 +602,8 @@ int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 	int rc;
 	int i;

-	laddr = (struct sockaddr_in *)&cm_id->local_addr;
-	laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
+	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;

 	DP_DEBUG(dev, QEDR_MSG_IWARP,
 		 "Create Listener address: %pISpc\n", &cm_id->local_addr);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 53f00dbf313f..875b17272d65 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -3034,6 +3034,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	switch (wr->opcode) {
 	case IB_WR_SEND_WITH_IMM:
+		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+			rc = -EINVAL;
+			*bad_wr = wr;
+			break;
+		}
 		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
 		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
 		swqe->wqe_size = 2;
@@ -3075,6 +3080,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		break;
 	case IB_WR_RDMA_WRITE_WITH_IMM:
+		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+			rc = -EINVAL;
+			*bad_wr = wr;
+			break;
+		}
 		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
 		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;

@@ -3724,7 +3734,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 {
 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
 	struct qedr_cq *cq = get_qedr_cq(ibcq);
-	union rdma_cqe *cqe = cq->latest_cqe;
+	union rdma_cqe *cqe;
 	u32 old_cons, new_cons;
 	unsigned long flags;
 	int update = 0;
@@ -3741,6 +3751,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 		return qedr_gsi_poll_cq(ibcq, num_entries, wc);

 	spin_lock_irqsave(&cq->cq_lock, flags);
+	cqe = cq->latest_cqe;
 	old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
 	while (num_entries && is_valid_cqe(cq, cqe)) {
 		struct qedr_qp *qp;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 21d29f7936f6..d39b0b7011b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -124,7 +124,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
 		trigger_cmd_completions(dev);
 	}

-	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
+	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);
 	mlx5_core_err(dev, "end\n");

 unlock:
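
Finally, a small arithmetic check for the bnxt_re doorbell fix above: the
driver now programs log2_dbr_pg_size = PAGE_SHIFT - RCFW_DBR_BASE_PAGE_SHIFT
so firmware can match the host page size, and sizes the L2 doorbell region
in fixed 4 KiB units instead of host PAGE_SIZE. A self-contained sketch of
that arithmetic (the l2_db_space_size value below is a made-up example):

    #include <stdio.h>

    #define RCFW_DBR_BASE_PAGE_SHIFT 12 /* firmware's base doorbell page: 4 KiB */

    int main(void)
    {
        /* req.log2_dbr_pg_size = PAGE_SHIFT - RCFW_DBR_BASE_PAGE_SHIFT:
         * 0 -> PG_4K ... 4 -> PG_64K, matching the HSI enum above. */
        for (int page_shift = 12; page_shift <= 16; page_shift++)
            printf("PAGE_SHIFT %2d -> log2_dbr_pg_size %d (%d KiB pages)\n",
                   page_shift, page_shift - RCFW_DBR_BASE_PAGE_SHIFT,
                   (1 << page_shift) / 1024);

        /* l2_db_size is likewise now computed in 4 KiB units:
         * (l2_db_space_size + 1) * (1 << RCFW_DBR_BASE_PAGE_SHIFT). */
        int l2_db_space_size = 3; /* example value */
        printf("l2_db_size = %d\n",
               (l2_db_space_size + 1) * (1 << RCFW_DBR_BASE_PAGE_SHIFT));
        return 0;
    }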