 drivers/infiniband/core/umem.c          | 16
 drivers/infiniband/hw/mlx4/main.c       |  2
 drivers/infiniband/ulp/ipoib/ipoib_cm.c | 42
 net/ipv4/ipvs/ip_vs_sync.c              | 41
 net/rxrpc/ar-output.c                   |  5
 net/xfrm/xfrm_state.c                   |  2
 6 files changed, 74 insertions(+), 34 deletions(-)
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index b4aec5103c99..d40652a80151 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -225,13 +225,15 @@ void ib_umem_release(struct ib_umem *umem)
	 * up here and not be able to take the mmap_sem. In that case
	 * we defer the vm_locked accounting to the system workqueue.
	 */
-	if (context->closing && !down_write_trylock(&mm->mmap_sem)) {
-		INIT_WORK(&umem->work, ib_umem_account);
-		umem->mm = mm;
-		umem->diff = diff;
-
-		schedule_work(&umem->work);
-		return;
+	if (context->closing) {
+		if (!down_write_trylock(&mm->mmap_sem)) {
+			INIT_WORK(&umem->work, ib_umem_account);
+			umem->mm = mm;
+			umem->diff = diff;
+
+			schedule_work(&umem->work);
+			return;
+		}
 	} else
 		down_write(&mm->mmap_sem);
 
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1095c82b38c2..c591616dccde 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -120,7 +120,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
	props->max_srq             = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
-	props->max_srq_wr          = dev->dev->caps.max_srq_wqes;
+	props->max_srq_wr          = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge         = dev->dev->caps.max_srq_sge;
	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap          = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 076a0bbb63d7..5ffc464c99aa 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -56,13 +56,6 @@ MODULE_PARM_DESC(cm_data_debug_level,
 #define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
 #define IPOIB_CM_RX_UPDATE_MASK (0x3)
 
-struct ipoib_cm_id {
-	struct ib_cm_id *id;
-	int flags;
-	u32 remote_qpn;
-	u32 remote_mtu;
-};
-
 static struct ib_qp_attr ipoib_cm_err_attr = {
	.qp_state = IB_QPS_ERR
 };
@@ -309,6 +302,11 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
		return -ENOMEM;
	p->dev = dev;
	p->id = cm_id;
+	cm_id->context = p;
+	p->state = IPOIB_CM_RX_LIVE;
+	p->jiffies = jiffies;
+	INIT_LIST_HEAD(&p->list);
+
	p->qp = ipoib_cm_create_rx_qp(dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
@@ -320,24 +318,24 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
	if (ret)
		goto err_modify;
 
+	spin_lock_irq(&priv->lock);
+	queue_delayed_work(ipoib_workqueue,
+			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
+	/* Add this entry to passive ids list head, but do not re-add it
+	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
+	p->jiffies = jiffies;
+	if (p->state == IPOIB_CM_RX_LIVE)
+		list_move(&p->list, &priv->cm.passive_ids);
+	spin_unlock_irq(&priv->lock);
+
	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
-		goto err_rep;
+		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
+			ipoib_warn(priv, "unable to move qp to error state\n");
	}
-
-	cm_id->context = p;
-	p->jiffies = jiffies;
-	p->state = IPOIB_CM_RX_LIVE;
-	spin_lock_irq(&priv->lock);
-	if (list_empty(&priv->cm.passive_ids))
-		queue_delayed_work(ipoib_workqueue,
-				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
-	list_add(&p->list, &priv->cm.passive_ids);
-	spin_unlock_irq(&priv->lock);
	return 0;
 
-err_rep:
 err_modify:
	ib_destroy_qp(p->qp);
 err_qp:
@@ -754,9 +752,9 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 
	p->mtu = be32_to_cpu(data->mtu);
 
-	if (p->mtu < priv->dev->mtu + IPOIB_ENCAP_LEN) {
-		ipoib_warn(priv, "Rejecting connection: mtu %d < device mtu %d + 4\n",
-			   p->mtu, priv->dev->mtu);
+	if (p->mtu <= IPOIB_ENCAP_LEN) {
+		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
+			   p->mtu, IPOIB_ENCAP_LEN);
		return -EINVAL;
	}
 
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 7ea2d981a932..356f067484e3 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -67,6 +67,11 @@ struct ip_vs_sync_conn_options {
	struct ip_vs_seq        out_seq;        /* outgoing seq. struct */
 };
 
+struct ip_vs_sync_thread_data {
+	struct completion *startup;
+	int state;
+};
+
 #define IP_VS_SYNC_CONN_TIMEOUT (3*60*HZ)
 #define SIMPLE_CONN_SIZE  (sizeof(struct ip_vs_sync_conn))
 #define FULL_CONN_SIZE  \
@@ -751,6 +756,7 @@ static int sync_thread(void *startup)
	mm_segment_t oldmm;
	int state;
	const char *name;
+	struct ip_vs_sync_thread_data *tinfo = startup;
 
	/* increase the module use count */
	ip_vs_use_count_inc();
@@ -789,7 +795,14 @@ static int sync_thread(void *startup)
	add_wait_queue(&sync_wait, &wait);
 
	set_sync_pid(state, current->pid);
-	complete((struct completion *)startup);
+	complete(tinfo->startup);
+
+	/*
+	 * once we call the completion queue above, we should
+	 * null out that reference, since its allocated on the
+	 * stack of the creating kernel thread
+	 */
+	tinfo->startup = NULL;
 
	/* processing master/backup loop here */
	if (state == IP_VS_STATE_MASTER)
@@ -801,6 +814,14 @@ static int sync_thread(void *startup)
	remove_wait_queue(&sync_wait, &wait);
 
	/* thread exits */
+
+	/*
+	 * If we weren't explicitly stopped, then we
+	 * exited in error, and should undo our state
+	 */
+	if ((!stop_master_sync) && (!stop_backup_sync))
+		ip_vs_sync_state -= tinfo->state;
+
	set_sync_pid(state, 0);
	IP_VS_INFO("sync thread stopped!\n");
 
@@ -812,6 +833,11 @@ static int sync_thread(void *startup)
	set_stop_sync(state, 0);
	wake_up(&stop_sync_wait);
 
+	/*
+	 * we need to free the structure that was allocated
+	 * for us in start_sync_thread
+	 */
+	kfree(tinfo);
	return 0;
 }
 
@@ -838,11 +864,19 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
 {
	DECLARE_COMPLETION_ONSTACK(startup);
	pid_t pid;
+	struct ip_vs_sync_thread_data *tinfo;
 
	if ((state == IP_VS_STATE_MASTER && sync_master_pid) ||
	    (state == IP_VS_STATE_BACKUP && sync_backup_pid))
		return -EEXIST;
 
+	/*
+	 * Note that tinfo will be freed in sync_thread on exit
+	 */
+	tinfo = kmalloc(sizeof(struct ip_vs_sync_thread_data), GFP_KERNEL);
+	if (!tinfo)
+		return -ENOMEM;
+
	IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, current->pid);
	IP_VS_DBG(7, "Each ip_vs_sync_conn entry need %Zd bytes\n",
		  sizeof(struct ip_vs_sync_conn));
@@ -858,8 +892,11 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
		ip_vs_backup_syncid = syncid;
	}
 
+	tinfo->state = state;
+	tinfo->startup = &startup;
+
  repeat:
-	if ((pid = kernel_thread(fork_sync_thread, &startup, 0)) < 0) {
+	if ((pid = kernel_thread(fork_sync_thread, tinfo, 0)) < 0) {
		IP_VS_ERR("could not create fork_sync_thread due to %d... "
			  "retrying.\n", pid);
		msleep_interruptible(1000);
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index 591c4422205e..cc9102c5b588 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -640,6 +640,7 @@ static int rxrpc_send_data(struct kiocb *iocb,
				goto efault;
			sp->remain -= copy;
			skb->mark += copy;
+			copied += copy;
 
			len -= copy;
			segment -= copy;
@@ -709,6 +710,8 @@ static int rxrpc_send_data(struct kiocb *iocb,
 
	} while (segment > 0);
 
+success:
+	ret = copied;
 out:
	call->tx_pending = skb;
	_leave(" = %d", ret);
@@ -725,7 +728,7 @@ call_aborted:
 
 maybe_error:
	if (copied)
-		ret = copied;
+		goto success;
	goto out;
 
 efault:
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 85f3f43a6cca..dfacb9c2a6e3 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1729,7 +1729,7 @@ int xfrm_state_mtu(struct xfrm_state *x, int mtu)
	    x->type && x->type->get_mtu)
		res = x->type->get_mtu(x, mtu);
	else
-		res = mtu;
+		res = mtu - x->props.header_len;
	spin_unlock_bh(&x->lock);
	return res;
 }
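
The ip_vs_sync.c hunks above fix a lifetime bug: sync_thread() used to hold a raw pointer to a completion that lives on start_sync_thread()'s stack, a pointer that is only valid until complete() is called. Below is a minimal userspace sketch of the same ownership pattern, using pthreads and a POSIX semaphore in place of the kernel's completion and kernel_thread() APIs; all names in it are illustrative, not taken from the patch.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <stdlib.h>

/* Plays the role of ip_vs_sync_thread_data: heap-allocated by the
 * creator, freed by the thread itself on exit. */
struct thread_data {
	sem_t *startup;		/* points into the creator's stack */
	int state;
};

static void *sync_thread(void *arg)
{
	struct thread_data *tinfo = arg;

	/* Signal the creator that startup has finished... */
	sem_post(tinfo->startup);
	/* ...and drop the reference at once: the semaphore lives on the
	 * creator's stack and may vanish once the creator moves on. */
	tinfo->startup = NULL;

	printf("thread running, state %d\n", tinfo->state);

	free(tinfo);		/* the thread owns its argument block */
	return NULL;
}

int main(void)
{
	sem_t startup;		/* on-stack stand-in for the completion */
	struct thread_data *tinfo;
	pthread_t tid;

	sem_init(&startup, 0, 0);

	/* Heap-allocate the argument so it outlives this stack frame. */
	tinfo = malloc(sizeof(*tinfo));
	if (!tinfo)
		return 1;
	tinfo->startup = &startup;
	tinfo->state = 1;

	if (pthread_create(&tid, NULL, sync_thread, tinfo)) {
		free(tinfo);
		return 1;
	}

	sem_wait(&startup);	/* returns only after the thread has posted */
	sem_destroy(&startup);	/* safe: the thread no longer touches it */

	pthread_join(tid, NULL);
	return 0;
}

The same ordering discipline appears in the patch itself: tinfo->startup is nulled immediately after complete(), and the kfree(tinfo) sits in sync_thread() because start_sync_thread() cannot know when the thread is done with it. The rxrpc change is independent but follows a familiar convention: when sending fails after some data has already been queued, rxrpc_send_data() now returns the byte count copied so far rather than an error, in the spirit of a partial write(2).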