author     David S. Miller <davem@davemloft.net>  2017-12-10 04:09:55 +0100
committer  David S. Miller <davem@davemloft.net>  2017-12-10 04:09:55 +0100
commit     51e18a453f5f59a40c721d4aeab082b4e2e9fac6 (patch)
tree       540d37e6f7a83466677ab6095fcdd2d6f72583c4 /net
parent     macvlan: fix memory hole in macvlan_dev (diff)
parent     kmemcheck: rip it out for real (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflict was two parallel additions of include files to sch_generic.c, no biggie.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--  net/9p/trans_fd.c                 |   1
-rw-r--r--  net/core/dev.c                    |   2
-rw-r--r--  net/dccp/proto.c                  |   5
-rw-r--r--  net/ipv4/inet_timewait_sock.c     |   6
-rw-r--r--  net/ipv4/tcp.c                    |   1
-rw-r--r--  net/ipv4/tcp_bbr.c                |  12
-rw-r--r--  net/ipv4/tcp_input.c              |  12
-rw-r--r--  net/ipv4/tcp_rate.c               |  10
-rw-r--r--  net/ipv4/tcp_recovery.c           |  28
-rw-r--r--  net/ipv6/ip6_tunnel.c             |   2
-rw-r--r--  net/kcm/kcmsock.c                 |  68
-rw-r--r--  net/rds/rdma.c                    |   2
-rw-r--r--  net/sched/sch_choke.c             |   3
-rw-r--r--  net/sched/sch_generic.c           |   3
-rw-r--r--  net/sched/sch_gred.c              |   3
-rw-r--r--  net/sched/sch_red.c               |   2
-rw-r--r--  net/sched/sch_sfq.c               |   3
-rw-r--r--  net/sctp/socket.c                 |   1
-rw-r--r--  net/socket.c                      | 110
-rw-r--r--  net/tipc/server.c                 |   3
-rw-r--r--  net/vmw_vsock/hyperv_transport.c  |   2
21 files changed, 136 insertions(+), 143 deletions(-)
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 985046ae4231..80f5c79053a4 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -839,7 +839,6 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket)
if (IS_ERR(file)) {
pr_err("%s (%d): failed to map fd\n",
__func__, task_pid_nr(current));
- sock_release(csocket);
kfree(p);
return PTR_ERR(file);
}
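
[Editor's note: the sock_release() here is dropped because, after the net/socket.c change later in this merge, sock_alloc_file() consumes the socket on failure, so releasing it again in the caller would be a double free. A minimal userspace C sketch of that ownership contract; make_wrapper is a hypothetical stand-in, not a kernel API:]

    #include <stdlib.h>

    struct wrapper { void *res; };

    /* On success the wrapper owns res; on failure this function
     * frees res itself, so the caller must NOT free it again. */
    static struct wrapper *make_wrapper(void *res)
    {
        struct wrapper *w = malloc(sizeof(*w));
        if (!w) {
            free(res);          /* callee consumes res on failure */
            return NULL;
        }
        w->res = res;
        return w;
    }

    int main(void)
    {
        void *res = malloc(16);
        struct wrapper *w = make_wrapper(res);
        if (!w)
            return 1;           /* no free(res) here: already done */
        free(w->res);
        free(w);
        return 0;
    }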
diff --git a/net/core/dev.c b/net/core/dev.c
index e32cf5c7f200..8aa2f70995e8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1106,7 +1106,7 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
* when the name is long and there isn't enough space left
* for the digits, or if all bits are used.
*/
- return p ? -ENFILE : -EEXIST;
+ return -ENFILE;
}
static int dev_alloc_name_ns(struct net *net,
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index b68168fcc06a..9d43c1f40274 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -259,6 +259,7 @@ int dccp_disconnect(struct sock *sk, int flags)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_sock *inet = inet_sk(sk);
+ struct dccp_sock *dp = dccp_sk(sk);
int err = 0;
const int old_state = sk->sk_state;
@@ -278,6 +279,10 @@ int dccp_disconnect(struct sock *sk, int flags)
sk->sk_err = ECONNRESET;
dccp_clear_xmit_timers(sk);
+ ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
+ ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
+ dp->dccps_hc_rx_ccid = NULL;
+ dp->dccps_hc_tx_ccid = NULL;
__skb_queue_purge(&sk->sk_receive_queue);
__skb_queue_purge(&sk->sk_write_queue);
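
[Editor's note: the added ccid_hc_*_delete() calls free the per-connection CCID state as soon as the socket disconnects, and the pointers are set to NULL so the destructor's own delete calls later become harmless no-ops. A small self-contained C sketch of the free-and-NULL idiom; free_state is a hypothetical helper:]

    #include <stdlib.h>

    struct conn { void *rx_state, *tx_state; };

    /* Safe to call from both the disconnect and destroy paths:
     * free(NULL) is a no-op, and NULLing after free makes a
     * second call harmless instead of a double free. */
    static void free_state(struct conn *c)
    {
        free(c->rx_state);
        c->rx_state = NULL;
        free(c->tx_state);
        c->tx_state = NULL;
    }

    int main(void)
    {
        struct conn c = { malloc(8), malloc(8) };
        free_state(&c);   /* disconnect */
        free_state(&c);   /* destroy: no double free */
        return 0;
    }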
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index c690cd0d9b3f..b563e0c46bac 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -93,7 +93,7 @@ static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
}
/*
- * Enter the time wait state.
+ * Enter the time wait state. This is called with locally disabled BH.
* Essentially we whip up a timewait bucket, copy the relevant info into it
* from the SK, and mess with hash chains and list linkage.
*/
@@ -111,7 +111,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
*/
bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
hashinfo->bhash_size)];
- spin_lock_bh(&bhead->lock);
+ spin_lock(&bhead->lock);
tw->tw_tb = icsk->icsk_bind_hash;
WARN_ON(!icsk->icsk_bind_hash);
inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
@@ -137,7 +137,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
if (__sk_nulls_del_node_init_rcu(sk))
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
- spin_unlock_bh(lock);
+ spin_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
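
[Editor's note: the _bh lock variants are dropped because, per the updated comment, __inet_twsk_hashdance() runs with bottom halves already disabled by its caller; a nested spin_unlock_bh() would re-enable BHs while the caller still holds a BH-sensitive lock. A hedged kernel-style sketch of the rule, using the standard spinlock primitives; the two paths and locks are hypothetical names:]

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(outer_lock);
    static DEFINE_SPINLOCK(inner_lock);

    /* Runs with BH already disabled by the caller, so it must use
     * the plain variants: spin_unlock_bh() here would re-enable
     * bottom halves while outer_lock is still held. */
    static void inner_path(void)
    {
        spin_lock(&inner_lock);
        /* ... hash-chain manipulation ... */
        spin_unlock(&inner_lock);
    }

    static void outer_path(void)
    {
        spin_lock_bh(&outer_lock);   /* disables bottom halves */
        inner_path();
        spin_unlock_bh(&outer_lock); /* the only place BH comes back on */
    }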
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 180311636023..c470fec9062f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2412,6 +2412,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->snd_cwnd_cnt = 0;
tp->window_clamp = 0;
tcp_set_ca_state(sk, TCP_CA_Open);
+ tp->is_sack_reneg = 0;
tcp_clear_retrans(tp);
inet_csk_delack_init(sk);
/* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 69ee877574d0..8322f26e770e 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -110,7 +110,8 @@ struct bbr {
u32 lt_last_lost; /* LT intvl start: tp->lost */
u32 pacing_gain:10, /* current gain for setting pacing rate */
cwnd_gain:10, /* current gain for setting cwnd */
- full_bw_cnt:3, /* number of rounds without large bw gains */
+ full_bw_reached:1, /* reached full bw in Startup? */
+ full_bw_cnt:2, /* number of rounds without large bw gains */
cycle_idx:3, /* current index in pacing_gain cycle array */
has_seen_rtt:1, /* have we seen an RTT sample yet? */
unused_b:5;
@@ -180,7 +181,7 @@ static bool bbr_full_bw_reached(const struct sock *sk)
{
const struct bbr *bbr = inet_csk_ca(sk);
- return bbr->full_bw_cnt >= bbr_full_bw_cnt;
+ return bbr->full_bw_reached;
}
/* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
@@ -717,6 +718,7 @@ static void bbr_check_full_bw_reached(struct sock *sk,
return;
}
++bbr->full_bw_cnt;
+ bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
}
/* If pipe is probably full, drain the queue and then enter steady-state. */
@@ -850,6 +852,7 @@ static void bbr_init(struct sock *sk)
bbr->restore_cwnd = 0;
bbr->round_start = 0;
bbr->idle_restart = 0;
+ bbr->full_bw_reached = 0;
bbr->full_bw = 0;
bbr->full_bw_cnt = 0;
bbr->cycle_mstamp = 0;
@@ -871,6 +874,11 @@ static u32 bbr_sndbuf_expand(struct sock *sk)
*/
static u32 bbr_undo_cwnd(struct sock *sk)
{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ bbr->full_bw = 0; /* spurious slow-down; reset full pipe detection */
+ bbr->full_bw_cnt = 0;
+ bbr_reset_lt_bw_sampling(sk);
return tcp_sk(sk)->snd_cwnd;
}
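
[Editor's note: full_bw_reached turns "did Startup fill the pipe?" into a sticky latch, so bbr_undo_cwnd() can zero full_bw/full_bw_cnt to restart bandwidth probing after a spurious loss without dropping the connection back into Startup. A small C sketch of the latch-vs-counter distinction; all names are hypothetical:]

    #include <stdbool.h>
    #include <stdio.h>

    struct est {
        unsigned cnt;   /* rounds without notable bw growth */
        bool reached;   /* sticky: never cleared once set */
    };

    static void round_done(struct est *e, bool grew, unsigned thresh)
    {
        if (grew)
            e->cnt = 0;
        else if (++e->cnt >= thresh)
            e->reached = true;      /* latch */
    }

    static void undo(struct est *e)
    {
        e->cnt = 0;     /* restart probing; e->reached survives */
    }

    int main(void)
    {
        struct est e = {0};
        round_done(&e, false, 3);
        round_done(&e, false, 3);
        round_done(&e, false, 3);
        undo(&e);
        printf("reached=%d cnt=%u\n", e.reached, e.cnt); /* reached=1 cnt=0 */
        return 0;
    }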
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 734cfc8ff76e..9550cc42de2d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -579,6 +579,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
int time;
int copied;
+ tcp_mstamp_refresh(tp);
time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
return;
@@ -1941,6 +1942,8 @@ void tcp_enter_loss(struct sock *sk)
if (is_reneg) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
tp->sacked_out = 0;
+ /* Mark SACK reneging until we recover from this loss event. */
+ tp->is_sack_reneg = 1;
}
tcp_clear_all_retrans_hints(tp);
@@ -2326,6 +2329,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
}
tp->snd_cwnd_stamp = tcp_jiffies32;
tp->undo_marker = 0;
+ tp->rack.advanced = 1; /* Force RACK to re-examine losses */
}
static inline bool tcp_may_undo(const struct tcp_sock *tp)
@@ -2364,6 +2368,7 @@ static bool tcp_try_undo_recovery(struct sock *sk)
return true;
}
tcp_set_ca_state(sk, TCP_CA_Open);
+ tp->is_sack_reneg = 0;
return false;
}
@@ -2397,8 +2402,10 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPSPURIOUSRTOS);
inet_csk(sk)->icsk_retransmits = 0;
- if (frto_undo || tcp_is_sack(tp))
+ if (frto_undo || tcp_is_sack(tp)) {
tcp_set_ca_state(sk, TCP_CA_Open);
+ tp->is_sack_reneg = 0;
+ }
return true;
}
return false;
@@ -3495,6 +3502,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
struct tcp_sacktag_state sack_state;
struct rate_sample rs = { .prior_delivered = 0 };
u32 prior_snd_una = tp->snd_una;
+ bool is_sack_reneg = tp->is_sack_reneg;
u32 ack_seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq;
bool is_dupack = false;
@@ -3611,7 +3619,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */
lost = tp->lost - lost; /* freshly marked lost */
- tcp_rate_gen(sk, delivered, lost, sack_state.rate);
+ tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
tcp_xmit_recovery(sk, rexmit);
return 1;
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
index 3330a370d306..c61240e43923 100644
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -106,7 +106,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
/* Update the connection delivery information and generate a rate sample. */
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
- struct rate_sample *rs)
+ bool is_sack_reneg, struct rate_sample *rs)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 snd_us, ack_us;
@@ -124,8 +124,12 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
rs->acked_sacked = delivered; /* freshly ACKed or SACKed */
rs->losses = lost; /* freshly marked lost */
- /* Return an invalid sample if no timing information is available. */
- if (!rs->prior_mstamp) {
+ /* Return an invalid sample if no timing information is available or
+ * in recovery from loss with SACK reneging. Rate samples taken during
+ * a SACK reneging event may overestimate bw by including packets that
+ * were SACKed before the reneg.
+ */
+ if (!rs->prior_mstamp || is_sack_reneg) {
rs->delivered = -1;
rs->interval_us = -1;
return;
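
[Editor's note: the new is_sack_reneg parameter lets tcp_rate_gen() invalidate a sample the same way a missing timestamp does: delivered = -1, interval_us = -1, a convention consumers such as BBR already treat as "skip this sample". A minimal C sketch of that in-band invalid-sample convention; struct sample and the values are hypothetical:]

    #include <stdbool.h>
    #include <stdio.h>

    struct sample { long delivered; long interval_us; };

    static void gen_sample(struct sample *s, bool have_stamp, bool reneg)
    {
        if (!have_stamp || reneg) {     /* can't trust this window */
            s->delivered = -1;
            s->interval_us = -1;
            return;
        }
        s->delivered = 10;              /* stand-in measurements */
        s->interval_us = 2000;
    }

    int main(void)
    {
        struct sample s;
        gen_sample(&s, true, true);
        if (s.delivered < 0)
            puts("sample discarded");   /* consumer skips it */
        return 0;
    }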
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index d3ea89020c69..3a81720ac0c4 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -55,7 +55,8 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
* to queuing or delayed ACKs.
*/
reo_wnd = 1000;
- if ((tp->rack.reord || !tp->lost_out) && min_rtt != ~0U) {
+ if ((tp->rack.reord || inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery) &&
+ min_rtt != ~0U) {
reo_wnd = max((min_rtt >> 2) * tp->rack.reo_wnd_steps, reo_wnd);
reo_wnd = min(reo_wnd, tp->srtt_us >> 3);
}
@@ -79,12 +80,12 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
*/
remaining = tp->rack.rtt_us + reo_wnd -
tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
- if (remaining < 0) {
+ if (remaining <= 0) {
tcp_rack_mark_skb_lost(sk, skb);
list_del_init(&skb->tcp_tsorted_anchor);
} else {
- /* Record maximum wait time (+1 to avoid 0) */
- *reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);
+ /* Record maximum wait time */
+ *reo_timeout = max_t(u32, *reo_timeout, remaining);
}
}
}
@@ -116,13 +117,8 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
{
u32 rtt_us;
- if (tp->rack.mstamp &&
- !tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
- end_seq, tp->rack.end_seq))
- return;
-
rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
- if (sacked & TCPCB_RETRANS) {
+ if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
/* If the sacked packet was retransmitted, it's ambiguous
* whether the retransmission or the original (or the prior
* retransmission) was sacked.
@@ -133,13 +129,15 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
* so it's at least one RTT (i.e., retransmission is at least
* an RTT later).
*/
- if (rtt_us < tcp_min_rtt(tp))
- return;
+ return;
}
- tp->rack.rtt_us = rtt_us;
- tp->rack.mstamp = xmit_time;
- tp->rack.end_seq = end_seq;
tp->rack.advanced = 1;
+ tp->rack.rtt_us = rtt_us;
+ if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
+ end_seq, tp->rack.end_seq)) {
+ tp->rack.mstamp = xmit_time;
+ tp->rack.end_seq = end_seq;
+ }
}
/* We have waited long enough to accommodate reordering. Mark the expired
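
[Editor's note: after this rewrite, tcp_rack_advance() always records the newest usable RTT sample, and separately advances the "most recently sent delivered packet" mark only when the packet really was sent after the current mark. A C sketch of that split, mirroring the sent-after comparison; names are hypothetical:]

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct rack { uint64_t mstamp; uint32_t end_seq; uint64_t rtt_us; };

    /* Later send time wins; equal times break ties by sequence. */
    static bool sent_after(uint64_t t1, uint64_t t2, uint32_t s1, uint32_t s2)
    {
        return t1 > t2 || (t1 == t2 && (int32_t)(s1 - s2) > 0);
    }

    static void advance(struct rack *r, uint64_t xmit, uint32_t end_seq,
                        uint64_t rtt_us)
    {
        r->rtt_us = rtt_us;                 /* always keep newest RTT */
        if (sent_after(xmit, r->mstamp, end_seq, r->end_seq)) {
            r->mstamp = xmit;               /* advance the mark only if */
            r->end_seq = end_seq;           /* this send is more recent */
        }
    }

    int main(void)
    {
        struct rack r = {0};
        advance(&r, 100, 10, 30);
        advance(&r, 90, 5, 25);   /* older send: RTT updates, mark doesn't */
        printf("mstamp=%llu rtt=%llu\n",
               (unsigned long long)r.mstamp, (unsigned long long)r.rtt_us);
        return 0;
    }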
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 6a3b1a54a952..6ff2f21ae3fc 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -904,7 +904,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
if (t->parms.collect_md) {
tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
if (!tun_dst)
- return 0;
+ goto drop;
}
ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
log_ecn_error);
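
[Editor's note: returning 0 from the receive path tells the caller the skb was consumed, so bailing out with "return 0" when no tunnel dst could be built leaked the skb; "goto drop" routes it through kfree_skb() instead. A hedged kernel-style sketch of the drop-label idiom; setup_metadata/deliver are hypothetical stand-ins, not the real ipxip6_rcv helpers:]

    /* A receive handler whose 0 return means "skb consumed":
     * every early exit therefore has to free the skb itself. */
    static int rcv_sketch(struct sk_buff *skb)
    {
        if (!setup_metadata(skb))
            goto drop;          /* was "return 0": leaked the skb */
        return deliver(skb);    /* consumes skb on success */
    drop:
        kfree_skb(skb);
        return 0;
    }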
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 0b750a22c4b9..d4e98f20fc2a 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1625,60 +1625,30 @@ static struct proto kcm_proto = {
};
/* Clone a kcm socket. */
-static int kcm_clone(struct socket *osock, struct kcm_clone *info,
- struct socket **newsockp)
+static struct file *kcm_clone(struct socket *osock)
{
struct socket *newsock;
struct sock *newsk;
- struct file *newfile;
- int err, newfd;
- err = -ENFILE;
newsock = sock_alloc();
if (!newsock)
- goto out;
+ return ERR_PTR(-ENFILE);
newsock->type = osock->type;
newsock->ops = osock->ops;
__module_get(newsock->ops->owner);
- newfd = get_unused_fd_flags(0);
- if (unlikely(newfd < 0)) {
- err = newfd;
- goto out_fd_fail;
- }
-
- newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
- if (IS_ERR(newfile)) {
- err = PTR_ERR(newfile);
- goto out_sock_alloc_fail;
- }
-
newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
&kcm_proto, true);
if (!newsk) {
- err = -ENOMEM;
- goto out_sk_alloc_fail;
+ sock_release(newsock);
+ return ERR_PTR(-ENOMEM);
}
-
sock_init_data(newsock, newsk);
init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
- fd_install(newfd, newfile);
- *newsockp = newsock;
- info->fd = newfd;
-
- return 0;
-
-out_sk_alloc_fail:
- fput(newfile);
-out_sock_alloc_fail:
- put_unused_fd(newfd);
-out_fd_fail:
- sock_release(newsock);
-out:
- return err;
+ return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
}
static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
@@ -1708,17 +1678,25 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
}
case SIOCKCMCLONE: {
struct kcm_clone info;
- struct socket *newsock = NULL;
-
- err = kcm_clone(sock, &info, &newsock);
- if (!err) {
- if (copy_to_user((void __user *)arg, &info,
- sizeof(info))) {
- err = -EFAULT;
- sys_close(info.fd);
- }
- }
+ struct file *file;
+
+ info.fd = get_unused_fd_flags(0);
+ if (unlikely(info.fd < 0))
+ return info.fd;
+ file = kcm_clone(sock);
+ if (IS_ERR(file)) {
+ put_unused_fd(info.fd);
+ return PTR_ERR(file);
+ }
+ if (copy_to_user((void __user *)arg, &info,
+ sizeof(info))) {
+ put_unused_fd(info.fd);
+ fput(file);
+ return -EFAULT;
+ }
+ fd_install(info.fd, file);
+ err = 0;
break;
}
default:
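
[Editor's note: kcm_clone() now returns a struct file * directly, using the kernel's ERR_PTR convention (linux/err.h) to carry an errno inside the pointer instead of an out-parameter plus int. A self-contained userspace C re-implementation of the mechanics, assuming a typical flat address space; clone_thing is hypothetical:]

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
        /* errnos occupy the top 4095 addresses, which no valid
         * allocation can use */
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static char *clone_thing(int fail)
    {
        if (fail)
            return ERR_PTR(-ENOMEM);  /* error travels in the pointer */
        return strdup("ok");
    }

    int main(void)
    {
        char *p = clone_thing(1);
        if (IS_ERR(p)) {
            printf("errno %ld\n", -PTR_ERR(p));  /* errno 12 */
            return 1;
        }
        free(p);
        return 0;
    }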
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 8886f15abe90..bc2f1e0977d6 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -183,7 +183,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
long i;
int ret;
- if (rs->rs_bound_addr == 0) {
+ if (rs->rs_bound_addr == 0 || !rs->rs_transport) {
ret = -ENOTCONN; /* XXX not a great errno */
goto out;
}
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index b30a2c70bd48..531250fceb9e 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -369,6 +369,9 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
ctl = nla_data(tb[TCA_CHOKE_PARMS]);
+ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+ return -EINVAL;
+
if (ctl->limit > CHOKE_MAX_QUEUE)
return -EINVAL;
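
[Editor's note: the same red_check_params() guard recurs in the gred, red and sfq hunks below, rejecting malformed netlink parameters before they reach the RED machinery. One plausible shape for such a check, written as a hedged sketch rather than the exact helper in include/net/red.h:]

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: reject inverted thresholds and shifts that would
     * overflow the 32-bit fixed-point average-queue math. */
    static bool red_params_ok(uint32_t qth_min, uint32_t qth_max,
                              uint8_t wlog)
    {
        if (qth_min > qth_max)
            return false;
        if (wlog >= 32)
            return false;
        /* scaled thresholds must still fit in 32 bits */
        if (qth_max > (UINT32_MAX >> wlog))
            return false;
        return true;
    }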
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ff6a5acf6ab0..981c08fe810b 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
+#include <linux/if_macvlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
@@ -391,6 +392,8 @@ unsigned long dev_trans_start(struct net_device *dev)
if (is_vlan_dev(dev))
dev = vlan_dev_real_dev(dev);
+ else if (netif_is_macvlan(dev))
+ dev = macvlan_dev_real_dev(dev);
res = netdev_get_tx_queue(dev, 0)->trans_start;
for (i = 1; i < dev->num_tx_queues; i++) {
val = netdev_get_tx_queue(dev, i)->trans_start;
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 17c7130454bd..bc30f9186ac6 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -356,6 +356,9 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
struct gred_sched *table = qdisc_priv(sch);
struct gred_sched_data *q = table->tab[dp];
+ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+ return -EINVAL;
+
if (!q) {
table->tab[dp] = q = *prealloc;
*prealloc = NULL;
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 7f8ea9e297c3..9d874e60e032 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -212,6 +212,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
ctl = nla_data(tb[TCA_RED_PARMS]);
+ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+ return -EINVAL;
if (ctl->limit > 0) {
child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 09c1203c1711..930e5bd26d3d 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -639,6 +639,9 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
if (ctl->divisor &&
(!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
return -EINVAL;
+ if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
+ ctl_v1->Wlog))
+ return -EINVAL;
if (ctl_v1 && ctl_v1->qth_min) {
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (!p)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 014847e25648..eb17a911aa29 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5080,7 +5080,6 @@ static int sctp_getsockopt_peeloff_common(struct sock *sk, sctp_peeloff_arg_t *p
*newfile = sock_alloc_file(newsock, 0, NULL);
if (IS_ERR(*newfile)) {
put_unused_fd(retval);
- sock_release(newsock);
retval = PTR_ERR(*newfile);
*newfile = NULL;
return retval;
diff --git a/net/socket.c b/net/socket.c
index 42d8e9c9ccd5..05f361faec45 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -406,8 +406,10 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
name.len = strlen(name.name);
}
path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
- if (unlikely(!path.dentry))
+ if (unlikely(!path.dentry)) {
+ sock_release(sock);
return ERR_PTR(-ENOMEM);
+ }
path.mnt = mntget(sock_mnt);
d_instantiate(path.dentry, SOCK_INODE(sock));
@@ -415,9 +417,11 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
&socket_file_ops);
if (IS_ERR(file)) {
- /* drop dentry, keep inode */
+ /* drop dentry, keep inode for a bit */
ihold(d_inode(path.dentry));
path_put(&path);
+ /* ... and now kill it properly */
+ sock_release(sock);
return file;
}
@@ -1330,19 +1334,9 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
retval = sock_create(family, type, protocol, &sock);
if (retval < 0)
- goto out;
-
- retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
- if (retval < 0)
- goto out_release;
-
-out:
- /* It may be already another descriptor 8) Not kernel problem. */
- return retval;
+ return retval;
-out_release:
- sock_release(sock);
- return retval;
+ return sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
}
/*
@@ -1366,87 +1360,72 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
/*
+ * reserve descriptors and make sure we won't fail
+ * to return them to userland.
+ */
+ fd1 = get_unused_fd_flags(flags);
+ if (unlikely(fd1 < 0))
+ return fd1;
+
+ fd2 = get_unused_fd_flags(flags);
+ if (unlikely(fd2 < 0)) {
+ put_unused_fd(fd1);
+ return fd2;
+ }
+
+ err = put_user(fd1, &usockvec[0]);
+ if (err)
+ goto out;
+
+ err = put_user(fd2, &usockvec[1]);
+ if (err)
+ goto out;
+
+ /*
* Obtain the first socket and check if the underlying protocol
* supports the socketpair call.
*/
err = sock_create(family, type, protocol, &sock1);
- if (err < 0)
+ if (unlikely(err < 0))
goto out;
err = sock_create(family, type, protocol, &sock2);
- if (err < 0)
- goto out_release_1;
-
- err = sock1->ops->socketpair(sock1, sock2);
- if (err < 0)
- goto out_release_both;
-
- fd1 = get_unused_fd_flags(flags);
- if (unlikely(fd1 < 0)) {
- err = fd1;
- goto out_release_both;
+ if (unlikely(err < 0)) {
+ sock_release(sock1);
+ goto out;
}
- fd2 = get_unused_fd_flags(flags);
- if (unlikely(fd2 < 0)) {
- err = fd2;
- goto out_put_unused_1;
+ err = sock1->ops->socketpair(sock1, sock2);
+ if (unlikely(err < 0)) {
+ sock_release(sock2);
+ sock_release(sock1);
+ goto out;
}
newfile1 = sock_alloc_file(sock1, flags, NULL);
if (IS_ERR(newfile1)) {
err = PTR_ERR(newfile1);
- goto out_put_unused_both;
+ sock_release(sock2);
+ goto out;
}
newfile2 = sock_alloc_file(sock2, flags, NULL);
if (IS_ERR(newfile2)) {
err = PTR_ERR(newfile2);
- goto out_fput_1;
+ fput(newfile1);
+ goto out;
}
- err = put_user(fd1, &usockvec[0]);
- if (err)
- goto out_fput_both;
-
- err = put_user(fd2, &usockvec[1]);
- if (err)
- goto out_fput_both;
-
audit_fd_pair(fd1, fd2);
fd_install(fd1, newfile1);
fd_install(fd2, newfile2);
- /* fd1 and fd2 may be already another descriptors.
- * Not kernel problem.
- */
-
return 0;
-out_fput_both:
- fput(newfile2);
- fput(newfile1);
- put_unused_fd(fd2);
- put_unused_fd(fd1);
- goto out;
-
-out_fput_1:
- fput(newfile1);
- put_unused_fd(fd2);
- put_unused_fd(fd1);
- sock_release(sock2);
- goto out;
-
-out_put_unused_both:
+out:
put_unused_fd(fd2);
-out_put_unused_1:
put_unused_fd(fd1);
-out_release_both:
- sock_release(sock2);
-out_release_1:
- sock_release(sock1);
-out:
return err;
}
@@ -1562,7 +1541,6 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
if (IS_ERR(newfile)) {
err = PTR_ERR(newfile);
put_unused_fd(newfd);
- sock_release(newsock);
goto out_put;
}
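
[Editor's note: the socketpair() rewrite reorders the syscall so that everything that must report back to userland - both fd reservations and both put_user() stores - happens before any socket is created; the remaining failures are internal and unwind with plain sock_release()/fput(), and fd_install() runs last because an installed fd is immediately visible to other threads and cannot be retracted. A condensed kernel-style sketch of that "reserve first, publish last" ordering, using the real fd APIs; do_pair and make_files are hypothetical:]

    static int do_pair(int __user *uvec, int flags)
    {
        struct file *f1, *f2;
        int fd1, fd2, err;

        fd1 = get_unused_fd_flags(flags);   /* reserve: can be undone */
        if (fd1 < 0)
            return fd1;
        fd2 = get_unused_fd_flags(flags);
        if (fd2 < 0) {
            put_unused_fd(fd1);
            return fd2;
        }
        err = put_user(fd1, &uvec[0]) ?:
              put_user(fd2, &uvec[1]);      /* fail userland steps early */
        if (!err)
            err = make_files(&f1, &f2);     /* hypothetical: builds both files */
        if (err) {
            put_unused_fd(fd2);
            put_unused_fd(fd1);
            return err;
        }
        fd_install(fd1, f1);                /* publish: no undo after this */
        fd_install(fd2, f2);
        return 0;
    }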
diff --git a/net/tipc/server.c b/net/tipc/server.c
index acaef80fb88c..d60c30342327 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -314,6 +314,7 @@ static int tipc_accept_from_sock(struct tipc_conn *con)
newcon->usr_data = s->tipc_conn_new(newcon->conid);
if (!newcon->usr_data) {
sock_release(newsock);
+ conn_put(newcon);
return -ENOMEM;
}
@@ -511,7 +512,7 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type,
s = con->server;
scbr = s->tipc_conn_new(*conid);
if (!scbr) {
- tipc_close_conn(con);
+ conn_put(con);
return false;
}
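
[Editor's note: both tipc fixes are reference-count corrections: a failed accept path must drop the reference it took on the new connection, and a failed subscription drops only its own reference (conn_put()) instead of tearing the whole connection down. A tiny runnable C11 sketch of why a failure path releases exactly the reference it owns; the names are hypothetical:]

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct conn { atomic_int ref; };

    static struct conn *conn_get(struct conn *c)
    {
        atomic_fetch_add(&c->ref, 1);
        return c;
    }

    static void conn_put(struct conn *c)
    {
        if (atomic_fetch_sub(&c->ref, 1) == 1) {
            puts("last ref: freeing");
            free(c);
        }
    }

    int main(void)
    {
        struct conn *c = malloc(sizeof(*c));
        atomic_init(&c->ref, 1);         /* owner's reference */
        struct conn *user = conn_get(c); /* failure path took this ref */
        conn_put(user);                  /* ...so it drops exactly one */
        conn_put(c);                     /* owner's put frees it */
        return 0;
    }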
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index 5583df708b8c..a827547aa102 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -487,7 +487,7 @@ static void hvs_release(struct vsock_sock *vsk)
lock_sock(sk);
- sk->sk_state = SS_DISCONNECTING;
+ sk->sk_state = TCP_CLOSING;
vsock_remove_sock(vsk);
release_sock(sk);