author     Linus Torvalds <torvalds@linux-foundation.org>  2016-03-19 18:05:34 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-19 18:05:34 +0100
commit     1200b6809dfd9d73bc4c7db76d288c35fa4b2ebe
tree       552e03de245cdbd0780ca1215914edc4a26540f7 /net/sctp
parent     Merge branch 'for-4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cg...
parent     bonding: fix bond_get_stats()
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
"Highlights:
1) Support more Realtek wireless chips, from Jes Sorensen.
2) New BPF types for per-cpu hash and array maps, from Alexei
Starovoitov.
3) Make several TCP sysctls per-namespace, from Nikolay Borisov.
4) Allow the use of SO_REUSEPORT in order to do per-thread processing
of incoming TCP/UDP connections. The muxing can be done using a
BPF program which hashes the incoming packet. From Craig Gallek.
[A minimal userspace sketch of this follows the list.]
5) Add a multiplexer for TCP streams, to provide a message-based
interface. BPF programs can be used to determine the message
boundaries. From Tom Herbert.
6) Add 802.1AE MACSEC support, from Sabrina Dubroca.
7) Avoid factorial complexity when taking down an inetdev interface
with lots of configured addresses. We were doing things like
traversing the entire address list for each address removed, and
flushing the entire netfilter conntrack table for every address as
well.
8) Add and use SKB bulk free infrastructure, from Jesper Brouer.
9) Allow offloading u32 classifiers to hardware, and implement for
ixgbe, from John Fastabend.
10) Allow configuring IRQ coalescing parameters on a per-queue basis,
from Kan Liang.
11) Extend ethtool so that larger link mode masks can be supported.
From David Decotigny.
12) Introduce devlink, which can be used to configure port link types
(ethernet vs Infiniband, etc.), port splitting, and switch device
level attributes as a whole. From Jiri Pirko.
13) Hardware offload support for flower classifiers, from Amir Vadai.
14) Add "Local Checksum Offload". Basically, for a tunneled packet
the checksum of the outer header is 'constant' (because with the
checksum field filled into the inner protocol header, the payload
of the outer frame checksums to 'zero'), and we can take advantage
of that in various ways. From Edward Cree [a worked example of the
checksum property follows the list]"
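
For item 4 above, a minimal userspace sketch (illustrative only, not taken from this series) of attaching a classic BPF program to a reuseport group: the kernel runs the program on each incoming packet and uses its return value to pick the receiving socket. The trivial program below always returns 0, i.e. it steers everything to the first socket in the group, and merely stands in for a real hash. SO_REUSEPORT and SO_ATTACH_REUSEPORT_CBPF are the real option names; the fallback #define values are assumptions for older userspace headers.

#include <linux/filter.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef SO_REUSEPORT
#define SO_REUSEPORT             15    /* asm-generic value; assumed for old headers */
#endif
#ifndef SO_ATTACH_REUSEPORT_CBPF
#define SO_ATTACH_REUSEPORT_CBPF 51    /* asm-generic value; assumed for old headers */
#endif

int main(void)
{
    int one = 1;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);    /* UDP for brevity */
    struct sockaddr_in addr;
    /* Trivial classic-BPF program: always return 0, i.e. always pick
     * socket 0 of the reuseport group.  A real load balancer would
     * compute an index from the packet contents instead. */
    struct sock_filter code[] = {
        BPF_STMT(BPF_RET | BPF_K, 0),
    };
    struct sock_fprog prog = {
        .len    = sizeof(code) / sizeof(code[0]),
        .filter = code,
    };

    if (fd < 0)
        return 1;

    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_port = htons(7777);
    addr.sin_addr.s_addr = htonl(INADDR_ANY);

    /* Join (here: create) the reuseport group, then attach the
     * socket-selection program to it. */
    setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
    if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        perror("bind");
        return 1;
    }
    if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
                   &prog, sizeof(prog)) < 0)
        perror("SO_ATTACH_REUSEPORT_CBPF");

    return 0;
}

An extended-BPF program can be attached the same way through SO_ATTACH_REUSEPORT_EBPF, passing the program's file descriptor instead of a sock_fprog.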
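
For item 14, the parenthetical relies on a standard property of the Internet (ones'-complement) checksum: once a checksum field has been filled with the complement of the sum over the data it covers, re-summing that whole region always folds to 0xffff, i.e. "zero" in ones'-complement arithmetic, whatever the payload was, and that constancy is what the outer-header optimization builds on. A small self-contained demonstration of just the arithmetic (not kernel code; csum16 is a local helper written for this example):

#include <stdint.h>
#include <stdio.h>

/* 16-bit ones'-complement sum, i.e. the arithmetic behind the Internet checksum. */
static uint16_t csum16(const uint8_t *buf, size_t len)
{
    uint32_t sum = 0;
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
    if (len & 1)
        sum += (uint32_t)buf[len - 1] << 8;
    while (sum >> 16)        /* fold carries back in */
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

int main(void)
{
    /* Toy "inner segment": 10 payload bytes plus a 2-byte checksum field. */
    uint8_t pkt[12] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 0 };
    uint16_t c = (uint16_t)~csum16(pkt, sizeof(pkt));

    pkt[10] = c >> 8;        /* fill in the checksum field */
    pkt[11] = c & 0xff;

    /* With the field filled in, the region always sums to 0xffff
     * ("zero" in ones'-complement), independent of the payload. */
    printf("sum over checksummed data: 0x%04x\n", csum16(pkt, sizeof(pkt)));
    return 0;
}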
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1548 commits)
bonding: fix bond_get_stats()
net: bcmgenet: fix dma api length mismatch
net/mlx4_core: Fix backward compatibility on VFs
phy: mdio-thunder: Fix some Kconfig typos
lan78xx: add ndo_get_stats64
lan78xx: handle statistics counter rollover
RDS: TCP: Remove unused constant
RDS: TCP: Add sysctl tunables for sndbuf/rcvbuf on rds-tcp socket
net: smc911x: convert pxa dma to dmaengine
team: remove duplicate set of flag IFF_MULTICAST
bonding: remove duplicate set of flag IFF_MULTICAST
net: fix a comment typo
ethernet: micrel: fix some error codes
ip_tunnels, bpf: define IP_TUNNEL_OPTS_MAX and use it
bpf, dst: add and use dst_tclassid helper
bpf: make skb->tc_classid also readable
net: mvneta: bm: clarify dependencies
cls_bpf: reset class and reuse major in da
ldmvsw: Checkpatch sunvnet.c and sunvnet_common.c
ldmvsw: Add ldmvsw.c driver code
...
Diffstat (limited to 'net/sctp')
 net/sctp/associola.c     |  4
 net/sctp/chunk.c         | 19
 net/sctp/input.c         |  6
 net/sctp/output.c        |  6
 net/sctp/outqueue.c      | 30
 net/sctp/probe.c         | 10
 net/sctp/proc.c          |  2
 net/sctp/sm_make_chunk.c | 80
 net/sctp/sm_sideeffect.c | 23
 net/sctp/socket.c        |  9
 net/sctp/transport.c     |  2
 11 files changed, 96 insertions, 95 deletions
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 2bf8ec92dde4..a19b3e607703 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1263,7 +1263,7 @@ static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
     if (score_curr > score_best)
         return curr;
     else if (score_curr == score_best)
-        return sctp_trans_elect_tie(curr, best);
+        return sctp_trans_elect_tie(best, curr);
     else
         return best;
 }
@@ -1493,7 +1493,7 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
 
         asoc->peer.sack_needed = 0;
 
-        sctp_outq_tail(&asoc->outqueue, sack);
+        sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC);
 
         /* Stop the SACK timer.  */
         timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index a3380917f197..958ef5f33f4b 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -70,19 +70,6 @@ static struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp)
     return msg;
 }
 
-void sctp_datamsg_free(struct sctp_datamsg *msg)
-{
-    struct sctp_chunk *chunk;
-
-    /* This doesn't have to be a _safe vairant because
-     * sctp_chunk_free() only drops the refs.
-     */
-    list_for_each_entry(chunk, &msg->chunks, frag_list)
-        sctp_chunk_free(chunk);
-
-    sctp_datamsg_put(msg);
-}
-
 /* Final destructruction of datamsg memory. */
 static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
 {
@@ -273,7 +260,8 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
                 frag |= SCTP_DATA_SACK_IMM;
         }
 
-        chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0);
+        chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag,
+                                         0, GFP_KERNEL);
 
         if (!chunk) {
             err = -ENOMEM;
@@ -309,7 +297,8 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
             (sinfo->sinfo_flags & SCTP_SACK_IMMEDIATELY))
             frag |= SCTP_DATA_SACK_IMM;
 
-        chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0);
+        chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag,
+                                         0, GFP_KERNEL);
 
         if (!chunk) {
             err = -ENOMEM;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 49d2cc751386..db76f1ab4ac2 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -221,7 +221,7 @@ int sctp_rcv(struct sk_buff *skb)
         goto discard_release;
 
     /* Create an SCTP packet structure.  */
-    chunk = sctp_chunkify(skb, asoc, sk);
+    chunk = sctp_chunkify(skb, asoc, sk, GFP_ATOMIC);
     if (!chunk)
         goto discard_release;
     SCTP_INPUT_CB(skb)->chunk = chunk;
@@ -937,7 +937,6 @@ static struct sctp_association *__sctp_lookup_association(
     struct sctp_transport *t;
     struct sctp_association *asoc = NULL;
 
-    rcu_read_lock();
     t = sctp_addrs_lookup_transport(net, local, peer);
     if (!t || !sctp_transport_hold(t))
         goto out;
@@ -949,7 +948,6 @@ static struct sctp_association *__sctp_lookup_association(
     sctp_transport_put(t);
 
 out:
-    rcu_read_unlock();
     return asoc;
 }
 
@@ -962,7 +960,9 @@ struct sctp_association *sctp_lookup_association(struct net *net,
 {
     struct sctp_association *asoc;
 
+    rcu_read_lock();
     asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
+    rcu_read_unlock();
 
     return asoc;
 }
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 9d610eddd19e..736c004abfbc 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -153,7 +153,7 @@ void sctp_packet_free(struct sctp_packet *packet)
  */
 sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
                                        struct sctp_chunk *chunk,
-                                       int one_packet)
+                                       int one_packet, gfp_t gfp)
 {
     sctp_xmit_t retval;
     int error = 0;
@@ -163,7 +163,7 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
     switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
     case SCTP_XMIT_PMTU_FULL:
         if (!packet->has_cookie_echo) {
-            error = sctp_packet_transmit(packet);
+            error = sctp_packet_transmit(packet, gfp);
             if (error < 0)
                 chunk->skb->sk->sk_err = -error;
 
@@ -376,7 +376,7 @@ static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
  *
  * The return value is a normal kernel error return value.
  */
-int sctp_packet_transmit(struct sctp_packet *packet)
+int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
 {
     struct sctp_transport *tp = packet->transport;
     struct sctp_association *asoc = tp->asoc;
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index c0380cfb16ae..f03541d0f12d 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -68,7 +68,7 @@ static void sctp_mark_missing(struct sctp_outq *q,
 
 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
 
-static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);
+static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
 
 /* Add data to the front of the queue. */
 static inline void sctp_outq_head_data(struct sctp_outq *q,
@@ -285,7 +285,7 @@ void sctp_outq_free(struct sctp_outq *q)
 }
 
 /* Put a new chunk in an sctp_outq.  */
-int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
+int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
 {
     struct net *net = sock_net(q->asoc->base.sk);
     int error = 0;
@@ -341,7 +341,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
         return error;
 
     if (!q->cork)
-        error = sctp_outq_flush(q, 0);
+        error = sctp_outq_flush(q, 0, gfp);
 
     return error;
 }
@@ -510,7 +510,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
      * will be flushed at the end.
      */
     if (reason != SCTP_RTXR_FAST_RTX)
-        error = sctp_outq_flush(q, /* rtx_timeout */ 1);
+        error = sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC);
 
     if (error)
         q->asoc->base.sk->sk_err = -error;
@@ -601,12 +601,12 @@ redo:
              * control chunks are already freed so there
              * is nothing we can do.
              */
-            sctp_packet_transmit(pkt);
+            sctp_packet_transmit(pkt, GFP_ATOMIC);
             goto redo;
         }
 
         /* Send this packet.  */
-        error = sctp_packet_transmit(pkt);
+        error = sctp_packet_transmit(pkt, GFP_ATOMIC);
 
         /* If we are retransmitting, we should only
          * send a single packet.
@@ -622,7 +622,7 @@ redo:
 
     case SCTP_XMIT_RWND_FULL:
         /* Send this packet. */
-        error = sctp_packet_transmit(pkt);
+        error = sctp_packet_transmit(pkt, GFP_ATOMIC);
 
         /* Stop sending DATA as there is no more room
          * at the receiver.
@@ -632,7 +632,7 @@ redo:
 
     case SCTP_XMIT_DELAY:
         /* Send this packet. */
-        error = sctp_packet_transmit(pkt);
+        error = sctp_packet_transmit(pkt, GFP_ATOMIC);
 
         /* Stop sending DATA because of nagle delay. */
         done = 1;
@@ -685,12 +685,12 @@ redo:
 }
 
 /* Cork the outqueue so queued chunks are really queued. */
-int sctp_outq_uncork(struct sctp_outq *q)
+int sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
 {
     if (q->cork)
         q->cork = 0;
 
-    return sctp_outq_flush(q, 0);
+    return sctp_outq_flush(q, 0, gfp);
 }
 
@@ -703,7 +703,7 @@ int sctp_outq_uncork(struct sctp_outq *q)
  * locking concerns must be made. Today we use the sock lock to protect
  * this function.
  */
-static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
+static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 {
     struct sctp_packet *packet;
     struct sctp_packet singleton;
@@ -825,7 +825,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
             sctp_packet_init(&singleton, transport, sport, dport);
             sctp_packet_config(&singleton, vtag, 0);
             sctp_packet_append_chunk(&singleton, chunk);
-            error = sctp_packet_transmit(&singleton);
+            error = sctp_packet_transmit(&singleton, gfp);
             if (error < 0)
                 return error;
             break;
@@ -856,7 +856,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
         case SCTP_CID_ASCONF:
         case SCTP_CID_FWD_TSN:
             status = sctp_packet_transmit_chunk(packet, chunk,
-                                                one_packet);
+                                                one_packet, gfp);
             if (status != SCTP_XMIT_OK) {
                 /* put the chunk back */
                 list_add(&chunk->list, &q->control_chunk_list);
@@ -1011,7 +1011,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
                  atomic_read(&chunk->skb->users) : -1);
 
             /* Add the chunk to the packet.  */
-            status = sctp_packet_transmit_chunk(packet, chunk, 0);
+            status = sctp_packet_transmit_chunk(packet, chunk, 0, gfp);
 
             switch (status) {
             case SCTP_XMIT_PMTU_FULL:
@@ -1088,7 +1088,7 @@ sctp_flush_out:
                               send_ready);
         packet = &t->packet;
         if (!sctp_packet_empty(packet))
-            error = sctp_packet_transmit(packet);
+            error = sctp_packet_transmit(packet, gfp);
 
         /* Clear the burst limited state, if any */
         sctp_transport_burst_reset(t);
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index 5e68b94ee640..6cc2152e0740 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -65,7 +65,7 @@ static struct {
     struct kfifo      fifo;
     spinlock_t        lock;
     wait_queue_head_t wait;
-    struct timespec   tstart;
+    struct timespec64 tstart;
 } sctpw;
 
 static __printf(1, 2) void printl(const char *fmt, ...)
@@ -85,7 +85,7 @@ static __printf(1, 2) void printl(const char *fmt, ...)
 static int sctpprobe_open(struct inode *inode, struct file *file)
 {
     kfifo_reset(&sctpw.fifo);
-    getnstimeofday(&sctpw.tstart);
+    ktime_get_ts64(&sctpw.tstart);
     return 0;
 }
 
@@ -138,7 +138,7 @@ static sctp_disposition_t jsctp_sf_eat_sack(struct net *net,
     struct sk_buff *skb = chunk->skb;
     struct sctp_transport *sp;
     static __u32 lcwnd = 0;
-    struct timespec now;
+    struct timespec64 now;
 
     sp = asoc->peer.primary_path;
 
@@ -149,8 +149,8 @@ static sctp_disposition_t jsctp_sf_eat_sack(struct net *net,
         (full || sp->cwnd != lcwnd)) {
         lcwnd = sp->cwnd;
 
-        getnstimeofday(&now);
-        now = timespec_sub(now, sctpw.tstart);
+        ktime_get_ts64(&now);
+        now = timespec64_sub(now, sctpw.tstart);
 
         printl("%lu.%06lu ", (unsigned long) now.tv_sec,
                (unsigned long) now.tv_nsec / NSEC_PER_USEC);
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 963dffcc2618..5cfac8d5d3b3 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -161,7 +161,6 @@ static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_associa
     struct sctp_af *af;
 
     primary = &assoc->peer.primary_addr;
-    rcu_read_lock();
     list_for_each_entry_rcu(transport, &assoc->peer.transport_addr_list,
             transports) {
         addr = &transport->ipaddr;
@@ -172,7 +171,6 @@ static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_associa
         }
         af->seq_dump_addr(seq, addr);
     }
-    rcu_read_unlock();
 }
 
 static void *sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 1296e555fe29..e47abf254ff3 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -62,11 +62,13 @@
 #include <net/sctp/sm.h>
 
 static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
-                                            __u8 type, __u8 flags, int paylen);
+                                            __u8 type, __u8 flags, int paylen,
+                                            gfp_t gfp);
 static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
-                                         __u8 flags, int paylen);
+                                         __u8 flags, int paylen, gfp_t gfp);
 static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
-                                           __u8 type, __u8 flags, int paylen);
+                                           __u8 type, __u8 flags, int paylen,
+                                           gfp_t gfp);
 static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
                                              const struct sctp_association *asoc,
                                              const struct sctp_chunk *init_chunk,
@@ -318,7 +320,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
      * PLEASE DO NOT FIXME [This version does not support Host Name.]
      */
 
-    retval = sctp_make_control(asoc, SCTP_CID_INIT, 0, chunksize);
+    retval = sctp_make_control(asoc, SCTP_CID_INIT, 0, chunksize, gfp);
     if (!retval)
         goto nodata;
 
@@ -465,7 +467,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
                                          num_ext);
 
     /* Now allocate and fill out the chunk.  */
-    retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
+    retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize, gfp);
     if (!retval)
         goto nomem_chunk;
 
@@ -570,7 +572,8 @@ struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc,
     cookie_len = asoc->peer.cookie_len;
 
     /* Build a cookie echo chunk.  */
-    retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ECHO, 0, cookie_len);
+    retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ECHO, 0,
+                               cookie_len, GFP_ATOMIC);
     if (!retval)
         goto nodata;
     retval->subh.cookie_hdr =
@@ -615,7 +618,7 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
 {
     struct sctp_chunk *retval;
 
-    retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ACK, 0, 0);
+    retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ACK, 0, 0, GFP_ATOMIC);
 
     /* RFC 2960 6.4 Multi-homed SCTP Endpoints
      *
@@ -664,7 +667,7 @@ struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
     cwr.lowest_tsn = htonl(lowest_tsn);
     retval = sctp_make_control(asoc, SCTP_CID_ECN_CWR, 0,
-                               sizeof(sctp_cwrhdr_t));
+                               sizeof(sctp_cwrhdr_t), GFP_ATOMIC);
 
     if (!retval)
         goto nodata;
@@ -698,7 +701,7 @@ struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
     ecne.lowest_tsn = htonl(lowest_tsn);
     retval = sctp_make_control(asoc, SCTP_CID_ECN_ECNE, 0,
-                               sizeof(sctp_ecnehdr_t));
+                               sizeof(sctp_ecnehdr_t), GFP_ATOMIC);
     if (!retval)
         goto nodata;
     retval->subh.ecne_hdr =
@@ -713,7 +716,8 @@ nodata:
  */
 struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
                                             const struct sctp_sndrcvinfo *sinfo,
-                                            int data_len, __u8 flags, __u16 ssn)
+                                            int data_len, __u8 flags, __u16 ssn,
+                                            gfp_t gfp)
 {
     struct sctp_chunk *retval;
     struct sctp_datahdr dp;
@@ -734,7 +738,7 @@ struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
     dp.ssn = htons(ssn);
 
     chunk_len = sizeof(dp) + data_len;
-    retval = sctp_make_data(asoc, flags, chunk_len);
+    retval = sctp_make_data(asoc, flags, chunk_len, gfp);
     if (!retval)
         goto nodata;
 
@@ -781,7 +785,7 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
         + sizeof(__u32) * num_dup_tsns;
 
     /* Create the chunk.  */
-    retval = sctp_make_control(asoc, SCTP_CID_SACK, 0, len);
+    retval = sctp_make_control(asoc, SCTP_CID_SACK, 0, len, GFP_ATOMIC);
     if (!retval)
         goto nodata;
 
@@ -861,7 +865,7 @@ struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc,
     shut.cum_tsn_ack = htonl(ctsn);
 
     retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN, 0,
-                               sizeof(sctp_shutdownhdr_t));
+                               sizeof(sctp_shutdownhdr_t), GFP_ATOMIC);
     if (!retval)
         goto nodata;
 
@@ -879,7 +883,8 @@ struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
 {
     struct sctp_chunk *retval;
 
-    retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0);
+    retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0,
+                               GFP_ATOMIC);
 
     /* RFC 2960 6.4 Multi-homed SCTP Endpoints
      *
@@ -908,7 +913,8 @@ struct sctp_chunk *sctp_make_shutdown_complete(
      */
     flags |= asoc ? 0 : SCTP_CHUNK_FLAG_T;
 
-    retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags, 0);
+    retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags,
+                               0, GFP_ATOMIC);
 
     /* RFC 2960 6.4 Multi-homed SCTP Endpoints
      *
@@ -947,7 +953,8 @@ struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc,
             flags = SCTP_CHUNK_FLAG_T;
     }
 
-    retval = sctp_make_control(asoc, SCTP_CID_ABORT, flags, hint);
+    retval = sctp_make_control(asoc, SCTP_CID_ABORT, flags, hint,
+                               GFP_ATOMIC);
 
     /* RFC 2960 6.4 Multi-homed SCTP Endpoints
      *
@@ -1139,7 +1146,8 @@ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
     struct sctp_chunk *retval;
     sctp_sender_hb_info_t hbinfo;
 
-    retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo));
+    retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0,
+                               sizeof(hbinfo), GFP_ATOMIC);
     if (!retval)
         goto nodata;
 
@@ -1167,7 +1175,8 @@ struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc,
 {
     struct sctp_chunk *retval;
 
-    retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen);
+    retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen,
+                               GFP_ATOMIC);
     if (!retval)
         goto nodata;
 
@@ -1200,7 +1209,7 @@ static struct sctp_chunk *sctp_make_op_error_space(
     struct sctp_chunk *retval;
 
     retval = sctp_make_control(asoc, SCTP_CID_ERROR, 0,
-                               sizeof(sctp_errhdr_t) + size);
+                               sizeof(sctp_errhdr_t) + size, GFP_ATOMIC);
     if (!retval)
         goto nodata;
 
@@ -1271,7 +1280,8 @@ struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc)
         return NULL;
 
     retval = sctp_make_control(asoc, SCTP_CID_AUTH, 0,
-            hmac_desc->hmac_len + sizeof(sctp_authhdr_t));
+            hmac_desc->hmac_len + sizeof(sctp_authhdr_t),
+            GFP_ATOMIC);
     if (!retval)
         return NULL;
 
@@ -1309,11 +1319,11 @@ struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc)
  */
 struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
                                  const struct sctp_association *asoc,
-                                 struct sock *sk)
+                                 struct sock *sk, gfp_t gfp)
 {
     struct sctp_chunk *retval;
 
-    retval = kmem_cache_zalloc(sctp_chunk_cachep, GFP_ATOMIC);
+    retval = kmem_cache_zalloc(sctp_chunk_cachep, gfp);
 
     if (!retval)
         goto nodata;
@@ -1361,7 +1371,8 @@ const union sctp_addr *sctp_source(const struct sctp_chunk *chunk)
  * arguments, reserving enough space for a 'paylen' byte payload.
  */
 static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
-                                           __u8 type, __u8 flags, int paylen)
+                                           __u8 type, __u8 flags, int paylen,
+                                           gfp_t gfp)
 {
     struct sctp_chunk *retval;
     sctp_chunkhdr_t *chunk_hdr;
@@ -1369,8 +1380,7 @@ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
     struct sock *sk;
 
     /* No need to allocate LL here, as this is only a chunk. */
-    skb = alloc_skb(WORD_ROUND(sizeof(sctp_chunkhdr_t) + paylen),
-                    GFP_ATOMIC);
+    skb = alloc_skb(WORD_ROUND(sizeof(sctp_chunkhdr_t) + paylen), gfp);
     if (!skb)
         goto nodata;
 
@@ -1381,7 +1391,7 @@ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
     chunk_hdr->length = htons(sizeof(sctp_chunkhdr_t));
 
     sk = asoc ? asoc->base.sk : NULL;
-    retval = sctp_chunkify(skb, asoc, sk);
+    retval = sctp_chunkify(skb, asoc, sk, gfp);
     if (!retval) {
         kfree_skb(skb);
         goto nodata;
@@ -1400,16 +1410,18 @@ nodata:
 }
 
 static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
-                                         __u8 flags, int paylen)
+                                         __u8 flags, int paylen, gfp_t gfp)
 {
-    return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen);
+    return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen, gfp);
 }
 
 static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
-                                            __u8 type, __u8 flags, int paylen)
+                                            __u8 type, __u8 flags, int paylen,
+                                            gfp_t gfp)
 {
-    struct sctp_chunk *chunk = _sctp_make_chunk(asoc, type, flags, paylen);
+    struct sctp_chunk *chunk;
 
+    chunk = _sctp_make_chunk(asoc, type, flags, paylen, gfp);
     if (chunk)
         sctp_control_set_owner_w(chunk);
 
@@ -2763,7 +2775,8 @@ static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc,
     length += addrlen;
 
     /* Create the chunk.  */
-    retval = sctp_make_control(asoc, SCTP_CID_ASCONF, 0, length);
+    retval = sctp_make_control(asoc, SCTP_CID_ASCONF, 0, length,
+                               GFP_ATOMIC);
     if (!retval)
         return NULL;
 
@@ -2947,7 +2960,8 @@ static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *as
     int length = sizeof(asconf) + vparam_len;
 
     /* Create the chunk.  */
-    retval = sctp_make_control(asoc, SCTP_CID_ASCONF_ACK, 0, length);
+    retval = sctp_make_control(asoc, SCTP_CID_ASCONF_ACK, 0, length,
+                               GFP_ATOMIC);
     if (!retval)
         return NULL;
 
@@ -3507,7 +3521,7 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
 
     hint = (nstreams + 1) * sizeof(__u32);
 
-    retval = sctp_make_control(asoc, SCTP_CID_FWD_TSN, 0, hint);
+    retval = sctp_make_control(asoc, SCTP_CID_FWD_TSN, 0, hint, GFP_ATOMIC);
     if (!retval)
         return NULL;
 
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index b5327bb77458..3c22c41a2bc2 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1019,13 +1019,13 @@ static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
  * encouraged for small fragments.
  */
 static int sctp_cmd_send_msg(struct sctp_association *asoc,
-                             struct sctp_datamsg *msg)
+                             struct sctp_datamsg *msg, gfp_t gfp)
 {
     struct sctp_chunk *chunk;
     int error = 0;
 
     list_for_each_entry(chunk, &msg->chunks, frag_list) {
-        error = sctp_outq_tail(&asoc->outqueue, chunk);
+        error = sctp_outq_tail(&asoc->outqueue, chunk, gfp);
         if (error)
             break;
     }
@@ -1249,7 +1249,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
         case SCTP_CMD_NEW_ASOC:
             /* Register a new association.  */
             if (local_cork) {
-                sctp_outq_uncork(&asoc->outqueue);
+                sctp_outq_uncork(&asoc->outqueue, gfp);
                 local_cork = 0;
             }
 
@@ -1269,7 +1269,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 
         case SCTP_CMD_DELETE_TCB:
             if (local_cork) {
-                sctp_outq_uncork(&asoc->outqueue);
+                sctp_outq_uncork(&asoc->outqueue, gfp);
                 local_cork = 0;
             }
             /* Delete the current association.  */
@@ -1423,13 +1423,14 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                 local_cork = 1;
             }
             /* Send a chunk to our peer.  */
-            error = sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk);
+            error = sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk,
+                                   gfp);
             break;
 
         case SCTP_CMD_SEND_PKT:
             /* Send a full packet to our peer.  */
             packet = cmd->obj.packet;
-            sctp_packet_transmit(packet);
+            sctp_packet_transmit(packet, gfp);
             sctp_ootb_pkt_free(packet);
             break;
 
@@ -1639,7 +1640,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
              */
             chunk->pdiscard = 1;
             if (asoc) {
-                sctp_outq_uncork(&asoc->outqueue);
+                sctp_outq_uncork(&asoc->outqueue, gfp);
                 local_cork = 0;
             }
             break;
@@ -1677,7 +1678,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
         case SCTP_CMD_FORCE_PRIM_RETRAN:
             t = asoc->peer.retran_path;
             asoc->peer.retran_path = asoc->peer.primary_path;
-            error = sctp_outq_uncork(&asoc->outqueue);
+            error = sctp_outq_uncork(&asoc->outqueue, gfp);
             local_cork = 0;
             asoc->peer.retran_path = t;
             break;
@@ -1704,7 +1705,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                 sctp_outq_cork(&asoc->outqueue);
                 local_cork = 1;
             }
-            error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
+            error = sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
             break;
         case SCTP_CMD_SEND_NEXT_ASCONF:
             sctp_cmd_send_asconf(asoc);
@@ -1734,9 +1735,9 @@ out:
      */
     if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
         if (chunk->end_of_packet || chunk->singleton)
-            error = sctp_outq_uncork(&asoc->outqueue);
+            error = sctp_outq_uncork(&asoc->outqueue, gfp);
     } else if (local_cork)
-        error = sctp_outq_uncork(&asoc->outqueue);
+        error = sctp_outq_uncork(&asoc->outqueue, gfp);
     return error;
 nomem:
     error = -ENOMEM;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index de8eabf03eed..96e08111106f 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -6106,9 +6106,10 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
     return retval;
 }
 
-static void sctp_hash(struct sock *sk)
+static int sctp_hash(struct sock *sk)
 {
     /* STUB */
+    return 0;
 }
 
 static void sctp_unhash(struct sock *sk)
@@ -7253,14 +7254,12 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
     /* Hook this new socket in to the bind_hash list. */
     head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
                                              inet_sk(oldsk)->inet_num)];
-    local_bh_disable();
-    spin_lock(&head->lock);
+    spin_lock_bh(&head->lock);
     pp = sctp_sk(oldsk)->bind_hash;
     sk_add_bind_node(newsk, &pp->owner);
     sctp_sk(newsk)->bind_hash = pp;
     inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
-    spin_unlock(&head->lock);
-    local_bh_enable();
+    spin_unlock_bh(&head->lock);
 
     /* Copy the bind_addr list from the original endpoint to the new
      * endpoint so that we can handle restarts properly
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index a431c14044a4..d517153891a6 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -72,7 +72,7 @@ static struct sctp_transport *sctp_transport_init(struct net *net,
      */
     peer->rto = msecs_to_jiffies(net->sctp.rto_initial);
 
-    peer->last_time_heard = ktime_get();
+    peer->last_time_heard = ktime_set(0, 0);
     peer->last_time_ecne_reduced = jiffies;
 
     peer->param_flags = SPP_HB_DISABLE |