Diffstat (limited to 'net/sctp')
-rw-r--r-- | net/sctp/associola.c     |  85
-rw-r--r-- | net/sctp/chunk.c         |  12
-rw-r--r-- | net/sctp/output.c        |  28
-rw-r--r-- | net/sctp/outqueue.c      | 660
-rw-r--r-- | net/sctp/sm_make_chunk.c | 143
-rw-r--r-- | net/sctp/socket.c        |  43
-rw-r--r-- | net/sctp/transport.c     |  39
7 files changed, 471 insertions, 539 deletions
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index a47179da24e6..5d5a16204d50 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -652,33 +652,20 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
 	 */
 	peer->param_flags = asoc->param_flags;
 
-	sctp_transport_route(peer, NULL, sp);
-
 	/* Initialize the pmtu of the transport. */
-	if (peer->param_flags & SPP_PMTUD_DISABLE) {
-		if (asoc->pathmtu)
-			peer->pathmtu = asoc->pathmtu;
-		else
-			peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
-	}
+	sctp_transport_route(peer, NULL, sp);
 
 	/* If this is the first transport addr on this association,
 	 * initialize the association PMTU to the peer's PMTU.
 	 * If not and the current association PMTU is higher than the new
 	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
 	 */
-	if (asoc->pathmtu)
-		asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
-	else
-		asoc->pathmtu = peer->pathmtu;
-
-	pr_debug("%s: association:%p PMTU set to %d\n", __func__, asoc,
-		 asoc->pathmtu);
+	sctp_assoc_set_pmtu(asoc, asoc->pathmtu ?
+			    min_t(int, peer->pathmtu, asoc->pathmtu) :
+			    peer->pathmtu);
 
 	peer->pmtu_pending = 0;
 
-	asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
-
 	/* The asoc->peer.port might not be meaningful yet, but
 	 * initialize the packet structure anyway.
 	 */
@@ -988,31 +975,6 @@ out:
 	return match;
 }
 
-/* Is this the association we are looking for? */
-struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
-					   struct net *net,
-					   const union sctp_addr *laddr,
-					   const union sctp_addr *paddr)
-{
-	struct sctp_transport *transport;
-
-	if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
-	    (htons(asoc->peer.port) == paddr->v4.sin_port) &&
-	    net_eq(sock_net(asoc->base.sk), net)) {
-		transport = sctp_assoc_lookup_paddr(asoc, paddr);
-		if (!transport)
-			goto out;
-
-		if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
-					 sctp_sk(asoc->base.sk)))
-			goto out;
-	}
-	transport = NULL;
-
-out:
-	return transport;
-}
-
 /* Do delayed input processing.  This is scheduled by sctp_rcv(). */
 static void sctp_assoc_bh_rcv(struct work_struct *work)
 {
@@ -1434,6 +1396,31 @@ sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
 	}
 }
 
+void sctp_assoc_update_frag_point(struct sctp_association *asoc)
+{
+	int frag = sctp_mtu_payload(sctp_sk(asoc->base.sk), asoc->pathmtu,
+				    sctp_datachk_len(&asoc->stream));
+
+	if (asoc->user_frag)
+		frag = min_t(int, frag, asoc->user_frag);
+
+	frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN -
+				sctp_datachk_len(&asoc->stream));
+
+	asoc->frag_point = SCTP_TRUNC4(frag);
+}
+
+void sctp_assoc_set_pmtu(struct sctp_association *asoc, __u32 pmtu)
+{
+	if (asoc->pathmtu != pmtu) {
+		asoc->pathmtu = pmtu;
+		sctp_assoc_update_frag_point(asoc);
+	}
+
+	pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
+		 asoc->pathmtu, asoc->frag_point);
+}
+
 /* Update the association's pmtu and frag_point by going through all the
  * transports. This routine is called when a transport's PMTU has changed.
  */
@@ -1446,24 +1433,16 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
 		return;
 
 	/* Get the lowest pmtu of all the transports.
 	 */
-	list_for_each_entry(t, &asoc->peer.transport_addr_list,
-			    transports) {
+	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
 		if (t->pmtu_pending && t->dst) {
-			sctp_transport_update_pmtu(
-					t, SCTP_TRUNC4(dst_mtu(t->dst)));
+			sctp_transport_update_pmtu(t, sctp_dst_mtu(t->dst));
 			t->pmtu_pending = 0;
 		}
 		if (!pmtu || (t->pathmtu < pmtu))
 			pmtu = t->pathmtu;
 	}
 
-	if (pmtu) {
-		asoc->pathmtu = pmtu;
-		asoc->frag_point = sctp_frag_point(asoc, pmtu);
-	}
-
-	pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
-		 asoc->pathmtu, asoc->frag_point);
+	sctp_assoc_set_pmtu(asoc, pmtu);
 }
 
 /* Should we send a SACK to update our peer? */
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index be296d633e95..79daa98208c3 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -172,8 +172,6 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 	struct list_head *pos, *temp;
 	struct sctp_chunk *chunk;
 	struct sctp_datamsg *msg;
-	struct sctp_sock *sp;
-	struct sctp_af *af;
 	int err;
 
 	msg = sctp_datamsg_new(GFP_KERNEL);
@@ -192,12 +190,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 	/* This is the biggest possible DATA chunk that can fit into
 	 * the packet
 	 */
-	sp = sctp_sk(asoc->base.sk);
-	af = sp->pf->af;
-	max_data = asoc->pathmtu - af->net_header_len -
-		   sizeof(struct sctphdr) - sctp_datachk_len(&asoc->stream) -
-		   af->ip_options_len(asoc->base.sk);
-	max_data = SCTP_TRUNC4(max_data);
+	max_data = asoc->frag_point;
 
 	/* If the the peer requested that we authenticate DATA chunks
 	 * we need to account for bundling of the AUTH chunks along with
@@ -222,9 +215,6 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 		}
 	}
 
-	/* Check what's our max considering the above */
-	max_data = min_t(size_t, max_data, asoc->frag_point);
-
 	/* Set first_len and then account for possible bundles on first frag */
 	first_len = max_data;
 
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 690d8557bb7b..e672dee302c7 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -90,8 +90,8 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
 {
 	struct sctp_transport *tp = packet->transport;
 	struct sctp_association *asoc = tp->asoc;
+	struct sctp_sock *sp = NULL;
 	struct sock *sk;
-	size_t overhead = sizeof(struct ipv6hdr) + sizeof(struct sctphdr);
 
 	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
 	packet->vtag = vtag;
@@ -102,28 +102,20 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
 
 	/* set packet max_size with pathmtu, then calculate overhead */
 	packet->max_size = tp->pathmtu;
+
 	if (asoc) {
-		struct sctp_sock *sp = sctp_sk(asoc->base.sk);
-		struct sctp_af *af = sp->pf->af;
-
-		overhead = af->net_header_len +
-			   af->ip_options_len(asoc->base.sk);
-		overhead += sizeof(struct sctphdr);
-		packet->overhead = overhead;
-		packet->size = overhead;
-	} else {
-		packet->overhead = overhead;
-		packet->size = overhead;
-		return;
+		sk = asoc->base.sk;
+		sp = sctp_sk(sk);
 	}
+	packet->overhead = sctp_mtu_payload(sp, 0, 0);
+	packet->size = packet->overhead;
+
+	if (!asoc)
+		return;
 
 	/* update dst or transport pathmtu if in need */
-	sk = asoc->base.sk;
 	if (!sctp_transport_dst_check(tp)) {
-		sctp_transport_route(tp, NULL, sctp_sk(sk));
-		if (asoc->param_flags & SPP_PMTUD_ENABLE)
-			sctp_assoc_sync_pmtu(asoc);
-	} else if (!sctp_transport_pmtu_check(tp)) {
+		sctp_transport_route(tp, NULL, sp);
 		if (asoc->param_flags & SPP_PMTUD_ENABLE)
 			sctp_assoc_sync_pmtu(asoc);
 	}
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index f211b3db6a35..d68aa33485a9 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -601,14 +601,14 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 
 /*
  * Transmit DATA chunks on the retransmit queue.  Upon return from
- * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
+ * __sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
  * need to be transmitted by the caller.
  * We assume that pkt->transport has already been set.
  *
  * The return value is a normal kernel error return value.
  */
-static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
-			       int rtx_timeout, int *start_timer)
+static int __sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
+				 int rtx_timeout, int *start_timer, gfp_t gfp)
 {
 	struct sctp_transport *transport = pkt->transport;
 	struct sctp_chunk *chunk, *chunk1;
@@ -684,12 +684,12 @@ redo:
 				 * control chunks are already freed so there
 				 * is nothing we can do.
 				 */
-				sctp_packet_transmit(pkt, GFP_ATOMIC);
+				sctp_packet_transmit(pkt, gfp);
 				goto redo;
 			}
 
 			/* Send this packet. */
-			error = sctp_packet_transmit(pkt, GFP_ATOMIC);
+			error = sctp_packet_transmit(pkt, gfp);
 
 			/* If we are retransmitting, we should only
 			 * send a single packet.
@@ -705,7 +705,7 @@ redo:
 
 		case SCTP_XMIT_RWND_FULL:
 			/* Send this packet. */
-			error = sctp_packet_transmit(pkt, GFP_ATOMIC);
+			error = sctp_packet_transmit(pkt, gfp);
 
 			/* Stop sending DATA as there is no more room
 			 * at the receiver.
@@ -715,7 +715,7 @@ redo:
 
 		case SCTP_XMIT_DELAY:
 			/* Send this packet. */
-			error = sctp_packet_transmit(pkt, GFP_ATOMIC);
+			error = sctp_packet_transmit(pkt, gfp);
 
 			/* Stop sending DATA because of nagle delay. */
 			done = 1;
@@ -776,68 +776,43 @@ void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
 	sctp_outq_flush(q, 0, gfp);
 }
 
-
-/*
- * Try to flush an outqueue.
- *
- * Description: Send everything in q which we legally can, subject to
- * congestion limitations.
- * * Note: This function can be called from multiple contexts so appropriate
- * locking concerns must be made.  Today we use the sock lock to protect
- * this function.
- */
-static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
+static int sctp_packet_singleton(struct sctp_transport *transport,
+				 struct sctp_chunk *chunk, gfp_t gfp)
 {
-	struct sctp_packet *packet;
+	const struct sctp_association *asoc = transport->asoc;
+	const __u16 sport = asoc->base.bind_addr.port;
+	const __u16 dport = asoc->peer.port;
+	const __u32 vtag = asoc->peer.i.init_tag;
 	struct sctp_packet singleton;
-	struct sctp_association *asoc = q->asoc;
-	__u16 sport = asoc->base.bind_addr.port;
-	__u16 dport = asoc->peer.port;
-	__u32 vtag = asoc->peer.i.init_tag;
-	struct sctp_transport *transport = NULL;
-	struct sctp_transport *new_transport;
-	struct sctp_chunk *chunk, *tmp;
-	enum sctp_xmit status;
-	int error = 0;
-	int start_timer = 0;
-	int one_packet = 0;
 
+	sctp_packet_init(&singleton, transport, sport, dport);
+	sctp_packet_config(&singleton, vtag, 0);
+	sctp_packet_append_chunk(&singleton, chunk);
+	return sctp_packet_transmit(&singleton, gfp);
+}
+
+/* Struct to hold the context during sctp outq flush */
+struct sctp_flush_ctx {
+	struct sctp_outq *q;
+	/* Current transport being used. It's NOT the same as curr active one */
+	struct sctp_transport *transport;
 	/* These transports have chunks to send. */
 	struct list_head transport_list;
-	struct list_head *ltransport;
-
-	INIT_LIST_HEAD(&transport_list);
-	packet = NULL;
-
-	/*
-	 * 6.10 Bundling
-	 *   ...
-	 *   When bundling control chunks with DATA chunks, an
-	 *   endpoint MUST place control chunks first in the outbound
-	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
-	 *   within a SCTP packet in increasing order of TSN.
-	 *   ...
-	 */
-
-	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
-		/* RFC 5061, 5.3
-		 * F1) This means that until such time as the ASCONF
-		 * containing the add is acknowledged, the sender MUST
-		 * NOT use the new IP address as a source for ANY SCTP
-		 * packet except on carrying an ASCONF Chunk.
-		 */
-		if (asoc->src_out_of_asoc_ok &&
-		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
-			continue;
-
-		list_del_init(&chunk->list);
+	struct sctp_association *asoc;
+	/* Packet on the current transport above */
+	struct sctp_packet *packet;
+	gfp_t gfp;
+};
 
-		/* Pick the right transport to use. */
-		new_transport = chunk->transport;
+/* transport: current transport */
+static void sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
+				       struct sctp_chunk *chunk)
+{
+	struct sctp_transport *new_transport = chunk->transport;
 
-		if (!new_transport) {
-			/*
-			 * If we have a prior transport pointer, see if
+	if (!new_transport) {
+		if (!sctp_chunk_is_data(chunk)) {
+			/* If we have a prior transport pointer, see if
 			 * the destination address of the chunk
 			 * matches the destination address of the
 			 * current transport.  If not a match, then
@@ -846,22 +821,26 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 			 * after processing ASCONFs, we may have new
 			 * transports created.
 			 */
-			if (transport &&
-			    sctp_cmp_addr_exact(&chunk->dest,
-						&transport->ipaddr))
-				new_transport = transport;
+			if (ctx->transport && sctp_cmp_addr_exact(&chunk->dest,
+							&ctx->transport->ipaddr))
+				new_transport = ctx->transport;
 			else
-				new_transport = sctp_assoc_lookup_paddr(asoc,
-								&chunk->dest);
+				new_transport = sctp_assoc_lookup_paddr(ctx->asoc,
+								&chunk->dest);
+		}
 
-			/* if we still don't have a new transport, then
-			 * use the current active path.
-			 */
-			if (!new_transport)
-				new_transport = asoc->peer.active_path;
-		} else if ((new_transport->state == SCTP_INACTIVE) ||
-			   (new_transport->state == SCTP_UNCONFIRMED) ||
-			   (new_transport->state == SCTP_PF)) {
+		/* if we still don't have a new transport, then
+		 * use the current active path.
+		 */
+		if (!new_transport)
+			new_transport = ctx->asoc->peer.active_path;
+	} else {
+		__u8 type;
+
+		switch (new_transport->state) {
+		case SCTP_INACTIVE:
+		case SCTP_UNCONFIRMED:
+		case SCTP_PF:
 			/* If the chunk is Heartbeat or Heartbeat Ack,
 			 * send it to chunk->transport, even if it's
 			 * inactive.
@@ -875,29 +854,64 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 			 *
 			 * ASCONF_ACKs also must be sent to the source.
 			 */
-			if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
-			    chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK &&
-			    chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK)
-				new_transport = asoc->peer.active_path;
+			type = chunk->chunk_hdr->type;
+			if (type != SCTP_CID_HEARTBEAT &&
+			    type != SCTP_CID_HEARTBEAT_ACK &&
+			    type != SCTP_CID_ASCONF_ACK)
+				new_transport = ctx->asoc->peer.active_path;
+			break;
+		default:
+			break;
 		}
+	}
+
+	/* Are we switching transports? Take care of transport locks. */
+	if (new_transport != ctx->transport) {
+		ctx->transport = new_transport;
+		ctx->packet = &ctx->transport->packet;
 
-		/* Are we switching transports?
-		 * Take care of transport locks.
+		if (list_empty(&ctx->transport->send_ready))
+			list_add_tail(&ctx->transport->send_ready,
+				      &ctx->transport_list);
+
+		sctp_packet_config(ctx->packet,
+				   ctx->asoc->peer.i.init_tag,
+				   ctx->asoc->peer.ecn_capable);
+		/* We've switched transports, so apply the
+		 * Burst limit to the new transport.
 		 */
-		if (new_transport != transport) {
-			transport = new_transport;
-			if (list_empty(&transport->send_ready)) {
-				list_add_tail(&transport->send_ready,
-					      &transport_list);
-			}
-			packet = &transport->packet;
-			sctp_packet_config(packet, vtag,
-					   asoc->peer.ecn_capable);
-		}
+		sctp_transport_burst_limited(ctx->transport);
+	}
+}
+
+static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
+{
+	struct sctp_chunk *chunk, *tmp;
+	enum sctp_xmit status;
+	int one_packet, error;
+
+	list_for_each_entry_safe(chunk, tmp, &ctx->q->control_chunk_list, list) {
+		one_packet = 0;
+
+		/* RFC 5061, 5.3
+		 * F1) This means that until such time as the ASCONF
+		 * containing the add is acknowledged, the sender MUST
+		 * NOT use the new IP address as a source for ANY SCTP
+		 * packet except on carrying an ASCONF Chunk.
+		 */
+		if (ctx->asoc->src_out_of_asoc_ok &&
+		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
+			continue;
+
+		list_del_init(&chunk->list);
+
+		/* Pick the right transport to use. Should always be true for
+		 * the first chunk as we don't have a transport by then.
+		 */
+		sctp_outq_select_transport(ctx, chunk);
 
 		switch (chunk->chunk_hdr->type) {
-		/*
-		 * 6.10 Bundling
+		/* 6.10 Bundling
 		 *   ...
 		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
 		 *   COMPLETE with any other chunks.  [Send them immediately.]
@@ -905,20 +919,19 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 		case SCTP_CID_INIT:
 		case SCTP_CID_INIT_ACK:
 		case SCTP_CID_SHUTDOWN_COMPLETE:
-			sctp_packet_init(&singleton, transport, sport, dport);
-			sctp_packet_config(&singleton, vtag, 0);
-			sctp_packet_append_chunk(&singleton, chunk);
-			error = sctp_packet_transmit(&singleton, gfp);
+			error = sctp_packet_singleton(ctx->transport, chunk,
+						      ctx->gfp);
 			if (error < 0) {
-				asoc->base.sk->sk_err = -error;
+				ctx->asoc->base.sk->sk_err = -error;
 				return;
 			}
 			break;
 
 		case SCTP_CID_ABORT:
 			if (sctp_test_T_bit(chunk))
-				packet->vtag = asoc->c.my_vtag;
+				ctx->packet->vtag = ctx->asoc->c.my_vtag;
 			/* fallthru */
+
 		/* The following chunks are "response" chunks, i.e.
 		 * they are generated in response to something we
 		 * received.  If we are sending these, then we can
@@ -942,27 +955,27 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 		case SCTP_CID_FWD_TSN:
 		case SCTP_CID_I_FWD_TSN:
 		case SCTP_CID_RECONF:
-			status = sctp_packet_transmit_chunk(packet, chunk,
-							    one_packet, gfp);
-			if (status != SCTP_XMIT_OK) {
+			status = sctp_packet_transmit_chunk(ctx->packet, chunk,
+							    one_packet, ctx->gfp);
+			if (status != SCTP_XMIT_OK) {
 				/* put the chunk back */
-				list_add(&chunk->list, &q->control_chunk_list);
+				list_add(&chunk->list, &ctx->q->control_chunk_list);
 				break;
 			}
 
-			asoc->stats.octrlchunks++;
+			ctx->asoc->stats.octrlchunks++;
 			/* PR-SCTP C5) If a FORWARD TSN is sent, the
 			 * sender MUST assure that at least one T3-rtx
 			 * timer is running.
 			 */
 			if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN ||
 			    chunk->chunk_hdr->type == SCTP_CID_I_FWD_TSN) {
-				sctp_transport_reset_t3_rtx(transport);
-				transport->last_time_sent = jiffies;
+				sctp_transport_reset_t3_rtx(ctx->transport);
+				ctx->transport->last_time_sent = jiffies;
 			}
 
-			if (chunk == asoc->strreset_chunk)
-				sctp_transport_reset_reconf_timer(transport);
+			if (chunk == ctx->asoc->strreset_chunk)
+				sctp_transport_reset_reconf_timer(ctx->transport);
 
 			break;
 
@@ -971,232 +984,186 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 			BUG();
 		}
 	}
+}
 
-	if (q->asoc->src_out_of_asoc_ok)
-		goto sctp_flush_out;
+/* Returns false if new data shouldn't be sent */
+static bool sctp_outq_flush_rtx(struct sctp_flush_ctx *ctx,
+				int rtx_timeout)
+{
+	int error, start_timer = 0;
+
+	if (ctx->asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
+		return false;
+
+	if (ctx->transport != ctx->asoc->peer.retran_path) {
+		/* Switch transports & prepare the packet.  */
+		ctx->transport = ctx->asoc->peer.retran_path;
+		ctx->packet = &ctx->transport->packet;
+
+		if (list_empty(&ctx->transport->send_ready))
+			list_add_tail(&ctx->transport->send_ready,
+				      &ctx->transport_list);
+
+		sctp_packet_config(ctx->packet, ctx->asoc->peer.i.init_tag,
+				   ctx->asoc->peer.ecn_capable);
+	}
+
+	error = __sctp_outq_flush_rtx(ctx->q, ctx->packet, rtx_timeout,
+				      &start_timer, ctx->gfp);
+	if (error < 0)
+		ctx->asoc->base.sk->sk_err = -error;
+
+	if (start_timer) {
+		sctp_transport_reset_t3_rtx(ctx->transport);
+		ctx->transport->last_time_sent = jiffies;
+	}
+
+	/* This can happen on COOKIE-ECHO resend.  Only
+	 * one chunk can get bundled with a COOKIE-ECHO.
+	 */
+	if (ctx->packet->has_cookie_echo)
+		return false;
+
+	/* Don't send new data if there is still data
+	 * waiting to retransmit.
+	 */
+	if (!list_empty(&ctx->q->retransmit))
+		return false;
+
+	return true;
+}
+
+static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
+				 int rtx_timeout)
+{
+	struct sctp_chunk *chunk;
+	enum sctp_xmit status;
 
 	/* Is it OK to send data chunks?  */
-	switch (asoc->state) {
+	switch (ctx->asoc->state) {
 	case SCTP_STATE_COOKIE_ECHOED:
 		/* Only allow bundling when this packet has a COOKIE-ECHO
 		 * chunk.
 		 */
-		if (!packet || !packet->has_cookie_echo)
-			break;
+		if (!ctx->packet || !ctx->packet->has_cookie_echo)
+			return;
 
 		/* fallthru */
 	case SCTP_STATE_ESTABLISHED:
 	case SCTP_STATE_SHUTDOWN_PENDING:
 	case SCTP_STATE_SHUTDOWN_RECEIVED:
-		/*
-		 * RFC 2960 6.1  Transmission of DATA Chunks
-		 *
-		 * C) When the time comes for the sender to transmit,
-		 * before sending new DATA chunks, the sender MUST
-		 * first transmit any outstanding DATA chunks which
-		 * are marked for retransmission (limited by the
-		 * current cwnd).
-		 */
-		if (!list_empty(&q->retransmit)) {
-			if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
-				goto sctp_flush_out;
-			if (transport == asoc->peer.retran_path)
-				goto retran;
-
-			/* Switch transports & prepare the packet.  */
-
-			transport = asoc->peer.retran_path;
+		break;
 
-			if (list_empty(&transport->send_ready)) {
-				list_add_tail(&transport->send_ready,
-					      &transport_list);
-			}
+	default:
+		/* Do nothing. */
+		return;
+	}
 
-			packet = &transport->packet;
-			sctp_packet_config(packet, vtag,
-					   asoc->peer.ecn_capable);
-		retran:
-			error = sctp_outq_flush_rtx(q, packet,
-						    rtx_timeout, &start_timer);
-			if (error < 0)
-				asoc->base.sk->sk_err = -error;
+	/* RFC 2960 6.1  Transmission of DATA Chunks
+	 *
+	 * C) When the time comes for the sender to transmit,
+	 * before sending new DATA chunks, the sender MUST
+	 * first transmit any outstanding DATA chunks which
+	 * are marked for retransmission (limited by the
+	 * current cwnd).
	 */
+	if (!list_empty(&ctx->q->retransmit) &&
+	    !sctp_outq_flush_rtx(ctx, rtx_timeout))
+		return;
 
-			if (start_timer) {
-				sctp_transport_reset_t3_rtx(transport);
-				transport->last_time_sent = jiffies;
-			}
+	/* Apply Max.Burst limitation to the current transport in
+	 * case it will be used for new data.  We are going to
+	 * rest it before we return, but we want to apply the limit
+	 * to the currently queued data.
+	 */
+	if (ctx->transport)
+		sctp_transport_burst_limited(ctx->transport);
 
-			/* This can happen on COOKIE-ECHO resend.  Only
-			 * one chunk can get bundled with a COOKIE-ECHO.
-			 */
-			if (packet->has_cookie_echo)
-				goto sctp_flush_out;
+	/* Finally, transmit new packets.  */
+	while ((chunk = sctp_outq_dequeue_data(ctx->q)) != NULL) {
+		__u32 sid = ntohs(chunk->subh.data_hdr->stream);
 
-			/* Don't send new data if there is still data
-			 * waiting to retransmit.
-			 */
-			if (!list_empty(&q->retransmit))
-				goto sctp_flush_out;
+		/* Has this chunk expired? */
+		if (sctp_chunk_abandoned(chunk)) {
+			sctp_sched_dequeue_done(ctx->q, chunk);
+			sctp_chunk_fail(chunk, 0);
+			sctp_chunk_free(chunk);
+			continue;
 		}
 
-		/* Apply Max.Burst limitation to the current transport in
-		 * case it will be used for new data.  We are going to
-		 * rest it before we return, but we want to apply the limit
-		 * to the currently queued data.
-		 */
-		if (transport)
-			sctp_transport_burst_limited(transport);
-
-		/* Finally, transmit new packets.  */
-		while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
-			__u32 sid = ntohs(chunk->subh.data_hdr->stream);
-
-			/* Has this chunk expired? */
-			if (sctp_chunk_abandoned(chunk)) {
-				sctp_sched_dequeue_done(q, chunk);
-				sctp_chunk_fail(chunk, 0);
-				sctp_chunk_free(chunk);
-				continue;
-			}
+		if (ctx->asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) {
+			sctp_outq_head_data(ctx->q, chunk);
+			break;
+		}
 
-			if (asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) {
-				sctp_outq_head_data(q, chunk);
-				goto sctp_flush_out;
-			}
+		sctp_outq_select_transport(ctx, chunk);
 
-			/* If there is a specified transport, use it.
-			 * Otherwise, we want to use the active path.
+		pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p skb->users:%d\n",
+			 __func__, ctx->q, chunk, chunk && chunk->chunk_hdr ?
+			 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
+			 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
+			 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
+			 refcount_read(&chunk->skb->users) : -1);
+
+		/* Add the chunk to the packet.  */
+		status = sctp_packet_transmit_chunk(ctx->packet, chunk, 0,
+						    ctx->gfp);
+		if (status != SCTP_XMIT_OK) {
+			/* We could not append this chunk, so put
+			 * the chunk back on the output queue.
 			 */
-			new_transport = chunk->transport;
-			if (!new_transport ||
-			    ((new_transport->state == SCTP_INACTIVE) ||
-			     (new_transport->state == SCTP_UNCONFIRMED) ||
-			     (new_transport->state == SCTP_PF)))
-				new_transport = asoc->peer.active_path;
-			if (new_transport->state == SCTP_UNCONFIRMED) {
-				WARN_ONCE(1, "Attempt to send packet on unconfirmed path.");
-				sctp_sched_dequeue_done(q, chunk);
-				sctp_chunk_fail(chunk, 0);
-				sctp_chunk_free(chunk);
-				continue;
-			}
-
-			/* Change packets if necessary.  */
-			if (new_transport != transport) {
-				transport = new_transport;
+			pr_debug("%s: could not transmit tsn:0x%x, status:%d\n",
+				 __func__, ntohl(chunk->subh.data_hdr->tsn),
+				 status);
 
-				/* Schedule to have this transport's
-				 * packet flushed.
-				 */
-				if (list_empty(&transport->send_ready)) {
-					list_add_tail(&transport->send_ready,
-						      &transport_list);
-				}
-
-				packet = &transport->packet;
-				sctp_packet_config(packet, vtag,
-						   asoc->peer.ecn_capable);
-				/* We've switched transports, so apply the
-				 * Burst limit to the new transport.
-				 */
-				sctp_transport_burst_limited(transport);
-			}
-
-			pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p "
-				 "skb->users:%d\n",
-				 __func__, q, chunk, chunk && chunk->chunk_hdr ?
-				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
-				 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
-				 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
-				 refcount_read(&chunk->skb->users) : -1);
-
-			/* Add the chunk to the packet.  */
-			status = sctp_packet_transmit_chunk(packet, chunk, 0, gfp);
-
-			switch (status) {
-			case SCTP_XMIT_PMTU_FULL:
-			case SCTP_XMIT_RWND_FULL:
-			case SCTP_XMIT_DELAY:
-				/* We could not append this chunk, so put
-				 * the chunk back on the output queue.
-				 */
-				pr_debug("%s: could not transmit tsn:0x%x, status:%d\n",
-					 __func__, ntohl(chunk->subh.data_hdr->tsn),
-					 status);
-
-				sctp_outq_head_data(q, chunk);
-				goto sctp_flush_out;
+			sctp_outq_head_data(ctx->q, chunk);
+			break;
+		}
 
-			case SCTP_XMIT_OK:
-				/* The sender is in the SHUTDOWN-PENDING state,
-				 * The sender MAY set the I-bit in the DATA
-				 * chunk header.
-				 */
-				if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
-					chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
-				if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
-					asoc->stats.ouodchunks++;
-				else
-					asoc->stats.oodchunks++;
-
-				/* Only now it's safe to consider this
-				 * chunk as sent, sched-wise.
-				 */
-				sctp_sched_dequeue_done(q, chunk);
-
-				break;
+		/* The sender is in the SHUTDOWN-PENDING state,
+		 * The sender MAY set the I-bit in the DATA
+		 * chunk header.
+		 */
+		if (ctx->asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
+			chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
+		if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+			ctx->asoc->stats.ouodchunks++;
+		else
+			ctx->asoc->stats.oodchunks++;
 
-			default:
-				BUG();
-			}
+		/* Only now it's safe to consider this
+		 * chunk as sent, sched-wise.
+		 */
+		sctp_sched_dequeue_done(ctx->q, chunk);
 
-			/* BUG: We assume that the sctp_packet_transmit()
-			 * call below will succeed all the time and add the
-			 * chunk to the transmitted list and restart the
-			 * timers.
-			 * It is possible that the call can fail under OOM
-			 * conditions.
-			 *
-			 * Is this really a problem?  Won't this behave
-			 * like a lost TSN?
-			 */
-			list_add_tail(&chunk->transmitted_list,
-				      &transport->transmitted);
+		list_add_tail(&chunk->transmitted_list,
+			      &ctx->transport->transmitted);
 
-			sctp_transport_reset_t3_rtx(transport);
-			transport->last_time_sent = jiffies;
-
-			/* Only let one DATA chunk get bundled with a
-			 * COOKIE-ECHO chunk.
-			 */
-			if (packet->has_cookie_echo)
-				goto sctp_flush_out;
-		}
-		break;
+		sctp_transport_reset_t3_rtx(ctx->transport);
+		ctx->transport->last_time_sent = jiffies;
 
-	default:
-		/* Do nothing. */
-		break;
-	}
+		/* Only let one DATA chunk get bundled with a
+		 * COOKIE-ECHO chunk.
+		 */
+		if (ctx->packet->has_cookie_echo)
+			break;
 	}
+}
 
-sctp_flush_out:
+static void sctp_outq_flush_transports(struct sctp_flush_ctx *ctx)
+{
+	struct list_head *ltransport;
+	struct sctp_packet *packet;
+	struct sctp_transport *t;
+	int error = 0;
 
-	/* Before returning, examine all the transports touched in
-	 * this call.  Right now, we bluntly force clear all the
-	 * transports.  Things might change after we implement Nagle.
-	 * But such an examination is still required.
-	 *
-	 * --xguo
-	 */
-	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
-		struct sctp_transport *t = list_entry(ltransport,
-						      struct sctp_transport,
-						      send_ready);
+	while ((ltransport = sctp_list_dequeue(&ctx->transport_list)) != NULL) {
+		t = list_entry(ltransport, struct sctp_transport, send_ready);
 		packet = &t->packet;
 		if (!sctp_packet_empty(packet)) {
-			error = sctp_packet_transmit(packet, gfp);
+			error = sctp_packet_transmit(packet, ctx->gfp);
 			if (error < 0)
-				asoc->base.sk->sk_err = -error;
+				ctx->q->asoc->base.sk->sk_err = -error;
 		}
 
 		/* Clear the burst limited state, if any */
@@ -1204,6 +1171,47 @@ sctp_flush_out:
 	}
 }
 
+/* Try to flush an outqueue.
+ *
+ * Description: Send everything in q which we legally can, subject to
+ * congestion limitations.
+ * * Note: This function can be called from multiple contexts so appropriate
+ * locking concerns must be made.  Today we use the sock lock to protect
+ * this function.
+ */
+
+static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
+{
+	struct sctp_flush_ctx ctx = {
+		.q = q,
+		.transport = NULL,
+		.transport_list = LIST_HEAD_INIT(ctx.transport_list),
+		.asoc = q->asoc,
+		.packet = NULL,
+		.gfp = gfp,
+	};
+
+	/* 6.10 Bundling
+	 *   ...
+	 *   When bundling control chunks with DATA chunks, an
+	 *   endpoint MUST place control chunks first in the outbound
+	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
+	 *   within a SCTP packet in increasing order of TSN.
+	 *   ...
+	 */
+
+	sctp_outq_flush_ctrl(&ctx);
+
+	if (q->asoc->src_out_of_asoc_ok)
+		goto sctp_flush_out;
+
+	sctp_outq_flush_data(&ctx, rtx_timeout);
+
+sctp_flush_out:
+
+	sctp_outq_flush_transports(&ctx);
+}
+
 /* Update unack_data based on the incoming SACK chunk */
 static void sctp_sack_update_unack_data(struct sctp_association *assoc,
 					struct sctp_sackhdr *sack)
@@ -1457,7 +1465,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 			 * the outstanding bytes for this chunk, so only
 			 * count bytes associated with a transport.
 			 */
-			if (transport) {
+			if (transport && !tchunk->tsn_gap_acked) {
 				/* If this chunk is being used for RTT
 				 * measurement, calculate the RTT and update
 				 * the RTO using this value.
@@ -1469,14 +1477,34 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 				 * first instance of the packet or a later
 				 * instance).
 				 */
-				if (!tchunk->tsn_gap_acked &&
-				    !sctp_chunk_retransmitted(tchunk) &&
+				if (!sctp_chunk_retransmitted(tchunk) &&
 				    tchunk->rtt_in_progress) {
 					tchunk->rtt_in_progress = 0;
 					rtt = jiffies - tchunk->sent_at;
 					sctp_transport_update_rto(transport, rtt);
 				}
+
+				if (TSN_lte(tsn, sack_ctsn)) {
+					/*
+					 * SFR-CACC algorithm:
+					 * 2) If the SACK contains gap acks
+					 * and the flag CHANGEOVER_ACTIVE is
+					 * set the receiver of the SACK MUST
+					 * take the following action:
+					 *
+					 * B) For each TSN t being acked that
+					 * has not been acked in any SACK so
+					 * far, set cacc_saw_newack to 1 for
+					 * the destination that the TSN was
+					 * sent to.
+					 */
+					if (sack->num_gap_ack_blocks &&
+					    q->asoc->peer.primary_path->cacc.
+					    changeover_active)
+						transport->cacc.cacc_saw_newack
+							= 1;
+				}
 			}
 
 			/* If the chunk hasn't been marked as ACKED,
@@ -1508,28 +1536,6 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 					restart_timer = 1;
 				forward_progress = true;
 
-				if (!tchunk->tsn_gap_acked) {
-					/*
-					 * SFR-CACC algorithm:
-					 * 2) If the SACK contains gap acks
-					 * and the flag CHANGEOVER_ACTIVE is
-					 * set the receiver of the SACK MUST
-					 * take the following action:
-					 *
-					 * B) For each TSN t being acked that
-					 * has not been acked in any SACK so
-					 * far, set cacc_saw_newack to 1 for
-					 * the destination that the TSN was
-					 * sent to.
-					 */
-					if (transport &&
-					    sack->num_gap_ack_blocks &&
-					    q->asoc->peer.primary_path->cacc.
-					    changeover_active)
-						transport->cacc.cacc_saw_newack
-							= 1;
-				}
-
 				list_add_tail(&tchunk->transmitted_list,
 					      &q->sacked);
 			} else {
@@ -1756,7 +1762,7 @@ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
 	if (TSN_lte(tsn, ctsn))
 		goto pass;
 
-	/* 3.3.4 Selective Acknowledgement (SACK) (3):
+	/* 3.3.4 Selective Acknowledgment (SACK) (3):
 	 *
 	 * Gap Ack Blocks:
 	 *  These fields contain the Gap Ack Blocks. They are repeated
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index e62addb60434..4a4fd1971255 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -81,8 +81,6 @@ static int sctp_process_param(struct sctp_association *asoc,
 			      gfp_t gfp);
 static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
 			      const void *data);
-static void *sctp_addto_chunk_fixed(struct sctp_chunk *, int len,
-				    const void *data);
 
 /* Control chunk destructor */
 static void sctp_control_release_owner(struct sk_buff *skb)
@@ -154,12 +152,11 @@ static const struct sctp_paramhdr prsctp_param = {
 	cpu_to_be16(sizeof(struct sctp_paramhdr)),
 };
 
-/* A helper to initialize an op error inside a
- * provided chunk, as most cause codes will be embedded inside an
- * abort chunk.
+/* A helper to initialize an op error inside a provided chunk, as most
+ * cause codes will be embedded inside an abort chunk.
  */
-void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
-		     size_t paylen)
+int sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
+		    size_t paylen)
 {
 	struct sctp_errhdr err;
 	__u16 len;
@@ -167,33 +164,16 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
 	/* Cause code constants are now defined in network order.  */
 	err.cause = cause_code;
 	len = sizeof(err) + paylen;
-	err.length = htons(len);
-	chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(err), &err);
-}
-
-/* A helper to initialize an op error inside a
- * provided chunk, as most cause codes will be embedded inside an
- * abort chunk.  Differs from sctp_init_cause in that it won't oops
- * if there isn't enough space in the op error chunk
- */
-static int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
-				 size_t paylen)
-{
-	struct sctp_errhdr err;
-	__u16 len;
-
-	/* Cause code constants are now defined in network order.  */
-	err.cause = cause_code;
-	len = sizeof(err) + paylen;
-	err.length = htons(len);
+	err.length = htons(len);
 
 	if (skb_tailroom(chunk->skb) < len)
 		return -ENOSPC;
 
-	chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk, sizeof(err), &err);
+	chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(err), &err);
 
 	return 0;
 }
+
 /* 3.3.2 Initiation (INIT) (1)
  *
  * This chunk is used to initiate a SCTP association between two
@@ -779,10 +759,9 @@ struct sctp_chunk *sctp_make_datafrag_empty(const struct sctp_association *asoc,
  * association.  This reports on which TSN's we've seen to date,
  * including duplicates and gaps.
  */
-struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
+struct sctp_chunk *sctp_make_sack(struct sctp_association *asoc)
 {
 	struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
-	struct sctp_association *aptr = (struct sctp_association *)asoc;
 	struct sctp_gap_ack_block gabs[SCTP_MAX_GABS];
 	__u16 num_gabs, num_dup_tsns;
 	struct sctp_transport *trans;
@@ -857,7 +836,7 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
 
 	/* Add the duplicate TSN information.  */
 	if (num_dup_tsns) {
-		aptr->stats.idupchunks += num_dup_tsns;
+		asoc->stats.idupchunks += num_dup_tsns;
 		sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns,
 				 sctp_tsnmap_get_dups(map));
 	}
@@ -869,11 +848,11 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
 	 * association so no transport will match after a wrap event like this,
 	 * Until the next sack
 	 */
-	if (++aptr->peer.sack_generation == 0) {
+	if (++asoc->peer.sack_generation == 0) {
 		list_for_each_entry(trans, &asoc->peer.transport_addr_list,
 				    transports)
 			trans->sack_generation = 0;
-		aptr->peer.sack_generation = 1;
+		asoc->peer.sack_generation = 1;
 	}
 nodata:
 	return retval;
@@ -1258,20 +1237,26 @@ nodata:
 	return retval;
 }
 
-/* Create an Operation Error chunk of a fixed size,
- * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT)
- * This is a helper function to allocate an error chunk for
- * for those invalid parameter codes in which we may not want
- * to report all the errors, if the incoming chunk is large
+/* Create an Operation Error chunk of a fixed size, specifically,
+ * min(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT) - overheads.
+ * This is a helper function to allocate an error chunk for for those
+ * invalid parameter codes in which we may not want to report all the
+ * errors, if the incoming chunk is large. If it can't fit in a single
+ * packet, we ignore it.
  */
-static inline struct sctp_chunk *sctp_make_op_error_fixed(
+static inline struct sctp_chunk *sctp_make_op_error_limited(
 	const struct sctp_association *asoc,
 	const struct sctp_chunk *chunk)
 {
-	size_t size = asoc ? asoc->pathmtu : 0;
+	size_t size = SCTP_DEFAULT_MAXSEGMENT;
+	struct sctp_sock *sp = NULL;
 
-	if (!size)
-		size = SCTP_DEFAULT_MAXSEGMENT;
+	if (asoc) {
+		size = min_t(size_t, size, asoc->pathmtu);
+		sp = sctp_sk(asoc->base.sk);
+	}
+
+	size = sctp_mtu_payload(sp, size, sizeof(struct sctp_errhdr));
 
 	return sctp_make_op_error_space(asoc, chunk, size);
 }
@@ -1523,18 +1508,6 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
 	return target;
 }
 
-/* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient
- * space in the chunk
- */
-static void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk,
-				    int len, const void *data)
-{
-	if (skb_tailroom(chunk->skb) >= len)
-		return sctp_addto_chunk(chunk, len, data);
-	else
-		return NULL;
-}
-
 /* Append bytes from user space to the end of a chunk.  Will panic if
  * chunk is not big enough.
  * Returns a kernel err value.
@@ -1829,6 +1802,9 @@ no_hmac:
 	kt = ktime_get_real();
 
 	if (!asoc && ktime_before(bear_cookie->expiration, kt)) {
+		suseconds_t usecs = ktime_to_us(ktime_sub(kt, bear_cookie->expiration));
+		__be32 n = htonl(usecs);
+
 		/*
 		 * Section 3.3.10.3 Stale Cookie Error (3)
 		 *
@@ -1837,17 +1813,12 @@ no_hmac:
 		 * Stale Cookie Error:  Indicates the receipt of a valid State
 		 * Cookie that has expired.
 		 */
-		len = ntohs(chunk->chunk_hdr->length);
-		*errp = sctp_make_op_error_space(asoc, chunk, len);
-		if (*errp) {
-			suseconds_t usecs = ktime_to_us(ktime_sub(kt, bear_cookie->expiration));
-			__be32 n = htonl(usecs);
-
-			sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
-					sizeof(n));
-			sctp_addto_chunk(*errp, sizeof(n), &n);
+		*errp = sctp_make_op_error(asoc, chunk,
+					   SCTP_ERROR_STALE_COOKIE, &n,
+					   sizeof(n), 0);
+		if (*errp)
 			*error = -SCTP_IERROR_STALE_COOKIE;
-		} else
+		else
 			*error = -SCTP_IERROR_NOMEM;
 
 		goto fail;
@@ -1998,12 +1969,8 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
 	if (*errp)
 		sctp_chunk_free(*errp);
 
-	*errp = sctp_make_op_error_space(asoc, chunk, len);
-
-	if (*errp) {
-		sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
-		sctp_addto_chunk(*errp, len, param.v);
-	}
+	*errp = sctp_make_op_error(asoc, chunk, SCTP_ERROR_DNS_FAILED,
+				   param.v, len, 0);
 
 	/* Stop processing this chunk. */
 	return 0;
@@ -2128,23 +2095,23 @@ static enum sctp_ierror sctp_process_unk_param(
 		/* Make an ERROR chunk, preparing enough room for
 		 * returning multiple unknown parameters.
 		 */
-		if (NULL == *errp)
-			*errp = sctp_make_op_error_fixed(asoc, chunk);
-
-		if (*errp) {
-			if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-					SCTP_PAD4(ntohs(param.p->length))))
-				sctp_addto_chunk_fixed(*errp,
-						SCTP_PAD4(ntohs(param.p->length)),
-						param.v);
-		} else {
-			/* If there is no memory for generating the ERROR
-			 * report as specified, an ABORT will be triggered
-			 * to the peer and the association won't be
-			 * established.
-			 */
-			retval = SCTP_IERROR_NOMEM;
+		if (!*errp) {
+			*errp = sctp_make_op_error_limited(asoc, chunk);
+			if (!*errp) {
+				/* If there is no memory for generating the
+				 * ERROR report as specified, an ABORT will be
+				 * triggered to the peer and the association
+				 * won't be established.
+				 */
+				retval = SCTP_IERROR_NOMEM;
+				break;
+			}
 		}
+
+		if (!sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+				     ntohs(param.p->length)))
+			sctp_addto_chunk(*errp, ntohs(param.p->length),
+					 param.v);
 		break;
 	default:
 		break;
@@ -2220,10 +2187,10 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
 		 * MUST be aborted.  The ABORT chunk SHOULD contain the error
 		 * cause 'Protocol Violation'.
 		 */
-		if (SCTP_AUTH_RANDOM_LENGTH !=
-			ntohs(param.p->length) - sizeof(struct sctp_paramhdr)) {
+		if (SCTP_AUTH_RANDOM_LENGTH != ntohs(param.p->length) -
+					       sizeof(struct sctp_paramhdr)) {
 			sctp_process_inv_paramlength(asoc, param.p,
-							chunk, err_chunk);
+						     chunk, err_chunk);
 			retval = SCTP_IERROR_ABORT;
 		}
 		break;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index bf747094d26b..d20f7addee19 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -644,16 +644,15 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
 
 		list_for_each_entry(trans, &asoc->peer.transport_addr_list,
 				    transports) {
-			/* Clear the source and route cache */
-			sctp_transport_dst_release(trans);
 			trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
 			    2*asoc->pathmtu, 4380));
 			trans->ssthresh = asoc->peer.i.a_rwnd;
 			trans->rto = asoc->rto_initial;
 			sctp_max_rto(asoc, trans);
 			trans->rtt = trans->srtt = trans->rttvar = 0;
+			/* Clear the source and route cache */
 			sctp_transport_route(trans, NULL,
-			    sctp_sk(asoc->base.sk));
+					     sctp_sk(asoc->base.sk));
 		}
 	}
 	retval = sctp_send_asconf(asoc, chunk);
@@ -896,7 +895,6 @@ skip_mkasconf:
 	 */
 	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
 			    transports) {
-		sctp_transport_dst_release(transport);
 		sctp_transport_route(transport, NULL,
 				     sctp_sk(asoc->base.sk));
 	}
@@ -1894,6 +1892,7 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
 				struct sctp_sndrcvinfo *sinfo)
 {
 	struct sock *sk = asoc->base.sk;
+	struct sctp_sock *sp = sctp_sk(sk);
 	struct net *net = sock_net(sk);
 	struct sctp_datamsg *datamsg;
 	bool wait_connect = false;
@@ -1912,13 +1911,16 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
 		goto err;
 	}
 
-	if (sctp_sk(sk)->disable_fragments && msg_len > asoc->frag_point) {
+	if (sp->disable_fragments && msg_len > asoc->frag_point) {
 		err = -EMSGSIZE;
 		goto err;
 	}
 
-	if (asoc->pmtu_pending)
-		sctp_assoc_pending_pmtu(asoc);
+	if (asoc->pmtu_pending) {
+		if (sp->param_flags & SPP_PMTUD_ENABLE)
+			sctp_assoc_sync_pmtu(asoc);
+		asoc->pmtu_pending = 0;
+	}
 
 	if (sctp_wspace(asoc) < msg_len)
 		sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));
@@ -1935,7 +1937,7 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
 	if (err)
 		goto err;
 
-	if (sctp_sk(sk)->strm_interleave) {
+	if (sp->strm_interleave) {
 		timeo = sock_sndtimeo(sk, 0);
 		err = sctp_wait_for_connect(asoc, &timeo);
 		if (err)
@@ -2538,7 +2540,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
 			trans->pathmtu = params->spp_pathmtu;
 			sctp_assoc_sync_pmtu(asoc);
 		} else if (asoc) {
-			asoc->pathmtu = params->spp_pathmtu;
+			sctp_assoc_set_pmtu(asoc, params->spp_pathmtu);
 		} else {
 			sp->pathmtu = params->spp_pathmtu;
 		}
@@ -3208,7 +3210,6 @@ static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen)
 static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
 {
 	struct sctp_sock *sp = sctp_sk(sk);
-	struct sctp_af *af = sp->pf->af;
 	struct sctp_assoc_value params;
 	struct sctp_association *asoc;
 	int val;
@@ -3230,30 +3231,24 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
 		return -EINVAL;
 	}
 
+	asoc = sctp_id2assoc(sk, params.assoc_id);
+
 	if (val) {
 		int min_len, max_len;
+		__u16 datasize = asoc ? sctp_datachk_len(&asoc->stream) :
+				 sizeof(struct sctp_data_chunk);
 
-		min_len = SCTP_DEFAULT_MINSEGMENT - af->net_header_len;
-		min_len -= af->ip_options_len(sk);
-		min_len -= sizeof(struct sctphdr) +
-			   sizeof(struct sctp_data_chunk);
-
-		max_len = SCTP_MAX_CHUNK_LEN - sizeof(struct sctp_data_chunk);
+		min_len = sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT,
+					   datasize);
+		max_len = SCTP_MAX_CHUNK_LEN - datasize;
 
 		if (val < min_len || val > max_len)
 			return -EINVAL;
 	}
 
-	asoc = sctp_id2assoc(sk, params.assoc_id);
 	if (asoc) {
-		if (val == 0) {
-			val = asoc->pathmtu - af->net_header_len;
-			val -= af->ip_options_len(sk);
-			val -= sizeof(struct sctphdr) +
-			       sctp_datachk_len(&asoc->stream);
-		}
 		asoc->user_frag = val;
-		asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
+		sctp_assoc_update_frag_point(asoc);
 	} else {
 		if (params.assoc_id && sctp_style(sk, UDP))
 			return -EINVAL;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 47f82bd794d9..445b7ef61677 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -242,9 +242,18 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
 						&transport->fl, sk);
 	}
 
-	if (transport->dst) {
-		transport->pathmtu = SCTP_TRUNC4(dst_mtu(transport->dst));
-	} else
+	if (transport->param_flags & SPP_PMTUD_DISABLE) {
+		struct sctp_association *asoc = transport->asoc;
+
+		if (!transport->pathmtu && asoc && asoc->pathmtu)
+			transport->pathmtu = asoc->pathmtu;
+		if (transport->pathmtu)
+			return;
+	}
+
+	if (transport->dst)
+		transport->pathmtu = sctp_dst_mtu(transport->dst);
+	else
 		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
 }
 
@@ -290,6 +299,7 @@ void sctp_transport_route(struct sctp_transport *transport,
 	struct sctp_association *asoc = transport->asoc;
 	struct sctp_af *af = transport->af_specific;
 
+	sctp_transport_dst_release(transport);
 	af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt));
 
 	if (saddr)
@@ -297,21 +307,14 @@ void sctp_transport_route(struct sctp_transport *transport,
 	else
 		af->get_saddr(opt, transport, &transport->fl);
 
-	if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) {
-		return;
-	}
-	if (transport->dst) {
-		transport->pathmtu = SCTP_TRUNC4(dst_mtu(transport->dst));
+	sctp_transport_pmtu(transport, sctp_opt2sk(opt));
 
-		/* Initialize sk->sk_rcv_saddr, if the transport is the
-		 * association's active path for getsockname().
-		 */
-		if (asoc && (!asoc->peer.primary_path ||
-			     (transport == asoc->peer.active_path)))
-			opt->pf->to_sk_saddr(&transport->saddr,
-					     asoc->base.sk);
-	} else
-		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
+	/* Initialize sk->sk_rcv_saddr, if the transport is the
+	 * association's active path for getsockname().
+	 */
+	if (transport->dst && asoc &&
+	    (!asoc->peer.primary_path || transport == asoc->peer.active_path))
+		opt->pf->to_sk_saddr(&transport->saddr, asoc->base.sk);
 }
 
 /* Hold a reference to a transport.  */
@@ -634,7 +637,7 @@ unsigned long sctp_transport_timeout(struct sctp_transport *trans)
 	    trans->state != SCTP_PF)
 		timeout += trans->hbinterval;
 
-	return timeout;
+	return max_t(unsigned long, timeout, HZ / 5);
 }
 
 /* Reset transport variables to their initial values */
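A recurring theme in this series is replacing the scattered "pathmtu minus headers" arithmetic with the single sctp_mtu_payload() helper, which the association then uses to derive its frag_point. The kernel helper takes the socket's address family and IP options into account; the standalone C sketch below models only the simplest case (IPv4 without options, plain RFC 4960 DATA chunks), and its constants and names are illustrative, not the kernel's.

```c
/* Illustrative sketch of the MTU -> frag_point derivation consolidated
 * by this series. Assumes IPv4 with no options and plain DATA chunks;
 * not the kernel's exact implementation.
 */
#include <stdio.h>

#define IP_HDR_LEN     20u  /* IPv4 header, no options (assumed) */
#define SCTP_HDR_LEN   12u  /* common SCTP header */
#define DATA_CHUNK_HDR 16u  /* RFC 4960 DATA chunk header */
#define TRUNC4(x)      ((x) & ~3u)  /* word-align, like SCTP_TRUNC4 */

/* Payload that fits in one packet: MTU minus network/SCTP overhead
 * plus any per-chunk extra (here, the DATA chunk header).
 */
static unsigned int mtu_payload(unsigned int pmtu, unsigned int extra)
{
	return pmtu - IP_HDR_LEN - SCTP_HDR_LEN - extra;
}

int main(void)
{
	unsigned int pmtu = 1500;
	unsigned int frag_point = TRUNC4(mtu_payload(pmtu, DATA_CHUNK_HDR));

	/* 1500 - 20 - 12 - 16 = 1452 bytes of user data per DATA chunk */
	printf("pmtu %u -> frag_point %u\n", pmtu, frag_point);
	return 0;
}
```

Centralizing this calculation is what lets the patches above delete the open-coded versions in chunk.c, output.c, and socket.c, and is why sctp_assoc_set_pmtu() can refresh frag_point as a side effect of any PMTU change.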