author     Vlad Yasevich <vladislav.yasevich@hp.com>    2010-05-06 09:56:07 +0200
committer  David S. Miller <davem@davemloft.net>        2010-05-06 09:56:07 +0200
commit     50b5d6ad63821cea324a5a7a19854d4de1a0a819 (patch)
tree       a279d53880e3bdf144783598ad03bbaa33e2cd96 /net/sctp
parent     veth: Dont kfree_skb() after dev_forward_skb() (diff)
sctp: Fix a race between ICMP protocol unreachable and connect()
ICMP protocol unreachable handling completely disregarded
the fact that the user may have locked the socket. It proceeded
to destroy the association, even though the user may have
held the lock and had a ref on the association. This resulted
in the following:
Attempt to release alive inet socket f6afcc00
=========================
[ BUG: held lock freed! ]
-------------------------
somenu/2672 is freeing memory f6afcc00-f6afcfff, with a lock still held
there!
(sk_lock-AF_INET){+.+.+.}, at: [<c122098a>] sctp_connect+0x13/0x4c
1 lock held by somenu/2672:
#0: (sk_lock-AF_INET){+.+.+.}, at: [<c122098a>] sctp_connect+0x13/0x4c
stack backtrace:
Pid: 2672, comm: somenu Not tainted 2.6.32-telco #55
Call Trace:
[<c1232266>] ? printk+0xf/0x11
[<c1038553>] debug_check_no_locks_freed+0xce/0xff
[<c10620b4>] kmem_cache_free+0x21/0x66
[<c1185f25>] __sk_free+0x9d/0xab
[<c1185f9c>] sk_free+0x1c/0x1e
[<c1216e38>] sctp_association_put+0x32/0x89
[<c1220865>] __sctp_connect+0x36d/0x3f4
[<c122098a>] ? sctp_connect+0x13/0x4c
[<c102d073>] ? autoremove_wake_function+0x0/0x33
[<c12209a8>] sctp_connect+0x31/0x4c
[<c11d1e80>] inet_dgram_connect+0x4b/0x55
[<c11834fa>] sys_connect+0x54/0x71
[<c103a3a2>] ? lock_release_non_nested+0x88/0x239
[<c1054026>] ? might_fault+0x42/0x7c
[<c1054026>] ? might_fault+0x42/0x7c
[<c11847ab>] sys_socketcall+0x6d/0x178
[<c10da994>] ? trace_hardirqs_on_thunk+0xc/0x10
[<c1002959>] syscall_call+0x7/0xb
This was because sctp_wait_for_connect() would acquire the socket
lock and then proceed to release the last reference count on the
association, thus causing the full destruction path to finish freeing
the socket.
The simplest solution is to start a very short timer in case the socket
is owned by the user. When the timer expires, we can do some verification
and release the association properly.
Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
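
In outline, the fix keys off sock_owned_by_user(): if the user holds the socket
lock, the ICMP handler arms a short per-transport timer (HZ/20, roughly 50 ms)
and takes a reference on the association instead of tearing it down; the timer
callback then re-checks ownership and either re-arms or runs the state machine
under the socket's bottom-half lock. A condensed sketch of the two halves (not
the literal patch, which follows below in full):

	/* ICMP handler side: never tear the association down while the
	 * user owns the socket lock; defer to a short timer instead.
	 */
	void sctp_icmp_proto_unreachable(struct sock *sk,
					 struct sctp_association *asoc,
					 struct sctp_transport *t)
	{
		if (sock_owned_by_user(sk)) {
			/* Arm the timer and pin the association while it is pending. */
			if (!timer_pending(&t->proto_unreach_timer) &&
			    !mod_timer(&t->proto_unreach_timer, jiffies + (HZ / 20)))
				sctp_association_hold(asoc);
			return;
		}

		/* Lock is free: cancel any pending deferral (and drop its
		 * reference), then run the teardown through the state machine.
		 */
		if (del_timer(&t->proto_unreach_timer))
			sctp_association_put(asoc);
		sctp_do_sm(SCTP_EVENT_T_OTHER,
			   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
			   asoc->state, asoc->ep, asoc, t, GFP_ATOMIC);
	}

	/* Timer side: re-check ownership, retry later if still busy, skip a
	 * dead association, then drop the reference taken when the timer armed.
	 */
	void sctp_generate_proto_unreach_event(unsigned long data)
	{
		struct sctp_transport *t = (struct sctp_transport *)data;
		struct sctp_association *asoc = t->asoc;

		sctp_bh_lock_sock(asoc->base.sk);
		if (sock_owned_by_user(asoc->base.sk)) {
			/* Still busy: re-arm and keep a reference pinned. */
			if (!mod_timer(&t->proto_unreach_timer, jiffies + (HZ / 20)))
				sctp_association_hold(asoc);
		} else if (!asoc->base.dead) {
			sctp_do_sm(SCTP_EVENT_T_OTHER,
				   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
				   asoc->state, asoc->ep, asoc, t, GFP_ATOMIC);
		}
		sctp_bh_unlock_sock(asoc->base.sk);
		sctp_association_put(asoc);
	}
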
Diffstat (limited to 'net/sctp')
-rw-r--r--   net/sctp/input.c         | 22
-rw-r--r--   net/sctp/sm_sideeffect.c | 35
-rw-r--r--   net/sctp/transport.c     |  2
3 files changed, 55 insertions, 4 deletions
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 2a570184e5a9..ea2192444ce6 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -440,11 +440,25 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
 {
 	SCTP_DEBUG_PRINTK("%s\n", __func__);
 
-	sctp_do_sm(SCTP_EVENT_T_OTHER,
-		   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
-		   asoc->state, asoc->ep, asoc, t,
-		   GFP_ATOMIC);
+	if (sock_owned_by_user(sk)) {
+		if (timer_pending(&t->proto_unreach_timer))
+			return;
+		else {
+			if (!mod_timer(&t->proto_unreach_timer,
+						jiffies + (HZ/20)))
+				sctp_association_hold(asoc);
+		}
+
+	} else {
+		if (timer_pending(&t->proto_unreach_timer) &&
+		    del_timer(&t->proto_unreach_timer))
+			sctp_association_put(asoc);
+
+		sctp_do_sm(SCTP_EVENT_T_OTHER,
+			   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
+			   asoc->state, asoc->ep, asoc, t,
+			   GFP_ATOMIC);
+	}
 }
 
 /* Common lookup code for icmp/icmpv6 error handler. */
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index d5ae450b6f02..eb1f42f45fdd 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -397,6 +397,41 @@ out_unlock:
 	sctp_transport_put(transport);
 }
 
+/* Handle the timeout of the ICMP protocol unreachable timer.  Trigger
+ * the correct state machine transition that will close the association.
+ */
+void sctp_generate_proto_unreach_event(unsigned long data)
+{
+	struct sctp_transport *transport = (struct sctp_transport *) data;
+	struct sctp_association *asoc = transport->asoc;
+
+	sctp_bh_lock_sock(asoc->base.sk);
+	if (sock_owned_by_user(asoc->base.sk)) {
+		SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__);
+
+		/* Try again later.  */
+		if (!mod_timer(&transport->proto_unreach_timer,
+				jiffies + (HZ/20)))
+			sctp_association_hold(asoc);
+		goto out_unlock;
+	}
+
+	/* Is this structure just waiting around for us to actually
+	 * get destroyed?
+	 */
+	if (asoc->base.dead)
+		goto out_unlock;
+
+	sctp_do_sm(SCTP_EVENT_T_OTHER,
+		   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
+		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
+
+out_unlock:
+	sctp_bh_unlock_sock(asoc->base.sk);
+	sctp_association_put(asoc);
+}
+
+
 /* Inject a SACK Timeout event into the state machine. */
 static void sctp_generate_sack_event(unsigned long data)
 {
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index be4d63d5a5cc..4a368038d46f 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -108,6 +108,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
 		    (unsigned long)peer);
 	setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event,
 		    (unsigned long)peer);
+	setup_timer(&peer->proto_unreach_timer,
+		    sctp_generate_proto_unreach_event, (unsigned long)peer);
 
 	/* Initialize the 64-bit random nonce sent with heartbeat. */
 	get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));
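
The reference counting around the timer leans on the timer API's return values,
which is easy to misread: mod_timer() returns 0 when the timer was not already
pending, so a hold is taken exactly once per armed timer, and the matching
sctp_association_put() happens either in the expired callback or when the
handler cancels a still-pending timer:

	if (!mod_timer(&t->proto_unreach_timer, jiffies + (HZ/20)))
		sctp_association_hold(asoc);	/* timer was idle: take the ref */

	if (timer_pending(&t->proto_unreach_timer) &&
	    del_timer(&t->proto_unreach_timer))
		sctp_association_put(asoc);	/* cancelled before expiry: drop it */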