author     Pavel Emelyanov <xemul@openvz.org>      2007-11-15 11:57:06 +0100
committer  David S. Miller <davem@davemloft.net>   2007-11-15 11:57:06 +0100
commit     dab6ba36888a12f3e3edff71eeef968fc159178a (patch)
tree       b838883cd5cd8f55f2bcb31f16430dd5a10dcbca
parent     [VIA_VELOCITY]: Don't oops on MTU change. (diff)
download   linux-dab6ba36888a12f3e3edff71eeef968fc159178a.tar.xz
           linux-dab6ba36888a12f3e3edff71eeef968fc159178a.zip
[INET]: Fix potential kfree on vmalloc-ed area of request_sock_queue
The request_sock_queue's listen_opt is either vmalloc-ed or kmalloc-ed
depending on the number of table entries. Thus it is expected to be
handled properly on free, which is done in reqsk_queue_destroy().

However the error path in inet_csk_listen_start() calls the lite version
of reqsk_queue_destroy, called __reqsk_queue_destroy, which calls kfree
unconditionally.

Fix this and move __reqsk_queue_destroy into a .c file, as it looks too
big to be inline.

As David also noticed, this is an error recovery path only, so no locking
is required and the lopt is known to be not NULL.

reqsk_queue_yank_listen_sk is also now only used in
net/core/request_sock.c, so we should move it there too.

Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Acked-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
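For context on why a bare kfree() is wrong here: the allocation side sizes the
listen_sock table and picks kmalloc or vmalloc accordingly, so any free path has
to make the same decision. Below is a minimal sketch of that allocation choice,
assuming the same lopt_size computation the new __reqsk_queue_destroy() uses in
this patch; the helper name alloc_listen_sock_sketch() is hypothetical and this
is not a verbatim copy of reqsk_queue_alloc(), which also rounds
nr_table_entries and zeroes the memory.

	/* Sketch only: shows why the free side must check the size too. */
	static struct listen_sock *alloc_listen_sock_sketch(unsigned int nr_table_entries)
	{
		/* Same size computation as __reqsk_queue_destroy() below. */
		size_t lopt_size = sizeof(struct listen_sock) +
				   nr_table_entries * sizeof(struct request_sock *);
		struct listen_sock *lopt;

		if (lopt_size > PAGE_SIZE)
			lopt = vmalloc(lopt_size);		/* large table */
		else
			lopt = kmalloc(lopt_size, GFP_KERNEL);	/* small table */

		/*
		 * Whichever branch allocated this must be mirrored on free:
		 * vfree() for the vmalloc case, kfree() for the kmalloc case.
		 */
		return lopt;
	}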
-rw-r--r--   include/net/request_sock.h   18
-rw-r--r--   net/core/request_sock.c      35
2 files changed, 36 insertions, 17 deletions
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 7aed02ce2b65..cff4608179c1 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -124,23 +124,7 @@ struct request_sock_queue {
extern int reqsk_queue_alloc(struct request_sock_queue *queue,
unsigned int nr_table_entries);
-static inline struct listen_sock *reqsk_queue_yank_listen_sk(struct request_sock_queue *queue)
-{
- struct listen_sock *lopt;
-
- write_lock_bh(&queue->syn_wait_lock);
- lopt = queue->listen_opt;
- queue->listen_opt = NULL;
- write_unlock_bh(&queue->syn_wait_lock);
-
- return lopt;
-}
-
-static inline void __reqsk_queue_destroy(struct request_sock_queue *queue)
-{
- kfree(reqsk_queue_yank_listen_sk(queue));
-}
-
+extern void __reqsk_queue_destroy(struct request_sock_queue *queue);
extern void reqsk_queue_destroy(struct request_sock_queue *queue);
static inline struct request_sock *
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 5f0818d815e6..45aed75cb571 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -71,6 +71,41 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
EXPORT_SYMBOL(reqsk_queue_alloc);
+void __reqsk_queue_destroy(struct request_sock_queue *queue)
+{
+ struct listen_sock *lopt;
+ size_t lopt_size;
+
+ /*
+ * this is an error recovery path only
+ * no locking needed and the lopt is not NULL
+ */
+
+ lopt = queue->listen_opt;
+ lopt_size = sizeof(struct listen_sock) +
+ lopt->nr_table_entries * sizeof(struct request_sock *);
+
+ if (lopt_size > PAGE_SIZE)
+ vfree(lopt);
+ else
+ kfree(lopt);
+}
+
+EXPORT_SYMBOL(__reqsk_queue_destroy);
+
+static inline struct listen_sock *reqsk_queue_yank_listen_sk(
+ struct request_sock_queue *queue)
+{
+ struct listen_sock *lopt;
+
+ write_lock_bh(&queue->syn_wait_lock);
+ lopt = queue->listen_opt;
+ queue->listen_opt = NULL;
+ write_unlock_bh(&queue->syn_wait_lock);
+
+ return lopt;
+}
+
void reqsk_queue_destroy(struct request_sock_queue *queue)
{
/* make all the listen_opt local to us */