author    | Herbert Xu <herbert@gondor.apana.org.au> | 2009-01-30 23:12:06 +0100
committer | David S. Miller <davem@davemloft.net> | 2009-01-30 23:13:49 +0100
commit    | 905db44087855e3c1709f538ecdc22fd149cadd8 (patch)
tree      | 645708af472fd26bb73b5cd6abe10640322a3b93 /net/packet
parent    | sfc: Replace stats_enabled flag with a disable count (diff)
packet: Avoid lock_sock in mmap handler
As the mmap handler gets called under mmap_sem, and we may grab
mmap_sem elsewhere under the socket lock to access user data, we
should avoid grabbing the socket lock in the mmap handler.
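As a rough userspace sketch of that inversion (not kernel code: the two pthread mutexes below merely stand in for mmap_sem and the socket lock, and the function names are made up for illustration), the two paths end up nesting the locks in opposite orders:

```c
/*
 * Userspace sketch only -- not kernel code.  mmap_lock stands in for
 * mmap_sem, sock_lock for the per-socket lock.
 * Build with: cc -pthread abba_sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t mmap_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

/* Like the old packet_mmap(): entered with mmap_sem already held, then
 * takes the socket lock, i.e. the order is mmap_lock -> sock_lock. */
static void *mmap_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&mmap_lock);
	sleep(1);			/* widen the race window */
	pthread_mutex_lock(&sock_lock);
	pthread_mutex_unlock(&sock_lock);
	pthread_mutex_unlock(&mmap_lock);
	return NULL;
}

/* Like a syscall path that holds the socket lock and then touches user
 * memory, taking mmap_sem underneath it: sock_lock -> mmap_lock. */
static void *syscall_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&sock_lock);
	sleep(1);
	pthread_mutex_lock(&mmap_lock);
	pthread_mutex_unlock(&mmap_lock);
	pthread_mutex_unlock(&sock_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, mmap_path, NULL);
	pthread_create(&b, NULL, syscall_path, NULL);
	/* Once both threads pass their sleep(), each waits on the lock the
	 * other holds, so the joins never return: the AB-BA inversion the
	 * commit message describes. */
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("no deadlock this run");
	return 0;
}
```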
Since the only thing we care about in the mmap handler is for
pg_vec* to be invariant, i.e., to exclude packet_set_ring, we
can achieve this by simply using a new mutex.
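A minimal sketch of that scheme, again in userspace C rather than kernel code (pg_vec_lock, set_ring() and map_ring() mirror the patch but are illustrative): both the ring-replacement path and the mmap path take only the dedicated mutex, so the mmap path never needs the socket lock.

```c
/* Userspace sketch only -- mirrors the idea of the patch, not its code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t pg_vec_lock = PTHREAD_MUTEX_INITIALIZER;
static char *pg_vec;			/* stands in for po->pg_vec */
static size_t pg_vec_size;

/* Analogue of packet_set_ring(): swap the ring buffer under pg_vec_lock. */
static void set_ring(size_t size)
{
	char *new_vec = size ? calloc(1, size) : NULL;
	char *old_vec;

	pthread_mutex_lock(&pg_vec_lock);
	old_vec = pg_vec;
	pg_vec = new_vec;
	pg_vec_size = size;
	pthread_mutex_unlock(&pg_vec_lock);

	free(old_vec);
}

/* Analogue of packet_mmap(): while pg_vec_lock is held, the ring cannot be
 * swapped out from under us -- the only invariant the handler needs. */
static int map_ring(void)
{
	int err = -1;

	pthread_mutex_lock(&pg_vec_lock);
	if (pg_vec && pg_vec_size)
		err = 0;		/* ring present and stable: "map" it */
	pthread_mutex_unlock(&pg_vec_lock);
	return err;
}

int main(void)
{
	set_ring(4096);
	printf("map_ring: %d\n", map_ring());	/* 0: ring exists */
	set_ring(0);
	printf("map_ring: %d\n", map_ring());	/* -1: ring torn down */
	return 0;
}
```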
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Tested-by: Martin MOKREJŠ <mmokrejs@ribosome.natur.cuni.cz>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/packet')
-rw-r--r-- | net/packet/af_packet.c | 9 |
1 file changed, 7 insertions, 2 deletions
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 5f94db2f3e9e..9454d4ae46df 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -77,6 +77,7 @@
 #include <linux/poll.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/mutex.h>
 
 #ifdef CONFIG_INET
 #include <net/inet_common.h>
@@ -175,6 +176,7 @@ struct packet_sock {
 #endif
 	struct packet_type	prot_hook;
 	spinlock_t		bind_lock;
+	struct mutex		pg_vec_lock;
 	unsigned int		running:1,	/* prot_hook is attached*/
 				auxdata:1,
 				origdev:1;
@@ -1069,6 +1071,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol)
 	 */
 
 	spin_lock_init(&po->bind_lock);
+	mutex_init(&po->pg_vec_lock);
 	po->prot_hook.func = packet_rcv;
 
 	if (sock->type == SOCK_PACKET)
@@ -1865,6 +1868,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
 	synchronize_net();
 
 	err = -EBUSY;
+	mutex_lock(&po->pg_vec_lock);
 	if (closing || atomic_read(&po->mapped) == 0) {
 		err = 0;
 #define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
@@ -1886,6 +1890,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
 		if (atomic_read(&po->mapped))
 			printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
 	}
+	mutex_unlock(&po->pg_vec_lock);
 
 	spin_lock(&po->bind_lock);
 	if (was_running && !po->running) {
@@ -1918,7 +1923,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
 
 	size = vma->vm_end - vma->vm_start;
 
-	lock_sock(sk);
+	mutex_lock(&po->pg_vec_lock);
 	if (po->pg_vec == NULL)
 		goto out;
 	if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
@@ -1941,7 +1946,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
 	err = 0;
 
 out:
-	release_sock(sk);
+	mutex_unlock(&po->pg_vec_lock);
 	return err;
 }
 #endif