author     Michael S. Tsirkin <mst@redhat.com>  2013-05-26 16:32:23 +0200
committer  Ingo Molnar <mingo@kernel.org>       2013-05-28 09:41:11 +0200
commit     662bbcb2747c2422cf98d3d97619509379eee466 (patch)
tree       d733c7257e59bde654d963cce0e26454a134d787 /mm/memory.c
parent     mm, sched: Drop voluntary schedule from might_fault() (diff)
mm, sched: Allow uaccess in atomic with pagefault_disable()
This changes might_fault() so that it does not trigger a false positive
diagnostic for e.g. the following sequence:

	spin_lock_irqsave()
	pagefault_disable()
	copy_to_user()
	pagefault_enable()
	spin_unlock_irqrestore()

In particular vhost wants to do this, to call socket ops from under a
lock.

There are 3 cases to consider:

 - CONFIG_PROVE_LOCKING - might_fault is non-inline so it's easy to move
   the in_atomic test to fix up the false positive warning.

 - CONFIG_DEBUG_ATOMIC_SLEEP - might_fault is currently inline, but we
   are calling a non-inline __might_sleep anyway, so let's use the
   non-inline version of might_fault that does the right thing.

 - !CONFIG_DEBUG_ATOMIC_SLEEP && !CONFIG_PROVE_LOCKING - __might_sleep
   is a nop so might_fault is a nop. Make this explicit.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1369577426-26721-11-git-send-email-mst@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
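As a rough illustration of the pattern described above (a sketch only, not
code from vhost or from this commit; example_lock, example_copy_out and the
caller-supplied pointers are made-up names), a caller that copies to user
space from an atomic section might look like this:

	#include <linux/spinlock.h>
	#include <linux/uaccess.h>

	static DEFINE_SPINLOCK(example_lock);

	/*
	 * Hypothetical example: copy data out to user space while holding a
	 * spinlock, with pagefaults disabled.  The copy may fail, but it
	 * cannot sleep, so might_fault() must not complain about it.
	 */
	static int example_copy_out(void __user *dst, const void *src, size_t len)
	{
		unsigned long flags;
		int ret = 0;

		spin_lock_irqsave(&example_lock, flags);
		pagefault_disable();
		if (copy_to_user(dst, src, len))
			ret = -EFAULT;	/* a fault cannot be serviced here */
		pagefault_enable();
		spin_unlock_irqrestore(&example_lock, flags);

		return ret;
	}

With pagefaults disabled, copy_to_user() simply fails if the destination
page is not resident instead of sleeping, which is why such a section is
legitimate under a spinlock and why might_fault() should stay quiet about it.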
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index c1f190f51f6f..d7d54a114773 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4210,7 +4210,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
 	up_read(&mm->mmap_sem);
 }
 
-#ifdef CONFIG_PROVE_LOCKING
+#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
 void might_fault(void)
 {
 	/*
@@ -4222,14 +4222,17 @@ void might_fault(void)
 	if (segment_eq(get_fs(), KERNEL_DS))
 		return;
 
-	__might_sleep(__FILE__, __LINE__, 0);
-
 	/*
 	 * it would be nicer only to annotate paths which are not under
	 * pagefault_disable, however that requires a larger audit and
 	 * providing helpers like get_user_atomic.
 	 */
-	if (!in_atomic() && current->mm)
+	if (in_atomic())
+		return;
+
+	__might_sleep(__FILE__, __LINE__, 0);
+
+	if (current->mm)
 		might_lock_read(&current->mm->mmap_sem);
 }
 EXPORT_SYMBOL(might_fault);
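Pieced together from the two hunks above, the resulting might_fault() checks
in_atomic() before doing any annotation, so a pagefault_disable() region
under a spinlock no longer trips __might_sleep() or the lockdep mmap_sem
annotation. The function's leading comment between the hunks and the closing
#endif are outside the diff and only sketched in here as context:

	#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
	void might_fault(void)
	{
		/* ... unchanged leading comment elided ... */
		if (segment_eq(get_fs(), KERNEL_DS))
			return;

		/*
		 * it would be nicer only to annotate paths which are not under
		 * pagefault_disable, however that requires a larger audit and
		 * providing helpers like get_user_atomic.
		 */
		if (in_atomic())
			return;

		__might_sleep(__FILE__, __LINE__, 0);

		if (current->mm)
			might_lock_read(&current->mm->mmap_sem);
	}
	EXPORT_SYMBOL(might_fault);
	#endif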