author     Tejun Heo <tj@kernel.org>  2010-03-10 10:57:54 +0100
committer  Tejun Heo <tj@kernel.org>  2010-03-29 16:07:12 +0200
commit     10fad5e46f6c7bdfb01b1a012380a38e3c6ab346 (patch)
tree       9ec6e3955e7f879f64ea79812cf5ecd41baa6939
parent     module: encapsulate percpu handling better and record percpu_size (diff)
download   linux-10fad5e46f6c7bdfb01b1a012380a38e3c6ab346.tar.xz
           linux-10fad5e46f6c7bdfb01b1a012380a38e3c6ab346.zip
percpu, module: implement and use is_kernel/module_percpu_address()
lockdep has custom code to check whether a pointer belongs to static
percpu area which is somewhat broken.  Implement proper
is_kernel/module_percpu_address() and replace the custom code.

On UP, percpu variables are regular static variables and can't be
distinguished from them.  Always return %false on UP.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Ingo Molnar <mingo@redhat.com>
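As a rough illustration (not part of the patch), the sketch below shows how a caller such as lockdep's static_obj() can classify an address with the new helpers instead of open-coding the per-CPU range walk; it mirrors the kernel/lockdep.c hunk further down. The helper name obj_is_static_addr is illustrative only.

	#include <linux/module.h>
	#include <linux/percpu.h>

	/*
	 * Illustrative helper mirroring lockdep's static_obj() after this
	 * patch: report whether @obj points into static storage, including
	 * the in-kernel and per-module static percpu areas.
	 */
	static bool obj_is_static_addr(void *obj)
	{
		unsigned long addr = (unsigned long)obj;

		/* In-kernel static percpu area; always false on UP builds. */
		if (is_kernel_percpu_address(addr))
			return true;

		/* Module static data or a module's static percpu area. */
		return is_module_address(addr) || is_module_percpu_address(addr);
	}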
-rw-r--r--  include/linux/module.h |  1
-rw-r--r--  include/linux/percpu.h |  7
-rw-r--r--  kernel/lockdep.c       | 21
-rw-r--r--  kernel/module.c        | 38
-rw-r--r--  mm/percpu.c            | 26
5 files changed, 77 insertions(+), 16 deletions(-)
diff --git a/include/linux/module.h b/include/linux/module.h
index 87d247ac6761..f0e2659f4e3e 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -395,6 +395,7 @@ static inline int module_is_live(struct module *mod)
struct module *__module_text_address(unsigned long addr);
struct module *__module_address(unsigned long addr);
bool is_module_address(unsigned long addr);
+bool is_module_percpu_address(unsigned long addr);
bool is_module_text_address(unsigned long addr);
static inline int within_module_core(unsigned long addr, struct module *mod)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index a93e5bfdccb8..11d5f834b54a 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -137,6 +137,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
+extern bool is_kernel_percpu_address(unsigned long addr);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
@@ -163,6 +164,12 @@ static inline void free_percpu(void __percpu *p)
kfree(p);
}
+/* can't distinguish from other static vars, always false */
+static inline bool is_kernel_percpu_address(unsigned long addr)
+{
+ return false;
+}
+
static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
return __pa(addr);
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index c927a549db2c..9bbb9c841e48 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -582,9 +582,6 @@ static int static_obj(void *obj)
unsigned long start = (unsigned long) &_stext,
end = (unsigned long) &_end,
addr = (unsigned long) obj;
-#ifdef CONFIG_SMP
- int i;
-#endif
/*
* static variable?
@@ -595,24 +592,16 @@ static int static_obj(void *obj)
if (arch_is_kernel_data(addr))
return 1;
-#ifdef CONFIG_SMP
/*
- * percpu var?
+ * in-kernel percpu var?
*/
- for_each_possible_cpu(i) {
- start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
- + per_cpu_offset(i);
-
- if ((addr >= start) && (addr < end))
- return 1;
- }
-#endif
+ if (is_kernel_percpu_address(addr))
+ return 1;
/*
- * module var?
+ * module static or percpu var?
*/
- return is_module_address(addr);
+ return is_module_address(addr) || is_module_percpu_address(addr);
}
/*
diff --git a/kernel/module.c b/kernel/module.c
index e7a6e53fc73e..9f8d23d8b3a8 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -415,6 +415,40 @@ static void percpu_modcopy(struct module *mod,
memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}
+/**
+ * is_module_percpu_address - test whether address is from module static percpu
+ * @addr: address to test
+ *
+ * Test whether @addr belongs to module static percpu area.
+ *
+ * RETURNS:
+ * %true if @addr is from module static percpu area
+ */
+bool is_module_percpu_address(unsigned long addr)
+{
+ struct module *mod;
+ unsigned int cpu;
+
+ preempt_disable();
+
+ list_for_each_entry_rcu(mod, &modules, list) {
+ if (!mod->percpu_size)
+ continue;
+ for_each_possible_cpu(cpu) {
+ void *start = per_cpu_ptr(mod->percpu, cpu);
+
+ if ((void *)addr >= start &&
+ (void *)addr < start + mod->percpu_size) {
+ preempt_enable();
+ return true;
+ }
+ }
+ }
+
+ preempt_enable();
+ return false;
+}
+
#else /* ... !CONFIG_SMP */
static inline void __percpu *mod_percpu(struct module *mod)
@@ -441,6 +475,10 @@ static inline void percpu_modcopy(struct module *mod,
/* pcpusec should be 0, and size of that section should be 0. */
BUG_ON(size != 0);
}
+bool is_module_percpu_address(unsigned long addr)
+{
+ return false;
+}
#endif /* CONFIG_SMP */
diff --git a/mm/percpu.c b/mm/percpu.c
index 768419d44ad7..6e09741ddc62 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1304,6 +1304,32 @@ void free_percpu(void __percpu *ptr)
EXPORT_SYMBOL_GPL(free_percpu);
/**
+ * is_kernel_percpu_address - test whether address is from static percpu area
+ * @addr: address to test
+ *
+ * Test whether @addr belongs to in-kernel static percpu area. Module
+ * static percpu areas are not considered. For those, use
+ * is_module_percpu_address().
+ *
+ * RETURNS:
+ * %true if @addr is from in-kernel static percpu area, %false otherwise.
+ */
+bool is_kernel_percpu_address(unsigned long addr)
+{
+ const size_t static_size = __per_cpu_end - __per_cpu_start;
+ void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ void *start = per_cpu_ptr(base, cpu);
+
+ if ((void *)addr >= start && (void *)addr < start + static_size)
+ return true;
+ }
+ return false;
+}
+
+/**
* per_cpu_ptr_to_phys - convert translated percpu address to physical address
* @addr: the address to be converted to physical address
*