commit 8ce371f9846ef1e8b3cc8f6865766cb5c1f17e40 (patch)
Author:    Peter Zijlstra <peterz@infradead.org>   2017-03-20 12:26:55 +0100
Committer: Thomas Gleixner <tglx@linutronix.de>    2017-03-26 15:09:45 +0200
Tree:      ae7406dc794c7cf642b9c37bf85408e3a38c5dca (kernel/module.c)
Parent:    futex: Drop hb->lock before enqueueing on the rtmutex
lockdep: Fix per-cpu static objects
Since commit 383776fa7527 ("locking/lockdep: Handle statically
initialized PER_CPU locks properly") we try to collapse per-cpu locks
into a single class by giving them all the same key. For this key we
choose the canonical address of the per-cpu object, which would be the
offset into the per-cpu area.

This has two problems:

 - there is a case where we run !0 lock->key through static_obj() and
   expect this to pass; it doesn't for canonical pointers.

 - 0 is a valid canonical address.

Cure both issues by redefining the canonical address as the address of
the per-cpu variable on the boot CPU.

Since I didn't want to rely on CPU0 being the boot-cpu, or even
existing at all, track the boot CPU in a variable.

Fixes: 383776fa7527 ("locking/lockdep: Handle statically initialized PER_CPU locks properly")
Reported-by: kernel test robot <fengguang.wu@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Borislav Petkov <bp@suse.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: linux-mm@kvack.org
Cc: wfg@linux.intel.com
Cc: kernel test robot <fengguang.wu@intel.com>
Cc: LKP <lkp@01.org>
Link: http://lkml.kernel.org/r/20170320114108.kbvcsuepem45j5cr@hirez.programming.kicks-ass.net
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
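The "track the boot CPU in a variable" part of the fix lives outside
kernel/module.c, so only the resulting get_boot_cpu_id() call is visible
in the hunk below. A minimal sketch of how such a helper can be wired up,
assuming a variable named __boot_cpu_id recorded during early boot (the
variable name and init site are illustrative, not quoted from the patch):

#ifdef CONFIG_SMP
extern int __boot_cpu_id;	/* assumed name: set once on the CPU that boots */

static inline int get_boot_cpu_id(void)
{
	return __boot_cpu_id;
}
#else
static inline int get_boot_cpu_id(void)
{
	return 0;		/* UP: the only CPU is the boot CPU */
}
#endif

/* Recorded early during bring-up on the booting CPU (illustrative): */
/*	__boot_cpu_id = smp_processor_id();	*/

With something like this in place, per_cpu_ptr(ptr, get_boot_cpu_id())
yields a real kernel address rather than an offset into the per-cpu
area, which addresses both problems listed above.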
Diffstat (limited to 'kernel/module.c')
-rw-r--r--  kernel/module.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/kernel/module.c b/kernel/module.c
index 5ef618133849..6d9988031c5b 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -682,8 +682,12 @@ bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
 			void *va = (void *)addr;
 
 			if (va >= start && va < start + mod->percpu_size) {
-				if (can_addr)
+				if (can_addr) {
 					*can_addr = (unsigned long) (va - start);
+					*can_addr += (unsigned long)
+						per_cpu_ptr(mod->percpu,
+							    get_boot_cpu_id());
+				}
 				preempt_enable();
 				return true;
 			}
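For context, this is how __is_module_percpu_address() reads with the hunk
applied. Only the function signature (from the hunk header) and the lines
shown in the diff are taken from the patch; the enclosing module/CPU loops
and the preempt_disable() bracketing are a reconstruction of the
surrounding kernel/module.c code and may differ in detail from the actual
tree:

bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
	struct module *mod;
	unsigned int cpu;

	preempt_disable();

	/* Walk loaded modules and each module's per-cpu area (reconstructed context). */
	list_for_each_entry_rcu(mod, &modules, list) {
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);
			void *va = (void *)addr;

			if (va >= start && va < start + mod->percpu_size) {
				if (can_addr) {
					/* Offset of addr within this module's per-cpu area ... */
					*can_addr = (unsigned long) (va - start);
					/* ... rebased onto the boot CPU's copy: the new canonical address. */
					*can_addr += (unsigned long)
						per_cpu_ptr(mod->percpu,
							    get_boot_cpu_id());
				}
				preempt_enable();
				return true;
			}
		}
	}

	preempt_enable();
	return false;
}

Before the patch, *can_addr was just (va - start), i.e. an offset into the
per-cpu area; rebasing it onto the boot CPU's instance gives every CPU's
copy of the same variable one well-defined, non-zero canonical address for
lockdep to use as the key.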