author		Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2013-06-05 16:44:47 +0200
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2013-06-10 14:43:33 +0200
commit		354e7b761992a8e3923badaf705c4acbb9d5659d (patch)
tree		ea12205a673d6f1c969c864da651e49db4ba87cf /arch
parent		xen/smp: Don't leak interrupt name when offlining. (diff)
xen/spinlock: Don't leak interrupt name when offlining.
When the user does:
 echo 0 > /sys/devices/system/cpu/cpu1/online
 echo 1 > /sys/devices/system/cpu/cpu1/online
kmemleak reports:

kmemleak: 7 new suspected memory leaks (see /sys/kernel/debug/kmemleak)

unreferenced object 0xffff88003fa51260 (size 32):
  comm "swapper/0", pid 1, jiffies 4294667339 (age 1027.789s)
  hex dump (first 32 bytes):
    73 70 69 6e 6c 6f 63 6b 31 00 00 00 00 00 00 00  spinlock1.......
    00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
  backtrace:
    [<ffffffff81660721>] kmemleak_alloc+0x21/0x50
    [<ffffffff81190aac>] __kmalloc_track_caller+0xec/0x2a0
    [<ffffffff812fe1bb>] kvasprintf+0x5b/0x90
    [<ffffffff812fe228>] kasprintf+0x38/0x40
    [<ffffffff81663789>] xen_init_lock_cpu+0x61/0xbe
    [<ffffffff816633a6>] xen_cpu_up+0x66/0x3e8
    [<ffffffff8166bbf5>] _cpu_up+0xd1/0x14b
    [<ffffffff8166bd48>] cpu_up+0xd9/0xec
    [<ffffffff81ae6e4a>] smp_init+0x4b/0xa3
    [<ffffffff81ac4981>] kernel_init_freeable+0xdb/0x1e6
    [<ffffffff8165ce39>] kernel_init+0x9/0xf0
    [<ffffffff8167edfc>] ret_from_fork+0x7c/0xb0
    [<ffffffffffffffff>] 0xffffffffffffffff

Instead of doing it the way the "xen/smp: Don't leak interrupt name when
offlining" patch did (with a per-CPU structure that holds both the IRQ
number and the char *), we use a per-CPU pointer to a char. The reason is
that the "__this_cpu_read(lock_kicker_irq);" macro blows up with
"__bad_size_call_parameter()" when the size of the returned object is not
one it expects and optimizes for.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
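[Illustration, not part of the commit message] A minimal sketch of the size
limitation referred to above, assuming a hypothetical per-CPU aggregate like
the one the xen/smp patch keeps; the struct and variable names are
illustrative only:

	#include <linux/percpu.h>

	/* Hypothetical per-CPU aggregate, similar to the xen/smp approach: */
	struct lock_irq_info {			/* illustrative name only */
		int irq;
		char *name;
	};
	static DEFINE_PER_CPU(struct lock_irq_info, lock_info);

	/*
	 * This would not build: __this_cpu_read() switches on sizeof() and
	 * only handles 1-, 2-, 4- and 8-byte objects; anything else falls
	 * through to __bad_size_call_parameter() and the build fails.
	 *
	 *	struct lock_irq_info info = __this_cpu_read(lock_info);
	 *
	 * What this patch does instead: two per-CPU scalars, each a size
	 * the per-CPU accessors can handle.
	 */
	static DEFINE_PER_CPU(char *, irq_name);
	static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;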
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/xen/spinlock.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 3002ec1bb71a..a40f8508e760 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -7,6 +7,7 @@
 #include <linux/debugfs.h>
 #include <linux/log2.h>
 #include <linux/gfp.h>
+#include <linux/slab.h>
 
 #include <asm/paravirt.h>
@@ -165,6 +166,7 @@ static int xen_spin_trylock(struct arch_spinlock *lock)
 	return old == 0;
 }
 
+static DEFINE_PER_CPU(char *, irq_name);
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
 
@@ -362,7 +364,7 @@ static irqreturn_t dummy_handler(int irq, void *dev_id)
 void __cpuinit xen_init_lock_cpu(int cpu)
 {
 	int irq;
-	const char *name;
+	char *name;
 
 	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
 	     cpu, per_cpu(lock_kicker_irq, cpu));
@@ -385,6 +387,7 @@ void __cpuinit xen_init_lock_cpu(int cpu)
 	if (irq >= 0) {
 		disable_irq(irq); /* make sure it's never delivered */
 		per_cpu(lock_kicker_irq, cpu) = irq;
+		per_cpu(irq_name, cpu) = name;
 	}
 
 	printk("cpu %d spinlock event irq %d\n", cpu, irq);
@@ -401,6 +404,8 @@ void xen_uninit_lock_cpu(int cpu)
 
 	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
 	per_cpu(lock_kicker_irq, cpu) = -1;
+	kfree(per_cpu(irq_name, cpu));
+	per_cpu(irq_name, cpu) = NULL;
 }
 
 void __init xen_init_spinlocks(void)
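
[Illustration, not part of the diff] A hedged sketch of the allocate/store/free
pairing the patch ends up with. The kasprintf() call and the "spinlock%d" name
are taken from the kmemleak backtrace and hex dump above; the actual IPI
binding is elided behind a hypothetical helper, and the wrapper names are
illustrative:

	#include <linux/percpu.h>
	#include <linux/slab.h>		/* kasprintf(), kfree() */
	#include <xen/events.h>		/* unbind_from_irqhandler() */

	/* On CPU online: build the interrupt name and remember it per CPU. */
	static void sketch_init_lock_cpu(int cpu)	/* illustrative wrapper */
	{
		char *name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
		int irq = sketch_bind_kicker_ipi(cpu, name); /* hypothetical bind step */

		if (irq >= 0) {
			per_cpu(lock_kicker_irq, cpu) = irq;
			per_cpu(irq_name, cpu) = name;	/* kept so offlining can kfree() it */
		}
		/* Note: as in the patch, 'name' is not freed here when irq < 0. */
	}

	/* On CPU offline: release the IRQ and the name that used to leak. */
	static void sketch_uninit_lock_cpu(int cpu)
	{
		unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
		per_cpu(lock_kicker_irq, cpu) = -1;
		kfree(per_cpu(irq_name, cpu));
		per_cpu(irq_name, cpu) = NULL;	/* re-onlining starts from a clean slate */
	}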