author      David S. Miller <davem@davemloft.net>    2009-09-09 08:16:06 +0200
committer   David S. Miller <davem@davemloft.net>    2009-09-09 08:16:06 +0200
commit      a8f22264550e64c0cd11fb6647284b0bd6407f9c (patch)
tree        e42ef7f04063bef6114c6488c70eff7089b5ea26
parent      sparc: add basic support for 'perf' (diff)
download    linux-a8f22264550e64c0cd11fb6647284b0bd6407f9c.tar.xz
            linux-a8f22264550e64c0cd11fb6647284b0bd6407f9c.zip
sparc64: Manage NMI watchdog enabling like x86.
Use a per-cpu 'wd_enabled' boolean and a global atomic_t count of the
cpus that have the watchdog NMI enabled; the count is set to '-1' if
something is wrong with the watchdog and it cannot be used.
Signed-off-by: David S. Miller <davem@davemloft.net>
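
The heart of the change is a small accounting scheme: every cpu keeps its own
"armed" flag, while a single shared counter tracks how many cpus currently
have the watchdog running, and a negative value means the watchdog is broken
and must stay off. The sketch below is a minimal userspace illustration of
that scheme, assuming plain C11 atomics and a fixed-size array in place of the
kernel's atomic_t and per-cpu variables; NR_CPUS_DEMO and the demo_* names are
invented for this example and do not appear in the patch.

```c
/*
 * Illustrative userspace sketch (not kernel code) of the accounting the
 * patch introduces: a per-cpu enable flag plus a shared counter of armed
 * cpus, where <0 marks the watchdog as permanently unusable.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS_DEMO 4                       /* invented for this example */

static atomic_int demo_nmi_active = 0;       /* >0 active, 0 disabled, <0 broken */
static bool demo_wd_enabled[NR_CPUS_DEMO];   /* stands in for the per-cpu flag */

static void demo_start_watchdog(int cpu)
{
	demo_wd_enabled[cpu] = true;
	atomic_fetch_add(&demo_nmi_active, 1);
}

static void demo_stop_watchdog(int cpu)
{
	if (!demo_wd_enabled[cpu])
		return;
	demo_wd_enabled[cpu] = false;
	atomic_fetch_sub(&demo_nmi_active, 1);
}

/* Mirrors the oprofile check: usable only while at least one cpu is armed. */
static bool demo_nmi_usable(void)
{
	return atomic_load(&demo_nmi_active) > 0;
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
		demo_start_watchdog(cpu);
	printf("armed cpus: %d, usable: %d\n",
	       atomic_load(&demo_nmi_active), demo_nmi_usable());

	/* Disabling the watchdog on every cpu drops the count back to zero... */
	for (int cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
		demo_stop_watchdog(cpu);
	/* ...and a broken watchdog is then latched as "cannot be enabled". */
	atomic_store(&demo_nmi_active, -1);
	printf("armed cpus: %d, usable: %d\n",
	       atomic_load(&demo_nmi_active), demo_nmi_usable());
	return 0;
}
```

The same ordering appears in the patch below: start_nmi_watchdog() sets the
per-cpu flag and increments the counter, stop_nmi_watchdog() and
report_broken_nmi() do the reverse, and oprofile only treats the watchdog as
usable when the counter is positive.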
-rw-r--r--   arch/sparc/include/asm/nmi.h   |  2
-rw-r--r--   arch/sparc/kernel/nmi.c        | 60
-rw-r--r--   arch/sparc/oprofile/init.c     |  2
3 files changed, 45 insertions, 19 deletions
```diff
diff --git a/arch/sparc/include/asm/nmi.h b/arch/sparc/include/asm/nmi.h
index fbd546dd4feb..c7d11e435df9 100644
--- a/arch/sparc/include/asm/nmi.h
+++ b/arch/sparc/include/asm/nmi.h
@@ -5,6 +5,6 @@
 extern int __init nmi_init(void);
 extern void perfctr_irq(int irq, struct pt_regs *regs);
 extern void nmi_adjust_hz(unsigned int new_hz);
-extern int nmi_usable;
+extern atomic_t nmi_active;
 
 #endif /* __NMI_H */
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 2c0cc72d295b..d1614e8384ae 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -34,10 +34,17 @@
 static int nmi_watchdog_active;
 static int panic_on_timeout;
 
-int nmi_usable;
-EXPORT_SYMBOL_GPL(nmi_usable);
+/* nmi_active:
+ * >0: the NMI watchdog is active, but can be disabled
+ * <0: the NMI watchdog has not been set up, and cannot be enabled
+ *  0: the NMI watchdog is disabled, but can be enabled
+ */
+atomic_t nmi_active = ATOMIC_INIT(0);	/* oprofile uses this */
+EXPORT_SYMBOL(nmi_active);
 
 static unsigned int nmi_hz = HZ;
+static DEFINE_PER_CPU(short, wd_enabled);
+static int endflag __initdata;
 
 static DEFINE_PER_CPU(unsigned int, last_irq_sum);
 static DEFINE_PER_CPU(local_t, alert_counter);
@@ -110,7 +117,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 		__get_cpu_var(last_irq_sum) = sum;
 		local_set(&__get_cpu_var(alert_counter), 0);
 	}
-	if (nmi_usable) {
+	if (__get_cpu_var(wd_enabled)) {
 		write_pic(picl_value(nmi_hz));
 		pcr_ops->write(pcr_enable);
 	}
@@ -121,8 +128,6 @@ static inline unsigned int get_nmi_count(int cpu)
 	return cpu_data(cpu).__nmi_count;
 }
 
-static int endflag __initdata;
-
 static __init void nmi_cpu_busy(void *data)
 {
 	local_irq_enable_in_hardirq();
@@ -143,12 +148,15 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count)
 	printk(KERN_WARNING
 		"and attach the output of the 'dmesg' command.\n");
 
-	nmi_usable = 0;
+	per_cpu(wd_enabled, cpu) = 0;
+	atomic_dec(&nmi_active);
 }
 
-static void stop_watchdog(void *unused)
+static void stop_nmi_watchdog(void *unused)
 {
 	pcr_ops->write(PCR_PIC_PRIV);
+	__get_cpu_var(wd_enabled) = 0;
+	atomic_dec(&nmi_active);
 }
 
 static int __init check_nmi_watchdog(void)
@@ -156,6 +164,9 @@
 	unsigned int *prev_nmi_count;
 	int cpu, err;
 
+	if (!atomic_read(&nmi_active))
+		return 0;
+
 	prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL);
 	if (!prev_nmi_count) {
 		err = -ENOMEM;
@@ -172,12 +183,15 @@
 	mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */
 
 	for_each_online_cpu(cpu) {
+		if (!per_cpu(wd_enabled, cpu))
+			continue;
 		if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
 			report_broken_nmi(cpu, prev_nmi_count);
 	}
 	endflag = 1;
-	if (!nmi_usable) {
+	if (!atomic_read(&nmi_active)) {
 		kfree(prev_nmi_count);
+		atomic_set(&nmi_active, -1);
 		err = -ENODEV;
 		goto error;
 	}
@@ -188,12 +202,26 @@
 	kfree(prev_nmi_count);
 	return 0;
 error:
-	on_each_cpu(stop_watchdog, NULL, 1);
+	on_each_cpu(stop_nmi_watchdog, NULL, 1);
 	return err;
 }
 
-static void start_watchdog(void *unused)
+static void start_nmi_watchdog(void *unused)
+{
+	__get_cpu_var(wd_enabled) = 1;
+	atomic_inc(&nmi_active);
+
+	pcr_ops->write(PCR_PIC_PRIV);
+	write_pic(picl_value(nmi_hz));
+
+	pcr_ops->write(pcr_enable);
+}
+
+static void nmi_adjust_hz_one(void *unused)
 {
+	if (!__get_cpu_var(wd_enabled))
+		return;
+
 	pcr_ops->write(PCR_PIC_PRIV);
 	write_pic(picl_value(nmi_hz));
 
@@ -203,13 +231,13 @@ static void start_watchdog(void *unused)
 void nmi_adjust_hz(unsigned int new_hz)
 {
 	nmi_hz = new_hz;
-	on_each_cpu(start_watchdog, NULL, 1);
+	on_each_cpu(nmi_adjust_hz_one, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(nmi_adjust_hz);
 
 static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p)
 {
-	on_each_cpu(stop_watchdog, NULL, 1);
+	on_each_cpu(stop_nmi_watchdog, NULL, 1);
 	return 0;
 }
 
@@ -221,16 +249,14 @@ int __init nmi_init(void)
 {
 	int err;
 
-	nmi_usable = 1;
-
-	on_each_cpu(start_watchdog, NULL, 1);
+	on_each_cpu(start_nmi_watchdog, NULL, 1);
 
 	err = check_nmi_watchdog();
 	if (!err) {
 		err = register_reboot_notifier(&nmi_reboot_notifier);
 		if (err) {
-			nmi_usable = 0;
-			on_each_cpu(stop_watchdog, NULL, 1);
+			on_each_cpu(stop_nmi_watchdog, NULL, 1);
+			atomic_set(&nmi_active, -1);
 		}
 	}
 	return err;
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c
index d172f86439b1..9ce34fd294c9 100644
--- a/arch/sparc/oprofile/init.c
+++ b/arch/sparc/oprofile/init.c
@@ -57,7 +57,7 @@ static void timer_stop(void)
 
 static int op_nmi_timer_init(struct oprofile_operations *ops)
 {
-	if (!nmi_usable)
+	if (atomic_read(&nmi_active) <= 0)
 		return -ENODEV;
 
 	ops->start = timer_start;
```