Diffstat (limited to 'kernel/watchdog.c')
-rw-r--r--    kernel/watchdog.c    51
1 file changed, 29 insertions(+), 22 deletions(-)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index d117262deba3..df30ee08bdd4 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -3,15 +3,14 @@
*
* started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
*
- * this code detects hard lockups: incidents in where on a CPU
- * the kernel does not respond to anything except NMI.
- *
- * Note: Most of this code is borrowed heavily from softlockup.c,
- * so thanks to Ingo for the initial implementation.
- * Some chunks also taken from arch/x86/kernel/apic/nmi.c, thanks
+ * Note: Most of this code is borrowed heavily from the original softlockup
+ * detector, so thanks to Ingo for the initial implementation.
+ * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
* to those contributors as well.
*/
+#define pr_fmt(fmt) "NMI watchdog: " fmt
+
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
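The pr_fmt() define added in the hunk above is what lets the later printk() calls in this patch shrink to bare pr_info()/pr_err() calls while still carrying the "NMI watchdog: " prefix. A minimal sketch of the idiom (the example function is hypothetical, not part of this patch):

    /* Must appear before <linux/printk.h> (or any header that pulls it
     * in), otherwise the header's default pr_fmt() definition wins. */
    #define pr_fmt(fmt) "NMI watchdog: " fmt

    #include <linux/printk.h>

    static void example(void)
    {
            /* Expands to printk(KERN_INFO "NMI watchdog: " "enabled...\n") */
            pr_info("enabled, takes one hw-pmu counter.\n");
    }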
@@ -117,9 +116,10 @@ static unsigned long get_sample_period(void)
{
/*
* convert watchdog_thresh from seconds to ns
- * the divide by 5 is to give hrtimer 5 chances to
- * increment before the hardlockup detector generates
- * a warning
+ * the divide by 5 is to give hrtimer several chances (two
+ * or three with the current relation between the soft
+ * and hard thresholds) to increment before the
+ * hardlockup detector generates a warning
*/
return get_softlockup_thresh() * (NSEC_PER_SEC / 5);
}
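With the in-tree defaults this works out to a 4-second sample period: watchdog_thresh defaults to 10 seconds and get_softlockup_thresh() returns twice that, the same 2*watchdog_thresh relation a later hunk's comment relies on. A standalone sketch of the arithmetic (illustrative userspace C; the helper mirrors the kernel's name but is reimplemented here):

    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000UL

    static unsigned long watchdog_thresh = 10; /* default hard threshold, seconds */

    /* assumption: soft threshold is 2*watchdog_thresh, per the patch's comments */
    static unsigned long get_softlockup_thresh(void)
    {
            return watchdog_thresh * 2;
    }

    int main(void)
    {
            /* 20 s * (10^9 / 5) ns/s = 4 * 10^9 ns: the hrtimer fires every 4 s */
            unsigned long period = get_softlockup_thresh() * (NSEC_PER_SEC / 5);

            printf("sample period: %lu ns (%lu s)\n",
                   period, period / NSEC_PER_SEC);
            return 0;
    }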
@@ -321,11 +321,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
*/
static int watchdog(void *unused)
{
- struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+ struct sched_param param = { .sched_priority = 0 };
struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
- sched_setscheduler(current, SCHED_FIFO, &param);
-
/* initialize timestamp */
__touch_watchdog();
@@ -336,9 +334,11 @@ static int watchdog(void *unused)
set_current_state(TASK_INTERRUPTIBLE);
/*
- * Run briefly once per second to reset the softlockup timestamp.
- * If this gets delayed for more than 60 seconds then the
- * debug-printout triggers in watchdog_timer_fn().
+ * Run briefly (kicked by the hrtimer callback function) once every
+ * get_sample_period() seconds (4 seconds by default) to reset the
+ * softlockup timestamp. If this gets delayed for more than
+ * 2*watchdog_thresh seconds then the debug-printout triggers in
+ * watchdog_timer_fn().
*/
while (!kthread_should_stop()) {
__touch_watchdog();
@@ -349,8 +349,11 @@ static int watchdog(void *unused)
set_current_state(TASK_INTERRUPTIBLE);
}
+ /*
+ * Drop the policy/priority elevation during thread exit to avoid a
+ * scheduling latency spike.
+ */
__set_current_state(TASK_RUNNING);
- param.sched_priority = 0;
sched_setscheduler(current, SCHED_NORMAL, &param);
return 0;
}
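Taken together, the three watchdog() hunks leave the kthread body as a classic touch-and-sleep loop that no longer elevates its own priority (that now happens in watchdog_enable(), further down) but still demotes itself on the way out. A stripped-down sketch of the resulting shape (illustrative only; the hrtimer setup and the file-local __touch_watchdog() helper are taken as given from the patch context):

    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int watchdog(void *unused)
    {
            struct sched_param param = { .sched_priority = 0 };

            __touch_watchdog();             /* initialize timestamp */

            set_current_state(TASK_INTERRUPTIBLE);
            while (!kthread_should_stop()) {
                    __touch_watchdog();     /* reset the softlockup timestamp */
                    schedule();             /* sleep until the hrtimer kicks us */
                    set_current_state(TASK_INTERRUPTIBLE);
            }
            __set_current_state(TASK_RUNNING);

            /* drop the SCHED_FIFO elevation before the thread exits */
            sched_setscheduler(current, SCHED_NORMAL, &param);
            return 0;
    }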
@@ -376,18 +379,20 @@ static int watchdog_nmi_enable(int cpu)
/* Try to register using hardware perf events */
event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
if (!IS_ERR(event)) {
- printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
+ pr_info("enabled, takes one hw-pmu counter.\n");
goto out_save;
}
/* vary the KERN level based on the returned errno */
if (PTR_ERR(event) == -EOPNOTSUPP)
- printk(KERN_INFO "NMI watchdog disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
+ pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
else if (PTR_ERR(event) == -ENOENT)
- printk(KERN_WARNING "NMI watchdog disabled (cpu%i): hardware events not enabled\n", cpu);
+ pr_warning("disabled (cpu%i): hardware events not enabled\n",
+ cpu);
else
- printk(KERN_ERR "NMI watchdog disabled (cpu%i): unable to create perf event: %ld\n", cpu, PTR_ERR(event));
+ pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
+ cpu, PTR_ERR(event));
return PTR_ERR(event);
/* success path */
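The error handling above follows the kernel's pointer-encoded errno pattern: perf_event_create_kernel_counter() returns either a valid perf_event pointer or an ERR_PTR() value, which IS_ERR() detects and PTR_ERR() converts back to a negative errno for the log-level triage. The mechanics in isolation (a generic sketch with a hypothetical function name, not taken from this file):

    #include <linux/err.h>
    #include <linux/perf_event.h>

    static int classify(struct perf_event *event)
    {
            /* ERR_PTR() values live in the top page of the address
             * space, which is what IS_ERR() checks for. */
            if (IS_ERR(event))
                    return PTR_ERR(event);  /* e.g. -EOPNOTSUPP or -ENOENT */
            return 0;                       /* a real event pointer */
    }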
@@ -439,9 +444,10 @@ static int watchdog_enable(int cpu)
/* create the watchdog thread */
if (!p) {
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
p = kthread_create_on_node(watchdog, NULL, cpu_to_node(cpu), "watchdog/%d", cpu);
if (IS_ERR(p)) {
- printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
+ pr_err("softlockup watchdog for %i failed\n", cpu);
if (!err) {
/* if hardlockup hasn't already set this */
err = PTR_ERR(p);
@@ -450,6 +456,7 @@ static int watchdog_enable(int cpu)
}
goto out;
}
+ sched_setscheduler(p, SCHED_FIFO, &param);
kthread_bind(p, cpu);
per_cpu(watchdog_touch_ts, cpu) = 0;
per_cpu(softlockup_watchdog, cpu) = p;
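The net effect of the last two hunks is a reordering in watchdog_enable(): the parent now creates the thread, elevates it to SCHED_FIFO before it ever runs, binds it to the CPU it is meant to watch, and only then lets it go. Roughly (a sketch with error handling trimmed; the wrapper name is hypothetical, and wake_up_process() is the eventual wakeup in the existing function, not part of this diff):

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int create_watchdog_thread(int cpu)
    {
            struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
            struct task_struct *p;

            p = kthread_create_on_node(watchdog, NULL, cpu_to_node(cpu),
                                       "watchdog/%d", cpu);
            if (IS_ERR(p))
                    return PTR_ERR(p);

            sched_setscheduler(p, SCHED_FIFO, &param); /* elevate before first run */
            kthread_bind(p, cpu);                      /* pin to the watched CPU */
            wake_up_process(p);                        /* starts life at RT priority */
            return 0;
    }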
@@ -496,7 +503,7 @@ static void watchdog_enable_all_cpus(void)
watchdog_enabled = 1;
if (!watchdog_enabled)
- printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
+ pr_err("failed to be enabled on some cpus\n");
}