author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2012-02-27 10:47:00 +0100
committer Ingo Molnar <mingo@elte.hu>  2012-03-12 20:43:16 +0100
commit    3ccf3e8306156a28213adc720aba807e9a901ad5 (patch)
tree      5b9db344b702299ea7eb53fbff3d0d74707d40ec /kernel
parent    sched/nohz: Correctly initialize 'next_balance' in 'nohz' idle balancer (diff)
printk/sched: Introduce special printk_sched() for those awkward moments
There are a few awkward printk()s inside of the scheduler guts that people
prefer to keep, but that are really rather deadlock prone. Fudge around
this by storing the text in a per-cpu buffer and polling it from the
existing printk_tick() handler.

This will drop output when it is produced more frequently than once a
tick; however, only the affinity message could possibly go that fast, and
for that a single line should suffice to notify the admin that he's done
something silly.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-wua3lmkt3dg8nfts66o6brne@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
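The mechanism the changelog describes — format into a private buffer from the unsafe context, flush from the regular tick — can be sketched outside the kernel. The following minimal userspace C sketch is an illustration under simplifying assumptions (one implicit CPU, a hand-rolled tick(); deferred_printf() and tick() are hypothetical names, not kernel APIs):

/*
 * Userspace sketch of the deferral pattern described above.
 * Assumptions: a single implicit CPU, and tick() called from a
 * safe context. deferred_printf()/tick() are illustrative names.
 */
#include <stdarg.h>
#include <stdio.h>

#define BUF_SIZE	512

#define PENDING_WAKEUP	0x01
#define PENDING_SCHED	0x02

static char sched_buf[BUF_SIZE];
static unsigned int pending;

/* Stand-in for printk_sched(): format into the buffer now, print later. */
static int deferred_printf(const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = vsnprintf(sched_buf, BUF_SIZE, fmt, args);
	va_end(args);

	pending |= PENDING_SCHED;
	return r;
}

/* Stand-in for printk_tick(): consume the flags and emit the message. */
static void tick(void)
{
	unsigned int p = pending;

	pending = 0;
	if (p & PENDING_SCHED)
		printf("[sched_delayed] %s", sched_buf);
}

int main(void)
{
	/* "Atomic" context: only the buffer is written, no console I/O. */
	deferred_printf("process %d (%s) no longer affine to cpu%d\n",
			42, "demo", 3);

	/* A second message before the tick overwrites the first -- the
	 * once-a-tick drop behaviour the changelog accepts. */
	deferred_printf("sched: RT throttling activated\n");

	tick();		/* prints only the most recent message */
	return 0;
}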
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/printk.c      40
-rw-r--r--  kernel/sched/core.c   2
-rw-r--r--  kernel/sched/rt.c     8
3 files changed, 45 insertions(+), 5 deletions(-)
diff --git a/kernel/printk.c b/kernel/printk.c
index 13c0a1143f49..7ca7ba591e21 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1208,13 +1208,47 @@ int is_console_locked(void)
 	return console_locked;
 }
 
+/*
+ * Delayed printk facility, for scheduler-internal messages:
+ */
+#define PRINTK_BUF_SIZE		512
+
+#define PRINTK_PENDING_WAKEUP	0x01
+#define PRINTK_PENDING_SCHED	0x02
+
 static DEFINE_PER_CPU(int, printk_pending);
+static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
+
+int printk_sched(const char *fmt, ...)
+{
+	unsigned long flags;
+	va_list args;
+	char *buf;
+	int r;
+
+	local_irq_save(flags);
+	buf = __get_cpu_var(printk_sched_buf);
+
+	va_start(args, fmt);
+	r = vsnprintf(buf, PRINTK_BUF_SIZE, fmt, args);
+	va_end(args);
+
+	__this_cpu_or(printk_pending, PRINTK_PENDING_SCHED);
+	local_irq_restore(flags);
+
+	return r;
+}
 
 void printk_tick(void)
 {
 	if (__this_cpu_read(printk_pending)) {
-		__this_cpu_write(printk_pending, 0);
-		wake_up_interruptible(&log_wait);
+		int pending = __this_cpu_xchg(printk_pending, 0);
+		if (pending & PRINTK_PENDING_SCHED) {
+			char *buf = __get_cpu_var(printk_sched_buf);
+			printk(KERN_WARNING "[sched_delayed] %s", buf);
+		}
+		if (pending & PRINTK_PENDING_WAKEUP)
+			wake_up_interruptible(&log_wait);
 	}
 }
 
@@ -1228,7 +1262,7 @@ int printk_needs_cpu(int cpu)
 void wake_up_klogd(void)
 {
 	if (waitqueue_active(&log_wait))
-		this_cpu_write(printk_pending, 1);
+		this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
 }
 
 /**
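One detail in the printk_tick() hunk above: the pending word is consumed with __this_cpu_xchg() rather than a read followed by __this_cpu_write(..., 0), so a flag raised by an interrupt between the read and the clear cannot be lost. A standalone sketch of that swap-to-zero consumption, using GCC's __atomic builtins purely as a stand-in for the per-cpu ops (an assumption for illustration, not the kernel implementation):

#include <stdio.h>

#define PENDING_WAKEUP	0x01
#define PENDING_SCHED	0x02

static unsigned int pending_word;

/* Stand-in for __this_cpu_or(): raise a flag, e.g. from an interrupt. */
static void raise_flag(unsigned int flag)
{
	__atomic_fetch_or(&pending_word, flag, __ATOMIC_RELAXED);
}

/* Stand-in for the tick side: take all flags in one atomic swap, so a
 * flag raised concurrently lands in the next round instead of vanishing. */
static unsigned int consume_flags(void)
{
	return __atomic_exchange_n(&pending_word, 0, __ATOMIC_RELAXED);
}

int main(void)
{
	raise_flag(PENDING_SCHED);
	printf("first tick:  %#x\n", consume_flags());	/* 0x2 */
	printf("second tick: %#x\n", consume_flags());	/* 0 */
	return 0;
}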
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b1ccce819ce2..8781cec7c3e6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1284,7 +1284,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 	 * leave kernel.
 	 */
 	if (p->mm && printk_ratelimit()) {
-		printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
+		printk_sched("process %d (%s) no longer affine to cpu%d\n",
 				task_pid_nr(p), p->comm, cpu);
 	}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 7f7e7cdcb472..b60dad720173 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -864,8 +864,14 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 		 * but accrue some time due to boosting.
 		 */
 		if (likely(rt_b->rt_runtime)) {
+			static bool once = false;
+
 			rt_rq->rt_throttled = 1;
-			printk_once(KERN_WARNING "sched: RT throttling activated\n");
+
+			if (!once) {
+				once = true;
+				printk_sched("sched: RT throttling activated\n");
+			}
 		} else {
 			/*
 			 * In case we did anyway, make it go away,