author     Ingo Molnar <mingo@kernel.org>  2013-04-21 11:05:47 +0200
committer  Ingo Molnar <mingo@kernel.org>  2013-04-21 11:05:47 +0200
commit     a166fcf04d848ffa09f0e831805553089f190cf4 (patch)
tree       1fc97c397238692375f1ebf7a39746188f6424db /kernel/time
parent     Merge branch 'timers/nohz-reviewed' of git://git.kernel.org/pub/scm/linux/ker... (diff)
parent     posix_timers: New API to prevent from stopping the tick when timers are running (diff)
Merge branch 'timers/nohz-posix-timers-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into timers/nohz
Pull posix cpu timers handling on full dynticks from Frederic Weisbecker.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
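As a rough sketch of the intended usage of the API named in the second parent above (a hypothetical call site; posix_cpu_timer_arm() and its body are invented here for illustration and are not part of this merge): when a posix CPU timer is armed, full dynticks CPUs must be told to re-evaluate whether they can keep the tick stopped.

/*
 * Hypothetical caller sketch, not part of this merge: arming a posix
 * CPU timer must force full dynticks CPUs to re-evaluate their tick
 * dependency, otherwise a CPU running the target task with the tick
 * stopped would never notice the timer.
 */
static void posix_cpu_timer_arm(struct task_struct *tsk)
{
	/* ... arm the per-task CPU timer ... */

	/* Force all full dynticks CPUs to recheck their tick state */
	tick_nohz_full_kick_all();
}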
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/Kconfig        1
-rw-r--r--  kernel/time/tick-sched.c  51
2 files changed, 52 insertions, 0 deletions
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 99c3f13dd478..f6a792ab4983 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -111,6 +111,7 @@ config NO_HZ_FULL
 	select RCU_USER_QS
 	select RCU_NOCB_CPU
 	select CONTEXT_TRACKING_FORCE
+	select IRQ_WORK
 	help
 	 Adaptively try to shutdown the tick whenever possible, even when
 	 the CPU is running tasks. Typically this requires running a single
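The new select IRQ_WORK is needed because the tick-sched.c changes below queue per-CPU irq_work to re-evaluate the tick from a safe context. A minimal sketch of that pattern (my_work_func, my_work and kick_self are invented names; only irq_work_queue() and struct irq_work come from the kernel's irq_work API):

#include <linux/irq_work.h>

static void my_work_func(struct irq_work *work)
{
	/* Runs in hard interrupt context on the CPU that queued it */
}

static struct irq_work my_work = { .func = my_work_func };

static void kick_self(void)
{
	/* Queues the work and raises a self-interrupt if not already pending */
	irq_work_queue(&my_work);
}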
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a76e09044f9f..884a9f302a06 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -147,6 +147,57 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 static cpumask_var_t nohz_full_mask;
 bool have_nohz_full_mask;
 
+/*
+ * Re-evaluate the need for the tick on the current CPU
+ * and restart it if necessary.
+ */
+static void tick_nohz_full_check(void)
+{
+	/*
+	 * STUB for now, will be filled with the full tick stop/restart
+	 * infrastructure patches
+	 */
+}
+
+static void nohz_full_kick_work_func(struct irq_work *work)
+{
+	tick_nohz_full_check();
+}
+
+static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
+	.func = nohz_full_kick_work_func,
+};
+
+/*
+ * Kick the current CPU if it's full dynticks in order to force it to
+ * re-evaluate its dependency on the tick and restart it if necessary.
+ */
+void tick_nohz_full_kick(void)
+{
+	if (tick_nohz_full_cpu(smp_processor_id()))
+		irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+}
+
+static void nohz_full_kick_ipi(void *info)
+{
+	tick_nohz_full_check();
+}
+
+/*
+ * Kick all full dynticks CPUs in order to force these to re-evaluate
+ * their dependency on the tick and restart it if necessary.
+ */
+void tick_nohz_full_kick_all(void)
+{
+	if (!have_nohz_full_mask)
+		return;
+
+	preempt_disable();
+	smp_call_function_many(nohz_full_mask,
+			       nohz_full_kick_ipi, NULL, false);
+	preempt_enable();
+}
+
 int tick_nohz_full_cpu(int cpu)
 {
 	if (!have_nohz_full_mask)