author:    Paul E. McKenney <paul.mckenney@linaro.org>, 2012-02-14 19:12:54 +0100
committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com>, 2012-02-21 18:42:11 +0100
commit:    c3ce910b1456a45fa88959af3735bd6b285e54af (patch)
tree:      d0e74991e637dddbeb3a8ab70ecd983325133893 /kernel/rcutree_plugin.h
parent:    rcu: Add RCU_NONIDLE() for idle-loop RCU read-side critical sections (diff)
rcu: Eliminate softirq-mediated RCU_FAST_NO_HZ idle-entry loop
If a softirq is pending, the current CPU has RCU callbacks pending, and RCU does not immediately need anything from this CPU, then the current code resets the RCU_FAST_NO_HZ state machine. This means that upon exit from the subsequent softirq handler, RCU_FAST_NO_HZ will try really hard to force RCU into dyntick-idle mode. And if the same conditions hold after a few tries (determined by RCU_IDLE_OPT_FLUSHES), the same situation can repeat, possibly endlessly. This scenario is not particularly good for battery lifetime.

This commit therefore suppresses the early exit from the RCU_FAST_NO_HZ state machine in the case where there is a softirq pending. This change forces the state machine to retain its memory, and to enter holdoff if this condition persists.

Reported-by: "Abou Gazala, Neven M" <neven.m.abou.gazala@intel.com>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree_plugin.h')
 kernel/rcutree_plugin.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index f7ceadf4986e..392a65136a72 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -2133,7 +2133,8 @@ static void rcu_prepare_for_idle(int cpu)
 		/* First time through, initialize the counter. */
 		per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
 	} else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
-		   !rcu_pending(cpu)) {
+		   !rcu_pending(cpu) &&
+		   !local_softirq_pending()) {
 		/* Can we go dyntick-idle despite still having callbacks? */
 		trace_rcu_prep_idle("Dyntick with callbacks");
 		per_cpu(rcu_dyntick_drain, cpu) = 0;
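
For context, the patched condition sits near the top of the per-CPU RCU_FAST_NO_HZ state machine in rcu_prepare_for_idle(). The following is only a minimal sketch of that decision flow after the patch, restricted to the identifiers visible in the hunk above; the real function in kernel/rcutree_plugin.h of this kernel version also manages holdoff timing, arms a wakeup timer, and invokes callbacks, all of which are omitted here.

/*
 * Minimal sketch of the RCU_FAST_NO_HZ decision after this patch.
 * Not the actual rcu_prepare_for_idle(): holdoff bookkeeping, timer
 * arming, and callback invocation are left out.
 */
static void rcu_prepare_for_idle_sketch(int cpu)
{
	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* First pass this idle entry: arm the flush counter. */
		per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
	} else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
		   !rcu_pending(cpu) &&
		   !local_softirq_pending()) {
		/*
		 * RCU needs nothing from this CPU soon and no softirq is
		 * pending, so enter dyntick-idle despite the remaining
		 * callbacks.  Before this patch, a pending softirq could
		 * still reach this branch, zeroing the counter and
		 * restarting the whole flush sequence after every
		 * softirq handler ran.
		 */
		trace_rcu_prep_idle("Dyntick with callbacks");
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		return;
	}
	/* Otherwise keep draining; repeated failures lead to holdoff. */
}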