path: root/kernel/rcutree_plugin.h
author		Paul E. McKenney <paul.mckenney@linaro.org>	2011-06-25 15:36:56 +0200
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-09-29 06:38:21 +0200
commit		d4c08f2ac311a360230eef7e5395b0ec8d8f0670 (patch)
tree		06e425b8153e076fbe43b037cf4497ac50afddb7 /kernel/rcutree_plugin.h
parent		rcu: Make TINY_RCU also use softirq for RCU_BOOST=n (diff)
download	linux-d4c08f2ac311a360230eef7e5395b0ec8d8f0670.tar.xz
		linux-d4c08f2ac311a360230eef7e5395b0ec8d8f0670.zip
rcu: Add grace-period, quiescent-state, and call_rcu trace events
Add trace events to record grace-period start and end, quiescent states, CPUs noticing grace-period start and end, grace-period initialization, call_rcu() invocation, tasks blocking in RCU read-side critical sections, tasks exiting those same critical sections, force_quiescent_state() detection of dyntick-idle and offline CPUs, CPUs entering and leaving dyntick-idle mode (except from NMIs), CPUs coming online and going offline, and CPUs being kicked for staying in dyntick-idle mode for too long (as in many weeks, even on 32-bit systems).

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

rcu: Add the rcu flavor to callback trace events

The earlier trace events for registering RCU callbacks and for invoking them did not include the RCU flavor (rcu_bh, rcu_preempt, or rcu_sched). This commit adds the RCU flavor to those trace events.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
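The tracepoints used in the diff below (trace_rcu_grace_period(), trace_rcu_preempt_task(), trace_rcu_unlock_preempted_task(), and trace_rcu_quiescent_state_report()) are declared with the kernel's TRACE_EVENT() machinery in include/trace/events/rcu.h. As a rough illustration only, here is a minimal sketch of what the rcu_grace_period event definition could look like, assuming the three-argument call site trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs") shown in the first hunk; the field layout and format string are illustrative, not a verbatim copy of the commit.

TRACE_EVENT(rcu_grace_period,

	/*
	 * Prototype assumed from the call sites in the diff: RCU flavor
	 * name, grace-period number, and a short event string ("cpuqs").
	 */
	TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent),

	TP_ARGS(rcuname, gpnum, gpevent),

	TP_STRUCT__entry(
		__field(char *, rcuname)
		__field(unsigned long, gpnum)
		__field(char *, gpevent)
	),

	TP_fast_assign(
		__entry->rcuname = rcuname;
		__entry->gpnum = gpnum;
		__entry->gpevent = gpevent;
	),

	TP_printk("%s %lu %s",
		  __entry->rcuname, __entry->gpnum, __entry->gpevent)
);

Once the rcu event group is enabled (for example via the events/rcu/ directory under the tracing debugfs mount), each call site added by this patch emits one such record into the ftrace ring buffer.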
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--	kernel/rcutree_plugin.h	22
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 94d9ca1e4061..bdb2e82f78d3 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -124,6 +124,8 @@ static void rcu_preempt_qs(int cpu)
 
 	rdp->passed_quiesc_completed = rdp->gpnum - 1;
 	barrier();
+	if (rdp->passed_quiesc == 0)
+		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
 	rdp->passed_quiesc = 1;
 	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
 }
@@ -190,6 +192,11 @@ static void rcu_preempt_note_context_switch(int cpu)
 			if (rnp->qsmask & rdp->grpmask)
 				rnp->gp_tasks = &t->rcu_node_entry;
 		}
+		trace_rcu_preempt_task(rdp->rsp->name,
+				       t->pid,
+				       (rnp->qsmask & rdp->grpmask)
+				       ? rnp->gpnum
+				       : rnp->gpnum + 1);
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	} else if (t->rcu_read_lock_nesting < 0 &&
 		   t->rcu_read_unlock_special) {
@@ -344,6 +351,8 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
 		np = rcu_next_node_entry(t, rnp);
 		list_del_init(&t->rcu_node_entry);
+		trace_rcu_unlock_preempted_task("rcu_preempt",
+						rnp->gpnum, t->pid);
 		if (&t->rcu_node_entry == rnp->gp_tasks)
 			rnp->gp_tasks = np;
 		if (&t->rcu_node_entry == rnp->exp_tasks)
@@ -364,10 +373,17 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 		 * we aren't waiting on any CPUs, report the quiescent state.
 		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
 		 */
-		if (empty)
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
-		else
+		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
+			trace_rcu_quiescent_state_report("preempt_rcu",
+							 rnp->gpnum,
+							 0, rnp->qsmask,
+							 rnp->level,
+							 rnp->grplo,
+							 rnp->grphi,
+							 !!rnp->gp_tasks);
 			rcu_report_unblock_qs_rnp(rnp, flags);
+		} else
+			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 #ifdef CONFIG_RCU_BOOST
 		/* Unboost if we were boosted. */