author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-05-30 12:21:48 +0200
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-07-02 21:34:24 +0200
commit     29154c57e35a191c83b19c61b1935c9f21957662 (patch)
tree       e761fdd59279de1e8c45bb42b40361c8bc4ff04d /kernel/rcutree.c
parent     rcu: Prevent __call_rcu() from invoking RCU core on offline CPUs (diff)
download   linux-29154c57e35a191c83b19c61b1935c9f21957662.tar.xz
           linux-29154c57e35a191c83b19c61b1935c9f21957662.zip
rcu: Split RCU core processing out of __call_rcu()
The __call_rcu() function is a bit overweight, so this commit splits it into
actual enqueuing of and accounting for the callback (__call_rcu()) and
associated RCU-core processing (__call_rcu_core()).

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
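In rough outline, the resulting structure looks as follows (a condensed sketch
only, with the function bodies abbreviated to comments; the actual code is in
the diff below):

/* Core-RCU processing split out of __call_rcu(). */
static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
			    struct rcu_head *head, unsigned long flags)
{
	/* ... invoke_rcu_core() and/or force_quiescent_state() as needed ... */
}

static void
__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
	   struct rcu_state *rsp, bool lazy)
{
	unsigned long flags;
	struct rcu_data *rdp;

	/* ... enqueue head on rdp->nxttail and update qlen/qlen_lazy ... */
	__call_rcu_core(rsp, rdp, head, flags);	/* handle any core-RCU processing */
	local_irq_restore(flags);
}

This keeps the common enqueue-and-account path compact, while the heavier
grace-period checks live in __call_rcu_core().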
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c  90
1 file changed, 49 insertions(+), 41 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index ceaa95923a87..70c4da7d2a97 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1861,45 +1861,12 @@ static void invoke_rcu_core(void)
raise_softirq(RCU_SOFTIRQ);
}
-static void
-__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
- struct rcu_state *rsp, bool lazy)
+/*
+ * Handle any core-RCU processing required by a call_rcu() invocation.
+ */
+static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
+ struct rcu_head *head, unsigned long flags)
{
- unsigned long flags;
- struct rcu_data *rdp;
-
- WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
- debug_rcu_head_queue(head);
- head->func = func;
- head->next = NULL;
-
- smp_mb(); /* Ensure RCU update seen before callback registry. */
-
- /*
- * Opportunistically note grace-period endings and beginnings.
- * Note that we might see a beginning right after we see an
- * end, but never vice versa, since this CPU has to pass through
- * a quiescent state betweentimes.
- */
- local_irq_save(flags);
- rdp = this_cpu_ptr(rsp->rda);
-
- /* Add the callback to our list. */
- ACCESS_ONCE(rdp->qlen)++;
- if (lazy)
- rdp->qlen_lazy++;
- else
- rcu_idle_count_callbacks_posted();
- smp_mb(); /* Count before adding callback for rcu_barrier(). */
- *rdp->nxttail[RCU_NEXT_TAIL] = head;
- rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
-
- if (__is_kfree_rcu_offset((unsigned long)func))
- trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
- rdp->qlen_lazy, rdp->qlen);
- else
- trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
-
/*
* If called from an extended quiescent state, invoke the RCU
* core in order to force a re-evaluation of RCU's idleness.
@@ -1908,10 +1875,8 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
invoke_rcu_core();
/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
- if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id())) {
- local_irq_restore(flags);
+ if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
return;
- }
/*
* Force the grace period if too many callbacks or too long waiting.
@@ -1944,6 +1909,49 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
}
} else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
force_quiescent_state(rsp, 1);
+}
+
+static void
+__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
+ struct rcu_state *rsp, bool lazy)
+{
+ unsigned long flags;
+ struct rcu_data *rdp;
+
+ WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
+ debug_rcu_head_queue(head);
+ head->func = func;
+ head->next = NULL;
+
+ smp_mb(); /* Ensure RCU update seen before callback registry. */
+
+ /*
+ * Opportunistically note grace-period endings and beginnings.
+ * Note that we might see a beginning right after we see an
+ * end, but never vice versa, since this CPU has to pass through
+ * a quiescent state betweentimes.
+ */
+ local_irq_save(flags);
+ rdp = this_cpu_ptr(rsp->rda);
+
+ /* Add the callback to our list. */
+ ACCESS_ONCE(rdp->qlen)++;
+ if (lazy)
+ rdp->qlen_lazy++;
+ else
+ rcu_idle_count_callbacks_posted();
+ smp_mb(); /* Count before adding callback for rcu_barrier(). */
+ *rdp->nxttail[RCU_NEXT_TAIL] = head;
+ rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
+
+ if (__is_kfree_rcu_offset((unsigned long)func))
+ trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
+ rdp->qlen_lazy, rdp->qlen);
+ else
+ trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
+
+ /* Go handle any RCU core processing required. */
+ __call_rcu_core(rsp, rdp, head, flags);
local_irq_restore(flags);
}
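For context, callers reach this path through call_rcu(), which hands the
embedded rcu_head to __call_rcu(). A minimal usage sketch follows; struct foo,
foo_reclaim(), and foo_remove() are hypothetical names for illustration, not
part of this patch:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;	/* embedded rcu_head handed to call_rcu() */
};

/* Invoked after a grace period, once all pre-existing readers have finished. */
static void foo_reclaim(struct rcu_head *rcu)
{
	struct foo *fp = container_of(rcu, struct foo, rcu);

	kfree(fp);
}

static void foo_remove(struct foo *fp)
{
	/* Unlink fp from its data structure first, then defer the free. */
	call_rcu(&fp->rcu, foo_reclaim);
}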