author     Paul E. McKenney <paulmck@linux.ibm.com>   2018-12-01 01:11:14 +0100
committer  Paul E. McKenney <paulmck@linux.ibm.com>   2019-01-26 00:29:53 +0100
commit     37f62d7cf00c085e1d7a91a6af286c4e8d32e1e1 (patch)
tree       9e16f2b9587d5566f976803263bed1b5374ed88b /kernel/rcu
parent     rcu: Accommodate zero jiffies_till_first_fqs and kthread kicking (diff)
rcu: Move rcu_cpu_kthread_task to rcu_data structure
Given that RCU has a perfectly good per-CPU rcu_data structure, most per-CPU quantities should be stored there. This commit therefore moves the rcu_cpu_kthread_task per-CPU variable to the rcu_data structure. This also makes this variable unconditionally present, which should be acceptable given the memory reduction due to the RCU flavor consolidation and also due to the simplifications this will enable.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
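As background for the diff below, here is a simplified sketch of the per-CPU pattern this commit applies; it is illustrative only (surrounding fields are trimmed and the helper function name ending in _sketch is hypothetical), not the actual kernel source. Instead of a free-standing DEFINE_PER_CPU() pointer, the rcuc kthread pointer becomes a member of the per-CPU rcu_data structure, and readers switch from __this_cpu_read(rcu_cpu_kthread_task) to __this_cpu_read(rcu_data.rcu_cpu_kthread_task).

#include <linux/percpu.h>
#include <linux/sched.h>

/* Before this commit: a free-standing per-CPU pointer to the rcuc kthread. */
static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);

/* After this commit: the pointer lives inside the per-CPU rcu_data structure. */
struct rcu_data {
	/* ... other per-CPU RCU state omitted ... */
	struct task_struct *rcu_cpu_kthread_task;	/* rcuc kthread or NULL. */
};
static DEFINE_PER_CPU(struct rcu_data, rcu_data);

/* Call sites then read the structure member rather than the old variable: */
static bool running_as_rcuc_kthread_sketch(void)
{
	return __this_cpu_read(rcu_data.rcu_cpu_kthread_task) == current;
}

Both forms compile to a per-CPU access; folding the field into rcu_data simply keeps all of a CPU's RCU state in one place, which is the motivation stated above.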
Diffstat (limited to 'kernel/rcu')
-rw-r--r--   kernel/rcu/tree.h        |  6 +++++-
-rw-r--r--   kernel/rcu/tree_plugin.h | 11 +++++------
2 files changed, 10 insertions(+), 7 deletions(-)
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index d90b02b53c0e..ef517ba25192 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -234,7 +234,11 @@ struct rcu_data {
 					/* Leader CPU takes GP-end wakeups. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
-	/* 6) Diagnostic data, including RCU CPU stall warnings. */
+	/* 6) RCU priority boosting. */
+	struct task_struct *rcu_cpu_kthread_task;
+					/* rcuc per-CPU kthread or NULL. */
+
+	/* 7) Diagnostic data, including RCU CPU stall warnings. */
 	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
 	/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
 	struct irq_work rcu_iw;		/* Check for non-irq activity. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 1b3dd2fc0cd6..359bf1f6f8e0 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -40,7 +40,6 @@
 /*
  * Control variables for per-CPU and per-rcu_node kthreads.
  */
-static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
@@ -1308,9 +1307,9 @@ static void invoke_rcu_callbacks_kthread(void)
 
 	local_irq_save(flags);
 	__this_cpu_write(rcu_cpu_has_work, 1);
-	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
-	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
-		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
+	if (__this_cpu_read(rcu_data.rcu_cpu_kthread_task) != NULL &&
+	    current != __this_cpu_read(rcu_data.rcu_cpu_kthread_task)) {
+		rcu_wake_cond(__this_cpu_read(rcu_data.rcu_cpu_kthread_task),
 			      __this_cpu_read(rcu_cpu_kthread_status));
 	}
 	local_irq_restore(flags);
@@ -1322,7 +1321,7 @@ static void invoke_rcu_callbacks_kthread(void)
  */
 static bool rcu_is_callbacks_kthread(void)
 {
-	return __this_cpu_read(rcu_cpu_kthread_task) == current;
+	return __this_cpu_read(rcu_data.rcu_cpu_kthread_task) == current;
 }
 
 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
@@ -1459,7 +1458,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 }
 
 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
-	.store			= &rcu_cpu_kthread_task,
+	.store			= &rcu_data.rcu_cpu_kthread_task,
 	.thread_should_run	= rcu_cpu_kthread_should_run,
 	.thread_fn		= rcu_cpu_kthread,
 	.thread_comm		= "rcuc/%u",