author | Paul E. McKenney <paulmck@kernel.org> | 2020-03-22 21:18:54 +0100 |
---|---|---|
committer | Paul E. McKenney <paulmck@kernel.org> | 2020-04-27 20:03:52 +0200 |
commit | 9796e1ae7386ecf66eb234f7db7753845ebb2139 | |
tree | 1be80a53156ebed254add11ed03145ce4f984fea | /kernel/rcu |
parent | rcu-tasks: Handle the running-offline idle-task special case | |
rcu-tasks: Make RCU tasks trace also wait for idle tasks
This commit scans the CPUs, adding each CPU's idle task to the list of
tasks that need quiescent states.
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Diffstat (limited to 'kernel/rcu')
-rw-r--r-- | kernel/rcu/tasks.h | 18 |
1 file changed, 13 insertions, 5 deletions
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index e3a42d8f9eeb..f272e8f16b81 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -15,7 +15,7 @@ struct rcu_tasks;
 typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
 typedef void (*pregp_func_t)(void);
 typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
-typedef void (*postscan_func_t)(void);
+typedef void (*postscan_func_t)(struct list_head *hop);
 typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
 typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
 
@@ -331,7 +331,7 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
 	rcu_read_unlock();
 
 	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
-	rtp->postscan_func();
+	rtp->postscan_func(&holdouts);
 
 	/*
 	 * Each pass through the following loop scans the list of holdout
@@ -415,7 +415,7 @@ static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
 }
 
 /* Processing between scanning taskslist and draining the holdout list. */
-void rcu_tasks_postscan(void)
+void rcu_tasks_postscan(struct list_head *hop)
 {
 	/*
 	 * Wait for tasks that are in the process of exiting. This
@@ -936,9 +936,17 @@ static void rcu_tasks_trace_pertask(struct task_struct *t,
 	trc_wait_for_one_reader(t, hop);
 }
 
-/* Do intermediate processing between task and holdout scans. */
-static void rcu_tasks_trace_postscan(void)
+/*
+ * Do intermediate processing between task and holdout scans and
+ * pick up the idle tasks.
+ */
+static void rcu_tasks_trace_postscan(struct list_head *hop)
 {
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		rcu_tasks_trace_pertask(idle_task(cpu), hop);
+
 	// Re-enable CPU hotplug now that the tasklist scan has completed.
 	cpus_read_unlock();
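
To make the callback plumbing easier to follow outside the kernel tree, here is a minimal userspace C sketch of the pattern this patch introduces: the post-scan hook now receives the holdout list, so the trace variant can append one entry per CPU's idle task. All names in the sketch (struct task, idle_task(), pertask(), trace_postscan(), NR_CPUS_DEMO) are illustrative stand-ins for the kernel's task_struct, idle_task(), rcu_tasks_trace_pertask(), rcu_tasks_trace_postscan(), and for_each_possible_cpu(); this is not kernel code.

```c
/*
 * Userspace sketch of the postscan_func_t signature change:
 * the hook is handed the holdout list and adds the per-CPU
 * idle tasks to it.  Stand-in types and helpers only.
 */
#include <stdio.h>

#define NR_CPUS_DEMO 4		/* stand-in for the set of possible CPUs */

struct task {
	int cpu;		/* CPU whose idle task this models */
	struct task *next;	/* singly linked holdout list */
};

/* Post-scan hook type: now takes the holdout list (was void). */
typedef void (*postscan_func_t)(struct task **holdouts);

static struct task idle_tasks[NR_CPUS_DEMO];

/* Stand-in for idle_task(cpu): return the idle task of that CPU. */
static struct task *idle_task(int cpu)
{
	idle_tasks[cpu].cpu = cpu;
	return &idle_tasks[cpu];
}

/* Stand-in for rcu_tasks_trace_pertask(): queue t on the holdout list. */
static void pertask(struct task *t, struct task **holdouts)
{
	t->next = *holdouts;
	*holdouts = t;
}

/* Stand-in for rcu_tasks_trace_postscan(): pick up every idle task. */
static void trace_postscan(struct task **holdouts)
{
	for (int cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
		pertask(idle_task(cpu), holdouts);
}

int main(void)
{
	struct task *holdouts = NULL;
	postscan_func_t postscan = trace_postscan;

	/* The grace-period path now passes its list to the hook. */
	postscan(&holdouts);

	for (struct task *t = holdouts; t; t = t->next)
		printf("holdout: idle task of CPU %d\n", t->cpu);
	return 0;
}
```

In the kernel, the same signature change lets the flavor-independent rcu_tasks_wait_gp() hand its local holdouts list to whichever flavor-specific postscan hook is installed, which is what allows the RCU Tasks Trace variant to also wait on the idle tasks.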