Diffstat (limited to 'kernel/rcu/tasks.h')
 kernel/rcu/tasks.h | 53 +++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 41 insertions(+), 12 deletions(-)
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 05d3e1375e4c..d5d9f2d03e8a 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -28,6 +28,8 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
* @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
* @gp_func: This flavor's grace-period-wait function.
* @gp_state: Grace period's most recent state transition (debugging).
+ * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
+ * @init_fract: Initial backoff sleep interval.
* @gp_jiffies: Time of last @gp_state transition.
* @gp_start: Most recent grace-period start in jiffies.
* @n_gps: Number of grace periods completed since boot.
@@ -48,6 +50,8 @@ struct rcu_tasks {
struct wait_queue_head cbs_wq;
raw_spinlock_t cbs_lock;
int gp_state;
+ int gp_sleep;
+ int init_fract;
unsigned long gp_jiffies;
unsigned long gp_start;
unsigned long n_gps;
@@ -81,7 +85,7 @@ static struct rcu_tasks rt_name = \
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
/* Avoid IPIing CPUs early in the grace period. */
-#define RCU_TASK_IPI_DELAY (HZ / 2)
+#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);
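The new RCU_TASK_IPI_DELAY definition selects its value with IS_ENABLED(), which, unlike #ifdef, is an ordinary C expression: both arms stay visible to the compiler and type checker, and the dead arm is removed by constant folding. A minimal user-space sketch of the pattern, with a simplified IS_ENABLED() and a stand-in CONFIG_FOO_MB macro (the kernel's version in <linux/kconfig.h> also handles =m and undefined options):

    /*
     * User-space sketch of the IS_ENABLED() selection pattern; this
     * simplified IS_ENABLED() only accepts options defined to 0 or 1.
     */
    #include <stdio.h>

    #define HZ 250
    #define CONFIG_FOO_MB 0              /* flip to 1 to emulate =y */
    #define IS_ENABLED(option) (option)  /* simplified stand-in */

    #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_FOO_MB) ? HZ / 2 : 0)

    int main(void)
    {
            printf("IPI delay: %d jiffies\n", RCU_TASK_IPI_DELAY);
            return 0;
    }

The payoff over #ifdef is that the rarely enabled branch must still compile, which catches bitrot in seldom-tested configurations.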
@@ -231,7 +235,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
cond_resched();
}
/* Paranoid sleep to keep this from entering a tight loop */
- schedule_timeout_idle(HZ/10);
+ schedule_timeout_idle(rtp->gp_sleep);
set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
}
@@ -329,8 +333,10 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
*/
lastreport = jiffies;
- /* Start off with HZ/10 wait and slowly back off to 1 HZ wait. */
- fract = 10;
+ // Start off with initial wait and slowly back off to 1 HZ wait.
+ fract = rtp->init_fract;
+ if (fract > HZ)
+ fract = HZ;
for (;;) {
bool firstreport;
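In the loop that follows (its body lies outside this hunk's context), each pass sleeps for HZ/fract jiffies and, in the kernel source of this era, decrements fract toward 1, so polling backs off from init_fract polls per second toward one poll per second; the new clamp keeps HZ/fract from truncating to zero when init_fract exceeds HZ. A user-space sketch of that schedule, under those assumptions and with illustrative constants:

    /*
     * Sketch of the rcu_tasks_wait_gp() backoff schedule.  Assumes the
     * loop sleeps HZ/fract jiffies and decrements fract toward 1, per
     * the kernel code surrounding this hunk; HZ and the pass count are
     * illustrative, not taken from a real configuration.
     */
    #include <stdio.h>

    #define HZ 250

    int main(void)
    {
            int fract = HZ / 5;     /* the non-MB RCU Tasks Trace init_fract */

            if (fract > HZ)         /* same clamp as the patch: keep HZ/fract >= 1 */
                    fract = HZ;
            for (int pass = 0; pass < 8; pass++) {
                    printf("pass %d: sleep %d jiffies\n", pass, HZ / fract);
                    if (fract > 1)
                            fract--;        /* back off toward a full-second wait */
            }
            return 0;
    }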
@@ -553,6 +559,8 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
static int __init rcu_spawn_tasks_kthread(void)
{
+ rcu_tasks.gp_sleep = HZ / 10;
+ rcu_tasks.init_fract = 10;
rcu_tasks.pregp_func = rcu_tasks_pregp_step;
rcu_tasks.pertask_func = rcu_tasks_pertask;
rcu_tasks.postscan_func = rcu_tasks_postscan;
@@ -685,6 +693,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
static int __init rcu_spawn_tasks_rude_kthread(void)
{
+ rcu_tasks_rude.gp_sleep = HZ / 10;
rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
return 0;
}
@@ -745,9 +754,9 @@ static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
// The number of detections of task quiescent state relying on
// heavyweight readers executing explicit memory barriers.
-unsigned long n_heavy_reader_attempts;
-unsigned long n_heavy_reader_updates;
-unsigned long n_heavy_reader_ofl_updates;
+static unsigned long n_heavy_reader_attempts;
+static unsigned long n_heavy_reader_updates;
+static unsigned long n_heavy_reader_ofl_updates;
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
@@ -821,6 +830,12 @@ static void trc_read_check_handler(void *t_in)
WRITE_ONCE(t->trc_reader_checked, true);
goto reset_ipi;
}
+ // If we are racing with an rcu_read_unlock_trace(), try again later.
+ if (unlikely(t->trc_reader_nesting < 0)) {
+ if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
+ wake_up(&trc_wait);
+ goto reset_ipi;
+ }
WRITE_ONCE(t->trc_reader_checked, true);
// Get here if the task is in a read-side critical section. Set
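The added check covers readers caught between decrementing their nesting count and finishing end-of-reader processing: rcu_read_unlock_trace() parks ->trc_reader_nesting at a negative value during that window, so a negative count means "neither idle nor in a reader; retry later". A single-threaded user-space sketch of that convention (the real fields use READ_ONCE()/WRITE_ONCE() and the race is of course concurrent):

    /*
     * Sketch of the "nesting goes negative during unlock" convention the
     * new check relies on.  INT_MIN as the parked value is illustrative
     * of the kernel's scheme, not a quote of it.
     */
    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    static int trc_reader_nesting;  /* stand-in for t->trc_reader_nesting */

    static void read_lock_trace(void)  { trc_reader_nesting++; }

    static void read_unlock_trace(void)
    {
            /* Park the counter negative while end-of-reader work runs. */
            trc_reader_nesting = INT_MIN;
            /* ... end-of-critical-section bookkeeping would run here ... */
            trc_reader_nesting = 0;
    }

    static bool scan_sees_quiescent(void)
    {
            int n = trc_reader_nesting;

            if (n > 0)
                    return false;   /* still in a reader: must set need_qs */
            if (n < 0)
                    return false;   /* racing with an unlock: the new check */
            return true;            /* n == 0: quiescent, mark checked */
    }

    int main(void)
    {
            read_lock_trace();
            printf("in reader: quiescent? %d\n", scan_sees_quiescent());
            read_unlock_trace();
            printf("after unlock: quiescent? %d\n", scan_sees_quiescent());
            return 0;
    }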
@@ -911,7 +926,8 @@ static void trc_wait_for_one_reader(struct task_struct *t,
// If currently running, send an IPI, either way, add to list.
trc_add_holdout(t, bhp);
- if (task_curr(t) && time_after(jiffies, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
+ if (task_curr(t) &&
+ time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
// The task is currently running, so try IPIing it.
cpu = task_cpu(t);
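time_after(a, b) is strict, so with rcu_task_ipi_delay now defaulting to 0 in the non-MB case, the old test was false on the very first jiffy of a grace period (jiffies == ->gp_start); comparing jiffies + 1 makes a zero delay mean "IPI immediately". A user-space demonstration of the off-by-one, using the kernel's wrap-safe time_after() definition minus its typecheck():

    /* Demonstrates why the comparison gains a "+ 1" for a zero delay. */
    #include <stdio.h>

    #define time_after(a, b)        ((long)((b) - (a)) < 0)

    int main(void)
    {
            unsigned long gp_start = 1000;          /* illustrative jiffies values */
            unsigned long jiffies = 1000;           /* first jiffy of the grace period */
            unsigned long rcu_task_ipi_delay = 0;   /* the new non-MB default */

            /* Old test: strict "after" fails when jiffies == gp_start + 0. */
            printf("old: %d\n", time_after(jiffies, gp_start + rcu_task_ipi_delay));
            /* New test: the +1 turns a zero delay into "IPI immediately". */
            printf("new: %d\n", time_after(jiffies + 1, gp_start + rcu_task_ipi_delay));
            return 0;
    }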
@@ -1072,15 +1088,17 @@ static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
if (ret)
break; // Count reached zero.
// Stall warning time, so make a list of the offenders.
+ rcu_read_lock();
for_each_process_thread(g, t)
if (READ_ONCE(t->trc_reader_special.b.need_qs))
trc_add_holdout(t, &holdouts);
+ rcu_read_unlock();
firstreport = true;
- list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list)
- if (READ_ONCE(t->trc_reader_special.b.need_qs)) {
+ list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
+ if (READ_ONCE(t->trc_reader_special.b.need_qs))
show_stalled_task_trace(t, &firstreport);
- trc_del_holdout(t);
- }
+ trc_del_holdout(t); // Release task_struct reference.
+ }
if (firstreport)
pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n");
show_stalled_ipi_trace();
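for_each_process_thread() walks the tasklist, and a walker holding neither tasklist_lock nor an RCU read lock can race with task exit; the added rcu_read_lock()/rcu_read_unlock() pair supplies that protection for the stall-warning scan. A kernel-style sketch of the same pattern around a hypothetical counting helper (count_need_qs() is illustrative, not part of the patch):

    #include <linux/rcupdate.h>
    #include <linux/sched/signal.h>

    /* Hypothetical helper: count tasks still flagged as needing a
     * quiescent state, using the same RCU-protected walk as above.
     */
    static int count_need_qs(void)
    {
            struct task_struct *g, *t;
            int n = 0;

            rcu_read_lock();        /* keeps tasklist entries live during the walk */
            for_each_process_thread(g, t)
                    if (READ_ONCE(t->trc_reader_special.b.need_qs))
                            n++;
            rcu_read_unlock();
            return n;
    }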
@@ -1163,6 +1181,17 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
static int __init rcu_spawn_tasks_trace_kthread(void)
{
+ if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
+ rcu_tasks_trace.gp_sleep = HZ / 10;
+ rcu_tasks_trace.init_fract = 10;
+ } else {
+ rcu_tasks_trace.gp_sleep = HZ / 200;
+ if (rcu_tasks_trace.gp_sleep <= 0)
+ rcu_tasks_trace.gp_sleep = 1;
+ rcu_tasks_trace.init_fract = HZ / 5;
+ if (rcu_tasks_trace.init_fract <= 0)
+ rcu_tasks_trace.init_fract = 1;
+ }
rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
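The non-MB defaults are integer divisions of HZ, which is why the "<= 0" clamps are needed: with HZ=1000 the per-grace-period sleep comes to 1000/200 = 5 jiffies (5 ms), but with HZ=100 the same division truncates to 0 and must be clamped up to 1 jiffy. A user-space sketch of the arithmetic across common HZ values:

    /*
     * Tuning arithmetic from rcu_spawn_tasks_trace_kthread() above; the
     * clamp to 1 mirrors the patch's guard against HZ/200 or HZ/5
     * rounding down to zero on low-HZ configurations.
     */
    #include <stdio.h>

    int main(void)
    {
            static const int hz_values[] = { 100, 250, 300, 1000 };

            for (unsigned int i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
                    int hz = hz_values[i];
                    int gp_sleep = hz / 200;
                    int init_fract = hz / 5;

                    if (gp_sleep <= 0)
                            gp_sleep = 1;
                    if (init_fract <= 0)
                            init_fract = 1;
                    printf("HZ=%4d: gp_sleep=%d jiffies, init_fract=%d\n",
                           hz, gp_sleep, init_fract);
            }
            return 0;
    }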