author    Peter Zijlstra <peterz@infradead.org>  2021-01-21 16:09:32 +0100
committer Peter Zijlstra <peterz@infradead.org>  2021-04-16 17:06:32 +0200
commit    b5c4477366fb5e6a2f0f38742c33acd666c07698 (patch)
tree      bd7fa4cb0d72be06c611009cd76d94ce85142eeb /kernel/sched
parent    cpumask: Introduce DYING mask (diff)
sched: Use cpu_dying() to fix balance_push vs hotplug-rollback
Use the new cpu_dying() state to simplify and fix the balance_push()
vs CPU hotplug rollback state.

Specifically, we currently rely on the sched_cpu_dying() / sched_cpu_activate()
notifiers to terminate balance_push; however, if cpu_down() fails once we're
past sched_cpu_deactivate(), balance_push should be terminated at that point
rather than waiting until we hit sched_cpu_activate(). Similarly, when cpu_up()
fails and we're going back down, balance_push should be active, where it
currently is not.

So instead, make sure balance_push is enabled below SCHED_AP_ACTIVE (when
!cpu_active()), and gate its utility with cpu_dying().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/YHgAYef83VQhKdC2@hirez.programming.kicks-ass.net
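[Context: cpu_dying() comes from the parent commit, "cpumask: Introduce DYING
mask". A minimal sketch of its shape; the exact definition lives in
include/linux/cpumask.h and may differ in detail:]

/* Sketch of the parent commit's interface; not part of this diff. */
extern struct cpumask __cpu_dying_mask;
#define cpu_dying_mask	((const struct cpumask *)&__cpu_dying_mask)

/*
 * The hotplug core sets the bit while the state machine moves down and
 * clears it again on rollback, so the mask tracks the current hotplug
 * motion rather than any fixed state.
 */
static inline bool cpu_dying(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_dying_mask);
}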
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c  | 26 +++++++++++++++-----------
-rw-r--r--  kernel/sched/sched.h |  1 -
2 files changed, 15 insertions(+), 12 deletions(-)
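[Before the diff itself, a minimal standalone model of the decision the patch
implements; should_push_tasks() is a hypothetical illustration, not a kernel
function:]

#include <stdbool.h>

/*
 * Illustrative model only: the balance_push callback is installed for
 * the whole !cpu_active() window, but it only pushes tasks while
 * cpu_dying() is set, i.e. while the hotplug motion is downwards.
 */
static bool should_push_tasks(bool is_active, bool is_dying)
{
	if (is_active)
		return false;	/* callback not installed at all */

	return is_dying;	/* installed; effective only going down */
}

[This captures both failure cases from the changelog: a failed cpu_down()
clears the dying state and the still-installed callback goes quiet
immediately, while a failed cpu_up() re-enters the dying state and the
still-installed callback becomes effective again.]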
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 95bd6ab8115d..7d031da20df3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1811,7 +1811,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
 		return cpu_online(cpu);
 
 	/* Regular kernel threads don't get to stay during offline. */
-	if (cpu_rq(cpu)->balance_push)
+	if (cpu_dying(cpu))
 		return false;
 
 	/* But are allowed during online. */
@@ -7638,6 +7638,9 @@ static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
 
 /*
  * Ensure we only run per-cpu kthreads once the CPU goes !active.
+ *
+ * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
+ * effective when the hotplug motion is down.
  */
 static void balance_push(struct rq *rq)
 {
@@ -7645,12 +7648,19 @@ static void balance_push(struct rq *rq)
 
 	lockdep_assert_held(&rq->lock);
 	SCHED_WARN_ON(rq->cpu != smp_processor_id());
+
 	/*
 	 * Ensure the thing is persistent until balance_push_set(.on = false);
 	 */
 	rq->balance_callback = &balance_push_callback;
 
 	/*
+	 * Only active while going offline.
+	 */
+	if (!cpu_dying(rq->cpu))
+		return;
+
+	/*
 	 * Both the cpu-hotplug and stop task are in this case and are
 	 * required to complete the hotplug process.
 	 *
@@ -7703,7 +7713,6 @@ static void balance_push_set(int cpu, bool on)
 	struct rq_flags rf;
 
 	rq_lock_irqsave(rq, &rf);
-	rq->balance_push = on;
 	if (on) {
 		WARN_ON_ONCE(rq->balance_callback);
 		rq->balance_callback = &balance_push_callback;
@@ -7828,8 +7837,8 @@ int sched_cpu_activate(unsigned int cpu)
 	struct rq_flags rf;
 
 	/*
-	 * Make sure that when the hotplug state machine does a roll-back
-	 * we clear balance_push. Ideally that would happen earlier...
+	 * Clear the balance_push callback and prepare to schedule
+	 * regular tasks.
 	 */
 	balance_push_set(cpu, false);
 
@@ -8014,12 +8023,6 @@ int sched_cpu_dying(unsigned int cpu)
 	}
 	rq_unlock_irqrestore(rq, &rf);
 
-	/*
-	 * Now that the CPU is offline, make sure we're welcome
-	 * to new tasks once we come back up.
-	 */
-	balance_push_set(cpu, false);
-
 	calc_load_migrate(rq);
 	update_max_interval();
 	hrtick_clear(rq);
@@ -8204,7 +8207,7 @@ void __init sched_init(void)
 		rq->sd = NULL;
 		rq->rd = NULL;
 		rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
-		rq->balance_callback = NULL;
+		rq->balance_callback = &balance_push_callback;
 		rq->active_balance = 0;
 		rq->next_balance = jiffies;
 		rq->push_cpu = 0;
@@ -8251,6 +8254,7 @@ void __init sched_init(void)
 
 #ifdef CONFIG_SMP
 	idle_thread_set_boot_cpu();
+	balance_push_set(smp_processor_id(), false);
 #endif
 	init_sched_fair_class();
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index cbb0b011e9e0..7e7e936b4938 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -983,7 +983,6 @@ struct rq {
 	unsigned long		cpu_capacity_orig;
 
 	struct callback_head	*balance_callback;
-	unsigned char		balance_push;
 
 	unsigned char		nohz_idle_balance;
 	unsigned char		idle_balance;
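[The enable side of the mechanism is outside this diff: balance_push_set(cpu,
true) is called from sched_cpu_deactivate() once the CPU has dropped out of
cpu_active_mask. A condensed sketch of that path, simplified from
kernel/sched/core.c of this era; locking, error handling and sched-domain
updates are omitted:]

int sched_cpu_deactivate(unsigned int cpu)
{
	/* The CPU leaves cpu_active_mask first ... */
	set_cpu_active(cpu, false);

	/*
	 * ... then the push callback is armed. After this commit it stays
	 * armed until sched_cpu_activate() runs (reached on normal bring-up
	 * or on rollback), while cpu_dying() gates whether balance_push()
	 * actually migrates tasks away.
	 */
	balance_push_set(cpu, true);

	return 0;
}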