author		Peter Zijlstra <peterz@infradead.org>	2019-08-06 15:13:17 +0200
committer	Peter Zijlstra <peterz@infradead.org>	2019-08-08 09:09:30 +0200
commit		10e7071b2f491b0fb981717ea0a585c441906ede
tree		ce4b0f5ba59efc3d238e0b3046e81a068e27b372 /kernel/sched
parent		sched/{rt,deadline}: Fix set_next_task vs pick_next_task
sched: Rework CPU hotplug task selection
The CPU hotplug task selection is the only place where we used
put_prev_task() on a task that is not current. While looking at that,
it occurred to me that we can simplify all that by using a custom pick
loop. Since we don't need to put current, we can do away with the fake
task too.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Aaron Lu <aaron.lwe@gmail.com>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: mingo@kernel.org
Cc: Phil Auld <pauld@redhat.com>
Cc: Julien Desfossez <jdesfossez@digitalocean.com>
Cc: Nishanth Aravamudan <naravamudan@digitalocean.com>
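The custom pick loop hinges on the for_each_class() iterator. For
context, here is a simplified sketch of how it was defined in
kernel/sched/sched.h around this time (SMP case only; an approximation
for illustration, not the verbatim source):

/*
 * The scheduling classes form a priority-ordered singly linked list:
 * stop -> deadline -> rt -> fair -> idle.  Iterating from the head
 * therefore visits classes from highest to lowest priority, and the
 * idle class terminates the list.
 */
#define sched_class_highest	(&stop_sched_class)

#define for_each_class(class) \
	for (class = sched_class_highest; class; class = class->next)

Because the idle class can always return a runnable task (the per-CPU
idle task), a walk over all classes cannot come up empty-handed; the
BUG() at the tail of __pick_migrate_task() in the diff below documents
exactly that.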
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	32
-rw-r--r--	kernel/sched/sched.h	1
2 files changed, 15 insertions(+), 18 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9a821ff68502..364b6d7da2be 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6082,21 +6082,22 @@ static void calc_load_migrate(struct rq *rq)
 		atomic_long_add(delta, &calc_load_tasks);
 }
 
-static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
+static struct task_struct *__pick_migrate_task(struct rq *rq)
 {
-}
+	const struct sched_class *class;
+	struct task_struct *next;
 
-static const struct sched_class fake_sched_class = {
-	.put_prev_task = put_prev_task_fake,
-};
+	for_each_class(class) {
+		next = class->pick_next_task(rq, NULL, NULL);
+		if (next) {
+			next->sched_class->put_prev_task(rq, next);
+			return next;
+		}
+	}
 
-static struct task_struct fake_task = {
-	/*
-	 * Avoid pull_{rt,dl}_task()
-	 */
-	.prio = MAX_PRIO + 1,
-	.sched_class = &fake_sched_class,
-};
+	/* The idle class should always have a runnable task */
+	BUG();
+}
 
 /*
  * Migrate all tasks from the rq, sleeping tasks will be migrated by
@@ -6139,12 +6140,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
 		if (rq->nr_running == 1)
 			break;
 
-		/*
-		 * pick_next_task() assumes pinned rq->lock:
-		 */
-		next = pick_next_task(rq, &fake_task, rf);
-		BUG_ON(!next);
-		put_prev_task(rq, next);
+		next = __pick_migrate_task(rq);
 
 		/*
 		 * Rules for changing task_struct::cpus_mask are holding
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ea48aa5daeee..b3449d0dd7f0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1751,6 +1751,7 @@ struct sched_class {
 
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
+	WARN_ON_ONCE(rq->curr != prev);
 	prev->sched_class->put_prev_task(rq, prev);
 }
 
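One subtlety is worth spelling out. With the WARN_ON_ONCE() above, the
put_prev_task() wrapper now asserts that only the currently running
task may be put through it, and that is precisely why
__pick_migrate_task() invokes the class hook directly rather than the
wrapper: the task it just picked is not rq->curr. A condensed sketch of
the two call paths (illustrative, not verbatim kernel code):

/* Ordinary scheduling: prev is rq->curr, so the assertion holds. */
static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	WARN_ON_ONCE(rq->curr != prev);	/* only current may be put here */
	prev->sched_class->put_prev_task(rq, prev);
}

/*
 * Hotplug migration: 'next' was picked moments ago and never ran, so
 * it is not rq->curr.  __pick_migrate_task() therefore bypasses the
 * wrapper and calls the class hook directly:
 *
 *	next->sched_class->put_prev_task(rq, next);
 */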