author     Ingo Molnar <mingo@kernel.org>  2022-05-06 10:21:46 +0200
committer  Ingo Molnar <mingo@kernel.org>  2022-05-06 10:21:46 +0200
commit     d70522fc541224b8351ac26f4765f2c6268f8d72
tree       5c924231da32485f3675f8e9911399e838fa2437 /kernel/sched
parent     smp: Make softirq handling RT safe in flush_smp_call_function_queue()
parent     Linux 5.18-rc5
Merge tag 'v5.18-rc5' into sched/core to pull in fixes & to resolve a conflict
- sched/core is on a pretty old -rc1 base - refresh it to include recent fixes.
- this also allows us to resolve a (trivial) .mailmap conflict
Conflicts:
.mailmap
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c  | 16
-rw-r--r--  kernel/sched/fair.c  | 10
-rw-r--r--  kernel/sched/idle.c  |  1
-rw-r--r--  kernel/sched/sched.h |  6
4 files changed, 16 insertions, 17 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 07bacb050198..48cfad152b86 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5755,6 +5755,8 @@ static inline struct task_struct *pick_task(struct rq *rq)
 
 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
 
+static void queue_core_balance(struct rq *rq);
+
 static struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
@@ -5804,7 +5806,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                 }
 
                 rq->core_pick = NULL;
-                return next;
+                goto out;
         }
 
         put_prev_task_balance(rq, prev, rf);
@@ -5854,7 +5856,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                          */
                         WARN_ON_ONCE(fi_before);
                         task_vruntime_update(rq, next, false);
-                        goto done;
+                        goto out_set_next;
                 }
         }
 
@@ -5973,8 +5975,12 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                 resched_curr(rq_i);
         }
 
-done:
+out_set_next:
         set_next_task(rq, next);
+out:
+        if (rq->core->core_forceidle_count && next == rq->idle)
+                queue_core_balance(rq);
+
         return next;
 }
 
@@ -6003,7 +6009,7 @@ static bool try_steal_cookie(int this, int that)
                 if (p == src->core_pick || p == src->curr)
                         goto next;
 
-                if (!cpumask_test_cpu(this, &p->cpus_mask))
+                if (!is_cpu_allowed(p, this))
                         goto next;
 
                 if (p->core_occupation > dst->idle->core_occupation)
@@ -6069,7 +6075,7 @@ static void sched_core_balance(struct rq *rq)
 
 static DEFINE_PER_CPU(struct callback_head, core_balance_head);
 
-void queue_core_balance(struct rq *rq)
+static void queue_core_balance(struct rq *rq)
 {
         if (!sched_core_enabled(rq))
                 return;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bc9f6e94c84e..906b2c7c48d1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3811,11 +3811,11 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 
         se->avg.runnable_sum = se->avg.runnable_avg * divider;
 
-        se->avg.load_sum = divider;
-        if (se_weight(se)) {
-                se->avg.load_sum =
-                        div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
-        }
+        se->avg.load_sum = se->avg.load_avg * divider;
+        if (se_weight(se) < se->avg.load_sum)
+                se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
+        else
+                se->avg.load_sum = 1;
 
         enqueue_load_avg(cfs_rq, se);
         cfs_rq->avg.util_avg += se->avg.util_avg;
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 60295dbe0d62..34b717539e02 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -434,7 +434,6 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool fir
 {
         update_idle_core(rq);
         schedstat_inc(rq->sched_goidle);
-        queue_core_balance(rq);
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4784898e8f83..7f338c53ce42 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1232,8 +1232,6 @@ static inline bool sched_group_cookie_match(struct rq *rq,
         return false;
 }
 
-extern void queue_core_balance(struct rq *rq);
-
 static inline bool sched_core_enqueued(struct task_struct *p)
 {
         return !RB_EMPTY_NODE(&p->core_node);
@@ -1267,10 +1265,6 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
         return &rq->__lock;
 }
 
-static inline void queue_core_balance(struct rq *rq)
-{
-}
-
 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
 {
         return true;
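
Note on the fair.c hunk above: the new attach_entity_load_avg() logic derives load_sum by scaling load_avg with the divider and dividing by the entity weight only when the quotient stays non-zero. Below is a minimal, self-contained C sketch of that arithmetic; the struct, function name, and numeric values are illustrative stand-ins, not the kernel's struct sched_avg, se_weight(), or PELT constants.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the relevant fields; not the kernel's struct sched_avg. */
struct avg {
        uint64_t load_avg;
        uint64_t load_sum;
};

/*
 * Sketch of the new load_sum derivation from the fair.c hunk above:
 * scale load_avg by the divider, divide by the weight only when the
 * quotient stays non-zero, otherwise floor load_sum at 1 so a non-zero
 * load_avg never maps to a zero load_sum (apparently the corner case
 * the hunk addresses).
 */
static void attach_load_sum(struct avg *a, uint64_t divider, uint64_t weight)
{
        a->load_sum = a->load_avg * divider;
        if (weight < a->load_sum)
                a->load_sum = a->load_sum / weight;
        else
                a->load_sum = 1;
}

int main(void)
{
        struct avg a = { .load_avg = 1, .load_sum = 0 };

        /*
         * With a small load_avg and a large weight, the removed code computed
         * load_avg * divider / weight, which truncates to 0 here; the new
         * logic yields 1 instead. The values are made up for illustration.
         */
        attach_load_sum(&a, 47742, 1048576);
        printf("load_sum = %llu\n", (unsigned long long)a.load_sum);
        return 0;
}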