author     Linus Torvalds <torvalds@linux-foundation.org>   2023-04-28 23:53:30 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>   2023-04-28 23:53:30 +0200
commit     586b222d748e91c619d68e9239654ebc7fed9b0c
tree       433154fb388d301fe94831f5a5223545d20fb7f3 /kernel/fork.c
parent     Merge tag 'perf-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel...
parent     sched/clock: Fix local_clock() before sched_clock_init()
Merge tag 'sched-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
- Allow unprivileged PSI poll()ing (see the userspace sketch after this list)
- Fix performance regression introduced by mm_cid
- Reduce livepatch stalls by adding livepatch task switching to
  cond_resched(). This resolves livepatching busy-loop stalls with
  certain CPU-bound kthreads
- Improve sched_move_task() performance on autogroup configs
- On core-scheduling CPUs, avoid selecting throttled tasks to run
- Misc cleanups, fixes and improvements
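The first item, unprivileged PSI poll()ing, is consumed from userspace through the trigger interface described in Documentation/accounting/psi.rst: write a "some|full <threshold_us> <window_us>" string to one of the /proc/pressure/ files, then poll() the fd for POLLPRI. Below is a minimal sketch of that flow; the 150 ms threshold is illustrative, and the 2 s window reflects the new rule that unprivileged users may only use windows that are a multiple of 2 seconds (the "N*2s period" in the shortlog that follows).

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* "some" pressure: at least one task stalled for 150ms within a 2s
	 * window.  The 2s window matters for unprivileged callers, whose
	 * windows must be a multiple of 2s. */
	const char trig[] = "some 150000 2000000";
	struct pollfd fds;
	int fd;

	fd = open("/proc/pressure/cpu", O_RDWR | O_NONBLOCK);
	if (fd < 0) {
		perror("open /proc/pressure/cpu");
		return 1;
	}

	/* Writing the trigger string registers a PSI monitor on this fd. */
	if (write(fd, trig, strlen(trig) + 1) < 0) {
		perror("write trigger");
		close(fd);
		return 1;
	}

	fds.fd = fd;
	fds.events = POLLPRI;

	/* Block until the stall threshold is crossed within the window. */
	if (poll(&fds, 1, -1) < 0) {
		perror("poll");
		close(fd);
		return 1;
	}

	if (fds.revents & POLLERR)
		fprintf(stderr, "event source is gone\n");
	else if (fds.revents & POLLPRI)
		printf("CPU pressure threshold crossed\n");

	close(fd);	/* closing the fd deregisters the trigger */
	return 0;
}

Registering such a trigger used to be a privileged operation; the psi commits in the shortlog open it up to unprivileged users while bounding the polling period they may request.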
* tag 'sched-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/clock: Fix local_clock() before sched_clock_init()
sched/rt: Fix bad task migration for rt tasks
sched: Fix performance regression introduced by mm_cid
sched/core: Make sched_dynamic_mutex static
sched/psi: Allow unprivileged polling of N*2s period
sched/psi: Extract update_triggers side effect
sched/psi: Rename existing poll members in preparation
sched/psi: Rearrange polling code in preparation
sched/fair: Fix inaccurate tally of ttwu_move_affine
vhost: Fix livepatch timeouts in vhost_worker()
livepatch,sched: Add livepatch task switching to cond_resched()
livepatch: Skip task_call_func() for current task
livepatch: Convert stack entries array to percpu
sched: Interleave cfs bandwidth timers for improved single thread performance at low utilization
sched/core: Reduce cost of sched_move_task when config autogroup
sched/core: Avoid selecting the task that is throttled to run when core-sched enable
sched/topology: Make sched_energy_mutex,update static
Diffstat (limited to 'kernel/fork.c')
-rw-r--r--   kernel/fork.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 4342200d5e2b..eccb35a85216 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -924,6 +924,7 @@ void __mmdrop(struct mm_struct *mm)
 	check_mm(mm);
 	put_user_ns(mm->user_ns);
 	mm_pasid_drop(mm);
+	mm_destroy_cid(mm);
 
 	for (i = 0; i < NR_MM_COUNTERS; i++)
 		percpu_counter_destroy(&mm->rss_stat[i]);
@@ -1188,7 +1189,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 
 #ifdef CONFIG_SCHED_MM_CID
 	tsk->mm_cid = -1;
+	tsk->last_mm_cid = -1;
 	tsk->mm_cid_active = 0;
+	tsk->migrate_from_cpu = -1;
 #endif
 	return tsk;
 
@@ -1296,18 +1299,22 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	if (init_new_context(p, mm))
 		goto fail_nocontext;
 
+	if (mm_alloc_cid(mm))
+		goto fail_cid;
+
 	for (i = 0; i < NR_MM_COUNTERS; i++)
 		if (percpu_counter_init(&mm->rss_stat[i], 0, GFP_KERNEL_ACCOUNT))
 			goto fail_pcpu;
 
 	mm->user_ns = get_user_ns(user_ns);
 	lru_gen_init_mm(mm);
-	mm_init_cid(mm);
 	return mm;
 
 fail_pcpu:
 	while (i > 0)
 		percpu_counter_destroy(&mm->rss_stat[--i]);
+	mm_destroy_cid(mm);
+fail_cid:
 	destroy_context(mm);
 fail_nocontext:
 	mm_free_pgd(mm);
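The mm_init() hunk follows the kernel's usual reverse-order goto unwinding: mm_alloc_cid() now replaces the infallible mm_init_cid(), so it gains a failure check, a matching mm_destroy_cid() in __mmdrop() and in the error path, and a new fail_cid label placed so teardown runs in exactly the reverse order of setup. A minimal, self-contained sketch of that pattern follows, with hypothetical names and plain malloc/free standing in for the real allocators.

#include <stdio.h>
#include <stdlib.h>

struct ctx {
	void *context;	/* stands in for init_new_context()   */
	void *cid;	/* stands in for mm_alloc_cid()        */
	void *counters;	/* stands in for the per-CPU counters  */
};

/* Allocate in order; on failure, jump to the label that undoes
 * everything allocated so far and nothing more. */
static int ctx_init(struct ctx *ctx)
{
	ctx->context = malloc(16);
	if (!ctx->context)
		goto fail_nocontext;

	ctx->cid = malloc(16);
	if (!ctx->cid)
		goto fail_cid;

	ctx->counters = malloc(16);
	if (!ctx->counters)
		goto fail_pcpu;

	return 0;

fail_pcpu:	/* counters failed: undo cid, then fall through to context */
	free(ctx->cid);
fail_cid:	/* cid failed: undo context only */
	free(ctx->context);
fail_nocontext:
	return -1;
}

int main(void)
{
	struct ctx ctx;

	if (ctx_init(&ctx)) {
		fprintf(stderr, "ctx_init failed\n");
		return 1;
	}

	/* Teardown on success mirrors the unwind order. */
	free(ctx.counters);
	free(ctx.cid);
	free(ctx.context);
	return 0;
}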