author     Yang Yingliang <yangyingliang@huawei.com>   2024-07-03 05:16:07 +0200
committer  Peter Zijlstra <peterz@infradead.org>       2024-07-29 12:22:32 +0200
commit     31b164e2e4af84d08d2498083676e7eeaa102493 (patch)
tree       ef9dbd29c65283eec2f5619490c9a89d6fafecfa /kernel
parent     sched/cputime: Fix mul_u64_u64_div_u64() precision for cputime (diff)
sched/smt: Introduce sched_smt_present_inc/dec() helper
Introduce the sched_smt_present_inc()/dec() helpers so that the SMT accounting can be called simply from either the normal path or an error path. No functional change.

Cc: stable@kernel.org
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240703031610.587047-2-yangyingliang@huaweicloud.com
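As a rough, illustrative sketch (not part of this patch; the function below is hypothetical), the point of the helpers is that a hotplug error path can undo the SMT accounting without repeating the open-coded #ifdef CONFIG_SCHED_SMT block at every call site:

/*
 * Hypothetical sketch only, not taken from this commit: shows how an
 * error path in a CPU-hotplug callback could rebalance sched_smt_present
 * with the new helpers.  The #ifdef CONFIG_SCHED_SMT and the
 * cpumask_weight(cpu_smt_mask(cpu)) == 2 test stay inside the helpers.
 */
static int example_cpu_deactivate(unsigned int cpu)
{
	int ret;

	/* Going down: drop the SMT-present count for this core. */
	sched_smt_present_dec(cpu);

	ret = cpuset_cpu_inactive(cpu);
	if (ret) {
		/* Deactivation failed: restore the SMT-present count. */
		sched_smt_present_inc(cpu);
		return ret;
	}

	return 0;
}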
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c | 26
1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a9f655025607..acc04ed9dbc2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7895,6 +7895,22 @@ static int cpuset_cpu_inactive(unsigned int cpu)
 	return 0;
 }
 
+static inline void sched_smt_present_inc(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_inc_cpuslocked(&sched_smt_present);
+#endif
+}
+
+static inline void sched_smt_present_dec(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_dec_cpuslocked(&sched_smt_present);
+#endif
+}
+
 int sched_cpu_activate(unsigned int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -7906,13 +7922,10 @@ int sched_cpu_activate(unsigned int cpu)
 	 */
 	balance_push_set(cpu, false);
 
-#ifdef CONFIG_SCHED_SMT
 	/*
 	 * When going up, increment the number of cores with SMT present.
 	 */
-	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
-		static_branch_inc_cpuslocked(&sched_smt_present);
-#endif
+	sched_smt_present_inc(cpu);
 	set_cpu_active(cpu, true);
 
 	if (sched_smp_initialized) {
@@ -7981,13 +7994,12 @@ int sched_cpu_deactivate(unsigned int cpu)
 	}
 	rq_unlock_irqrestore(rq, &rf);
 
-#ifdef CONFIG_SCHED_SMT
 	/*
 	 * When going down, decrement the number of cores with SMT present.
 	 */
-	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
-		static_branch_dec_cpuslocked(&sched_smt_present);
+	sched_smt_present_dec(cpu);
 
+#ifdef CONFIG_SCHED_SMT
 	sched_core_cpu_deactivate(cpu);
 #endif