author    | Tim C Chen <tim.c.chen@linux.intel.com> | 2023-07-08 00:57:01 +0200
committer | Peter Zijlstra <peterz@infradead.org>   | 2023-07-13 15:21:51 +0200
commit    | d24cb0d9113f5932b8832533ce82351b5911ed50 (patch)
tree      | a64a4159f45e28f5a4a34da1a1c36f81dc3f3d48 /kernel/sched
parent    | sched/fair: Determine active load balance for SMT sched groups (diff)
sched/topology: Record number of cores in sched group
When balancing between sibling domains that have different numbers of
cores, the number of tasks in each sibling domain should be proportional
to the number of cores in that domain. In preparation for implementing
such a policy, record the number of cores in a scheduling group.
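As a rough sketch of the intended proportionality (an illustration only, not
part of this patch; the core counts and task total below are made-up example
values): with per-group core counts recorded, a balancer could target task
counts that scale with each group's cores.

	#include <stdio.h>

	/* Hypothetical userspace illustration of the "tasks proportional to
	 * cores" idea described above; the numbers are arbitrary examples. */
	int main(void)
	{
		unsigned int cores[] = { 4, 2 };	/* sibling groups' core counts */
		unsigned int total_tasks = 12, total_cores = 4 + 2;

		for (unsigned int i = 0; i < 2; i++)
			printf("group %u target: %u tasks\n", i,
			       total_tasks * cores[i] / total_cores);	/* prints 8 and 4 */
		return 0;
	}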
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/04641eeb0e95c21224352f5743ecb93dfac44654.1688770494.git.tim.c.chen@linux.intel.com
Diffstat (limited to 'kernel/sched')
-rw-r--r-- | kernel/sched/sched.h    |  1
-rw-r--r-- | kernel/sched/topology.c | 12
2 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1dcea9bfa0a8..9baeb1a2dfdd 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1884,6 +1884,7 @@ struct sched_group {
 	atomic_t		ref;
 
 	unsigned int		group_weight;
+	unsigned int		cores;
 	struct sched_group_capacity *sgc;
 	int			asym_prefer_cpu;	/* CPU of highest priority in group */
 	int			flags;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index d3a3b2646ec4..7cfcfe5d27b9 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1275,14 +1275,24 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 {
 	struct sched_group *sg = sd->groups;
+	struct cpumask *mask = sched_domains_tmpmask2;
 
 	WARN_ON(!sg);
 
 	do {
-		int cpu, max_cpu = -1;
+		int cpu, cores = 0, max_cpu = -1;
 
 		sg->group_weight = cpumask_weight(sched_group_span(sg));
 
+		cpumask_copy(mask, sched_group_span(sg));
+		for_each_cpu(cpu, mask) {
+			cores++;
+#ifdef CONFIG_SCHED_SMT
+			cpumask_andnot(mask, mask, cpu_smt_mask(cpu));
+#endif
+		}
+		sg->cores = cores;
+
 		if (!(sd->flags & SD_ASYM_PACKING))
 			goto next;
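For reference, a userspace sketch of the core-counting loop added to
init_sched_groups_capacity() above: visit each CPU in the group's span and
clear that CPU's SMT siblings so every physical core is counted exactly once.
This is an illustration only, not kernel code; CPU masks are modeled as plain
unsigned longs and the 2-way SMT sibling layout is an assumption standing in
for the kernel's cpumask and cpu_smt_mask() helpers.

	#include <stdio.h>

	/* Assumed topology for this sketch: CPUs {0,1}, {2,3}, ... are SMT
	 * siblings of the same physical core (stand-in for cpu_smt_mask()). */
	static unsigned long smt_mask(int cpu)
	{
		return 3UL << (cpu & ~1);
	}

	/* Count physical cores in a CPU span, mirroring the patch's loop:
	 * count a CPU, then drop it and its siblings from the working mask
	 * (the equivalent of cpumask_andnot(mask, mask, cpu_smt_mask(cpu))). */
	static unsigned int count_cores(unsigned long span)
	{
		unsigned long mask = span;	/* like cpumask_copy(mask, sched_group_span(sg)) */
		unsigned int cores = 0;

		while (mask) {
			int cpu = __builtin_ctzl(mask);	/* lowest set CPU, like for_each_cpu() */

			cores++;
			mask &= ~smt_mask(cpu);
		}
		return cores;
	}

	int main(void)
	{
		/* 8 SMT CPUs spanning 4 physical cores */
		printf("cores = %u\n", count_cores(0xffUL));
		return 0;
	}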