author    | Srikar Dronamraju <srikar@linux.vnet.ibm.com> | 2020-08-10 09:18:33 +0200
committer | Michael Ellerman <mpe@ellerman.id.au>         | 2020-09-16 14:13:32 +0200
commit    | 72730bfc2a2b91a525f38dfc830f598bdb95f216
tree      | 0ecd72b68851159a16dd0557041e2f3fd0c7b3e5 /arch/powerpc/kernel/smp.c
parent    | powerpc/smp: Allocate cpumask only after searching thread group
download  | linux-72730bfc2a2b91a525f38dfc830f598bdb95f216.tar.xz, linux-72730bfc2a2b91a525f38dfc830f598bdb95f216.zip
powerpc/smp: Create coregroup domain
Add per-CPU coregroup maps and masks to create the coregroup domain.
If a coregroup doesn't exist, the coregroup domain will degenerate
in favour of the SMT/CACHE domain. Note that this patch only creates
a stub for cpu_to_coregroup_id(); the actual implementation follows
in a subsequent patch.
Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Reviewed-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200810071834.92514-10-srikar@linux.vnet.ibm.com
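As the message notes, cpu_to_coregroup_id() is only stubbed at this point, and the stub lives outside the file covered by the diffstat below, so it does not appear in this diff. A minimal sketch of what such an interim stub could look like (the body is an assumption for illustration, not taken from this patch):

/*
 * Hypothetical interim stub (assumed, not shown in this diff): until the
 * real firmware-backed lookup lands, map each CPU to its core id so that
 * callers in smp.c have something well-defined to work with.
 */
int cpu_to_coregroup_id(int cpu)
{
	return cpu_to_core_id(cpu);	/* assumed placeholder mapping */
}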
Diffstat (limited to 'arch/powerpc/kernel/smp.c')
-rw-r--r-- | arch/powerpc/kernel/smp.c | 54
1 file changed, 53 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 41f76c8cd024..3d96752d6570 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -81,12 +81,22 @@ DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
 DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
 DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
 DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);
 
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 EXPORT_SYMBOL_GPL(has_big_cores);
 
+enum {
+#ifdef CONFIG_SCHED_SMT
+	smt_idx,
+#endif
+	cache_idx,
+	mc_idx,
+	die_idx,
+};
+
 #define MAX_THREAD_LIST_SIZE 8
 #define THREAD_GROUP_SHARE_L1 1
 struct thread_groups {
@@ -862,11 +872,27 @@ static const struct cpumask *smallcore_smt_mask(int cpu)
 }
 #endif
 
+static struct cpumask *cpu_coregroup_mask(int cpu)
+{
+	return per_cpu(cpu_coregroup_map, cpu);
+}
+
+static bool has_coregroup_support(void)
+{
+	return coregroup_enabled;
+}
+
+static const struct cpumask *cpu_mc_mask(int cpu)
+{
+	return cpu_coregroup_mask(cpu);
+}
+
 static struct sched_domain_topology_level powerpc_topology[] = {
 #ifdef CONFIG_SCHED_SMT
 	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
 #endif
 	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
+	{ cpu_mc_mask, SD_INIT_NAME(MC) },
 	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 	{ NULL, },
 };
@@ -913,6 +939,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 					GFP_KERNEL, cpu_to_node(cpu));
 		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
 					GFP_KERNEL, cpu_to_node(cpu));
+		if (has_coregroup_support())
+			zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
+						GFP_KERNEL, cpu_to_node(cpu));
+
 #ifdef CONFIG_NEED_MULTIPLE_NODES
 		/*
 		 * numa_node_id() works after this.
@@ -930,6 +960,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
 	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
 
+	if (has_coregroup_support())
+		cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
+
 	init_big_cores();
 	if (has_big_cores) {
 		cpumask_set_cpu(boot_cpuid,
@@ -1234,6 +1267,8 @@ static void remove_cpu_from_masks(int cpu)
 		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
 		if (has_big_cores)
 			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
+		if (has_coregroup_support())
+			set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
 	}
 }
 #endif
@@ -1294,6 +1329,20 @@ static void add_cpu_to_masks(int cpu)
 	add_cpu_to_smallcore_masks(cpu);
 	update_mask_by_l2(cpu, cpu_l2_cache_mask);
 
+	if (has_coregroup_support()) {
+		int coregroup_id = cpu_to_coregroup_id(cpu);
+
+		cpumask_set_cpu(cpu, cpu_coregroup_mask(cpu));
+		for_each_cpu_and(i, cpu_online_mask, cpu_cpu_mask(cpu)) {
+			int fcpu = cpu_first_thread_sibling(i);
+
+			if (fcpu == first_thread)
+				set_cpus_related(cpu, i, cpu_coregroup_mask);
+			else if (coregroup_id == cpu_to_coregroup_id(i))
+				set_cpus_related(cpu, i, cpu_coregroup_mask);
+		}
+	}
+
 	if (pkg_id == -1) {
 		struct cpumask *(*mask)(int) = cpu_sibling_mask;
 
@@ -1388,9 +1437,12 @@ static void fixup_topology(void)
 #ifdef CONFIG_SCHED_SMT
 	if (has_big_cores) {
 		pr_info("Big cores detected but using small core scheduling\n");
-		powerpc_topology[0].mask = smallcore_smt_mask;
+		powerpc_topology[smt_idx].mask = smallcore_smt_mask;
 	}
 #endif
+
+	if (!has_coregroup_support())
+		powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
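A note on the fixup_topology() change above: the new enum indexes powerpc_topology[] by name instead of magic numbers, and when coregroups are unsupported the MC level's mask function is retargeted at the CACHE level's mask, so both levels report identical cpumasks and the scheduler degenerates the redundant MC domain. Below is a minimal user-space C sketch of that same pattern; the names and the printf harness are illustrative only, not kernel code.

#include <stdio.h>

/* Illustrative only: mirrors the enum-indexed topology table and the
 * fixup pass from this patch, with strings standing in for cpumasks. */
typedef const char *(*mask_fn)(int cpu);

static const char *smt_mask(int cpu)   { (void)cpu; return "smt";   }
static const char *cache_mask(int cpu) { (void)cpu; return "cache"; }
static const char *mc_mask(int cpu)    { (void)cpu; return "mc";    }
static const char *die_mask(int cpu)   { (void)cpu; return "die";   }

enum { smt_idx, cache_idx, mc_idx, die_idx, nr_idx };

static mask_fn topology[nr_idx] = {
	[smt_idx]   = smt_mask,
	[cache_idx] = cache_mask,
	[mc_idx]    = mc_mask,
	[die_idx]   = die_mask,
};

int main(void)
{
	int has_coregroup = 0;	/* pretend the platform lacks coregroups */

	/* Same move as fixup_topology(): point MC at the CACHE mask so
	 * both levels cover identical CPUs and MC can be degenerated. */
	if (!has_coregroup)
		topology[mc_idx] = topology[cache_idx];

	for (int i = 0; i < nr_idx; i++)
		printf("level %d -> %s domain\n", i, topology[i](0));
	return 0;
}

Indexing by enum rather than by position (the old powerpc_topology[0]) also keeps the small-core SMT fixup correct if levels are later added or reordered, which is the design motivation for introducing smt_idx/cache_idx/mc_idx/die_idx in this patch.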