author     Lauro Ramos Venancio <lvenanci@redhat.com>  2017-04-13 15:56:07 +0200
committer  Ingo Molnar <mingo@kernel.org>  2017-05-15 10:15:22 +0200
commit     8c0334697dc37eb3d6d7632304d3a3662248daac
tree       17976b610e67dcbe5f0944a0fffe94c5997d2545 /kernel/sched/topology.c
parent     sched/clock: Print a warning recommending 'tsc=unstable'
sched/topology: Refactor function build_overlap_sched_groups()
Create functions build_group_from_child_sched_domain() and
init_overlap_sched_group(). No functional change.
Signed-off-by: Lauro Ramos Venancio <lvenanci@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1492091769-19879-2-git-send-email-lvenanci@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/topology.c')
-rw-r--r--  kernel/sched/topology.c  62
1 file changed, 43 insertions(+), 19 deletions(-)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 1b0b4fb12837..d786d45c44d9 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -513,6 +513,47 @@ int group_balance_cpu(struct sched_group *sg)
 	return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
 }
 
+static struct sched_group *
+build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
+{
+	struct sched_group *sg;
+	struct cpumask *sg_span;
+
+	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+			GFP_KERNEL, cpu_to_node(cpu));
+
+	if (!sg)
+		return NULL;
+
+	sg_span = sched_group_cpus(sg);
+	if (sd->child)
+		cpumask_copy(sg_span, sched_domain_span(sd->child));
+	else
+		cpumask_copy(sg_span, sched_domain_span(sd));
+
+	return sg;
+}
+
+static void init_overlap_sched_group(struct sched_domain *sd,
+				     struct sched_group *sg, int cpu)
+{
+	struct sd_data *sdd = sd->private;
+	struct cpumask *sg_span;
+
+	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
+	if (atomic_inc_return(&sg->sgc->ref) == 1)
+		build_group_mask(sd, sg);
+
+	/*
+	 * Initialize sgc->capacity such that even if we mess up the
+	 * domains and no possible iteration will get us here, we won't
+	 * die on a /0 trap.
+	 */
+	sg_span = sched_group_cpus(sg);
+	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
+	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
+}
+
 static int
 build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 {
@@ -537,31 +578,14 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
 			continue;
 
-		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
-				GFP_KERNEL, cpu_to_node(cpu));
-
+		sg = build_group_from_child_sched_domain(sibling, cpu);
 		if (!sg)
 			goto fail;
 
 		sg_span = sched_group_cpus(sg);
-		if (sibling->child)
-			cpumask_copy(sg_span, sched_domain_span(sibling->child));
-		else
-			cpumask_set_cpu(i, sg_span);
-
 		cpumask_or(covered, covered, sg_span);
 
-		sg->sgc = *per_cpu_ptr(sdd->sgc, i);
-		if (atomic_inc_return(&sg->sgc->ref) == 1)
-			build_group_mask(sd, sg);
-
-		/*
-		 * Initialize sgc->capacity such that even if we mess up the
-		 * domains and no possible iteration will get us here, we won't
-		 * die on a /0 trap.
-		 */
-		sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
-		sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
+		init_overlap_sched_group(sd, sg, i);
 
 		/*
 		 * Make sure the first group of this domain contains the
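
Read on its own, the patch is a textbook extract-function refactor: the loop body's allocation and initialization code moves into two named helpers. The following is a minimal, self-contained sketch in plain C of the same shape; every type and name in it (struct group, build_group(), init_group()) is a stand-in invented for this illustration, not a kernel API. One helper allocates the group and fills its span, preferring the child's span when one exists; the other initializes derived state such as the capacity.

/*
 * Illustrative userspace sketch of the refactor's shape; all names
 * here are stand-ins invented for the example, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct group {
	unsigned long span;		/* stand-in for the group's cpumask */
	unsigned long capacity;		/* stand-in for sgc->capacity */
};

/* Mirrors build_group_from_child_sched_domain(): allocate, fill the span. */
static struct group *build_group(unsigned long child_span,
				 unsigned long domain_span)
{
	struct group *g = calloc(1, sizeof(*g));

	if (!g)
		return NULL;

	/* Prefer the child domain's span when the domain has a child. */
	g->span = child_span ? child_span : domain_span;
	return g;
}

/* Mirrors init_overlap_sched_group(): derive a safe capacity from the span. */
static void init_group(struct group *g)
{
	g->capacity = 1024UL * (unsigned long)__builtin_popcountl(g->span);
}

int main(void)
{
	/* The caller now reads as two named steps, as in the patched loop. */
	struct group *g = build_group(0x0fUL, 0xffUL);

	if (!g)
		return 1;

	init_group(g);
	printf("span=%#lx capacity=%lu\n", g->span, g->capacity);
	free(g);
	return 0;
}

Splitting allocation from initialization this way lets build_overlap_sched_groups() read as two named steps and keeps the capacity-bootstrap comment in one place, which is the point of this "no functional change" commit.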