Diffstat (limited to 'kernel/sched')
-rw-r--r--   kernel/sched/core.c     |  6
-rw-r--r--   kernel/sched/features.h | 16
2 files changed, 0 insertions, 22 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ca665f8bec98..e0bd88b26a27 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2120,12 +2120,6 @@ __read_mostly bool sched_numa_balancing;
 void set_numabalancing_state(bool enabled)
 {
 	sched_numa_balancing = enabled;
-#ifdef CONFIG_SCHED_DEBUG
-	if (enabled)
-		sched_feat_set("NUMA");
-	else
-		sched_feat_set("NO_NUMA");
-#endif /* CONFIG_SCHED_DEBUG */
 }
 
 #ifdef CONFIG_PROC_SYSCTL
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index e6fd23b7459b..edf5902d5e57 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -72,21 +72,5 @@ SCHED_FEAT(RT_PUSH_IPI, true)
 SCHED_FEAT(FORCE_SD_OVERLAP, false)
 SCHED_FEAT(RT_RUNTIME_SHARE, true)
 SCHED_FEAT(LB_MIN, false)
-
 SCHED_FEAT(ATTACH_AGE_LOAD, true)
 
-/*
- * Apply the automatic NUMA scheduling policy. Enabled automatically
- * at runtime if running on a NUMA machine. Can be controlled via
- * numa_balancing=
- */
-#ifdef CONFIG_NUMA_BALANCING
-
-/*
- * NUMA will favor moving tasks towards nodes where a higher number of
- * hinting faults are recorded during active load balancing. It will
- * resist moving tasks towards nodes where a lower number of hinting
- * faults have been recorded.
- */
-SCHED_FEAT(NUMA, true)
-#endif
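
For readers skimming the change: the core.c hunk leaves set_numabalancing_state() with nothing to do but flip the sched_numa_balancing flag, since there is no longer a NUMA/NO_NUMA sched_feat bit to keep in sync. The following is a small, self-contained userspace model of that resulting control flow, not kernel code; task_placement_tick() is a hypothetical stand-in for the NUMA placement paths in kernel/sched/fair.c that consult the flag.

/*
 * Illustrative, userspace-only sketch of the behaviour after this patch.
 * NOT kernel code: task_placement_tick() is a hypothetical stand-in for
 * the NUMA placement hooks that test sched_numa_balancing.
 */
#include <stdbool.h>
#include <stdio.h>

static bool sched_numa_balancing;	/* models the __read_mostly bool above */

/* After the patch the flag is the only state to update; the removed
 * sched_feat_set("NUMA"/"NO_NUMA") synchronisation is gone. */
static void set_numabalancing_state(bool enabled)
{
	sched_numa_balancing = enabled;
}

/* Hypothetical placement path: gated by the flag alone. */
static void task_placement_tick(void)
{
	if (!sched_numa_balancing)
		return;
	printf("NUMA balancing on: account hinting faults, consider migration\n");
}

int main(void)
{
	set_numabalancing_state(true);	/* e.g. echo 1 > /proc/sys/kernel/numa_balancing */
	task_placement_tick();

	set_numabalancing_state(false);	/* e.g. numa_balancing=disable at boot */
	task_placement_tick();		/* skipped */
	return 0;
}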