path: root/arch/x86/kernel/cpu/aperfmperf.c
Diffstat (limited to 'arch/x86/kernel/cpu/aperfmperf.c')
-rw-r--r--  arch/x86/kernel/cpu/aperfmperf.c  63
1 file changed, 38 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 6220503af26a..df528a4f6de3 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -17,6 +17,7 @@
#include <linux/smp.h>
#include <linux/syscore_ops.h>
+#include <asm/cpu.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
@@ -164,6 +165,17 @@ unsigned int arch_freq_get_on_cpu(int cpu)
return per_cpu(samples.khz, cpu);
}
+static void init_counter_refs(void)
+{
+ u64 aperf, mperf;
+
+ rdmsrl(MSR_IA32_APERF, aperf);
+ rdmsrl(MSR_IA32_MPERF, mperf);
+
+ this_cpu_write(cpu_samples.aperf, aperf);
+ this_cpu_write(cpu_samples.mperf, mperf);
+}
+
#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
/*
* APERF/MPERF frequency ratio computation.
@@ -405,17 +417,6 @@ out:
return true;
}
-static void init_counter_refs(void)
-{
- u64 aperf, mperf;
-
- rdmsrl(MSR_IA32_APERF, aperf);
- rdmsrl(MSR_IA32_MPERF, mperf);
-
- this_cpu_write(cpu_samples.aperf, aperf);
- this_cpu_write(cpu_samples.mperf, mperf);
-}
-
#ifdef CONFIG_PM_SLEEP
static struct syscore_ops freq_invariance_syscore_ops = {
.resume = init_counter_refs,
@@ -447,13 +448,8 @@ void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled)
freq_invariance_enable();
}
-void __init bp_init_freq_invariance(void)
+static void __init bp_init_freq_invariance(void)
{
- if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
- return;
-
- init_counter_refs();
-
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return;
@@ -461,12 +457,6 @@ void __init bp_init_freq_invariance(void)
freq_invariance_enable();
}
-void ap_init_freq_invariance(void)
-{
- if (cpu_feature_enabled(X86_FEATURE_APERFMPERF))
- init_counter_refs();
-}
-
static void disable_freq_invariance_workfn(struct work_struct *work)
{
static_branch_disable(&arch_scale_freq_key);
@@ -481,6 +471,9 @@ static void scale_freq_tick(u64 acnt, u64 mcnt)
{
u64 freq_scale;
+ if (!arch_scale_freq_invariant())
+ return;
+
if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt))
goto error;
@@ -501,13 +494,17 @@ error:
pr_warn("Scheduler frequency invariance went wobbly, disabling!\n");
schedule_work(&disable_freq_invariance_work);
}
+#else
+static inline void bp_init_freq_invariance(void) { }
+static inline void scale_freq_tick(u64 acnt, u64 mcnt) { }
+#endif /* CONFIG_X86_64 && CONFIG_SMP */
void arch_scale_freq_tick(void)
{
struct aperfmperf *s = this_cpu_ptr(&cpu_samples);
u64 acnt, mcnt, aperf, mperf;
- if (!arch_scale_freq_invariant())
+ if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
return;
rdmsrl(MSR_IA32_APERF, aperf);
@@ -520,4 +517,20 @@ void arch_scale_freq_tick(void)
scale_freq_tick(acnt, mcnt);
}
-#endif /* CONFIG_X86_64 && CONFIG_SMP */
+
+static int __init bp_init_aperfmperf(void)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
+ return 0;
+
+ init_counter_refs();
+ bp_init_freq_invariance();
+ return 0;
+}
+early_initcall(bp_init_aperfmperf);
+
+void ap_init_aperfmperf(void)
+{
+ if (cpu_feature_enabled(X86_FEATURE_APERFMPERF))
+ init_counter_refs();
+}