author     Thomas Gleixner <tglx@linutronix.de>   2009-08-20 16:27:41 +0200
committer  Thomas Gleixner <tglx@linutronix.de>   2009-08-31 09:35:46 +0200
commit     08047c4f1740c7cee75d58e2919d48c09f951649
tree       ffaf378e4be6dec0ef572f30385c1c545c7df494
parent     x86: Make timer setup and global variables the same in time_32/64.c
x86: Move calibrate_cpu to tsc.c
Move the code to where its only user is. Also, we need to look into whether
this hardwired hackery might interfere with perfcounters.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  arch/x86/include/asm/time.h |  2
-rw-r--r--  arch/x86/kernel/time_32.c   |  1
-rw-r--r--  arch/x86/kernel/time_64.c   | 51
-rw-r--r--  arch/x86/kernel/tsc.c       | 57
4 files changed, 55 insertions(+), 56 deletions(-)
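For context on the function being moved: calibrate_cpu() lets a free AMD performance counter count core clock cycles (event 0x76) while the constant-rate TSC advances by TICK_COUNT ticks, interrupts disabled, and then scales the already-known tsc_khz by the ratio of the two deltas to get cpu_khz. The fragment below is a minimal user-space sketch of that closing arithmetic only, with made-up sample values; it is not kernel code and is not part of the patch.

#include <stdio.h>

/*
 * Sketch of the arithmetic behind calibrate_cpu()'s return statement.
 * All values here are illustrative; the real code reads them from the
 * MSR_K7_PERFCTR* registers and the TSC with interrupts disabled.
 */
int main(void)
{
        unsigned long long tsc_khz   = 200000ULL;    /* assumed known TSC rate (200 MHz) */
        unsigned long long tsc_delta = 100000000ULL; /* TICK_COUNT: TSC ticks elapsed */
        unsigned long long pmc_delta = 110000000ULL; /* core cycles counted meanwhile */

        /* cpu_khz = cycles * tsc_khz / tsc_ticks, as in the patch's return statement */
        unsigned long long cpu_khz = pmc_delta * tsc_khz / tsc_delta;

        printf("estimated cpu_khz = %llu\n", cpu_khz);
        return 0;
}

With these sample numbers the estimate comes out to 220000 kHz. When no counter is free, the function borrows PERFCTR3 and warns that cpu_khz may be incorrect; that fallback is exactly the potential collision with perfcounters the commit message flags.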
diff --git a/arch/x86/include/asm/time.h b/arch/x86/include/asm/time.h
index 91bb162b5a31..9c5608b21c27 100644
--- a/arch/x86/include/asm/time.h
+++ b/arch/x86/include/asm/time.h
@@ -57,6 +57,4 @@ extern void time_init(void);
 
 #endif /* CONFIG_PARAVIRT */
 
-extern unsigned long __init calibrate_cpu(void);
-
 #endif /* _ASM_X86_TIME_H */
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
index 186abc577b2b..fd876cc77487 100644
--- a/arch/x86/kernel/time_32.c
+++ b/arch/x86/kernel/time_32.c
@@ -21,7 +21,6 @@
 #include <asm/timer.h>
 #include <asm/hpet.h>
 #include <asm/time.h>
-#include <asm/nmi.h>
 
 #if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
 int timer_ack;
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index 78cbdf5c006b..e59a40ebff14 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -21,7 +21,6 @@
 #include <asm/timer.h>
 #include <asm/hpet.h>
 #include <asm/time.h>
-#include <asm/nmi.h>
 
 #if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
 int timer_ack;
@@ -84,56 +83,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
         return IRQ_HANDLED;
 }
 
-/*
- * calibrate_cpu is used on systems with fixed rate TSCs to determine
- * processor frequency
- */
-#define TICK_COUNT 100000000
-unsigned long __init calibrate_cpu(void)
-{
-        int tsc_start, tsc_now;
-        int i, no_ctr_free;
-        unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
-        unsigned long flags;
-
-        for (i = 0; i < 4; i++)
-                if (avail_to_resrv_perfctr_nmi_bit(i))
-                        break;
-        no_ctr_free = (i == 4);
-        if (no_ctr_free) {
-                WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
-                     "cpu_khz value may be incorrect.\n");
-                i = 3;
-                rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
-                wrmsrl(MSR_K7_EVNTSEL3, 0);
-                rdmsrl(MSR_K7_PERFCTR3, pmc3);
-        } else {
-                reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-                reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-        }
-        local_irq_save(flags);
-        /* start measuring cycles, incrementing from 0 */
-        wrmsrl(MSR_K7_PERFCTR0 + i, 0);
-        wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
-        rdtscl(tsc_start);
-        do {
-                rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
-                tsc_now = get_cycles();
-        } while ((tsc_now - tsc_start) < TICK_COUNT);
-
-        local_irq_restore(flags);
-        if (no_ctr_free) {
-                wrmsrl(MSR_K7_EVNTSEL3, 0);
-                wrmsrl(MSR_K7_PERFCTR3, pmc3);
-                wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
-        } else {
-                release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-                release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-        }
-
-        return pmc_now * tsc_khz / (tsc_now - tsc_start);
-}
-
 static struct irqaction irq0 = {
         .handler = timer_interrupt,
         .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 652bc214eebf..97a0bcbad100 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -17,6 +17,7 @@
 #include <asm/time.h>
 #include <asm/delay.h>
 #include <asm/hypervisor.h>
+#include <asm/nmi.h>
 
 unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -852,6 +853,60 @@ static void __init init_tsc_clocksource(void)
         clocksource_register(&clocksource_tsc);
 }
 
+#ifdef CONFIG_X86_64
+/*
+ * calibrate_cpu is used on systems with fixed rate TSCs to determine
+ * processor frequency
+ */
+#define TICK_COUNT 100000000
+static unsigned long __init calibrate_cpu(void)
+{
+        int tsc_start, tsc_now;
+        int i, no_ctr_free;
+        unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
+        unsigned long flags;
+
+        for (i = 0; i < 4; i++)
+                if (avail_to_resrv_perfctr_nmi_bit(i))
+                        break;
+        no_ctr_free = (i == 4);
+        if (no_ctr_free) {
+                WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
+                     "cpu_khz value may be incorrect.\n");
+                i = 3;
+                rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
+                wrmsrl(MSR_K7_EVNTSEL3, 0);
+                rdmsrl(MSR_K7_PERFCTR3, pmc3);
+        } else {
+                reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+                reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+        }
+        local_irq_save(flags);
+        /* start measuring cycles, incrementing from 0 */
+        wrmsrl(MSR_K7_PERFCTR0 + i, 0);
+        wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
+        rdtscl(tsc_start);
+        do {
+                rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
+                tsc_now = get_cycles();
+        } while ((tsc_now - tsc_start) < TICK_COUNT);
+
+        local_irq_restore(flags);
+        if (no_ctr_free) {
+                wrmsrl(MSR_K7_EVNTSEL3, 0);
+                wrmsrl(MSR_K7_PERFCTR3, pmc3);
+                wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
+        } else {
+                release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+                release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+        }
+
+        return pmc_now * tsc_khz / (tsc_now - tsc_start);
+}
+#else
+static inline unsigned long calibrate_cpu(void) { return cpu_khz; }
+#endif
+
 void __init tsc_init(void)
 {
         u64 lpj;
@@ -870,11 +925,9 @@ void __init tsc_init(void)
                 return;
         }
 
-#ifdef CONFIG_X86_64
         if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
                         (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
                 cpu_khz = calibrate_cpu();
-#endif
 
         printk("Detected %lu.%03lu MHz processor.\n",
                 (unsigned long)cpu_khz / 1000,