path: root/arch/x86/kernel/tsc_sync.c
author	Venki Pallipadi <venkatesh.pallipadi@intel.com>	2008-11-17 23:43:58 +0100
committer	Ingo Molnar <mingo@elte.hu>	2008-11-18 00:15:02 +0100
commit	93ce99e849433ede4ce8b410b749dc0cad1100b2 (patch)
tree	0481209f97a05a3851f536b2e16bd2868f83d842 /arch/x86/kernel/tsc_sync.c
parent	xen: fix scrub_page() (diff)
download	linux-93ce99e849433ede4ce8b410b749dc0cad1100b2.tar.xz
	linux-93ce99e849433ede4ce8b410b749dc0cad1100b2.zip
x86: add rdtsc barrier to TSC sync check
Impact: fix incorrectly marked unstable TSC clock

Patch (commit 0d12cdd "sched: improve sched_clock() performance") has a regression on one of the test systems here.

With the patch, I see:

 checking TSC synchronization [CPU#0 -> CPU#1]:
 Measured 28 cycles TSC warp between CPUs, turning off TSC clock.
 Marking TSC unstable due to check_tsc_sync_source failed

Whereas, without the patch, syncs pass fine on all CPUs:

 checking TSC synchronization [CPU#0 -> CPU#1]: passed.

Due to this, the TSC is marked unstable when it is not actually unstable. This is because the syncs in check_tsc_warp() go away due to that commit.

As per the discussion on this thread, the correct way to fix this is to add explicit syncs as below.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
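Background on the barrier: RDTSC is not a serializing instruction, so the CPU may execute it earlier or later than program order; two CPUs taking interleaved readings can then appear to observe time running backwards even when their TSCs are perfectly synchronized. The kernel's rdtsc_barrier() closes that window by fencing the TSC read. The following is a minimal stand-alone sketch of the idea, for illustration only: the real rdtsc_barrier() selects MFENCE or LFENCE per CPU via the alternatives mechanism, whereas this sketch uses LFENCE unconditionally, and tsc_read_barrier()/ordered_rdtsc() are invented names (x86 with GCC inline asm assumed).

/* Not the kernel implementation: a simplified, unconditional-LFENCE sketch. */
static inline void tsc_read_barrier(void)
{
	/* LFENCE keeps the CPU from reordering RDTSC across this point */
	asm volatile("lfence" ::: "memory");
}

static inline unsigned long long ordered_rdtsc(void)
{
	unsigned int lo, hi;

	tsc_read_barrier();		/* corresponds to rdtsc_barrier() before get_cycles() */
	asm volatile("rdtsc" : "=a"(lo), "=d"(hi));
	tsc_read_barrier();		/* ...and to the one after it */
	return ((unsigned long long)hi << 32) | lo;
}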
Diffstat (limited to 'arch/x86/kernel/tsc_sync.c')
-rw-r--r--	arch/x86/kernel/tsc_sync.c	| 4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 9ffb01c31c40..1c0dfbca87c1 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -46,7 +46,9 @@ static __cpuinit void check_tsc_warp(void)
 	cycles_t start, now, prev, end;
 	int i;
 
+	rdtsc_barrier();
 	start = get_cycles();
+	rdtsc_barrier();
 	/*
 	 * The measurement runs for 20 msecs:
 	 */
@@ -61,7 +63,9 @@ static __cpuinit void check_tsc_warp(void)
 	 */
 	__raw_spin_lock(&sync_lock);
 	prev = last_tsc;
+	rdtsc_barrier();
 	now = get_cycles();
+	rdtsc_barrier();
 	last_tsc = now;
 	__raw_spin_unlock(&sync_lock);
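For a concrete picture of the protocol these two hunks live in, here is a hedged user-space approximation. It is not the kernel's check_tsc_warp(): a pthread mutex stands in for the raw spinlock, WARP_RUNS and the function names are invented for the demo, and the fenced TSC read repeats the ordered_rdtsc() sketch above. Each thread takes the lock, remembers the last TSC value published by either thread, takes its own fenced reading, and publishes it; if the remembered value exceeds the fresh reading, time appears to have gone backwards and a warp is recorded, which is the condition that produced the spurious "Measured 28 cycles TSC warp" report when the reads were left unordered.

#include <stdio.h>
#include <pthread.h>

#define WARP_RUNS 1000000			/* invented iteration count */

static pthread_mutex_t sync_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long last_tsc;
static unsigned long long max_warp;
static unsigned long nr_warps;

/* Same fenced TSC read as in the earlier sketch. */
static unsigned long long ordered_rdtsc(void)
{
	unsigned int lo, hi;

	asm volatile("lfence; rdtsc; lfence" : "=a"(lo), "=d"(hi) :: "memory");
	return ((unsigned long long)hi << 32) | lo;
}

static void *warp_check(void *arg)
{
	unsigned long long prev, now;
	long i;

	(void)arg;
	for (i = 0; i < WARP_RUNS; i++) {
		pthread_mutex_lock(&sync_lock);
		prev = last_tsc;		/* last value published by either thread */
		now = ordered_rdtsc();		/* fenced read, as in the patched kernel */
		last_tsc = now;
		pthread_mutex_unlock(&sync_lock);

		if (prev > now) {		/* TSC appeared to go backwards */
			pthread_mutex_lock(&sync_lock);
			if (prev - now > max_warp)
				max_warp = prev - now;
			nr_warps++;
			pthread_mutex_unlock(&sync_lock);
		}
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, warp_check, NULL);
	pthread_create(&t2, NULL, warp_check, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	printf("%lu warps, max %llu cycles\n", nr_warps, max_warp);
	return 0;
}

Build with something like gcc -O2 -pthread tsc_warp_demo.c on an x86 machine; with the fenced read in place, hosts whose TSCs are actually synchronized should report zero warps.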