Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/vgtod.h	1
-rw-r--r--	arch/x86/kernel/vsyscall_64.c	34
-rw-r--r--	arch/x86/vdso/vclock_gettime.c	55
3 files changed, 22 insertions, 68 deletions
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 646b4c1ca695..aa5add855a91 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -11,7 +11,6 @@ struct vsyscall_gtod_data {
 	time_t		wall_time_sec;
 	u32		wall_time_nsec;
 
-	int		sysctl_enabled;
 	struct timezone sys_tz;
 	struct { /* extract of a clocksource struct */
 		cycle_t (*vread)(void);
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 3cf1cef75a6a..9b2f3f51bc91 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -53,7 +53,6 @@ DEFINE_VVAR(int, vgetcpu_mode);
 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
 {
 	.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
-	.sysctl_enabled = 1,
 };
 
 void update_vsyscall_tz(void)
@@ -103,15 +102,6 @@ static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
 	return ret;
 }
 
-static __always_inline long time_syscall(long *t)
-{
-	long secs;
-	asm volatile("syscall"
-		: "=a" (secs)
-		: "0" (__NR_time),"D" (t) : __syscall_clobber);
-	return secs;
-}
-
 static __always_inline void do_vgettimeofday(struct timeval * tv)
 {
 	cycle_t now, base, mask, cycle_delta;
@@ -122,8 +112,7 @@ static __always_inline void do_vgettimeofday(struct timeval * tv)
 		seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
 
 		vread = VVAR(vsyscall_gtod_data).clock.vread;
-		if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled ||
-			     !vread)) {
+		if (unlikely(!vread)) {
 			gettimeofday(tv,NULL);
 			return;
 		}
@@ -165,8 +154,6 @@ time_t __vsyscall(1) vtime(time_t *t)
 {
 	unsigned seq;
 	time_t result;
-	if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
-		return time_syscall(t);
 
 	do {
 		seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
@@ -227,22 +214,6 @@ static long __vsyscall(3) venosys_1(void)
 	return -ENOSYS;
 }
 
-#ifdef CONFIG_SYSCTL
-static ctl_table kernel_table2[] = {
-	{ .procname = "vsyscall64",
-	  .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
-	  .mode = 0644,
-	  .proc_handler = proc_dointvec },
-	{}
-};
-
-static ctl_table kernel_root_table2[] = {
-	{ .procname = "kernel", .mode = 0555,
-	  .child = kernel_table2 },
-	{}
-};
-#endif
-
 /* Assume __initcall executes before all user space. Hopefully kmod
    doesn't violate that. We'll find out if it does. */
 static void __cpuinit vsyscall_set_cpu(int cpu)
@@ -301,9 +272,6 @@ static int __init vsyscall_init(void)
 	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
 	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
 	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
-#ifdef CONFIG_SYSCTL
-	register_sysctl_table(kernel_root_table2);
-#endif
 	on_each_cpu(cpu_vsyscall_init, NULL, 1);
 	/* notifier priority > KVM */
 	hotcpu_notifier(cpu_vsyscall_notifier, 30);
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index a724905fdae7..cf54813ac527 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -116,21 +116,21 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
 
 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 {
-	if (likely(gtod->sysctl_enabled))
-		switch (clock) {
-		case CLOCK_REALTIME:
-			if (likely(gtod->clock.vread))
-				return do_realtime(ts);
-			break;
-		case CLOCK_MONOTONIC:
-			if (likely(gtod->clock.vread))
-				return do_monotonic(ts);
-			break;
-		case CLOCK_REALTIME_COARSE:
-			return do_realtime_coarse(ts);
-		case CLOCK_MONOTONIC_COARSE:
-			return do_monotonic_coarse(ts);
-		}
+	switch (clock) {
+	case CLOCK_REALTIME:
+		if (likely(gtod->clock.vread))
+			return do_realtime(ts);
+		break;
+	case CLOCK_MONOTONIC:
+		if (likely(gtod->clock.vread))
+			return do_monotonic(ts);
+		break;
+	case CLOCK_REALTIME_COARSE:
+		return do_realtime_coarse(ts);
+	case CLOCK_MONOTONIC_COARSE:
+		return do_monotonic_coarse(ts);
+	}
+
 	return vdso_fallback_gettime(clock, ts);
 }
 int clock_gettime(clockid_t, struct timespec *)
@@ -139,7 +139,7 @@ int clock_gettime(clockid_t, struct timespec *)
 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 {
 	long ret;
-	if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
+	if (likely(gtod->clock.vread)) {
 		if (likely(tv != NULL)) {
 			BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
 				     offsetof(struct timespec, tv_nsec) ||
@@ -161,27 +161,14 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 int gettimeofday(struct timeval *, struct timezone *)
 	__attribute__((weak, alias("__vdso_gettimeofday")));
 
-/* This will break when the xtime seconds get inaccurate, but that is
- * unlikely */
-
-static __always_inline long time_syscall(long *t)
-{
-	long secs;
-	asm volatile("syscall"
-		: "=a" (secs)
-		: "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory");
-	return secs;
-}
-
+/*
+ * This will break when the xtime seconds get inaccurate, but that is
+ * unlikely
+ */
 notrace time_t __vdso_time(time_t *t)
 {
-	time_t result;
-
-	if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
-		return time_syscall(t);
-
 	/* This is atomic on x86_64 so we don't need any locks. */
-	result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
+	time_t result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
 
 	if (t)
 		*t = result;
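
Note on the resulting fallback path: with the sysctl gone, __vdso_clock_gettime() unconditionally tries the vread-based fast paths and otherwise falls through to vdso_fallback_gettime(), whose definition is not part of this diff. The removed time_syscall() helpers above show the general shape of such a fallback: issue the real syscall from the vDSO. Below is a minimal standalone sketch of that pattern; the function name do_fallback_clock_gettime is a hypothetical stand-in, not the kernel's actual vdso_fallback_gettime.

	#include <asm/unistd.h>		/* __NR_clock_gettime */
	#include <time.h>		/* clockid_t, struct timespec */

	/*
	 * Hypothetical sketch: invoke the real clock_gettime syscall directly,
	 * the way a vDSO function must when no usable vread clock exists.
	 * x86-64 convention: rax holds the syscall number, rdi/rsi the first
	 * two arguments; the syscall instruction clobbers rcx and r11, and
	 * the kernel writes *ts, hence the "memory" clobber.
	 */
	static long do_fallback_clock_gettime(clockid_t clock, struct timespec *ts)
	{
		long ret;
		asm volatile("syscall"
			     : "=a" (ret)
			     : "0" (__NR_clock_gettime), "D" (clock), "S" (ts)
			     : "rcx", "r11", "memory");
		return ret;
	}

In normal operation glibc's clock_gettime() resolves to __vdso_clock_gettime() first, so user code only pays the syscall cost when the fast path is unavailable.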