Diffstat (limited to 'kernel/time')
 kernel/time/Kconfig            |   2
 kernel/time/clockevents.c      |  21
 kernel/time/hrtimer.c          |   4
 kernel/time/ntp.c              | 227
 kernel/time/ntp_internal.h     |   1
 kernel/time/posix-cpu-timers.c |   6
 kernel/time/posix-stubs.c      |  20
 kernel/time/tick-oneshot.c     |   1
 kernel/time/tick-sched.c       |  38
 kernel/time/time.c             |  71
 kernel/time/timekeeping.c      | 182
 kernel/time/timekeeping.h      |   2
 kernel/time/timer.c            |  82
 13 files changed, 383 insertions(+), 274 deletions(-)
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index ac09bc29eb08..d689a9557e17 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -56,7 +56,7 @@ menu "Timers subsystem"
# Core internal switch. Selected by NO_HZ_COMMON / HIGH_RES_TIMERS. This is
# only related to the tick functionality. Oneshot clockevent devices
-# are supported independ of this.
+# are supported independent of this.
config TICK_ONESHOT
bool
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 4237e0744e26..16c027e9cc73 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -280,17 +280,22 @@ static int clockevents_program_min_delta(struct clock_event_device *dev)
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
unsigned long long clc;
- int64_t delta;
+ int64_t delta = 0;
+ int i;
- delta = dev->min_delta_ns;
- dev->next_event = ktime_add_ns(ktime_get(), delta);
+ for (i = 0; i < 10; i++) {
+ delta += dev->min_delta_ns;
+ dev->next_event = ktime_add_ns(ktime_get(), delta);
- if (clockevent_state_shutdown(dev))
- return 0;
+ if (clockevent_state_shutdown(dev))
+ return 0;
- dev->retries++;
- clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
- return dev->set_next_event((unsigned long) clc, dev);
+ dev->retries++;
+ clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
+ if (dev->set_next_event((unsigned long) clc, dev) == 0)
+ return 0;
+ }
+ return -ETIME;
}
#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */
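On slow hardware the deadline computed from min_delta_ns can already be in the past by the time set_next_event() actually programs it; previously the failure was simply returned to the caller. The rewrite widens the accumulated delta on each pass and retries up to ten times before reporting -ETIME, guaranteeing forward progress. A minimal sketch of the bounded-retry idea, with hw_program() as a hypothetical stand-in for dev->set_next_event():

    /* Sketch only: widen the programming window on every rejected attempt
     * instead of failing (or looping) on the first one. */
    static int program_min_delta_sketch(struct clock_event_device *dev)
    {
            int64_t delta = 0;
            int i;

            for (i = 0; i < 10; i++) {
                    delta += dev->min_delta_ns;     /* widen window each try */
                    if (hw_program(dev, delta) == 0)
                            return 0;               /* device accepted it */
                    dev->retries++;
            }
            return -ETIME;                          /* still in the past */
    }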
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 88f75f92ef36..d32520840fde 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -758,9 +758,7 @@ void clock_was_set(void)
*/
void hrtimers_resume(void)
{
- WARN_ONCE(!irqs_disabled(),
- KERN_INFO "hrtimers_resume() called with IRQs enabled!");
-
+ lockdep_assert_irqs_disabled();
/* Retrigger on the local CPU */
retrigger_next_event(NULL);
/* And schedule a retrigger for all others */
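Throughout this series, open-coded WARN_ONCE(!irqs_disabled(), ...) checks become lockdep_assert_irqs_disabled(). Besides being shorter, the lockdep variant compiles to nothing on kernels built without lockdep, whereas irqs_disabled() reads the flags state on every call even in production builds. The assertion has roughly the following shape (a sketch, not the verbatim kernel macro):

    /* Rough shape of the assertion: only evaluated when lockdep is
     * compiled in and tracking hardirq state. */
    #ifdef CONFIG_PROVE_LOCKING
    #define lockdep_assert_irqs_disabled() do {                             \
                    WARN_ONCE(debug_locks && current->hardirqs_enabled,     \
                              "IRQs not disabled as expected\n");           \
            } while (0)
    #else
    #define lockdep_assert_irqs_disabled() do { } while (0)
    #endif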
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 99e03bec68e4..8d70da1b9a0d 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -493,6 +493,67 @@ out:
return leap;
}
+static void sync_hw_clock(struct work_struct *work);
+static DECLARE_DELAYED_WORK(sync_work, sync_hw_clock);
+
+static void sched_sync_hw_clock(struct timespec64 now,
+ unsigned long target_nsec, bool fail)
+{
+ struct timespec64 next;
+
+ getnstimeofday64(&next);
+ if (!fail)
+ next.tv_sec = 659;
+ else {
+ /*
+ * Try again as soon as possible. Delaying long periods
+ * decreases the accuracy of the work queue timer. Due to this,
+ * the algorithm is very likely to require a short-sleep retry
+ * after the above long sleep to synchronize ts_nsec.
+ */
+ next.tv_sec = 0;
+ }
+
+ /* Compute the needed delay that will get to tv_nsec == target_nsec */
+ next.tv_nsec = target_nsec - next.tv_nsec;
+ if (next.tv_nsec <= 0)
+ next.tv_nsec += NSEC_PER_SEC;
+ if (next.tv_nsec >= NSEC_PER_SEC) {
+ next.tv_sec++;
+ next.tv_nsec -= NSEC_PER_SEC;
+ }
+
+ queue_delayed_work(system_power_efficient_wq, &sync_work,
+ timespec64_to_jiffies(&next));
+}
+
+static void sync_rtc_clock(void)
+{
+ unsigned long target_nsec;
+ struct timespec64 adjust, now;
+ int rc;
+
+ if (!IS_ENABLED(CONFIG_RTC_SYSTOHC))
+ return;
+
+ getnstimeofday64(&now);
+
+ adjust = now;
+ if (persistent_clock_is_local)
+ adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
+
+ /*
+ * The current RTC in use will provide the target_nsec it wants to be
+ * called at, and performs the rtc_tv_nsec_ok() check internally.
+ */
+ rc = rtc_set_ntp_time(adjust, &target_nsec);
+ if (rc == -ENODEV)
+ return;
+
+ sched_sync_hw_clock(now, target_nsec, rc);
+}
+
#ifdef CONFIG_GENERIC_CMOS_UPDATE
int __weak update_persistent_clock(struct timespec now)
{
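sched_sync_hw_clock() above picks the next run time so that the work item fires when the wall clock's tv_nsec equals target_nsec: ~659 s out after a successful write (the traditional "every ~11 minutes"), or as soon as possible after a failure. The nanosecond part is the delay needed to land on target_nsec, normalized into [0, NSEC_PER_SEC). The same arithmetic in a runnable, self-contained form (values hypothetical):

    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000L

    /* Delay (ns) until the wall clock's tv_nsec next equals target_nsec. */
    static long delay_to_target(long now_nsec, long target_nsec)
    {
            long d = target_nsec - now_nsec;

            if (d <= 0)
                    d += NSEC_PER_SEC;  /* target passed; aim for next second */
            return d;
    }

    int main(void)
    {
            /* e.g. 0.8 s into the current second, aiming at 0.5 s: wait 0.7 s */
            printf("%ld ns\n", delay_to_target(800000000L, 500000000L));
            return 0;
    }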
@@ -508,76 +569,75 @@ int __weak update_persistent_clock64(struct timespec64 now64)
}
#endif
-#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
-static void sync_cmos_clock(struct work_struct *work);
-
-static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
-
-static void sync_cmos_clock(struct work_struct *work)
+static bool sync_cmos_clock(void)
{
+ static bool no_cmos;
struct timespec64 now;
- struct timespec64 next;
- int fail = 1;
+ struct timespec64 adjust;
+ int rc = -EPROTO;
+ long target_nsec = NSEC_PER_SEC / 2;
+
+ if (!IS_ENABLED(CONFIG_GENERIC_CMOS_UPDATE))
+ return false;
+
+ if (no_cmos)
+ return false;
/*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- * This code is run on a timer. If the clock is set, that timer
- * may not expire at the correct time. Thus, we adjust...
- * We want the clock to be within a couple of ticks from the target.
+ * Historically update_persistent_clock64() has followed x86
+ * semantics, which match the MC146818A/etc RTC. This RTC will store
+ * 'adjust' and then in .5s it will advance one second.
+ *
+ * Architectures are strongly encouraged to use rtclib and not
+ * implement this legacy API.
*/
- if (!ntp_synced()) {
- /*
- * Not synced, exit, do not restart a timer (if one is
- * running, let it run out).
- */
- return;
- }
-
getnstimeofday64(&now);
- if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) {
- struct timespec64 adjust = now;
-
- fail = -ENODEV;
+ if (rtc_tv_nsec_ok(-1 * target_nsec, &adjust, &now)) {
if (persistent_clock_is_local)
adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
-#ifdef CONFIG_GENERIC_CMOS_UPDATE
- fail = update_persistent_clock64(adjust);
-#endif
-
-#ifdef CONFIG_RTC_SYSTOHC
- if (fail == -ENODEV)
- fail = rtc_set_ntp_time(adjust);
-#endif
+ rc = update_persistent_clock64(adjust);
+ /*
+ * The machine does not support update_persistent_clock64 even
+ * though it defines CONFIG_GENERIC_CMOS_UPDATE.
+ */
+ if (rc == -ENODEV) {
+ no_cmos = true;
+ return false;
+ }
}
- next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec - (TICK_NSEC / 2);
- if (next.tv_nsec <= 0)
- next.tv_nsec += NSEC_PER_SEC;
+ sched_sync_hw_clock(now, target_nsec, rc);
+ return true;
+}
- if (!fail || fail == -ENODEV)
- next.tv_sec = 659;
- else
- next.tv_sec = 0;
+/*
+ * If we have an externally synchronized Linux clock, then update RTC clock
+ * accordingly every ~11 minutes. Generally RTCs can only store second
+ * precision, but many RTCs will adjust the phase of their second tick to
+ * match the moment of update. This infrastructure arranges the call that
+ * sets the RTC at the correct moment, to phase synchronize the RTC second
+ * tick with the kernel clock.
+ */
+static void sync_hw_clock(struct work_struct *work)
+{
+ if (!ntp_synced())
+ return;
- if (next.tv_nsec >= NSEC_PER_SEC) {
- next.tv_sec++;
- next.tv_nsec -= NSEC_PER_SEC;
- }
- queue_delayed_work(system_power_efficient_wq,
- &sync_cmos_work, timespec64_to_jiffies(&next));
+ if (sync_cmos_clock())
+ return;
+
+ sync_rtc_clock();
}
void ntp_notify_cmos_timer(void)
{
- queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0);
-}
-
-#else
-void ntp_notify_cmos_timer(void) { }
-#endif
+ if (!ntp_synced())
+ return;
+ if (IS_ENABLED(CONFIG_GENERIC_CMOS_UPDATE) ||
+ IS_ENABLED(CONFIG_RTC_SYSTOHC))
+ queue_delayed_work(system_power_efficient_wq, &sync_work, 0);
+}
/*
* Propagate a new txc->status value into the NTP state:
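The reworked flow makes sync_hw_clock() the single work handler: it bails out unless NTP is synchronized, tries the legacy CMOS path first, and falls back to sync_rtc_clock() only when CONFIG_GENERIC_CMOS_UPDATE is off or the machine returned -ENODEV. Replacing the old #ifdef maze with IS_ENABLED() keeps every branch visible to the compiler: the dead branch folds away at compile time but is still type-checked in every configuration. A sketch of that idiom (try_cmos() and try_rtc() are hypothetical):

    /* IS_ENABLED(CONFIG_FOO) evaluates to 0 or 1 at compile time, so the
     * unreachable branch is discarded by dead-code elimination while the
     * source stays free of #ifdef blocks. */
    static void sync_hw_clock_sketch(void)
    {
            if (IS_ENABLED(CONFIG_GENERIC_CMOS_UPDATE) && try_cmos())
                    return;
            if (IS_ENABLED(CONFIG_RTC_SYSTOHC))
                    try_rtc();
    }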
@@ -654,67 +714,6 @@ static inline void process_adjtimex_modes(struct timex *txc,
}
-
-/**
- * ntp_validate_timex - Ensures the timex is ok for use in do_adjtimex
- */
-int ntp_validate_timex(struct timex *txc)
-{
- if (txc->modes & ADJ_ADJTIME) {
- /* singleshot must not be used with any other mode bits */
- if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
- return -EINVAL;
- if (!(txc->modes & ADJ_OFFSET_READONLY) &&
- !capable(CAP_SYS_TIME))
- return -EPERM;
- } else {
- /* In order to modify anything, you gotta be super-user! */
- if (txc->modes && !capable(CAP_SYS_TIME))
- return -EPERM;
- /*
- * if the quartz is off by more than 10% then
- * something is VERY wrong!
- */
- if (txc->modes & ADJ_TICK &&
- (txc->tick < 900000/USER_HZ ||
- txc->tick > 1100000/USER_HZ))
- return -EINVAL;
- }
-
- if (txc->modes & ADJ_SETOFFSET) {
- /* In order to inject time, you gotta be super-user! */
- if (!capable(CAP_SYS_TIME))
- return -EPERM;
-
- if (txc->modes & ADJ_NANO) {
- struct timespec ts;
-
- ts.tv_sec = txc->time.tv_sec;
- ts.tv_nsec = txc->time.tv_usec;
- if (!timespec_inject_offset_valid(&ts))
- return -EINVAL;
-
- } else {
- if (!timeval_inject_offset_valid(&txc->time))
- return -EINVAL;
- }
- }
-
- /*
- * Check for potential multiplication overflows that can
- * only happen on 64-bit systems:
- */
- if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
- if (LLONG_MIN / PPM_SCALE > txc->freq)
- return -EINVAL;
- if (LLONG_MAX / PPM_SCALE < txc->freq)
- return -EINVAL;
- }
-
- return 0;
-}
-
-
/*
* adjtimex mainly allows reading (and writing, if superuser) of
* kernel time-keeping variables. used by xntpd.
diff --git a/kernel/time/ntp_internal.h b/kernel/time/ntp_internal.h
index 0a53e6ea47b1..909bd1f1bfb1 100644
--- a/kernel/time/ntp_internal.h
+++ b/kernel/time/ntp_internal.h
@@ -8,7 +8,6 @@ extern void ntp_clear(void);
extern u64 ntp_tick_length(void);
extern ktime_t ntp_get_next_leap(void);
extern int second_overflow(time64_t secs);
-extern int ntp_validate_timex(struct timex *);
extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *);
extern void __hardpps(const struct timespec64 *, const struct timespec64 *);
#endif /* _LINUX_NTP_INTERNAL_H */
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 5b117110b55b..1f27887aa194 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -603,7 +603,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
/*
* Disarm any old timer after extracting its expiry time.
*/
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
ret = 0;
old_incr = timer->it.cpu.incr;
@@ -1034,7 +1034,7 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
/*
* Now re-arm for the new expiry time.
*/
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
arm_timer(timer);
unlock:
unlock_task_sighand(p, &flags);
@@ -1125,7 +1125,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
struct k_itimer *timer, *next;
unsigned long flags;
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
/*
* The fast path checks that there are no expired thread or thread
diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
index 06f34feb635e..b258bee13b02 100644
--- a/kernel/time/posix-stubs.c
+++ b/kernel/time/posix-stubs.c
@@ -117,8 +117,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
const struct timespec __user *, rqtp,
struct timespec __user *, rmtp)
{
- struct timespec64 t64;
- struct timespec t;
+ struct timespec64 t;
switch (which_clock) {
case CLOCK_REALTIME:
@@ -129,16 +128,15 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
return -EINVAL;
}
- if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
+ if (get_timespec64(&t, rqtp))
return -EFAULT;
- t64 = timespec_to_timespec64(t);
- if (!timespec64_valid(&t64))
+ if (!timespec64_valid(&t))
return -EINVAL;
if (flags & TIMER_ABSTIME)
rmtp = NULL;
current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
current->restart_block.nanosleep.rmtp = rmtp;
- return hrtimer_nanosleep(&t64, flags & TIMER_ABSTIME ?
+ return hrtimer_nanosleep(&t, flags & TIMER_ABSTIME ?
HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
which_clock);
}
@@ -203,8 +201,7 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
struct compat_timespec __user *, rqtp,
struct compat_timespec __user *, rmtp)
{
- struct timespec64 t64;
- struct timespec t;
+ struct timespec64 t;
switch (which_clock) {
case CLOCK_REALTIME:
@@ -215,16 +212,15 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
return -EINVAL;
}
- if (compat_get_timespec(&t, rqtp))
+ if (compat_get_timespec64(&t, rqtp))
return -EFAULT;
- t64 = timespec_to_timespec64(t);
- if (!timespec64_valid(&t64))
+ if (!timespec64_valid(&t))
return -EINVAL;
if (flags & TIMER_ABSTIME)
rmtp = NULL;
current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
current->restart_block.nanosleep.compat_rmtp = rmtp;
- return hrtimer_nanosleep(&t64, flags & TIMER_ABSTIME ?
+ return hrtimer_nanosleep(&t, flags & TIMER_ABSTIME ?
HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
which_clock);
}
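Both nanosleep conversions above shed the struct timespec middleman: get_timespec64() and compat_get_timespec64() copy from user space and widen to timespec64 in one step, which is the building block for y2038-safe syscalls on 32-bit architectures. Roughly, the native helper has this shape (a sketch of the era's implementation, not a verbatim copy):

    /* Sketch: copy a user-space timespec and widen tv_sec to 64 bit. */
    static int get_timespec64_sketch(struct timespec64 *ts,
                                     const struct timespec __user *uts)
    {
            struct timespec kts;

            if (copy_from_user(&kts, uts, sizeof(kts)))
                    return -EFAULT;

            ts->tv_sec = kts.tv_sec;        /* widens on 32-bit kernels */
            ts->tv_nsec = kts.tv_nsec;
            return 0;
    }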
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 6b009c207671..c1f518e7aa80 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -33,6 +33,7 @@ int tick_program_event(ktime_t expires, int force)
* We don't need the clock event device any more, stop it.
*/
clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT_STOPPED);
+ dev->next_event = KTIME_MAX;
return 0;
}
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index c7a899c5ce64..99578f06c8d4 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -27,6 +27,7 @@
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
+#include <linux/mm.h>
#include <asm/irq_regs.h>
@@ -165,7 +166,6 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
-cpumask_var_t housekeeping_mask;
bool tick_nohz_full_running;
static atomic_t tick_dep_mask;
@@ -198,7 +198,7 @@ static bool check_tick_dependency(atomic_t *dep)
static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
if (unlikely(!cpu_online(cpu)))
return false;
@@ -385,20 +385,13 @@ out:
local_irq_restore(flags);
}
-/* Parse the boot-time nohz CPU list from the kernel parameters. */
-static int __init tick_nohz_full_setup(char *str)
+/* Get the boot-time nohz CPU list from the kernel parameters. */
+void __init tick_nohz_full_setup(cpumask_var_t cpumask)
{
alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
- if (cpulist_parse(str, tick_nohz_full_mask) < 0) {
- pr_warn("NO_HZ: Incorrect nohz_full cpumask\n");
- free_bootmem_cpumask_var(tick_nohz_full_mask);
- return 1;
- }
+ cpumask_copy(tick_nohz_full_mask, cpumask);
tick_nohz_full_running = true;
-
- return 1;
}
-__setup("nohz_full=", tick_nohz_full_setup);
static int tick_nohz_cpu_down(unsigned int cpu)
{
@@ -437,13 +430,6 @@ void __init tick_nohz_init(void)
return;
}
- if (!alloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) {
- WARN(1, "NO_HZ: Can't allocate not-full dynticks cpumask\n");
- cpumask_clear(tick_nohz_full_mask);
- tick_nohz_full_running = false;
- return;
- }
-
/*
* Full dynticks uses irq work to drive the tick rescheduling on safe
* locking contexts. But then we need irq work to raise its own
@@ -452,7 +438,6 @@ void __init tick_nohz_init(void)
if (!arch_irq_work_has_interrupt()) {
pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
cpumask_clear(tick_nohz_full_mask);
- cpumask_copy(housekeeping_mask, cpu_possible_mask);
tick_nohz_full_running = false;
return;
}
@@ -465,9 +450,6 @@ void __init tick_nohz_init(void)
cpumask_clear_cpu(cpu, tick_nohz_full_mask);
}
- cpumask_andnot(housekeeping_mask,
- cpu_possible_mask, tick_nohz_full_mask);
-
for_each_cpu(cpu, tick_nohz_full_mask)
context_tracking_cpu_set(cpu);
@@ -477,12 +459,6 @@ void __init tick_nohz_init(void)
WARN_ON(ret < 0);
pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
cpumask_pr_args(tick_nohz_full_mask));
-
- /*
- * We need at least one CPU to handle housekeeping work such
- * as timekeeping, unbound timers, workqueues, ...
- */
- WARN_ON_ONCE(cpumask_empty(housekeeping_mask));
}
#endif
@@ -787,6 +763,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
if (!ts->tick_stopped) {
calc_load_nohz_start();
cpu_load_update_nohz_start();
+ quiet_vmstat();
ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
ts->tick_stopped = 1;
@@ -960,8 +937,7 @@ void tick_nohz_idle_enter(void)
{
struct tick_sched *ts;
- WARN_ON_ONCE(irqs_disabled());
-
+ lockdep_assert_irqs_enabled();
/*
* Update the idle state in the scheduler domain hierarchy
* when tick_nohz_stop_sched_tick() is called from the idle loop.
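The nohz_full changes demote tick_nohz_full_setup() from an early-param handler to a plain function that receives a ready-made cpumask; parsing of the "nohz_full=" boot parameter and ownership of the housekeeping mask move out of tick-sched.c (to the scheduler's CPU-isolation code in this series). The caller side presumably looks something like this sketch (simplified, names hypothetical):

    /* Sketch of the relocated boot-parameter handler. */
    static int __init nohz_full_setup_sketch(char *str)
    {
            cpumask_var_t mask;

            alloc_bootmem_cpumask_var(&mask);
            if (cpulist_parse(str, mask) < 0) {
                    pr_warn("NO_HZ: Incorrect nohz_full cpumask\n");
                    free_bootmem_cpumask_var(mask);
                    return 1;
            }
            tick_nohz_full_setup(mask);     /* copies mask, marks running */
            free_bootmem_cpumask_var(mask);
            return 1;
    }
    __setup("nohz_full=", nohz_full_setup_sketch);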
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 44a8c1402133..bd4e6c7dd689 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -82,7 +82,7 @@ SYSCALL_DEFINE1(time, time_t __user *, tloc)
SYSCALL_DEFINE1(stime, time_t __user *, tptr)
{
- struct timespec tv;
+ struct timespec64 tv;
int err;
if (get_user(tv.tv_sec, tptr))
@@ -90,11 +90,11 @@ SYSCALL_DEFINE1(stime, time_t __user *, tptr)
tv.tv_nsec = 0;
- err = security_settime(&tv, NULL);
+ err = security_settime64(&tv, NULL);
if (err)
return err;
- do_settimeofday(&tv);
+ do_settimeofday64(&tv);
return 0;
}
@@ -122,7 +122,7 @@ COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
COMPAT_SYSCALL_DEFINE1(stime, compat_time_t __user *, tptr)
{
- struct timespec tv;
+ struct timespec64 tv;
int err;
if (get_user(tv.tv_sec, tptr))
@@ -130,11 +130,11 @@ COMPAT_SYSCALL_DEFINE1(stime, compat_time_t __user *, tptr)
tv.tv_nsec = 0;
- err = security_settime(&tv, NULL);
+ err = security_settime64(&tv, NULL);
if (err)
return err;
- do_settimeofday(&tv);
+ do_settimeofday64(&tv);
return 0;
}
@@ -158,40 +158,6 @@ SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
}
/*
- * Indicates if there is an offset between the system clock and the hardware
- * clock/persistent clock/rtc.
- */
-int persistent_clock_is_local;
-
-/*
- * Adjust the time obtained from the CMOS to be UTC time instead of
- * local time.
- *
- * This is ugly, but preferable to the alternatives. Otherwise we
- * would either need to write a program to do it in /etc/rc (and risk
- * confusion if the program gets run more than once; it would also be
- * hard to make the program warp the clock precisely n hours) or
- * compile in the timezone information into the kernel. Bad, bad....
- *
- * - TYT, 1992-01-01
- *
- * The best thing to do is to keep the CMOS clock in universal time (UTC)
- * as real UNIX machines always do it. This avoids all headaches about
- * daylight saving times and warping kernel clocks.
- */
-static inline void warp_clock(void)
-{
- if (sys_tz.tz_minuteswest != 0) {
- struct timespec adjust;
-
- persistent_clock_is_local = 1;
- adjust.tv_sec = sys_tz.tz_minuteswest * 60;
- adjust.tv_nsec = 0;
- timekeeping_inject_offset(&adjust);
- }
-}
-
-/*
* In case for some reason the CMOS clock has not already been running
* in UTC, but in some local time: The first time we set the timezone,
* we will warp the clock so that it is ticking UTC time instead of
@@ -224,7 +190,7 @@ int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz
if (firsttime) {
firsttime = 0;
if (!tv)
- warp_clock();
+ timekeeping_warp_clock();
}
}
if (tv)
@@ -441,6 +407,7 @@ time64_t mktime64(const unsigned int year0, const unsigned int mon0,
}
EXPORT_SYMBOL(mktime64);
+#if __BITS_PER_LONG == 32
/**
* set_normalized_timespec - set timespec sec and nsec parts and normalize
*
@@ -501,6 +468,7 @@ struct timespec ns_to_timespec(const s64 nsec)
return ts;
}
EXPORT_SYMBOL(ns_to_timespec);
+#endif
/**
* ns_to_timeval - Convert nanoseconds to timeval
@@ -520,7 +488,6 @@ struct timeval ns_to_timeval(const s64 nsec)
}
EXPORT_SYMBOL(ns_to_timeval);
-#if BITS_PER_LONG == 32
/**
* set_normalized_timespec - set timespec sec and nsec parts and normalize
*
@@ -581,7 +548,7 @@ struct timespec64 ns_to_timespec64(const s64 nsec)
return ts;
}
EXPORT_SYMBOL(ns_to_timespec64);
-#endif
+
/**
* msecs_to_jiffies: - convert milliseconds to jiffies
* @m: time in milliseconds
@@ -853,24 +820,6 @@ unsigned long nsecs_to_jiffies(u64 n)
EXPORT_SYMBOL_GPL(nsecs_to_jiffies);
/*
- * Add two timespec values and do a safety check for overflow.
- * It's assumed that both values are valid (>= 0)
- */
-struct timespec timespec_add_safe(const struct timespec lhs,
- const struct timespec rhs)
-{
- struct timespec res;
-
- set_normalized_timespec(&res, lhs.tv_sec + rhs.tv_sec,
- lhs.tv_nsec + rhs.tv_nsec);
-
- if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)
- res.tv_sec = TIME_T_MAX;
-
- return res;
-}
-
-/*
* Add two timespec64 values and do a safety check for overflow.
* It's assumed that both values are valid (>= 0).
* And, each timespec64 is in normalized form.
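With the stime()/warp_clock() conversions above, the timespec-based helpers lose their last users: timespec_add_safe() is deleted outright, and set_normalized_timespec()/ns_to_timespec() are fenced behind __BITS_PER_LONG == 32 (on 64-bit, timespec and timespec64 are the same type, so the timespec64 helpers cover both). The surviving timespec64_add_safe(), whose kerneldoc ends this hunk, clamps on overflow rather than wrapping; a sketch of that behavior:

    /* Sketch of the surviving 64-bit safe addition: normalize, then clamp
     * the seconds to the maximum representable value if they wrapped. */
    struct timespec64 add_safe_sketch(const struct timespec64 lhs,
                                      const struct timespec64 rhs)
    {
            struct timespec64 res;

            /* unsigned add sidesteps signed-overflow UB */
            set_normalized_timespec64(&res, (u64)lhs.tv_sec + rhs.tv_sec,
                                      lhs.tv_nsec + rhs.tv_nsec);

            if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec) {
                    res.tv_sec = KTIME_SEC_MAX;     /* clamp on overflow */
                    res.tv_nsec = 0;
            }
            return res;
    }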
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 2cafb49aa65e..198afa78bf69 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -60,8 +60,27 @@ struct tk_fast {
struct tk_read_base base[2];
};
-static struct tk_fast tk_fast_mono ____cacheline_aligned;
-static struct tk_fast tk_fast_raw ____cacheline_aligned;
+/* Suspend-time cycles value for halted fast timekeeper. */
+static u64 cycles_at_suspend;
+
+static u64 dummy_clock_read(struct clocksource *cs)
+{
+ return cycles_at_suspend;
+}
+
+static struct clocksource dummy_clock = {
+ .read = dummy_clock_read,
+};
+
+static struct tk_fast tk_fast_mono ____cacheline_aligned = {
+ .base[0] = { .clock = &dummy_clock, },
+ .base[1] = { .clock = &dummy_clock, },
+};
+
+static struct tk_fast tk_fast_raw ____cacheline_aligned = {
+ .base[0] = { .clock = &dummy_clock, },
+ .base[1] = { .clock = &dummy_clock, },
+};
/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
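Initializing tk_fast_mono and tk_fast_raw with the dummy clock at build time closes an early-boot hole: the fast accessors are meant to be callable from any context, including tracing that runs before timekeeping_init() installs a real clocksource, and previously such a call would chase a NULL ->read pointer. The read path they protect ends in the clocksource hook, roughly (a sketch of tk_clock_read()):

    /* Sketch: base[].clock must never be NULL, even before
     * timekeeping_init(), because the fast path dereferences it. */
    static inline u64 tk_clock_read_sketch(const struct tk_read_base *tkr)
    {
            struct clocksource *clock = READ_ONCE(tkr->clock);

            return clock->read(clock);      /* dummy_clock returns 0 early on */
    }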
@@ -477,17 +496,39 @@ u64 notrace ktime_get_boot_fast_ns(void)
}
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
-/* Suspend-time cycles value for halted fast timekeeper. */
-static u64 cycles_at_suspend;
-static u64 dummy_clock_read(struct clocksource *cs)
+/*
+ * See comment for __ktime_get_fast_ns() vs. timestamp ordering
+ */
+static __always_inline u64 __ktime_get_real_fast_ns(struct tk_fast *tkf)
{
- return cycles_at_suspend;
+ struct tk_read_base *tkr;
+ unsigned int seq;
+ u64 now;
+
+ do {
+ seq = raw_read_seqcount_latch(&tkf->seq);
+ tkr = tkf->base + (seq & 0x01);
+ now = ktime_to_ns(tkr->base_real);
+
+ now += timekeeping_delta_to_ns(tkr,
+ clocksource_delta(
+ tk_clock_read(tkr),
+ tkr->cycle_last,
+ tkr->mask));
+ } while (read_seqcount_retry(&tkf->seq, seq));
+
+ return now;
}
-static struct clocksource dummy_clock = {
- .read = dummy_clock_read,
-};
+/**
+ * ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
+ */
+u64 ktime_get_real_fast_ns(void)
+{
+ return __ktime_get_real_fast_ns(&tk_fast_mono);
+}
+EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
/**
* halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
@@ -507,6 +548,7 @@ static void halt_fast_timekeeper(struct timekeeper *tk)
memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
cycles_at_suspend = tk_clock_read(tkr);
tkr_dummy.clock = &dummy_clock;
+ tkr_dummy.base_real = tkr->base + tk->offs_real;
update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
tkr = &tk->tkr_raw;
@@ -654,6 +696,7 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
update_vsyscall(tk);
update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
+ tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real;
update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
update_fast_timekeeper(&tk->tkr_raw, &tk_fast_raw);
@@ -1264,33 +1307,31 @@ EXPORT_SYMBOL(do_settimeofday64);
*
* Adds or subtracts an offset value from the current time.
*/
-int timekeeping_inject_offset(struct timespec *ts)
+static int timekeeping_inject_offset(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned long flags;
- struct timespec64 ts64, tmp;
+ struct timespec64 tmp;
int ret = 0;
- if (!timespec_inject_offset_valid(ts))
+ if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- ts64 = timespec_to_timespec64(*ts);
-
raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);
timekeeping_forward_now(tk);
/* Make sure the proposed value is valid */
- tmp = timespec64_add(tk_xtime(tk), ts64);
- if (timespec64_compare(&tk->wall_to_monotonic, &ts64) > 0 ||
+ tmp = timespec64_add(tk_xtime(tk), *ts);
+ if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
!timespec64_valid_strict(&tmp)) {
ret = -EINVAL;
goto error;
}
- tk_xtime_add(tk, &ts64);
- tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));
+ tk_xtime_add(tk, ts);
+ tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *ts));
error: /* even if we error out, we forwarded the time, so call update */
timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
@@ -1303,7 +1344,40 @@ error: /* even if we error out, we forwarded the time, so call update */
return ret;
}
-EXPORT_SYMBOL(timekeeping_inject_offset);
+
+/*
+ * Indicates if there is an offset between the system clock and the hardware
+ * clock/persistent clock/rtc.
+ */
+int persistent_clock_is_local;
+
+/*
+ * Adjust the time obtained from the CMOS to be UTC time instead of
+ * local time.
+ *
+ * This is ugly, but preferable to the alternatives. Otherwise we
+ * would either need to write a program to do it in /etc/rc (and risk
+ * confusion if the program gets run more than once; it would also be
+ * hard to make the program warp the clock precisely n hours) or
+ * compile in the timezone information into the kernel. Bad, bad....
+ *
+ * - TYT, 1992-01-01
+ *
+ * The best thing to do is to keep the CMOS clock in universal time (UTC)
+ * as real UNIX machines always do it. This avoids all headaches about
+ * daylight saving times and warping kernel clocks.
+ */
+void timekeeping_warp_clock(void)
+{
+ if (sys_tz.tz_minuteswest != 0) {
+ struct timespec64 adjust;
+
+ persistent_clock_is_local = 1;
+ adjust.tv_sec = sys_tz.tz_minuteswest * 60;
+ adjust.tv_nsec = 0;
+ timekeeping_inject_offset(&adjust);
+ }
+}
/**
* __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
@@ -2248,6 +2322,72 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
}
/**
+ * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
+ */
+static int timekeeping_validate_timex(struct timex *txc)
+{
+ if (txc->modes & ADJ_ADJTIME) {
+ /* singleshot must not be used with any other mode bits */
+ if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
+ return -EINVAL;
+ if (!(txc->modes & ADJ_OFFSET_READONLY) &&
+ !capable(CAP_SYS_TIME))
+ return -EPERM;
+ } else {
+ /* In order to modify anything, you gotta be super-user! */
+ if (txc->modes && !capable(CAP_SYS_TIME))
+ return -EPERM;
+ /*
+ * if the quartz is off by more than 10% then
+ * something is VERY wrong!
+ */
+ if (txc->modes & ADJ_TICK &&
+ (txc->tick < 900000/USER_HZ ||
+ txc->tick > 1100000/USER_HZ))
+ return -EINVAL;
+ }
+
+ if (txc->modes & ADJ_SETOFFSET) {
+ /* In order to inject time, you gotta be super-user! */
+ if (!capable(CAP_SYS_TIME))
+ return -EPERM;
+
+ /*
+ * Validate if a timespec/timeval used to inject a time
+ * offset is valid. Offsets can be positive or negative, so
+ * we don't check tv_sec. The value of the timeval/timespec
+ * is the sum of its fields, but *NOTE*:
+ * The field tv_usec/tv_nsec must always be non-negative and
+ * we can't have more nanoseconds/microseconds than a second.
+ */
+ if (txc->time.tv_usec < 0)
+ return -EINVAL;
+
+ if (txc->modes & ADJ_NANO) {
+ if (txc->time.tv_usec >= NSEC_PER_SEC)
+ return -EINVAL;
+ } else {
+ if (txc->time.tv_usec >= USEC_PER_SEC)
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Check for potential multiplication overflows that can
+ * only happen on 64-bit systems:
+ */
+ if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
+ if (LLONG_MIN / PPM_SCALE > txc->freq)
+ return -EINVAL;
+ if (LLONG_MAX / PPM_SCALE < txc->freq)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+/**
* do_adjtimex() - Accessor function to NTP __do_adjtimex function
*/
int do_adjtimex(struct timex *txc)
@@ -2259,12 +2399,12 @@ int do_adjtimex(struct timex *txc)
int ret;
/* Validate the data before disabling interrupts */
- ret = ntp_validate_timex(txc);
+ ret = timekeeping_validate_timex(txc);
if (ret)
return ret;
if (txc->modes & ADJ_SETOFFSET) {
- struct timespec delta;
+ struct timespec64 delta;
delta.tv_sec = txc->time.tv_sec;
delta.tv_nsec = txc->time.tv_usec;
if (!(txc->modes & ADJ_NANO))
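The user-visible adjtimex() contract is unchanged by the move: with ADJ_SETOFFSET the offset travels in txc->time, and txc->time.tv_usec carries nanoseconds when ADJ_NANO is set, microseconds otherwise, which is exactly what timekeeping_validate_timex() checks above. A minimal user-space illustration (needs CAP_SYS_TIME, so it really steps the clock if run as root):

    #include <sys/timex.h>
    #include <stdio.h>

    int main(void)
    {
            /* Step the system clock forward by 1.5 s. */
            struct timex txc = {
                    .modes = ADJ_SETOFFSET | ADJ_NANO,
                    .time  = {
                            .tv_sec  = 1,
                            .tv_usec = 500000000,   /* nanoseconds, per ADJ_NANO */
                    },
            };

            if (adjtimex(&txc) < 0)
                    perror("adjtimex");
            return 0;
    }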
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
index c9f9af339914..7a9b4eb7a1d5 100644
--- a/kernel/time/timekeeping.h
+++ b/kernel/time/timekeeping.h
@@ -11,7 +11,7 @@ extern ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq,
extern int timekeeping_valid_for_hres(void);
extern u64 timekeeping_max_deferment(void);
-extern int timekeeping_inject_offset(struct timespec *ts);
+extern void timekeeping_warp_clock(void);
extern int timekeeping_suspend(void);
extern void timekeeping_resume(void);
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index f2674a056c26..af0b8bae4502 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -610,7 +610,7 @@ static bool timer_fixup_init(void *addr, enum debug_obj_state state)
}
/* Stub timer callback for improperly used timers. */
-static void stub_timer(unsigned long data)
+static void stub_timer(struct timer_list *unused)
{
WARN_ON(1);
}
@@ -626,7 +626,7 @@ static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
switch (state) {
case ODEBUG_STATE_NOTAVAILABLE:
- setup_timer(timer, stub_timer, 0);
+ timer_setup(timer, stub_timer, 0);
return true;
case ODEBUG_STATE_ACTIVE:
@@ -665,7 +665,7 @@ static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
switch (state) {
case ODEBUG_STATE_NOTAVAILABLE:
- setup_timer(timer, stub_timer, 0);
+ timer_setup(timer, stub_timer, 0);
return true;
default:
return false;
@@ -929,8 +929,11 @@ static struct timer_base *lock_timer_base(struct timer_list *timer,
}
}
+#define MOD_TIMER_PENDING_ONLY 0x01
+#define MOD_TIMER_REDUCE 0x02
+
static inline int
-__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+__mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options)
{
struct timer_base *base, *new_base;
unsigned int idx = UINT_MAX;
@@ -950,7 +953,11 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
* larger granularity than you would get from adding a new
* timer with this expiry.
*/
- if (timer->expires == expires)
+ long diff = timer->expires - expires;
+
+ if (!diff)
+ return 1;
+ if (options & MOD_TIMER_REDUCE && diff <= 0)
return 1;
/*
@@ -962,6 +969,12 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
base = lock_timer_base(timer, &flags);
forward_timer_base(base);
+ if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) &&
+ time_before_eq(timer->expires, expires)) {
+ ret = 1;
+ goto out_unlock;
+ }
+
clk = base->clk;
idx = calc_wheel_index(expires, clk);
@@ -971,7 +984,10 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
* subsequent call will exit in the expires check above.
*/
if (idx == timer_get_idx(timer)) {
- timer->expires = expires;
+ if (!(options & MOD_TIMER_REDUCE))
+ timer->expires = expires;
+ else if (time_after(timer->expires, expires))
+ timer->expires = expires;
ret = 1;
goto out_unlock;
}
@@ -981,7 +997,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
}
ret = detach_if_pending(timer, base, false);
- if (!ret && pending_only)
+ if (!ret && (options & MOD_TIMER_PENDING_ONLY))
goto out_unlock;
debug_activate(timer, expires);
@@ -1042,7 +1058,7 @@ out_unlock:
*/
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
- return __mod_timer(timer, expires, true);
+ return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY);
}
EXPORT_SYMBOL(mod_timer_pending);
@@ -1068,11 +1084,26 @@ EXPORT_SYMBOL(mod_timer_pending);
*/
int mod_timer(struct timer_list *timer, unsigned long expires)
{
- return __mod_timer(timer, expires, false);
+ return __mod_timer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_timer);
/**
+ * timer_reduce - Modify a timer's timeout if it would reduce the timeout
+ * @timer: The timer to be modified
+ * @expires: New timeout in jiffies
+ *
+ * timer_reduce() is very similar to mod_timer(), except that it will only
+ * modify a running timer if that would reduce the expiration time (it will
+ * start a timer that isn't running).
+ */
+int timer_reduce(struct timer_list *timer, unsigned long expires)
+{
+ return __mod_timer(timer, expires, MOD_TIMER_REDUCE);
+}
+EXPORT_SYMBOL(timer_reduce);
+
+/**
* add_timer - start a timer
* @timer: the timer to be added
*
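timer_reduce() is what the MOD_TIMER_REDUCE plumbing above exists for: unlike mod_timer(), concurrent callers can only pull an armed timer's deadline earlier, never push it out, which makes it safe for racy "earliest deadline wins" uses. A usage sketch (my_flush_timer and its init are hypothetical; assumes timer_setup() ran beforehand):

    static struct timer_list my_flush_timer;

    /* Several contexts may race to propose a flush deadline; the earliest
     * proposal wins, and a later caller can never delay it. */
    static void propose_flush_deadline(unsigned long deadline)
    {
            timer_reduce(&my_flush_timer, deadline);
    }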
@@ -1560,8 +1591,11 @@ static int collect_expired_timers(struct timer_base *base,
* jiffies, otherwise forward to the next expiry time:
*/
if (time_after(next, jiffies)) {
- /* The call site will increment clock! */
- base->clk = jiffies - 1;
+ /*
+ * The call site will increment base->clk and then
+ * terminate the expiry loop immediately.
+ */
+ base->clk = jiffies;
return 0;
}
base->clk = next;
@@ -1668,9 +1702,20 @@ void run_local_timers(void)
raise_softirq(TIMER_SOFTIRQ);
}
-static void process_timeout(unsigned long __data)
+/*
+ * Since schedule_timeout()'s timer is defined on the stack, it must store
+ * the target task on the stack as well.
+ */
+struct process_timer {
+ struct timer_list timer;
+ struct task_struct *task;
+};
+
+static void process_timeout(struct timer_list *t)
{
- wake_up_process((struct task_struct *)__data);
+ struct process_timer *timeout = from_timer(timeout, t, timer);
+
+ wake_up_process(timeout->task);
}
/**
@@ -1704,7 +1749,7 @@ static void process_timeout(unsigned long __data)
*/
signed long __sched schedule_timeout(signed long timeout)
{
- struct timer_list timer;
+ struct process_timer timer;
unsigned long expire;
switch (timeout)
@@ -1738,13 +1783,14 @@ signed long __sched schedule_timeout(signed long timeout)
expire = timeout + jiffies;
- setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
- __mod_timer(&timer, expire, false);
+ timer.task = current;
+ timer_setup_on_stack(&timer.timer, process_timeout, 0);
+ __mod_timer(&timer.timer, expire, 0);
schedule();
- del_singleshot_timer_sync(&timer);
+ del_singleshot_timer_sync(&timer.timer);
/* Remove the timer from the object tracker */
- destroy_timer_on_stack(&timer);
+ destroy_timer_on_stack(&timer.timer);
timeout = expire - jiffies;
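The schedule_timeout() rework shows the new timer callback convention end to end: the callback receives the struct timer_list pointer and recovers its container with from_timer(), so per-timer state (here the task to wake) lives next to the timer instead of being smuggled through an unsigned long. from_timer() is container_of() specialized for this pattern; in general form (my_obj and my_timer_fn are hypothetical):

    struct my_obj {
            struct timer_list timer;
            int payload;                    /* state the callback needs */
    };

    static void my_timer_fn(struct timer_list *t)
    {
            /* from_timer(var, t, field) == container_of(t, typeof(*var), field) */
            struct my_obj *obj = from_timer(obj, t, timer);

            pr_info("timer fired, payload=%d\n", obj->payload);
    }

    static void my_obj_init(struct my_obj *obj)
    {
            timer_setup(&obj->timer, my_timer_fn, 0);
    }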