Diffstat (limited to 'kernel')
-rw-r--r--   kernel/compat.c             3
-rw-r--r--   kernel/kexec.c              2
-rw-r--r--   kernel/posix-cpu-timers.c  11
-rw-r--r--   kernel/sched.c              6
-rw-r--r--   kernel/sched_debug.c        4
-rw-r--r--   kernel/softirq.c           20
-rw-r--r--   kernel/time.c              54
-rw-r--r--   kernel/time/ntp.c         398
-rw-r--r--   kernel/time/timekeeping.c  17
-rw-r--r--   kernel/workqueue.c          6
10 files changed, 277 insertions, 244 deletions
diff --git a/kernel/compat.c b/kernel/compat.c
index 4a856a3643bb..32c254a8ab9a 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -955,7 +955,8 @@ asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
 	    __put_user(txc.jitcnt, &utp->jitcnt) ||
 	    __put_user(txc.calcnt, &utp->calcnt) ||
 	    __put_user(txc.errcnt, &utp->errcnt) ||
-	    __put_user(txc.stbcnt, &utp->stbcnt))
+	    __put_user(txc.stbcnt, &utp->stbcnt) ||
+	    __put_user(txc.tai, &utp->tai))
 		ret = -EFAULT;
 
 	return ret;
diff --git a/kernel/kexec.c b/kernel/kexec.c
index cb85c79989b4..1c5fcacbcf33 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1217,7 +1217,7 @@ static int __init parse_crashkernel_mem(char *cmdline,
 		}
 
 		/* match ? */
-		if (system_ram >= start && system_ram <= end) {
+		if (system_ram >= start && system_ram < end) {
 			*crash_size = size;
 			break;
 		}
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index ae5c6c147c4b..f1525ad06cb3 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -4,8 +4,9 @@
 
 #include <linux/sched.h>
 #include <linux/posix-timers.h>
-#include <asm/uaccess.h>
 #include <linux/errno.h>
+#include <linux/math64.h>
+#include <asm/uaccess.h>
 
 static int check_clock(const clockid_t which_clock)
 {
@@ -47,12 +48,10 @@ static void sample_to_timespec(const clockid_t which_clock,
 			       union cpu_time_count cpu,
 			       struct timespec *tp)
 {
-	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-		tp->tv_sec = div_long_long_rem(cpu.sched,
-					       NSEC_PER_SEC, &tp->tv_nsec);
-	} else {
+	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
+		*tp = ns_to_timespec(cpu.sched);
+	else
 		cputime_to_timespec(cpu.cpu, tp);
-	}
 }
 
 static inline int cpu_time_before(const clockid_t which_clock,
diff --git a/kernel/sched.c b/kernel/sched.c
index e2f7f5acc807..34bcc5bc120e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8025,7 +8025,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 
 	se->my_q = cfs_rq;
 	se->load.weight = tg->shares;
-	se->load.inv_weight = div64_64(1ULL<<32, se->load.weight);
+	se->load.inv_weight = div64_u64(1ULL<<32, se->load.weight);
 	se->parent = parent;
 }
 #endif
@@ -8692,7 +8692,7 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares)
 		dequeue_entity(cfs_rq, se, 0);
 
 	se->load.weight = shares;
-	se->load.inv_weight = div64_64((1ULL<<32), shares);
+	se->load.inv_weight = div64_u64((1ULL<<32), shares);
 
 	if (on_rq)
 		enqueue_entity(cfs_rq, se, 0);
@@ -8787,7 +8787,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 	if (runtime == RUNTIME_INF)
 		return 1ULL << 16;
 
-	return div64_64(runtime << 16, period);
+	return div64_u64(runtime << 16, period);
 }
 
 #ifdef CONFIG_CGROUP_SCHED
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 8a9498e7c831..6b4a12558e88 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -357,8 +357,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 
 		avg_per_cpu = p->se.sum_exec_runtime;
 		if (p->se.nr_migrations) {
-			avg_per_cpu = div64_64(avg_per_cpu,
-					       p->se.nr_migrations);
+			avg_per_cpu = div64_u64(avg_per_cpu,
+						p->se.nr_migrations);
 		} else {
 			avg_per_cpu = -1LL;
 		}
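The div64_64() call sites above are a rename to div64_u64() from <linux/math64.h>; the contract (unsigned 64-bit by 64-bit divide) is unchanged. A minimal userspace sketch of the inverse-weight fixed point being computed follows — div64_u64_demo() and the shares value are illustrative stand-ins, not kernel API:

#include <stdint.h>
#include <stdio.h>

/* same contract as the kernel's div64_u64(): unsigned 64/64 -> 64 */
static uint64_t div64_u64_demo(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;	/* trivial in userspace; the kernel helper exists for 32-bit targets */
}

int main(void)
{
	uint64_t shares = 1024;		/* hypothetical task-group weight */
	uint64_t inv_weight = div64_u64_demo(1ULL << 32, shares);

	/* fixed point: weight * inv_weight ~= 2^32, so later code can multiply+shift instead of divide */
	printf("inv_weight = %llu, weight * inv_weight = %llu (~2^32)\n",
	       (unsigned long long)inv_weight,
	       (unsigned long long)(shares * inv_weight));
	return 0;
}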
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 3c44956ee7e2..36e061740047 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -589,16 +589,20 @@ static void takeover_tasklets(unsigned int cpu)
 	local_irq_disable();
 
 	/* Find end, append list for that CPU. */
-	*__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).head;
-	__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
-	per_cpu(tasklet_vec, cpu).head = NULL;
-	per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
+	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
+		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
+		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
+		per_cpu(tasklet_vec, cpu).head = NULL;
+		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
+	}
 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 
-	*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
-	__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
-	per_cpu(tasklet_hi_vec, cpu).head = NULL;
-	per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
+	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
+		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
+		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
+		per_cpu(tasklet_hi_vec, cpu).head = NULL;
+		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
+	}
 	raise_softirq_irqoff(HI_SOFTIRQ);
 
 	local_irq_enable();
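The softirq.c hunk guards the CPU-hotplug splice: if the dead CPU's tasklet list is empty, its tail points at its own head, and the unconditional splice would leave the surviving CPU's tail pointer aimed into the dead CPU's per-CPU data. A self-contained userspace sketch of the same tail-pointer list and guard — types and names here are local stand-ins, not the kernel's tasklet_vec:

#include <assert.h>
#include <stddef.h>

struct node { struct node *next; };
struct list { struct node *head; struct node **tail; };

static void list_init(struct list *l)
{
	l->head = NULL;
	l->tail = &l->head;	/* empty list: tail points at head */
}

/* mirrors the guarded splice in takeover_tasklets() */
static void splice(struct list *dst, struct list *src)
{
	if (&src->head == src->tail)	/* the fix: an empty source is a no-op */
		return;
	*dst->tail = src->head;
	dst->tail = src->tail;
	list_init(src);
}

int main(void)
{
	struct list dst, src;
	struct node n = { NULL };

	list_init(&dst);
	list_init(&src);

	splice(&dst, &src);		/* empty source must leave dst untouched */
	assert(dst.tail == &dst.head);

	src.head = &n;
	src.tail = &n.next;
	splice(&dst, &src);		/* non-empty source is taken over as before */
	assert(dst.head == &n && dst.tail == &n.next);
	return 0;
}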
diff --git a/kernel/time.c b/kernel/time.c
index 86729042e4cd..cbe0d5a222ff 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -36,6 +36,7 @@
 #include <linux/security.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
+#include <linux/math64.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -391,13 +392,17 @@ EXPORT_SYMBOL(set_normalized_timespec);
 struct timespec ns_to_timespec(const s64 nsec)
 {
 	struct timespec ts;
+	s32 rem;
 
 	if (!nsec)
 		return (struct timespec) {0, 0};
 
-	ts.tv_sec = div_long_long_rem_signed(nsec, NSEC_PER_SEC, &ts.tv_nsec);
-	if (unlikely(nsec < 0))
-		set_normalized_timespec(&ts, ts.tv_sec, ts.tv_nsec);
+	ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
+	if (unlikely(rem < 0)) {
+		ts.tv_sec--;
+		rem += NSEC_PER_SEC;
+	}
+	ts.tv_nsec = rem;
 
 	return ts;
 }
@@ -527,8 +532,10 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
 	 * Convert jiffies to nanoseconds and separate with
 	 * one divide.
 	 */
-	u64 nsec = (u64)jiffies * TICK_NSEC;
-	value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec);
+	u32 rem;
+	value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
+				    NSEC_PER_SEC, &rem);
+	value->tv_nsec = rem;
 }
 EXPORT_SYMBOL(jiffies_to_timespec);
 
@@ -566,12 +573,11 @@ void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
 	 * Convert jiffies to nanoseconds and separate with
 	 * one divide.
 	 */
-	u64 nsec = (u64)jiffies * TICK_NSEC;
-	long tv_usec;
+	u32 rem;
 
-	value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tv_usec);
-	tv_usec /= NSEC_PER_USEC;
-	value->tv_usec = tv_usec;
+	value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
+				    NSEC_PER_SEC, &rem);
+	value->tv_usec = rem / NSEC_PER_USEC;
 }
 EXPORT_SYMBOL(jiffies_to_timeval);
 
@@ -587,9 +593,7 @@ clock_t jiffies_to_clock_t(long x)
 	return x / (HZ / USER_HZ);
 # endif
 #else
-	u64 tmp = (u64)x * TICK_NSEC;
-	do_div(tmp, (NSEC_PER_SEC / USER_HZ));
-	return (long)tmp;
+	return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
 #endif
 }
 EXPORT_SYMBOL(jiffies_to_clock_t);
@@ -601,16 +605,12 @@ unsigned long clock_t_to_jiffies(unsigned long x)
 		return ~0UL;
 	return x * (HZ / USER_HZ);
 #else
-	u64 jif;
-
 	/* Don't worry about loss of precision here .. */
 	if (x >= ~0UL / HZ * USER_HZ)
 		return ~0UL;
 
 	/* .. but do try to contain it here */
-	jif = x * (u64) HZ;
-	do_div(jif, USER_HZ);
-	return jif;
+	return div_u64((u64)x * HZ, USER_HZ);
 #endif
 }
 EXPORT_SYMBOL(clock_t_to_jiffies);
@@ -619,10 +619,9 @@ u64 jiffies_64_to_clock_t(u64 x)
 {
 #if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
 # if HZ < USER_HZ
-	x *= USER_HZ;
-	do_div(x, HZ);
+	x = div_u64(x * USER_HZ, HZ);
 # elif HZ > USER_HZ
-	do_div(x, HZ / USER_HZ);
+	x = div_u64(x, HZ / USER_HZ);
 # else
 	/* Nothing to do */
 # endif
@@ -632,8 +631,7 @@ u64 jiffies_64_to_clock_t(u64 x)
 	 * but even this doesn't overflow in hundreds of years
 	 * in 64 bits, so..
 	 */
-	x *= TICK_NSEC;
-	do_div(x, (NSEC_PER_SEC / USER_HZ));
+	x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
 #endif
 	return x;
 }
@@ -642,21 +640,17 @@ EXPORT_SYMBOL(jiffies_64_to_clock_t);
 u64 nsec_to_clock_t(u64 x)
 {
 #if (NSEC_PER_SEC % USER_HZ) == 0
-	do_div(x, (NSEC_PER_SEC / USER_HZ));
+	return div_u64(x, NSEC_PER_SEC / USER_HZ);
 #elif (USER_HZ % 512) == 0
-	x *= USER_HZ/512;
-	do_div(x, (NSEC_PER_SEC / 512));
+	return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
 #else
 	/*
	 * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
	 * overflow after 64.99 years.
	 * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
 	 */
-	x *= 9;
-	do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (USER_HZ/2)) /
-				  USER_HZ));
+	return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
 #endif
-	return x;
 }
 
 #if (BITS_PER_LONG < 64)
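The new ns_to_timespec() relies on div_s64_rem() truncating toward zero, so a negative input leaves a negative remainder that is normalized by borrowing one second. A self-contained userspace sketch of that rounding — div_s64_rem_demo() and timespec_demo stand in for the kernel helper and struct:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000L

struct timespec_demo { long tv_sec; long tv_nsec; };

static int64_t div_s64_rem_demo(int64_t dividend, int32_t divisor, int32_t *rem)
{
	*rem = (int32_t)(dividend % divisor);	/* C99 %: truncates toward zero */
	return dividend / divisor;
}

static struct timespec_demo ns_to_timespec_demo(int64_t nsec)
{
	struct timespec_demo ts = {0, 0};
	int32_t rem;

	if (!nsec)
		return ts;

	ts.tv_sec = div_s64_rem_demo(nsec, NSEC_PER_SEC, &rem);
	if (rem < 0) {		/* normalize so 0 <= tv_nsec < NSEC_PER_SEC */
		ts.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	ts.tv_nsec = rem;
	return ts;
}

int main(void)
{
	/* -1.5 s becomes {-2 s, +500000000 ns} */
	struct timespec_demo ts = ns_to_timespec_demo(-1500000000LL);
	printf("%ld sec, %ld nsec\n", ts.tv_sec, ts.tv_nsec);
	return 0;
}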
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 5fd9b9469770..5125ddd8196b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -15,7 +15,8 @@
 #include <linux/jiffies.h>
 #include <linux/hrtimer.h>
 #include <linux/capability.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
+#include <linux/clocksource.h>
 #include <asm/timex.h>
 
 /*
@@ -23,11 +24,14 @@
 */
 unsigned long tick_usec = TICK_USEC;	/* USER_HZ period (usec) */
 unsigned long tick_nsec;		/* ACTHZ period (nsec) */
-static u64 tick_length, tick_length_base;
+u64 tick_length;
+static u64 tick_length_base;
+
+static struct hrtimer leap_timer;
 
 #define MAX_TICKADJ		500		/* microsecs */
 #define MAX_TICKADJ_SCALED	(((u64)(MAX_TICKADJ * NSEC_PER_USEC) << \
-				  TICK_LENGTH_SHIFT) / NTP_INTERVAL_FREQ)
+				  NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
 
 /*
  * phase-lock loop variables
@@ -35,11 +39,12 @@ static u64 tick_length, tick_length_base;
 /* TIME_ERROR prevents overwriting the CMOS clock */
 static int time_state = TIME_OK;	/* clock synchronization status	*/
 int time_status = STA_UNSYNC;		/* clock status bits		*/
-static s64 time_offset;			/* time adjustment (ns)		*/
+static long time_tai;			/* TAI offset (s)		*/
+static s64 time_offset;			/* time adjustment (ns)		*/
 static long time_constant = 2;		/* pll time constant		*/
 long time_maxerror = NTP_PHASE_LIMIT;	/* maximum error (us)		*/
 long time_esterror = NTP_PHASE_LIMIT;	/* estimated error (us)		*/
-long time_freq;				/* frequency offset (scaled ppm)*/
+static s64 time_freq;			/* frequency offset (scaled ns/s)*/
 static long time_reftime;		/* time at last adjustment (s)	*/
 long time_adjust;
 static long ntp_tick_adj;
@@ -47,16 +52,56 @@ static long ntp_tick_adj;
 static void ntp_update_frequency(void)
 {
 	u64 second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
-				<< TICK_LENGTH_SHIFT;
-	second_length += (s64)ntp_tick_adj << TICK_LENGTH_SHIFT;
-	second_length += (s64)time_freq << (TICK_LENGTH_SHIFT - SHIFT_NSEC);
+				<< NTP_SCALE_SHIFT;
+	second_length += (s64)ntp_tick_adj << NTP_SCALE_SHIFT;
+	second_length += time_freq;
 
 	tick_length_base = second_length;
 
-	do_div(second_length, HZ);
-	tick_nsec = second_length >> TICK_LENGTH_SHIFT;
+	tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
+	tick_length_base = div_u64(tick_length_base, NTP_INTERVAL_FREQ);
+}
+
+static void ntp_update_offset(long offset)
+{
+	long mtemp;
+	s64 freq_adj;
+
+	if (!(time_status & STA_PLL))
+		return;
 
-	do_div(tick_length_base, NTP_INTERVAL_FREQ);
+	if (!(time_status & STA_NANO))
+		offset *= NSEC_PER_USEC;
+
+	/*
+	 * Scale the phase adjustment and
+	 * clamp to the operating range.
+	 */
+	offset = min(offset, MAXPHASE);
+	offset = max(offset, -MAXPHASE);
+
+	/*
+	 * Select how the frequency is to be controlled
+	 * and in which mode (PLL or FLL).
+	 */
+	if (time_status & STA_FREQHOLD || time_reftime == 0)
+		time_reftime = xtime.tv_sec;
+	mtemp = xtime.tv_sec - time_reftime;
+	time_reftime = xtime.tv_sec;
+
+	freq_adj = (s64)offset * mtemp;
+	freq_adj <<= NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant);
+	time_status &= ~STA_MODE;
+	if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
+		freq_adj += div_s64((s64)offset << (NTP_SCALE_SHIFT - SHIFT_FLL),
+				    mtemp);
+		time_status |= STA_MODE;
+	}
+	freq_adj += time_freq;
+	freq_adj = min(freq_adj, MAXFREQ_SCALED);
+	time_freq = max(freq_adj, -MAXFREQ_SCALED);
+
+	time_offset = div_s64((s64)offset << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
 }
 
 /**
@@ -78,62 +123,70 @@ void ntp_clear(void)
 }
 
 /*
- * this routine handles the overflow of the microsecond field
- *
- * The tricky bits of code to handle the accurate clock support
- * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
- * They were originally developed for SUN and DEC kernels.
- * All the kudos should go to Dave for this stuff.
+ * Leap second processing. If in leap-insert state at the end of the
+ * day, the system clock is set back one second; if in leap-delete
+ * state, the system clock is set ahead one second.
 */
-void second_overflow(void)
+static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 {
-	long time_adj;
+	enum hrtimer_restart res = HRTIMER_NORESTART;
 
-	/* Bump the maxerror field */
-	time_maxerror += MAXFREQ >> SHIFT_USEC;
-	if (time_maxerror > NTP_PHASE_LIMIT) {
-		time_maxerror = NTP_PHASE_LIMIT;
-		time_status |= STA_UNSYNC;
-	}
+	write_seqlock_irq(&xtime_lock);
 
-	/*
-	 * Leap second processing. If in leap-insert state at the end of the
-	 * day, the system clock is set back one second; if in leap-delete
-	 * state, the system clock is set ahead one second. The microtime()
-	 * routine or external clock driver will insure that reported time is
-	 * always monotonic. The ugly divides should be replaced.
-	 */
 	switch (time_state) {
 	case TIME_OK:
-		if (time_status & STA_INS)
-			time_state = TIME_INS;
-		else if (time_status & STA_DEL)
-			time_state = TIME_DEL;
 		break;
 	case TIME_INS:
-		if (xtime.tv_sec % 86400 == 0) {
-			xtime.tv_sec--;
-			wall_to_monotonic.tv_sec++;
-			time_state = TIME_OOP;
-			printk(KERN_NOTICE "Clock: inserting leap second "
-					"23:59:60 UTC\n");
-		}
+		xtime.tv_sec--;
+		wall_to_monotonic.tv_sec++;
+		time_state = TIME_OOP;
+		printk(KERN_NOTICE "Clock: "
+		       "inserting leap second 23:59:60 UTC\n");
+		leap_timer.expires = ktime_add_ns(leap_timer.expires,
+						  NSEC_PER_SEC);
+		res = HRTIMER_RESTART;
 		break;
 	case TIME_DEL:
-		if ((xtime.tv_sec + 1) % 86400 == 0) {
-			xtime.tv_sec++;
-			wall_to_monotonic.tv_sec--;
-			time_state = TIME_WAIT;
-			printk(KERN_NOTICE "Clock: deleting leap second "
-					"23:59:59 UTC\n");
-		}
+		xtime.tv_sec++;
+		time_tai--;
+		wall_to_monotonic.tv_sec--;
+		time_state = TIME_WAIT;
+		printk(KERN_NOTICE "Clock: "
+		       "deleting leap second 23:59:59 UTC\n");
 		break;
 	case TIME_OOP:
+		time_tai++;
 		time_state = TIME_WAIT;
-		break;
+		/* fall through */
 	case TIME_WAIT:
 		if (!(time_status & (STA_INS | STA_DEL)))
-			time_state = TIME_OK;
+			time_state = TIME_OK;
+		break;
+	}
+	update_vsyscall(&xtime, clock);
+
+	write_sequnlock_irq(&xtime_lock);
+
+	return res;
+}
+
+/*
+ * this routine handles the overflow of the microsecond field
+ *
+ * The tricky bits of code to handle the accurate clock support
+ * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
+ * They were originally developed for SUN and DEC kernels.
+ * All the kudos should go to Dave for this stuff.
+ */
+void second_overflow(void)
+{
+	s64 time_adj;
+
+	/* Bump the maxerror field */
+	time_maxerror += MAXFREQ / NSEC_PER_USEC;
+	if (time_maxerror > NTP_PHASE_LIMIT) {
+		time_maxerror = NTP_PHASE_LIMIT;
+		time_status |= STA_UNSYNC;
 	}
 
 	/*
@@ -143,7 +196,7 @@ void second_overflow(void)
 	tick_length	 = tick_length_base;
 	time_adj	 = shift_right(time_offset, SHIFT_PLL + time_constant);
 	time_offset	-= time_adj;
-	tick_length	+= (s64)time_adj << (TICK_LENGTH_SHIFT - SHIFT_UPDATE);
+	tick_length	+= time_adj;
 
 	if (unlikely(time_adjust)) {
 		if (time_adjust > MAX_TICKADJ) {
@@ -154,25 +207,12 @@ void second_overflow(void)
 			tick_length -= MAX_TICKADJ_SCALED;
 		} else {
 			tick_length += (s64)(time_adjust * NSEC_PER_USEC /
-					NTP_INTERVAL_FREQ) << TICK_LENGTH_SHIFT;
+					NTP_INTERVAL_FREQ) << NTP_SCALE_SHIFT;
 			time_adjust = 0;
 		}
 	}
 }
 
-/*
- * Return how long ticks are at the moment, that is, how much time
- * update_wall_time_one_tick will add to xtime next time we call it
- * (assuming no calls to do_adjtimex in the meantime).
- * The return value is in fixed-point nanoseconds shifted by the
- * specified number of bits to the right of the binary point.
- * This function has no side-effects.
- */
-u64 current_tick_length(void)
-{
-	return tick_length;
-}
-
 #ifdef CONFIG_GENERIC_CMOS_UPDATE
 
 /* Disable the cmos update - used by virtualization and embedded */
@@ -236,8 +276,8 @@ static inline void notify_cmos_timer(void) { }
 */
 int do_adjtimex(struct timex *txc)
 {
-	long mtemp, save_adjust, rem;
-	s64 freq_adj, temp64;
+	struct timespec ts;
+	long save_adjust, sec;
 	int result;
 
 	/* In order to modify anything, you gotta be super-user! */
@@ -247,147 +287,132 @@ int do_adjtimex(struct timex *txc)
 
 	/* Now we validate the data before disabling interrupts */
 
 	if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) {
-	  /* singleshot must not be used with any other mode bits */
-		if (txc->modes != ADJ_OFFSET_SINGLESHOT &&
-		    txc->modes != ADJ_OFFSET_SS_READ)
+		/* singleshot must not be used with any other mode bits */
+		if (txc->modes & ~ADJ_OFFSET_SS_READ)
 			return -EINVAL;
 	}
 
-	if (txc->modes != ADJ_OFFSET_SINGLESHOT && (txc->modes & ADJ_OFFSET))
-	  /* adjustment Offset limited to +- .512 seconds */
-		if (txc->offset <= - MAXPHASE || txc->offset >= MAXPHASE )
-			return -EINVAL;
-
 	/* if the quartz is off by more than 10% something is VERY wrong ! */
 	if (txc->modes & ADJ_TICK)
 		if (txc->tick <  900000/USER_HZ ||
 		    txc->tick > 1100000/USER_HZ)
 			return -EINVAL;
 
+	if (time_state != TIME_OK && txc->modes & ADJ_STATUS)
+		hrtimer_cancel(&leap_timer);
+	getnstimeofday(&ts);
+
 	write_seqlock_irq(&xtime_lock);
 
-	result = time_state;	/* mostly `TIME_OK' */
-
 	/* Save for later - semantics of adjtime is to return old value */
 	save_adjust = time_adjust;
 
-#if 0	/* STA_CLOCKERR is never set yet */
-	time_status &= ~STA_CLOCKERR;		/* reset STA_CLOCKERR */
-#endif
 	/* If there are input parameters, then process them */
-	if (txc->modes)
-	{
-		if (txc->modes & ADJ_STATUS)	/* only set allowed bits */
-			time_status =  (txc->status & ~STA_RONLY) |
-					(time_status & STA_RONLY);
-
-		if (txc->modes & ADJ_FREQUENCY) {	/* p. 22 */
-			if (txc->freq > MAXFREQ || txc->freq < -MAXFREQ) {
-				result = -EINVAL;
-				goto leave;
-			}
-			time_freq = ((s64)txc->freq * NSEC_PER_USEC)
-					>> (SHIFT_USEC - SHIFT_NSEC);
-		}
-
-		if (txc->modes & ADJ_MAXERROR) {
-			if (txc->maxerror < 0 || txc->maxerror >= NTP_PHASE_LIMIT) {
-				result = -EINVAL;
-				goto leave;
+	if (txc->modes) {
+		if (txc->modes & ADJ_STATUS) {
+			if ((time_status & STA_PLL) &&
+			    !(txc->status & STA_PLL)) {
+				time_state = TIME_OK;
+				time_status = STA_UNSYNC;
+			}
+			/* only set allowed bits */
+			time_status &= STA_RONLY;
+			time_status |= txc->status & ~STA_RONLY;
+
+			switch (time_state) {
+			case TIME_OK:
+			start_timer:
+				sec = ts.tv_sec;
+				if (time_status & STA_INS) {
+					time_state = TIME_INS;
+					sec += 86400 - sec % 86400;
+					hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
+				} else if (time_status & STA_DEL) {
+					time_state = TIME_DEL;
+					sec += 86400 - (sec + 1) % 86400;
+					hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
+				}
+				break;
+			case TIME_INS:
+			case TIME_DEL:
+				time_state = TIME_OK;
+				goto start_timer;
+				break;
+			case TIME_WAIT:
+				if (!(time_status & (STA_INS | STA_DEL)))
+					time_state = TIME_OK;
+				break;
+			case TIME_OOP:
+				hrtimer_restart(&leap_timer);
+				break;
 			}
-			time_maxerror = txc->maxerror;
 		}
 
-		if (txc->modes & ADJ_ESTERROR) {
-			if (txc->esterror < 0 || txc->esterror >= NTP_PHASE_LIMIT) {
-				result = -EINVAL;
-				goto leave;
+		if (txc->modes & ADJ_NANO)
+			time_status |= STA_NANO;
+		if (txc->modes & ADJ_MICRO)
+			time_status &= ~STA_NANO;
+
+		if (txc->modes & ADJ_FREQUENCY) {
+			time_freq = (s64)txc->freq * PPM_SCALE;
+			time_freq = min(time_freq, MAXFREQ_SCALED);
+			time_freq = max(time_freq, -MAXFREQ_SCALED);
 		}
-			time_esterror = txc->esterror;
-		}
 
-		if (txc->modes & ADJ_TIMECONST) {	/* p. 24 */
-			if (txc->constant < 0) {	/* NTP v4 uses values > 6 */
-				result = -EINVAL;
-				goto leave;
+		if (txc->modes & ADJ_MAXERROR)
+			time_maxerror = txc->maxerror;
+		if (txc->modes & ADJ_ESTERROR)
+			time_esterror = txc->esterror;
+
+		if (txc->modes & ADJ_TIMECONST) {
+			time_constant = txc->constant;
+			if (!(time_status & STA_NANO))
+				time_constant += 4;
+			time_constant = min(time_constant, (long)MAXTC);
+			time_constant = max(time_constant, 0l);
 		}
-			time_constant = min(txc->constant + 4, (long)MAXTC);
-		}
 
-		if (txc->modes & ADJ_OFFSET) {	/* values checked earlier */
-			if (txc->modes == ADJ_OFFSET_SINGLESHOT) {
-				/* adjtime() is independent from ntp_adjtime() */
-				time_adjust = txc->offset;
+		if (txc->modes & ADJ_TAI && txc->constant > 0)
+			time_tai = txc->constant;
+
+		if (txc->modes & ADJ_OFFSET) {
+			if (txc->modes == ADJ_OFFSET_SINGLESHOT)
+				/* adjtime() is independent from ntp_adjtime() */
+				time_adjust = txc->offset;
+			else
+				ntp_update_offset(txc->offset);
 		}
-			else if (time_status & STA_PLL) {
-				time_offset = txc->offset * NSEC_PER_USEC;
-
-				/*
-				 * Scale the phase adjustment and
-				 * clamp to the operating range.
-				 */
-				time_offset = min(time_offset, (s64)MAXPHASE * NSEC_PER_USEC);
-				time_offset = max(time_offset, (s64)-MAXPHASE * NSEC_PER_USEC);
-
-				/*
-				 * Select whether the frequency is to be controlled
-				 * and in which mode (PLL or FLL). Clamp to the operating
-				 * range. Ugly multiply/divide should be replaced someday.
-				 */
-
-				if (time_status & STA_FREQHOLD || time_reftime == 0)
-					time_reftime = xtime.tv_sec;
-				mtemp = xtime.tv_sec - time_reftime;
-				time_reftime = xtime.tv_sec;
-
-				freq_adj = time_offset * mtemp;
-				freq_adj = shift_right(freq_adj, time_constant * 2 +
-						       (SHIFT_PLL + 2) * 2 - SHIFT_NSEC);
-				if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
-					u64 utemp64;
-					temp64 = time_offset << (SHIFT_NSEC - SHIFT_FLL);
-					if (time_offset < 0) {
-						utemp64 = -temp64;
-						do_div(utemp64, mtemp);
-						freq_adj -= utemp64;
-					} else {
-						utemp64 = temp64;
-						do_div(utemp64, mtemp);
-						freq_adj += utemp64;
-					}
-				}
-				freq_adj += time_freq;
-				freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC);
-				time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC);
-				time_offset = div_long_long_rem_signed(time_offset,
-								       NTP_INTERVAL_FREQ,
-								       &rem);
-				time_offset <<= SHIFT_UPDATE;
-			} /* STA_PLL */
-		} /* txc->modes & ADJ_OFFSET */
-		if (txc->modes & ADJ_TICK)
-			tick_usec = txc->tick;
-
-		if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
-			ntp_update_frequency();
-	} /* txc->modes */
-leave:	if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
+		if (txc->modes & ADJ_TICK)
+			tick_usec = txc->tick;
+
+		if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
+			ntp_update_frequency();
+	}
+
+	result = time_state;	/* mostly `TIME_OK' */
+	if (time_status & (STA_UNSYNC|STA_CLOCKERR))
 		result = TIME_ERROR;
 
 	if ((txc->modes == ADJ_OFFSET_SINGLESHOT) ||
-			(txc->modes == ADJ_OFFSET_SS_READ))
+	    (txc->modes == ADJ_OFFSET_SS_READ))
 		txc->offset = save_adjust;
-	else
-		txc->offset = ((long)shift_right(time_offset, SHIFT_UPDATE)) *
-				NTP_INTERVAL_FREQ / 1000;
-	txc->freq	   = (time_freq / NSEC_PER_USEC) <<
-				(SHIFT_USEC - SHIFT_NSEC);
+	else {
+		txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
+					  NTP_SCALE_SHIFT);
+		if (!(time_status & STA_NANO))
+			txc->offset /= NSEC_PER_USEC;
+	}
+	txc->freq	   = shift_right((s32)(time_freq >> PPM_SCALE_INV_SHIFT) *
+					 (s64)PPM_SCALE_INV,
+					 NTP_SCALE_SHIFT);
 	txc->maxerror	   = time_maxerror;
 	txc->esterror	   = time_esterror;
 	txc->status	   = time_status;
 	txc->constant	   = time_constant;
 	txc->precision	   = 1;
-	txc->tolerance	   = MAXFREQ;
+	txc->tolerance	   = MAXFREQ_SCALED / PPM_SCALE;
 	txc->tick	   = tick_usec;
+	txc->tai	   = time_tai;
 
 	/* PPS is not implemented, so these are zero */
 	txc->ppsfreq	   = 0;
@@ -399,9 +424,15 @@ leave:	if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
 	txc->errcnt	   = 0;
 	txc->stbcnt	   = 0;
 	write_sequnlock_irq(&xtime_lock);
-	do_gettimeofday(&txc->time);
+
+	txc->time.tv_sec = ts.tv_sec;
+	txc->time.tv_usec = ts.tv_nsec;
+	if (!(time_status & STA_NANO))
+		txc->time.tv_usec /= NSEC_PER_USEC;
+
 	notify_cmos_timer();
-	return(result);
+
+	return result;
 }
 
 static int __init ntp_tick_adj_setup(char *str)
@@ -411,3 +442,10 @@
 }
 
 __setup("ntp_tick_adj=", ntp_tick_adj_setup);
+
+void __init ntp_init(void)
+{
+	ntp_clear();
+	hrtimer_init(&leap_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+	leap_timer.function = ntp_leap_second;
+}
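The leap-second hrtimer above is armed from do_adjtimex(): STA_INS schedules the expiry at the next UTC midnight, STA_DEL at the final second of the day. A small userspace sketch of just that expiry arithmetic — the chosen instant is made up, and plain integers stand in for ktime:

#include <stdio.h>

int main(void)
{
	/* hypothetical instant: 63296 s past UTC midnight on some day */
	long long sec = 12345LL * 86400 + 63296;

	long long ins = sec + (86400 - sec % 86400);	   /* STA_INS: next 00:00:00 */
	long long del = sec + (86400 - (sec + 1) % 86400); /* STA_DEL: next 23:59:59 */

	printf("insert timer fires %lld s into the day\n", ins % 86400);  /* 0 */
	printf("delete timer fires %lld s into the day\n", del % 86400);  /* 86399 */
	return 0;
}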
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 2d6087c7cf98..e91c29f961c9 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -53,7 +53,7 @@ void update_xtime_cache(u64 nsec)
 	timespec_add_ns(&xtime_cache, nsec);
 }
 
-static struct clocksource *clock; /* pointer to current clocksource */
+struct clocksource *clock;
 
 #ifdef CONFIG_GENERIC_TIME
 
@@ -246,7 +246,7 @@ void __init timekeeping_init(void)
 
 	write_seqlock_irqsave(&xtime_lock, flags);
 
-	ntp_clear();
+	ntp_init();
 
 	clock = clocksource_get_next();
 	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -371,7 +371,7 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
 	 * here.  This is tuned so that an error of about 1 msec is adjusted
 	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
 	 */
-	error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
+	error2 = clock->error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
 	error2 = abs(error2);
 	for (look_ahead = 0; error2 > 0; look_ahead++)
 		error2 >>= 2;
@@ -380,8 +380,7 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
 	 * Now calculate the error in (1 << look_ahead) ticks, but first
 	 * remove the single look ahead already included in the error.
 	 */
-	tick_error = current_tick_length() >>
-		(TICK_LENGTH_SHIFT - clock->shift + 1);
+	tick_error = tick_length >> (NTP_SCALE_SHIFT - clock->shift + 1);
 	tick_error -= clock->xtime_interval >> 1;
 	error = ((error - tick_error) >> look_ahead) + tick_error;
 
@@ -412,7 +411,7 @@ static void clocksource_adjust(s64 offset)
 	s64 error, interval = clock->cycle_interval;
 	int adj;
 
-	error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
+	error = clock->error >> (NTP_SCALE_SHIFT - clock->shift - 1);
 	if (error > interval) {
 		error >>= 2;
 		if (likely(error <= interval))
@@ -434,7 +433,7 @@ static void clocksource_adjust(s64 offset)
 	clock->xtime_interval += interval;
 	clock->xtime_nsec -= offset;
 	clock->error -= (interval - offset) <<
-			(TICK_LENGTH_SHIFT - clock->shift);
+			(NTP_SCALE_SHIFT - clock->shift);
 }
 
 /**
@@ -473,8 +472,8 @@ void update_wall_time(void)
 		}
 
 		/* accumulate error between NTP and clock interval */
-		clock->error += current_tick_length();
-		clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
+		clock->error += tick_length;
+		clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
 	}
 
 	/* correct the clock when NTP error is too big */
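tick_length carries nanoseconds left-shifted by NTP_SCALE_SHIFT, while the clocksource interval is kept at the clocksource's own shift, so update_wall_time() brings both to the same scale before accumulating the error. A userspace sketch of that fixed-point bookkeeping — the shift and interval values here are invented for illustration:

#include <stdio.h>
#include <stdint.h>

#define NTP_SCALE_SHIFT 32	/* the scale the patch standardizes on */

int main(void)
{
	unsigned int shift = 22;	/* hypothetical clocksource shift */

	/* one NTP interval in ns << NTP_SCALE_SHIFT (10 ms at HZ=100) */
	int64_t tick_length = 10000000LL << NTP_SCALE_SHIFT;
	/* clocksource interval in ns << shift, running 120 ns short per tick */
	int64_t xtime_interval = (10000000LL - 120) << shift;

	/* same scale before subtracting, as in update_wall_time() */
	int64_t error = 0;
	error += tick_length;
	error -= xtime_interval << (NTP_SCALE_SHIFT - shift);

	printf("per-tick error: %lld ns\n", (long long)(error >> NTP_SCALE_SHIFT));
	return 0;
}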
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 721093a22561..29fc39f1029c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -195,7 +195,6 @@ static void delayed_work_timer_fn(unsigned long __data)
 int queue_delayed_work(struct workqueue_struct *wq,
 			struct delayed_work *dwork, unsigned long delay)
 {
-	timer_stats_timer_set_start_info(&dwork->timer);
 	if (delay == 0)
 		return queue_work(wq, &dwork->work);
 
@@ -219,11 +218,12 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	struct timer_list *timer = &dwork->timer;
 	struct work_struct *work = &dwork->work;
 
-	timer_stats_timer_set_start_info(&dwork->timer);
 	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
+		timer_stats_timer_set_start_info(&dwork->timer);
+
 		/* This stores cwq for the moment, for the timer_fn */
 		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
 		timer->expires = jiffies + delay;
@@ -564,7 +564,6 @@ EXPORT_SYMBOL(schedule_work);
 int schedule_delayed_work(struct delayed_work *dwork,
 					unsigned long delay)
 {
-	timer_stats_timer_set_start_info(&dwork->timer);
 	return queue_delayed_work(keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
@@ -581,7 +580,6 @@ EXPORT_SYMBOL(schedule_delayed_work);
 int schedule_delayed_work_on(int cpu,
 			struct delayed_work *dwork, unsigned long delay)
 {
-	timer_stats_timer_set_start_info(&dwork->timer);
 	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
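The workqueue.c change consolidates the per-timer bookkeeping inside the test_and_set_bit() guard, so only the caller that wins the PENDING bit touches the work item. A userspace sketch of that pattern, with C11 stdatomic standing in for the kernel's test_and_set_bit() and a plain field standing in for the timer_stats record:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct delayed_work_demo {
	atomic_flag pending;	/* stands in for WORK_STRUCT_PENDING */
	long timer_start_info;	/* stands in for the timer_stats record */
};

static bool queue_delayed_demo(struct delayed_work_demo *dw, long now)
{
	if (atomic_flag_test_and_set(&dw->pending))
		return false;	/* lost the race: the work is already queued */

	/* only the winner may touch the work, so bookkeeping happens here */
	dw->timer_start_info = now;
	return true;
}

int main(void)
{
	struct delayed_work_demo dw = { ATOMIC_FLAG_INIT, 0 };

	printf("first queue: %d\n", queue_delayed_demo(&dw, 100));	/* 1 */
	printf("second queue: %d\n", queue_delayed_demo(&dw, 200));	/* 0 */
	return 0;
}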