author      Thomas Gleixner <tglx@linutronix.de>    2017-11-23 16:29:05 +0100
committer   Thomas Gleixner <tglx@linutronix.de>    2017-11-23 16:29:05 +0100
commit      866c9b94ef968445c52214b3748ecc52a8491bca (patch)
tree        1fd073acb9be8e89e77b35c41e2964ac6feabee6 /drivers/clocksource
parent      timekeeping: Remove CONFIG_GENERIC_TIME_VSYSCALL_OLD (diff)
parent      treewide: Remove TIMER_FUNC_TYPE and TIMER_DATA_TYPE casts (diff)
download    linux-866c9b94ef968445c52214b3748ecc52a8491bca.tar.xz
            linux-866c9b94ef968445c52214b3748ecc52a8491bca.zip
Merge tag 'for-linus-timers-conversion-final-v4.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux into timers/urgent
Pull the last batch of manual timer conversions from Kees Cook:
- final batch of "non trivial" timer conversions (multi-tree dependencies,
things Coccinelle couldn't handle, etc).
- treewide conversions via Coccinelle, in 4 steps (see the sketch after this list):
    - DEFINE_TIMER() functions converted to struct timer_list * argument
    - init_timer() -> setup_timer()
    - setup_timer() -> timer_setup()
    - setup_timer() -> timer_setup() (with a single embedded structure)
- deprecated timer API removals (init_timer(), setup_*timer())
- finalization of new API (remove global casts)
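
To make the four Coccinelle steps above concrete, here is a minimal before/after sketch of the conversion pattern. The driver structure and names (my_dev, my_timeout_old/new, poll_timer) are hypothetical and not taken from this merge; only the timer API calls themselves come from the series described above.

#include <linux/timer.h>

struct my_dev {                         /* hypothetical driver state */
        struct timer_list poll_timer;
};

/* Before: the callback took an opaque unsigned long carrying the context,
 * and the timer was wired up with setup_timer() (or open-coded init_timer()).
 */
static void my_timeout_old(unsigned long data)
{
        struct my_dev *dev = (struct my_dev *)data;
        /* ... handle the expiry using dev ... */
}
/*      setup_timer(&dev->poll_timer, my_timeout_old, (unsigned long)dev); */

/* After: the callback receives the timer_list itself and recovers its
 * containing structure with from_timer(), so no cast-carrying data field
 * (and no TIMER_FUNC_TYPE/TIMER_DATA_TYPE casts) is needed.
 */
static void my_timeout_new(struct timer_list *t)
{
        struct my_dev *dev = from_timer(dev, t, poll_timer);
        /* ... handle the expiry using dev ... */
}
/*      timer_setup(&dev->poll_timer, my_timeout_new, 0); */

The DEFINE_TIMER() step in the list changes statically defined timers the same way: their callbacks now take a struct timer_list * instead of an unsigned long.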
Diffstat (limited to 'drivers/clocksource')
-rw-r--r--   drivers/clocksource/arm_arch_timer.c   60
1 file changed, 53 insertions(+), 7 deletions(-)
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 0ecf5beb56ec..57cb2f00fc07 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -77,6 +77,7 @@ static bool arch_timer_mem_use_virtual;
 static bool arch_counter_suspend_stop;
 static bool vdso_default = true;
 
+static cpumask_t evtstrm_available = CPU_MASK_NONE;
 static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
 
 static int __init early_evtstrm_cfg(char *buf)
@@ -158,6 +159,7 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
  * if we don't have the cp15 accessors we won't have a problem.
  */
 u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
+EXPORT_SYMBOL_GPL(arch_timer_read_counter);
 
 static u64 arch_counter_read(struct clocksource *cs)
 {
@@ -217,6 +219,11 @@ static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
         return __fsl_a008585_read_reg(cntv_tval_el0);
 }
 
+static u64 notrace fsl_a008585_read_cntpct_el0(void)
+{
+        return __fsl_a008585_read_reg(cntpct_el0);
+}
+
 static u64 notrace fsl_a008585_read_cntvct_el0(void)
 {
         return __fsl_a008585_read_reg(cntvct_el0);
@@ -258,6 +265,11 @@ static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
         return __hisi_161010101_read_reg(cntv_tval_el0);
 }
 
+static u64 notrace hisi_161010101_read_cntpct_el0(void)
+{
+        return __hisi_161010101_read_reg(cntpct_el0);
+}
+
 static u64 notrace hisi_161010101_read_cntvct_el0(void)
 {
         return __hisi_161010101_read_reg(cntvct_el0);
@@ -288,6 +300,15 @@ static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
 #endif
 
 #ifdef CONFIG_ARM64_ERRATUM_858921
+static u64 notrace arm64_858921_read_cntpct_el0(void)
+{
+        u64 old, new;
+
+        old = read_sysreg(cntpct_el0);
+        new = read_sysreg(cntpct_el0);
+        return (((old ^ new) >> 32) & 1) ? old : new;
+}
+
 static u64 notrace arm64_858921_read_cntvct_el0(void)
 {
         u64 old, new;
@@ -309,16 +330,19 @@ static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
                                                 struct clock_event_device *clk)
 {
         unsigned long ctrl;
-        u64 cval = evt + arch_counter_get_cntvct();
+        u64 cval;
 
         ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
         ctrl |= ARCH_TIMER_CTRL_ENABLE;
         ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
 
-        if (access == ARCH_TIMER_PHYS_ACCESS)
+        if (access == ARCH_TIMER_PHYS_ACCESS) {
+                cval = evt + arch_counter_get_cntpct();
                 write_sysreg(cval, cntp_cval_el0);
-        else
+        } else {
+                cval = evt + arch_counter_get_cntvct();
                 write_sysreg(cval, cntv_cval_el0);
+        }
 
         arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
 }
@@ -345,6 +369,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
                 .desc = "Freescale erratum a005858",
                 .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
                 .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
+                .read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
                 .read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
                 .set_next_event_phys = erratum_set_next_event_tval_phys,
                 .set_next_event_virt = erratum_set_next_event_tval_virt,
@@ -357,6 +382,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
                 .desc = "HiSilicon erratum 161010101",
                 .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
                 .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
+                .read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
                 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
                 .set_next_event_phys = erratum_set_next_event_tval_phys,
                 .set_next_event_virt = erratum_set_next_event_tval_virt,
@@ -367,6 +393,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
                 .desc = "HiSilicon erratum 161010101",
                 .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
                 .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
+                .read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
                 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
                 .set_next_event_phys = erratum_set_next_event_tval_phys,
                 .set_next_event_virt = erratum_set_next_event_tval_virt,
@@ -377,6 +404,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
                 .match_type = ate_match_local_cap_id,
                 .id = (void *)ARM64_WORKAROUND_858921,
                 .desc = "ARM erratum 858921",
+                .read_cntpct_el0 = arm64_858921_read_cntpct_el0,
                 .read_cntvct_el0 = arm64_858921_read_cntvct_el0,
         },
 #endif
@@ -739,6 +767,7 @@ static void arch_timer_evtstrm_enable(int divider)
 #ifdef CONFIG_COMPAT
         compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
 #endif
+        cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
 }
 
 static void arch_timer_configure_evtstream(void)
@@ -863,6 +892,16 @@ u32 arch_timer_get_rate(void)
         return arch_timer_rate;
 }
 
+bool arch_timer_evtstrm_available(void)
+{
+        /*
+         * We might get called from a preemptible context. This is fine
+         * because availability of the event stream should be always the same
+         * for a preemptible context and context where we might resume a task.
+         */
+        return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
+}
+
 static u64 arch_counter_get_cntvct_mem(void)
 {
         u32 vct_lo, vct_hi, tmp_hi;
@@ -889,7 +928,7 @@ static void __init arch_counter_register(unsigned type)
 
         /* Register the CP15 based counter if we have one */
         if (type & ARCH_TIMER_TYPE_CP15) {
-                if (IS_ENABLED(CONFIG_ARM64) ||
+                if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
                     arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
                         arch_timer_read_counter = arch_counter_get_cntvct;
                 else
@@ -928,6 +967,8 @@ static int arch_timer_dying_cpu(unsigned int cpu)
 {
         struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
 
+        cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
+
         arch_timer_stop(clk);
         return 0;
 }
@@ -937,10 +978,16 @@ static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
 static int arch_timer_cpu_pm_notify(struct notifier_block *self,
                                     unsigned long action, void *hcpu)
 {
-        if (action == CPU_PM_ENTER)
+        if (action == CPU_PM_ENTER) {
                 __this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());
-        else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
+
+                cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
+        } else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
                 arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
+
+                if (elf_hwcap & HWCAP_EVTSTRM)
+                        cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
+        }
         return NOTIFY_OK;
 }
 
@@ -1016,7 +1063,6 @@ static int __init arch_timer_register(void)
         if (err)
                 goto out_unreg_notify;
 
-        /* Register and immediately configure the timer on the boot CPU */
         err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
                                 "clockevents/arm/arch_timer:starting",
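
The hunks above also add arch_timer_evtstrm_available(), which lets a caller check whether the timer event stream is running on the current CPU before relying on its periodic WFE wake-ups. Below is a minimal, hypothetical caller: the wait_for_flag() helper and its polling policy are illustrative assumptions, not code from this merge, and the header assumed to carry the declaration is <clocksource/arm_arch_timer.h>.

#include <linux/compiler.h>             /* READ_ONCE() */
#include <clocksource/arm_arch_timer.h> /* arch_timer_evtstrm_available() (assumed location) */
#include <asm/barrier.h>                /* wfe() */
#include <asm/processor.h>              /* cpu_relax() */

/* Hypothetical wait: spin until *flag becomes non-zero.  When the event
 * stream is available, each wfe() is woken at the stream period, so the
 * wait makes forward progress even if nothing else signals the CPU;
 * otherwise fall back to plain busy-polling.
 */
static void wait_for_flag(u32 *flag)
{
        if (arch_timer_evtstrm_available()) {
                while (!READ_ONCE(*flag))
                        wfe();          /* woken periodically by the event stream */
        } else {
                while (!READ_ONCE(*flag))
                        cpu_relax();    /* no event stream: busy-poll */
        }
}

As the diff shows, the evtstrm_available mask backing this check is cleared when a CPU goes offline or enters a low-power state and set again on exit when HWCAP_EVTSTRM is present, so the answer tracks per-CPU hotplug and power transitions.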