Diffstat (limited to 'drivers/clocksource')
-rw-r--r-- | drivers/clocksource/arm_arch_timer.c | 47
-rw-r--r-- | drivers/clocksource/clksrc-dbx500-prcmu.c | 8
-rw-r--r-- | drivers/clocksource/dw_apb_timer_of.c | 28
-rw-r--r-- | drivers/clocksource/hyperv_timer.c | 251
-rw-r--r-- | drivers/clocksource/ingenic-ost.c | 9
-rw-r--r-- | drivers/clocksource/ingenic-timer.c | 2
-rw-r--r-- | drivers/clocksource/sh_cmt.c | 5
-rw-r--r-- | drivers/clocksource/timer-atmel-tcb.c | 4
-rw-r--r-- | drivers/clocksource/timer-fsl-ftm.c | 2
-rw-r--r-- | drivers/clocksource/timer-microchip-pit64b.c | 2
-rw-r--r-- | drivers/clocksource/timer-npcm7xx.c | 1
-rw-r--r-- | drivers/clocksource/timer-of.c | 4
-rw-r--r-- | drivers/clocksource/timer-pistachio.c | 4
-rw-r--r-- | drivers/clocksource/timer-ti-dm-systimer.c | 157
-rw-r--r-- | drivers/clocksource/timer-vf-pit.c | 2
15 files changed, 378 insertions, 148 deletions
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index d0177824c518..4fb1f4da27ec 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -51,7 +51,7 @@
 static unsigned arch_timers_present __initdata;
 
-static void __iomem *arch_counter_base;
+static void __iomem *arch_counter_base __ro_after_init;
 
 struct arch_timer {
 	void __iomem *base;
@@ -60,15 +60,24 @@ struct arch_timer {
 
 #define to_arch_timer(e) container_of(e, struct arch_timer, evt)
 
-static u32 arch_timer_rate;
-static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];
+static u32 arch_timer_rate __ro_after_init;
+u32 arch_timer_rate1 __ro_after_init;
+static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI] __ro_after_init;
+
+static const char *arch_timer_ppi_names[ARCH_TIMER_MAX_TIMER_PPI] = {
+	[ARCH_TIMER_PHYS_SECURE_PPI]	= "sec-phys",
+	[ARCH_TIMER_PHYS_NONSECURE_PPI]	= "phys",
+	[ARCH_TIMER_VIRT_PPI]		= "virt",
+	[ARCH_TIMER_HYP_PPI]		= "hyp-phys",
+	[ARCH_TIMER_HYP_VIRT_PPI]	= "hyp-virt",
+};
 
 static struct clock_event_device __percpu *arch_timer_evt;
 
-static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
-static bool arch_timer_c3stop;
-static bool arch_timer_mem_use_virtual;
-static bool arch_counter_suspend_stop;
+static enum arch_timer_ppi_nr arch_timer_uses_ppi __ro_after_init = ARCH_TIMER_VIRT_PPI;
+static bool arch_timer_c3stop __ro_after_init;
+static bool arch_timer_mem_use_virtual __ro_after_init;
+static bool arch_counter_suspend_stop __ro_after_init;
 #ifdef CONFIG_GENERIC_GETTIMEOFDAY
 static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
 #else
@@ -76,7 +85,7 @@ static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_NONE;
 #endif /* CONFIG_GENERIC_GETTIMEOFDAY */
 
 static cpumask_t evtstrm_available = CPU_MASK_NONE;
-static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
+static bool evtstrm_enable __ro_after_init = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
 
 static int __init early_evtstrm_cfg(char *buf)
 {
@@ -176,7 +185,7 @@ static notrace u64 arch_counter_get_cntvct(void)
  * to exist on arm64. arm doesn't use this before DT is probed so even
  * if we don't have the cp15 accessors we won't have a problem.
  */
-u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
+u64 (*arch_timer_read_counter)(void) __ro_after_init = arch_counter_get_cntvct;
 EXPORT_SYMBOL_GPL(arch_timer_read_counter);
 
 static u64 arch_counter_read(struct clocksource *cs)
@@ -925,7 +934,7 @@ static int validate_timer_rate(void)
 * rate was probed first, and don't verify that others match. If the first node
 * probed has a clock-frequency property, this overrides the HW register.
 */
-static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
+static void __init arch_timer_of_configure_rate(u32 rate, struct device_node *np)
 {
 	/* Who has more than one independent system counter? */
 	if (arch_timer_rate)
@@ -939,7 +948,7 @@ static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
 		pr_warn("frequency not available\n");
 }
 
-static void arch_timer_banner(unsigned type)
+static void __init arch_timer_banner(unsigned type)
 {
 	pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
 		type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
@@ -1280,8 +1289,9 @@ static void __init arch_timer_populate_kvm_info(void)
 
 static int __init arch_timer_of_init(struct device_node *np)
 {
-	int i, ret;
+	int i, irq, ret;
 	u32 rate;
+	bool has_names;
 
 	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
 		pr_warn("multiple nodes in dt, skipping\n");
@@ -1289,8 +1299,17 @@ static int __init arch_timer_of_init(struct device_node *np)
 	}
 
 	arch_timers_present |= ARCH_TIMER_TYPE_CP15;
-	for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
-		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
+
+	has_names = of_property_read_bool(np, "interrupt-names");
+
+	for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++) {
+		if (has_names)
+			irq = of_irq_get_byname(np, arch_timer_ppi_names[i]);
+		else
+			irq = of_irq_get(np, i);
+		if (irq > 0)
+			arch_timer_ppi[i] = irq;
+	}
 
 	arch_timer_populate_kvm_info();
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index 996900d017c6..2fc93e46cea3 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -18,7 +18,7 @@
 
 #define RATE_32K		32768
 
-#define TIMER_MODE_CONTINOUS	0x1
+#define TIMER_MODE_CONTINUOUS	0x1
 #define TIMER_DOWNCOUNT_VAL	0xffffffff
 
 #define PRCMU_TIMER_REF		0
@@ -55,13 +55,13 @@ static int __init clksrc_dbx500_prcmu_init(struct device_node *node)
 
 	/*
 	 * The A9 sub system expects the timer to be configured as
-	 * a continous looping timer.
+	 * a continuous looping timer.
 	 * The PRCMU should configure it but if it for some reason
 	 * don't we do it here.
 	 */
 	if (readl(clksrc_dbx500_timer_base + PRCMU_TIMER_MODE) !=
-	    TIMER_MODE_CONTINOUS) {
-		writel(TIMER_MODE_CONTINOUS,
+	    TIMER_MODE_CONTINUOUS) {
+		writel(TIMER_MODE_CONTINUOUS,
 		       clksrc_dbx500_timer_base + PRCMU_TIMER_MODE);
 		writel(TIMER_DOWNCOUNT_VAL,
 		       clksrc_dbx500_timer_base + PRCMU_TIMER_REF);
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 42e7e43b8fcd..3819ef5b7098 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -38,7 +38,7 @@ static int __init timer_get_base_and_rate(struct device_node *np,
 	}
 
 	/*
-	 * Not all implementations use a periphal clock, so don't panic
+	 * Not all implementations use a peripheral clock, so don't panic
 	 * if it's not present
 	 */
 	pclk = of_clk_get_by_name(np, "pclk");
@@ -52,18 +52,34 @@
 		return 0;
 
 	timer_clk = of_clk_get_by_name(np, "timer");
-	if (IS_ERR(timer_clk))
-		return PTR_ERR(timer_clk);
+	if (IS_ERR(timer_clk)) {
+		ret = PTR_ERR(timer_clk);
+		goto out_pclk_disable;
+	}
 
 	ret = clk_prepare_enable(timer_clk);
 	if (ret)
-		return ret;
+		goto out_timer_clk_put;
 
 	*rate = clk_get_rate(timer_clk);
-	if (!(*rate))
-		return -EINVAL;
+	if (!(*rate)) {
+		ret = -EINVAL;
+		goto out_timer_clk_disable;
+	}
 
 	return 0;
+
+out_timer_clk_disable:
+	clk_disable_unprepare(timer_clk);
+out_timer_clk_put:
+	clk_put(timer_clk);
+out_pclk_disable:
+	if (!IS_ERR(pclk)) {
+		clk_disable_unprepare(pclk);
+		clk_put(pclk);
+	}
+	iounmap(*base);
+	return ret;
 }
 
 static int __init add_clockevent(struct device_node *event_timer)
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 269a691bd2c4..977fd05ac35f 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -18,6 +18,9 @@
 #include <linux/sched_clock.h>
 #include <linux/mm.h>
 #include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/acpi.h>
 #include <clocksource/hyperv_timer.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/mshyperv.h>
@@ -43,14 +46,13 @@ static u64 hv_sched_clock_offset __ro_after_init;
 */
 static bool direct_mode_enabled;
 
-static int stimer0_irq;
-static int stimer0_vector;
+static int stimer0_irq = -1;
 static int stimer0_message_sint;
+static DEFINE_PER_CPU(long, stimer0_evt);
 
 /*
- * ISR for when stimer0 is operating in Direct Mode. Direct Mode
- * does not use VMbus or any VMbus messages, so process here and not
- * in the VMbus driver code.
+ * Common code for stimer0 interrupts coming via Direct Mode or
+ * as a VMbus message.
 */
 void hv_stimer0_isr(void)
 {
@@ -61,6 +63,16 @@ void hv_stimer0_isr(void)
 }
 EXPORT_SYMBOL_GPL(hv_stimer0_isr);
 
+/*
+ * stimer0 interrupt handler for architectures that support
+ * per-cpu interrupts, which also implies Direct Mode.
+ */
+static irqreturn_t hv_stimer0_percpu_isr(int irq, void *dev_id)
+{
+	hv_stimer0_isr();
+	return IRQ_HANDLED;
+}
+
 static int hv_ce_set_next_event(unsigned long delta,
 				struct clock_event_device *evt)
 {
@@ -68,16 +80,16 @@ static int hv_ce_set_next_event(unsigned long delta,
 
 	current_tick = hv_read_reference_counter();
 	current_tick += delta;
-	hv_init_timer(0, current_tick);
+	hv_set_register(HV_REGISTER_STIMER0_COUNT, current_tick);
 
 	return 0;
 }
 
 static int hv_ce_shutdown(struct clock_event_device *evt)
 {
-	hv_init_timer(0, 0);
-	hv_init_timer_config(0, 0);
-	if (direct_mode_enabled)
-		hv_disable_stimer0_percpu_irq(stimer0_irq);
+	hv_set_register(HV_REGISTER_STIMER0_COUNT, 0);
+	hv_set_register(HV_REGISTER_STIMER0_CONFIG, 0);
+	if (direct_mode_enabled && stimer0_irq >= 0)
+		disable_percpu_irq(stimer0_irq);
 
 	return 0;
 }
@@ -95,8 +107,9 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
 		 * on the specified hardware vector/IRQ.
 		 */
 		timer_cfg.direct_mode = 1;
-		timer_cfg.apic_vector = stimer0_vector;
-		hv_enable_stimer0_percpu_irq(stimer0_irq);
+		timer_cfg.apic_vector = HYPERV_STIMER0_VECTOR;
+		if (stimer0_irq >= 0)
+			enable_percpu_irq(stimer0_irq, IRQ_TYPE_NONE);
 	} else {
 		/*
 		 * When it expires, the timer will generate a VMbus message,
@@ -105,7 +118,7 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
 		timer_cfg.direct_mode = 0;
 		timer_cfg.sintx = stimer0_message_sint;
 	}
-	hv_init_timer_config(0, timer_cfg.as_uint64);
+	hv_set_register(HV_REGISTER_STIMER0_CONFIG, timer_cfg.as_uint64);
 	return 0;
 }
 
@@ -169,10 +182,58 @@ int hv_stimer_cleanup(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(hv_stimer_cleanup);
 
+/*
+ * These placeholders are overridden by arch specific code on
+ * architectures that need special setup of the stimer0 IRQ because
+ * they don't support per-cpu IRQs (such as x86/x64).
+ */
+void __weak hv_setup_stimer0_handler(void (*handler)(void))
+{
+};
+
+void __weak hv_remove_stimer0_handler(void)
+{
+};
+
+/* Called only on architectures with per-cpu IRQs (i.e., not x86/x64) */
+static int hv_setup_stimer0_irq(void)
+{
+	int ret;
+
+	ret = acpi_register_gsi(NULL, HYPERV_STIMER0_VECTOR,
+			ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH);
+	if (ret < 0) {
+		pr_err("Can't register Hyper-V stimer0 GSI. Error %d", ret);
+		return ret;
+	}
+	stimer0_irq = ret;
+
+	ret = request_percpu_irq(stimer0_irq, hv_stimer0_percpu_isr,
+		"Hyper-V stimer0", &stimer0_evt);
+	if (ret) {
+		pr_err("Can't request Hyper-V stimer0 IRQ %d. Error %d",
+			stimer0_irq, ret);
+		acpi_unregister_gsi(stimer0_irq);
+		stimer0_irq = -1;
+	}
+	return ret;
+}
+
+static void hv_remove_stimer0_irq(void)
+{
+	if (stimer0_irq == -1) {
+		hv_remove_stimer0_handler();
+	} else {
+		free_percpu_irq(stimer0_irq, &stimer0_evt);
+		acpi_unregister_gsi(stimer0_irq);
+		stimer0_irq = -1;
+	}
+}
+
 /* hv_stimer_alloc - Global initialization of the clockevent and stimer0 */
-int hv_stimer_alloc(void)
+int hv_stimer_alloc(bool have_percpu_irqs)
 {
-	int ret = 0;
+	int ret;
 
 	/*
 	 * Synthetic timers are always available except on old versions of
@@ -188,29 +249,37 @@
 	direct_mode_enabled = ms_hyperv.misc_features &
 			HV_STIMER_DIRECT_MODE_AVAILABLE;
 
-	if (direct_mode_enabled) {
-		ret = hv_setup_stimer0_irq(&stimer0_irq, &stimer0_vector,
-				hv_stimer0_isr);
+	/*
+	 * If Direct Mode isn't enabled, the remainder of the initialization
+	 * is done later by hv_stimer_legacy_init()
+	 */
+	if (!direct_mode_enabled)
+		return 0;
+
+	if (have_percpu_irqs) {
+		ret = hv_setup_stimer0_irq();
 		if (ret)
-			goto free_percpu;
+			goto free_clock_event;
+	} else {
+		hv_setup_stimer0_handler(hv_stimer0_isr);
+	}
 
-		/*
-		 * Since we are in Direct Mode, stimer initialization
-		 * can be done now with a CPUHP value in the same range
-		 * as other clockevent devices.
-		 */
-		ret = cpuhp_setup_state(CPUHP_AP_HYPERV_TIMER_STARTING,
-				"clockevents/hyperv/stimer:starting",
-				hv_stimer_init, hv_stimer_cleanup);
-		if (ret < 0)
-			goto free_stimer0_irq;
+	/*
+	 * Since we are in Direct Mode, stimer initialization
+	 * can be done now with a CPUHP value in the same range
+	 * as other clockevent devices.
	 */
+	ret = cpuhp_setup_state(CPUHP_AP_HYPERV_TIMER_STARTING,
+			"clockevents/hyperv/stimer:starting",
+			hv_stimer_init, hv_stimer_cleanup);
+	if (ret < 0) {
+		hv_remove_stimer0_irq();
+		goto free_clock_event;
 	}
 	return ret;
 
-free_stimer0_irq:
-	hv_remove_stimer0_irq(stimer0_irq);
-	stimer0_irq = 0;
-free_percpu:
+free_clock_event:
 	free_percpu(hv_clock_event);
 	hv_clock_event = NULL;
 	return ret;
@@ -254,23 +323,6 @@ void hv_stimer_legacy_cleanup(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(hv_stimer_legacy_cleanup);
 
-
-/* hv_stimer_free - Free global resources allocated by hv_stimer_alloc() */
-void hv_stimer_free(void)
-{
-	if (!hv_clock_event)
-		return;
-
-	if (direct_mode_enabled) {
-		cpuhp_remove_state(CPUHP_AP_HYPERV_TIMER_STARTING);
-		hv_remove_stimer0_irq(stimer0_irq);
-		stimer0_irq = 0;
-	}
-	free_percpu(hv_clock_event);
-	hv_clock_event = NULL;
-}
-EXPORT_SYMBOL_GPL(hv_stimer_free);
-
 /*
 * Do a global cleanup of clockevents for the cases of kexec and
 * vmbus exit
@@ -287,12 +339,17 @@ void hv_stimer_global_cleanup(void)
 		hv_stimer_legacy_cleanup(cpu);
 	}
 
-	/*
-	 * If Direct Mode is enabled, the cpuhp teardown callback
-	 * (hv_stimer_cleanup) will be run on all CPUs to stop the
-	 * stimers.
-	 */
-	hv_stimer_free();
+	if (!hv_clock_event)
+		return;
+
+	if (direct_mode_enabled) {
+		cpuhp_remove_state(CPUHP_AP_HYPERV_TIMER_STARTING);
+		hv_remove_stimer0_irq();
+		stimer0_irq = -1;
+	}
+	free_percpu(hv_clock_event);
+	hv_clock_event = NULL;
 }
 EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
 
@@ -302,14 +359,6 @@ EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
 * the other that uses the TSC reference page feature as defined in the
 * TLFS. The MSR version is for compatibility with old versions of
 * Hyper-V and 32-bit x86. The TSC reference page version is preferred.
- *
- * The Hyper-V clocksource ratings of 250 are chosen to be below the
- * TSC clocksource rating of 300. In configurations where Hyper-V offers
- * an InvariantTSC, the TSC is not marked "unstable", so the TSC clocksource
- * is available and preferred. With the higher rating, it will be the
- * default. On older hardware and Hyper-V versions, the TSC is marked
- * "unstable", so no TSC clocksource is created and the selected Hyper-V
- * clocksource will be the default.
 */
 
 u64 (*hv_read_reference_counter)(void);
@@ -331,7 +380,7 @@ static u64 notrace read_hv_clock_tsc(void)
 	u64 current_tick = hv_read_tsc_page(hv_get_tsc_page());
 
 	if (current_tick == U64_MAX)
-		hv_get_time_ref_count(current_tick);
+		current_tick = hv_get_register(HV_REGISTER_TIME_REF_COUNT);
 
 	return current_tick;
 }
@@ -352,9 +401,9 @@ static void suspend_hv_clock_tsc(struct clocksource *arg)
 	u64 tsc_msr;
 
 	/* Disable the TSC page */
-	hv_get_reference_tsc(tsc_msr);
+	tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
 	tsc_msr &= ~BIT_ULL(0);
-	hv_set_reference_tsc(tsc_msr);
+	hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
 }
 
 
@@ -364,39 +413,44 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
 	u64 tsc_msr;
 
 	/* Re-enable the TSC page */
-	hv_get_reference_tsc(tsc_msr);
+	tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
 	tsc_msr &= GENMASK_ULL(11, 0);
 	tsc_msr |= BIT_ULL(0) | (u64)phys_addr;
-	hv_set_reference_tsc(tsc_msr);
+	hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
 }
 
+#ifdef VDSO_CLOCKMODE_HVCLOCK
 static int hv_cs_enable(struct clocksource *cs)
 {
-	hv_enable_vdso_clocksource();
+	vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
 	return 0;
 }
+#endif
 
 static struct clocksource hyperv_cs_tsc = {
	.name	= "hyperv_clocksource_tsc_page",
-	.rating	= 250,
+	.rating	= 500,
	.read	= read_hv_clock_tsc_cs,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
	.suspend= suspend_hv_clock_tsc,
	.resume	= resume_hv_clock_tsc,
+#ifdef VDSO_CLOCKMODE_HVCLOCK
	.enable = hv_cs_enable,
+	.vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK,
+#else
+	.vdso_clock_mode = VDSO_CLOCKMODE_NONE,
+#endif
 };
 
 static u64 notrace read_hv_clock_msr(void)
 {
-	u64 current_tick;
 	/*
 	 * Read the partition counter to get the current tick count. This count
 	 * is set to 0 when the partition is created and is incremented in
 	 * 100 nanosecond units.
 	 */
-	hv_get_time_ref_count(current_tick);
-	return current_tick;
+	return hv_get_register(HV_REGISTER_TIME_REF_COUNT);
 }
 
 static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
@@ -412,12 +466,36 @@ static u64 notrace read_hv_sched_clock_msr(void)
 
 static struct clocksource hyperv_cs_msr = {
	.name	= "hyperv_clocksource_msr",
-	.rating	= 250,
+	.rating	= 500,
	.read	= read_hv_clock_msr_cs,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+/*
+ * Reference to pv_ops must be inline so objtool
+ * detection of noinstr violations can work correctly.
+ */
+#ifdef CONFIG_GENERIC_SCHED_CLOCK
+static __always_inline void hv_setup_sched_clock(void *sched_clock)
+{
+	/*
+	 * We're on an architecture with generic sched clock (not x86/x64).
+	 * The Hyper-V sched clock read function returns nanoseconds, not
+	 * the normal 100ns units of the Hyper-V synthetic clock.
+	 */
+	sched_clock_register(sched_clock, 64, NSEC_PER_SEC);
+}
+#elif defined CONFIG_PARAVIRT
+static __always_inline void hv_setup_sched_clock(void *sched_clock)
+{
+	/* We're on x86/x64 *and* using PV ops */
+	paravirt_set_sched_clock(sched_clock);
+}
+#else /* !CONFIG_GENERIC_SCHED_CLOCK && !CONFIG_PARAVIRT */
+static __always_inline void hv_setup_sched_clock(void *sched_clock) {}
+#endif /* CONFIG_GENERIC_SCHED_CLOCK */
+
 static bool __init hv_init_tsc_clocksource(void)
 {
 	u64 tsc_msr;
@@ -429,6 +507,22 @@ static bool __init hv_init_tsc_clocksource(void)
 	if (hv_root_partition)
 		return false;
 
+	/*
+	 * If Hyper-V offers TSC_INVARIANT, then the virtualized TSC correctly
+	 * handles frequency and offset changes due to live migration,
+	 * pause/resume, and other VM management operations. So lower the
+	 * Hyper-V Reference TSC rating, causing the generic TSC to be used.
+	 * TSC_INVARIANT is not offered on ARM64, so the Hyper-V Reference
+	 * TSC will be preferred over the virtualized ARM64 arch counter.
+	 * While the Hyper-V MSR clocksource won't be used since the
+	 * Reference TSC clocksource is present, change its rating as
+	 * well for consistency.
+	 */
+	if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
+		hyperv_cs_tsc.rating = 250;
+		hyperv_cs_msr.rating = 250;
+	}
+
 	hv_read_reference_counter = read_hv_clock_tsc;
 	phys_addr = virt_to_phys(hv_get_tsc_page());
 
@@ -439,12 +533,11 @@ static bool __init hv_init_tsc_clocksource(void)
 	 * (which already has at least the low 12 bits set to zero since
 	 * it is page aligned). Also set the "enable" bit, which is bit 0.
 	 */
-	hv_get_reference_tsc(tsc_msr);
+	tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
 	tsc_msr &= GENMASK_ULL(11, 0);
 	tsc_msr = tsc_msr | 0x1 | (u64)phys_addr;
-	hv_set_reference_tsc(tsc_msr);
+	hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
 
-	hv_set_clocksource_vdso(hyperv_cs_tsc);
 	clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
 
 	hv_sched_clock_offset = hv_read_reference_counter();
@@ -457,7 +550,7 @@ void __init hv_init_clocksource(void)
 {
 	/*
 	 * Try to set up the TSC page clocksource. If it succeeds, we're
-	 * done. Otherwise, set up the MSR clocksoruce. At least one of
+	 * done. Otherwise, set up the MSR clocksource. At least one of
 	 * these will always be available except on very old versions of
 	 * Hyper-V on x86. In that case we won't have a Hyper-V
 	 * clocksource, but Linux will still run with a clocksource based
diff --git a/drivers/clocksource/ingenic-ost.c b/drivers/clocksource/ingenic-ost.c
index 029efc2731b4..06d25754e606 100644
--- a/drivers/clocksource/ingenic-ost.c
+++ b/drivers/clocksource/ingenic-ost.c
@@ -88,9 +88,9 @@ static int __init ingenic_ost_probe(struct platform_device *pdev)
 		return PTR_ERR(ost->regs);
 
 	map = device_node_to_regmap(dev->parent->of_node);
-	if (!map) {
+	if (IS_ERR(map)) {
 		dev_err(dev, "regmap not found");
-		return -EINVAL;
+		return PTR_ERR(map);
 	}
 
 	ost->clk = devm_clk_get(dev, "ost");
@@ -167,13 +167,14 @@ static const struct ingenic_ost_soc_info jz4725b_ost_soc_info = {
 	.is64bit = false,
 };
 
-static const struct ingenic_ost_soc_info jz4770_ost_soc_info = {
+static const struct ingenic_ost_soc_info jz4760b_ost_soc_info = {
 	.is64bit = true,
 };
 
 static const struct of_device_id ingenic_ost_of_match[] = {
 	{ .compatible = "ingenic,jz4725b-ost", .data = &jz4725b_ost_soc_info, },
-	{ .compatible = "ingenic,jz4770-ost", .data = &jz4770_ost_soc_info, },
+	{ .compatible = "ingenic,jz4760b-ost", .data = &jz4760b_ost_soc_info, },
+	{ .compatible = "ingenic,jz4770-ost", .data = &jz4760b_ost_soc_info, },
 	{ }
 };
diff --git a/drivers/clocksource/ingenic-timer.c b/drivers/clocksource/ingenic-timer.c
index 905fd6b163a8..24ed0f1f089b 100644
--- a/drivers/clocksource/ingenic-timer.c
+++ b/drivers/clocksource/ingenic-timer.c
@@ -264,6 +264,7 @@ static const struct ingenic_soc_info jz4725b_soc_info = {
 static const struct of_device_id ingenic_tcu_of_match[] = {
 	{ .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, },
 	{ .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, },
+	{ .compatible = "ingenic,jz4760-tcu", .data = &jz4740_soc_info, },
 	{ .compatible = "ingenic,jz4770-tcu", .data = &jz4740_soc_info, },
 	{ .compatible = "ingenic,x1000-tcu", .data = &jz4740_soc_info, },
 	{ /* sentinel */ }
@@ -358,6 +359,7 @@ err_free_ingenic_tcu:
 
 TIMER_OF_DECLARE(jz4740_tcu_intc, "ingenic,jz4740-tcu", ingenic_tcu_init);
 TIMER_OF_DECLARE(jz4725b_tcu_intc, "ingenic,jz4725b-tcu", ingenic_tcu_init);
+TIMER_OF_DECLARE(jz4760_tcu_intc, "ingenic,jz4760-tcu", ingenic_tcu_init);
 TIMER_OF_DECLARE(jz4770_tcu_intc, "ingenic,jz4770-tcu", ingenic_tcu_init);
 TIMER_OF_DECLARE(x1000_tcu_intc, "ingenic,x1000-tcu", ingenic_tcu_init);
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index c98f8851fd68..d7ed99f0001f 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -339,8 +339,9 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch)
 		sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
 				   SH_CMT16_CMCSR_CKS512);
 	} else {
-		sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM |
-				   SH_CMT32_CMCSR_CMTOUT_IE |
+		u32 cmtout = ch->cmt->info->model <= SH_CMT_48BIT ?
+			SH_CMT32_CMCSR_CMTOUT_IE : 0;
+		sh_cmt_write_cmcsr(ch, cmtout | SH_CMT32_CMCSR_CMM |
 				   SH_CMT32_CMCSR_CMR_IRQ |
 				   SH_CMT32_CMCSR_CKS_RCLK8);
 	}
diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c
index 787dbebbb432..27af17c99590 100644
--- a/drivers/clocksource/timer-atmel-tcb.c
+++ b/drivers/clocksource/timer-atmel-tcb.c
@@ -455,9 +455,9 @@ static int __init tcb_clksrc_init(struct device_node *node)
 	tcaddr = tc.regs;
 
 	if (bits == 32) {
-		/* use apropriate function to read 32 bit counter */
+		/* use appropriate function to read 32 bit counter */
 		clksrc.read = tc_get_cycles32;
-		/* setup ony channel 0 */
+		/* setup only channel 0 */
 		tcb_setup_single_chan(&tc, best_divisor_idx);
 		tc_sched_clock = tc_sched_clock_read32;
 		tc_delay_timer.read_current_timer = tc_delay_timer_read32;
diff --git a/drivers/clocksource/timer-fsl-ftm.c b/drivers/clocksource/timer-fsl-ftm.c
index 12a2ed7cfaff..93f336ec875a 100644
--- a/drivers/clocksource/timer-fsl-ftm.c
+++ b/drivers/clocksource/timer-fsl-ftm.c
@@ -116,7 +116,7 @@ static int ftm_set_next_event(unsigned long delta,
 	 * to the MOD register latches the value into a buffer. The MOD
 	 * register is updated with the value of its write buffer with
 	 * the following scenario:
-	 * a, the counter source clock is diabled.
+	 * a, the counter source clock is disabled.
 	 */
 	ftm_counter_disable(priv->clkevt_base);
diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c
index ab623b25a47b..cfa4ec7ef396 100644
--- a/drivers/clocksource/timer-microchip-pit64b.c
+++ b/drivers/clocksource/timer-microchip-pit64b.c
@@ -237,7 +237,7 @@ static void __init mchp_pit64b_pres_compute(u32 *pres, u32 clk_rate,
 			break;
 	}
 
-	/* Use the bigest prescaler if we didn't match one. */
+	/* Use the biggest prescaler if we didn't match one. */
 	if (*pres == MCHP_PIT64B_PRES_MAX)
 		*pres = MCHP_PIT64B_PRES_MAX - 1;
 }
diff --git a/drivers/clocksource/timer-npcm7xx.c b/drivers/clocksource/timer-npcm7xx.c
index 9780ffd8010e..a00520cbb660 100644
--- a/drivers/clocksource/timer-npcm7xx.c
+++ b/drivers/clocksource/timer-npcm7xx.c
@@ -208,5 +208,6 @@ static int __init npcm7xx_timer_init(struct device_node *np)
 	return 0;
 }
 
+TIMER_OF_DECLARE(wpcm450, "nuvoton,wpcm450-timer", npcm7xx_timer_init);
 TIMER_OF_DECLARE(npcm7xx, "nuvoton,npcm750-timer", npcm7xx_timer_init);
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index 572da477c6d3..529cc6a51cdb 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -211,10 +211,10 @@ out_fail:
 }
 
 /**
- * timer_of_cleanup - release timer_of ressources
+ * timer_of_cleanup - release timer_of resources
 * @to: timer_of structure
 *
- * Release the ressources that has been used in timer_of_init().
+ * Release the resources that has been used in timer_of_init().
 * This function should be called in init error cases
 */
 void __init timer_of_cleanup(struct timer_of *to)
diff --git a/drivers/clocksource/timer-pistachio.c b/drivers/clocksource/timer-pistachio.c
index a2dd85d0c1d7..6f37181a8c63 100644
--- a/drivers/clocksource/timer-pistachio.c
+++ b/drivers/clocksource/timer-pistachio.c
@@ -71,7 +71,7 @@ static u64 notrace
 pistachio_clocksource_read_cycles(struct clocksource *cs)
 {
 	struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
-	u32 counter, overflw;
+	u32 counter, overflow;
 	unsigned long flags;
 
 	/*
@@ -80,7 +80,7 @@ pistachio_clocksource_read_cycles(struct clocksource *cs)
 	 */
 
 	raw_spin_lock_irqsave(&pcs->lock, flags);
-	overflw = gpt_readl(pcs->base, TIMER_CURRENT_OVERFLOW_VALUE, 0);
+	overflow = gpt_readl(pcs->base, TIMER_CURRENT_OVERFLOW_VALUE, 0);
 	counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0);
 	raw_spin_unlock_irqrestore(&pcs->lock, flags);
diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
index 33b3e8aa2cc5..b6f97960d8ee 100644
--- a/drivers/clocksource/timer-ti-dm-systimer.c
+++ b/drivers/clocksource/timer-ti-dm-systimer.c
@@ -2,6 +2,7 @@
 #include <linux/clk.h>
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
+#include <linux/cpuhotplug.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
@@ -449,13 +450,13 @@ static int dmtimer_set_next_event(unsigned long cycles,
 	struct dmtimer_systimer *t = &clkevt->t;
 	void __iomem *pend = t->base + t->pend;
 
-	writel_relaxed(0xffffffff - cycles, t->base + t->counter);
 	while (readl_relaxed(pend) & WP_TCRR)
 		cpu_relax();
+	writel_relaxed(0xffffffff - cycles, t->base + t->counter);
 
-	writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
 	while (readl_relaxed(pend) & WP_TCLR)
 		cpu_relax();
+	writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
 
 	return 0;
 }
@@ -490,18 +491,18 @@ static int dmtimer_set_periodic(struct clock_event_device *evt)
 	dmtimer_clockevent_shutdown(evt);
 
 	/* Looks like we need to first set the load value separately */
-	writel_relaxed(clkevt->period, t->base + t->load);
 	while (readl_relaxed(pend) & WP_TLDR)
 		cpu_relax();
+	writel_relaxed(clkevt->period, t->base + t->load);
 
-	writel_relaxed(clkevt->period, t->base + t->counter);
 	while (readl_relaxed(pend) & WP_TCRR)
 		cpu_relax();
+	writel_relaxed(clkevt->period, t->base + t->counter);
 
-	writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
-		       t->base + t->ctrl);
 	while (readl_relaxed(pend) & WP_TCLR)
 		cpu_relax();
+	writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
+		       t->base + t->ctrl);
 
 	return 0;
 }
@@ -530,17 +531,17 @@ static void omap_clockevent_unidle(struct clock_event_device *evt)
 	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
 }
 
-static int __init dmtimer_clockevent_init(struct device_node *np)
+static int __init dmtimer_clkevt_init_common(struct dmtimer_clockevent *clkevt,
+					     struct device_node *np,
+					     unsigned int features,
+					     const struct cpumask *cpumask,
+					     const char *name,
+					     int rating)
 {
-	struct dmtimer_clockevent *clkevt;
 	struct clock_event_device *dev;
 	struct dmtimer_systimer *t;
 	int error;
 
-	clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
-	if (!clkevt)
-		return -ENOMEM;
-
 	t = &clkevt->t;
 	dev = &clkevt->dev;
 
@@ -548,24 +549,23 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
 	 * We mostly use cpuidle_coupled with ARM local timers for runtime,
 	 * so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here.
 	 */
-	dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
-	dev->rating = 300;
+	dev->features = features;
+	dev->rating = rating;
 	dev->set_next_event = dmtimer_set_next_event;
 	dev->set_state_shutdown = dmtimer_clockevent_shutdown;
 	dev->set_state_periodic = dmtimer_set_periodic;
 	dev->set_state_oneshot = dmtimer_clockevent_shutdown;
+	dev->set_state_oneshot_stopped = dmtimer_clockevent_shutdown;
 	dev->tick_resume = dmtimer_clockevent_shutdown;
-	dev->cpumask = cpu_possible_mask;
+	dev->cpumask = cpumask;
 
 	dev->irq = irq_of_parse_and_map(np, 0);
-	if (!dev->irq) {
-		error = -ENXIO;
-		goto err_out_free;
-	}
+	if (!dev->irq)
+		return -ENXIO;
 
 	error = dmtimer_systimer_setup(np, &clkevt->t);
 	if (error)
-		goto err_out_free;
+		return error;
 
 	clkevt->period = 0xffffffff - DIV_ROUND_CLOSEST(t->rate, HZ);
 
@@ -577,38 +577,132 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
 	writel_relaxed(OMAP_TIMER_CTRL_POSTED, t->base + t->ifctrl);
 
 	error = request_irq(dev->irq, dmtimer_clockevent_interrupt,
-			    IRQF_TIMER, "clockevent", clkevt);
+			    IRQF_TIMER, name, clkevt);
 	if (error)
 		goto err_out_unmap;
 
 	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
 	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
 
-	pr_info("TI gptimer clockevent: %s%lu Hz at %pOF\n",
-		of_find_property(np, "ti,timer-alwon", NULL) ?
+	pr_info("TI gptimer %s: %s%lu Hz at %pOF\n",
+		name, of_find_property(np, "ti,timer-alwon", NULL) ?
 		"always-on " : "", t->rate, np->parent);
 
-	clockevents_config_and_register(dev, t->rate,
-					3, /* Timer internal resynch latency */
+	return 0;
+
+err_out_unmap:
+	iounmap(t->base);
+
+	return error;
+}
+
+static int __init dmtimer_clockevent_init(struct device_node *np)
+{
+	struct dmtimer_clockevent *clkevt;
+	int error;
+
+	clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
+	if (!clkevt)
+		return -ENOMEM;
+
+	error = dmtimer_clkevt_init_common(clkevt, np,
+					   CLOCK_EVT_FEAT_PERIODIC |
+					   CLOCK_EVT_FEAT_ONESHOT,
+					   cpu_possible_mask, "clockevent",
+					   300);
+	if (error)
+		goto err_out_free;
+
+	clockevents_config_and_register(&clkevt->dev, clkevt->t.rate,
+					3, /* Timer internal resync latency */
 					0xffffffff);
 
 	if (of_machine_is_compatible("ti,am33xx") ||
 	    of_machine_is_compatible("ti,am43")) {
-		dev->suspend = omap_clockevent_idle;
-		dev->resume = omap_clockevent_unidle;
+		clkevt->dev.suspend = omap_clockevent_idle;
+		clkevt->dev.resume = omap_clockevent_unidle;
 	}
 
 	return 0;
 
-err_out_unmap:
-	iounmap(t->base);
-
 err_out_free:
 	kfree(clkevt);
 
 	return error;
 }
 
+/* Dmtimer as percpu timer. See dra7 ARM architected timer wrap erratum i940 */
+static DEFINE_PER_CPU(struct dmtimer_clockevent, dmtimer_percpu_timer);
+
+static int __init dmtimer_percpu_timer_init(struct device_node *np, int cpu)
+{
+	struct dmtimer_clockevent *clkevt;
+	int error;
+
+	if (!cpu_possible(cpu))
+		return -EINVAL;
+
+	if (!of_property_read_bool(np->parent, "ti,no-reset-on-init") ||
+	    !of_property_read_bool(np->parent, "ti,no-idle"))
+		pr_warn("Incomplete dtb for percpu dmtimer %pOF\n", np->parent);
+
+	clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
+
+	error = dmtimer_clkevt_init_common(clkevt, np, CLOCK_EVT_FEAT_ONESHOT,
+					   cpumask_of(cpu), "percpu-dmtimer",
+					   500);
+	if (error)
+		return error;
+
+	return 0;
+}
+
+/* See TRM for timer internal resynch latency */
+static int omap_dmtimer_starting_cpu(unsigned int cpu)
+{
+	struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
+	struct clock_event_device *dev = &clkevt->dev;
+	struct dmtimer_systimer *t = &clkevt->t;
+
+	clockevents_config_and_register(dev, t->rate, 3, ULONG_MAX);
+	irq_force_affinity(dev->irq, cpumask_of(cpu));
+
+	return 0;
+}
+
+static int __init dmtimer_percpu_timer_startup(void)
+{
+	struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, 0);
+	struct dmtimer_systimer *t = &clkevt->t;
+
+	if (t->sysc) {
+		cpuhp_setup_state(CPUHP_AP_TI_GP_TIMER_STARTING,
+				  "clockevents/omap/gptimer:starting",
+				  omap_dmtimer_starting_cpu, NULL);
+	}
+
+	return 0;
+}
+subsys_initcall(dmtimer_percpu_timer_startup);
+
+static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
+{
+	struct device_node *arm_timer;
+
+	arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
+	if (of_device_is_available(arm_timer)) {
+		pr_warn_once("ARM architected timer wrap issue i940 detected\n");
+		return 0;
+	}
+
+	if (pa == 0x48034000)		/* dra7 dmtimer3 */
+		return dmtimer_percpu_timer_init(np, 0);
+	else if (pa == 0x48036000)	/* dra7 dmtimer4 */
+		return dmtimer_percpu_timer_init(np, 1);
+
+	return 0;
+}
+
 /* Clocksource */
 static struct dmtimer_clocksource *
 to_dmtimer_clocksource(struct clocksource *cs)
@@ -742,6 +836,9 @@ static int __init dmtimer_systimer_init(struct device_node *np)
 	if (clockevent == pa)
 		return dmtimer_clockevent_init(np);
 
+	if (of_machine_is_compatible("ti,dra7"))
+		return dmtimer_percpu_quirk_init(np, pa);
+
 	return 0;
 }
diff --git a/drivers/clocksource/timer-vf-pit.c b/drivers/clocksource/timer-vf-pit.c
index 1a86a4e7e344..911c92146eca 100644
--- a/drivers/clocksource/timer-vf-pit.c
+++ b/drivers/clocksource/timer-vf-pit.c
@@ -136,7 +136,7 @@ static int __init pit_clockevent_init(unsigned long rate, int irq)
 	/*
 	 * The value for the LDVAL register trigger is calculated as:
 	 * LDVAL trigger = (period / clock period) - 1
-	 * The pit is a 32-bit down count timer, when the conter value
+	 * The pit is a 32-bit down count timer, when the counter value
 	 * reaches 0, it will generate an interrupt, thus the minimal
 	 * LDVAL trigger value is 1. And then the min_delta is
 	 * minimal LDVAL trigger value + 1, and the max_delta is full 32-bit.