path: root/arch/arm64/include
author    Anders Roxell <anders.roxell@linaro.org>    2019-06-03 11:14:02 +0200
committer Will Deacon <will.deacon@arm.com>    2019-06-05 14:24:06 +0200
commit    f31e98bfae1c8792701ef03acd47344866cb2e14 (patch)
tree      953a5c0e13ae1aed4a597b7f20c120b1f374ab85 /arch/arm64/include
parent    arm64: smp: Moved cpu_logical_map[] to smp.h (diff)
download  linux-f31e98bfae1c8792701ef03acd47344866cb2e14.tar.xz
          linux-f31e98bfae1c8792701ef03acd47344866cb2e14.zip
arm64: arch_timer: mark functions as __always_inline
If CONFIG_FUNCTION_GRAPH_TRACER is enabled, the function arch_counter_get_cntvct() is marked as notrace. However, the function __arch_counter_get_cntvct() is only marked as inline, so if CONFIG_OPTIMIZE_INLINING is set the compiler may emit these helpers out of line, making them traceable when they shouldn't be.

Rework so that the __arch_counter_get_*() functions are marked with __always_inline, ensuring they are inlined even when CONFIG_OPTIMIZE_INLINING is turned on.

Fixes: 0ea415390cd3 ("clocksource/arm_arch_timer: Use arch_timer_read_counter to access stable counters")
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
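A minimal userspace sketch of the pattern involved, not the kernel code itself: the attribute macros below are simplified stand-ins for the kernel's notrace and __always_inline definitions, and __read_counter() is a hypothetical helper standing in for the __arch_counter_get_*() accessors. It illustrates why the helper must be forced inline: if the compiler leaves it out of line, it becomes a separate symbol without the caller's notrace protection.

	/* Simplified stand-ins for the kernel's attribute macros. */
	#include <stdint.h>

	#define __always_inline inline __attribute__((__always_inline__))
	#define notrace         __attribute__((__no_instrument_function__))

	/*
	 * With plain "inline", CONFIG_OPTIMIZE_INLINING lets the compiler
	 * emit this as an out-of-line function, which the function graph
	 * tracer can then instrument. __always_inline forces it to be
	 * folded into its caller, so it inherits the caller's notrace.
	 */
	static __always_inline uint64_t __read_counter(void)
	{
		/* Stand-in for the CNTVCT_EL0 system-register read. */
		return 0;
	}

	/* The exported accessor stays notrace, as in the kernel. */
	static notrace uint64_t arch_counter_get_cntvct(void)
	{
		return __read_counter();
	}

	int main(void)
	{
		return (int)arch_counter_get_cntvct();
	}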
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/arch_timer.h | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index b7bca1ae09e6..50b3ab7ded4f 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -193,7 +193,7 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
: "=r" (tmp) : "r" (_val)); \
} while (0)
-static inline u64 __arch_counter_get_cntpct_stable(void)
+static __always_inline u64 __arch_counter_get_cntpct_stable(void)
{
u64 cnt;
@@ -203,7 +203,7 @@ static inline u64 __arch_counter_get_cntpct_stable(void)
return cnt;
}
-static inline u64 __arch_counter_get_cntpct(void)
+static __always_inline u64 __arch_counter_get_cntpct(void)
{
u64 cnt;
@@ -213,7 +213,7 @@ static inline u64 __arch_counter_get_cntpct(void)
return cnt;
}
-static inline u64 __arch_counter_get_cntvct_stable(void)
+static __always_inline u64 __arch_counter_get_cntvct_stable(void)
{
u64 cnt;
@@ -223,7 +223,7 @@ static inline u64 __arch_counter_get_cntvct_stable(void)
return cnt;
}
-static inline u64 __arch_counter_get_cntvct(void)
+static __always_inline u64 __arch_counter_get_cntvct(void)
{
u64 cnt;