path: root/arch/sparc/kernel/time_64.c
author     Pavel Tatashin <pasha.tatashin@oracle.com>  2017-06-12 22:41:48 +0200
committer  David S. Miller <davem@davemloft.net>       2017-06-13 00:44:04 +0200
commit     eae3fc9871111e9bbc77dad5481a3e805e02ac46 (patch)
tree       46e76b92d42fe97e75fe59c77c2cca8e5f7713d6 /arch/sparc/kernel/time_64.c
parent     sparc64: add hot-patched and inlined get_tick() (diff)
sparc64: optimize functions that access tick
Replace read tick function pointers with the new hot-patched get_tick(). This optimizes the performance of functions such as sched_clock().

Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Reviewed-by: Steven Sistare <steven.sistare@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
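For context, a minimal sketch of what the hot-patched get_tick() introduced by the parent commit ("sparc64: add hot-patched and inlined get_tick()") might look like. The register access and the boot-time patching done by get_tick_patch() (visible in the hunk context below) are simplified assumptions for illustration, not the kernel's literal implementation:

	/*
	 * Illustration only (assumed, simplified): an inline get_tick() that
	 * reads the tick register directly, replacing the indirect call
	 * through tick_operations.get_tick(). In the real kernel the read
	 * site is recorded so get_tick_patch() can hot-patch it at boot for
	 * the CPU's preferred tick source.
	 */
	static inline unsigned long get_tick(void)
	{
		unsigned long tick;

		/* Single register read, no function-pointer indirection. */
		__asm__ __volatile__("rd %%tick, %0" : "=r" (tick));
		return tick;
	}

The win in __delay() and sched_clock() below is simply that each sample of the counter becomes one inlined register read instead of a load of the function pointer plus an indirect call.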
Diffstat (limited to 'arch/sparc/kernel/time_64.c')
-rw-r--r--  arch/sparc/kernel/time_64.c | 22
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index ca27415c393a..a612a91cb9cd 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -752,12 +752,10 @@ static unsigned long tb_ticks_per_usec __read_mostly;
void __delay(unsigned long loops)
{
- unsigned long bclock, now;
+ unsigned long bclock = get_tick();
- bclock = tick_operations.get_tick();
- do {
- now = tick_operations.get_tick();
- } while ((now-bclock) < loops);
+ while ((get_tick() - bclock) < loops)
+ ;
}
EXPORT_SYMBOL(__delay);
@@ -769,7 +767,7 @@ EXPORT_SYMBOL(udelay);
static u64 clocksource_tick_read(struct clocksource *cs)
{
- return tick_operations.get_tick();
+ return get_tick();
}
static void __init get_tick_patch(void)
@@ -853,13 +851,19 @@ unsigned long long sched_clock(void)
{
unsigned long quotient = tick_operations.ticks_per_nsec_quotient;
unsigned long offset = tick_operations.offset;
- unsigned long ticks = tick_operations.get_tick();
- return ((ticks * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT) - offset;
+ /* Use barrier so the compiler emits the loads first and overlaps load
+ * latency with reading tick, because reading %tick/%stick is a
+ * post-sync instruction that will flush and restart subsequent
+ * instructions after it commits.
+ */
+ barrier();
+
+ return ((get_tick() * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT) - offset;
}
int read_current_timer(unsigned long *timer_val)
{
- *timer_val = tick_operations.get_tick();
+ *timer_val = get_tick();
return 0;
}