| author | Christophe Leroy <christophe.leroy@c-s.fr> | 2019-12-02 08:57:28 +0100 |
|---|---|---|
| committer | Michael Ellerman <mpe@ellerman.id.au> | 2020-01-23 11:31:15 +0100 |
| commit | 654abc69ef2e69712e6d4e8a6cb9292b97a4aa39 (patch) | |
| tree | 53db4020b085ae65ff852b613d5e825f3ca39f18 /arch/powerpc/kernel/vdso32 | |
| parent | powerpc/32: Add VDSO version of getcpu on non SMP (diff) | |
powerpc/vdso32: Add support for CLOCK_{REALTIME/MONOTONIC}_COARSE
This is copied and adapted from commit 5c929885f1bb ("powerpc/vdso64:
Add support for CLOCK_{REALTIME/MONOTONIC}_COARSE")
by Santosh Sivaraj <santosh@fossix.org>.
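For context, the fast path added by this patch serves ordinary clock_gettime() calls that request the coarse clock IDs. A minimal user-space sketch of such a call (assuming Linux with glibc; the program is illustrative and not part of the patch):

```c
/*
 * Illustrative only: a user-space call that ends up in the vDSO fast
 * path added by this patch. Assumes Linux with glibc.
 */
#define _GNU_SOURCE		/* make sure CLOCK_*_COARSE are visible */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) != 0) {
		perror("clock_gettime");
		return 1;
	}
	printf("coarse monotonic: %lld.%09ld\n",
	       (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
```

With this patch, a call like this on a 32-bit powerpc kernel is expected to be satisfied entirely in the vDSO instead of falling back to the clock_gettime syscall.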
Benchmark from vdsotest-all (all figures in nsec/call):

| clock | syscall | libc | vdso |
|---|---|---|---|
| clock-gettime-realtime | 3601 | 1072 | 931 |
| clock-gettime-monotonic | 4034 | 1213 | 1076 |
| clock-gettime-realtime-coarse | 2722 | 805 | 668 |
| clock-gettime-monotonic-coarse | 2949 | 882 | 745 |
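The figures above come from vdsotest. As a rough, hedged illustration of how a nsec/call number of this kind can be obtained (this is not vdsotest's actual implementation), one can time a tight loop of calls and divide by the iteration count:

```c
/*
 * Rough sketch of a nsec/call measurement: NOT vdsotest's actual code.
 * Times a tight loop of coarse clock_gettime() calls against a precise
 * monotonic clock and divides by the iteration count.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>

#define CALLS 1000000

int main(void)
{
	struct timespec start, end, tmp;
	long long elapsed_ns;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (int i = 0; i < CALLS; i++)
		clock_gettime(CLOCK_MONOTONIC_COARSE, &tmp);
	clock_gettime(CLOCK_MONOTONIC, &end);

	elapsed_ns = (end.tv_sec - start.tv_sec) * 1000000000LL +
		     (end.tv_nsec - start.tv_nsec);
	printf("clock-gettime-monotonic-coarse: %lld nsec/call\n",
	       elapsed_ns / CALLS);
	return 0;
}
```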
Additional test passed with:
vdsotest -d 30 clock-gettime-monotonic-coarse verify
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://github.com/linuxppc/issues/issues/41
Link: https://lore.kernel.org/r/d1d24a376e396540194eeb85a2efe481e92ade24.1575273217.git.christophe.leroy@c-s.fr
Diffstat (limited to 'arch/powerpc/kernel/vdso32')
-rw-r--r-- | arch/powerpc/kernel/vdso32/gettimeofday.S | 64 |
1 file changed, 57 insertions, 7 deletions
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 3306672f57a9..e9ce8ee56edb 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -69,7 +69,13 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
 	cmpli	cr0,r3,CLOCK_REALTIME
 	cmpli	cr1,r3,CLOCK_MONOTONIC
 	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
-	bne	cr0,99f
+
+	cmpli	cr5,r3,CLOCK_REALTIME_COARSE
+	cmpli	cr6,r3,CLOCK_MONOTONIC_COARSE
+	cror	cr5*4+eq,cr5*4+eq,cr6*4+eq
+
+	cror	cr0*4+eq,cr0*4+eq,cr5*4+eq
+	bne	cr0, .Lgettime_fallback
 
 	mflr	r12			/* r12 saves lr */
 	.cfi_register lr,r12
@@ -78,8 +84,10 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
 	mr	r9,r3			/* datapage ptr in r9 */
 	lis	r7,NSEC_PER_SEC@h	/* want nanoseconds */
 	ori	r7,r7,NSEC_PER_SEC@l
-50:	bl	__do_get_tspec@local	/* get sec/nsec from tb & kernel */
-	bne	cr1,80f			/* not monotonic -> all done */
+	beq	cr5, .Lcoarse_clocks
+.Lprecise_clocks:
+	bl	__do_get_tspec@local	/* get sec/nsec from tb & kernel */
+	bne	cr1, .Lfinish		/* not monotonic -> all done */
 
 	/*
 	 * CLOCK_MONOTONIC
@@ -103,12 +111,53 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
 	add	r9,r9,r0
 	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
 	cmpl	cr0,r8,r0		/* check if updated */
-	bne-	50b
+	bne-	.Lprecise_clocks
+	b	.Lfinish_monotonic
+
+	/*
+	 * For coarse clocks we get data directly from the vdso data page, so
+	 * we don't need to call __do_get_tspec, but we still need to do the
+	 * counter trick.
+	 */
+.Lcoarse_clocks:
+	lwz	r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
+	andi.	r0,r8,1			/* pending update ? loop */
+	bne-	.Lcoarse_clocks
+	add	r9,r9,r0		/* r0 is already 0 */
+
+	/*
+	 * CLOCK_REALTIME_COARSE, below values are needed for MONOTONIC_COARSE
+	 * too
+	 */
+	lwz	r3,STAMP_XTIME_SEC+LOPART(r9)
+	lwz	r4,STAMP_XTIME_NSEC+LOPART(r9)
+	bne	cr6,1f
+
+	/* CLOCK_MONOTONIC_COARSE */
+	lwz	r5,(WTOM_CLOCK_SEC+LOPART)(r9)
+	lwz	r6,WTOM_CLOCK_NSEC(r9)
+
+	/* check if counter has updated */
+	or	r0,r6,r5
+1:	or	r0,r0,r3
+	or	r0,r0,r4
+	xor	r0,r0,r0
+	add	r3,r3,r0
+	lwz	r0,CFG_TB_UPDATE_COUNT+LOPART(r9)
+	cmpl	cr0,r0,r8		/* check if updated */
+	bne-	.Lcoarse_clocks
+
+	/* Counter has not updated, so continue calculating proper values for
+	 * sec and nsec if monotonic coarse, or just return with the proper
+	 * values for realtime.
+	 */
+	bne	cr6, .Lfinish
 
 	/* Calculate and store result. Note that this mimics the C code,
 	 * which may cause funny results if nsec goes negative... is that
 	 * possible at all ?
 	 */
+.Lfinish_monotonic:
 	add	r3,r3,r5
 	add	r4,r4,r6
 	cmpw	cr0,r4,r7
@@ -116,11 +165,12 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
 	blt	1f
 	subf	r4,r7,r4
 	addi	r3,r3,1
-1:	bge	cr1,80f
+1:	bge	cr1, .Lfinish
 	addi	r3,r3,-1
 	add	r4,r4,r7
 
-80:	stw	r3,TSPC32_TV_SEC(r11)
+.Lfinish:
+	stw	r3,TSPC32_TV_SEC(r11)
 	stw	r4,TSPC32_TV_NSEC(r11)
 
 	mtlr	r12
@@ -131,7 +181,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
 	/*
 	 * syscall fallback
 	 */
-99:
+.Lgettime_fallback:
 	li	r0,__NR_clock_gettime
 	.cfi_restore lr
 	sc
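For readers more at home in C than in powerpc assembly, here is a hedged sketch of what the new .Lcoarse_clocks path does: read the coarse timestamp fields directly from the vDSO data page, protected by the same update-counter retry ("counter trick") the precise path uses, then apply the wall-to-monotonic offset for CLOCK_MONOTONIC_COARSE. The struct layout, field names, and helper below are illustrative only, not the kernel's actual vdso_data definitions:

```c
/*
 * Hedged C sketch of the coarse-clock read added by this patch.
 * The structure and names are illustrative, NOT the kernel's layout.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct vdso_data_sketch {
	volatile uint32_t tb_update_count;	/* odd while the kernel is updating */
	uint32_t stamp_xtime_sec;		/* coarse CLOCK_REALTIME seconds */
	uint32_t stamp_xtime_nsec;		/* coarse CLOCK_REALTIME nanoseconds */
	int32_t  wtom_sec;			/* wall-to-monotonic offset, seconds */
	int32_t  wtom_nsec;			/* wall-to-monotonic offset, nanoseconds */
};

static void coarse_gettime(const struct vdso_data_sketch *d, int monotonic,
			   struct timespec *ts)
{
	uint32_t seq;
	int64_t sec, nsec;

	do {
		/* mirrors: lwz r8,CFG_TB_UPDATE_COUNT / andi. r0,r8,1 / bne- */
		do {
			seq = d->tb_update_count;
		} while (seq & 1);		/* odd: update in progress, spin */

		sec  = d->stamp_xtime_sec;
		nsec = d->stamp_xtime_nsec;
		if (monotonic) {
			sec  += d->wtom_sec;
			nsec += d->wtom_nsec;
		}
		/* re-read the counter; retry if the kernel raced with us */
	} while (d->tb_update_count != seq);

	/* fold nsec back into [0, 1e9), as .Lfinish_monotonic does */
	if (nsec >= 1000000000LL) {
		nsec -= 1000000000LL;
		sec++;
	} else if (nsec < 0) {
		nsec += 1000000000LL;
		sec--;
	}

	ts->tv_sec  = (time_t)sec;
	ts->tv_nsec = (long)nsec;
}

int main(void)
{
	/* Dummy page contents, just to exercise the helper. */
	struct vdso_data_sketch page = {
		.tb_update_count = 2,		/* even: no update in progress */
		.stamp_xtime_sec = 1000,
		.stamp_xtime_nsec = 999999999,
		.wtom_sec = -3,
		.wtom_nsec = 5,
	};
	struct timespec ts;

	coarse_gettime(&page, 1, &ts);		/* 1 = monotonic-coarse */
	printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
```

The inner loop corresponds to the andi. r0,r8,1 spin in the assembly: an odd counter means the kernel is mid-update. The outer re-check corresponds to cmpl cr0,r0,r8 / bne- .Lcoarse_clocks, which retries if the counter changed while the fields were being read.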