author    Peter Zijlstra <peterz@infradead.org>    2015-05-27 03:39:36 +0200
committer Rusty Russell <rusty@rustcorp.com.au>    2015-05-28 04:02:06 +0200
commit    7fc26327b75685f37f58d64bdb061460f834f80d (patch)
tree      69fecbbe48ac91608e88987c0bd0c8e5cebfa1b5 /kernel
parent    rcu: Move lockless_dereference() out of rcupdate.h (diff)
seqlock: Introduce raw_read_seqcount_latch()
Because with latches there is a strict data dependency on the seq load, we can
avoid the rmb in favour of a read_barrier_depends.

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
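For context, here is a minimal sketch of the two read helpers this change swaps, modelled on the seqlock API of this era; the authoritative definitions live in include/linux/seqlock.h and may differ in detail. raw_read_seqcount() follows the sequence load with a full smp_rmb(), whereas raw_read_seqcount_latch() only needs a dependency barrier (via lockless_dereference(), i.e. READ_ONCE() plus smp_read_barrier_depends()), because the caller uses the loaded value to compute the address of the data it reads next.

/* Sketch only -- approximations of the helpers named above. */

static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);

	smp_rmb();	/* order the seq load before the later data loads */
	return ret;
}

/*
 * Latch variant: the returned seq is used to index the data copy
 * (base + (seq & 0x01)), so the subsequent loads carry a data
 * dependency on the seq load and a dependency barrier suffices.
 */
static inline int raw_read_seqcount_latch(seqcount_t *s)
{
	return lockless_dereference(s->sequence);
}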
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/time/timekeeping.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index cbfedddbf0cb..266dafe8f015 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -393,7 +393,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 	u64 now;
 
 	do {
-		seq = raw_read_seqcount(&tkf->seq);
+		seq = raw_read_seqcount_latch(&tkf->seq);
 		tkr = tkf->base + (seq & 0x01);
 		now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
 	} while (read_seqcount_retry(&tkf->seq, seq));
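For readers unfamiliar with the latch scheme this fast path relies on, the sketch below shows the writer side it pairs with. It is illustrative only: raw_write_seqcount_latch() is the real seqlock helper, but the function name latch_update() and the exact update sequence are assumptions standing in for update_fast_timekeeper() in this file. The point is that the sequence count is bumped around each copy update, so a reader that picked base[seq & 0x01] either sees a copy that is not currently being written or retries via read_seqcount_retry().

/* Illustrative latch writer, not the in-tree implementation. */
static void latch_update(struct tk_fast *tkf, const struct tk_read_base *src)
{
	struct tk_read_base *base = tkf->base;

	raw_write_seqcount_latch(&tkf->seq);	/* seq odd: readers move to base[1] */
	memcpy(base + 0, src, sizeof(*base));	/* safe to rewrite base[0] */

	raw_write_seqcount_latch(&tkf->seq);	/* seq even: readers move to base[0] */
	memcpy(base + 1, src, sizeof(*base));	/* safe to rewrite base[1] */
}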