From aa68744f80bfb6f26fbe7f10e42876066f7dac1b Mon Sep 17 00:00:00 2001
From: Waiman Long
Date: Mon, 9 Nov 2015 19:09:23 -0500
Subject: locking/qspinlock: Avoid redundant read of next pointer

With optimistic prefetch of the next node cacheline, the next pointer
may have been properly initialized. As a result, the reading of
node->next in the contended path may be redundant. This patch
eliminates the redundant read if the next pointer value is not NULL.

Signed-off-by: Waiman Long
Signed-off-by: Peter Zijlstra (Intel)
Cc: Andrew Morton
Cc: Davidlohr Bueso
Cc: Douglas Hatch
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Scott J Norton
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/1447114167-47185-4-git-send-email-Waiman.Long@hpe.com
Signed-off-by: Ingo Molnar
---
 kernel/locking/qspinlock.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 365b2033f55e..986207887def 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -396,6 +396,7 @@ queue:
 	 * p,*,* -> n,*,*
 	 */
 	old = xchg_tail(lock, tail);
+	next = NULL;
 
 	/*
 	 * if there was a previous node; link it and wait until reaching the
@@ -463,10 +464,12 @@ queue:
 	}
 
 	/*
-	 * contended path; wait for next, release.
+	 * contended path; wait for next if not observed yet, release.
 	 */
-	while (!(next = READ_ONCE(node->next)))
-		cpu_relax();
+	if (!next) {
+		while (!(next = READ_ONCE(node->next)))
+			cpu_relax();
+	}
 
 	arch_mcs_spin_unlock_contended(&next->locked);
 	pv_kick_node(lock, next);
--
cgit v1.2.3
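
Annotation (not part of the patch): for readers outside the kernel tree, below is
a minimal user-space sketch of the same idea, using an MCS-style queue lock built
on C11 atomics instead of the kernel's READ_ONCE()/xchg_tail()/cpu_relax()
primitives. The names (mcs_node, mcs_lock, mcs_unlock) are invented for this
illustration; the point is the "if (!next)" guard in the release path, which
skips the re-read loop when a non-NULL successor pointer was already observed.

#include <stdatomic.h>
#include <stddef.h>

struct mcs_node {
	_Atomic(struct mcs_node *) next;
	atomic_int locked;
};

static _Atomic(struct mcs_node *) tail = NULL;

static void mcs_lock(struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
	atomic_store_explicit(&node->locked, 0, memory_order_relaxed);

	/* Analogue of xchg_tail(): atomically enqueue ourselves. */
	prev = atomic_exchange_explicit(&tail, node, memory_order_acq_rel);
	if (prev) {
		/* Link behind the previous waiter, then spin until released. */
		atomic_store_explicit(&prev->next, node, memory_order_release);
		while (!atomic_load_explicit(&node->locked, memory_order_acquire))
			;
	}
}

static void mcs_unlock(struct mcs_node *node)
{
	/* The successor may already be visible; reuse that observation. */
	struct mcs_node *next =
		atomic_load_explicit(&node->next, memory_order_acquire);

	if (!next) {
		/* No successor seen: try the uncontended exit first. */
		struct mcs_node *expected = node;

		if (atomic_compare_exchange_strong_explicit(&tail, &expected,
				NULL, memory_order_acq_rel, memory_order_acquire))
			return;
		/* A successor enqueued but has not linked in yet; wait. */
		while (!(next = atomic_load_explicit(&node->next,
						     memory_order_acquire)))
			;
	}
	/* Hand the lock to the successor. */
	atomic_store_explicit(&next->locked, 1, memory_order_release);
}

In the patch itself the shape is the same: next is seeded to NULL right after
xchg_tail(), may be filled in opportunistically while the queue head spins (the
optimistic prefetch mentioned in the changelog), and the final wait loop in the
contended path runs only when next is still NULL.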