author	Joshua Kinard <kumba@gentoo.org>	2012-06-25 03:01:34 +0200
committer	Ralf Baechle <ralf@linux-mips.org>	2012-10-11 11:02:36 +0200
commit	b4f2a17ba96a79f1069a2c0f1c648cf6d497f2f3 (patch)
tree	75f44d8efb4748d9639869dc7089533b0964b037 /arch
parent	Merge branch 'i2c-embedded/for-next' of git://git.pengutronix.de/git/wsa/linux (diff)
Improve atomic.h robustness
I've maintained this patch, originally from Thiemo Seufer in 2004, for a
long time, but I think it's time for it to get a look for possible
inclusion. I have had no problems with it across various SGI systems over
the years.

To quote the original post here:
http://www.linux-mips.org/archives/linux-mips/2004-12/msg00000.html

"The atomic functions so far use memory references for the inline
assembler to access the semaphore. This can lead to additional
instructions in the ll/sc loop, because newer compilers no longer expand
the memory reference but leave it to the assembler. The appended patch
uses registers instead, and makes the ll/sc arguments more explicit. In
some cases it will also lead to better register scheduling because the
register isn't bound to an output any more."

Signed-off-by: Joshua Kinard <kumba@gentoo.org>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/4029/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
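The change is mechanical but easiest to see in isolation. Below is a
minimal sketch of the constraint style the patch moves to, not the
kernel's actual code: sketch_atomic_add is a hypothetical name, the plain
beqz retry loop stands in for the kernel's R10000 and LL/SC variants, and
a MIPS32-class target is assumed. The counter becomes a single read-write
"+m" operand shared by the ll and sc instructions, rather than a matched
"=m" output plus "m" input, so the compiler materializes exactly one
addressing form for the word.

static inline void sketch_atomic_add(int i, int *counter)
{
	int temp;

	__asm__ __volatile__(
	"1:	ll	%0, %1		\n"	/* load-linked the counter    */
	"	addu	%0, %2		\n"	/* add the increment          */
	"	sc	%0, %1		\n"	/* store-conditional it back  */
	"	beqz	%0, 1b		\n"	/* retry if the store failed  */
	: "=&r" (temp), "+m" (*counter)	/* one read-write memory operand */
	: "Ir" (i));			/* 16-bit immediate or register  */
}

With the old "=m"/"m" pairing, the same lvalue appeared as both an output
and an input, and nothing obliged the compiler to use the same address
expression for both, which is the source of the extra instructions the
quoted post describes.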
Diffstat (limited to 'arch')
-rw-r--r--  arch/mips/include/asm/atomic.h | 64
1 file changed, 29 insertions(+), 35 deletions(-)
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 3f4c5cb6433e..01cc6ba64831 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -59,8 +59,8 @@ static __inline__ void atomic_add(int i, atomic_t * v)
" sc %0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ : "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} else if (kernel_uses_llsc) {
int temp;
@@ -71,8 +71,8 @@ static __inline__ void atomic_add(int i, atomic_t * v)
" addu %0, %2 \n"
" sc %0, %1 \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ : "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} while (unlikely(!temp));
} else {
unsigned long flags;
@@ -102,8 +102,8 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
" sc %0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ : "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} else if (kernel_uses_llsc) {
int temp;
@@ -114,8 +114,8 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
" subu %0, %2 \n"
" sc %0, %1 \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ : "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} while (unlikely(!temp));
} else {
unsigned long flags;
@@ -146,9 +146,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
" beqzl %0, 1b \n"
" addu %0, %1, %3 \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} else if (kernel_uses_llsc) {
int temp;
@@ -159,9 +158,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
" addu %0, %1, %3 \n"
" sc %0, %2 \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} while (unlikely(!result));
result = temp + i;
@@ -212,9 +210,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
" subu %0, %1, %3 \n"
" sc %0, %2 \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} while (unlikely(!result));
result = temp - i;
@@ -262,7 +259,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
" .set reorder \n"
"1: \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
: "Ir" (i), "m" (v->counter)
: "memory");
} else if (kernel_uses_llsc) {
@@ -280,9 +277,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
" .set reorder \n"
"1: \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} else {
unsigned long flags;
@@ -430,8 +426,8 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
" scd %0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ : "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} else if (kernel_uses_llsc) {
long temp;
@@ -442,8 +438,8 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
" daddu %0, %2 \n"
" scd %0, %1 \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ : "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} while (unlikely(!temp));
} else {
unsigned long flags;
@@ -473,8 +469,8 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
" scd %0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ : "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} else if (kernel_uses_llsc) {
long temp;
@@ -485,8 +481,8 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
" dsubu %0, %2 \n"
" scd %0, %1 \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ : "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} while (unlikely(!temp));
} else {
unsigned long flags;
@@ -517,9 +513,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
" beqzl %0, 1b \n"
" daddu %0, %1, %3 \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} else if (kernel_uses_llsc) {
long temp;
@@ -649,9 +644,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
" .set reorder \n"
"1: \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} else {
unsigned long flags;
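
For context, a hedged usage sketch of the primitives this diff touches;
example_count and the printed message are illustrative, not part of the
patch:

#include <linux/atomic.h>
#include <linux/printk.h>

static atomic_t example_count = ATOMIC_INIT(0);

static void example(void)
{
	/* atomic_add() returns void; atomic_sub_return() yields the new value. */
	atomic_add(2, &example_count);
	if (atomic_sub_return(1, &example_count) == 1)
		pr_info("example_count is back to one\n");
}

The constraint changes above alter none of these call signatures; only
the code generated inside the ll/sc loops changes.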