Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/percpu.h	71
1 file changed, 35 insertions, 36 deletions
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 38f9e965ff96..dd0cd4b6a76f 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -177,39 +177,6 @@ do {						\
 	}						\
 } while (0)
 
-/*
- * Add return operation
- */
-#define percpu_add_return_op(var, val)				\
-({								\
-	typeof(var) paro_ret__ = val;				\
-	switch (sizeof(var)) {					\
-	case 1:							\
-		asm("xaddb %0, "__percpu_arg(1)			\
-			    : "+q" (paro_ret__), "+m" (var)	\
-			    : : "memory");			\
-		break;						\
-	case 2:							\
-		asm("xaddw %0, "__percpu_arg(1)			\
-			    : "+r" (paro_ret__), "+m" (var)	\
-			    : : "memory");			\
-		break;						\
-	case 4:							\
-		asm("xaddl %0, "__percpu_arg(1)			\
-			    : "+r" (paro_ret__), "+m" (var)	\
-			    : : "memory");			\
-		break;						\
-	case 8:							\
-		asm("xaddq %0, "__percpu_arg(1)			\
-			    : "+re" (paro_ret__), "+m" (var)	\
-			    : : "memory");			\
-		break;						\
-	default: __bad_percpu_size();				\
-	}							\
-	paro_ret__ += val;					\
-	paro_ret__;						\
-})
-
 #define percpu_from_op(op, var, constraint)		\
 ({							\
 	typeof(var) pfo_ret__;				\
@@ -263,6 +230,39 @@ do {						\
 })
 
 /*
+ * Add return operation
+ */
+#define percpu_add_return_op(var, val)				\
+({								\
+	typeof(var) paro_ret__ = val;				\
+	switch (sizeof(var)) {					\
+	case 1:							\
+		asm("xaddb %0, "__percpu_arg(1)			\
+			    : "+q" (paro_ret__), "+m" (var)	\
+			    : : "memory");			\
+		break;						\
+	case 2:							\
+		asm("xaddw %0, "__percpu_arg(1)			\
+			    : "+r" (paro_ret__), "+m" (var)	\
+			    : : "memory");			\
+		break;						\
+	case 4:							\
+		asm("xaddl %0, "__percpu_arg(1)			\
+			    : "+r" (paro_ret__), "+m" (var)	\
+			    : : "memory");			\
+		break;						\
+	case 8:							\
+		asm("xaddq %0, "__percpu_arg(1)			\
+			    : "+re" (paro_ret__), "+m" (var)	\
+			    : : "memory");			\
+		break;						\
+	default: __bad_percpu_size();				\
+	}							\
+	paro_ret__ += val;					\
+	paro_ret__;						\
+})
+
+/*
  * percpu_read() makes gcc load the percpu variable every time it is
  * accessed while percpu_read_stable() allows the value to be cached.
  * percpu_read_stable() is more efficient and can be used if its value
@@ -352,6 +352,7 @@ do {						\
 #define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
 
 #define this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
@@ -359,14 +360,12 @@ do {						\
 #define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
 #define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
 
 #define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
-
-#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
-#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
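For readers skimming the diff: this is a pure code move plus wiring. percpu_add_return_op() is relocated below percpu_from_op(), and the two 8-byte macros __this_cpu_add_return_8()/this_cpu_add_return_8() move from a trailing afterthought into the same ordered blocks as the other 8-byte operations. The macro itself builds on XADD, which exchanges the register operand with the memory operand while storing their sum in memory, so after the instruction paro_ret__ holds the *old* per-cpu value; the final paro_ret__ += val turns that into the new value, giving atomic_add_return()-style semantics. Below is a minimal userspace sketch of the case 8 branch, assuming x86-64 and GNU inline asm; it uses a plain memory operand instead of the kernel's %gs-based __percpu_arg() addressing, and the names add_return and counter are illustrative only, not kernel API.

/*
 * Userspace illustration only -- NOT kernel code.  Mirrors the case 8:
 * branch of percpu_add_return_op() without the per-cpu segment prefix.
 */
#include <stdio.h>

static long counter;			/* stands in for a per-cpu variable */

static long add_return(long *var, long val)
{
	long paro_ret = val;

	/*
	 * xaddq: register and memory operands are exchanged and their sum
	 * is stored in memory; the register is left holding the old value.
	 */
	asm("xaddq %0, %1"
	    : "+r" (paro_ret), "+m" (*var)
	    : : "memory");

	return paro_ret + val;		/* old value + val == new value */
}

int main(void)
{
	printf("%ld\n", add_return(&counter, 5));	/* prints 5 */
	printf("%ld\n", add_return(&counter, 3));	/* prints 8 */
	printf("%ld\n", counter);			/* prints 8 */
	return 0;
}

The single read-modify-write instruction is the point of the design: without a lock prefix it is not atomic against other CPUs (hence per-cpu use only), but an interrupt on the local CPU cannot land between the load and the store, so the operation needs no IRQ disabling.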