Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/assembler.h      |  28
-rw-r--r--  arch/arm64/include/asm/atomic.h         |  99
-rw-r--r--  arch/arm64/include/asm/atomic_ll_sc.h   | 217
-rw-r--r--  arch/arm64/include/asm/atomic_lse.h     | 395
-rw-r--r--  arch/arm64/include/asm/cache.h          |   2
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h        |  45
-rw-r--r--  arch/arm64/include/asm/compat.h         |   2
-rw-r--r--  arch/arm64/include/asm/cpu_ops.h        |   3
-rw-r--r--  arch/arm64/include/asm/cpufeature.h     |  28
-rw-r--r--  arch/arm64/include/asm/cputype.h        |  21
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h |   3
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h    |   3
-rw-r--r--  arch/arm64/include/asm/efi.h            |   4
-rw-r--r--  arch/arm64/include/asm/esr.h            |   3
-rw-r--r--  arch/arm64/include/asm/exception.h      |   2
-rw-r--r--  arch/arm64/include/asm/fpsimd.h         |   2
-rw-r--r--  arch/arm64/include/asm/futex.h          |   3
-rw-r--r--  arch/arm64/include/asm/hw_breakpoint.h  |   3
-rw-r--r--  arch/arm64/include/asm/io.h             |  10
-rw-r--r--  arch/arm64/include/asm/irqflags.h       |   5
-rw-r--r--  arch/arm64/include/asm/kasan.h          |  11
-rw-r--r--  arch/arm64/include/asm/lse.h            |  49
-rw-r--r--  arch/arm64/include/asm/memory.h         | 141
-rw-r--r--  arch/arm64/include/asm/mmu.h            |   2
-rw-r--r--  arch/arm64/include/asm/mmu_context.h    |   4
-rw-r--r--  arch/arm64/include/asm/pci.h            |   2
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h  |   2
-rw-r--r--  arch/arm64/include/asm/pgtable.h        |  23
-rw-r--r--  arch/arm64/include/asm/pointer_auth.h   |   2
-rw-r--r--  arch/arm64/include/asm/proc-fns.h       |   2
-rw-r--r--  arch/arm64/include/asm/processor.h      |  15
-rw-r--r--  arch/arm64/include/asm/ptrace.h         |   5
-rw-r--r--  arch/arm64/include/asm/signal32.h       |   2
-rw-r--r--  arch/arm64/include/asm/sysreg.h         |  32
-rw-r--r--  arch/arm64/include/asm/thread_info.h    |  29
-rw-r--r--  arch/arm64/include/asm/tlbflush.h       |   1
-rw-r--r--  arch/arm64/include/asm/topology.h       |  23
-rw-r--r--  arch/arm64/include/asm/uaccess.h        |  12
-rw-r--r--  arch/arm64/include/asm/vdso.h           |   4
-rw-r--r--  arch/arm64/include/asm/vdso_datapage.h  |   4
-rw-r--r--  arch/arm64/include/uapi/asm/stat.h      |  17
41 files changed, 590 insertions, 670 deletions
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index e3a15c751b13..b8cf7c85ffa2 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -124,17 +124,6 @@ alternative_endif
.endm
/*
- * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
- * of bounds.
- */
- .macro mask_nospec64, idx, limit, tmp
- sub \tmp, \idx, \limit
- bic \tmp, \tmp, \idx
- and \idx, \idx, \tmp, asr #63
- csdb
- .endm
-
-/*
* NOP sequence
*/
.macro nops, num
@@ -350,6 +339,13 @@ alternative_endif
.endm
/*
+ * tcr_set_t1sz - update TCR.T1SZ
+ */
+ .macro tcr_set_t1sz, valreg, t1sz
+ bfi \valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
+ .endm
+
+/*
* tcr_compute_pa_size - set TCR.(I)PS to the highest supported
* ID_AA64MMFR0_EL1.PARange value
*
@@ -538,9 +534,13 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
* In future this may be nop'ed out when dealing with 52-bit kernel VAs.
* ttbr: Value of ttbr to set, modified.
*/
- .macro offset_ttbr1, ttbr
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+ .macro offset_ttbr1, ttbr, tmp
+#ifdef CONFIG_ARM64_VA_BITS_52
+ mrs_s \tmp, SYS_ID_AA64MMFR2_EL1
+ and \tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+ cbnz \tmp, .Lskipoffs_\@
orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
+.Lskipoffs_\@ :
#endif
.endm
@@ -550,7 +550,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
* to be nop'ed out when dealing with 52-bit kernel VAs.
*/
.macro restore_ttbr1, ttbr
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#ifdef CONFIG_ARM64_VA_BITS_52
bic \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
.endm
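
For reference, the runtime check that the new offset_ttbr1 performs can be restated in C. This is an illustrative sketch, not code from the patch; it assumes the kernel's read_sysreg_s() accessor and the same ID_AA64MMFR2_LVA_SHIFT / TTBR1_BADDR_4852_OFFSET definitions the macro uses:

	/* Sketch only: apply the 48/52-bit TTBR1 offset unless the CPU
	 * advertises LVA (52-bit VA) support in ID_AA64MMFR2_EL1. */
	static inline u64 offset_ttbr1_sketch(u64 ttbr)
	{
		u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);

		if (!(mmfr2 & (0xfUL << ID_AA64MMFR2_LVA_SHIFT)))
			ttbr |= TTBR1_BADDR_4852_OFFSET;
		return ttbr;
	}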
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 657b0457d83c..9543b5e0534d 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -13,21 +13,91 @@
#include <linux/types.h>
#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
#include <asm/lse.h>
-#ifdef __KERNEL__
-
-#define __ARM64_IN_ATOMIC_IMPL
-
-#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE)
-#include <asm/atomic_lse.h>
-#else
-#include <asm/atomic_ll_sc.h>
-#endif
-
-#undef __ARM64_IN_ATOMIC_IMPL
-
-#include <asm/cmpxchg.h>
+#define ATOMIC_OP(op) \
+static inline void arch_##op(int i, atomic_t *v) \
+{ \
+ __lse_ll_sc_body(op, i, v); \
+}
+
+ATOMIC_OP(atomic_andnot)
+ATOMIC_OP(atomic_or)
+ATOMIC_OP(atomic_xor)
+ATOMIC_OP(atomic_add)
+ATOMIC_OP(atomic_and)
+ATOMIC_OP(atomic_sub)
+
+#undef ATOMIC_OP
+
+#define ATOMIC_FETCH_OP(name, op) \
+static inline int arch_##op##name(int i, atomic_t *v) \
+{ \
+ return __lse_ll_sc_body(op##name, i, v); \
+}
+
+#define ATOMIC_FETCH_OPS(op) \
+ ATOMIC_FETCH_OP(_relaxed, op) \
+ ATOMIC_FETCH_OP(_acquire, op) \
+ ATOMIC_FETCH_OP(_release, op) \
+ ATOMIC_FETCH_OP( , op)
+
+ATOMIC_FETCH_OPS(atomic_fetch_andnot)
+ATOMIC_FETCH_OPS(atomic_fetch_or)
+ATOMIC_FETCH_OPS(atomic_fetch_xor)
+ATOMIC_FETCH_OPS(atomic_fetch_add)
+ATOMIC_FETCH_OPS(atomic_fetch_and)
+ATOMIC_FETCH_OPS(atomic_fetch_sub)
+ATOMIC_FETCH_OPS(atomic_add_return)
+ATOMIC_FETCH_OPS(atomic_sub_return)
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_FETCH_OPS
+
+#define ATOMIC64_OP(op) \
+static inline void arch_##op(long i, atomic64_t *v) \
+{ \
+ __lse_ll_sc_body(op, i, v); \
+}
+
+ATOMIC64_OP(atomic64_andnot)
+ATOMIC64_OP(atomic64_or)
+ATOMIC64_OP(atomic64_xor)
+ATOMIC64_OP(atomic64_add)
+ATOMIC64_OP(atomic64_and)
+ATOMIC64_OP(atomic64_sub)
+
+#undef ATOMIC64_OP
+
+#define ATOMIC64_FETCH_OP(name, op) \
+static inline long arch_##op##name(long i, atomic64_t *v) \
+{ \
+ return __lse_ll_sc_body(op##name, i, v); \
+}
+
+#define ATOMIC64_FETCH_OPS(op) \
+ ATOMIC64_FETCH_OP(_relaxed, op) \
+ ATOMIC64_FETCH_OP(_acquire, op) \
+ ATOMIC64_FETCH_OP(_release, op) \
+ ATOMIC64_FETCH_OP( , op)
+
+ATOMIC64_FETCH_OPS(atomic64_fetch_andnot)
+ATOMIC64_FETCH_OPS(atomic64_fetch_or)
+ATOMIC64_FETCH_OPS(atomic64_fetch_xor)
+ATOMIC64_FETCH_OPS(atomic64_fetch_add)
+ATOMIC64_FETCH_OPS(atomic64_fetch_and)
+ATOMIC64_FETCH_OPS(atomic64_fetch_sub)
+ATOMIC64_FETCH_OPS(atomic64_add_return)
+ATOMIC64_FETCH_OPS(atomic64_sub_return)
+
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_FETCH_OPS
+
+static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
+{
+ return __lse_ll_sc_body(atomic64_dec_if_positive, v);
+}
#define ATOMIC_INIT(i) { (i) }
@@ -157,5 +227,4 @@
#include <asm-generic/atomic-instrumented.h>
-#endif
-#endif
+#endif /* __ASM_ATOMIC_H */
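
To see what the ATOMIC_OP() wrappers above generate, ATOMIC_OP(atomic_add) expands (by hand, for illustration; the dispatch macro __lse_ll_sc_body() is defined in <asm/lse.h>, later in this diff) to roughly:

	/* Hand expansion of ATOMIC_OP(atomic_add) -- illustrative only. */
	static inline void arch_atomic_add(int i, atomic_t *v)
	{
		if (system_uses_lse_atomics())
			__lse_atomic_add(i, v);		/* LSE: stadd */
		else
			__ll_sc_atomic_add(i, v);	/* LL/SC: ldxr/stxr loop */
	}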
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index c8c850bc3dfb..7b012148bfd6 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -10,83 +10,92 @@
#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H
-#ifndef __ARM64_IN_ATOMIC_IMPL
-#error "please don't include this file directly"
+#include <linux/stringify.h>
+
+#if IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) && IS_ENABLED(CONFIG_AS_LSE)
+#define __LL_SC_FALLBACK(asm_ops) \
+" b 3f\n" \
+" .subsection 1\n" \
+"3:\n" \
+asm_ops "\n" \
+" b 4f\n" \
+" .previous\n" \
+"4:\n"
+#else
+#define __LL_SC_FALLBACK(asm_ops) asm_ops
+#endif
+
+#ifndef CONFIG_CC_HAS_K_CONSTRAINT
+#define K
#endif
/*
* AArch64 UP and SMP safe atomic ops. We use load exclusive and
* store exclusive to ensure that these are atomic. We may loop
* to ensure that the update happens.
- *
- * NOTE: these functions do *not* follow the PCS and must explicitly
- * save any clobbered registers other than x0 (regardless of return
- * value). This is achieved through -fcall-saved-* compiler flags for
- * this file, which unfortunately don't work on a per-function basis
- * (the optimize attribute silently ignores these options).
*/
-#define ATOMIC_OP(op, asm_op) \
-__LL_SC_INLINE void \
-__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \
+#define ATOMIC_OP(op, asm_op, constraint) \
+static inline void \
+__ll_sc_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
\
asm volatile("// atomic_" #op "\n" \
+ __LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ldxr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \
" stxr %w1, %w0, %2\n" \
-" cbnz %w1, 1b" \
+" cbnz %w1, 1b\n") \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i)); \
-} \
-__LL_SC_EXPORT(arch_atomic_##op);
+ : __stringify(constraint) "r" (i)); \
+}
-#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
-__LL_SC_INLINE int \
-__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \
+#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
+static inline int \
+__ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
\
asm volatile("// atomic_" #op "_return" #name "\n" \
+ __LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ld" #acq "xr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \
" st" #rel "xr %w1, %w0, %2\n" \
" cbnz %w1, 1b\n" \
-" " #mb \
+" " #mb ) \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i) \
+ : __stringify(constraint) "r" (i) \
: cl); \
\
return result; \
-} \
-__LL_SC_EXPORT(arch_atomic_##op##_return##name);
+}
-#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
-__LL_SC_INLINE int \
-__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \
+#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
+static inline int \
+__ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int val, result; \
\
asm volatile("// atomic_fetch_" #op #name "\n" \
+ __LL_SC_FALLBACK( \
" prfm pstl1strm, %3\n" \
"1: ld" #acq "xr %w0, %3\n" \
" " #asm_op " %w1, %w0, %w4\n" \
" st" #rel "xr %w2, %w1, %3\n" \
" cbnz %w2, 1b\n" \
-" " #mb \
+" " #mb ) \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i) \
+ : __stringify(constraint) "r" (i) \
: cl); \
\
return result; \
-} \
-__LL_SC_EXPORT(arch_atomic_fetch_##op##name);
+}
#define ATOMIC_OPS(...) \
ATOMIC_OP(__VA_ARGS__) \
@@ -99,8 +108,8 @@ __LL_SC_EXPORT(arch_atomic_fetch_##op##name);
ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, sub)
+ATOMIC_OPS(add, add, I)
+ATOMIC_OPS(sub, sub, J)
#undef ATOMIC_OPS
#define ATOMIC_OPS(...) \
@@ -110,77 +119,82 @@ ATOMIC_OPS(sub, sub)
ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
-ATOMIC_OPS(and, and)
-ATOMIC_OPS(andnot, bic)
-ATOMIC_OPS(or, orr)
-ATOMIC_OPS(xor, eor)
+ATOMIC_OPS(and, and, K)
+ATOMIC_OPS(or, orr, K)
+ATOMIC_OPS(xor, eor, K)
+/*
+ * GAS converts the mysterious and undocumented BIC (immediate) alias to
+ * an AND (immediate) instruction with the immediate inverted. We don't
+ * have a constraint for this, so fall back to register.
+ */
+ATOMIC_OPS(andnot, bic, )
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define ATOMIC64_OP(op, asm_op) \
-__LL_SC_INLINE void \
-__LL_SC_PREFIX(arch_atomic64_##op(s64 i, atomic64_t *v)) \
+#define ATOMIC64_OP(op, asm_op, constraint) \
+static inline void \
+__ll_sc_atomic64_##op(s64 i, atomic64_t *v) \
{ \
s64 result; \
unsigned long tmp; \
\
asm volatile("// atomic64_" #op "\n" \
+ __LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ldxr %0, %2\n" \
" " #asm_op " %0, %0, %3\n" \
" stxr %w1, %0, %2\n" \
-" cbnz %w1, 1b" \
+" cbnz %w1, 1b") \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i)); \
-} \
-__LL_SC_EXPORT(arch_atomic64_##op);
+ : __stringify(constraint) "r" (i)); \
+}
-#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
-__LL_SC_INLINE s64 \
-__LL_SC_PREFIX(arch_atomic64_##op##_return##name(s64 i, atomic64_t *v))\
+#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
+static inline long \
+__ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
{ \
s64 result; \
unsigned long tmp; \
\
asm volatile("// atomic64_" #op "_return" #name "\n" \
+ __LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ld" #acq "xr %0, %2\n" \
" " #asm_op " %0, %0, %3\n" \
" st" #rel "xr %w1, %0, %2\n" \
" cbnz %w1, 1b\n" \
-" " #mb \
+" " #mb ) \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i) \
+ : __stringify(constraint) "r" (i) \
: cl); \
\
return result; \
-} \
-__LL_SC_EXPORT(arch_atomic64_##op##_return##name);
+}
-#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
-__LL_SC_INLINE s64 \
-__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v)) \
+#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
+static inline long \
+__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
{ \
s64 result, val; \
unsigned long tmp; \
\
asm volatile("// atomic64_fetch_" #op #name "\n" \
+ __LL_SC_FALLBACK( \
" prfm pstl1strm, %3\n" \
"1: ld" #acq "xr %0, %3\n" \
" " #asm_op " %1, %0, %4\n" \
" st" #rel "xr %w2, %1, %3\n" \
" cbnz %w2, 1b\n" \
-" " #mb \
+" " #mb ) \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i) \
+ : __stringify(constraint) "r" (i) \
: cl); \
\
return result; \
-} \
-__LL_SC_EXPORT(arch_atomic64_fetch_##op##name);
+}
#define ATOMIC64_OPS(...) \
ATOMIC64_OP(__VA_ARGS__) \
@@ -193,8 +207,8 @@ __LL_SC_EXPORT(arch_atomic64_fetch_##op##name);
ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
-ATOMIC64_OPS(add, add)
-ATOMIC64_OPS(sub, sub)
+ATOMIC64_OPS(add, add, I)
+ATOMIC64_OPS(sub, sub, J)
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...) \
@@ -204,23 +218,29 @@ ATOMIC64_OPS(sub, sub)
ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
-ATOMIC64_OPS(and, and)
-ATOMIC64_OPS(andnot, bic)
-ATOMIC64_OPS(or, orr)
-ATOMIC64_OPS(xor, eor)
+ATOMIC64_OPS(and, and, L)
+ATOMIC64_OPS(or, orr, L)
+ATOMIC64_OPS(xor, eor, L)
+/*
+ * GAS converts the mysterious and undocumented BIC (immediate) alias to
+ * an AND (immediate) instruction with the immediate inverted. We don't
+ * have a constraint for this, so fall back to register.
+ */
+ATOMIC64_OPS(andnot, bic, )
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-__LL_SC_INLINE s64
-__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
+static inline s64
+__ll_sc_atomic64_dec_if_positive(atomic64_t *v)
{
s64 result;
unsigned long tmp;
asm volatile("// atomic64_dec_if_positive\n"
+ __LL_SC_FALLBACK(
" prfm pstl1strm, %2\n"
"1: ldxr %0, %2\n"
" subs %0, %0, #1\n"
@@ -228,20 +248,19 @@ __LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b\n"
" dmb ish\n"
-"2:"
+"2:")
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
:
: "cc", "memory");
return result;
}
-__LL_SC_EXPORT(arch_atomic64_dec_if_positive);
-#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \
-__LL_SC_INLINE u##sz \
-__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \
+#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \
+static inline u##sz \
+__ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
unsigned long old, \
- u##sz new)) \
+ u##sz new) \
{ \
unsigned long tmp; \
u##sz oldval; \
@@ -255,6 +274,7 @@ __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \
old = (u##sz)old; \
\
asm volatile( \
+ __LL_SC_FALLBACK( \
" prfm pstl1strm, %[v]\n" \
"1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \
" eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
@@ -262,46 +282,51 @@ __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \
" st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \
" cbnz %w[tmp], 1b\n" \
" " #mb "\n" \
- "2:" \
+ "2:") \
: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
[v] "+Q" (*(u##sz *)ptr) \
- : [old] "Kr" (old), [new] "r" (new) \
+ : [old] __stringify(constraint) "r" (old), [new] "r" (new) \
: cl); \
\
return oldval; \
-} \
-__LL_SC_EXPORT(__cmpxchg_case_##name##sz);
+}
-__CMPXCHG_CASE(w, b, , 8, , , , )
-__CMPXCHG_CASE(w, h, , 16, , , , )
-__CMPXCHG_CASE(w, , , 32, , , , )
-__CMPXCHG_CASE( , , , 64, , , , )
-__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory")
-__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory")
-__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory")
-__CMPXCHG_CASE( , , acq_, 64, , a, , "memory")
-__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory")
-__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory")
-__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory")
-__CMPXCHG_CASE( , , rel_, 64, , , l, "memory")
-__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory")
-__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory")
-__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory")
-__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory")
+/*
+ * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly
+ * handle the 'K' constraint for the value 4294967295 - thus we use no
+ * constraint for 32 bit operations.
+ */
+__CMPXCHG_CASE(w, b, , 8, , , , , K)
+__CMPXCHG_CASE(w, h, , 16, , , , , K)
+__CMPXCHG_CASE(w, , , 32, , , , , K)
+__CMPXCHG_CASE( , , , 64, , , , , L)
+__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", K)
+__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", K)
+__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", K)
+__CMPXCHG_CASE( , , acq_, 64, , a, , "memory", L)
+__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", K)
+__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", K)
+__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", K)
+__CMPXCHG_CASE( , , rel_, 64, , , l, "memory", L)
+__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", K)
+__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", K)
+__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", K)
+__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L)
#undef __CMPXCHG_CASE
#define __CMPXCHG_DBL(name, mb, rel, cl) \
-__LL_SC_INLINE long \
-__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1, \
+static inline long \
+__ll_sc__cmpxchg_double##name(unsigned long old1, \
unsigned long old2, \
unsigned long new1, \
unsigned long new2, \
- volatile void *ptr)) \
+ volatile void *ptr) \
{ \
unsigned long tmp, ret; \
\
asm volatile("// __cmpxchg_double" #name "\n" \
+ __LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ldxp %0, %1, %2\n" \
" eor %0, %0, %3\n" \
@@ -311,18 +336,18 @@ __LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1, \
" st" #rel "xp %w0, %5, %6, %2\n" \
" cbnz %w0, 1b\n" \
" " #mb "\n" \
- "2:" \
+ "2:") \
: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
: "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
: cl); \
\
return ret; \
-} \
-__LL_SC_EXPORT(__cmpxchg_double##name);
+}
__CMPXCHG_DBL( , , , )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")
#undef __CMPXCHG_DBL
+#undef K
#endif /* __ASM_ATOMIC_LL_SC_H */
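
The __stringify(constraint) trick above deserves a note: when CONFIG_CC_HAS_K_CONSTRAINT is unset, K is defined to nothing, so __stringify(K) expands to the empty string and an operand written as __stringify(K) "r" degrades from "Kr" to plain "r". A minimal sketch of the same preprocessor behaviour, outside the atomics macros:

	#include <linux/stringify.h>

	#ifndef CONFIG_CC_HAS_K_CONSTRAINT
	#define K
	#endif

	/* Expands to "Kr" when the compiler handles the 'K' immediate
	 * constraint, and to plain "r" when it does not. */
	#define ATOMIC_IMM_CONSTRAINT	__stringify(K) "r"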
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 69acb1c19a15..c6bd87d2915b 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -10,22 +10,13 @@
#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H
-#ifndef __ARM64_IN_ATOMIC_IMPL
-#error "please don't include this file directly"
-#endif
-
-#define __LL_SC_ATOMIC(op) __LL_SC_CALL(arch_atomic_##op)
#define ATOMIC_OP(op, asm_op) \
-static inline void arch_atomic_##op(int i, atomic_t *v) \
+static inline void __lse_atomic_##op(int i, atomic_t *v) \
{ \
- register int w0 asm ("w0") = i; \
- register atomic_t *x1 asm ("x1") = v; \
- \
- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \
-" " #asm_op " %w[i], %[v]\n") \
- : [i] "+r" (w0), [v] "+Q" (v->counter) \
- : "r" (x1) \
- : __LL_SC_CLOBBERS); \
+ asm volatile( \
+" " #asm_op " %w[i], %[v]\n" \
+ : [i] "+r" (i), [v] "+Q" (v->counter) \
+ : "r" (v)); \
}
ATOMIC_OP(andnot, stclr)
@@ -36,21 +27,15 @@ ATOMIC_OP(add, stadd)
#undef ATOMIC_OP
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
-static inline int arch_atomic_fetch_##op##name(int i, atomic_t *v) \
+static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
- register int w0 asm ("w0") = i; \
- register atomic_t *x1 asm ("x1") = v; \
- \
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
- /* LL/SC */ \
- __LL_SC_ATOMIC(fetch_##op##name), \
- /* LSE atomics */ \
-" " #asm_op #mb " %w[i], %w[i], %[v]") \
- : [i] "+r" (w0), [v] "+Q" (v->counter) \
- : "r" (x1) \
- : __LL_SC_CLOBBERS, ##cl); \
+ asm volatile( \
+" " #asm_op #mb " %w[i], %w[i], %[v]" \
+ : [i] "+r" (i), [v] "+Q" (v->counter) \
+ : "r" (v) \
+ : cl); \
\
- return w0; \
+ return i; \
}
#define ATOMIC_FETCH_OPS(op, asm_op) \
@@ -68,23 +53,18 @@ ATOMIC_FETCH_OPS(add, ldadd)
#undef ATOMIC_FETCH_OPS
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
-static inline int arch_atomic_add_return##name(int i, atomic_t *v) \
+static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \
{ \
- register int w0 asm ("w0") = i; \
- register atomic_t *x1 asm ("x1") = v; \
+ u32 tmp; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
- /* LL/SC */ \
- __LL_SC_ATOMIC(add_return##name) \
- __nops(1), \
- /* LSE atomics */ \
- " ldadd" #mb " %w[i], w30, %[v]\n" \
- " add %w[i], %w[i], w30") \
- : [i] "+r" (w0), [v] "+Q" (v->counter) \
- : "r" (x1) \
- : __LL_SC_CLOBBERS, ##cl); \
+ asm volatile( \
+ " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
+ " add %w[i], %w[i], %w[tmp]" \
+ : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
+ : "r" (v) \
+ : cl); \
\
- return w0; \
+ return i; \
}
ATOMIC_OP_ADD_RETURN(_relaxed, )
@@ -94,41 +74,26 @@ ATOMIC_OP_ADD_RETURN( , al, "memory")
#undef ATOMIC_OP_ADD_RETURN
-static inline void arch_atomic_and(int i, atomic_t *v)
+static inline void __lse_atomic_and(int i, atomic_t *v)
{
- register int w0 asm ("w0") = i;
- register atomic_t *x1 asm ("x1") = v;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- __LL_SC_ATOMIC(and)
- __nops(1),
- /* LSE atomics */
+ asm volatile(
" mvn %w[i], %w[i]\n"
- " stclr %w[i], %[v]")
- : [i] "+&r" (w0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
+ " stclr %w[i], %[v]"
+ : [i] "+&r" (i), [v] "+Q" (v->counter)
+ : "r" (v));
}
#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
-static inline int arch_atomic_fetch_and##name(int i, atomic_t *v) \
+static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \
{ \
- register int w0 asm ("w0") = i; \
- register atomic_t *x1 asm ("x1") = v; \
- \
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
- /* LL/SC */ \
- __LL_SC_ATOMIC(fetch_and##name) \
- __nops(1), \
- /* LSE atomics */ \
+ asm volatile( \
" mvn %w[i], %w[i]\n" \
- " ldclr" #mb " %w[i], %w[i], %[v]") \
- : [i] "+&r" (w0), [v] "+Q" (v->counter) \
- : "r" (x1) \
- : __LL_SC_CLOBBERS, ##cl); \
+ " ldclr" #mb " %w[i], %w[i], %[v]" \
+ : [i] "+&r" (i), [v] "+Q" (v->counter) \
+ : "r" (v) \
+ : cl); \
\
- return w0; \
+ return i; \
}
ATOMIC_FETCH_OP_AND(_relaxed, )
@@ -138,42 +103,29 @@ ATOMIC_FETCH_OP_AND( , al, "memory")
#undef ATOMIC_FETCH_OP_AND
-static inline void arch_atomic_sub(int i, atomic_t *v)
+static inline void __lse_atomic_sub(int i, atomic_t *v)
{
- register int w0 asm ("w0") = i;
- register atomic_t *x1 asm ("x1") = v;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- __LL_SC_ATOMIC(sub)
- __nops(1),
- /* LSE atomics */
+ asm volatile(
" neg %w[i], %w[i]\n"
- " stadd %w[i], %[v]")
- : [i] "+&r" (w0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
+ " stadd %w[i], %[v]"
+ : [i] "+&r" (i), [v] "+Q" (v->counter)
+ : "r" (v));
}
#define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \
-static inline int arch_atomic_sub_return##name(int i, atomic_t *v) \
+static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
{ \
- register int w0 asm ("w0") = i; \
- register atomic_t *x1 asm ("x1") = v; \
+ u32 tmp; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
- /* LL/SC */ \
- __LL_SC_ATOMIC(sub_return##name) \
- __nops(2), \
- /* LSE atomics */ \
+ asm volatile( \
" neg %w[i], %w[i]\n" \
- " ldadd" #mb " %w[i], w30, %[v]\n" \
- " add %w[i], %w[i], w30") \
- : [i] "+&r" (w0), [v] "+Q" (v->counter) \
- : "r" (x1) \
- : __LL_SC_CLOBBERS , ##cl); \
+ " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
+ " add %w[i], %w[i], %w[tmp]" \
+ : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
+ : "r" (v) \
+ : cl); \
\
- return w0; \
+ return i; \
}
ATOMIC_OP_SUB_RETURN(_relaxed, )
@@ -184,23 +136,16 @@ ATOMIC_OP_SUB_RETURN( , al, "memory")
#undef ATOMIC_OP_SUB_RETURN
#define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \
-static inline int arch_atomic_fetch_sub##name(int i, atomic_t *v) \
+static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \
{ \
- register int w0 asm ("w0") = i; \
- register atomic_t *x1 asm ("x1") = v; \
- \
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
- /* LL/SC */ \
- __LL_SC_ATOMIC(fetch_sub##name) \
- __nops(1), \
- /* LSE atomics */ \
+ asm volatile( \
" neg %w[i], %w[i]\n" \
- " ldadd" #mb " %w[i], %w[i], %[v]") \
- : [i] "+&r" (w0), [v] "+Q" (v->counter) \
- : "r" (x1) \
- : __LL_SC_CLOBBERS, ##cl); \
+ " ldadd" #mb " %w[i], %w[i], %[v]" \
+ : [i] "+&r" (i), [v] "+Q" (v->counter) \
+ : "r" (v) \
+ : cl); \
\
- return w0; \
+ return i; \
}
ATOMIC_FETCH_OP_SUB(_relaxed, )
@@ -209,20 +154,14 @@ ATOMIC_FETCH_OP_SUB(_release, l, "memory")
ATOMIC_FETCH_OP_SUB( , al, "memory")
#undef ATOMIC_FETCH_OP_SUB
-#undef __LL_SC_ATOMIC
-#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(arch_atomic64_##op)
#define ATOMIC64_OP(op, asm_op) \
-static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
+static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \
{ \
- register s64 x0 asm ("x0") = i; \
- register atomic64_t *x1 asm ("x1") = v; \
- \
- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \
-" " #asm_op " %[i], %[v]\n") \
- : [i] "+r" (x0), [v] "+Q" (v->counter) \
- : "r" (x1) \
- : __LL_SC_CLOBBERS); \
+ asm volatile( \
+" " #asm_op " %[i], %[v]\n" \
+ : [i] "+r" (i), [v] "+Q" (v->counter) \
+ : "r" (v)); \
}
ATOMIC64_OP(andnot, stclr)
@@ -233,21 +172,15 @@ ATOMIC64_OP(add, stadd)
#undef ATOMIC64_OP
#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
-static inline s64 arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
+static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
{ \
- register s64 x0 asm ("x0") = i; \
- register atomic64_t *x1 asm ("x1") = v; \
- \
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
- /* LL/SC */ \
- __LL_SC_ATOMIC64(fetch_##op##name), \
- /* LSE atomics */ \
-" " #asm_op #mb " %[i], %[i], %[v]") \
- : [i] "+r" (x0), [v] "+Q" (v->counter) \
- : "r" (x1) \
- : __LL_SC_CLOBBERS, ##cl); \
+ asm volatile( \
+" " #asm_op #mb " %[i], %[i], %[v]" \
+ : [i] "+r" (i), [v] "+Q" (v->counter) \
+ : "r" (v) \
+ : cl); \
\
- return x0; \
+ return i; \
}
#define ATOMIC64_FETCH_OPS(op, asm_op) \
@@ -265,23 +198,18 @@ ATOMIC64_FETCH_OPS(add, ldadd)
#undef ATOMIC64_FETCH_OPS
#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
-static inline s64 arch_atomic64_add_return##name(s64 i, atomic64_t *v) \
+static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
{ \
- register s64 x0 asm ("x0") = i; \
- register atomic64_t *x1 asm ("x1") = v; \
+ unsigned long tmp; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
- /* LL/SC */ \
- __LL_SC_ATOMIC64(add_return##name) \
- __nops(1), \
- /* LSE atomics */ \
- " ldadd" #mb " %[i], x30, %[v]\n" \
- " add %[i], %[i], x30") \
- : [i] "+r" (x0), [v] "+Q" (v->counter) \
- : "r" (x1) \
- : __LL_SC_CLOBBERS, ##cl); \
+ asm volatile( \
+ " ldadd" #mb " %[i], %x[tmp], %[v]\n" \
+ " add %[i], %[i], %x[tmp]" \
+ : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
+ : "r" (v) \
+ : cl); \
\
- return x0; \
+ return i; \
}
ATOMIC64_OP_ADD_RETURN(_relaxed, )
@@ -291,41 +219,26 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory")
#undef ATOMIC64_OP_ADD_RETURN
-static inline void arch_atomic64_and(s64 i, atomic64_t *v)
+static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
- register s64 x0 asm ("x0") = i;
- register atomic64_t *x1 asm ("x1") = v;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- __LL_SC_ATOMIC64(and)
- __nops(1),
- /* LSE atomics */
+ asm volatile(
" mvn %[i], %[i]\n"
- " stclr %[i], %[v]")
- : [i] "+&r" (x0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
+ " stclr %[i], %[v]"
+ : [i] "+&r" (i), [v] "+Q" (v->counter)
+ : "r" (v));
}
#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
-static inline s64 arch_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
+static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
{ \
- register s64 x0 asm ("x0") = i; \
- register atomic64_t *x1 asm ("x1") = v; \
- \
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
- /* LL/SC */ \
- __LL_SC_ATOMIC64(fetch_and##name) \
- __nops(1), \
- /* LSE atomics */ \
+ asm volatile( \
" mvn %[i], %[i]\n" \
- " ldclr" #mb " %[i], %[i], %[v]") \
- : [i] "+&r" (x0), [v] "+Q" (v->counter) \
- : "r" (x1) \
- : __LL_SC_CLOBBERS, ##cl); \
+ " ldclr" #mb " %[i], %[i], %[v]" \
+ : [i] "+&r" (i), [v] "+Q" (v->counter) \
+ : "r" (v) \
+ : cl); \
\
- return x0; \
+ return i; \
}
ATOMIC64_FETCH_OP_AND(_relaxed, )
@@ -335,42 +248,29 @@ ATOMIC64_FETCH_OP_AND( , al, "memory")
#undef ATOMIC64_FETCH_OP_AND
-static inline void arch_atomic64_sub(s64 i, atomic64_t *v)
+static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{
- register s64 x0 asm ("x0") = i;
- register atomic64_t *x1 asm ("x1") = v;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- __LL_SC_ATOMIC64(sub)
- __nops(1),
- /* LSE atomics */
+ asm volatile(
" neg %[i], %[i]\n"
- " stadd %[i], %[v]")
- : [i] "+&r" (x0), [v] "+Q" (v->counter)
- : "r" (x1)
- : __LL_SC_CLOBBERS);
+ " stadd %[i], %[v]"
+ : [i] "+&r" (i), [v] "+Q" (v->counter)
+ : "r" (v));
}
#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
-static inline s64 arch_atomic64_sub_return##name(s64 i, atomic64_t *v) \
+static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
{ \
- register s64 x0 asm ("x0") = i; \
- register atomic64_t *x1 asm ("x1") = v; \
+ unsigned long tmp; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
- /* LL/SC */ \
- __LL_SC_ATOMIC64(sub_return##name) \
- __nops(2), \
- /* LSE atomics */ \
+ asm volatile( \
" neg %[i], %[i]\n" \
- " ldadd" #mb " %[i], x30, %[v]\n" \
- " add %[i], %[i], x30") \
- : [i] "+&r" (x0), [v] "+Q" (v->counter) \
- : "r" (x1) \
- : __LL_SC_CLOBBERS, ##cl); \
+ " ldadd" #mb " %[i], %x[tmp], %[v]\n" \
+ " add %[i], %[i], %x[tmp]" \
+ : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
+ : "r" (v) \
+ : cl); \
\
- return x0; \
+ return i; \
}
ATOMIC64_OP_SUB_RETURN(_relaxed, )
@@ -381,23 +281,16 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
#undef ATOMIC64_OP_SUB_RETURN
#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
-static inline s64 arch_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
+static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
{ \
- register s64 x0 asm ("x0") = i; \
- register atomic64_t *x1 asm ("x1") = v; \
- \
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
- /* LL/SC */ \
- __LL_SC_ATOMIC64(fetch_sub##name) \
- __nops(1), \
- /* LSE atomics */ \
+ asm volatile( \
" neg %[i], %[i]\n" \
- " ldadd" #mb " %[i], %[i], %[v]") \
- : [i] "+&r" (x0), [v] "+Q" (v->counter) \
- : "r" (x1) \
- : __LL_SC_CLOBBERS, ##cl); \
+ " ldadd" #mb " %[i], %[i], %[v]" \
+ : [i] "+&r" (i), [v] "+Q" (v->counter) \
+ : "r" (v) \
+ : cl); \
\
- return x0; \
+ return i; \
}
ATOMIC64_FETCH_OP_SUB(_relaxed, )
@@ -407,54 +300,44 @@ ATOMIC64_FETCH_OP_SUB( , al, "memory")
#undef ATOMIC64_FETCH_OP_SUB
-static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
{
- register long x0 asm ("x0") = (long)v;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- __LL_SC_ATOMIC64(dec_if_positive)
- __nops(6),
- /* LSE atomics */
- "1: ldr x30, %[v]\n"
- " subs %[ret], x30, #1\n"
+ unsigned long tmp;
+
+ asm volatile(
+ "1: ldr %x[tmp], %[v]\n"
+ " subs %[ret], %x[tmp], #1\n"
" b.lt 2f\n"
- " casal x30, %[ret], %[v]\n"
- " sub x30, x30, #1\n"
- " sub x30, x30, %[ret]\n"
- " cbnz x30, 1b\n"
- "2:")
- : [ret] "+&r" (x0), [v] "+Q" (v->counter)
+ " casal %x[tmp], %[ret], %[v]\n"
+ " sub %x[tmp], %x[tmp], #1\n"
+ " sub %x[tmp], %x[tmp], %[ret]\n"
+ " cbnz %x[tmp], 1b\n"
+ "2:"
+ : [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
:
- : __LL_SC_CLOBBERS, "cc", "memory");
+ : "cc", "memory");
- return x0;
+ return (long)v;
}
-#undef __LL_SC_ATOMIC64
-
-#define __LL_SC_CMPXCHG(op) __LL_SC_CALL(__cmpxchg_case_##op)
-
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) \
-static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \
+static inline u##sz __lse__cmpxchg_case_##name##sz(volatile void *ptr, \
u##sz old, \
u##sz new) \
{ \
register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
register u##sz x1 asm ("x1") = old; \
register u##sz x2 asm ("x2") = new; \
+ unsigned long tmp; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
- /* LL/SC */ \
- __LL_SC_CMPXCHG(name##sz) \
- __nops(2), \
- /* LSE atomics */ \
- " mov " #w "30, %" #w "[old]\n" \
- " cas" #mb #sfx "\t" #w "30, %" #w "[new], %[v]\n" \
- " mov %" #w "[ret], " #w "30") \
- : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \
+ asm volatile( \
+ " mov %" #w "[tmp], %" #w "[old]\n" \
+ " cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n" \
+ " mov %" #w "[ret], %" #w "[tmp]" \
+ : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr), \
+ [tmp] "=&r" (tmp) \
: [old] "r" (x1), [new] "r" (x2) \
- : __LL_SC_CLOBBERS, ##cl); \
+ : cl); \
\
return x0; \
}
@@ -476,13 +359,10 @@ __CMPXCHG_CASE(w, h, mb_, 16, al, "memory")
__CMPXCHG_CASE(w, , mb_, 32, al, "memory")
__CMPXCHG_CASE(x, , mb_, 64, al, "memory")
-#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE
-#define __LL_SC_CMPXCHG_DBL(op) __LL_SC_CALL(__cmpxchg_double##op)
-
#define __CMPXCHG_DBL(name, mb, cl...) \
-static inline long __cmpxchg_double##name(unsigned long old1, \
+static inline long __lse__cmpxchg_double##name(unsigned long old1, \
unsigned long old2, \
unsigned long new1, \
unsigned long new2, \
@@ -496,20 +376,16 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
register unsigned long x3 asm ("x3") = new2; \
register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
- /* LL/SC */ \
- __LL_SC_CMPXCHG_DBL(name) \
- __nops(3), \
- /* LSE atomics */ \
+ asm volatile( \
" casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
" eor %[old1], %[old1], %[oldval1]\n" \
" eor %[old2], %[old2], %[oldval2]\n" \
- " orr %[old1], %[old1], %[old2]") \
+ " orr %[old1], %[old1], %[old2]" \
: [old1] "+&r" (x0), [old2] "+&r" (x1), \
[v] "+Q" (*(unsigned long *)ptr) \
: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
[oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
- : __LL_SC_CLOBBERS, ##cl); \
+ : cl); \
\
return x0; \
}
@@ -517,7 +393,6 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
__CMPXCHG_DBL( , )
__CMPXCHG_DBL(_mb, al, "memory")
-#undef __LL_SC_CMPXCHG_DBL
#undef __CMPXCHG_DBL
#endif /* __ASM_ATOMIC_LSE_H */
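
The rewritten __lse_atomic64_dec_if_positive() above is a compare-and-swap loop. A hedged, portable restatement of the same algorithm using GCC's __atomic builtins (not the kernel's asm):

	/* Sketch of the dec-if-positive algorithm; illustrative only. */
	static inline long dec_if_positive_sketch(long *counter)
	{
		long old = __atomic_load_n(counter, __ATOMIC_RELAXED);

		while (old > 0) {
			/* On failure, 'old' is reloaded with the current value. */
			if (__atomic_compare_exchange_n(counter, &old, old - 1,
							false, __ATOMIC_ACQ_REL,
							__ATOMIC_RELAXED))
				break;
		}
		/* Like the asm's b.lt path, return old - 1 even when no
		 * store was performed. */
		return old - 1;
	}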
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 64eeaa41e7ca..43da6dd29592 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -78,7 +78,7 @@ static inline u32 cache_type_cwg(void)
return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
}
-#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_mostly __section(.data..read_mostly)
static inline int cache_line_size_of_cpu(void)
{
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 7a299a20f6dc..a1398f2f9994 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -10,7 +10,6 @@
#include <linux/build_bug.h>
#include <linux/compiler.h>
-#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>
@@ -104,6 +103,50 @@ __XCHG_GEN(_mb)
#define arch_xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__)
#define arch_xchg(...) __xchg_wrapper( _mb, __VA_ARGS__)
+#define __CMPXCHG_CASE(name, sz) \
+static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \
+ u##sz old, \
+ u##sz new) \
+{ \
+ return __lse_ll_sc_body(_cmpxchg_case_##name##sz, \
+ ptr, old, new); \
+}
+
+__CMPXCHG_CASE( , 8)
+__CMPXCHG_CASE( , 16)
+__CMPXCHG_CASE( , 32)
+__CMPXCHG_CASE( , 64)
+__CMPXCHG_CASE(acq_, 8)
+__CMPXCHG_CASE(acq_, 16)
+__CMPXCHG_CASE(acq_, 32)
+__CMPXCHG_CASE(acq_, 64)
+__CMPXCHG_CASE(rel_, 8)
+__CMPXCHG_CASE(rel_, 16)
+__CMPXCHG_CASE(rel_, 32)
+__CMPXCHG_CASE(rel_, 64)
+__CMPXCHG_CASE(mb_, 8)
+__CMPXCHG_CASE(mb_, 16)
+__CMPXCHG_CASE(mb_, 32)
+__CMPXCHG_CASE(mb_, 64)
+
+#undef __CMPXCHG_CASE
+
+#define __CMPXCHG_DBL(name) \
+static inline long __cmpxchg_double##name(unsigned long old1, \
+ unsigned long old2, \
+ unsigned long new1, \
+ unsigned long new2, \
+ volatile void *ptr) \
+{ \
+ return __lse_ll_sc_body(_cmpxchg_double##name, \
+ old1, old2, new1, new2, ptr); \
+}
+
+__CMPXCHG_DBL( )
+__CMPXCHG_DBL(_mb)
+
+#undef __CMPXCHG_DBL
+
#define __CMPXCHG_GEN(sfx) \
static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
unsigned long old, \
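
Each __CMPXCHG_CASE() invocation above stamps out a thin dispatcher. For illustration, __CMPXCHG_CASE(acq_, 32) expands by hand to roughly:

	/* Hand expansion -- illustrative only. */
	static inline u32 __cmpxchg_case_acq_32(volatile void *ptr,
						u32 old, u32 new)
	{
		/* Picks __lse__cmpxchg_case_acq_32() when LSE atomics are
		 * usable and __ll_sc__cmpxchg_case_acq_32() otherwise. */
		return __lse_ll_sc_body(_cmpxchg_case_acq_32, ptr, old, new);
	}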
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index fb8ad4616b3b..b0d53a265f1d 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -4,7 +4,6 @@
*/
#ifndef __ASM_COMPAT_H
#define __ASM_COMPAT_H
-#ifdef __KERNEL__
#ifdef CONFIG_COMPAT
/*
@@ -215,5 +214,4 @@ static inline int is_compat_thread(struct thread_info *thread)
}
#endif /* CONFIG_COMPAT */
-#endif /* __KERNEL__ */
#endif /* __ASM_COMPAT_H */
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index c09d633c3109..86aabf1e0199 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -23,6 +23,8 @@
* @cpu_boot: Boots a cpu into the kernel.
* @cpu_postboot: Optionally, perform any post-boot cleanup or necessary
* synchronisation. Called from the cpu being booted.
+ * @cpu_can_disable: Determines whether a CPU can be disabled based on
+ * mechanism-specific information.
* @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific
* reason, which will cause the hot unplug to be aborted. Called
* from the cpu to be killed.
@@ -42,6 +44,7 @@ struct cpu_operations {
int (*cpu_boot)(unsigned int);
void (*cpu_postboot)(void);
#ifdef CONFIG_HOTPLUG_CPU
+ bool (*cpu_can_disable)(unsigned int cpu);
int (*cpu_disable)(unsigned int cpu);
void (*cpu_die)(unsigned int cpu);
int (*cpu_kill)(unsigned int cpu);
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index c96ffa4722d3..9cde5d2e768f 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -289,9 +289,16 @@ struct arm64_cpu_capabilities {
u16 type;
bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
/*
- * Take the appropriate actions to enable this capability for this CPU.
- * For each successfully booted CPU, this method is called for each
- * globally detected capability.
+ * Take the appropriate actions to configure this capability
+ * for this CPU. If the capability is detected by the kernel
+ * this will be called on all the CPUs in the system,
+ * including the hotplugged CPUs, regardless of whether the
+ * capability is available on that specific CPU. This is
+ * useful for some capabilities (e.g., working around CPU
+ * errata), where all the CPUs must take some action (e.g.,
+ * changing system control/configuration). Thus, if an action
+ * is required only if the CPU has the capability, then the
+ * routine must check it before taking any action.
*/
void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
union {
@@ -363,21 +370,6 @@ cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
return false;
}
-/*
- * Take appropriate action for all matching entries in the shared capability
- * entry.
- */
-static inline void
-cpucap_multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
-{
- const struct arm64_cpu_capabilities *caps;
-
- for (caps = entry->match_list; caps->matches; caps++)
- if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
- caps->cpu_enable)
- caps->cpu_enable(caps);
-}
-
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index e7d46631cc42..b1454d117cd2 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -51,14 +51,6 @@
#define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
MIDR_ARCHITECTURE_MASK)
-#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \
-({ \
- u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \
- u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \
- \
- _model == (model) && rv >= (rv_min) && rv <= (rv_max); \
- })
-
#define ARM_CPU_IMP_ARM 0x41
#define ARM_CPU_IMP_APM 0x50
#define ARM_CPU_IMP_CAVIUM 0x43
@@ -159,10 +151,19 @@ struct midr_range {
#define MIDR_REV(m, v, r) MIDR_RANGE(m, v, r, v, r)
#define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
+static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
+ u32 rv_max)
+{
+ u32 _model = midr & MIDR_CPU_MODEL_MASK;
+ u32 rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);
+
+ return _model == model && rv >= rv_min && rv <= rv_max;
+}
+
static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
{
- return MIDR_IS_CPU_MODEL_RANGE(midr, range->model,
- range->rv_min, range->rv_max);
+ return midr_is_cpu_model_range(midr, range->model,
+ range->rv_min, range->rv_max);
}
static inline bool
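
Turning MIDR_IS_CPU_MODEL_RANGE() into the C function midr_is_cpu_model_range() gains type checking and single evaluation of the arguments. A hedged usage sketch (the surrounding function is invented; the MIDR helpers are the ones from this header):

	/* Illustrative caller -- not part of this patch. */
	static bool cpu_is_early_cortex_a57(void)
	{
		/* Cortex-A57, r0p0 .. r1p2 */
		return midr_is_cpu_model_range(read_cpuid_id(),
					       MIDR_CORTEX_A57,
					       MIDR_CPU_VAR_REV(0, 0),
					       MIDR_CPU_VAR_REV(1, 2));
	}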
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index d8ec5bb881c2..7619f473155f 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -5,8 +5,6 @@
#ifndef __ASM_DEBUG_MONITORS_H
#define __ASM_DEBUG_MONITORS_H
-#ifdef __KERNEL__
-
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/brk-imm.h>
@@ -128,5 +126,4 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs)
int aarch32_break_handler(struct pt_regs *regs);
#endif /* __ASSEMBLY */
-#endif /* __KERNEL__ */
#endif /* __ASM_DEBUG_MONITORS_H */
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index bdcb0922a40c..fb3e5044f473 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -5,8 +5,6 @@
#ifndef __ASM_DMA_MAPPING_H
#define __ASM_DMA_MAPPING_H
-#ifdef __KERNEL__
-
#include <linux/types.h>
#include <linux/vmalloc.h>
@@ -27,5 +25,4 @@ static inline bool is_device_dma_coherent(struct device *dev)
return dev->dma_coherent;
}
-#endif /* __KERNEL__ */
#endif /* __ASM_DMA_MAPPING_H */
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 76a144702586..b54d3a86c444 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -79,7 +79,7 @@ static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
/*
* On arm64, we have to ensure that the initrd ends up in the linear region,
- * which is a 1 GB aligned region of size '1UL << (VA_BITS - 1)' that is
+ * which is a 1 GB aligned region of size '1UL << (VA_BITS_MIN - 1)' that is
* guaranteed to cover the kernel Image.
*
* Since the EFI stub is part of the kernel Image, we can relax the
@@ -90,7 +90,7 @@ static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
unsigned long image_addr)
{
- return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS - 1));
+ return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS_MIN - 1));
}
#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 65ac18400979..cb29253ae86b 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -34,7 +34,8 @@
#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */
#define ESR_ELx_EC_SYS64 (0x18)
#define ESR_ELx_EC_SVE (0x19)
-/* Unallocated EC: 0x1A - 0x1E */
+#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */
+/* Unallocated EC: 0x1b - 0x1E */
#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
#define ESR_ELx_EC_IABT_LOW (0x20)
#define ESR_ELx_EC_IABT_CUR (0x21)
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index ed57b760f38c..a17393ff6677 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -30,4 +30,6 @@ static inline u32 disr_to_esr(u64 disr)
return esr;
}
+asmlinkage void enter_from_user_mode(void);
+
#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index b6a2c352f4c3..59f10dd13f12 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -21,7 +21,7 @@
#include <linux/stddef.h>
#include <linux/types.h>
-#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
+#ifdef CONFIG_COMPAT
/* Masks for extracting the FPSR and FPCR from the FPSCR */
#define VFP_FPSCR_STAT_MASK 0xf800009f
#define VFP_FPSCR_CTRL_MASK 0x07f79f00
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 6211e3105491..6cc26a127819 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -5,8 +5,6 @@
#ifndef __ASM_FUTEX_H
#define __ASM_FUTEX_H
-#ifdef __KERNEL__
-
#include <linux/futex.h>
#include <linux/uaccess.h>
@@ -129,5 +127,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
return ret;
}
-#endif /* __KERNEL__ */
#endif /* __ASM_FUTEX_H */
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index db9ab760e6fd..bc7aaed4b34e 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -10,8 +10,6 @@
#include <asm/sysreg.h>
#include <asm/virt.h>
-#ifdef __KERNEL__
-
struct arch_hw_breakpoint_ctrl {
u32 __reserved : 19,
len : 8,
@@ -156,5 +154,4 @@ static inline int get_num_wrps(void)
ID_AA64DFR0_WRPS_SHIFT);
}
-#endif /* __KERNEL__ */
#endif /* __ASM_BREAKPOINT_H */
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 7ed92626949d..323cb306bd28 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -8,8 +8,6 @@
#ifndef __ASM_IO_H
#define __ASM_IO_H
-#ifdef __KERNEL__
-
#include <linux/types.h>
#include <asm/byteorder.h>
@@ -97,7 +95,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
({ \
unsigned long tmp; \
\
- rmb(); \
+ dma_rmb(); \
\
/* \
* Create a dummy control dependency from the IO read to any \
@@ -111,7 +109,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
})
#define __io_par(v) __iormb(v)
-#define __iowmb() wmb()
+#define __iowmb() dma_wmb()
/*
* Relaxed I/O memory access primitives. These follow the Device memory
@@ -165,14 +163,13 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
* I/O memory mapping functions.
*/
extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
-extern void __iounmap(volatile void __iomem *addr);
+extern void iounmap(volatile void __iomem *addr);
extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
#define ioremap_wt(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
-#define iounmap __iounmap
/*
* PCI configuration space mapping function.
@@ -207,5 +204,4 @@ extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
extern int devmem_is_allowed(unsigned long pfn);
-#endif /* __KERNEL__ */
#endif /* __ASM_IO_H */
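
Relaxing __iormb()/__iowmb() from rmb()/wmb() to dma_rmb()/dma_wmb() keeps readX()/writeX() ordered against coherent DMA while using a cheaper barrier. A hedged sketch of the pattern the read barrier protects (all driver names here are hypothetical):

	/* Sketch only: hypothetical driver polling a completion flag. */
	static void wait_and_process(struct ring *ring)
	{
		/* readl() is readl_relaxed() + __iormb(); dma_rmb() is
		 * still strong enough to order the flag read before the
		 * dependent descriptor reads below. */
		while (!(readl(ring->status_reg) & DESC_DONE))
			cpu_relax();

		process_descriptor(ring->desc);
	}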
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 7872f260c9ee..1a59f0ed1ae3 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -5,8 +5,6 @@
#ifndef __ASM_IRQFLAGS_H
#define __ASM_IRQFLAGS_H
-#ifdef __KERNEL__
-
#include <asm/alternative.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
@@ -128,5 +126,4 @@ static inline void arch_local_irq_restore(unsigned long flags)
: "memory");
}
-#endif
-#endif
+#endif /* __ASM_IRQFLAGS_H */
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index b52aacd2c526..b0dc4abc3589 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -18,11 +18,8 @@
* KASAN_SHADOW_START: beginning of the kernel virtual addresses.
* KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,
* where N = (1 << KASAN_SHADOW_SCALE_SHIFT).
- */
-#define KASAN_SHADOW_START (VA_START)
-#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
-
-/*
+ *
+ * KASAN_SHADOW_OFFSET:
* This value is used to map an address to the corresponding shadow
* address by the following formula:
* shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
@@ -33,8 +30,8 @@
* KASAN_SHADOW_OFFSET = KASAN_SHADOW_END -
* (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT))
*/
-#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << \
- (64 - KASAN_SHADOW_SCALE_SHIFT)))
+#define _KASAN_SHADOW_START(va) (KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
+#define KASAN_SHADOW_START _KASAN_SHADOW_START(vabits_actual)
void kasan_init(void);
void kasan_copy_shadow(pgd_t *pgdir);
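
As a worked example of the formula in the comment above (values assumed, not taken from a particular config): with generic KASAN, KASAN_SHADOW_SCALE_SHIFT is 3, so for a 48-bit kernel VA space the shadow covers 1/8th of the range:

	/* Illustrative, assuming KASAN_SHADOW_SCALE_SHIFT == 3:
	 *   shadow_addr        = (addr >> 3) + KASAN_SHADOW_OFFSET
	 *   KASAN_SHADOW_START = KASAN_SHADOW_END - (1UL << (48 - 3))
	 * and with _KASAN_SHADOW_START(vabits_actual) the base now
	 * follows the runtime VA size instead of compile-time VA_START. */
	static inline unsigned long kasan_mem_to_shadow_sketch(unsigned long addr)
	{
		return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
	}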
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 8262325e2fc6..80b388278149 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -2,56 +2,47 @@
#ifndef __ASM_LSE_H
#define __ASM_LSE_H
+#include <asm/atomic_ll_sc.h>
+
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
#include <linux/compiler_types.h>
#include <linux/export.h>
+#include <linux/jump_label.h>
#include <linux/stringify.h>
#include <asm/alternative.h>
+#include <asm/atomic_lse.h>
#include <asm/cpucaps.h>
-#ifdef __ASSEMBLER__
-
-.arch_extension lse
-
-.macro alt_lse, llsc, lse
- alternative_insn "\llsc", "\lse", ARM64_HAS_LSE_ATOMICS
-.endm
-
-#else /* __ASSEMBLER__ */
-
__asm__(".arch_extension lse");
-/* Move the ll/sc atomics out-of-line */
-#define __LL_SC_INLINE notrace
-#define __LL_SC_PREFIX(x) __ll_sc_##x
-#define __LL_SC_EXPORT(x) EXPORT_SYMBOL(__LL_SC_PREFIX(x))
+extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
+extern struct static_key_false arm64_const_caps_ready;
+
+static inline bool system_uses_lse_atomics(void)
+{
+ return (static_branch_likely(&arm64_const_caps_ready)) &&
+ static_branch_likely(&cpu_hwcap_keys[ARM64_HAS_LSE_ATOMICS]);
+}
-/* Macro for constructing calls to out-of-line ll/sc atomics */
-#define __LL_SC_CALL(op) "bl\t" __stringify(__LL_SC_PREFIX(op)) "\n"
-#define __LL_SC_CLOBBERS "x16", "x17", "x30"
+#define __lse_ll_sc_body(op, ...) \
+({ \
+ system_uses_lse_atomics() ? \
+ __lse_##op(__VA_ARGS__) : \
+ __ll_sc_##op(__VA_ARGS__); \
+})
/* In-line patching at runtime */
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
-#endif /* __ASSEMBLER__ */
#else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
-#ifdef __ASSEMBLER__
-
-.macro alt_lse, llsc, lse
- \llsc
-.endm
-
-#else /* __ASSEMBLER__ */
+static inline bool system_uses_lse_atomics(void) { return false; }
-#define __LL_SC_INLINE static inline
-#define __LL_SC_PREFIX(x) x
-#define __LL_SC_EXPORT(x)
+#define __lse_ll_sc_body(op, ...) __ll_sc_##op(__VA_ARGS__)
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc
-#endif /* __ASSEMBLER__ */
#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
#endif /* __ASM_LSE_H */
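
system_uses_lse_atomics() above chains two static branches, so once capabilities are finalised the LSE/LL-SC choice costs a single patched branch rather than the old out-of-line bl and its x16/x17/x30 clobbers. The general static-key pattern it relies on, as a standalone hedged sketch:

	#include <linux/jump_label.h>

	DEFINE_STATIC_KEY_FALSE(example_feature_key);	/* hypothetical key */

	static bool example_uses_feature(void)
	{
		/* Compiles to a nop-or-branch that is patched once at
		 * boot -- the same mechanism behind
		 * system_uses_lse_atomics(). */
		return static_branch_likely(&example_feature_key);
	}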
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index fb04f10a78ab..b61b50bf68b1 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -12,10 +12,10 @@
#include <linux/compiler.h>
#include <linux/const.h>
+#include <linux/sizes.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/page-def.h>
-#include <linux/sizes.h>
/*
* Size of the PCI I/O space. This must remain a power of two so that
@@ -26,37 +26,50 @@
/*
* VMEMMAP_SIZE - allows the whole linear region to be covered by
* a struct page array
+ *
+ * If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE
+ * needs to cover the memory region from the beginning of the 52-bit
+ * PAGE_OFFSET all the way to PAGE_END for 48-bit. This allows us to
+ * keep a constant PAGE_OFFSET and "fallback" to using the higher end
+ * of the VMEMMAP where 52-bit support is not available in hardware.
*/
-#define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
+#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) \
+ >> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT))
/*
- * PAGE_OFFSET - the virtual address of the start of the linear map (top
- * (VA_BITS - 1))
- * KIMAGE_VADDR - the virtual address of the start of the kernel image
+ * PAGE_OFFSET - the virtual address of the start of the linear map, at the
+ * start of the TTBR1 address space.
+ * PAGE_END - the end of the linear map, where all other kernel mappings begin.
+ * KIMAGE_VADDR - the virtual address of the start of the kernel image.
* VA_BITS - the maximum number of bits for virtual addresses.
- * VA_START - the first kernel virtual address.
*/
#define VA_BITS (CONFIG_ARM64_VA_BITS)
-#define VA_START (UL(0xffffffffffffffff) - \
- (UL(1) << VA_BITS) + 1)
-#define PAGE_OFFSET (UL(0xffffffffffffffff) - \
- (UL(1) << (VA_BITS - 1)) + 1)
+#define _PAGE_OFFSET(va) (-(UL(1) << (va)))
+#define PAGE_OFFSET (_PAGE_OFFSET(VA_BITS))
#define KIMAGE_VADDR (MODULES_END)
-#define BPF_JIT_REGION_START (VA_START + KASAN_SHADOW_SIZE)
+#define BPF_JIT_REGION_START (KASAN_SHADOW_END)
#define BPF_JIT_REGION_SIZE (SZ_128M)
#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR (BPF_JIT_REGION_END)
#define MODULES_VSIZE (SZ_128M)
-#define VMEMMAP_START (PAGE_OFFSET - VMEMMAP_SIZE)
+#define VMEMMAP_START (-VMEMMAP_SIZE - SZ_2M)
#define PCI_IO_END (VMEMMAP_START - SZ_2M)
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP (PCI_IO_START - SZ_2M)
-#define KERNEL_START _text
-#define KERNEL_END _end
+#if VA_BITS > 48
+#define VA_BITS_MIN (48)
+#else
+#define VA_BITS_MIN (VA_BITS)
+#endif
+
+#define _PAGE_END(va) (-(UL(1) << ((va) - 1)))
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#define KERNEL_START _text
+#define KERNEL_END _end
+
+#ifdef CONFIG_ARM64_VA_BITS_52
#define MAX_USER_VA_BITS 52
#else
#define MAX_USER_VA_BITS VA_BITS
@@ -68,12 +81,14 @@
* significantly, so double the (minimum) stack size when they are in use.
*/
#ifdef CONFIG_KASAN
-#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+#define KASAN_SHADOW_END ((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \
+ + KASAN_SHADOW_OFFSET)
#define KASAN_THREAD_SHIFT 1
#else
-#define KASAN_SHADOW_SIZE (0)
#define KASAN_THREAD_SHIFT 0
-#endif
+#define KASAN_SHADOW_END (_PAGE_END(VA_BITS_MIN))
+#endif /* CONFIG_KASAN */
#define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT)
@@ -117,14 +132,14 @@
* 16 KB granule: 128 level 3 entries, with contiguous bit
* 64 KB granule: 32 level 3 entries, with contiguous bit
*/
-#define SEGMENT_ALIGN SZ_2M
+#define SEGMENT_ALIGN SZ_2M
#else
/*
* 4 KB granule: 16 level 3 entries, with contiguous bit
* 16 KB granule: 4 level 3 entries, without contiguous bit
* 64 KB granule: 1 level 3 entry
*/
-#define SEGMENT_ALIGN SZ_64K
+#define SEGMENT_ALIGN SZ_64K
#endif
/*
@@ -157,10 +172,13 @@
#endif
#ifndef __ASSEMBLY__
+extern u64 vabits_actual;
+#define PAGE_END (_PAGE_END(vabits_actual))
#include <linux/bitops.h>
#include <linux/mmdebug.h>
+extern s64 physvirt_offset;
extern s64 memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
@@ -176,9 +194,6 @@ static inline unsigned long kaslr_offset(void)
return kimage_vaddr - KIMAGE_VADDR;
}
-/* the actual size of a user virtual address */
-extern u64 vabits_user;
-
/*
* Allow all memory at the discovery stage. We will clip it later.
*/
@@ -201,24 +216,24 @@ extern u64 vabits_user;
* pass on to access_ok(), for instance.
*/
#define untagged_addr(addr) \
- ((__typeof__(addr))sign_extend64((u64)(addr), 55))
+ ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
#ifdef CONFIG_KASAN_SW_TAGS
#define __tag_shifted(tag) ((u64)(tag) << 56)
-#define __tag_set(addr, tag) (__typeof__(addr))( \
- ((u64)(addr) & ~__tag_shifted(0xff)) | __tag_shifted(tag))
#define __tag_reset(addr) untagged_addr(addr)
#define __tag_get(addr) (__u8)((u64)(addr) >> 56)
#else
+#define __tag_shifted(tag) 0UL
+#define __tag_reset(addr) (addr)
+#define __tag_get(addr) 0
+#endif /* CONFIG_KASAN_SW_TAGS */
+
static inline const void *__tag_set(const void *addr, u8 tag)
{
- return addr;
+ u64 __addr = (u64)addr & ~__tag_shifted(0xff);
+ return (const void *)(__addr | __tag_shifted(tag));
}
-#define __tag_reset(addr) (addr)
-#define __tag_get(addr) 0
-#endif
-
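
Turning __tag_set() into a real function (with the disabled-case helpers as trivial macros) keeps a single definition for both configurations. The tag round trip can be modelled in standalone C; sign_extend64(x, 55) is approximated with shifts and the pointer value is illustrative:

#include <stdio.h>

#define __tag_shifted(tag)      ((unsigned long)(tag) << 56)

static unsigned long tag_set(unsigned long addr, unsigned char tag)
{
        return (addr & ~__tag_shifted(0xff)) | __tag_shifted(tag);
}

/* Model of untagged_addr(): sign-extend from bit 55, wiping the tag byte. */
static unsigned long tag_reset(unsigned long addr)
{
        return (unsigned long)((long)(addr << 8) >> 8);
}

int main(void)
{
        unsigned long p = 0xffff000012345678UL;
        unsigned long t = tag_set(p, 0x2a);

        printf("tagged   = 0x%016lx\n", t);             /* 0x2aff000012345678 */
        printf("untagged = 0x%016lx\n", tag_reset(t));  /* 0xffff000012345678 */
        return 0;
}
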
/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
@@ -227,19 +242,18 @@ static inline const void *__tag_set(const void *addr, u8 tag)
/*
- * The linear kernel range starts in the middle of the virtual adddress
+ * The linear kernel range starts at the bottom of the virtual address
* space. Testing the top bit for the start of the region is a
- * sufficient check.
+ * sufficient check and avoids having to worry about the tag.
*/
-#define __is_lm_address(addr) (!!((addr) & BIT(VA_BITS - 1)))
+#define __is_lm_address(addr)	(!(((u64)(addr)) & BIT(vabits_actual - 1)))
-#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
+#define __lm_to_phys(addr) (((addr) + physvirt_offset))
#define __kimg_to_phys(addr) ((addr) - kimage_voffset)
#define __virt_to_phys_nodebug(x) ({ \
- phys_addr_t __x = (phys_addr_t)(x); \
- __is_lm_address(__x) ? __lm_to_phys(__x) : \
- __kimg_to_phys(__x); \
+ phys_addr_t __x = (phys_addr_t)(__tag_reset(x)); \
+ __is_lm_address(__x) ? __lm_to_phys(__x) : __kimg_to_phys(__x); \
})
#define __pa_symbol_nodebug(x) __kimg_to_phys((phys_addr_t)(x))
@@ -250,9 +264,9 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x) __virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
-#endif
+#endif /* CONFIG_DEBUG_VIRTUAL */
-#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
+#define __phys_to_virt(x) ((unsigned long)((x) - physvirt_offset))
#define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset))
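
With physvirt_offset (PHYS_OFFSET - PAGE_OFFSET, computed at boot), both linear-map conversions collapse to a single add or subtract. A standalone sketch with made-up values:

#include <stdio.h>

int main(void)
{
        /* Illustrative values, not read from a live system. */
        unsigned long page_offset = -(1UL << 48);               /* PAGE_OFFSET */
        unsigned long phys_offset = 0x80000000UL;               /* DRAM base   */
        unsigned long physvirt_offset = phys_offset - page_offset;

        unsigned long virt = page_offset + 0x1000;
        unsigned long phys = virt + physvirt_offset;            /* __lm_to_phys()   */
        unsigned long back = phys - physvirt_offset;            /* __phys_to_virt() */

        printf("virt 0x%016lx -> phys 0x%lx -> virt 0x%016lx\n",
               virt, phys, back);
        return 0;
}
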
/*
@@ -286,41 +300,38 @@ static inline void *phys_to_virt(phys_addr_t x)
#define __pa_nodebug(x) __virt_to_phys_nodebug((unsigned long)(x))
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
-#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
+#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
+#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
/*
- * virt_to_page(k) convert a _valid_ virtual address to struct page *
- * virt_addr_valid(k) indicates whether a virtual address is valid
+ * virt_to_page(x) convert a _valid_ virtual address to struct page *
+ * virt_addr_valid(x) indicates whether a virtual address is valid
*/
#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET)
#if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_page(x) pfn_to_page(virt_to_pfn(x))
#else
-#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
-
-#define page_to_virt(page) ({ \
- unsigned long __addr = \
- ((__page_to_voff(page)) | PAGE_OFFSET); \
- const void *__addr_tag = \
- __tag_set((void *)__addr, page_kasan_tag(page)); \
- ((void *)__addr_tag); \
+#define page_to_virt(x) ({ \
+ __typeof__(x) __page = x; \
+ u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\
+ u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE); \
+ (void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
})
-#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
+#define virt_to_page(x) ({ \
+ u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE; \
+ u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page)); \
+ (struct page *)__addr; \
+})
+#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */
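
The rewritten macros make the vmemmap relationship explicit: a linear-map address and its struct page are linked through a single page-frame index. A userspace sketch of the round trip (struct page size and VMEMMAP_START are illustrative):

#include <stdio.h>

struct page { unsigned long words[8]; };        /* 64-byte stand-in */

#define PAGE_SIZE       4096UL

int main(void)
{
        unsigned long page_offset   = -(1UL << 48);
        unsigned long vmemmap_start = 0xfffffdffffe00000UL;     /* illustrative */
        unsigned long virt = page_offset + 5 * PAGE_SIZE;

        /* virt_to_page(): index into the vmemmap by page frame. */
        unsigned long idx  = (virt - page_offset) / PAGE_SIZE;
        unsigned long pg   = vmemmap_start + idx * sizeof(struct page);

        /* page_to_virt(): the inverse. */
        unsigned long back = page_offset +
                ((pg - vmemmap_start) / sizeof(struct page)) * PAGE_SIZE;

        printf("virt 0x%016lx -> page 0x%016lx -> virt 0x%016lx\n",
               virt, pg, back);
        return 0;
}
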
-#define _virt_addr_valid(kaddr) pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
- + PHYS_OFFSET) >> PAGE_SHIFT)
-#endif
-#endif
+#define virt_addr_valid(addr) ({ \
+ __typeof__(addr) __addr = addr; \
+ __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \
+})
-#define _virt_addr_is_linear(kaddr) \
- (__tag_reset((u64)(kaddr)) >= PAGE_OFFSET)
-#define virt_addr_valid(kaddr) \
- (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr))
+#endif /* !__ASSEMBLY__ */
/*
* Given that the GIC architecture permits ITS implementations that can only be
@@ -335,4 +346,4 @@ static inline void *phys_to_virt(phys_addr_t x)
#include <asm-generic/memory_model.h>
-#endif
+#endif /* __ASM_MEMORY_H */
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index fd6161336653..f217e3292919 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -126,7 +126,7 @@ extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot, bool page_mappings_only);
-extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
+extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
#define INIT_MM_CONTEXT(name) \
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 7ed0adb187a8..3827ff4040a3 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -63,7 +63,7 @@ extern u64 idmap_ptrs_per_pgd;
static inline bool __cpu_uses_extended_idmap(void)
{
- if (IS_ENABLED(CONFIG_ARM64_USER_VA_BITS_52))
+ if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
return false;
return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
@@ -95,7 +95,7 @@ static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
isb();
}
-#define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
+#define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
#define cpu_set_idmap_tcr_t0sz() __cpu_set_tcr_t0sz(idmap_t0sz)
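
T0SZ encodes the input address size as 64 minus the number of VA bits, so the default TCR value must now track the runtime-detected vabits_actual rather than the compile-time VA_BITS. The arithmetic, as a standalone sketch:

#include <stdio.h>

#define TCR_T0SZ_OFFSET 0
#define TCR_T0SZ(x)     ((64UL - (x)) << TCR_T0SZ_OFFSET)

int main(void)
{
        /* vabits_actual is 52 only when the hardware supports LVA;
         * otherwise it falls back to the configured VA_BITS (48 here). */
        unsigned long vabits_actual = 48;       /* illustrative */

        printf("T0SZ = %lu\n", TCR_T0SZ(vabits_actual));        /* 16 */
        return 0;
}
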
/*
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
index 9e690686e8aa..70b323cf8300 100644
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PCI_H
#define __ASM_PCI_H
-#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/slab.h>
@@ -35,5 +34,4 @@ static inline int pci_proc_domain(struct pci_bus *bus)
}
#endif /* CONFIG_PCI */
-#endif /* __KERNEL__ */
#endif /* __ASM_PCI_H */
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index db92950bb1a0..3df60f97da1f 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -304,7 +304,7 @@
#define TTBR_BADDR_MASK_52 (((UL(1) << 46) - 1) << 2)
#endif
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#ifdef CONFIG_ARM64_VA_BITS_52
/* Must be at least 64-byte aligned to prevent corruption of the TTBR */
#define TTBR1_BADDR_4852_OFFSET (((UL(1) << (52 - PGDIR_SHIFT)) - \
(UL(1) << (48 - PGDIR_SHIFT))) * 8)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index e09760ece844..470ba7ae8821 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -21,9 +21,7 @@
* and fixed mappings
*/
#define VMALLOC_START (MODULES_END)
-#define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
-
-#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
+#define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
#define FIRST_USER_ADDRESS 0UL
@@ -35,6 +33,8 @@
#include <linux/mm_types.h>
#include <linux/sched.h>
+extern struct page *vmemmap;
+
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
@@ -220,8 +220,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
* Only if the new pte is valid and kernel, otherwise TLB maintenance
* or update_mmu_cache() have the necessary barriers.
*/
- if (pte_valid_not_user(pte))
+ if (pte_valid_not_user(pte)) {
dsb(ishst);
+ isb();
+ }
}
extern void __sync_icache_dcache(pte_t pteval);
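
The isb() added to set_pte() here (and to the pmd/pud/pgd setters below) matters because a dsb alone only orders the page-table write; it does not force the CPU to discard work already speculated past the update. A compile-only sketch of the publish pattern these setters now follow (arm64 inline asm, not the kernel's actual helpers):

static inline void publish_pte(unsigned long *ptep, unsigned long pte)
{
        *(volatile unsigned long *)ptep = pte;  /* WRITE_ONCE()-style store  */
        asm volatile("dsb ishst" ::: "memory"); /* make the write visible    */
        asm volatile("isb" ::: "memory");       /* resync the local pipeline */
}
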
@@ -484,8 +486,10 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
WRITE_ONCE(*pmdp, pmd);
- if (pmd_valid(pmd))
+ if (pmd_valid(pmd)) {
dsb(ishst);
+ isb();
+ }
}
static inline void pmd_clear(pmd_t *pmdp)
@@ -543,8 +547,10 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
WRITE_ONCE(*pudp, pud);
- if (pud_valid(pud))
+ if (pud_valid(pud)) {
dsb(ishst);
+ isb();
+ }
}
static inline void pud_clear(pud_t *pudp)
@@ -602,6 +608,7 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
WRITE_ONCE(*pgdp, pgd);
dsb(ishst);
+ isb();
}
static inline void pgd_clear(pgd_t *pgdp)
@@ -859,8 +866,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
-#define kc_vaddr_to_offset(v) ((v) & ~VA_START)
-#define kc_offset_to_vaddr(o) ((o) | VA_START)
+#define kc_vaddr_to_offset(v) ((v) & ~PAGE_END)
+#define kc_offset_to_vaddr(o) ((o) | PAGE_END)
#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index d328540cb85e..7a24bad1a58b 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -69,7 +69,7 @@ extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);
* The EL0 pointer bits used by a pointer authentication code.
* This is dependent on TBI0 being enabled, or bits 63:56 would also apply.
*/
-#define ptrauth_user_pac_mask() GENMASK(54, vabits_user)
+#define ptrauth_user_pac_mask() GENMASK(54, vabits_actual)
/* Only valid for EL0 TTBR0 instruction pointers */
static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 368d90a9d0e5..a2ce65a0c1fa 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -9,7 +9,6 @@
#ifndef __ASM_PROCFNS_H
#define __ASM_PROCFNS_H
-#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <asm/page.h>
@@ -25,5 +24,4 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
#include <asm/memory.h>
#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
#endif /* __ASM_PROCFNS_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 844e2964b0f5..c67848c55009 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -20,7 +20,6 @@
#define NET_IP_ALIGN 0
#ifndef __ASSEMBLY__
-#ifdef __KERNEL__
#include <linux/build_bug.h>
#include <linux/cache.h>
@@ -42,8 +41,8 @@
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
*/
-#define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS)
-#define TASK_SIZE_64 (UL(1) << vabits_user)
+#define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS_MIN)
+#define TASK_SIZE_64 (UL(1) << vabits_actual)
#ifdef CONFIG_COMPAT
#if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
@@ -283,8 +282,6 @@ static inline void spin_lock_prefetch(const void *ptr)
#define HAVE_ARCH_PICK_MMAP_LAYOUT
-#endif
-
extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
extern void __init minsigstksz_setup(void);
@@ -306,6 +303,14 @@ extern void __init minsigstksz_setup(void);
/* PR_PAC_RESET_KEYS prctl */
#define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg)
+#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
+/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
+long set_tagged_addr_ctrl(unsigned long arg);
+long get_tagged_addr_ctrl(void);
+#define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(arg)
+#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl()
+#endif
+
/*
* For CONFIG_GCC_PLUGIN_STACKLEAK
*
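
The new hooks back the PR_{SET,GET}_TAGGED_ADDR_CTRL prctls, which a task uses to opt in to passing tagged pointers to syscalls. A minimal userspace sketch; the constants come from <linux/prctl.h> and are duplicated here in case libc headers predate them:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
#define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
#endif

int main(void)
{
        if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
                perror("tagged address ABI not available");
        else
                printf("tagged pointers may now be passed to syscalls\n");
        return 0;
}
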
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 1dcf63a9ac1f..fbebb411ae20 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -301,6 +301,11 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
return regs->regs[0];
}
+static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+{
+ regs->regs[0] = rc;
+}
+
/**
* regs_get_kernel_argument() - get Nth function argument in kernel
* @regs: pt_regs of that context
diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h
index bd43d1cf724b..7e9f163d02ec 100644
--- a/arch/arm64/include/asm/signal32.h
+++ b/arch/arm64/include/asm/signal32.h
@@ -5,7 +5,6 @@
#ifndef __ASM_SIGNAL32_H
#define __ASM_SIGNAL32_H
-#ifdef __KERNEL__
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
@@ -79,5 +78,4 @@ static inline void compat_setup_restart_syscall(struct pt_regs *regs)
{
}
#endif /* CONFIG_COMPAT */
-#endif /* __KERNEL__ */
#endif /* __ASM_SIGNAL32_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 06ebcfef73df..972d196c7714 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -212,6 +212,9 @@
#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0)
#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
+#define SYS_PAR_EL1_F BIT(0)
+#define SYS_PAR_EL1_FST GENMASK(6, 1)
+
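
These fields let the kernel check whether an AT (address translation) instruction failed: F (bit 0) flags the failure and FST (bits 6:1) carries the fault status code. A standalone sketch of the decode, with an illustrative register value:

#include <stdio.h>

#define PAR_F   (1UL << 0)              /* translation failed       */
#define PAR_FST (0x3fUL << 1)           /* fault status, bits 6:1   */

int main(void)
{
        unsigned long par = 0x80f;      /* illustrative PAR_EL1 value */

        if (par & PAR_F)
                printf("AT failed, FST = 0x%02lx\n", (par & PAR_FST) >> 1);
        else
                printf("PA = 0x%lx\n", par & 0x0000fffffffff000UL);
        return 0;
}
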
/*** Statistical Profiling Extension ***/
/* ID registers */
#define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7)
@@ -499,28 +502,11 @@
#define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \
(BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \
(BIT(29)))
-#define SCTLR_EL2_RES0 ((BIT(6)) | (BIT(7)) | (BIT(8)) | (BIT(9)) | \
- (BIT(10)) | (BIT(13)) | (BIT(14)) | (BIT(15)) | \
- (BIT(17)) | (BIT(20)) | (BIT(24)) | (BIT(26)) | \
- (BIT(27)) | (BIT(30)) | (BIT(31)) | \
- (0xffffefffUL << 32))
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL2 SCTLR_ELx_EE
-#define ENDIAN_CLEAR_EL2 0
#else
#define ENDIAN_SET_EL2 0
-#define ENDIAN_CLEAR_EL2 SCTLR_ELx_EE
-#endif
-
-/* SCTLR_EL2 value used for the hyp-stub */
-#define SCTLR_EL2_SET (SCTLR_ELx_IESB | ENDIAN_SET_EL2 | SCTLR_EL2_RES1)
-#define SCTLR_EL2_CLEAR (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
- SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
- SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
-
-#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffffUL
-#error "Inconsistent SCTLR_EL2 set/clear bits"
#endif
/* SCTLR_EL1 specific flags. */
@@ -539,16 +525,11 @@
#define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | \
(BIT(29)))
-#define SCTLR_EL1_RES0 ((BIT(6)) | (BIT(10)) | (BIT(13)) | (BIT(17)) | \
- (BIT(27)) | (BIT(30)) | (BIT(31)) | \
- (0xffffefffUL << 32))
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
-#define ENDIAN_CLEAR_EL1 0
#else
#define ENDIAN_SET_EL1 0
-#define ENDIAN_CLEAR_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
#endif
#define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\
@@ -556,13 +537,6 @@
SCTLR_EL1_DZE | SCTLR_EL1_UCT |\
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
-#define SCTLR_EL1_CLEAR (SCTLR_ELx_A | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD |\
- SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
- SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI | SCTLR_EL1_RES0)
-
-#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffffUL
-#error "Inconsistent SCTLR_EL1 set/clear bits"
-#endif
/* id_aa64isar0 */
#define ID_AA64ISAR0_TS_SHIFT 52
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 180b34ec5965..f0cec4160136 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -8,8 +8,6 @@
#ifndef __ASM_THREAD_INFO_H
#define __ASM_THREAD_INFO_H
-#ifdef __KERNEL__
-
#include <linux/compiler.h>
#ifndef __ASSEMBLY__
@@ -59,29 +57,18 @@ void arch_release_task_struct(struct task_struct *tsk);
#endif
-/*
- * thread information flags:
- * TIF_SYSCALL_TRACE - syscall trace active
- * TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace
- * TIF_SYSCALL_AUDIT - syscall auditing
- * TIF_SECCOMP - syscall secure computing
- * TIF_SYSCALL_EMU - syscall emulation active
- * TIF_SIGPENDING - signal pending
- * TIF_NEED_RESCHED - rescheduling necessary
- * TIF_NOTIFY_RESUME - callback before returning to user
- */
-#define TIF_SIGPENDING 0
-#define TIF_NEED_RESCHED 1
+#define TIF_SIGPENDING 0 /* signal pending */
+#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
#define TIF_FSCHECK 5 /* Check FS is USER_DS on return */
#define TIF_NOHZ 7
-#define TIF_SYSCALL_TRACE 8
-#define TIF_SYSCALL_AUDIT 9
-#define TIF_SYSCALL_TRACEPOINT 10
-#define TIF_SECCOMP 11
-#define TIF_SYSCALL_EMU 12
+#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
+#define TIF_SYSCALL_AUDIT 9 /* syscall auditing */
+#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */
+#define TIF_SECCOMP 11 /* syscall secure computing */
+#define TIF_SYSCALL_EMU 12 /* syscall emulation active */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_FREEZE 19
#define TIF_RESTORE_SIGMASK 20
@@ -90,6 +77,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define TIF_SVE 23 /* Scalable Vector Extension in use */
#define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */
#define TIF_SSBD 25 /* Wants SSB mitigation */
+#define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -121,5 +109,4 @@ void arch_release_task_struct(struct task_struct *tsk);
.addr_limit = KERNEL_DS, \
}
-#endif /* __KERNEL__ */
#endif /* __ASM_THREAD_INFO_H */
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 8af7a85f76bd..bc3949064725 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -251,6 +251,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
dsb(ishst);
__tlbi(vaae1is, addr);
dsb(ish);
+ isb();
}
#endif
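
The appended isb() closes a window where the local CPU could keep using a stale translation fetched before the invalidation completed, which matters when the kernel immediately walks a page table it has just remapped. A compile-only sketch of the resulting sequence (arm64 asm; the kernel's __tlbi() also masks and merges an ASID into the operand, omitted here):

static inline void flush_kernel_pgtable_entry(unsigned long kaddr)
{
        asm volatile(
        "       dsb     ishst\n"        /* complete the PTE update first   */
        "       tlbi    vaae1is, %0\n"  /* invalidate VA for all ASIDs     */
        "       dsb     ish\n"          /* wait for invalidation to finish */
        "       isb\n"                  /* then resync the local CPU       */
        : : "r" (kaddr >> 12) : "memory");
}
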
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 0524f2438649..a4d945db95a2 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -4,29 +4,6 @@
#include <linux/cpumask.h>
-struct cpu_topology {
- int thread_id;
- int core_id;
- int package_id;
- int llc_id;
- cpumask_t thread_sibling;
- cpumask_t core_sibling;
- cpumask_t llc_sibling;
-};
-
-extern struct cpu_topology cpu_topology[NR_CPUS];
-
-#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id)
-#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
-#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
-#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
-#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling)
-
-void init_cpu_topology(void);
-void store_cpu_topology(unsigned int cpuid);
-void remove_cpu_topology(unsigned int cpuid);
-const struct cpumask *cpu_coregroup_mask(int cpu);
-
#ifdef CONFIG_NUMA
struct pci_bus;
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 5a1c32260c1f..097d6bfac0b7 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -62,6 +62,10 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
{
unsigned long ret, limit = current_thread_info()->addr_limit;
+ if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
+ test_thread_flag(TIF_TAGGED_ADDR))
+ addr = untagged_addr(addr);
+
__chk_user_ptr(addr);
asm volatile(
// A + B <= C + 1 for all A,B,C, in four easy steps:
@@ -215,7 +219,8 @@ static inline void uaccess_enable_not_uao(void)
/*
* Sanitise a uaccess pointer such that it becomes NULL if above the
- * current addr_limit.
+ * current addr_limit. In case the pointer is tagged (has the top byte set),
+ * untag the pointer before checking.
*/
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
@@ -223,10 +228,11 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
void __user *safe_ptr;
asm volatile(
- " bics xzr, %1, %2\n"
+ " bics xzr, %3, %2\n"
" csel %0, %1, xzr, eq\n"
: "=&r" (safe_ptr)
- : "r" (ptr), "r" (current_thread_info()->addr_limit)
+ : "r" (ptr), "r" (current_thread_info()->addr_limit),
+ "r" (untagged_addr(ptr))
: "cc");
csdb();
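
__uaccess_mask_ptr() now compares the untagged value against addr_limit while still returning the original tagged pointer on success. A branch-free model of the logic in plain C (the TASK_SIZE-style limit is illustrative):

#include <stdio.h>

static unsigned long mask_user_ptr(unsigned long ptr, unsigned long limit)
{
        /* untagged_addr(): sign-extend from bit 55 */
        unsigned long untagged = (unsigned long)((long)(ptr << 8) >> 8);

        /* 'bics xzr, untagged, limit' sets Z iff (untagged & ~limit) == 0;
         * 'csel' then yields the original (possibly tagged) pointer or NULL. */
        return (untagged & ~limit) == 0 ? ptr : 0;
}

int main(void)
{
        unsigned long limit = (1UL << 48) - 1;  /* illustrative addr_limit */

        printf("0x%016lx\n", mask_user_ptr(0x2a00001234567890UL, limit));
        printf("0x%016lx\n", mask_user_ptr(0xffff000012345678UL, limit));
        return 0;
}

The first call keeps the tagged user pointer (its untagged form is below the limit); the second returns NULL because the untagged address falls in the kernel half.
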
diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h
index 9c15e0a06301..07468428fd29 100644
--- a/arch/arm64/include/asm/vdso.h
+++ b/arch/arm64/include/asm/vdso.h
@@ -5,8 +5,6 @@
#ifndef __ASM_VDSO_H
#define __ASM_VDSO_H
-#ifdef __KERNEL__
-
/*
* Default link address for the vDSO.
* Since we randomise the VDSO mapping, there's little point in trying
@@ -28,6 +26,4 @@
#endif /* !__ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
#endif /* __ASM_VDSO_H */
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
index ba6dbc3de864..1f38bf330a6e 100644
--- a/arch/arm64/include/asm/vdso_datapage.h
+++ b/arch/arm64/include/asm/vdso_datapage.h
@@ -5,8 +5,6 @@
#ifndef __ASM_VDSO_DATAPAGE_H
#define __ASM_VDSO_DATAPAGE_H
-#ifdef __KERNEL__
-
#ifndef __ASSEMBLY__
struct vdso_data {
@@ -32,6 +30,4 @@ struct vdso_data {
#endif /* !__ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
#endif /* __ASM_VDSO_DATAPAGE_H */
diff --git a/arch/arm64/include/uapi/asm/stat.h b/arch/arm64/include/uapi/asm/stat.h
deleted file mode 100644
index 313325fa22fa..000000000000
--- a/arch/arm64/include/uapi/asm/stat.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#include <asm-generic/stat.h>