author    Christophe Leroy <christophe.leroy@csgroup.eu>  2020-11-24 20:51:57 +0100
committer Michael Ellerman <mpe@ellerman.id.au>           2020-12-09 13:48:14 +0100
commit    da481c4fe0e485cdab5cf4d2761be8b8fb38d3d1
tree      88463da95649927e886d2267bd6a86c00987e848
parent    powerpc/32s: In add_hash_page(), calculate VSID later
powerpc/32s: Cleanup around PTE_FLAGS_OFFSET in hash_low.S
PTE_FLAGS_OFFSET is defined in asm/page_32.h but used only in
hash_low.S, and whether it is zero depends on CONFIG_PTE_64BIT.

Instead of value tests like #if (PTE_FLAGS_OFFSET != 0), use code
conditional on CONFIG_PTE_64BIT.

Also move the definition of PTE_FLAGS_OFFSET into hash_low.S itself,
which improves readability.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/f5bc21db7a33dab55924734e6060c2e9daed562e.1606247495.git.christophe.leroy@csgroup.eu
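
The pattern in play can be sketched outside the kernel. Below is a minimal,
hypothetical C illustration, not the kernel code: CONFIG_PTE_64BIT is the
real Kconfig symbol, everything else is made up for the sketch. The point is
that PTE_FLAGS_OFFSET's value is fully determined by CONFIG_PTE_64BIT, so a
use site can sit inside a CONFIG_PTE_64BIT branch instead of re-testing the
macro's value.

#include <stdio.h>

#ifdef CONFIG_PTE_64BIT
#define PTE_FLAGS_OFFSET 4	/* offset of PTE flags, in bytes */
#else
#define PTE_FLAGS_OFFSET 0
#endif

int main(void)
{
	/*
	 * Old style, testing the derived value at the use site:
	 *
	 *     #if (PTE_FLAGS_OFFSET != 0)
	 *             p += PTE_FLAGS_OFFSET;
	 *     #endif
	 *
	 * New style: the adjustment lives in the CONFIG_PTE_64BIT branch,
	 * where the offset is nonzero by definition.
	 */
	unsigned char buf[8] = { 0 };
	unsigned char *p = buf;

#ifdef CONFIG_PTE_64BIT
	p += PTE_FLAGS_OFFSET;	/* step past the high word to the flags */
#endif
	printf("flags word at byte offset %ld\n", (long)(p - buf));
	return 0;
}

Compiling with and without -DCONFIG_PTE_64BIT shows the offset switch
between 4 and 0, mirroring what the two kernel configurations generate.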
-rw-r--r--  arch/powerpc/include/asm/page_32.h   |  6
-rw-r--r--  arch/powerpc/mm/book3s32/hash_low.S  | 23
2 files changed, 13 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index d64dfe3ac712..56f217606327 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -16,12 +16,6 @@
 #define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 #endif
 
-#ifdef CONFIG_PTE_64BIT
-#define PTE_FLAGS_OFFSET	4	/* offset of PTE flags, in bytes */
-#else
-#define PTE_FLAGS_OFFSET	0
-#endif
-
 #if defined(CONFIG_PPC_256K_PAGES) || \
     (defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES))
 #define PTE_SHIFT	(PAGE_SHIFT - PTE_T_LOG2 - 2)	/* 1/4 of a page */
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index b0bb9d193400..0e6dc830c38b 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -26,6 +26,12 @@
 #include <asm/feature-fixups.h>
 #include <asm/code-patching-asm.h>
 
+#ifdef CONFIG_PTE_64BIT
+#define PTE_FLAGS_OFFSET	4	/* offset of PTE flags, in bytes */
+#else
+#define PTE_FLAGS_OFFSET	0
+#endif
+
 /*
  * Load a PTE into the hash table, if possible.
  * The address is in r4, and r3 contains an access flag:
@@ -88,6 +94,11 @@ _GLOBAL(hash_page)
 	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
 #else
 	rlwimi	r8,r4,23,20,28		/* compute pte address */
+	/*
+	 * If PTE_64BIT is set, the low word is the flags word; use that
+	 * word for locking since it contains all the interesting bits.
+	 */
+	addi	r8,r8,PTE_FLAGS_OFFSET
 #endif
 
 	/*
@@ -95,13 +106,7 @@ _GLOBAL(hash_page)
 	 * because almost always, there won't be a permission violation
	 * and there won't already be an HPTE, and thus we will have
	 * to update the PTE to set _PAGE_HASHPTE. -- paulus.
-	 *
-	 * If PTE_64BIT is set, the low word is the flags word; use that
-	 * word for locking since it contains all the interesting bits.
	 */
-#if (PTE_FLAGS_OFFSET != 0)
-	addi	r8,r8,PTE_FLAGS_OFFSET
-#endif
 .Lretry:
 	lwarx	r6,0,r8			/* get linux-style pte, flag word */
 #ifdef CONFIG_PPC_KUAP
@@ -489,8 +494,9 @@ _GLOBAL(flush_hash_pages)
 	rlwimi	r5,r4,22,20,29
 #else
 	rlwimi	r5,r4,23,20,28
+	addi	r5,r5,PTE_FLAGS_OFFSET
 #endif
-1:	lwz	r0,PTE_FLAGS_OFFSET(r5)
+1:	lwz	r0,0(r5)
 	cmpwi	cr1,r6,1
 	andi.	r0,r0,_PAGE_HASHPTE
 	bne	2f
@@ -534,9 +540,6 @@ _GLOBAL(flush_hash_pages)
 	 * already clear, we're done (for this pte). If not,
	 * clear it (atomically) and proceed. -- paulus.
	 */
-#if (PTE_FLAGS_OFFSET != 0)
-	addi	r5,r5,PTE_FLAGS_OFFSET
-#endif
33:	lwarx	r8,0,r5		/* fetch the pte flags word */
 	andi.	r0,r8,_PAGE_HASHPTE
 	beq	8f		/* done if HASHPTE is already clear */
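
For context on the value 4 in the moved definition: book3s/32 CPUs are
big-endian, so when CONFIG_PTE_64BIT makes a PTE two words wide, the least
significant word, the one holding flag bits such as _PAGE_HASHPTE, is the
second word in memory. A hedged C illustration follows; the PTE value is
made up, this is not kernel code, and the printed result assumes a
big-endian host.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t pte = 0x00000123000001c0ULL;	/* high: PFN bits, low: flags */
	uint32_t flags;

	/*
	 * Read the word at byte offset 4 -- what the old
	 * "lwz r0,PTE_FLAGS_OFFSET(r5)" did, and what the addi-adjusted
	 * "lwz r0,0(r5)" does after this patch.
	 */
	memcpy(&flags, (const unsigned char *)&pte + 4, sizeof(flags));
	printf("word at offset 4: 0x%08x\n", flags);
	/*
	 * Prints 0x000001c0 (the flags word) on big-endian; on a
	 * little-endian host the low word would sit at offset 0 and this
	 * would print the high word instead.
	 */
	return 0;
}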