author     Christophe Leroy <christophe.leroy@csgroup.eu>    2020-05-19 07:49:16 +0200
committer  Michael Ellerman <mpe@ellerman.id.au>             2020-05-26 14:22:22 +0200
commit     400dc0f86102d2ad11d3601f1948fbb02e926431 (patch)
tree       06f9ea2dd308fa882cff4f9fc5d951aace3b1eb8    /arch/powerpc/kernel/head_8xx.S
parent     powerpc/8xx: Always pin TLBs at startup. (diff)
powerpc/8xx: Drop special handling of Linear and IMMR mappings in I/D TLB handlers
Until now, the linear and IMMR mappings have been managed via huge TLB entries
through dedicated code directly in the TLB miss handlers. This requires
patching the TLB miss handlers at startup and a lot of special-case code.

Remove all this dedicated code.

For now we are back to normal handling via standard 4k pages. In the
following patches, the linear memory mapping and the IMMR mapping will be
managed through huge pages.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/221b7e3ead80a5969629938c023f8cfe45fdd2fb.1589866984.git.christophe.leroy@csgroup.eu
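
For readers of the diff below, the special-case path being deleted can be pictured as an address classification in front of the normal page-table walk. The C fragment below is only an illustrative sketch, not code from the kernel: PAGE_OFFSET and VIRT_IMMR_BASE stand in for the kernel's own definitions (the value shown for VIRT_IMMR_BASE is an assumption), pin_tlb_immr mirrors CONFIG_PIN_TLB_IMMR, and linmem_top models the boot-time bound that the removed patch__*_linmem_top patch_sites adjusted.

    #include <stdbool.h>

    /* Illustrative stand-ins; the real values come from the kernel headers. */
    #define PAGE_OFFSET     0xc0000000UL  /* base of the kernel linear mapping */
    #define VIRT_IMMR_BASE  0xff000000UL  /* assumed virtual base of the IMMR window */

    enum dtlb_path { DTLB_IMMR, DTLB_LINEAR, DTLB_PAGE_TABLE_WALK };

    /*
     * Rough equivalent of the checks the removed assembly made in the DTLB
     * miss handler: when the IMMR area is not covered by a pinned TLB entry,
     * a miss inside it was served with a huge IMMR entry; misses inside the
     * linear mapping (below the patched linmem_top bound) were served with a
     * huge linear-map entry; everything else fell through to the standard
     * 4k page-table walk, which after this patch is the only path left.
     */
    static enum dtlb_path classify_dtlb_miss(unsigned long ea, bool pin_tlb_immr,
                                             unsigned long linmem_top)
    {
            if (!pin_tlb_immr && (ea & ~0x7fffffUL) == VIRT_IMMR_BASE)
                    return DTLB_IMMR;
            if (ea >= PAGE_OFFSET && ea < linmem_top)
                    return DTLB_LINEAR;
            return DTLB_PAGE_TABLE_WALK;
    }

With the patch applied, classify_dtlb_miss() conceptually always returns DTLB_PAGE_TABLE_WALK; the two fast-path outcomes disappear together with the DTLBMissIMMR/DTLBMissLinear targets and the runtime patching of the bound.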
Diffstat (limited to 'arch/powerpc/kernel/head_8xx.S')
-rw-r--r--   arch/powerpc/kernel/head_8xx.S   29
1 file changed, 2 insertions(+), 27 deletions(-)
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index b0cceee6405c..d1546f379757 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -207,32 +207,22 @@ InstructionTLBMiss:
 	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
 	INVALIDATE_ADJACENT_PAGES_CPU15(r10)
 	mtspr	SPRN_MD_EPN, r10
-	/* Only modules will cause ITLB Misses as we always
-	 * pin the first 8MB of kernel memory */
 #ifdef ITLB_MISS_KERNEL
 	mfcr	r11
-#if defined(SIMPLE_KERNEL_ADDRESS) && defined(CONFIG_PIN_TLB_TEXT)
+#if defined(SIMPLE_KERNEL_ADDRESS)
 	cmpi	cr0, r10, 0	/* Address >= 0x80000000 */
 #else
 	rlwinm	r10, r10, 16, 0xfff8
 	cmpli	cr0, r10, PAGE_OFFSET@h
-#ifndef CONFIG_PIN_TLB_TEXT
-	/* It is assumed that kernel code fits into the first 32M */
-0:	cmpli	cr7, r10, (PAGE_OFFSET + 0x2000000)@h
-	patch_site	0b, patch__itlbmiss_linmem_top
-#endif
 #endif
 #endif
 	mfspr	r10, SPRN_M_TWB	/* Get level 1 table */
 #ifdef ITLB_MISS_KERNEL
-#if defined(SIMPLE_KERNEL_ADDRESS) && defined(CONFIG_PIN_TLB_TEXT)
+#if defined(SIMPLE_KERNEL_ADDRESS)
 	bge+	3f
 #else
 	blt+	3f
 #endif
-#ifndef CONFIG_PIN_TLB_TEXT
-	blt	cr7, ITLBMissLinear
-#endif
 	rlwinm	r10, r10, 0, 20, 31
 	oris	r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
 3:
@@ -327,19 +317,9 @@ DataStoreTLBMiss:
 	mfspr	r10, SPRN_MD_EPN
 	rlwinm	r10, r10, 16, 0xfff8
 	cmpli	cr0, r10, PAGE_OFFSET@h
-#ifndef CONFIG_PIN_TLB_IMMR
-	cmpli	cr6, r10, VIRT_IMMR_BASE@h
-#endif
-0:	cmpli	cr7, r10, (PAGE_OFFSET + 0x2000000)@h
-	patch_site	0b, patch__dtlbmiss_linmem_top
 
 	mfspr	r10, SPRN_M_TWB	/* Get level 1 table */
 	blt+	3f
-#ifndef CONFIG_PIN_TLB_IMMR
-0:	beq-	cr6, DTLBMissIMMR
-	patch_site	0b, patch__dtlbmiss_immr_jmp
-#endif
-	blt	cr7, DTLBMissLinear
 	rlwinm	r10, r10, 0, 20, 31
 	oris	r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
 3:
@@ -571,14 +551,9 @@ FixupDAR:/* Entry point for dcbx workaround. */
 	cmpli	cr1, r11, PAGE_OFFSET@h
 	mfspr	r11, SPRN_M_TWB	/* Get level 1 table */
 	blt+	cr1, 3f
-	rlwinm	r11, r10, 16, 0xfff8
-
-0:	cmpli	cr7, r11, (PAGE_OFFSET + 0x1800000)@h
-	patch_site	0b, patch__fixupdar_linmem_top
 
 	/* create physical page address from effective address */
 	tophys(r11, r10)
-	blt-	cr7, 201f
 	mfspr	r11, SPRN_M_TWB	/* Get level 1 table */
 	rlwinm	r11, r11, 0, 20, 31
 	oris	r11, r11, (swapper_pg_dir - PAGE_OFFSET)@ha
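
What remains after the patch is the plain two-level walk on every ITLB/DTLB miss. The C fragment below is only an illustrative sketch of that retained logic, inferred from the surviving assembly above: SPRN_M_TWB supplies the address of the level-1 (pgd) entry for the faulting address within the current task's page tables, and kernel addresses keep only the low-order index bits (rlwinm r10, r10, 0, 20, 31) before being rebased onto swapper_pg_dir (oris with (swapper_pg_dir - PAGE_OFFSET)@ha). The function name, the 0xfff index mask, and the swapper_pgdir_phys parameter are chosen here for illustration, not taken from the kernel; PAGE_OFFSET is the same illustrative stand-in as in the earlier sketch.

    /*
     * Illustrative sketch, not kernel code: how the retained handler picks the
     * level-1 entry after this patch, before walking to the PTE and loading a
     * standard 4k TLB entry. swapper_pgdir_phys corresponds to
     * (swapper_pg_dir - PAGE_OFFSET) in the assembly.
     */
    static unsigned long pick_level1_entry(unsigned long m_twb, unsigned long ea,
                                           unsigned long swapper_pgdir_phys)
    {
            unsigned long index = m_twb & 0xfff;    /* pgd entry offset kept by rlwinm */

            if (ea < PAGE_OFFSET)
                    return m_twb;                   /* user address: current pgdir entry */

            /* kernel address: same index, rebased onto swapper_pg_dir */
            return swapper_pgdir_phys + index;
    }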