author | Russell King <rmk+kernel@arm.linux.org.uk> | 2014-06-02 10:29:37 +0200
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2014-06-02 13:52:43 +0200
commit | 20e7e364331d7b5590695a839a18a00547403f18 (patch)
tree | df44deaa7b2a774dcde2495c3b78eb9c948f95bd /arch/arm
parent | ARM: ensure C page table setup code follows assembly code (diff)
download | linux-20e7e364331d7b5590695a839a18a00547403f18.tar.xz, linux-20e7e364331d7b5590695a839a18a00547403f18.zip
ARM: ensure C page table setup code follows assembly code (part II)
This does the same as the previous commit, but for the S bit, which also
needs to match the initial value the assembly code used, for the same
reasons. Again, we add a check for SMP to ensure that the page
tables are correctly set up for SMP.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm')
-rw-r--r-- | arch/arm/mm/mmu.c | 27
1 file changed, 19 insertions, 8 deletions
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 92df149c88a8..df875c457068 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -117,6 +117,8 @@ static struct cachepolicy cache_policies[] __initdata = {
 };
 
 #ifdef CONFIG_CPU_CP15
+static unsigned long initial_pmd_value __initdata = 0;
+
 /*
  * Initialise the cache_policy variable with the initial state specified
  * via the "pmd" value.  This is used to ensure that on ARMv6 and later,
@@ -128,6 +130,8 @@ void __init init_default_cache_policy(unsigned long pmd)
 {
 	int i;
 
+	initial_pmd_value = pmd;
+
 	pmd &= PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE;
 
 	for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
@@ -414,9 +418,15 @@ static void __init build_mem_type_table(void)
 			ecc_mask = 0;
 	}
 
-	if (is_smp() && cachepolicy != CPOLICY_WRITEALLOC) {
-		pr_warn("Forcing write-allocate cache policy for SMP\n");
-		cachepolicy = CPOLICY_WRITEALLOC;
+	if (is_smp()) {
+		if (cachepolicy != CPOLICY_WRITEALLOC) {
+			pr_warn("Forcing write-allocate cache policy for SMP\n");
+			cachepolicy = CPOLICY_WRITEALLOC;
+		}
+		if (!(initial_pmd_value & PMD_SECT_S)) {
+			pr_warn("Forcing shared mappings for SMP\n");
+			initial_pmd_value |= PMD_SECT_S;
+		}
 	}
 
 	/*
@@ -541,11 +551,12 @@ static void __init build_mem_type_table(void)
 			mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 #endif
 
-		if (is_smp()) {
-			/*
-			 * Mark memory with the "shared" attribute
-			 * for SMP systems
-			 */
+		/*
+		 * If the initial page tables were created with the S bit
+		 * set, then we need to do the same here for the same
+		 * reasons given in early_cachepolicy().
+		 */
+		if (initial_pmd_value & PMD_SECT_S) {
 			user_pgprot |= L_PTE_SHARED;
 			kern_pgprot |= L_PTE_SHARED;
 			vecs_pgprot |= L_PTE_SHARED;
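For illustration only, the following is a condensed, self-contained sketch of the logic this patch adds: remember the PMD flags the assembly boot code used, force the shareable (S) bit when running an SMP kernel, and propagate it into the Linux page protections. It is not kernel code; the constant values, the is_smp() stub and the main() driver are assumptions made purely for this example.

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins for the kernel definitions (values are illustrative). */
#define PMD_SECT_S   (1UL << 16)  /* shareable bit in a section descriptor */
#define L_PTE_SHARED (1UL << 10)  /* Linux PTE "shared" bit */

static unsigned long initial_pmd_value;  /* mirrors the value set up by assembly */

static bool is_smp(void) { return true; }  /* assume an SMP kernel for the demo */

/* Analogous to init_default_cache_policy(): record the boot-time PMD flags. */
static void record_initial_pmd(unsigned long pmd)
{
	initial_pmd_value = pmd;
}

/* Analogous to the build_mem_type_table() changes in the patch. */
static unsigned long build_prot(void)
{
	unsigned long pgprot = 0;

	/* On SMP, the initial page tables must have been shareable; warn and fix up if not. */
	if (is_smp() && !(initial_pmd_value & PMD_SECT_S)) {
		printf("Forcing shared mappings for SMP\n");
		initial_pmd_value |= PMD_SECT_S;
	}

	/* If the initial page tables used the S bit, the C-built mappings must too. */
	if (initial_pmd_value & PMD_SECT_S)
		pgprot |= L_PTE_SHARED;

	return pgprot;
}

int main(void)
{
	record_initial_pmd(0);  /* pretend the assembly code did not set the S bit */
	printf("pgprot = %#lx\n", build_prot());
	return 0;
}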