author | Vladimir Murzin <vladimir.murzin@arm.com> | 2017-10-16 14:00:45 +0200
committer | Russell King <rmk+kernel@armlinux.org.uk> | 2017-10-23 17:59:31 +0200
commit | 216218308cfb0939aeecb246b34faf6e179c8d57 (patch)
tree | ac012bba6a2ab6728d7747b57cbf7fdc4a892912 /arch/arm/mm
parent | ARM: 8712/1: NOMMU: Use more MPU regions to cover memory (diff)
ARM: 8713/1: NOMMU: Support MPU in XIP configuration
Currently, there is an assumption in the early MPU setup code that the
kernel image is located in RAM, which is obviously not true for XIP. To
run code from ROM we need to make sure that it is covered by the MPU.
However, because we allocate regions (semi-)dynamically, we can run into
the issue of trimming the region we are running from in case the ROM
spans several MPU regions. To help deal with that, we enforce minimum
alignments of 1MB and 128KB for the start and end of the XIP address
space respectively.
Tested-by: Alexandre TORGUE <alexandre.torgue@st.com>
Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
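A PMSAv7 MPU region must be a power of two in size and is split into eight equal subregions that can be disabled individually, so a 1MB region can be trimmed in 128KB steps. The standalone sketch below (hypothetical base address, struct and helper, not the kernel's allocate_region()) illustrates why the enforced alignments are sufficient: with the ROM start aligned to 1MB and its end to 128KB, the tail of the covering region can be masked off with subregion-disable bits rather than by shrinking the region the kernel is currently executing from.

```c
#include <stdint.h>
#include <stdio.h>

#define SZ_128K		0x00020000u
#define SZ_1M		0x00100000u
#define SUBREGIONS	8u

struct fake_region {
	uint32_t base;
	uint32_t size;		/* always a power of two */
	uint32_t sr_disable;	/* one bit per disabled subregion */
};

/*
 * Cover [base, end) with a single 1MB region plus subregion disables.
 * The real allocate_region() handles larger ROMs with several regions;
 * this only illustrates the tail-trimming that the 128KB end alignment
 * makes possible.
 */
static int cover_rom(uint32_t base, uint32_t end, struct fake_region *r)
{
	uint32_t sub = SZ_1M / SUBREGIONS;	/* 128KB per subregion */
	uint32_t i;

	/* The alignment guarantees the commit enforces. */
	if (base % SZ_1M || end % SZ_128K || end <= base || end - base > SZ_1M)
		return -1;

	r->base = base;
	r->size = SZ_1M;
	r->sr_disable = 0;

	/* Disable the trailing subregions that lie beyond the ROM end. */
	for (i = (end - base) / sub; i < SUBREGIONS; i++)
		r->sr_disable |= 1u << i;

	return 0;
}

int main(void)
{
	struct fake_region r;

	/* Hypothetical XIP ROM: 1MB-aligned start, 640KB of text + rodata. */
	if (!cover_rom(0x08000000u, 0x08000000u + 5 * SZ_128K, &r))
		printf("base=%#x size=%#x sr_disable=%#x\n",
		       (unsigned int)r.base, (unsigned int)r.size,
		       (unsigned int)r.sr_disable);
	return 0;
}
```

Running it prints sr_disable=0xe0, i.e. the top three 128KB subregions of the 1MB region are switched off while the 640KB of ROM stay covered.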
Diffstat (limited to 'arch/arm/mm')
-rw-r--r-- | arch/arm/mm/pmsa-v7.c | 49
1 file changed, 44 insertions(+), 5 deletions(-)
diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c
index ef204634a16e..106ae1c435a3 100644
--- a/arch/arm/mm/pmsa-v7.c
+++ b/arch/arm/mm/pmsa-v7.c
@@ -7,9 +7,11 @@
 #include <linux/bitops.h>
 #include <linux/memblock.h>
 
+#include <asm/cacheflush.h>
 #include <asm/cp15.h>
 #include <asm/cputype.h>
 #include <asm/mpu.h>
+#include <asm/sections.h>
 
 #include "mm.h"
 
@@ -20,6 +22,9 @@ struct region {
 };
 
 static struct region __initdata mem[MPU_MAX_REGIONS];
+#ifdef CONFIG_XIP_KERNEL
+static struct region __initdata xip[MPU_MAX_REGIONS];
+#endif
 
 static unsigned int __initdata mpu_min_region_order;
 static unsigned int __initdata mpu_max_regions;
@@ -229,7 +234,6 @@ static int __init allocate_region(phys_addr_t base, phys_addr_t size,
 /* MPU initialisation functions */
 void __init adjust_lowmem_bounds_mpu(void)
 {
-	phys_addr_t phys_offset = PHYS_OFFSET;
 	phys_addr_t specified_mem_size, total_mem_size = 0;
 	struct memblock_region *reg;
 	bool first = true;
@@ -256,8 +260,19 @@ void __init adjust_lowmem_bounds_mpu(void)
 	/* ... and one for vectors */
 	mem_max_regions--;
 #endif
+
+#ifdef CONFIG_XIP_KERNEL
+	/* plus some regions to cover XIP ROM */
+	num = allocate_region(CONFIG_XIP_PHYS_ADDR, __pa(_exiprom) - CONFIG_XIP_PHYS_ADDR,
+			      mem_max_regions, xip);
+
+	mem_max_regions -= num;
+#endif
+
 	for_each_memblock(memory, reg) {
 		if (first) {
+			phys_addr_t phys_offset = PHYS_OFFSET;
+
 			/*
 			 * Initially only use memory continuous from
 			 * PHYS_OFFSET */
@@ -355,7 +370,7 @@ static int __init __mpu_min_region_order(void)
 
 static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
 				   unsigned int size_order, unsigned int properties,
-				   unsigned int subregions)
+				   unsigned int subregions, bool need_flush)
 {
 	u32 size_data;
 
@@ -374,6 +389,9 @@ static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
 	size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN;
 	size_data |= subregions << MPU_RSR_SD;
 
+	if (need_flush)
+		flush_cache_all();
+
 	dsb(); /* Ensure all previous data accesses occur with old mappings */
 	rgnr_write(number);
 	isb();
@@ -416,7 +434,28 @@ void __init mpu_setup(void)
 	/* Background */
 	err |= mpu_setup_region(region++, 0, 32,
 				MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA,
-				0);
+				0, false);
+
+#ifdef CONFIG_XIP_KERNEL
+	/* ROM */
+	for (i = 0; i < ARRAY_SIZE(xip); i++) {
+		/*
+		 * In case we overwrite RAM region we set earlier in
+		 * head-nommu.S (which is cachable) all subsequent
+		 * data access till we setup RAM bellow would be done
+		 * with BG region (which is uncachable), thus we need
+		 * to clean and invalidate cache.
+		 */
+		bool need_flush = region == MPU_RAM_REGION;
+
+		if (!xip[i].size)
+			continue;
+
+		err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size),
+					MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL,
+					xip[i].subreg, need_flush);
+	}
+#endif
 
 	/* RAM */
 	for (i = 0; i < ARRAY_SIZE(mem); i++) {
@@ -425,14 +464,14 @@ void __init mpu_setup(void)
 
 		err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size),
 					MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL,
-					mem[i].subreg);
+					mem[i].subreg, false);
 	}
 
 	/* Vectors */
 #ifndef CONFIG_CPU_V7M
 	err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE),
 				MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL,
-				0);
+				0, false);
 #endif
 
 	if (err) {
 		panic("MPU region initialization failure! %d", err);
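For reference, the size_data value programmed by mpu_setup_region() above packs the region size and subregion-disable mask into the region size register. Below is a small standalone check of that arithmetic, assuming the usual PMSAv7 DRSR field offsets (enable at bit 0, size at bits 5:1, subregion disable at bits 15:8) and the hypothetical 640KB ROM from the earlier sketch:

```c
#include <stdio.h>

/* Assumed field offsets, matching the PMSAv7 DRSR layout. */
#define MPU_RSR_EN	0	/* region enable */
#define MPU_RSR_SZ	1	/* region size: 2^(field + 1) bytes */
#define MPU_RSR_SD	8	/* subregion disable bits */

int main(void)
{
	unsigned int size_order = 20;	/* ilog2(1MB) */
	unsigned int subregions = 0xE0;	/* top three 128KB subregions disabled */
	unsigned int size_data;

	/* Same packing as mpu_setup_region() in the diff above. */
	size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN;
	size_data |= subregions << MPU_RSR_SD;

	printf("region size register value: %#x\n", size_data);	/* 0xe027 */
	return 0;
}
```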