author     Linus Torvalds <torvalds@linux-foundation.org>  2013-08-02 23:37:45 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-08-02 23:37:45 +0200
commit     6d039f8f037fda35da8124f09c4d2bbe55c9a575
tree       20933bee7aa954952597a0b23c94866744a751c5 /arch/arm/mm
parent     Merge branch 'parisc-3.11-rc4' of git://git.kernel.org/pub/scm/linux/kernel/g...
parent     Merge branch 'security-fixes' into fixes
Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM fixes from Russell King:
"I've thought long and hard about what to say for this pull request,
and I really can't work out anything sane to say to summarise much of
these commits. The problem is, for the most part these are, yet again, lots
of small bits scattered around the place without any real overall
theme to them"
Most notable are probably the kuser page helper improvements.
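For background, the kuser helpers are small kernel-provided routines exposed to userspace at fixed addresses at the top of the vector page; the ABI (version word at 0xffff0ffc, __kuser_get_tls at 0xffff0fe0, __kuser_cmpxchg at 0xffff0fc0) is documented in Documentation/arm/kernel_user_helpers.txt. Below is a minimal C sketch of how a binary uses them, assuming a kernel built with CONFIG_KUSER_HELPERS=y; note that once the helpers are compiled out, the whole page becomes unreadable from userspace, so even this version probe would fault:

#include <stdio.h>

/* Documented kuser ABI addresses at the top of the vector page */
#define KUSER_HELPER_VERSION    (*(const int *)0xffff0ffc)

typedef void *(*kuser_get_tls_t)(void);
#define kuser_get_tls           ((kuser_get_tls_t)0xffff0fe0)

int main(void)
{
        /* Version >= 1 means __kuser_get_tls is implemented. */
        if (KUSER_HELPER_VERSION >= 1)
                printf("TLS pointer: %p\n", kuser_get_tls());
        return 0;
}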
* 'fixes' of git://git.linaro.org/people/rmk/linux-arm: (22 commits)
ARM: Add .text annotations where required after __CPUINIT removal
ARM: 7803/1: Fix deadlock scenario with smp_send_stop()
ARM: make vectors page inaccessible from userspace
ARM: move signal handlers into a vdso-like page
ARM: allow kuser helpers to be removed from the vector page
ARM: update FIQ support for relocation of vectors
ARM: use linker magic for vectors and vector stubs
ARM: move vector stubs
ARM: poison memory between kuser helpers
ARM: poison the vectors page
ARM: 7801/1: v6: prevent gcc 4.5 from reordering extended CP15 reads above is_smp() test
ARM: 7800/1: ARMv7-M: Fix name of NVIC handler function
ARM: Fix sorting of machine-initializers
ARM: 7791/1: a.out: remove partial a.out support
ARM: 7790/1: Fix deferred mm switch on VIVT processors
ARM: 7789/1: Do not run dummy_flush_tlb_a15_erratum() on non-Cortex-A15
ARM: 7787/1: virt: ensure visibility of __boot_cpu_mode
ARM: 7788/1: elf: fix lpae hwcap feature reporting in proc/cpuinfo
ARM: 7786/1: hyp: fix macro parameterisation
ARM: 7785/1: mm: restrict early_alloc to section-aligned memory
...
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig           | 34
-rw-r--r--  arch/arm/mm/context.c         |  3
-rw-r--r--  arch/arm/mm/mmu.c             | 57
-rw-r--r--  arch/arm/mm/proc-v7-2level.S  |  2
-rw-r--r--  arch/arm/mm/proc-v7-3level.S  |  2
-rw-r--r--  arch/arm/mm/proc-v7.S         | 11
6 files changed, 95 insertions, 14 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 6cacdc8dd654..db5c2cab8fda 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -421,24 +421,28 @@ config CPU_32v3
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v4
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v4T
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v5
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v6
 	bool
@@ -776,6 +780,7 @@ config CPU_BPREDICT_DISABLE
 
 config TLS_REG_EMUL
 	bool
+	select NEED_KUSER_HELPERS
 	help
 	  An SMP system using a pre-ARMv6 processor (there are apparently
 	  a few prototypes like that in existence) and therefore access to
@@ -783,11 +788,40 @@ config TLS_REG_EMUL
 
 config NEEDS_SYSCALL_FOR_CMPXCHG
 	bool
+	select NEED_KUSER_HELPERS
 	help
 	  SMP on a pre-ARMv6 processor?  Well OK then.
 	  Forget about fast user space cmpxchg support.
 	  It is just not possible.
 
+config NEED_KUSER_HELPERS
+	bool
+
+config KUSER_HELPERS
+	bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+	default y
+	help
+	  Warning: disabling this option may break user programs.
+
+	  Provide kuser helpers in the vector page.  The kernel provides
+	  helper code to userspace in read only form at a fixed location
+	  in the high vector page to allow userspace to be independent of
+	  the CPU type fitted to the system.  This permits binaries to be
+	  run on ARMv4 through to ARMv7 without modification.
+
+	  However, the fixed address nature of these helpers can be used
+	  by ROP (return orientated programming) authors when creating
+	  exploits.
+
+	  If all of the binaries and libraries which run on your platform
+	  are built specifically for your platform, and make no use of
+	  these helpers, then you can turn this option off.  However,
+	  when such an binary or library is run, it will receive a SIGILL
+	  signal, which will terminate the program.
+
+	  Say N here only if you are absolutely certain that you do not
+	  need these helpers; otherwise, the safe option is to say Y.
+
 config DMA_CACHE_RWFO
 	bool "Enable read/write for ownership DMA cache maintenance"
 	depends on CPU_V6K && SMP
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b55b1015724b..4a0544492f10 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -245,7 +245,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
 		local_flush_bp_all();
 		local_flush_tlb_all();
-		dummy_flush_tlb_a15_erratum();
+		if (erratum_a15_798181())
+			dummy_flush_tlb_a15_erratum();
 	}
 
 	atomic64_set(&per_cpu(active_asids, cpu), asid);
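For reference, the erratum_a15_798181() predicate guarding the dummy flush above boils down to a MIDR check matching Cortex-A15 parts in the affected revision range, roughly as below (paraphrased from the 3.11-era sources; read_cpuid_id() returns the MIDR on ARM, and exact details should be taken as approximate):

static inline int erratum_a15_798181(void)
{
	unsigned int midr = read_cpuid_id();

	/* Cortex-A15 r0p0..r3p2 are affected by erratum 798181 */
	if ((midr & 0xff0ffff0) != 0x410fc0f0 ||
	    midr > 0x413fc0f2)
		return 0;

	return 1;
}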
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4f56617a2392..53cdbd39ec8e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -989,6 +989,7 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
 
 void __init sanity_check_meminfo(void)
 {
+	phys_addr_t memblock_limit = 0;
 	int i, j, highmem = 0;
 	phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
 
@@ -1052,9 +1053,32 @@ void __init sanity_check_meminfo(void)
 			bank->size = size_limit;
 		}
 #endif
 
-		if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
-			arm_lowmem_limit = bank->start + bank->size;
+		if (!bank->highmem) {
+			phys_addr_t bank_end = bank->start + bank->size;
+			if (bank_end > arm_lowmem_limit)
+				arm_lowmem_limit = bank_end;
+
+			/*
+			 * Find the first non-section-aligned page, and point
+			 * memblock_limit at it. This relies on rounding the
+			 * limit down to be section-aligned, which happens at
+			 * the end of this function.
+			 *
+			 * With this algorithm, the start or end of almost any
+			 * bank can be non-section-aligned. The only exception
+			 * is that the start of the bank 0 must be section-
+			 * aligned, since otherwise memory would need to be
+			 * allocated when mapping the start of bank 0, which
+			 * occurs before any free memory is mapped.
+			 */
+			if (!memblock_limit) {
+				if (!IS_ALIGNED(bank->start, SECTION_SIZE))
+					memblock_limit = bank->start;
+				else if (!IS_ALIGNED(bank_end, SECTION_SIZE))
+					memblock_limit = bank_end;
+			}
+		}
 		j++;
 	}
 #ifdef CONFIG_HIGHMEM
@@ -1079,7 +1103,18 @@ void __init sanity_check_meminfo(void)
 #endif
 	meminfo.nr_banks = j;
 	high_memory = __va(arm_lowmem_limit - 1) + 1;
-	memblock_set_current_limit(arm_lowmem_limit);
+
+	/*
+	 * Round the memblock limit down to a section size.  This
+	 * helps to ensure that we will allocate memory from the
+	 * last full section, which should be mapped.
+	 */
+	if (memblock_limit)
+		memblock_limit = round_down(memblock_limit, SECTION_SIZE);
+	if (!memblock_limit)
+		memblock_limit = arm_lowmem_limit;
+
+	memblock_set_current_limit(memblock_limit);
 }
 
 static inline void prepare_page_table(void)
@@ -1160,7 +1195,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 
 	/*
 	 * Allocate the vector page early.
 	 */
-	vectors = early_alloc(PAGE_SIZE);
+	vectors = early_alloc(PAGE_SIZE * 2);
 
 	early_trap_init(vectors);
 
@@ -1205,15 +1240,27 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
 	map.virtual = 0xffff0000;
 	map.length = PAGE_SIZE;
+#ifdef CONFIG_KUSER_HELPERS
 	map.type = MT_HIGH_VECTORS;
+#else
+	map.type = MT_LOW_VECTORS;
+#endif
 	create_mapping(&map);
 
 	if (!vectors_high()) {
 		map.virtual = 0;
+		map.length = PAGE_SIZE * 2;
 		map.type = MT_LOW_VECTORS;
 		create_mapping(&map);
 	}
 
+	/* Now create a kernel read-only mapping */
+	map.pfn += 1;
+	map.virtual = 0xffff0000 + PAGE_SIZE;
+	map.length = PAGE_SIZE;
+	map.type = MT_LOW_VECTORS;
+	create_mapping(&map);
+
 	/*
 	 * Ask the machine support to map in the statically mapped devices.
 	 */
@@ -1276,8 +1323,6 @@ void __init paging_init(struct machine_desc *mdesc)
 {
 	void *zero_page;
 
-	memblock_set_current_limit(arm_lowmem_limit);
-
 	build_mem_type_table();
 	prepare_page_table();
 	map_lowmem();
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index f64afb9f1bd5..bdd3be4be77a 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -110,7 +110,7 @@ ENTRY(cpu_v7_set_pte_ext)
 ARM(	str	r3, [r0, #2048]! )
 THUMB(	add	r0, r0, #2048 )
 THUMB(	str	r3, [r0] )
-	ALT_SMP(mov	pc,lr)
+	ALT_SMP(W(nop))
 	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)		@ flush_pte
 #endif
 	mov	pc, lr
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index c36ac69488c8..01a719e18bb0 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -81,7 +81,7 @@ ENTRY(cpu_v7_set_pte_ext)
 	tst	r3, #1 << (55 - 32)		@ L_PTE_DIRTY
 	orreq	r2, #L_PTE_RDONLY
 1:	strd	r2, r3, [r0]
-	ALT_SMP(mov	pc, lr)
+	ALT_SMP(W(nop))
 	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)		@ flush_pte
 #endif
 	mov	pc, lr
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 5c6d5a3050ea..73398bcf9bd8 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -75,13 +75,14 @@ ENTRY(cpu_v7_do_idle)
 ENDPROC(cpu_v7_do_idle)
 
 ENTRY(cpu_v7_dcache_clean_area)
-	ALT_SMP(mov	pc, lr)			@ MP extensions imply L1 PTW
-	ALT_UP(W(nop))
-	dcache_line_size r2, r3
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	ALT_SMP(W(nop))				@ MP extensions imply L1 PTW
+	ALT_UP_B(1f)
+	mov	pc, lr
+1:	dcache_line_size r2, r3
+2:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, r2
 	subs	r1, r1, r2
-	bhi	1b
+	bhi	2b
 	dsb
 	mov	pc, lr
ENDPROC(cpu_v7_dcache_clean_area)
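The memblock_limit logic added to sanity_check_meminfo() above is easiest to see with numbers. Below is a self-contained demo of the same arithmetic, with a made-up memory bank that ends mid-section (SECTION_SIZE is 1 MiB with the classic 2-level page tables; all addresses here are hypothetical):

#include <stdio.h>
#include <stdint.h>

#define SECTION_SIZE		(1UL << 20)	/* 1 MiB sections */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

static uintptr_t round_down_sz(uintptr_t x, uintptr_t a)
{
	return x & ~(a - 1);
}

int main(void)
{
	/* hypothetical bank: starts section-aligned, ends mid-section */
	uintptr_t bank_start = 0x80000000;
	uintptr_t bank_end   = bank_start + 0x0ff80000;	/* 255.5 MiB */
	uintptr_t arm_lowmem_limit = bank_end;
	uintptr_t memblock_limit = 0;

	/* point memblock_limit at the first non-section-aligned boundary */
	if (!IS_ALIGNED(bank_start, SECTION_SIZE))
		memblock_limit = bank_start;
	else if (!IS_ALIGNED(bank_end, SECTION_SIZE))
		memblock_limit = bank_end;

	/* round down so allocations stay within fully mapped sections */
	if (memblock_limit)
		memblock_limit = round_down_sz(memblock_limit, SECTION_SIZE);
	if (!memblock_limit)
		memblock_limit = arm_lowmem_limit;

	/* prints 0x8ff00000: the last fully mapped section boundary */
	printf("memblock limit = %#lx\n", (unsigned long)memblock_limit);
	return 0;
}

Rounding down to 0x8ff00000 ensures that early memblock allocations come only from sections that are fully mapped, which is also why paging_init() no longer needs its own memblock_set_current_limit() call in the diff above.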