Diffstat (limited to 'arch/arm/mm')
-rw-r--r--   arch/arm/mm/Kconfig              |  6
-rw-r--r--   arch/arm/mm/alignment.c          |  3
-rw-r--r--   arch/arm/mm/cache-feroceon-l2.c  |  5
-rw-r--r--   arch/arm/mm/cache-tauros2.c      |  5
-rw-r--r--   arch/arm/mm/dma-mapping.c        |  2
-rw-r--r--   arch/arm/mm/fault.c              |  4
-rw-r--r--   arch/arm/mm/ioremap.c            |  9
-rw-r--r--   arch/arm/mm/kasan_init.c         |  6
-rw-r--r--   arch/arm/mm/mmu.c                | 35
-rw-r--r--   arch/arm/mm/nommu.c              |  9
-rw-r--r--   arch/arm/mm/proc-v7-bugs.c       |  9
11 files changed, 60 insertions(+), 33 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index a3a4589ec73b..fc439c2c16f8 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -631,7 +631,11 @@ config CPU_USE_DOMAINS
 	bool
 	help
 	  This option enables or disables the use of domain switching
-	  via the set_fs() function.
+	  using the DACR (domain access control register) to protect memory
+	  domains from each other. In Linux we use three domains: kernel, user
+	  and IO. The domains are used to protect userspace from kernelspace
+	  and to handle IO-space as a special type of memory by assigning
+	  manager or client roles to running code (such as a process).
 
 config CPU_V7M_NUM_IRQ
 	int "Number of external interrupts connected to the NVIC"
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 6f499559d193..f8dd0b3cc8e0 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -935,6 +935,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (type == TYPE_LDST)
 		do_alignment_finish_ldst(addr, instr, regs, offset);
 
+	if (thumb_mode(regs))
+		regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
+
 	return 0;
 
  bad_or_fault:
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 5c1b7a7b9af6..25dbd84a1aaf 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * arch/arm/mm/cache-feroceon-l2.c - Feroceon L2 cache controller support
  *
  * Copyright (C) 2008 Marvell Semiconductor
  *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
  * References:
  * - Unified Layer 2 Cache for Feroceon CPU Cores,
  *   Document ID MV-S104858-00, Rev. A, October 23 2007.
diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c
index 88255bea65e4..b1e1aba602f7 100644
--- a/arch/arm/mm/cache-tauros2.c
+++ b/arch/arm/mm/cache-tauros2.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * arch/arm/mm/cache-tauros2.c - Tauros2 L2 cache controller support
  *
  * Copyright (C) 2008 Marvell Semiconductor
  *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
  * References:
  * - PJ1 CPU Core Datasheet,
  *   Document ID MV-S104837-01, Rev 0.7, January 24 2008.
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 16c2cdd04639..089c9c644cce 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -576,7 +576,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
 	*handle = DMA_MAPPING_ERROR;
 	allowblock = gfpflags_allow_blocking(gfp);
-	cma = allowblock ? dev_get_cma_area(dev) : false;
+	cma = allowblock ? dev_get_cma_area(dev) : NULL;
 
 	if (cma)
 		buf->allocator = &cma_allocator;
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index a062e07516dd..46cccd6bf705 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -322,6 +322,10 @@ retry:
 		return 0;
 	}
 
+	/* The fault is fully completed (including releasing mmap lock) */
+	if (fault & VM_FAULT_COMPLETED)
+		return 0;
+
 	if (!(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_RETRY) {
 			flags |= FAULT_FLAG_TRIED;
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 576c0e6c92fc..2129070065c3 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -418,7 +418,7 @@ void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
 				   __builtin_return_address(0));
 }
 
-void __iounmap(volatile void __iomem *io_addr)
+void iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
 	struct static_vm *svm;
@@ -446,13 +446,6 @@ void __iounmap(volatile void __iomem *io_addr)
 
 	vunmap(addr);
 }
-
-void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
-
-void iounmap(volatile void __iomem *cookie)
-{
-	arch_iounmap(cookie);
-}
 EXPORT_SYMBOL(iounmap);
 
 #if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
diff --git a/arch/arm/mm/kasan_init.c b/arch/arm/mm/kasan_init.c
index 5ad0d6c56d56..29caee9c79ce 100644
--- a/arch/arm/mm/kasan_init.c
+++ b/arch/arm/mm/kasan_init.c
@@ -236,7 +236,11 @@ void __init kasan_init(void)
 
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
-	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
+	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
+		kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
+					    kasan_mem_to_shadow((void *)VMALLOC_END));
+
+	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_END),
 				    kasan_mem_to_shadow((void *)-1UL) + 1);
 
 	for_each_mem_range(i, &pa_start, &pa_end) {
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 5e2be37a198e..a49f0b9c0f75 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -296,6 +296,13 @@ static struct mem_type mem_types[] __ro_after_init = {
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_RO] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+			     L_PTE_XN | L_PTE_RDONLY,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT,
+		.domain    = DOMAIN_KERNEL,
+	},
 	[MT_ROM] = {
 		.prot_sect = PMD_TYPE_SECT,
 		.domain    = DOMAIN_KERNEL,
@@ -405,6 +412,26 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 	local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
 }
 
+static pgprot_t protection_map[16] __ro_after_init = {
+	[VM_NONE]					= __PAGE_NONE,
+	[VM_READ]					= __PAGE_READONLY,
+	[VM_WRITE]					= __PAGE_COPY,
+	[VM_WRITE | VM_READ]				= __PAGE_COPY,
+	[VM_EXEC]					= __PAGE_READONLY_EXEC,
+	[VM_EXEC | VM_READ]				= __PAGE_READONLY_EXEC,
+	[VM_EXEC | VM_WRITE]				= __PAGE_COPY_EXEC,
+	[VM_EXEC | VM_WRITE | VM_READ]			= __PAGE_COPY_EXEC,
+	[VM_SHARED]					= __PAGE_NONE,
+	[VM_SHARED | VM_READ]				= __PAGE_READONLY,
+	[VM_SHARED | VM_WRITE]				= __PAGE_SHARED,
+	[VM_SHARED | VM_WRITE | VM_READ]		= __PAGE_SHARED,
+	[VM_SHARED | VM_EXEC]				= __PAGE_READONLY_EXEC,
+	[VM_SHARED | VM_EXEC | VM_READ]			= __PAGE_READONLY_EXEC,
+	[VM_SHARED | VM_EXEC | VM_WRITE]		= __PAGE_SHARED_EXEC,
+	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= __PAGE_SHARED_EXEC
+};
+DECLARE_VM_GET_PAGE_PROT
+
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
@@ -489,6 +516,7 @@ static void __init build_mem_type_table(void)
 
 			/* Also setup NX memory mapping */
 			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_XN;
 		}
 		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 			/*
@@ -568,6 +596,7 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 	mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 	mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+	mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 #endif
 
 	/*
@@ -587,6 +616,8 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
 		mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY_RO].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
 		mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
@@ -647,6 +678,8 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
 	mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_RO].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY_RO].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
@@ -1360,7 +1393,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
 		map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK);
 		map.virtual = FDT_FIXED_BASE;
 		map.length = FDT_FIXED_SIZE;
-		map.type = MT_ROM;
+		map.type = MT_MEMORY_RO;
 		create_mapping(&map);
 	}
 
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 2658f52903da..c42debaded95 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -230,14 +230,7 @@ void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
 	return (void *)phys_addr;
 }
 
-void __iounmap(volatile void __iomem *addr)
-{
-}
-EXPORT_SYMBOL(__iounmap);
-
-void (*arch_iounmap)(volatile void __iomem *);
-
-void iounmap(volatile void __iomem *addr)
+void iounmap(volatile void __iomem *io_addr)
 {
 }
 EXPORT_SYMBOL(iounmap);
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
index fb9f3eb6bf48..8bc7a2d6d6c7 100644
--- a/arch/arm/mm/proc-v7-bugs.c
+++ b/arch/arm/mm/proc-v7-bugs.c
@@ -108,8 +108,7 @@ static unsigned int spectre_v2_install_workaround(unsigned int method)
 #else
 static unsigned int spectre_v2_install_workaround(unsigned int method)
 {
-	pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n",
-		smp_processor_id());
+	pr_info_once("Spectre V2: workarounds disabled by configuration\n");
 
 	return SPECTRE_VULNERABLE;
 }
@@ -209,10 +208,10 @@ static int spectre_bhb_install_workaround(int method)
 			return SPECTRE_VULNERABLE;
 
 		spectre_bhb_method = method;
-	}
 
-	pr_info("CPU%u: Spectre BHB: using %s workaround\n",
-		smp_processor_id(), spectre_bhb_method_name(method));
+		pr_info("CPU%u: Spectre BHB: enabling %s workaround for all CPUs\n",
+			smp_processor_id(), spectre_bhb_method_name(method));
+	}
 
 	return SPECTRE_MITIGATED;
 }
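
Note on the mmu.c change above: the new protection_map[] table together with the DECLARE_VM_GET_PAGE_PROT line is what gives this architecture its own vm_get_page_prot() implementation. As a minimal sketch, assuming the macro matches the generic helper added to include/linux/pgtable.h in the same kernel cycle (not shown in this diff), it expands to a plain table lookup keyed on the low VM_READ/VM_WRITE/VM_EXEC/VM_SHARED flag bits:

/*
 * Sketch only: assumed expansion of DECLARE_VM_GET_PAGE_PROT.
 * The real macro lives in include/linux/pgtable.h and may differ in detail.
 */
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	/* Mask off everything except the four permission-relevant VM flags. */
	return protection_map[vm_flags &
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);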