author     Palmer Dabbelt <palmerdabbelt@google.com>    2021-07-01 06:50:32 +0200
committer  Palmer Dabbelt <palmerdabbelt@google.com>    2021-07-01 06:50:32 +0200
commit     01112e5e20f5298a81639806cd0a3c587aade467 (patch)
tree       b453c32b2e3a8905da331a81ad234c17b62cb9dc /arch/riscv/include
parent     riscv: Enable KFENCE for riscv64 (diff)
parent     riscv: Map the kernel with correct permissions the first time (diff)
Merge branch 'riscv-wx-mappings' into for-next
This contains both the short-term fix for the W+X boot mappings and the
larger cleanup.
* riscv-wx-mappings:
riscv: Map the kernel with correct permissions the first time
riscv: Introduce set_kernel_memory helper
riscv: Simplify xip and !xip kernel address conversion macros
riscv: Remove CONFIG_PHYS_RAM_BASE_FIXED
riscv: mm: Fix W+X mappings at boot
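
As a point of reference, the set_kernel_memory() helper introduced by this series (see the set_memory.h hunk below) lets callers pass linker section symbols and a set_memory_*() callback directly instead of open-coding the page-count math. The caller below is only an illustrative sketch, not the code actually added under arch/riscv/mm: the function name and the choice of section boundaries are assumptions.

/*
 * Illustrative sketch only: enforce W^X on the kernel image with the new
 * helper.  Section symbols come from <asm/sections.h>; each call page-aligns
 * the range and applies the given set_memory_*() callback to it.
 */
#include <asm/sections.h>
#include <asm/set_memory.h>

static void __init example_protect_kernel_image(void)
{
	/* Kernel text stays executable but becomes read-only. */
	set_kernel_memory(_start, _etext, set_memory_ro);
	/* Everything after text (data, rodata, bss) must not be executable. */
	set_kernel_memory(_etext, _end, set_memory_nx);
}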
Diffstat (limited to 'arch/riscv/include')
-rw-r--r--  arch/riscv/include/asm/page.h        27
-rw-r--r--  arch/riscv/include/asm/pgtable.h      2
-rw-r--r--  arch/riscv/include/asm/sections.h    17
-rw-r--r--  arch/riscv/include/asm/set_memory.h  24
4 files changed, 50 insertions, 20 deletions
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index a1b888f77d57..5d4622a44b09 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -83,55 +83,58 @@ extern unsigned long va_pa_offset;
 #ifdef CONFIG_64BIT
 extern unsigned long va_kernel_pa_offset;
 #endif
-#ifdef CONFIG_XIP_KERNEL
 extern unsigned long va_kernel_xip_pa_offset;
-#endif
 extern unsigned long pfn_base;
+extern uintptr_t load_sz;
 #define ARCH_PFN_OFFSET		(pfn_base)
 #else
 #define va_pa_offset	0
 #ifdef CONFIG_64BIT
 #define va_kernel_pa_offset	0
 #endif
+#define va_kernel_xip_pa_offset	0
 #define ARCH_PFN_OFFSET		(PAGE_OFFSET >> PAGE_SHIFT)
 #endif /* CONFIG_MMU */
 
 extern unsigned long kernel_virt_addr;
 
 #ifdef CONFIG_64BIT
+#define is_kernel_mapping(x)	\
+	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x)	\
+	((x) >= PAGE_OFFSET && (x) < kernel_virt_addr)
+
 #define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + va_pa_offset))
-#ifdef CONFIG_XIP_KERNEL
 #define kernel_mapping_pa_to_va(y)	({						\
 	unsigned long _y = y;								\
 	(_y >= CONFIG_PHYS_RAM_BASE) ?							\
 		(void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) :	\
 		(void *)((unsigned long)(_y) + va_kernel_xip_pa_offset);		\
 	})
-#else
-#define kernel_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + va_kernel_pa_offset))
-#endif
 #define __pa_to_va_nodebug(x)		linear_mapping_pa_to_va(x)
 
 #define linear_mapping_va_to_pa(x)	((unsigned long)(x) - va_pa_offset)
-#ifdef CONFIG_XIP_KERNEL
 #define kernel_mapping_va_to_pa(y) ({						\
 	unsigned long _y = y;							\
 	(_y < kernel_virt_addr + XIP_OFFSET) ?					\
 		((unsigned long)(_y) - va_kernel_xip_pa_offset) :		\
 		((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET);	\
 	})
-#else
-#define kernel_mapping_va_to_pa(x)	((unsigned long)(x) - va_kernel_pa_offset)
-#endif
+
 #define __va_to_pa_nodebug(x)	({						\
 	unsigned long _x = x;							\
-	(_x < kernel_virt_addr) ?						\
+	is_linear_mapping(_x) ?							\
 		linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x);	\
 	})
 #else
+#define is_kernel_mapping(x)	\
+	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x)	\
+	((x) >= PAGE_OFFSET)
+
 #define __pa_to_va_nodebug(x)  ((void *)((unsigned long) (x) + va_pa_offset))
 #define __va_to_pa_nodebug(x)  ((unsigned long)(x) - va_pa_offset)
-#endif
+#endif /* CONFIG_64BIT */
 
 #ifdef CONFIG_DEBUG_VIRTUAL
 extern phys_addr_t __virt_to_phys(unsigned long x);
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index f282f7a375e2..2056faf06f13 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -77,6 +77,8 @@
 
 #ifdef CONFIG_XIP_KERNEL
 #define XIP_OFFSET		SZ_8M
+#else
+#define XIP_OFFSET		0
 #endif
 
 #ifndef __ASSEMBLY__
diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h
index 8a303fb1ee3b..32336e8a17cb 100644
--- a/arch/riscv/include/asm/sections.h
+++ b/arch/riscv/include/asm/sections.h
@@ -6,6 +6,7 @@
 #define __ASM_SECTIONS_H
 
 #include <asm-generic/sections.h>
+#include <linux/mm.h>
 
 extern char _start[];
 extern char _start_kernel[];
@@ -13,4 +14,20 @@ extern char __init_data_begin[], __init_data_end[];
 extern char __init_text_begin[], __init_text_end[];
 extern char __alt_start[], __alt_end[];
 
+static inline bool is_va_kernel_text(uintptr_t va)
+{
+	uintptr_t start = (uintptr_t)_start;
+	uintptr_t end = (uintptr_t)__init_data_begin;
+
+	return va >= start && va < end;
+}
+
+static inline bool is_va_kernel_lm_alias_text(uintptr_t va)
+{
+	uintptr_t start = (uintptr_t)lm_alias(_start);
+	uintptr_t end = (uintptr_t)lm_alias(__init_data_begin);
+
+	return va >= start && va < end;
+}
+
 #endif /* __ASM_SECTIONS_H */
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index 9d4d455726d4..a2c14d4b3993 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -16,20 +16,28 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 int set_memory_rw_nx(unsigned long addr, int numpages);
-void protect_kernel_text_data(void);
+static __always_inline int set_kernel_memory(char *startp, char *endp,
+					     int (*set_memory)(unsigned long start,
+							       int num_pages))
+{
+	unsigned long start = (unsigned long)startp;
+	unsigned long end = (unsigned long)endp;
+	int num_pages = PAGE_ALIGN(end - start) >> PAGE_SHIFT;
+
+	return set_memory(start, num_pages);
+}
 #else
 static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
-static inline void protect_kernel_text_data(void) {}
 static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
-#endif
-
-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void __init protect_kernel_linear_mapping_text_rodata(void);
-#else
-static inline void protect_kernel_linear_mapping_text_rodata(void) {}
+static inline int set_kernel_memory(char *startp, char *endp,
+				    int (*set_memory)(unsigned long start,
+						      int num_pages))
+{
+	return 0;
+}
 #endif
 
 int set_direct_map_invalid_noflush(struct page *page);
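
For orientation, here is a stand-alone sketch of the address-space split that the new is_linear_mapping()/is_kernel_mapping() predicates encode, and of the __va_to_pa_nodebug() dispatch built on top of them. This is not kernel code: the layout constants below are made-up placeholders rather than the real RISC-V virtual memory map, and the conversion shows the simplified !XIP case where XIP_OFFSET and va_kernel_xip_pa_offset are 0.

#include <stdint.h>
#include <stdio.h>

/* Placeholder layout, for illustration only. */
#define PAGE_OFFSET          0xffffffe000000000UL /* start of linear mapping */
#define KERNEL_VIRT_ADDR     0xffffffff80000000UL /* start of kernel mapping */
#define LOAD_SZ              0x02000000UL         /* size of the kernel image */
#define PHYS_RAM_BASE        0x80000000UL
#define KERNEL_PHYS_BASE     0x80200000UL
#define VA_PA_OFFSET         (PAGE_OFFSET - PHYS_RAM_BASE)
#define VA_KERNEL_PA_OFFSET  (KERNEL_VIRT_ADDR - KERNEL_PHYS_BASE)

/* Same shape as the predicates added to asm/page.h for the 64-bit case. */
#define is_kernel_mapping(x) \
	((x) >= KERNEL_VIRT_ADDR && (x) < (KERNEL_VIRT_ADDR + LOAD_SZ))
#define is_linear_mapping(x) \
	((x) >= PAGE_OFFSET && (x) < KERNEL_VIRT_ADDR)

/* Mirrors the __va_to_pa_nodebug() dispatch: pick the offset by mapping type. */
static uintptr_t va_to_pa(uintptr_t va)
{
	return is_linear_mapping(va) ? va - VA_PA_OFFSET
				     : va - VA_KERNEL_PA_OFFSET;
}

int main(void)
{
	uintptr_t lm = PAGE_OFFSET + 0x1000;      /* a linear-map address */
	uintptr_t km = KERNEL_VIRT_ADDR + 0x1000; /* a kernel-map address */

	printf("linear %#lx -> pa %#lx (linear? %d)\n",
	       (unsigned long)lm, (unsigned long)va_to_pa(lm), is_linear_mapping(lm));
	printf("kernel %#lx -> pa %#lx (kernel? %d)\n",
	       (unsigned long)km, (unsigned long)va_to_pa(km), is_kernel_mapping(km));
	return 0;
}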