author		Linus Torvalds <torvalds@linux-foundation.org>	2020-07-25 23:42:11 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-07-25 23:42:11 +0200
commit		04300d66f0a06d572d9f2ad6768c38cabde22179 (patch)
tree		a9cb2b9a730789d487b2952a998305027a1bc644 /arch
parent		Merge tag 'x86-urgent-2020-07-25' of git://git.kernel.org/pub/scm/linux/kerne... (diff)
parent		riscv: Parse all memory blocks to remove unusable memory (diff)
Merge tag 'riscv-for-linus-5.8-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux into master
Pull RISC-V fixes from Palmer Dabbelt:
"A few more fixes this week:
- A fix to avoid using SBI calls during kasan initialization, as the
SBI calls themselves have not been probed yet.
- Three fixes related to systems with multiple memory regions"
* tag 'riscv-for-linus-5.8-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
riscv: Parse all memory blocks to remove unusable memory
RISC-V: Do not rely on initrd_start/end computed during early dt parsing
RISC-V: Set maximum number of mapped pages correctly
riscv: kasan: use local_tlb_flush_all() to avoid uninitialized __sbi_rfence
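
As context for the initrd change listed above: the reworked setup_initrd() reserves the initrd by physical address and first rounds the region out to page boundaries before checking it against memblock. Below is a minimal user-space sketch of that rounding arithmetic only; the macros and sample addresses are illustrative and are not taken from the kernel.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE       4096ULL
#define ROUND_DOWN(x)   ((x) & ~(PAGE_SIZE - 1))
#define ROUND_UP(x)     (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        /* Hypothetical initrd placement handed over by the bootloader. */
        uint64_t phys_initrd_start = 0x84200123ULL;
        uint64_t phys_initrd_size  = 0x00800000ULL;

        /* Same arithmetic as the patched setup_initrd(): align the start
         * down and grow the size so the reservation covers every page the
         * initrd touches. */
        uint64_t start = ROUND_DOWN(phys_initrd_start);
        uint64_t size  = ROUND_UP(phys_initrd_size + (phys_initrd_start - start));

        printf("reserve [0x%llx, 0x%llx)\n",
               (unsigned long long)start,
               (unsigned long long)(start + size));
        return 0;
}

Rounding the start down and the size up guarantees that every page overlapping the initrd is covered by the reservation, which matches the patch's own comment about keeping the accounting consistent with free_initrd_mem().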
Diffstat (limited to 'arch')
-rw-r--r--	arch/riscv/mm/init.c		66
-rw-r--r--	arch/riscv/mm/kasan_init.c	 4
2 files changed, 47 insertions, 23 deletions
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index f4adb3684f3d..79e9d55bdf1a 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -95,19 +95,40 @@ void __init mem_init(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 static void __init setup_initrd(void)
 {
+        phys_addr_t start;
         unsigned long size;
 
-        if (initrd_start >= initrd_end) {
-                pr_info("initrd not found or empty");
+        /* Ignore the virtul address computed during device tree parsing */
+        initrd_start = initrd_end = 0;
+
+        if (!phys_initrd_size)
+                return;
+        /*
+         * Round the memory region to page boundaries as per free_initrd_mem()
+         * This allows us to detect whether the pages overlapping the initrd
+         * are in use, but more importantly, reserves the entire set of pages
+         * as we don't want these pages allocated for other purposes.
+         */
+        start = round_down(phys_initrd_start, PAGE_SIZE);
+        size = phys_initrd_size + (phys_initrd_start - start);
+        size = round_up(size, PAGE_SIZE);
+
+        if (!memblock_is_region_memory(start, size)) {
+                pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
+                       (u64)start, size);
                 goto disable;
         }
-        if (__pa_symbol(initrd_end) > PFN_PHYS(max_low_pfn)) {
-                pr_err("initrd extends beyond end of memory");
+
+        if (memblock_is_region_reserved(start, size)) {
+                pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
+                       (u64)start, size);
                 goto disable;
         }
-        size = initrd_end - initrd_start;
-        memblock_reserve(__pa_symbol(initrd_start), size);
 
+        memblock_reserve(start, size);
+        /* Now convert initrd to virtual addresses */
+        initrd_start = (unsigned long)__va(phys_initrd_start);
+        initrd_end = initrd_start + phys_initrd_size;
         initrd_below_start_ok = 1;
 
         pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
@@ -126,33 +147,36 @@ void __init setup_bootmem(void)
 {
         struct memblock_region *reg;
         phys_addr_t mem_size = 0;
+        phys_addr_t total_mem = 0;
+        phys_addr_t mem_start, end = 0;
         phys_addr_t vmlinux_end = __pa_symbol(&_end);
         phys_addr_t vmlinux_start = __pa_symbol(&_start);
 
         /* Find the memory region containing the kernel */
         for_each_memblock(memory, reg) {
-                phys_addr_t end = reg->base + reg->size;
-
-                if (reg->base <= vmlinux_start && vmlinux_end <= end) {
-                        mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
-
-                        /*
-                         * Remove memblock from the end of usable area to the
-                         * end of region
-                         */
-                        if (reg->base + mem_size < end)
-                                memblock_remove(reg->base + mem_size,
-                                                end - reg->base - mem_size);
-                }
+                end = reg->base + reg->size;
+                if (!total_mem)
+                        mem_start = reg->base;
+                if (reg->base <= vmlinux_start && vmlinux_end <= end)
+                        BUG_ON(reg->size == 0);
+                total_mem = total_mem + reg->size;
         }
-        BUG_ON(mem_size == 0);
+
+        /*
+         * Remove memblock from the end of usable area to the
+         * end of region
+         */
+        mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET);
+        if (mem_start + mem_size < end)
+                memblock_remove(mem_start + mem_size,
+                                end - mem_start - mem_size);
 
         /* Reserve from the start of the kernel to the end of the kernel */
         memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
 
-        set_max_mapnr(PFN_DOWN(mem_size));
         max_pfn = PFN_DOWN(memblock_end_of_DRAM());
         max_low_pfn = max_pfn;
+        set_max_mapnr(max_low_pfn);
 
 #ifdef CONFIG_BLK_DEV_INITRD
         setup_initrd();
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 4a8b61806633..87b4ab3d3c77 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -44,7 +44,7 @@ asmlinkage void __init kasan_early_init(void)
                         (__pa(((uintptr_t) kasan_early_shadow_pmd))),
                                 __pgprot(_PAGE_TABLE)));
 
-        flush_tlb_all();
+        local_flush_tlb_all();
 }
 
 static void __init populate(void *start, void *end)
@@ -79,7 +79,7 @@ static void __init populate(void *start, void *end)
                         pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
                                         __pgprot(_PAGE_TABLE)));
 
-        flush_tlb_all();
+        local_flush_tlb_all();
 
         memset(start, 0, end - start);
 }
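
The setup_bootmem() rework above stops assuming a single memory bank: it walks every memblock region, remembers the first base and the last end, sums the bank sizes, and trims whatever the kernel's linear mapping cannot cover. The following is a rough user-space sketch of that accounting only, with made-up bank addresses and an arbitrary 2 GiB stand-in for the (phys_addr_t)-PAGE_OFFSET limit used in the patch.

#include <stdio.h>
#include <stdint.h>

struct region { uint64_t base, size; };

int main(void)
{
        /* Hypothetical memory map with two banks (not from a real DT). */
        struct region mem[] = {
                { 0x080000000ULL, 0x040000000ULL }, /* 1 GiB */
                { 0x100000000ULL, 0x100000000ULL }, /* 4 GiB */
        };
        int nr = sizeof(mem) / sizeof(mem[0]);

        /* Assume the linear map can cover at most 2 GiB in this sketch;
         * the patch derives this limit from (phys_addr_t)-PAGE_OFFSET. */
        uint64_t linear_map_limit = 2ULL << 30;

        uint64_t mem_start = 0, end = 0, total_mem = 0;

        /* Mirror the patched loop: first base, last end, total size. */
        for (int i = 0; i < nr; i++) {
                end = mem[i].base + mem[i].size;
                if (!total_mem)
                        mem_start = mem[i].base;
                total_mem += mem[i].size;
        }

        /* Usable memory is capped by the linear-map limit; anything past
         * that point would be removed from memblock. */
        uint64_t mem_size = total_mem < linear_map_limit ? total_mem : linear_map_limit;
        if (mem_start + mem_size < end)
                printf("remove [0x%llx, 0x%llx)\n",
                       (unsigned long long)(mem_start + mem_size),
                       (unsigned long long)end);
        else
                printf("all memory mappable\n");
        return 0;
}

With the 5 GiB of hypothetical memory above, everything past mem_start + 2 GiB would be reported for removal, which corresponds to the memblock_remove() call the patch performs after the loop.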