| author | Kefeng Wang <wangkefeng.wang@huawei.com> | 2021-01-15 06:46:04 +0100 |
|---|---|---|
| committer | Palmer Dabbelt <palmerdabbelt@google.com> | 2021-02-19 08:17:57 +0100 |
| commit | c72160fe05fb978ad859ba053c4462c2bb960b13 | |
| tree | 7e049e02369f5e4d78f2c9f3cfe24a89163d8b72 | |
| parent | initrd: Add the preprocessor guard in initrd.h | |
initramfs: Provide a common initrd reserve function
Some architectures (e.g., ARM and riscv) have similar logic to
check and reserve the memory used by the initrd, so let's provide a
common function reserve_initrd_mem() to reduce the duplicated code.
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
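
For context, here is a minimal sketch of what the arch-side call looks like once this helper exists. It is an illustrative assumption only: setup_bootmem() and the surrounding steps are placeholders, not the actual ARM/riscv conversion patches in this series.

```c
/*
 * Hypothetical architecture setup path using the new common helper.
 * setup_bootmem() and the commented-out steps are placeholders.
 */
#include <linux/init.h>
#include <linux/initrd.h>

void __init setup_bootmem(void)
{
	/* ... add detected RAM to memblock, reserve the kernel image ... */

	/*
	 * Replaces the arch's open-coded initrd checks: the common helper
	 * validates phys_initrd_start/phys_initrd_size against memblock,
	 * reserves the pages, and fills in initrd_start/initrd_end.
	 */
	reserve_initrd_mem();

	/* ... reserve the device tree, initialize zones, etc. ... */
}
```

Because the header change below provides a static inline stub when CONFIG_BLK_DEV_INITRD is disabled, the caller needs no #ifdef around the call.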
-rw-r--r--  include/linux/initrd.h |  6
-rw-r--r--  init/initramfs.c       | 45
2 files changed, 51 insertions, 0 deletions
diff --git a/include/linux/initrd.h b/include/linux/initrd.h
index fc30ac30e10e..85c15717af34 100644
--- a/include/linux/initrd.h
+++ b/include/linux/initrd.h
@@ -18,6 +18,12 @@ extern int initrd_below_start_ok;
 extern unsigned long initrd_start, initrd_end;
 extern void free_initrd_mem(unsigned long, unsigned long);
 
+#ifdef CONFIG_BLK_DEV_INITRD
+extern void __init reserve_initrd_mem(void);
+#else
+static inline void __init reserve_initrd_mem(void) {}
+#endif
+
 extern phys_addr_t phys_initrd_start;
 extern unsigned long phys_initrd_size;
 
diff --git a/init/initramfs.c b/init/initramfs.c
index 55b74d7e5260..f75c89e9d602 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -535,6 +535,51 @@ extern unsigned long __initramfs_size;
 #include <linux/initrd.h>
 #include <linux/kexec.h>
 
+void __init reserve_initrd_mem(void)
+{
+	phys_addr_t start;
+	unsigned long size;
+
+	/* Ignore the virtual address computed during device tree parsing */
+	initrd_start = initrd_end = 0;
+
+	if (!phys_initrd_size)
+		return;
+	/*
+	 * Round the memory region to page boundaries as per free_initrd_mem()
+	 * This allows us to detect whether the pages overlapping the initrd
+	 * are in use, but more importantly, reserves the entire set of pages
+	 * as we don't want these pages allocated for other purposes.
+	 */
+	start = round_down(phys_initrd_start, PAGE_SIZE);
+	size = phys_initrd_size + (phys_initrd_start - start);
+	size = round_up(size, PAGE_SIZE);
+
+	if (!memblock_is_region_memory(start, size)) {
+		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
+		       (u64)start, size);
+		goto disable;
+	}
+
+	if (memblock_is_region_reserved(start, size)) {
+		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
+		       (u64)start, size);
+		goto disable;
+	}
+
+	memblock_reserve(start, size);
+	/* Now convert initrd to virtual addresses */
+	initrd_start = (unsigned long)__va(phys_initrd_start);
+	initrd_end = initrd_start + phys_initrd_size;
+	initrd_below_start_ok = 1;
+
+	return;
+disable:
+	pr_cont(" - disabling initrd\n");
+	initrd_start = 0;
+	initrd_end = 0;
+}
+
 void __weak __init free_initrd_mem(unsigned long start, unsigned long end)
 {
 #ifdef CONFIG_ARCH_KEEP_MEMBLOCK
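
As a worked example of the page-rounding done in reserve_initrd_mem() above, the standalone userspace sketch below walks through the same arithmetic. PAGE_SIZE, the sample addresses, and the round_down_to()/round_up_to() helpers are assumptions made for illustration only.

```c
/* Standalone illustration of the start/size rounding used above. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL

static uint64_t round_down_to(uint64_t x, uint64_t a) { return x & ~(a - 1); }
static uint64_t round_up_to(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }

int main(void)
{
	/* An initrd that neither starts nor ends on a page boundary. */
	uint64_t phys_initrd_start = 0x82001200ULL;
	uint64_t phys_initrd_size  = 0x00300500ULL;

	/*
	 * Same steps as reserve_initrd_mem(): align the start down, grow the
	 * size by the leading slack, then align the size up to a full page.
	 */
	uint64_t start = round_down_to(phys_initrd_start, PAGE_SIZE);
	uint64_t size  = phys_initrd_size + (phys_initrd_start - start);

	size = round_up_to(size, PAGE_SIZE);

	/* Prints: reserve [0x82001000, 0x82302000), 3076 KiB */
	printf("reserve [%#llx, %#llx), %llu KiB\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + size),
	       (unsigned long long)(size / 1024));
	return 0;
}
```

Reserving whole pages matters because the head and tail pages that the initrd only partially occupies must not be handed out to other allocations before free_initrd_mem() releases the region.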