author     Linus Torvalds <torvalds@linux-foundation.org>   2017-11-16 21:50:35 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-11-16 21:50:35 +0100
commit     441692aafc1731087bbaf657a8b6059d95c2a6df
tree       9d3dd6d6e882bc9bb69e3389715b720ee9001ead /arch/arm/kernel/vmlinux-xip.lds.S
parent     Merge tag 'powerpc-4.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/p...
parent     Merge branch 'devel-stable' into for-next
Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:
- add support for ELF fdpic binaries on both MMU and noMMU platforms
- linker script cleanups
- support for compressed .data section for XIP images (see the sketch after
  this list)
- discard memblock arrays when possible
- various cleanups
- atomic DMA pool updates
- better diagnostics of missing/corrupt device tree
- export information to allow userspace kexec tool to place images more
intelligently, so that the device tree isn't overwritten by the
booting kernel
- make early_printk more efficient on semihosted systems
- noMMU cleanups
- SA1111 PCMCIA update in preparation for further cleanups
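
As a rough orientation for the compressed-.data item above: in an XIP kernel,
.data is stored in ROM at its load address (__data_loc in the linker script
shown further down) and must be copied, or with CONFIG_XIP_DEFLATED_DATA
decompressed via __inflate_kernel_data(), to its run-time address at _sdata
before C code can use it. The C below is only an illustration of that idea,
not the kernel's actual early boot code, and copy_xip_data() is a made-up
name.

/* Illustrative only: conceptual ROM-to-RAM copy of the XIP .data section. */
extern char __data_loc[];   /* LMA: where .data sits in the XIP image (ROM) */
extern char _sdata[];       /* VMA: where .data must live at run time (RAM) */
extern char _edata[];       /* end of the writable data in RAM */

void copy_xip_data(void)
{
        const char *src = __data_loc;
        char *dst = _sdata;

        /* Byte copy for clarity; the kernel's early assembly does this
         * more efficiently (and decompresses instead when the data is
         * stored deflated). */
        while (dst < _edata)
                *dst++ = *src++;
}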
* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (38 commits)
ARM: 8719/1: NOMMU: work around maybe-uninitialized warning
ARM: 8717/2: debug printch/printascii: translate '\n' to "\r\n" not "\n\r"
ARM: 8713/1: NOMMU: Support MPU in XIP configuration
ARM: 8712/1: NOMMU: Use more MPU regions to cover memory
ARM: 8711/1: V7M: Add support for MPU to M-class
ARM: 8710/1: Kconfig: Kill CONFIG_VECTORS_BASE
ARM: 8709/1: NOMMU: Disallow MPU for XIP
ARM: 8708/1: NOMMU: Rework MPU to be mostly done in C
ARM: 8707/1: NOMMU: Update MPU accessors to use cp15 helpers
ARM: 8706/1: NOMMU: Move out MPU setup in separate module
ARM: 8702/1: head-common.S: Clear lr before jumping to start_kernel()
ARM: 8705/1: early_printk: use printascii() rather than printch()
ARM: 8703/1: debug.S: move hexbuf to a writable section
ARM: add additional table to compressed kernel
ARM: decompressor: fix BSS size calculation
pcmcia: sa1111: remove special sa1111 mmio accessors
pcmcia: sa1111: use sa1111_get_irq() to obtain IRQ resources
ARM: better diagnostics with missing/corrupt dtb
ARM: 8699/1: dma-mapping: Remove init_dma_coherent_pool_size()
ARM: 8698/1: dma-mapping: Mark atomic_pool as __ro_after_init
...
Diffstat (limited to 'arch/arm/kernel/vmlinux-xip.lds.S')
-rw-r--r--  arch/arm/kernel/vmlinux-xip.lds.S  115
1 file changed, 65 insertions(+), 50 deletions(-)
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 0951df916b85..ec4b3f94ad80 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -7,6 +7,8 @@
 /* No __ro_after_init data in the .rodata section - which will always be ro */
 #define RO_AFTER_INIT_DATA
 
+#include <linux/sizes.h>
+
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/cache.h>
 #include <asm/thread_info.h>
@@ -78,9 +80,7 @@ SECTIONS
                 *(.text.fixup)
                 *(__ex_table)
 #endif
-#ifndef CONFIG_SMP_ON_UP
                 *(.alt.smp.init)
-#endif
                 *(.discard)
                 *(.discard.*)
         }
@@ -182,19 +182,7 @@ SECTIONS
                 *(.taglist.init)
                 __tagtable_end = .;
         }
-#ifdef CONFIG_SMP_ON_UP
-        .init.smpalt : {
-                __smpalt_begin = .;
-                *(.alt.smp.init)
-                __smpalt_end = .;
-        }
-#endif
-        .init.pv_table : {
-                __pv_table_begin = .;
-                *(.pv_table)
-                __pv_table_end = .;
-        }
-        .init.data : {
+        .init.rodata : {
                 INIT_SETUP(16)
                 INIT_CALLS
                 CON_INITCALL
@@ -202,48 +190,49 @@ SECTIONS
                 INIT_RAM_FS
         }
 
-#ifdef CONFIG_SMP
-        PERCPU_SECTION(L1_CACHE_BYTES)
+#ifdef CONFIG_ARM_MPU
+        . = ALIGN(SZ_128K);
 #endif
-
         _exiprom = .;                   /* End of XIP ROM area */
-        __data_loc = ALIGN(4);          /* location in binary */
-        . = PAGE_OFFSET + TEXT_OFFSET;
 
-        .data : AT(__data_loc) {
-                _data = .;              /* address in memory */
-                _sdata = .;
-
-                /*
-                 * first, the init task union, aligned
-                 * to an 8192 byte boundary.
-                 */
-                INIT_TASK_DATA(THREAD_SIZE)
+/*
+ * From this point, stuff is considered writable and will be copied to RAM
+ */
+        __data_loc = ALIGN(4);          /* location in file */
+        . = PAGE_OFFSET + TEXT_OFFSET;  /* location in memory */
+#undef LOAD_OFFSET
+#define LOAD_OFFSET (PAGE_OFFSET + TEXT_OFFSET - __data_loc)
+
+        . = ALIGN(THREAD_SIZE);
+        _sdata = .;
+        RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+        .data.ro_after_init : AT(ADDR(.data.ro_after_init) - LOAD_OFFSET) {
+                *(.data..ro_after_init)
+        }
+        _edata = .;
 
-                . = ALIGN(PAGE_SIZE);
-                __init_begin = .;
+        . = ALIGN(PAGE_SIZE);
+        __init_begin = .;
+        .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
                 INIT_DATA
+        }
+        .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
                 ARM_EXIT_KEEP(EXIT_DATA)
-                . = ALIGN(PAGE_SIZE);
-                __init_end = .;
-
-                *(.data..ro_after_init)
-
-                NOSAVE_DATA
-                CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
-                READ_MOSTLY_DATA(L1_CACHE_BYTES)
-
-                /*
-                 * and the usual data section
-                 */
-                DATA_DATA
-                CONSTRUCTORS
-
-                _edata = .;
         }
-        _edata_loc = __data_loc + SIZEOF(.data);
+#ifdef CONFIG_SMP
+        PERCPU_SECTION(L1_CACHE_BYTES)
+#endif
+
+        /*
+         * End of copied data. We need a dummy section to get its LMA.
+         * Also located before final ALIGN() as trailing padding is not stored
+         * in the resulting binary file and useless to copy.
+         */
+        .data.endmark : AT(ADDR(.data.endmark) - LOAD_OFFSET) { }
+        _edata_loc = LOADADDR(.data.endmark);
 
-        BUG_TABLE
+        . = ALIGN(PAGE_SIZE);
+        __init_end = .;
 
 #ifdef CONFIG_HAVE_TCM
         /*
@@ -302,7 +291,7 @@ SECTIONS
         }
 #endif
 
-        BSS_SECTION(0, 0, 0)
+        BSS_SECTION(0, 0, 8)
         _end = .;
 
         STABS_DEBUG
@@ -323,3 +312,29 @@ ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
  */
 ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
         "HYP init code too big or misaligned")
+
+#ifdef CONFIG_XIP_DEFLATED_DATA
+/*
+ * The .bss is used as a stack area for __inflate_kernel_data() whose stack
+ * frame is 9568 bytes. Make sure it has extra room left.
+ */
+ASSERT((_end - __bss_start) >= 12288, ".bss too small for CONFIG_XIP_DEFLATED_DATA")
+#endif
+
+#ifdef CONFIG_ARM_MPU
+/*
+ * Due to PMSAv7 restriction on base address and size we have to
+ * enforce minimal alignment restrictions. It was seen that weaker
+ * alignment restriction on _xiprom will likely force XIP address
+ * space spawns multiple MPU regions thus it is likely we run in
+ * situation when we are reprogramming MPU region we run on with
+ * something which doesn't cover reprogramming code itself, so as soon
+ * as we update MPU settings we'd immediately try to execute straight
+ * from background region which is XN.
+ * It seem that alignment in 1M should suit most users.
+ * _exiprom is aligned as 1/8 of 1M so can be covered by subregion
+ * disable
+ */
+ASSERT(!(_xiprom & (SZ_1M - 1)), "XIP start address may cause MPU programming issues")
+ASSERT(!(_exiprom & (SZ_128K - 1)), "XIP end address may cause MPU programming issues")
+#endif
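
The two MPU ASSERTs at the end of the diff encode a PMSAv7 property: an MPU
region's size is a power of two, its base must be aligned to that size, and
each of its 8 subregions (1/8 of the region, i.e. 128 KiB for a 1 MiB region)
can be disabled individually. The standalone C below only illustrates that
constraint; it is not kernel code and the helper names are made up.

#include <stdbool.h>
#include <stdint.h>

#define SZ_1M   0x00100000u
#define SZ_128K 0x00020000u

/* A PMSAv7 region base is valid only if the region size is a power of two
 * and the base is aligned to that size. */
bool pmsav7_base_ok(uint32_t base, uint32_t region_size)
{
        return region_size && !(region_size & (region_size - 1)) &&
               !(base & (region_size - 1));
}

/* The region end only needs subregion granularity (1/8 of the region size),
 * since trailing subregions can be disabled. */
bool pmsav7_end_coverable(uint32_t end, uint32_t region_size)
{
        return !(end & (region_size / 8 - 1));
}

/* With a 1 MiB region covering the XIP text, the linker script's checks are
 * equivalent to pmsav7_base_ok(_xiprom, SZ_1M) and
 * pmsav7_end_coverable(_exiprom, SZ_1M), i.e. _exiprom on a 128 KiB boundary. */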