author     Ingo Molnar <mingo@kernel.org>  2023-10-09 18:09:23 +0200
committer  Ingo Molnar <mingo@kernel.org>  2023-10-09 18:09:23 +0200
commit     fdb8b7a1af31d69ee1c8ddc02926cb409eaaecc3
tree       9af7010f5f93005adbf6f98c6557552b86d32697  /arch/loongarch
parent     locking/seqlock: Fix typo in comment
parent     Linux 6.6-rc5
Merge tag 'v6.6-rc5' into locking/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/loongarch')
25 files changed, 200 insertions(+), 126 deletions(-)
diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h
index 5c9c03bdf915..b24437e28c6e 100644
--- a/arch/loongarch/include/asm/addrspace.h
+++ b/arch/loongarch/include/asm/addrspace.h
@@ -19,7 +19,7 @@
  */
 #ifndef __ASSEMBLY__
 #ifndef PHYS_OFFSET
-#define PHYS_OFFSET _AC(0, UL)
+#define PHYS_OFFSET _UL(0)
 #endif
 extern unsigned long vm_map_base;
 #endif /* __ASSEMBLY__ */
@@ -43,7 +43,7 @@ extern unsigned long vm_map_base;
  * Memory above this physical address will be considered highmem.
  */
 #ifndef HIGHMEM_START
-#define HIGHMEM_START (_AC(1, UL) << _AC(DMW_PABITS, UL))
+#define HIGHMEM_START (_UL(1) << _UL(DMW_PABITS))
 #endif
 
 #define TO_PHYS(x) ( ((x) & TO_PHYS_MASK))
@@ -65,16 +65,16 @@ extern unsigned long vm_map_base;
 #define _ATYPE_
 #define _ATYPE32_
 #define _ATYPE64_
-#define _CONST64_(x) x
 #else
 #define _ATYPE_ __PTRDIFF_TYPE__
 #define _ATYPE32_ int
 #define _ATYPE64_ __s64
+#endif
+
 #ifdef CONFIG_64BIT
-#define _CONST64_(x) x ## UL
+#define _CONST64_(x) _UL(x)
 #else
-#define _CONST64_(x) x ## ULL
-#endif
+#define _CONST64_(x) _ULL(x)
 #endif
 
 /*
diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h
index 7af0cebf28d7..b9a4ab54285c 100644
--- a/arch/loongarch/include/asm/elf.h
+++ b/arch/loongarch/include/asm/elf.h
@@ -111,6 +111,15 @@
 #define R_LARCH_TLS_GD_HI20 98
 #define R_LARCH_32_PCREL 99
 #define R_LARCH_RELAX 100
+#define R_LARCH_DELETE 101
+#define R_LARCH_ALIGN 102
+#define R_LARCH_PCREL20_S2 103
+#define R_LARCH_CFA 104
+#define R_LARCH_ADD6 105
+#define R_LARCH_SUB6 106
+#define R_LARCH_ADD_ULEB128 107
+#define R_LARCH_SUB_ULEB128 108
+#define R_LARCH_64_PCREL 109
 
 #ifndef ELF_ARCH
 
diff --git a/arch/loongarch/include/asm/exception.h b/arch/loongarch/include/asm/exception.h
new file mode 100644
index 000000000000..af74a3fdcad1
--- /dev/null
+++ b/arch/loongarch/include/asm/exception.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_EXCEPTION_H
+#define __ASM_EXCEPTION_H
+
+#include <asm/ptrace.h>
+#include <linux/kprobes.h>
+
+void show_registers(struct pt_regs *regs);
+
+asmlinkage void cache_parity_error(void);
+asmlinkage void noinstr do_ade(struct pt_regs *regs);
+asmlinkage void noinstr do_ale(struct pt_regs *regs);
+asmlinkage void noinstr do_bce(struct pt_regs *regs);
+asmlinkage void noinstr do_bp(struct pt_regs *regs);
+asmlinkage void noinstr do_ri(struct pt_regs *regs);
+asmlinkage void noinstr do_fpu(struct pt_regs *regs);
+asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr);
+asmlinkage void noinstr do_lsx(struct pt_regs *regs);
+asmlinkage void noinstr do_lasx(struct pt_regs *regs);
+asmlinkage void noinstr do_lbt(struct pt_regs *regs);
+asmlinkage void noinstr do_watch(struct pt_regs *regs);
+asmlinkage void noinstr do_syscall(struct pt_regs *regs);
+asmlinkage void noinstr do_reserved(struct pt_regs *regs);
+asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp);
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+                        unsigned long write, unsigned long address);
+
+asmlinkage void handle_ade(void);
+asmlinkage void handle_ale(void);
+asmlinkage void handle_bce(void);
+asmlinkage void handle_sys(void);
+asmlinkage void handle_bp(void);
+asmlinkage void handle_ri(void);
+asmlinkage void handle_fpu(void);
+asmlinkage void handle_fpe(void);
+asmlinkage void handle_lsx(void);
+asmlinkage void handle_lasx(void);
+asmlinkage void handle_lbt(void);
+asmlinkage void handle_watch(void);
+asmlinkage void handle_reserved(void);
+asmlinkage void handle_vint(void);
+asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs);
+
+#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h
index deeff8158f45..cd6084f4e153 100644
--- a/arch/loongarch/include/asm/kasan.h
+++ b/arch/loongarch/include/asm/kasan.h
@@ -10,8 +10,6 @@
 #include <asm/io.h>
 #include <asm/pgtable.h>
 
-#define __HAVE_ARCH_SHADOW_MAP
-
 #define KASAN_SHADOW_SCALE_SHIFT 3
 #define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
 
@@ -62,61 +60,22 @@ extern bool kasan_early_stage;
 
 extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
 
+#define kasan_mem_to_shadow kasan_mem_to_shadow
+void *kasan_mem_to_shadow(const void *addr);
+
+#define kasan_shadow_to_mem kasan_shadow_to_mem
+const void *kasan_shadow_to_mem(const void *shadow_addr);
+
 #define kasan_arch_is_ready kasan_arch_is_ready
 static __always_inline bool kasan_arch_is_ready(void)
 {
        return !kasan_early_stage;
 }
 
-static inline void *kasan_mem_to_shadow(const void *addr)
-{
-       if (!kasan_arch_is_ready()) {
-               return (void *)(kasan_early_shadow_page);
-       } else {
-               unsigned long maddr = (unsigned long)addr;
-               unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
-               unsigned long offset = 0;
-
-               maddr &= XRANGE_SHADOW_MASK;
-               switch (xrange) {
-               case XKPRANGE_CC_SEG:
-                       offset = XKPRANGE_CC_SHADOW_OFFSET;
-                       break;
-               case XKPRANGE_UC_SEG:
-                       offset = XKPRANGE_UC_SHADOW_OFFSET;
-                       break;
-               case XKVRANGE_VC_SEG:
-                       offset = XKVRANGE_VC_SHADOW_OFFSET;
-                       break;
-               default:
-                       WARN_ON(1);
-                       return NULL;
-               }
-
-               return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
-       }
-}
-
-static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
+#define addr_has_metadata addr_has_metadata
+static __always_inline bool addr_has_metadata(const void *addr)
 {
-       unsigned long addr = (unsigned long)shadow_addr;
-
-       if (unlikely(addr > KASAN_SHADOW_END) ||
-               unlikely(addr < KASAN_SHADOW_START)) {
-               WARN_ON(1);
-               return NULL;
-       }
-
-       if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
-               return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
-       else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
-               return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
-       else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
-               return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
-       else {
-               WARN_ON(1);
-               return NULL;
-       }
+       return (kasan_mem_to_shadow((void *)addr) != NULL);
 }
 
 void kasan_init(void);
diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
index 66ecb480c894..f81e5f01d619 100644
--- a/arch/loongarch/include/asm/smp.h
+++ b/arch/loongarch/include/asm/smp.h
@@ -70,6 +70,7 @@ struct secondary_data {
 extern struct secondary_data cpuboot_data;
 
 extern asmlinkage void smpboot_entry(void);
+extern asmlinkage void start_secondary(void);
 
 extern void calculate_cpu_foreign_map(void);
 
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index c56ea0b75448..4fcc168f0732 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -19,6 +19,10 @@ obj-$(CONFIG_CPU_HAS_LBT) += lbt.o
 
 obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
 
+CFLAGS_module.o     += $(call cc-option,-Wno-override-init,)
+CFLAGS_syscall.o    += $(call cc-option,-Wno-override-init,)
+CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,)
+
 ifdef CONFIG_FUNCTION_TRACER
   ifndef CONFIG_DYNAMIC_FTRACE
     obj-y += mcount.o ftrace.o
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
index 9450e09073eb..8e00a754e548 100644
--- a/arch/loongarch/kernel/acpi.c
+++ b/arch/loongarch/kernel/acpi.c
@@ -281,7 +281,6 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
        pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
 }
 
-void __init acpi_numa_arch_fixup(void) {}
 #endif
 
 void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
diff --git a/arch/loongarch/kernel/mem.c b/arch/loongarch/kernel/mem.c
index 4a4107a6a965..aed901c57fb4 100644
--- a/arch/loongarch/kernel/mem.c
+++ b/arch/loongarch/kernel/mem.c
@@ -50,7 +50,6 @@ void __init memblock_init(void)
        }
 
        memblock_set_current_limit(PFN_PHYS(max_low_pfn));
-       memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
 
        /* Reserve the first 2MB */
        memblock_reserve(PHYS_OFFSET, 0x200000);
@@ -58,4 +57,7 @@ void __init memblock_init(void)
        /* Reserve the kernel text/data/bss */
        memblock_reserve(__pa_symbol(&_text),
                         __pa_symbol(&_end) - __pa_symbol(&_text));
+
+       memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
+       memblock_set_node(0, PHYS_ADDR_MAX, &memblock.reserved, 0);
 }
diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c
index d4dbcda1c4b0..e2f30ff9afde 100644
--- a/arch/loongarch/kernel/module-sections.c
+++ b/arch/loongarch/kernel/module-sections.c
@@ -6,6 +6,7 @@
 #include <linux/elf.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/moduleloader.h>
 #include <linux/ftrace.h>
 
 Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)
diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c
index b8b86088b2dd..b13b2858fe39 100644
--- a/arch/loongarch/kernel/module.c
+++ b/arch/loongarch/kernel/module.c
@@ -367,6 +367,24 @@ static int apply_r_larch_got_pc(struct module *mod,
        return apply_r_larch_pcala(mod, location, got, rela_stack, rela_stack_top, type);
 }
 
+static int apply_r_larch_32_pcrel(struct module *mod, u32 *location, Elf_Addr v,
+                                  s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+       ptrdiff_t offset = (void *)v - (void *)location;
+
+       *(u32 *)location = offset;
+       return 0;
+}
+
+static int apply_r_larch_64_pcrel(struct module *mod, u32 *location, Elf_Addr v,
+                                  s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+       ptrdiff_t offset = (void *)v - (void *)location;
+
+       *(u64 *)location = offset;
+       return 0;
+}
+
 /*
  * reloc_handlers_rela() - Apply a particular relocation to a module
  * @mod: the module to apply the reloc to
@@ -382,7 +400,7 @@ typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v,
 
 /* The handlers for known reloc types */
 static reloc_rela_handler reloc_rela_handlers[] = {
-       [R_LARCH_NONE ... R_LARCH_RELAX] = apply_r_larch_error,
+       [R_LARCH_NONE ... R_LARCH_64_PCREL] = apply_r_larch_error,
 
        [R_LARCH_NONE] = apply_r_larch_none,
        [R_LARCH_32] = apply_r_larch_32,
@@ -396,6 +414,8 @@ static reloc_rela_handler reloc_rela_handlers[] = {
        [R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field,
        [R_LARCH_ADD32 ... R_LARCH_SUB64] = apply_r_larch_add_sub,
        [R_LARCH_PCALA_HI20...R_LARCH_PCALA64_HI12] = apply_r_larch_pcala,
+       [R_LARCH_32_PCREL] = apply_r_larch_32_pcrel,
+       [R_LARCH_64_PCREL] = apply_r_larch_64_pcrel,
 };
 
 int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
index c7d33c489e04..6e65ff12d5c7 100644
--- a/arch/loongarch/kernel/numa.c
+++ b/arch/loongarch/kernel/numa.c
@@ -436,7 +436,7 @@ void __init paging_init(void)
 
 void __init mem_init(void)
 {
-       high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
+       high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
        memblock_free_all();
 }
 
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index 3cb082e0c992..767d94cce0de 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -37,6 +37,7 @@
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
 #include <asm/elf.h>
+#include <asm/exec.h>
 #include <asm/fpu.h>
 #include <asm/lbt.h>
 #include <asm/io.h>
diff --git a/arch/loongarch/kernel/relocate_kernel.S b/arch/loongarch/kernel/relocate_kernel.S
index d13252553a7c..f49f6b053763 100644
--- a/arch/loongarch/kernel/relocate_kernel.S
+++ b/arch/loongarch/kernel/relocate_kernel.S
@@ -72,7 +72,6 @@ copy_word:
        LONG_ADDI  s5, s5, -1
        beqz       s5, process_entry
        b          copy_word
-       b          process_entry
 
 done:
        ibar       0
diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c
index 504fdfe85203..4a3686d13349 100644
--- a/arch/loongarch/kernel/signal.c
+++ b/arch/loongarch/kernel/signal.c
@@ -13,6 +13,7 @@
 #include <linux/audit.h>
 #include <linux/cache.h>
 #include <linux/context_tracking.h>
+#include <linux/entry-common.h>
 #include <linux/irqflags.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
@@ -891,8 +892,8 @@ static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned lon
        return new_sp;
 }
 
-void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
-                          struct extctx_layout *extctx)
+static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+                                 struct extctx_layout *extctx)
 {
        unsigned long sp;
 
@@ -922,7 +923,7 @@ void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
  * Atomically swap in the new signal mask, and wait for a signal.
  */
-asmlinkage long sys_rt_sigreturn(void)
+SYSCALL_DEFINE0(rt_sigreturn)
 {
        int sig;
        sigset_t set;
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 6667b0a90f81..ef35c871244f 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -13,6 +13,7 @@
 #include <linux/cpumask.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/profile.h>
 #include <linux/seq_file.h>
 #include <linux/smp.h>
 #include <linux/threads.h>
@@ -556,10 +557,12 @@ void smp_send_stop(void)
        smp_call_function(stop_this_cpu, NULL, 0);
 }
 
+#ifdef CONFIG_PROFILING
 int setup_profiling_timer(unsigned int multiplier)
 {
        return 0;
 }
+#endif
 
 static void flush_tlb_all_ipi(void *info)
 {
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
index 3fc4211db989..b4c5acd7aa3b 100644
--- a/arch/loongarch/kernel/syscall.c
+++ b/arch/loongarch/kernel/syscall.c
@@ -13,6 +13,7 @@
 #include <linux/unistd.h>
 
 #include <asm/asm.h>
+#include <asm/exception.h>
 #include <asm/signal.h>
 #include <asm/switch_to.h>
 #include <asm-generic/syscalls.h>
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
index c189e03cd5da..3064af94db9c 100644
--- a/arch/loongarch/kernel/time.c
+++ b/arch/loongarch/kernel/time.c
@@ -29,7 +29,7 @@ static void constant_event_handler(struct clock_event_device *dev)
 {
 }
 
-irqreturn_t constant_timer_interrupt(int irq, void *data)
+static irqreturn_t constant_timer_interrupt(int irq, void *data)
 {
        int cpu = smp_processor_id();
        struct clock_event_device *cd;
diff --git a/arch/loongarch/kernel/topology.c b/arch/loongarch/kernel/topology.c
index caa7cd859078..3fd166006698 100644
--- a/arch/loongarch/kernel/topology.c
+++ b/arch/loongarch/kernel/topology.c
@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <linux/acpi.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
@@ -7,6 +8,8 @@
 #include <linux/percpu.h>
 
 #include <asm/bootinfo.h>
+
+#include <acpi/processor.h>
+
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index 65214774ef7c..aebfc3733a76 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -25,7 +25,6 @@
 #include <linux/ptrace.h>
 #include <linux/kgdb.h>
 #include <linux/kdebug.h>
-#include <linux/kprobes.h>
 #include <linux/notifier.h>
 #include <linux/irq.h>
 #include <linux/perf_event.h>
@@ -35,6 +34,7 @@
 #include <asm/branch.h>
 #include <asm/break.h>
 #include <asm/cpu.h>
+#include <asm/exception.h>
 #include <asm/fpu.h>
 #include <asm/lbt.h>
 #include <asm/inst.h>
@@ -53,21 +53,6 @@
 
 #include "access-helper.h"
 
-extern asmlinkage void handle_ade(void);
-extern asmlinkage void handle_ale(void);
-extern asmlinkage void handle_bce(void);
-extern asmlinkage void handle_sys(void);
-extern asmlinkage void handle_bp(void);
-extern asmlinkage void handle_ri(void);
-extern asmlinkage void handle_fpu(void);
-extern asmlinkage void handle_fpe(void);
-extern asmlinkage void handle_lbt(void);
-extern asmlinkage void handle_lsx(void);
-extern asmlinkage void handle_lasx(void);
-extern asmlinkage void handle_reserved(void);
-extern asmlinkage void handle_watch(void);
-extern asmlinkage void handle_vint(void);
-
 static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
                            const char *loglvl, bool user)
 {
@@ -439,8 +424,8 @@ static inline void setup_vint_size(unsigned int size)
  * happen together with Overflow or Underflow, and `ptrace' can set
  * any bits.
  */
-void force_fcsr_sig(unsigned long fcsr, void __user *fault_addr,
-                    struct task_struct *tsk)
+static void force_fcsr_sig(unsigned long fcsr,
+                       void __user *fault_addr, struct task_struct *tsk)
 {
        int si_code = FPE_FLTUNK;
 
@@ -458,7 +443,7 @@ void force_fcsr_sig(unsigned long fcsr, void __user *fault_addr,
        force_sig_fault(SIGFPE, si_code, fault_addr);
 }
 
-int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
+static int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
 {
        int si_code;
 
@@ -824,7 +809,7 @@ out:
 asmlinkage void noinstr do_ri(struct pt_regs *regs)
 {
        int status = SIGILL;
-       unsigned int opcode = 0;
+       unsigned int __maybe_unused opcode;
        unsigned int __user *era = (unsigned int __user *)exception_era(regs);
        irqentry_state_t state = irqentry_enter(regs);
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
index b1686afcf876..bb2ec86f37a8 100644
--- a/arch/loongarch/kernel/vmlinux.lds.S
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -53,33 +53,6 @@ SECTIONS
        . = ALIGN(PECOFF_SEGMENT_ALIGN);
        _etext = .;
 
-       /*
-        * struct alt_inst entries. From the header (alternative.h):
-        * "Alternative instructions for different CPU types or capabilities"
-        * Think locking instructions on spinlocks.
-        */
-       . = ALIGN(4);
-       .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
-               __alt_instructions = .;
-               *(.altinstructions)
-               __alt_instructions_end = .;
-       }
-
-#ifdef CONFIG_RELOCATABLE
-       . = ALIGN(8);
-       .la_abs : AT(ADDR(.la_abs) - LOAD_OFFSET) {
-               __la_abs_begin = .;
-               *(.la_abs)
-               __la_abs_end = .;
-       }
-#endif
-
-       .got : ALIGN(16) { *(.got) }
-       .plt : ALIGN(16) { *(.plt) }
-       .got.plt : ALIGN(16) { *(.got.plt) }
-
-       .data.rel : { *(.data.rel*) }
-
        . = ALIGN(PECOFF_SEGMENT_ALIGN);
        __init_begin = .;
        __inittext_begin = .;
@@ -94,6 +67,18 @@ SECTIONS
 
        __initdata_begin = .;
 
+       /*
+        * struct alt_inst entries. From the header (alternative.h):
+        * "Alternative instructions for different CPU types or capabilities"
+        * Think locking instructions on spinlocks.
+        */
+       . = ALIGN(4);
+       .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
+               __alt_instructions = .;
+               *(.altinstructions)
+               __alt_instructions_end = .;
+       }
+
        INIT_DATA_SECTION(16)
 
        .exit.data : {
               EXIT_DATA
@@ -113,6 +98,11 @@ SECTIONS
        _sdata = .;
 
        RO_DATA(4096)
+
+       .got : ALIGN(16) { *(.got) }
+       .plt : ALIGN(16) { *(.plt) }
+       .got.plt : ALIGN(16) { *(.got.plt) }
+
        RW_DATA(1 << CONFIG_L1_CACHE_SHIFT, PAGE_SIZE, THREAD_SIZE)
 
        .rela.dyn : ALIGN(8) {
@@ -121,6 +111,17 @@ SECTIONS
                __rela_dyn_end = .;
        }
 
+       .data.rel : { *(.data.rel*) }
+
+#ifdef CONFIG_RELOCATABLE
+       . = ALIGN(8);
+       .la_abs : AT(ADDR(.la_abs) - LOAD_OFFSET) {
+               __la_abs_begin = .;
+               *(.la_abs)
+               __la_abs_end = .;
+       }
+#endif
+
        .sdata : {
                *(.sdata)
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
index e6376e3dce86..1fc2f6813ea0 100644
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -20,12 +20,12 @@
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/kdebug.h>
-#include <linux/kprobes.h>
 #include <linux/perf_event.h>
 #include <linux/uaccess.h>
 #include <linux/kfence.h>
 
 #include <asm/branch.h>
+#include <asm/exception.h>
 #include <asm/mmu_context.h>
 #include <asm/ptrace.h>
 
diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c
index ba138117b124..1e76fcb83093 100644
--- a/arch/loongarch/mm/hugetlbpage.c
+++ b/arch/loongarch/mm/hugetlbpage.c
@@ -50,18 +50,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
        return (pte_t *) pmd;
 }
 
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
-       if (len & ~HPAGE_MASK)
-               return -EINVAL;
-       if (addr & ~HPAGE_MASK)
-               return -EINVAL;
-       return 0;
-}
-
 int pmd_huge(pmd_t pmd)
 {
        return (pmd_val(pmd) & _PAGE_HUGE) != 0;
diff --git a/arch/loongarch/mm/ioremap.c b/arch/loongarch/mm/ioremap.c
index 73b0980ab6f5..70ca73019811 100644
--- a/arch/loongarch/mm/ioremap.c
+++ b/arch/loongarch/mm/ioremap.c
@@ -4,6 +4,7 @@
  */
 
 #include <asm/io.h>
+#include <asm-generic/early_ioremap.h>
 
 void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size)
 {
diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c
index da68bc1a4643..cc3e81fe0186 100644
--- a/arch/loongarch/mm/kasan_init.c
+++ b/arch/loongarch/mm/kasan_init.c
@@ -35,6 +35,57 @@ static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
 
 bool kasan_early_stage = true;
 
+void *kasan_mem_to_shadow(const void *addr)
+{
+       if (!kasan_arch_is_ready()) {
+               return (void *)(kasan_early_shadow_page);
+       } else {
+               unsigned long maddr = (unsigned long)addr;
+               unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
+               unsigned long offset = 0;
+
+               maddr &= XRANGE_SHADOW_MASK;
+               switch (xrange) {
+               case XKPRANGE_CC_SEG:
+                       offset = XKPRANGE_CC_SHADOW_OFFSET;
+                       break;
+               case XKPRANGE_UC_SEG:
+                       offset = XKPRANGE_UC_SHADOW_OFFSET;
+                       break;
+               case XKVRANGE_VC_SEG:
+                       offset = XKVRANGE_VC_SHADOW_OFFSET;
+                       break;
+               default:
+                       WARN_ON(1);
+                       return NULL;
+               }
+
+               return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
+       }
+}
+
+const void *kasan_shadow_to_mem(const void *shadow_addr)
+{
+       unsigned long addr = (unsigned long)shadow_addr;
+
+       if (unlikely(addr > KASAN_SHADOW_END) ||
+               unlikely(addr < KASAN_SHADOW_START)) {
+               WARN_ON(1);
+               return NULL;
+       }
+
+       if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
+               return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
+       else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
+               return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
+       else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
+               return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
+       else {
+               WARN_ON(1);
+               return NULL;
+       }
+}
+
 /*
  * Alloc memory for shadow memory page table.
  */
diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
index eb8572e201ea..2c0a411f23aa 100644
--- a/arch/loongarch/mm/tlb.c
+++ b/arch/loongarch/mm/tlb.c
@@ -261,7 +261,7 @@ unsigned long pcpu_handlers[NR_CPUS];
 #endif
 extern long exception_handlers[VECSIZE * 128 / sizeof(long)];
 
-void setup_tlb_handler(int cpu)
+static void setup_tlb_handler(int cpu)
 {
        setup_ptwalker();
        local_flush_tlb_all();