Diffstat (limited to 'arch/riscv/kernel')
33 files changed, 863 insertions, 247 deletions
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index d8bbd3207100..dc93710f0b2f 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -31,7 +31,7 @@ obj-y	+= cacheinfo.o
 obj-y	+= patch.o
 obj-$(CONFIG_MMU) += vdso.o vdso/
 
-obj-$(CONFIG_RISCV_M_MODE)	+= clint.o traps_misaligned.o
+obj-$(CONFIG_RISCV_M_MODE)	+= traps_misaligned.o
 obj-$(CONFIG_FPU)		+= fpu.o
 obj-$(CONFIG_SMP)		+= smpboot.o
 obj-$(CONFIG_SMP)		+= smp.o
@@ -51,5 +51,8 @@ ifeq ($(CONFIG_RISCV_SBI), y)
 obj-$(CONFIG_SMP)		+= cpu_ops_sbi.o
 endif
 obj-$(CONFIG_HOTPLUG_CPU)	+= cpu-hotplug.o
+obj-$(CONFIG_KGDB)		+= kgdb.o
+
+obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o
 
 clean:
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index 07cb9c10de4e..db203442c08f 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -27,9 +27,6 @@ void asm_offsets(void)
 	OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]);
 	OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]);
 	OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
-	OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
-	OFFSET(TASK_STACK, task_struct, stack);
-	OFFSET(TASK_TI, task_struct, thread_info);
 	OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
 	OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
 	OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
index 4c90c07d8c39..bd0f122965c3 100644
--- a/arch/riscv/kernel/cacheinfo.c
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -7,6 +7,23 @@
 #include <linux/cpu.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <asm/cacheinfo.h>
+
+static struct riscv_cacheinfo_ops *rv_cache_ops;
+
+void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops)
+{
+	rv_cache_ops = ops;
+}
+EXPORT_SYMBOL_GPL(riscv_set_cacheinfo_ops);
+
+const struct attribute_group *
+cache_get_priv_group(struct cacheinfo *this_leaf)
+{
+	if (rv_cache_ops && rv_cache_ops->get_priv_group)
+		return rv_cache_ops->get_priv_group(this_leaf);
+	return NULL;
+}
 
 static void ci_leaf_init(struct cacheinfo *this_leaf,
			 struct device_node *node,
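For reference, a minimal sketch of how a platform driver might use the new riscv_set_cacheinfo_ops() hook to expose extra per-cache-leaf sysfs attributes. This is not part of the commit; the my_* names are hypothetical, and the struct layout is inferred from the cacheinfo.c hunk above:

	#include <linux/cacheinfo.h>
	#include <asm/cacheinfo.h>

	/* hypothetical callback: extra attributes for a cache leaf, or NULL */
	static const struct attribute_group *
	my_get_priv_group(struct cacheinfo *this_leaf)
	{
		return NULL;
	}

	static struct riscv_cacheinfo_ops my_cache_ops = {
		.get_priv_group = my_get_priv_group,
	};

	static int __init my_soc_cache_init(void)
	{
		/* cache_get_priv_group() will now dispatch to my_get_priv_group() */
		riscv_set_cacheinfo_ops(&my_cache_ops);
		return 0;
	}
	device_initcall(my_soc_cache_init);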
diff --git a/arch/riscv/kernel/clint.c b/arch/riscv/kernel/clint.c
deleted file mode 100644
index 3647980d14c3..000000000000
--- a/arch/riscv/kernel/clint.c
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2019 Christoph Hellwig.
- */
-
-#include <linux/io.h>
-#include <linux/of_address.h>
-#include <linux/types.h>
-#include <asm/clint.h>
-#include <asm/csr.h>
-#include <asm/timex.h>
-#include <asm/smp.h>
-
-/*
- * This is the layout used by the SiFive clint, which is also shared by the qemu
- * virt platform, and the Kendryte KD210 at least.
- */
-#define CLINT_IPI_OFF		0
-#define CLINT_TIME_CMP_OFF	0x4000
-#define CLINT_TIME_VAL_OFF	0xbff8
-
-u32 __iomem *clint_ipi_base;
-
-void clint_init_boot_cpu(void)
-{
-	struct device_node *np;
-	void __iomem *base;
-
-	np = of_find_compatible_node(NULL, NULL, "riscv,clint0");
-	if (!np) {
-		panic("clint not found");
-		return;
-	}
-
-	base = of_iomap(np, 0);
-	if (!base)
-		panic("could not map CLINT");
-
-	clint_ipi_base = base + CLINT_IPI_OFF;
-	riscv_time_cmp = base + CLINT_TIME_CMP_OFF;
-	riscv_time_val = base + CLINT_TIME_VAL_OFF;
-
-	clint_clear_ipi(boot_cpu_hartid);
-}
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index 40a3c442ac5f..6d59e6906fdd 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -44,6 +44,22 @@ int riscv_of_processor_hartid(struct device_node *node)
 	return hart;
 }
 
+/*
+ * Find hart ID of the CPU DT node under which given DT node falls.
+ *
+ * To achieve this, we walk up the DT tree until we find an active
+ * RISC-V core (HART) node and extract the hart ID from it.
+ */
+int riscv_of_parent_hartid(struct device_node *node)
+{
+	for (; node; node = node->parent) {
+		if (of_device_is_compatible(node, "riscv"))
+			return riscv_of_processor_hartid(node);
+	}
+
+	return -1;
+}
+
 #ifdef CONFIG_PROC_FS
 
 static void print_isa(struct seq_file *f, const char *isa)
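A short sketch of the kind of caller riscv_of_parent_hartid() is meant for: an interrupt-controller driver mapping its per-hart INTC device-tree node back to the hart it belongs to. Not from the commit; the function name is hypothetical and the prototype's header location is assumed:

	#include <linux/of.h>
	#include <asm/processor.h>	/* assumed home of the prototype */

	static int intc_node_to_hartid(struct device_node *intc_np)
	{
		/* the INTC node sits under a cpu node that is "riscv" compatible */
		int hartid = riscv_of_parent_hartid(intc_np);

		if (hartid < 0)
			pr_warn("%pOF: no parent hart found\n", intc_np);
		return hartid;
	}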
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 56d071b2c0a1..524d918f3601 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -97,17 +97,36 @@ _save_context:
 	la gp, __global_pointer$
 .option pop
 
-	la ra, ret_from_exception
+#ifdef CONFIG_TRACE_IRQFLAGS
+	call trace_hardirqs_off
+#endif
+
+#ifdef CONFIG_CONTEXT_TRACKING
+	/* If previous state is in user mode, call context_tracking_user_exit. */
+	li a0, SR_PP
+	and a0, s1, a0
+	bnez a0, skip_context_tracking
+	call context_tracking_user_exit
+skip_context_tracking:
+#endif
+
 	/*
 	 * MSB of cause differentiates between
 	 * interrupts and exceptions
 	 */
 	bge s4, zero, 1f
 
+	la ra, ret_from_exception
+
 	/* Handle interrupts */
 	move a0, sp /* pt_regs */
-	tail do_IRQ
+	la a1, handle_arch_irq
+	REG_L a1, (a1)
+	jr a1
 1:
+#ifdef CONFIG_TRACE_IRQFLAGS
+	call trace_hardirqs_on
+#endif
 	/*
 	 * Exceptions run with interrupts enabled or disabled depending on the
 	 * state of SR_PIE in m/sstatus.
@@ -117,6 +136,7 @@ _save_context:
 	csrs CSR_STATUS, SR_IE
 1:
+	la ra, ret_from_exception
 	/* Handle syscalls */
 	li t0, EXC_SYSCALL
 	beq s4, t0, handle_syscall
@@ -135,6 +155,17 @@ _save_context:
 	tail do_trap_unknown
 
 handle_syscall:
+#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
+	/* Recover a0 - a7 for system calls */
+	REG_L a0, PT_A0(sp)
+	REG_L a1, PT_A1(sp)
+	REG_L a2, PT_A2(sp)
+	REG_L a3, PT_A3(sp)
+	REG_L a4, PT_A4(sp)
+	REG_L a5, PT_A5(sp)
+	REG_L a6, PT_A6(sp)
+	REG_L a7, PT_A7(sp)
+#endif
 	/* save the initial A0 value (needed in signal handlers) */
 	REG_S a0, PT_ORIG_A0(sp)
 	/*
@@ -188,6 +219,9 @@ ret_from_syscall_rejected:
 ret_from_exception:
 	REG_L s0, PT_STATUS(sp)
 	csrc CSR_STATUS, SR_IE
+#ifdef CONFIG_TRACE_IRQFLAGS
+	call trace_hardirqs_off
+#endif
 #ifdef CONFIG_RISCV_M_MODE
 	/* the MPP value is too large to be used as an immediate arg for addi */
 	li t0, SR_MPP
@@ -203,6 +237,10 @@ resume_userspace:
 	andi s1, s0, _TIF_WORK_MASK
 	bnez s1, work_pending
 
+#ifdef CONFIG_CONTEXT_TRACKING
+	call context_tracking_user_enter
+#endif
+
 	/* Save unwound kernel stack pointer in thread_info */
 	addi s0, sp, PT_SIZE_ON_STACK
 	REG_S s0, TASK_TI_KERNEL_SP(tp)
@@ -214,6 +252,16 @@ resume_userspace:
 	csrw CSR_SCRATCH, tp
 
 restore_all:
+#ifdef CONFIG_TRACE_IRQFLAGS
+	REG_L s1, PT_STATUS(sp)
+	andi t0, s1, SR_PIE
+	beqz t0, 1f
+	call trace_hardirqs_on
+	j 2f
+1:
+	call trace_hardirqs_off
+2:
+#endif
 	REG_L a0, PT_STATUS(sp)
 	/*
	 * The current load reservation is effectively part of the processor's
@@ -387,12 +435,8 @@ ENTRY(__switch_to)
 	lw a4, TASK_TI_CPU(a1)
 	sw a3, TASK_TI_CPU(a1)
 	sw a4, TASK_TI_CPU(a0)
-#if TASK_TI != 0
-#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
-	addi tp, a1, TASK_TI
-#else
+	/* The offset of thread_info in task_struct is zero. */
 	move tp, a1
-#endif
 	ret
ENDPROC(__switch_to)
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index ce69b34ff55d..99e12faa5498 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -7,10 +7,23 @@
 
 #include <linux/ftrace.h>
 #include <linux/uaccess.h>
+#include <linux/memory.h>
 #include <asm/cacheflush.h>
 #include <asm/patch.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE
+int ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
+{
+	mutex_lock(&text_mutex);
+	return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
+{
+	mutex_unlock(&text_mutex);
+	return 0;
+}
+
 static int ftrace_check_current_call(unsigned long hook_pos,
				      unsigned int *expected)
 {
@@ -25,7 +38,8 @@ static int ftrace_check_current_call(unsigned long hook_pos,
 	 * Read the text we want to modify;
 	 * return must be -EFAULT on read error
 	 */
-	if (probe_kernel_read(replaced, (void *)hook_pos, MCOUNT_INSN_SIZE))
+	if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
+			MCOUNT_INSN_SIZE))
 		return -EFAULT;
 
 	/*
@@ -51,7 +65,7 @@ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
 		make_call(hook_pos, target, call);
 
 	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
-	if (riscv_patch_text_nosync
+	if (patch_text_nosync
	    ((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
 		return -EPERM;
 
@@ -83,6 +97,25 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 	return __ftrace_modify_call(rec->ip, addr, false);
 }
 
+/*
+ * This is called early on, and isn't wrapped by
+ * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
+ * text_mutex, which triggers a lockdep failure.  SMP isn't running so we could
+ * just directly poke the text, but it's simpler to just take the lock
+ * ourselves.
+ */
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+	int out;
+
+	ftrace_arch_code_modify_prepare();
+	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+	ftrace_arch_code_modify_post_process();
+
+	return out;
+}
+
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
 	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 98a406474e7d..0a4e81b8dc79 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -138,9 +138,23 @@ secondary_start_common:
 	la a0, swapper_pg_dir
 	call relocate
 #endif
+	call setup_trap_vector
 	tail smp_callin
 #endif /* CONFIG_SMP */
 
+.align 2
+setup_trap_vector:
+	/* Set trap vector to exception handler */
+	la a0, handle_exception
+	csrw CSR_TVEC, a0
+
+	/*
+	 * Set sup0 scratch register to 0, indicating to exception vector that
+	 * we are presently executing in kernel.
+	 */
+	csrw CSR_SCRATCH, zero
+	ret
+
 .Lsecondary_park:
 	/* We lack SMP support or have too many harts, so park this hart */
 	wfi
@@ -161,11 +175,20 @@ ENTRY(_start_kernel)
 	/* Reset all registers except ra, a0, a1 */
 	call reset_regs
 
-	/* Setup a PMP to permit access to all of memory. */
+	/*
+	 * Setup a PMP to permit access to all of memory.  Some machines may
+	 * not implement PMPs, so we set up a quick trap handler to just skip
+	 * touching the PMPs on any trap.
+	 */
+	la a0, pmp_done
+	csrw CSR_TVEC, a0
+
 	li a0, -1
 	csrw CSR_PMPADDR0, a0
 	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
 	csrw CSR_PMPCFG0, a0
+.align 2
+pmp_done:
 
 	/*
 	 * The hartid in a0 is expected later on, and we have no firmware
@@ -225,6 +248,7 @@ clear_bss_done:
 	call relocate
 #endif /* CONFIG_MMU */
 
+	call setup_trap_vector
 	/* Restore C environment */
 	la tp, init_task
 	sw zero, TASK_TI_CPU(tp)
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index 345c4f2eba13..7207fa08d78f 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -7,7 +7,6 @@
 
 #include <linux/interrupt.h>
 #include <linux/irqchip.h>
-#include <linux/irqdomain.h>
 #include <linux/seq_file.h>
 #include <asm/smp.h>
 
@@ -17,37 +16,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 	return 0;
 }
 
-asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
-{
-	struct pt_regs *old_regs = set_irq_regs(regs);
-
-	irq_enter();
-	switch (regs->cause & ~CAUSE_IRQ_FLAG) {
-	case RV_IRQ_TIMER:
-		riscv_timer_interrupt();
-		break;
-#ifdef CONFIG_SMP
-	case RV_IRQ_SOFT:
-		/*
-		 * We only use software interrupts to pass IPIs, so if a non-SMP
-		 * system gets one, then we don't know what to do.
-		 */
-		riscv_software_interrupt();
-		break;
-#endif
-	case RV_IRQ_EXT:
-		handle_arch_irq(regs);
-		break;
-	default:
-		pr_alert("unexpected interrupt cause 0x%lx", regs->cause);
-		BUG();
-	}
-	irq_exit();
-
-	set_irq_regs(old_regs);
-}
-
 void __init init_IRQ(void)
 {
 	irqchip_init();
+	if (!handle_arch_irq)
+		panic("No interrupt controller found.");
 }
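With do_IRQ() gone, entry.S jumps straight through handle_arch_irq, which a root irqchip driver must populate. A minimal sketch of how such a driver would do that (not part of the commit; my_* names are hypothetical, set_handle_irq() is the generic helper from CONFIG_GENERIC_IRQ_MULTI_HANDLER):

	#include <linux/irq.h>

	/* hypothetical root handler: claim and dispatch pending interrupts */
	static void my_plic_handle_irq(struct pt_regs *regs)
	{
	}

	static int __init my_plic_init(void)
	{
		/* installs my_plic_handle_irq as handle_arch_irq */
		return set_handle_irq(my_plic_handle_irq);
	}

If no driver ever calls set_handle_irq(), the new check in init_IRQ() panics instead of letting the first interrupt jump through a NULL pointer.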
diff --git a/arch/riscv/kernel/jump_label.c b/arch/riscv/kernel/jump_label.c
new file mode 100644
index 000000000000..20e09056d141
--- /dev/null
+++ b/arch/riscv/kernel/jump_label.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Emil Renner Berthing
+ *
+ * Based on arch/arm64/kernel/jump_label.c
+ */
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/mutex.h>
+#include <asm/bug.h>
+#include <asm/patch.h>
+
+#define RISCV_INSN_NOP 0x00000013U
+#define RISCV_INSN_JAL 0x0000006fU
+
+void arch_jump_label_transform(struct jump_entry *entry,
+			       enum jump_label_type type)
+{
+	void *addr = (void *)jump_entry_code(entry);
+	u32 insn;
+
+	if (type == JUMP_LABEL_JMP) {
+		long offset = jump_entry_target(entry) - jump_entry_code(entry);
+
+		if (WARN_ON(offset & 1 || offset < -524288 || offset >= 524288))
+			return;
+
+		insn = RISCV_INSN_JAL |
+			(((u32)offset & GENMASK(19, 12)) << (12 - 12)) |
+			(((u32)offset & GENMASK(11, 11)) << (20 - 11)) |
+			(((u32)offset & GENMASK(10, 1)) << (21 - 1)) |
+			(((u32)offset & GENMASK(20, 20)) << (31 - 20));
+	} else {
+		insn = RISCV_INSN_NOP;
+	}
+
+	mutex_lock(&text_mutex);
+	patch_text_nosync(addr, &insn, sizeof(insn));
+	mutex_unlock(&text_mutex);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+				      enum jump_label_type type)
+{
+	/*
+	 * We use the same instructions in the arch_static_branch and
+	 * arch_static_branch_jump inline functions, so there's no
+	 * need to patch them up here.
+	 * The core will call arch_jump_label_transform when those
+	 * instructions need to be replaced.
+	 */
+}
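For context, this is what a consumer of the new jump-label support looks like. The sketch below uses the standard static-key API (my_* names are hypothetical); enabling the key is what drives the NOP-to-JAL patching implemented above:

	#include <linux/jump_label.h>

	static DEFINE_STATIC_KEY_FALSE(my_feature);

	void my_hot_path(void)
	{
		/* compiles to a NOP; patched to a JAL only when the key flips */
		if (static_branch_unlikely(&my_feature)) {
			/* rarely-enabled slow path */
		}
	}

	void my_enable_feature(void)
	{
		static_branch_enable(&my_feature);	/* triggers arch_jump_label_transform() */
	}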
diff --git a/arch/riscv/kernel/kgdb.c b/arch/riscv/kernel/kgdb.c
new file mode 100644
index 000000000000..963ed7edcff2
--- /dev/null
+++ b/arch/riscv/kernel/kgdb.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#include <linux/ptrace.h>
+#include <linux/kdebug.h>
+#include <linux/bug.h>
+#include <linux/kgdb.h>
+#include <linux/irqflags.h>
+#include <linux/string.h>
+#include <asm/cacheflush.h>
+#include <asm/gdb_xml.h>
+#include <asm/parse_asm.h>
+
+enum {
+	NOT_KGDB_BREAK = 0,
+	KGDB_SW_BREAK,
+	KGDB_COMPILED_BREAK,
+	KGDB_SW_SINGLE_STEP
+};
+
+static unsigned long stepped_address;
+static unsigned int stepped_opcode;
+
+#if __riscv_xlen == 32
+/* C.JAL is an RV32C-only instruction */
+DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL)
+#else
+#define is_c_jal_insn(opcode) 0
+#endif
+DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
+DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
+DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR)
+DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR)
+DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
+DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
+DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
+DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
+DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
+DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
+DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
+DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
+DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
+DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
+
+static int decode_register_index(unsigned long opcode, int offset)
+{
+	return (opcode >> offset) & 0x1F;
+}
+
+static int decode_register_index_short(unsigned long opcode, int offset)
+{
+	return ((opcode >> offset) & 0x7) + 8;
+}
+
+/* Calculate the new address for after a step */
+static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
+{
+	unsigned long pc = regs->epc;
+	unsigned long *regs_ptr = (unsigned long *)regs;
+	unsigned int rs1_num, rs2_num;
+	int op_code;
+
+	if (get_kernel_nofault(op_code, (void *)pc))
+		return -EINVAL;
+	if ((op_code & __INSN_LENGTH_MASK) != __INSN_LENGTH_GE_32) {
+		if (is_c_jalr_insn(op_code) || is_c_jr_insn(op_code)) {
+			rs1_num = decode_register_index(op_code, RVC_C2_RS1_OPOFF);
+			*next_addr = regs_ptr[rs1_num];
+		} else if (is_c_j_insn(op_code) || is_c_jal_insn(op_code)) {
+			*next_addr = EXTRACT_RVC_J_IMM(op_code) + pc;
+		} else if (is_c_beqz_insn(op_code)) {
+			rs1_num = decode_register_index_short(op_code,
+							      RVC_C1_RS1_OPOFF);
+			if (!rs1_num || regs_ptr[rs1_num] == 0)
+				*next_addr = EXTRACT_RVC_B_IMM(op_code) + pc;
+			else
+				*next_addr = pc + 2;
+		} else if (is_c_bnez_insn(op_code)) {
+			rs1_num =
+			    decode_register_index_short(op_code, RVC_C1_RS1_OPOFF);
+			if (rs1_num && regs_ptr[rs1_num] != 0)
+				*next_addr = EXTRACT_RVC_B_IMM(op_code) + pc;
+			else
+				*next_addr = pc + 2;
+		} else {
+			*next_addr = pc + 2;
+		}
+	} else {
+		if ((op_code & __INSN_OPCODE_MASK) == __INSN_BRANCH_OPCODE) {
+			bool result = false;
+			long imm = EXTRACT_BTYPE_IMM(op_code);
+			unsigned long rs1_val = 0, rs2_val = 0;
+
+			rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
+			rs2_num = decode_register_index(op_code, RVG_RS2_OPOFF);
+			if (rs1_num)
+				rs1_val = regs_ptr[rs1_num];
+			if (rs2_num)
+				rs2_val = regs_ptr[rs2_num];
+
+			if (is_beq_insn(op_code))
+				result = (rs1_val == rs2_val) ? true : false;
+			else if (is_bne_insn(op_code))
+				result = (rs1_val != rs2_val) ? true : false;
+			else if (is_blt_insn(op_code))
+				result =
+				    ((long)rs1_val <
+				     (long)rs2_val) ? true : false;
+			else if (is_bge_insn(op_code))
+				result =
+				    ((long)rs1_val >=
+				     (long)rs2_val) ? true : false;
+			else if (is_bltu_insn(op_code))
+				result = (rs1_val < rs2_val) ? true : false;
+			else if (is_bgeu_insn(op_code))
+				result = (rs1_val >= rs2_val) ? true : false;
+			if (result)
+				*next_addr = imm + pc;
+			else
+				*next_addr = pc + 4;
+		} else if (is_jal_insn(op_code)) {
+			*next_addr = EXTRACT_JTYPE_IMM(op_code) + pc;
+		} else if (is_jalr_insn(op_code)) {
+			rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
+			if (rs1_num)
+				*next_addr = ((unsigned long *)regs)[rs1_num];
+			*next_addr += EXTRACT_ITYPE_IMM(op_code);
+		} else if (is_sret_insn(op_code)) {
+			*next_addr = pc;
+		} else {
+			*next_addr = pc + 4;
+		}
+	}
+	return 0;
+}
+
+static int do_single_step(struct pt_regs *regs)
+{
+	/* Determine where the target instruction will send us to */
+	unsigned long addr = 0;
+	int error = get_step_address(regs, &addr);
+
+	if (error)
+		return error;
+
+	/* Store the op code in the stepped address */
+	error = get_kernel_nofault(stepped_opcode, (void *)addr);
+	if (error)
+		return error;
+
+	stepped_address = addr;
+
+	/* Replace the op code with the break instruction */
+	error = copy_to_kernel_nofault((void *)stepped_address,
+				       arch_kgdb_ops.gdb_bpt_instr,
+				       BREAK_INSTR_SIZE);
+	/* Flush and return */
+	if (!error) {
+		flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
+		kgdb_single_step = 1;
+		atomic_set(&kgdb_cpu_doing_single_step,
+			   raw_smp_processor_id());
+	} else {
+		stepped_address = 0;
+		stepped_opcode = 0;
+	}
+	return error;
+}
+
+/* Undo a single step */
+static void undo_single_step(struct pt_regs *regs)
+{
+	if (stepped_opcode != 0) {
+		copy_to_kernel_nofault((void *)stepped_address,
+				       (void *)&stepped_opcode, BREAK_INSTR_SIZE);
+		flush_icache_range(stepped_address,
+				   stepped_address + BREAK_INSTR_SIZE);
+	}
+	stepped_address = 0;
+	stepped_opcode = 0;
+	kgdb_single_step = 0;
+	atomic_set(&kgdb_cpu_doing_single_step, -1);
+}
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+	{DBG_REG_ZERO, GDB_SIZEOF_REG, -1},
+	{DBG_REG_RA, GDB_SIZEOF_REG, offsetof(struct pt_regs, ra)},
+	{DBG_REG_SP, GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)},
+	{DBG_REG_GP, GDB_SIZEOF_REG, offsetof(struct pt_regs, gp)},
+	{DBG_REG_TP, GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)},
+	{DBG_REG_T0, GDB_SIZEOF_REG, offsetof(struct pt_regs, t0)},
+	{DBG_REG_T1, GDB_SIZEOF_REG, offsetof(struct pt_regs, t1)},
+	{DBG_REG_T2, GDB_SIZEOF_REG, offsetof(struct pt_regs, t2)},
+	{DBG_REG_FP, GDB_SIZEOF_REG, offsetof(struct pt_regs, s0)},
+	{DBG_REG_S1, GDB_SIZEOF_REG, offsetof(struct pt_regs, s1)},
+	{DBG_REG_A0, GDB_SIZEOF_REG, offsetof(struct pt_regs, a0)},
+	{DBG_REG_A1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)},
+	{DBG_REG_A2, GDB_SIZEOF_REG, offsetof(struct pt_regs, a2)},
+	{DBG_REG_A3, GDB_SIZEOF_REG, offsetof(struct pt_regs, a3)},
+	{DBG_REG_A4, GDB_SIZEOF_REG, offsetof(struct pt_regs, a4)},
+	{DBG_REG_A5, GDB_SIZEOF_REG, offsetof(struct pt_regs, a5)},
+	{DBG_REG_A6, GDB_SIZEOF_REG, offsetof(struct pt_regs, a6)},
+	{DBG_REG_A7, GDB_SIZEOF_REG, offsetof(struct pt_regs, a7)},
+	{DBG_REG_S2, GDB_SIZEOF_REG, offsetof(struct pt_regs, s2)},
+	{DBG_REG_S3, GDB_SIZEOF_REG, offsetof(struct pt_regs, s3)},
+	{DBG_REG_S4, GDB_SIZEOF_REG, offsetof(struct pt_regs, s4)},
+	{DBG_REG_S5, GDB_SIZEOF_REG, offsetof(struct pt_regs, s5)},
+	{DBG_REG_S6, GDB_SIZEOF_REG, offsetof(struct pt_regs, s6)},
+	{DBG_REG_S7, GDB_SIZEOF_REG, offsetof(struct pt_regs, s7)},
+	{DBG_REG_S8, GDB_SIZEOF_REG, offsetof(struct pt_regs, s8)},
+	{DBG_REG_S9, GDB_SIZEOF_REG, offsetof(struct pt_regs, s9)},
+	{DBG_REG_S10, GDB_SIZEOF_REG, offsetof(struct pt_regs, s10)},
+	{DBG_REG_S11, GDB_SIZEOF_REG, offsetof(struct pt_regs, s11)},
+	{DBG_REG_T3, GDB_SIZEOF_REG, offsetof(struct pt_regs, t3)},
+	{DBG_REG_T4, GDB_SIZEOF_REG, offsetof(struct pt_regs, t4)},
+	{DBG_REG_T5, GDB_SIZEOF_REG, offsetof(struct pt_regs, t5)},
+	{DBG_REG_T6, GDB_SIZEOF_REG, offsetof(struct pt_regs, t6)},
+	{DBG_REG_EPC, GDB_SIZEOF_REG, offsetof(struct pt_regs, epc)},
+	{DBG_REG_STATUS, GDB_SIZEOF_REG, offsetof(struct pt_regs, status)},
+	{DBG_REG_BADADDR, GDB_SIZEOF_REG, offsetof(struct pt_regs, badaddr)},
+	{DBG_REG_CAUSE, GDB_SIZEOF_REG, offsetof(struct pt_regs, cause)},
+};
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+	if (regno >= DBG_MAX_REG_NUM || regno < 0)
+		return NULL;
+
+	if (dbg_reg_def[regno].offset != -1)
+		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+		       dbg_reg_def[regno].size);
+	else
+		memset(mem, 0, dbg_reg_def[regno].size);
+	return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+	if (regno >= DBG_MAX_REG_NUM || regno < 0)
+		return -EINVAL;
+
+	if (dbg_reg_def[regno].offset != -1)
+		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+		       dbg_reg_def[regno].size);
+	return 0;
+}
+
+void
+sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
+{
+	/* Initialize to zero */
+	memset((char *)gdb_regs, 0, NUMREGBYTES);
+
+	gdb_regs[DBG_REG_SP_OFF] = task->thread.sp;
+	gdb_regs[DBG_REG_FP_OFF] = task->thread.s[0];
+	gdb_regs[DBG_REG_S1_OFF] = task->thread.s[1];
+	gdb_regs[DBG_REG_S2_OFF] = task->thread.s[2];
+	gdb_regs[DBG_REG_S3_OFF] = task->thread.s[3];
+	gdb_regs[DBG_REG_S4_OFF] = task->thread.s[4];
+	gdb_regs[DBG_REG_S5_OFF] = task->thread.s[5];
+	gdb_regs[DBG_REG_S6_OFF] = task->thread.s[6];
+	gdb_regs[DBG_REG_S7_OFF] = task->thread.s[7];
+	gdb_regs[DBG_REG_S8_OFF] = task->thread.s[8];
+	gdb_regs[DBG_REG_S9_OFF] = task->thread.s[10];
+	gdb_regs[DBG_REG_S10_OFF] = task->thread.s[11];
+	gdb_regs[DBG_REG_EPC_OFF] = task->thread.ra;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+	regs->epc = pc;
+}
+
+void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
+				char *remcom_out_buffer)
+{
+	if (!strncmp(remcom_in_buffer, gdb_xfer_read_target,
+		     sizeof(gdb_xfer_read_target)))
+		strcpy(remcom_out_buffer, riscv_gdb_stub_target_desc);
+	else if (!strncmp(remcom_in_buffer, gdb_xfer_read_cpuxml,
+			  sizeof(gdb_xfer_read_cpuxml)))
+		strcpy(remcom_out_buffer, riscv_gdb_stub_cpuxml);
+}
+
+static inline void kgdb_arch_update_addr(struct pt_regs *regs,
+					 char *remcom_in_buffer)
+{
+	unsigned long addr;
+	char *ptr;
+
+	ptr = &remcom_in_buffer[1];
+	if (kgdb_hex2long(&ptr, &addr))
+		regs->epc = addr;
+}
+
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+			       char *remcom_in_buffer, char *remcom_out_buffer,
+			       struct pt_regs *regs)
+{
+	int err = 0;
+
+	undo_single_step(regs);
+
+	switch (remcom_in_buffer[0]) {
+	case 'c':
+	case 'D':
+	case 'k':
+		if (remcom_in_buffer[0] == 'c')
+			kgdb_arch_update_addr(regs, remcom_in_buffer);
+		break;
+	case 's':
+		kgdb_arch_update_addr(regs, remcom_in_buffer);
+		err = do_single_step(regs);
+		break;
+	default:
+		err = -1;
+	}
+	return err;
+}
+
+static int kgdb_riscv_kgdbbreak(unsigned long addr)
+{
+	if (stepped_address == addr)
+		return KGDB_SW_SINGLE_STEP;
+	if (atomic_read(&kgdb_setting_breakpoint))
+		if (addr == (unsigned long)&kgdb_compiled_break)
+			return KGDB_COMPILED_BREAK;
+
+	return kgdb_has_hit_break(addr);
+}
+
+static int kgdb_riscv_notify(struct notifier_block *self, unsigned long cmd,
+			     void *ptr)
+{
+	struct die_args *args = (struct die_args *)ptr;
+	struct pt_regs *regs = args->regs;
+	unsigned long flags;
+	int type;
+
+	if (user_mode(regs))
+		return NOTIFY_DONE;
+
+	type = kgdb_riscv_kgdbbreak(regs->epc);
+	if (type == NOT_KGDB_BREAK && cmd == DIE_TRAP)
+		return NOTIFY_DONE;
+
+	local_irq_save(flags);
+
+	if (kgdb_handle_exception(type == KGDB_SW_SINGLE_STEP ? 0 : 1,
+				  args->signr, cmd, regs))
+		return NOTIFY_DONE;
+
+	if (type == KGDB_COMPILED_BREAK)
+		regs->epc += 4;
+
+	local_irq_restore(flags);
+
+	return NOTIFY_STOP;
+}
+
+static struct notifier_block kgdb_notifier = {
+	.notifier_call = kgdb_riscv_notify,
+};
+
+int kgdb_arch_init(void)
+{
+	register_die_notifier(&kgdb_notifier);
+
+	return 0;
+}
+
+void kgdb_arch_exit(void)
+{
+	unregister_die_notifier(&kgdb_notifier);
+}
+
+/*
+ * Global data
+ */
+#ifdef CONFIG_RISCV_ISA_C
+const struct kgdb_arch arch_kgdb_ops = {
+	.gdb_bpt_instr = {0x02, 0x90},	/* c.ebreak */
+};
+#else
+const struct kgdb_arch arch_kgdb_ops = {
+	.gdb_bpt_instr = {0x73, 0x00, 0x10, 0x00},	/* ebreak */
+};
+#endif
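A quick note on how the KGDB_COMPILED_BREAK path is reached in practice: kernel code can deliberately drop into the stub with the generic kgdb_breakpoint() helper, which emits the compiled-in break instruction that the die notifier above recognizes. A sketch (the call site is hypothetical):

	#include <linux/kgdb.h>

	void my_debug_hook(void)
	{
		/*
		 * Emits the compiled-in break instruction; kgdb_riscv_notify()
		 * classifies it as KGDB_COMPILED_BREAK, enters the debugger,
		 * then skips the 4-byte ebreak on resume.
		 */
		kgdb_breakpoint();
	}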
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 8bbe5dbe1341..104fba889cf7 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -10,7 +10,7 @@
 #include <linux/moduleloader.h>
 #include <linux/vmalloc.h>
 #include <linux/sizes.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
 #include <asm/sections.h>
 
 static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
@@ -263,6 +263,13 @@ static int apply_r_riscv_add32_rela(struct module *me, u32 *location,
 	return 0;
 }
 
+static int apply_r_riscv_add64_rela(struct module *me, u32 *location,
+				    Elf_Addr v)
+{
+	*(u64 *)location += (u64)v;
+	return 0;
+}
+
 static int apply_r_riscv_sub32_rela(struct module *me, u32 *location,
				    Elf_Addr v)
 {
@@ -270,6 +277,13 @@ static int apply_r_riscv_sub32_rela(struct module *me, u32 *location,
 	return 0;
 }
 
+static int apply_r_riscv_sub64_rela(struct module *me, u32 *location,
+				    Elf_Addr v)
+{
+	*(u64 *)location -= (u64)v;
+	return 0;
+}
+
 static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
				Elf_Addr v) = {
 	[R_RISCV_32]			= apply_r_riscv_32_rela,
@@ -290,7 +304,9 @@ static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
 	[R_RISCV_RELAX]			= apply_r_riscv_relax_rela,
 	[R_RISCV_ALIGN]			= apply_r_riscv_align_rela,
 	[R_RISCV_ADD32]			= apply_r_riscv_add32_rela,
+	[R_RISCV_ADD64]			= apply_r_riscv_add64_rela,
 	[R_RISCV_SUB32]			= apply_r_riscv_sub32_rela,
+	[R_RISCV_SUB64]			= apply_r_riscv_sub64_rela,
 };
 
 int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 8a4fc65ee022..3fe7a5296aa5 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -5,22 +5,22 @@
 
 #include <linux/spinlock.h>
 #include <linux/mm.h>
+#include <linux/memory.h>
 #include <linux/uaccess.h>
 #include <linux/stop_machine.h>
 #include <asm/kprobes.h>
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
+#include <asm/patch.h>
 
-struct riscv_insn_patch {
+struct patch_insn {
	void *addr;
	u32 insn;
	atomic_t cpu_count;
 };
 
 #ifdef CONFIG_MMU
-static DEFINE_RAW_SPINLOCK(patch_lock);
-
-static void __kprobes *patch_map(void *addr, int fixmap)
+static void *patch_map(void *addr, int fixmap)
 {
	uintptr_t uintaddr = (uintptr_t) addr;
	struct page *page;
@@ -37,65 +37,72 @@ static void __kprobes *patch_map(void *addr, int fixmap)
 	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
					 (uintaddr & ~PAGE_MASK));
 }
+NOKPROBE_SYMBOL(patch_map);
 
-static void __kprobes patch_unmap(int fixmap)
+static void patch_unmap(int fixmap)
 {
	clear_fixmap(fixmap);
 }
+NOKPROBE_SYMBOL(patch_unmap);
 
-static int __kprobes riscv_insn_write(void *addr, const void *insn, size_t len)
+static int patch_insn_write(void *addr, const void *insn, size_t len)
 {
	void *waddr = addr;
	bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
-	unsigned long flags = 0;
	int ret;
 
-	raw_spin_lock_irqsave(&patch_lock, flags);
+	/*
+	 * Before reaching here, it was expected to lock the text_mutex
+	 * already, so we don't need to give another lock here and could
+	 * ensure that it was safe between each cores.
+	 */
+	lockdep_assert_held(&text_mutex);
 
	if (across_pages)
		patch_map(addr + len, FIX_TEXT_POKE1);
 
	waddr = patch_map(addr, FIX_TEXT_POKE0);
 
-	ret = probe_kernel_write(waddr, insn, len);
+	ret = copy_to_kernel_nofault(waddr, insn, len);
 
	patch_unmap(FIX_TEXT_POKE0);
 
	if (across_pages)
		patch_unmap(FIX_TEXT_POKE1);
 
-	raw_spin_unlock_irqrestore(&patch_lock, flags);
-
	return ret;
 }
+NOKPROBE_SYMBOL(patch_insn_write);
 #else
-static int __kprobes riscv_insn_write(void *addr, const void *insn, size_t len)
+static int patch_insn_write(void *addr, const void *insn, size_t len)
 {
-	return probe_kernel_write(addr, insn, len);
+	return copy_to_kernel_nofault(addr, insn, len);
 }
+NOKPROBE_SYMBOL(patch_insn_write);
 #endif /* CONFIG_MMU */
 
-int __kprobes riscv_patch_text_nosync(void *addr, const void *insns, size_t len)
+int patch_text_nosync(void *addr, const void *insns, size_t len)
 {
	u32 *tp = addr;
	int ret;
 
-	ret = riscv_insn_write(tp, insns, len);
+	ret = patch_insn_write(tp, insns, len);
 
	if (!ret)
		flush_icache_range((uintptr_t) tp, (uintptr_t) tp + len);
 
	return ret;
 }
+NOKPROBE_SYMBOL(patch_text_nosync);
 
-static int __kprobes riscv_patch_text_cb(void *data)
+static int patch_text_cb(void *data)
 {
-	struct riscv_insn_patch *patch = data;
+	struct patch_insn *patch = data;
	int ret = 0;
 
	if (atomic_inc_return(&patch->cpu_count) == 1) {
		ret =
-		    riscv_patch_text_nosync(patch->addr, &patch->insn,
+		    patch_text_nosync(patch->addr, &patch->insn,
					    GET_INSN_LENGTH(patch->insn));
		atomic_inc(&patch->cpu_count);
	} else {
@@ -106,15 +113,17 @@ static int patch_text_cb(void *data)
 
	return ret;
 }
+NOKPROBE_SYMBOL(patch_text_cb);
 
-int __kprobes riscv_patch_text(void *addr, u32 insn)
+int patch_text(void *addr, u32 insn)
 {
-	struct riscv_insn_patch patch = {
+	struct patch_insn patch = {
		.addr = addr,
		.insn = insn,
		.cpu_count = ATOMIC_INIT(0),
	};
 
-	return stop_machine_cpuslocked(riscv_patch_text_cb,
+	return stop_machine_cpuslocked(patch_text_cb,
				       &patch, cpu_online_mask);
 }
+NOKPROBE_SYMBOL(patch_text);
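For orientation, the renamed entry points split the API in two: patch_text_nosync() assumes the caller already holds text_mutex (as ftrace and jump_label do above), while patch_text() serializes every online CPU via stop_machine() before writing. A minimal sketch of the latter, with a hypothetical call site:

	#include <asm/patch.h>

	static int my_patch_site(void *addr)
	{
		u32 ebreak = 0x00100073;	/* the 32-bit ebreak opcode */

		/*
		 * Parks all other CPUs, writes the instruction through the
		 * fixmap alias, then flushes the icache for the patched range.
		 */
		return patch_text(addr, ebreak);
	}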
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 824d117cf202..2b97c493427c 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -24,6 +24,12 @@
 
 register unsigned long gp_in_global __asm__("gp");
 
+#ifdef CONFIG_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
 extern asmlinkage void ret_from_fork(void);
 extern asmlinkage void ret_from_kernel_thread(void);
 
@@ -101,8 +107,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 	return 0;
 }
 
-int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
-		    unsigned long arg, struct task_struct *p, unsigned long tls)
+int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
 {
	struct pt_regs *childregs = task_pt_regs(p);
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 444dc7b0fd78..2d6395f5ad54 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -30,13 +30,10 @@ enum riscv_regset {
 
 static int riscv_gpr_get(struct task_struct *target,
			 const struct user_regset *regset,
-			 unsigned int pos, unsigned int count,
-			 void *kbuf, void __user *ubuf)
+			 struct membuf to)
 {
-	struct pt_regs *regs;
-
-	regs = task_pt_regs(target);
-	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
+	return membuf_write(&to, task_pt_regs(target),
+			    sizeof(struct user_regs_struct));
 }
 
 static int riscv_gpr_set(struct task_struct *target,
@@ -55,21 +52,13 @@ static int riscv_gpr_set(struct task_struct *target,
 #ifdef CONFIG_FPU
 static int riscv_fpr_get(struct task_struct *target,
			 const struct user_regset *regset,
-			 unsigned int pos, unsigned int count,
-			 void *kbuf, void __user *ubuf)
+			 struct membuf to)
 {
-	int ret;
	struct __riscv_d_ext_state *fstate = &target->thread.fstate;
 
-	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, fstate, 0,
-				  offsetof(struct __riscv_d_ext_state, fcsr));
-	if (!ret) {
-		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, fstate, 0,
-					  offsetof(struct __riscv_d_ext_state, fcsr) +
-					  sizeof(fstate->fcsr));
-	}
-
-	return ret;
+	membuf_write(&to, fstate, offsetof(struct __riscv_d_ext_state, fcsr));
+	membuf_store(&to, fstate->fcsr);
+	return membuf_zero(&to, 4);	/* explicitly pad */
 }
 
 static int riscv_fpr_set(struct task_struct *target,
@@ -98,8 +87,8 @@ static const struct user_regset riscv_user_regset[] = {
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t),
		.align = sizeof(elf_greg_t),
-		.get = &riscv_gpr_get,
-		.set = &riscv_gpr_set,
+		.regset_get = riscv_gpr_get,
+		.set = riscv_gpr_set,
	},
 #ifdef CONFIG_FPU
	[REGSET_F] = {
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t),
		.align = sizeof(elf_fpreg_t),
-		.get = &riscv_fpr_get,
-		.set = &riscv_fpr_set,
+		.regset_get = riscv_fpr_get,
+		.set = riscv_fpr_set,
	},
 #endif
 };
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index f383ef5672b2..226ccce0f9e0 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -547,6 +547,18 @@ static inline long sbi_get_firmware_version(void)
 	return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
 }
 
+static void sbi_send_cpumask_ipi(const struct cpumask *target)
+{
+	struct cpumask hartid_mask;
+
+	riscv_cpuid_to_hartid_mask(target, &hartid_mask);
+
+	sbi_send_ipi(cpumask_bits(&hartid_mask));
+}
+
+static struct riscv_ipi_ops sbi_ipi_ops = {
+	.ipi_inject = sbi_send_cpumask_ipi
+};
 
 int __init sbi_init(void)
 {
@@ -587,5 +599,7 @@ int __init sbi_init(void)
		__sbi_rfence	= __sbi_rfence_v01;
	}
 
+	riscv_set_ipi_ops(&sbi_ipi_ops);
+
	return 0;
 }
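The riscv_ipi_ops indirection is what lets an M-mode CLINT driver replace the deleted clint.c while SBI-based kernels keep using the ecall path above. A rough sketch of a non-SBI backend registering itself; everything here is hypothetical except riscv_set_ipi_ops(), cpuid_to_hartid_map() and the ops struct, and my_clint_ipi_base is assumed to be a u32 __iomem * the driver ioremapped to the CLINT's MSIP array:

	#include <linux/io.h>
	#include <linux/cpumask.h>
	#include <asm/smp.h>

	static u32 __iomem *my_clint_ipi_base;	/* mapped elsewhere in probe */

	static void my_clint_send_ipi(const struct cpumask *mask)
	{
		int cpu;

		/* one 4-byte MSIP register per hart */
		for_each_cpu(cpu, mask)
			writel(1, my_clint_ipi_base + cpuid_to_hartid_map(cpu));
	}

	static void my_clint_clear_ipi(void)
	{
		writel(0, my_clint_ipi_base +
			  cpuid_to_hartid_map(smp_processor_id()));
	}

	static struct riscv_ipi_ops my_clint_ipi_ops = {
		.ipi_inject	= my_clint_send_ipi,
		.ipi_clear	= my_clint_clear_ipi,
	};

	/* in the driver's probe path */
	/* riscv_set_ipi_ops(&my_clint_ipi_ops); */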
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 145128a7e560..2c6dd329312b 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -18,11 +18,9 @@
 #include <linux/swiotlb.h>
 #include <linux/smp.h>
 
-#include <asm/clint.h>
 #include <asm/cpu_ops.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
-#include <asm/pgtable.h>
 #include <asm/sbi.h>
 #include <asm/tlbflush.h>
 #include <asm/thread_info.h>
@@ -75,8 +73,11 @@ void __init setup_arch(char **cmdline_p)
 	setup_bootmem();
 	paging_init();
+#if IS_ENABLED(CONFIG_BUILTIN_DTB)
+	unflatten_and_copy_device_tree();
+#else
 	unflatten_device_tree();
-	clint_init_boot_cpu();
+#endif
 
 #ifdef CONFIG_SWIOTLB
 	swiotlb_init(1);
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 17ba190e84a5..e996e08f1061 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -250,7 +250,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
			regs->a0 = -EINTR;
			break;
		}
-		/* fallthrough */
+		fallthrough;
	case -ERESTARTNOINTR:
		regs->a0 = regs->orig_a0;
		regs->epc -= 0x4;
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index a65a8fa0c22d..ea028d9e0d24 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -16,8 +16,8 @@
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/delay.h>
+#include <linux/irq_work.h>
 
-#include <asm/clint.h>
 #include <asm/sbi.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
@@ -26,6 +26,7 @@ enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
+	IPI_IRQ_WORK,
	IPI_MAX
 };
 
@@ -84,9 +85,25 @@ static void ipi_stop(void)
		wait_for_interrupt();
 }
 
+static struct riscv_ipi_ops *ipi_ops;
+
+void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
+{
+	ipi_ops = ops;
+}
+EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);
+
+void riscv_clear_ipi(void)
+{
+	if (ipi_ops && ipi_ops->ipi_clear)
+		ipi_ops->ipi_clear();
+
+	csr_clear(CSR_IP, IE_SIE);
+}
+EXPORT_SYMBOL_GPL(riscv_clear_ipi);
+
 static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
 {
-	struct cpumask hartid_mask;
	int cpu;
 
	smp_mb__before_atomic();
@@ -94,41 +111,40 @@ static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
		set_bit(op, &ipi_data[cpu].bits);
	smp_mb__after_atomic();
 
-	riscv_cpuid_to_hartid_mask(mask, &hartid_mask);
-	if (IS_ENABLED(CONFIG_RISCV_SBI))
-		sbi_send_ipi(cpumask_bits(&hartid_mask));
+	if (ipi_ops && ipi_ops->ipi_inject)
+		ipi_ops->ipi_inject(mask);
	else
-		clint_send_ipi_mask(mask);
+		pr_warn("SMP: IPI inject method not available\n");
 }
 
 static void send_ipi_single(int cpu, enum ipi_message_type op)
 {
-	int hartid = cpuid_to_hartid_map(cpu);
-
	smp_mb__before_atomic();
	set_bit(op, &ipi_data[cpu].bits);
	smp_mb__after_atomic();
 
-	if (IS_ENABLED(CONFIG_RISCV_SBI))
-		sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
+	if (ipi_ops && ipi_ops->ipi_inject)
+		ipi_ops->ipi_inject(cpumask_of(cpu));
	else
-		clint_send_ipi_single(hartid);
+		pr_warn("SMP: IPI inject method not available\n");
 }
 
-static inline void clear_ipi(void)
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
 {
-	if (IS_ENABLED(CONFIG_RISCV_SBI))
-		csr_clear(CSR_IP, IE_SIE);
-	else
-		clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
+	send_ipi_single(smp_processor_id(), IPI_IRQ_WORK);
 }
+#endif
 
-void riscv_software_interrupt(void)
+void handle_IPI(struct pt_regs *regs)
 {
+	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
	unsigned long *stats = ipi_data[smp_processor_id()].stats;
 
-	clear_ipi();
+	irq_enter();
+
+	riscv_clear_ipi();
 
	while (true) {
		unsigned long ops;
@@ -138,7 +154,7 @@ void handle_IPI(struct pt_regs *regs)
		ops = xchg(pending_ipis, 0);
		if (ops == 0)
-			return;
+			goto done;
 
		if (ops & (1 << IPI_RESCHEDULE)) {
			stats[IPI_RESCHEDULE]++;
@@ -155,17 +171,27 @@ void handle_IPI(struct pt_regs *regs)
			ipi_stop();
		}
 
+		if (ops & (1 << IPI_IRQ_WORK)) {
+			stats[IPI_IRQ_WORK]++;
+			irq_work_run();
+		}
+
		BUG_ON((ops >> IPI_MAX) != 0);
 
		/* Order data access and bit testing. */
		mb();
	}
+
+done:
+	irq_exit();
+	set_irq_regs(old_regs);
 }
 
 static const char * const ipi_names[] = {
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
+	[IPI_IRQ_WORK]		= "IRQ work interrupts",
 };
 
 void show_ipi_stats(struct seq_file *p, int prec)
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 4e9922790f6e..96167d55ed98 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -24,7 +24,6 @@
 #include <linux/of.h>
 #include <linux/sched/task_stack.h>
 #include <linux/sched/mm.h>
-#include <asm/clint.h>
 #include <asm/cpu_ops.h>
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
@@ -106,7 +105,7 @@ void __init setup_smp(void)
	}
 }
 
-int start_secondary_cpu(int cpu, struct task_struct *tidle)
+static int start_secondary_cpu(int cpu, struct task_struct *tidle)
 {
	if (cpu_ops[cpu]->cpu_start)
		return cpu_ops[cpu]->cpu_start(cpu, tidle);
@@ -121,7 +120,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 
	ret = start_secondary_cpu(cpu, tidle);
	if (!ret) {
-		lockdep_assert_held(&cpu_running);
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));
 
@@ -146,18 +144,18 @@ void __init smp_cpus_done(unsigned int max_cpus)
 asmlinkage __visible void smp_callin(void)
 {
	struct mm_struct *mm = &init_mm;
+	unsigned int curr_cpuid = smp_processor_id();
 
-	if (!IS_ENABLED(CONFIG_RISCV_SBI))
-		clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
+	riscv_clear_ipi();
 
	/* All kernel threads share the same mm context. */
	mmgrab(mm);
	current->active_mm = mm;
 
-	trap_init();
-	notify_cpu_starting(smp_processor_id());
-	update_siblings_masks(smp_processor_id());
-	set_cpu_online(smp_processor_id(), 1);
+	notify_cpu_starting(curr_cpuid);
+	update_siblings_masks(curr_cpuid);
+	set_cpu_online(curr_cpuid, 1);
+
	/*
	 * Remote TLB flushes are ignored while the CPU is offline, so emit
	 * a local TLB flush right now just in case.
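The new IPI_IRQ_WORK message makes the generic irq_work machinery usable on RISC-V. A small sketch of a consumer (my_* names are hypothetical); queueing the work raises arch_irq_work_raise(), which sends the self-IPI that handle_IPI() above drains via irq_work_run():

	#include <linux/irq_work.h>

	static struct irq_work my_work;

	/* runs on this CPU in hard-IRQ context, out of handle_IPI() */
	static void my_irq_work_fn(struct irq_work *work)
	{
	}

	void my_setup(void)
	{
		init_irq_work(&my_work, my_irq_work_fn);
	}

	void my_raise(void)
	{
		irq_work_queue(&my_work);	/* -> IPI_IRQ_WORK self-IPI */
	}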
diff --git a/arch/riscv/kernel/soc.c b/arch/riscv/kernel/soc.c
index 0b3b3dc9ad0f..c7b0a73e382e 100644
--- a/arch/riscv/kernel/soc.c
+++ b/arch/riscv/kernel/soc.c
@@ -4,7 +4,7 @@
  */
 #include <linux/init.h>
 #include <linux/libfdt.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
 #include <asm/soc.h>
 
 /*
@@ -26,3 +26,30 @@ void __init soc_early_init(void)
		}
	}
 }
+
+static bool soc_builtin_dtb_match(unsigned long vendor_id,
+				  unsigned long arch_id, unsigned long imp_id,
+				  const struct soc_builtin_dtb *entry)
+{
+	return entry->vendor_id == vendor_id &&
+	       entry->arch_id == arch_id &&
+	       entry->imp_id == imp_id;
+}
+
+void * __init soc_lookup_builtin_dtb(void)
+{
+	unsigned long vendor_id, arch_id, imp_id;
+	const struct soc_builtin_dtb *s;
+
+	__asm__ ("csrr %0, mvendorid" : "=r"(vendor_id));
+	__asm__ ("csrr %0, marchid" : "=r"(arch_id));
+	__asm__ ("csrr %0, mimpid" : "=r"(imp_id));
+
+	for (s = (void *)&__soc_builtin_dtb_table_start;
+	     (void *)s < (void *)&__soc_builtin_dtb_table_end; s++) {
+		if (soc_builtin_dtb_match(vendor_id, arch_id, imp_id, s))
+			return s->dtb_func();
+	}
+
+	return NULL;
+}
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 837b9b38f825..595342910c3f 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -99,17 +99,18 @@ void notrace walk_stackframe(struct task_struct *task,
 
 static bool print_trace_address(unsigned long pc, void *arg)
 {
-	print_ip_sym(pc);
+	const char *loglvl = arg;
+
+	print_ip_sym(loglvl, pc);
	return false;
 }
 
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
 {
	pr_cont("Call Trace:\n");
-	walk_stackframe(task, NULL, print_trace_address, NULL);
+	walk_stackframe(task, NULL, print_trace_address, (void *)loglvl);
 }
 
-
 static bool save_wchan(unsigned long pc, void *arg)
 {
	if (!in_sched_functions(pc)) {
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index f3619f59d85c..12f8a7fce78b 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -8,6 +8,7 @@
 #include <linux/syscalls.h>
 #include <asm/unistd.h>
 #include <asm/cacheflush.h>
+#include <asm-generic/mman-common.h>
 
 static long riscv_sys_mmap(unsigned long addr, unsigned long len,
			   unsigned long prot, unsigned long flags,
@@ -16,6 +17,11 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
 {
	if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
		return -EINVAL;
+
+	if ((prot & PROT_WRITE) && (prot & PROT_EXEC))
+		if (unlikely(!(prot & PROT_READ)))
+			return -EINVAL;
+
	return ksys_mmap_pgoff(addr, len, prot, flags, fd,
			       offset >> (PAGE_SHIFT - page_shift_offset));
 }
diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c
index 6a53c02e9c73..4d3a1048ad8b 100644
--- a/arch/riscv/kernel/time.c
+++ b/arch/riscv/kernel/time.c
@@ -26,3 +26,12 @@ void __init time_init(void)
	lpj_fine = riscv_timebase / HZ;
	timer_probe();
 }
+
+void clocksource_arch_init(struct clocksource *cs)
+{
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
+	cs->vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER;
+#else
+	cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
+#endif
+}
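The sys_riscv.c hunk above rejects mappings that are writable and executable but not readable. A userspace sketch of the new behavior (illustrative only, not part of the commit):

	#include <sys/mman.h>
	#include <stdio.h>

	int main(void)
	{
		/* PROT_WRITE|PROT_EXEC without PROT_READ is now refused */
		void *p = mmap(NULL, 4096, PROT_WRITE | PROT_EXEC,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			perror("mmap");	/* expected: Invalid argument */
		return 0;
	}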
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 7f58fa53033f..ad14f4466d92 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -137,7 +137,7 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
 {
	bug_insn_t insn;
 
-	if (probe_kernel_address((bug_insn_t *)pc, insn))
+	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;
 
	return GET_INSN_LENGTH(insn);
@@ -147,6 +147,11 @@ asmlinkage __visible void do_trap_break(struct pt_regs *regs)
 {
	if (user_mode(regs))
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
+#ifdef CONFIG_KGDB
+	else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
+								== NOTIFY_STOP)
+		return;
+#endif
	else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN)
		regs->epc += get_break_insn_length(regs->epc);
	else
@@ -160,7 +165,7 @@ int is_valid_bugaddr(unsigned long pc)
	if (pc < VMALLOC_START)
		return 0;
-	if (probe_kernel_address((bug_insn_t *)pc, insn))
+	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;
	if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
		return (insn == __BUG_INSN_32);
@@ -169,15 +174,7 @@ int is_valid_bugaddr(unsigned long pc)
 }
 #endif /* CONFIG_GENERIC_BUG */
 
+/* stvec & scratch are already set from head.S */
 void trap_init(void)
 {
-	/*
-	 * Set sup0 scratch register to 0, indicating to exception vector
-	 * that we are presently executing in the kernel
-	 */
-	csr_write(CSR_SCRATCH, 0);
-	/* Set the exception vector address */
-	csr_write(CSR_TVEC, &handle_exception);
-	/* Enable interrupts */
-	csr_write(CSR_IE, IE_SIE);
 }
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
index 484d95a70907..678204231700 100644
--- a/arch/riscv/kernel/vdso.c
+++ b/arch/riscv/kernel/vdso.c
@@ -11,8 +11,12 @@
 #include <linux/slab.h>
 #include <linux/binfmts.h>
 #include <linux/err.h>
-
+#include <asm/page.h>
+#ifdef GENERIC_TIME_VSYSCALL
+#include <vdso/datapage.h>
+#else
 #include <asm/vdso.h>
+#endif
 
 extern char vdso_start[], vdso_end[];
 
@@ -26,7 +30,7 @@ static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
 } vdso_data_store __page_aligned_data;
-static struct vdso_data *vdso_data = &vdso_data_store.data;
+struct vdso_data *vdso_data = &vdso_data_store.data;
 
 static int __init vdso_init(void)
 {
@@ -61,7 +65,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 
	vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
	vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = vdso_base;
@@ -75,15 +79,24 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
	 */
	mm->context.vdso = (void *)vdso_base;
 
-	ret = install_special_mapping(mm, vdso_base, vdso_len,
+	ret =
+	   install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		vdso_pagelist);
 
-	if (unlikely(ret))
+	if (unlikely(ret)) {
		mm->context.vdso = NULL;
+		goto end;
+	}
 
+	vdso_base += (vdso_pages << PAGE_SHIFT);
+	ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+		(VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+
+	if (unlikely(ret))
+		mm->context.vdso = NULL;
 end:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
	return ret;
 }
 
@@ -91,5 +104,8 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 {
	if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
		return "[vdso]";
+	if (vma->vm_mm && (vma->vm_start ==
+			   (long)vma->vm_mm->context.vdso + PAGE_SIZE))
+		return "[vdso_data]";
	return NULL;
 }
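The probe_kernel_address() to get_kernel_nofault() conversion above follows the same pattern everywhere in this commit: the value comes first, the possibly-bogus pointer second, and a fault returns an error instead of oopsing. A self-contained sketch of that idiom (the helper name is hypothetical):

	#include <linux/uaccess.h>

	/* returns true iff pc points at a valid 16-bit compressed instruction */
	static bool insn_is_compressed(unsigned long pc)
	{
		u16 insn;

		/* faults on a bad pc become a plain error return */
		if (get_kernel_nofault(insn, (u16 *)pc))
			return false;

		return (insn & 0x3) != 0x3;	/* low bits 11 mean 32-bit */
	}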
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index 4c8b2a4a6a70..478e7338ddc1 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -1,12 +1,14 @@
 # SPDX-License-Identifier: GPL-2.0-only
 # Copied from arch/tile/kernel/vdso/Makefile
+# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
+# the inclusion of the generic Makefile.
+ARCH_REL_TYPE_ABS := R_RISCV_32|R_RISCV_64|R_RISCV_JUMP_SLOT
+include $(srctree)/lib/vdso/Makefile
 # Symbols present in the vdso
 vdso-syms  = rt_sigreturn
 ifdef CONFIG_64BIT
-vdso-syms += gettimeofday
-vdso-syms += clock_gettime
-vdso-syms += clock_getres
+vdso-syms += vgettimeofday
 endif
 vdso-syms += getcpu
 vdso-syms += flush_icache
@@ -14,6 +16,12 @@ vdso-syms += flush_icache
 # Files to link into the vdso
 obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
 
+ccflags-y := -fno-stack-protector
+
+ifneq ($(c-gettimeofday-y),)
+  CFLAGS_vgettimeofday.o += -fPIC -include $(c-gettimeofday-y)
+endif
+
 # Build rules
 targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
 obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
@@ -21,8 +29,12 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
 obj-y += vdso.o vdso-syms.o
 CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
 
+# Disable -pg to prevent inserting call sites
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os
+
 # Disable gcov profiling for VDSO code
 GCOV_PROFILE := n
+KCOV_INSTRUMENT := n
 
 # Force dependency
 $(obj)/vdso.o: $(obj)/vdso.so
diff --git a/arch/riscv/kernel/vdso/clock_getres.S b/arch/riscv/kernel/vdso/clock_getres.S
deleted file mode 100644
index 91378a52eb22..000000000000
--- a/arch/riscv/kernel/vdso/clock_getres.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 SiFive
- */
-
-#include <linux/linkage.h>
-#include <asm/unistd.h>
-
-	.text
-/* int __vdso_clock_getres(clockid_t clock_id, struct timespec *res); */
-ENTRY(__vdso_clock_getres)
-	.cfi_startproc
-	/* For now, just do the syscall. */
-	li a7, __NR_clock_getres
-	ecall
-	ret
-	.cfi_endproc
-ENDPROC(__vdso_clock_getres)
diff --git a/arch/riscv/kernel/vdso/clock_gettime.S b/arch/riscv/kernel/vdso/clock_gettime.S
deleted file mode 100644
index 5371fd9bc01f..000000000000
--- a/arch/riscv/kernel/vdso/clock_gettime.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 SiFive
- */
-
-#include <linux/linkage.h>
-#include <asm/unistd.h>
-
-	.text
-/* int __vdso_clock_gettime(clockid_t clock_id, struct timespec *tp); */
-ENTRY(__vdso_clock_gettime)
-	.cfi_startproc
-	/* For now, just do the syscall. */
-	li a7, __NR_clock_gettime
-	ecall
-	ret
-	.cfi_endproc
-ENDPROC(__vdso_clock_gettime)
diff --git a/arch/riscv/kernel/vdso/gettimeofday.S b/arch/riscv/kernel/vdso/gettimeofday.S
deleted file mode 100644
index e6fb8af88632..000000000000
--- a/arch/riscv/kernel/vdso/gettimeofday.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 SiFive
- */
-
-#include <linux/linkage.h>
-#include <asm/unistd.h>
-
-	.text
-/* int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); */
-ENTRY(__vdso_gettimeofday)
-	.cfi_startproc
-	/* For now, just do the syscall. */
-	li a7, __NR_gettimeofday
-	ecall
-	ret
-	.cfi_endproc
-ENDPROC(__vdso_gettimeofday)
diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S
index f66a091cb890..e6f558bca71b 100644
--- a/arch/riscv/kernel/vdso/vdso.lds.S
+++ b/arch/riscv/kernel/vdso/vdso.lds.S
@@ -2,11 +2,13 @@
 /*
  * Copyright (C) 2012 Regents of the University of California
  */
+#include <asm/page.h>
 
 OUTPUT_ARCH(riscv)
 
 SECTIONS
 {
+	PROVIDE(_vdso_data = . + PAGE_SIZE);
 	. = SIZEOF_HEADERS;
 
 	.hash		: { *(.hash) }			:text
diff --git a/arch/riscv/kernel/vdso/vgettimeofday.c b/arch/riscv/kernel/vdso/vgettimeofday.c
new file mode 100644
index 000000000000..cc0d80699c31
--- /dev/null
+++ b/arch/riscv/kernel/vdso/vgettimeofday.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copied from arch/arm64/kernel/vdso/vgettimeofday.c
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2020 SiFive
+ */
+
+#include <linux/time.h>
+#include <linux/types.h>
+
+extern
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+	return __cvdso_clock_gettime(clock, ts);
+}
+
+extern
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
+{
+	return __cvdso_gettimeofday(tv, tz);
+}
+
+extern
+int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res);
+int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res)
+{
+	return __cvdso_clock_getres(clock_id, res);
+}
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 0339b6bbe11a..34d00d9e6eac 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -27,13 +27,17 @@ SECTIONS
 	__init_begin = .;
 	INIT_TEXT_SECTION(PAGE_SIZE)
-	INIT_DATA_SECTION(16)
 	. = ALIGN(8);
 	__soc_early_init_table : {
		__soc_early_init_table_start = .;
		KEEP(*(__soc_early_init_table))
		__soc_early_init_table_end = .;
	}
+	__soc_builtin_dtb_table : {
+		__soc_builtin_dtb_table_start = .;
+		KEEP(*(__soc_builtin_dtb_table))
+		__soc_builtin_dtb_table_end = .;
+	}
	/* we have to discard exit text and such at runtime, not link time */
	.exit.text :
	{
@@ -62,6 +66,8 @@ SECTIONS
		_etext = .;
	}
 
+	INIT_DATA_SECTION(16)
+
	/* Start of data section */
	_sdata = .;
	RO_DATA(SECTION_ALIGN)
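With the generic vDSO wired up above, 64-bit userspace should typically get its timestamps without a syscall at all. An illustrative userspace sketch (not part of the commit; whether the call stays in the vDSO fast path depends on the libc and on the clocksource advertising VDSO_CLOCKMODE_ARCHTIMER, as set in time.c earlier):

	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec ts;

		/* usually dispatched to __vdso_clock_gettime, no ecall */
		clock_gettime(CLOCK_MONOTONIC, &ts);
		printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
		return 0;
	}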