author | Vincent Chen <vincent.chen@sifive.com> | 2020-04-16 04:38:08 +0200
committer | Palmer Dabbelt <palmerdabbelt@google.com> | 2020-05-18 20:38:12 +0200
commit | edde5584c7ab5d18b87f092fe6fe8a72590e7100 (patch)
tree | 62900d9ef5b9aae651ed7a5132d178339bac5d6f /arch/riscv/kernel
parent | riscv: Use the XML target descriptions to report 3 system registers (diff)
download | linux-edde5584c7ab5d18b87f092fe6fe8a72590e7100.tar.xz, linux-edde5584c7ab5d18b87f092fe6fe8a72590e7100.zip
riscv: Add SW single-step support for KDB
In KGDB, the GDB instance on the host is responsible for software single-stepping.
In other words, KGDB does not need to derive the next pc address when performing a
software single-step operation; it simply inserts a break instruction at the address
indicated by GDB. This approach does not work in KDB, because GDB is not involved in
the KDB flow. Therefore, this patch provides KDB with a software single-step
mechanism of its own.
Signed-off-by: Vincent Chen <vincent.chen@sifive.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
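
The mechanism the patch adds boils down to: compute the address of the instruction that will execute next, remember the opcode stored there, overwrite it with a break instruction, and put the original opcode back once the break fires. The following is a minimal user-space sketch of that save/patch/restore cycle; the flat text[] array, the simplified helper signatures, and the hard-coded ebreak/nop encodings are illustrative assumptions, not the kernel API (the real patch uses probe_kernel_address()/probe_kernel_write() and flushes the icache).

```c
/*
 * Illustrative sketch only: a flat text[] array stands in for kernel
 * memory, and do_single_step()/undo_single_step() here are simplified
 * stand-ins for the functions added by the patch.
 */
#include <stdint.h>
#include <stdio.h>

#define BREAK_INSTR 0x00100073u           /* ebreak */
#define NOP_INSTR   0x00000013u           /* addi x0, x0, 0 */

static uint32_t text[4] = { NOP_INSTR, NOP_INSTR, NOP_INSTR, NOP_INSTR };

static uint32_t stepped_opcode;           /* saved original instruction */
static size_t stepped_index;              /* where the break was planted */

/* Plant a breakpoint at the instruction that will execute next. */
static void do_single_step(size_t next_index)
{
        stepped_index = next_index;
        stepped_opcode = text[next_index]; /* remember the original opcode */
        text[next_index] = BREAK_INSTR;    /* replace it with a break */
}

/* Restore the original opcode once the breakpoint has fired. */
static void undo_single_step(void)
{
        text[stepped_index] = stepped_opcode;
}

int main(void)
{
        do_single_step(1);
        printf("slot 1 while stepping: 0x%08x\n", text[1]);
        undo_single_step();
        printf("slot 1 after restore:  0x%08x\n", text[1]);
        return 0;
}
```

In the kernel, the same bookkeeping lives in stepped_address/stepped_opcode, and kgdb_riscv_kgdbbreak() reports the planted breakpoint as KGDB_SW_SINGLE_STEP when it is hit.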
Diffstat (limited to 'arch/riscv/kernel')
-rw-r--r-- | arch/riscv/kernel/kgdb.c | 179 |
1 file changed, 177 insertions(+), 2 deletions(-)
diff --git a/arch/riscv/kernel/kgdb.c b/arch/riscv/kernel/kgdb.c
index eb1afab47679..f16ade84a11f 100644
--- a/arch/riscv/kernel/kgdb.c
+++ b/arch/riscv/kernel/kgdb.c
@@ -11,13 +11,179 @@
 #include <linux/string.h>
 #include <asm/cacheflush.h>
 #include <asm/gdb_xml.h>
+#include <asm/parse_asm.h>
 
 enum {
         NOT_KGDB_BREAK = 0,
         KGDB_SW_BREAK,
         KGDB_COMPILED_BREAK,
+        KGDB_SW_SINGLE_STEP
 };
 
+static unsigned long stepped_address;
+static unsigned int stepped_opcode;
+
+#if __riscv_xlen == 32
+/* C.JAL is an RV32C-only instruction */
+DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL)
+#else
+#define is_c_jal_insn(opcode) 0
+#endif
+DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
+DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
+DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR)
+DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR)
+DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
+DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
+DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
+DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
+DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
+DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
+DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
+DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
+DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
+DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
+
+int decode_register_index(unsigned long opcode, int offset)
+{
+        return (opcode >> offset) & 0x1F;
+}
+
+int decode_register_index_short(unsigned long opcode, int offset)
+{
+        return ((opcode >> offset) & 0x7) + 8;
+}
+
+/* Calculate the new address for after a step */
+int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
+{
+        unsigned long pc = regs->epc;
+        unsigned long *regs_ptr = (unsigned long *)regs;
+        unsigned int rs1_num, rs2_num;
+        int op_code;
+
+        if (probe_kernel_address((void *)pc, op_code))
+                return -EINVAL;
+        if ((op_code & __INSN_LENGTH_MASK) != __INSN_LENGTH_GE_32) {
+                if (is_c_jalr_insn(op_code) || is_c_jr_insn(op_code)) {
+                        rs1_num = decode_register_index(op_code, RVC_C2_RS1_OPOFF);
+                        *next_addr = regs_ptr[rs1_num];
+                } else if (is_c_j_insn(op_code) || is_c_jal_insn(op_code)) {
+                        *next_addr = EXTRACT_RVC_J_IMM(op_code) + pc;
+                } else if (is_c_beqz_insn(op_code)) {
+                        rs1_num = decode_register_index_short(op_code,
+                                                              RVC_C1_RS1_OPOFF);
+                        if (!rs1_num || regs_ptr[rs1_num] == 0)
+                                *next_addr = EXTRACT_RVC_B_IMM(op_code) + pc;
+                        else
+                                *next_addr = pc + 2;
+                } else if (is_c_bnez_insn(op_code)) {
+                        rs1_num =
+                            decode_register_index_short(op_code, RVC_C1_RS1_OPOFF);
+                        if (rs1_num && regs_ptr[rs1_num] != 0)
+                                *next_addr = EXTRACT_RVC_B_IMM(op_code) + pc;
+                        else
+                                *next_addr = pc + 2;
+                } else {
+                        *next_addr = pc + 2;
+                }
+        } else {
+                if ((op_code & __INSN_OPCODE_MASK) == __INSN_BRANCH_OPCODE) {
+                        bool result = false;
+                        long imm = EXTRACT_BTYPE_IMM(op_code);
+                        unsigned long rs1_val = 0, rs2_val = 0;
+
+                        rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
+                        rs2_num = decode_register_index(op_code, RVG_RS2_OPOFF);
+                        if (rs1_num)
+                                rs1_val = regs_ptr[rs1_num];
+                        if (rs2_num)
+                                rs2_val = regs_ptr[rs2_num];
+
+                        if (is_beq_insn(op_code))
+                                result = (rs1_val == rs2_val) ? true : false;
+                        else if (is_bne_insn(op_code))
+                                result = (rs1_val != rs2_val) ? true : false;
+                        else if (is_blt_insn(op_code))
+                                result =
+                                    ((long)rs1_val <
+                                     (long)rs2_val) ? true : false;
+                        else if (is_bge_insn(op_code))
+                                result =
+                                    ((long)rs1_val >=
+                                     (long)rs2_val) ? true : false;
+                        else if (is_bltu_insn(op_code))
+                                result = (rs1_val < rs2_val) ? true : false;
+                        else if (is_bgeu_insn(op_code))
+                                result = (rs1_val >= rs2_val) ? true : false;
+                        if (result)
+                                *next_addr = imm + pc;
+                        else
+                                *next_addr = pc + 4;
+                } else if (is_jal_insn(op_code)) {
+                        *next_addr = EXTRACT_JTYPE_IMM(op_code) + pc;
+                } else if (is_jalr_insn(op_code)) {
+                        rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
+                        if (rs1_num)
+                                *next_addr = ((unsigned long *)regs)[rs1_num];
+                        *next_addr += EXTRACT_ITYPE_IMM(op_code);
+                } else if (is_sret_insn(op_code)) {
+                        *next_addr = pc;
+                } else {
+                        *next_addr = pc + 4;
+                }
+        }
+        return 0;
+}
+
+int do_single_step(struct pt_regs *regs)
+{
+        /* Determine where the target instruction will send us to */
+        unsigned long addr = 0;
+        int error = get_step_address(regs, &addr);
+
+        if (error)
+                return error;
+
+        /* Store the op code in the stepped address */
+        error = probe_kernel_address((void *)addr, stepped_opcode);
+        if (error)
+                return error;
+
+        stepped_address = addr;
+
+        /* Replace the op code with the break instruction */
+        error = probe_kernel_write((void *)stepped_address,
+                                   arch_kgdb_ops.gdb_bpt_instr,
+                                   BREAK_INSTR_SIZE);
+        /* Flush and return */
+        if (!error) {
+                flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
+                kgdb_single_step = 1;
+                atomic_set(&kgdb_cpu_doing_single_step,
+                           raw_smp_processor_id());
+        } else {
+                stepped_address = 0;
+                stepped_opcode = 0;
+        }
+        return error;
+}
+
+/* Undo a single step */
+static void undo_single_step(struct pt_regs *regs)
+{
+        if (stepped_opcode != 0) {
+                probe_kernel_write((void *)stepped_address,
+                                   (void *)&stepped_opcode, BREAK_INSTR_SIZE);
+                flush_icache_range(stepped_address,
+                                   stepped_address + BREAK_INSTR_SIZE);
+        }
+        stepped_address = 0;
+        stepped_opcode = 0;
+        kgdb_single_step = 0;
+        atomic_set(&kgdb_cpu_doing_single_step, -1);
+}
+
 struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
         {DBG_REG_ZERO, GDB_SIZEOF_REG, -1},
         {DBG_REG_RA, GDB_SIZEOF_REG, offsetof(struct pt_regs, ra)},
@@ -135,6 +301,8 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
 {
         int err = 0;
 
+        undo_single_step(regs);
+
         switch (remcom_in_buffer[0]) {
         case 'c':
         case 'D':
@@ -142,15 +310,20 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
                 if (remcom_in_buffer[0] == 'c')
                         kgdb_arch_update_addr(regs, remcom_in_buffer);
                 break;
+        case 's':
+                kgdb_arch_update_addr(regs, remcom_in_buffer);
+                err = do_single_step(regs);
+                break;
         default:
                 err = -1;
         }
-
         return err;
 }
 
 int kgdb_riscv_kgdbbreak(unsigned long addr)
 {
+        if (stepped_address == addr)
+                return KGDB_SW_SINGLE_STEP;
         if (atomic_read(&kgdb_setting_breakpoint))
                 if (addr == (unsigned long)&kgdb_compiled_break)
                         return KGDB_COMPILED_BREAK;
@@ -174,7 +347,9 @@ static int kgdb_riscv_notify(struct notifier_block *self, unsigned long cmd,
                 return NOTIFY_DONE;
 
         local_irq_save(flags);
-        if (kgdb_handle_exception(1, args->signr, cmd, regs))
+
+        if (kgdb_handle_exception(type == KGDB_SW_SINGLE_STEP ? 0 : 1,
+                                  args->signr, cmd, regs))
                 return NOTIFY_DONE;
 
         if (type == KGDB_COMPILED_BREAK)
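
As a side note on the decoding that get_step_address() performs for a full-size conditional branch, here is a self-contained sketch that extracts rs1, rs2 and the sign-extended B-type immediate from a 32-bit opcode and derives the two possible next-pc values. The bit offsets 15 and 20 are assumed to correspond to RVG_RS1_OPOFF and RVG_RS2_OPOFF from asm/parse_asm.h, the shift/mask arithmetic is this sketch's own rather than the EXTRACT_BTYPE_IMM() macro, and the sample encoding 0x00b50463 is beq a0, a1, .+8.

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed to match RVG_RS1_OPOFF / RVG_RS2_OPOFF in asm/parse_asm.h. */
#define RS1_OPOFF 15
#define RS2_OPOFF 20

/* Same job as decode_register_index() in the patch. */
static int reg_index(uint32_t insn, int off)
{
        return (insn >> off) & 0x1f;
}

/*
 * Reassemble the sign-extended B-type immediate:
 * imm[12|10:5] live in bits 31:25, imm[4:1|11] in bits 11:7, imm[0] = 0.
 */
static long btype_imm(uint32_t insn)
{
        long imm = 0;

        imm |= (long)((insn >> 31) & 0x1) << 12;
        imm |= (long)((insn >> 7) & 0x1) << 11;
        imm |= (long)((insn >> 25) & 0x3f) << 5;
        imm |= (long)((insn >> 8) & 0xf) << 1;
        if (imm & (1L << 12))              /* sign-extend from bit 12 */
                imm -= 1L << 13;
        return imm;
}

int main(void)
{
        uint32_t insn = 0x00b50463;        /* beq a0, a1, .+8 */
        unsigned long pc = 0x80001000;

        printf("rs1=x%d rs2=x%d imm=%ld\n",
               reg_index(insn, RS1_OPOFF), reg_index(insn, RS2_OPOFF),
               btype_imm(insn));
        printf("branch taken -> next pc = 0x%lx\n", pc + btype_imm(insn));
        printf("not taken    -> next pc = 0x%lx\n", pc + 4);
        return 0;
}
```

Whichever of the two addresses the register comparison selects is where do_single_step() then plants the break instruction.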