author     Benjamin Gray <bgray@linux.ibm.com>      2023-08-01 03:17:40 +0200
committer  Michael Ellerman <mpe@ellerman.id.au>    2023-08-16 15:54:50 +0200
commit     1e60f3564bad09962646bf8c2af588ecf518d337
tree       e85607e51ea81a9474f6f3e8e97e737c43c33191 /arch/powerpc
parent     powerpc/watchpoints: Don't track info persistently
powerpc/watchpoints: Track perf single step directly on the breakpoint
There is a bug in the current watchpoint tracking logic: the
teardown in arch_unregister_hw_breakpoint() uses bp->ctx->task, which it
does not hold a reference to and which parallel threads may be in the
process of destroying. This was partially addressed in commit fb822e6076d9
("powerpc/hw_breakpoint: Fix oops when destroying hw_breakpoint event"),
but the underlying issue of accessing a struct member in an unknown
state remained. Syzkaller managed to trigger a null pointer dereference
from the race between the task destructor and the loop that checks the
pointer and then dereferences it.
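For reference, the teardown path in question looks like this (condensed
from the code this patch removes from hw_breakpoint.c, see the diff
below; the comments here are added only to point at the racy accesses):

  void arch_unregister_hw_breakpoint(struct perf_event *bp)
  {
          /* bp->ctx->task is read without holding a reference to the task... */
          if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) {
                  int i;

                  /* ...so the task, and its thread struct, may be freed while
                   * this loop is still checking and clearing last_hit_ubp[]. */
                  for (i = 0; i < nr_wp_slots(); i++) {
                          if (bp->ctx->task->thread.last_hit_ubp[i] == bp)
                                  bp->ctx->task->thread.last_hit_ubp[i] = NULL;
                  }
          }
  }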
While this null pointer dereference could be avoided by using READ_ONCE
to read the task pointer up front, that would only change the error into
manipulating possibly freed memory.
Instead, the breakpoint logic needs to be reworked to remove any
dependency on a context or task struct during breakpoint removal.
The reason for this dependency is the need to clear thread.last_hit_ubp. This
member is used to differentiate the perf DAWR single-step sequence from
other causes of single-step, such as userspace just calling
ptrace(PTRACE_SINGLESTEP, ...). We need to differentiate them because,
when the single step interrupt is received, we need to know whether to
re-insert the DAWR breakpoint (perf) or not (ptrace / other).
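Roughly, the decision reduces to the following check (a minimal sketch,
not part of the patch; the helper name step_was_for_perf() is invented
here purely for illustration, while bp_per_reg, counter_arch_bp() and
the new perf_single_step flag are the ones used in the diff below):

  /* Was the pending single step on slot i caused by the perf DAWR
   * sequence?  If so, the DAWR must be re-inserted once the step
   * completes; for ptrace and other causes it must not be. */
  static bool step_was_for_perf(int i)
  {
          struct perf_event *bp = __this_cpu_read(bp_per_reg[i]);

          return bp && counter_arch_bp(bp)->perf_single_step;
  }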
arch_unregister_hw_breakpoint() needs to clear this information to
prevent dangling pointers to possibly freed memory. These pointers are
dereferenced in single_step_dabr_instruction() without a way to check
their validity.
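The consumer of those pointers is the single step handler loop, which
before this patch looked roughly like this (condensed from the removed
lines visible in the diff below); nothing in it can tell whether
last_hit_ubp[i] still points at a live event:

  for (i = 0; i < nr_wp_slots(); i++) {
          bp = current->thread.last_hit_ubp[i];   /* may be dangling */
          if (!bp)
                  continue;
          found = true;
          info = counter_arch_bp(bp);             /* dereferenced regardless */
          if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
                  perf_bp_event(bp, regs);
          current->thread.last_hit_ubp[i] = NULL;
  }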
This patch moves the tracking of this information to the breakpoint
itself. This means we no longer have to do anything special to clean up.
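Schematically, the new flag's lifecycle is confined to the breakpoint
(fragments stitched together from the hunks below, not compilable on
their own):

  /* 1. stepping_handler(): the DAWR is pulled so the original instruction
   *    can be single stepped; record why on the breakpoint itself. */
  counter_arch_bp(bp[i])->perf_single_step = true;

  /* 2. single_step_dabr_instruction(): the step completed; deliver the
   *    perf event, clear the flag and re-install the breakpoint. */
  info->perf_single_step = false;
  __set_breakpoint(i, counter_arch_bp(bp));

  /* 3. Breakpoint removal: nothing left to clean up - the flag lives in
   *    the event's own arch_hw_breakpoint and is freed with the event. */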
Signed-off-by: Benjamin Gray <bgray@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230801011744.153973-4-bgray@linux.ibm.com
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/hw_breakpoint.h |  1
-rw-r--r--  arch/powerpc/include/asm/processor.h     |  5
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c      | 69
3 files changed, 23 insertions, 52 deletions
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 84d39fd42f71..66db0147d5b4 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -18,6 +18,7 @@ struct arch_hw_breakpoint {
 	u16		len; /* length of the target data symbol */
 	u16		hw_len; /* length programmed in hw */
 	u8		flags;
+	bool		perf_single_step; /* temporarily uninstalled for a perf single step */
 };
 
 /* Note: Don't change the first 6 bits below as they are in the same order
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 8a6754ffdc7e..9e67cb1c72e9 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -172,11 +172,6 @@ struct thread_struct {
 	unsigned int	align_ctl;	/* alignment handling control */
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	struct perf_event *ptrace_bps[HBP_NUM_MAX];
-	/*
-	 * Helps identify source of single-step exception and subsequent
-	 * hw-breakpoint enablement
-	 */
-	struct perf_event *last_hit_ubp[HBP_NUM_MAX];
 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
 	struct arch_hw_breakpoint hw_brk[HBP_NUM_MAX]; /* hardware breakpoint info */
 	unsigned long	trap_nr;	/* last trap # on this thread */
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index e6749642604c..624375c18882 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -43,16 +43,6 @@ int hw_breakpoint_slots(int type)
 	return 0;		/* no instruction breakpoints available */
 }
 
-static bool single_step_pending(void)
-{
-	int i;
-
-	for (i = 0; i < nr_wp_slots(); i++) {
-		if (current->thread.last_hit_ubp[i])
-			return true;
-	}
-	return false;
-}
 
 /*
  * Install a perf counter breakpoint.
@@ -84,7 +74,7 @@
 	 * Do not install DABR values if the instruction must be single-stepped.
 	 * If so, DABR will be populated in single_step_dabr_instruction().
 	 */
-	if (!single_step_pending())
+	if (!info->perf_single_step)
 		__set_breakpoint(i, info);
 
 	return 0;
@@ -372,28 +362,6 @@ void arch_release_bp_slot(struct perf_event *bp)
 }
 
 /*
- * Perform cleanup of arch-specific counters during unregistration
- * of the perf-event
- */
-void arch_unregister_hw_breakpoint(struct perf_event *bp)
-{
-	/*
-	 * If the breakpoint is unregistered between a hw_breakpoint_handler()
-	 * and the single_step_dabr_instruction(), then cleanup the breakpoint
-	 * restoration variables to prevent dangling pointers.
-	 * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
-	 */
-	if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) {
-		int i;
-
-		for (i = 0; i < nr_wp_slots(); i++) {
-			if (bp->ctx->task->thread.last_hit_ubp[i] == bp)
-				bp->ctx->task->thread.last_hit_ubp[i] = NULL;
-		}
-	}
-}
-
-/*
  * Check for virtual address in kernel space.
  */
 int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
@@ -510,7 +478,9 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
 	int i;
 
 	for (i = 0; i < nr_wp_slots(); i++) {
-		if (unlikely(tsk->thread.last_hit_ubp[i]))
+		struct perf_event *bp = __this_cpu_read(bp_per_reg[i]);
+
+		if (unlikely(bp && counter_arch_bp(bp)->perf_single_step))
 			goto reset;
 	}
 	return;
@@ -520,7 +490,7 @@ reset:
 	for (i = 0; i < nr_wp_slots(); i++) {
 		info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
 		__set_breakpoint(i, info);
-		tsk->thread.last_hit_ubp[i] = NULL;
+		info->perf_single_step = false;
 	}
 }
 
@@ -563,7 +533,8 @@ static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
 	for (i = 0; i < nr_wp_slots(); i++) {
 		if (!hit[i])
 			continue;
-		current->thread.last_hit_ubp[i] = bp[i];
+
+		counter_arch_bp(bp[i])->perf_single_step = true;
 		bp[i] = NULL;
 	}
 	regs_set_return_msr(regs, regs->msr | MSR_SE);
@@ -770,24 +741,28 @@ NOKPROBE_SYMBOL(hw_breakpoint_handler);
 static int single_step_dabr_instruction(struct die_args *args)
 {
 	struct pt_regs *regs = args->regs;
-	struct perf_event *bp = NULL;
-	struct arch_hw_breakpoint *info;
-	int i;
 	bool found = false;
 
 	/*
 	 * Check if we are single-stepping as a result of a
 	 * previous HW Breakpoint exception
 	 */
-	for (i = 0; i < nr_wp_slots(); i++) {
-		bp = current->thread.last_hit_ubp[i];
+	for (int i = 0; i < nr_wp_slots(); i++) {
+		struct perf_event *bp;
+		struct arch_hw_breakpoint *info;
+
+		bp = __this_cpu_read(bp_per_reg[i]);
 
 		if (!bp)
 			continue;
 
-		found = true;
 		info = counter_arch_bp(bp);
 
+		if (!info->perf_single_step)
+			continue;
+
+		found = true;
+
 		/*
 		 * We shall invoke the user-defined callback function in the
 		 * single stepping handler to confirm to 'trigger-after-execute'
@@ -795,19 +770,19 @@ static int single_step_dabr_instruction(struct die_args *args)
 		 */
 		if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
 			perf_bp_event(bp, regs);
-		current->thread.last_hit_ubp[i] = NULL;
+
+		info->perf_single_step = false;
 	}
 
 	if (!found)
 		return NOTIFY_DONE;
 
-	for (i = 0; i < nr_wp_slots(); i++) {
-		bp = __this_cpu_read(bp_per_reg[i]);
+	for (int i = 0; i < nr_wp_slots(); i++) {
+		struct perf_event *bp = __this_cpu_read(bp_per_reg[i]);
 		if (!bp)
 			continue;
 
-		info = counter_arch_bp(bp);
-		__set_breakpoint(i, info);
+		__set_breakpoint(i, counter_arch_bp(bp));
 	}
 
 	/*