author    Linus Torvalds <torvalds@linux-foundation.org>  2022-12-16 03:01:16 +0100
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-12-16 03:01:16 +0100
commit    fe36bb8736ee9e38fa6173e1271ed8c5cf7bc907 (patch)
tree      15948d02fe68c2b8c8d6de803006a82f226853de /arch/x86/mm
parent    Merge tag '6.2-rc-smb3-client-fixes-part1' of git://git.samba.org/sfrench/cif... (diff)
parent    tracing: Fix cpumask() example typo (diff)
Merge tag 'trace-v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull tracing updates from Steven Rostedt:

 - Add options to the osnoise tracer:
     - 'panic_on_stop' option that panics the kernel if osnoise is
       greater than some user defined threshold.
     - 'preempt' option, to test noise while preemption is disabled
     - 'irq' option, to test noise when interrupts are disabled

 - Add .percent and .graph suffixes to histograms to give different outputs

 - Add nohitcount to disable showing hitcount in histogram output

 - Add new __cpumask() to trace event fields to annotate that an unsigned
   long array is a cpumask to user space and should be treated as one.

 - Add trace_trigger kernel command line parameter to enable trace event
   triggers at boot up. Useful to trace stack traces, disable tracing and
   take snapshots.

 - Fix x86/kmmio mmio tracer to work with the updates to lockdep

 - Unify the panic and die notifiers

 - Add back ftrace_expect reference that is used to extract more
   information in the ftrace_bug() code.

 - Have trigger filter parsing errors show up in the tracing error log.

 - Updated MAINTAINERS file to add kernel tracing mailing list and
   patchwork info

 - Use IDA to keep track of event type numbers.

 - And minor fixes and clean ups

* tag 'trace-v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace: (44 commits)
  tracing: Fix cpumask() example typo
  tracing: Improve panic/die notifiers
  ftrace: Prevent RCU stall on PREEMPT_VOLUNTARY kernels
  tracing: Do not synchronize freeing of trigger filter on boot up
  tracing: Remove pointer (asterisk) and brackets from cpumask_t field
  tracing: Have trigger filter parsing errors show up in error_log
  x86/mm/kmmio: Remove redundant preempt_disable()
  tracing: Fix infinite loop in tracing_read_pipe on overflowed print_trace_line
  Documentation/osnoise: Add osnoise/options documentation
  tracing/osnoise: Add preempt and/or irq disabled options
  tracing/osnoise: Add PANIC_ON_STOP option
  Documentation/osnoise: Escape underscore of NO_ prefix
  tracing: Fix some checker warnings
  tracing/osnoise: Make osnoise_options static
  tracing: remove unnecessary trace_trigger ifdef
  ring-buffer: Handle resize in early boot up
  tracing/hist: Fix issue of losting command info in error_log
  tracing: Fix issue of missing one synthetic field
  tracing/hist: Fix out-of-bound write on 'action_data.var_ref_idx'
  tracing/hist: Fix wrong return value in parse_action_params()
  ...
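The __cpumask() trace event field mentioned above is easiest to see in a trace
event definition. The fragment below is only a sketch modeled on the in-tree
trace event sample (samples/trace_events/trace-events-sample.h): the event name
foo_cpus is made up, the helper names __cpumask()/__assign_cpumask()/__get_cpumask()
are assumed from that sample rather than taken from this merge's diff, and the
usual trace header boilerplate around TRACE_EVENT() is omitted.

/*
 * Sketch only: a trace event recording a cpumask, so user space sees a
 * real cpumask rather than a raw unsigned long array. This would live in
 * a trace event header included through the usual define_trace machinery.
 */
TRACE_EVENT(foo_cpus,

	TP_PROTO(const struct cpumask *mask),

	TP_ARGS(mask),

	TP_STRUCT__entry(
		__cpumask(	cpus	)	/* dynamically sized cpumask field */
	),

	TP_fast_assign(
		/* copy the caller's mask into the ring buffer entry */
		__assign_cpumask(cpus, cpumask_bits(mask));
	),

	TP_printk("cpus=%s", __get_cpumask(cpus))
);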
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/kmmio.c | 50
1 file changed, 30 insertions(+), 20 deletions(-)
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index d3efbc5b3449..9f82019179e1 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -62,7 +62,13 @@ struct kmmio_context {
int active;
};
-static DEFINE_SPINLOCK(kmmio_lock);
+/*
+ * The kmmio_lock is taken in int3 context, which is treated as NMI context.
+ * This causes lockdep to complain about it being in both NMI and normal
+ * context. Hide it from lockdep, as it should not have any other locks
+ * taken under it, and this is only enabled for debugging mmio anyway.
+ */
+static arch_spinlock_t kmmio_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/* Protected by kmmio_lock */
unsigned int kmmio_count;
@@ -240,15 +246,14 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
page_base &= page_level_mask(l);
/*
- * Preemption is now disabled to prevent process switch during
- * single stepping. We can only handle one active kmmio trace
+ * Hold the RCU read lock over single stepping to avoid looking
+ * up the probe and kmmio_fault_page again. The rcu_read_lock_sched()
+ * also disables preemption and prevents process switch during
+ * the single stepping. We can only handle one active kmmio trace
* per cpu, so ensure that we finish it before something else
- * gets to run. We also hold the RCU read lock over single
- * stepping to avoid looking up the probe and kmmio_fault_page
- * again.
+ * gets to run.
*/
- preempt_disable();
- rcu_read_lock();
+ rcu_read_lock_sched_notrace();
faultpage = get_kmmio_fault_page(page_base);
if (!faultpage) {
@@ -317,8 +322,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
return 1; /* fault handled */
no_kmmio:
- rcu_read_unlock();
- preempt_enable_no_resched();
+ rcu_read_unlock_sched_notrace();
return ret;
}
@@ -346,10 +350,10 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
ctx->probe->post_handler(ctx->probe, condition, regs);
/* Prevent racing against release_kmmio_fault_page(). */
- spin_lock(&kmmio_lock);
+ arch_spin_lock(&kmmio_lock);
if (ctx->fpage->count)
arm_kmmio_fault_page(ctx->fpage);
- spin_unlock(&kmmio_lock);
+ arch_spin_unlock(&kmmio_lock);
regs->flags &= ~X86_EFLAGS_TF;
regs->flags |= ctx->saved_flags;
@@ -357,8 +361,7 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
/* These were acquired in kmmio_handler(). */
ctx->active--;
BUG_ON(ctx->active);
- rcu_read_unlock();
- preempt_enable_no_resched();
+ rcu_read_unlock_sched_notrace();
/*
* if somebody else is singlestepping across a probe point, flags
@@ -440,7 +443,8 @@ int register_kmmio_probe(struct kmmio_probe *p)
unsigned int l;
pte_t *pte;
- spin_lock_irqsave(&kmmio_lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(&kmmio_lock);
if (get_kmmio_probe(addr)) {
ret = -EEXIST;
goto out;
@@ -460,7 +464,9 @@ int register_kmmio_probe(struct kmmio_probe *p)
size += page_level_size(l);
}
out:
- spin_unlock_irqrestore(&kmmio_lock, flags);
+ arch_spin_unlock(&kmmio_lock);
+ local_irq_restore(flags);
+
/*
* XXX: What should I do here?
* Here was a call to global_flush_tlb(), but it does not exist
@@ -494,7 +500,8 @@ static void remove_kmmio_fault_pages(struct rcu_head *head)
struct kmmio_fault_page **prevp = &dr->release_list;
unsigned long flags;
- spin_lock_irqsave(&kmmio_lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(&kmmio_lock);
while (f) {
if (!f->count) {
list_del_rcu(&f->list);
@@ -506,7 +513,8 @@ static void remove_kmmio_fault_pages(struct rcu_head *head)
}
f = *prevp;
}
- spin_unlock_irqrestore(&kmmio_lock, flags);
+ arch_spin_unlock(&kmmio_lock);
+ local_irq_restore(flags);
/* This is the real RCU destroy call. */
call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
@@ -540,14 +548,16 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
if (!pte)
return;
- spin_lock_irqsave(&kmmio_lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(&kmmio_lock);
while (size < size_lim) {
release_kmmio_fault_page(addr + size, &release_list);
size += page_level_size(l);
}
list_del_rcu(&p->list);
kmmio_count--;
- spin_unlock_irqrestore(&kmmio_lock, flags);
+ arch_spin_unlock(&kmmio_lock);
+ local_irq_restore(flags);
if (!release_list)
return;
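Outside the diff context, the locking scheme the kmmio code switches to can be
sketched as below. This is illustrative only: it shows the pattern of a raw
arch_spinlock_t with interrupts disabled by hand, so lockdep never tracks the
lock and cannot object to it being taken from both normal and int3/NMI-like
context. The names example_lock and example_critical_section are made up and
are not part of the patch.

#include <linux/spinlock.h>
#include <linux/irqflags.h>

/* Sketch only: hide a lock from lockdep by using the arch-level API. */
static arch_spinlock_t example_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* what spin_lock_irqsave() would also do... */
	arch_spin_lock(&example_lock);	/* ...but without the lockdep bookkeeping */

	/* touch state that is also reached from the int3 handler */

	arch_spin_unlock(&example_lock);
	local_irq_restore(flags);
}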