author      Peter Zijlstra <peterz@infradead.org>   2023-03-22 11:28:36 +0100
committer   Peter Zijlstra <peterz@infradead.org>   2023-03-24 11:01:29 +0100
commit      68e2d17c9eb311ab59aeb6d0c38aad8985fa2596 (patch)
tree        4ad1d55f8685fc2cbb6f76bffc0c35084e89d201 /kernel
parent      sched, smp: Trace smp callback causing an IPI (diff)
download    linux-68e2d17c9eb311ab59aeb6d0c38aad8985fa2596.tar.xz
            linux-68e2d17c9eb311ab59aeb6d0c38aad8985fa2596.zip
trace: Add trace_ipi_send_cpu()
Because copying cpumasks around when targeting a single CPU is a bit daft...

Tested-and-reviewed-by: Valentin Schneider <vschneid@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20230322103004.GA571242%40hirez.programming.kicks-ass.net
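The event itself is declared outside kernel/ (the diffstat below is limited to 'kernel'), so its definition does not appear in this diff. A minimal sketch of what the declaration in include/trace/events/ipi.h plausibly looks like, modelled on the existing ipi_send_cpumask event; field names and output format are illustrative, not copied from the tree:

/* Sketch only: argument list and printk format are assumptions. */
TRACE_EVENT(ipi_send_cpu,

	TP_PROTO(const unsigned int cpu, unsigned long callsite, void *callback),

	TP_ARGS(cpu, callsite, callback),

	TP_STRUCT__entry(
		__field(unsigned int, cpu)
		__field(void *, callsite)
		__field(void *, callback)
	),

	TP_fast_assign(
		__entry->cpu      = cpu;
		__entry->callsite = (void *)callsite;
		__entry->callback = callback;
	),

	TP_printk("cpu=%u callsite=%pS callback=%pS",
		  __entry->cpu, __entry->callsite, __entry->callback)
);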
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/irq_work.c     6
-rw-r--r--   kernel/sched/core.c   1
-rw-r--r--   kernel/smp.c          4
3 files changed, 5 insertions, 6 deletions
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index c33e88e32a67..2f4fb336dda1 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -78,10 +78,8 @@ void __weak arch_irq_work_raise(void)
static __always_inline void irq_work_raise(struct irq_work *work)
{
- if (trace_ipi_send_cpumask_enabled() && arch_irq_work_has_interrupt())
- trace_ipi_send_cpumask(cpumask_of(smp_processor_id()),
- _RET_IP_,
- work->func);
+ if (trace_ipi_send_cpu_enabled() && arch_irq_work_has_interrupt())
+ trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);
arch_irq_work_raise();
}
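The trace_ipi_send_cpu_enabled() guard keeps the tracing-off path cheap: the helper generated for every tracepoint is a static-branch test, so the arch_irq_work_has_interrupt() check and the argument setup sit behind a patched NOP while the event is disabled. Roughly what the generated helper looks like; a sketch based on the __DECLARE_TRACE machinery in include/linux/tracepoint.h, and exact details vary by kernel version:

/* Sketch of the per-event helper emitted by the tracepoint macros. */
static inline bool trace_ipi_send_cpu_enabled(void)
{
	return static_key_false(&__tracepoint_ipi_send_cpu.key);
}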
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b0a48cfc0a22..ad40755ddc11 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -96,6 +96,7 @@
#include "../../io_uring/io-wq.h"
#include "../smpboot.h"
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
/*
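Exporting the tracepoint symbol (next to the existing ipi_send_cpumask export) is what lets modular code attach probes to it. A hedged sketch of a module doing so; the probe prototype assumes the (cpu, callsite, callback) arguments sketched above, and the pr_debug output is purely illustrative:

/* Sketch: attach a probe to the exported ipi_send_cpu tracepoint. */
#include <linux/module.h>
#include <trace/events/ipi.h>

static void probe_ipi_send_cpu(void *data, const unsigned int cpu,
			       unsigned long callsite, void *callback)
{
	/* Called every time the event fires. */
	pr_debug("IPI to cpu %u from %pS for %pS\n",
		 cpu, (void *)callsite, callback);
}

static int __init ipi_probe_init(void)
{
	return register_trace_ipi_send_cpu(probe_ipi_send_cpu, NULL);
}
module_init(ipi_probe_init);

static void __exit ipi_probe_exit(void)
{
	unregister_trace_ipi_send_cpu(probe_ipi_send_cpu, NULL);
	tracepoint_synchronize_unregister();
}
module_exit(ipi_probe_exit);

MODULE_LICENSE("GPL");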
diff --git a/kernel/smp.c b/kernel/smp.c
index 37e9613a0889..43f0796ecdb2 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -107,7 +107,7 @@ static __always_inline void
send_call_function_single_ipi(int cpu, smp_call_func_t func)
{
if (call_function_single_prep_ipi(cpu)) {
- trace_ipi_send_cpumask(cpumask_of(cpu), _RET_IP_, func);
+ trace_ipi_send_cpu(cpu, _RET_IP_, func);
arch_send_call_function_single_ipi(cpu);
}
}
@@ -346,7 +346,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
* even if we haven't sent the smp_call IPI yet (e.g. the stopper
* executes migration_cpu_stop() on the remote CPU).
*/
- if (trace_ipi_send_cpumask_enabled()) {
+ if (trace_ipi_send_cpu_enabled()) {
call_single_data_t *csd;
smp_call_func_t func;