From 51a021244b9d579be6b4f8c15c493a76deb2a79e Mon Sep 17 00:00:00 2001
From: Vineet Gupta
Date: Fri, 7 Oct 2016 17:02:10 -0700
Subject: atomic64: no need for CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE

This came to light when implementing native 64-bit atomics for ARCv2.

The atomic64 self-test code uses CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
to check whether atomic64_dec_if_positive() is available.  It seems it
was needed when not every arch defined it.  However, in the current code
the Kconfig option seems needless:

 - for CONFIG_GENERIC_ATOMIC64 it is auto-enabled in lib/Kconfig, and a
   generic definition of the API is present in lib/atomic64.c
 - arches with native 64-bit atomics select it in arch/*/Kconfig and
   define the API in their headers

So I see no point in keeping the Kconfig option.

Compile tested for:
 - blackfin (CONFIG_GENERIC_ATOMIC64)
 - x86 (!CONFIG_GENERIC_ATOMIC64)
 - ia64

Link: http://lkml.kernel.org/r/1473703083-8625-3-git-send-email-vgupta@synopsys.com
Signed-off-by: Vineet Gupta
Cc: Richard Henderson
Cc: Ivan Kokshaysky
Cc: Matt Turner
Cc: Russell King
Cc: Catalin Marinas
Cc: Will Deacon
Cc: Ralf Baechle
Cc: "James E.J. Bottomley"
Cc: Helge Deller
Cc: Benjamin Herrenschmidt
Cc: Paul Mackerras
Cc: Michael Ellerman
Cc: Martin Schwidefsky
Cc: Heiko Carstens
Cc: "David S. Miller"
Cc: Chris Metcalf
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: "H. Peter Anvin"
Cc: Vineet Gupta
Cc: Zhaoxiu Zeng
Cc: Linus Walleij
Cc: Alexander Potapenko
Cc: Andrey Ryabinin
Cc: Herbert Xu
Cc: Ming Lin
Cc: Arnd Bergmann
Cc: Geert Uytterhoeven
Cc: Peter Zijlstra
Cc: Borislav Petkov
Cc: Andi Kleen
Cc: Boqun Feng
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm/Kconfig | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch/arm')

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 3cd9042fbb62..c297bc5e341c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2,7 +2,6 @@ config ARM
 	bool
 	default y
 	select ARCH_CLOCKSOURCE_DATA
-	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
--
cgit v1.2.3


From 9a01c3ed5cdb35d9004eb92510ee6ea11b4a5f16 Mon Sep 17 00:00:00 2001
From: Chris Metcalf
Date: Fri, 7 Oct 2016 17:02:45 -0700
Subject: nmi_backtrace: add more trigger_*_cpu_backtrace() methods

Patch series "improvements to the nmi_backtrace code" v9.

This patch series modifies the trigger_xxx_backtrace() NMI-based remote
backtracing code to make it more flexible, and makes a few small
improvements along the way.

The motivation comes from the task isolation code, where there are
scenarios where we want to be able to diagnose a case where some cpu is
about to interrupt a task-isolated cpu.  It can be helpful to see both
where the interrupting cpu is, and also an approximation of where the
cpu that is being interrupted is.  The nmi_backtrace framework allows
us to discover the stack of the interrupted cpu.

I've tested that the change works as desired on tile, and build-tested
x86, arm, mips, and sparc64.  For x86 I confirmed that the generic
cpuidle stuff as well as the architecture-specific routines are in the
new cpuidle section.  For arm, mips, and sparc I just build-tested it
and made sure the generic cpuidle routines were in the new cpuidle
section, but I didn't attempt to figure out what the platform-specific
idle routines might be.  That might be more usefully done by someone
with platform experience in follow-up patches.
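To make the new interface concrete, here is a short sketch of how a
caller such as the task isolation code might use the single-cpu
primitive this series adds (the calling function and its name are
hypothetical, shown for illustration only):

	#include <linux/nmi.h>
	#include <linux/printk.h>

	/* Hypothetical diagnostic: cpu "target" is about to be interrupted. */
	static void diagnose_interruption(int target)
	{
		/*
		 * Ask the remote cpu to dump its stack via NMI.  Returns
		 * false when the architecture provides no backtrace support.
		 */
		if (!trigger_single_cpu_backtrace(target))
			pr_warn("no NMI backtrace support for cpu %d\n", target);

		/* Also show where the interrupting (current) cpu is. */
		dump_stack();
	}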
This patch (of 4):

Currently you can only request a backtrace of either all cpus, or all
cpus but yourself.  It can also be helpful to request a remote backtrace
of a single cpu, and since we want that, the logical extension is to
support a cpumask as the underlying primitive.

This change modifies the existing lib/nmi_backtrace.c code to take a
cpumask as its basic primitive, and modifies the linux/nmi.h code to use
the new "cpumask" method instead.

The existing clients of nmi_backtrace (arm and x86) are converted to
using the new cpumask approach in this change.

The other users of the backtracing API (sparc64 and mips) are converted
to use the cpumask approach rather than the all/allbutself approach.
The mips code ignored the "include_self" boolean but with this change
it will now also dump a local backtrace if requested.

Link: http://lkml.kernel.org/r/1472487169-14923-2-git-send-email-cmetcalf@mellanox.com
Signed-off-by: Chris Metcalf
Tested-by: Daniel Thompson [arm]
Reviewed-by: Aaron Tomlin
Reviewed-by: Petr Mladek
Cc: "Rafael J. Wysocki"
Cc: Russell King
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: Ralf Baechle
Cc: David Miller
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm/include/asm/irq.h      |  5 +++--
 arch/arm/kernel/smp.c           |  4 ++--
 arch/mips/include/asm/irq.h     |  5 +++--
 arch/mips/kernel/process.c      | 11 +++++++++--
 arch/sparc/include/asm/irq_64.h |  5 +++--
 arch/sparc/kernel/process_64.c  | 10 +++++-----
 arch/x86/include/asm/irq.h      |  5 +++--
 arch/x86/kernel/apic/hw_nmi.c   | 18 +++++++++---------
 include/linux/nmi.h             | 31 ++++++++++++++++++++++++++-----
 lib/nmi_backtrace.c             | 17 +++++++++--------
 10 files changed, 72 insertions(+), 39 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 1bd9510de1b9..e53638c8ed8a 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -36,8 +36,9 @@ extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
 #endif
 
 #ifdef CONFIG_SMP
-extern void arch_trigger_all_cpu_backtrace(bool);
-#define arch_trigger_all_cpu_backtrace(x) arch_trigger_all_cpu_backtrace(x)
+extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
+					   bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
 #endif
 
 static inline int nr_legacy_irqs(void)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 937c8920d741..5abc5697e4e5 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -760,7 +760,7 @@ static void raise_nmi(cpumask_t *mask)
 	smp_cross_call(mask, IPI_CPU_BACKTRACE);
 }
 
-void arch_trigger_all_cpu_backtrace(bool include_self)
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
-	nmi_trigger_all_cpu_backtrace(include_self, raise_nmi);
+	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
 }
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 15e0fecbc300..6bf10e796553 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -51,7 +51,8 @@ extern int cp0_fdc_irq;
 
 extern int get_c0_fdc_int(void);
 
-void arch_trigger_all_cpu_backtrace(bool);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+				    bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
 
 #endif /* _ASM_IRQ_H */
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index d2d061520a23..9514e5f2209f 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -569,9 +569,16 @@ static void arch_dump_stack(void *info)
 	dump_stack();
 }
 
-void arch_trigger_all_cpu_backtrace(bool include_self)
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
-	smp_call_function(arch_dump_stack, NULL, 1);
+	long this_cpu = get_cpu();
+
+	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
+		dump_stack();
+
+	smp_call_function_many(mask, arch_dump_stack, NULL, 1);
+
+	put_cpu();
 }
 
 int mips_get_process_fp_mode(struct task_struct *task)
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 3f70f900e834..1d51a11fb261 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -86,8 +86,9 @@ static inline unsigned long get_softint(void)
 	return retval;
 }
 
-void arch_trigger_all_cpu_backtrace(bool);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+				    bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
 
 extern void *hardirq_stack[NR_CPUS];
 extern void *softirq_stack[NR_CPUS];
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index fa14402b33f9..47ff5588e521 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -239,7 +239,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
 	}
 }
 
-void arch_trigger_all_cpu_backtrace(bool include_self)
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
 	struct thread_info *tp = current_thread_info();
 	struct pt_regs *regs = get_irq_regs();
@@ -255,15 +255,15 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
 
 	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
 
-	if (include_self)
+	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
 		__global_reg_self(tp, regs, this_cpu);
 
 	smp_fetch_global_regs();
 
-	for_each_online_cpu(cpu) {
+	for_each_cpu(cpu, mask) {
 		struct global_reg_snapshot *gp;
 
-		if (!include_self && cpu == this_cpu)
+		if (exclude_self && cpu == this_cpu)
 			continue;
 
 		gp = &global_cpu_snapshot[cpu].reg;
@@ -300,7 +300,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
 
 static void sysrq_handle_globreg(int key)
 {
-	arch_trigger_all_cpu_backtrace(true);
+	trigger_all_cpu_backtrace();
 }
 
 static struct sysrq_key_op sparc_globalreg_op = {
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index e7de5c9a4fbd..16d3fa211962 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -50,8 +50,9 @@ extern int vector_used_by_percpu_irq(unsigned int vector);
 extern void init_ISA_irqs(void);
 
 #ifdef CONFIG_X86_LOCAL_APIC
-void arch_trigger_all_cpu_backtrace(bool);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
+				    bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
 #endif
 
 #endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index f29501e1a5c1..c73c9fb281e1 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -26,32 +26,32 @@ u64 hw_nmi_get_sample_period(int watchdog_thresh)
 }
 #endif
 
-#ifdef arch_trigger_all_cpu_backtrace
+#ifdef arch_trigger_cpumask_backtrace
 static void nmi_raise_cpu_backtrace(cpumask_t *mask)
 {
 	apic->send_IPI_mask(mask, NMI_VECTOR);
 }
 
-void arch_trigger_all_cpu_backtrace(bool include_self)
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
-	nmi_trigger_all_cpu_backtrace(include_self, nmi_raise_cpu_backtrace);
+	nmi_trigger_cpumask_backtrace(mask, exclude_self,
+				      nmi_raise_cpu_backtrace);
 }
 
-static int
-arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
+static int nmi_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
 {
 	if (nmi_cpu_backtrace(regs))
 		return NMI_HANDLED;
 
 	return NMI_DONE;
 }
-NOKPROBE_SYMBOL(arch_trigger_all_cpu_backtrace_handler);
+NOKPROBE_SYMBOL(nmi_cpu_backtrace_handler);
 
-static int __init register_trigger_all_cpu_backtrace(void)
+static int __init register_nmi_cpu_backtrace_handler(void)
 {
-	register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
+	register_nmi_handler(NMI_LOCAL, nmi_cpu_backtrace_handler,
 			     0, "arch_bt");
 	return 0;
 }
-early_initcall(register_trigger_all_cpu_backtrace);
+early_initcall(register_nmi_cpu_backtrace_handler);
 #endif
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 4630eeae18e0..a78c35cff1ae 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -35,21 +35,34 @@ static inline void hardlockup_detector_disable(void) {}
  * base function. Return whether such support was available,
  * to allow calling code to fall back to some other mechanism:
  */
-#ifdef arch_trigger_all_cpu_backtrace
+#ifdef arch_trigger_cpumask_backtrace
 static inline bool trigger_all_cpu_backtrace(void)
 {
-	arch_trigger_all_cpu_backtrace(true);
-
+	arch_trigger_cpumask_backtrace(cpu_online_mask, false);
 	return true;
 }
+
 static inline bool trigger_allbutself_cpu_backtrace(void)
 {
-	arch_trigger_all_cpu_backtrace(false);
+	arch_trigger_cpumask_backtrace(cpu_online_mask, true);
+	return true;
+}
+
+static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
+{
+	arch_trigger_cpumask_backtrace(mask, false);
+	return true;
+}
+
+static inline bool trigger_single_cpu_backtrace(int cpu)
+{
+	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
 	return true;
 }
 
 /* generic implementation */
-void nmi_trigger_all_cpu_backtrace(bool include_self,
+void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
+				   bool exclude_self,
 				   void (*raise)(cpumask_t *mask));
 bool nmi_cpu_backtrace(struct pt_regs *regs);
 
@@ -62,6 +75,14 @@ static inline bool trigger_allbutself_cpu_backtrace(void)
 {
 	return false;
 }
+static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
+{
+	return false;
+}
+static inline bool trigger_single_cpu_backtrace(int cpu)
+{
+	return false;
+}
 #endif
 
 #ifdef CONFIG_LOCKUP_DETECTOR
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 26caf51cc238..df347e355267 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -17,20 +17,21 @@
 #include
 #include
 
-#ifdef arch_trigger_all_cpu_backtrace
+#ifdef arch_trigger_cpumask_backtrace
 /* For reliability, we're prepared to waste bits here. */
 static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
 
-/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+/* "in progress" flag of arch_trigger_cpumask_backtrace */
 static unsigned long backtrace_flag;
 
 /*
- * When raise() is called it will be is passed a pointer to the
+ * When raise() is called it will be passed a pointer to the
  * backtrace_mask. Architectures that call nmi_cpu_backtrace()
  * directly from their raise() functions may rely on the mask
  * they are passed being updated as a side effect of this call.
 */
-void nmi_trigger_all_cpu_backtrace(bool include_self,
+void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
+				   bool exclude_self,
 				   void (*raise)(cpumask_t *mask))
 {
 	int i, this_cpu = get_cpu();
@@ -44,13 +45,13 @@ void nmi_trigger_all_cpu_backtrace(bool include_self,
 		return;
 	}
 
-	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
-	if (!include_self)
+	cpumask_copy(to_cpumask(backtrace_mask), mask);
+	if (exclude_self)
 		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));
 
 	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
-		pr_info("Sending NMI to %s CPUs:\n",
-			(include_self ? "all" : "other"));
+		pr_info("Sending NMI from CPU %d to CPUs %*pbl:\n",
+			this_cpu, nr_cpumask_bits, to_cpumask(backtrace_mask));
 		raise(to_cpumask(backtrace_mask));
 	}
--
cgit v1.2.3


From 677664895278267a80bda0e3b26821d60cdbebf5 Mon Sep 17 00:00:00 2001
From: Chris Metcalf
Date: Fri, 7 Oct 2016 17:02:49 -0700
Subject: nmi_backtrace: do a local dump_stack() instead of a self-NMI

Currently on arm there is code that checks whether it should call
dump_stack() explicitly, to avoid trying to raise an NMI when the
current context is not preemptible by the backtrace IPI.  Similarly,
the forthcoming arch/tile support uses an IPI mechanism that does not
support generating an NMI to self.

Accordingly, move the code that guards this case into the generic
mechanism, and invoke it unconditionally whenever we want a backtrace
of the current cpu.

It seems plausible that in all cases, dump_stack() will generate better
information than generating a stack from the NMI handler.  The register
state will be missing, but that state is likely not particularly
helpful in any case.  Or, if we think it is helpful, we should be
capturing and emitting the current register state in all cases when
regs == NULL is passed to nmi_cpu_backtrace().

Link: http://lkml.kernel.org/r/1472487169-14923-3-git-send-email-cmetcalf@mellanox.com
Signed-off-by: Chris Metcalf
Tested-by: Daniel Thompson [arm]
Reviewed-by: Petr Mladek
Acked-by: Aaron Tomlin
Cc: "Rafael J. Wysocki"
Cc: Russell King
Cc: Thomas Gleixner
Cc: Ingo Molnar
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm/kernel/smp.c | 9 ---------
 lib/nmi_backtrace.c   | 9 +++++++++
 2 files changed, 9 insertions(+), 9 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5abc5697e4e5..7dd14e8395e6 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -748,15 +748,6 @@ core_initcall(register_cpufreq_notifier);
 
 static void raise_nmi(cpumask_t *mask)
 {
-	/*
-	 * Generate the backtrace directly if we are running in a calling
-	 * context that is not preemptible by the backtrace IPI. Note
-	 * that nmi_cpu_backtrace() automatically removes the current cpu
-	 * from mask.
-	 */
-	if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
-		nmi_cpu_backtrace(NULL);
-
 	smp_cross_call(mask, IPI_CPU_BACKTRACE);
 }
 
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index df347e355267..393a3cca1f47 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -49,6 +49,15 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
 	if (exclude_self)
 		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));
 
+	/*
+	 * Don't try to send an NMI to this cpu; it may work on some
+	 * architectures, but on others it may not, and we'll get
+	 * information at least as useful just by doing a dump_stack() here.
+	 * Note that nmi_cpu_backtrace(NULL) will clear the cpu bit.
+	 */
+	if (cpumask_test_cpu(this_cpu, to_cpumask(backtrace_mask)))
+		nmi_cpu_backtrace(NULL);
+
 	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
 		pr_info("Sending NMI from CPU %d to CPUs %*pbl:\n",
 			this_cpu, nr_cpumask_bits, to_cpumask(backtrace_mask));
--
cgit v1.2.3


From 6727ad9e206cc08b80d8000a4d67f8417e53539d Mon Sep 17 00:00:00 2001
From: Chris Metcalf
Date: Fri, 7 Oct 2016 17:02:55 -0700
Subject: nmi_backtrace: generate one-line reports for idle cpus

When doing an nmi backtrace of many cores, most of which are idle, the
output is a little overwhelming and very uninformative.  Suppress
messages for cpus that are idling when they are interrupted and just
emit one line, "NMI backtrace for N skipped: idling at pc 0xNNN".

We do this by grouping all the cpuidle code together into a new
.cpuidle.text section, and then checking the address of the interrupted
PC to see if it lies within that section.

This commit suitably tags x86 and tile idle routines, and only adds in
the minimal framework for other architectures.

Link: http://lkml.kernel.org/r/1472487169-14923-5-git-send-email-cmetcalf@mellanox.com
Signed-off-by: Chris Metcalf
Acked-by: Peter Zijlstra (Intel)
Tested-by: Peter Zijlstra (Intel)
Tested-by: Daniel Thompson [arm]
Tested-by: Petr Mladek
Cc: Aaron Tomlin
Cc: Peter Zijlstra (Intel)
Cc: "Rafael J. Wysocki"
Cc: Russell King
Cc: Thomas Gleixner
Cc: Ingo Molnar
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/alpha/kernel/vmlinux.lds.S      |  1 +
 arch/arc/kernel/vmlinux.lds.S        |  1 +
 arch/arm/kernel/vmlinux-xip.lds.S    |  1 +
 arch/arm/kernel/vmlinux.lds.S        |  1 +
 arch/arm64/kernel/vmlinux.lds.S      |  1 +
 arch/avr32/kernel/vmlinux.lds.S      |  1 +
 arch/blackfin/kernel/vmlinux.lds.S   |  1 +
 arch/c6x/kernel/vmlinux.lds.S        |  1 +
 arch/cris/kernel/vmlinux.lds.S       |  1 +
 arch/frv/kernel/vmlinux.lds.S        |  1 +
 arch/h8300/kernel/vmlinux.lds.S      |  1 +
 arch/hexagon/kernel/vmlinux.lds.S    |  1 +
 arch/ia64/kernel/vmlinux.lds.S       |  1 +
 arch/m32r/kernel/vmlinux.lds.S       |  1 +
 arch/m68k/kernel/vmlinux-nommu.lds   |  1 +
 arch/m68k/kernel/vmlinux-std.lds     |  1 +
 arch/m68k/kernel/vmlinux-sun3.lds    |  1 +
 arch/metag/kernel/vmlinux.lds.S      |  1 +
 arch/microblaze/kernel/vmlinux.lds.S |  1 +
 arch/mips/kernel/vmlinux.lds.S       |  1 +
 arch/mn10300/kernel/vmlinux.lds.S    |  1 +
 arch/nios2/kernel/vmlinux.lds.S      |  1 +
 arch/openrisc/kernel/vmlinux.lds.S   |  1 +
 arch/parisc/kernel/vmlinux.lds.S     |  1 +
 arch/powerpc/kernel/vmlinux.lds.S    |  1 +
 arch/s390/kernel/vmlinux.lds.S       |  1 +
 arch/score/kernel/vmlinux.lds.S      |  1 +
 arch/sh/kernel/vmlinux.lds.S         |  1 +
 arch/sparc/kernel/vmlinux.lds.S      |  1 +
 arch/tile/kernel/entry.S             |  2 +-
 arch/tile/kernel/vmlinux.lds.S       |  1 +
 arch/um/kernel/dyn.lds.S             |  1 +
 arch/um/kernel/uml.lds.S             |  1 +
 arch/unicore32/kernel/vmlinux.lds.S  |  1 +
 arch/x86/include/asm/irqflags.h      | 12 ++++++++----
 arch/x86/kernel/acpi/cstate.c        |  2 +-
 arch/x86/kernel/process.c            |  4 ++--
 arch/x86/kernel/vmlinux.lds.S        |  1 +
 arch/xtensa/kernel/vmlinux.lds.S     |  3 +++
 drivers/acpi/processor_idle.c        |  5 +++--
 drivers/cpuidle/driver.c             |  5 +++--
 drivers/idle/intel_idle.c            |  4 ++--
 include/asm-generic/vmlinux.lds.h    |  6 ++++++
 include/linux/cpu.h                  |  5 +++++
 kernel/sched/idle.c                  | 13 +++++++++++--
 lib/nmi_backtrace.c                  | 16 +++++++++++-----
 scripts/mod/modpost.c                |  2 +-
 scripts/recordmcount.c               |  1 +
 scripts/recordmcount.pl              |  1 +
 49 files changed, 93 insertions(+), 22 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index 647b84c15382..cebecfb76fbf 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -22,6 +22,7 @@ SECTIONS
 		HEAD_TEXT
 		TEXT_TEXT
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		*(.fixup)
 		*(.gnu.warning)
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index 36611072305f..f35ed578e007 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -89,6 +89,7 @@ SECTIONS
 		_text = .;
 		TEXT_TEXT
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		*(.fixup)
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index cba1ec899a69..7fa487ef7e2f 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -98,6 +98,7 @@ SECTIONS
 			IRQENTRY_TEXT
 			TEXT_TEXT
 			SCHED_TEXT
+			CPUIDLE_TEXT
 			LOCK_TEXT
 			KPROBES_TEXT
 			*(.gnu.warning)
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index d24e5dd2aa7a..f7f55df0bf7b 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -111,6 +111,7 @@ SECTIONS
 			SOFTIRQENTRY_TEXT
 			TEXT_TEXT
 			SCHED_TEXT
+			CPUIDLE_TEXT
 			LOCK_TEXT
 			HYPERVISOR_TEXT
 			KPROBES_TEXT
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 5ce9b2929e0d..1105aab1e6d6 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -122,6 +122,7 @@ SECTIONS
 			ENTRY_TEXT
 			TEXT_TEXT
 			SCHED_TEXT
+			CPUIDLE_TEXT
 			LOCK_TEXT
 			KPROBES_TEXT
 			HYPERVISOR_TEXT
diff --git a/arch/avr32/kernel/vmlinux.lds.S b/arch/avr32/kernel/vmlinux.lds.S
index a4589176bed5..17f2730eb497 100644
--- a/arch/avr32/kernel/vmlinux.lds.S
+++ b/arch/avr32/kernel/vmlinux.lds.S
@@ -52,6 +52,7 @@ SECTIONS
 		KPROBES_TEXT
 		TEXT_TEXT
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		*(.fixup)
 		*(.gnu.warning)
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index d920b959ff3a..68069a120055 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -33,6 +33,7 @@ SECTIONS
 #ifndef CONFIG_SCHEDULE_L1
 		SCHED_TEXT
 #endif
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		IRQENTRY_TEXT
 		SOFTIRQENTRY_TEXT
diff --git a/arch/c6x/kernel/vmlinux.lds.S b/arch/c6x/kernel/vmlinux.lds.S
index 50bc10f97bcb..a1a5c166bc9b 100644
--- a/arch/c6x/kernel/vmlinux.lds.S
+++ b/arch/c6x/kernel/vmlinux.lds.S
@@ -70,6 +70,7 @@ SECTIONS
 		_stext = .;
 		TEXT_TEXT
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		IRQENTRY_TEXT
 		SOFTIRQENTRY_TEXT
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index 7552c2557506..979586261520 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -43,6 +43,7 @@ SECTIONS
 		HEAD_TEXT
 		TEXT_TEXT
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		*(.fixup)
 		*(.text.__*)
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S
index 7e958d829ec9..aa6e573d57da 100644
--- a/arch/frv/kernel/vmlinux.lds.S
+++ b/arch/frv/kernel/vmlinux.lds.S
@@ -63,6 +63,7 @@ SECTIONS
 	*(.text..tlbmiss)
 	TEXT_TEXT
 	SCHED_TEXT
+	CPUIDLE_TEXT
 	LOCK_TEXT
 #ifdef CONFIG_DEBUG_INFO
 	INIT_TEXT
diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S
index cb5dfb02c88d..7f11da1b895e 100644
--- a/arch/h8300/kernel/vmlinux.lds.S
+++ b/arch/h8300/kernel/vmlinux.lds.S
@@ -29,6 +29,7 @@ SECTIONS
	_stext = . ;
	TEXT_TEXT
	SCHED_TEXT
+	CPUIDLE_TEXT
	LOCK_TEXT
 #if defined(CONFIG_ROMKERNEL)
	*(.int_redirect)
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
index 5f268c1071b3..ec87e67feb19 100644
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -50,6 +50,7 @@ SECTIONS
 		_text = .;
 		TEXT_TEXT
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		*(.fixup)
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index dc506b05ffbd..f89d20c97412 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -46,6 +46,7 @@ SECTIONS {
 		__end_ivt_text = .;
 		TEXT_TEXT
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		*(.gnu.linkonce.t*)
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index 018e4a711d79..ad1fe56455aa 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -31,6 +31,7 @@ SECTIONS
 	HEAD_TEXT
 	TEXT_TEXT
 	SCHED_TEXT
+	CPUIDLE_TEXT
 	LOCK_TEXT
 	*(.fixup)
 	*(.gnu.warning)
diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds
index 06a763f49fd3..d2c8abf1c8c4 100644
--- a/arch/m68k/kernel/vmlinux-nommu.lds
+++ b/arch/m68k/kernel/vmlinux-nommu.lds
@@ -45,6 +45,7 @@ SECTIONS {
 		HEAD_TEXT
 		TEXT_TEXT
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		*(.fixup)
 		. = ALIGN(16);
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index d0993594f558..5b5ce1e4d1ed 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -16,6 +16,7 @@ SECTIONS
 	HEAD_TEXT
 	TEXT_TEXT
 	SCHED_TEXT
+	CPUIDLE_TEXT
 	LOCK_TEXT
 	*(.fixup)
 	*(.gnu.warning)
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index 8080469ee6c1..fe5ea1974b16 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -16,6 +16,7 @@ SECTIONS
 	HEAD_TEXT
 	TEXT_TEXT
 	SCHED_TEXT
+	CPUIDLE_TEXT
 	LOCK_TEXT
 	*(.fixup)
 	*(.gnu.warning)
diff --git a/arch/metag/kernel/vmlinux.lds.S b/arch/metag/kernel/vmlinux.lds.S
index 150ace92c7ad..e6c700eaf207 100644
--- a/arch/metag/kernel/vmlinux.lds.S
+++ b/arch/metag/kernel/vmlinux.lds.S
@@ -21,6 +21,7 @@ SECTIONS
 	.text : {
 		TEXT_TEXT
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index 0a47f0410554..289d0e7f3e3a 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -33,6 +33,7 @@ SECTIONS {
 		EXIT_TEXT
 		EXIT_CALL
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index a82c178d0bb9..d5de67591735 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -55,6 +55,7 @@ SECTIONS
 	.text : {
 		TEXT_TEXT
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S
index 13c4814c29f8..2d5f1c3f1afb 100644
--- a/arch/mn10300/kernel/vmlinux.lds.S
+++ b/arch/mn10300/kernel/vmlinux.lds.S
@@ -30,6 +30,7 @@ SECTIONS
 	HEAD_TEXT
 	TEXT_TEXT
 	SCHED_TEXT
+	CPUIDLE_TEXT
 	LOCK_TEXT
 	KPROBES_TEXT
 	*(.fixup)
diff --git a/arch/nios2/kernel/vmlinux.lds.S b/arch/nios2/kernel/vmlinux.lds.S
index e23e89539967..6a8045bb1a77 100644
--- a/arch/nios2/kernel/vmlinux.lds.S
+++ b/arch/nios2/kernel/vmlinux.lds.S
@@ -37,6 +37,7 @@ SECTIONS
 	.text : {
 		TEXT_TEXT
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		IRQENTRY_TEXT
 		SOFTIRQENTRY_TEXT
diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
index d936de4c07ca..d68b9ede8423 100644
--- a/arch/openrisc/kernel/vmlinux.lds.S
+++ b/arch/openrisc/kernel/vmlinux.lds.S
@@ -47,6 +47,7 @@ SECTIONS
 	  _stext = .;
 	  TEXT_TEXT
 	  SCHED_TEXT
+	  CPUIDLE_TEXT
 	  LOCK_TEXT
 	  KPROBES_TEXT
 	  IRQENTRY_TEXT
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index f3ead0b6ce46..9ec8ec075dae 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -69,6 +69,7 @@ SECTIONS
 	.text ALIGN(PAGE_SIZE) : {
 		TEXT_TEXT
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index b5fba689fca6..7ed59f0d947f 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -52,6 +52,7 @@ SECTIONS
 		/* careful! __ftr_alt_* sections need to be close to .text */
 		*(.text .fixup __ftr_alt_* .ref.text)
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 429bfd111961..000e6e91f6a0 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -35,6 +35,7 @@ SECTIONS
 	HEAD_TEXT
 	TEXT_TEXT
 	SCHED_TEXT
+	CPUIDLE_TEXT
 	LOCK_TEXT
 	KPROBES_TEXT
 	IRQENTRY_TEXT
diff --git a/arch/score/kernel/vmlinux.lds.S b/arch/score/kernel/vmlinux.lds.S
index 7274b5c4287e..4117890b1db1 100644
--- a/arch/score/kernel/vmlinux.lds.S
+++ b/arch/score/kernel/vmlinux.lds.S
@@ -40,6 +40,7 @@ SECTIONS
 		_text = .;	/* Text and read-only data */
 		TEXT_TEXT
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		*(.text.*)
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 235a4101999f..5b9a3cc90c58 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -36,6 +36,7 @@ SECTIONS
 		TEXT_TEXT
 		EXTRA_TEXT
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index d79b3b734245..572db686f845 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -49,6 +49,7 @@ SECTIONS
 		HEAD_TEXT
 		TEXT_TEXT
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 670a3569450f..101de132e363 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -50,7 +50,7 @@ STD_ENTRY(smp_nap)
  * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
  * as a result return to the function that called _cpu_idle().
  */
-STD_ENTRY(_cpu_idle)
+STD_ENTRY_SECTION(_cpu_idle, .cpuidle.text)
 	movei r1, 1
 	IRQ_ENABLE_LOAD(r2, r3)
 	mtspr INTERRUPT_CRITICAL_SECTION, r1
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index 9d449caf8910..e1baf094fba4 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -42,6 +42,7 @@ SECTIONS
 	.text : AT (ADDR(.text) - LOAD_OFFSET) {
 		HEAD_TEXT
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index adde088aeeff..4fdbcf958cd5 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -68,6 +68,7 @@ SECTIONS
     _stext = .;
    TEXT_TEXT
    SCHED_TEXT
+   CPUIDLE_TEXT
    LOCK_TEXT
    *(.fixup)
    *(.stub .text.* .gnu.linkonce.t.*)
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 6899195602b7..1840f55ed042 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -28,6 +28,7 @@ SECTIONS
   _stext = .;
   TEXT_TEXT
   SCHED_TEXT
+  CPUIDLE_TEXT
   LOCK_TEXT
   *(.fixup)
   /* .gnu.warning sections are handled specially by elf32.em.  */
diff --git a/arch/unicore32/kernel/vmlinux.lds.S b/arch/unicore32/kernel/vmlinux.lds.S
index 77e407e49a63..56e788e8ee83 100644
--- a/arch/unicore32/kernel/vmlinux.lds.S
+++ b/arch/unicore32/kernel/vmlinux.lds.S
@@ -37,6 +37,7 @@ SECTIONS
 	.text : {		/* Real text segment */
 		TEXT_TEXT
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 
 		*(.fixup)
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index b77f5edb03b0..ac7692dcfa2e 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -4,6 +4,10 @@
 #include
 
 #ifndef __ASSEMBLY__
+
+/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
+#define __cpuidle __attribute__((__section__(".cpuidle.text")))
+
 /*
  * Interrupt control:
  */
@@ -44,12 +48,12 @@ static inline void native_irq_enable(void)
 	asm volatile("sti": : :"memory");
 }
 
-static inline void native_safe_halt(void)
+static inline __cpuidle void native_safe_halt(void)
 {
 	asm volatile("sti; hlt": : :"memory");
 }
 
-static inline void native_halt(void)
+static inline __cpuidle void native_halt(void)
 {
 	asm volatile("hlt": : :"memory");
 }
@@ -86,7 +90,7 @@ static inline notrace void arch_local_irq_enable(void)
  * Used in the idle loop; sti takes one instruction cycle
  * to complete:
  */
-static inline void arch_safe_halt(void)
+static inline __cpuidle void arch_safe_halt(void)
 {
 	native_safe_halt();
 }
@@ -95,7 +99,7 @@ static inline void arch_safe_halt(void)
  * Used when interrupts are already enabled or to
  * shutdown the processor:
  */
-static inline void halt(void)
+static inline __cpuidle void halt(void)
 {
 	native_halt();
 }
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index bdfad642123f..af15f4444330 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -152,7 +152,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 }
 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
 
-void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
+void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
 {
 	unsigned int cpu = smp_processor_id();
 	struct cstate_entry *percpu_entry;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 4002b475171c..28cea7802ecb 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -302,7 +302,7 @@ void arch_cpu_idle(void)
 /*
  * We use this if we don't have any better idle routine..
  */
-void default_idle(void)
+void __cpuidle default_idle(void)
 {
 	trace_cpu_idle_rcuidle(1, smp_processor_id());
 	safe_halt();
@@ -417,7 +417,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
-static void mwait_idle(void)
+static __cpuidle void mwait_idle(void)
 {
 	if (!current_set_polling_and_test()) {
 		trace_cpu_idle_rcuidle(1, smp_processor_id());
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 9297a002d8e5..dbf67f64d5ec 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -97,6 +97,7 @@ SECTIONS
 		_stext = .;
 		TEXT_TEXT
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		ENTRY_TEXT
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 72cfe3587dd8..31411fc82662 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -89,6 +89,9 @@ SECTIONS
     VMLINUX_SYMBOL(__sched_text_start) = .;
     *(.sched.literal .sched.text)
     VMLINUX_SYMBOL(__sched_text_end) = .;
+    VMLINUX_SYMBOL(__cpuidle_text_start) = .;
+    *(.cpuidle.literal .cpuidle.text)
+    VMLINUX_SYMBOL(__cpuidle_text_end) = .;
     VMLINUX_SYMBOL(__lock_text_start) = .;
     *(.spinlock.literal .spinlock.text)
     VMLINUX_SYMBOL(__lock_text_end) = .;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index cea52528aa18..2237d3f24f0e 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -31,6 +31,7 @@
 #include	/* need_resched() */
 #include
 #include
+#include <linux/cpu.h>
 #include
 
 /*
@@ -115,7 +116,7 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
  * Callers should disable interrupts before the call and enable
  * interrupts after return.
  */
-static void acpi_safe_halt(void)
+static void __cpuidle acpi_safe_halt(void)
 {
 	if (!tif_need_resched()) {
 		safe_halt();
@@ -645,7 +646,7 @@ static int acpi_idle_bm_check(void)
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
-static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
+static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
 {
 	if (cx->entry_method == ACPI_CSTATE_FFH) {
 		/* Call into architectural FFH based C-state */
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 389ade4572be..ab264d393233 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include <linux/cpu.h>
 
 #include "cpuidle.h"
 
@@ -178,8 +179,8 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
 }
 
 #ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev,
-		     struct cpuidle_driver *drv, int index)
+static int __cpuidle poll_idle(struct cpuidle_device *dev,
+			       struct cpuidle_driver *drv, int index)
 {
 	local_irq_enable();
 	if (!current_set_polling_and_test()) {
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 67ec58f9ef99..4466a2f969d7 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -863,8 +863,8 @@ static struct cpuidle_state dnv_cstates[] = {
 *
 * Must be called under local_irq_disable().
 */
-static int intel_idle(struct cpuidle_device *dev,
-		      struct cpuidle_driver *drv, int index)
+static __cpuidle int intel_idle(struct cpuidle_device *dev,
+				struct cpuidle_driver *drv, int index)
 {
 	unsigned long ecx = 1; /* break on interrupt flag */
 	struct cpuidle_state *state = &drv->states[index];
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 24563970ff7b..3e42bcdd014b 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -454,6 +454,12 @@
 		*(.spinlock.text)					\
 		VMLINUX_SYMBOL(__lock_text_end) = .;
 
+#define CPUIDLE_TEXT							\
+		ALIGN_FUNCTION();					\
+		VMLINUX_SYMBOL(__cpuidle_text_start) = .;		\
+		*(.cpuidle.text)					\
+		VMLINUX_SYMBOL(__cpuidle_text_end) = .;
+
 #define KPROBES_TEXT							\
 		ALIGN_FUNCTION();					\
 		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 7572d9e9dced..b886dc17f2f3 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -231,6 +231,11 @@ void cpu_startup_entry(enum cpuhp_state state);
 
 void cpu_idle_poll_ctrl(bool enable);
 
+/* Attach to any functions which should be considered cpuidle. */
+#define __cpuidle	__attribute__((__section__(".cpuidle.text")))
+
+bool cpu_in_idle(unsigned long pc);
+
 void arch_cpu_idle(void);
 void arch_cpu_idle_prepare(void);
 void arch_cpu_idle_enter(void);
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 9fb873cfc75c..1d8718d5300d 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -16,6 +16,9 @@
 
 #include "sched.h"
 
+/* Linker adds these: start and end of __cpuidle functions */
+extern char __cpuidle_text_start[], __cpuidle_text_end[];
+
 /**
  * sched_idle_set_state - Record idle state for the current CPU.
  * @idle_state: State to record.
@@ -53,7 +56,7 @@ static int __init cpu_idle_nopoll_setup(char *__unused)
 __setup("hlt", cpu_idle_nopoll_setup);
 #endif
 
-static inline int cpu_idle_poll(void)
+static noinline int __cpuidle cpu_idle_poll(void)
 {
 	rcu_idle_enter();
 	trace_cpu_idle_rcuidle(0, smp_processor_id());
@@ -84,7 +87,7 @@ void __weak arch_cpu_idle(void)
 *
 * To use when the cpuidle framework cannot be used.
 */
-void default_idle_call(void)
+void __cpuidle default_idle_call(void)
 {
 	if (current_clr_polling_and_test()) {
 		local_irq_enable();
@@ -271,6 +274,12 @@ static void cpu_idle_loop(void)
 	}
 }
 
+bool cpu_in_idle(unsigned long pc)
+{
+	return pc >= (unsigned long)__cpuidle_text_start &&
+		pc < (unsigned long)__cpuidle_text_end;
+}
+
 void cpu_startup_entry(enum cpuhp_state state)
 {
 	/*
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 393a3cca1f47..75554754eadf 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include <linux/cpu.h>
 
 #ifdef arch_trigger_cpumask_backtrace
 /* For reliability, we're prepared to waste bits here. */
@@ -87,11 +88,16 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
 	int cpu = smp_processor_id();
 
 	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
-		pr_warn("NMI backtrace for cpu %d\n", cpu);
-		if (regs)
-			show_regs(regs);
-		else
-			dump_stack();
+		if (regs && cpu_in_idle(instruction_pointer(regs))) {
+			pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
+				cpu, instruction_pointer(regs));
+		} else {
+			pr_warn("NMI backtrace for cpu %d\n", cpu);
+			if (regs)
+				show_regs(regs);
+			else
+				dump_stack();
+		}
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 		return true;
 	}
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 48958d3cec9e..bd8349759095 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -888,7 +888,7 @@ static void check_section(const char *modname, struct elf_info *elf,
 
 #define DATA_SECTIONS ".data", ".data.rel"
 #define TEXT_SECTIONS ".text", ".text.unlikely", ".sched.text", \
-		".kprobes.text"
+		".kprobes.text", ".cpuidle.text"
 #define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \
 		".fixup", ".entry.text", ".exception.text", ".text.*", \
 		".coldtext"
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index a68f03133df9..5423a58d1b06 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -365,6 +365,7 @@ is_mcounted_section_name(char const *const txtname)
 		strcmp(".irqentry.text", txtname) == 0 ||
 		strcmp(".softirqentry.text", txtname) == 0 ||
 		strcmp(".kprobes.text", txtname) == 0 ||
+		strcmp(".cpuidle.text", txtname) == 0 ||
 		strcmp(".text.unlikely", txtname) == 0;
 }
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 2d48011bc362..faac4b10d8ea 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -136,6 +136,7 @@ my %text_sections = (
      ".irqentry.text" => 1,
      ".softirqentry.text" => 1,
      ".kprobes.text" => 1,
+     ".cpuidle.text" => 1,
      ".text.unlikely" => 1,
 );
--
cgit v1.2.3
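As a closing illustration of the pattern this last patch establishes:
an architecture opts its idle routines into the one-line reporting by
tagging them __cpuidle (and by adding CPUIDLE_TEXT to its linker
script, as the per-arch hunks above do).  A minimal sketch for a
hypothetical architecture follows; the function body is illustrative
only and is not taken from any of the patches above:

	#include <linux/cpu.h>		/* __cpuidle, cpu_in_idle() */
	#include <linux/irqflags.h>	/* local_irq_enable() */
	#include <asm/processor.h>	/* cpu_relax() */

	/*
	 * Tagged __cpuidle, this routine is linked into .cpuidle.text, so
	 * an NMI backtrace that lands here is reported as a one-line
	 * "NMI backtrace for cpu N skipped: idling at pc 0x..." message.
	 */
	void __cpuidle arch_cpu_idle(void)
	{
		local_irq_enable();	/* arch_cpu_idle() re-enables interrupts */
		cpu_relax();		/* stand-in for the arch's real wait instruction */
	}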