-rw-r--r--  include/linux/rcutiny.h   |  4 -
-rw-r--r--  include/linux/rcutree.h   |  4 -
-rw-r--r--  kernel/context_tracking.c | 71 ++++++++++++++++-
-rw-r--r--  kernel/rcu/tree.c         | 83 --------------------
4 files changed, 67 insertions, 95 deletions
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 5fed476f977f..591119413cf1 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -78,10 +78,6 @@ static inline void rcu_cpu_stall_reset(void) { }
 static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
 static inline void rcu_idle_enter(void) { }
 static inline void rcu_idle_exit(void) { }
-static inline void rcu_irq_enter(void) { }
-static inline void rcu_irq_exit_irqson(void) { }
-static inline void rcu_irq_enter_irqson(void) { }
-static inline void rcu_irq_exit(void) { }
 static inline void rcu_irq_exit_check_preempt(void) { }
 #define rcu_is_idle_cpu(cpu) \
 	(is_idle_task(current) && !in_nmi() && !in_hardirq() && !in_serving_softirq())
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 9c6cfb742504..4522b6a7cc42 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -47,10 +47,6 @@ void cond_synchronize_rcu(unsigned long oldstate);
 
 void rcu_idle_enter(void);
 void rcu_idle_exit(void);
-void rcu_irq_enter(void);
-void rcu_irq_exit(void);
-void rcu_irq_enter_irqson(void);
-void rcu_irq_exit_irqson(void);
 bool rcu_is_idle_cpu(int cpu);
 
 #ifdef CONFIG_PROVE_RCU
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index b8a731f20778..c0d86dac98f1 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -36,24 +36,87 @@ void ct_idle_exit(void)
 }
 EXPORT_SYMBOL_GPL(ct_idle_exit);
 
+/**
+ * ct_irq_enter - inform RCU that current CPU is entering irq away from idle
+ *
+ * Enter an interrupt handler, which might possibly result in exiting
+ * idle mode, in other words, entering the mode in which read-side critical
+ * sections can occur. The caller must have disabled interrupts.
+ *
+ * Note that the Linux kernel is fully capable of entering an interrupt
+ * handler that it never exits, for example when doing upcalls to user mode!
+ * This code assumes that the idle loop never does upcalls to user mode.
+ * If your architecture's idle loop does do upcalls to user mode (or does
+ * anything else that results in unbalanced calls to the irq_enter() and
+ * irq_exit() functions), RCU will give you what you deserve, good and hard.
+ * But very infrequently and irreproducibly.
+ *
+ * Use things like work queues to work around this limitation.
+ *
+ * You have been warned.
+ *
+ * If you add or remove a call to ct_irq_enter(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
+ */
 noinstr void ct_irq_enter(void)
 {
-	rcu_irq_enter();
+	lockdep_assert_irqs_disabled();
+	ct_nmi_enter();
 }
 
+/**
+ * ct_irq_exit - inform RCU that current CPU is exiting irq towards idle
+ *
+ * Exit from an interrupt handler, which might possibly result in entering
+ * idle mode, in other words, leaving the mode in which read-side critical
+ * sections can occur. The caller must have disabled interrupts.
+ *
+ * This code assumes that the idle loop never does anything that might
+ * result in unbalanced calls to irq_enter() and irq_exit(). If your
+ * architecture's idle loop violates this assumption, RCU will give you what
+ * you deserve, good and hard. But very infrequently and irreproducibly.
+ *
+ * Use things like work queues to work around this limitation.
+ *
+ * You have been warned.
+ *
+ * If you add or remove a call to ct_irq_exit(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
+ */
 noinstr void ct_irq_exit(void)
 {
-	rcu_irq_exit();
+	lockdep_assert_irqs_disabled();
+	ct_nmi_exit();
 }
 
+/*
+ * Wrapper for ct_irq_enter() where interrupts are enabled.
+ *
+ * If you add or remove a call to ct_irq_enter_irqson(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
+ */
 void ct_irq_enter_irqson(void)
 {
-	rcu_irq_enter_irqson();
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ct_irq_enter();
+	local_irq_restore(flags);
 }
 
+/*
+ * Wrapper for ct_irq_exit() where interrupts are enabled.
+ *
+ * If you add or remove a call to ct_irq_exit_irqson(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
+ */
 void ct_irq_exit_irqson(void)
 {
-	rcu_irq_exit_irqson();
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ct_irq_exit();
+	local_irq_restore(flags);
 }
 
 noinstr void ct_nmi_enter(void)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 051fed0844b6..75b433dba427 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -789,31 +789,6 @@ noinstr void rcu_nmi_exit(void)
 	rcu_dynticks_task_enter();
 }
 
-/**
- * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
- *
- * Exit from an interrupt handler, which might possibly result in entering
- * idle mode, in other words, leaving the mode in which read-side critical
- * sections can occur. The caller must have disabled interrupts.
- *
- * This code assumes that the idle loop never does anything that might
- * result in unbalanced calls to irq_enter() and irq_exit(). If your
- * architecture's idle loop violates this assumption, RCU will give you what
- * you deserve, good and hard. But very infrequently and irreproducibly.
- *
- * Use things like work queues to work around this limitation.
- *
- * You have been warned.
- *
- * If you add or remove a call to rcu_irq_exit(), be sure to test with
- * CONFIG_RCU_EQS_DEBUG=y.
- */
-void noinstr rcu_irq_exit(void)
-{
-	lockdep_assert_irqs_disabled();
-	rcu_nmi_exit();
-}
-
 #ifdef CONFIG_PROVE_RCU
 /**
  * rcu_irq_exit_check_preempt - Validate that scheduling is possible
@@ -833,21 +808,6 @@ void rcu_irq_exit_check_preempt(void)
 #endif /* #ifdef CONFIG_PROVE_RCU */
 
 /*
- * Wrapper for rcu_irq_exit() where interrupts are enabled.
- *
- * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
- * with CONFIG_RCU_EQS_DEBUG=y.
- */
-void rcu_irq_exit_irqson(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	rcu_irq_exit();
-	local_irq_restore(flags);
-}
-
-/*
  * Exit an RCU extended quiescent state, which can be either the
  * idle loop or adaptive-tickless usermode execution.
  *
@@ -1041,49 +1001,6 @@ noinstr void rcu_nmi_enter(void)
 	barrier();
 }
 
-/**
- * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
- *
- * Enter an interrupt handler, which might possibly result in exiting
- * idle mode, in other words, entering the mode in which read-side critical
- * sections can occur. The caller must have disabled interrupts.
- *
- * Note that the Linux kernel is fully capable of entering an interrupt
- * handler that it never exits, for example when doing upcalls to user mode!
- * This code assumes that the idle loop never does upcalls to user mode.
- * If your architecture's idle loop does do upcalls to user mode (or does
- * anything else that results in unbalanced calls to the irq_enter() and
- * irq_exit() functions), RCU will give you what you deserve, good and hard.
- * But very infrequently and irreproducibly.
- *
- * Use things like work queues to work around this limitation.
- *
- * You have been warned.
- *
- * If you add or remove a call to rcu_irq_enter(), be sure to test with
- * CONFIG_RCU_EQS_DEBUG=y.
- */
-noinstr void rcu_irq_enter(void)
-{
-	lockdep_assert_irqs_disabled();
-	rcu_nmi_enter();
-}
-
-/*
- * Wrapper for rcu_irq_enter() where interrupts are enabled.
- *
- * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
- * with CONFIG_RCU_EQS_DEBUG=y.
- */
-void rcu_irq_enter_irqson(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	rcu_irq_enter();
-	local_irq_restore(flags);
-}
-
 /*
  * Check to see if any future non-offloaded RCU-related work will need
  * to be done by the current CPU, even if none need be done immediately,
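A side note on the kernel/context_tracking.c hunk above: the two irqs-on wrappers use the classic save/disable/call/restore pattern, so that code running with interrupts enabled can funnel into ct_irq_enter()/ct_irq_exit(), which assert that interrupts are off. The sketch below is a minimal user-space model of that pattern, not kernel code: irqs_enabled, irq_save(), irq_restore(), and the *_model() functions are hypothetical stand-ins chosen for illustration.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the CPU's interrupt-enable state. */
static bool irqs_enabled = true;

/* Models local_irq_save(): returns the prior state and disables. */
static bool irq_save(void)
{
	bool flags = irqs_enabled;

	irqs_enabled = false;
	return flags;
}

/* Models local_irq_restore(): puts back whatever state was saved. */
static void irq_restore(bool flags)
{
	irqs_enabled = flags;
}

/*
 * Models ct_irq_enter(): legal only with interrupts disabled, like
 * the lockdep_assert_irqs_disabled() check in the patch.
 */
static void ct_irq_enter_model(void)
{
	assert(!irqs_enabled);
	printf("entered irq context\n");
}

/*
 * Models ct_irq_enter_irqson(): callable with interrupts enabled,
 * because it saves, disables, calls, then restores.
 */
static void ct_irq_enter_irqson_model(void)
{
	bool flags = irq_save();

	ct_irq_enter_model();
	irq_restore(flags);
}

int main(void)
{
	ct_irq_enter_irqson_model();	/* fine with "interrupts" on */
	assert(irqs_enabled);		/* prior state was restored */
	return 0;
}

Restoring the saved state, rather than unconditionally re-enabling, is what makes such a wrapper safe from either context: a caller that already had interrupts disabled gets them back disabled on return.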