-rw-r--r--  include/linux/rcupreempt.h  |  51
-rw-r--r--  kernel/rcupreempt.c         |  48
2 files changed, 53 insertions, 46 deletions
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 74304b4538d8..3eb8fdd19445 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -40,30 +40,15 @@
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
 
-struct rcu_dyntick_sched {
-	int dynticks;
-	int dynticks_snap;
-	int sched_qs;
-	int sched_qs_snap;
-	int sched_dynticks_snap;
-};
-
-DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
-
-static inline void rcu_qsctr_inc(int cpu)
-{
-	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
-
-	rdssp->sched_qs++;
-}
-#define rcu_bh_qsctr_inc(cpu)
+extern void rcu_qsctr_inc(int cpu);
+static inline void rcu_bh_qsctr_inc(int cpu) { }
 
 /*
  * Someone might want to pass call_rcu_bh as a function pointer.
  * So this needs to just be a rename and not a macro function.
  *  (no parentheses)
  */
-#define call_rcu_bh	 	call_rcu
+#define call_rcu_bh		call_rcu
 
 /**
  * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
@@ -117,30 +102,12 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
 struct softirq_action;
 
 #ifdef CONFIG_NO_HZ
-
-static inline void rcu_enter_nohz(void)
-{
-	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
-
-	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
-	__get_cpu_var(rcu_dyntick_sched).dynticks++;
-	WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
-}
-
-static inline void rcu_exit_nohz(void)
-{
-	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
-
-	__get_cpu_var(rcu_dyntick_sched).dynticks++;
-	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
-	WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
-				&rs);
-}
-
-#else /* CONFIG_NO_HZ */
-#define rcu_enter_nohz()	do { } while (0)
-#define rcu_exit_nohz()		do { } while (0)
-#endif /* CONFIG_NO_HZ */
+extern void rcu_enter_nohz(void);
+extern void rcu_exit_nohz(void);
+#else
+# define rcu_enter_nohz()	do { } while (0)
+# define rcu_exit_nohz()	do { } while (0)
+#endif
 
 /*
  * A context switch is a grace period for rcupreempt synchronize_rcu()
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 5d59e850fb71..ce97a4df64d3 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -147,7 +147,51 @@ struct rcu_ctrlblk {
 	wait_queue_head_t sched_wq;	/* Place for rcu_sched to sleep. */
 };
 
+struct rcu_dyntick_sched {
+	int dynticks;
+	int dynticks_snap;
+	int sched_qs;
+	int sched_qs_snap;
+	int sched_dynticks_snap;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
+	.dynticks = 1,
+};
+
+void rcu_qsctr_inc(int cpu)
+{
+	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
+
+	rdssp->sched_qs++;
+}
+
+#ifdef CONFIG_NO_HZ
+
+void rcu_enter_nohz(void)
+{
+	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
+
+	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+	__get_cpu_var(rcu_dyntick_sched).dynticks++;
+	WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
+}
+
+void rcu_exit_nohz(void)
+{
+	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
+
+	__get_cpu_var(rcu_dyntick_sched).dynticks++;
+	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+	WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
+				&rs);
+}
+
+#endif /* CONFIG_NO_HZ */
+
+
 static DEFINE_PER_CPU(struct rcu_data, rcu_data);
+
 static struct rcu_ctrlblk rcu_ctrlblk = {
 	.fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock),
 	.completed = 0,
@@ -427,10 +471,6 @@ static void __rcu_advance_callbacks(struct rcu_data *rdp)
 	}
 }
 
-DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
-	.dynticks = 1,
-};
-
 #ifdef CONFIG_NO_HZ
 
 static DEFINE_PER_CPU(int, rcu_update_flag);
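
For reference, the odd/even convention behind the dynticks counter that rcu_enter_nohz()/rcu_exit_nohz() maintain (and that the WARN_ON_RATELIMIT() calls check) can be illustrated with a small user-space model. This is only a sketch of the convention, not the kernel code: struct cpu_model and the model_* functions are hypothetical names introduced here, and the quiescent-state check is a simplified stand-in for the snapshot comparisons done elsewhere in kernel/rcupreempt.c.

/*
 * Illustrative user-space model (not kernel code) of the dynticks
 * convention: the per-CPU counter starts at 1 and is incremented on
 * every nohz transition, so an odd value means the CPU is active and
 * an even value means it is in nohz idle.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct cpu_model {
	int dynticks;	/* odd: active, even: in nohz idle */
};

static void model_enter_nohz(struct cpu_model *c)
{
	c->dynticks++;				/* active (odd) -> idle (even) */
	assert((c->dynticks & 0x1) == 0);	/* same invariant as the kernel's WARN_ON_RATELIMIT() */
}

static void model_exit_nohz(struct cpu_model *c)
{
	c->dynticks++;				/* idle (even) -> active (odd) */
	assert((c->dynticks & 0x1) == 1);
}

/*
 * Simplified check: given an earlier snapshot, has this CPU either
 * been in nohz idle at snapshot time (snapshot even) or passed
 * through at least one nohz transition since (counter changed)?
 */
static bool model_cpu_quiet_since(const struct cpu_model *c, int snap)
{
	return ((snap & 0x1) == 0) || (c->dynticks != snap);
}

int main(void)
{
	struct cpu_model cpu = { .dynticks = 1 };
	int snap = cpu.dynticks;

	printf("quiet since snapshot? %d\n", model_cpu_quiet_since(&cpu, snap));	/* 0 */
	model_enter_nohz(&cpu);
	model_exit_nohz(&cpu);
	printf("quiet since snapshot? %d\n", model_cpu_quiet_since(&cpu, snap));	/* 1 */
	return 0;
}

The per-CPU variable in the patch is initialized with .dynticks = 1 for the same reason the model starts at 1: an odd value marks a CPU that has not yet entered nohz idle.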