author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-02-16 02:50:50 +0100
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-04-18 20:38:19 +0200
commit | bdcabf4c7db719129ca6cb94b02f50aa4726c952 (patch)
tree | e487ed5e58c2bd3f77c790372ec207444a7f5e79 /kernel/rcu
parent | srcu: Allow early boot use of synchronize_srcu() (diff)
rcu: Add single-element dequeue functions to rcu_segcblist
This commit adds single-element dequeue functions to rcu_segcblist.
These are less efficient than using the extract and insert functions,
but allow more precise debugging code. These functions are thus
expected to be used only in debug builds, for example, CONFIG_PROVE_RCU builds.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu')
-rw-r--r-- | kernel/rcu/rcu_segcblist.h | 45
1 file changed, 45 insertions(+), 0 deletions(-)
diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h
index 24078f3c0218..982e3e05b22a 100644
--- a/kernel/rcu/rcu_segcblist.h
+++ b/kernel/rcu/rcu_segcblist.h
@@ -290,6 +290,51 @@ static inline bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp)
 }
 
 /*
+ * Dequeue and return the first ready-to-invoke callback. If there
+ * are no ready-to-invoke callbacks, return NULL. Disables interrupts
+ * to avoid interference. Does not protect from interference from other
+ * CPUs or tasks.
+ */
+static inline struct rcu_head *
+rcu_segcblist_dequeue(struct rcu_segcblist *rsclp)
+{
+	unsigned long flags;
+	int i;
+	struct rcu_head *rhp;
+
+	local_irq_save(flags);
+	if (!rcu_segcblist_ready_cbs(rsclp)) {
+		local_irq_restore(flags);
+		return NULL;
+	}
+	rhp = rsclp->head;
+	BUG_ON(!rhp);
+	rsclp->head = rhp->next;
+	for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++) {
+		if (rsclp->tails[i] != &rhp->next)
+			break;
+		rsclp->tails[i] = &rsclp->head;
+	}
+	smp_mb(); /* Dequeue before decrement for rcu_barrier(). */
+	WRITE_ONCE(rsclp->len, rsclp->len - 1);
+	local_irq_restore(flags);
+	return rhp;
+}
+
+/*
+ * Account for the fact that a previously dequeued callback turned out
+ * to be marked as lazy.
+ */
+static inline void rcu_segcblist_dequeued_lazy(struct rcu_segcblist *rsclp)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	rsclp->len_lazy--;
+	local_irq_restore(flags);
+}
+
+/*
  * Return a pointer to the first callback in the specified rcu_segcblist
  * structure. This is useful for diagnostics.
  */
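For orientation, here is a minimal sketch of how a debug build might consume the new API. The helper names debug_rcu_invoke_ready_cbs() and debug_cb_is_lazy() are illustrative assumptions (as is the stubbed-out lazy test), not part of this patch; only rcu_segcblist_dequeue(), rcu_segcblist_dequeued_lazy(), and the struct rcu_head ->func field come from the kernel sources.

/* Hypothetical lazy-callback test; real lazy detection is out of scope here. */
static bool debug_cb_is_lazy(struct rcu_head *rhp)
{
	return false;
}

/* Dequeue and invoke ready callbacks one at a time so each can be checked. */
static void debug_rcu_invoke_ready_cbs(struct rcu_segcblist *rsclp)
{
	struct rcu_head *rhp;

	while ((rhp = rcu_segcblist_dequeue(rsclp)) != NULL) {
		WARN_ON_ONCE(!rhp->func);	/* Per-callback sanity check. */
		if (debug_cb_is_lazy(rhp))	/* Keep ->len_lazy accurate. */
			rcu_segcblist_dequeued_lazy(rsclp);
		rhp->func(rhp);			/* Invoke the callback. */
	}
}

Dequeuing one element at a time is slower than the bulk extract/insert interfaces, but it lets a check such as the WARN_ON_ONCE() above name the exact callback that fails, which is the point of the debug-only design described in the commit message.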