author     Tejun Heo <tj@kernel.org>    2015-09-29 23:47:17 +0200
committer  Tejun Heo <tj@kernel.org>    2016-08-10 21:02:58 +0200
commit     b2302c7fdc654d249c546aac6228b8e10969bc1e (patch)
tree       d1c0aa8b480c300ab85eb20651b2445a3137a816
parent     percpu_ref: remove unnecessary RCU grace period for staggered atomic switchin... (diff)
percpu_ref: reorganize __percpu_ref_switch_to_atomic() and relocate percpu_ref_switch_to_atomic()
Reorganize __percpu_ref_switch_to_atomic() so that it looks structurally similar to __percpu_ref_switch_to_percpu() and relocate percpu_ref_switch_to_atomic() so that the two internal functions are co-located.

This patch doesn't introduce any functional differences.

Signed-off-by: Tejun Heo <tj@kernel.org>
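For context, a minimal caller-side sketch of the interface this patch moves around. It is not part of the patch: the percpu_ref_switch_to_atomic() signature comes from the hunks below and the rest from the standard <linux/percpu-refcount.h> API (percpu_ref_init(), percpu_ref_func_t); my_ref, my_release, my_confirm and my_init are made-up names used only for illustration.

#include <linux/percpu-refcount.h>

static struct percpu_ref my_ref;

/* Called when the last reference to @ref is dropped. */
static void my_release(struct percpu_ref *ref)
{
	/* free the object that embeds/owns @ref */
}

/* Runs once all CPUs are guaranteed to be in atomic mode; must not block. */
static void my_confirm(struct percpu_ref *ref)
{
}

static int my_init(void)
{
	int ret;

	ret = percpu_ref_init(&my_ref, my_release, 0, GFP_KERNEL);
	if (ret)
		return ret;

	/*
	 * Normally non-blocking, but per the kernel-doc in the diff below
	 * it may wait if @ref is already switching to atomic mode.
	 */
	percpu_ref_switch_to_atomic(&my_ref, my_confirm);
	return 0;
}

As the reorganized hunk shows, the confirmation callback is handed off via call_rcu_sched() and therefore runs from RCU callback context, which is why it may not block.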
-rw-r--r--  lib/percpu-refcount.c  98
1 file changed, 50 insertions, 48 deletions
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 8ade009ca2c9..599a78c0633b 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -161,56 +161,30 @@ static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
percpu_ref_func_t *confirm_switch)
{
- if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) {
- /* switching from percpu to atomic */
- ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
-
- /*
- * Non-NULL ->confirm_switch is used to indicate that
- * switching is in progress. Use noop one if unspecified.
- */
- WARN_ON_ONCE(ref->confirm_switch);
- ref->confirm_switch =
- confirm_switch ?: percpu_ref_noop_confirm_switch;
-
- percpu_ref_get(ref); /* put after confirmation */
- call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
- } else if (confirm_switch) {
- /*
- * Somebody else already set ATOMIC. Wait for its
- * completion and invoke @confirm_switch() directly.
- */
- wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
- confirm_switch(ref);
+ if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
+ if (confirm_switch) {
+ /*
+ * Somebody else already set ATOMIC. Wait for its
+ * completion and invoke @confirm_switch() directly.
+ */
+ wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
+ confirm_switch(ref);
+ }
+ return;
}
-}
-/**
- * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
- * @ref: percpu_ref to switch to atomic mode
- * @confirm_switch: optional confirmation callback
- *
- * There's no reason to use this function for the usual reference counting.
- * Use percpu_ref_kill[_and_confirm]().
- *
- * Schedule switching of @ref to atomic mode. All its percpu counts will
- * be collected to the main atomic counter. On completion, when all CPUs
- * are guaranteed to be in atomic mode, @confirm_switch, which may not
- * block, is invoked. This function may be invoked concurrently with all
- * the get/put operations and can safely be mixed with kill and reinit
- * operations. Note that @ref will stay in atomic mode across kill/reinit
- * cycles until percpu_ref_switch_to_percpu() is called.
- *
- * This function normally doesn't block and can be called from any context
- * but it may block if @confirm_switch is specified and @ref is already in
- * the process of switching to atomic mode. In such cases, @confirm_switch
- * will be invoked after the switching is complete.
- */
-void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
- percpu_ref_func_t *confirm_switch)
-{
- ref->force_atomic = true;
- __percpu_ref_switch_to_atomic(ref, confirm_switch);
+ /* switching from percpu to atomic */
+ ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+
+ /*
+ * Non-NULL ->confirm_switch is used to indicate that switching is
+ * in progress. Use noop one if unspecified.
+ */
+ WARN_ON_ONCE(ref->confirm_switch);
+ ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;
+
+ percpu_ref_get(ref); /* put after confirmation */
+ call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
}
static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
@@ -241,6 +215,34 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
}
/**
+ * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
+ * @ref: percpu_ref to switch to atomic mode
+ * @confirm_switch: optional confirmation callback
+ *
+ * There's no reason to use this function for the usual reference counting.
+ * Use percpu_ref_kill[_and_confirm]().
+ *
+ * Schedule switching of @ref to atomic mode. All its percpu counts will
+ * be collected to the main atomic counter. On completion, when all CPUs
+ * are guaranteed to be in atomic mode, @confirm_switch, which may not
+ * block, is invoked. This function may be invoked concurrently with all
+ * the get/put operations and can safely be mixed with kill and reinit
+ * operations. Note that @ref will stay in atomic mode across kill/reinit
+ * cycles until percpu_ref_switch_to_percpu() is called.
+ *
+ * This function normally doesn't block and can be called from any context
+ * but it may block if @confirm_switch is specified and @ref is already in
+ * the process of switching to atomic mode. In such cases, @confirm_switch
+ * will be invoked after the switching is complete.
+ */
+void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch)
+{
+ ref->force_atomic = true;
+ __percpu_ref_switch_to_atomic(ref, confirm_switch);
+}
+
+/**
* percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
* @ref: percpu_ref to switch to percpu mode
*