author    Tejun Heo <tj@kernel.org>    2015-09-29 17:47:19 -0400
committer Tejun Heo <tj@kernel.org>    2016-08-10 15:02:58 -0400
commit    3f49bdd95855a33eea749304d2e10530a869218b (patch)
tree      fe3b7bd2f627ce5fd66084e9e20beec98df1f471 /lib
parent    18808354b79622ed11857e41f9044ba17aec5b01 (diff)
percpu_ref: restructure operation mode switching
Restructure atomic/percpu mode switching.

* The users of __percpu_ref_switch_to_atomic/percpu() now call a new
  function, __percpu_ref_switch_mode(), which calls either of the
  original switching functions depending on the current state of
  ref->force_atomic and the __PERCPU_REF_DEAD flag.  The callers no
  longer check whether switching is necessary but always invoke
  __percpu_ref_switch_mode().

* !ref->confirm_switch waiting is collected into
  __percpu_ref_switch_mode().

This patch doesn't cause any behavior differences.

Signed-off-by: Tejun Heo <tj@kernel.org>
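For context, a minimal caller-side sketch (not part of this commit) of how the public API drives the new unified switch. The names my_ref, my_release, and my_setup are hypothetical; the percpu_ref_* calls are the API this file implements:

#include <linux/gfp.h>
#include <linux/percpu-refcount.h>

static struct percpu_ref my_ref;	/* hypothetical example ref */

static void my_release(struct percpu_ref *ref)
{
	/* called once the last reference is dropped in atomic mode */
}

static int my_setup(void)
{
	int ret;

	/* start in percpu mode (flags == 0) */
	ret = percpu_ref_init(&my_ref, my_release, 0, GFP_KERNEL);
	if (ret)
		return ret;

	/*
	 * Sets ref->force_atomic and calls __percpu_ref_switch_mode(),
	 * which dispatches to __percpu_ref_switch_to_atomic().
	 */
	percpu_ref_switch_to_atomic(&my_ref, NULL);

	/*
	 * Clears ref->force_atomic.  After this commit the caller no
	 * longer tests __PERCPU_REF_DEAD itself; __percpu_ref_switch_mode()
	 * keeps a dying or dead ref in atomic mode until reinit.
	 */
	percpu_ref_switch_to_percpu(&my_ref);
	return 0;
}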
Diffstat (limited to 'lib')
-rw-r--r--  lib/percpu-refcount.c | 64
1 file changed, 29 insertions(+), 35 deletions(-)
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index c3617a8525d7..f3ff793691ac 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -161,16 +161,6 @@ static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
percpu_ref_func_t *confirm_switch)
{
- /*
- * If the previous ATOMIC switching hasn't finished yet, wait for
- * its completion. If the caller ensures that ATOMIC switching
- * isn't in progress, this function can be called from any context.
- * Do an extra confirm_switch test to circumvent the unconditional
- * might_sleep() in wait_event().
- */
- if (ref->confirm_switch)
- wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
-
if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
if (confirm_switch)
confirm_switch(ref);
@@ -195,16 +185,6 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
int cpu;
- /*
- * If the previous ATOMIC switching hasn't finished yet, wait for
- * its completion. If the caller ensures that ATOMIC switching
- * isn't in progress, this function can be called from any context.
- * Do an extra confirm_switch test to circumvent the unconditional
- * might_sleep() in wait_event().
- */
- if (ref->confirm_switch)
- wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
-
BUG_ON(!percpu_count);
if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
@@ -225,6 +205,25 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}
+static void __percpu_ref_switch_mode(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch)
+{
+ /*
+ * If the previous ATOMIC switching hasn't finished yet, wait for
+ * its completion. If the caller ensures that ATOMIC switching
+ * isn't in progress, this function can be called from any context.
+ * Do an extra confirm_switch test to circumvent the unconditional
+ * might_sleep() in wait_event().
+ */
+ if (ref->confirm_switch)
+ wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
+
+ if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+ __percpu_ref_switch_to_atomic(ref, confirm_switch);
+ else
+ __percpu_ref_switch_to_percpu(ref);
+}
+
/**
* percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
* @ref: percpu_ref to switch to atomic mode
@@ -241,16 +240,15 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
* operations. Note that @ref will stay in atomic mode across kill/reinit
* cycles until percpu_ref_switch_to_percpu() is called.
*
- * This function normally doesn't block and can be called from any context
- * but it may block if @confirm_kill is specified and @ref is already in
- * the process of switching to atomic mode. In such cases, @confirm_switch
- * will be invoked after the switching is complete.
+ * This function may block if @ref is in the process of switching to atomic
+ * mode. If the caller ensures that @ref is not in the process of
+ * switching to atomic mode, this function can be called from any context.
*/
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
percpu_ref_func_t *confirm_switch)
{
ref->force_atomic = true;
- __percpu_ref_switch_to_atomic(ref, confirm_switch);
+ __percpu_ref_switch_mode(ref, confirm_switch);
}
/**
@@ -267,17 +265,14 @@ void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
* dying or dead, the actual switching takes place on the following
* percpu_ref_reinit().
*
- * This function normally doesn't block and can be called from any context
- * but it may block if @ref is in the process of switching to atomic mode
- * by percpu_ref_switch_atomic().
+ * This function may block if @ref is in the process of switching to atomic
+ * mode. If the caller ensures that @ref is not in the process of
+ * switching to atomic mode, this function can be called from any context.
*/
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
ref->force_atomic = false;
-
- /* a dying or dead ref can't be switched to percpu mode w/o reinit */
- if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD))
- __percpu_ref_switch_to_percpu(ref);
+ __percpu_ref_switch_mode(ref, NULL);
}
/**
@@ -302,7 +297,7 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
"%s called more than once on %pf!", __func__, ref->release);
ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
- __percpu_ref_switch_to_atomic(ref, confirm_kill);
+ __percpu_ref_switch_mode(ref, confirm_kill);
percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
@@ -324,7 +319,6 @@ void percpu_ref_reinit(struct percpu_ref *ref)
ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
percpu_ref_get(ref);
- if (!ref->force_atomic)
- __percpu_ref_switch_to_percpu(ref);
+ __percpu_ref_switch_mode(ref, NULL);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
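As a further hedged illustration (also not from the commit), the kill/reinit cycle that percpu_ref_kill_and_confirm() and percpu_ref_reinit() now both route through __percpu_ref_switch_mode(), reusing the hypothetical my_ref from the sketch above:

/* DEAD gets set; __percpu_ref_switch_mode() takes the atomic path
 * regardless of ref->force_atomic. */
percpu_ref_kill_and_confirm(&my_ref, NULL);

/* ... drain users, let the refcount reach zero, then revive it ... */

/* DEAD is cleared; __percpu_ref_switch_mode() returns the ref to
 * percpu mode unless force_atomic is still set. */
percpu_ref_reinit(&my_ref);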