-rw-r--r--  include/linux/srcu.h |  2 +-
-rw-r--r--  kernel/srcu.c        | 19 +++++++++----------
2 files changed, 10 insertions(+), 11 deletions(-)
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index a478c8eb8479..5b49d41868c8 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -35,7 +35,7 @@ struct srcu_struct_array {
 };
 
 /* Bit definitions for field ->c above and ->snap below. */
-#define SRCU_USAGE_BITS 2
+#define SRCU_USAGE_BITS 1
 #define SRCU_REF_MASK (ULONG_MAX >> SRCU_USAGE_BITS)
 #define SRCU_USAGE_COUNT (SRCU_REF_MASK + 1)
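
For reference: with SRCU_USAGE_BITS now 1, only the top bit of each per-CPU counter word is reserved as the usage marker and the remaining bits carry the reader reference count. A standalone sketch of the resulting constants (illustrative only, not part of the patch; exact values depend on the width of unsigned long):

#include <limits.h>
#include <stdio.h>

#define SRCU_USAGE_BITS		1
#define SRCU_REF_MASK		(ULONG_MAX >> SRCU_USAGE_BITS)
#define SRCU_USAGE_COUNT	(SRCU_REF_MASK + 1)

int main(void)
{
	/* On a 64-bit build this prints 0x7fffffffffffffff and
	 * 0x8000000000000000: the low 63 bits hold the reference
	 * count and the single top bit is the usage marker. */
	printf("SRCU_REF_MASK    = %#lx\n", SRCU_REF_MASK);
	printf("SRCU_USAGE_COUNT = %#lx\n", SRCU_USAGE_COUNT);
	return 0;
}
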
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 17e95bcc901c..43f1d61e513e 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -138,14 +138,14 @@ static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
 	/*
 	 * Now, we check the ->snap array that srcu_readers_active_idx()
-	 * filled in from the per-CPU counter values. Since both
-	 * __srcu_read_lock() and __srcu_read_unlock() increment the
-	 * upper bits of the per-CPU counter, an increment/decrement
-	 * pair will change the value of the counter. Since there is
-	 * only one possible increment, the only way to wrap the counter
-	 * is to have a huge number of counter decrements, which requires
-	 * a huge number of tasks and huge SRCU read-side critical-section
-	 * nesting levels, even on 32-bit systems.
+	 * filled in from the per-CPU counter values. Since
+	 * __srcu_read_lock() increments the upper bits of the per-CPU
+	 * counter, an increment/decrement pair will change the value
+	 * of the counter. Since there is only one possible increment,
+	 * the only way to wrap the counter is to have a huge number of
+	 * counter decrements, which requires a huge number of tasks and
+	 * huge SRCU read-side critical-section nesting levels, even on
+	 * 32-bit systems.
 	 *
 	 * All of the ways of confusing the readings require that the scan
 	 * in srcu_readers_active_idx() see the read-side task's decrement,
@@ -234,8 +234,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {
 	preempt_disable();
 	smp_mb(); /* C */ /* Avoid leaking the critical section. */
-	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) +=
-		SRCU_USAGE_COUNT - 1;
+	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
 	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
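
The unlock-side change pairs with __srcu_read_lock(), which is not shown in this diff but, per the updated comment above, is now the only side that increments the upper bits (assuming it adds SRCU_USAGE_COUNT + 1: usage bit plus one reference). Under that assumption, a completed lock/unlock pair returns the reference count to its previous value while still leaving the counter word different from any earlier snapshot, which is what srcu_readers_active_idx_check() depends on. A minimal userspace model of the arithmetic (the model_* helpers are illustrative stand-ins, not the kernel API):

#include <assert.h>
#include <limits.h>

#define SRCU_USAGE_BITS		1
#define SRCU_REF_MASK		(ULONG_MAX >> SRCU_USAGE_BITS)
#define SRCU_USAGE_COUNT	(SRCU_REF_MASK + 1)

static unsigned long c;	/* stand-in for one per-CPU ->c[idx] slot */

static void model_read_lock(void)
{
	c += SRCU_USAGE_COUNT + 1;	/* set usage bit, bump reference count */
}

static void model_read_unlock(void)
{
	c -= 1;				/* drop the reference count only */
}

int main(void)
{
	unsigned long snap = c;		/* what ->snap[] would have recorded */

	model_read_lock();
	model_read_unlock();

	/* Reference count is back where it started ... */
	assert((c & SRCU_REF_MASK) == (snap & SRCU_REF_MASK));
	/* ... but the counter as a whole now differs from the snapshot. */
	assert(c != snap);
	return 0;
}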