author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-02-20 14:57:17 -0800
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-04-18 11:38:19 -0700
commit		2e8c28c2dd96c6f1f2d454a4e4b928385841e247 (patch)
tree		51467dc9e6dbd051093f5d0d6863dd46beec6c11 /kernel/rcu/rcu.h
parent		bdcabf4c7db719129ca6cb94b02f50aa4726c952 (diff)
srcu: Move rcu_seq_start() and friends to rcu.h
This commit moves rcu_seq_start(), rcu_seq_end(), rcu_seq_snap(),
and rcu_seq_done() from kernel/rcu/tree.c to kernel/rcu/rcu.h.
This will allow SRCU to use these functions, which in turn will
allow SRCU to move from a single global callback queue to a
per-CPU callback queue.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
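For readers unfamiliar with the convention these helpers encode: the low-order bit of the sequence counter marks an update-side operation in progress (rcu_seq_start() makes the counter odd, rcu_seq_end() makes it even again), and rcu_seq_snap() rounds up so that rcu_seq_done() reports completion only after a full operation that began after the snapshot has finished. Below is a minimal user-space sketch of that arithmetic; the counter values are illustrative and not taken from the commit.

/* Standalone illustration of the rcu_seq_snap() rounding; values are examples. */
#include <assert.h>

int main(void)
{
	/*
	 * Counter even (4): no update in progress.  Snapshot is
	 * (4 + 3) & ~0x1 = 6, so rcu_seq_done() needs one full
	 * start()/end() pair (4 -> 5 -> 6).
	 */
	assert(((4UL + 3) & ~0x1UL) == 6UL);

	/*
	 * Counter odd (5): an update is already running.  Snapshot is
	 * (5 + 3) & ~0x1 = 8, so the in-progress update must end
	 * (5 -> 6) and another full one must complete (6 -> 7 -> 8)
	 * before rcu_seq_done() returns true.
	 */
	assert(((5UL + 3) & ~0x1UL) == 8UL);

	return 0;
}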
Diffstat (limited to 'kernel/rcu/rcu.h')
-rw-r--r--	kernel/rcu/rcu.h	40
1 file changed, 40 insertions(+), 0 deletions(-)
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 8700a81daf56..91e0bf31f6ce 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -56,6 +56,46 @@
 #define DYNTICK_TASK_EXIT_IDLE	(DYNTICK_TASK_NEST_VALUE + \
				 DYNTICK_TASK_FLAG)
 
+
+/*
+ * Grace-period counter management.
+ */
+
+/* Adjust sequence number for start of update-side operation. */
+static inline void rcu_seq_start(unsigned long *sp)
+{
+	WRITE_ONCE(*sp, *sp + 1);
+	smp_mb(); /* Ensure update-side operation after counter increment. */
+	WARN_ON_ONCE(!(*sp & 0x1));
+}
+
+/* Adjust sequence number for end of update-side operation. */
+static inline void rcu_seq_end(unsigned long *sp)
+{
+	smp_mb(); /* Ensure update-side operation before counter increment. */
+	WRITE_ONCE(*sp, *sp + 1);
+	WARN_ON_ONCE(*sp & 0x1);
+}
+
+/* Take a snapshot of the update side's sequence number. */
+static inline unsigned long rcu_seq_snap(unsigned long *sp)
+{
+	unsigned long s;
+
+	s = (READ_ONCE(*sp) + 3) & ~0x1;
+	smp_mb(); /* Above access must not bleed into critical section. */
+	return s;
+}
+
+/*
+ * Given a snapshot from rcu_seq_snap(), determine whether or not a
+ * full update-side operation has occurred.
+ */
+static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
+{
+	return ULONG_CMP_GE(READ_ONCE(*sp), s);
+}
+
 /*
  * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
  * by call_rcu() and rcu callback execution, and are therefore not part of the
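To make the intended call pattern concrete, here is a hedged sketch (not part of this commit) of how a grace-period mechanism might drive these helpers once they live in rcu.h. The names gp_seq, do_one_grace_period(), and wait_for_grace_period() are hypothetical and used only for illustration.

/* Hypothetical usage sketch; gp_seq and both functions are illustrative only. */
static unsigned long gp_seq;

/* Update side: bracket one full grace-period computation. */
static void do_one_grace_period(void)
{
	rcu_seq_start(&gp_seq);		/* Counter becomes odd: update in progress. */
	/* ... wait for readers, invoke callbacks, and so on ... */
	rcu_seq_end(&gp_seq);		/* Counter becomes even: update complete. */
}

/* Waiting side: poll until a full grace period has elapsed since the snapshot. */
static void wait_for_grace_period(void)
{
	unsigned long s = rcu_seq_snap(&gp_seq);

	while (!rcu_seq_done(&gp_seq, s))
		cpu_relax();		/* Real code would sleep or defer instead of spinning. */
}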