Diffstat (limited to 'include')
-rw-r--r--   include/linux/rculist.h  | 17
-rw-r--r--   include/linux/rcupdate.h | 27
-rw-r--r--   include/linux/sched.h    | 10
3 files changed, 29 insertions, 25 deletions
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index e0f0fab20415..c92dd28eaa6c 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -286,23 +286,6 @@ static inline void list_splice_init_rcu(struct list_head *list,
 		&pos->member != (head); \
 		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
 
-
-/**
- * list_for_each_continue_rcu
- * @pos:	the &struct list_head to use as a loop cursor.
- * @head:	the head for your list.
- *
- * Iterate over an rcu-protected list, continuing after current point.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as list_add_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- */
-#define list_for_each_continue_rcu(pos, head) \
-	for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \
-		(pos) != (head); \
-		(pos) = rcu_dereference_raw(list_next_rcu(pos)))
-
 /**
  * list_for_each_entry_continue_rcu - continue iteration over list of given type
  * @pos:	the type * to use as a loop cursor.
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 7c968e4f929e..8fe7c1840d30 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -90,6 +90,25 @@ extern void do_trace_rcu_torture_read(char *rcutorturename,
  * that started after call_rcu() was invoked.  RCU read-side critical
  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
  * and may be nested.
+ *
+ * Note that all CPUs must agree that the grace period extended beyond
+ * all pre-existing RCU read-side critical section.  On systems with more
+ * than one CPU, this means that when "func()" is invoked, each CPU is
+ * guaranteed to have executed a full memory barrier since the end of its
+ * last RCU read-side critical section whose beginning preceded the call
+ * to call_rcu().  It also means that each CPU executing an RCU read-side
+ * critical section that continues beyond the start of "func()" must have
+ * executed a memory barrier after the call_rcu() but before the beginning
+ * of that RCU read-side critical section.  Note that these guarantees
+ * include CPUs that are offline, idle, or executing in user mode, as
+ * well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
+ * resulting RCU callback function "func()", then both CPU A and CPU B are
+ * guaranteed to execute a full memory barrier during the time interval
+ * between the call to call_rcu() and the invocation of "func()" -- even
+ * if CPU A and CPU B are the same CPU (but again only if the system has
+ * more than one CPU).
  */
 extern void call_rcu(struct rcu_head *head,
 		     void (*func)(struct rcu_head *head));
@@ -118,6 +137,9 @@ extern void call_rcu(struct rcu_head *head,
  * OR
  * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
  *  These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 extern void call_rcu_bh(struct rcu_head *head,
 			void (*func)(struct rcu_head *head));
@@ -137,6 +159,9 @@ extern void call_rcu_bh(struct rcu_head *head,
  * OR
  * anything that disables preemption.
  *  These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 extern void call_rcu_sched(struct rcu_head *head,
 			   void (*func)(struct rcu_head *rcu));
@@ -204,6 +229,8 @@ static inline void rcu_user_enter(void) { }
 static inline void rcu_user_exit(void) { }
 static inline void rcu_user_enter_after_irq(void) { }
 static inline void rcu_user_exit_after_irq(void) { }
+static inline void rcu_user_hooks_switch(struct task_struct *prev,
+					 struct task_struct *next) { }
 #endif /* CONFIG_RCU_USER_QS */
 
 extern void exit_rcu(void);
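For context, here is a minimal sketch of the usage pattern that the call_rcu() comments added above are describing: a reader walks an RCU-protected list while an updater unlinks an element and defers its freeing until a grace period has elapsed. The struct foo type, its list, and the foo_*() helpers are illustrative only and are not part of this patch.

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	struct list_head list;
	int data;
	struct rcu_head rcu;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);

/* Reclaim callback: invoked only after a grace period, i.e. after every
 * pre-existing RCU read-side critical section has completed. */
static void foo_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

/* Updater: publish a new element; list_add_rcu() orders initialization
 * of *fp before the pointer becomes visible to readers. */
static void foo_add(struct foo *fp)
{
	spin_lock(&foo_lock);
	list_add_rcu(&fp->list, &foo_list);
	spin_unlock(&foo_lock);
}

/* Updater: unlink an element and defer its freeing via call_rcu(). */
static void foo_del(struct foo *fp)
{
	spin_lock(&foo_lock);
	list_del_rcu(&fp->list);
	spin_unlock(&foo_lock);
	call_rcu(&fp->rcu, foo_reclaim);
}

/* Reader: may run concurrently with foo_del(); the guarantees documented
 * above ensure foo_reclaim() cannot run until this section ends. */
static bool foo_find(int key)
{
	struct foo *fp;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(fp, &foo_list, list) {
		if (fp->data == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

The memory-barrier guarantees spelled out in the new comment are what make this pattern safe on multiprocessor systems: by the time foo_reclaim() runs, every CPU that might still have been referencing the element inside rcu_read_lock() has executed a full memory barrier since leaving that critical section.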
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0dd42a02df2e..530c52ef873e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -109,6 +109,8 @@ extern void update_cpu_load_nohz(void);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
+extern void dump_cpu_task(int cpu);
+
 struct seq_file;
 struct cfs_rq;
 struct task_group;
@@ -1844,14 +1846,6 @@ static inline void rcu_copy_process(struct task_struct *p)
 
 #endif
 
-static inline void rcu_switch(struct task_struct *prev,
-			      struct task_struct *next)
-{
-#ifdef CONFIG_RCU_USER_QS
-	rcu_user_hooks_switch(prev, next);
-#endif
-}
-
 static inline void tsk_restore_flags(struct task_struct *task,
 				     unsigned long orig_flags, unsigned long flags)
 {
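The sched.h hunk above removes the rcu_switch() wrapper; combined with the no-op rcu_user_hooks_switch() stub that this patch adds to rcupdate.h for the !CONFIG_RCU_USER_QS case, the hook can now be invoked directly with no #ifdef at the call site. A minimal sketch of that idiom follows; example_finish_switch() is a hypothetical call site, not code from this patch.

#include <linux/rcupdate.h>
#include <linux/sched.h>

/* With a static inline no-op stub defined when CONFIG_RCU_USER_QS is off,
 * the hook is called unconditionally and the compiler simply discards the
 * call in the !CONFIG_RCU_USER_QS configuration. */
static void example_finish_switch(struct task_struct *prev,
				  struct task_struct *next)
{
	rcu_user_hooks_switch(prev, next);
	/* ... remainder of the context-switch bookkeeping ... */
}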