author    Peter Zijlstra <peterz@infradead.org>    2013-09-10 12:15:23 +0200
committer Ingo Molnar <mingo@kernel.org>           2013-09-25 14:07:54 +0200
commit    bdb43806589096ac4272fe1307e789846ac08d7c (patch)
tree      c854e7e508193766d5cbdd82e8709cfab5ea3be5 /include/asm-generic
parent    01028747559ac6c6f642a7bbd2875cc4f66b2feb (diff)
sched: Extract the basic add/sub preempt_count modifiers
Rewrite the preempt_count macros in order to extract the 3 basic
preempt_count value modifiers:

  __preempt_count_add()
  __preempt_count_sub()

and the new:

  __preempt_count_dec_and_test()

And since we're at it anyway, replace the unconventional
$op_preempt_count names with the more conventional preempt_count_$op.

Since these basic operators are equivalent to the previous _notrace()
variants, do away with the _notrace() versions.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-ewbpdbupy9xpsjhg960zwbv8@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
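For context, the following is a rough sketch of how the renamed preempt_count_$op helpers and the preempt_disable()/preempt_enable() pair layer on top of the extracted __preempt_count_*() primitives. It is an illustrative approximation, not part of this commit's diff: it ignores CONFIG_DEBUG_PREEMPT and preempt tracing, and the scheduler entry point shown is simplified.

/*
 * Illustrative sketch -- not part of this commit's diff.  Approximates how
 * the generic <linux/preempt.h> helpers sit on top of the arch-provided
 * __preempt_count_*() primitives when debugging and tracing are disabled.
 */
#define preempt_count_add(val)		__preempt_count_add(val)
#define preempt_count_sub(val)		__preempt_count_sub(val)
#define preempt_count_dec_and_test()	__preempt_count_dec_and_test()

#define preempt_count_inc()		preempt_count_add(1)
#define preempt_count_dec()		preempt_count_sub(1)

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		preempt_schedule(); \
} while (0)

The point of the split is that an architecture can override the three __preempt_count_*() primitives (for example with a per-cpu counter) without touching the generic disable/enable logic.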
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/preempt.h  35
1 file changed, 35 insertions, 0 deletions
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index 8100b1ec1715..82d958fc3823 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -65,4 +65,39 @@ static __always_inline bool test_preempt_need_resched(void)
return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
}
+/*
+ * The various preempt_count add/sub methods
+ */
+
+static __always_inline void __preempt_count_add(int val)
+{
+ *preempt_count_ptr() += val;
+}
+
+static __always_inline void __preempt_count_sub(int val)
+{
+ *preempt_count_ptr() -= val;
+}
+
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+ return !--*preempt_count_ptr();
+}
+
+/*
+ * Returns true when we need to resched -- even if we can not.
+ */
+static __always_inline bool need_resched(void)
+{
+ return unlikely(test_preempt_need_resched());
+}
+
+/*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+static __always_inline bool should_resched(void)
+{
+ return unlikely(!*preempt_count_ptr());
+}
+
#endif /* __ASM_PREEMPT_H */
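As a usage note (again a sketch, not from this commit): need_resched() reports only that a reschedule has been requested, while should_resched() additionally requires that the whole preempt_count is zero, i.e. a reschedule is both needed and currently possible. A hypothetical voluntary-preemption point built on it might look like the following; the function name is made up for illustration.

/*
 * Hypothetical caller, for illustration only.  In the real kernel this
 * check lives in _cond_resched(), which also wraps the scheduler entry
 * in PREEMPT_ACTIVE accounting.
 */
static inline void example_resched_point(void)
{
	/* preempt_count == 0: preemptible and a resched has been requested */
	if (should_resched())
		schedule();
}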