Diffstat (limited to 'include/asm-generic')
 include/asm-generic/bug.h     | 32
 include/asm-generic/pgtable.h | 37
 2 files changed, 43 insertions(+), 26 deletions(-)
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 8ceab7bcd8b4..a5250895155e 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -16,12 +16,15 @@
#endif
#ifndef HAVE_ARCH_WARN_ON
-#define WARN_ON(condition) do { \
- if (unlikely((condition)!=0)) { \
- printk("BUG: warning at %s:%d/%s()\n", __FILE__, __LINE__, __FUNCTION__); \
- dump_stack(); \
- } \
-} while (0)
+#define WARN_ON(condition) ({ \
+ typeof(condition) __ret_warn_on = (condition); \
+ if (unlikely(__ret_warn_on)) { \
+ printk("BUG: warning at %s:%d/%s()\n", __FILE__, \
+ __LINE__, __FUNCTION__); \
+ dump_stack(); \
+ } \
+ unlikely(__ret_warn_on); \
+})
#endif
#else /* !CONFIG_BUG */
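[Editor's note: a minimal usage sketch, not part of the patch. Because WARN_ON() is now a GNU statement expression that evaluates to the (unlikely-annotated) condition, a caller can warn and handle the failure with a single test. The function and buffer names below are hypothetical; kernel headers are assumed.]

	/* Hypothetical caller: warn on a bad argument and bail out in
	 * one step, instead of testing the condition twice.
	 */
	static int setup_buffer(void *buf, size_t len)
	{
		if (WARN_ON(buf == NULL))	/* prints file:line/function + backtrace */
			return -EINVAL;		/* ...and still reports the failure */
		memset(buf, 0, len);
		return 0;
	}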
@@ -34,21 +37,18 @@
#endif
#ifndef HAVE_ARCH_WARN_ON
-#define WARN_ON(condition) do { if (condition) ; } while(0)
+#define WARN_ON(condition) unlikely((condition))
#endif
#endif
-#define WARN_ON_ONCE(condition) \
-({ \
+#define WARN_ON_ONCE(condition) ({ \
static int __warn_once = 1; \
- int __ret = 0; \
+ typeof(condition) __ret_warn_once = (condition);\
\
- if (unlikely((condition) && __warn_once)) { \
- __warn_once = 0; \
- WARN_ON(1); \
- __ret = 1; \
- } \
- __ret; \
+ if (likely(__warn_once)) \
+ if (WARN_ON(__ret_warn_once)) \
+ __warn_once = 0; \
+ unlikely(__ret_warn_once); \
})
#ifdef CONFIG_SMP
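[Editor's note: a hedged sketch of the reworked WARN_ON_ONCE(), with hypothetical names. The static __warn_once flag is cleared the first time the condition fires, so at most one backtrace is printed per call site, but the expression still evaluates to the condition on every call, so the caller can bail out each time.]

	/* Hypothetical hot path: report a recurring inconsistency once,
	 * but skip the bad item on every occurrence.
	 */
	static void process_item(struct item *it)
	{
		if (WARN_ON_ONCE(it->count < 0))	/* one backtrace, many checks */
			return;
		handle_item(it);			/* hypothetical helper */
	}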
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 349260cd86ed..9d774d07d95b 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -15,19 +15,11 @@
* Note: the old pte is known to not be writable, so we don't need to
* worry about dirty bits etc getting lost.
*/
-#ifndef __HAVE_ARCH_SET_PTE_ATOMIC
#define ptep_establish(__vma, __address, __ptep, __entry) \
do { \
set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
flush_tlb_page(__vma, __address); \
} while (0)
-#else /* __HAVE_ARCH_SET_PTE_ATOMIC */
-#define ptep_establish(__vma, __address, __ptep, __entry) \
-do { \
- set_pte_atomic(__ptep, __entry); \
- flush_tlb_page(__vma, __address); \
-} while (0)
-#endif /* __HAVE_ARCH_SET_PTE_ATOMIC */
#endif
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
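[Editor's note: for context, ptep_establish() (kept above in its single generic form) installs an upgraded entry over an existing read-only PTE and flushes the stale TLB entry; since the old PTE was not writable, no dirty bits can be lost. A simplified, hypothetical fragment of a write-fault-path caller:]

	/* Hypothetical write-fault fragment: derive the new entry from
	 * the old one, then install it and flush the stale TLB entry.
	 */
	pte_t entry = pte_mkdirty(pte_mkwrite(*ptep));
	ptep_establish(vma, address, ptep, entry);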
@@ -112,8 +104,13 @@ do { \
})
#endif
-#ifndef __HAVE_ARCH_PTE_CLEAR_FULL
-#define pte_clear_full(__mm, __address, __ptep, __full) \
+/*
+ * Some architectures may be able to avoid expensive synchronization
+ * primitives when modifications are made to PTEs which are already
+ * not present, or while the address space is being destroyed.
+ */
+#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
+#define pte_clear_not_present_full(__mm, __address, __ptep, __full) \
do { \
pte_clear((__mm), (__address), (__ptep)); \
} while (0)
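[Editor's note: a sketch of the kind of call site the new hook targets, loosely modeled on an unmap loop; names are simplified and hypothetical. When 'full' signals that the whole address space is being torn down, PTEs that are already not present can be cleared without the heavier synchronization a live mapping needs.]

	/* Hypothetical unmap-loop fragment: 'full' is nonzero when the
	 * entire mm is going away, so the cheaper clear is safe.
	 */
	if (!pte_present(ptent)) {
		pte_clear_not_present_full(mm, addr, ptep, full);
		continue;
	}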
@@ -166,6 +163,26 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address
#endif
/*
+ * A facility to provide lazy MMU batching. This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued. Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window. Note that using this
+ * interface requires that read hazards be removed from the code. A read
+ * hazard could arise in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads through
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date. This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified. In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode() do {} while (0)
+#define arch_leave_lazy_mmu_mode() do {} while (0)
+#endif
+
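[Editor's note: a minimal usage sketch for the new hooks, an assumed caller rather than code from this patch. A run of PTE updates is bracketed by enter/leave while the page table lock is held, letting a hypervisor-aware implementation queue and batch the writes; per the comment above, nothing inside the window may read back through the raw PTE pointer.]

	/* Hypothetical batched update under the page table lock. */
	spin_lock(ptl);
	arch_enter_lazy_mmu_mode();		/* open the batching window */
	for (i = 0; i < nr; i++)
		set_pte_at(mm, addr + i * PAGE_SIZE, ptep + i, ptes[i]);
	arch_leave_lazy_mmu_mode();		/* flush any queued updates */
	spin_unlock(ptl);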
+/*
* When walking page tables, get the address of the next boundary,
* or the end address of the range if that comes earlier. Although no
* vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.