Diffstat (limited to 'include/asm-i386/pgtable.h')
-rw-r--r--  include/asm-i386/pgtable.h  125
1 file changed, 88 insertions, 37 deletions
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 09697fec3d2b..7d398f493dde 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -246,20 +246,85 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
# include <asm/pgtable-2level.h>
#endif
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
- if (!pte_dirty(*ptep))
- return 0;
- return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
-}
+/*
+ * Rules for using pte_update - it must be called after any PTE update which
+ * has not been done using the set_pte / clear_pte interfaces. It is used by
+ * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
+ * updates should either be sets, clears, or set_pte_atomic for P->P
+ * transitions, which means this hook should only be called for user PTEs.
+ * This hook implies a P->P protection or access change has taken place, which
+ * requires a subsequent TLB flush. The notification can optionally be delayed
+ * until the TLB flush event by using the pte_update_defer form of the
+ * interface, but care must be taken to assure that the flush happens while
+ * still holding the same page table lock so that the shadow and primary pages
+ * do not become out of sync on SMP.
+ */
+#define pte_update(mm, addr, ptep) do { } while (0)
+#define pte_update_defer(mm, addr, ptep) do { } while (0)
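On this baseline (native) build the two hooks compile away to nothing; a shadow-mode hypervisor port would redefine them to resynchronize its shadow page tables. A minimal sketch of the calling pattern the rules above prescribe, assuming this header's definitions and a hypothetical helper name:

static void example_clear_accessed(struct vm_area_struct *vma,
                                   unsigned long address, pte_t *ptep)
{
        /* Raw in-place update of a user PTE, not done via set_pte/clear_pte */
        clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
        /* Notify the hypervisor; the deferred form ties it to the flush below */
        pte_update_defer(vma->vm_mm, address, ptep);
        /* Flush while still holding the same page table lock (see rules above) */
        flush_tlb_page(vma, address);
}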
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
- if (!pte_young(*ptep))
- return 0;
- return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
-}
+/*
+ * We only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+ * will do the accessed bit for us, and we don't want to
+ * race with other CPUs that might be updating the dirty
+ * bit at the same time.
+ */
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
+do { \
+ if (dirty) { \
+ (ptep)->pte_low = (entry).pte_low; \
+ pte_update_defer((vma)->vm_mm, (address), (ptep)); \
+ flush_tlb_page(vma, address); \
+ } \
+} while (0)
+
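A hedged sketch of how a fault handler might use this macro, assuming the pte helpers already in this header and a hypothetical function name: the new value is built with the dirty and accessed bits set in software, and the macro only writes the PTE, notifies, and flushes when the dirty argument is non-zero:

static void example_fault_mkdirty(struct vm_area_struct *vma,
                                  unsigned long address, pte_t *ptep)
{
        /* Build the updated value; hardware maintains the accessed bit itself */
        pte_t entry = pte_mkyoung(pte_mkdirty(*ptep));
        /* dirty != 0, so the macro writes pte_low, notifies, and flushes */
        ptep_set_access_flags(vma, address, ptep, entry, 1);
}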
+/*
+ * We don't actually provide these operations on their own, but we advertise
+ * the feature macros so the generic, non-flushing versions are never used and
+ * the flush is always folded into the clear_flush operations below.
+ */
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+
+/*
+ * Rules for using ptep_establish: the pte MUST be a user pte, and
+ * must be a present->present transition.
+ */
+#define __HAVE_ARCH_PTEP_ESTABLISH
+#define ptep_establish(vma, address, ptep, pteval) \
+do { \
+ set_pte_present((vma)->vm_mm, address, ptep, pteval); \
+ flush_tlb_page(vma, address); \
+} while (0)
+
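A hedged usage sketch with a hypothetical helper name: swapping one present user PTE for another present value (here a protection change built with pte_modify()), which is exactly the present->present transition the rule above requires:

static void example_change_prot(struct vm_area_struct *vma,
                                unsigned long address, pte_t *ptep,
                                pgprot_t newprot)
{
        /* The new value is still present; only the protection bits change */
        pte_t entry = pte_modify(*ptep, newprot);
        /* Install the new value and flush the stale translation in one step */
        ptep_establish(vma, address, ptep, entry);
}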
+#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
+#define ptep_clear_flush_dirty(vma, address, ptep) \
+({ \
+ int __dirty; \
+ __dirty = pte_dirty(*(ptep)); \
+ if (__dirty) { \
+ clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low); \
+ pte_update_defer((vma)->vm_mm, (address), (ptep)); \
+ flush_tlb_page(vma, address); \
+ } \
+ __dirty; \
+})
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(vma, address, ptep) \
+({ \
+ int __young; \
+ __young = pte_young(*(ptep)); \
+ if (__young) { \
+ clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low); \
+ pte_update_defer((vma)->vm_mm, (address), (ptep)); \
+ flush_tlb_page(vma, address); \
+ } \
+ __young; \
+})
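A hedged sketch of the reclaim-style pattern these two macros serve, using a hypothetical helper: the accessed and dirty bits are each tested, cleared, and flushed in a single operation, so the pte_update_defer() notification and the TLB flush can never be separated from the bit clear:

static int example_age_and_clean(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep)
{
        /* Non-zero if the page was referenced since the last scan */
        int referenced = ptep_clear_flush_young(vma, address, ptep);
        /* Non-zero if the page must be written back before reuse */
        int dirty = ptep_clear_flush_dirty(vma, address, ptep);
        return (dirty << 1) | referenced;
}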
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
{
pte_t pte;
@@ -272,9 +337,11 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
return pte;
}
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
+ pte_update(mm, addr, ptep);
}
/*
@@ -364,11 +431,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pte_index(address) \
(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
- ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
+ ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-#define pmd_page_kernel(pmd) \
+#define pmd_page_vaddr(pmd) \
((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
/*
@@ -391,8 +458,6 @@ extern pte_t *lookup_address(unsigned long address);
static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
#endif
-extern void noexec_setup(const char *str);
-
#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
@@ -408,26 +473,18 @@ extern void noexec_setup(const char *str);
#define pte_unmap_nested(pte) do { } while (0)
#endif
+/* Clear a kernel PTE and flush it from the TLB */
+#define kpte_clear_flush(ptep, vaddr) \
+do { \
+ pte_clear(&init_mm, vaddr, ptep); \
+ __flush_tlb_one(vaddr); \
+} while (0)
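A hedged sketch of the intended use, with hypothetical names, assuming the mapping was installed against init_mm with set_pte(): a temporary kernel mapping is created, used, and then torn down with kpte_clear_flush(), which clears the PTE and evicts the single stale TLB entry:

static void example_temp_kernel_map(pte_t *kmap_ptep, unsigned long vaddr,
                                    struct page *page)
{
        /* Map the page at a fixed kernel virtual address */
        set_pte(kmap_ptep, mk_pte(page, PAGE_KERNEL));
        /* ... access the page through vaddr ... */
        /* Unmap: clear the kernel PTE and flush that one translation */
        kpte_clear_flush(kmap_ptep, vaddr);
}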
+
/*
* The i386 doesn't have any external MMU info: the kernel page
* tables contain all the necessary information.
- *
- * Also, we only update the dirty/accessed state if we set
- * the dirty bit by hand in the kernel, since the hardware
- * will do the accessed bit for us, and we don't want to
- * race with other CPU's that might be updating the dirty
- * bit at the same time.
*/
#define update_mmu_cache(vma,address,pte) do { } while (0)
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
- do { \
- if (__dirty) { \
- (__ptep)->pte_low = (__entry).pte_low; \
- flush_tlb_page(__vma, __address); \
- } \
- } while (0)
-
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_FLATMEM
@@ -441,12 +498,6 @@ extern void noexec_setup(const char *str);
#define GET_IOSPACE(pfn) 0
#define GET_PFN(pfn) (pfn)
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>
#endif /* _I386_PGTABLE_H */