Diffstat (limited to 'arch/powerpc/include/asm/book3s')
-rw-r--r--  arch/powerpc/include/asm/book3s/32/kup.h             69
-rw-r--r--  arch/powerpc/include/asm/book3s/32/pgalloc.h          8
-rw-r--r--  arch/powerpc/include/asm/book3s/32/pgtable.h         23
-rw-r--r--  arch/powerpc/include/asm/book3s/64/kup-radix.h       40
-rw-r--r--  arch/powerpc/include/asm/book3s/64/mmu-hash.h         5
-rw-r--r--  arch/powerpc/include/asm/book3s/64/mmu.h              4
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgalloc.h         17
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable-4k.h       3
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable-64k.h      3
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h         11
-rw-r--r--  arch/powerpc/include/asm/book3s/64/radix.h           11
-rw-r--r--  arch/powerpc/include/asm/book3s/64/tlbflush-radix.h  10
-rw-r--r--  arch/powerpc/include/asm/book3s/64/tlbflush.h        19
-rw-r--r--  arch/powerpc/include/asm/book3s/pgtable.h            11
14 files changed, 145 insertions, 89 deletions
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 677e9babef80..3c0ba22dc360 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -91,6 +91,7 @@
static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
{
+ addr &= 0xf0000000; /* align addr to start of segment */
barrier(); /* make sure thread.kuap is updated before playing with SRs */
while (addr < end) {
mtsrin(sr, addr);
@@ -101,41 +102,91 @@ static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
isync(); /* Context sync required after mtsrin() */
}
-static inline void allow_user_access(void __user *to, const void __user *from, u32 size)
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+ u32 size, unsigned long dir)
{
u32 addr, end;
- if (__builtin_constant_p(to) && to == NULL)
+ BUILD_BUG_ON(!__builtin_constant_p(dir));
+ BUILD_BUG_ON(dir == KUAP_CURRENT);
+
+ if (!(dir & KUAP_WRITE))
return;
addr = (__force u32)to;
- if (!addr || addr >= TASK_SIZE || !size)
+ if (unlikely(addr >= TASK_SIZE || !size))
return;
end = min(addr + size, TASK_SIZE);
+
current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
kuap_update_sr(mfsrin(addr) & ~SR_KS, addr, end); /* Clear Ks */
}
-static inline void prevent_user_access(void __user *to, const void __user *from, u32 size)
+static __always_inline void prevent_user_access(void __user *to, const void __user *from,
+ u32 size, unsigned long dir)
{
- u32 addr = (__force u32)to;
- u32 end = min(addr + size, TASK_SIZE);
+ u32 addr, end;
+
+ BUILD_BUG_ON(!__builtin_constant_p(dir));
+
+ if (dir == KUAP_CURRENT) {
+ u32 kuap = current->thread.kuap;
- if (!addr || addr >= TASK_SIZE || !size)
+ if (unlikely(!kuap))
+ return;
+
+ addr = kuap & 0xf0000000;
+ end = kuap << 28;
+ } else if (dir & KUAP_WRITE) {
+ addr = (__force u32)to;
+ end = min(addr + size, TASK_SIZE);
+
+ if (unlikely(addr >= TASK_SIZE || !size))
+ return;
+ } else {
return;
+ }
current->thread.kuap = 0;
kuap_update_sr(mfsrin(addr) | SR_KS, addr, end); /* set Ks */
}
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
+static inline unsigned long prevent_user_access_return(void)
+{
+ unsigned long flags = current->thread.kuap;
+ unsigned long addr = flags & 0xf0000000;
+ unsigned long end = flags << 28;
+ void __user *to = (__force void __user *)addr;
+
+ if (flags)
+ prevent_user_access(to, to, end - addr, KUAP_READ_WRITE);
+
+ return flags;
+}
+
+static inline void restore_user_access(unsigned long flags)
{
+ unsigned long addr = flags & 0xf0000000;
+ unsigned long end = flags << 28;
+ void __user *to = (__force void __user *)addr;
+
+ if (flags)
+ allow_user_access(to, to, end - addr, KUAP_READ_WRITE);
+}
+
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+ unsigned long begin = regs->kuap & 0xf0000000;
+ unsigned long end = regs->kuap << 28;
+
if (!is_write)
return false;
- return WARN(!regs->kuap, "Bug: write fault blocked by segment registers !");
+ return WARN(address < begin || address >= end,
+ "Bug: write fault blocked by segment registers !");
}
#endif /* CONFIG_PPC_KUAP */
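
[Editor's note: the thread.kuap word introduced above packs the currently open
user-access window into a single 32-bit value: the segment-aligned base address
in the high bits and the exclusive end segment number in the low nibble. Below
is a standalone sketch of that encoding, using the exact expressions from the
hunk; the demo values and main() wrapper are invented for illustration.]

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Encode as in allow_user_access():
     *   kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf)
     * Decode as in prevent_user_access() and bad_kuap_fault():
     *   begin = kuap & 0xf0000000;   end = kuap << 28;
     */
    int main(void)
    {
            uint32_t addr = 0x10000000, end = 0x30000000;
            uint32_t kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);

            assert((kuap & 0xf0000000) == addr);    /* base recovered */
            assert((kuap << 28) == end);            /* end recovered */
            printf("kuap=0x%08x begin=0x%08x end=0x%08x\n",
                   kuap, kuap & 0xf0000000, kuap << 28);
            return 0;
    }
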
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index 998317702630..dc5c039eb28e 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -49,7 +49,6 @@ static inline void pgtable_free(void *table, unsigned index_size)
#define get_hugepd_cache_index(x) (x)
-#ifdef CONFIG_SMP
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
void *table, int shift)
{
@@ -66,13 +65,6 @@ static inline void __tlb_remove_table(void *_table)
pgtable_free(table, shift);
}
-#else
-static inline void pgtable_free_tlb(struct mmu_gather *tlb,
- void *table, int shift)
-{
- pgtable_free(table, shift);
-}
-#endif
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
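
[Editor's note: with the !SMP variant deleted, every page-table free goes
through __tlb_remove_table(), which recovers the size index that
pgtable_free_tlb() tagged into the low bits of the (aligned) table pointer.
A hedged standalone illustration of that pointer-tagging trick follows; the
mask value and demo names are assumptions, not the kernel's constants.]

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define DEMO_INDEX_MASK 0xfUL   /* assumed: tables at least 16-byte aligned */

    int main(void)
    {
            void *table = aligned_alloc(16, 64);
            unsigned long shift = 5;        /* size index, must fit in the mask */

            /* pgtable_free_tlb() side: stash the index in the low bits */
            void *tagged = (void *)((uintptr_t)table | shift);

            /* __tlb_remove_table() side: split pointer and index back apart */
            assert(((uintptr_t)tagged & DEMO_INDEX_MASK) == shift);
            assert((void *)((uintptr_t)tagged & ~DEMO_INDEX_MASK) == table);

            free(table);
            return 0;
    }
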
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 838de59f6754..5b39c11e884a 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -148,23 +148,21 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
*/
#include <asm/fixmap.h>
-#ifdef CONFIG_HIGHMEM
-#define KVIRT_TOP PKMAP_BASE
-#else
-#define KVIRT_TOP FIXADDR_START
-#endif
-
/*
* ioremap_bot starts at that address. Early ioremaps move down from there,
* until mem_init() at which point this becomes the top of the vmalloc
* and ioremap space
*/
-#ifdef CONFIG_NOT_COHERENT_CACHE
-#define IOREMAP_TOP ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
+#ifdef CONFIG_HIGHMEM
+#define IOREMAP_TOP PKMAP_BASE
#else
-#define IOREMAP_TOP KVIRT_TOP
+#define IOREMAP_TOP FIXADDR_START
#endif
+/* PPC32 shares vmalloc area with ioremap */
+#define IOREMAP_START VMALLOC_START
+#define IOREMAP_END VMALLOC_END
+
/*
* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 16MB value just means that there will be a 64MB "hole" after the
@@ -195,14 +193,17 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
+
+#ifdef CONFIG_KASAN_VMALLOC
+#define VMALLOC_END _ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
#define VMALLOC_END ioremap_bot
+#endif
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
-extern unsigned long ioremap_bot;
-
/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS 0
diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
index f254de956d6a..90dd3a3fc8c7 100644
--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
@@ -63,6 +63,14 @@
* because that would require an expensive read/modify write of the AMR.
*/
+static inline unsigned long get_kuap(void)
+{
+ if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
+ return 0;
+
+ return mfspr(SPRN_AMR);
+}
+
static inline void set_kuap(unsigned long value)
{
if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
@@ -77,25 +85,43 @@ static inline void set_kuap(unsigned long value)
isync();
}
-static inline void allow_user_access(void __user *to, const void __user *from,
- unsigned long size)
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir)
{
// This is written so we can resolve to a single case at build time
- if (__builtin_constant_p(to) && to == NULL)
+ BUILD_BUG_ON(!__builtin_constant_p(dir));
+ if (dir == KUAP_READ)
set_kuap(AMR_KUAP_BLOCK_WRITE);
- else if (__builtin_constant_p(from) && from == NULL)
+ else if (dir == KUAP_WRITE)
set_kuap(AMR_KUAP_BLOCK_READ);
- else
+ else if (dir == KUAP_READ_WRITE)
set_kuap(0);
+ else
+ BUILD_BUG();
}
static inline void prevent_user_access(void __user *to, const void __user *from,
- unsigned long size)
+ unsigned long size, unsigned long dir)
+{
+ set_kuap(AMR_KUAP_BLOCKED);
+}
+
+static inline unsigned long prevent_user_access_return(void)
{
+ unsigned long flags = get_kuap();
+
set_kuap(AMR_KUAP_BLOCKED);
+
+ return flags;
+}
+
+static inline void restore_user_access(unsigned long flags)
+{
+ set_kuap(flags);
}
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
(regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
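
[Editor's note: the new prevent_user_access_return()/restore_user_access()
pair lets a caller save the current AMR state, block all user access, and
later restore exactly what was open before. A minimal sketch of the intended
calling pattern; handle_nested_work() is a hypothetical placeholder.]

    static void demo_nested_section(void)
    {
            unsigned long flags;

            flags = prevent_user_access_return();   /* save AMR, block everything */
            handle_nested_work();                   /* hypothetical: no user access here */
            restore_user_access(flags);             /* reinstate the saved AMR value */
    }
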
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 15b75005bc34..3fa1b962dc27 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -600,8 +600,11 @@ extern void slb_set_size(u16 size);
*
*/
#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 2)
+
+// The + 2 accounts for INVALID_REGION and 1 more to avoid overlap with kernel
#define MIN_USER_CONTEXT (MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
- MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT)
+ MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)
+
/*
* For platforms that support only 65 bits of VA we limit the context bits
*/
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 23b83d3593e2..bb3deb76c951 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -206,7 +206,6 @@ extern int mmu_io_psize;
void mmu_early_init_devtree(void);
void hash__early_init_devtree(void);
void radix__early_init_devtree(void);
-extern void radix_init_native(void);
extern void hash__early_init_mmu(void);
extern void radix__early_init_mmu(void);
static inline void early_init_mmu(void)
@@ -238,9 +237,6 @@ static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
first_memblock_size);
}
-extern int (*register_process_table)(unsigned long base, unsigned long page_size,
- unsigned long tbl_size);
-
#ifdef CONFIG_PPC_PSERIES
extern void radix_init_pseries(void);
#else
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index d5a44912902f..a41e91bd0580 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -19,9 +19,7 @@ extern struct vmemmap_backing *vmemmap_list;
extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
extern void pmd_fragment_free(unsigned long *);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
-#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
-#endif
void pte_frag_destroy(void *pte_frag);
static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
@@ -122,11 +120,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long address)
{
- /*
- * By now all the pud entries should be none entries. So go
- * ahead and flush the page walk cache
- */
- flush_tlb_pgtable(tlb, address);
pgtable_free_tlb(tlb, pud, PUD_INDEX);
}
@@ -143,11 +136,6 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long address)
{
- /*
- * By now all the pud entries should be none entries. So go
- * ahead and flush the page walk cache
- */
- flush_tlb_pgtable(tlb, address);
return pgtable_free_tlb(tlb, pmd, PMD_INDEX);
}
@@ -166,11 +154,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
- /*
- * By now all the pud entries should be none entries. So go
- * ahead and flush the page walk cache
- */
- flush_tlb_pgtable(tlb, address);
pgtable_free_tlb(tlb, table, PTE_INDEX);
}
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
index a069dfcac9a9..4e697bc2f4cd 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
@@ -70,9 +70,6 @@ static inline int get_hugepd_cache_index(int index)
/* should not reach */
}
-#else /* !CONFIG_HUGETLB_PAGE */
-static inline int pmd_huge(pmd_t pmd) { return 0; }
-static inline int pud_huge(pud_t pud) { return 0; }
#endif /* CONFIG_HUGETLB_PAGE */
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
index e3d4dd4ae2fa..34d1018896b3 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
@@ -59,9 +59,6 @@ static inline int get_hugepd_cache_index(int index)
BUG();
}
-#else /* !CONFIG_HUGETLB_PAGE */
-static inline int pmd_huge(pmd_t pmd) { return 0; }
-static inline int pud_huge(pud_t pud) { return 0; }
#endif /* CONFIG_HUGETLB_PAGE */
static inline int remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 8308f32e9782..201a69e6a355 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -289,7 +289,6 @@ extern unsigned long __kernel_io_end;
#define KERN_IO_END __kernel_io_end
extern struct page *vmemmap;
-extern unsigned long ioremap_bot;
extern unsigned long pci_io_base;
#endif /* __ASSEMBLY__ */
@@ -317,6 +316,7 @@ extern unsigned long pci_io_base;
#define PHB_IO_BASE (ISA_IO_END)
#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE (PHB_IO_END)
+#define IOREMAP_START (ioremap_bot)
#define IOREMAP_END (KERN_IO_END)
/* Advertise special mapping type for AGP */
@@ -608,8 +608,10 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
*/
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
- return __pte((((pte_basic_t)(pfn) << PAGE_SHIFT) & PTE_RPN_MASK) |
- pgprot_val(pgprot));
+ VM_BUG_ON(pfn >> (64 - PAGE_SHIFT));
+ VM_BUG_ON((pfn << PAGE_SHIFT) & ~PTE_RPN_MASK);
+
+ return __pte(((pte_basic_t)pfn << PAGE_SHIFT) | pgprot_val(pgprot));
}
static inline unsigned long pte_pfn(pte_t pte)
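
[Editor's note: the two VM_BUG_ON()s in the pfn_pte() hunk above replace the
old silent masking — a pfn that would overflow 64 bits when shifted, or that
spills outside PTE_RPN_MASK, is now caught rather than truncated. A hedged
standalone version of those checks; the shift and mask constants here are
illustrative assumptions, not the kernel's definitions.]

    #include <assert.h>
    #include <stdint.h>

    #define DEMO_PAGE_SHIFT   12ULL                    /* assumed 4K base pages */
    #define DEMO_PTE_RPN_MASK 0x01fffffffffff000ULL    /* illustrative RPN field */

    uint64_t demo_pfn_pte(uint64_t pfn, uint64_t pgprot)
    {
            assert(!(pfn >> (64 - DEMO_PAGE_SHIFT)));   /* no 64-bit overflow */
            assert(!((pfn << DEMO_PAGE_SHIFT) & ~DEMO_PTE_RPN_MASK)); /* fits RPN */
            return (pfn << DEMO_PAGE_SHIFT) | pgprot;
    }
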
@@ -1353,18 +1355,21 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_va
* Like pmd_huge() and pmd_large(), but works regardless of config options
*/
#define pmd_is_leaf pmd_is_leaf
+#define pmd_leaf pmd_is_leaf
static inline bool pmd_is_leaf(pmd_t pmd)
{
return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
}
#define pud_is_leaf pud_is_leaf
+#define pud_leaf pud_is_leaf
static inline bool pud_is_leaf(pud_t pud)
{
return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
}
#define pgd_is_leaf pgd_is_leaf
+#define pgd_leaf pgd_is_leaf
static inline bool pgd_is_leaf(pgd_t pgd)
{
return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index e04a839cb5b9..d97db3ad9aae 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -254,7 +254,13 @@ extern void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
extern pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp);
-extern int radix__has_transparent_hugepage(void);
+static inline int radix__has_transparent_hugepage(void)
+{
+ /* For radix, 2M at PMD level means THP */
+ if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
+ return 1;
+ return 0;
+}
#endif
extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
@@ -266,9 +272,6 @@ extern void radix__vmemmap_remove_mapping(unsigned long start,
extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
pgprot_t flags, unsigned int psz);
-extern int radix__ioremap_range(unsigned long ea, phys_addr_t pa,
- unsigned long size, pgprot_t prot, int nid);
-
static inline unsigned long radix__get_tree_size(void)
{
unsigned long rts_field;
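
[Editor's note: inlining radix__has_transparent_hugepage() turns the THP test
into a simple table lookup. The arithmetic behind the comment, as a hedged
back-of-envelope check with assumed constants: with 4K base pages,
PMD_SHIFT = 12 + 9 = 21 and 1 << 21 = 2M, so a 2M page whose shift equals
PMD_SHIFT maps exactly one PMD entry.]

    #include <stdio.h>

    int main(void)
    {
            unsigned page_shift = 12;       /* assumed 4K base pages */
            unsigned pte_index_bits = 9;    /* 512 PTEs per 4K table */
            unsigned pmd_shift = page_shift + pte_index_bits;

            /* mmu_psize_defs[MMU_PAGE_2M].shift == 21 would match pmd_shift */
            printf("PMD level maps 1 << %u = %u MiB\n",
                   pmd_shift, (1u << pmd_shift) >> 20);
            return 0;
    }
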
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 05147cecb8df..ca8db193ae38 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -17,8 +17,8 @@ extern void radix__flush_tlb_lpid_page(unsigned int lpid,
unsigned long addr,
unsigned long page_size);
extern void radix__flush_pwc_lpid(unsigned int lpid);
-extern void radix__flush_tlb_lpid(unsigned int lpid);
-extern void radix__local_flush_tlb_lpid_guest(unsigned int lpid);
+extern void radix__flush_all_lpid(unsigned int lpid);
+extern void radix__flush_all_lpid_guest(unsigned int lpid);
#else
static inline void radix__tlbiel_all(unsigned int action) { WARN_ON(1); };
static inline void radix__flush_tlb_lpid_page(unsigned int lpid,
@@ -31,11 +31,11 @@ static inline void radix__flush_pwc_lpid(unsigned int lpid)
{
WARN_ON(1);
}
-static inline void radix__flush_tlb_lpid(unsigned int lpid)
+static inline void radix__flush_all_lpid(unsigned int lpid)
{
WARN_ON(1);
}
-static inline void radix__local_flush_tlb_lpid_guest(unsigned int lpid)
+static inline void radix__flush_all_lpid_guest(unsigned int lpid)
{
WARN_ON(1);
}
@@ -73,6 +73,4 @@ extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
extern void radix__flush_tlb_all(void);
-extern void radix__local_flush_tlb_lpid(unsigned int lpid);
-
#endif
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index ebf572ea621e..dcb5c3839d2f 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -147,19 +147,12 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
flush_tlb_page(vma, address);
}
-/*
- * flush the page walk cache for the address
- */
-static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
-{
- /*
- * Flush the page table walk cache on freeing a page table. We already
- * have marked the upper/higher level page table entry none by now.
- * So it is safe to flush PWC here.
- */
- if (!radix_enabled())
- return;
+extern bool tlbie_capable;
+extern bool tlbie_enabled;
- radix__flush_tlb_pwc(tlb, address);
+static inline bool cputlb_use_tlbie(void)
+{
+ return tlbie_enabled;
}
+
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */
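
[Editor's note: cputlb_use_tlbie() exposes a runtime choice between broadcast
and local TLB invalidation. A hedged sketch of how a flush path might consume
it; both helper functions below are hypothetical names, not kernel APIs.]

    static void demo_flush_range(unsigned long start, unsigned long end)
    {
            if (cputlb_use_tlbie())
                    demo_tlbie_broadcast(start, end);       /* global tlbie */
            else
                    demo_tlbiel_on_each_cpu(start, end);    /* IPI + local tlbiel */
    }
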
diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
index 6436b65ac7bc..0e1263455d73 100644
--- a/arch/powerpc/include/asm/book3s/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -26,5 +26,16 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+
#endif /* __ASSEMBLY__ */
#endif
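
[Editor's note: the comment above describes the generic pattern — the core
fault path installs the PTE, then hands the architecture a hook. Roughly, the
generic-MM call site has this shape (a sketch, not this patch's code):]

    set_pte_at(vma->vm_mm, address, ptep, entry);   /* install the new PTE */
    update_mmu_cache(vma, address, ptep);           /* preload HPTE / sync caches */
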