author:     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>    2015-03-30 10:41:03 +0530
committer:  Michael Ellerman <mpe@ellerman.id.au>                 2015-04-17 11:23:39 +1000
commit:     691e95fd7396905a38d98919e9c150dbc3ea21a3 (patch)
tree:       d89b898d4f42d167f0da169f482d7104b46870d8 /arch/powerpc/perf
parent:     dac5657067919161eb3273ca787d8ae9814801e7 (diff)
download:   blackbird-op-linux-691e95fd7396905a38d98919e9c150dbc3ea21a3.tar.gz
            blackbird-op-linux-691e95fd7396905a38d98919e9c150dbc3ea21a3.zip
powerpc/mm/thp: Make page table walk safe against thp split/collapse
We can prevent a THP split or a hugepage collapse by disabling irqs.
We send an IPI to all the cpus in the early part of split/collapse,
so disabling local irqs ensures that the split/collapse cannot make
progress. If the THP is in the middle of a split, we return NULL from
find_linux_pte_or_hugepte(). For all the current callers this should be ok.

We need to be careful if we want to use the returned pte_t pointer outside
the irq-disabled region. W.r.t. a THP split the pfn remains the same,
but a hugepage collapse will result in a pfn change. There are a few steps
we can take to avoid a hugepage collapse. One way is to take a page
reference inside the irq-disabled region. Another option is to take
mmap_sem so that a parallel collapse cannot happen. We can also
disable collapse by taking the pmd_lock. Another method, used by the kvm
subsystem, is to check whether an mmu_notifier update happened in between,
using mmu_notifier_retry().
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
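
As an aside, the first option mentioned in the message above (taking a page reference while irqs are still disabled) would look roughly like the sketch below. This is not code from this commit; get_user_page_stable() is a hypothetical helper written for illustration, assuming the powerpc/mm helpers of this tree (find_linux_pte_or_hugepte(), pte_present(), pte_pfn(), pfn_valid(), pfn_to_page(), get_page()).

/*
 * Hypothetical sketch (not part of this commit): walk the page table with
 * irqs disabled, then take a page reference before re-enabling irqs so a
 * later hugepage collapse cannot change the pfn underneath the caller.
 */
static struct page *get_user_page_stable(struct mm_struct *mm,
					 unsigned long addr)
{
	unsigned long flags, pfn;
	unsigned shift;
	pte_t *ptep, pte;
	struct page *page = NULL;

	local_irq_save(flags);
	ptep = find_linux_pte_or_hugepte(mm->pgd, addr, &shift);
	if (!ptep)			/* THP split in progress */
		goto out;
	pte = READ_ONCE(*ptep);
	if (!pte_present(pte))
		goto out;
	pfn = pte_pfn(pte);
	if (!pfn_valid(pfn))
		goto out;
	page = pfn_to_page(pfn);
	get_page(page);			/* reference taken inside the irq-off window */
out:
	local_irq_restore(flags);
	return page;			/* caller must put_page() when done */
}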
Diffstat (limited to 'arch/powerpc/perf')
-rw-r--r--  arch/powerpc/perf/callchain.c  24
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index ead55351b254..ff09cde20cd2 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -111,41 +111,45 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
  * interrupt context, so if the access faults, we read the page tables
  * to find which page (if any) is mapped and access it directly.
  */
-static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
+static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
 {
+	int ret = -EFAULT;
 	pgd_t *pgdir;
 	pte_t *ptep, pte;
 	unsigned shift;
 	unsigned long addr = (unsigned long) ptr;
 	unsigned long offset;
-	unsigned long pfn;
+	unsigned long pfn, flags;
 	void *kaddr;
 
 	pgdir = current->mm->pgd;
 	if (!pgdir)
 		return -EFAULT;
 
+	local_irq_save(flags);
 	ptep = find_linux_pte_or_hugepte(pgdir, addr, &shift);
+	if (!ptep)
+		goto err_out;
 	if (!shift)
 		shift = PAGE_SHIFT;
 
 	/* align address to page boundary */
 	offset = addr & ((1UL << shift) - 1);
-	addr -= offset;
 
-	if (ptep == NULL)
-		return -EFAULT;
-	pte = *ptep;
+	pte = READ_ONCE(*ptep);
 	if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
-		return -EFAULT;
+		goto err_out;
 	pfn = pte_pfn(pte);
 	if (!page_is_ram(pfn))
-		return -EFAULT;
+		goto err_out;
 
 	/* no highmem to worry about here */
 	kaddr = pfn_to_kaddr(pfn);
-	memcpy(ret, kaddr + offset, nb);
-	return 0;
+	memcpy(buf, kaddr + offset, nb);
+	ret = 0;
+err_out:
+	local_irq_restore(flags);
+	return ret;
 }
 
 static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
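
For completeness, the kvm-style alternative named last in the commit message (detecting a racing collapse with mmu_notifier_retry() instead of pinning the page) follows the general pattern sketched below. This is a hedged illustration, not code from this commit or from arch/powerpc/kvm; map_guest_page() is a hypothetical function, while kvm->mmu_notifier_seq, kvm->mmu_lock and mmu_notifier_retry() are the existing KVM interfaces.

/*
 * Hypothetical sketch (not part of this commit): sample the mmu notifier
 * sequence count before the walk and recheck it under kvm->mmu_lock before
 * the result is consumed.
 */
static long map_guest_page(struct kvm *kvm, unsigned long gfn)
{
	unsigned long mmu_seq;

retry:
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();	/* read the sequence count before walking page tables */

	/*
	 * ... irq-disabled find_linux_pte_or_hugepte() walk here, as in the
	 * diff above, recording the pfn we intend to map for 'gfn' ...
	 */

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		/* an mmu notifier fired in between; the pfn may be stale */
		spin_unlock(&kvm->mmu_lock);
		goto retry;
	}
	/* install the translation while still holding kvm->mmu_lock */
	spin_unlock(&kvm->mmu_lock);
	return 0;
}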