Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c    5
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c   30
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c      14
3 files changed, 38 insertions, 11 deletions
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 534acb3c6c3d..26df3864d85a 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -539,12 +539,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (!writing && hpte_is_writable(r)) {
unsigned int hugepage_shift;
pte_t *ptep, pte;
+ unsigned long flags;
/*
* We need to protect against page table destruction
* while looking up and updating the pte.
*/
- rcu_read_lock_sched();
+ local_irq_save(flags);
ptep = find_linux_pte_or_hugepte(current->mm->pgd,
hva, &hugepage_shift);
if (ptep) {
@@ -553,7 +554,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (pte_write(pte))
write_ok = 1;
}
- rcu_read_unlock_sched();
+ local_irq_restore(flags);
}
}
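Putting the two hunks above together, the write-permission probe in kvmppc_book3s_hv_page_fault() now follows the pattern: disable interrupts, walk the page table, examine the pte, re-enable interrupts. The condensed editorial sketch below is reassembled from the context lines above; the pte read falls between the two hunks and is assumed here to use kvmppc_read_update_linux_pte(), the helper this same patch calls in book3s_hv_rm_mmu.c.

	if (!writing && hpte_is_writable(r)) {
		unsigned int hugepage_shift;
		pte_t *ptep, pte;
		unsigned long flags;

		/*
		 * Protect against page table destruction while looking up
		 * and updating the pte: with interrupts off, a concurrent
		 * THP collapse/split or page table teardown cannot complete
		 * underneath us.
		 */
		local_irq_save(flags);
		ptep = find_linux_pte_or_hugepte(current->mm->pgd,
						 hva, &hugepage_shift);
		if (ptep) {
			/* assumed: this line sits between the two hunks */
			pte = kvmppc_read_update_linux_pte(ptep, 1,
							   hugepage_shift);
			if (pte_write(pte))
				write_ok = 1;
		}
		local_irq_restore(flags);
	}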
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 73e083cb9f7e..f559b25de173 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -26,11 +26,14 @@ static void *real_vmalloc_addr(void *x)
{
unsigned long addr = (unsigned long) x;
pte_t *p;
-
- p = find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
+ /*
+ * Assume we don't have huge pages in vmalloc space, so we don't
+ * need to worry about THP collapse/split here. This is called
+ * only in real mode, hence no irq_save/restore is needed.
+ */
+ p = __find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
if (!p || !pte_present(*p))
return NULL;
- /* assume we don't have huge pages in vmalloc space... */
addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
return __va(addr);
}
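The real_vmalloc_addr() change relies on there being two walker entry points: a checked one for virtual-mode callers and an unchecked one for real mode. As a rough sketch (an assumption about the header added by this series, not a verbatim copy), the plain variant is a thin wrapper that complains if it is called with interrupts enabled, while the double-underscore variant does the walk without that check:

/*
 * Sketch only: assumed shape of the checked wrapper. Real-mode code
 * such as real_vmalloc_addr() calls __find_linux_pte_or_hugepte()
 * directly, since real mode already runs with interrupts hard-disabled.
 */
static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
					       unsigned *shift)
{
	if (!arch_irqs_disabled()) {
		pr_info("%s called with irq enabled\n", __func__);
		dump_stack();
	}
	return __find_linux_pte_or_hugepte(pgdir, ea, shift);
}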
@@ -153,7 +156,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
pte_t *ptep;
unsigned int writing;
unsigned long mmu_seq;
- unsigned long rcbits;
+ unsigned long rcbits, irq_flags = 0;
psize = hpte_page_size(pteh, ptel);
if (!psize)
@@ -189,7 +192,16 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
/* Translate to host virtual address */
hva = __gfn_to_hva_memslot(memslot, gfn);
- ptep = find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift);
+ /*
+ * If we had a page table change after lookup, we would
+ * retry via mmu_notifier_retry.
+ */
+ if (realmode) {
+ ptep = __find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift);
+ } else {
+ local_irq_save(irq_flags);
+ ptep = find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift);
+ }
if (ptep) {
pte_t pte;
unsigned int host_pte_size;
@@ -202,9 +214,11 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
* We should always find the guest page size
* to <= host page size, if host is using hugepage
*/
- if (host_pte_size < psize)
+ if (host_pte_size < psize) {
+ if (!realmode)
+ local_irq_restore(irq_flags);
return H_PARAMETER;
-
+ }
pte = kvmppc_read_update_linux_pte(ptep, writing, hpage_shift);
if (pte_present(pte) && !pte_protnone(pte)) {
if (writing && !pte_write(pte))
@@ -216,6 +230,8 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
pa |= gpa & ~PAGE_MASK;
}
}
+ if (!realmode)
+ local_irq_restore(irq_flags);
ptel &= ~(HPTE_R_PP0 - psize);
ptel |= pa;
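The comment about retrying "via mmu_notifier_retry" refers to the usual KVM sequence-count pattern: sample kvm->mmu_notifier_seq before translating the guest page, then, with kvm->mmu_lock held, check whether an invalidation ran in between and retry rather than trusting a possibly stale translation. A simplified sketch follows; the helper name map_guest_page_checked() is illustrative, not the actual H_ENTER code path.

static long map_guest_page_checked(struct kvm *kvm, unsigned long gfn)
{
	unsigned long mmu_seq;

	/* Sample the notifier sequence count before translating the page */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* ... translate gfn to a host pfn/pte here ... */

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		/* An invalidation ran since the sample; translation may be stale */
		spin_unlock(&kvm->mmu_lock);
		return H_TOO_HARD;	/* let the caller retry */
	}
	/* ... install the HPTE while still holding mmu_lock ... */
	spin_unlock(&kvm->mmu_lock);
	return H_SUCCESS;
}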
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index a1f5b0d4b1d6..4d33e199edcc 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -338,6 +338,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
pte_t *ptep;
unsigned int wimg = 0;
pgd_t *pgdir;
+ unsigned long flags;
/* used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq;
@@ -468,14 +469,23 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
pgdir = vcpu_e500->vcpu.arch.pgdir;
+ /*
+ * We only need the wimg bits here, so we don't care much
+ * about the THP splitting bit. We are holding kvm->mmu_lock,
+ * so a notifier invalidate can't run and hence the pfn won't change.
+ */
+ local_irq_save(flags);
ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL);
if (ptep) {
pte_t pte = READ_ONCE(*ptep);
- if (pte_present(pte))
+ if (pte_present(pte)) {
wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
MAS2_WIMGE_MASK;
- else {
+ local_irq_restore(flags);
+ } else {
+ local_irq_restore(flags);
pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n",
__func__, (long)gfn, pfn);
ret = -EINVAL;
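As a possible simplification of the e500 change above (an editorial sketch with an illustrative helper name, not part of the patch), the irq-disabled window can be kept as a single block with one restore point and the result acted on afterwards:

/* Illustrative helper, not in the tree */
static int kvmppc_e500_lookup_wimg(pgd_t *pgdir, unsigned long hva,
				   unsigned int *wimg)
{
	unsigned long flags;
	pte_t *ptep, pte;
	int found = 0;

	/* Keep the whole walk inside one irq-disabled window */
	local_irq_save(flags);
	ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL);
	if (ptep) {
		pte = READ_ONCE(*ptep);
		if (pte_present(pte)) {
			*wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
				MAS2_WIMGE_MASK;
			found = 1;
		}
	}
	local_irq_restore(flags);

	return found;
}

The caller would then emit the pr_err_ratelimited() message and set ret = -EINVAL when the helper reports the pte as not present, matching what the patch does inline.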