author     Paul Mackerras <paulus@samba.org>   2014-05-26 19:48:37 +1000
committer  Alexander Graf <agraf@suse.de>      2014-05-30 14:26:28 +0200
commit  1066f7724c73fca9ddb978c7f5b7411c54032047 (patch)
tree    718243e1a2afa0c6df2eb77fee8c3fab6d494f51 /arch/powerpc
parent  55765483e1df8135102ae9ca77dccbca9a7a6184 (diff)
KVM: PPC: Book3S HV: Put huge-page HPTEs in rmap chain for base address
Currently, when a huge page is faulted in for a guest, we select the
rmap chain to insert the HPTE into based on the guest physical address
that the guest tried to access. Since there is an rmap chain for each
system page, there are many rmap chains for the area covered by a huge
page (e.g. 256 for 16MB pages when PAGE_SIZE = 64kB), and the huge-page
HPTE could end up in any one of them.

For consistency, and to make the huge-page HPTEs easier to find, we now
put huge-page HPTEs in the rmap chain corresponding to the base address
of the huge page.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
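As a rough illustration of the before/after difference in chain
selection, the rmap index arithmetic can be sketched as below. This is
ordinary user-space C with made-up example values (base_gfn, ea and rpn
are hypothetical), not the kernel code itself:

    #include <stdio.h>

    #define PAGE_SHIFT 16                   /* 64kB system pages, as in the example above */

    int main(void)
    {
            unsigned long psize = 1UL << 24;        /* 16MB huge page */
            unsigned long base_gfn = 0x400;         /* hypothetical memslot base gfn */
            unsigned long ea = 0x123456;            /* hypothetical faulting offset */
            unsigned long rpn = 0x5000000;          /* hypothetical real page number bits */

            unsigned long gpa_base = rpn & ~(psize - 1);       /* base of the huge page */
            unsigned long gpa = gpa_base | (ea & (psize - 1)); /* faulting address */

            unsigned long gfn = gpa >> PAGE_SHIFT;             /* varies with the fault */
            unsigned long gfn_base = gpa_base >> PAGE_SHIFT;   /* fixed for the huge page */

            /* Before the patch: one of 256 possible chains, depending on ea */
            printf("old rmap index: %lu\n", gfn - base_gfn);
            /* After the patch: always the chain for the huge page's base address */
            printf("new rmap index: %lu\n", gfn_base - base_gfn);
            return 0;
    }

With these values the old index is 274 (somewhere inside the 256 chains
spanned by the huge page) while the new index is always 256, the chain
for the huge page's base system page.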
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index f32896ffd784..4e22ecbcf93f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -585,6 +585,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
struct kvm *kvm = vcpu->kvm;
unsigned long *hptep, hpte[3], r;
unsigned long mmu_seq, psize, pte_size;
+ unsigned long gpa_base, gfn_base;
unsigned long gpa, gfn, hva, pfn;
struct kvm_memory_slot *memslot;
unsigned long *rmap;
@@ -623,7 +624,9 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* Translate the logical address and get the page */
psize = hpte_page_size(hpte[0], r);
- gpa = (r & HPTE_R_RPN & ~(psize - 1)) | (ea & (psize - 1));
+ gpa_base = r & HPTE_R_RPN & ~(psize - 1);
+ gfn_base = gpa_base >> PAGE_SHIFT;
+ gpa = gpa_base | (ea & (psize - 1));
gfn = gpa >> PAGE_SHIFT;
memslot = gfn_to_memslot(kvm, gfn);
@@ -635,6 +638,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (!kvm->arch.using_mmu_notifiers)
return -EFAULT; /* should never get here */
+ /*
+ * This should never happen, because of the slot_is_aligned()
+ * check in kvmppc_do_h_enter().
+ */
+ if (gfn_base < memslot->base_gfn)
+ return -EFAULT;
+
/* used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq;
smp_rmb();
@@ -727,7 +737,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
goto out_unlock;
hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
- rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
+ /* Always put the HPTE in the rmap chain for the page base address */
+ rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];
lock_rmap(rmap);
/* Check if we might have been invalidated; let the guest retry if so */