author      Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>    2015-10-19 15:13:29 +0900
committer   Paolo Bonzini <pbonzini@redhat.com>                     2015-10-19 11:36:05 +0200
commit      8c85ac1c0a1b41299370857765bc0950666ed5d9 (patch)
tree        5b3e8f77536dff0cc45186012bf675ec7dafdcb0 /arch/x86
parent      5690891bcec5fcfda38da974ffa5488e36a59811 (diff)
KVM: x86: MMU: Initialize force_pt_level before calling mapping_level()
Commit fd1369021878 ("KVM: x86: MMU: Move mapping_level_dirty_bitmap() call in mapping_level()") forgot to initialize force_pt_level to false in FNAME(page_fault)() before calling mapping_level(), like nonpaging_map() does. This can sometimes result in forcing page table level mapping unnecessarily.

Fix this and move the first *force_pt_level check in mapping_level() before the kvm_vcpu_gfn_to_memslot() call, to make it a bit clearer that the variable must be initialized before mapping_level() gets called.

This change can also avoid calling kvm_vcpu_gfn_to_memslot() when the !check_hugepage_cache_consistency() check in tdp_page_fault() forces page table level mapping.

Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
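For illustration only, a minimal sketch of the caller-side contract the fix restores. The function name and body below are hypothetical and simplified, not the actual FNAME(page_fault)() code: the point is that force_pt_level must start out false, because mapping_level() now reads it before doing anything else and only sets it to true when large-page mapping has to be disallowed.

/* Hypothetical, simplified caller; illustrates the contract only. */
static int example_page_fault(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	int level = PT_PAGE_TABLE_LEVEL;
	bool force_pt_level = false;	/* must be initialized before the call */

	level = mapping_level(vcpu, gfn, &force_pt_level);
	if (likely(!force_pt_level))
		/* large mapping allowed: align gfn to the huge-page boundary */
		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);

	return level;
}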
Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/kvm/mmu.c           7
-rw-r--r--   arch/x86/kvm/paging_tmpl.h   2
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index dd2a7c6ec2ca..7d85bcae3332 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -886,10 +886,11 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
 	int host_level, level, max_level;
 	struct kvm_memory_slot *slot;
 
-	slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);
+	if (unlikely(*force_pt_level))
+		return PT_PAGE_TABLE_LEVEL;
 
-	if (likely(!*force_pt_level))
-		*force_pt_level = !memslot_valid_for_gpte(slot, true);
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);
+	*force_pt_level = !memslot_valid_for_gpte(slot, true);
 	if (unlikely(*force_pt_level))
 		return PT_PAGE_TABLE_LEVEL;
 
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index bf39d0f3efa9..b41faa91a6f9 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -698,7 +698,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	int r;
 	pfn_t pfn;
 	int level = PT_PAGE_TABLE_LEVEL;
-	bool force_pt_level;
+	bool force_pt_level = false;
 	unsigned long mmu_seq;
 	bool map_writable, is_self_change_mapping;
 
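Taken together, the two hunks give the following post-patch shape of mapping_level(), reconstructed from the diff above with the rest of the function omitted. When the caller has already forced page-table-level (4K) mapping, the function returns before the memslot lookup, which is what lets the !check_hugepage_cache_consistency() path in tdp_page_fault() avoid kvm_vcpu_gfn_to_memslot().

static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
			 bool *force_pt_level)
{
	int host_level, level, max_level;
	struct kvm_memory_slot *slot;

	/* Caller already forced 4K pages: skip the memslot lookup. */
	if (unlikely(*force_pt_level))
		return PT_PAGE_TABLE_LEVEL;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);
	*force_pt_level = !memslot_valid_for_gpte(slot, true);
	if (unlikely(*force_pt_level))
		return PT_PAGE_TABLE_LEVEL;

	/* ... rest of the function unchanged ... */
}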