| author | Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp> | 2012-07-02 17:59:33 +0900 |
|---|---|---|
| committer | Marcelo Tosatti <mtosatti@redhat.com> | 2012-07-18 16:55:04 -0300 |
| commit | bcd3ef58283a471d6b65855b83f78bd39eb55391 | |
| tree | d8814c9fbf17570a1cbed823b2dea7468fe9c1d9 /arch/x86/kvm/mmu.c | |
| parent | f395302e09ef783b8f82d1160510a95aa8c66dbc | |
KVM: MMU: Avoid handling same rmap_pde in kvm_handle_hva_range()
When we invalidate a THP page, we call the handler with the same
rmap_pde argument 512 times in the following loop:
    for each guest page in the range
        for each level
            unmap using rmap
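To see why this hits the same rmap_pde repeatedly, consider a 2M THP region (512 guest pages) and the index math KVM uses to pick an rmap slot per level. The standalone sketch below is illustrative only: `gfn_to_index()` and the constants mirror the x86 KVM definitions, but `main()` and its counters are invented for the example. With the old loop order, the single level-2 slot is visited 512 times:

```c
#include <stdio.h>

#define PT_PAGE_TABLE_LEVEL	1
#define KVM_NR_PAGE_SIZES	3
/* Number of gfn bits consumed below a given mapping level:
 * level 1 = 4K -> 0, level 2 = 2M -> 9, level 3 = 1G -> 18. */
#define HPAGE_GFN_SHIFT(l)	(((l) - PT_PAGE_TABLE_LEVEL) * 9)

static unsigned long gfn_to_index(unsigned long gfn, unsigned long base_gfn,
				  int level)
{
	return (gfn >> HPAGE_GFN_SHIFT(level)) -
	       (base_gfn >> HPAGE_GFN_SHIFT(level));
}

int main(void)
{
	unsigned long base_gfn = 0, gfn_start = 0, gfn_end = 512; /* one 2M THP */
	unsigned long calls[PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES] = { 0 };

	/* Old order: outer loop over gfns, inner loop over levels. */
	for (unsigned long gfn = gfn_start; gfn < gfn_end; ++gfn)
		for (int j = PT_PAGE_TABLE_LEVEL;
		     j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j)
			calls[j]++;	/* one handler call per (gfn, level) */

	/* Levels 2 and 3 get 512 calls each even though every gfn in the
	 * range maps to a single rmap index at those levels. */
	for (int j = PT_PAGE_TABLE_LEVEL;
	     j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j)
		printf("level %d: %lu handler calls for %lu distinct rmap slot(s)\n",
		       j, calls[j],
		       gfn_to_index(gfn_end - 1, base_gfn, j) -
		       gfn_to_index(gfn_start, base_gfn, j) + 1);
	return 0;
}
```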
This patch avoids these extra handler calls by changing the loop order
like this:
    for each level
        for each rmap in the range
            unmap using rmap
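Continuing the same sketch (reusing `gfn_to_index()` and the constants above, again as an illustration rather than the kernel code), the reordered walk computes the distinct index range once per level and visits each slot exactly once, so the 2M THP case drops from 3 × 512 = 1536 handler calls to 512 + 1 + 1 = 514:

```c
/* Drop-in replacement for the nested loop in the previous sketch:
 * levels on the outside, distinct rmap indexes on the inside. */
for (int j = PT_PAGE_TABLE_LEVEL;
     j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) {
	unsigned long idx = gfn_to_index(gfn_start, base_gfn, j);
	unsigned long idx_end = gfn_to_index(gfn_end - 1, base_gfn, j);

	for (; idx <= idx_end; ++idx)
		calls[j]++;	/* exactly one handler call per rmap slot */
}
```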
With the preceding patches in the patch series, this made THP page
invalidation more than 5 times faster on our x86 host: as a result, the
host became more responsive while the guest's memory was being swapped.
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
| -rw-r--r-- | arch/x86/kvm/mmu.c | 28 |

1 file changed, 18 insertions(+), 10 deletions(-)
```diff
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 58adec384489..a5d6ef785b7e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1277,7 +1277,7 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 	kvm_for_each_memslot(memslot, slots) {
 		unsigned long hva_start, hva_end;
-		gfn_t gfn, gfn_end;
+		gfn_t gfn_start, gfn_end;
 
 		hva_start = max(start, memslot->userspace_addr);
 		hva_end = min(end, memslot->userspace_addr +
 					(memslot->npages << PAGE_SHIFT));
@@ -1286,19 +1286,27 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 			continue;
 		/*
 		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
-		 * {gfn, gfn+1, ..., gfn_end-1}.
+		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
 		 */
-		gfn = hva_to_gfn_memslot(hva_start, memslot);
+		gfn_start = hva_to_gfn_memslot(hva_start, memslot);
 		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
 
-		for (; gfn < gfn_end; ++gfn) {
-			for (j = PT_PAGE_TABLE_LEVEL;
-			     j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) {
-				unsigned long *rmapp;
+		for (j = PT_PAGE_TABLE_LEVEL;
+		     j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) {
+			unsigned long idx, idx_end;
+			unsigned long *rmapp;
 
-				rmapp = __gfn_to_rmap(gfn, j, memslot);
-				ret |= handler(kvm, rmapp, memslot, data);
-			}
+			/*
+			 * {idx(page_j) | page_j intersects with
+			 *  [hva_start, hva_end)} = {idx, idx+1, ..., idx_end}.
+			 */
+			idx = gfn_to_index(gfn_start, memslot->base_gfn, j);
+			idx_end = gfn_to_index(gfn_end - 1, memslot->base_gfn, j);
+
+			rmapp = __gfn_to_rmap(gfn_start, j, memslot);
+
+			for (; idx <= idx_end; ++idx)
+				ret |= handler(kvm, rmapp++, memslot, data);
 		}
 	}
```
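The final `rmapp++` is safe because, within a memslot, the rmap heads for a given level live in one contiguous array indexed by `gfn_to_index()`, so consecutive indexes are adjacent in memory. Below is a hedged sketch of the layout that `__gfn_to_rmap()` relies on; the struct and helper are simplified stand-ins (reusing the sketch's `gfn_to_index()`), not the kernel's exact definitions:

```c
/* Simplified stand-in for the per-memslot rmap layout; field and
 * function names here are illustrative, not the kernel's. */
struct memslot_rmap_sketch {
	unsigned long base_gfn;
	/* one flat array of rmap heads per page-size level */
	unsigned long *rmap[KVM_NR_PAGE_SIZES];
};

static unsigned long *rmap_slot(struct memslot_rmap_sketch *slot,
				unsigned long gfn, int level)
{
	unsigned long idx = gfn_to_index(gfn, slot->base_gfn, level);

	/* Consecutive indexes map to consecutive array entries, which
	 * is what lets the patched loop advance with rmapp++ instead
	 * of recomputing the slot for every gfn. */
	return &slot->rmap[level - PT_PAGE_TABLE_LEVEL][idx];
}
```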