author | Avi Kivity <avi@qumranet.com> | 2007-07-20 08:18:27 +0300
committer | Avi Kivity <avi@qumranet.com> | 2007-07-20 20:23:59 +0300
commit | c1158e63dfeb3928e94c768f0a403b3e0e799f70 (patch)
tree | 03a30831c27255d82d479b0242017fb2e9c342a5 /drivers
parent | 35f3f28613bc7263949db23a4c7078e425810c8c (diff)
download | blackbird-op-linux-c1158e63dfeb3928e94c768f0a403b3e0e799f70.tar.gz, blackbird-op-linux-c1158e63dfeb3928e94c768f0a403b3e0e799f70.zip
KVM: MMU: Fix oopses with SLUB
The kvm mmu uses page->private on shadow page tables; so does slub, and
an oops results. Fix by allocating regular pages for shadow page tables
instead of using slub.
Tested-by: S.Çağlar Onur <caglar@pardus.org.tr>
Signed-off-by: Avi Kivity <avi@qumranet.com>
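
For context on the collision described above, a minimal illustrative sketch (not code from this patch; the helper names are invented, and the exact call sites in drivers/kvm/mmu.c are assumed) of the back-pointer the MMU keeps in page->private of each shadow page table page. Since SLUB also uses that field of struct page for its slab pages, handing the MMU a slab object gives the field two owners, hence the oops:

#include <linux/mm.h>

struct kvm_mmu_page;	/* shadow page header; defined in the KVM headers */

/*
 * Illustrative only: record the header in the shadow page's ->private
 * so it can be recovered later from a shadow page table address.
 * If the backing page belongs to SLUB, SLUB's own use of the struct
 * page clashes with this write.
 */
static void shadow_page_set_header(void *spt, struct kvm_mmu_page *hdr)
{
	set_page_private(virt_to_page(spt), (unsigned long)hdr);
}

static struct kvm_mmu_page *shadow_page_header(void *spt)
{
	return (struct kvm_mmu_page *)page_private(virt_to_page(spt));
}
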
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/kvm/mmu.c | 39
1 file changed, 26 insertions(+), 13 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 48d28f1ff4a1..d99d2fe53dca 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -154,7 +154,6 @@ struct kvm_rmap_desc {
 
 static struct kmem_cache *pte_chain_cache;
 static struct kmem_cache *rmap_desc_cache;
-static struct kmem_cache *mmu_page_cache;
 static struct kmem_cache *mmu_page_header_cache;
 
 static int is_write_protection(struct kvm_vcpu *vcpu)
@@ -225,6 +224,29 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 		kfree(mc->objects[--mc->nobjs]);
 }
 
+static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
+				       int min, gfp_t gfp_flags)
+{
+	struct page *page;
+
+	if (cache->nobjs >= min)
+		return 0;
+	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
+		page = alloc_page(gfp_flags);
+		if (!page)
+			return -ENOMEM;
+		set_page_private(page, 0);
+		cache->objects[cache->nobjs++] = page_address(page);
+	}
+	return 0;
+}
+
+static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
+{
+	while (mc->nobjs)
+		__free_page(mc->objects[--mc->nobjs]);
+}
+
 static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
 {
 	int r;
@@ -237,8 +259,7 @@ static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
 				   rmap_desc_cache, 1, gfp_flags);
 	if (r)
 		goto out;
-	r = mmu_topup_memory_cache(&vcpu->mmu_page_cache,
-				   mmu_page_cache, 4, gfp_flags);
+	r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 4, gfp_flags);
 	if (r)
 		goto out;
 	r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
@@ -266,7 +287,7 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
 	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
 	mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
-	mmu_free_memory_cache(&vcpu->mmu_page_cache);
+	mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
 	mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
 }
 
@@ -458,7 +479,7 @@ static void kvm_mmu_free_page(struct kvm *kvm,
 {
 	ASSERT(is_empty_shadow_page(page_head->spt));
 	list_del(&page_head->link);
-	kfree(page_head->spt);
+	__free_page(virt_to_page(page_head->spt));
 	kfree(page_head);
 	++kvm->n_free_mmu_pages;
 }
@@ -1301,8 +1322,6 @@ void kvm_mmu_module_exit(void)
 		kmem_cache_destroy(pte_chain_cache);
 	if (rmap_desc_cache)
 		kmem_cache_destroy(rmap_desc_cache);
-	if (mmu_page_cache)
-		kmem_cache_destroy(mmu_page_cache);
 	if (mmu_page_header_cache)
 		kmem_cache_destroy(mmu_page_header_cache);
 }
@@ -1320,12 +1339,6 @@ int kvm_mmu_module_init(void)
 	if (!rmap_desc_cache)
 		goto nomem;
 
-	mmu_page_cache = kmem_cache_create("kvm_mmu_page",
-					   PAGE_SIZE,
-					   PAGE_SIZE, 0, NULL);
-	if (!mmu_page_cache)
-		goto nomem;
-
 	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
 						  sizeof(struct kvm_mmu_page),
 						  0, 0, NULL);
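
The kvm_mmu_free_page() hunk follows from the same ownership change: once shadow pages come from alloc_page(), their kernel virtual addresses are not slab pointers, so they map back to their struct page via virt_to_page() and must be released with __free_page() rather than kfree(). A minimal sketch of that pairing (helper names invented for illustration, mirroring the new mmu_topup_memory_cache_page()/mmu_free_memory_cache_page() helpers):

#include <linux/mm.h>

/* Sketch: a whole page owned by KVM, so ->private is free for its use. */
static void *shadow_page_get(gfp_t gfp_flags)
{
	struct page *page = alloc_page(gfp_flags);

	if (!page)
		return NULL;
	set_page_private(page, 0);	/* start the field in a known state */
	return page_address(page);
}

static void shadow_page_put(void *spt)
{
	__free_page(virt_to_page(spt));	/* not kfree(): this is not a slab object */
}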