From e244584fe3a5c20deddeca246548ac86dbc6e1d1 Mon Sep 17 00:00:00 2001
From: Izik Eidus
Date: Wed, 10 Jun 2009 19:23:24 +0300
Subject: KVM: Fix dirty bit tracking for slots with large pages

When a slot is already allocated and is being asked to be tracked, we need
to break its large pages.

This code flushes the mmu when someone asks a slot to start dirty bit
tracking.

Cc: stable@kernel.org
Signed-off-by: Izik Eidus
Signed-off-by: Avi Kivity
---
 virt/kvm/kvm_main.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 764554350ed8..013a5b3e9f75 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1194,6 +1194,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		if (!new.dirty_bitmap)
 			goto out_free;
 		memset(new.dirty_bitmap, 0, dirty_bytes);
+		if (old.npages)
+			kvm_arch_flush_shadow(kvm);
 	}
 #endif /* not defined CONFIG_S390 */
--
cgit v1.2.3


From 84261923d3dddb766736023bead6fa07b7e218d5 Mon Sep 17 00:00:00 2001
From: Marcelo Tosatti
Date: Wed, 17 Jun 2009 10:53:47 -0300
Subject: KVM: protect concurrent make_all_cpus_request

make_all_cpus_request contains a race condition which can trigger a false
"request completed" status, as follows:

  CPU0                                  CPU1

  if (test_and_set_bit(req,
      &vcpu->requests))
     ....                               if (test_and_set_bit(req,
                                            &vcpu->requests))
     ..                                     return

  proceed to smp_call_function_many(wait=1)

Use a spinlock to serialize concurrent CPUs.

Cc: stable@kernel.org
Signed-off-by: Andrea Arcangeli
Signed-off-by: Marcelo Tosatti
Signed-off-by: Avi Kivity
---
 include/linux/kvm_host.h | 1 +
 virt/kvm/kvm_main.c      | 3 +++
 2 files changed, 4 insertions(+)

(limited to 'virt/kvm/kvm_main.c')

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index aacc5449f586..16713dc672e4 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -125,6 +125,7 @@ struct kvm_kernel_irq_routing_entry {
 struct kvm {
 	struct mutex lock; /* protects the vcpus array and APIC accesses */
 	spinlock_t mmu_lock;
+	spinlock_t requests_lock;
 	struct rw_semaphore slots_lock;
 	struct mm_struct *mm; /* userspace tied to this vm */
 	int nmemslots;

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 013a5b3e9f75..2884baf1d5f9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -746,6 +746,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 	cpumask_clear(cpus);

 	me = get_cpu();
+	spin_lock(&kvm->requests_lock);
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		vcpu = kvm->vcpus[i];
 		if (!vcpu)
@@ -762,6 +763,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 		smp_call_function_many(cpus, ack_flush, NULL, 1);
 	else
 		called = false;
+	spin_unlock(&kvm->requests_lock);
 	put_cpu();
 	free_cpumask_var(cpus);
 	return called;
@@ -982,6 +984,7 @@ static struct kvm *kvm_create_vm(void)
 	kvm->mm = current->mm;
 	atomic_inc(&kvm->mm->mm_count);
 	spin_lock_init(&kvm->mmu_lock);
+	spin_lock_init(&kvm->requests_lock);
 	kvm_io_bus_init(&kvm->pio_bus);
 	mutex_init(&kvm->lock);
 	kvm_io_bus_init(&kvm->mmio_bus);
--
cgit v1.2.3
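
Editor's note on the first patch above: dirty logging records writes at
base-page granularity, one bit in the slot's dirty bitmap per guest page,
while an existing large-page shadow mapping lets the guest write anywhere
in that range through a single mapping, so individual small pages cannot be
marked dirty reliably. Flushing the shadow MMU forces the slot to be
re-mapped, and with logging enabled it will be rebuilt from small pages.
The following stand-alone sketch only works through that arithmetic; it
assumes x86 4KB base pages and 2MB large pages and is not taken from the
patch.

/*
 * Illustrative user-space arithmetic for the dirty-logging granularity
 * problem; assumes x86 4KB base pages and 2MB large pages.  Hypothetical
 * example only, not part of the patch.
 */
#include <stdio.h>

#define PAGE_SHIFT	12			/* 4KB guest pages */
#define LPAGE_SHIFT	21			/* 2MB large pages */
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long npages = (1UL << 30) >> PAGE_SHIFT;	/* a 1GB slot */

	/* one dirty bit per 4KB page, rounded up to whole longs */
	unsigned long dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

	/* a single 2MB mapping spans this many dirty bits */
	unsigned long bits_per_lpage = 1UL << (LPAGE_SHIFT - PAGE_SHIFT);

	printf("npages=%lu dirty_bytes=%lu bits per 2MB mapping=%lu\n",
	       npages, dirty_bytes, bits_per_lpage);
	return 0;
}

The last number is the point: while a 2MB mapping is in place, a single
guest write can only be attributed to the whole 512-bit span, so the
mapping has to be torn down (kvm_arch_flush_shadow) before per-page dirty
bits mean anything.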
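
Editor's note on the second patch: the changelog's interleaving is easier
to see in runnable form. The sketch below is a user-space analogy of the
fix, not the kernel code: a pthread mutex stands in for kvm->requests_lock,
an atomic word for vcpu->requests, and a sleep for the window before
smp_call_function_many(wait=1) completes. All names in it are hypothetical.

/*
 * User-space analogy of the race and the fix; hypothetical names, not
 * kernel code.  Without requests_lock the second thread can find the
 * request bit already set and return "completed" while the first thread
 * has not yet delivered (and waited for) its IPI -- the false completion
 * described in the changelog.  With the lock held across the whole
 * test-and-notify sequence, anyone who sees the bit set can only have
 * seen it after the notification finished.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t requests_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_ulong requests;		/* stands in for vcpu->requests */
static atomic_bool ipi_done;		/* stands in for the acked IPI  */

static bool make_request(unsigned int req)
{
	bool called = true;

	pthread_mutex_lock(&requests_lock);
	if (atomic_fetch_or(&requests, 1UL << req) & (1UL << req)) {
		/* bit already set: the previous holder already notified */
		called = false;
	} else {
		/* the vulnerable window before the synchronous IPI lands */
		usleep(1000);
		atomic_store(&ipi_done, true);	/* IPI delivered and acked */
	}
	pthread_mutex_unlock(&requests_lock);
	return called;
}

static void *cpu_thread(void *arg)
{
	make_request(0);
	/* safe to proceed (e.g. reuse the page) only once the IPI is done */
	printf("CPU%ld proceeds, ipi_done=%d\n",
	       (long)arg, atomic_load(&ipi_done));
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_create(&t0, NULL, cpu_thread, (void *)0L);
	pthread_create(&t1, NULL, cpu_thread, (void *)1L);
	pthread_join(t0, NULL);
	pthread_join(t1, NULL);
	return 0;
}

Dropping the two mutex calls reproduces the original bug: the second thread
can print ipi_done=0, i.e. proceed before the flush actually happened,
which is the window spin_lock(&kvm->requests_lock) closes in the patched
make_all_cpus_request().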