| author    | Avi Kivity <avi@qumranet.com>                                    | 2007-07-17 13:04:56 +0300 |
|-----------|------------------------------------------------------------------|---------------------------|
| committer | Avi Kivity <avi@qumranet.com>                                    | 2007-07-20 20:16:29 +0300 |
| commit    | 90cb0529dd230548a7f0d6b315997be854caea1b (patch)                 |                           |
| tree      | 31aad8e119781b7df846a8f8d8522a82ff3e4b25 /drivers/kvm/kvm_main.c |                           |
| parent    | d55e2cb20123cdb5020ec4a2b2f1eace5038c292 (diff)                  |                           |
KVM: Fix memory slot management functions for guest smp
The memory slot management functions were oriented against vcpu 0, where
they should be kvm-wide. This causes hangs starting X on guest smp.
Fix by making the functions (and resultant tail in the mmu) non-vcpu-specific.
Unfortunately this reduces the efficiency of the mmu object cache a bit. We
may have to revisit this later.
Signed-off-by: Avi Kivity <avi@qumranet.com>
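For context, the per-vcpu loops can be dropped from kvm_main.c because the kvm-wide mmu helpers iterate over the VM's vcpus themselves. Below is a minimal sketch of that pattern, not the code added by this patch: the request bit and the kick step are assumptions for illustration, and the real kvm_flush_remote_tlbs() implementation is outside this file's diff.

```c
/*
 * Sketch only -- illustrates the kvm-wide pattern this patch moves to.
 * Instead of loading a single vcpu (the removed vcpu_load_slot() path),
 * a helper walks every populated vcpu slot of the VM.
 */
static void sketch_kvm_wide_op(struct kvm *kvm)
{
        int i;

        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                struct kvm_vcpu *vcpu = &kvm->vcpus[i];

                if (!vcpu->vmcs)        /* unpopulated slot, same check vcpu_load_slot made */
                        continue;
                set_bit(KVM_TLB_FLUSH, &vcpu->requests);        /* assumed request flag */
                /* a real helper would also IPI the CPU running this vcpu
                 * so it notices the request promptly */
        }
}
```

Because the work is no longer done against one loaded vcpu, the mmu object caches end up shared across vcpus, which is the efficiency cost the message mentions.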
Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r-- | drivers/kvm/kvm_main.c | 68
1 file changed, 9 insertions(+), 59 deletions(-)
```diff
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 1b206f197c6..05f0418f219 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -238,23 +238,6 @@ static void vcpu_load(struct kvm_vcpu *vcpu)
 	kvm_arch_ops->vcpu_load(vcpu);
 }
 
-/*
- * Switches to specified vcpu, until a matching vcpu_put(). Will return NULL
- * if the slot is not populated.
- */
-static struct kvm_vcpu *vcpu_load_slot(struct kvm *kvm, int slot)
-{
-	struct kvm_vcpu *vcpu = &kvm->vcpus[slot];
-
-	mutex_lock(&vcpu->mutex);
-	if (!vcpu->vmcs) {
-		mutex_unlock(&vcpu->mutex);
-		return NULL;
-	}
-	kvm_arch_ops->vcpu_load(vcpu);
-	return vcpu;
-}
-
 static void vcpu_put(struct kvm_vcpu *vcpu)
 {
 	kvm_arch_ops->vcpu_put(vcpu);
@@ -663,13 +646,6 @@ void fx_init(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(fx_init);
 
-static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
-{
-	spin_lock(&vcpu->kvm->lock);
-	kvm_mmu_slot_remove_write_access(vcpu, slot);
-	spin_unlock(&vcpu->kvm->lock);
-}
-
 /*
  * Allocate some memory and give it an address in the guest physical address
  * space.
@@ -792,19 +768,10 @@ raced:
 	*memslot = new;
 	++kvm->memory_config_version;
 
-	spin_unlock(&kvm->lock);
-
-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		struct kvm_vcpu *vcpu;
+	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+	kvm_flush_remote_tlbs(kvm);
 
-		vcpu = vcpu_load_slot(kvm, i);
-		if (!vcpu)
-			continue;
-		if (new.flags & KVM_MEM_LOG_DIRTY_PAGES)
-			do_remove_write_access(vcpu, mem->slot);
-		kvm_mmu_reset_context(vcpu);
-		vcpu_put(vcpu);
-	}
+	spin_unlock(&kvm->lock);
 
 	kvm_free_physmem_slot(&old, &new);
 	return 0;
@@ -826,7 +793,6 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	int r, i;
 	int n;
-	int cleared;
 	unsigned long any = 0;
 
 	spin_lock(&kvm->lock);
@@ -855,23 +821,11 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
 		goto out;
 
-	if (any) {
-		cleared = 0;
-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			struct kvm_vcpu *vcpu;
-
-			vcpu = vcpu_load_slot(kvm, i);
-			if (!vcpu)
-				continue;
-			if (!cleared) {
-				do_remove_write_access(vcpu, log->slot);
-				memset(memslot->dirty_bitmap, 0, n);
-				cleared = 1;
-			}
-			kvm_arch_ops->tlb_flush(vcpu);
-			vcpu_put(vcpu);
-		}
-	}
+	spin_lock(&kvm->lock);
+	kvm_mmu_slot_remove_write_access(kvm, log->slot);
+	kvm_flush_remote_tlbs(kvm);
+	memset(memslot->dirty_bitmap, 0, n);
+	spin_unlock(&kvm->lock);
 
 	r = 0;
 
@@ -920,13 +874,9 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
 			break;
 	kvm->naliases = n;
 
-	spin_unlock(&kvm->lock);
+	kvm_mmu_zap_all(kvm);
 
-	vcpu_load(&kvm->vcpus[0]);
-	spin_lock(&kvm->lock);
-	kvm_mmu_zap_all(&kvm->vcpus[0]);
 	spin_unlock(&kvm->lock);
-	vcpu_put(&kvm->vcpus[0]);
 
 	return 0;
```
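Read together, the hunks change the mmu entry points from per-vcpu to per-VM. A sketch of the before/after signatures as implied by the call sites above; the actual declarations and the mmu-side changes live outside this file's diff, and the return types are assumed here.

```c
/* Before this patch: callers had to load a vcpu first (signatures
 * implied by the removed call sites; return types assumed). */
void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot);
void kvm_mmu_zap_all(struct kvm_vcpu *vcpu);

/* After this patch: kvm-wide, called under kvm->lock with no vcpu
 * loaded, followed by kvm_flush_remote_tlbs(kvm) where needed. */
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
```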