author     Avi Kivity <avi@qumranet.com>  2007-12-31 15:27:49 +0200
committer  Avi Kivity <avi@qumranet.com>  2008-01-30 18:01:21 +0200
commit     eb787d10af8045dd00d4d4c9a8e90fa495f1b0c1 (patch)
tree       6594a4f1ba3718d01a8682aeadb31a0f61ae6f86 /arch/x86/kvm/paging_tmpl.h
parent     aaee2c94f7a1f7726e360a6cfb40173bd552bcff (diff)
KVM: MMU: Move kvm_mmu_free_some_pages() into critical section

If some other cpu steals mmu pages between our check and an attempt to
allocate, we can run out of mmu pages.  Fix by moving the check into the
same critical section as the allocation.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 1 +
1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a35b83a4fef..349920556be 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -402,6 +402,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	page = gfn_to_page(vcpu->kvm, walker.gfn);
 	spin_lock(&vcpu->kvm->mmu_lock);
+	kvm_mmu_free_some_pages(vcpu);
 	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
 				  &write_pt, page);
 	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
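
For readers skimming the diff: before this patch, the free-pages check ran
before mmu_lock was taken, so another vcpu could consume the remaining
shadow pages between the check and the fetch that allocates them.  The
sketch below is a minimal illustration of that check-then-allocate race
and the fix, using hypothetical names and a pthread mutex standing in for
the kernel's spinlock and shadow-page pool; it is not the actual KVM code.

/* race_sketch.c - build with: cc -pthread race_sketch.c */
#include <pthread.h>
#include <stdio.h>

#define MIN_FREE_PAGES 2	/* hypothetical low-water mark */

static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;
static int free_pages = 4;	/* stands in for the shadow-page pool */

/* Top up the pool; stands in for kvm_mmu_free_some_pages(). */
static void free_some_pages(void)
{
	while (free_pages < MIN_FREE_PAGES)
		free_pages++;	/* "zap" a shadow page to reclaim it */
}

/* Before the patch: the check runs outside the critical section. */
static void page_fault_racy(void)
{
	free_some_pages();	/* the pool looks fine here... */
	/* ...but another vcpu thread can drain it at this point... */
	pthread_mutex_lock(&mmu_lock);
	free_pages--;		/* ...so this may exhaust the pool */
	pthread_mutex_unlock(&mmu_lock);
}

/* After the patch: check and allocation share one critical section. */
static void page_fault_fixed(void)
{
	pthread_mutex_lock(&mmu_lock);
	free_some_pages();	/* no other thread can steal pages from here on */
	free_pages--;		/* allocation is now safe */
	pthread_mutex_unlock(&mmu_lock);
}

int main(void)
{
	page_fault_racy();	/* harmless single-threaded; racy under contention */
	page_fault_fixed();
	printf("free pages after faults: %d\n", free_pages);
	return 0;
}

The fix mirrors the one-line change above: once the check and the
allocation run under the same lock, no other vcpu can observe or drain
the pool between them.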