author	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-20 21:13:26 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-20 21:13:26 -0700
commit	f076ab8d048f152b968bb1c6313fed88abb037fe (patch)
tree	fe9ff7bc40d04c5a12eb41a90cc0e1dbf8ae4e45	/arch/x86/kvm/paging_tmpl.h
parent	db6d8c7a4027b48d797b369a53f8470aaeed7063 (diff)
parent	597a5f551ec4cd0aa0966e4fff4684ecc8c31c0d (diff)
Merge branch 'kvm-updates-2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm
* 'kvm-updates-2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm: (70 commits)
  KVM: Adjust smp_call_function_mask() callers to new requirements
  KVM: MMU: Fix potential race setting upper shadow ptes on nonpae hosts
  KVM: x86 emulator: emulate clflush
  KVM: MMU: improve invalid shadow root page handling
  KVM: MMU: nuke shadowed pgtable pages and ptes on memslot destruction
  KVM: Prefix some x86 low level function with kvm_, to avoid namespace issues
  KVM: check injected pic irq within valid pic irqs
  KVM: x86 emulator: Fix HLT instruction
  KVM: Apply the kernel sigmask to vcpus blocked due to being uninitialized
  KVM: VMX: Add ept_sync_context in flush_tlb
  KVM: mmu_shrink: kvm_mmu_zap_page requires slots_lock to be held
  x86: KVM guest: make kvm_smp_prepare_boot_cpu() static
  KVM: SVM: fix suspend/resume support
  KVM: s390: rename private structures
  KVM: s390: Set guest storage limit and offset to sane values
  KVM: Fix memory leak on guest exit
  KVM: s390: dont allocate dirty bitmap
  KVM: move slots_lock acquision down to vapic_exit
  KVM: VMX: Fake emulate Intel perfctr MSRs
  KVM: VMX: Fix a wrong usage of vmcs_config
  ...
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--	arch/x86/kvm/paging_tmpl.h | 28
1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 934c7b61939..4d918220bae 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -460,8 +460,9 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
 static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 				 struct kvm_mmu_page *sp)
 {
-	int i, offset = 0, r = 0;
-	pt_element_t pt;
+	int i, j, offset, r;
+	pt_element_t pt[256 / sizeof(pt_element_t)];
+	gpa_t pte_gpa;
 
 	if (sp->role.metaphysical
 	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
@@ -469,19 +470,20 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 		return;
 	}
 
-	if (PTTYPE == 32)
+	pte_gpa = gfn_to_gpa(sp->gfn);
+	if (PTTYPE == 32) {
 		offset = sp->role.quadrant << PT64_LEVEL_BITS;
+		pte_gpa += offset * sizeof(pt_element_t);
+	}
 
-	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-		gpa_t pte_gpa = gfn_to_gpa(sp->gfn);
-		pte_gpa += (i+offset) * sizeof(pt_element_t);
-
-		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &pt,
-					  sizeof(pt_element_t));
-		if (r || is_present_pte(pt))
-			sp->spt[i] = shadow_trap_nonpresent_pte;
-		else
-			sp->spt[i] = shadow_notrap_nonpresent_pte;
+	for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
+		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
+		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
+		for (j = 0; j < ARRAY_SIZE(pt); ++j)
+			if (r || is_present_pte(pt[j]))
+				sp->spt[i+j] = shadow_trap_nonpresent_pte;
+			else
+				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
 	}
 }
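
The new loop above replaces one kvm_read_guest_atomic() call per guest PTE with one call per 256-byte chunk, then classifies the entries from the local buffer. Below is a minimal, self-contained sketch of that batching pattern in plain userspace C, not kernel code: read_guest() and guest_page[] are hypothetical stand-ins for kvm_read_guest_atomic() and the real guest page table, and the "present" test is a crude low-bit check rather than is_present_pte().

/*
 * Sketch of the batched-read pattern: instead of one guest-memory read
 * per PTE, read a 256-byte chunk of PTEs at a time and scan the chunk
 * locally. read_guest() and guest_page[] are hypothetical stand-ins.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ENT_PER_PAGE 512                       /* 64-bit PTEs in a 4K page */
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

typedef uint64_t pt_element_t;

static pt_element_t guest_page[ENT_PER_PAGE]; /* pretend guest page table */

/* Stand-in for kvm_read_guest_atomic(): copy 'len' bytes at 'offset'. */
static int read_guest(unsigned long offset, void *dst, size_t len)
{
	if (offset + len > sizeof(guest_page))
		return -1;                    /* read failure */
	memcpy(dst, (char *)guest_page + offset, len);
	return 0;
}

int main(void)
{
	pt_element_t pt[256 / sizeof(pt_element_t)]; /* 32-entry batch */
	unsigned long offset = 0;
	int present = 0, i, j, r;

	guest_page[3] = 1;                    /* mark one entry "present" */

	for (i = 0; i < ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
		r = read_guest(offset, pt, sizeof pt);
		offset += sizeof pt;
		for (j = 0; j < (int)ARRAY_SIZE(pt); ++j)
			if (!r && (pt[j] & 1))  /* crude "present" check */
				++present;
	}
	printf("%d present entries found in %d batched reads\n",
	       present, ENT_PER_PAGE / (int)ARRAY_SIZE(pt));
	return 0;
}

In this sketch the 512 entries are covered by 16 reads instead of 512, the same reduction in guest-memory accesses the batched loop in the patch aims for with 64-bit PTEs; the per-batch error flag (r) mirrors how the patch marks a whole chunk as trapping when the guest read fails.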