path: root/drivers/kvm/paging_tmpl.h
author     Avi Kivity <avi@qumranet.com>                        2007-01-26 00:56:41 -0800
committer  Linus Torvalds <torvalds@woody.linux-foundation.org> 2007-01-26 13:50:57 -0800
commit     73b1087e6176a34c01eea3db269848f72fad72c1 (patch)
tree       05e74fb28c1980e8327587934f2a0fe2a3c0d683 /drivers/kvm/paging_tmpl.h
parent     7993ba43db1c07245ada067791f91dbf018095ac (diff)
[PATCH] KVM: MMU: Report nx faults to the guest
With the recent guest page fault change, we perform access checks on our
own instead of relying on the cpu.  This means we have to perform the nx
checks as well.  Software like the google toolbar on windows appears to
rely on this somehow.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
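To make the rule the commit message describes concrete: the software walker now has to
reproduce the check the CPU would otherwise make, namely that an instruction fetch through
a 64-bit PTE whose NX bit (bit 63) is set must fault when the guest has EFER.NX enabled.
The following is a minimal, self-contained sketch of that rule, not the kernel code itself;
the function name and parameters are invented for illustration (the real check lives in
FNAME(walk_addr) in the diff below).

#include <stdbool.h>
#include <stdint.h>

#define PT64_NX_SHIFT   63                      /* architectural NX bit in a 64-bit PTE */
#define PT64_NX_MASK    (1ULL << PT64_NX_SHIFT)

/*
 * Hypothetical stand-in for the check added to the guest page-table walker:
 * an access is an NX violation only when it is an instruction fetch, the
 * guest has enabled EFER.NX, and the page-table entry marks the page
 * no-execute.
 */
static bool nx_access_error(uint64_t pte, bool fetch_fault, bool guest_efer_nx)
{
        return fetch_fault && guest_efer_nx && (pte & PT64_NX_MASK);
}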
Diffstat (limited to 'drivers/kvm/paging_tmpl.h')
-rw-r--r--  drivers/kvm/paging_tmpl.h | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index afcd2a8f45b..149fa45fd9a 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -71,7 +71,7 @@ struct guest_walker {
  */
 static int FNAME(walk_addr)(struct guest_walker *walker,
                             struct kvm_vcpu *vcpu, gva_t addr,
-                            int write_fault, int user_fault)
+                            int write_fault, int user_fault, int fetch_fault)
 {
         hpa_t hpa;
         struct kvm_memory_slot *slot;
@@ -123,6 +123,11 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
                 if (user_fault && !(*ptep & PT_USER_MASK))
                         goto access_error;
 
+#if PTTYPE == 64
+                if (fetch_fault && is_nx(vcpu) && (*ptep & PT64_NX_MASK))
+                        goto access_error;
+#endif
+
                 if (!(*ptep & PT_ACCESSED_MASK))
                         *ptep |= PT_ACCESSED_MASK; /* avoid rmw */
@@ -169,6 +174,8 @@ err:
                 walker->error_code |= PFERR_WRITE_MASK;
         if (user_fault)
                 walker->error_code |= PFERR_USER_MASK;
+        if (fetch_fault)
+                walker->error_code |= PFERR_FETCH_MASK;
         return 0;
 }
@@ -372,6 +379,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 {
         int write_fault = error_code & PFERR_WRITE_MASK;
         int user_fault = error_code & PFERR_USER_MASK;
+        int fetch_fault = error_code & PFERR_FETCH_MASK;
         struct guest_walker walker;
         u64 *shadow_pte;
         int fixed;
@@ -388,7 +396,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
         /*
          * Look up the shadow pte for the faulting address.
          */
-        r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault);
+        r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
+                             fetch_fault);
 
         /*
          * The page is not mapped by the guest.  Let the guest handle it.
@@ -437,7 +446,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
         pt_element_t guest_pte;
         gpa_t gpa;
 
-        FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0);
+        FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
         guest_pte = *walker.ptep;
         FNAME(release_walker)(&walker);
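For reference, the fetch_fault flag that this patch threads through the walker comes from
bit 4 of the x86 page-fault error code (the instruction-fetch bit, which hardware reports
when NX is in use), and the same bit is reflected back to the guest via PFERR_FETCH_MASK.
The sketch below spells out that bit layout and the decode step; the constant names follow
KVM's PFERR_* convention, but the definitions and helper shown here are illustrative rather
than quoted from the KVM headers.

#include <stdint.h>

/*
 * x86 page-fault error-code bits, per the architecture.  The names mirror
 * KVM's PFERR_* convention; the actual definitions live outside this file.
 */
#define PFERR_PRESENT_MASK  (1U << 0)   /* fault on a present page */
#define PFERR_WRITE_MASK    (1U << 1)   /* fault was a write */
#define PFERR_USER_MASK     (1U << 2)   /* fault occurred in user mode */
#define PFERR_FETCH_MASK    (1U << 4)   /* fault was an instruction fetch (NX) */

/* Hypothetical helper: decode the flags the fault handler cares about. */
static void decode_error_code(uint32_t error_code,
                              int *write_fault, int *user_fault, int *fetch_fault)
{
        *write_fault = error_code & PFERR_WRITE_MASK;
        *user_fault  = error_code & PFERR_USER_MASK;
        *fetch_fault = error_code & PFERR_FETCH_MASK;
}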