| author | Avi Kivity <avi@qumranet.com> | 2007-11-25 13:41:11 +0200 |
|---|---|---|
| committer | Avi Kivity <avi@qumranet.com> | 2008-01-30 17:53:18 +0200 |
| commit | 298101da2f507c13eaf179ee4507a7c0fe3e7b06 (patch) | |
| tree | 2c0808964e5bc04812f0379b945fb187aaf901eb /drivers/kvm | |
| parent | 4bf8ed8dd2781a5e7603a83f8ee1d4f5aa04ebc4 (diff) | |
KVM: Generalize exception injection mechanism
Instead of each subarch doing its own thing, add an API for queuing an
injection, and manage failed exception injection centrally (i.e., if an
injection failed due to a shadow page fault, we need to requeue it).
Signed-off-by: Avi Kivity <avi@qumranet.com>
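
With this in place, common x86 code can raise a guest exception without caring whether it runs on SVM or VMX. Below is a minimal usage sketch of the two helpers declared in x86.h; the call site, the helper name and the fault conditions are illustrative, not part of this patch, which only adds the mechanism:

```c
#include "x86.h"

/*
 * Illustrative only: how emulation code is expected to use the new API.
 * The vector numbers are the architectural #UD (6) and #GP (13); this
 * helper and its conditions are hypothetical, not taken from the patch.
 */
static void example_raise_fault(struct kvm_vcpu *vcpu, bool bad_opcode)
{
	if (bad_opcode)
		/* Exception without an error code: only the vector is latched. */
		kvm_queue_exception(vcpu, 6);
	else
		/* Exception with an error code: vector and code are latched in
		 * vcpu->exception and injected on the next guest entry. */
		kvm_queue_exception_e(vcpu, 13, 0);
}
```

Note that queuing while another exception is still pending trips the WARN_ON() in the queueing helpers; merging exceptions (e.g. into a double fault) is not addressed by this patch.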
Diffstat (limited to 'drivers/kvm')
| -rw-r--r-- | drivers/kvm/svm.c | 21 |
| -rw-r--r-- | drivers/kvm/vmx.c | 20 |
| -rw-r--r-- | drivers/kvm/x86.c | 33 |
| -rw-r--r-- | drivers/kvm/x86.h | 13 |
4 files changed, 86 insertions, 1 deletions
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index c75c6b65b65..87072c647f2 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -188,6 +188,25 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	vcpu->shadow_efer = efer;
 }
 
+static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
+				bool has_error_code, u32 error_code)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	svm->vmcb->control.event_inj = nr
+		| SVM_EVTINJ_VALID
+		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
+		| SVM_EVTINJ_TYPE_EXEPT;
+	svm->vmcb->control.event_inj_err = error_code;
+}
+
+static bool svm_exception_injected(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
+}
+
 static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -1712,6 +1731,8 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.patch_hypercall = svm_patch_hypercall,
 	.get_irq = svm_get_irq,
 	.set_irq = svm_set_irq,
+	.queue_exception = svm_queue_exception,
+	.exception_injected = svm_exception_injected,
 	.inject_pending_irq = svm_intr_assist,
 	.inject_pending_vectors = do_interrupt_requests,
 
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index fc5e7c8381c..f382956f176 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -595,6 +595,24 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	vcpu->interrupt_window_open = 1;
 }
 
+static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
+				bool has_error_code, u32 error_code)
+{
+	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+		     nr | INTR_TYPE_EXCEPTION
+		     | (has_error_code ? INTR_INFO_DELIEVER_CODE_MASK : 0)
+		     | INTR_INFO_VALID_MASK);
+	if (has_error_code)
+		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
+}
+
+static bool vmx_exception_injected(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	return !(vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
+}
+
 static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
 {
 	printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n",
@@ -2641,6 +2659,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.patch_hypercall = vmx_patch_hypercall,
 	.get_irq = vmx_get_irq,
 	.set_irq = vmx_inject_irq,
+	.queue_exception = vmx_queue_exception,
+	.exception_injected = vmx_exception_injected,
 	.inject_pending_irq = vmx_intr_assist,
 	.inject_pending_vectors = do_interrupt_requests,
 
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index c9e4b67bfb1..11440d12a2d 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -133,6 +133,32 @@ static void inject_gp(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->inject_gp(vcpu, 0);
 }
 
+void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
+{
+	WARN_ON(vcpu->exception.pending);
+	vcpu->exception.pending = true;
+	vcpu->exception.has_error_code = false;
+	vcpu->exception.nr = nr;
+}
+EXPORT_SYMBOL_GPL(kvm_queue_exception);
+
+void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
+{
+	WARN_ON(vcpu->exception.pending);
+	vcpu->exception.pending = true;
+	vcpu->exception.has_error_code = true;
+	vcpu->exception.nr = nr;
+	vcpu->exception.error_code = error_code;
+}
+EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
+
+static void __queue_exception(struct kvm_vcpu *vcpu)
+{
+	kvm_x86_ops->queue_exception(vcpu, vcpu->exception.nr,
+				     vcpu->exception.has_error_code,
+				     vcpu->exception.error_code);
+}
+
 /*
  * Load the pae pdptrs. Return true is they are all valid.
  */
@@ -2370,7 +2396,9 @@ again:
 		goto out;
 	}
 
-	if (irqchip_in_kernel(vcpu->kvm))
+	if (vcpu->exception.pending)
+		__queue_exception(vcpu);
+	else if (irqchip_in_kernel(vcpu->kvm))
 		kvm_x86_ops->inject_pending_irq(vcpu);
 	else
 		kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
@@ -2409,6 +2437,9 @@ again:
 			profile_hit(KVM_PROFILING, (void *)vcpu->rip);
 	}
 
+	if (vcpu->exception.pending && kvm_x86_ops->exception_injected(vcpu))
+		vcpu->exception.pending = false;
+
 	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
 
 	if (r > 0) {
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index eed796402e3..1e71668694e 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -139,6 +139,13 @@ struct kvm_vcpu {
 	struct kvm_pio_request pio;
 	void *pio_data;
 
+	struct kvm_queued_exception {
+		bool pending;
+		bool has_error_code;
+		u8 nr;
+		u32 error_code;
+	} exception;
+
 	struct {
 		int active;
 		u8 save_iopl;
@@ -224,6 +231,9 @@ struct kvm_x86_ops {
 			       unsigned char *hypercall_addr);
 	int (*get_irq)(struct kvm_vcpu *vcpu);
 	void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
+	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
+				bool has_error_code, u32 error_code);
+	bool (*exception_injected)(struct kvm_vcpu *vcpu);
 	void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
 	void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
 				       struct kvm_run *run);
@@ -294,6 +304,9 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
 
+void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
+void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
+
 void fx_init(struct kvm_vcpu *vcpu);
 
 int emulator_read_std(unsigned long addr,
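
For clarity, the control flow that the x86.c hunks add around each guest entry can be condensed as follows. This is a simplified sketch of the run-loop changes above, not a literal excerpt; the surrounding loop, interrupt injection and exit handling are omitted, and the wrapper function name is made up for illustration:

```c
/*
 * Simplified sketch of the injection/requeue cycle added to the vcpu run
 * loop in x86.c. Field and callback names are the ones introduced by this
 * patch; everything else in the real loop is omitted.
 */
static void exception_cycle(struct kvm_vcpu *vcpu)
{
	/* Before entry: hand a queued exception to the vendor backend
	 * (SVM fills event_inj, VMX fills VM_ENTRY_INTR_INFO_FIELD). */
	if (vcpu->exception.pending)
		kvm_x86_ops->queue_exception(vcpu, vcpu->exception.nr,
					     vcpu->exception.has_error_code,
					     vcpu->exception.error_code);

	/* ... the guest runs and eventually exits ... */

	/* After exit: ask the backend whether delivery completed. If it
	 * did not (e.g. a shadow page fault was taken while delivering),
	 * exception.pending stays set and the same exception is queued
	 * again on the next iteration. */
	if (vcpu->exception.pending && kvm_x86_ops->exception_injected(vcpu))
		vcpu->exception.pending = false;
}
```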