Diffstat (limited to 'drivers/kvm/svm.c')
-rw-r--r-- | drivers/kvm/svm.c | 1046
1 file changed, 484 insertions(+), 562 deletions(-)
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c index bc818cc126e..729f1cd9360 100644 --- a/drivers/kvm/svm.c +++ b/drivers/kvm/svm.c @@ -16,12 +16,12 @@ #include "kvm_svm.h" #include "x86_emulate.h" +#include "irq.h" #include <linux/module.h> #include <linux/kernel.h> #include <linux/vmalloc.h> #include <linux/highmem.h> -#include <linux/profile.h> #include <linux/sched.h> #include <asm/desc.h> @@ -38,7 +38,6 @@ MODULE_LICENSE("GPL"); #define DR7_GD_MASK (1 << 13) #define DR6_BD_MASK (1 << 13) -#define CR4_DE_MASK (1UL << 3) #define SEG_TYPE_LDT 2 #define SEG_TYPE_BUSY_TSS16 3 @@ -50,6 +49,13 @@ MODULE_LICENSE("GPL"); #define SVM_FEATURE_LBRV (1 << 1) #define SVM_DEATURE_SVML (1 << 2) +static void kvm_reput_irq(struct vcpu_svm *svm); + +static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) +{ + return container_of(vcpu, struct vcpu_svm, vcpu); +} + unsigned long iopm_base; unsigned long msrpm_base; @@ -94,20 +100,6 @@ static inline u32 svm_has(u32 feat) return svm_features & feat; } -static unsigned get_addr_size(struct kvm_vcpu *vcpu) -{ - struct vmcb_save_area *sa = &vcpu->svm->vmcb->save; - u16 cs_attrib; - - if (!(sa->cr0 & CR0_PE_MASK) || (sa->rflags & X86_EFLAGS_VM)) - return 2; - - cs_attrib = sa->cs.attrib; - - return (cs_attrib & SVM_SELECTOR_L_MASK) ? 8 : - (cs_attrib & SVM_SELECTOR_DB_MASK) ? 4 : 2; -} - static inline u8 pop_irq(struct kvm_vcpu *vcpu) { int word_index = __ffs(vcpu->irq_summary); @@ -182,7 +174,7 @@ static inline void write_dr7(unsigned long val) static inline void force_new_asid(struct kvm_vcpu *vcpu) { - vcpu->svm->asid_generation--; + to_svm(vcpu)->asid_generation--; } static inline void flush_guest_tlb(struct kvm_vcpu *vcpu) @@ -195,22 +187,24 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) if (!(efer & KVM_EFER_LMA)) efer &= ~KVM_EFER_LME; - vcpu->svm->vmcb->save.efer = efer | MSR_EFER_SVME_MASK; + to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK; vcpu->shadow_efer = efer; } static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code) { - vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | + struct vcpu_svm *svm = to_svm(vcpu); + + svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_VALID_ERR | SVM_EVTINJ_TYPE_EXEPT | GP_VECTOR; - vcpu->svm->vmcb->control.event_inj_err = error_code; + svm->vmcb->control.event_inj_err = error_code; } static void inject_ud(struct kvm_vcpu *vcpu) { - vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | + to_svm(vcpu)->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT | UD_VECTOR; } @@ -229,19 +223,21 @@ static int is_external_interrupt(u32 info) static void skip_emulated_instruction(struct kvm_vcpu *vcpu) { - if (!vcpu->svm->next_rip) { + struct vcpu_svm *svm = to_svm(vcpu); + + if (!svm->next_rip) { printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__); return; } - if (vcpu->svm->next_rip - vcpu->svm->vmcb->save.rip > 15) { + if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE) { printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n", __FUNCTION__, - vcpu->svm->vmcb->save.rip, - vcpu->svm->next_rip); + svm->vmcb->save.rip, + svm->next_rip); } - vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip; - vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; + vcpu->rip = svm->vmcb->save.rip = svm->next_rip; + svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; vcpu->interrupt_window_open = 1; } @@ -351,8 +347,8 @@ err_1: } -static int set_msr_interception(u32 *msrpm, unsigned msr, - int read, int write) +static void set_msr_interception(u32 
*msrpm, unsigned msr, + int read, int write) { int i; @@ -367,11 +363,10 @@ static int set_msr_interception(u32 *msrpm, unsigned msr, u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1); *base = (*base & ~(0x3 << msr_shift)) | (mask << msr_shift); - return 1; + return; } } - printk(KERN_DEBUG "%s: not found 0x%x\n", __FUNCTION__, msr); - return 0; + BUG(); } static __init int svm_hardware_setup(void) @@ -382,8 +377,6 @@ static __init int svm_hardware_setup(void) void *iopm_va, *msrpm_va; int r; - kvm_emulator_want_group7_invlpg(); - iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER); if (!iopm_pages) @@ -458,11 +451,6 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type) seg->base = 0; } -static int svm_vcpu_setup(struct kvm_vcpu *vcpu) -{ - return 0; -} - static void init_vmcb(struct vmcb *vmcb) { struct vmcb_control_area *control = &vmcb->control; @@ -563,59 +551,83 @@ static void init_vmcb(struct vmcb *vmcb) * cr0 val on cpu init should be 0x60000010, we enable cpu * cache by default. the orderly way is to enable cache in bios. */ - save->cr0 = 0x00000010 | CR0_PG_MASK | CR0_WP_MASK; - save->cr4 = CR4_PAE_MASK; + save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP; + save->cr4 = X86_CR4_PAE; /* rdx = ?? */ } -static int svm_create_vcpu(struct kvm_vcpu *vcpu) +static void svm_vcpu_reset(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + init_vmcb(svm->vmcb); +} + +static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) { + struct vcpu_svm *svm; struct page *page; - int r; + int err; + + svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); + if (!svm) { + err = -ENOMEM; + goto out; + } + + err = kvm_vcpu_init(&svm->vcpu, kvm, id); + if (err) + goto free_svm; + + if (irqchip_in_kernel(kvm)) { + err = kvm_create_lapic(&svm->vcpu); + if (err < 0) + goto free_svm; + } - r = -ENOMEM; - vcpu->svm = kzalloc(sizeof *vcpu->svm, GFP_KERNEL); - if (!vcpu->svm) - goto out1; page = alloc_page(GFP_KERNEL); - if (!page) - goto out2; - - vcpu->svm->vmcb = page_address(page); - clear_page(vcpu->svm->vmcb); - vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; - vcpu->svm->asid_generation = 0; - memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs)); - init_vmcb(vcpu->svm->vmcb); - - fx_init(vcpu); - vcpu->fpu_active = 1; - vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; - if (vcpu == &vcpu->kvm->vcpus[0]) - vcpu->apic_base |= MSR_IA32_APICBASE_BSP; + if (!page) { + err = -ENOMEM; + goto uninit; + } - return 0; + svm->vmcb = page_address(page); + clear_page(svm->vmcb); + svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; + svm->asid_generation = 0; + memset(svm->db_regs, 0, sizeof(svm->db_regs)); + init_vmcb(svm->vmcb); -out2: - kfree(vcpu->svm); -out1: - return r; + fx_init(&svm->vcpu); + svm->vcpu.fpu_active = 1; + svm->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; + if (svm->vcpu.vcpu_id == 0) + svm->vcpu.apic_base |= MSR_IA32_APICBASE_BSP; + + return &svm->vcpu; + +uninit: + kvm_vcpu_uninit(&svm->vcpu); +free_svm: + kmem_cache_free(kvm_vcpu_cache, svm); +out: + return ERR_PTR(err); } static void svm_free_vcpu(struct kvm_vcpu *vcpu) { - if (!vcpu->svm) - return; - if (vcpu->svm->vmcb) - __free_page(pfn_to_page(vcpu->svm->vmcb_pa >> PAGE_SHIFT)); - kfree(vcpu->svm); + struct vcpu_svm *svm = to_svm(vcpu); + + __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT)); + kvm_vcpu_uninit(vcpu); + kmem_cache_free(kvm_vcpu_cache, svm); } -static void svm_vcpu_load(struct kvm_vcpu *vcpu) +static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { - 
int cpu, i; + struct vcpu_svm *svm = to_svm(vcpu); + int i; - cpu = get_cpu(); if (unlikely(cpu != vcpu->cpu)) { u64 tsc_this, delta; @@ -625,23 +637,24 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu) */ rdtscll(tsc_this); delta = vcpu->host_tsc - tsc_this; - vcpu->svm->vmcb->control.tsc_offset += delta; + svm->vmcb->control.tsc_offset += delta; vcpu->cpu = cpu; + kvm_migrate_apic_timer(vcpu); } for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) - rdmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]); + rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); } static void svm_vcpu_put(struct kvm_vcpu *vcpu) { + struct vcpu_svm *svm = to_svm(vcpu); int i; for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) - wrmsrl(host_save_user_msrs[i], vcpu->svm->host_user_msrs[i]); + wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); rdtscll(vcpu->host_tsc); - put_cpu(); } static void svm_vcpu_decache(struct kvm_vcpu *vcpu) @@ -650,31 +663,34 @@ static void svm_vcpu_decache(struct kvm_vcpu *vcpu) static void svm_cache_regs(struct kvm_vcpu *vcpu) { - vcpu->regs[VCPU_REGS_RAX] = vcpu->svm->vmcb->save.rax; - vcpu->regs[VCPU_REGS_RSP] = vcpu->svm->vmcb->save.rsp; - vcpu->rip = vcpu->svm->vmcb->save.rip; + struct vcpu_svm *svm = to_svm(vcpu); + + vcpu->regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; + vcpu->regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; + vcpu->rip = svm->vmcb->save.rip; } static void svm_decache_regs(struct kvm_vcpu *vcpu) { - vcpu->svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX]; - vcpu->svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP]; - vcpu->svm->vmcb->save.rip = vcpu->rip; + struct vcpu_svm *svm = to_svm(vcpu); + svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX]; + svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP]; + svm->vmcb->save.rip = vcpu->rip; } static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) { - return vcpu->svm->vmcb->save.rflags; + return to_svm(vcpu)->vmcb->save.rflags; } static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { - vcpu->svm->vmcb->save.rflags = rflags; + to_svm(vcpu)->vmcb->save.rflags = rflags; } static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) { - struct vmcb_save_area *save = &vcpu->svm->vmcb->save; + struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; switch (seg) { case VCPU_SREG_CS: return &save->cs; @@ -716,36 +732,36 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, var->unusable = !var->present; } -static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) -{ - struct vmcb_seg *s = svm_seg(vcpu, VCPU_SREG_CS); - - *db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; - *l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; -} - static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) { - dt->limit = vcpu->svm->vmcb->save.idtr.limit; - dt->base = vcpu->svm->vmcb->save.idtr.base; + struct vcpu_svm *svm = to_svm(vcpu); + + dt->limit = svm->vmcb->save.idtr.limit; + dt->base = svm->vmcb->save.idtr.base; } static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) { - vcpu->svm->vmcb->save.idtr.limit = dt->limit; - vcpu->svm->vmcb->save.idtr.base = dt->base ; + struct vcpu_svm *svm = to_svm(vcpu); + + svm->vmcb->save.idtr.limit = dt->limit; + svm->vmcb->save.idtr.base = dt->base ; } static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) { - dt->limit = vcpu->svm->vmcb->save.gdtr.limit; - dt->base = vcpu->svm->vmcb->save.gdtr.base; + struct vcpu_svm *svm = to_svm(vcpu); + + dt->limit = svm->vmcb->save.gdtr.limit; + dt->base = svm->vmcb->save.gdtr.base; } static 
void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt) { - vcpu->svm->vmcb->save.gdtr.limit = dt->limit; - vcpu->svm->vmcb->save.gdtr.base = dt->base ; + struct vcpu_svm *svm = to_svm(vcpu); + + svm->vmcb->save.gdtr.limit = dt->limit; + svm->vmcb->save.gdtr.base = dt->base ; } static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) @@ -754,39 +770,42 @@ static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { + struct vcpu_svm *svm = to_svm(vcpu); + #ifdef CONFIG_X86_64 if (vcpu->shadow_efer & KVM_EFER_LME) { - if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) { + if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { vcpu->shadow_efer |= KVM_EFER_LMA; - vcpu->svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME; + svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME; } - if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK) ) { + if (is_paging(vcpu) && !(cr0 & X86_CR0_PG) ) { vcpu->shadow_efer &= ~KVM_EFER_LMA; - vcpu->svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME); + svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME); } } #endif - if ((vcpu->cr0 & CR0_TS_MASK) && !(cr0 & CR0_TS_MASK)) { - vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR); + if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) { + svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR); vcpu->fpu_active = 1; } vcpu->cr0 = cr0; - cr0 |= CR0_PG_MASK | CR0_WP_MASK; - cr0 &= ~(CR0_CD_MASK | CR0_NW_MASK); - vcpu->svm->vmcb->save.cr0 = cr0; + cr0 |= X86_CR0_PG | X86_CR0_WP; + cr0 &= ~(X86_CR0_CD | X86_CR0_NW); + svm->vmcb->save.cr0 = cr0; } static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { vcpu->cr4 = cr4; - vcpu->svm->vmcb->save.cr4 = cr4 | CR4_PAE_MASK; + to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE; } static void svm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { + struct vcpu_svm *svm = to_svm(vcpu); struct vmcb_seg *s = svm_seg(vcpu, seg); s->base = var->base; @@ -805,16 +824,16 @@ static void svm_set_segment(struct kvm_vcpu *vcpu, s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; } if (seg == VCPU_SREG_CS) - vcpu->svm->vmcb->save.cpl - = (vcpu->svm->vmcb->save.cs.attrib + svm->vmcb->save.cpl + = (svm->vmcb->save.cs.attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; } /* FIXME: - vcpu->svm->vmcb->control.int_ctl &= ~V_TPR_MASK; - vcpu->svm->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK); + svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK; + svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK); */ @@ -823,61 +842,68 @@ static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg) return -EOPNOTSUPP; } +static int svm_get_irq(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + u32 exit_int_info = svm->vmcb->control.exit_int_info; + + if (is_external_interrupt(exit_int_info)) + return exit_int_info & SVM_EVTINJ_VEC_MASK; + return -1; +} + static void load_host_msrs(struct kvm_vcpu *vcpu) { #ifdef CONFIG_X86_64 - wrmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base); + wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base); #endif } static void save_host_msrs(struct kvm_vcpu *vcpu) { #ifdef CONFIG_X86_64 - rdmsrl(MSR_GS_BASE, vcpu->svm->host_gs_base); + rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base); #endif } -static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data) +static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data) { if (svm_data->next_asid > svm_data->max_asid) { ++svm_data->asid_generation; svm_data->next_asid = 1; - 
vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; + svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; } - vcpu->cpu = svm_data->cpu; - vcpu->svm->asid_generation = svm_data->asid_generation; - vcpu->svm->vmcb->control.asid = svm_data->next_asid++; -} - -static void svm_invlpg(struct kvm_vcpu *vcpu, gva_t address) -{ - invlpga(address, vcpu->svm->vmcb->control.asid); // is needed? + svm->vcpu.cpu = svm_data->cpu; + svm->asid_generation = svm_data->asid_generation; + svm->vmcb->control.asid = svm_data->next_asid++; } static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr) { - return vcpu->svm->db_regs[dr]; + return to_svm(vcpu)->db_regs[dr]; } static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value, int *exception) { + struct vcpu_svm *svm = to_svm(vcpu); + *exception = 0; - if (vcpu->svm->vmcb->save.dr7 & DR7_GD_MASK) { - vcpu->svm->vmcb->save.dr7 &= ~DR7_GD_MASK; - vcpu->svm->vmcb->save.dr6 |= DR6_BD_MASK; + if (svm->vmcb->save.dr7 & DR7_GD_MASK) { + svm->vmcb->save.dr7 &= ~DR7_GD_MASK; + svm->vmcb->save.dr6 |= DR6_BD_MASK; *exception = DB_VECTOR; return; } switch (dr) { case 0 ... 3: - vcpu->svm->db_regs[dr] = value; + svm->db_regs[dr] = value; return; case 4 ... 5: - if (vcpu->cr4 & CR4_DE_MASK) { + if (vcpu->cr4 & X86_CR4_DE) { *exception = UD_VECTOR; return; } @@ -886,7 +912,7 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value, *exception = GP_VECTOR; return; } - vcpu->svm->vmcb->save.dr7 = value; + svm->vmcb->save.dr7 = value; return; } default: @@ -897,42 +923,44 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value, } } -static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { - u32 exit_int_info = vcpu->svm->vmcb->control.exit_int_info; + u32 exit_int_info = svm->vmcb->control.exit_int_info; + struct kvm *kvm = svm->vcpu.kvm; u64 fault_address; u32 error_code; enum emulation_result er; int r; - if (is_external_interrupt(exit_int_info)) - push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK); + if (!irqchip_in_kernel(kvm) && + is_external_interrupt(exit_int_info)) + push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK); - spin_lock(&vcpu->kvm->lock); + mutex_lock(&kvm->lock); - fault_address = vcpu->svm->vmcb->control.exit_info_2; - error_code = vcpu->svm->vmcb->control.exit_info_1; - r = kvm_mmu_page_fault(vcpu, fault_address, error_code); + fault_address = svm->vmcb->control.exit_info_2; + error_code = svm->vmcb->control.exit_info_1; + r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code); if (r < 0) { - spin_unlock(&vcpu->kvm->lock); + mutex_unlock(&kvm->lock); return r; } if (!r) { - spin_unlock(&vcpu->kvm->lock); + mutex_unlock(&kvm->lock); return 1; } - er = emulate_instruction(vcpu, kvm_run, fault_address, error_code); - spin_unlock(&vcpu->kvm->lock); + er = emulate_instruction(&svm->vcpu, kvm_run, fault_address, + error_code); + mutex_unlock(&kvm->lock); switch (er) { case EMULATE_DONE: return 1; case EMULATE_DO_MMIO: - ++vcpu->stat.mmio_exits; - kvm_run->exit_reason = KVM_EXIT_MMIO; + ++svm->vcpu.stat.mmio_exits; return 0; case EMULATE_FAIL: - vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__); + kvm_report_emulation_failure(&svm->vcpu, "pagetable"); break; default: BUG(); @@ -942,252 +970,142 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) return 0; } -static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +static int 
nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { - vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR); - if (!(vcpu->cr0 & CR0_TS_MASK)) - vcpu->svm->vmcb->save.cr0 &= ~CR0_TS_MASK; - vcpu->fpu_active = 1; + svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR); + if (!(svm->vcpu.cr0 & X86_CR0_TS)) + svm->vmcb->save.cr0 &= ~X86_CR0_TS; + svm->vcpu.fpu_active = 1; - return 1; + return 1; } -static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { /* * VMCB is undefined after a SHUTDOWN intercept * so reinitialize it. */ - clear_page(vcpu->svm->vmcb); - init_vmcb(vcpu->svm->vmcb); + clear_page(svm->vmcb); + init_vmcb(svm->vmcb); kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; return 0; } -static int io_get_override(struct kvm_vcpu *vcpu, - struct vmcb_seg **seg, - int *addr_override) -{ - u8 inst[MAX_INST_SIZE]; - unsigned ins_length; - gva_t rip; - int i; - - rip = vcpu->svm->vmcb->save.rip; - ins_length = vcpu->svm->next_rip - rip; - rip += vcpu->svm->vmcb->save.cs.base; - - if (ins_length > MAX_INST_SIZE) - printk(KERN_DEBUG - "%s: inst length err, cs base 0x%llx rip 0x%llx " - "next rip 0x%llx ins_length %u\n", - __FUNCTION__, - vcpu->svm->vmcb->save.cs.base, - vcpu->svm->vmcb->save.rip, - vcpu->svm->vmcb->control.exit_info_2, - ins_length); - - if (kvm_read_guest(vcpu, rip, ins_length, inst) != ins_length) - /* #PF */ - return 0; - - *addr_override = 0; - *seg = NULL; - for (i = 0; i < ins_length; i++) - switch (inst[i]) { - case 0xf0: - case 0xf2: - case 0xf3: - case 0x66: - continue; - case 0x67: - *addr_override = 1; - continue; - case 0x2e: - *seg = &vcpu->svm->vmcb->save.cs; - continue; - case 0x36: - *seg = &vcpu->svm->vmcb->save.ss; - continue; - case 0x3e: - *seg = &vcpu->svm->vmcb->save.ds; - continue; - case 0x26: - *seg = &vcpu->svm->vmcb->save.es; - continue; - case 0x64: - *seg = &vcpu->svm->vmcb->save.fs; - continue; - case 0x65: - *seg = &vcpu->svm->vmcb->save.gs; - continue; - default: - return 1; - } - printk(KERN_DEBUG "%s: unexpected\n", __FUNCTION__); - return 0; -} - -static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address) +static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { - unsigned long addr_mask; - unsigned long *reg; - struct vmcb_seg *seg; - int addr_override; - struct vmcb_save_area *save_area = &vcpu->svm->vmcb->save; - u16 cs_attrib = save_area->cs.attrib; - unsigned addr_size = get_addr_size(vcpu); - - if (!io_get_override(vcpu, &seg, &addr_override)) - return 0; - - if (addr_override) - addr_size = (addr_size == 2) ? 4: (addr_size >> 1); + u32 io_info = svm->vmcb->control.exit_info_1; //address size bug? + int size, down, in, string, rep; + unsigned port; - if (ins) { - reg = &vcpu->regs[VCPU_REGS_RDI]; - seg = &vcpu->svm->vmcb->save.es; - } else { - reg = &vcpu->regs[VCPU_REGS_RSI]; - seg = (seg) ? 
seg : &vcpu->svm->vmcb->save.ds; - } + ++svm->vcpu.stat.io_exits; - addr_mask = ~0ULL >> (64 - (addr_size * 8)); + svm->next_rip = svm->vmcb->control.exit_info_2; - if ((cs_attrib & SVM_SELECTOR_L_MASK) && - !(vcpu->svm->vmcb->save.rflags & X86_EFLAGS_VM)) { - *address = (*reg & addr_mask); - return addr_mask; - } + string = (io_info & SVM_IOIO_STR_MASK) != 0; - if (!(seg->attrib & SVM_SELECTOR_P_SHIFT)) { - svm_inject_gp(vcpu, 0); - return 0; + if (string) { + if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0) == EMULATE_DO_MMIO) + return 0; + return 1; } - *address = (*reg & addr_mask) + seg->base; - return addr_mask; -} - -static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -{ - u32 io_info = vcpu->svm->vmcb->control.exit_info_1; //address size bug? - int size, down, in, string, rep; - unsigned port; - unsigned long count; - gva_t address = 0; - - ++vcpu->stat.io_exits; - - vcpu->svm->next_rip = vcpu->svm->vmcb->control.exit_info_2; - in = (io_info & SVM_IOIO_TYPE_MASK) != 0; port = io_info >> 16; size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; - string = (io_info & SVM_IOIO_STR_MASK) != 0; rep = (io_info & SVM_IOIO_REP_MASK) != 0; - count = 1; - down = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0; + down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0; - if (string) { - unsigned addr_mask; - - addr_mask = io_adress(vcpu, in, &address); - if (!addr_mask) { - printk(KERN_DEBUG "%s: get io address failed\n", - __FUNCTION__); - return 1; - } - - if (rep) - count = vcpu->regs[VCPU_REGS_RCX] & addr_mask; - } - return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down, - address, rep, port); + return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port); } -static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { return 1; } -static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { - vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1; - skip_emulated_instruction(vcpu); - return kvm_emulate_halt(vcpu); + svm->next_rip = svm->vmcb->save.rip + 1; + skip_emulated_instruction(&svm->vcpu); + return kvm_emulate_halt(&svm->vcpu); } -static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { - vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 3; - skip_emulated_instruction(vcpu); - return kvm_hypercall(vcpu, kvm_run); + svm->next_rip = svm->vmcb->save.rip + 3; + skip_emulated_instruction(&svm->vcpu); + return kvm_hypercall(&svm->vcpu, kvm_run); } -static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +static int invalid_op_interception(struct vcpu_svm *svm, + struct kvm_run *kvm_run) { - inject_ud(vcpu); + inject_ud(&svm->vcpu); return 1; } -static int task_switch_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +static int task_switch_interception(struct vcpu_svm *svm, + struct kvm_run *kvm_run) { - printk(KERN_DEBUG "%s: task swiche is unsupported\n", __FUNCTION__); + pr_unimpl(&svm->vcpu, "%s: task switch is unsupported\n", __FUNCTION__); kvm_run->exit_reason = KVM_EXIT_UNKNOWN; return 0; } -static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { - vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2; - 
kvm_emulate_cpuid(vcpu); + svm->next_rip = svm->vmcb->save.rip + 2; + kvm_emulate_cpuid(&svm->vcpu); return 1; } -static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +static int emulate_on_interception(struct vcpu_svm *svm, + struct kvm_run *kvm_run) { - if (emulate_instruction(vcpu, NULL, 0, 0) != EMULATE_DONE) - printk(KERN_ERR "%s: failed\n", __FUNCTION__); + if (emulate_instruction(&svm->vcpu, NULL, 0, 0) != EMULATE_DONE) + pr_unimpl(&svm->vcpu, "%s: failed\n", __FUNCTION__); return 1; } static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) { + struct vcpu_svm *svm = to_svm(vcpu); + switch (ecx) { case MSR_IA32_TIME_STAMP_COUNTER: { u64 tsc; rdtscll(tsc); - *data = vcpu->svm->vmcb->control.tsc_offset + tsc; + *data = svm->vmcb->control.tsc_offset + tsc; break; } case MSR_K6_STAR: - *data = vcpu->svm->vmcb->save.star; + *data = svm->vmcb->save.star; break; #ifdef CONFIG_X86_64 case MSR_LSTAR: - *data = vcpu->svm->vmcb->save.lstar; + *data = svm->vmcb->save.lstar; break; case MSR_CSTAR: - *data = vcpu->svm->vmcb->save.cstar; + *data = svm->vmcb->save.cstar; break; case MSR_KERNEL_GS_BASE: - *data = vcpu->svm->vmcb->save.kernel_gs_base; + *data = svm->vmcb->save.kernel_gs_base; break; case MSR_SYSCALL_MASK: - *data = vcpu->svm->vmcb->save.sfmask; + *data = svm->vmcb->save.sfmask; break; #endif case MSR_IA32_SYSENTER_CS: - *data = vcpu->svm->vmcb->save.sysenter_cs; + *data = svm->vmcb->save.sysenter_cs; break; case MSR_IA32_SYSENTER_EIP: - *data = vcpu->svm->vmcb->save.sysenter_eip; + *data = svm->vmcb->save.sysenter_eip; break; case MSR_IA32_SYSENTER_ESP: - *data = vcpu->svm->vmcb->save.sysenter_esp; + *data = svm->vmcb->save.sysenter_esp; break; default: return kvm_get_msr_common(vcpu, ecx, data); @@ -1195,57 +1113,59 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) return 0; } -static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { - u32 ecx = vcpu->regs[VCPU_REGS_RCX]; + u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX]; u64 data; - if (svm_get_msr(vcpu, ecx, &data)) - svm_inject_gp(vcpu, 0); + if (svm_get_msr(&svm->vcpu, ecx, &data)) + svm_inject_gp(&svm->vcpu, 0); else { - vcpu->svm->vmcb->save.rax = data & 0xffffffff; - vcpu->regs[VCPU_REGS_RDX] = data >> 32; - vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2; - skip_emulated_instruction(vcpu); + svm->vmcb->save.rax = data & 0xffffffff; + svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32; + svm->next_rip = svm->vmcb->save.rip + 2; + skip_emulated_instruction(&svm->vcpu); } return 1; } static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) { + struct vcpu_svm *svm = to_svm(vcpu); + switch (ecx) { case MSR_IA32_TIME_STAMP_COUNTER: { u64 tsc; rdtscll(tsc); - vcpu->svm->vmcb->control.tsc_offset = data - tsc; + svm->vmcb->control.tsc_offset = data - tsc; break; } case MSR_K6_STAR: - vcpu->svm->vmcb->save.star = data; + svm->vmcb->save.star = data; break; #ifdef CONFIG_X86_64 case MSR_LSTAR: - vcpu->svm->vmcb->save.lstar = data; + svm->vmcb->save.lstar = data; break; case MSR_CSTAR: - vcpu->svm->vmcb->save.cstar = data; + svm->vmcb->save.cstar = data; break; case MSR_KERNEL_GS_BASE: - vcpu->svm->vmcb->save.kernel_gs_base = data; + svm->vmcb->save.kernel_gs_base = data; break; case MSR_SYSCALL_MASK: - vcpu->svm->vmcb->save.sfmask = data; + svm->vmcb->save.sfmask = data; break; #endif case MSR_IA32_SYSENTER_CS: - vcpu->svm->vmcb->save.sysenter_cs = data; + 
svm->vmcb->save.sysenter_cs = data; break; case MSR_IA32_SYSENTER_EIP: - vcpu->svm->vmcb->save.sysenter_eip = data; + svm->vmcb->save.sysenter_eip = data; break; case MSR_IA32_SYSENTER_ESP: - vcpu->svm->vmcb->save.sysenter_esp = data; + svm->vmcb->save.sysenter_esp = data; break; default: return kvm_set_msr_common(vcpu, ecx, data); @@ -1253,37 +1173,39 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) return 0; } -static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { - u32 ecx = vcpu->regs[VCPU_REGS_RCX]; - u64 data = (vcpu->svm->vmcb->save.rax & -1u) - | ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32); - vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 2; - if (svm_set_msr(vcpu, ecx, data)) - svm_inject_gp(vcpu, 0); + u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX]; + u64 data = (svm->vmcb->save.rax & -1u) + | ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32); + svm->next_rip = svm->vmcb->save.rip + 2; + if (svm_set_msr(&svm->vcpu, ecx, data)) + svm_inject_gp(&svm->vcpu, 0); else - skip_emulated_instruction(vcpu); + skip_emulated_instruction(&svm->vcpu); return 1; } -static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { - if (vcpu->svm->vmcb->control.exit_info_1) - return wrmsr_interception(vcpu, kvm_run); + if (svm->vmcb->control.exit_info_1) + return wrmsr_interception(svm, kvm_run); else - return rdmsr_interception(vcpu, kvm_run); + return rdmsr_interception(svm, kvm_run); } -static int interrupt_window_interception(struct kvm_vcpu *vcpu, +static int interrupt_window_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { + svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR); + svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; /* * If the user space waits to inject interrupts, exit as soon as * possible */ if (kvm_run->request_interrupt_window && - !vcpu->irq_summary) { - ++vcpu->stat.irq_window_exits; + !svm->vcpu.irq_summary) { + ++svm->vcpu.stat.irq_window_exits; kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; return 0; } @@ -1291,7 +1213,7 @@ static int interrupt_window_interception(struct kvm_vcpu *vcpu, return 1; } -static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu, +static int (*svm_exit_handlers[])(struct vcpu_svm *svm, struct kvm_run *kvm_run) = { [SVM_EXIT_READ_CR0] = emulate_on_interception, [SVM_EXIT_READ_CR3] = emulate_on_interception, @@ -1338,15 +1260,25 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu, }; -static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) { - u32 exit_code = vcpu->svm->vmcb->control.exit_code; + struct vcpu_svm *svm = to_svm(vcpu); + u32 exit_code = svm->vmcb->control.exit_code; + + kvm_reput_irq(svm); - if (is_external_interrupt(vcpu->svm->vmcb->control.exit_int_info) && + if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { + kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; + kvm_run->fail_entry.hardware_entry_failure_reason + = svm->vmcb->control.exit_code; + return 0; + } + + if (is_external_interrupt(svm->vmcb->control.exit_int_info) && exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR) printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x " "exit_code 0x%x\n", - __FUNCTION__, vcpu->svm->vmcb->control.exit_int_info, + __FUNCTION__, svm->vmcb->control.exit_int_info, exit_code); if (exit_code >= ARRAY_SIZE(svm_exit_handlers) @@ -1356,7 
+1288,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) return 0; } - return svm_exit_handlers[exit_code](vcpu, kvm_run); + return svm_exit_handlers[exit_code](svm, kvm_run); } static void reload_tss(struct kvm_vcpu *vcpu) @@ -1368,93 +1300,126 @@ static void reload_tss(struct kvm_vcpu *vcpu) load_TR_desc(); } -static void pre_svm_run(struct kvm_vcpu *vcpu) +static void pre_svm_run(struct vcpu_svm *svm) { int cpu = raw_smp_processor_id(); struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu); - vcpu->svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; - if (vcpu->cpu != cpu || - vcpu->svm->asid_generation != svm_data->asid_generation) - new_asid(vcpu, svm_data); + svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; + if (svm->vcpu.cpu != cpu || + svm->asid_generation != svm_data->asid_generation) + new_asid(svm, svm_data); } -static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu) +static inline void svm_inject_irq(struct vcpu_svm *svm, int irq) { struct vmcb_control_area *control; - control = &vcpu->svm->vmcb->control; - control->int_vector = pop_irq(vcpu); + control = &svm->vmcb->control; + control->int_vector = irq; control->int_ctl &= ~V_INTR_PRIO_MASK; control->int_ctl |= V_IRQ_MASK | ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); } -static void kvm_reput_irq(struct kvm_vcpu *vcpu) +static void svm_set_irq(struct kvm_vcpu *vcpu, int irq) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + svm_inject_irq(svm, irq); +} + +static void svm_intr_assist(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + struct vmcb *vmcb = svm->vmcb; + int intr_vector = -1; + + kvm_inject_pending_timer_irqs(vcpu); + if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) && + ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) { + intr_vector = vmcb->control.exit_int_info & + SVM_EVTINJ_VEC_MASK; + vmcb->control.exit_int_info = 0; + svm_inject_irq(svm, intr_vector); + return; + } + + if (vmcb->control.int_ctl & V_IRQ_MASK) + return; + + if (!kvm_cpu_has_interrupt(vcpu)) + return; + + if (!(vmcb->save.rflags & X86_EFLAGS_IF) || + (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) || + (vmcb->control.event_inj & SVM_EVTINJ_VALID)) { + /* unable to deliver irq, set pending irq */ + vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR); + svm_inject_irq(svm, 0x0); + return; + } + /* Okay, we can deliver the interrupt: grab it and update PIC state. 
*/ + intr_vector = kvm_cpu_get_interrupt(vcpu); + svm_inject_irq(svm, intr_vector); + kvm_timer_intr_post(vcpu, intr_vector); +} + +static void kvm_reput_irq(struct vcpu_svm *svm) { - struct vmcb_control_area *control = &vcpu->svm->vmcb->control; + struct vmcb_control_area *control = &svm->vmcb->control; - if (control->int_ctl & V_IRQ_MASK) { + if ((control->int_ctl & V_IRQ_MASK) + && !irqchip_in_kernel(svm->vcpu.kvm)) { control->int_ctl &= ~V_IRQ_MASK; - push_irq(vcpu, control->int_vector); + push_irq(&svm->vcpu, control->int_vector); } - vcpu->interrupt_window_open = + svm->vcpu.interrupt_window_open = !(control->int_state & SVM_INTERRUPT_SHADOW_MASK); } +static void svm_do_inject_vector(struct vcpu_svm *svm) +{ + struct kvm_vcpu *vcpu = &svm->vcpu; + int word_index = __ffs(vcpu->irq_summary); + int bit_index = __ffs(vcpu->irq_pending[word_index]); + int irq = word_index * BITS_PER_LONG + bit_index; + + clear_bit(bit_index, &vcpu->irq_pending[word_index]); + if (!vcpu->irq_pending[word_index]) + clear_bit(word_index, &vcpu->irq_summary); + svm_inject_irq(svm, irq); +} + static void do_interrupt_requests(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { - struct vmcb_control_area *control = &vcpu->svm->vmcb->control; + struct vcpu_svm *svm = to_svm(vcpu); + struct vmcb_control_area *control = &svm->vmcb->control; - vcpu->interrupt_window_open = + svm->vcpu.interrupt_window_open = (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) && - (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF)); + (svm->vmcb->save.rflags & X86_EFLAGS_IF)); - if (vcpu->interrupt_window_open && vcpu->irq_summary) + if (svm->vcpu.interrupt_window_open && svm->vcpu.irq_summary) /* * If interrupts enabled, and not blocked by sti or mov ss. Good. */ - kvm_do_inject_irq(vcpu); + svm_do_inject_vector(svm); /* * Interrupts blocked. Wait for unblock. */ - if (!vcpu->interrupt_window_open && - (vcpu->irq_summary || kvm_run->request_interrupt_window)) { + if (!svm->vcpu.interrupt_window_open && + (svm->vcpu.irq_summary || kvm_run->request_interrupt_window)) { control->intercept |= 1ULL << INTERCEPT_VINTR; } else control->intercept &= ~(1ULL << INTERCEPT_VINTR); } -static void post_kvm_run_save(struct kvm_vcpu *vcpu, - struct kvm_run *kvm_run) -{ - kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open && - vcpu->irq_summary == 0); - kvm_run->if_flag = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0; - kvm_run->cr8 = vcpu->cr8; - kvm_run->apic_base = vcpu->apic_base; -} - -/* - * Check if userspace requested an interrupt window, and that the - * interrupt window is open. - * - * No need to exit to userspace if we already have an interrupt queued. 
- */ -static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu, - struct kvm_run *kvm_run) -{ - return (!vcpu->irq_summary && - kvm_run->request_interrupt_window && - vcpu->interrupt_window_open && - (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF)); -} - static void save_db_regs(unsigned long *db_regs) { asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0])); @@ -1476,49 +1441,37 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu) force_new_asid(vcpu); } -static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) +static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) +{ +} + +static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { + struct vcpu_svm *svm = to_svm(vcpu); u16 fs_selector; u16 gs_selector; u16 ldt_selector; - int r; - -again: - r = kvm_mmu_reload(vcpu); - if (unlikely(r)) - return r; - - if (!vcpu->mmio_read_completed) - do_interrupt_requests(vcpu, kvm_run); - clgi(); - - vcpu->guest_mode = 1; - if (vcpu->requests) - if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests)) - svm_flush_tlb(vcpu); - - pre_svm_run(vcpu); + pre_svm_run(svm); save_host_msrs(vcpu); fs_selector = read_fs(); gs_selector = read_gs(); ldt_selector = read_ldt(); - vcpu->svm->host_cr2 = kvm_read_cr2(); - vcpu->svm->host_dr6 = read_dr6(); - vcpu->svm->host_dr7 = read_dr7(); - vcpu->svm->vmcb->save.cr2 = vcpu->cr2; + svm->host_cr2 = kvm_read_cr2(); + svm->host_dr6 = read_dr6(); + svm->host_dr7 = read_dr7(); + svm->vmcb->save.cr2 = vcpu->cr2; - if (vcpu->svm->vmcb->save.dr7 & 0xff) { + if (svm->vmcb->save.dr7 & 0xff) { write_dr7(0); - save_db_regs(vcpu->svm->host_db_regs); - load_db_regs(vcpu->svm->db_regs); + save_db_regs(svm->host_db_regs); + load_db_regs(svm->db_regs); } - if (vcpu->fpu_active) { - fx_save(vcpu->host_fx_image); - fx_restore(vcpu->guest_fx_image); - } + clgi(); + + local_irq_enable(); asm volatile ( #ifdef CONFIG_X86_64 @@ -1532,34 +1485,33 @@ again: #endif #ifdef CONFIG_X86_64 - "mov %c[rbx](%[vcpu]), %%rbx \n\t" - "mov %c[rcx](%[vcpu]), %%rcx \n\t" - "mov %c[rdx](%[vcpu]), %%rdx \n\t" - "mov %c[rsi](%[vcpu]), %%rsi \n\t" - "mov %c[rdi](%[vcpu]), %%rdi \n\t" - "mov %c[rbp](%[vcpu]), %%rbp \n\t" - "mov %c[r8](%[vcpu]), %%r8 \n\t" - "mov %c[r9](%[vcpu]), %%r9 \n\t" - "mov %c[r10](%[vcpu]), %%r10 \n\t" - "mov %c[r11](%[vcpu]), %%r11 \n\t" - "mov %c[r12](%[vcpu]), %%r12 \n\t" - "mov %c[r13](%[vcpu]), %%r13 \n\t" - "mov %c[r14](%[vcpu]), %%r14 \n\t" - "mov %c[r15](%[vcpu]), %%r15 \n\t" + "mov %c[rbx](%[svm]), %%rbx \n\t" + "mov %c[rcx](%[svm]), %%rcx \n\t" + "mov %c[rdx](%[svm]), %%rdx \n\t" + "mov %c[rsi](%[svm]), %%rsi \n\t" + "mov %c[rdi](%[svm]), %%rdi \n\t" + "mov %c[rbp](%[svm]), %%rbp \n\t" + "mov %c[r8](%[svm]), %%r8 \n\t" + "mov %c[r9](%[svm]), %%r9 \n\t" + "mov %c[r10](%[svm]), %%r10 \n\t" + "mov %c[r11](%[svm]), %%r11 \n\t" + "mov %c[r12](%[svm]), %%r12 \n\t" + "mov %c[r13](%[svm]), %%r13 \n\t" + "mov %c[r14](%[svm]), %%r14 \n\t" + "mov %c[r15](%[svm]), %%r15 \n\t" #else - "mov %c[rbx](%[vcpu]), %%ebx \n\t" - "mov %c[rcx](%[vcpu]), %%ecx \n\t" - "mov %c[rdx](%[vcpu]), %%edx \n\t" - "mov %c[rsi](%[vcpu]), %%esi \n\t" - "mov %c[rdi](%[vcpu]), %%edi \n\t" - "mov %c[rbp](%[vcpu]), %%ebp \n\t" + "mov %c[rbx](%[svm]), %%ebx \n\t" + "mov %c[rcx](%[svm]), %%ecx \n\t" + "mov %c[rdx](%[svm]), %%edx \n\t" + "mov %c[rsi](%[svm]), %%esi \n\t" + "mov %c[rdi](%[svm]), %%edi \n\t" + "mov %c[rbp](%[svm]), %%ebp \n\t" #endif #ifdef CONFIG_X86_64 /* Enter guest mode */ "push %%rax \n\t" - "mov %c[svm](%[vcpu]), %%rax \n\t" - "mov %c[vmcb](%%rax), %%rax \n\t" + "mov 
%c[vmcb](%[svm]), %%rax \n\t" SVM_VMLOAD "\n\t" SVM_VMRUN "\n\t" SVM_VMSAVE "\n\t" @@ -1567,8 +1519,7 @@ again: #else /* Enter guest mode */ "push %%eax \n\t" - "mov %c[svm](%[vcpu]), %%eax \n\t" - "mov %c[vmcb](%%eax), %%eax \n\t" + "mov %c[vmcb](%[svm]), %%eax \n\t" SVM_VMLOAD "\n\t" SVM_VMRUN "\n\t" SVM_VMSAVE "\n\t" @@ -1577,73 +1528,69 @@ again: /* Save guest registers, load host registers */ #ifdef CONFIG_X86_64 - "mov %%rbx, %c[rbx](%[vcpu]) \n\t" - "mov %%rcx, %c[rcx](%[vcpu]) \n\t" - "mov %%rdx, %c[rdx](%[vcpu]) \n\t" - "mov %%rsi, %c[rsi](%[vcpu]) \n\t" - "mov %%rdi, %c[rdi](%[vcpu]) \n\t" - "mov %%rbp, %c[rbp](%[vcpu]) \n\t" - "mov %%r8, %c[r8](%[vcpu]) \n\t" - "mov %%r9, %c[r9](%[vcpu]) \n\t" - "mov %%r10, %c[r10](%[vcpu]) \n\t" - "mov %%r11, %c[r11](%[vcpu]) \n\t" - "mov %%r12, %c[r12](%[vcpu]) \n\t" - "mov %%r13, %c[r13](%[vcpu]) \n\t" - "mov %%r14, %c[r14](%[vcpu]) \n\t" - "mov %%r15, %c[r15](%[vcpu]) \n\t" + "mov %%rbx, %c[rbx](%[svm]) \n\t" + "mov %%rcx, %c[rcx](%[svm]) \n\t" + "mov %%rdx, %c[rdx](%[svm]) \n\t" + "mov %%rsi, %c[rsi](%[svm]) \n\t" + "mov %%rdi, %c[rdi](%[svm]) \n\t" + "mov %%rbp, %c[rbp](%[svm]) \n\t" + "mov %%r8, %c[r8](%[svm]) \n\t" + "mov %%r9, %c[r9](%[svm]) \n\t" + "mov %%r10, %c[r10](%[svm]) \n\t" + "mov %%r11, %c[r11](%[svm]) \n\t" + "mov %%r12, %c[r12](%[svm]) \n\t" + "mov %%r13, %c[r13](%[svm]) \n\t" + "mov %%r14, %c[r14](%[svm]) \n\t" + "mov %%r15, %c[r15](%[svm]) \n\t" "pop %%r15; pop %%r14; pop %%r13; pop %%r12;" "pop %%r11; pop %%r10; pop %%r9; pop %%r8;" "pop %%rbp; pop %%rdi; pop %%rsi;" "pop %%rdx; pop %%rcx; pop %%rbx; \n\t" #else - "mov %%ebx, %c[rbx](%[vcpu]) \n\t" - "mov %%ecx, %c[rcx](%[vcpu]) \n\t" - "mov %%edx, %c[rdx](%[vcpu]) \n\t" - "mov %%esi, %c[rsi](%[vcpu]) \n\t" - "mov %%edi, %c[rdi](%[vcpu]) \n\t" - "mov %%ebp, %c[rbp](%[vcpu]) \n\t" + "mov %%ebx, %c[rbx](%[svm]) \n\t" + "mov %%ecx, %c[rcx](%[svm]) \n\t" + "mov %%edx, %c[rdx](%[svm]) \n\t" + "mov %%esi, %c[rsi](%[svm]) \n\t" + "mov %%edi, %c[rdi](%[svm]) \n\t" + "mov %%ebp, %c[rbp](%[svm]) \n\t" "pop %%ebp; pop %%edi; pop %%esi;" "pop %%edx; pop %%ecx; pop %%ebx; \n\t" #endif : - : [vcpu]"a"(vcpu), - [svm]"i"(offsetof(struct kvm_vcpu, svm)), + : [svm]"a"(svm), [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)), - [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])), - [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])), - [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])), - [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])), - [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])), - [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])) + [rbx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RBX])), + [rcx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RCX])), + [rdx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RDX])), + [rsi]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RSI])), + [rdi]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RDI])), + [rbp]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RBP])) #ifdef CONFIG_X86_64 - ,[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])), - [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])), - [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])), - [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])), - [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])), - [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])), - [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])), - [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])) + ,[r8 ]"i"(offsetof(struct 
vcpu_svm,vcpu.regs[VCPU_REGS_R8])), + [r9 ]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R9 ])), + [r10]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R10])), + [r11]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R11])), + [r12]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R12])), + [r13]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R13])), + [r14]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R14])), + [r15]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R15])) #endif : "cc", "memory" ); - vcpu->guest_mode = 0; + local_irq_disable(); - if (vcpu->fpu_active) { - fx_save(vcpu->guest_fx_image); - fx_restore(vcpu->host_fx_image); - } + stgi(); - if ((vcpu->svm->vmcb->save.dr7 & 0xff)) - load_db_regs(vcpu->svm->host_db_regs); + if ((svm->vmcb->save.dr7 & 0xff)) + load_db_regs(svm->host_db_regs); - vcpu->cr2 = vcpu->svm->vmcb->save.cr2; + vcpu->cr2 = svm->vmcb->save.cr2; - write_dr6(vcpu->svm->host_dr6); - write_dr7(vcpu->svm->host_dr7); - kvm_write_cr2(vcpu->svm->host_cr2); + write_dr6(svm->host_dr6); + write_dr7(svm->host_dr7); + kvm_write_cr2(svm->host_cr2); load_fs(fs_selector); load_gs(gs_selector); @@ -1652,57 +1599,19 @@ again: reload_tss(vcpu); - /* - * Profile KVM exit RIPs: - */ - if (unlikely(prof_on == KVM_PROFILING)) - profile_hit(KVM_PROFILING, - (void *)(unsigned long)vcpu->svm->vmcb->save.rip); - - stgi(); - - kvm_reput_irq(vcpu); - - vcpu->svm->next_rip = 0; - - if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) { - kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; - kvm_run->fail_entry.hardware_entry_failure_reason - = vcpu->svm->vmcb->control.exit_code; - post_kvm_run_save(vcpu, kvm_run); - return 0; - } - - r = handle_exit(vcpu, kvm_run); - if (r > 0) { - if (signal_pending(current)) { - ++vcpu->stat.signal_exits; - post_kvm_run_save(vcpu, kvm_run); - kvm_run->exit_reason = KVM_EXIT_INTR; - return -EINTR; - } - - if (dm_request_for_irq_injection(vcpu, kvm_run)) { - ++vcpu->stat.request_irq_exits; - post_kvm_run_save(vcpu, kvm_run); - kvm_run->exit_reason = KVM_EXIT_INTR; - return -EINTR; - } - kvm_resched(vcpu); - goto again; - } - post_kvm_run_save(vcpu, kvm_run); - return r; + svm->next_rip = 0; } static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root) { - vcpu->svm->vmcb->save.cr3 = root; + struct vcpu_svm *svm = to_svm(vcpu); + + svm->vmcb->save.cr3 = root; force_new_asid(vcpu); if (vcpu->fpu_active) { - vcpu->svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR); - vcpu->svm->vmcb->save.cr0 |= CR0_TS_MASK; + svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR); + svm->vmcb->save.cr0 |= X86_CR0_TS; vcpu->fpu_active = 0; } } @@ -1711,26 +1620,27 @@ static void svm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr, uint32_t err_code) { - uint32_t exit_int_info = vcpu->svm->vmcb->control.exit_int_info; + struct vcpu_svm *svm = to_svm(vcpu); + uint32_t exit_int_info = svm->vmcb->control.exit_int_info; ++vcpu->stat.pf_guest; if (is_page_fault(exit_int_info)) { - vcpu->svm->vmcb->control.event_inj_err = 0; - vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | - SVM_EVTINJ_VALID_ERR | - SVM_EVTINJ_TYPE_EXEPT | - DF_VECTOR; + svm->vmcb->control.event_inj_err = 0; + svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | + SVM_EVTINJ_VALID_ERR | + SVM_EVTINJ_TYPE_EXEPT | + DF_VECTOR; return; } vcpu->cr2 = addr; - vcpu->svm->vmcb->save.cr2 = addr; - vcpu->svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | - SVM_EVTINJ_VALID_ERR | - SVM_EVTINJ_TYPE_EXEPT | - PF_VECTOR; - vcpu->svm->vmcb->control.event_inj_err = err_code; + 
svm->vmcb->save.cr2 = addr; + svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | + SVM_EVTINJ_VALID_ERR | + SVM_EVTINJ_TYPE_EXEPT | + PF_VECTOR; + svm->vmcb->control.event_inj_err = err_code; } @@ -1757,17 +1667,25 @@ svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) hypercall[3] = 0xc3; } -static struct kvm_arch_ops svm_arch_ops = { +static void svm_check_processor_compat(void *rtn) +{ + *(int *)rtn = 0; +} + +static struct kvm_x86_ops svm_x86_ops = { .cpu_has_kvm_support = has_svm, .disabled_by_bios = is_disabled, .hardware_setup = svm_hardware_setup, .hardware_unsetup = svm_hardware_unsetup, + .check_processor_compatibility = svm_check_processor_compat, .hardware_enable = svm_hardware_enable, .hardware_disable = svm_hardware_disable, .vcpu_create = svm_create_vcpu, .vcpu_free = svm_free_vcpu, + .vcpu_reset = svm_vcpu_reset, + .prepare_guest_switch = svm_prepare_guest_switch, .vcpu_load = svm_vcpu_load, .vcpu_put = svm_vcpu_put, .vcpu_decache = svm_vcpu_decache, @@ -1778,7 +1696,7 @@ static struct kvm_arch_ops svm_arch_ops = { .get_segment_base = svm_get_segment_base, .get_segment = svm_get_segment, .set_segment = svm_set_segment, - .get_cs_db_l_bits = svm_get_cs_db_l_bits, + .get_cs_db_l_bits = kvm_get_cs_db_l_bits, .decache_cr4_guest_bits = svm_decache_cr4_guest_bits, .set_cr0 = svm_set_cr0, .set_cr3 = svm_set_cr3, @@ -1795,26 +1713,30 @@ static struct kvm_arch_ops svm_arch_ops = { .get_rflags = svm_get_rflags, .set_rflags = svm_set_rflags, - .invlpg = svm_invlpg, .tlb_flush = svm_flush_tlb, .inject_page_fault = svm_inject_page_fault, .inject_gp = svm_inject_gp, .run = svm_vcpu_run, + .handle_exit = handle_exit, .skip_emulated_instruction = skip_emulated_instruction, - .vcpu_setup = svm_vcpu_setup, .patch_hypercall = svm_patch_hypercall, + .get_irq = svm_get_irq, + .set_irq = svm_set_irq, + .inject_pending_irq = svm_intr_assist, + .inject_pending_vectors = do_interrupt_requests, }; static int __init svm_init(void) { - return kvm_init_arch(&svm_arch_ops, THIS_MODULE); + return kvm_init_x86(&svm_x86_ops, sizeof(struct vcpu_svm), + THIS_MODULE); } static void __exit svm_exit(void) { - kvm_exit_arch(); + kvm_exit_x86(); } module_init(svm_init) |
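
A few notes on the patterns this patch leans on follow, each with a simplified sketch in plain C; the types and names in the sketches are stand-ins, not the real kvm definitions, unless stated otherwise.

First, the to_svm() helper added near the top is what retires every vcpu->svm dereference in the file: struct vcpu_svm now embeds its struct kvm_vcpu, and container_of() recovers the outer structure from a pointer to the member.

#include <stddef.h>
#include <stdio.h>

/* Userspace sketch of the container_of() idiom behind to_svm(); the
 * struct layouts are simplified stand-ins, not the real kvm types. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct kvm_vcpu { int vcpu_id; };

struct vcpu_svm {
        struct kvm_vcpu vcpu;   /* embedded, no longer a vcpu->svm pointer */
        unsigned long vmcb_pa;
};

static struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_svm, vcpu);
}

int main(void)
{
        struct vcpu_svm svm = { .vcpu = { .vcpu_id = 0 }, .vmcb_pa = 0x1000 };

        /* given only the embedded kvm_vcpu, recover the outer vcpu_svm */
        printf("%d\n", to_svm(&svm.vcpu) == &svm);      /* prints 1 */
        return 0;
}

Embedding is also what lets svm_create_vcpu() allocate a single kvm_vcpu_cache object instead of hanging a separately kzalloc()ed svm context off the vcpu.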
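
set_msr_interception() now returns void and BUG()s on an MSR outside the mapped ranges, treating a miss as a programming error rather than a runtime condition. The map it edits keeps two intercept bits per MSR, read in the low bit and write in the high bit of each pair; a clear bit grants the guest direct access. A sketch of the bit update, assuming the caller has already resolved the MSR's index within the map (the msrpm_ranges[] lookup in the real function):

#include <stdint.h>

static void set_msr_bits(uint32_t *msrpm, unsigned msr_index,
                         int allow_read, int allow_write)
{
        uint32_t *base = msrpm + msr_index / 16;        /* 16 MSR pairs per u32 */
        unsigned shift = (msr_index % 16) * 2;
        uint32_t mask = (allow_write ? 0 : 2) | (allow_read ? 0 : 1);

        /* clear the pair, then set whichever intercept bits remain */
        *base = (*base & ~(0x3u << shift)) | (mask << shift);
}

Called with allow_read = allow_write = 1 the pair is cleared entirely, which is how the setup code opens up the MSRs it is willing to let the guest touch directly.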
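
svm_create_vcpu() now allocates the whole vcpu_svm itself and reports failure through the returned pointer instead of an int. The body is the usual goto unwind ladder, freeing in reverse order of allocation; a userspace sketch with ERR_PTR() reduced to a cast, just to show the shape of the control flow:

#include <errno.h>
#include <stdlib.h>

#define ERR_PTR(err) ((void *)(long)(err))      /* stand-in for the kernel macro */

struct vcpu_svm { void *vmcb; };

static struct vcpu_svm *create_vcpu(void)
{
        struct vcpu_svm *svm;
        int err;

        svm = calloc(1, sizeof(*svm));          /* kmem_cache_zalloc() in the patch */
        if (!svm) {
                err = -ENOMEM;
                goto out;
        }
        svm->vmcb = malloc(4096);               /* alloc_page() + page_address() */
        if (!svm->vmcb) {
                err = -ENOMEM;
                goto free_svm;                  /* unwind in reverse order */
        }
        return svm;

free_svm:
        free(svm);
out:
        return ERR_PTR(err);
}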
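
svm_vcpu_load() now receives the target cpu from its caller (the get_cpu()/put_cpu() pair moved up a layer), and on migration it keeps the guest-visible TSC from jumping: the guest reads host TSC plus the VMCB tsc_offset, so the difference between the TSC sampled when the vcpu last left its old CPU and the new CPU's current TSC is folded into the offset. The bookkeeping, isolated:

#include <stdint.h>

struct vcpu_tsc {
        uint64_t host_tsc;      /* sampled by rdtscll() at vcpu_put time */
        uint64_t tsc_offset;    /* VMCB control.tsc_offset */
};

/* After this, new_cpu_tsc + tsc_offset equals old host_tsc + old offset,
 * so the guest's TSC continues from where it stopped. */
static void migrate_tsc(struct vcpu_tsc *v, uint64_t new_cpu_tsc)
{
        uint64_t delta = v->host_tsc - new_cpu_tsc;

        v->tsc_offset += delta;
}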
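
new_asid() now takes the vcpu_svm directly. The scheme itself is unchanged and worth spelling out: each host CPU hands out ASIDs from a per-CPU counter, and when the counter passes max_asid it bumps a generation number and requests a full TLB flush; pre_svm_run() compares generations to decide whether the vcpu's cached ASID is still valid (after a migration or a wrap it is not). A compact model:

struct svm_cpu_data { unsigned asid_generation, max_asid, next_asid; };
struct vcpu_state { unsigned asid_generation, asid, tlb_ctl; };

#define TLB_CONTROL_DO_NOTHING      0
#define TLB_CONTROL_FLUSH_ALL_ASID  1

static void new_asid(struct vcpu_state *v, struct svm_cpu_data *sd)
{
        if (sd->next_asid > sd->max_asid) {
                ++sd->asid_generation;          /* ran out: start over */
                sd->next_asid = 1;
                v->tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
        }
        v->asid_generation = sd->asid_generation;
        v->asid = sd->next_asid++;
}

/* pre_svm_run() equivalent: the cached ASID is reusable only while the
 * generation still matches (force_new_asid() just decrements it). */
static void pre_run(struct vcpu_state *v, struct svm_cpu_data *sd)
{
        v->tlb_ctl = TLB_CONTROL_DO_NOTHING;
        if (v->asid_generation != sd->asid_generation)
                new_asid(v, sd);
}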
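
io_interception() loses the hand-rolled instruction decoding (io_get_override() and io_adress() are deleted) and defers string I/O to emulate_instruction(); for the non-string case it only needs the fields packed into exit_info_1. The bit positions below follow the SVM_IOIO_* masks as this file uses them; treat the sketch as a reading aid rather than a spec quote:

#include <stdint.h>

struct ioio {
        int in, string, rep;
        unsigned size;          /* one-hot: 1, 2 or 4 bytes */
        unsigned port;
};

static struct ioio decode_ioio(uint32_t io_info)
{
        struct ioio io;

        io.in     = (io_info & (1u << 0)) != 0;  /* SVM_IOIO_TYPE_MASK */
        io.string = (io_info & (1u << 2)) != 0;  /* SVM_IOIO_STR_MASK  */
        io.rep    = (io_info & (1u << 3)) != 0;  /* SVM_IOIO_REP_MASK  */
        io.size   = (io_info >> 4) & 0x7;        /* SVM_IOIO_SIZE_*    */
        io.port   = io_info >> 16;               /* top 16 bits        */
        return io;
}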
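
The new in-kernel irqchip path (svm_intr_assist(), plus the VINTR disarm added to interrupt_window_interception()) handles the case where an interrupt is pending but undeliverable because the guest runs with IF clear, sits in an interrupt shadow, or already has an event queued. Instead of delivering, it arms the VINTR intercept and posts a dummy virtual interrupt so the guest exits the moment its interrupt window opens. The decision logic, with the VMCB bits reduced to booleans:

#include <stdbool.h>

/* Field names are simplified stand-ins for the VMCB bits in the patch. */
struct vmcb_state {
        bool rflags_if;          /* guest RFLAGS.IF */
        bool int_shadow;         /* SVM_INTERRUPT_SHADOW_MASK */
        bool event_inj_valid;    /* SVM_EVTINJ_VALID already queued */
        bool v_irq;              /* V_IRQ_MASK */
        bool vintr_intercept;    /* INTERCEPT_VINTR armed */
        int injected_vector;
};

static void intr_assist(struct vmcb_state *s, bool has_pending, int vector)
{
        if (s->v_irq || !has_pending)
                return;
        if (!s->rflags_if || s->int_shadow || s->event_inj_valid) {
                /* window closed: request a VINTR exit instead */
                s->vintr_intercept = true;
                s->v_irq = true;
                s->injected_vector = 0;
                return;
        }
        s->v_irq = true;
        s->injected_vector = vector;    /* deliver now */
}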
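
Finally, the VMRUN asm no longer does the two-step "load vcpu->svm, then load svm->vmcb_pa": the vcpu_svm pointer itself goes into %rax, and every guest register is addressed through an offsetof() immediate such as "mov %c[rbx](%[svm]), %%rbx". The %c operand modifier is what lets an "i"-constrained constant be printed as a bare displacement. A minimal x86-64 GNU C illustration of that operand pattern (hypothetical struct, not the real layout):

#include <stddef.h>
#include <stdio.h>

struct demo { long pad; long field; };

static long load_field(struct demo *d)
{
        long val;

        /* expands to e.g. "mov 8(%rdi), %rax" at compile time */
        asm("mov %c[off](%[base]), %[val]"
            : [val] "=r" (val)
            : [base] "r" (d), [off] "i" (offsetof(struct demo, field)));
        return val;
}

int main(void)
{
        struct demo d = { 0, 42 };

        printf("%ld\n", load_field(&d));        /* prints 42 */
        return 0;
}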