| author    | Hollis Blanchard <hollisb@us.ibm.com>                      | 2008-11-05 09:36:18 -0600 |
|-----------|------------------------------------------------------------|---------------------------|
| committer | Avi Kivity <avi@redhat.com>                                | 2008-12-31 16:52:22 +0200 |
| commit    | db93f5745d836f81cef0b4101a7c2685eeb55efb (patch)           |                           |
| tree      | 970b0dfc93dbbe25eb988b008bbbeffd866f3f23 /arch/powerpc/kvm |                           |
| parent    | 5cbb5106f50b4515815cd32cf944958c0d4da83f (diff)            |                           |
KVM: ppc: create struct kvm_vcpu_44x and introduce container_of() accessor
This patch doesn't yet move all 44x-specific data into the new structure, but
is the first step down that path. In the future we may also want to create a
struct kvm_vcpu_booke.
Based on patch from Liu Yu <yu.liu@freescale.com>.
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
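For orientation, here is a minimal sketch of the new per-vcpu container and its container_of() accessor, reconstructed from the fields this diff touches (guest_tlb, shadow_tlb, shadow_pages, shadow_tlb_mod). The real definitions are presumably added in the new <asm/kvm_44x.h> header that 44x.c and 44x_tlb.c now include; that header is outside the diffstat below (which is limited to arch/powerpc/kvm), so the exact layout and field order here are assumptions, not the committed code.

```c
/* Sketch only: layout assumed from the accesses made in this patch. */
#include <linux/kvm_host.h>     /* assumed to provide struct kvm_vcpu,
                                   struct kvmppc_44x_tlbe, PPC44x_TLB_SIZE */

struct kvmppc_vcpu_44x {
        /* Unmodified copy of the guest's TLB. */
        struct kvmppc_44x_tlbe guest_tlb[PPC44x_TLB_SIZE];
        /* TLB that's actually used when the guest is running. */
        struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE];
        /* Pages referenced by the shadow TLB entries. */
        struct page *shadow_pages[PPC44x_TLB_SIZE];
        /* Which shadow entries were modified and must be rewritten on exit. */
        u8 shadow_tlb_mod[PPC44x_TLB_SIZE];

        /* The generic vcpu is embedded, not pointed to, so container_of()
         * can recover the wrapper from a struct kvm_vcpu pointer. */
        struct kvm_vcpu vcpu;
};

static inline struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct kvmppc_vcpu_44x, vcpu);
}
```

Because the generic struct kvm_vcpu is embedded in the wrapper, the 44x module now passes sizeof(struct kvmppc_vcpu_44x) to kvm_init() (see kvmppc_44x_init() in the diff below), so the kvm_vcpu_cache allocations are large enough to hold the whole container.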
Diffstat (limited to 'arch/powerpc/kvm')
| -rw-r--r-- | arch/powerpc/kvm/44x.c              | 62 |
| -rw-r--r-- | arch/powerpc/kvm/44x_tlb.c          | 37 |
| -rw-r--r-- | arch/powerpc/kvm/booke.c            |  9 |
| -rw-r--r-- | arch/powerpc/kvm/booke_interrupts.S |  6 |
| -rw-r--r-- | arch/powerpc/kvm/powerpc.c          | 23 |
5 files changed, 92 insertions, 45 deletions
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index f5d7028eeb0..22054b164b5 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -18,9 +18,13 @@
  */
 
 #include <linux/kvm_host.h>
+#include <linux/err.h>
+
 #include <asm/reg.h>
 #include <asm/cputable.h>
 #include <asm/tlbflush.h>
+#include <asm/kvm_44x.h>
+#include <asm/kvm_ppc.h>
 
 #include "44x_tlb.h"
 
@@ -124,7 +128,8 @@ int kvmppc_core_check_processor_compat(void)
 
 int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-        struct kvmppc_44x_tlbe *tlbe = &vcpu->arch.guest_tlb[0];
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+        struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[0];
 
         tlbe->tid = 0;
         tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID;
@@ -150,6 +155,7 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
 int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                                struct kvm_translation *tr)
 {
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
         struct kvmppc_44x_tlbe *gtlbe;
         int index;
         gva_t eaddr;
@@ -166,7 +172,7 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                 return 0;
         }
 
-        gtlbe = &vcpu->arch.guest_tlb[index];
+        gtlbe = &vcpu_44x->guest_tlb[index];
 
         tr->physical_address = tlb_xlate(gtlbe, eaddr);
         /* XXX what does "writeable" and "usermode" even mean? */
@@ -174,3 +180,55 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
 
         return 0;
 }
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+        struct kvmppc_vcpu_44x *vcpu_44x;
+        struct kvm_vcpu *vcpu;
+        int err;
+
+        vcpu_44x = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+        if (!vcpu_44x) {
+                err = -ENOMEM;
+                goto out;
+        }
+
+        vcpu = &vcpu_44x->vcpu;
+        err = kvm_vcpu_init(vcpu, kvm, id);
+        if (err)
+                goto free_vcpu;
+
+        return vcpu;
+
+free_vcpu:
+        kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
+out:
+        return ERR_PTR(err);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+
+        kvm_vcpu_uninit(vcpu);
+        kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
+}
+
+static int kvmppc_44x_init(void)
+{
+        int r;
+
+        r = kvmppc_booke_init();
+        if (r)
+                return r;
+
+        return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), THIS_MODULE);
+}
+
+static void kvmppc_44x_exit(void)
+{
+        kvmppc_booke_exit();
+}
+
+module_init(kvmppc_44x_init);
+module_exit(kvmppc_44x_exit);
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index bb6da134cad..8b65fbd6c57 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -24,6 +24,7 @@
 #include <linux/highmem.h>
 #include <asm/mmu-44x.h>
 #include <asm/kvm_ppc.h>
+#include <asm/kvm_44x.h>
 
 #include "44x_tlb.h"
 
@@ -43,7 +44,7 @@ void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
                 "nr", "tid", "word0", "word1", "word2");
 
         for (i = 0; i < PPC44x_TLB_SIZE; i++) {
-                tlbe = &vcpu->arch.guest_tlb[i];
+                tlbe = &vcpu_44x->guest_tlb[i];
                 if (tlbe->word0 & PPC44x_TLB_VALID)
                         printk(" G%2d | %02X | %08X | %08X | %08X |\n",
                                i, tlbe->tid, tlbe->word0, tlbe->word1,
@@ -51,7 +52,7 @@ void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
         }
 
         for (i = 0; i < PPC44x_TLB_SIZE; i++) {
-                tlbe = &vcpu->arch.shadow_tlb[i];
+                tlbe = &vcpu_44x->shadow_tlb[i];
                 if (tlbe->word0 & PPC44x_TLB_VALID)
                         printk(" S%2d | %02X | %08X | %08X | %08X |\n",
                                i, tlbe->tid, tlbe->word0, tlbe->word1,
@@ -82,11 +83,12 @@ static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
 int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                          unsigned int as)
 {
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
         int i;
 
         /* XXX Replace loop with fancy data structures. */
         for (i = 0; i < PPC44x_TLB_SIZE; i++) {
-                struct kvmppc_44x_tlbe *tlbe = &vcpu->arch.guest_tlb[i];
+                struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
                 unsigned int tid;
 
                 if (eaddr < get_tlb_eaddr(tlbe))
@@ -114,25 +116,27 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
 struct kvmppc_44x_tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu,
                                                gva_t eaddr)
 {
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
         unsigned int as = !!(vcpu->arch.msr & MSR_IS);
         unsigned int index;
 
         index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
         if (index == -1)
                 return NULL;
-        return &vcpu->arch.guest_tlb[index];
+        return &vcpu_44x->guest_tlb[index];
 }
 
 struct kvmppc_44x_tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu,
                                                gva_t eaddr)
 {
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
         unsigned int as = !!(vcpu->arch.msr & MSR_DS);
         unsigned int index;
 
         index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
         if (index == -1)
                 return NULL;
-        return &vcpu->arch.guest_tlb[index];
+        return &vcpu_44x->guest_tlb[index];
 }
 
 static int kvmppc_44x_tlbe_is_writable(struct kvmppc_44x_tlbe *tlbe)
@@ -143,8 +147,9 @@ static int kvmppc_44x_tlbe_is_writable(struct kvmppc_44x_tlbe *tlbe)
 static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
                                       unsigned int index)
 {
-        struct kvmppc_44x_tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
-        struct page *page = vcpu->arch.shadow_pages[index];
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+        struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[index];
+        struct page *page = vcpu_44x->shadow_pages[index];
 
         if (get_tlb_v(stlbe)) {
                 if (kvmppc_44x_tlbe_is_writable(stlbe))
@@ -164,7 +169,9 @@ void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu)
 
 void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
 {
-        vcpu->arch.shadow_tlb_mod[i] = 1;
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+
+        vcpu_44x->shadow_tlb_mod[i] = 1;
 }
 
 /* Caller must ensure that the specified guest TLB entry is safe to insert into
@@ -172,6 +179,7 @@ void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
 void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
                     u32 flags)
 {
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
         struct page *new_page;
         struct kvmppc_44x_tlbe *stlbe;
         hpa_t hpaddr;
@@ -182,7 +190,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
         victim = kvmppc_tlb_44x_pos++;
         if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
                 kvmppc_tlb_44x_pos = 0;
-        stlbe = &vcpu->arch.shadow_tlb[victim];
+        stlbe = &vcpu_44x->shadow_tlb[victim];
 
         /* Get reference to new page. */
         new_page = gfn_to_page(vcpu->kvm, gfn);
@@ -196,7 +204,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
 
         /* Drop reference to old page. */
         kvmppc_44x_shadow_release(vcpu, victim);
-        vcpu->arch.shadow_pages[victim] = new_page;
+        vcpu_44x->shadow_pages[victim] = new_page;
 
         /* XXX Make sure (va, size) doesn't overlap any other
          * entries. 440x6 user manual says the result would be
@@ -224,12 +232,13 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
 static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
                                   gva_t eend, u32 asid)
 {
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
         unsigned int pid = !(asid & 0xff);
         int i;
 
         /* XXX Replace loop with fancy data structures. */
         for (i = 0; i <= tlb_44x_hwater; i++) {
-                struct kvmppc_44x_tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
+                struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
                 unsigned int tid;
 
                 if (!get_tlb_v(stlbe))
@@ -259,12 +268,13 @@ static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
  * switching address spaces. */
 void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
 {
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
         int i;
 
         if (vcpu->arch.swap_pid) {
                 /* XXX Replace loop with fancy data structures. */
                 for (i = 0; i <= tlb_44x_hwater; i++) {
-                        struct kvmppc_44x_tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
+                        struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
 
                         /* Future optimization: clear only userspace mappings. */
                         kvmppc_44x_shadow_release(vcpu, i);
@@ -303,6 +313,7 @@ static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
 
 int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 {
+        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
         u64 eaddr;
         u64 raddr;
         u64 asid;
@@ -317,7 +328,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
                 return EMULATE_FAIL;
         }
 
-        tlbe = &vcpu->arch.guest_tlb[index];
+        tlbe = &vcpu_44x->guest_tlb[index];
 
         /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
         if (tlbe->word0 & PPC44x_TLB_VALID) {
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index c619d1b912c..883e9db5f00 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -573,7 +573,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
         return kvmppc_core_vcpu_translate(vcpu, tr);
 }
 
-static int kvmppc_booke_init(void)
+int kvmppc_booke_init(void)
 {
         unsigned long ivor[16];
         unsigned long max_ivor = 0;
@@ -618,14 +618,11 @@ static int kvmppc_booke_init(void)
         flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
                            max_ivor + kvmppc_handler_len);
 
-        return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
+        return 0;
 }
 
-static void __exit kvmppc_booke_exit(void)
+void __exit kvmppc_booke_exit(void)
 {
         free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
         kvm_exit();
 }
-
-module_init(kvmppc_booke_init)
-module_exit(kvmppc_booke_exit)
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 95e165baf85..8d6929b7fdb 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -349,8 +349,8 @@ lightweight_exit:
         lis     r5, tlb_44x_hwater@ha
         lwz     r5, tlb_44x_hwater@l(r5)
         mtctr   r5
-        addi    r9, r4, VCPU_SHADOW_TLB
-        addi    r5, r4, VCPU_SHADOW_MOD
+        addi    r9, r4, -VCPU_TO_44X + VCPU44x_SHADOW_TLB
+        addi    r5, r4, -VCPU_TO_44X + VCPU44x_SHADOW_MOD
         li      r3, 0
 1:
         lbzx    r7, r3, r5
@@ -377,7 +377,7 @@ lightweight_exit:
         /* Clear bitmap of modified TLB entries */
         li      r5, PPC44x_TLB_SIZE>>2
         mtctr   r5
-        addi    r5, r4, VCPU_SHADOW_MOD - 4
+        addi    r5, r4, -VCPU_TO_44X + VCPU44x_SHADOW_MOD - 4
         li      r6, 0
 1:
         stwu    r6, 4(r5)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 8d0aaf96d83..237f3ba68d2 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -171,31 +171,12 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
 
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
-        struct kvm_vcpu *vcpu;
-        int err;
-
-        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-        if (!vcpu) {
-                err = -ENOMEM;
-                goto out;
-        }
-
-        err = kvm_vcpu_init(vcpu, kvm, id);
-        if (err)
-                goto free_vcpu;
-
-        return vcpu;
-
-free_vcpu:
-        kmem_cache_free(kvm_vcpu_cache, vcpu);
-out:
-        return ERR_PTR(err);
+        return kvmppc_core_vcpu_create(kvm, id);
 }
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
-        kvm_vcpu_uninit(vcpu);
-        kmem_cache_free(kvm_vcpu_cache, vcpu);
+        kvmppc_core_vcpu_free(vcpu);
 }
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
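The booke_interrupts.S hunk above is the same container_of() relationship done by hand in assembly: r4 holds a struct kvm_vcpu pointer, so the code first subtracts the offset of the embedded vcpu inside struct kvmppc_vcpu_44x (VCPU_TO_44X) and then adds the offset of the target array (VCPU44x_SHADOW_TLB or VCPU44x_SHADOW_MOD). Those constants are presumably emitted by the asm-offsets machinery, which is outside this diffstat. The standalone program below (not kernel code; the structs are simplified stand-ins with arbitrary sizes) only demonstrates that this pointer arithmetic lands on the right field:

```c
#include <assert.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel structures; sizes are hypothetical. */
struct kvm_vcpu { int dummy; };
struct kvmppc_44x_tlbe { unsigned tid, word0, word1, word2; };

struct kvmppc_vcpu_44x {
        struct kvmppc_44x_tlbe shadow_tlb[64];
        unsigned char shadow_tlb_mod[64];
        struct kvm_vcpu vcpu;           /* embedded generic vcpu ("r4") */
};

int main(void)
{
        struct kvmppc_vcpu_44x vcpu_44x;
        struct kvm_vcpu *vcpu = &vcpu_44x.vcpu;

        /* "addi r9, r4, -VCPU_TO_44X + VCPU44x_SHADOW_TLB", written in C: */
        char *p = (char *)vcpu
                  - offsetof(struct kvmppc_vcpu_44x, vcpu)
                  + offsetof(struct kvmppc_vcpu_44x, shadow_tlb);

        /* The result is the address of the shadow_tlb array in the wrapper. */
        assert((void *)p == (void *)vcpu_44x.shadow_tlb);
        return 0;
}
```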