author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-01-31 00:40:09 +1100
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-01-31 00:40:09 +1100
commit | dd430ca20c40ecccd6954a7efd13d4398f507728 (patch)
tree | b65089436d17b2bcc6054ede2e335a821b50007f /arch/x86/xen/enlighten.c
parent | 60e233172eabdd1f831bd48631b9626ce2279d9b (diff)
parent | afadcd788f37bfa62d92662e54a720c26c91becf (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (890 commits)
x86: fix nodemap_size according to nodeid bits
x86: fix overlap between pagetable with bss section
x86: add PCI IDs to k8topology_64.c
x86: fix early_ioremap pagetable ops
x86: use the same pgd_list for PAE and 64-bit
x86: defer cr3 reload when doing pud_clear()
x86: early boot debugging via FireWire (ohci1394_dma=early)
x86: don't special-case pmd allocations as much
x86: shrink some ifdefs in fault.c
x86: ignore spurious faults
x86: remove nx_enabled from fault.c
x86: unify fault_32|64.c
x86: unify fault_32|64.c with ifdefs
x86: unify fault_32|64.c by ifdef'd function bodies
x86: arch/x86/mm/init_32.c printk fixes
x86: arch/x86/mm/init_32.c cleanup
x86: arch/x86/mm/init_64.c printk fixes
x86: unify ioremap
x86: fixes some bugs about EFI memory map handling
x86: use reboot_type on EFI 32
...
Diffstat (limited to 'arch/x86/xen/enlighten.c')
-rw-r--r-- | arch/x86/xen/enlighten.c | 102
1 files changed, 57 insertions, 45 deletions
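
Most of the diff below swaps the old paravirt descriptor hooks, which took an entry as two u32 halves (low, high), for hooks that take a pointer to the whole entry (const void * or gate_desc *). As a side note (not part of the commit), the standalone C sketch below shows why the two conventions carry the same information on a little-endian x86 target: the 64-bit entry and its two u32 halves are just different views of the same bytes, which is what xen_write_idt_entry relies on when it casts the gate with u32 *desc = (u32 *)g.

```c
/*
 * Standalone illustration, not kernel code: on little-endian x86 a
 * 64-bit descriptor entry and its two u32 halves are the same bytes.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t low  = 0x0000ffffu;	/* arbitrary example halves */
	uint32_t high = 0x00cf9a00u;

	/* Old hook convention: rebuild the 64-bit entry from the halves. */
	uint64_t entry = (uint64_t)high << 32 | low;

	/* New hook convention: take a pointer to the entry and read the
	 * halves back only where needed (cf. "u32 *desc = (u32 *)g"). */
	uint32_t *halves = (uint32_t *)&entry;

	printf("entry=%#llx low=%#x high=%#x\n",
	       (unsigned long long)entry, halves[0], halves[1]);
	return 0;
}
```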
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 79ad1525215..de647bc6e74 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -141,8 +141,8 @@ static void __init xen_banner(void)
 	printk(KERN_INFO "Hypervisor signature: %s\n", xen_start_info->magic);
 }
 
-static void xen_cpuid(unsigned int *eax, unsigned int *ebx,
-		      unsigned int *ecx, unsigned int *edx)
+static void xen_cpuid(unsigned int *ax, unsigned int *bx,
+		      unsigned int *cx, unsigned int *dx)
 {
 	unsigned maskedx = ~0;
 
@@ -150,18 +150,18 @@ static void xen_cpuid(unsigned int *eax, unsigned int *ebx,
 	 * Mask out inconvenient features, to try and disable as many
 	 * unsupported kernel subsystems as possible.
 	 */
-	if (*eax == 1)
+	if (*ax == 1)
 		maskedx = ~((1 << X86_FEATURE_APIC) |  /* disable APIC */
 			    (1 << X86_FEATURE_ACPI) |  /* disable ACPI */
 			    (1 << X86_FEATURE_ACC));   /* thermal monitoring */
 
 	asm(XEN_EMULATE_PREFIX "cpuid"
-		: "=a" (*eax),
-		  "=b" (*ebx),
-		  "=c" (*ecx),
-		  "=d" (*edx)
-		: "0" (*eax), "2" (*ecx));
-	*edx &= maskedx;
+		: "=a" (*ax),
+		  "=b" (*bx),
+		  "=c" (*cx),
+		  "=d" (*dx)
+		: "0" (*ax), "2" (*cx));
+	*dx &= maskedx;
 }
 
 static void xen_set_debugreg(int reg, unsigned long val)
@@ -275,19 +275,12 @@ static unsigned long xen_store_tr(void)
 
 static void xen_set_ldt(const void *addr, unsigned entries)
 {
-	unsigned long linear_addr = (unsigned long)addr;
 	struct mmuext_op *op;
 	struct multicall_space mcs = xen_mc_entry(sizeof(*op));
 
 	op = mcs.args;
 	op->cmd = MMUEXT_SET_LDT;
-	if (linear_addr) {
-		/* ldt my be vmalloced, use arbitrary_virt_to_machine */
-		xmaddr_t maddr;
-		maddr = arbitrary_virt_to_machine((unsigned long)addr);
-		linear_addr = (unsigned long)maddr.maddr;
-	}
-	op->arg1.linear_addr = linear_addr;
+	op->arg1.linear_addr = (unsigned long)addr;
 	op->arg2.nr_ents = entries;
 
 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
@@ -295,7 +288,7 @@ static void xen_set_ldt(const void *addr, unsigned entries)
 	xen_mc_issue(PARAVIRT_LAZY_CPU);
 }
 
-static void xen_load_gdt(const struct Xgt_desc_struct *dtr)
+static void xen_load_gdt(const struct desc_ptr *dtr)
 {
 	unsigned long *frames;
 	unsigned long va = dtr->address;
@@ -357,11 +350,11 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
 }
 
 static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
-				u32 low, u32 high)
+				const void *ptr)
 {
 	unsigned long lp = (unsigned long)&dt[entrynum];
 	xmaddr_t mach_lp = virt_to_machine(lp);
-	u64 entry = (u64)high << 32 | low;
+	u64 entry = *(u64 *)ptr;
 
 	preempt_disable();
 
@@ -395,12 +388,11 @@ static int cvt_gate_to_trap(int vector, u32 low, u32 high,
 }
 
 /* Locations of each CPU's IDT */
-static DEFINE_PER_CPU(struct Xgt_desc_struct, idt_desc);
+static DEFINE_PER_CPU(struct desc_ptr, idt_desc);
 
 /* Set an IDT entry.  If the entry is part of the current IDT, then
    also update Xen. */
-static void xen_write_idt_entry(struct desc_struct *dt, int entrynum,
-				u32 low, u32 high)
+static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
 {
 	unsigned long p = (unsigned long)&dt[entrynum];
 	unsigned long start, end;
@@ -412,14 +404,15 @@ static void xen_write_idt_entry(struct desc_struct *dt, int entrynum,
 
 	xen_mc_flush();
 
-	write_dt_entry(dt, entrynum, low, high);
+	native_write_idt_entry(dt, entrynum, g);
 
 	if (p >= start && (p + 8) <= end) {
 		struct trap_info info[2];
+		u32 *desc = (u32 *)g;
 
 		info[1].address = 0;
 
-		if (cvt_gate_to_trap(entrynum, low, high, &info[0]))
+		if (cvt_gate_to_trap(entrynum, desc[0], desc[1], &info[0]))
 			if (HYPERVISOR_set_trap_table(info))
 				BUG();
 	}
@@ -427,7 +420,7 @@ static void xen_write_idt_entry(struct desc_struct *dt, int entrynum,
 	preempt_enable();
 }
 
-static void xen_convert_trap_info(const struct Xgt_desc_struct *desc,
+static void xen_convert_trap_info(const struct desc_ptr *desc,
 				  struct trap_info *traps)
 {
 	unsigned in, out, count;
@@ -446,7 +439,7 @@ static void xen_convert_trap_info(const struct Xgt_desc_struct *desc,
 
 void xen_copy_trap_info(struct trap_info *traps)
 {
-	const struct Xgt_desc_struct *desc = &__get_cpu_var(idt_desc);
+	const struct desc_ptr *desc = &__get_cpu_var(idt_desc);
 
 	xen_convert_trap_info(desc, traps);
 }
@@ -454,7 +447,7 @@ void xen_copy_trap_info(struct trap_info *traps)
 /* Load a new IDT into Xen.  In principle this can be per-CPU, so
    we hold a spinlock to protect the static traps[] array (static because
    it avoids allocation, and saves stack space). */
-static void xen_load_idt(const struct Xgt_desc_struct *desc)
+static void xen_load_idt(const struct desc_ptr *desc)
 {
 	static DEFINE_SPINLOCK(lock);
 	static struct trap_info traps[257];
@@ -475,22 +468,21 @@ static void xen_load_idt(const struct Xgt_desc_struct *desc)
 
 /* Write a GDT descriptor entry.  Ignore LDT descriptors, since
    they're handled differently. */
 static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
-				u32 low, u32 high)
+				const void *desc, int type)
 {
 	preempt_disable();
 
-	switch ((high >> 8) & 0xff) {
-	case DESCTYPE_LDT:
-	case DESCTYPE_TSS:
+	switch (type) {
+	case DESC_LDT:
+	case DESC_TSS:
 		/* ignore */
 		break;
 
 	default: {
 		xmaddr_t maddr = virt_to_machine(&dt[entry]);
-		u64 desc = (u64)high << 32 | low;
 
 		xen_mc_flush();
-		if (HYPERVISOR_update_descriptor(maddr.maddr, desc))
+		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
 			BUG();
 	}
@@ -499,11 +491,11 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
 	preempt_enable();
 }
 
-static void xen_load_esp0(struct tss_struct *tss,
+static void xen_load_sp0(struct tss_struct *tss,
 			  struct thread_struct *thread)
 {
 	struct multicall_space mcs = xen_mc_entry(0);
-	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->esp0);
+	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
 	xen_mc_issue(PARAVIRT_LAZY_CPU);
 }
 
@@ -521,12 +513,12 @@ static void xen_io_delay(void)
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
-static unsigned long xen_apic_read(unsigned long reg)
+static u32 xen_apic_read(unsigned long reg)
 {
 	return 0;
 }
 
-static void xen_apic_write(unsigned long reg, unsigned long val)
+static void xen_apic_write(unsigned long reg, u32 val)
 {
 	/* Warn to see if there's any stray references */
 	WARN_ON(1);
@@ -666,6 +658,13 @@ static __init void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn)
 	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
 }
 
+/* Early release_pt assumes that all pts are pinned, since there's
+   only init_mm and anything attached to that is pinned. */
+static void xen_release_pt_init(u32 pfn)
+{
+	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+}
+
 static void pin_pagetable_pfn(unsigned level, unsigned long pfn)
 {
 	struct mmuext_op op;
@@ -677,7 +676,7 @@ static void pin_pagetable_pfn(unsigned level, unsigned long pfn)
 
 /* This needs to make sure the new pte page is pinned iff its being
    attached to a pinned pagetable. */
-static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
+static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level)
 {
 	struct page *page = pfn_to_page(pfn);
 
@@ -686,7 +685,7 @@ static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
 
 		if (!PageHighMem(page)) {
 			make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
-			pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+			pin_pagetable_pfn(level, pfn);
 		} else
 			/* make sure there are no stray mappings of
 			   this page */
@@ -694,6 +693,16 @@ static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
 	}
 }
 
+static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
+{
+	xen_alloc_ptpage(mm, pfn, MMUEXT_PIN_L1_TABLE);
+}
+
+static void xen_alloc_pd(struct mm_struct *mm, u32 pfn)
+{
+	xen_alloc_ptpage(mm, pfn, MMUEXT_PIN_L2_TABLE);
+}
+
 /* This should never happen until we're OK to use struct page */
 static void xen_release_pt(u32 pfn)
 {
@@ -796,6 +805,9 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
 	/* This will work as long as patching hasn't happened yet
 	   (which it hasn't) */
 	pv_mmu_ops.alloc_pt = xen_alloc_pt;
+	pv_mmu_ops.alloc_pd = xen_alloc_pd;
+	pv_mmu_ops.release_pt = xen_release_pt;
+	pv_mmu_ops.release_pd = xen_release_pt;
 	pv_mmu_ops.set_pte = xen_set_pte;
 
 	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
@@ -953,7 +965,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
 	.read_pmc = native_read_pmc,
 
 	.iret = (void *)&hypercall_page[__HYPERVISOR_iret],
-	.irq_enable_sysexit = NULL,  /* never called */
+	.irq_enable_syscall_ret = NULL,  /* never called */
 
 	.load_tr_desc = paravirt_nop,
 	.set_ldt = xen_set_ldt,
@@ -968,7 +980,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
 	.write_ldt_entry = xen_write_ldt_entry,
 	.write_gdt_entry = xen_write_gdt_entry,
 	.write_idt_entry = xen_write_idt_entry,
-	.load_esp0 = xen_load_esp0,
+	.load_sp0 = xen_load_sp0,
 
 	.set_iopl_mask = xen_set_iopl_mask,
 	.io_delay = xen_io_delay,
@@ -1019,10 +1031,10 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 	.pte_update_defer = paravirt_nop,
 
 	.alloc_pt = xen_alloc_pt_init,
-	.release_pt = xen_release_pt,
-	.alloc_pd = paravirt_nop,
+	.release_pt = xen_release_pt_init,
+	.alloc_pd = xen_alloc_pt_init,
 	.alloc_pd_clone = paravirt_nop,
-	.release_pd = paravirt_nop,
+	.release_pd = xen_release_pt_init,
 
 #ifdef CONFIG_HIGHPTE
 	.kmap_atomic_pte = xen_kmap_atomic_pte,
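
One hunk worth calling out separately is the xen_cpuid() one near the top: the rename from *eax/*ebx/*ecx/*edx to *ax/*bx/*cx/*dx doesn't change the logic, which still hides selected leaf-1 EDX features from the guest. The sketch below is a userspace rendering of that masking, not the kernel code: it drops the XEN_EMULATE_PREFIX (which only matters when the hypervisor is meant to intercept the instruction) and uses literal bit positions 9, 22 and 29 as stand-ins for the kernel's X86_FEATURE_APIC, X86_FEATURE_ACPI and X86_FEATURE_ACC constants.

```c
/*
 * Userspace sketch of the masking done in xen_cpuid(): execute CPUID
 * leaf 1 and clear selected EDX feature bits before the caller sees
 * them.  Builds with GCC/Clang on x86 only (inline asm).
 */
#include <stdint.h>
#include <stdio.h>

static void cpuid_masked(uint32_t *ax, uint32_t *bx, uint32_t *cx, uint32_t *dx)
{
	uint32_t maskedx = ~0u;

	if (*ax == 1)				/* leaf 1: feature flags */
		maskedx = ~((1u << 9) |		/* APIC  (X86_FEATURE_APIC) */
			    (1u << 22) |	/* ACPI  (X86_FEATURE_ACPI) */
			    (1u << 29));	/* thermal monitor (X86_FEATURE_ACC) */

	__asm__("cpuid"
		: "=a" (*ax), "=b" (*bx), "=c" (*cx), "=d" (*dx)
		: "0" (*ax), "2" (*cx));
	*dx &= maskedx;
}

int main(void)
{
	uint32_t ax = 1, bx = 0, cx = 0, dx = 0;

	cpuid_masked(&ax, &bx, &cx, &dx);
	printf("CPUID.1:EDX with APIC/ACPI/TM bits cleared: %#x\n", dx);
	return 0;
}
```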