Diffstat (limited to 'arch/ia64/xen')
-rw-r--r-- | arch/ia64/xen/Kconfig        |  26
-rw-r--r-- | arch/ia64/xen/Makefile       |  22
-rw-r--r-- | arch/ia64/xen/grant-table.c  | 155
-rw-r--r-- | arch/ia64/xen/hypercall.S    |  91
-rw-r--r-- | arch/ia64/xen/hypervisor.c   |  96
-rw-r--r-- | arch/ia64/xen/irq_xen.c      | 435
-rw-r--r-- | arch/ia64/xen/irq_xen.h      |  34
-rw-r--r-- | arch/ia64/xen/machvec.c      |   4
-rw-r--r-- | arch/ia64/xen/suspend.c      |  64
-rw-r--r-- | arch/ia64/xen/time.c         | 213
-rw-r--r-- | arch/ia64/xen/time.h         |  24
-rw-r--r-- | arch/ia64/xen/xcom_hcall.c   | 441
-rw-r--r-- | arch/ia64/xen/xen_pv_ops.c   | 364
-rw-r--r-- | arch/ia64/xen/xencomm.c      | 105
-rw-r--r-- | arch/ia64/xen/xenivt.S       |  52
-rw-r--r-- | arch/ia64/xen/xensetup.S     |  83
16 files changed, 2209 insertions, 0 deletions
diff --git a/arch/ia64/xen/Kconfig b/arch/ia64/xen/Kconfig new file mode 100644 index 00000000000..f1683a20275 --- /dev/null +++ b/arch/ia64/xen/Kconfig @@ -0,0 +1,26 @@ +# +# This Kconfig describes xen/ia64 options +# + +config XEN + bool "Xen hypervisor support" + default y + depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB && EXPERIMENTAL + select XEN_XENCOMM + select NO_IDLE_HZ + + # those are required to save/restore. + select ARCH_SUSPEND_POSSIBLE + select SUSPEND + select PM_SLEEP + help + Enable Xen hypervisor support. Resulting kernel runs + both as a guest OS on Xen and natively on hardware. + +config XEN_XENCOMM + depends on XEN + bool + +config NO_IDLE_HZ + depends on XEN + bool diff --git a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile new file mode 100644 index 00000000000..0ad0224693d --- /dev/null +++ b/arch/ia64/xen/Makefile @@ -0,0 +1,22 @@ +# +# Makefile for Xen components +# + +obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \ + hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o + +obj-$(CONFIG_IA64_GENERIC) += machvec.o + +AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN + +# xen multi compile +ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S +ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o)) +obj-y += $(ASM_PARAVIRT_OBJS) +define paravirtualized_xen +AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_XEN +endef +$(foreach o,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_xen,$(o)))) + +$(obj)/xen-%.o: $(src)/../kernel/%.S FORCE + $(call if_changed_dep,as_o_S) diff --git a/arch/ia64/xen/grant-table.c b/arch/ia64/xen/grant-table.c new file mode 100644 index 00000000000..777dd9a9108 --- /dev/null +++ b/arch/ia64/xen/grant-table.c @@ -0,0 +1,155 @@ +/****************************************************************************** + * arch/ia64/xen/grant-table.c + * + * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> + +#include <xen/interface/xen.h> +#include <xen/interface/memory.h> +#include <xen/grant_table.h> + +#include <asm/xen/hypervisor.h> + +struct vm_struct *xen_alloc_vm_area(unsigned long size) +{ + int order; + unsigned long virt; + unsigned long nr_pages; + struct vm_struct *area; + + order = get_order(size); + virt = __get_free_pages(GFP_KERNEL, order); + if (virt == 0) + goto err0; + nr_pages = 1 << order; + scrub_pages(virt, nr_pages); + + area = kmalloc(sizeof(*area), GFP_KERNEL); + if (area == NULL) + goto err1; + + area->flags = VM_IOREMAP; + area->addr = (void *)virt; + area->size = size; + area->pages = NULL; + area->nr_pages = nr_pages; + area->phys_addr = 0; /* xenbus_map_ring_valloc uses this field! 
*/ + + return area; + +err1: + free_pages(virt, order); +err0: + return NULL; +} +EXPORT_SYMBOL_GPL(xen_alloc_vm_area); + +void xen_free_vm_area(struct vm_struct *area) +{ + unsigned int order = get_order(area->size); + unsigned long i; + unsigned long phys_addr = __pa(area->addr); + + /* This area is used for foreign page mappping. + * So underlying machine page may not be assigned. */ + for (i = 0; i < (1 << order); i++) { + unsigned long ret; + unsigned long gpfn = (phys_addr >> PAGE_SHIFT) + i; + struct xen_memory_reservation reservation = { + .nr_extents = 1, + .address_bits = 0, + .extent_order = 0, + .domid = DOMID_SELF + }; + set_xen_guest_handle(reservation.extent_start, &gpfn); + ret = HYPERVISOR_memory_op(XENMEM_populate_physmap, + &reservation); + BUG_ON(ret != 1); + } + free_pages((unsigned long)area->addr, order); + kfree(area); +} +EXPORT_SYMBOL_GPL(xen_free_vm_area); + + +/**************************************************************************** + * grant table hack + * cmd: GNTTABOP_xxx + */ + +int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, + unsigned long max_nr_gframes, + struct grant_entry **__shared) +{ + *__shared = __va(frames[0] << PAGE_SHIFT); + return 0; +} + +void arch_gnttab_unmap_shared(struct grant_entry *shared, + unsigned long nr_gframes) +{ + /* nothing */ +} + +static void +gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop) +{ + uint32_t flags; + + flags = uop->flags; + + if (flags & GNTMAP_host_map) { + if (flags & GNTMAP_application_map) { + printk(KERN_DEBUG + "GNTMAP_application_map is not supported yet: " + "flags 0x%x\n", flags); + BUG(); + } + if (flags & GNTMAP_contains_pte) { + printk(KERN_DEBUG + "GNTMAP_contains_pte is not supported yet: " + "flags 0x%x\n", flags); + BUG(); + } + } else if (flags & GNTMAP_device_map) { + printk("GNTMAP_device_map is not supported yet 0x%x\n", flags); + BUG(); /* not yet. actually this flag is not used. */ + } else { + BUG(); + } +} + +int +HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count) +{ + if (cmd == GNTTABOP_map_grant_ref) { + unsigned int i; + for (i = 0; i < count; i++) { + gnttab_map_grant_ref_pre( + (struct gnttab_map_grant_ref *)uop + i); + } + } + return xencomm_hypercall_grant_table_op(cmd, uop, count); +} + +EXPORT_SYMBOL(HYPERVISOR_grant_table_op); diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S new file mode 100644 index 00000000000..d4ff0b9e79f --- /dev/null +++ b/arch/ia64/xen/hypercall.S @@ -0,0 +1,91 @@ +/* + * Support routines for Xen hypercalls + * + * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com> + * Copyright (C) 2008 Yaozu (Eddie) Dong <eddie.dong@intel.com> + */ + +#include <asm/asmmacro.h> +#include <asm/intrinsics.h> +#include <asm/xen/privop.h> + +/* + * Hypercalls without parameter. + */ +#define __HCALL0(name,hcall) \ + GLOBAL_ENTRY(name); \ + break hcall; \ + br.ret.sptk.many rp; \ + END(name) + +/* + * Hypercalls with 1 parameter. + */ +#define __HCALL1(name,hcall) \ + GLOBAL_ENTRY(name); \ + mov r8=r32; \ + break hcall; \ + br.ret.sptk.many rp; \ + END(name) + +/* + * Hypercalls with 2 parameters. 
+ */ +#define __HCALL2(name,hcall) \ + GLOBAL_ENTRY(name); \ + mov r8=r32; \ + mov r9=r33; \ + break hcall; \ + br.ret.sptk.many rp; \ + END(name) + +__HCALL0(xen_get_psr, HYPERPRIVOP_GET_PSR) +__HCALL0(xen_get_ivr, HYPERPRIVOP_GET_IVR) +__HCALL0(xen_get_tpr, HYPERPRIVOP_GET_TPR) +__HCALL0(xen_hyper_ssm_i, HYPERPRIVOP_SSM_I) + +__HCALL1(xen_set_tpr, HYPERPRIVOP_SET_TPR) +__HCALL1(xen_eoi, HYPERPRIVOP_EOI) +__HCALL1(xen_thash, HYPERPRIVOP_THASH) +__HCALL1(xen_set_itm, HYPERPRIVOP_SET_ITM) +__HCALL1(xen_get_rr, HYPERPRIVOP_GET_RR) +__HCALL1(xen_fc, HYPERPRIVOP_FC) +__HCALL1(xen_get_cpuid, HYPERPRIVOP_GET_CPUID) +__HCALL1(xen_get_pmd, HYPERPRIVOP_GET_PMD) + +__HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA) +__HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR) +__HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR) + +#ifdef CONFIG_IA32_SUPPORT +__HCALL1(xen_get_eflag, HYPERPRIVOP_GET_EFLAG) +__HCALL1(xen_set_eflag, HYPERPRIVOP_SET_EFLAG) // refer SDM vol1 3.1.8 +#endif /* CONFIG_IA32_SUPPORT */ + +GLOBAL_ENTRY(xen_set_rr0_to_rr4) + mov r8=r32 + mov r9=r33 + mov r10=r34 + mov r11=r35 + mov r14=r36 + XEN_HYPER_SET_RR0_TO_RR4 + br.ret.sptk.many rp + ;; +END(xen_set_rr0_to_rr4) + +GLOBAL_ENTRY(xen_send_ipi) + mov r14=r32 + mov r15=r33 + mov r2=0x400 + break 0x1000 + ;; + br.ret.sptk.many rp + ;; +END(xen_send_ipi) + +GLOBAL_ENTRY(__hypercall) + mov r2=r37 + break 0x1000 + br.ret.sptk.many b0 + ;; +END(__hypercall) diff --git a/arch/ia64/xen/hypervisor.c b/arch/ia64/xen/hypervisor.c new file mode 100644 index 00000000000..cac4d97c0b5 --- /dev/null +++ b/arch/ia64/xen/hypervisor.c @@ -0,0 +1,96 @@ +/****************************************************************************** + * arch/ia64/xen/hypervisor.c + * + * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include <linux/efi.h> +#include <asm/xen/hypervisor.h> +#include <asm/xen/privop.h> + +#include "irq_xen.h" + +struct shared_info *HYPERVISOR_shared_info __read_mostly = + (struct shared_info *)XSI_BASE; +EXPORT_SYMBOL(HYPERVISOR_shared_info); + +DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); + +struct start_info *xen_start_info; +EXPORT_SYMBOL(xen_start_info); + +EXPORT_SYMBOL(xen_domain_type); + +EXPORT_SYMBOL(__hypercall); + +/* Stolen from arch/x86/xen/enlighten.c */ +/* + * Flag to determine whether vcpu info placement is available on all + * VCPUs. We assume it is to start with, and then set it to zero on + * the first failure. This is because it can succeed on some VCPUs + * and not others, since it can involve hypervisor memory allocation, + * or because the guest failed to guarantee all the appropriate + * constraints on all VCPUs (ie buffer can't cross a page boundary). + * + * Note that any particular CPU may be using a placed vcpu structure, + * but we can only optimise if the all are. 
+ * + * 0: not available, 1: available + */ + +static void __init xen_vcpu_setup(int cpu) +{ + /* + * WARNING: + * before changing MAX_VIRT_CPUS, + * check that shared_info fits on a page + */ + BUILD_BUG_ON(sizeof(struct shared_info) > PAGE_SIZE); + per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; +} + +void __init xen_setup_vcpu_info_placement(void) +{ + int cpu; + + for_each_possible_cpu(cpu) + xen_vcpu_setup(cpu); +} + +void __cpuinit +xen_cpu_init(void) +{ + xen_smp_intr_init(); +} + +/************************************************************************** + * opt feature + */ +void +xen_ia64_enable_opt_feature(void) +{ + /* Enable region 7 identity map optimizations in Xen */ + struct xen_ia64_opt_feature optf; + + optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG7; + optf.on = XEN_IA64_OPTF_ON; + optf.pgprot = pgprot_val(PAGE_KERNEL); + optf.key = 0; /* No key on linux. */ + HYPERVISOR_opt_feature(&optf); +} diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c new file mode 100644 index 00000000000..af93aadb68b --- /dev/null +++ b/arch/ia64/xen/irq_xen.c @@ -0,0 +1,435 @@ +/****************************************************************************** + * arch/ia64/xen/irq_xen.c + * + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include <linux/cpu.h> + +#include <xen/interface/xen.h> +#include <xen/interface/callback.h> +#include <xen/events.h> + +#include <asm/xen/privop.h> + +#include "irq_xen.h" + +/*************************************************************************** + * pv_irq_ops + * irq operations + */ + +static int +xen_assign_irq_vector(int irq) +{ + struct physdev_irq irq_op; + + irq_op.irq = irq; + if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) + return -ENOSPC; + + return irq_op.vector; +} + +static void +xen_free_irq_vector(int vector) +{ + struct physdev_irq irq_op; + + if (vector < IA64_FIRST_DEVICE_VECTOR || + vector > IA64_LAST_DEVICE_VECTOR) + return; + + irq_op.vector = vector; + if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op)) + printk(KERN_WARNING "%s: xen_free_irq_vecotr fail vector=%d\n", + __func__, vector); +} + + +static DEFINE_PER_CPU(int, timer_irq) = -1; +static DEFINE_PER_CPU(int, ipi_irq) = -1; +static DEFINE_PER_CPU(int, resched_irq) = -1; +static DEFINE_PER_CPU(int, cmc_irq) = -1; +static DEFINE_PER_CPU(int, cmcp_irq) = -1; +static DEFINE_PER_CPU(int, cpep_irq) = -1; +#define NAME_SIZE 15 +static DEFINE_PER_CPU(char[NAME_SIZE], timer_name); +static DEFINE_PER_CPU(char[NAME_SIZE], ipi_name); +static DEFINE_PER_CPU(char[NAME_SIZE], resched_name); +static DEFINE_PER_CPU(char[NAME_SIZE], cmc_name); +static DEFINE_PER_CPU(char[NAME_SIZE], cmcp_name); +static DEFINE_PER_CPU(char[NAME_SIZE], cpep_name); +#undef NAME_SIZE + +struct saved_irq { + unsigned int irq; + struct irqaction *action; +}; +/* 16 should be far optimistic value, since only several percpu irqs + * are registered early. + */ +#define MAX_LATE_IRQ 16 +static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ]; +static unsigned short late_irq_cnt; +static unsigned short saved_irq_cnt; +static int xen_slab_ready; + +#ifdef CONFIG_SMP +/* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ, + * it ends up to issue several memory accesses upon percpu data and + * thus adds unnecessary traffic to other paths. + */ +static irqreturn_t +xen_dummy_handler(int irq, void *dev_id) +{ + + return IRQ_HANDLED; +} + +static struct irqaction xen_ipi_irqaction = { + .handler = handle_IPI, + .flags = IRQF_DISABLED, + .name = "IPI" +}; + +static struct irqaction xen_resched_irqaction = { + .handler = xen_dummy_handler, + .flags = IRQF_DISABLED, + .name = "resched" +}; + +static struct irqaction xen_tlb_irqaction = { + .handler = xen_dummy_handler, + .flags = IRQF_DISABLED, + .name = "tlb_flush" +}; +#endif + +/* + * This is xen version percpu irq registration, which needs bind + * to xen specific evtchn sub-system. One trick here is that xen + * evtchn binding interface depends on kmalloc because related + * port needs to be freed at device/cpu down. So we cache the + * registration on BSP before slab is ready and then deal them + * at later point. For rest instances happening after slab ready, + * we hook them to xen evtchn immediately. + * + * FIXME: MCA is not supported by far, and thus "nomca" boot param is + * required. 
+ */ +static void +__xen_register_percpu_irq(unsigned int cpu, unsigned int vec, + struct irqaction *action, int save) +{ + irq_desc_t *desc; + int irq = 0; + + if (xen_slab_ready) { + switch (vec) { + case IA64_TIMER_VECTOR: + snprintf(per_cpu(timer_name, cpu), + sizeof(per_cpu(timer_name, cpu)), + "%s%d", action->name, cpu); + irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu, + action->handler, action->flags, + per_cpu(timer_name, cpu), action->dev_id); + per_cpu(timer_irq, cpu) = irq; + break; + case IA64_IPI_RESCHEDULE: + snprintf(per_cpu(resched_name, cpu), + sizeof(per_cpu(resched_name, cpu)), + "%s%d", action->name, cpu); + irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, + action->handler, action->flags, + per_cpu(resched_name, cpu), action->dev_id); + per_cpu(resched_irq, cpu) = irq; + break; + case IA64_IPI_VECTOR: + snprintf(per_cpu(ipi_name, cpu), + sizeof(per_cpu(ipi_name, cpu)), + "%s%d", action->name, cpu); + irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu, + action->handler, action->flags, + per_cpu(ipi_name, cpu), action->dev_id); + per_cpu(ipi_irq, cpu) = irq; + break; + case IA64_CMC_VECTOR: + snprintf(per_cpu(cmc_name, cpu), + sizeof(per_cpu(cmc_name, cpu)), + "%s%d", action->name, cpu); + irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu, + action->handler, + action->flags, + per_cpu(cmc_name, cpu), + action->dev_id); + per_cpu(cmc_irq, cpu) = irq; + break; + case IA64_CMCP_VECTOR: + snprintf(per_cpu(cmcp_name, cpu), + sizeof(per_cpu(cmcp_name, cpu)), + "%s%d", action->name, cpu); + irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu, + action->handler, + action->flags, + per_cpu(cmcp_name, cpu), + action->dev_id); + per_cpu(cmcp_irq, cpu) = irq; + break; + case IA64_CPEP_VECTOR: + snprintf(per_cpu(cpep_name, cpu), + sizeof(per_cpu(cpep_name, cpu)), + "%s%d", action->name, cpu); + irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu, + action->handler, + action->flags, + per_cpu(cpep_name, cpu), + action->dev_id); + per_cpu(cpep_irq, cpu) = irq; + break; + case IA64_CPE_VECTOR: + case IA64_MCA_RENDEZ_VECTOR: + case IA64_PERFMON_VECTOR: + case IA64_MCA_WAKEUP_VECTOR: + case IA64_SPURIOUS_INT_VECTOR: + /* No need to complain, these aren't supported. */ + break; + default: + printk(KERN_WARNING "Percpu irq %d is unsupported " + "by xen!\n", vec); + break; + } + BUG_ON(irq < 0); + + if (irq > 0) { + /* + * Mark percpu. Without this, migrate_irqs() will + * mark the interrupt for migrations and trigger it + * on cpu hotplug. + */ + desc = irq_desc + irq; + desc->status |= IRQ_PER_CPU; + } + } + + /* For BSP, we cache registered percpu irqs, and then re-walk + * them when initializing APs + */ + if (!cpu && save) { + BUG_ON(saved_irq_cnt == MAX_LATE_IRQ); + saved_percpu_irqs[saved_irq_cnt].irq = vec; + saved_percpu_irqs[saved_irq_cnt].action = action; + saved_irq_cnt++; + if (!xen_slab_ready) + late_irq_cnt++; + } +} + +static void +xen_register_percpu_irq(ia64_vector vec, struct irqaction *action) +{ + __xen_register_percpu_irq(smp_processor_id(), vec, action, 1); +} + +static void +xen_bind_early_percpu_irq(void) +{ + int i; + + xen_slab_ready = 1; + /* There's no race when accessing this cached array, since only + * BSP will face with such step shortly + */ + for (i = 0; i < late_irq_cnt; i++) + __xen_register_percpu_irq(smp_processor_id(), + saved_percpu_irqs[i].irq, + saved_percpu_irqs[i].action, 0); +} + +/* FIXME: There's no obvious point to check whether slab is ready. So + * a hack is used here by utilizing a late time hook. 
+ */ + +#ifdef CONFIG_HOTPLUG_CPU +static int __devinit +unbind_evtchn_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned long)hcpu; + + if (action == CPU_DEAD) { + /* Unregister evtchn. */ + if (per_cpu(cpep_irq, cpu) >= 0) { + unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL); + per_cpu(cpep_irq, cpu) = -1; + } + if (per_cpu(cmcp_irq, cpu) >= 0) { + unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL); + per_cpu(cmcp_irq, cpu) = -1; + } + if (per_cpu(cmc_irq, cpu) >= 0) { + unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL); + per_cpu(cmc_irq, cpu) = -1; + } + if (per_cpu(ipi_irq, cpu) >= 0) { + unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL); + per_cpu(ipi_irq, cpu) = -1; + } + if (per_cpu(resched_irq, cpu) >= 0) { + unbind_from_irqhandler(per_cpu(resched_irq, cpu), + NULL); + per_cpu(resched_irq, cpu) = -1; + } + if (per_cpu(timer_irq, cpu) >= 0) { + unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL); + per_cpu(timer_irq, cpu) = -1; + } + } + return NOTIFY_OK; +} + +static struct notifier_block unbind_evtchn_notifier = { + .notifier_call = unbind_evtchn_callback, + .priority = 0 +}; +#endif + +void xen_smp_intr_init_early(unsigned int cpu) +{ +#ifdef CONFIG_SMP + unsigned int i; + + for (i = 0; i < saved_irq_cnt; i++) + __xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq, + saved_percpu_irqs[i].action, 0); +#endif +} + +void xen_smp_intr_init(void) +{ +#ifdef CONFIG_SMP + unsigned int cpu = smp_processor_id(); + struct callback_register event = { + .type = CALLBACKTYPE_event, + .address = { .ip = (unsigned long)&xen_event_callback }, + }; + + if (cpu == 0) { + /* Initialization was already done for boot cpu. */ +#ifdef CONFIG_HOTPLUG_CPU + /* Register the notifier only once. */ + register_cpu_notifier(&unbind_evtchn_notifier); +#endif + return; + } + + /* This should be piggyback when setup vcpu guest context */ + BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event)); +#endif /* CONFIG_SMP */ +} + +void __init +xen_irq_init(void) +{ + struct callback_register event = { + .type = CALLBACKTYPE_event, + .address = { .ip = (unsigned long)&xen_event_callback }, + }; + + xen_init_IRQ(); + BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event)); + late_time_init = xen_bind_early_percpu_irq; +} + +void +xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect) +{ +#ifdef CONFIG_SMP + /* TODO: we need to call vcpu_up here */ + if (unlikely(vector == ap_wakeup_vector)) { + /* XXX + * This should be in __cpu_up(cpu) in ia64 smpboot.c + * like x86. But don't want to modify it, + * keep it untouched. 
+ */ + xen_smp_intr_init_early(cpu); + + xen_send_ipi(cpu, vector); + /* vcpu_prepare_and_up(cpu); */ + return; + } +#endif + + switch (vector) { + case IA64_IPI_VECTOR: + xen_send_IPI_one(cpu, XEN_IPI_VECTOR); + break; + case IA64_IPI_RESCHEDULE: + xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); + break; + case IA64_CMCP_VECTOR: + xen_send_IPI_one(cpu, XEN_CMCP_VECTOR); + break; + case IA64_CPEP_VECTOR: + xen_send_IPI_one(cpu, XEN_CPEP_VECTOR); + break; + case IA64_TIMER_VECTOR: { + /* this is used only once by check_sal_cache_flush() + at boot time */ + static int used = 0; + if (!used) { + xen_send_ipi(cpu, IA64_TIMER_VECTOR); + used = 1; + break; + } + /* fallthrough */ + } + default: + printk(KERN_WARNING "Unsupported IPI type 0x%x\n", + vector); + notify_remote_via_irq(0); /* defaults to 0 irq */ + break; + } +} + +static void __init +xen_register_ipi(void) +{ +#ifdef CONFIG_SMP + register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction); + register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction); + register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction); +#endif +} + +static void +xen_resend_irq(unsigned int vector) +{ + (void)resend_irq_on_evtchn(vector); +} + +const struct pv_irq_ops xen_irq_ops __initdata = { + .register_ipi = xen_register_ipi, + + .assign_irq_vector = xen_assign_irq_vector, + .free_irq_vector = xen_free_irq_vector, + .register_percpu_irq = xen_register_percpu_irq, + + .resend_irq = xen_resend_irq, +}; diff --git a/arch/ia64/xen/irq_xen.h b/arch/ia64/xen/irq_xen.h new file mode 100644 index 00000000000..26110f330c8 --- /dev/null +++ b/arch/ia64/xen/irq_xen.h @@ -0,0 +1,34 @@ +/****************************************************************************** + * arch/ia64/xen/irq_xen.h + * + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef IRQ_XEN_H +#define IRQ_XEN_H + +extern void (*late_time_init)(void); +extern char xen_event_callback; +void __init xen_init_IRQ(void); + +extern const struct pv_irq_ops xen_irq_ops __initdata; +extern void xen_smp_intr_init(void); +extern void xen_send_ipi(int cpu, int vec); + +#endif /* IRQ_XEN_H */ diff --git a/arch/ia64/xen/machvec.c b/arch/ia64/xen/machvec.c new file mode 100644 index 00000000000..4ad588a7c27 --- /dev/null +++ b/arch/ia64/xen/machvec.c @@ -0,0 +1,4 @@ +#define MACHVEC_PLATFORM_NAME xen +#define MACHVEC_PLATFORM_HEADER <asm/machvec_xen.h> +#include <asm/machvec_init.h> + diff --git a/arch/ia64/xen/suspend.c b/arch/ia64/xen/suspend.c new file mode 100644 index 00000000000..fd66b048c6f --- /dev/null +++ b/arch/ia64/xen/suspend.c @@ -0,0 +1,64 @@ +/****************************************************************************** + * arch/ia64/xen/suspend.c + * + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * suspend/resume + */ + +#include <xen/xen-ops.h> +#include <asm/xen/hypervisor.h> +#include "time.h" + +void +xen_mm_pin_all(void) +{ + /* nothing */ +} + +void +xen_mm_unpin_all(void) +{ + /* nothing */ +} + +void xen_pre_device_suspend(void) +{ + /* nothing */ +} + +void +xen_pre_suspend() +{ + /* nothing */ +} + +void +xen_post_suspend(int suspend_cancelled) +{ + if (suspend_cancelled) + return; + + xen_ia64_enable_opt_feature(); + /* add more if necessary */ +} + +void xen_arch_resume(void) +{ + xen_timer_resume_on_aps(); +} diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c new file mode 100644 index 00000000000..d15a94c330f --- /dev/null +++ b/arch/ia64/xen/time.c @@ -0,0 +1,213 @@ +/****************************************************************************** + * arch/ia64/xen/time.c + * + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include <linux/delay.h> +#include <linux/kernel_stat.h> +#include <linux/posix-timers.h> +#include <linux/irq.h> +#include <linux/clocksource.h> + +#include <asm/timex.h> + +#include <asm/xen/hypervisor.h> + +#include <xen/interface/vcpu.h> + +#include "../kernel/fsyscall_gtod_data.h" + +DEFINE_PER_CPU(struct vcpu_runstate_info, runstate); +DEFINE_PER_CPU(unsigned long, processed_stolen_time); +DEFINE_PER_CPU(unsigned long, processed_blocked_time); + +/* taken from i386/kernel/time-xen.c */ +static void xen_init_missing_ticks_accounting(int cpu) +{ + struct vcpu_register_runstate_memory_area area; + struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu); + int rc; + + memset(runstate, 0, sizeof(*runstate)); + + area.addr.v = runstate; + rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, + &area); + WARN_ON(rc && rc != -ENOSYS); + + per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked]; + per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable] + + runstate->time[RUNSTATE_offline]; +} + +/* + * Runstate accounting + */ +/* stolen from arch/x86/xen/time.c */ +static void get_runstate_snapshot(struct vcpu_runstate_info *res) +{ + u64 state_time; + struct vcpu_runstate_info *state; + + BUG_ON(preemptible()); + + state = &__get_cpu_var(runstate); + + /* + * The runstate info is always updated by the hypervisor on + * the current CPU, so there's no need to use anything + * stronger than a compiler barrier when fetching it. + */ + do { + state_time = state->state_entry_time; + rmb(); + *res = *state; + rmb(); + } while (state->state_entry_time != state_time); +} + +#define NS_PER_TICK (1000000000LL/HZ) + +static unsigned long +consider_steal_time(unsigned long new_itm) +{ + unsigned long stolen, blocked; + unsigned long delta_itm = 0, stolentick = 0; + int cpu = smp_processor_id(); + struct vcpu_runstate_info runstate; + struct task_struct *p = current; + + get_runstate_snapshot(&runstate); + + /* + * Check for vcpu migration effect + * In this case, itc value is reversed. + * This causes huge stolen value. + * This function just checks and reject this effect. 
+ */ + if (!time_after_eq(runstate.time[RUNSTATE_blocked], + per_cpu(processed_blocked_time, cpu))) + blocked = 0; + + if (!time_after_eq(runstate.time[RUNSTATE_runnable] + + runstate.time[RUNSTATE_offline], + per_cpu(processed_stolen_time, cpu))) + stolen = 0; + + if (!time_after(delta_itm + new_itm, ia64_get_itc())) + stolentick = ia64_get_itc() - new_itm; + + do_div(stolentick, NS_PER_TICK); + stolentick++; + + do_div(stolen, NS_PER_TICK); + + if (stolen > stolentick) + stolen = stolentick; + + stolentick -= stolen; + do_div(blocked, NS_PER_TICK); + + if (blocked > stolentick) + blocked = stolentick; + + if (stolen > 0 || blocked > 0) { + account_steal_time(NULL, jiffies_to_cputime(stolen)); + account_steal_time(idle_task(cpu), jiffies_to_cputime(blocked)); + run_local_timers(); + + if (rcu_pending(cpu)) + rcu_check_callbacks(cpu, user_mode(get_irq_regs())); + + scheduler_tick(); + run_posix_cpu_timers(p); + delta_itm += local_cpu_data->itm_delta * (stolen + blocked); + + if (cpu == time_keeper_id) { + write_seqlock(&xtime_lock); + do_timer(stolen + blocked); + local_cpu_data->itm_next = delta_itm + new_itm; + write_sequnlock(&xtime_lock); + } else { + local_cpu_data->itm_next = delta_itm + new_itm; + } + per_cpu(processed_stolen_time, cpu) += NS_PER_TICK * stolen; + per_cpu(processed_blocked_time, cpu) += NS_PER_TICK * blocked; + } + return delta_itm; +} + +static int xen_do_steal_accounting(unsigned long *new_itm) +{ + unsigned long delta_itm; + delta_itm = consider_steal_time(*new_itm); + *new_itm += delta_itm; + if (time_after(*new_itm, ia64_get_itc()) && delta_itm) + return 1; + + return 0; +} + +static void xen_itc_jitter_data_reset(void) +{ + u64 lcycle, ret; + + do { + lcycle = itc_jitter_data.itc_lastcycle; + ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, 0); + } while (unlikely(ret != lcycle)); +} + +struct pv_time_ops xen_time_ops __initdata = { + .init_missing_ticks_accounting = xen_init_missing_ticks_accounting, + .do_steal_accounting = xen_do_steal_accounting, + .clocksource_resume = xen_itc_jitter_data_reset, +}; + +/* Called after suspend, to resume time. */ +static void xen_local_tick_resume(void) +{ + /* Just trigger a tick. */ + ia64_cpu_local_tick(); + touch_softlockup_watchdog(); +} + +void +xen_timer_resume(void) +{ + unsigned int cpu; + + xen_local_tick_resume(); + + for_each_online_cpu(cpu) + xen_init_missing_ticks_accounting(cpu); +} + +static void ia64_cpu_local_tick_fn(void *unused) +{ + xen_local_tick_resume(); + xen_init_missing_ticks_accounting(smp_processor_id()); +} + +void +xen_timer_resume_on_aps(void) +{ + smp_call_function(&ia64_cpu_local_tick_fn, NULL, 1); +} diff --git a/arch/ia64/xen/time.h b/arch/ia64/xen/time.h new file mode 100644 index 00000000000..f98d7e1a42f --- /dev/null +++ b/arch/ia64/xen/time.h @@ -0,0 +1,24 @@ +/****************************************************************************** + * arch/ia64/xen/time.h + * + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +extern struct pv_time_ops xen_time_ops __initdata; +void xen_timer_resume_on_aps(void); diff --git a/arch/ia64/xen/xcom_hcall.c b/arch/ia64/xen/xcom_hcall.c new file mode 100644 index 00000000000..ccaf7431f7c --- /dev/null +++ b/arch/ia64/xen/xcom_hcall.c @@ -0,0 +1,441 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Tristan Gingold <tristan.gingold@bull.net> + * + * Copyright (c) 2007 + * Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * consolidate mini and inline version. + */ + +#include <linux/module.h> +#include <xen/interface/xen.h> +#include <xen/interface/memory.h> +#include <xen/interface/grant_table.h> +#include <xen/interface/callback.h> +#include <xen/interface/vcpu.h> +#include <asm/xen/hypervisor.h> +#include <asm/xen/xencomm.h> + +/* Xencomm notes: + * This file defines hypercalls to be used by xencomm. The hypercalls simply + * create inlines or mini descriptors for pointers and then call the raw arch + * hypercall xencomm_arch_hypercall_XXX + * + * If the arch wants to directly use these hypercalls, simply define macros + * in asm/xen/hypercall.h, eg: + * #define HYPERVISOR_sched_op xencomm_hypercall_sched_op + * + * The arch may also define HYPERVISOR_xxx as a function and do more operations + * before/after doing the hypercall. + * + * Note: because only inline or mini descriptors are created these functions + * must only be called with in kernel memory parameters. + */ + +int +xencomm_hypercall_console_io(int cmd, int count, char *str) +{ + /* xen early printk uses console io hypercall before + * xencomm initialization. In that case, we just ignore it. 
+ */ + if (!xencomm_is_initialized()) + return 0; + + return xencomm_arch_hypercall_console_io + (cmd, count, xencomm_map_no_alloc(str, count)); +} +EXPORT_SYMBOL_GPL(xencomm_hypercall_console_io); + +int +xencomm_hypercall_event_channel_op(int cmd, void *op) +{ + struct xencomm_handle *desc; + desc = xencomm_map_no_alloc(op, sizeof(struct evtchn_op)); + if (desc == NULL) + return -EINVAL; + + return xencomm_arch_hypercall_event_channel_op(cmd, desc); +} +EXPORT_SYMBOL_GPL(xencomm_hypercall_event_channel_op); + +int +xencomm_hypercall_xen_version(int cmd, void *arg) +{ + struct xencomm_handle *desc; + unsigned int argsize; + + switch (cmd) { + case XENVER_version: + /* do not actually pass an argument */ + return xencomm_arch_hypercall_xen_version(cmd, 0); + case XENVER_extraversion: + argsize = sizeof(struct xen_extraversion); + break; + case XENVER_compile_info: + argsize = sizeof(struct xen_compile_info); + break; + case XENVER_capabilities: + argsize = sizeof(struct xen_capabilities_info); + break; + case XENVER_changeset: + argsize = sizeof(struct xen_changeset_info); + break; + case XENVER_platform_parameters: + argsize = sizeof(struct xen_platform_parameters); + break; + case XENVER_get_features: + argsize = (arg == NULL) ? 0 : sizeof(struct xen_feature_info); + break; + + default: + printk(KERN_DEBUG + "%s: unknown version op %d\n", __func__, cmd); + return -ENOSYS; + } + + desc = xencomm_map_no_alloc(arg, argsize); + if (desc == NULL) + return -EINVAL; + + return xencomm_arch_hypercall_xen_version(cmd, desc); +} +EXPORT_SYMBOL_GPL(xencomm_hypercall_xen_version); + +int +xencomm_hypercall_physdev_op(int cmd, void *op) +{ + unsigned int argsize; + + switch (cmd) { + case PHYSDEVOP_apic_read: + case PHYSDEVOP_apic_write: + argsize = sizeof(struct physdev_apic); + break; + case PHYSDEVOP_alloc_irq_vector: + case PHYSDEVOP_free_irq_vector: + argsize = sizeof(struct physdev_irq); + break; + case PHYSDEVOP_irq_status_query: + argsize = sizeof(struct physdev_irq_status_query); + break; + + default: + printk(KERN_DEBUG + "%s: unknown physdev op %d\n", __func__, cmd); + return -ENOSYS; + } + + return xencomm_arch_hypercall_physdev_op + (cmd, xencomm_map_no_alloc(op, argsize)); +} + +static int +xencommize_grant_table_op(struct xencomm_mini **xc_area, + unsigned int cmd, void *op, unsigned int count, + struct xencomm_handle **desc) +{ + struct xencomm_handle *desc1; + unsigned int argsize; + + switch (cmd) { + case GNTTABOP_map_grant_ref: + argsize = sizeof(struct gnttab_map_grant_ref); + break; + case GNTTABOP_unmap_grant_ref: + argsize = sizeof(struct gnttab_unmap_grant_ref); + break; + case GNTTABOP_setup_table: + { + struct gnttab_setup_table *setup = op; + + argsize = sizeof(*setup); + + if (count != 1) + return -EINVAL; + desc1 = __xencomm_map_no_alloc + (xen_guest_handle(setup->frame_list), + setup->nr_frames * + sizeof(*xen_guest_handle(setup->frame_list)), + *xc_area); + if (desc1 == NULL) + return -EINVAL; + (*xc_area)++; + set_xen_guest_handle(setup->frame_list, (void *)desc1); + break; + } + case GNTTABOP_dump_table: + argsize = sizeof(struct gnttab_dump_table); + break; + case GNTTABOP_transfer: + argsize = sizeof(struct gnttab_transfer); + break; + case GNTTABOP_copy: + argsize = sizeof(struct gnttab_copy); + break; + case GNTTABOP_query_size: + argsize = sizeof(struct gnttab_query_size); + break; + default: + printk(KERN_DEBUG "%s: unknown hypercall grant table op %d\n", + __func__, cmd); + BUG(); + } + + *desc = __xencomm_map_no_alloc(op, count * argsize, *xc_area); + if 
(*desc == NULL) + return -EINVAL; + (*xc_area)++; + + return 0; +} + +int +xencomm_hypercall_grant_table_op(unsigned int cmd, void *op, + unsigned int count) +{ + int rc; + struct xencomm_handle *desc; + XENCOMM_MINI_ALIGNED(xc_area, 2); + + rc = xencommize_grant_table_op(&xc_area, cmd, op, count, &desc); + if (rc) + return rc; + + return xencomm_arch_hypercall_grant_table_op(cmd, desc, count); +} +EXPORT_SYMBOL_GPL(xencomm_hypercall_grant_table_op); + +int +xencomm_hypercall_sched_op(int cmd, void *arg) +{ + struct xencomm_handle *desc; + unsigned int argsize; + + switch (cmd) { + case SCHEDOP_yield: + case SCHEDOP_block: + argsize = 0; + break; + case SCHEDOP_shutdown: + argsize = sizeof(struct sched_shutdown); + break; + case SCHEDOP_poll: + { + struct sched_poll *poll = arg; + struct xencomm_handle *ports; + + argsize = sizeof(struct sched_poll); + ports = xencomm_map_no_alloc(xen_guest_handle(poll->ports), + sizeof(*xen_guest_handle(poll->ports))); + + set_xen_guest_handle(poll->ports, (void *)ports); + break; + } + default: + printk(KERN_DEBUG "%s: unknown sched op %d\n", __func__, cmd); + return -ENOSYS; + } + + desc = xencomm_map_no_alloc(arg, argsize); + if (desc == NULL) + return -EINVAL; + + return xencomm_arch_hypercall_sched_op(cmd, desc); +} +EXPORT_SYMBOL_GPL(xencomm_hypercall_sched_op); + +int +xencomm_hypercall_multicall(void *call_list, int nr_calls) +{ + int rc; + int i; + struct multicall_entry *mce; + struct xencomm_handle *desc; + XENCOMM_MINI_ALIGNED(xc_area, nr_calls * 2); + + for (i = 0; i < nr_calls; i++) { + mce = (struct multicall_entry *)call_list + i; + + switch (mce->op) { + case __HYPERVISOR_update_va_mapping: + case __HYPERVISOR_mmu_update: + /* No-op on ia64. */ + break; + case __HYPERVISOR_grant_table_op: + rc = xencommize_grant_table_op + (&xc_area, + mce->args[0], (void *)mce->args[1], + mce->args[2], &desc); + if (rc) + return rc; + mce->args[1] = (unsigned long)desc; + break; + case __HYPERVISOR_memory_op: + default: + printk(KERN_DEBUG + "%s: unhandled multicall op entry op %lu\n", + __func__, mce->op); + return -ENOSYS; + } + } + + desc = xencomm_map_no_alloc(call_list, + nr_calls * sizeof(struct multicall_entry)); + if (desc == NULL) + return -EINVAL; + + return xencomm_arch_hypercall_multicall(desc, nr_calls); +} +EXPORT_SYMBOL_GPL(xencomm_hypercall_multicall); + +int +xencomm_hypercall_callback_op(int cmd, void *arg) +{ + unsigned int argsize; + switch (cmd) { + case CALLBACKOP_register: + argsize = sizeof(struct callback_register); + break; + case CALLBACKOP_unregister: + argsize = sizeof(struct callback_unregister); + break; + default: + printk(KERN_DEBUG + "%s: unknown callback op %d\n", __func__, cmd); + return -ENOSYS; + } + + return xencomm_arch_hypercall_callback_op + (cmd, xencomm_map_no_alloc(arg, argsize)); +} + +static int +xencommize_memory_reservation(struct xencomm_mini *xc_area, + struct xen_memory_reservation *mop) +{ + struct xencomm_handle *desc; + + desc = __xencomm_map_no_alloc(xen_guest_handle(mop->extent_start), + mop->nr_extents * + sizeof(*xen_guest_handle(mop->extent_start)), + xc_area); + if (desc == NULL) + return -EINVAL; + + set_xen_guest_handle(mop->extent_start, (void *)desc); + return 0; +} + +int +xencomm_hypercall_memory_op(unsigned int cmd, void *arg) +{ + GUEST_HANDLE(xen_pfn_t) extent_start_va[2] = { {NULL}, {NULL} }; + struct xen_memory_reservation *xmr = NULL; + int rc; + struct xencomm_handle *desc; + unsigned int argsize; + XENCOMM_MINI_ALIGNED(xc_area, 2); + + switch (cmd) { + case 
XENMEM_increase_reservation: + case XENMEM_decrease_reservation: + case XENMEM_populate_physmap: + xmr = (struct xen_memory_reservation *)arg; + set_xen_guest_handle(extent_start_va[0], + xen_guest_handle(xmr->extent_start)); + + argsize = sizeof(*xmr); + rc = xencommize_memory_reservation(xc_area, xmr); + if (rc) + return rc; + xc_area++; + break; + + case XENMEM_maximum_ram_page: + argsize = 0; + break; + + case XENMEM_add_to_physmap: + argsize = sizeof(struct xen_add_to_physmap); + break; + + default: + printk(KERN_DEBUG "%s: unknown memory op %d\n", __func__, cmd); + return -ENOSYS; + } + + desc = xencomm_map_no_alloc(arg, argsize); + if (desc == NULL) + return -EINVAL; + + rc = xencomm_arch_hypercall_memory_op(cmd, desc); + + switch (cmd) { + case XENMEM_increase_reservation: + case XENMEM_decrease_reservation: + case XENMEM_populate_physmap: + set_xen_guest_handle(xmr->extent_start, + xen_guest_handle(extent_start_va[0])); + break; + } + + return rc; +} +EXPORT_SYMBOL_GPL(xencomm_hypercall_memory_op); + +int +xencomm_hypercall_suspend(unsigned long srec) +{ + struct sched_shutdown arg; + + arg.reason = SHUTDOWN_suspend; + + return xencomm_arch_hypercall_sched_op( + SCHEDOP_shutdown, xencomm_map_no_alloc(&arg, sizeof(arg))); +} + +long +xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg) +{ + unsigned int argsize; + switch (cmd) { + case VCPUOP_register_runstate_memory_area: { + struct vcpu_register_runstate_memory_area *area = + (struct vcpu_register_runstate_memory_area *)arg; + argsize = sizeof(*arg); + set_xen_guest_handle(area->addr.h, + (void *)xencomm_map_no_alloc(area->addr.v, + sizeof(area->addr.v))); + break; + } + + default: + printk(KERN_DEBUG "%s: unknown vcpu op %d\n", __func__, cmd); + return -ENOSYS; + } + + return xencomm_arch_hypercall_vcpu_op(cmd, cpu, + xencomm_map_no_alloc(arg, argsize)); +} + +long +xencomm_hypercall_opt_feature(void *arg) +{ + return xencomm_arch_hypercall_opt_feature( + xencomm_map_no_alloc(arg, + sizeof(struct xen_ia64_opt_feature))); +} diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c new file mode 100644 index 00000000000..04cd1235045 --- /dev/null +++ b/arch/ia64/xen/xen_pv_ops.c @@ -0,0 +1,364 @@ +/****************************************************************************** + * arch/ia64/xen/xen_pv_ops.c + * + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include <linux/console.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/pm.h> + +#include <asm/xen/hypervisor.h> +#include <asm/xen/xencomm.h> +#include <asm/xen/privop.h> + +#include "irq_xen.h" +#include "time.h" + +/*************************************************************************** + * general info + */ +static struct pv_info xen_info __initdata = { + .kernel_rpl = 2, /* or 1: determin at runtime */ + .paravirt_enabled = 1, + .name = "Xen/ia64", +}; + +#define IA64_RSC_PL_SHIFT 2 +#define IA64_RSC_PL_BIT_SIZE 2 +#define IA64_RSC_PL_MASK \ + (((1UL << IA64_RSC_PL_BIT_SIZE) - 1) << IA64_RSC_PL_SHIFT) + +static void __init +xen_info_init(void) +{ + /* Xenified Linux/ia64 may run on pl = 1 or 2. + * determin at run time. */ + unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC); + unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT; + xen_info.kernel_rpl = rpl; +} + +/*************************************************************************** + * pv_init_ops + * initialization hooks. + */ + +static void +xen_panic_hypercall(struct unw_frame_info *info, void *arg) +{ + current->thread.ksp = (__u64)info->sw - 16; + HYPERVISOR_shutdown(SHUTDOWN_crash); + /* we're never actually going to get here... */ +} + +static int +xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) +{ + unw_init_running(xen_panic_hypercall, NULL); + /* we're never actually going to get here... */ + return NOTIFY_DONE; +} + +static struct notifier_block xen_panic_block = { + xen_panic_event, NULL, 0 /* try to go last */ +}; + +static void xen_pm_power_off(void) +{ + local_irq_disable(); + HYPERVISOR_shutdown(SHUTDOWN_poweroff); +} + +static void __init +xen_banner(void) +{ + printk(KERN_INFO + "Running on Xen! pl = %d start_info_pfn=0x%lx nr_pages=%ld " + "flags=0x%x\n", + xen_info.kernel_rpl, + HYPERVISOR_shared_info->arch.start_info_pfn, + xen_start_info->nr_pages, xen_start_info->flags); +} + +static int __init +xen_reserve_memory(struct rsvd_region *region) +{ + region->start = (unsigned long)__va( + (HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT)); + region->end = region->start + PAGE_SIZE; + return 1; +} + +static void __init +xen_arch_setup_early(void) +{ + struct shared_info *s; + BUG_ON(!xen_pv_domain()); + + s = HYPERVISOR_shared_info; + xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT); + + /* Must be done before any hypercall. */ + xencomm_initialize(); + + xen_setup_features(); + /* Register a call for panic conditions. 
*/ + atomic_notifier_chain_register(&panic_notifier_list, + &xen_panic_block); + pm_power_off = xen_pm_power_off; + + xen_ia64_enable_opt_feature(); +} + +static void __init +xen_arch_setup_console(char **cmdline_p) +{ + add_preferred_console("xenboot", 0, NULL); + add_preferred_console("tty", 0, NULL); + /* use hvc_xen */ + add_preferred_console("hvc", 0, NULL); + +#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE) + conswitchp = NULL; +#endif +} + +static int __init +xen_arch_setup_nomca(void) +{ + return 1; +} + +static void __init +xen_post_smp_prepare_boot_cpu(void) +{ + xen_setup_vcpu_info_placement(); +} + +static const struct pv_init_ops xen_init_ops __initdata = { + .banner = xen_banner, + + .reserve_memory = xen_reserve_memory, + + .arch_setup_early = xen_arch_setup_early, + .arch_setup_console = xen_arch_setup_console, + .arch_setup_nomca = xen_arch_setup_nomca, + + .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu, +}; + +/*************************************************************************** + * pv_cpu_ops + * intrinsics hooks. + */ + +static void xen_setreg(int regnum, unsigned long val) +{ + switch (regnum) { + case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7: + xen_set_kr(regnum - _IA64_REG_AR_KR0, val); + break; +#ifdef CONFIG_IA32_SUPPORT + case _IA64_REG_AR_EFLAG: + xen_set_eflag(val); + break; +#endif + case _IA64_REG_CR_TPR: + xen_set_tpr(val); + break; + case _IA64_REG_CR_ITM: + xen_set_itm(val); + break; + case _IA64_REG_CR_EOI: + xen_eoi(val); + break; + default: + ia64_native_setreg_func(regnum, val); + break; + } +} + +static unsigned long xen_getreg(int regnum) +{ + unsigned long res; + + switch (regnum) { + case _IA64_REG_PSR: + res = xen_get_psr(); + break; +#ifdef CONFIG_IA32_SUPPORT + case _IA64_REG_AR_EFLAG: + res = xen_get_eflag(); + break; +#endif + case _IA64_REG_CR_IVR: + res = xen_get_ivr(); + break; + case _IA64_REG_CR_TPR: + res = xen_get_tpr(); + break; + default: + res = ia64_native_getreg_func(regnum); + break; + } + return res; +} + +/* turning on interrupts is a bit more complicated.. write to the + * memory-mapped virtual psr.i bit first (to avoid race condition), + * then if any interrupts were pending, we have to execute a hyperprivop + * to ensure the pending interrupt gets delivered; else we're done! */ +static void +xen_ssm_i(void) +{ + int old = xen_get_virtual_psr_i(); + xen_set_virtual_psr_i(1); + barrier(); + if (!old && xen_get_virtual_pend()) + xen_hyper_ssm_i(); +} + +/* turning off interrupts can be paravirtualized simply by writing + * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */ +static void +xen_rsm_i(void) +{ + xen_set_virtual_psr_i(0); + barrier(); +} + +static unsigned long +xen_get_psr_i(void) +{ + return xen_get_virtual_psr_i() ? IA64_PSR_I : 0; +} + +static void +xen_intrin_local_irq_restore(unsigned long mask) +{ + if (mask & IA64_PSR_I) + xen_ssm_i(); + else + xen_rsm_i(); +} + +static const struct pv_cpu_ops xen_cpu_ops __initdata = { + .fc = xen_fc, + .thash = xen_thash, + .get_cpuid = xen_get_cpuid, + .get_pmd = xen_get_pmd, + .getreg = xen_getreg, + .setreg = xen_setreg, + .ptcga = xen_ptcga, + .get_rr = xen_get_rr, + .set_rr = xen_set_rr, + .set_rr0_to_rr4 = xen_set_rr0_to_rr4, + .ssm_i = xen_ssm_i, + .rsm_i = xen_rsm_i, + .get_psr_i = xen_get_psr_i, + .intrin_local_irq_restore + = xen_intrin_local_irq_restore, +}; + +/****************************************************************************** + * replacement of hand written assembly codes. 
+ */ + +extern char xen_switch_to; +extern char xen_leave_syscall; +extern char xen_work_processed_syscall; +extern char xen_leave_kernel; + +const struct pv_cpu_asm_switch xen_cpu_asm_switch = { + .switch_to = (unsigned long)&xen_switch_to, + .leave_syscall = (unsigned long)&xen_leave_syscall, + .work_processed_syscall = (unsigned long)&xen_work_processed_syscall, + .leave_kernel = (unsigned long)&xen_leave_kernel, +}; + +/*************************************************************************** + * pv_iosapic_ops + * iosapic read/write hooks. + */ +static void +xen_pcat_compat_init(void) +{ + /* nothing */ +} + +static struct irq_chip* +xen_iosapic_get_irq_chip(unsigned long trigger) +{ + return NULL; +} + +static unsigned int +xen_iosapic_read(char __iomem *iosapic, unsigned int reg) +{ + struct physdev_apic apic_op; + int ret; + + apic_op.apic_physbase = (unsigned long)iosapic - + __IA64_UNCACHED_OFFSET; + apic_op.reg = reg; + ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op); + if (ret) + return ret; + return apic_op.value; +} + +static void +xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) +{ + struct physdev_apic apic_op; + + apic_op.apic_physbase = (unsigned long)iosapic - + __IA64_UNCACHED_OFFSET; + apic_op.reg = reg; + apic_op.value = val; + HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op); +} + +static const struct pv_iosapic_ops xen_iosapic_ops __initdata = { + .pcat_compat_init = xen_pcat_compat_init, + .__get_irq_chip = xen_iosapic_get_irq_chip, + + .__read = xen_iosapic_read, + .__write = xen_iosapic_write, +}; + +/*************************************************************************** + * pv_ops initialization + */ + +void __init +xen_setup_pv_ops(void) +{ + xen_info_init(); + pv_info = xen_info; + pv_init_ops = xen_init_ops; + pv_cpu_ops = xen_cpu_ops; + pv_iosapic_ops = xen_iosapic_ops; + pv_irq_ops = xen_irq_ops; + pv_time_ops = xen_time_ops; + + paravirt_cpu_asm_init(&xen_cpu_asm_switch); +} diff --git a/arch/ia64/xen/xencomm.c b/arch/ia64/xen/xencomm.c new file mode 100644 index 00000000000..1f5d7ac82e9 --- /dev/null +++ b/arch/ia64/xen/xencomm.c @@ -0,0 +1,105 @@ +/* + * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/mm.h> + +static unsigned long kernel_virtual_offset; +static int is_xencomm_initialized; + +/* for xen early printk. It uses console io hypercall which uses xencomm. + * However early printk may use it before xencomm initialization. + */ +int +xencomm_is_initialized(void) +{ + return is_xencomm_initialized; +} + +void +xencomm_initialize(void) +{ + kernel_virtual_offset = KERNEL_START - ia64_tpa(KERNEL_START); + is_xencomm_initialized = 1; +} + +/* Translate virtual address to physical address. 
*/ +unsigned long +xencomm_vtop(unsigned long vaddr) +{ + struct page *page; + struct vm_area_struct *vma; + + if (vaddr == 0) + return 0UL; + + if (REGION_NUMBER(vaddr) == 5) { + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *ptep; + + /* On ia64, TASK_SIZE refers to current. It is not initialized + during boot. + Furthermore the kernel is relocatable and __pa() doesn't + work on addresses. */ + if (vaddr >= KERNEL_START + && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE)) + return vaddr - kernel_virtual_offset; + + /* In kernel area -- virtually mapped. */ + pgd = pgd_offset_k(vaddr); + if (pgd_none(*pgd) || pgd_bad(*pgd)) + return ~0UL; + + pud = pud_offset(pgd, vaddr); + if (pud_none(*pud) || pud_bad(*pud)) + return ~0UL; + + pmd = pmd_offset(pud, vaddr); + if (pmd_none(*pmd) || pmd_bad(*pmd)) + return ~0UL; + + ptep = pte_offset_kernel(pmd, vaddr); + if (!ptep) + return ~0UL; + + return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK); + } + + if (vaddr > TASK_SIZE) { + /* percpu variables */ + if (REGION_NUMBER(vaddr) == 7 && + REGION_OFFSET(vaddr) >= (1ULL << IA64_MAX_PHYS_BITS)) + ia64_tpa(vaddr); + + /* kernel address */ + return __pa(vaddr); + } + + /* XXX double-check (lack of) locking */ + vma = find_extend_vma(current->mm, vaddr); + if (!vma) + return ~0UL; + + /* We assume the page is modified. */ + page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH); + if (!page) + return ~0UL; + + return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK); +} diff --git a/arch/ia64/xen/xenivt.S b/arch/ia64/xen/xenivt.S new file mode 100644 index 00000000000..3e71d50584d --- /dev/null +++ b/arch/ia64/xen/xenivt.S @@ -0,0 +1,52 @@ +/* + * arch/ia64/xen/ivt.S + * + * Copyright (C) 2005 Hewlett-Packard Co + * Dan Magenheimer <dan.magenheimer@hp.com> + * + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan K.K. + * pv_ops. + */ + +#include <asm/asmmacro.h> +#include <asm/kregs.h> +#include <asm/pgtable.h> + +#include "../kernel/minstate.h" + + .section .text,"ax" +GLOBAL_ENTRY(xen_event_callback) + mov r31=pr // prepare to save predicates + ;; + SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3 + ;; + movl r3=XSI_PSR_IC + mov r14=1 + ;; + st4 [r3]=r14 + ;; + adds r3=8,r2 // set up second base pointer for SAVE_REST + srlz.i // ensure everybody knows psr.ic is back on + ;; + SAVE_REST + ;; +1: + alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group + add out0=16,sp // pass pointer to pt_regs as first arg + ;; + br.call.sptk.many b0=xen_evtchn_do_upcall + ;; + movl r20=XSI_PSR_I_ADDR + ;; + ld8 r20=[r20] + ;; + adds r20=-1,r20 // vcpu_info->evtchn_upcall_pending + ;; + ld1 r20=[r20] + ;; + cmp.ne p6,p0=r20,r0 // if there are pending events, + (p6) br.spnt.few 1b // call evtchn_do_upcall again. 
+ br.sptk.many xen_leave_kernel // we know ia64_leave_kernel is + // paravirtualized as xen_leave_kernel +END(xen_event_callback) diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S new file mode 100644 index 00000000000..28fed1fcc07 --- /dev/null +++ b/arch/ia64/xen/xensetup.S @@ -0,0 +1,83 @@ +/* + * Support routines for Xen + * + * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com> + */ + +#include <asm/processor.h> +#include <asm/asmmacro.h> +#include <asm/pgtable.h> +#include <asm/system.h> +#include <asm/paravirt.h> +#include <asm/xen/privop.h> +#include <linux/elfnote.h> +#include <linux/init.h> +#include <xen/interface/elfnote.h> + + .section .data.read_mostly + .align 8 + .global xen_domain_type +xen_domain_type: + data4 XEN_NATIVE_ASM + .previous + + __INIT +ENTRY(startup_xen) + // Calculate load offset. + // The constant, LOAD_OFFSET, can't be used because the boot + // loader doesn't always load to the LMA specified by the vmlinux.lds. + mov r9=ip // must be the first instruction to make sure + // that r9 = the physical address of startup_xen. + // Usually r9 = startup_xen - LOAD_OFFSET + movl r8=startup_xen + ;; + sub r9=r9,r8 // Usually r9 = -LOAD_OFFSET. + + mov r10=PARAVIRT_HYPERVISOR_TYPE_XEN + movl r11=_start + ;; + add r11=r11,r9 + movl r8=hypervisor_type + ;; + add r8=r8,r9 + mov b0=r11 + ;; + st8 [r8]=r10 + br.cond.sptk.many b0 + ;; +END(startup_xen) + + ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") + ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6") + ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0") + ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, data8.ua startup_xen - LOAD_OFFSET) + +#define isBP p3 // are we the Bootstrap Processor? + + .text + +GLOBAL_ENTRY(xen_setup_hook) + mov r8=XEN_PV_DOMAIN_ASM +(isBP) movl r9=xen_domain_type;; +(isBP) st4 [r9]=r8 + movl r10=xen_ivt;; + + mov cr.iva=r10 + + /* Set xsi base. */ +#define FW_HYPERCALL_SET_SHARED_INFO_VA 0x600 +(isBP) mov r2=FW_HYPERCALL_SET_SHARED_INFO_VA +(isBP) movl r28=XSI_BASE;; +(isBP) break 0x1000;; + + /* setup pv_ops */ +(isBP) mov r4=rp + ;; +(isBP) br.call.sptk.many rp=xen_setup_pv_ops + ;; +(isBP) mov rp=r4 + ;; + + br.ret.sptk.many rp + ;; +END(xen_setup_hook) |
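
Editor's sketch: the "Xencomm notes" comment in xcom_hcall.c above describes the central marshalling convention of this patch: hypercall buffers are handed to Xen by physical address, either "inline" (a single address with a flag bit, when the buffer fits in one page) or through a fixed-size "mini" descriptor listing one address per page, so no allocation is needed on the hypercall path. The following user-space C model illustrates that split only; vtop() is an identity stand-in for the kernel's xencomm_vtop(), and the struct layout and flag value are simplified assumptions, not the real ABI.

/* Model of xencomm inline vs. mini-descriptor mapping (illustrative only). */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE   16384UL                /* Xen/ia64 requires 16KB pages */
#define PAGE_MASK   (~(PAGE_SIZE - 1))
#define XENCOMM_INLINE_FLAG (1UL << 63)    /* assumed marker for an inline address */

struct xencomm_mini {                      /* fits on the stack, no kmalloc needed */
	uint64_t nr_addrs;
	uint64_t address[8];               /* one entry per page of the buffer */
};

static uint64_t vtop(const void *v)        /* stand-in for xencomm_vtop() */
{
	return (uint64_t)(uintptr_t)v;
}

/* Build the value a raw xencomm_arch_hypercall_*() would receive. */
static uint64_t map_no_alloc(void *ptr, uint64_t bytes, struct xencomm_mini *xc)
{
	uint64_t addr = (uint64_t)(uintptr_t)ptr;

	if ((addr & ~PAGE_MASK) + bytes <= PAGE_SIZE)
		return vtop(ptr) | XENCOMM_INLINE_FLAG;  /* single page: inline */

	xc->nr_addrs = 0;                  /* multi-page: fill the descriptor */
	for (uint64_t a = addr & PAGE_MASK; a < addr + bytes; a += PAGE_SIZE)
		xc->address[xc->nr_addrs++] = vtop((void *)(uintptr_t)a);
	return vtop(xc);                   /* Xen walks this page list */
}

int main(void)
{
	static char small[64], big[3 * 16384];
	struct xencomm_mini xc;

	uint64_t d1 = map_no_alloc(small, sizeof(small), &xc);
	printf("small: %#llx (inline=%d)\n", (unsigned long long)d1,
	       !!(d1 & XENCOMM_INLINE_FLAG));

	uint64_t d2 = map_no_alloc(big, sizeof(big), &xc);
	printf("big:   %#llx via descriptor, %llu pages\n",
	       (unsigned long long)d2, (unsigned long long)xc.nr_addrs);
	return 0;
}

This is why the hypercall wrappers above must only be called with in-kernel memory: the descriptor records physical addresses directly, with no reference counting or pinning of user pages.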
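A second sketch, for the interrupt-masking protocol in xen_pv_ops.c above: psr.i is virtualized as a memory-mapped flag, so disabling interrupts is a plain store, and enabling them needs a hypercall only when an event is already pending. The flags below are ordinary variables standing in for the shared-info fields, and xen_hyper_ssm_i() is stubbed; this models the ordering argument, not the real privop interface.

#include <stdbool.h>
#include <stdio.h>

static volatile bool virtual_psr_i;    /* guest-visible psr.i shadow */
static volatile bool event_pending;    /* set by the hypervisor model */

static void xen_hyper_ssm_i(void)      /* stand-in for the hyperprivop */
{
	puts("hypercall: deliver pending event");
}

/* Enable interrupts: flip the flag first, so an event arriving
 * concurrently sees interrupts enabled, then force delivery if one
 * was already pending when we re-enabled. */
static void model_ssm_i(void)
{
	bool old = virtual_psr_i;
	virtual_psr_i = true;
	/* the kernel uses barrier() here to keep the store first */
	if (!old && event_pending)
		xen_hyper_ssm_i();
}

/* Disable interrupts: just a store, no hypercall at all. */
static void model_rsm_i(void)
{
	virtual_psr_i = false;
}

int main(void)
{
	model_rsm_i();
	event_pending = true;   /* an event arrives while masked */
	model_ssm_i();          /* re-enabling triggers the delivery hypercall */
	return 0;
}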