From 444933c6c6e82362ba8e0da26f41a53c433d11ef Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 19 May 2008 22:13:28 +0900 Subject: [IA64] pvops: preparation: remove extern in irq_ia64.c remove extern declaration of handle_IPI() in irq_ia64.c. Instead, declare it in asm-ia64/smp.h. Later handle_IPI() will be referenced from another file. Signed-off-by: Isaku Yamahata Signed-off-by: Tony Luck --- include/asm-ia64/smp.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h index ec5f355fb7e..2984e262fc6 100644 --- a/include/asm-ia64/smp.h +++ b/include/asm-ia64/smp.h @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -123,6 +124,7 @@ extern void __init smp_build_cpu_map(void); extern void __init init_smp_config (void); extern void smp_do_timer (struct pt_regs *regs); +extern irqreturn_t handle_IPI(int irq, void *dev_id); extern void smp_send_reschedule (int cpu); extern void lock_ipi_calllock(void); extern void unlock_ipi_calllock(void); -- cgit v1.2.3 From 8311d21c35092aa4c4a12e0140e1ef3443489d77 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 19 May 2008 22:13:29 +0900 Subject: [IA64] pvops: preparation: move the constants, LOAD_OFFSET, to a header file. Move the LOAD_OFFSET definition from vmlinux.lds.S into system.h. On paravirtualized environments, it is necessary to detect the execution environment. One of the solutions is the multi entry point. The multi entry point allows a boot loader to start the kernel execution from the entry point which is different from the ELF entry point. The non standard entry point will defined as the specialized elf note which contains the LMA of the entry point symbol. The constant, LOAD_OFFSET, is necessary to calculate the symbol's LMA. Move the definition into the public header file to make it available to the multi entry point support. Cc: "He, Qing" Signed-off-by: Isaku Yamahata Signed-off-by: Tony Luck --- include/asm-ia64/system.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index 26e250bfb91..0db8c9812ce 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h @@ -26,6 +26,7 @@ */ #define KERNEL_START (GATE_ADDR+__IA64_UL_CONST(0x100000000)) #define PERCPU_ADDR (-PERCPU_PAGE_SIZE) +#define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE) #ifndef __ASSEMBLY__ -- cgit v1.2.3 From 0e1a77ccdbc4ca59ccaf84168a0c3c1df4fadfc0 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 19 May 2008 22:13:30 +0900 Subject: [IA64] pvops: preparation: introduce ia64_set_rr0_to_rr4() to make kernel paravirtualization friendly. make kernel paravirtualization friendly by introducing ia64_set_rr0_to_rr4(). ia64/Xen will replace setting rr[0-4] with single hypercall later. 
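For reference, a minimal user-space sketch of the batching idea behind ia64_set_rr0_to_rr4(): the caller always goes through a single hook, so a hypervisor backend can fold the five privileged region-register writes into one batched call while the native backend keeps issuing them individually. All function names and values below are illustrative, not the kernel's actual symbols.

#include <stdio.h>

static void native_set_rr(unsigned long index, unsigned long val)
{
	/* stands in for the privileged "mov rr[index]=val" instruction */
	printf("set rr[%#lx] = %#lx\n", index, val);
}

static void native_set_rr0_to_rr4(unsigned long v0, unsigned long v1,
				  unsigned long v2, unsigned long v3,
				  unsigned long v4)
{
	native_set_rr(0x0000000000000000UL, v0);
	native_set_rr(0x2000000000000000UL, v1);
	native_set_rr(0x4000000000000000UL, v2);
	native_set_rr(0x6000000000000000UL, v3);
	native_set_rr(0x8000000000000000UL, v4);
}

static void hypervisor_set_rr0_to_rr4(unsigned long v0, unsigned long v1,
				      unsigned long v2, unsigned long v3,
				      unsigned long v4)
{
	/* a guest backend would issue one batched hypercall here instead */
	printf("one hypercall: rr0..rr4 = %#lx %#lx %#lx %#lx %#lx\n",
	       v0, v1, v2, v3, v4);
}

/* the hook the context-switch path would call through */
static void (*set_rr0_to_rr4)(unsigned long, unsigned long, unsigned long,
			      unsigned long, unsigned long) = native_set_rr0_to_rr4;

int main(void)
{
	set_rr0_to_rr4(0x10, 0x11, 0x12, 0x13, 0x14);	/* native: five writes */
	set_rr0_to_rr4 = hypervisor_set_rr0_to_rr4;	/* pv instance installed */
	set_rr0_to_rr4(0x10, 0x11, 0x12, 0x13, 0x14);	/* pv: one batched call */
	return 0;
}
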
Signed-off-by: Isaku Yamahata Signed-off-by: Tony Luck --- include/asm-ia64/intrinsics.h | 9 +++++++++ include/asm-ia64/mmu_context.h | 6 +----- 2 files changed, 10 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/asm-ia64/intrinsics.h b/include/asm-ia64/intrinsics.h index f1135b5b94c..9b83f8f0167 100644 --- a/include/asm-ia64/intrinsics.h +++ b/include/asm-ia64/intrinsics.h @@ -18,6 +18,15 @@ # include #endif +#define ia64_set_rr0_to_rr4(val0, val1, val2, val3, val4) \ +do { \ + ia64_set_rr(0x0000000000000000UL, (val0)); \ + ia64_set_rr(0x2000000000000000UL, (val1)); \ + ia64_set_rr(0x4000000000000000UL, (val2)); \ + ia64_set_rr(0x6000000000000000UL, (val3)); \ + ia64_set_rr(0x8000000000000000UL, (val4)); \ +} while (0) + /* * Force an unresolved reference if someone tries to use * ia64_fetch_and_add() with a bad value. diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h index cef2400983f..040bc87db93 100644 --- a/include/asm-ia64/mmu_context.h +++ b/include/asm-ia64/mmu_context.h @@ -152,11 +152,7 @@ reload_context (nv_mm_context_t context) # endif #endif - ia64_set_rr(0x0000000000000000UL, rr0); - ia64_set_rr(0x2000000000000000UL, rr1); - ia64_set_rr(0x4000000000000000UL, rr2); - ia64_set_rr(0x6000000000000000UL, rr3); - ia64_set_rr(0x8000000000000000UL, rr4); + ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4); ia64_srlz_i(); /* srlz.i implies srlz.d */ } -- cgit v1.2.3 From 72cb4256c7574e1c2c1350fa92eecd7cef9e4772 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 19 May 2008 22:13:31 +0900 Subject: [IA64] pvops: preparation: introduce ia64_get_psr_i() to make kernel paravirtualization friendly. __local_irq_save() and local_save_flags() are used to mask interruptions. They read all psr bits that requres whole bit emulation. On the other hand, reading only psr.i, the single bit, can be virtualized cheaply. 
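A rough user-space model of why the single-bit read is cheaper to virtualize: the native path has to read (and a hypervisor would have to emulate) the whole PSR before masking, while a paravirtualized path can simply load a hypervisor-maintained interrupt flag. Names, bit positions and values below are illustrative assumptions only.

#include <stdio.h>

#define PSR_I_BIT	(1UL << 14)		/* illustrative psr.i position */

static unsigned long fake_psr = PSR_I_BIT | 0x3UL;	/* stands in for the real PSR */
static unsigned long shared_flags = PSR_I_BIT;		/* hypervisor-shared per-vCPU flag */

static unsigned long native_get_psr_i(void)
{
	/* native case: read the whole (privileged) PSR, then mask */
	return fake_psr & PSR_I_BIT;
}

static unsigned long pv_get_psr_i(void)
{
	/* pv case: no trap, just a memory load of the virtual psr.i */
	return shared_flags & PSR_I_BIT;
}

int main(void)
{
	unsigned long (*get_psr_i)(void) = native_get_psr_i;

	printf("native psr.i set: %d\n", get_psr_i() ? 1 : 0);
	get_psr_i = pv_get_psr_i;		/* pv instance installed */
	printf("pv     psr.i set: %d\n", get_psr_i() ? 1 : 0);
	return 0;
}
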
Signed-off-by: Isaku Yamahata Signed-off-by: Tony Luck --- include/asm-ia64/intrinsics.h | 2 ++ include/asm-ia64/system.h | 10 ++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-ia64/intrinsics.h b/include/asm-ia64/intrinsics.h index 9b83f8f0167..a3b96892f83 100644 --- a/include/asm-ia64/intrinsics.h +++ b/include/asm-ia64/intrinsics.h @@ -18,6 +18,8 @@ # include #endif +#define ia64_get_psr_i() (ia64_getreg(_IA64_REG_PSR) & IA64_PSR_I) + #define ia64_set_rr0_to_rr4(val0, val1, val2, val3, val4) \ do { \ ia64_set_rr(0x0000000000000000UL, (val0)); \ diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index 0db8c9812ce..927a381c20c 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h @@ -123,10 +123,16 @@ extern struct ia64_boot_param { * write a floating-point register right before reading the PSR * and that writes to PSR.mfl */ +#ifdef CONFIG_PARAVIRT +#define __local_save_flags() ia64_get_psr_i() +#else +#define __local_save_flags() ia64_getreg(_IA64_REG_PSR) +#endif + #define __local_irq_save(x) \ do { \ ia64_stop(); \ - (x) = ia64_getreg(_IA64_REG_PSR); \ + (x) = __local_save_flags(); \ ia64_stop(); \ ia64_rsm(IA64_PSR_I); \ } while (0) @@ -174,7 +180,7 @@ do { \ #endif /* !CONFIG_IA64_DEBUG_IRQ */ #define local_irq_enable() ({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); }) -#define local_save_flags(flags) ({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); }) +#define local_save_flags(flags) ({ ia64_stop(); (flags) = __local_save_flags(); }) #define irqs_disabled() \ ({ \ -- cgit v1.2.3 From 90aeb169c03a96e22674741f08054023c33d595b Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 19 May 2008 22:13:32 +0900 Subject: [IA64] pvops: introduce pv_info which describes some random info. introduce pv_info which describes some randome info about underlying execution environment. Cc: Jes Sorensen Signed-off-by: Isaku Yamahata Signed-off-by: Tony Luck --- include/asm-ia64/paravirt.h | 62 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 include/asm-ia64/paravirt.h (limited to 'include') diff --git a/include/asm-ia64/paravirt.h b/include/asm-ia64/paravirt.h new file mode 100644 index 00000000000..26b43342308 --- /dev/null +++ b/include/asm-ia64/paravirt.h @@ -0,0 +1,62 @@ +/****************************************************************************** + * include/asm-ia64/paravirt.h + * + * Copyright (c) 2008 Isaku Yamahata + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + + +#ifndef __ASM_PARAVIRT_H +#define __ASM_PARAVIRT_H + +#ifdef CONFIG_PARAVIRT_GUEST + +#ifndef __ASSEMBLY__ + +#include +#include + +/****************************************************************************** + * general info + */ +struct pv_info { + unsigned int kernel_rpl; + int paravirt_enabled; + const char *name; +}; + +extern struct pv_info pv_info; + +static inline int paravirt_enabled(void) +{ + return pv_info.paravirt_enabled; +} + +static inline unsigned int get_kernel_rpl(void) +{ + return pv_info.kernel_rpl; +} + +#endif /* !__ASSEMBLY__ */ + +#else +/* fallback for native case */ + +#endif /* CONFIG_PARAVIRT_GUEST */ + +#endif /* __ASM_PARAVIRT_H */ -- cgit v1.2.3 From 3e0879deb700f322f6c81ab34f056fc72d15ec02 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 19 May 2008 22:13:33 +0900 Subject: [IA64] pvops: add an early setup hook for pv_ops. This patch adds a setup hook in the very early boot sequence before start_kernel() to initialize paravirtualization stuff. The hook will be set by each pv loader code or by using multi entry point. Signed-off-by: Qing He Signed-off-by: Isaku Yamahata Signed-off-by: Tony Luck --- include/asm-ia64/paravirt.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/asm-ia64/paravirt.h b/include/asm-ia64/paravirt.h index 26b43342308..1032b216aea 100644 --- a/include/asm-ia64/paravirt.h +++ b/include/asm-ia64/paravirt.h @@ -26,6 +26,9 @@ #ifdef CONFIG_PARAVIRT_GUEST +#define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0 +#define PARAVIRT_HYPERVISOR_TYPE_XEN 1 + #ifndef __ASSEMBLY__ #include -- cgit v1.2.3 From 1ff730b52f0c3e4e3846c3ff345c5526b2633ba9 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 19 May 2008 22:13:34 +0900 Subject: [IA64] pvops: introduce pv_cpu_ops to paravirtualize privileged instructions. introduce pv_cpu_ops to paravirtualize privleged instructions which are defined by ia64 intrinsics. make them indirect C function calls by introducing function tables, pv_cpu_ops. 
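A cut-down, compilable model of the indirection this series builds up: privileged intrinsics become entries in a function table, the call sites expand to calls through that table, and each execution environment installs its own instance at boot. The structure below is a simplified illustration, not the kernel's full pv_cpu_ops.

#include <stdio.h>

struct cpu_ops {
	unsigned long (*get_rr)(unsigned long index);
	void (*set_rr)(unsigned long index, unsigned long val);
	unsigned long (*get_psr_i)(void);
};

static unsigned long rr_shadow[8];		/* region number = top 3 address bits */

static unsigned long native_get_rr(unsigned long index)
{
	return rr_shadow[index >> 61];		/* models "mov reg=rr[index]" */
}

static void native_set_rr(unsigned long index, unsigned long val)
{
	rr_shadow[index >> 61] = val;		/* models "mov rr[index]=reg" */
}

static unsigned long native_get_psr_i(void)
{
	return 1;				/* pretend interrupts are enabled */
}

/* the table a pv loader would repoint at a hypervisor-specific instance */
static struct cpu_ops cpu_ops = {
	.get_rr		= native_get_rr,
	.set_rr		= native_set_rr,
	.get_psr_i	= native_get_psr_i,
};

/* the shape the intrinsics macros take once redirected through the table */
#define my_set_rr(i, v)	cpu_ops.set_rr((i), (v))
#define my_get_rr(i)	cpu_ops.get_rr(i)

int main(void)
{
	my_set_rr(0x2000000000000000UL, 0x123);
	printf("rr1 = %#lx, psr.i = %lu\n",
	       my_get_rr(0x2000000000000000UL), cpu_ops.get_psr_i());
	return 0;
}
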
Signed-off-by: Yaozu (Eddie) Dong Signed-off-by: Isaku Yamahata Signed-off-by: Tony Luck --- include/asm-ia64/Kbuild | 2 +- include/asm-ia64/gcc_intrin.h | 24 +++++----- include/asm-ia64/intel_intrin.h | 41 ++++++++--------- include/asm-ia64/intrinsics.h | 62 ++++++++++++++++++++++---- include/asm-ia64/paravirt_privop.h | 91 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 178 insertions(+), 42 deletions(-) create mode 100644 include/asm-ia64/paravirt_privop.h (limited to 'include') diff --git a/include/asm-ia64/Kbuild b/include/asm-ia64/Kbuild index eb24a3f47ca..ccbe8ae47a6 100644 --- a/include/asm-ia64/Kbuild +++ b/include/asm-ia64/Kbuild @@ -5,12 +5,12 @@ header-y += fpu.h header-y += fpswa.h header-y += ia64regs.h header-y += intel_intrin.h -header-y += intrinsics.h header-y += perfmon_default_smpl.h header-y += ptrace_offsets.h header-y += rse.h header-y += ucontext.h unifdef-y += gcc_intrin.h +unifdef-y += intrinsics.h unifdef-y += perfmon.h unifdef-y += ustack.h diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h index 2fe292c275f..0f5b5592175 100644 --- a/include/asm-ia64/gcc_intrin.h +++ b/include/asm-ia64/gcc_intrin.h @@ -32,7 +32,7 @@ extern void ia64_bad_param_for_getreg (void); register unsigned long ia64_r13 asm ("r13") __used; #endif -#define ia64_setreg(regnum, val) \ +#define ia64_native_setreg(regnum, val) \ ({ \ switch (regnum) { \ case _IA64_REG_PSR_L: \ @@ -61,7 +61,7 @@ register unsigned long ia64_r13 asm ("r13") __used; } \ }) -#define ia64_getreg(regnum) \ +#define ia64_native_getreg(regnum) \ ({ \ __u64 ia64_intri_res; \ \ @@ -385,7 +385,7 @@ register unsigned long ia64_r13 asm ("r13") __used; #define ia64_invala() asm volatile ("invala" ::: "memory") -#define ia64_thash(addr) \ +#define ia64_native_thash(addr) \ ({ \ __u64 ia64_intri_res; \ asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \ @@ -438,10 +438,10 @@ register unsigned long ia64_r13 asm ("r13") __used; #define ia64_set_pmd(index, val) \ asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory") -#define ia64_set_rr(index, val) \ +#define ia64_native_set_rr(index, val) \ asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory"); -#define ia64_get_cpuid(index) \ +#define ia64_native_get_cpuid(index) \ ({ \ __u64 ia64_intri_res; \ asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \ @@ -477,33 +477,33 @@ register unsigned long ia64_r13 asm ("r13") __used; }) -#define ia64_get_pmd(index) \ +#define ia64_native_get_pmd(index) \ ({ \ __u64 ia64_intri_res; \ asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ ia64_intri_res; \ }) -#define ia64_get_rr(index) \ +#define ia64_native_get_rr(index) \ ({ \ __u64 ia64_intri_res; \ asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \ ia64_intri_res; \ }) -#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory") +#define ia64_native_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory") #define ia64_sync_i() asm volatile (";; sync.i" ::: "memory") -#define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory") -#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory") +#define ia64_native_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory") +#define ia64_native_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory") #define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory") #define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory") #define ia64_ptce(addr) asm volatile ("ptc.e %0" 
:: "r"(addr)) -#define ia64_ptcga(addr, size) \ +#define ia64_native_ptcga(addr, size) \ do { \ asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \ ia64_dv_serialize_data(); \ @@ -608,7 +608,7 @@ do { \ } \ }) -#define ia64_intrin_local_irq_restore(x) \ +#define ia64_native_intrin_local_irq_restore(x) \ do { \ asm volatile (";; cmp.ne p6,p7=%0,r0;;" \ "(p6) ssm psr.i;" \ diff --git a/include/asm-ia64/intel_intrin.h b/include/asm-ia64/intel_intrin.h index a520d103d80..53cec577558 100644 --- a/include/asm-ia64/intel_intrin.h +++ b/include/asm-ia64/intel_intrin.h @@ -16,8 +16,8 @@ * intrinsic */ -#define ia64_getreg __getReg -#define ia64_setreg __setReg +#define ia64_native_getreg __getReg +#define ia64_native_setreg __setReg #define ia64_hint __hint #define ia64_hint_pause __hint_pause @@ -39,10 +39,10 @@ #define ia64_invala_fr __invala_fr #define ia64_nop __nop #define ia64_sum __sum -#define ia64_ssm __ssm +#define ia64_native_ssm __ssm #define ia64_rum __rum -#define ia64_rsm __rsm -#define ia64_fc __fc +#define ia64_native_rsm __rsm +#define ia64_native_fc __fc #define ia64_ldfs __ldfs #define ia64_ldfd __ldfd @@ -88,16 +88,17 @@ __setIndReg(_IA64_REG_INDR_PMC, index, val) #define ia64_set_pmd(index, val) \ __setIndReg(_IA64_REG_INDR_PMD, index, val) -#define ia64_set_rr(index, val) \ +#define ia64_native_set_rr(index, val) \ __setIndReg(_IA64_REG_INDR_RR, index, val) -#define ia64_get_cpuid(index) __getIndReg(_IA64_REG_INDR_CPUID, index) -#define __ia64_get_dbr(index) __getIndReg(_IA64_REG_INDR_DBR, index) -#define ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index) -#define ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index) -#define ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index) -#define ia64_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index) -#define ia64_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index) +#define ia64_native_get_cpuid(index) \ + __getIndReg(_IA64_REG_INDR_CPUID, index) +#define __ia64_get_dbr(index) __getIndReg(_IA64_REG_INDR_DBR, index) +#define ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index) +#define ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index) +#define ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index) +#define ia64_native_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index) +#define ia64_native_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index) #define ia64_srlz_d __dsrlz #define ia64_srlz_i __isrlz @@ -119,16 +120,16 @@ #define ia64_ld8_acq __ld8_acq #define ia64_sync_i __synci -#define ia64_thash __thash -#define ia64_ttag __ttag +#define ia64_native_thash __thash +#define ia64_native_ttag __ttag #define ia64_itcd __itcd #define ia64_itci __itci #define ia64_itrd __itrd #define ia64_itri __itri #define ia64_ptce __ptce #define ia64_ptcl __ptcl -#define ia64_ptcg __ptcg -#define ia64_ptcga __ptcga +#define ia64_native_ptcg __ptcg +#define ia64_native_ptcga __ptcga #define ia64_ptri __ptri #define ia64_ptrd __ptrd #define ia64_dep_mi _m64_dep_mi @@ -145,13 +146,13 @@ #define ia64_lfetch_fault __lfetch_fault #define ia64_lfetch_fault_excl __lfetch_fault_excl -#define ia64_intrin_local_irq_restore(x) \ +#define ia64_native_intrin_local_irq_restore(x) \ do { \ if ((x) != 0) { \ - ia64_ssm(IA64_PSR_I); \ + ia64_native_ssm(IA64_PSR_I); \ ia64_srlz_d(); \ } else { \ - ia64_rsm(IA64_PSR_I); \ + ia64_native_rsm(IA64_PSR_I); \ } \ } while (0) diff --git a/include/asm-ia64/intrinsics.h b/include/asm-ia64/intrinsics.h index a3b96892f83..47d686dba1e 100644 --- a/include/asm-ia64/intrinsics.h 
+++ b/include/asm-ia64/intrinsics.h @@ -18,15 +18,15 @@ # include #endif -#define ia64_get_psr_i() (ia64_getreg(_IA64_REG_PSR) & IA64_PSR_I) - -#define ia64_set_rr0_to_rr4(val0, val1, val2, val3, val4) \ -do { \ - ia64_set_rr(0x0000000000000000UL, (val0)); \ - ia64_set_rr(0x2000000000000000UL, (val1)); \ - ia64_set_rr(0x4000000000000000UL, (val2)); \ - ia64_set_rr(0x6000000000000000UL, (val3)); \ - ia64_set_rr(0x8000000000000000UL, (val4)); \ +#define ia64_native_get_psr_i() (ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I) + +#define ia64_native_set_rr0_to_rr4(val0, val1, val2, val3, val4) \ +do { \ + ia64_native_set_rr(0x0000000000000000UL, (val0)); \ + ia64_native_set_rr(0x2000000000000000UL, (val1)); \ + ia64_native_set_rr(0x4000000000000000UL, (val2)); \ + ia64_native_set_rr(0x6000000000000000UL, (val3)); \ + ia64_native_set_rr(0x8000000000000000UL, (val4)); \ } while (0) /* @@ -194,4 +194,48 @@ extern long ia64_cmpxchg_called_with_bad_pointer (void); #endif /* !CONFIG_IA64_DEBUG_CMPXCHG */ #endif + +#ifdef __KERNEL__ +#include +#endif + +#ifndef __ASSEMBLY__ +#if defined(CONFIG_PARAVIRT) && defined(__KERNEL__) +#define IA64_INTRINSIC_API(name) pv_cpu_ops.name +#define IA64_INTRINSIC_MACRO(name) paravirt_ ## name +#else +#define IA64_INTRINSIC_API(name) ia64_native_ ## name +#define IA64_INTRINSIC_MACRO(name) ia64_native_ ## name +#endif + +/************************************************/ +/* Instructions paravirtualized for correctness */ +/************************************************/ +/* fc, thash, get_cpuid, get_pmd, get_eflags, set_eflags */ +/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag" + * is not currently used (though it may be in a long-format VHPT system!) + */ +#define ia64_fc IA64_INTRINSIC_API(fc) +#define ia64_thash IA64_INTRINSIC_API(thash) +#define ia64_get_cpuid IA64_INTRINSIC_API(get_cpuid) +#define ia64_get_pmd IA64_INTRINSIC_API(get_pmd) + + +/************************************************/ +/* Instructions paravirtualized for performance */ +/************************************************/ +#define ia64_ssm IA64_INTRINSIC_MACRO(ssm) +#define ia64_rsm IA64_INTRINSIC_MACRO(rsm) +#define ia64_getreg IA64_INTRINSIC_API(getreg) +#define ia64_setreg IA64_INTRINSIC_API(setreg) +#define ia64_set_rr IA64_INTRINSIC_API(set_rr) +#define ia64_get_rr IA64_INTRINSIC_API(get_rr) +#define ia64_ptcga IA64_INTRINSIC_API(ptcga) +#define ia64_get_psr_i IA64_INTRINSIC_API(get_psr_i) +#define ia64_intrin_local_irq_restore \ + IA64_INTRINSIC_API(intrin_local_irq_restore) +#define ia64_set_rr0_to_rr4 IA64_INTRINSIC_API(set_rr0_to_rr4) + +#endif /* !__ASSEMBLY__ */ + #endif /* _ASM_IA64_INTRINSICS_H */ diff --git a/include/asm-ia64/paravirt_privop.h b/include/asm-ia64/paravirt_privop.h new file mode 100644 index 00000000000..7b133ae86df --- /dev/null +++ b/include/asm-ia64/paravirt_privop.h @@ -0,0 +1,91 @@ +/****************************************************************************** + * include/asm-ia64/paravirt_privops.h + * + * Copyright (c) 2008 Isaku Yamahata + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef _ASM_IA64_PARAVIRT_PRIVOP_H +#define _ASM_IA64_PARAVIRT_PRIVOP_H + +#ifdef CONFIG_PARAVIRT + +#ifndef __ASSEMBLY__ + +#include +#include /* for IA64_PSR_I */ + +/****************************************************************************** + * replacement of intrinsics operations. + */ + +struct pv_cpu_ops { + void (*fc)(unsigned long addr); + unsigned long (*thash)(unsigned long addr); + unsigned long (*get_cpuid)(int index); + unsigned long (*get_pmd)(int index); + unsigned long (*getreg)(int reg); + void (*setreg)(int reg, unsigned long val); + void (*ptcga)(unsigned long addr, unsigned long size); + unsigned long (*get_rr)(unsigned long index); + void (*set_rr)(unsigned long index, unsigned long val); + void (*set_rr0_to_rr4)(unsigned long val0, unsigned long val1, + unsigned long val2, unsigned long val3, + unsigned long val4); + void (*ssm_i)(void); + void (*rsm_i)(void); + unsigned long (*get_psr_i)(void); + void (*intrin_local_irq_restore)(unsigned long flags); +}; + +extern struct pv_cpu_ops pv_cpu_ops; + +extern void ia64_native_setreg_func(int regnum, unsigned long val); +extern unsigned long ia64_native_getreg_func(int regnum); + +/************************************************/ +/* Instructions paravirtualized for performance */ +/************************************************/ + +/* mask for ia64_native_ssm/rsm() must be constant.("i" constraing). + * static inline function doesn't satisfy it. */ +#define paravirt_ssm(mask) \ + do { \ + if ((mask) == IA64_PSR_I) \ + pv_cpu_ops.ssm_i(); \ + else \ + ia64_native_ssm(mask); \ + } while (0) + +#define paravirt_rsm(mask) \ + do { \ + if ((mask) == IA64_PSR_I) \ + pv_cpu_ops.rsm_i(); \ + else \ + ia64_native_rsm(mask); \ + } while (0) + +#endif /* __ASSEMBLY__ */ + +#else + +/* fallback for native case */ + +#endif /* CONFIG_PARAVIRT */ + +#endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */ -- cgit v1.2.3 From e92e8c68a61ae7d845c1be0a58a081e7756b0735 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 19 May 2008 22:13:36 +0900 Subject: [IA64] pvops: define paravirtualized instructions for native. pv_cpu_asm_ops: define paravirtualized introduce for native execution environment. Cc: Keith Owens Signed-off-by: Yaozu (Eddie) Dong Signed-off-by: Isaku Yamahata Signed-off-by: Tony Luck --- include/asm-ia64/native/inst.h | 165 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 165 insertions(+) create mode 100644 include/asm-ia64/native/inst.h (limited to 'include') diff --git a/include/asm-ia64/native/inst.h b/include/asm-ia64/native/inst.h new file mode 100644 index 00000000000..2a50b70b969 --- /dev/null +++ b/include/asm-ia64/native/inst.h @@ -0,0 +1,165 @@ +/****************************************************************************** + * include/asm-ia64/native/inst.h + * + * Copyright (c) 2008 Isaku Yamahata + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK +# define PARAVIRT_POISON 0xdeadbeefbaadf00d +# define CLOBBER(clob) \ + ;; \ + movl clob = PARAVIRT_POISON; \ + ;; +#else +# define CLOBBER(clob) /* nothing */ +#endif + +#define MOV_FROM_IFA(reg) \ + mov reg = cr.ifa + +#define MOV_FROM_ITIR(reg) \ + mov reg = cr.itir + +#define MOV_FROM_ISR(reg) \ + mov reg = cr.isr + +#define MOV_FROM_IHA(reg) \ + mov reg = cr.iha + +#define MOV_FROM_IPSR(pred, reg) \ +(pred) mov reg = cr.ipsr + +#define MOV_FROM_IIM(reg) \ + mov reg = cr.iim + +#define MOV_FROM_IIP(reg) \ + mov reg = cr.iip + +#define MOV_FROM_IVR(reg, clob) \ + mov reg = cr.ivr \ + CLOBBER(clob) + +#define MOV_FROM_PSR(pred, reg, clob) \ +(pred) mov reg = psr \ + CLOBBER(clob) + +#define MOV_TO_IFA(reg, clob) \ + mov cr.ifa = reg \ + CLOBBER(clob) + +#define MOV_TO_ITIR(pred, reg, clob) \ +(pred) mov cr.itir = reg \ + CLOBBER(clob) + +#define MOV_TO_IHA(pred, reg, clob) \ +(pred) mov cr.iha = reg \ + CLOBBER(clob) + +#define MOV_TO_IPSR(pred, reg, clob) \ +(pred) mov cr.ipsr = reg \ + CLOBBER(clob) + +#define MOV_TO_IFS(pred, reg, clob) \ +(pred) mov cr.ifs = reg \ + CLOBBER(clob) + +#define MOV_TO_IIP(reg, clob) \ + mov cr.iip = reg \ + CLOBBER(clob) + +#define MOV_TO_KR(kr, reg, clob0, clob1) \ + mov IA64_KR(kr) = reg \ + CLOBBER(clob0) \ + CLOBBER(clob1) + +#define ITC_I(pred, reg, clob) \ +(pred) itc.i reg \ + CLOBBER(clob) + +#define ITC_D(pred, reg, clob) \ +(pred) itc.d reg \ + CLOBBER(clob) + +#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \ +(pred_i) itc.i reg; \ +(pred_d) itc.d reg \ + CLOBBER(clob) + +#define THASH(pred, reg0, reg1, clob) \ +(pred) thash reg0 = reg1 \ + CLOBBER(clob) + +#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \ + ssm psr.ic | PSR_DEFAULT_BITS \ + CLOBBER(clob0) \ + CLOBBER(clob1) \ + ;; \ + srlz.i /* guarantee that interruption collectin is on */ \ + ;; + +#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \ + ssm psr.ic \ + CLOBBER(clob0) \ + CLOBBER(clob1) \ + ;; \ + srlz.d + +#define RSM_PSR_IC(clob) \ + rsm psr.ic \ + CLOBBER(clob) + +#define SSM_PSR_I(pred, pred_clob, clob) \ +(pred) ssm psr.i \ + CLOBBER(clob) + +#define RSM_PSR_I(pred, clob0, clob1) \ +(pred) rsm psr.i \ + CLOBBER(clob0) \ + CLOBBER(clob1) + +#define RSM_PSR_I_IC(clob0, clob1, clob2) \ + rsm psr.i | psr.ic \ + CLOBBER(clob0) \ + CLOBBER(clob1) \ + CLOBBER(clob2) + +#define RSM_PSR_DT \ + rsm psr.dt + +#define SSM_PSR_DT_AND_SRLZ_I \ + ssm psr.dt \ + ;; \ + srlz.i + +#define BSW_0(clob0, clob1, clob2) \ + bsw.0 \ + CLOBBER(clob0) \ + CLOBBER(clob1) \ + CLOBBER(clob2) + +#define BSW_1(clob0, clob1) \ + bsw.1 \ + CLOBBER(clob0) \ + CLOBBER(clob1) + +#define COVER \ + cover + +#define RFI \ + rfi -- cgit v1.2.3 From 02e32e36f42f8ea7ee6060d02f2d69ad5bad6d50 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 19 May 2008 22:13:37 +0900 Subject: [IA64] pvops: paravirtualize minstate.h. paravirtualize minstate.h which are hand written assembly code. They include sensitive or performance critical privileged instructions. 
So that they are appropriate for paravirtualization. Cc: Keith Owens Cc: Akio Takebe Signed-off-by: Yaozu (Eddie) Dong Signed-off-by: Isaku Yamahata Signed-off-by: Tony Luck --- include/asm-ia64/native/inst.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/asm-ia64/native/inst.h b/include/asm-ia64/native/inst.h index 2a50b70b969..f1072ace0cf 100644 --- a/include/asm-ia64/native/inst.h +++ b/include/asm-ia64/native/inst.h @@ -20,6 +20,8 @@ * */ +#define DO_SAVE_MIN IA64_NATIVE_DO_SAVE_MIN + #ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK # define PARAVIRT_POISON 0xdeadbeefbaadf00d # define CLOBBER(clob) \ -- cgit v1.2.3 From 4df8d22bbbb16ccfa4e10cc068135183c9e5e006 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Tue, 27 May 2008 15:08:01 -0700 Subject: [IA64] pvops: paravirtualize entry.S paravirtualize ia64_swtich_to, ia64_leave_syscall and ia64_leave_kernel. They include sensitive or performance critical privileged instructions so that they need paravirtualization. To paravirtualize them by single source and multi compile they are converted into indirect jump. And define each pv instances. Cc: Keith Owens Cc: "Dong, Eddie" Signed-off-by: Isaku Yamahata Signed-off-by: Tony Luck --- include/asm-ia64/native/inst.h | 8 ++++++++ include/asm-ia64/paravirt_privop.h | 23 +++++++++++++++++++++++ 2 files changed, 31 insertions(+) (limited to 'include') diff --git a/include/asm-ia64/native/inst.h b/include/asm-ia64/native/inst.h index f1072ace0cf..c953a2ca4fc 100644 --- a/include/asm-ia64/native/inst.h +++ b/include/asm-ia64/native/inst.h @@ -22,6 +22,14 @@ #define DO_SAVE_MIN IA64_NATIVE_DO_SAVE_MIN +#define __paravirt_switch_to ia64_native_switch_to +#define __paravirt_leave_syscall ia64_native_leave_syscall +#define __paravirt_work_processed_syscall ia64_native_work_processed_syscall +#define __paravirt_leave_kernel ia64_native_leave_kernel +#define __paravirt_pending_syscall_end ia64_work_pending_syscall_end +#define __paravirt_work_processed_syscall_target \ + ia64_work_processed_syscall + #ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK # define PARAVIRT_POISON 0xdeadbeefbaadf00d # define CLOBBER(clob) \ diff --git a/include/asm-ia64/paravirt_privop.h b/include/asm-ia64/paravirt_privop.h index 7b133ae86df..52482e6940a 100644 --- a/include/asm-ia64/paravirt_privop.h +++ b/include/asm-ia64/paravirt_privop.h @@ -80,12 +80,35 @@ extern unsigned long ia64_native_getreg_func(int regnum); ia64_native_rsm(mask); \ } while (0) +/****************************************************************************** + * replacement of hand written assembly codes. 
+ */ +struct pv_cpu_asm_switch { + unsigned long switch_to; + unsigned long leave_syscall; + unsigned long work_processed_syscall; + unsigned long leave_kernel; +}; +void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch); + #endif /* __ASSEMBLY__ */ +#define IA64_PARAVIRT_ASM_FUNC(name) paravirt_ ## name + #else /* fallback for native case */ +#define IA64_PARAVIRT_ASM_FUNC(name) ia64_native_ ## name #endif /* CONFIG_PARAVIRT */ +/* these routines utilize privilege-sensitive or performance-sensitive + * privileged instructions so the code must be replaced with + * paravirtualized versions */ +#define ia64_switch_to IA64_PARAVIRT_ASM_FUNC(switch_to) +#define ia64_leave_syscall IA64_PARAVIRT_ASM_FUNC(leave_syscall) +#define ia64_work_processed_syscall \ + IA64_PARAVIRT_ASM_FUNC(work_processed_syscall) +#define ia64_leave_kernel IA64_PARAVIRT_ASM_FUNC(leave_kernel) + #endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */ -- cgit v1.2.3 From 213060a4d6991a95d0b9344406d195be3464accf Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 19 May 2008 22:13:40 +0900 Subject: [IA64] pvops: paravirtualize NR_IRQS Make NR_IRQ overridable by each pv instances. Pv instance may need each own number of irqs so that NR_IRQS should be the maximum number of nr_irqs each pv instances need. Cc: Jes Sorensen Signed-off-by: Isaku Yamahata Signed-off-by: Tony Luck --- include/asm-ia64/irq.h | 9 +-------- include/asm-ia64/native/irq.h | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 8 deletions(-) create mode 100644 include/asm-ia64/native/irq.h (limited to 'include') diff --git a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h index a66d26827cb..3627116fb0e 100644 --- a/include/asm-ia64/irq.h +++ b/include/asm-ia64/irq.h @@ -13,14 +13,7 @@ #include #include - -#define NR_VECTORS 256 - -#if (NR_VECTORS + 32 * NR_CPUS) < 1024 -#define NR_IRQS (NR_VECTORS + 32 * NR_CPUS) -#else -#define NR_IRQS 1024 -#endif +#include static __inline__ int irq_canonicalize (int irq) diff --git a/include/asm-ia64/native/irq.h b/include/asm-ia64/native/irq.h new file mode 100644 index 00000000000..efe9ff74a3c --- /dev/null +++ b/include/asm-ia64/native/irq.h @@ -0,0 +1,35 @@ +/****************************************************************************** + * include/asm-ia64/native/irq.h + * + * Copyright (c) 2008 Isaku Yamahata + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * moved from linux/include/asm-ia64/irq.h. 
+ */ + +#ifndef _ASM_IA64_NATIVE_IRQ_H +#define _ASM_IA64_NATIVE_IRQ_H + +#define NR_VECTORS 256 + +#if (NR_VECTORS + 32 * NR_CPUS) < 1024 +#define IA64_NATIVE_NR_IRQS (NR_VECTORS + 32 * NR_CPUS) +#else +#define IA64_NATIVE_NR_IRQS 1024 +#endif + +#endif /* _ASM_IA64_NATIVE_IRQ_H */ -- cgit v1.2.3 From e51835d58a5abdf82211f36f500f666ca7ef9aee Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 19 May 2008 22:13:41 +0900 Subject: [IA64] pvops: define initialization hooks, pv_init_ops, for paravirtualized environment. define pv_init_ops hooks which represents various initialization hooks for paravirtualized environment. and add hooks. Signed-off-by: Alex Williamson Signed-off-by: Isaku Yamahata Signed-off-by: Tony Luck --- include/asm-ia64/paravirt.h | 70 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) (limited to 'include') diff --git a/include/asm-ia64/paravirt.h b/include/asm-ia64/paravirt.h index 1032b216aea..84d74c32eb9 100644 --- a/include/asm-ia64/paravirt.h +++ b/include/asm-ia64/paravirt.h @@ -55,11 +55,81 @@ static inline unsigned int get_kernel_rpl(void) return pv_info.kernel_rpl; } +/****************************************************************************** + * initialization hooks. + */ +struct rsvd_region; + +struct pv_init_ops { + void (*banner)(void); + + int (*reserve_memory)(struct rsvd_region *region); + + void (*arch_setup_early)(void); + void (*arch_setup_console)(char **cmdline_p); + int (*arch_setup_nomca)(void); + + void (*post_smp_prepare_boot_cpu)(void); +}; + +extern struct pv_init_ops pv_init_ops; + +static inline void paravirt_banner(void) +{ + if (pv_init_ops.banner) + pv_init_ops.banner(); +} + +static inline int paravirt_reserve_memory(struct rsvd_region *region) +{ + if (pv_init_ops.reserve_memory) + return pv_init_ops.reserve_memory(region); + return 0; +} + +static inline void paravirt_arch_setup_early(void) +{ + if (pv_init_ops.arch_setup_early) + pv_init_ops.arch_setup_early(); +} + +static inline void paravirt_arch_setup_console(char **cmdline_p) +{ + if (pv_init_ops.arch_setup_console) + pv_init_ops.arch_setup_console(cmdline_p); +} + +static inline int paravirt_arch_setup_nomca(void) +{ + if (pv_init_ops.arch_setup_nomca) + return pv_init_ops.arch_setup_nomca(); + return 0; +} + +static inline void paravirt_post_smp_prepare_boot_cpu(void) +{ + if (pv_init_ops.post_smp_prepare_boot_cpu) + pv_init_ops.post_smp_prepare_boot_cpu(); +} + #endif /* !__ASSEMBLY__ */ #else /* fallback for native case */ +#ifndef __ASSEMBLY__ + +#define paravirt_banner() do { } while (0) +#define paravirt_reserve_memory(region) 0 + +#define paravirt_arch_setup_early() do { } while (0) +#define paravirt_arch_setup_console(cmdline_p) do { } while (0) +#define paravirt_arch_setup_nomca() 0 +#define paravirt_post_smp_prepare_boot_cpu() do { } while (0) + +#endif /* __ASSEMBLY__ */ + + #endif /* CONFIG_PARAVIRT_GUEST */ #endif /* __ASM_PARAVIRT_H */ -- cgit v1.2.3 From 33b39e84209b0308b572dce017df7ee9b63f086c Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 19 May 2008 22:13:42 +0900 Subject: [IA64] pvops: add hooks, pv_iosapic_ops, to paravirtualize iosapic. add hooks to paravirtualize iosapic which is a real hardware resource. On virtualized environment it may be replaced something virtualized friendly. Define pv_iosapic_ops and add the hooks. 
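The shape of this hook can be modeled in a few lines of plain C: the register-select/window accessors sit behind an ops structure, so a virtualized platform can replace the MMIO sequence wholesale without touching callers. Offsets, names and the fake register block below are illustrative only.

#include <stdio.h>
#include <stdint.h>

struct iosapic_ops {
	unsigned int (*read)(char *iosapic, unsigned int reg);
	void (*write)(char *iosapic, unsigned int reg, uint32_t val);
};

#define REG_SELECT	0x00	/* illustrative register-select offset */
#define WINDOW		0x10	/* illustrative data-window offset */

static unsigned int native_read(char *iosapic, unsigned int reg)
{
	*(volatile uint32_t *)(iosapic + REG_SELECT) = reg;
	return *(volatile uint32_t *)(iosapic + WINDOW);
}

static void native_write(char *iosapic, unsigned int reg, uint32_t val)
{
	*(volatile uint32_t *)(iosapic + REG_SELECT) = reg;
	*(volatile uint32_t *)(iosapic + WINDOW) = val;
}

static struct iosapic_ops iosapic_ops = {
	.read	= native_read,
	.write	= native_write,
};

int main(void)
{
	static uint32_t fake_mmio[8];		/* stands in for the mapped chip */
	char *base = (char *)fake_mmio;

	iosapic_ops.write(base, 3, 0xdead);
	printf("reg 3 reads back %#x\n", iosapic_ops.read(base, 3));
	return 0;
}
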
Signed-off-by: Isaku Yamahata Signed-off-by: Tony Luck --- include/asm-ia64/iosapic.h | 18 ++++++++++++++++-- include/asm-ia64/paravirt.h | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h index a3a4288daae..b9c102e15f2 100644 --- a/include/asm-ia64/iosapic.h +++ b/include/asm-ia64/iosapic.h @@ -55,13 +55,27 @@ #define NR_IOSAPICS 256 -static inline unsigned int __iosapic_read(char __iomem *iosapic, unsigned int reg) +#ifdef CONFIG_PARAVIRT_GUEST +#include +#else +#define iosapic_pcat_compat_init ia64_native_iosapic_pcat_compat_init +#define __iosapic_read __ia64_native_iosapic_read +#define __iosapic_write __ia64_native_iosapic_write +#define iosapic_get_irq_chip ia64_native_iosapic_get_irq_chip +#endif + +extern void __init ia64_native_iosapic_pcat_compat_init(void); +extern struct irq_chip *ia64_native_iosapic_get_irq_chip(unsigned long trigger); + +static inline unsigned int +__ia64_native_iosapic_read(char __iomem *iosapic, unsigned int reg) { writel(reg, iosapic + IOSAPIC_REG_SELECT); return readl(iosapic + IOSAPIC_WINDOW); } -static inline void __iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) +static inline void +__ia64_native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) { writel(reg, iosapic + IOSAPIC_REG_SELECT); writel(val, iosapic + IOSAPIC_WINDOW); diff --git a/include/asm-ia64/paravirt.h b/include/asm-ia64/paravirt.h index 84d74c32eb9..3a40f624e86 100644 --- a/include/asm-ia64/paravirt.h +++ b/include/asm-ia64/paravirt.h @@ -112,6 +112,46 @@ static inline void paravirt_post_smp_prepare_boot_cpu(void) pv_init_ops.post_smp_prepare_boot_cpu(); } +/****************************************************************************** + * replacement of iosapic operations. + */ + +struct pv_iosapic_ops { + void (*pcat_compat_init)(void); + + struct irq_chip *(*get_irq_chip)(unsigned long trigger); + + unsigned int (*__read)(char __iomem *iosapic, unsigned int reg); + void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val); +}; + +extern struct pv_iosapic_ops pv_iosapic_ops; + +static inline void +iosapic_pcat_compat_init(void) +{ + if (pv_iosapic_ops.pcat_compat_init) + pv_iosapic_ops.pcat_compat_init(); +} + +static inline struct irq_chip* +iosapic_get_irq_chip(unsigned long trigger) +{ + return pv_iosapic_ops.get_irq_chip(trigger); +} + +static inline unsigned int +__iosapic_read(char __iomem *iosapic, unsigned int reg) +{ + return pv_iosapic_ops.__read(iosapic, reg); +} + +static inline void +__iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) +{ + return pv_iosapic_ops.__write(iosapic, reg, val); +} + #endif /* !__ASSEMBLY__ */ #else -- cgit v1.2.3 From 85cbc503787d577c215f9540c57294e1ec799144 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 19 May 2008 22:13:43 +0900 Subject: [IA64] pvops: add hooks, pv_irq_ops, to paravirtualized irq related operations. introduce pv_irq_ops which adds hooks to paravirtualize irq related operations. On virtualized environment, interruption may be replaced by something virtualization friendly. So the irq related operation also may need paravirtualization. This patch adds necessary hooks to paravirtualize irq related operations. 
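A small stand-alone sketch of the same pattern applied to vector allocation: callers keep using thin wrappers, and only the installed ops instance decides how vectors are actually handed out. All names and the allocation policy below are illustrative, not the kernel's.

#include <stdio.h>

struct irq_ops {
	int (*assign_irq_vector)(int irq);
	void (*free_irq_vector)(int vector);
};

static int next_vector = 0x30;			/* pretend free-vector cursor */

static int native_assign_irq_vector(int irq)
{
	(void)irq;
	return next_vector++;			/* models grabbing a free vector */
}

static void native_free_irq_vector(int vector)
{
	printf("vector %#x released\n", vector);
}

static struct irq_ops irq_ops = {
	.assign_irq_vector	= native_assign_irq_vector,
	.free_irq_vector	= native_free_irq_vector,
};

/* thin wrappers, as the header's static inlines do */
static int assign_irq_vector(int irq)   { return irq_ops.assign_irq_vector(irq); }
static void free_irq_vector(int vector) { irq_ops.free_irq_vector(vector); }

int main(void)
{
	int v = assign_irq_vector(9);

	printf("irq 9 got vector %#x\n", v);
	free_irq_vector(v);
	return 0;
}
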
Signed-off-by: Yaozu (Eddie) Dong Signed-off-by: Isaku Yamahata Signed-off-by: Tony Luck --- include/asm-ia64/hw_irq.h | 23 ++++++++++++++++++---- include/asm-ia64/paravirt.h | 48 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h index 76366dc9c1a..5c99cbcb8a0 100644 --- a/include/asm-ia64/hw_irq.h +++ b/include/asm-ia64/hw_irq.h @@ -15,7 +15,11 @@ #include #include +#ifndef CONFIG_PARAVIRT typedef u8 ia64_vector; +#else +typedef u16 ia64_vector; +#endif /* * 0 special @@ -104,13 +108,24 @@ DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq); extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt controller */ +#ifdef CONFIG_PARAVIRT_GUEST +#include +#else +#define ia64_register_ipi ia64_native_register_ipi +#define assign_irq_vector ia64_native_assign_irq_vector +#define free_irq_vector ia64_native_free_irq_vector +#define register_percpu_irq ia64_native_register_percpu_irq +#define ia64_resend_irq ia64_native_resend_irq +#endif + +extern void ia64_native_register_ipi(void); extern int bind_irq_vector(int irq, int vector, cpumask_t domain); -extern int assign_irq_vector (int irq); /* allocate a free vector */ -extern void free_irq_vector (int vector); +extern int ia64_native_assign_irq_vector (int irq); /* allocate a free vector */ +extern void ia64_native_free_irq_vector (int vector); extern int reserve_irq_vector (int vector); extern void __setup_vector_irq(int cpu); extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect); -extern void register_percpu_irq (ia64_vector vec, struct irqaction *action); +extern void ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action); extern int check_irq_used (int irq); extern void destroy_and_reserve_irq (unsigned int irq); @@ -122,7 +137,7 @@ static inline int irq_prepare_move(int irq, int cpu) { return 0; } static inline void irq_complete_move(unsigned int irq) {} #endif -static inline void ia64_resend_irq(unsigned int vector) +static inline void ia64_native_resend_irq(unsigned int vector) { platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0); } diff --git a/include/asm-ia64/paravirt.h b/include/asm-ia64/paravirt.h index 3a40f624e86..ee15646b6d6 100644 --- a/include/asm-ia64/paravirt.h +++ b/include/asm-ia64/paravirt.h @@ -152,6 +152,54 @@ __iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) return pv_iosapic_ops.__write(iosapic, reg, val); } +/****************************************************************************** + * replacement of irq operations. 
+ */ + +struct pv_irq_ops { + void (*register_ipi)(void); + + int (*assign_irq_vector)(int irq); + void (*free_irq_vector)(int vector); + + void (*register_percpu_irq)(ia64_vector vec, + struct irqaction *action); + + void (*resend_irq)(unsigned int vector); +}; + +extern struct pv_irq_ops pv_irq_ops; + +static inline void +ia64_register_ipi(void) +{ + pv_irq_ops.register_ipi(); +} + +static inline int +assign_irq_vector(int irq) +{ + return pv_irq_ops.assign_irq_vector(irq); +} + +static inline void +free_irq_vector(int vector) +{ + return pv_irq_ops.free_irq_vector(vector); +} + +static inline void +register_percpu_irq(ia64_vector vec, struct irqaction *action) +{ + pv_irq_ops.register_percpu_irq(vec, action); +} + +static inline void +ia64_resend_irq(unsigned int vector) +{ + pv_irq_ops.resend_irq(vector); +} + #endif /* !__ASSEMBLY__ */ #else -- cgit v1.2.3 From 00d21d82b8a9e290286e09d8eedc20bfc33b0eee Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 19 May 2008 22:13:44 +0900 Subject: [IA64] pvops: add to hooks, pv_time_ops, for steal time accounting. Introduce pv_time_ops which adds hook to steal time accounting. On virtualized environment, cpus are shared by many guests and steal time is the time which is used for other guests. On virtualized environtment, streal time should be accounted. Signed-off-by: Isaku Yamahata Signed-off-by: Tony Luck --- include/asm-ia64/paravirt.h | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) (limited to 'include') diff --git a/include/asm-ia64/paravirt.h b/include/asm-ia64/paravirt.h index ee15646b6d6..1b4df129f57 100644 --- a/include/asm-ia64/paravirt.h +++ b/include/asm-ia64/paravirt.h @@ -200,6 +200,35 @@ ia64_resend_irq(unsigned int vector) pv_irq_ops.resend_irq(vector); } +/****************************************************************************** + * replacement of time operations. + */ + +extern struct itc_jitter_data_t itc_jitter_data; +extern volatile int time_keeper_id; + +struct pv_time_ops { + void (*init_missing_ticks_accounting)(int cpu); + int (*do_steal_accounting)(unsigned long *new_itm); + + void (*clocksource_resume)(void); +}; + +extern struct pv_time_ops pv_time_ops; + +static inline void +paravirt_init_missing_ticks_accounting(int cpu) +{ + if (pv_time_ops.init_missing_ticks_accounting) + pv_time_ops.init_missing_ticks_accounting(cpu); +} + +static inline int +paravirt_do_steal_accounting(unsigned long *new_itm) +{ + return pv_time_ops.do_steal_accounting(new_itm); +} + #endif /* !__ASSEMBLY__ */ #else @@ -215,6 +244,9 @@ ia64_resend_irq(unsigned int vector) #define paravirt_arch_setup_nomca() 0 #define paravirt_post_smp_prepare_boot_cpu() do { } while (0) +#define paravirt_init_missing_ticks_accounting(cpu) do { } while (0) +#define paravirt_do_steal_accounting(new_itm) 0 + #endif /* __ASSEMBLY__ */ -- cgit v1.2.3 From 8cac39b99b731c627097ce704acac1ff422d8286 Mon Sep 17 00:00:00 2001 From: Dimitri Sivanich Date: Wed, 2 Jul 2008 15:47:55 -0500 Subject: [IA64] Update ia64 mmr list for SGI uv This patch updates the ia64 mmr list for SGI uv. 
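The generated definitions pair each register with a union so code can move between the raw 64-bit register image and its named fields, with the *_SHFT/*_MASK constants describing the same layout for open-coded accesses. A small compilable illustration follows, using simplified local names, made-up values, and assuming the usual little-endian bitfield layout.

#include <stdio.h>

#define INT_CMPB_REAL_TIME_CMPB_SHFT	0
#define INT_CMPB_REAL_TIME_CMPB_MASK	0x00ffffffffffffffUL

union int_cmpb_u {
	unsigned long v;
	struct {
		unsigned long real_time_cmpb : 56;	/* RW */
		unsigned long rsvd_56_63     : 8;	/* reserved */
	} s;
};

int main(void)
{
	union int_cmpb_u cmpb = { .v = 0 };

	cmpb.s.real_time_cmpb = 0x123456789abcdeUL;	/* named-field view */

	/* equivalent mask/shift view of the same register image */
	unsigned long field = (cmpb.v & INT_CMPB_REAL_TIME_CMPB_MASK)
			      >> INT_CMPB_REAL_TIME_CMPB_SHFT;

	printf("raw %#lx, field %#lx\n", cmpb.v, field);
	return 0;
}
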
Signed-off-by: Dimitri Sivanich Signed-off-by: Tony Luck --- include/asm-ia64/uv/uv_mmrs.h | 423 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 415 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/asm-ia64/uv/uv_mmrs.h b/include/asm-ia64/uv/uv_mmrs.h index 1cc1dbb0182..c149ef08543 100644 --- a/include/asm-ia64/uv/uv_mmrs.h +++ b/include/asm-ia64/uv/uv_mmrs.h @@ -11,11 +11,284 @@ #ifndef __ASM_IA64_UV_MMRS__ #define __ASM_IA64_UV_MMRS__ -/* - * AUTO GENERATED - Do not edit - */ +#define UV_MMR_ENABLE (1UL << 63) + +/* ========================================================================= */ +/* UVH_BAU_DATA_CONFIG */ +/* ========================================================================= */ +#define UVH_BAU_DATA_CONFIG 0x61680UL +#define UVH_BAU_DATA_CONFIG_32 0x0438 + +#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0 +#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL +#define UVH_BAU_DATA_CONFIG_DM_SHFT 8 +#define UVH_BAU_DATA_CONFIG_DM_MASK 0x0000000000000700UL +#define UVH_BAU_DATA_CONFIG_DESTMODE_SHFT 11 +#define UVH_BAU_DATA_CONFIG_DESTMODE_MASK 0x0000000000000800UL +#define UVH_BAU_DATA_CONFIG_STATUS_SHFT 12 +#define UVH_BAU_DATA_CONFIG_STATUS_MASK 0x0000000000001000UL +#define UVH_BAU_DATA_CONFIG_P_SHFT 13 +#define UVH_BAU_DATA_CONFIG_P_MASK 0x0000000000002000UL +#define UVH_BAU_DATA_CONFIG_T_SHFT 15 +#define UVH_BAU_DATA_CONFIG_T_MASK 0x0000000000008000UL +#define UVH_BAU_DATA_CONFIG_M_SHFT 16 +#define UVH_BAU_DATA_CONFIG_M_MASK 0x0000000000010000UL +#define UVH_BAU_DATA_CONFIG_APIC_ID_SHFT 32 +#define UVH_BAU_DATA_CONFIG_APIC_ID_MASK 0xffffffff00000000UL + +union uvh_bau_data_config_u { + unsigned long v; + struct uvh_bau_data_config_s { + unsigned long vector_ : 8; /* RW */ + unsigned long dm : 3; /* RW */ + unsigned long destmode : 1; /* RW */ + unsigned long status : 1; /* RO */ + unsigned long p : 1; /* RO */ + unsigned long rsvd_14 : 1; /* */ + unsigned long t : 1; /* RO */ + unsigned long m : 1; /* RW */ + unsigned long rsvd_17_31: 15; /* */ + unsigned long apic_id : 32; /* RW */ + } s; +}; + +/* ========================================================================= */ +/* UVH_EVENT_OCCURRED0 */ +/* ========================================================================= */ +#define UVH_EVENT_OCCURRED0 0x70000UL +#define UVH_EVENT_OCCURRED0_32 0x005e8 + +#define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0 +#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL +#define UVH_EVENT_OCCURRED0_GR0_HCERR_SHFT 1 +#define UVH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL +#define UVH_EVENT_OCCURRED0_GR1_HCERR_SHFT 2 +#define UVH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL +#define UVH_EVENT_OCCURRED0_LH_HCERR_SHFT 3 +#define UVH_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL +#define UVH_EVENT_OCCURRED0_RH_HCERR_SHFT 4 +#define UVH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL +#define UVH_EVENT_OCCURRED0_XN_HCERR_SHFT 5 +#define UVH_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL +#define UVH_EVENT_OCCURRED0_SI_HCERR_SHFT 6 +#define UVH_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL +#define UVH_EVENT_OCCURRED0_LB_AOERR0_SHFT 7 +#define UVH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL +#define UVH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8 +#define UVH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL +#define UVH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9 +#define UVH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL +#define UVH_EVENT_OCCURRED0_LH_AOERR0_SHFT 10 +#define 
UVH_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL +#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11 +#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL +#define UVH_EVENT_OCCURRED0_XN_AOERR0_SHFT 12 +#define UVH_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL +#define UVH_EVENT_OCCURRED0_SI_AOERR0_SHFT 13 +#define UVH_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL +#define UVH_EVENT_OCCURRED0_LB_AOERR1_SHFT 14 +#define UVH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL +#define UVH_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15 +#define UVH_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL +#define UVH_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16 +#define UVH_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL +#define UVH_EVENT_OCCURRED0_LH_AOERR1_SHFT 17 +#define UVH_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL +#define UVH_EVENT_OCCURRED0_RH_AOERR1_SHFT 18 +#define UVH_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL +#define UVH_EVENT_OCCURRED0_XN_AOERR1_SHFT 19 +#define UVH_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL +#define UVH_EVENT_OCCURRED0_SI_AOERR1_SHFT 20 +#define UVH_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL +#define UVH_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21 +#define UVH_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL +#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22 +#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23 +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24 +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25 +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26 +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27 +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28 +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29 +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30 +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31 +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32 +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33 +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34 +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35 +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36 +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37 +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38 +#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL +#define UVH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39 +#define UVH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL +#define UVH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40 +#define UVH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL 
+#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41 +#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL +#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42 +#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL +#define UVH_EVENT_OCCURRED0_LTC_INT_SHFT 43 +#define UVH_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL +#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44 +#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL +#define UVH_EVENT_OCCURRED0_IPI_INT_SHFT 45 +#define UVH_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL +#define UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46 +#define UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL +#define UVH_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47 +#define UVH_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL +#define UVH_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48 +#define UVH_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL +#define UVH_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49 +#define UVH_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL +#define UVH_EVENT_OCCURRED0_PROFILE_INT_SHFT 50 +#define UVH_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL +#define UVH_EVENT_OCCURRED0_RTC0_SHFT 51 +#define UVH_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL +#define UVH_EVENT_OCCURRED0_RTC1_SHFT 52 +#define UVH_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL +#define UVH_EVENT_OCCURRED0_RTC2_SHFT 53 +#define UVH_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL +#define UVH_EVENT_OCCURRED0_RTC3_SHFT 54 +#define UVH_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL +#define UVH_EVENT_OCCURRED0_BAU_DATA_SHFT 55 +#define UVH_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL +#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56 +#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL +union uvh_event_occurred0_u { + unsigned long v; + struct uvh_event_occurred0_s { + unsigned long lb_hcerr : 1; /* RW, W1C */ + unsigned long gr0_hcerr : 1; /* RW, W1C */ + unsigned long gr1_hcerr : 1; /* RW, W1C */ + unsigned long lh_hcerr : 1; /* RW, W1C */ + unsigned long rh_hcerr : 1; /* RW, W1C */ + unsigned long xn_hcerr : 1; /* RW, W1C */ + unsigned long si_hcerr : 1; /* RW, W1C */ + unsigned long lb_aoerr0 : 1; /* RW, W1C */ + unsigned long gr0_aoerr0 : 1; /* RW, W1C */ + unsigned long gr1_aoerr0 : 1; /* RW, W1C */ + unsigned long lh_aoerr0 : 1; /* RW, W1C */ + unsigned long rh_aoerr0 : 1; /* RW, W1C */ + unsigned long xn_aoerr0 : 1; /* RW, W1C */ + unsigned long si_aoerr0 : 1; /* RW, W1C */ + unsigned long lb_aoerr1 : 1; /* RW, W1C */ + unsigned long gr0_aoerr1 : 1; /* RW, W1C */ + unsigned long gr1_aoerr1 : 1; /* RW, W1C */ + unsigned long lh_aoerr1 : 1; /* RW, W1C */ + unsigned long rh_aoerr1 : 1; /* RW, W1C */ + unsigned long xn_aoerr1 : 1; /* RW, W1C */ + unsigned long si_aoerr1 : 1; /* RW, W1C */ + unsigned long rh_vpi_int : 1; /* RW, W1C */ + unsigned long system_shutdown_int : 1; /* RW, W1C */ + unsigned long lb_irq_int_0 : 1; /* RW, W1C */ + unsigned long lb_irq_int_1 : 1; /* RW, W1C */ + unsigned long lb_irq_int_2 : 1; /* RW, W1C */ + unsigned long lb_irq_int_3 : 1; /* RW, W1C */ + unsigned long lb_irq_int_4 : 1; /* RW, W1C */ + unsigned long lb_irq_int_5 : 1; /* RW, W1C */ + unsigned long lb_irq_int_6 : 1; /* RW, W1C */ + unsigned long lb_irq_int_7 : 1; /* RW, W1C */ + unsigned long lb_irq_int_8 : 1; /* RW, W1C */ + unsigned long lb_irq_int_9 : 1; /* RW, W1C */ + unsigned long lb_irq_int_10 : 1; /* RW, W1C */ + unsigned long lb_irq_int_11 : 1; /* RW, W1C */ + unsigned long lb_irq_int_12 : 1; /* RW, W1C */ + 
unsigned long lb_irq_int_13 : 1;  /* RW, W1C */
+    unsigned long lb_irq_int_14 : 1;  /* RW, W1C */
+    unsigned long lb_irq_int_15 : 1;  /* RW, W1C */
+    unsigned long l1_nmi_int    : 1;  /* RW, W1C */
+    unsigned long stop_clock    : 1;  /* RW, W1C */
+    unsigned long asic_to_l1    : 1;  /* RW, W1C */
+    unsigned long l1_to_asic    : 1;  /* RW, W1C */
+    unsigned long ltc_int       : 1;  /* RW, W1C */
+    unsigned long la_seq_trigger : 1;  /* RW, W1C */
+    unsigned long ipi_int       : 1;  /* RW, W1C */
+    unsigned long extio_int0    : 1;  /* RW, W1C */
+    unsigned long extio_int1    : 1;  /* RW, W1C */
+    unsigned long extio_int2    : 1;  /* RW, W1C */
+    unsigned long extio_int3    : 1;  /* RW, W1C */
+    unsigned long profile_int   : 1;  /* RW, W1C */
+    unsigned long rtc0          : 1;  /* RW, W1C */
+    unsigned long rtc1          : 1;  /* RW, W1C */
+    unsigned long rtc2          : 1;  /* RW, W1C */
+    unsigned long rtc3          : 1;  /* RW, W1C */
+    unsigned long bau_data      : 1;  /* RW, W1C */
+    unsigned long power_management_req : 1;  /* RW, W1C */
+    unsigned long rsvd_57_63    : 7;  /*    */
+  } s;
+};
+
+/* ========================================================================= */
+/*                        UVH_EVENT_OCCURRED0_ALIAS                          */
+/* ========================================================================= */
+#define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL
+#define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0
+
+/* ========================================================================= */
+/*                               UVH_INT_CMPB                                */
+/* ========================================================================= */
+#define UVH_INT_CMPB 0x22080UL
+
+#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
+#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL
+
+union uvh_int_cmpb_u {
+    unsigned long v;
+    struct uvh_int_cmpb_s {
+      unsigned long real_time_cmpb : 56;  /* RW */
+      unsigned long rsvd_56_63     :  8;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
+/*                               UVH_INT_CMPC                                */
+/* ========================================================================= */
+#define UVH_INT_CMPC 0x22100UL
+
+#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
+#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK 0x00ffffffffffffffUL
+
+union uvh_int_cmpc_u {
+    unsigned long v;
+    struct uvh_int_cmpc_s {
+      unsigned long real_time_cmpc : 56;  /* RW */
+      unsigned long rsvd_56_63     :  8;  /*    */
+    } s;
+};
- #define UV_MMR_ENABLE (1UL << 63)
+/* ========================================================================= */
+/*                               UVH_INT_CMPD                                */
+/* ========================================================================= */
+#define UVH_INT_CMPD 0x22180UL
+
+#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
+#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK 0x00ffffffffffffffUL
+
+union uvh_int_cmpd_u {
+    unsigned long v;
+    struct uvh_int_cmpd_s {
+      unsigned long real_time_cmpd : 56;  /* RW */
+      unsigned long rsvd_56_63     :  8;  /*    */
+    } s;
+};
 
 /* ========================================================================= */
 /*                                UVH_NODE_ID                                */
@@ -111,8 +384,8 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
 
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 46
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0000400000000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
@@ -123,8 +396,9 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
     struct uvh_rh_gam_gru_overlay_config_mmr_s {
       unsigned long rsvd_0_27 : 28;  /*    */
       unsigned long base      : 18;  /* RW */
+      unsigned long rsvd_46_47:  2;  /*    */
       unsigned long gr4       :  1;  /* RW */
-      unsigned long rsvd_47_51:  5;  /*    */
+      unsigned long rsvd_49_51:  3;  /*    */
       unsigned long n_gru     :  4;  /* RW */
       unsigned long rsvd_56_62:  7;  /*    */
       unsigned long enable    :  1;  /* RW */
@@ -157,7 +431,7 @@ union uvh_rh_gam_mmr_overlay_config_mmr_u {
 /* ========================================================================= */
 /*                                  UVH_RTC                                  */
 /* ========================================================================= */
-#define UVH_RTC 0x28000UL
+#define UVH_RTC 0x340000UL
 
 #define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
 #define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL
@@ -170,6 +444,139 @@ union uvh_rtc_u {
     } s;
 };
 
+/* ========================================================================= */
+/*                            UVH_RTC1_INT_CONFIG                            */
+/* ========================================================================= */
+#define UVH_RTC1_INT_CONFIG 0x615c0UL
+
+#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0
+#define UVH_RTC1_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
+#define UVH_RTC1_INT_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_RTC1_INT_CONFIG_DESTMODE_SHFT 11
+#define UVH_RTC1_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_RTC1_INT_CONFIG_STATUS_SHFT 12
+#define UVH_RTC1_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_RTC1_INT_CONFIG_P_SHFT 13
+#define UVH_RTC1_INT_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_RTC1_INT_CONFIG_T_SHFT 15
+#define UVH_RTC1_INT_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_RTC1_INT_CONFIG_M_SHFT 16
+#define UVH_RTC1_INT_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_RTC1_INT_CONFIG_APIC_ID_SHFT 32
+#define UVH_RTC1_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_rtc1_int_config_u {
+    unsigned long v;
+    struct uvh_rtc1_int_config_s {
+      unsigned long vector_   :  8;  /* RW */
+      unsigned long dm        :  3;  /* RW */
+      unsigned long destmode  :  1;  /* RW */
+      unsigned long status    :  1;  /* RO */
+      unsigned long p         :  1;  /* RO */
+      unsigned long rsvd_14   :  1;  /*    */
+      unsigned long t         :  1;  /* RO */
+      unsigned long m         :  1;  /* RW */
+      unsigned long rsvd_17_31: 15;  /*    */
+      unsigned long apic_id   : 32;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                            UVH_RTC2_INT_CONFIG                            */
+/* ========================================================================= */
+#define UVH_RTC2_INT_CONFIG 0x61600UL
+
+#define UVH_RTC2_INT_CONFIG_VECTOR_SHFT 0
+#define UVH_RTC2_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_RTC2_INT_CONFIG_DM_SHFT 8
+#define UVH_RTC2_INT_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_RTC2_INT_CONFIG_DESTMODE_SHFT 11
+#define UVH_RTC2_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_RTC2_INT_CONFIG_STATUS_SHFT 12
+#define UVH_RTC2_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_RTC2_INT_CONFIG_P_SHFT 13
+#define UVH_RTC2_INT_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_RTC2_INT_CONFIG_T_SHFT 15
+#define UVH_RTC2_INT_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_RTC2_INT_CONFIG_M_SHFT 16
+#define UVH_RTC2_INT_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_RTC2_INT_CONFIG_APIC_ID_SHFT 32
+#define UVH_RTC2_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_rtc2_int_config_u {
+    unsigned long v;
+    struct uvh_rtc2_int_config_s {
+      unsigned long vector_   :  8;  /* RW */
+      unsigned long dm        :  3;  /* RW */
+      unsigned long destmode  :  1;  /* RW */
+      unsigned long status    :  1;  /* RO */
+      unsigned long p         :  1;  /* RO */
+      unsigned long rsvd_14   :  1;  /*    */
+      unsigned long t         :  1;  /* RO */
+      unsigned long m         :  1;  /* RW */
+      unsigned long rsvd_17_31: 15;  /*    */
+      unsigned long apic_id   : 32;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                            UVH_RTC3_INT_CONFIG                            */
+/* ========================================================================= */
+#define UVH_RTC3_INT_CONFIG 0x61640UL
+
+#define UVH_RTC3_INT_CONFIG_VECTOR_SHFT 0
+#define UVH_RTC3_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_RTC3_INT_CONFIG_DM_SHFT 8
+#define UVH_RTC3_INT_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_RTC3_INT_CONFIG_DESTMODE_SHFT 11
+#define UVH_RTC3_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_RTC3_INT_CONFIG_STATUS_SHFT 12
+#define UVH_RTC3_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_RTC3_INT_CONFIG_P_SHFT 13
+#define UVH_RTC3_INT_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_RTC3_INT_CONFIG_T_SHFT 15
+#define UVH_RTC3_INT_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_RTC3_INT_CONFIG_M_SHFT 16
+#define UVH_RTC3_INT_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_RTC3_INT_CONFIG_APIC_ID_SHFT 32
+#define UVH_RTC3_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_rtc3_int_config_u {
+    unsigned long v;
+    struct uvh_rtc3_int_config_s {
+      unsigned long vector_   :  8;  /* RW */
+      unsigned long dm        :  3;  /* RW */
+      unsigned long destmode  :  1;  /* RW */
+      unsigned long status    :  1;  /* RO */
+      unsigned long p         :  1;  /* RO */
+      unsigned long rsvd_14   :  1;  /*    */
+      unsigned long t         :  1;  /* RO */
+      unsigned long m         :  1;  /* RW */
+      unsigned long rsvd_17_31: 15;  /*    */
+      unsigned long apic_id   : 32;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                             UVH_RTC_INC_RATIO                             */
+/* ========================================================================= */
+#define UVH_RTC_INC_RATIO 0x350000UL
+
+#define UVH_RTC_INC_RATIO_FRACTION_SHFT 0
+#define UVH_RTC_INC_RATIO_FRACTION_MASK 0x00000000000fffffUL
+#define UVH_RTC_INC_RATIO_RATIO_SHFT 20
+#define UVH_RTC_INC_RATIO_RATIO_MASK 0x0000000000700000UL
+
+union uvh_rtc_inc_ratio_u {
+    unsigned long v;
+    struct uvh_rtc_inc_ratio_s {
+      unsigned long fraction  : 20;  /* RW */
+      unsigned long ratio     :  3;  /* RW */
+      unsigned long rsvd_23_63: 41;  /*    */
+    } s;
+};
+
 /* ========================================================================= */
 /*                          UVH_SI_ADDR_MAP_CONFIG                           */
 /* ========================================================================= */
-- cgit v1.2.3
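Every MMR described by this header follows the same pattern: an address macro, a SHFT/MASK macro pair per field, and a union that overlays the raw 64-bit register value with a bit-field view. The standalone sketch below shows how such a definition is typically decoded, reusing the UVH_RTC_INC_RATIO layout added above; the sample register value, the main() wrapper, and the assumption of a 64-bit unsigned long are illustrative only and are not part of the patch.

#include <stdio.h>

/* Field layout copied from the UVH_RTC_INC_RATIO definitions above. */
#define UVH_RTC_INC_RATIO_FRACTION_SHFT 0
#define UVH_RTC_INC_RATIO_FRACTION_MASK 0x00000000000fffffUL
#define UVH_RTC_INC_RATIO_RATIO_SHFT    20
#define UVH_RTC_INC_RATIO_RATIO_MASK    0x0000000000700000UL

union uvh_rtc_inc_ratio_u {
	unsigned long v;			/* raw 64-bit MMR value */
	struct uvh_rtc_inc_ratio_s {
		unsigned long fraction   : 20;	/* RW */
		unsigned long ratio      :  3;	/* RW */
		unsigned long rsvd_23_63 : 41;	/*    */
	} s;
};

int main(void)
{
	/* Made-up register value, purely for demonstration. */
	union uvh_rtc_inc_ratio_u r;
	r.v = (3UL << UVH_RTC_INC_RATIO_RATIO_SHFT) | 0x12345UL;

	/* Decode through the bit-field view ... */
	printf("ratio=%lu fraction=0x%lx\n",
	       (unsigned long)r.s.ratio, (unsigned long)r.s.fraction);

	/* ... or, equivalently, with the SHFT/MASK macros. */
	printf("ratio=%lu fraction=0x%lx\n",
	       (r.v & UVH_RTC_INC_RATIO_RATIO_MASK) >> UVH_RTC_INC_RATIO_RATIO_SHFT,
	       (r.v & UVH_RTC_INC_RATIO_FRACTION_MASK) >> UVH_RTC_INC_RATIO_FRACTION_SHFT);
	return 0;
}

Keeping both views lets callers pick whichever is more convenient at the call site: the union is handy when several fields of one MMR are read or written together, while the SHFT/MASK macros operate on a plain unsigned long without declaring the union.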