Diffstat (limited to 'include/asm-powerpc')
45 files changed, 3503 insertions, 199 deletions
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h new file mode 100644 index 00000000000..dc25c53704d --- /dev/null +++ b/include/asm-powerpc/bitops.h @@ -0,0 +1,437 @@ +/* + * PowerPC atomic bit operations. + * + * Merged version by David Gibson <david@gibson.dropbear.id.au>. + * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don + * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard. They + * originally took it from the ppc32 code. + * + * Within a word, bits are numbered LSB first. Lots of places make + * this assumption by directly testing bits with (val & (1<<nr)). + * This can cause confusion for large (> 1 word) bitmaps on a + * big-endian system because, unlike little endian, the number of each + * bit depends on the word size. + * + * The bitop functions are defined to work on unsigned longs, so for a + * ppc64 system the bits end up numbered: + * |63..............0|127............64|191...........128|255...........192| + * and on ppc32: + * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224| + * + * There are a few little-endian macros used mostly for filesystem + * bitmaps; these work on similar bit array layouts, but are + * byte-oriented: + * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56| + * + * The main difference is that bits 3-5 (64b) or 3-4 (32b) in the bit + * number field need to be reversed compared to the big-endian bit + * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b). + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _ASM_POWERPC_BITOPS_H +#define _ASM_POWERPC_BITOPS_H + +#ifdef __KERNEL__ + +#include <linux/compiler.h> +#include <asm/atomic.h> +#include <asm/synch.h> + +/* + * clear_bit doesn't imply a memory barrier + */ +#define smp_mb__before_clear_bit() smp_mb() +#define smp_mb__after_clear_bit() smp_mb() + +#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) + +#ifdef CONFIG_PPC64 +#define LARXL "ldarx" +#define STCXL "stdcx." +#define CNTLZL "cntlzd" +#else +#define LARXL "lwarx" +#define STCXL "stwcx."
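+/* Note: LARXL/STCXL/CNTLZL select the native-word-size load-and-reserve, store-conditional and count-leading-zeros mnemonics (ldarx/stdcx./cntlzd on 64-bit, lwarx/stwcx./cntlzw on 32-bit), so the asm templates below serve both ppc32 and ppc64 unchanged. */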
+#define CNTLZL "cntlzw" +#endif + +static __inline__ void set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long old; + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + + __asm__ __volatile__( +"1:" LARXL " %0,0,%3 # set_bit\n" + "or %0,%0,%2\n" + PPC405_ERR77(0,%3) + STCXL " %0,0,%3\n" + "bne- 1b" + : "=&r"(old), "=m"(*p) + : "r"(mask), "r"(p), "m"(*p) + : "cc" ); +} + +static __inline__ void clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long old; + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + + __asm__ __volatile__( +"1:" LARXL " %0,0,%3 # clear_bit\n" + "andc %0,%0,%2\n" + PPC405_ERR77(0,%3) + STCXL " %0,0,%3\n" + "bne- 1b" + : "=&r"(old), "=m"(*p) + : "r"(mask), "r"(p), "m"(*p) + : "cc" ); +} + +static __inline__ void change_bit(int nr, volatile unsigned long *addr) +{ + unsigned long old; + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + + __asm__ __volatile__( +"1:" LARXL " %0,0,%3 # change_bit\n" + "xor %0,%0,%2\n" + PPC405_ERR77(0,%3) + STCXL " %0,0,%3\n" + "bne- 1b" + : "=&r"(old), "=m"(*p) + : "r"(mask), "r"(p), "m"(*p) + : "cc" ); +} + +static __inline__ int test_and_set_bit(unsigned long nr, + volatile unsigned long *addr) +{ + unsigned long old, t; + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + + __asm__ __volatile__( + EIEIO_ON_SMP +"1:" LARXL " %0,0,%3 # test_and_set_bit\n" + "or %1,%0,%2 \n" + PPC405_ERR77(0,%3) + STCXL " %1,0,%3 \n" + "bne- 1b" + ISYNC_ON_SMP + : "=&r" (old), "=&r" (t) + : "r" (mask), "r" (p) + : "cc", "memory"); + + return (old & mask) != 0; +} + +static __inline__ int test_and_clear_bit(unsigned long nr, + volatile unsigned long *addr) +{ + unsigned long old, t; + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + + __asm__ __volatile__( + EIEIO_ON_SMP +"1:" LARXL " %0,0,%3 # test_and_clear_bit\n" + "andc %1,%0,%2 \n" + PPC405_ERR77(0,%3) + STCXL " %1,0,%3 \n" + "bne- 1b" + ISYNC_ON_SMP + : "=&r" (old), "=&r" (t) + : "r" (mask), "r" (p) + : "cc", "memory"); + + return (old & mask) != 0; +} + +static __inline__ int test_and_change_bit(unsigned long nr, + volatile unsigned long *addr) +{ + unsigned long old, t; + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + + __asm__ __volatile__( + EIEIO_ON_SMP +"1:" LARXL " %0,0,%3 # test_and_change_bit\n" + "xor %1,%0,%2 \n" + PPC405_ERR77(0,%3) + STCXL " %1,0,%3 \n" + "bne- 1b" + ISYNC_ON_SMP + : "=&r" (old), "=&r" (t) + : "r" (mask), "r" (p) + : "cc", "memory"); + + return (old & mask) != 0; +} + +static __inline__ void set_bits(unsigned long mask, unsigned long *addr) +{ + unsigned long old; + + __asm__ __volatile__( +"1:" LARXL " %0,0,%3 # set_bits\n" + "or %0,%0,%2\n" + STCXL " %0,0,%3\n" + "bne- 1b" + : "=&r" (old), "=m" (*addr) + : "r" (mask), "r" (addr), "m" (*addr) + : "cc"); +} + +/* Non-atomic versions */ +static __inline__ int test_bit(unsigned long nr, + __const__ volatile unsigned long *addr) +{ + return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); +} + +static __inline__ void __set_bit(unsigned long nr, + volatile unsigned long *addr) +{ + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + + *p |= mask; +} + +static __inline__ void __clear_bit(unsigned long nr, + volatile unsigned long *addr) +{ +
unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + + *p &= ~mask; +} + +static __inline__ void __change_bit(unsigned long nr, + volatile unsigned long *addr) +{ + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + + *p ^= mask; +} + +static __inline__ int __test_and_set_bit(unsigned long nr, + volatile unsigned long *addr) +{ + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + unsigned long old = *p; + + *p = old | mask; + return (old & mask) != 0; +} + +static __inline__ int __test_and_clear_bit(unsigned long nr, + volatile unsigned long *addr) +{ + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + unsigned long old = *p; + + *p = old & ~mask; + return (old & mask) != 0; +} + +static __inline__ int __test_and_change_bit(unsigned long nr, + volatile unsigned long *addr) +{ + unsigned long mask = BITOP_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + unsigned long old = *p; + + *p = old ^ mask; + return (old & mask) != 0; +} + +/* + * Return the zero-based bit position (LE, not IBM bit numbering) of + * the most significant 1-bit in a word. + */ +static __inline__ int __ilog2(unsigned long x) +{ + int lz; + + asm (CNTLZL " %0,%1" : "=r" (lz) : "r" (x)); + return BITS_PER_LONG - 1 - lz; +} + +/* + * Determines the bit position of the least significant 0 bit in the + * specified word. The returned bit position will be + * zero-based, starting from the right side (63/31 - 0). + */ +static __inline__ unsigned long ffz(unsigned long x) +{ + /* no zero exists anywhere in the word. */ + if ((x = ~x) == 0) + return BITS_PER_LONG; + + /* + * Calculate the bit position of the least significant '1' bit in x + * (since x has been changed this will actually be the least significant + * '0' bit in the original x). Note: (x & -x) gives us a mask that + * is the least significant (RIGHT-most) 1-bit of the value in x. + */ + return __ilog2(x & -x); +} + +static __inline__ int __ffs(unsigned long x) +{ + return __ilog2(x & -x); +} + +/* + * ffs: find first bit set. This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above ffz (man ffs). + */ +static __inline__ int ffs(int x) +{ + unsigned long i = (unsigned long)x; + return __ilog2(i & -i) + 1; +} + +/* + * fls: find last (most-significant) bit set. + * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. + */ +static __inline__ int fls(unsigned int x) +{ + int lz; + + asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); + return 32 - lz; +} + +/* + * hweightN: returns the hamming weight (i.e. the number + * of bits set) of an N-bit word + */ +#define hweight64(x) generic_hweight64(x) +#define hweight32(x) generic_hweight32(x) +#define hweight16(x) generic_hweight16(x) +#define hweight8(x) generic_hweight8(x) + +#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) +unsigned long find_next_zero_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); +/** + * find_first_bit - find the first set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit-number of the first set bit, not the number of the byte + * containing a bit.
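+ * For example, for a bitmap whose words are { 0x0UL, 0x8UL } the result is BITS_PER_LONG + 3.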
+ */ +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) +unsigned long find_next_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); + +/* Little-endian versions */ + +static __inline__ int test_le_bit(unsigned long nr, + __const__ unsigned long *addr) +{ + __const__ unsigned char *tmp = (__const__ unsigned char *) addr; + return (tmp[nr >> 3] >> (nr & 7)) & 1; +} + +#define __set_le_bit(nr, addr) \ + __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) +#define __clear_le_bit(nr, addr) \ + __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) + +#define test_and_set_le_bit(nr, addr) \ + test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) +#define test_and_clear_le_bit(nr, addr) \ + test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) + +#define __test_and_set_le_bit(nr, addr) \ + __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) +#define __test_and_clear_le_bit(nr, addr) \ + __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) + +#define find_first_zero_le_bit(addr, size) find_next_zero_le_bit((addr), (size), 0) +unsigned long find_next_zero_le_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); + +/* Bitmap functions for the ext2 filesystem */ + +#define ext2_set_bit(nr,addr) \ + __test_and_set_le_bit((nr), (unsigned long*)addr) +#define ext2_clear_bit(nr, addr) \ + __test_and_clear_le_bit((nr), (unsigned long*)addr) + +#define ext2_set_bit_atomic(lock, nr, addr) \ + test_and_set_le_bit((nr), (unsigned long*)addr) +#define ext2_clear_bit_atomic(lock, nr, addr) \ + test_and_clear_le_bit((nr), (unsigned long*)addr) + +#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr) + +#define ext2_find_first_zero_bit(addr, size) \ + find_first_zero_le_bit((unsigned long*)addr, size) +#define ext2_find_next_zero_bit(addr, size, off) \ + find_next_zero_le_bit((unsigned long*)addr, size, off) + +/* Bitmap functions for the minix filesystem. */ + +#define minix_test_and_set_bit(nr,addr) \ + __test_and_set_le_bit(nr, (unsigned long *)addr) +#define minix_set_bit(nr,addr) \ + __set_le_bit(nr, (unsigned long *)addr) +#define minix_test_and_clear_bit(nr,addr) \ + __test_and_clear_le_bit(nr, (unsigned long *)addr) +#define minix_test_bit(nr,addr) \ + test_le_bit(nr, (unsigned long *)addr) + +#define minix_find_first_zero_bit(addr,size) \ + find_first_zero_le_bit((unsigned long *)addr, size) + +/* + * Every architecture must define this function. It's the fastest + * way of searching a 140-bit bitmap where the first 100 bits are + * unlikely to be set. It's guaranteed that at least one of the 140 + * bits is set.
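+ * On ppc64 the 140 bits span three 64-bit words (b[0] holds bits 0-63, b[1] bits 64-127, b[2] bits 128-139); on ppc32 they span five 32-bit words, which is why the two unrolled versions below differ.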
+ */ +static inline int sched_find_first_bit(const unsigned long *b) +{ +#ifdef CONFIG_PPC64 + if (unlikely(b[0])) + return __ffs(b[0]); + if (unlikely(b[1])) + return __ffs(b[1]) + 64; + return __ffs(b[2]) + 128; +#else + if (unlikely(b[0])) + return __ffs(b[0]); + if (unlikely(b[1])) + return __ffs(b[1]) + 32; + if (unlikely(b[2])) + return __ffs(b[2]) + 64; + if (b[3]) + return __ffs(b[3]) + 96; + return __ffs(b[4]) + 128; +#endif +} + +#endif /* __KERNEL__ */ + +#endif /* _ASM_POWERPC_BITOPS_H */ diff --git a/include/asm-powerpc/bug.h b/include/asm-powerpc/bug.h index e4d028e8702..d625ee55f95 100644 --- a/include/asm-powerpc/bug.h +++ b/include/asm-powerpc/bug.h @@ -12,20 +12,16 @@ #ifndef __ASSEMBLY__ #ifdef __powerpc64__ -#define BUG_TABLE_ENTRY(label, line, file, func) \ - ".llong " #label "\n .long " #line "\n .llong " #file ", " #func "\n" -#define TRAP_OP(ra, rb) "1: tdnei " #ra ", " #rb "\n" -#define DATA_TYPE long long +#define BUG_TABLE_ENTRY ".llong" +#define BUG_TRAP_OP "tdnei" #else -#define BUG_TABLE_ENTRY(label, line, file, func) \ - ".long " #label ", " #line ", " #file ", " #func "\n" -#define TRAP_OP(ra, rb) "1: twnei " #ra ", " #rb "\n" -#define DATA_TYPE int +#define BUG_TABLE_ENTRY ".long" +#define BUG_TRAP_OP "twnei" #endif /* __powerpc64__ */ struct bug_entry { unsigned long bug_addr; - int line; + long line; const char *file; const char *function; }; @@ -43,29 +39,29 @@ struct bug_entry *find_bug(unsigned long bugaddr); #define BUG() do { \ __asm__ __volatile__( \ "1: twi 31,0,0\n" \ - ".section __bug_table,\"a\"\n\t" \ - BUG_TABLE_ENTRY(1b,%0,%1,%2) \ + ".section __bug_table,\"a\"\n" \ + "\t"BUG_TABLE_ENTRY" 1b,%0,%1,%2\n" \ ".previous" \ : : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \ } while (0) #define BUG_ON(x) do { \ __asm__ __volatile__( \ - TRAP_OP(%0,0) \ - ".section __bug_table,\"a\"\n\t" \ - BUG_TABLE_ENTRY(1b,%1,%2,%3) \ + "1: "BUG_TRAP_OP" %0,0\n" \ + ".section __bug_table,\"a\"\n" \ + "\t"BUG_TABLE_ENTRY" 1b,%1,%2,%3\n" \ ".previous" \ - : : "r" ((DATA_TYPE)(x)), "i" (__LINE__), \ + : : "r" ((long)(x)), "i" (__LINE__), \ "i" (__FILE__), "i" (__FUNCTION__)); \ } while (0) #define WARN_ON(x) do { \ __asm__ __volatile__( \ - TRAP_OP(%0,0) \ - ".section __bug_table,\"a\"\n\t" \ - BUG_TABLE_ENTRY(1b,%1,%2,%3) \ + "1: "BUG_TRAP_OP" %0,0\n" \ + ".section __bug_table,\"a\"\n" \ + "\t"BUG_TABLE_ENTRY" 1b,%1,%2,%3\n" \ ".previous" \ - : : "r" ((DATA_TYPE)(x)), \ + : : "r" ((long)(x)), \ "i" (__LINE__ + BUG_WARNING_TRAP), \ "i" (__FILE__), "i" (__FUNCTION__)); \ } while (0) diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h index c019501dace..79a0556a0ab 100644 --- a/include/asm-powerpc/cputable.h +++ b/include/asm-powerpc/cputable.h @@ -101,6 +101,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset); #define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0000020000000000) #define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0000040000000000) #define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0000080000000000) +#define CPU_FTR_CI_LARGE_PAGE ASM_CONST(0x0000100000000000) #else /* ensure on 32b processors the flags are available for compiling but * don't do anything */ @@ -116,6 +117,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset); #define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0) #define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0) #define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0) +#define CPU_FTR_CI_LARGE_PAGE ASM_CONST(0x0) #endif #ifndef __ASSEMBLY__ @@ -339,6 +341,7 @@ enum { #ifdef __powerpc64__ CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | 
CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_CELL | + CPU_FTR_CI_LARGE_PAGE | #endif 0, diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h index d22b10021b5..feac3458d71 100644 --- a/include/asm-powerpc/elf.h +++ b/include/asm-powerpc/elf.h @@ -1,11 +1,13 @@ #ifndef _ASM_POWERPC_ELF_H #define _ASM_POWERPC_ELF_H +#include <linux/sched.h> /* for task_struct */ #include <asm/types.h> #include <asm/ptrace.h> #include <asm/cputable.h> #include <asm/auxvec.h> #include <asm/page.h> +#include <asm/string.h> /* PowerPC relocations defined by the ABIs */ #define R_PPC_NONE 0 @@ -178,18 +180,22 @@ typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32]; static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs, struct pt_regs *regs) { - int i; - int gprs = sizeof(struct pt_regs)/sizeof(ELF_GREG_TYPE); + int i, nregs; - if (gprs > ELF_NGREG) - gprs = ELF_NGREG; + memset((void *)elf_regs, 0, sizeof(elf_gregset_t)); - for (i=0; i < gprs; i++) - elf_regs[i] = (elf_greg_t)((ELF_GREG_TYPE *)regs)[i]; - - memset((char *)(elf_regs) + sizeof(struct pt_regs), 0, \ - sizeof(elf_gregset_t) - sizeof(struct pt_regs)); + /* Our registers are always unsigned longs, whether we're a 32 bit + * process or 64 bit, on either a 64 bit or 32 bit kernel. + * Don't use ELF_GREG_TYPE here. */ + nregs = sizeof(struct pt_regs) / sizeof(unsigned long); + if (nregs > ELF_NGREG) + nregs = ELF_NGREG; + for (i = 0; i < nregs; i++) { + /* This will correctly truncate 64 bit registers to 32 bits + * for a 32 bit process on a 64 bit kernel. */ + elf_regs[i] = (elf_greg_t)((ELF_GREG_TYPE *)regs)[i]; + } } #define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs); diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h new file mode 100644 index 00000000000..37c94e52ab6 --- /dev/null +++ b/include/asm-powerpc/futex.h @@ -0,0 +1,84 @@ +#ifndef _ASM_POWERPC_FUTEX_H +#define _ASM_POWERPC_FUTEX_H + +#ifdef __KERNEL__ + +#include <linux/futex.h> +#include <asm/errno.h> +#include <asm/synch.h> +#include <asm/uaccess.h> +#include <asm/ppc_asm.h> + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ + __asm__ __volatile ( \ + SYNC_ON_SMP \ +"1: lwarx %0,0,%2\n" \ + insn \ +"2: stwcx. %1,0,%2\n" \ + "bne- 1b\n" \ + "li %1,0\n" \ +"3: .section .fixup,\"ax\"\n" \ +"4: li %1,%3\n" \ + "b 3b\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n" \ + ".align 3\n" \ + DATAL " 1b,4b,2b,4b\n" \ + ".previous" \ + : "=&r" (oldval), "=&r" (ret) \ + : "b" (uaddr), "i" (-EFAULT), "1" (oparg) \ + : "cr0", "memory") + +static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +{ + int op = (encoded_op >> 28) & 7; + int cmp = (encoded_op >> 24) & 15; + int oparg = (encoded_op << 8) >> 20; + int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; + if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) + oparg = 1 << oparg; + + if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + return -EFAULT; + + inc_preempt_count(); + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("add %1,%0,%1\n", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op("or %1,%0,%1\n", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("andc %1,%0,%1\n", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("xor %1,%0,%1\n", ret, oldval, uaddr, oparg); + break; + default: + ret = -ENOSYS; + } + + dec_preempt_count(); + + if (!ret) { + switch (cmp) { + case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; + case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; + case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; + case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; + case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; + case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; + default: ret = -ENOSYS; + } + } + return ret; +} + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_FUTEX_H */ diff --git a/include/asm-powerpc/ioctls.h b/include/asm-powerpc/ioctls.h index 5b94ff489b8..279a6229584 100644 --- a/include/asm-powerpc/ioctls.h +++ b/include/asm-powerpc/ioctls.h @@ -62,6 +62,9 @@ # define TIOCM_DSR 0x100 # define TIOCM_CD TIOCM_CAR # define TIOCM_RI TIOCM_RNG +#define TIOCM_OUT1 0x2000 +#define TIOCM_OUT2 0x4000 +#define TIOCM_LOOP 0x8000 #define TIOCGSOFTCAR 0x5419 #define TIOCSSOFTCAR 0x541A diff --git a/include/asm-powerpc/iommu.h b/include/asm-powerpc/iommu.h index 9d91bdd667a..6a35e6570cc 100644 --- a/include/asm-powerpc/iommu.h +++ b/include/asm-powerpc/iommu.h @@ -74,6 +74,11 @@ extern void iommu_devnode_init_pSeries(struct device_node *dn); /* Creates table for an individual device node */ extern void iommu_devnode_init_iSeries(struct device_node *dn); +/* Get table parameters from HV */ +extern void iommu_table_getparms_iSeries(unsigned long busno, + unsigned char slotno, + unsigned char virtbus, + struct iommu_table* tbl); #endif /* CONFIG_PPC_ISERIES */ diff --git a/include/asm-powerpc/ipcbuf.h b/include/asm-powerpc/ipcbuf.h new file mode 100644 index 00000000000..2c3e1d94db1 --- /dev/null +++ b/include/asm-powerpc/ipcbuf.h @@ -0,0 +1,34 @@ +#ifndef _ASM_POWERPC_IPCBUF_H +#define _ASM_POWERPC_IPCBUF_H + +/* + * The ipc64_perm structure for the powerpc is identical to + * kern_ipc_perm as we have always had 32-bit UIDs and GIDs in the + * kernel. Note extra padding because this structure is passed back + * and forth between kernel and user space. Pad space is left for: + * - 1 32-bit value to fill up for 8-byte alignment + * - 2 miscellaneous 64-bit values + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include <linux/types.h> + +struct ipc64_perm +{ + __kernel_key_t key; + __kernel_uid_t uid; + __kernel_gid_t gid; + __kernel_uid_t cuid; + __kernel_gid_t cgid; + __kernel_mode_t mode; + unsigned int seq; + unsigned int __pad1; + unsigned long long __unused1; + unsigned long long __unused2; +}; + +#endif /* _ASM_POWERPC_IPCBUF_H */ diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h index c7c3f912a3c..b3935ea28ff 100644 --- a/include/asm-powerpc/irq.h +++ b/include/asm-powerpc/irq.h @@ -73,7 +73,7 @@ extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq); #define IC_INVALID 0 #define IC_OPEN_PIC 1 #define IC_PPC_XIC 2 -#define IC_BPA_IIC 3 +#define IC_CELL_PIC 3 #define IC_ISERIES 4 extern u64 ppc64_interrupt_controller; diff --git a/include/asm-powerpc/iseries/hv_call.h b/include/asm-powerpc/iseries/hv_call.h new file mode 100644 index 00000000000..e9f831c9a5e --- /dev/null +++ b/include/asm-powerpc/iseries/hv_call.h @@ -0,0 +1,113 @@ +/* + * HvCall.h + * Copyright (C) 2001 Mike Corrigan IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/* + * This file contains the "hypervisor call" interface which is used to + * drive the hypervisor from the OS. 
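+ * Each call is dispatched through one of the HvCallN() primitives declared in hv_call_sc.h, with the function number formed as a base value (HvCallBase, HvCallCc, ...) plus a small index.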
+ */ +#ifndef _ASM_POWERPC_ISERIES_HV_CALL_H +#define _ASM_POWERPC_ISERIES_HV_CALL_H + +#include <asm/iseries/hv_call_sc.h> +#include <asm/iseries/hv_types.h> +#include <asm/paca.h> + +/* Type of yield for HvCallBaseYieldProcessor */ +#define HvCall_YieldTimed 0 /* Yield until specified time (tb) */ +#define HvCall_YieldToActive 1 /* Yield until all active procs have run */ +#define HvCall_YieldToProc 2 /* Yield until the specified processor has run */ + +/* interrupt masks for setEnabledInterrupts */ +#define HvCall_MaskIPI 0x00000001 +#define HvCall_MaskLpEvent 0x00000002 +#define HvCall_MaskLpProd 0x00000004 +#define HvCall_MaskTimeout 0x00000008 + +/* Log buffer formats */ +#define HvCall_LogBuffer_ASCII 0 +#define HvCall_LogBuffer_EBCDIC 1 + +#define HvCallBaseAckDeferredInts HvCallBase + 0 +#define HvCallBaseCpmPowerOff HvCallBase + 1 +#define HvCallBaseGetHwPatch HvCallBase + 2 +#define HvCallBaseReIplSpAttn HvCallBase + 3 +#define HvCallBaseSetASR HvCallBase + 4 +#define HvCallBaseSetASRAndRfi HvCallBase + 5 +#define HvCallBaseSetIMR HvCallBase + 6 +#define HvCallBaseSendIPI HvCallBase + 7 +#define HvCallBaseTerminateMachine HvCallBase + 8 +#define HvCallBaseTerminateMachineSrc HvCallBase + 9 +#define HvCallBaseProcessPlicInterrupts HvCallBase + 10 +#define HvCallBaseIsPrimaryCpmOrMsdIpl HvCallBase + 11 +#define HvCallBaseSetVirtualSIT HvCallBase + 12 +#define HvCallBaseVaryOffThisProcessor HvCallBase + 13 +#define HvCallBaseVaryOffMemoryChunk HvCallBase + 14 +#define HvCallBaseVaryOffInteractivePercentage HvCallBase + 15 +#define HvCallBaseSendLpProd HvCallBase + 16 +#define HvCallBaseSetEnabledInterrupts HvCallBase + 17 +#define HvCallBaseYieldProcessor HvCallBase + 18 +#define HvCallBaseVaryOffSharedProcUnits HvCallBase + 19 +#define HvCallBaseSetVirtualDecr HvCallBase + 20 +#define HvCallBaseClearLogBuffer HvCallBase + 21 +#define HvCallBaseGetLogBufferCodePage HvCallBase + 22 +#define HvCallBaseGetLogBufferFormat HvCallBase + 23 +#define HvCallBaseGetLogBufferLength HvCallBase + 24 +#define HvCallBaseReadLogBuffer HvCallBase + 25 +#define HvCallBaseSetLogBufferFormatAndCodePage HvCallBase + 26 +#define HvCallBaseWriteLogBuffer HvCallBase + 27 +#define HvCallBaseRouter28 HvCallBase + 28 +#define HvCallBaseRouter29 HvCallBase + 29 +#define HvCallBaseRouter30 HvCallBase + 30 +#define HvCallBaseSetDebugBus HvCallBase + 31 + +#define HvCallCcSetDABR HvCallCc + 7 + +static inline void HvCall_setVirtualDecr(void) +{ + /* + * Ignore any error return codes - most likely means that the + * target value for the LP has been increased and this vary off + * would bring us below the new target. 
+ */ + HvCall0(HvCallBaseSetVirtualDecr); +} + +static inline void HvCall_yieldProcessor(unsigned typeOfYield, u64 yieldParm) +{ + HvCall2(HvCallBaseYieldProcessor, typeOfYield, yieldParm); +} + +static inline void HvCall_setEnabledInterrupts(u64 enabledInterrupts) +{ + HvCall1(HvCallBaseSetEnabledInterrupts, enabledInterrupts); +} + +static inline void HvCall_setLogBufferFormatAndCodepage(int format, + u32 codePage) +{ + HvCall2(HvCallBaseSetLogBufferFormatAndCodePage, format, codePage); +} + +extern void HvCall_writeLogBuffer(const void *buffer, u64 bufLen); + +static inline void HvCall_sendIPI(struct paca_struct *targetPaca) +{ + HvCall1(HvCallBaseSendIPI, targetPaca->paca_index); +} + +#endif /* _ASM_POWERPC_ISERIES_HV_CALL_H */ diff --git a/include/asm-powerpc/iseries/hv_call_event.h b/include/asm-powerpc/iseries/hv_call_event.h new file mode 100644 index 00000000000..46763a30590 --- /dev/null +++ b/include/asm-powerpc/iseries/hv_call_event.h @@ -0,0 +1,253 @@ +/* + * HvCallEvent.h + * Copyright (C) 2001 Mike Corrigan IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/* + * This file contains the "hypervisor call" interface which is used to + * drive the hypervisor from the OS. 
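+ * LP ("logical partition") events are the message-passing mechanism between iSeries partitions; the event layout itself is defined in hv_lp_event.h.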
+ */ +#ifndef _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H +#define _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H + +#include <asm/iseries/hv_call_sc.h> +#include <asm/iseries/hv_types.h> +#include <asm/abs_addr.h> + +struct HvLpEvent; + +typedef u8 HvLpEvent_Type; +typedef u8 HvLpEvent_AckInd; +typedef u8 HvLpEvent_AckType; + +struct HvCallEvent_PackedParms { + u8 xAckType:1; + u8 xAckInd:1; + u8 xRsvd:1; + u8 xTargetLp:5; + u8 xType; + u16 xSubtype; + HvLpInstanceId xSourceInstId; + HvLpInstanceId xTargetInstId; +}; + +typedef u8 HvLpDma_Direction; +typedef u8 HvLpDma_AddressType; + +struct HvCallEvent_PackedDmaParms { + u8 xDirection:1; + u8 xLocalAddrType:1; + u8 xRemoteAddrType:1; + u8 xRsvd1:5; + HvLpIndex xRemoteLp; + u8 xType; + u8 xRsvd2; + HvLpInstanceId xLocalInstId; + HvLpInstanceId xRemoteInstId; +}; + +typedef u64 HvLpEvent_Rc; +typedef u64 HvLpDma_Rc; + +#define HvCallEventAckLpEvent HvCallEvent + 0 +#define HvCallEventCancelLpEvent HvCallEvent + 1 +#define HvCallEventCloseLpEventPath HvCallEvent + 2 +#define HvCallEventDmaBufList HvCallEvent + 3 +#define HvCallEventDmaSingle HvCallEvent + 4 +#define HvCallEventDmaToSp HvCallEvent + 5 +#define HvCallEventGetOverflowLpEvents HvCallEvent + 6 +#define HvCallEventGetSourceLpInstanceId HvCallEvent + 7 +#define HvCallEventGetTargetLpInstanceId HvCallEvent + 8 +#define HvCallEventOpenLpEventPath HvCallEvent + 9 +#define HvCallEventSetLpEventStack HvCallEvent + 10 +#define HvCallEventSignalLpEvent HvCallEvent + 11 +#define HvCallEventSignalLpEventParms HvCallEvent + 12 +#define HvCallEventSetInterLpQueueIndex HvCallEvent + 13 +#define HvCallEventSetLpEventQueueInterruptProc HvCallEvent + 14 +#define HvCallEventRouter15 HvCallEvent + 15 + +static inline void HvCallEvent_getOverflowLpEvents(u8 queueIndex) +{ + HvCall1(HvCallEventGetOverflowLpEvents, queueIndex); +} + +static inline void HvCallEvent_setInterLpQueueIndex(u8 queueIndex) +{ + HvCall1(HvCallEventSetInterLpQueueIndex, queueIndex); +} + +static inline void HvCallEvent_setLpEventStack(u8 queueIndex, + char *eventStackAddr, u32 eventStackSize) +{ + u64 abs_addr; + + abs_addr = virt_to_abs(eventStackAddr); + HvCall3(HvCallEventSetLpEventStack, queueIndex, abs_addr, + eventStackSize); +} + +static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex, + u16 lpLogicalProcIndex) +{ + HvCall2(HvCallEventSetLpEventQueueInterruptProc, queueIndex, + lpLogicalProcIndex); +} + +static inline HvLpEvent_Rc HvCallEvent_signalLpEvent(struct HvLpEvent *event) +{ + u64 abs_addr; + +#ifdef DEBUG_SENDEVENT + printk("HvCallEvent_signalLpEvent: *event = %016lx\n ", + (unsigned long)event); +#endif + abs_addr = virt_to_abs(event); + return HvCall1(HvCallEventSignalLpEvent, abs_addr); +} + +static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp, + HvLpEvent_Type type, u16 subtype, HvLpEvent_AckInd ackInd, + HvLpEvent_AckType ackType, HvLpInstanceId sourceInstanceId, + HvLpInstanceId targetInstanceId, u64 correlationToken, + u64 eventData1, u64 eventData2, u64 eventData3, + u64 eventData4, u64 eventData5) +{ + /* Pack the misc bits into a single Dword to pass to PLIC */ + union { + struct HvCallEvent_PackedParms parms; + u64 dword; + } packed; + packed.parms.xAckType = ackType; + packed.parms.xAckInd = ackInd; + packed.parms.xRsvd = 0; + packed.parms.xTargetLp = targetLp; + packed.parms.xType = type; + packed.parms.xSubtype = subtype; + packed.parms.xSourceInstId = sourceInstanceId; + packed.parms.xTargetInstId = targetInstanceId; + + return HvCall7(HvCallEventSignalLpEventParms, 
packed.dword, + correlationToken, eventData1, eventData2, + eventData3, eventData4, eventData5); +} + +static inline HvLpEvent_Rc HvCallEvent_ackLpEvent(struct HvLpEvent *event) +{ + u64 abs_addr; + + abs_addr = virt_to_abs(event); + return HvCall1(HvCallEventAckLpEvent, abs_addr); +} + +static inline HvLpEvent_Rc HvCallEvent_cancelLpEvent(struct HvLpEvent *event) +{ + u64 abs_addr; + + abs_addr = virt_to_abs(event); + return HvCall1(HvCallEventCancelLpEvent, abs_addr); +} + +static inline HvLpInstanceId HvCallEvent_getSourceLpInstanceId( + HvLpIndex targetLp, HvLpEvent_Type type) +{ + return HvCall2(HvCallEventGetSourceLpInstanceId, targetLp, type); +} + +static inline HvLpInstanceId HvCallEvent_getTargetLpInstanceId( + HvLpIndex targetLp, HvLpEvent_Type type) +{ + return HvCall2(HvCallEventGetTargetLpInstanceId, targetLp, type); +} + +static inline void HvCallEvent_openLpEventPath(HvLpIndex targetLp, + HvLpEvent_Type type) +{ + HvCall2(HvCallEventOpenLpEventPath, targetLp, type); +} + +static inline void HvCallEvent_closeLpEventPath(HvLpIndex targetLp, + HvLpEvent_Type type) +{ + HvCall2(HvCallEventCloseLpEventPath, targetLp, type); +} + +static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type, + HvLpIndex remoteLp, HvLpDma_Direction direction, + HvLpInstanceId localInstanceId, + HvLpInstanceId remoteInstanceId, + HvLpDma_AddressType localAddressType, + HvLpDma_AddressType remoteAddressType, + /* Do these need to be converted to absolute addresses? */ + u64 localBufList, u64 remoteBufList, u32 transferLength) +{ + /* Pack the misc bits into a single Dword to pass to PLIC */ + union { + struct HvCallEvent_PackedDmaParms parms; + u64 dword; + } packed; + + packed.parms.xDirection = direction; + packed.parms.xLocalAddrType = localAddressType; + packed.parms.xRemoteAddrType = remoteAddressType; + packed.parms.xRsvd1 = 0; + packed.parms.xRemoteLp = remoteLp; + packed.parms.xType = type; + packed.parms.xRsvd2 = 0; + packed.parms.xLocalInstId = localInstanceId; + packed.parms.xRemoteInstId = remoteInstanceId; + + return HvCall4(HvCallEventDmaBufList, packed.dword, localBufList, + remoteBufList, transferLength); +} + +static inline HvLpDma_Rc HvCallEvent_dmaSingle(HvLpEvent_Type type, + HvLpIndex remoteLp, HvLpDma_Direction direction, + HvLpInstanceId localInstanceId, + HvLpInstanceId remoteInstanceId, + HvLpDma_AddressType localAddressType, + HvLpDma_AddressType remoteAddressType, + u64 localAddrOrTce, u64 remoteAddrOrTce, u32 transferLength) +{ + /* Pack the misc bits into a single Dword to pass to PLIC */ + union { + struct HvCallEvent_PackedDmaParms parms; + u64 dword; + } packed; + + packed.parms.xDirection = direction; + packed.parms.xLocalAddrType = localAddressType; + packed.parms.xRemoteAddrType = remoteAddressType; + packed.parms.xRsvd1 = 0; + packed.parms.xRemoteLp = remoteLp; + packed.parms.xType = type; + packed.parms.xRsvd2 = 0; + packed.parms.xLocalInstId = localInstanceId; + packed.parms.xRemoteInstId = remoteInstanceId; + + return (HvLpDma_Rc)HvCall4(HvCallEventDmaSingle, packed.dword, + localAddrOrTce, remoteAddrOrTce, transferLength); +} + +static inline HvLpDma_Rc HvCallEvent_dmaToSp(void *local, u32 remote, + u32 length, HvLpDma_Direction dir) +{ + u64 abs_addr; + + abs_addr = virt_to_abs(local); + return HvCall4(HvCallEventDmaToSp, abs_addr, remote, length, dir); +} + +#endif /* _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H */ diff --git a/include/asm-powerpc/iseries/hv_call_sc.h b/include/asm-powerpc/iseries/hv_call_sc.h new file mode 100644 index 
00000000000..dec7e9d9ab7 --- /dev/null +++ b/include/asm-powerpc/iseries/hv_call_sc.h @@ -0,0 +1,51 @@ +/* + * HvCallSc.h + * Copyright (C) 2001 Mike Corrigan IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _ASM_POWERPC_ISERIES_HV_CALL_SC_H +#define _ASM_POWERPC_ISERIES_HV_CALL_SC_H + +#include <linux/types.h> + +#define HvCallBase 0x8000000000000000ul +#define HvCallCc 0x8001000000000000ul +#define HvCallCfg 0x8002000000000000ul +#define HvCallEvent 0x8003000000000000ul +#define HvCallHpt 0x8004000000000000ul +#define HvCallPci 0x8005000000000000ul +#define HvCallSm 0x8007000000000000ul +#define HvCallXm 0x8009000000000000ul + +extern u64 HvCall0(u64); +extern u64 HvCall1(u64, u64); +extern u64 HvCall2(u64, u64, u64); +extern u64 HvCall3(u64, u64, u64, u64); +extern u64 HvCall4(u64, u64, u64, u64, u64); +extern u64 HvCall5(u64, u64, u64, u64, u64, u64); +extern u64 HvCall6(u64, u64, u64, u64, u64, u64, u64); +extern u64 HvCall7(u64, u64, u64, u64, u64, u64, u64, u64); + +extern u64 HvCall0Ret16(u64, void *); +extern u64 HvCall1Ret16(u64, void *, u64); +extern u64 HvCall2Ret16(u64, void *, u64, u64); +extern u64 HvCall3Ret16(u64, void *, u64, u64, u64); +extern u64 HvCall4Ret16(u64, void *, u64, u64, u64, u64); +extern u64 HvCall5Ret16(u64, void *, u64, u64, u64, u64, u64); +extern u64 HvCall6Ret16(u64, void *, u64, u64, u64, u64, u64, u64); +extern u64 HvCall7Ret16(u64, void *, u64, u64 ,u64 ,u64 ,u64 ,u64 ,u64); + +#endif /* _ASM_POWERPC_ISERIES_HV_CALL_SC_H */ diff --git a/include/asm-powerpc/iseries/hv_call_xm.h b/include/asm-powerpc/iseries/hv_call_xm.h new file mode 100644 index 00000000000..ca9202cb01e --- /dev/null +++ b/include/asm-powerpc/iseries/hv_call_xm.h @@ -0,0 +1,78 @@ +/* + * This file contains the "hypervisor call" interface which is used to + * drive the hypervisor from SLIC. 
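+ * These calls cover TCE (DMA translation) table setup and bus/unit probing for the virtual and PCI buses.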
+ */ +#ifndef _ASM_POWERPC_ISERIES_HV_CALL_XM_H +#define _ASM_POWERPC_ISERIES_HV_CALL_XM_H + +#include <asm/iseries/hv_call_sc.h> +#include <asm/iseries/hv_types.h> + +#define HvCallXmGetTceTableParms HvCallXm + 0 +#define HvCallXmTestBus HvCallXm + 1 +#define HvCallXmConnectBusUnit HvCallXm + 2 +#define HvCallXmLoadTod HvCallXm + 8 +#define HvCallXmTestBusUnit HvCallXm + 9 +#define HvCallXmSetTce HvCallXm + 11 +#define HvCallXmSetTces HvCallXm + 13 + +/* + * Structure passed to HvCallXm_getTceTableParms + */ +struct iommu_table_cb { + unsigned long itc_busno; /* Bus number for this tce table */ + unsigned long itc_start; /* Will be NULL for secondary */ + unsigned long itc_totalsize; /* Size (in pages) of whole table */ + unsigned long itc_offset; /* Index into real tce table of the + start of our section */ + unsigned long itc_size; /* Size (in pages) of our section */ + unsigned long itc_index; /* Index of this tce table */ + unsigned short itc_maxtables; /* Max num of tables for partition */ + unsigned char itc_virtbus; /* Flag to indicate virtual bus */ + unsigned char itc_slotno; /* IOA Tce Slot Index */ + unsigned char itc_rsvd[4]; +}; + +static inline void HvCallXm_getTceTableParms(u64 cb) +{ + HvCall1(HvCallXmGetTceTableParms, cb); +} + +static inline u64 HvCallXm_setTce(u64 tceTableToken, u64 tceOffset, u64 tce) +{ + return HvCall3(HvCallXmSetTce, tceTableToken, tceOffset, tce); +} + +static inline u64 HvCallXm_setTces(u64 tceTableToken, u64 tceOffset, + u64 numTces, u64 tce1, u64 tce2, u64 tce3, u64 tce4) +{ + return HvCall7(HvCallXmSetTces, tceTableToken, tceOffset, numTces, + tce1, tce2, tce3, tce4); +} + +static inline u64 HvCallXm_testBus(u16 busNumber) +{ + return HvCall1(HvCallXmTestBus, busNumber); +} + +static inline u64 HvCallXm_testBusUnit(u16 busNumber, u8 subBusNumber, + u8 deviceId) +{ + return HvCall2(HvCallXmTestBusUnit, busNumber, + (subBusNumber << 8) | deviceId); +} + +static inline u64 HvCallXm_connectBusUnit(u16 busNumber, u8 subBusNumber, + u8 deviceId, u64 interruptToken) +{ + return HvCall5(HvCallXmConnectBusUnit, busNumber, + (subBusNumber << 8) | deviceId, interruptToken, 0, + 0 /* HvLpConfig::mapDsaToQueueIndex(HvLpDSA(busNumber, xBoard, xCard)) */); +} + +static inline u64 HvCallXm_loadTod(void) +{ + return HvCall0(HvCallXmLoadTod); +} + +#endif /* _ASM_POWERPC_ISERIES_HV_CALL_XM_H */ diff --git a/include/asm-powerpc/iseries/hv_lp_config.h b/include/asm-powerpc/iseries/hv_lp_config.h new file mode 100644 index 00000000000..bc00f036bca --- /dev/null +++ b/include/asm-powerpc/iseries/hv_lp_config.h @@ -0,0 +1,138 @@ +/* + * HvLpConfig.h + * Copyright (C) 2001 Mike Corrigan IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H +#define _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H + +/* + * This file contains the interface to the LPAR configuration data + * to determine which resources should be allocated to each partition. + */ + +#include <asm/iseries/hv_call_sc.h> +#include <asm/iseries/hv_types.h> +#include <asm/iseries/it_lp_naca.h> + +enum { + HvCallCfg_Cur = 0, + HvCallCfg_Init = 1, + HvCallCfg_Max = 2, + HvCallCfg_Min = 3 +}; + +#define HvCallCfgGetSystemPhysicalProcessors HvCallCfg + 6 +#define HvCallCfgGetPhysicalProcessors HvCallCfg + 7 +#define HvCallCfgGetMsChunks HvCallCfg + 9 +#define HvCallCfgGetSharedPoolIndex HvCallCfg + 20 +#define HvCallCfgGetSharedProcUnits HvCallCfg + 21 +#define HvCallCfgGetNumProcsInSharedPool HvCallCfg + 22 +#define HvCallCfgGetVirtualLanIndexMap HvCallCfg + 30 +#define HvCallCfgGetHostingLpIndex HvCallCfg + 32 + +extern HvLpIndex HvLpConfig_getLpIndex_outline(void); + +static inline HvLpIndex HvLpConfig_getLpIndex(void) +{ + return itLpNaca.xLpIndex; +} + +static inline HvLpIndex HvLpConfig_getPrimaryLpIndex(void) +{ + return itLpNaca.xPrimaryLpIndex; +} + +static inline u64 HvLpConfig_getMsChunks(void) +{ + return HvCall2(HvCallCfgGetMsChunks, HvLpConfig_getLpIndex(), + HvCallCfg_Cur); +} + +static inline u64 HvLpConfig_getSystemPhysicalProcessors(void) +{ + return HvCall0(HvCallCfgGetSystemPhysicalProcessors); +} + +static inline u64 HvLpConfig_getNumProcsInSharedPool(HvLpSharedPoolIndex sPI) +{ + return (u16)HvCall1(HvCallCfgGetNumProcsInSharedPool, sPI); +} + +static inline u64 HvLpConfig_getPhysicalProcessors(void) +{ + return HvCall2(HvCallCfgGetPhysicalProcessors, HvLpConfig_getLpIndex(), + HvCallCfg_Cur); +} + +static inline HvLpSharedPoolIndex HvLpConfig_getSharedPoolIndex(void) +{ + return HvCall1(HvCallCfgGetSharedPoolIndex, HvLpConfig_getLpIndex()); +} + +static inline u64 HvLpConfig_getSharedProcUnits(void) +{ + return HvCall2(HvCallCfgGetSharedProcUnits, HvLpConfig_getLpIndex(), + HvCallCfg_Cur); +} + +static inline u64 HvLpConfig_getMaxSharedProcUnits(void) +{ + return HvCall2(HvCallCfgGetSharedProcUnits, HvLpConfig_getLpIndex(), + HvCallCfg_Max); +} + +static inline u64 HvLpConfig_getMaxPhysicalProcessors(void) +{ + return HvCall2(HvCallCfgGetPhysicalProcessors, HvLpConfig_getLpIndex(), + HvCallCfg_Max); +} + +static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMapForLp( + HvLpIndex lp) +{ + /* + * This is a new function in V5R1 so calls to this on older + * hypervisors will return -1 + */ + u64 retVal = HvCall1(HvCallCfgGetVirtualLanIndexMap, lp); + if (retVal == -1) + retVal = 0; + return retVal; +} + +static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMap(void) +{ + return HvLpConfig_getVirtualLanIndexMapForLp( + HvLpConfig_getLpIndex_outline()); +} + +static inline int HvLpConfig_doLpsCommunicateOnVirtualLan(HvLpIndex lp1, + HvLpIndex lp2) +{ + HvLpVirtualLanIndexMap virtualLanIndexMap1 = + HvLpConfig_getVirtualLanIndexMapForLp(lp1); + HvLpVirtualLanIndexMap virtualLanIndexMap2 = + HvLpConfig_getVirtualLanIndexMapForLp(lp2); + return ((virtualLanIndexMap1 & virtualLanIndexMap2) != 0); +} + +static inline HvLpIndex HvLpConfig_getHostingLpIndex(HvLpIndex lp) +{ + return HvCall1(HvCallCfgGetHostingLpIndex, lp); +} + +#endif /* _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H */ diff --git 
a/include/asm-powerpc/iseries/hv_lp_event.h b/include/asm-powerpc/iseries/hv_lp_event.h new file mode 100644 index 00000000000..499ab1ad018 --- /dev/null +++ b/include/asm-powerpc/iseries/hv_lp_event.h @@ -0,0 +1,142 @@ +/* + * HvLpEvent.h + * Copyright (C) 2001 Mike Corrigan IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* This file contains the class for HV events in the system. */ + +#ifndef _ASM_POWERPC_ISERIES_HV_LP_EVENT_H +#define _ASM_POWERPC_ISERIES_HV_LP_EVENT_H + +#include <asm/types.h> +#include <asm/ptrace.h> +#include <asm/iseries/hv_types.h> +#include <asm/iseries/hv_call_event.h> + +/* + * HvLpEvent is the structure for Lp Event messages passed between + * partitions through PLIC. + */ + +struct HvEventFlags { + u8 xValid:1; /* Indicates a valid request x00-x00 */ + u8 xRsvd1:4; /* Reserved ... */ + u8 xAckType:1; /* Immediate or deferred ... */ + u8 xAckInd:1; /* Indicates if ACK required ... */ + u8 xFunction:1; /* Interrupt or Acknowledge ... */ +}; + + +struct HvLpEvent { + struct HvEventFlags xFlags; /* Event flags x00-x00 */ + u8 xType; /* Type of message x01-x01 */ + u16 xSubtype; /* Subtype for event x02-x03 */ + u8 xSourceLp; /* Source LP x04-x04 */ + u8 xTargetLp; /* Target LP x05-x05 */ + u8 xSizeMinus1; /* Size of Derived class - 1 x06-x06 */ + u8 xRc; /* RC for Ack flows x07-x07 */ + u16 xSourceInstanceId; /* Source sides instance id x08-x09 */ + u16 xTargetInstanceId; /* Target sides instance id x0A-x0B */ + union { + u32 xSubtypeData; /* Data usable by the subtype x0C-x0F */ + u16 xSubtypeDataShort[2]; /* Data as 2 shorts */ + u8 xSubtypeDataChar[4]; /* Data as 4 chars */ + } x; + + u64 xCorrelationToken; /* Unique value for source/type x10-x17 */ +}; + +typedef void (*LpEventHandler)(struct HvLpEvent *, struct pt_regs *); + +/* Register a handler for an event type - returns 0 on success */ +extern int HvLpEvent_registerHandler(HvLpEvent_Type eventType, + LpEventHandler hdlr); + +/* + * Unregister a handler for an event type + * + * This call will sleep until the handler being removed is guaranteed to + * be no longer executing on any CPU. Do not call with locks held. + * + * returns 0 on success + * Unregister will fail if there are any paths open for the type + */ +extern int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType); + +/* + * Open an Lp Event Path for an event type + * returns 0 on success + * openPath will fail if there is no handler registered for the event type. 
+ * The lpIndex specified is the partition index for the target partition + * (for VirtualIo, VirtualLan and SessionMgr; other types specify zero) + */ +extern int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex); + +/* + * Close an Lp Event Path for a type and partition + * returns 0 on success + */ +extern int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex); + +#define HvLpEvent_Type_Hypervisor 0 +#define HvLpEvent_Type_MachineFac 1 +#define HvLpEvent_Type_SessionMgr 2 +#define HvLpEvent_Type_SpdIo 3 +#define HvLpEvent_Type_VirtualBus 4 +#define HvLpEvent_Type_PciIo 5 +#define HvLpEvent_Type_RioIo 6 +#define HvLpEvent_Type_VirtualLan 7 +#define HvLpEvent_Type_VirtualIo 8 +#define HvLpEvent_Type_NumTypes 9 + +#define HvLpEvent_Rc_Good 0 +#define HvLpEvent_Rc_BufferNotAvailable 1 +#define HvLpEvent_Rc_Cancelled 2 +#define HvLpEvent_Rc_GenericError 3 +#define HvLpEvent_Rc_InvalidAddress 4 +#define HvLpEvent_Rc_InvalidPartition 5 +#define HvLpEvent_Rc_InvalidSize 6 +#define HvLpEvent_Rc_InvalidSubtype 7 +#define HvLpEvent_Rc_InvalidSubtypeData 8 +#define HvLpEvent_Rc_InvalidType 9 +#define HvLpEvent_Rc_PartitionDead 10 +#define HvLpEvent_Rc_PathClosed 11 +#define HvLpEvent_Rc_SubtypeError 12 + +#define HvLpEvent_Function_Ack 0 +#define HvLpEvent_Function_Int 1 + +#define HvLpEvent_AckInd_NoAck 0 +#define HvLpEvent_AckInd_DoAck 1 + +#define HvLpEvent_AckType_ImmediateAck 0 +#define HvLpEvent_AckType_DeferredAck 1 + +#define HvLpDma_Direction_LocalToRemote 0 +#define HvLpDma_Direction_RemoteToLocal 1 + +#define HvLpDma_AddressType_TceIndex 0 +#define HvLpDma_AddressType_RealAddress 1 + +#define HvLpDma_Rc_Good 0 +#define HvLpDma_Rc_Error 1 +#define HvLpDma_Rc_PartitionDead 2 +#define HvLpDma_Rc_PathClosed 3 +#define HvLpDma_Rc_InvalidAddress 4 +#define HvLpDma_Rc_InvalidLength 5 + +#endif /* _ASM_POWERPC_ISERIES_HV_LP_EVENT_H */ diff --git a/include/asm-powerpc/iseries/hv_types.h b/include/asm-powerpc/iseries/hv_types.h new file mode 100644 index 00000000000..c38f7e3d01d --- /dev/null +++ b/include/asm-powerpc/iseries/hv_types.h @@ -0,0 +1,113 @@ +/* + * HvTypes.h + * Copyright (C) 2001 Mike Corrigan IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _ASM_POWERPC_ISERIES_HV_TYPES_H +#define _ASM_POWERPC_ISERIES_HV_TYPES_H + +/* + * General typedefs for the hypervisor. + */ + +#include <asm/types.h> + +typedef u8 HvLpIndex; +typedef u16 HvLpInstanceId; +typedef u64 HvLpTOD; +typedef u64 HvLpSystemSerialNum; +typedef u8 HvLpDeviceSerialNum[12]; +typedef u16 HvLpSanHwSet; +typedef u16 HvLpBus; +typedef u16 HvLpBoard; +typedef u16 HvLpCard; +typedef u8 HvLpDeviceType[4]; +typedef u8 HvLpDeviceModel[3]; +typedef u64 HvIoToken; +typedef u8 HvLpName[8]; +typedef u32 HvIoId; +typedef u64 HvRealMemoryIndex; +typedef u32 HvLpIndexMap; /* Must hold HVMAXARCHITECTEDLPS bits!!!
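(HVMAXARCHITECTEDLPS = 32, defined below.)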
*/ +typedef u16 HvLpVrmIndex; +typedef u32 HvXmGenerationId; +typedef u8 HvLpBusPool; +typedef u8 HvLpSharedPoolIndex; +typedef u16 HvLpSharedProcUnitsX100; +typedef u8 HvLpVirtualLanIndex; +typedef u16 HvLpVirtualLanIndexMap; /* Must hold HVMAXARCHITECTEDVIRTUALLANS bits!!! */ +typedef u16 HvBusNumber; /* Hypervisor Bus Number */ +typedef u8 HvSubBusNumber; /* Hypervisor SubBus Number */ +typedef u8 HvAgentId; /* Hypervisor DevFn */ + + +#define HVMAXARCHITECTEDLPS 32 +#define HVMAXARCHITECTEDVIRTUALLANS 16 +#define HVMAXARCHITECTEDVIRTUALDISKS 32 +#define HVMAXARCHITECTEDVIRTUALCDROMS 8 +#define HVMAXARCHITECTEDVIRTUALTAPES 8 +#define HVCHUNKSIZE (256 * 1024) +#define HVPAGESIZE (4 * 1024) +#define HVLPMINMEGSPRIMARY 256 +#define HVLPMINMEGSSECONDARY 64 +#define HVCHUNKSPERMEG 4 +#define HVPAGESPERMEG 256 +#define HVPAGESPERCHUNK 64 + +#define HvLpIndexInvalid ((HvLpIndex)0xff) + +/* + * Enums for the sub-components under PLIC + * Used in HvCall and HvPrimaryCall + */ +enum { + HvCallCompId = 0, + HvCallCpuCtlsCompId = 1, + HvCallCfgCompId = 2, + HvCallEventCompId = 3, + HvCallHptCompId = 4, + HvCallPciCompId = 5, + HvCallSlmCompId = 6, + HvCallSmCompId = 7, + HvCallSpdCompId = 8, + HvCallXmCompId = 9, + HvCallRioCompId = 10, + HvCallRsvd3CompId = 11, + HvCallRsvd2CompId = 12, + HvCallRsvd1CompId = 13, + HvCallMaxCompId = 14, + HvPrimaryCallCompId = 0, + HvPrimaryCallCfgCompId = 1, + HvPrimaryCallPciCompId = 2, + HvPrimaryCallSmCompId = 3, + HvPrimaryCallSpdCompId = 4, + HvPrimaryCallXmCompId = 5, + HvPrimaryCallRioCompId = 6, + HvPrimaryCallRsvd7CompId = 7, + HvPrimaryCallRsvd6CompId = 8, + HvPrimaryCallRsvd5CompId = 9, + HvPrimaryCallRsvd4CompId = 10, + HvPrimaryCallRsvd3CompId = 11, + HvPrimaryCallRsvd2CompId = 12, + HvPrimaryCallRsvd1CompId = 13, + HvPrimaryCallMaxCompId = HvCallMaxCompId +}; + +struct HvLpBufferList { + u64 addr; + u64 len; +}; + +#endif /* _ASM_POWERPC_ISERIES_HV_TYPES_H */ diff --git a/include/asm-powerpc/iseries/iseries_io.h b/include/asm-powerpc/iseries/iseries_io.h new file mode 100644 index 00000000000..56b2113ff0f --- /dev/null +++ b/include/asm-powerpc/iseries/iseries_io.h @@ -0,0 +1,49 @@ +#ifndef _ASM_POWERPC_ISERIES_ISERIES_IO_H +#define _ASM_POWERPC_ISERIES_ISERIES_IO_H + +#include <linux/config.h> + +#ifdef CONFIG_PPC_ISERIES +#include <linux/types.h> +/* + * File iSeries_io.h created by Allan Trautman on Thu Dec 28 2000. + * + * Remaps the io.h for the iSeries Io + * Copyright (C) 2000 Allan H Trautman, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the: + * Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, + * Boston, MA 02111-1307 USA + * + * Change Activity: + * Created December 28, 2000 + * End Change Activity + */ + +extern u8 iSeries_Read_Byte(const volatile void __iomem * IoAddress); +extern u16 iSeries_Read_Word(const volatile void __iomem * IoAddress); +extern u32 iSeries_Read_Long(const volatile void __iomem * IoAddress); +extern void iSeries_Write_Byte(u8 IoData, volatile void __iomem * IoAddress); +extern void iSeries_Write_Word(u16 IoData, volatile void __iomem * IoAddress); +extern void iSeries_Write_Long(u32 IoData, volatile void __iomem * IoAddress); + +extern void iSeries_memset_io(volatile void __iomem *dest, char x, size_t n); +extern void iSeries_memcpy_toio(volatile void __iomem *dest, void *source, + size_t n); +extern void iSeries_memcpy_fromio(void *dest, + const volatile void __iomem *source, size_t n); + +#endif /* CONFIG_PPC_ISERIES */ +#endif /* _ASM_POWERPC_ISERIES_ISERIES_IO_H */ diff --git a/include/asm-powerpc/iseries/it_exp_vpd_panel.h b/include/asm-powerpc/iseries/it_exp_vpd_panel.h new file mode 100644 index 00000000000..66a17a230c5 --- /dev/null +++ b/include/asm-powerpc/iseries/it_exp_vpd_panel.h @@ -0,0 +1,52 @@ +/* + * ItExtVpdPanel.h + * Copyright (C) 2002 Dave Boutcher IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _ASM_POWERPC_ISERIES_IT_EXT_VPD_PANEL_H +#define _ASM_POWERPC_ISERIES_IT_EXT_VPD_PANEL_H + +/* + * This struct maps the panel information + * + * Warning: + * This data must match the architecture for the panel information + */ + +#include <asm/types.h> + +struct ItExtVpdPanel { + /* Definition of the Extended Vpd On Panel Data Area */ + char systemSerial[8]; + char mfgID[4]; + char reserved1[24]; + char machineType[4]; + char systemID[6]; + char somUniqueCnt[4]; + char serialNumberCount; + char reserved2[7]; + u16 bbu3; + u16 bbu2; + u16 bbu1; + char xLocationLabel[8]; + u8 xRsvd1[6]; + u16 xFrameId; + u8 xRsvd2[48]; +}; + +extern struct ItExtVpdPanel xItExtVpdPanel; + +#endif /* _ASM_POWERPC_ISERIES_IT_EXT_VPD_PANEL_H */ diff --git a/include/asm-powerpc/iseries/it_lp_naca.h b/include/asm-powerpc/iseries/it_lp_naca.h new file mode 100644 index 00000000000..c3ef1de45d8 --- /dev/null +++ b/include/asm-powerpc/iseries/it_lp_naca.h @@ -0,0 +1,80 @@ +/* + * ItLpNaca.h + * Copyright (C) 2001 Mike Corrigan IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_ISERIES_IT_LP_NACA_H
+#define _ASM_POWERPC_ISERIES_IT_LP_NACA_H
+
+#include <linux/types.h>
+
+/*
+ * This control block contains the data that is shared between the
+ * hypervisor (PLIC) and the OS.
+ */
+
+struct ItLpNaca {
+// CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
+ u32 xDesc; // Eye catcher x00-x03
+ u16 xSize; // Size of this class x04-x05
+ u16 xIntHdlrOffset; // Offset to IntHdlr array x06-x07
+ u8 xMaxIntHdlrEntries; // Number of entries in array x08-x08
+ u8 xPrimaryLpIndex; // LP Index of Primary x09-x09
+ u8 xServiceLpIndex; // LP Ind of Service Focal Point x0A-x0A
+ u8 xLpIndex; // LP Index x0B-x0B
+ u16 xMaxLpQueues; // Number of allocated queues x0C-x0D
+ u16 xLpQueueOffset; // Offset to start of LP queues x0E-x0F
+ u8 xPirEnvironMode:8; // Piranha or hardware x10-x10
+ u8 xPirConsoleMode:8; // Piranha console indicator x11-x11
+ u8 xPirDasdMode:8; // Piranha dasd indicator x12-x12
+ u8 xRsvd1_0[5]; // Reserved for Piranha related x13-x17
+ u8 xLparInstalled:1; // Is LPAR installed on system x18-x1F
+ u8 xSysPartitioned:1; // Is the system partitioned ...
+ u8 xHwSyncedTBs:1; // Hardware synced TBs ...
+ u8 xIntProcUtilHmt:1; // Utilize HMT for interrupts ...
+ u8 xRsvd1_1:4; // Reserved ...
+ u8 xSpVpdFormat:8; // VPD areas are in CSP format ...
+ u8 xIntProcRatio:8; // Ratio of int procs to procs ...
+ u8 xRsvd1_2[5]; // Reserved ...
+ u16 xRsvd1_3; // Reserved x20-x21
+ u16 xPlicVrmIndex; // VRM index of PLIC x22-x23
+ u16 xMinSupportedSlicVrmInd;// Min supported OS VRM index x24-x25
+ u16 xMinCompatableSlicVrmInd;// Min compatible OS VRM index x26-x27
+ u64 xLoadAreaAddr; // ER address of load area x28-x2F
+ u32 xLoadAreaChunks; // Chunks for the load area x30-x33
+ u32 xPaseSysCallCRMask; // Mask used to test CR before x34-x37
+ // doing an ASR switch on PASE
+ // system call.
+ u64 xSlicSegmentTablePtr; // Pointer to Slic seg table. x38-x3f
+ u8 xRsvd1_4[64]; // x40-x7F
+
+// CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
+ u8 xRsvd2_0[128]; // Reserved x00-x7F
+
+// CACHE_LINE_3-6 0x0100 - 0x02FF Contains LP Queue indicators
+// NB: Padding required to keep xInterruptHdlr at x300 which is required
+// for v4r4 PLIC.
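+// (Editorial aside, not part of the original patch: the padding adds up,
+// since 0x80 + 0x80 + 0x80 + 0x180 = 0x300. Outside the struct this could
+// hypothetically be asserted at build time with
+//	BUILD_BUG_ON(offsetof(struct ItLpNaca, xInterruptHdlr) != 0x300);
+// using BUILD_BUG_ON from <linux/kernel.h>.)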
+ u8 xOldLpQueue[128]; // LP Queue needed for v4r4 100-17F + u8 xRsvd3_0[384]; // Reserved 180-2FF + +// CACHE_LINE_7-8 0x0300 - 0x03FF Contains the address of the OS interrupt +// handlers + u64 xInterruptHdlr[32]; // Interrupt handlers 300-x3FF +}; + +extern struct ItLpNaca itLpNaca; + +#endif /* _ASM_POWERPC_ISERIES_IT_LP_NACA_H */ diff --git a/include/asm-powerpc/iseries/it_lp_queue.h b/include/asm-powerpc/iseries/it_lp_queue.h new file mode 100644 index 00000000000..a60d03afbf9 --- /dev/null +++ b/include/asm-powerpc/iseries/it_lp_queue.h @@ -0,0 +1,81 @@ +/* + * ItLpQueue.h + * Copyright (C) 2001 Mike Corrigan IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H +#define _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H + +/* + * This control block defines the simple LP queue structure that is + * shared between the hypervisor (PLIC) and the OS in order to send + * events to an LP. + */ + +#include <asm/types.h> +#include <asm/ptrace.h> + +struct HvLpEvent; + +#define ITMaxLpQueues 8 + +#define NotUsed 0 // Queue will not be used by PLIC +#define DedicatedIo 1 // Queue dedicated to IO processor specified +#define DedicatedLp 2 // Queue dedicated to LP specified +#define Shared 3 // Queue shared for both IO and LP + +#define LpEventStackSize 4096 +#define LpEventMaxSize 256 +#define LpEventAlign 64 + +struct hvlpevent_queue { +/* + * The xSlicCurEventPtr is the pointer to the next event stack entry + * that will become valid. The OS must peek at this entry to determine + * if it is valid. PLIC will set the valid indicator as the very last + * store into that entry. + * + * When the OS has completed processing of the event then it will mark + * the event as invalid so that PLIC knows it can store into that event + * location again. + * + * If the event stack fills and there are overflow events, then PLIC + * will set the xPlicOverflowIntPending flag in which case the OS will + * have to fetch the additional LP events once they have drained the + * event stack. + * + * The first 16-bytes are known by both the OS and PLIC. The remainder + * of the cache line is for use by the OS. + */ + u8 xPlicOverflowIntPending;// 0x00 Overflow events are pending + u8 xPlicStatus; // 0x01 DedicatedIo or DedicatedLp or NotUsed + u16 xSlicLogicalProcIndex; // 0x02 Logical Proc Index for correlation + u8 xPlicRsvd[12]; // 0x04 + char *xSlicCurEventPtr; // 0x10 + char *xSlicLastValidEventPtr; // 0x18 + char *xSlicEventStackPtr; // 0x20 + u8 xIndex; // 0x28 unique sequential index. 
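+// (Editorial sketch, not part of the original patch: a consumer typically
+// drains this queue from the timer/interrupt path, roughly
+//	if (hvlpevent_is_pending())
+//		process_hvlpevents(regs);
+// using the helpers declared after this struct.)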
+ u8 xSlicRsvd[3]; // 0x29-2b + spinlock_t lock; +}; + +extern struct hvlpevent_queue hvlpevent_queue; + +extern int hvlpevent_is_pending(void); +extern void process_hvlpevents(struct pt_regs *); +extern void setup_hvlpevent_queue(void); + +#endif /* _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H */ diff --git a/include/asm-powerpc/iseries/it_lp_reg_save.h b/include/asm-powerpc/iseries/it_lp_reg_save.h new file mode 100644 index 00000000000..288044b702d --- /dev/null +++ b/include/asm-powerpc/iseries/it_lp_reg_save.h @@ -0,0 +1,84 @@ +/* + * ItLpRegSave.h + * Copyright (C) 2001 Mike Corrigan IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _ASM_POWERPC_ISERIES_IT_LP_REG_SAVE_H +#define _ASM_POWERPC_ISERIES_IT_LP_REG_SAVE_H + +/* + * This control block contains the data that is shared between PLIC + * and the OS + */ + +struct ItLpRegSave { + u32 xDesc; // Eye catcher "LpRS" ebcdic 000-003 + u16 xSize; // Size of this class 004-005 + u8 xInUse; // Area is live 006-007 + u8 xRsvd1[9]; // Reserved 007-00F + + u8 xFixedRegSave[352]; // Fixed Register Save Area 010-16F + u32 xCTRL; // Control Register 170-173 + u32 xDEC; // Decrementer 174-177 + u32 xFPSCR; // FP Status and Control Reg 178-17B + u32 xPVR; // Processor Version Number 17C-17F + + u64 xMMCR0; // Monitor Mode Control Reg 0 180-187 + u32 xPMC1; // Perf Monitor Counter 1 188-18B + u32 xPMC2; // Perf Monitor Counter 2 18C-18F + u32 xPMC3; // Perf Monitor Counter 3 190-193 + u32 xPMC4; // Perf Monitor Counter 4 194-197 + u32 xPIR; // Processor ID Reg 198-19B + + u32 xMMCR1; // Monitor Mode Control Reg 1 19C-19F + u32 xMMCRA; // Monitor Mode Control Reg A 1A0-1A3 + u32 xPMC5; // Perf Monitor Counter 5 1A4-1A7 + u32 xPMC6; // Perf Monitor Counter 6 1A8-1AB + u32 xPMC7; // Perf Monitor Counter 7 1AC-1AF + u32 xPMC8; // Perf Monitor Counter 8 1B0-1B3 + u32 xTSC; // Thread Switch Control 1B4-1B7 + u32 xTST; // Thread Switch Timeout 1B8-1BB + u32 xRsvd; // Reserved 1BC-1BF + + u64 xACCR; // Address Compare Control Reg 1C0-1C7 + u64 xIMR; // Instruction Match Register 1C8-1CF + u64 xSDR1; // Storage Description Reg 1 1D0-1D7 + u64 xSPRG0; // Special Purpose Reg General0 1D8-1DF + u64 xSPRG1; // Special Purpose Reg General1 1E0-1E7 + u64 xSPRG2; // Special Purpose Reg General2 1E8-1EF + u64 xSPRG3; // Special Purpose Reg General3 1F0-1F7 + u64 xTB; // Time Base Register 1F8-1FF + + u64 xFPR[32]; // Floating Point Registers 200-2FF + + u64 xMSR; // Machine State Register 300-307 + u64 xNIA; // Next Instruction Address 308-30F + + u64 xDABR; // Data Address Breakpoint Reg 310-317 + u64 xIABR; // Inst Address Breakpoint Reg 318-31F + + u64 xHID0; // HW Implementation Dependent0 320-327 + + u64 xHID4; // HW Implementation Dependent4 328-32F + u64 xSCOMd; // SCON Data Reg (SPRG4) 330-337 + u64 xSCOMc; // SCON Command Reg (SPRG5) 338-33F + u64 xSDAR; // Sample Data 
Address Register 340-347
+ u64 xSIAR; // Sample Inst Address Register 348-34F
+
+ u8 xRsvd3[176]; // Reserved 350-3FF
+};
+
+#endif /* _ASM_POWERPC_ISERIES_IT_LP_REG_SAVE_H */
diff --git a/include/asm-powerpc/iseries/lpar_map.h b/include/asm-powerpc/iseries/lpar_map.h
new file mode 100644
index 00000000000..84fc321615b
--- /dev/null
+++ b/include/asm-powerpc/iseries/lpar_map.h
@@ -0,0 +1,83 @@
+/*
+ * LparMap.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_ISERIES_LPAR_MAP_H
+#define _ASM_POWERPC_ISERIES_LPAR_MAP_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/types.h>
+
+/*
+ * The iSeries hypervisor will set up mapping for one or more
+ * ESID/VSID pairs (in SLB/segment registers) and will set up
+ * mappings of one or more ranges of pages to VAs.
+ * We will have the hypervisor set up the ESID->VSID mapping
+ * for the four kernel segments (C-F). With shared processors,
+ * the hypervisor will clear all segment registers and reload
+ * these four whenever the processor is switched from one
+ * partition to another.
+ */
+
+/* The Vsid and Esid identified below will be used by the hypervisor
+ * to set up a memory mapping for part of the load area before giving
+ * control to the Linux kernel. The load area is 64 MB, but the mapping
+ * must not attempt to cover the whole load area. The Hashed Page Table
+ * may need to be located within the load area (if the total partition
+ * size is 64 MB), but cannot be mapped. Typically, this should map
+ * half (32 MB) of the load area.
+ *
+ * The hypervisor will set up page table entries for the number of
+ * pages specified.
+ *
+ * In 32-bit mode, the hypervisor will load all four of the
+ * segment registers (identified by the low-order four bits of the
+ * Esid field). In 64-bit mode, the hypervisor will load one SLB
+ * entry to map the Esid to the Vsid.
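+ *
+ * (Editorial arithmetic, not part of the original patch: the constants
+ * below follow from this description -- half of the 64 MB load area is
+ * 32 MB, and 32 MB / 4 KB per page = 8192 pages, the value given to
+ * HvPagesToMap.)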
+*/
+
+#define HvEsidsToMap 2
+#define HvRangesToMap 1
+
+/* Hypervisor initially maps 32MB of the load area */
+#define HvPagesToMap 8192
+
+struct LparMap {
+ u64 xNumberEsids; // Number of ESID/VSID pairs
+ u64 xNumberRanges; // Number of VA ranges to map
+ u64 xSegmentTableOffs; // Page number within load area of seg table
+ u64 xRsvd[5];
+ struct {
+ u64 xKernelEsid; // Esid used to map kernel load
+ u64 xKernelVsid; // Vsid used to map kernel load
+ } xEsids[HvEsidsToMap];
+ struct {
+ u64 xPages; // Number of pages to be mapped
+ u64 xOffset; // Offset from start of load area
+ u64 xVPN; // Virtual Page Number
+ } xRanges[HvRangesToMap];
+};
+
+extern const struct LparMap xLparMap;
+
+#endif /* __ASSEMBLY__ */
+
+/* the fixed address where the LparMap exists */
+#define LPARMAP_PHYS 0x7000
+
+#endif /* _ASM_POWERPC_ISERIES_LPAR_MAP_H */
diff --git a/include/asm-powerpc/iseries/mf.h b/include/asm-powerpc/iseries/mf.h
new file mode 100644
index 00000000000..e7bd57a03fb
--- /dev/null
+++ b/include/asm-powerpc/iseries/mf.h
@@ -0,0 +1,57 @@
+/*
+ * mf.h
+ * Copyright (C) 2001 Troy D. Armstrong IBM Corporation
+ * Copyright (C) 2004 Stephen Rothwell IBM Corporation
+ *
+ * This module exists as an interface between a Linux secondary partition
+ * running on an iSeries and the primary partition's Virtual Service
+ * Processor (VSP) object. The VSP has final authority over powering on/off
+ * all partitions in the iSeries. It also provides miscellaneous low-level
+ * machine facility type operations.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_ISERIES_MF_H
+#define _ASM_POWERPC_ISERIES_MF_H
+
+#include <linux/types.h>
+
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_call_event.h>
+
+struct rtc_time;
+
+typedef void (*MFCompleteHandler)(void *clientToken, int returnCode);
+
+extern void mf_allocate_lp_events(HvLpIndex targetLp, HvLpEvent_Type type,
+ unsigned size, unsigned amount, MFCompleteHandler hdlr,
+ void *userToken);
+extern void mf_deallocate_lp_events(HvLpIndex targetLp, HvLpEvent_Type type,
+ unsigned count, MFCompleteHandler hdlr, void *userToken);
+
+extern void mf_power_off(void);
+extern void mf_reboot(void);
+
+extern void mf_display_src(u32 word);
+extern void mf_display_progress(u16 value);
+extern void mf_clear_src(void);
+
+extern void mf_init(void);
+
+extern int mf_get_rtc(struct rtc_time *tm);
+extern int mf_get_boot_rtc(struct rtc_time *tm);
+extern int mf_set_rtc(struct rtc_time *tm);
+
+#endif /* _ASM_POWERPC_ISERIES_MF_H */
diff --git a/include/asm-powerpc/iseries/vio.h b/include/asm-powerpc/iseries/vio.h
new file mode 100644
index 00000000000..7e3a469420d
--- /dev/null
+++ b/include/asm-powerpc/iseries/vio.h
@@ -0,0 +1,130 @@
+/* -*- linux-c -*-
+ * drivers/char/vio.h
+ *
+ * iSeries Virtual I/O Message Path header
+ *
+ * Authors: Dave Boutcher <boutcher@us.ibm.com>
+ *          Ryan Arnold <ryanarn@us.ibm.com>
+ *          Colin Devilbiss <devilbis@us.ibm.com>
+ *
+ * (C) Copyright 2000 IBM Corporation
+ *
+ * This header file is used by the iSeries virtual I/O device
+ * drivers. It defines the interfaces to the common functions
+ * (implemented in drivers/char/viopath.c) as well as defining
+ * common functions and structures. Currently (at the time I
+ * wrote this comment) the iSeries virtual I/O device drivers
+ * that use this are
+ * drivers/block/viodasd.c
+ * drivers/char/viocons.c
+ * drivers/char/viotape.c
+ * drivers/cdrom/viocd.c
+ *
+ * The iSeries virtual ethernet support (veth.c) uses a whole
+ * different set of functions.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef _ASM_POWERPC_ISERIES_VIO_H
+#define _ASM_POWERPC_ISERIES_VIO_H
+
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_lp_event.h>
+
+/*
+ * iSeries virtual I/O events use the subtype field in
+ * HvLpEvent to figure out what kind of vio event is coming
+ * in. We use a table to route these, and this defines
+ * the maximum number of distinct subtypes
+ */
+#define VIO_MAX_SUBTYPES 8
+
+/*
+ * Each subtype can register a handler to process its events.
+ * The handler must have this interface.
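+ *
+ * (Editorial sketch, not part of the original patch: a hypothetical
+ * driver for the block-I/O subtype would register and later remove a
+ * handler like
+ *
+ *	static void my_handler(struct HvLpEvent *event) { ... }
+ *
+ *	vio_setHandler(viomajorsubtype_blockio, my_handler);
+ *	...
+ *	vio_clearHandler(viomajorsubtype_blockio);
+ *
+ * using the declarations and subtype values below.)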
+ */ +typedef void (vio_event_handler_t) (struct HvLpEvent * event); + +extern int viopath_open(HvLpIndex remoteLp, int subtype, int numReq); +extern int viopath_close(HvLpIndex remoteLp, int subtype, int numReq); +extern int vio_setHandler(int subtype, vio_event_handler_t * beh); +extern int vio_clearHandler(int subtype); +extern int viopath_isactive(HvLpIndex lp); +extern HvLpInstanceId viopath_sourceinst(HvLpIndex lp); +extern HvLpInstanceId viopath_targetinst(HvLpIndex lp); +extern void vio_set_hostlp(void); +extern void *vio_get_event_buffer(int subtype); +extern void vio_free_event_buffer(int subtype, void *buffer); + +extern HvLpIndex viopath_hostLp; +extern HvLpIndex viopath_ourLp; + +#define VIOCHAR_MAX_DATA 200 + +#define VIOMAJOR_SUBTYPE_MASK 0xff00 +#define VIOMINOR_SUBTYPE_MASK 0x00ff +#define VIOMAJOR_SUBTYPE_SHIFT 8 + +#define VIOVERSION 0x0101 + +/* + * This is the general structure for VIO errors; each module should have + * a table of them, and each table should be terminated by an entry of + * { 0, 0, NULL }. Then, to find a specific error message, a module + * should pass its local table and the return code. + */ +struct vio_error_entry { + u16 rc; + int errno; + const char *msg; +}; +extern const struct vio_error_entry *vio_lookup_rc( + const struct vio_error_entry *local_table, u16 rc); + +enum viosubtypes { + viomajorsubtype_monitor = 0x0100, + viomajorsubtype_blockio = 0x0200, + viomajorsubtype_chario = 0x0300, + viomajorsubtype_config = 0x0400, + viomajorsubtype_cdio = 0x0500, + viomajorsubtype_tape = 0x0600, + viomajorsubtype_scsi = 0x0700 +}; + +enum vioconfigsubtype { + vioconfigget = 0x0001, +}; + +enum viorc { + viorc_good = 0x0000, + viorc_noConnection = 0x0001, + viorc_noReceiver = 0x0002, + viorc_noBufferAvailable = 0x0003, + viorc_invalidMessageType = 0x0004, + viorc_invalidRange = 0x0201, + viorc_invalidToken = 0x0202, + viorc_DMAError = 0x0203, + viorc_useError = 0x0204, + viorc_releaseError = 0x0205, + viorc_invalidDisk = 0x0206, + viorc_openRejected = 0x0301 +}; + +struct device; + +extern struct device *iSeries_vio_dev; + +#endif /* _ASM_POWERPC_ISERIES_VIO_H */ diff --git a/include/asm-powerpc/kexec.h b/include/asm-powerpc/kexec.h new file mode 100644 index 00000000000..062ab9ba68e --- /dev/null +++ b/include/asm-powerpc/kexec.h @@ -0,0 +1,49 @@ +#ifndef _ASM_POWERPC_KEXEC_H +#define _ASM_POWERPC_KEXEC_H + +/* + * Maximum page that is mapped directly into kernel memory. + * XXX: Since we copy virt we can use any page we allocate + */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) + +/* + * Maximum address we can reach in physical address mode. + * XXX: I want to allow initrd in highmem. Otherwise set to rmo on LPAR. + */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) + +/* Maximum address we can use for the control code buffer */ +#ifdef __powerpc64__ +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) +#else +/* TASK_SIZE, probably left over from use_mm ?? 
*/
+#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+#endif
+
+#define KEXEC_CONTROL_CODE_SIZE 4096
+
+/* The native architecture */
+#ifdef __powerpc64__
+#define KEXEC_ARCH KEXEC_ARCH_PPC64
+#else
+#define KEXEC_ARCH KEXEC_ARCH_PPC
+#endif
+
+#ifndef __ASSEMBLY__
+
+#define MAX_NOTE_BYTES 1024
+typedef u32 note_buf_t[MAX_NOTE_BYTES / sizeof(u32)];
+
+extern note_buf_t crash_notes[];
+
+#ifdef __powerpc64__
+extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
+ master to copy new code to 0 */
+#else
+struct kimage;
+extern void machine_kexec_simple(struct kimage *image);
+#endif
+
+#endif /* ! __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_KEXEC_H */
diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h
index b2f09f17fbe..6cd0a3bfa28 100644
--- a/include/asm-powerpc/kprobes.h
+++ b/include/asm-powerpc/kprobes.h
@@ -27,6 +27,7 @@
  */
 #include <linux/types.h>
 #include <linux/ptrace.h>
+#include <linux/percpu.h>
 
 struct pt_regs;
 
@@ -53,6 +54,20 @@ struct arch_specific_insn {
 kprobe_opcode_t *insn;
 };
 
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned long status;
+ unsigned long saved_msr;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+ unsigned long kprobe_status;
+ unsigned long kprobe_saved_msr;
+ struct pt_regs jprobe_saved_regs;
+ struct prev_kprobe prev_kprobe;
+};
+
 #ifdef CONFIG_KPROBES
 extern int kprobe_exceptions_notify(struct notifier_block *self,
 unsigned long val, void *data);
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
index 451b345cfc7..fa03864d06e 100644
--- a/include/asm-powerpc/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -47,20 +47,22 @@ struct machdep_calls {
 #ifdef CONFIG_PPC64
 void (*hpte_invalidate)(unsigned long slot,
 unsigned long va,
-int large,
+int psize,
 int local);
 long (*hpte_updatepp)(unsigned long slot,
 unsigned long newpp,
 unsigned long va,
-int large,
+int psize,
 int local);
 void (*hpte_updateboltedpp)(unsigned long newpp,
-unsigned long ea);
+unsigned long ea,
+int psize);
 long (*hpte_insert)(unsigned long hpte_group,
 unsigned long va,
 unsigned long prpn,
+unsigned long rflags,
 unsigned long vflags,
-unsigned long rflags);
+int psize);
 long (*hpte_remove)(unsigned long hpte_group);
 void (*flush_hash_range)(unsigned long number, int local);
 
@@ -80,6 +82,7 @@ struct machdep_calls {
 void (*iommu_dev_setup)(struct pci_dev *dev);
 void (*iommu_bus_setup)(struct pci_bus *bus);
 void (*irq_bus_setup)(struct pci_bus *bus);
+ int (*set_dabr)(unsigned long dabr);
 #endif
 
 int (*probe)(int platform);
diff --git a/include/asm-powerpc/numnodes.h b/include/asm-powerpc/numnodes.h
new file mode 100644
index 00000000000..795533aca09
--- /dev/null
+++ b/include/asm-powerpc/numnodes.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_POWERPC_MAX_NUMNODES_H
+#define _ASM_POWERPC_MAX_NUMNODES_H
+
+/* Max 16 Nodes */
+#define NODES_SHIFT 4
+
+#endif /* _ASM_POWERPC_MAX_NUMNODES_H */
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index f99f2af82ca..c534ca41224 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -506,6 +506,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
 #else
 #define __ASM_CONST(x) x##UL
 #define ASM_CONST(x) __ASM_CONST(x)
+
+#ifdef CONFIG_PPC64
+#define DATAL ".llong"
+#else
+#define DATAL ".long"
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PPC_ASM_H */
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index eee954a001f..1dc4bf7b52b 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -70,7 +70,7 @@ extern unsigned char ucBoardRevMaj, ucBoardRevMin;
 #define PLATFORM_LPAR 0x0001
 #define PLATFORM_POWERMAC 0x0400
 #define PLATFORM_MAPLE 0x0500
-#define PLATFORM_BPA 0x1000
+#define PLATFORM_CELL 0x1000
 
 /* Compatibility with drivers coming from PPC32 world */
 #define _machine (systemcfg->platform)
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
index 3a0104fa046..7587bf5f38c 100644
--- a/include/asm-powerpc/prom.h
+++ b/include/asm-powerpc/prom.h
@@ -178,6 +178,14 @@ extern struct device_node *of_get_next_child(const struct device_node *node,
 extern struct device_node *of_node_get(struct device_node *node);
 extern void of_node_put(struct device_node *node);
 
+/* For scanning the flat device-tree at boot time */
+int __init of_scan_flat_dt(int (*it)(unsigned long node,
+ const char *uname, int depth,
+ void *data),
+ void *data);
+void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
+ unsigned long *size);
+
 /* For updating the device tree at runtime */
 extern void of_attach_node(struct device_node *);
 extern void of_detach_node(const struct device_node *);
diff --git a/include/asm-powerpc/ptrace.h b/include/asm-powerpc/ptrace.h
new file mode 100644
index 00000000000..1f7ecdb0b6c
--- /dev/null
+++ b/include/asm-powerpc/ptrace.h
@@ -0,0 +1,248 @@
+#ifndef _ASM_POWERPC_PTRACE_H
+#define _ASM_POWERPC_PTRACE_H
+
+/*
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ *
+ * This struct defines the way the registers are stored on the
+ * kernel stack during a system call or other kernel entry.
+ *
+ * This should only contain volatile regs, since we can keep the
+ * non-volatile regs in the thread_struct; it should be set up so
+ * that only the volatile regs are saved by the interrupt entry code.
+ *
+ * Since this is going on the stack, *CARE MUST BE TAKEN* to ensure
+ * that the overall structure is a multiple of 16 bytes in length.
+ *
+ * Note that the offsets of the fields in this struct correspond with
+ * the PT_* values below. This simplifies arch/powerpc/kernel/ptrace.c.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ASSEMBLY__
+
+struct pt_regs {
+ unsigned long gpr[32];
+ unsigned long nip;
+ unsigned long msr;
+ unsigned long orig_gpr3; /* Used for restarting system calls */
+ unsigned long ctr;
+ unsigned long link;
+ unsigned long xer;
+ unsigned long ccr;
+#ifdef __powerpc64__
+ unsigned long softe; /* Soft enabled/disabled */
+#else
+ unsigned long mq; /* 601 only (not used at present) */
+ /* Used on APUS to hold IPL value. */
+#endif
+ unsigned long trap; /* Reason for being here */
+ /* N.B. for critical exceptions on 4xx, the dar and dsisr
+ fields are overloaded to hold srr0 and srr1. */
+ unsigned long dar; /* Fault registers */
+ unsigned long dsisr; /* on 4xx/Book-E used for ESR */
+ unsigned long result; /* Result of a system call */
+};
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+
+#ifdef __powerpc64__
+
+#define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */
+
+/* Size of dummy stack frame allocated when calling signal handler. */
+#define __SIGNAL_FRAMESIZE 128
+#define __SIGNAL_FRAMESIZE32 64
+
+#else /* __powerpc64__ */
+
+#define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */
+
+/* Size of stack frame allocated when calling signal handler. */
+#define __SIGNAL_FRAMESIZE 64
+
+#endif /* __powerpc64__ */
+
+#ifndef __ASSEMBLY__
+
+#define instruction_pointer(regs) ((regs)->nip)
+#ifdef CONFIG_SMP
+extern unsigned long profile_pc(struct pt_regs *regs);
+#else
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+
+#ifdef __powerpc64__
+#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
+#else
+#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
+#endif
+
+#define force_successful_syscall_return() \
+ do { \
+ current_thread_info()->syscall_noerror = 1; \
+ } while(0)
+
+/*
+ * We use the least-significant bit of the trap field to indicate
+ * whether we have saved the full set of registers, or only a
+ * partial set. A 1 there means the partial set.
+ * On 4xx we use the next bit to indicate whether the exception
+ * is a critical exception (1 means it is).
+ */
+#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
+#ifndef __powerpc64__
+#define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) != 0)
+#endif /* ! __powerpc64__ */
+#define TRAP(regs) ((regs)->trap & ~0xF)
+#ifdef __powerpc64__
+#define CHECK_FULL_REGS(regs) BUG_ON(regs->trap & 1)
+#else
+#define CHECK_FULL_REGS(regs) \
+do { \
+ if ((regs)->trap & 1) \
+ printk(KERN_CRIT "%s: partial register set\n", __FUNCTION__); \
+} while (0)
+#endif /* __powerpc64__ */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+/*
+ * Offsets used by 'ptrace' system call interface.
+ * These can't be changed without breaking binary compatibility
+ * with MkLinux, etc.
+ */
+#define PT_R0 0
+#define PT_R1 1
+#define PT_R2 2
+#define PT_R3 3
+#define PT_R4 4
+#define PT_R5 5
+#define PT_R6 6
+#define PT_R7 7
+#define PT_R8 8
+#define PT_R9 9
+#define PT_R10 10
+#define PT_R11 11
+#define PT_R12 12
+#define PT_R13 13
+#define PT_R14 14
+#define PT_R15 15
+#define PT_R16 16
+#define PT_R17 17
+#define PT_R18 18
+#define PT_R19 19
+#define PT_R20 20
+#define PT_R21 21
+#define PT_R22 22
+#define PT_R23 23
+#define PT_R24 24
+#define PT_R25 25
+#define PT_R26 26
+#define PT_R27 27
+#define PT_R28 28
+#define PT_R29 29
+#define PT_R30 30
+#define PT_R31 31
+
+#define PT_NIP 32
+#define PT_MSR 33
+#ifdef __KERNEL__
+#define PT_ORIG_R3 34
+#endif
+#define PT_CTR 35
+#define PT_LNK 36
+#define PT_XER 37
+#define PT_CCR 38
+#ifndef __powerpc64__
+#define PT_MQ 39
+#else
+#define PT_SOFTE 39
+#define PT_TRAP 40
+#define PT_DAR 41
+#define PT_DSISR 42
+#define PT_RESULT 43
+#endif
+
+#define PT_FPR0 48 /* each FP reg occupies 2 slots in this space */
+
+#ifndef __powerpc64__
+
+#define PT_FPR31 (PT_FPR0 + 2*31)
+#define PT_FPSCR (PT_FPR0 + 2*32 + 1)
+
+#else /* __powerpc64__ */
+
+#define PT_FPSCR (PT_FPR0 + 32) /* each FP reg occupies 1 slot in 64-bit space */
+
+#ifdef __KERNEL__
+#define PT_FPSCR32 (PT_FPR0 + 2*32 + 1) /* each FP reg occupies 2 32-bit userspace slots */
+#endif
+
+#define PT_VR0 82 /* each Vector reg occupies 2 slots in 64-bit */
+#define PT_VSCR (PT_VR0 + 32*2 + 1)
+#define PT_VRSAVE (PT_VR0 + 33*2)
+
+#ifdef __KERNEL__
+#define PT_VR0_32 164 /* each Vector reg occupies 4 slots in 32-bit */
+#define PT_VSCR_32 (PT_VR0 + 32*4 + 3)
+#define PT_VRSAVE_32 (PT_VR0 + 33*4)
+#endif
+
+#endif /* __powerpc64__ */
+
+/*
+ * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
+ * The transfer totals 34 quadwords. Quadwords 0-31 contain the
+ * corresponding vector registers. Quadword 32 contains the vscr as the
+ * last word (offset 12) within that quadword. Quadword 33 contains the
+ * vrsave as the first word (offset 0) within the quadword.
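+ *
+ * (Editorial arithmetic, not part of the original patch: 34 quadwords
+ * make the transfer 34 * 16 = 544 bytes, so the vscr word sits at byte
+ * offset 32*16 + 12 = 524 and vrsave at 33*16 = 528 from the start of
+ * the buffer.)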
+ *
+ * This definition of the VMX state is compatible with the current PPC32
+ * ptrace interface. This allows signal handling and ptrace to use the same
+ * structures. This also simplifies the implementation of a bi-arch
+ * (combined 32- and 64-bit) gdb.
+ */
+#define PTRACE_GETVRREGS 18
+#define PTRACE_SETVRREGS 19
+
+#ifndef __powerpc64__
+/* Get/set all the upper 32 bits of the SPE registers, accumulator, and
+ * spefscr, in one go */
+#define PTRACE_GETEVRREGS 20
+#define PTRACE_SETEVRREGS 21
+#endif /* ! __powerpc64__ */
+
+/*
+ * Get or set a debug register. The first 16 are DABR registers and the
+ * second 16 are IABR registers.
+ */
+#define PTRACE_GET_DEBUGREG 25
+#define PTRACE_SET_DEBUGREG 26
+
+#ifdef __powerpc64__
+/* Additional PTRACE requests implemented on PowerPC. */
+#define PPC_PTRACE_GETREGS 0x99 /* Get GPRs 0 - 31 */
+#define PPC_PTRACE_SETREGS 0x98 /* Set GPRs 0 - 31 */
+#define PPC_PTRACE_GETFPREGS 0x97 /* Get FPRs 0 - 31 */
+#define PPC_PTRACE_SETFPREGS 0x96 /* Set FPRs 0 - 31 */
+
+/* Calls to trace a 64bit program from a 32bit program */
+#define PPC_PTRACE_PEEKTEXT_3264 0x95
+#define PPC_PTRACE_PEEKDATA_3264 0x94
+#define PPC_PTRACE_POKETEXT_3264 0x93
+#define PPC_PTRACE_POKEDATA_3264 0x92
+#define PPC_PTRACE_PEEKUSR_3264 0x91
+#define PPC_PTRACE_POKEUSR_3264 0x90
+#endif /* __powerpc64__ */
+
+#endif /* _ASM_POWERPC_PTRACE_H */
diff --git a/include/asm-powerpc/rtas.h b/include/asm-powerpc/rtas.h
index 2c050332471..d1bb611ea62 100644
--- a/include/asm-powerpc/rtas.h
+++ b/include/asm-powerpc/rtas.h
@@ -149,28 +149,11 @@ struct rtas_error_log {
 unsigned char buffer[1];
 };
 
-struct flash_block {
- char *data;
- unsigned long length;
-};
-
-/* This struct is very similar but not identical to
- * that needed by the rtas flash update.
- * All we need to do for rtas is rewrite num_blocks
- * into a version/length and translate the pointers
- * to absolute.
+/*
+ * This can be set by the rtas_flash module so that it can get called
+ * as the absolutely last thing before the kernel terminates.
  */
-#define FLASH_BLOCKS_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct flash_block))
-struct flash_block_list {
- unsigned long num_blocks;
- struct flash_block_list *next;
- struct flash_block blocks[FLASH_BLOCKS_PER_NODE];
-};
-struct flash_block_list_header { /* just the header of flash_block_list */
- unsigned long num_blocks;
- struct flash_block_list *next;
-};
-extern struct flash_block_list_header rtas_firmware_flash_list;
+extern void (*rtas_flash_term_hook)(int);
 
 extern struct rtas_t rtas;
diff --git a/include/asm-powerpc/sigcontext.h b/include/asm-powerpc/sigcontext.h
new file mode 100644
index 00000000000..165d630e1cf
--- /dev/null
+++ b/include/asm-powerpc/sigcontext.h
@@ -0,0 +1,52 @@
+#ifndef _ASM_POWERPC_SIGCONTEXT_H
+#define _ASM_POWERPC_SIGCONTEXT_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */ +#include <linux/compiler.h> +#include <asm/ptrace.h> +#ifdef __powerpc64__ +#include <asm/elf.h> +#endif + +struct sigcontext { + unsigned long _unused[4]; + int signal; +#ifdef __powerpc64__ + int _pad0; +#endif + unsigned long handler; + unsigned long oldmask; + struct pt_regs __user *regs; +#ifdef __powerpc64__ + elf_gregset_t gp_regs; + elf_fpregset_t fp_regs; +/* + * To maintain compatibility with current implementations the sigcontext is + * extended by appending a pointer (v_regs) to a quadword type (elf_vrreg_t) + * followed by an unstructured (vmx_reserve) field of 69 doublewords. This + * allows the array of vector registers to be quadword aligned independent of + * the alignment of the containing sigcontext or ucontext. It is the + * responsibility of the code setting the sigcontext to set this pointer to + * either NULL (if this processor does not support the VMX feature) or the + * address of the first quadword within the allocated (vmx_reserve) area. + * + * The pointer (v_regs) of vector type (elf_vrreg_t) is type compatible with + * an array of 34 quadword entries (elf_vrregset_t). The entries with + * indexes 0-31 contain the corresponding vector registers. The entry with + * index 32 contains the vscr as the last word (offset 12) within the + * quadword. This allows the vscr to be stored as either a quadword (since + * it must be copied via a vector register to/from storage) or as a word. + * The entry with index 33 contains the vrsave as the first word (offset 0) + * within the quadword. + */ + elf_vrreg_t __user *v_regs; + long vmx_reserve[ELF_NVRREG+ELF_NVRREG+1]; +#endif +}; + +#endif /* _ASM_POWERPC_SIGCONTEXT_H */ diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h new file mode 100644 index 00000000000..8bcdd0faefe --- /dev/null +++ b/include/asm-powerpc/smp.h @@ -0,0 +1,119 @@ +/* + * smp.h: PowerPC-specific SMP code. + * + * Original was a copy of sparc smp.h. Now heavily modified + * for PPC. + * + * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1996-2001 Cort Dougan <cort@fsmlabs.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */
+
+#ifndef _ASM_POWERPC_SMP_H
+#define _ASM_POWERPC_SMP_H
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/kernel.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#endif
+
+extern int boot_cpuid;
+extern int boot_cpuid_phys;
+
+extern void cpu_die(void);
+
+#ifdef CONFIG_SMP
+
+extern void smp_send_debugger_break(int cpu);
+struct pt_regs;
+extern void smp_message_recv(int, struct pt_regs *);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern void fixup_irqs(cpumask_t map);
+int generic_cpu_disable(void);
+int generic_cpu_enable(unsigned int cpu);
+void generic_cpu_die(unsigned int cpu);
+void generic_mach_cpu_die(void);
+#endif
+
+#ifdef CONFIG_PPC64
+#define raw_smp_processor_id() (get_paca()->paca_index)
+#define hard_smp_processor_id() (get_paca()->hw_cpu_id)
+#else
+/* 32-bit */
+extern int smp_hw_index[];
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])
+#define get_hard_smp_processor_id(cpu) (smp_hw_index[(cpu)])
+#define set_hard_smp_processor_id(cpu, phys) \
+ (smp_hw_index[(cpu)] = (phys))
+#endif
+
+extern cpumask_t cpu_sibling_map[NR_CPUS];
+
+/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
+ *
+ * Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up
+ * in /proc/interrupts will be wrong!!! --Troy */
+#define PPC_MSG_CALL_FUNCTION 0
+#define PPC_MSG_RESCHEDULE 1
+/* This is unused now */
+#if 0
+#define PPC_MSG_MIGRATE_TASK 2
+#endif
+#define PPC_MSG_DEBUGGER_BREAK 3
+
+void smp_init_iSeries(void);
+void smp_init_pSeries(void);
+void smp_init_cell(void);
+void smp_setup_cpu_maps(void);
+
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+
+#else
+/* for UP */
+#define smp_setup_cpu_maps()
+#define smp_release_cpus()
+
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_PPC64
+#define get_hard_smp_processor_id(CPU) (paca[(CPU)].hw_cpu_id)
+#define set_hard_smp_processor_id(CPU, VAL) \
+ do { (paca[(CPU)].hw_cpu_id = (VAL)); } while (0)
+#else
+/* 32-bit */
+#ifndef CONFIG_SMP
+#define get_hard_smp_processor_id(cpu) boot_cpuid_phys
+#define set_hard_smp_processor_id(cpu, phys)
+#endif
+#endif
+
+extern int smt_enabled_at_boot;
+
+extern int smp_mpic_probe(void);
+extern void smp_mpic_setup_cpu(int cpu);
+extern void smp_generic_kick_cpu(int nr);
+
+extern void smp_generic_give_timebase(void);
+extern void smp_generic_take_timebase(void);
+
+extern struct smp_ops_t *smp_ops;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SMP_H */
diff --git a/include/asm-powerpc/sparsemem.h b/include/asm-powerpc/sparsemem.h
new file mode 100644
index 00000000000..1c95ab99deb
--- /dev/null
+++ b/include/asm-powerpc/sparsemem.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_POWERPC_SPARSEMEM_H
+#define _ASM_POWERPC_SPARSEMEM_H 1
+
+#ifdef CONFIG_SPARSEMEM
+/*
+ * SECTION_SIZE_BITS 2^N: how big each section will be
+ * MAX_PHYSADDR_BITS 2^N: how much physical address space we have
+ * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
+ */
+#define SECTION_SIZE_BITS 24
+#define MAX_PHYSADDR_BITS 38
+#define MAX_PHYSMEM_BITS 36
+
+#endif /* CONFIG_SPARSEMEM */
+
+#endif /* _ASM_POWERPC_SPARSEMEM_H */
diff --git a/include/asm-powerpc/stat.h b/include/asm-powerpc/stat.h
new file mode 100644
index 00000000000..e4edc510b53
--- /dev/null
+++ b/include/asm-powerpc/stat.h
@@ -0,0 +1,81 @@
+#ifndef _ASM_POWERPC_STAT_H
+#define _ASM_POWERPC_STAT_H
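+
+/*
+ * (Editorial note on the sparsemem constants above, not part of the
+ * original patch: SECTION_SIZE_BITS = 24 makes each memory section
+ * 2^24 bytes = 16 MB, and MAX_PHYSMEM_BITS = 36 caps the memory that
+ * can be modelled at 2^36 bytes = 64 GB.)
+ */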
+/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include <linux/types.h> + +#define STAT_HAVE_NSEC 1 + +#ifndef __powerpc64__ +struct __old_kernel_stat { + unsigned short st_dev; + unsigned short st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned long st_size; + unsigned long st_atime; + unsigned long st_mtime; + unsigned long st_ctime; +}; +#endif /* !__powerpc64__ */ + +struct stat { + unsigned long st_dev; + ino_t st_ino; +#ifdef __powerpc64__ + nlink_t st_nlink; + mode_t st_mode; +#else + mode_t st_mode; + nlink_t st_nlink; +#endif + uid_t st_uid; + gid_t st_gid; + unsigned long st_rdev; + off_t st_size; + unsigned long st_blksize; + unsigned long st_blocks; + unsigned long st_atime; + unsigned long st_atime_nsec; + unsigned long st_mtime; + unsigned long st_mtime_nsec; + unsigned long st_ctime; + unsigned long st_ctime_nsec; + unsigned long __unused4; + unsigned long __unused5; +#ifdef __powerpc64__ + unsigned long __unused6; +#endif +}; + +/* This matches struct stat64 in glibc2.1. Only used for 32 bit. */ +struct stat64 { + unsigned long long st_dev; /* Device. */ + unsigned long long st_ino; /* File serial number. */ + unsigned int st_mode; /* File mode. */ + unsigned int st_nlink; /* Link count. */ + unsigned int st_uid; /* User ID of the file's owner. */ + unsigned int st_gid; /* Group ID of the file's group. */ + unsigned long long st_rdev; /* Device number, if device. */ + unsigned short __pad2; + long long st_size; /* Size of file, in bytes. */ + int st_blksize; /* Optimal block size for I/O. */ + long long st_blocks; /* Number 512-byte blocks allocated. */ + int st_atime; /* Time of last access. */ + unsigned int st_atime_nsec; + int st_mtime; /* Time of last modification. */ + unsigned int st_mtime_nsec; + int st_ctime; /* Time of last status change. */ + unsigned int st_ctime_nsec; + unsigned int __unused4; + unsigned int __unused5; +}; + +#endif /* _ASM_POWERPC_STAT_H */ diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h index 5b2ecbc4790..3536a5cd7a2 100644 --- a/include/asm-powerpc/system.h +++ b/include/asm-powerpc/system.h @@ -289,7 +289,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) #ifdef CONFIG_PPC64 static __inline__ unsigned long -__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new) +__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) { unsigned long prev; @@ -359,5 +359,53 @@ extern void reloc_got2(unsigned long); #define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) +static inline void create_instruction(unsigned long addr, unsigned int instr) +{ + unsigned int *p; + p = (unsigned int *)addr; + *p = instr; + asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (p)); +} + +/* Flags for create_branch: + * "b" == create_branch(addr, target, 0); + * "ba" == create_branch(addr, target, BRANCH_ABSOLUTE); + * "bl" == create_branch(addr, target, BRANCH_SET_LINK); + * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK); + */ +#define BRANCH_SET_LINK 0x1 +#define BRANCH_ABSOLUTE 0x2 + +static inline void create_branch(unsigned long addr, + unsigned long target, int flags) +{ + unsigned int instruction; + + if (! 
(flags & BRANCH_ABSOLUTE)) + target = target - addr; + + /* Mask out the flags and target, so they don't step on each other. */ + instruction = 0x48000000 | (flags & 0x3) | (target & 0x03FFFFFC); + + create_instruction(addr, instruction); +} + +static inline void create_function_call(unsigned long addr, void * func) +{ + unsigned long func_addr; + +#ifdef CONFIG_PPC64 + /* + * On PPC64 the function pointer actually points to the function's + * descriptor. The first entry in the descriptor is the address + * of the function text. + */ + func_addr = *(unsigned long *)func; +#else + func_addr = (unsigned long)func; +#endif + create_branch(addr, func_addr, BRANCH_SET_LINK); +} + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_SYSTEM_H */ diff --git a/include/asm-powerpc/termios.h b/include/asm-powerpc/termios.h index c5b8e5358f8..7f80a019b6a 100644 --- a/include/asm-powerpc/termios.h +++ b/include/asm-powerpc/termios.h @@ -94,142 +94,9 @@ struct termio { #define INIT_C_CC "\003\034\177\025\004\001\000\000\000\000\027\022\032\021\023\026\025" #endif -#define FIOCLEX _IO('f', 1) -#define FIONCLEX _IO('f', 2) -#define FIOASYNC _IOW('f', 125, int) -#define FIONBIO _IOW('f', 126, int) -#define FIONREAD _IOR('f', 127, int) -#define TIOCINQ FIONREAD - -#define TIOCGETP _IOR('t', 8, struct sgttyb) -#define TIOCSETP _IOW('t', 9, struct sgttyb) -#define TIOCSETN _IOW('t', 10, struct sgttyb) /* TIOCSETP wo flush */ - -#define TIOCSETC _IOW('t', 17, struct tchars) -#define TIOCGETC _IOR('t', 18, struct tchars) -#define TCGETS _IOR('t', 19, struct termios) -#define TCSETS _IOW('t', 20, struct termios) -#define TCSETSW _IOW('t', 21, struct termios) -#define TCSETSF _IOW('t', 22, struct termios) - -#define TCGETA _IOR('t', 23, struct termio) -#define TCSETA _IOW('t', 24, struct termio) -#define TCSETAW _IOW('t', 25, struct termio) -#define TCSETAF _IOW('t', 28, struct termio) - -#define TCSBRK _IO('t', 29) -#define TCXONC _IO('t', 30) -#define TCFLSH _IO('t', 31) - -#define TIOCSWINSZ _IOW('t', 103, struct winsize) -#define TIOCGWINSZ _IOR('t', 104, struct winsize) -#define TIOCSTART _IO('t', 110) /* start output, like ^Q */ -#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */ -#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */ - -#define TIOCGLTC _IOR('t', 116, struct ltchars) -#define TIOCSLTC _IOW('t', 117, struct ltchars) -#define TIOCSPGRP _IOW('t', 118, int) -#define TIOCGPGRP _IOR('t', 119, int) - -#define TIOCEXCL 0x540C -#define TIOCNXCL 0x540D -#define TIOCSCTTY 0x540E - -#define TIOCSTI 0x5412 -#define TIOCMGET 0x5415 -#define TIOCMBIS 0x5416 -#define TIOCMBIC 0x5417 -#define TIOCMSET 0x5418 -#define TIOCGSOFTCAR 0x5419 -#define TIOCSSOFTCAR 0x541A -#define TIOCLINUX 0x541C -#define TIOCCONS 0x541D -#define TIOCGSERIAL 0x541E -#define TIOCSSERIAL 0x541F -#define TIOCPKT 0x5420 - -#define TIOCNOTTY 0x5422 -#define TIOCSETD 0x5423 -#define TIOCGETD 0x5424 -#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ - -#define TIOCSERCONFIG 0x5453 -#define TIOCSERGWILD 0x5454 -#define TIOCSERSWILD 0x5455 -#define TIOCGLCKTRMIOS 0x5456 -#define TIOCSLCKTRMIOS 0x5457 -#define TIOCSERGSTRUCT 0x5458 /* For debugging only */ -#define TIOCSERGETLSR 0x5459 /* Get line status register */ -#define TIOCSERGETMULTI 0x545A /* Get multiport config */ -#define TIOCSERSETMULTI 0x545B /* Set multiport config */ - -#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ -#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ - -/* Used for packet mode */ -#define 
TIOCPKT_DATA 0 -#define TIOCPKT_FLUSHREAD 1 -#define TIOCPKT_FLUSHWRITE 2 -#define TIOCPKT_STOP 4 -#define TIOCPKT_START 8 -#define TIOCPKT_NOSTOP 16 -#define TIOCPKT_DOSTOP 32 - -/* modem lines */ -#define TIOCM_LE 0x001 -#define TIOCM_DTR 0x002 -#define TIOCM_RTS 0x004 -#define TIOCM_ST 0x008 -#define TIOCM_SR 0x010 -#define TIOCM_CTS 0x020 -#define TIOCM_CAR 0x040 -#define TIOCM_RNG 0x080 -#define TIOCM_DSR 0x100 -#define TIOCM_CD TIOCM_CAR -#define TIOCM_RI TIOCM_RNG -#define TIOCM_OUT1 0x2000 -#define TIOCM_OUT2 0x4000 -#define TIOCM_LOOP 0x8000 - -/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ -#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ - #ifdef __KERNEL__ -/* - * Translate a "termio" structure into a "termios". Ugh. - */ -#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ - unsigned short __tmp; \ - get_user(__tmp,&(termio)->x); \ - (termios)->x = (0xffff0000 & (termios)->x) | __tmp; \ -} - -#define user_termio_to_kernel_termios(termios, termio) \ -({ \ - SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ - SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ - SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ - SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ - copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ -}) - -/* - * Translate a "termios" structure into a "termio". Ugh. - */ -#define kernel_termios_to_user_termio(termio, termios) \ -({ \ - put_user((termios)->c_iflag, &(termio)->c_iflag); \ - put_user((termios)->c_oflag, &(termio)->c_oflag); \ - put_user((termios)->c_cflag, &(termio)->c_cflag); \ - put_user((termios)->c_lflag, &(termio)->c_lflag); \ - put_user((termios)->c_line, &(termio)->c_line); \ - copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ -}) - -#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) -#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) +#include <asm-generic/termios.h> #endif /* __KERNEL__ */ diff --git a/include/asm-powerpc/thread_info.h b/include/asm-powerpc/thread_info.h index ab17db79f69..e525f49bd17 100644 --- a/include/asm-powerpc/thread_info.h +++ b/include/asm-powerpc/thread_info.h @@ -65,23 +65,27 @@ struct thread_info { /* thread information allocation */ -#ifdef CONFIG_DEBUG_STACK_USAGE -#define THREAD_INFO_GFP GFP_KERNEL | __GFP_ZERO -#else -#define THREAD_INFO_GFP GFP_KERNEL -#endif - #if THREAD_SHIFT >= PAGE_SHIFT #define THREAD_ORDER (THREAD_SHIFT - PAGE_SHIFT) +#ifdef CONFIG_DEBUG_STACK_USAGE #define alloc_thread_info(tsk) \ - ((struct thread_info *)__get_free_pages(THREAD_INFO_GFP, THREAD_ORDER)) + ((struct thread_info *)__get_free_pages(GFP_KERNEL | \ + __GFP_ZERO, THREAD_ORDER)) +#else +#define alloc_thread_info(tsk) \ + ((struct thread_info *)__get_free_pages(GFP_KERNEL, THREAD_ORDER)) +#endif #define free_thread_info(ti) free_pages((unsigned long)ti, THREAD_ORDER) #else /* THREAD_SHIFT < PAGE_SHIFT */ -#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, THREAD_INFO_GFP) +#ifdef CONFIG_DEBUG_STACK_USAGE +#define alloc_thread_info(tsk) kzalloc(THREAD_SIZE, GFP_KERNEL) +#else +#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) +#endif #define free_thread_info(ti) kfree(ti) #endif /* THREAD_SHIFT < PAGE_SHIFT */ diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h index 410e795f7d4..d9b86a17271 100644 --- a/include/asm-powerpc/time.h +++ b/include/asm-powerpc/time.h @@ -21,7 +21,7 @@ #include <asm/processor.h> #ifdef CONFIG_PPC64 #include <asm/paca.h> -#include 
<asm/iSeries/HvCall.h> +#include <asm/iseries/hv_call.h> #endif /* time.c */ diff --git a/include/asm-powerpc/tlb.h b/include/asm-powerpc/tlb.h new file mode 100644 index 00000000000..56659f12177 --- /dev/null +++ b/include/asm-powerpc/tlb.h @@ -0,0 +1,70 @@ +/* + * TLB shootdown specifics for powerpc + * + * Copyright (C) 2002 Anton Blanchard, IBM Corp. + * Copyright (C) 2002 Paul Mackerras, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _ASM_POWERPC_TLB_H +#define _ASM_POWERPC_TLB_H + +#include <linux/config.h> +#ifndef __powerpc64__ +#include <asm/pgtable.h> +#endif +#include <asm/pgalloc.h> +#include <asm/tlbflush.h> +#ifndef __powerpc64__ +#include <asm/page.h> +#include <asm/mmu.h> +#endif + +struct mmu_gather; + +#define tlb_start_vma(tlb, vma) do { } while (0) +#define tlb_end_vma(tlb, vma) do { } while (0) + +#if !defined(CONFIG_PPC_STD_MMU) + +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) + +#elif defined(__powerpc64__) + +extern void pte_free_finish(void); + +static inline void tlb_flush(struct mmu_gather *tlb) +{ + flush_tlb_pending(); + pte_free_finish(); +} + +#else + +extern void tlb_flush(struct mmu_gather *tlb); + +#endif + +/* Get the generic bits... */ +#include <asm-generic/tlb.h> + +#if !defined(CONFIG_PPC_STD_MMU) || defined(__powerpc64__) + +#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0) + +#else +extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, + unsigned long address); + +static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, + unsigned long address) +{ + if (pte_val(*ptep) & _PAGE_HASHPTE) + flush_hash_entry(tlb->mm, ptep, address); +} + +#endif +#endif /* __ASM_POWERPC_TLB_H */ diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h new file mode 100644 index 00000000000..a2998eee37b --- /dev/null +++ b/include/asm-powerpc/tlbflush.h @@ -0,0 +1,147 @@ +#ifndef _ASM_POWERPC_TLBFLUSH_H +#define _ASM_POWERPC_TLBFLUSH_H +/* + * TLB flushing: + * + * - flush_tlb_mm(mm) flushes the specified mm context TLB's + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB + * - flush_tlb_range(vma, start, end) flushes a range of pages + * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages + * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
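+ *
+ * (Editorial sketch, not part of the original patch: a typical caller
+ * invalidating a range after changing PTEs would do
+ *
+ *	flush_tlb_range(vma, start, end);
+ *
+ * which, in the 64-bit hash-MMU configuration below, just drains the
+ * per-cpu batch via flush_tlb_pending().)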
+ */ +#ifdef __KERNEL__ + +#include <linux/config.h> + +struct mm_struct; + +#ifdef CONFIG_PPC64 + +#include <linux/percpu.h> +#include <asm/page.h> + +#define PPC64_TLB_BATCH_NR 192 + +struct ppc64_tlb_batch { + unsigned long index; + struct mm_struct *mm; + real_pte_t pte[PPC64_TLB_BATCH_NR]; + unsigned long vaddr[PPC64_TLB_BATCH_NR]; + unsigned int psize; +}; +DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); + +extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch); + +static inline void flush_tlb_pending(void) +{ + struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); + + if (batch->index) + __flush_tlb_pending(batch); + put_cpu_var(ppc64_tlb_batch); +} + +extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize, + int local); +extern void flush_hash_range(unsigned long number, int local); + +#else /* CONFIG_PPC64 */ + +#include <linux/mm.h> + +extern void _tlbie(unsigned long address); +extern void _tlbia(void); + +/* + * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range & + * flush_tlb_kernel_range are best implemented as tlbia vs + * specific tlbie's + */ + +#if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx) +#define flush_tlb_pending() asm volatile ("tlbia; sync" : : : "memory") +#elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE) +#define flush_tlb_pending() _tlbia() +#endif + +/* + * This gets called at the end of handling a page fault, when + * the kernel has put a new PTE into the page table for the process. + * We use it to ensure coherency between the i-cache and d-cache + * for the page which has just been mapped in. + * On machines which use an MMU hash table, we use this to put a + * corresponding HPTE into the hash table ahead of time, instead of + * waiting for the inevitable extra hash-table miss exception. + */ +extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); + +#endif /* CONFIG_PPC64 */ + +#if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \ + defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx) + +static inline void flush_tlb_mm(struct mm_struct *mm) +{ + flush_tlb_pending(); +} + +static inline void flush_tlb_page(struct vm_area_struct *vma, + unsigned long vmaddr) +{ +#ifdef CONFIG_PPC64 + flush_tlb_pending(); +#else + _tlbie(vmaddr); +#endif +} + +static inline void flush_tlb_page_nohash(struct vm_area_struct *vma, + unsigned long vmaddr) +{ +#ifndef CONFIG_PPC64 + _tlbie(vmaddr); +#endif +} + +static inline void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + flush_tlb_pending(); +} + +static inline void flush_tlb_kernel_range(unsigned long start, + unsigned long end) +{ + flush_tlb_pending(); +} + +#else /* 6xx, 7xx, 7xxx cpus */ + +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); +extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr); +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); + +#endif + +/* + * This is called in munmap when we have freed up some page-table + * pages. We don't need to do anything here, there's nothing special + * about our page-table pages. 
+ */
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+				      unsigned long start, unsigned long end)
+{
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_TLBFLUSH_H */
diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h
new file mode 100644
index 00000000000..33af730f0d1
--- /dev/null
+++ b/include/asm-powerpc/uaccess.h
@@ -0,0 +1,468 @@
+#ifndef _ARCH_POWERPC_UACCESS_H
+#define _ARCH_POWERPC_UACCESS_H
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <asm/processor.h>
+
+#define VERIFY_READ	0
+#define VERIFY_WRITE	1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not.  If get_fs() == USER_DS, checking is performed; if
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ *
+ * The fs/ds values are now the highest legal address in the "segment".
+ * This simplifies the checking in the routines below.
+ */
+
+#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
+
+#define KERNEL_DS	MAKE_MM_SEG(~0UL)
+#ifdef __powerpc64__
+/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
+#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
+#else
+#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
+#endif
+
+#define get_ds()	(KERNEL_DS)
+#define get_fs()	(current->thread.fs)
+#define set_fs(val)	(current->thread.fs = (val))
+
+#define segment_eq(a, b)	((a).seg == (b).seg)
+
+#ifdef __powerpc64__
+/*
+ * This check is sufficient because there is a large enough
+ * gap between user addresses and the kernel addresses.
+ */
+#define __access_ok(addr, size, segment) \
+	(((addr) <= (segment).seg) && ((size) <= (segment).seg))
+
+#else
+
+#define __access_ok(addr, size, segment) \
+	(((addr) <= (segment).seg) && \
+	 (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))
+
+#endif
+
+#define access_ok(type, addr, size) \
+	(__chk_user_ptr(addr), \
+	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue.  No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path.  This means when everything is well,
+ * we don't even have to jump over them.  Further, they do not intrude
+ * on our cache or TLB entries.
+ */
+
+struct exception_table_entry {
+	unsigned long insn;
+	unsigned long fixup;
+};
+
+/*
+ * These are the main single-value transfer routines.  They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ *
+ * As we use the same address space for kernel and user data on the
+ * PowerPC, we can just do these as direct assignments.  (Of course, the
+ * exception handling means that it's no longer "just"...)
+ *
+ * The "user64" versions of the user access functions are versions that
+ * allow access of 64-bit data. The "get_user" functions do not
+ * properly handle 64-bit data because the value gets down cast to a long.
+ * The "put_user" functions already handle 64-bit data properly but we add
+ * "user64" versions for completeness.
+ */
+#define get_user(x, ptr) \
+	__get_user_check((x), (ptr), sizeof(*(ptr)))
+#define put_user(x, ptr) \
+	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#define __get_user(x, ptr) \
+	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#ifndef __powerpc64__
+#define __get_user64(x, ptr) \
+	__get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user64(x, ptr) __put_user(x, ptr)
+#endif
+
+#define __get_user_unaligned __get_user
+#define __put_user_unaligned __put_user
+
+extern long __put_user_bad(void);
+
+#ifdef __powerpc64__
+#define __EX_TABLE_ALIGN	"3"
+#define __EX_TABLE_TYPE		"llong"
+#else
+#define __EX_TABLE_ALIGN	"2"
+#define __EX_TABLE_TYPE		"long"
+#endif
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ */
+#define __put_user_asm(x, addr, err, op) \
+	__asm__ __volatile__( \
+		"1: " op " %1,0(%2) # put_user\n" \
+		"2:\n" \
+		".section .fixup,\"ax\"\n" \
+		"3: li %0,%3\n" \
+		" b 2b\n" \
+		".previous\n" \
+		".section __ex_table,\"a\"\n" \
+		" .align " __EX_TABLE_ALIGN "\n" \
+		" ."__EX_TABLE_TYPE" 1b,3b\n" \
+		".previous" \
+		: "=r" (err) \
+		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
+
+#ifdef __powerpc64__
+#define __put_user_asm2(x, ptr, retval) \
+	__put_user_asm(x, ptr, retval, "std")
+#else /* __powerpc64__ */
+#define __put_user_asm2(x, addr, err) \
+	__asm__ __volatile__( \
+		"1: stw %1,0(%2)\n" \
+		"2: stw %1+1,4(%2)\n" \
+		"3:\n" \
+		".section .fixup,\"ax\"\n" \
+		"4: li %0,%3\n" \
+		" b 3b\n" \
+		".previous\n" \
+		".section __ex_table,\"a\"\n" \
+		" .align " __EX_TABLE_ALIGN "\n" \
+		" ." __EX_TABLE_TYPE " 1b,4b\n" \
+		" ." __EX_TABLE_TYPE " 2b,4b\n" \
+		".previous" \
+		: "=r" (err) \
+		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
+#endif /* __powerpc64__ */
+
+#define __put_user_size(x, ptr, size, retval) \
+do { \
+	retval = 0; \
+	switch (size) { \
+	case 1: __put_user_asm(x, ptr, retval, "stb"); break; \
+	case 2: __put_user_asm(x, ptr, retval, "sth"); break; \
+	case 4: __put_user_asm(x, ptr, retval, "stw"); break; \
+	case 8: __put_user_asm2(x, ptr, retval); break; \
+	default: __put_user_bad(); \
+	} \
+} while (0)
+
+#define __put_user_nocheck(x, ptr, size) \
+({ \
+	long __pu_err; \
+	might_sleep(); \
+	__chk_user_ptr(ptr); \
+	__put_user_size((x), (ptr), (size), __pu_err); \
+	__pu_err; \
+})
+
+#define __put_user_check(x, ptr, size) \
+({ \
+	long __pu_err = -EFAULT; \
+	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+	might_sleep(); \
+	if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
+		__put_user_size((x), __pu_addr, (size), __pu_err); \
+	__pu_err; \
+})
+
+extern long __get_user_bad(void);
+
+#define __get_user_asm(x, addr, err, op) \
+	__asm__ __volatile__( \
+		"1: "op" %1,0(%2) # get_user\n" \
+		"2:\n" \
+		".section .fixup,\"ax\"\n" \
+		"3: li %0,%3\n" \
+		" li %1,0\n" \
+		" b 2b\n" \
+		".previous\n" \
+		".section __ex_table,\"a\"\n" \
+		" .align "__EX_TABLE_ALIGN "\n" \
+		" ." __EX_TABLE_TYPE " 1b,3b\n" \
__EX_TABLE_TYPE " 1b,3b\n" \ + ".previous" \ + : "=r" (err), "=r" (x) \ + : "b" (addr), "i" (-EFAULT), "0" (err)) + +#ifdef __powerpc64__ +#define __get_user_asm2(x, addr, err) \ + __get_user_asm(x, addr, err, "ld") +#else /* __powerpc64__ */ +#define __get_user_asm2(x, addr, err) \ + __asm__ __volatile__( \ + "1: lwz %1,0(%2)\n" \ + "2: lwz %1+1,4(%2)\n" \ + "3:\n" \ + ".section .fixup,\"ax\"\n" \ + "4: li %0,%3\n" \ + " li %1,0\n" \ + " li %1+1,0\n" \ + " b 3b\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n" \ + " .align " __EX_TABLE_ALIGN "\n" \ + " ." __EX_TABLE_TYPE " 1b,4b\n" \ + " ." __EX_TABLE_TYPE " 2b,4b\n" \ + ".previous" \ + : "=r" (err), "=&r" (x) \ + : "b" (addr), "i" (-EFAULT), "0" (err)) +#endif /* __powerpc64__ */ + +#define __get_user_size(x, ptr, size, retval) \ +do { \ + retval = 0; \ + __chk_user_ptr(ptr); \ + if (size > sizeof(x)) \ + (x) = __get_user_bad(); \ + switch (size) { \ + case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \ + case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \ + case 4: __get_user_asm(x, ptr, retval, "lwz"); break; \ + case 8: __get_user_asm2(x, ptr, retval); break; \ + default: (x) = __get_user_bad(); \ + } \ +} while (0) + +#define __get_user_nocheck(x, ptr, size) \ +({ \ + long __gu_err; \ + unsigned long __gu_val; \ + __chk_user_ptr(ptr); \ + might_sleep(); \ + __get_user_size(__gu_val, (ptr), (size), __gu_err); \ + (x) = (__typeof__(*(ptr)))__gu_val; \ + __gu_err; \ +}) + +#ifndef __powerpc64__ +#define __get_user64_nocheck(x, ptr, size) \ +({ \ + long __gu_err; \ + long long __gu_val; \ + __chk_user_ptr(ptr); \ + might_sleep(); \ + __get_user_size(__gu_val, (ptr), (size), __gu_err); \ + (x) = (__typeof__(*(ptr)))__gu_val; \ + __gu_err; \ +}) +#endif /* __powerpc64__ */ + +#define __get_user_check(x, ptr, size) \ +({ \ + long __gu_err = -EFAULT; \ + unsigned long __gu_val = 0; \ + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + might_sleep(); \ + if (access_ok(VERIFY_READ, __gu_addr, (size))) \ + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ + (x) = (__typeof__(*(ptr)))__gu_val; \ + __gu_err; \ +}) + +/* more complex routines */ + +extern unsigned long __copy_tofrom_user(void __user *to, + const void __user *from, unsigned long size); + +#ifndef __powerpc64__ + +extern inline unsigned long copy_from_user(void *to, + const void __user *from, unsigned long n) +{ + unsigned long over; + + if (access_ok(VERIFY_READ, from, n)) + return __copy_tofrom_user((__force void __user *)to, from, n); + if ((unsigned long)from < TASK_SIZE) { + over = (unsigned long)from + n - TASK_SIZE; + return __copy_tofrom_user((__force void __user *)to, from, + n - over) + over; + } + return n; +} + +extern inline unsigned long copy_to_user(void __user *to, + const void *from, unsigned long n) +{ + unsigned long over; + + if (access_ok(VERIFY_WRITE, to, n)) + return __copy_tofrom_user(to, (__force void __user *)from, n); + if ((unsigned long)to < TASK_SIZE) { + over = (unsigned long)to + n - TASK_SIZE; + return __copy_tofrom_user(to, (__force void __user *)from, + n - over) + over; + } + return n; +} + +#else /* __powerpc64__ */ + +#define __copy_in_user(to, from, size) \ + __copy_tofrom_user((to), (from), (size)) + +extern unsigned long copy_from_user(void *to, const void __user *from, + unsigned long n); +extern unsigned long copy_to_user(void __user *to, const void *from, + unsigned long n); +extern unsigned long copy_in_user(void __user *to, const void __user *from, + unsigned long n); + +#endif /* __powerpc64__ */ + +static 
+		const void __user *from, unsigned long n)
+{
+	if (__builtin_constant_p(n) && (n <= 8)) {
+		unsigned long ret = 1;	/* non-zero: sizes with no case below fall through */
+
+		switch (n) {
+		case 1:
+			__get_user_size(*(u8 *)to, from, 1, ret);
+			break;
+		case 2:
+			__get_user_size(*(u16 *)to, from, 2, ret);
+			break;
+		case 4:
+			__get_user_size(*(u32 *)to, from, 4, ret);
+			break;
+		case 8:
+			__get_user_size(*(u64 *)to, from, 8, ret);
+			break;
+		}
+		if (ret == 0)
+			return 0;
+	}
+	return __copy_tofrom_user((__force void __user *)to, from, n);
+}
+
+static inline unsigned long __copy_to_user_inatomic(void __user *to,
+		const void *from, unsigned long n)
+{
+	if (__builtin_constant_p(n) && (n <= 8)) {
+		unsigned long ret = 1;	/* see __copy_from_user_inatomic */
+
+		switch (n) {
+		case 1:
+			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
+			break;
+		case 2:
+			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
+			break;
+		case 4:
+			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
+			break;
+		case 8:
+			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
+			break;
+		}
+		if (ret == 0)
+			return 0;
+	}
+	return __copy_tofrom_user(to, (__force const void __user *)from, n);
+}
+
+static inline unsigned long __copy_from_user(void *to,
+		const void __user *from, unsigned long size)
+{
+	might_sleep();
+	return __copy_from_user_inatomic(to, from, size);
+}
+
+static inline unsigned long __copy_to_user(void __user *to,
+		const void *from, unsigned long size)
+{
+	might_sleep();
+	return __copy_to_user_inatomic(to, from, size);
+}
+
+extern unsigned long __clear_user(void __user *addr, unsigned long size);
+
+static inline unsigned long clear_user(void __user *addr, unsigned long size)
+{
+	might_sleep();
+	if (likely(access_ok(VERIFY_WRITE, addr, size)))
+		return __clear_user(addr, size);
+	if ((unsigned long)addr < TASK_SIZE) {
+		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
+		return __clear_user(addr, size - over) + over;
+	}
+	return size;
+}
+
+extern int __strncpy_from_user(char *dst, const char __user *src, long count);
+
+static inline long strncpy_from_user(char *dst, const char __user *src,
+		long count)
+{
+	might_sleep();
+	if (likely(access_ok(VERIFY_READ, src, 1)))
+		return __strncpy_from_user(dst, src, count);
+	return -EFAULT;
+}
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 for error
+ */
+extern int __strnlen_user(const char __user *str, long len, unsigned long top);
+
+/*
+ * Returns the length of the string at str (including the null byte),
+ * or 0 if we hit a page we can't access,
+ * or something > len if we didn't find a null byte.
+ *
+ * The `top' parameter to __strnlen_user is to make sure that
+ * we can never overflow from the user area into kernel space.
+ */
+static inline int strnlen_user(const char __user *str, long len)
+{
+	unsigned long top = current->thread.fs.seg;
+
+	if ((unsigned long)str > top)
+		return 0;
+	return __strnlen_user(str, len, top);
+}
+
+#define strlen_user(str)	strnlen_user((str), 0x7ffffffe)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#endif /* _ARCH_POWERPC_UACCESS_H */
diff --git a/include/asm-powerpc/ucontext.h b/include/asm-powerpc/ucontext.h
new file mode 100644
index 00000000000..d9a4ddf0cc8
--- /dev/null
+++ b/include/asm-powerpc/ucontext.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_POWERPC_UCONTEXT_H
+#define _ASM_POWERPC_UCONTEXT_H
+
+#ifdef __powerpc64__
+#include <asm/sigcontext.h>
+#else
+#include <asm/elf.h>
+#endif
+#include <asm/signal.h>
+
+#ifndef __powerpc64__
+struct mcontext {
+	elf_gregset_t	mc_gregs;
+	elf_fpregset_t	mc_fregs;
+	unsigned long	mc_pad[2];
+	elf_vrregset_t	mc_vregs __attribute__((__aligned__(16)));
+};
+#endif
+
+struct ucontext {
+	unsigned long	uc_flags;
+	struct ucontext __user *uc_link;
+	stack_t		uc_stack;
+#ifndef __powerpc64__
+	int		uc_pad[7];
+	struct mcontext	__user *uc_regs;	/* points to uc_mcontext field */
+#endif
+	sigset_t	uc_sigmask;
+	/* glibc has 1024-bit signal masks, ours are 64-bit */
+#ifdef __powerpc64__
+	sigset_t	__unused[15];	/* Allow for uc_sigmask growth */
+	struct sigcontext	uc_mcontext;	/* last for extensibility */
+#else
+	int		uc_maskext[30];
+	int		uc_pad2[3];
+	struct mcontext	uc_mcontext;
+#endif
+};
+
+#endif /* _ASM_POWERPC_UCONTEXT_H */
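
The ppc64 half of tlbflush.h above defers invalidations: entries accumulate in a
per-cpu ppc64_tlb_batch and are drained by __flush_tlb_pending(), either when the
batch fills or when flush_tlb_pending() is called explicitly. The following is a
minimal, self-contained sketch of that batching pattern only; the struct layout,
the missing per-cpu handling, and the printf standing in for the hardware
invalidate are all simplifications, not the kernel's implementation.

/*
 * Simplified model of the ppc64_tlb_batch flush pattern: queue
 * virtual addresses, then flush the whole batch in one operation.
 * BATCH_NR mirrors PPC64_TLB_BATCH_NR; everything else is a stand-in.
 */
#include <stdio.h>

#define BATCH_NR 192

struct tlb_batch {
	unsigned long index;		/* entries queued so far */
	unsigned long vaddr[BATCH_NR];
};

static struct tlb_batch batch;		/* per-cpu in the real header */

static void flush_batch(struct tlb_batch *b)
{
	/* the kernel would invalidate b->index hash/TLB entries here */
	printf("flushing %lu entries\n", b->index);
	b->index = 0;
}

/* queue one page invalidation; flush when the batch is full */
static void queue_invalidate(unsigned long va)
{
	batch.vaddr[batch.index++] = va;
	if (batch.index == BATCH_NR)
		flush_batch(&batch);
}

/* mirrors flush_tlb_pending(): drain whatever is queued */
static void flush_pending(void)
{
	if (batch.index)
		flush_batch(&batch);
}

int main(void)
{
	unsigned long va;

	for (va = 0; va < 200 * 4096UL; va += 4096)
		queue_invalidate(va);	/* flushes once at entry 192 */
	flush_pending();		/* drains the 8 leftover entries */
	return 0;
}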
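
The 32-bit __access_ok() in uaccess.h above is shaped to dodge integer overflow:
a naive `addr + size - 1 <= seg` test wraps around for very large sizes and
wrongly accepts the range, whereas checking `(size - 1) <= seg - addr` after
establishing `addr <= seg` cannot wrap. A standalone demonstration of the
difference (ordinary user-space C; SEG is an invented stand-in for
(segment).seg, the highest legal address):

/*
 * Why the header checks ((size) - 1) <= ((segment).seg - (addr))
 * rather than addr + size - 1 <= seg.
 */
#include <stdio.h>

#define SEG 0x7fffffffUL		/* stand-in for (segment).seg */

static int naive_ok(unsigned long addr, unsigned long size)
{
	return addr + size - 1 <= SEG;	/* addr + size can wrap past 0 */
}

static int safe_ok(unsigned long addr, unsigned long size)
{
	/* same shape as the 32-bit __access_ok() above */
	return addr <= SEG &&
	       (size == 0 || (size - 1) <= (SEG - addr));
}

int main(void)
{
	unsigned long addr = 0x1000, size = ~0UL;	/* addr + size wraps */

	printf("naive_ok: %d (overflow wrongly accepts)\n",
	       naive_ok(addr, size));
	printf("safe_ok:  %d (correctly rejects)\n",
	       safe_ok(addr, size));
	return 0;
}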
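
Finally, a short usage sketch for the transfer routines uaccess.h defines. This
is hypothetical driver-side code, not part of the patch: struct foo_args and
foo_set_value are invented names. It relies only on the conventions documented
above, namely that get_user()/put_user() return 0 or -EFAULT and that
copy_from_user() returns the number of bytes it could not copy.

/* Hypothetical kernel-context example (invented names). */
#include <linux/errno.h>
#include <asm/uaccess.h>

struct foo_args {
	int index;
	unsigned long value;
};

static int foo_set_value(struct foo_args __user *uargs)
{
	struct foo_args args;
	int idx;

	/* bulk copy: a non-zero return means some bytes were left uncopied */
	if (copy_from_user(&args, uargs, sizeof(args)))
		return -EFAULT;

	/* single-value read: returns 0 on success, -EFAULT on fault */
	if (get_user(idx, &uargs->index))
		return -EFAULT;

	/* single-value write back to user memory */
	return put_user(args.value + idx, &uargs->value);
}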