author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2009-09-13 21:16:56 -0700
---|---|---
committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2009-09-13 21:16:56 -0700
commit | fc8e1ead9314cf0e0f1922e661428b93d3a50d88 | (patch)
tree | f3cb97c4769b74f6627a59769f1ed5c92a13c58a | /arch/powerpc/include
parent | 2bcaa6a4238094c5695d5b1943078388d82d3004 | (diff)
parent | 9de48cc300fb10f7d9faa978670becf5e352462a | (diff)
Merge branch 'next' into for-linus
Diffstat (limited to 'arch/powerpc/include')
54 files changed, 633 insertions, 225 deletions
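
Among the changes below, asm/delay.h gains a spin_event_timeout() macro for polling a condition with a timeout. A minimal usage sketch follows; the device, register layout, and bit name (mydev_regs, MYDEV_SR_DONE, mydev_wait_done) are invented purely for illustration and are not part of this merge.

```c
#include <linux/delay.h>	/* pulls in asm/delay.h: spin_event_timeout() */
#include <linux/errno.h>
#include <linux/io.h>		/* in_be32() on powerpc */
#include <linux/types.h>

/* Hypothetical device: one big-endian status register with a DONE bit. */
#define MYDEV_SR_DONE	0x00000001

struct mydev_regs {
	u32 sr;			/* status register */
};

static int mydev_wait_done(struct mydev_regs __iomem *regs)
{
	/*
	 * Re-read the status register until DONE is set, giving up after
	 * 1000 microseconds and delaying 10 us between reads.  The macro
	 * returns the final value of the condition, so zero here means
	 * the timeout expired without the bit ever being seen.
	 */
	if (!spin_event_timeout(in_be32(&regs->sr) & MYDEV_SR_DONE, 1000, 10))
		return -ETIMEDOUT;

	return 0;
}
```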
diff --git a/arch/powerpc/include/asm/8253pit.h b/arch/powerpc/include/asm/8253pit.h index b70d6e53b30..a71c9c1455a 100644 --- a/arch/powerpc/include/asm/8253pit.h +++ b/arch/powerpc/include/asm/8253pit.h @@ -1,10 +1,3 @@ -#ifndef _ASM_POWERPC_8253PIT_H -#define _ASM_POWERPC_8253PIT_H - /* * 8253/8254 Programmable Interval Timer */ - -#define PIT_TICK_RATE 1193182UL - -#endif /* _ASM_POWERPC_8253PIT_H */ diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index b401950f525..4012483b189 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -470,8 +470,11 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +#else /* __powerpc64__ */ +#include <asm-generic/atomic64.h> + #endif /* __powerpc64__ */ -#include <asm-generic/atomic.h> +#include <asm-generic/atomic-long.h> #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_ATOMIC_H_ */ diff --git a/arch/powerpc/include/asm/bitsperlong.h b/arch/powerpc/include/asm/bitsperlong.h new file mode 100644 index 00000000000..5f1659032c4 --- /dev/null +++ b/arch/powerpc/include/asm/bitsperlong.h @@ -0,0 +1,12 @@ +#ifndef __ASM_POWERPC_BITSPERLONG_H +#define __ASM_POWERPC_BITSPERLONG_H + +#if defined(__powerpc64__) +# define __BITS_PER_LONG 64 +#else +# define __BITS_PER_LONG 32 +#endif + +#include <asm-generic/bitsperlong.h> + +#endif /* __ASM_POWERPC_BITSPERLONG_H */ diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h index 2ff798744c1..7685ffde882 100644 --- a/arch/powerpc/include/asm/cpm1.h +++ b/arch/powerpc/include/asm/cpm1.h @@ -598,8 +598,6 @@ typedef struct risc_timer_pram { #define CICR_IEN ((uint)0x00000080) /* Int. enable */ #define CICR_SPS ((uint)0x00000001) /* SCC Spread */ -#define IMAP_ADDR (get_immrbase()) - #define CPM_PIN_INPUT 0 #define CPM_PIN_OUTPUT 1 #define CPM_PIN_PRIMARY 0 diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h index 0f5e8ff59a8..990ff191da8 100644 --- a/arch/powerpc/include/asm/cpm2.h +++ b/arch/powerpc/include/asm/cpm2.h @@ -14,10 +14,6 @@ #include <asm/cpm.h> #include <sysdev/fsl_soc.h> -#ifdef CONFIG_PPC_85xx -#define CPM_MAP_ADDR (get_immrbase() + 0x80000) -#endif - /* CPM Command register. */ #define CPM_CR_RST ((uint)0x80000000) diff --git a/arch/powerpc/include/asm/delay.h b/arch/powerpc/include/asm/delay.h index f9200a65c63..52e4d54da2a 100644 --- a/arch/powerpc/include/asm/delay.h +++ b/arch/powerpc/include/asm/delay.h @@ -2,8 +2,11 @@ #define _ASM_POWERPC_DELAY_H #ifdef __KERNEL__ +#include <asm/time.h> + /* * Copyright 1996, Paul Mackerras. + * Copyright (C) 2009 Freescale Semiconductor, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -30,5 +33,40 @@ extern void udelay(unsigned long usecs); #define mdelay(n) udelay((n) * 1000) #endif +/** + * spin_event_timeout - spin until a condition gets true or a timeout elapses + * @condition: a C expression to evalate + * @timeout: timeout, in microseconds + * @delay: the number of microseconds to delay between each evaluation of + * @condition + * + * The process spins until the condition evaluates to true (non-zero) or the + * timeout elapses. The return value of this macro is the value of + * @condition when the loop terminates. This allows you to determine the cause + * of the loop terminates. 
If the return value is zero, then you know a + * timeout has occurred. + * + * This primary purpose of this macro is to poll on a hardware register + * until a status bit changes. The timeout ensures that the loop still + * terminates even if the bit never changes. The delay is for devices that + * need a delay in between successive reads. + * + * gcc will optimize out the if-statement if @delay is a constant. + */ +#define spin_event_timeout(condition, timeout, delay) \ +({ \ + typeof(condition) __ret; \ + unsigned long __loops = tb_ticks_per_usec * timeout; \ + unsigned long __start = get_tbl(); \ + while (!(__ret = (condition)) && (tb_ticks_since(__start) <= __loops)) \ + if (delay) \ + udelay(delay); \ + else \ + cpu_relax(); \ + if (!__ret) \ + __ret = (condition); \ + __ret; \ +}) + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_DELAY_H */ diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index cb448d68452..b44aaabdd1a 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -15,9 +15,18 @@ #include <linux/scatterlist.h> #include <linux/dma-attrs.h> #include <asm/io.h> +#include <asm/swiotlb.h> #define DMA_ERROR_CODE (~(dma_addr_t)0x0) +/* Some dma direct funcs must be visible for use in other dma_ops */ +extern void *dma_direct_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag); +extern void dma_direct_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle); + +extern unsigned long get_dma_direct_offset(struct device *dev); + #ifdef CONFIG_NOT_COHERENT_CACHE /* * DMA-consistent mapping functions for PowerPCs that don't support @@ -78,6 +87,8 @@ struct dma_mapping_ops { dma_addr_t dma_address, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs); + int (*addr_needs_map)(struct device *dev, dma_addr_t addr, + size_t size); #ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS void (*sync_single_range_for_cpu)(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, @@ -298,7 +309,9 @@ static inline void dma_sync_single_for_cpu(struct device *dev, struct dma_mapping_ops *dma_ops = get_dma_ops(dev); BUG_ON(!dma_ops); - dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0, + + if (dma_ops->sync_single_range_for_cpu) + dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0, size, direction); } @@ -309,7 +322,9 @@ static inline void dma_sync_single_for_device(struct device *dev, struct dma_mapping_ops *dma_ops = get_dma_ops(dev); BUG_ON(!dma_ops); - dma_ops->sync_single_range_for_device(dev, dma_handle, + + if (dma_ops->sync_single_range_for_device) + dma_ops->sync_single_range_for_device(dev, dma_handle, 0, size, direction); } @@ -320,7 +335,9 @@ static inline void dma_sync_sg_for_cpu(struct device *dev, struct dma_mapping_ops *dma_ops = get_dma_ops(dev); BUG_ON(!dma_ops); - dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction); + + if (dma_ops->sync_sg_for_cpu) + dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction); } static inline void dma_sync_sg_for_device(struct device *dev, @@ -330,7 +347,9 @@ static inline void dma_sync_sg_for_device(struct device *dev, struct dma_mapping_ops *dma_ops = get_dma_ops(dev); BUG_ON(!dma_ops); - dma_ops->sync_sg_for_device(dev, sgl, nents, direction); + + if (dma_ops->sync_sg_for_device) + dma_ops->sync_sg_for_device(dev, sgl, nents, direction); } static inline void dma_sync_single_range_for_cpu(struct device *dev, @@ -340,7 +359,9 @@ static inline void dma_sync_single_range_for_cpu(struct 
device *dev, struct dma_mapping_ops *dma_ops = get_dma_ops(dev); BUG_ON(!dma_ops); - dma_ops->sync_single_range_for_cpu(dev, dma_handle, + + if (dma_ops->sync_single_range_for_cpu) + dma_ops->sync_single_range_for_cpu(dev, dma_handle, offset, size, direction); } @@ -351,7 +372,9 @@ static inline void dma_sync_single_range_for_device(struct device *dev, struct dma_mapping_ops *dma_ops = get_dma_ops(dev); BUG_ON(!dma_ops); - dma_ops->sync_single_range_for_device(dev, dma_handle, offset, + + if (dma_ops->sync_single_range_for_device) + dma_ops->sync_single_range_for_device(dev, dma_handle, offset, size, direction); } #else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */ diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index d6b4a12cdef..014a624f4c8 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -256,11 +256,11 @@ do { \ * even if we have an executable stack. */ # define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \ - (exec_stk != EXSTACK_DISABLE_X) : 0) + (exec_stk == EXSTACK_DEFAULT) : 0) #else # define SET_PERSONALITY(ex) \ set_personality(PER_LINUX | (current->personality & (~PER_MASK))) -# define elf_read_implies_exec(ex, exec_stk) (exec_stk != EXSTACK_DISABLE_X) +# define elf_read_implies_exec(ex, exec_stk) (exec_stk == EXSTACK_DEFAULT) #endif /* __powerpc64__ */ extern int dcache_bsize; diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h new file mode 100644 index 00000000000..9154e852673 --- /dev/null +++ b/arch/powerpc/include/asm/emulated_ops.h @@ -0,0 +1,73 @@ +/* + * Copyright 2007 Sony Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. + * If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef _ASM_POWERPC_EMULATED_OPS_H +#define _ASM_POWERPC_EMULATED_OPS_H + +#include <asm/atomic.h> + + +#ifdef CONFIG_PPC_EMULATED_STATS + +struct ppc_emulated_entry { + const char *name; + atomic_t val; +}; + +extern struct ppc_emulated { +#ifdef CONFIG_ALTIVEC + struct ppc_emulated_entry altivec; +#endif + struct ppc_emulated_entry dcba; + struct ppc_emulated_entry dcbz; + struct ppc_emulated_entry fp_pair; + struct ppc_emulated_entry isel; + struct ppc_emulated_entry mcrxr; + struct ppc_emulated_entry mfpvr; + struct ppc_emulated_entry multiple; + struct ppc_emulated_entry popcntb; + struct ppc_emulated_entry spe; + struct ppc_emulated_entry string; + struct ppc_emulated_entry unaligned; +#ifdef CONFIG_MATH_EMULATION + struct ppc_emulated_entry math; +#elif defined(CONFIG_8XX_MINIMAL_FPEMU) + struct ppc_emulated_entry 8xx; +#endif +#ifdef CONFIG_VSX + struct ppc_emulated_entry vsx; +#endif +} ppc_emulated; + +extern u32 ppc_warn_emulated; + +extern void ppc_warn_emulated_print(const char *type); + +#define PPC_WARN_EMULATED(type) \ + do { \ + atomic_inc(&ppc_emulated.type.val); \ + if (ppc_warn_emulated) \ + ppc_warn_emulated_print(ppc_emulated.type.name); \ + } while (0) + +#else /* !CONFIG_PPC_EMULATED_STATS */ + +#define PPC_WARN_EMULATED(type) do { } while (0) + +#endif /* !CONFIG_PPC_EMULATED_STATS */ + +#endif /* _ASM_POWERPC_EMULATED_OPS_H */ diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index e4094a5cb05..cbd4dfa4bce 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -8,8 +8,6 @@ * 2 of the License, or (at your option) any later version. */ -#ifdef __ASSEMBLY__ - /* * Feature section common macros * @@ -23,10 +21,12 @@ /* 64 bits kernel, 32 bits code (ie. 
vdso32) */ #define FTR_ENTRY_LONG .llong #define FTR_ENTRY_OFFSET .long 0xffffffff; .long +#elif defined(CONFIG_PPC64) +#define FTR_ENTRY_LONG .llong +#define FTR_ENTRY_OFFSET .llong #else -/* 64 bit kernel 64 bit code, or 32 bit kernel 32 bit code */ -#define FTR_ENTRY_LONG PPC_LONG -#define FTR_ENTRY_OFFSET PPC_LONG +#define FTR_ENTRY_LONG .long +#define FTR_ENTRY_OFFSET .long #endif #define START_FTR_SECTION(label) label##1: @@ -141,6 +141,21 @@ label##5: \ #define ALT_FW_FTR_SECTION_END_IFCLR(msk) \ ALT_FW_FTR_SECTION_END_NESTED_IFCLR(msk, 97) +#ifndef __ASSEMBLY__ + +#define ASM_MMU_FTR_IF(section_if, section_else, msk, val) \ + stringify_in_c(BEGIN_MMU_FTR_SECTION) \ + section_if "; " \ + stringify_in_c(MMU_FTR_SECTION_ELSE) \ + section_else "; " \ + stringify_in_c(ALT_MMU_FTR_SECTION_END((msk), (val))) + +#define ASM_MMU_FTR_IFSET(section_if, section_else, msk) \ + ASM_MMU_FTR_IF(section_if, section_else, (msk), (msk)) + +#define ASM_MMU_FTR_IFCLR(section_if, section_else, msk) \ + ASM_MMU_FTR_IF(section_if, section_else, (msk), 0) + #endif /* __ASSEMBLY__ */ /* LWSYNC feature sections */ diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h index 63a4f779f53..1b5a21041f9 100644 --- a/arch/powerpc/include/asm/fsl_lbc.h +++ b/arch/powerpc/include/asm/fsl_lbc.h @@ -95,8 +95,8 @@ struct fsl_lbc_bank { }; struct fsl_lbc_regs { - struct fsl_lbc_bank bank[8]; - u8 res0[0x28]; + struct fsl_lbc_bank bank[12]; + u8 res0[0x8]; __be32 mar; /**< UPM Address Register */ u8 res1[0x4]; __be32 mamr; /**< UPMA Mode Register */ diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h index 684a73f4324..a74c4ee6c02 100644 --- a/arch/powerpc/include/asm/highmem.h +++ b/arch/powerpc/include/asm/highmem.h @@ -22,9 +22,7 @@ #ifdef __KERNEL__ -#include <linux/init.h> #include <linux/interrupt.h> -#include <linux/highmem.h> #include <asm/kmap_types.h> #include <asm/tlbflush.h> #include <asm/page.h> @@ -62,6 +60,9 @@ extern pte_t *pkmap_page_table; extern void *kmap_high(struct page *page); extern void kunmap_high(struct page *page); +extern void *kmap_atomic_prot(struct page *page, enum km_type type, + pgprot_t prot); +extern void kunmap_atomic(void *kvaddr, enum km_type type); static inline void *kmap(struct page *page) { @@ -79,62 +80,11 @@ static inline void kunmap(struct page *page) kunmap_high(page); } -/* - * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap - * gives a more generic (and caching) interface. But kmap_atomic can - * be used in IRQ contexts, so in some (very limited) cases we need - * it. 
- */ -static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) -{ - unsigned int idx; - unsigned long vaddr; - - /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ - pagefault_disable(); - if (!PageHighMem(page)) - return page_address(page); - - debug_kmap_atomic(type); - idx = type + KM_TYPE_NR*smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); -#ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(!pte_none(*(kmap_pte-idx))); -#endif - __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1); - local_flush_tlb_page(NULL, vaddr); - - return (void*) vaddr; -} - static inline void *kmap_atomic(struct page *page, enum km_type type) { return kmap_atomic_prot(page, type, kmap_prot); } -static inline void kunmap_atomic(void *kvaddr, enum km_type type) -{ -#ifdef CONFIG_DEBUG_HIGHMEM - unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; - enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); - - if (vaddr < __fix_to_virt(FIX_KMAP_END)) { - pagefault_enable(); - return; - } - - BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); - - /* - * force other mappings to Oops if they'll try to access - * this pte without first remap it - */ - pte_clear(&init_mm, vaddr, kmap_pte-idx); - local_flush_tlb_page(NULL, vaddr); -#endif - pagefault_enable(); -} - static inline struct page *kmap_atomic_to_page(void *ptr) { unsigned long idx, vaddr = (unsigned long) ptr; @@ -148,6 +98,7 @@ static inline struct page *kmap_atomic_to_page(void *ptr) return pte_page(*pte); } + #define flush_cache_kmaps() flush_cache_all() #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index b7e034b0a6d..8b505eaaa38 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -68,37 +68,37 @@ static inline int irqs_disabled_flags(unsigned long flags) #if defined(CONFIG_BOOKE) #define SET_MSR_EE(x) mtmsr(x) -#define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory") +#define raw_local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory") #else #define SET_MSR_EE(x) mtmsr(x) -#define local_irq_restore(flags) mtmsr(flags) +#define raw_local_irq_restore(flags) mtmsr(flags) #endif -static inline void local_irq_disable(void) +static inline void raw_local_irq_disable(void) { #ifdef CONFIG_BOOKE __asm__ __volatile__("wrteei 0": : :"memory"); #else unsigned long msr; - __asm__ __volatile__("": : :"memory"); + msr = mfmsr(); SET_MSR_EE(msr & ~MSR_EE); #endif } -static inline void local_irq_enable(void) +static inline void raw_local_irq_enable(void) { #ifdef CONFIG_BOOKE __asm__ __volatile__("wrteei 1": : :"memory"); #else unsigned long msr; - __asm__ __volatile__("": : :"memory"); + msr = mfmsr(); SET_MSR_EE(msr | MSR_EE); #endif } -static inline void local_irq_save_ptr(unsigned long *flags) +static inline void raw_local_irq_save_ptr(unsigned long *flags) { unsigned long msr; msr = mfmsr(); @@ -108,15 +108,14 @@ static inline void local_irq_save_ptr(unsigned long *flags) #else SET_MSR_EE(msr & ~MSR_EE); #endif - __asm__ __volatile__("": : :"memory"); } -#define local_save_flags(flags) ((flags) = mfmsr()) -#define local_irq_save(flags) local_irq_save_ptr(&flags) -#define irqs_disabled() ((mfmsr() & MSR_EE) == 0) +#define raw_local_save_flags(flags) ((flags) = mfmsr()) +#define raw_local_irq_save(flags) raw_local_irq_save_ptr(&flags) +#define raw_irqs_disabled() ((mfmsr() & MSR_EE) == 0) +#define 
raw_irqs_disabled_flags(flags) (((flags) & MSR_EE) == 0) -#define hard_irq_enable() local_irq_enable() -#define hard_irq_disable() local_irq_disable() +#define hard_irq_disable() raw_local_irq_disable() static inline int irqs_disabled_flags(unsigned long flags) { @@ -131,5 +130,43 @@ static inline int irqs_disabled_flags(unsigned long flags) */ struct irq_chip; +#ifdef CONFIG_PERF_COUNTERS + +#ifdef CONFIG_PPC64 +static inline unsigned long test_perf_counter_pending(void) +{ + unsigned long x; + + asm volatile("lbz %0,%1(13)" + : "=r" (x) + : "i" (offsetof(struct paca_struct, perf_counter_pending))); + return x; +} + +static inline void set_perf_counter_pending(void) +{ + asm volatile("stb %0,%1(13)" : : + "r" (1), + "i" (offsetof(struct paca_struct, perf_counter_pending))); +} + +static inline void clear_perf_counter_pending(void) +{ + asm volatile("stb %0,%1(13)" : : + "r" (0), + "i" (offsetof(struct paca_struct, perf_counter_pending))); +} +#endif /* CONFIG_PPC64 */ + +#else /* CONFIG_PERF_COUNTERS */ + +static inline unsigned long test_perf_counter_pending(void) +{ + return 0; +} + +static inline void clear_perf_counter_pending(void) {} +#endif /* CONFIG_PERF_COUNTERS */ + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_HW_IRQ_H */ diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 7464c0daddd..7ead7c16fb7 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -35,6 +35,16 @@ #define IOMMU_PAGE_MASK (~((1 << IOMMU_PAGE_SHIFT) - 1)) #define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE) +/* Cell page table entries */ +#define CBE_IOPTE_PP_W 0x8000000000000000ul /* protection: write */ +#define CBE_IOPTE_PP_R 0x4000000000000000ul /* protection: read */ +#define CBE_IOPTE_M 0x2000000000000000ul /* coherency required */ +#define CBE_IOPTE_SO_R 0x1000000000000000ul /* ordering: writes */ +#define CBE_IOPTE_SO_RW 0x1800000000000000ul /* ordering: r & w */ +#define CBE_IOPTE_RPN_Mask 0x07fffffffffff000ul /* RPN */ +#define CBE_IOPTE_H 0x0000000000000800ul /* cache hint */ +#define CBE_IOPTE_IOID_Mask 0x00000000000007fful /* ioid */ + /* Boot time flags */ extern int iommu_is_off; extern int iommu_force_on; diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index dfdf13c9fef..fddc3ed715f 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -34,7 +34,7 @@ #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 /* We don't currently support large pages. 
*/ -#define KVM_PAGES_PER_HPAGE (1<<31) +#define KVM_PAGES_PER_HPAGE (1UL << 31) struct kvm; struct kvm_run; diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index d2a65e8ca6a..f78f65c38f0 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -20,6 +20,11 @@ #define _ASM_POWERPC_LPPACA_H #ifdef __KERNEL__ +/* These definitions relate to hypervisors that only exist when using + * a server type processor + */ +#ifdef CONFIG_PPC_BOOK3S + //============================================================================= // // This control block contains the data that is shared between the @@ -158,5 +163,6 @@ struct slb_shadow { extern struct slb_shadow slb_shadow[]; +#endif /* CONFIG_PPC_BOOK3S */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_LPPACA_H */ diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 0efdb1dfdc5..11d1fc3a896 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -110,6 +110,10 @@ struct machdep_calls { void (*show_percpuinfo)(struct seq_file *m, int i); void (*init_IRQ)(void); + + /* Return an irq, or NO_IRQ to indicate there are none pending. + * If for some reason there is no irq, but the interrupt + * shouldn't be counted as spurious, return NO_IRQ_IGNORE. */ unsigned int (*get_irq)(void); #ifdef CONFIG_KEXEC void (*kexec_cpu_down)(int crash_shutdown, int secondary); diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h index e7b99bac9f4..7b1c49811a2 100644 --- a/arch/powerpc/include/asm/mman.h +++ b/arch/powerpc/include/asm/mman.h @@ -1,7 +1,7 @@ #ifndef _ASM_POWERPC_MMAN_H #define _ASM_POWERPC_MMAN_H -#include <asm-generic/mman.h> +#include <asm-generic/mman-common.h> /* * This program is free software; you can redistribute it and/or diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index cbf15438709..fb57ded592f 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -52,6 +52,11 @@ */ #define MMU_FTR_NEED_DTLB_SW_LRU ASM_CONST(0x00200000) +/* This indicates that the processor uses the ISA 2.06 server tlbie + * mnemonics + */ +#define MMU_FTR_TLBIE_206 ASM_CONST(0x00400000) + #ifndef __ASSEMBLY__ #include <asm/cputable.h> @@ -69,10 +74,10 @@ extern void early_init_mmu_secondary(void); #endif /* !__ASSEMBLY__ */ -#ifdef CONFIG_PPC64 +#if defined(CONFIG_PPC_STD_MMU_64) /* 64-bit classic hash table MMU */ # include <asm/mmu-hash64.h> -#elif defined(CONFIG_PPC_STD_MMU) +#elif defined(CONFIG_PPC_STD_MMU_32) /* 32-bit classic hash table MMU */ # include <asm/mmu-hash32.h> #elif defined(CONFIG_40x) diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h index 52e049cd9e6..1b4f697abbd 100644 --- a/arch/powerpc/include/asm/mpc52xx.h +++ b/arch/powerpc/include/asm/mpc52xx.h @@ -16,6 +16,7 @@ #ifndef __ASSEMBLY__ #include <asm/types.h> #include <asm/prom.h> +#include <asm/mpc5xxx.h> #endif /* __ASSEMBLY__ */ #include <linux/suspend.h> @@ -268,7 +269,6 @@ struct mpc52xx_intr { #ifndef __ASSEMBLY__ /* mpc52xx_common.c */ -extern unsigned int mpc52xx_find_ipb_freq(struct device_node *node); extern void mpc5200_setup_xlb_arbiter(void); extern void mpc52xx_declare_of_platform_devices(void); extern void mpc52xx_map_common_devices(void); diff --git a/arch/powerpc/include/asm/mpc52xx_psc.h b/arch/powerpc/include/asm/mpc52xx_psc.h index a218da6bec7..fb841205745 100644 --- a/arch/powerpc/include/asm/mpc52xx_psc.h +++ 
b/arch/powerpc/include/asm/mpc52xx_psc.h @@ -28,6 +28,10 @@ #define MPC52xx_PSC_MAXNUM 6 /* Programmable Serial Controller (PSC) status register bits */ +#define MPC52xx_PSC_SR_UNEX_RX 0x0001 +#define MPC52xx_PSC_SR_DATA_VAL 0x0002 +#define MPC52xx_PSC_SR_DATA_OVR 0x0004 +#define MPC52xx_PSC_SR_CMDSEND 0x0008 #define MPC52xx_PSC_SR_CDE 0x0080 #define MPC52xx_PSC_SR_RXRDY 0x0100 #define MPC52xx_PSC_SR_RXFULL 0x0200 @@ -61,6 +65,12 @@ #define MPC52xx_PSC_RXTX_FIFO_EMPTY 0x0001 /* PSC interrupt status/mask bits */ +#define MPC52xx_PSC_IMR_UNEX_RX_SLOT 0x0001 +#define MPC52xx_PSC_IMR_DATA_VALID 0x0002 +#define MPC52xx_PSC_IMR_DATA_OVR 0x0004 +#define MPC52xx_PSC_IMR_CMD_SEND 0x0008 +#define MPC52xx_PSC_IMR_ERROR 0x0040 +#define MPC52xx_PSC_IMR_DEOF 0x0080 #define MPC52xx_PSC_IMR_TXRDY 0x0100 #define MPC52xx_PSC_IMR_RXRDY 0x0200 #define MPC52xx_PSC_IMR_DB 0x0400 @@ -117,6 +127,7 @@ #define MPC52xx_PSC_SICR_SIM_FIR (0x6 << 24) #define MPC52xx_PSC_SICR_SIM_CODEC_24 (0x7 << 24) #define MPC52xx_PSC_SICR_SIM_CODEC_32 (0xf << 24) +#define MPC52xx_PSC_SICR_AWR (1 << 30) #define MPC52xx_PSC_SICR_GENCLK (1 << 23) #define MPC52xx_PSC_SICR_I2S (1 << 22) #define MPC52xx_PSC_SICR_CLKPOL (1 << 21) diff --git a/arch/powerpc/include/asm/mpc512x.h b/arch/powerpc/include/asm/mpc5xxx.h index c48a1658eea..5ce9c5fa434 100644 --- a/arch/powerpc/include/asm/mpc512x.h +++ b/arch/powerpc/include/asm/mpc5xxx.h @@ -4,7 +4,7 @@ * Author: John Rigby, <jrigby@freescale.com>, Friday Apr 13 2007 * * Description: - * MPC5121 Prototypes and definitions + * MPC5xxx Prototypes and definitions * * This is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by @@ -13,10 +13,10 @@ * */ -#ifndef __ASM_POWERPC_MPC512x_H__ -#define __ASM_POWERPC_MPC512x_H__ +#ifndef __ASM_POWERPC_MPC5xxx_H__ +#define __ASM_POWERPC_MPC5xxx_H__ -extern unsigned long mpc512x_find_ips_freq(struct device_node *node); +extern unsigned long mpc5xxx_get_bus_frequency(struct device_node *node); -#endif /* __ASM_POWERPC_MPC512x_H__ */ +#endif /* __ASM_POWERPC_MPC5xxx_H__ */ diff --git a/arch/powerpc/include/asm/mpc86xx.h b/arch/powerpc/include/asm/mpc86xx.h deleted file mode 100644 index 15f650f987e..00000000000 --- a/arch/powerpc/include/asm/mpc86xx.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * MPC86xx definitions - * - * Author: Jeff Brown - * - * Copyright 2004 Freescale Semiconductor, Inc - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#ifdef __KERNEL__ -#ifndef __ASM_POWERPC_MPC86xx_H__ -#define __ASM_POWERPC_MPC86xx_H__ - -#include <asm/mmu.h> - -#ifdef CONFIG_PPC_86xx - -#define CPU0_BOOT_RELEASE 0x01000000 -#define CPU1_BOOT_RELEASE 0x02000000 -#define CPU_ALL_RELEASED (CPU0_BOOT_RELEASE | CPU1_BOOT_RELEASE) -#define MCM_PORT_CONFIG_OFFSET 0x1010 - -/* Offset from CCSRBAR */ -#define MPC86xx_MCM_OFFSET (0x00000) -#define MPC86xx_MCM_SIZE (0x02000) - -#endif /* CONFIG_PPC_86xx */ -#endif /* __ASM_POWERPC_MPC86xx_H__ */ -#endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 082b3aedf14..c8a3cbfe02f 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -43,6 +43,7 @@ struct task_struct; * processor. 
*/ struct paca_struct { +#ifdef CONFIG_PPC_BOOK3S /* * Because hw_cpu_id, unlike other paca fields, is accessed * routinely from other CPUs (from the IRQ code), we stick to @@ -51,7 +52,7 @@ struct paca_struct { */ struct lppaca *lppaca_ptr; /* Pointer to LpPaca for PLIC */ - +#endif /* CONFIG_PPC_BOOK3S */ /* * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c * load lock_token and paca_index with a single lwz @@ -64,13 +65,16 @@ struct paca_struct { u64 kernel_toc; /* Kernel TOC address */ u64 kernelbase; /* Base address of kernel */ u64 kernel_msr; /* MSR while running in kernel */ +#ifdef CONFIG_PPC_STD_MMU_64 u64 stab_real; /* Absolute address of segment table */ u64 stab_addr; /* Virtual address of segment table */ +#endif /* CONFIG_PPC_STD_MMU_64 */ void *emergency_sp; /* pointer to emergency stack */ u64 data_offset; /* per cpu data offset */ s16 hw_cpu_id; /* Physical processor number */ u8 cpu_start; /* At startup, processor spins until */ /* this becomes non-zero. */ +#ifdef CONFIG_PPC_STD_MMU_64 struct slb_shadow *slb_shadow_ptr; /* @@ -81,11 +85,13 @@ struct paca_struct { u64 exmc[10]; /* used for machine checks */ u64 exslb[10]; /* used for SLB/segment table misses * on the linear mapping */ - - mm_context_t context; + /* SLB related definitions */ u16 vmalloc_sllp; u16 slb_cache_ptr; u16 slb_cache[SLB_CACHE_ENTRIES]; +#endif /* CONFIG_PPC_STD_MMU_64 */ + + mm_context_t context; /* * then miscellaneous read-write fields @@ -99,6 +105,7 @@ struct paca_struct { u8 soft_enabled; /* irq soft-enable flag */ u8 hard_enabled; /* set if irqs are enabled in MSR */ u8 io_sync; /* writel() needs spin_unlock sync */ + u8 perf_counter_pending; /* PM interrupt while soft-disabled */ /* Stuff for accurate time accounting */ u64 user_time; /* accumulated usermode TB ticks */ diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 32cbf16f10e..4940662ee87 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -231,6 +231,11 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p); extern int page_is_ram(unsigned long pfn); +#ifdef CONFIG_PPC_SMLPAR +void arch_free_page(struct page *page, int order); +#define HAVE_ARCH_FREE_PAGE +#endif + struct vm_area_struct; typedef struct page *pgtable_t; diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h index a0e3f6e6b4e..bd0849dbcaa 100644 --- a/arch/powerpc/include/asm/page_32.h +++ b/arch/powerpc/include/asm/page_32.h @@ -41,7 +41,7 @@ extern void clear_pages(void *page, int order); static inline void clear_page(void *page) { clear_pages(page, 0); } extern void copy_page(void *to, void *from); -#include <asm-generic/page.h> +#include <asm-generic/getorder.h> #define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1) #define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1) diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h index 043bfdfe4f7..5817a3b747e 100644 --- a/arch/powerpc/include/asm/page_64.h +++ b/arch/powerpc/include/asm/page_64.h @@ -180,6 +180,6 @@ do { \ (test_thread_flag(TIF_32BIT) ? 
\ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) -#include <asm-generic/page.h> +#include <asm-generic/getorder.h> #endif /* _ASM_POWERPC_PAGE_64_H */ diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 84007afabdb..4c61fa0b8d7 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -86,17 +86,12 @@ struct pci_controller { void *io_base_alloc; #endif resource_size_t io_base_phys; -#ifndef CONFIG_PPC64 resource_size_t pci_io_size; -#endif /* Some machines (PReP) have a non 1:1 mapping of * the PCI memory space in the CPU bus space */ resource_size_t pci_mem_offset; -#ifdef CONFIG_PPC64 - unsigned long pci_io_size; -#endif /* Some machines have a special region to forward the ISA * "memory" cycles such as VGA memory regions. Left to 0 @@ -140,10 +135,12 @@ struct pci_controller { struct resource io_resource; struct resource mem_resources[3]; int global_number; /* PCI domain number */ + + resource_size_t dma_window_base_cur; + resource_size_t dma_window_size; + #ifdef CONFIG_PPC64 unsigned long buid; - unsigned long dma_window_base_cur; - unsigned long dma_window_size; void *private_data; #endif /* CONFIG_PPC64 */ @@ -185,7 +182,6 @@ extern int early_find_capability(struct pci_controller *hose, int bus, extern void setup_indirect_pci(struct pci_controller* hose, resource_size_t cfg_addr, resource_size_t cfg_data, u32 flags); -extern void setup_grackle(struct pci_controller *hose); #else /* CONFIG_PPC64 */ /* @@ -221,6 +217,7 @@ struct pci_dn { #define PCI_DN(dn) ((struct pci_dn *) (dn)->data) extern struct device_node *fetch_dev_dn(struct pci_dev *dev); +extern void * update_dn_pci_info(struct device_node *dn, void *data); /* Get a device_node from a pci_dev. This code must be fast except * in the case where the sysdata is incorrect and needs to be fixed diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index ba17d5d90a4..d9483c504d2 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -195,19 +195,6 @@ extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, struct pci_bus_region *region); -static inline struct resource *pcibios_select_root(struct pci_dev *pdev, - struct resource *res) -{ - struct resource *root = NULL; - - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - if (res->flags & IORESOURCE_MEM) - root = &iomem_resource; - - return root; -} - extern void pcibios_claim_one_bus(struct pci_bus *b); extern void pcibios_finish_adding_to_bus(struct pci_bus *bus); diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h new file mode 100644 index 00000000000..0ea0639fcf7 --- /dev/null +++ b/arch/powerpc/include/asm/perf_counter.h @@ -0,0 +1,110 @@ +/* + * Performance counter support - PowerPC-specific definitions. + * + * Copyright 2008-2009 Paul Mackerras, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include <linux/types.h> + +#include <asm/hw_irq.h> + +#define MAX_HWCOUNTERS 8 +#define MAX_EVENT_ALTERNATIVES 8 +#define MAX_LIMITED_HWCOUNTERS 2 + +/* + * This struct provides the constants and functions needed to + * describe the PMU on a particular POWER-family CPU. 
+ */ +struct power_pmu { + const char *name; + int n_counter; + int max_alternatives; + unsigned long add_fields; + unsigned long test_adder; + int (*compute_mmcr)(u64 events[], int n_ev, + unsigned int hwc[], unsigned long mmcr[]); + int (*get_constraint)(u64 event, unsigned long *mskp, + unsigned long *valp); + int (*get_alternatives)(u64 event, unsigned int flags, + u64 alt[]); + void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); + int (*limited_pmc_event)(u64 event); + u32 flags; + int n_generic; + int *generic_events; + int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; +}; + +/* + * Values for power_pmu.flags + */ +#define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */ +#define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */ + +/* + * Values for flags to get_alternatives() + */ +#define PPMU_LIMITED_PMC_OK 1 /* can put this on a limited PMC */ +#define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */ +#define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */ + +extern int register_power_pmu(struct power_pmu *); + +struct pt_regs; +extern unsigned long perf_misc_flags(struct pt_regs *regs); +extern unsigned long perf_instruction_pointer(struct pt_regs *regs); + +#define PERF_COUNTER_INDEX_OFFSET 1 + +/* + * Only override the default definitions in include/linux/perf_counter.h + * if we have hardware PMU support. + */ +#ifdef CONFIG_PPC_PERF_CTRS +#define perf_misc_flags(regs) perf_misc_flags(regs) +#endif + +/* + * The power_pmu.get_constraint function returns a 32/64-bit value and + * a 32/64-bit mask that express the constraints between this event and + * other events. + * + * The value and mask are divided up into (non-overlapping) bitfields + * of three different types: + * + * Select field: this expresses the constraint that some set of bits + * in MMCR* needs to be set to a specific value for this event. For a + * select field, the mask contains 1s in every bit of the field, and + * the value contains a unique value for each possible setting of the + * MMCR* bits. The constraint checking code will ensure that two events + * that set the same field in their masks have the same value in their + * value dwords. + * + * Add field: this expresses the constraint that there can be at most + * N events in a particular class. A field of k bits can be used for + * N <= 2^(k-1) - 1. The mask has the most significant bit of the field + * set (and the other bits 0), and the value has only the least significant + * bit of the field set. In addition, the 'add_fields' and 'test_adder' + * in the struct power_pmu for this processor come into play. The + * add_fields value contains 1 in the LSB of the field, and the + * test_adder contains 2^(k-1) - 1 - N in the field. + * + * NAND field: this expresses the constraint that you may not have events + * in all of a set of classes. (For example, on PPC970, you can't select + * events from the FPU, ISU and IDU simultaneously, although any two are + * possible.) For N classes, the field is N+1 bits wide, and each class + * is assigned one bit from the least-significant N bits. The mask has + * only the most-significant bit set, and the value has only the bit + * for the event's class set. The test_adder has the least significant + * bit set in the field. + * + * If an event is not subject to the constraint expressed by a particular + * field, then it will have 0 in both the mask and value for that field. 
+ */ diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h index 0815eb40aca..c9500d666a1 100644 --- a/arch/powerpc/include/asm/pgalloc-32.h +++ b/arch/powerpc/include/asm/pgalloc-32.h @@ -16,7 +16,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); */ /* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */ #define pmd_free(mm, x) do { } while (0) -#define __pmd_free_tlb(tlb,x) do { } while (0) +#define __pmd_free_tlb(tlb,x,a) do { } while (0) /* #define pgd_populate(mm, pmd, pte) BUG() */ #ifndef CONFIG_BOOKE diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index afda2bdd860..e6f069c4f71 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h @@ -118,11 +118,11 @@ static inline void pgtable_free(pgtable_free_t pgf) kmem_cache_free(pgtable_cache[cachenum], p); } -#define __pmd_free_tlb(tlb, pmd) \ +#define __pmd_free_tlb(tlb, pmd,addr) \ pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) #ifndef CONFIG_PPC_64K_PAGES -#define __pud_free_tlb(tlb, pud) \ +#define __pud_free_tlb(tlb, pud, addr) \ pgtable_free_tlb(tlb, pgtable_free_cache(pud, \ PUD_CACHE_NUM, PUD_TABLE_SIZE-1)) #endif /* CONFIG_PPC_64K_PAGES */ diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h index 5d8480265a7..1730e5e298d 100644 --- a/arch/powerpc/include/asm/pgalloc.h +++ b/arch/powerpc/include/asm/pgalloc.h @@ -38,14 +38,14 @@ static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum, extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); #ifdef CONFIG_SMP -#define __pte_free_tlb(tlb,ptepage) \ +#define __pte_free_tlb(tlb,ptepage,address) \ do { \ pgtable_page_dtor(ptepage); \ pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \ - PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \ + PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \ } while (0) #else -#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte)) +#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, (pte)) #endif diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index c40db05f21e..8cd083c6150 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h @@ -31,9 +31,11 @@ #error TASK_SIZE_USER64 exceeds pagetable range #endif +#ifdef CONFIG_PPC_STD_MMU_64 #if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT)) #error TASK_SIZE_USER64 exceeds user VSID range #endif +#endif /* * Define the address range of the vmalloc VM area. 
@@ -199,8 +201,11 @@ static inline unsigned long pte_update(struct mm_struct *mm, if (!huge) assert_pte_locked(mm, addr); +#ifdef CONFIG_PPC_STD_MMU_64 if (old & _PAGE_HASHPTE) hpte_need_flush(mm, addr, ptep, old, huge); +#endif + return old; } diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 640ccbbc097..b74f16d45cb 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -25,6 +25,7 @@ #define PPC_INST_LSWI 0x7c0004aa #define PPC_INST_LSWX 0x7c00042a #define PPC_INST_LWSYNC 0x7c2004ac +#define PPC_INST_LXVD2X 0x7c000698 #define PPC_INST_MCRXR 0x7c000400 #define PPC_INST_MCRXR_MASK 0xfc0007fe #define PPC_INST_MFSPR_PVR 0x7c1f42a6 @@ -43,14 +44,18 @@ #define PPC_INST_STSWI 0x7c0005aa #define PPC_INST_STSWX 0x7c00052a +#define PPC_INST_STXVD2X 0x7c000798 +#define PPC_INST_TLBIE 0x7c000264 #define PPC_INST_TLBILX 0x7c000024 #define PPC_INST_WAIT 0x7c00007c /* macros to insert fields into opcodes */ -#define __PPC_RA(a) ((a & 0x1f) << 16) -#define __PPC_RB(b) ((b & 0x1f) << 11) -#define __PPC_T_TLB(t) ((t & 0x3) << 21) -#define __PPC_WC(w) ((w & 0x3) << 21) +#define __PPC_RA(a) (((a) & 0x1f) << 16) +#define __PPC_RB(b) (((b) & 0x1f) << 11) +#define __PPC_RS(s) (((s) & 0x1f) << 21) +#define __PPC_XS(s) ((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5)) +#define __PPC_T_TLB(t) (((t) & 0x3) << 21) +#define __PPC_WC(w) (((w) & 0x3) << 21) /* Deal with instructions that older assemblers aren't aware of */ #define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \ @@ -69,5 +74,17 @@ #define PPC_TLBILX_VA(a, b) PPC_TLBILX(3, a, b) #define PPC_WAIT(w) stringify_in_c(.long PPC_INST_WAIT | \ __PPC_WC(w)) +#define PPC_TLBIE(lp,a) stringify_in_c(.long PPC_INST_TLBIE | \ + __PPC_RB(a) | __PPC_RS(lp)) + +/* + * Define what the VSX XX1 form instructions will look like, then add + * the 128 bit load store instructions based on that. + */ +#define VSX_XX1(s, a, b) (__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b)) +#define STXVD2X(s, a, b) stringify_in_c(.long PPC_INST_STXVD2X | \ + VSX_XX1((s), (a), (b))) +#define LXVD2X(s, a, b) stringify_in_c(.long PPC_INST_LXVD2X | \ + VSX_XX1((s), (a), (b))) #endif /* _ASM_POWERPC_PPC_OPCODE_H */ diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 384d90c9c27..f9729529c20 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -76,16 +76,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ REST_10GPRS(22, base) #endif -/* - * Define what the VSX XX1 form instructions will look like, then add - * the 128 bit load store instructions based on that. 
- */ -#define VSX_XX1(xs, ra, rb) (((xs) & 0x1f) << 21 | ((ra) << 16) | \ - ((rb) << 11) | (((xs) >> 5))) - -#define STXVD2X(xs, ra, rb) .long (0x7c000798 | VSX_XX1((xs), (ra), (rb))) -#define LXVD2X(xs, ra, rb) .long (0x7c000698 | VSX_XX1((xs), (ra), (rb))) - #define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base) #define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base) #define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base) diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h index cdb6fd814de..7f065e178ec 100644 --- a/arch/powerpc/include/asm/ps3.h +++ b/arch/powerpc/include/asm/ps3.h @@ -53,6 +53,13 @@ enum ps3_param_av_multi_out ps3_os_area_get_av_multi_out(void); extern u64 ps3_os_area_get_rtc_diff(void); extern void ps3_os_area_set_rtc_diff(u64 rtc_diff); +struct ps3_os_area_flash_ops { + ssize_t (*read)(void *buf, size_t count, loff_t pos); + ssize_t (*write)(const void *buf, size_t count, loff_t pos); +}; + +extern void ps3_os_area_flash_register(const struct ps3_os_area_flash_ops *ops); + /* dma routines */ enum ps3_dma_page_size { @@ -418,15 +425,15 @@ static inline struct ps3_system_bus_driver * * @data: Data to set */ -static inline void ps3_system_bus_set_driver_data( +static inline void ps3_system_bus_set_drvdata( struct ps3_system_bus_device *dev, void *data) { - dev->core.driver_data = data; + dev_set_drvdata(&dev->core, data); } -static inline void *ps3_system_bus_get_driver_data( +static inline void *ps3_system_bus_get_drvdata( struct ps3_system_bus_device *dev) { - return dev->core.driver_data; + return dev_get_drvdata(&dev->core); } /* These two need global scope for get_dma_ops(). */ @@ -520,7 +527,4 @@ void ps3_sync_irq(int node); u32 ps3_get_hw_thread_id(int cpu); u64 ps3_get_spe_id(void *arg); -/* mutex synchronizing GPU accesses and video mode changes */ -extern struct mutex ps3_gpu_mutex; - #endif diff --git a/arch/powerpc/include/asm/ps3gpu.h b/arch/powerpc/include/asm/ps3gpu.h new file mode 100644 index 00000000000..b2b89591907 --- /dev/null +++ b/arch/powerpc/include/asm/ps3gpu.h @@ -0,0 +1,86 @@ +/* + * PS3 GPU declarations. + * + * Copyright 2009 Sony Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. + * If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef _ASM_POWERPC_PS3GPU_H +#define _ASM_POWERPC_PS3GPU_H + +#include <linux/mutex.h> + +#include <asm/lv1call.h> + + +#define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC 0x101 +#define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP 0x102 + +#define L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP 0x600 +#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601 +#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT_SYNC 0x602 +#define L1GPU_CONTEXT_ATTRIBUTE_FB_CLOSE 0x603 + +#define L1GPU_FB_BLIT_WAIT_FOR_COMPLETION (1ULL << 32) + +#define L1GPU_DISPLAY_SYNC_HSYNC 1 +#define L1GPU_DISPLAY_SYNC_VSYNC 2 + + +/* mutex synchronizing GPU accesses and video mode changes */ +extern struct mutex ps3_gpu_mutex; + + +static inline int lv1_gpu_display_sync(u64 context_handle, u64 head, + u64 ddr_offset) +{ + return lv1_gpu_context_attribute(context_handle, + L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC, + head, ddr_offset, 0, 0); +} + +static inline int lv1_gpu_display_flip(u64 context_handle, u64 head, + u64 ddr_offset) +{ + return lv1_gpu_context_attribute(context_handle, + L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP, + head, ddr_offset, 0, 0); +} + +static inline int lv1_gpu_fb_setup(u64 context_handle, u64 xdr_lpar, + u64 xdr_size, u64 ioif_offset) +{ + return lv1_gpu_context_attribute(context_handle, + L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP, + xdr_lpar, xdr_size, ioif_offset, 0); +} + +static inline int lv1_gpu_fb_blit(u64 context_handle, u64 ddr_offset, + u64 ioif_offset, u64 sync_width, u64 pitch) +{ + return lv1_gpu_context_attribute(context_handle, + L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, + ddr_offset, ioif_offset, sync_width, + pitch); +} + +static inline int lv1_gpu_fb_close(u64 context_handle) +{ + return lv1_gpu_context_attribute(context_handle, + L1GPU_CONTEXT_ATTRIBUTE_FB_CLOSE, 0, + 0, 0, 0); +} + +#endif /* _ASM_POWERPC_PS3GPU_H */ diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h index e05d26fa372..82b72207c51 100644 --- a/arch/powerpc/include/asm/pte-hash64-64k.h +++ b/arch/powerpc/include/asm/pte-hash64-64k.h @@ -47,7 +47,8 @@ * generic accessors and iterators here */ #define __real_pte(e,p) ((real_pte_t) { \ - (e), pte_val(*((p) + PTRS_PER_PTE)) }) + (e), ((e) & _PAGE_COMBO) ? \ + (pte_val(*((p) + PTRS_PER_PTE))) : 0 }) #define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \ (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf)) #define __rpte_to_pte(r) ((r).pte) diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index c9c678fb253..8c341490cfc 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -135,7 +135,9 @@ do { \ * These are defined as per linux/ptrace.h, which see. 
*/ #define arch_has_single_step() (1) +#define arch_has_block_step() (!cpu_has_feature(CPU_FTR_601)) extern void user_enable_single_step(struct task_struct *); +extern void user_enable_block_step(struct task_struct *); extern void user_disable_single_step(struct task_struct *); #endif /* __ASSEMBLY__ */ @@ -288,4 +290,6 @@ extern void user_disable_single_step(struct task_struct *); #define PPC_PTRACE_PEEKUSR_3264 0x91 #define PPC_PTRACE_POKEUSR_3264 0x90 +#define PTRACE_SINGLEBLOCK 0x100 /* resume execution until next branch */ + #endif /* _ASM_POWERPC_PTRACE_H */ diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h index 2701753d993..157c5ca581c 100644 --- a/arch/powerpc/include/asm/qe.h +++ b/arch/powerpc/include/asm/qe.h @@ -22,7 +22,7 @@ #include <asm/cpm.h> #include <asm/immap_qe.h> -#define QE_NUM_OF_SNUM 28 +#define QE_NUM_OF_SNUM 256 /* There are 256 serial number in QE */ #define QE_NUM_OF_BRGS 16 #define QE_NUM_OF_PORTS 1024 @@ -152,6 +152,9 @@ unsigned int qe_get_brg_clk(void); int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier); int qe_get_snum(void); void qe_put_snum(u8 snum); +unsigned int qe_get_num_of_risc(void); +unsigned int qe_get_num_of_snums(void); + /* we actually use cpm_muram implementation, define this for convenience */ #define qe_muram_init cpm_muram_init #define qe_muram_alloc cpm_muram_alloc @@ -231,12 +234,16 @@ struct qe_bd { #define QE_ALIGNMENT_OF_PRAM 64 /* RISC allocation */ -enum qe_risc_allocation { - QE_RISC_ALLOCATION_RISC1 = 1, /* RISC 1 */ - QE_RISC_ALLOCATION_RISC2 = 2, /* RISC 2 */ - QE_RISC_ALLOCATION_RISC1_AND_RISC2 = 3 /* Dynamically choose - RISC 1 or RISC 2 */ -}; +#define QE_RISC_ALLOCATION_RISC1 0x1 /* RISC 1 */ +#define QE_RISC_ALLOCATION_RISC2 0x2 /* RISC 2 */ +#define QE_RISC_ALLOCATION_RISC3 0x4 /* RISC 3 */ +#define QE_RISC_ALLOCATION_RISC4 0x8 /* RISC 4 */ +#define QE_RISC_ALLOCATION_RISC1_AND_RISC2 (QE_RISC_ALLOCATION_RISC1 | \ + QE_RISC_ALLOCATION_RISC2) +#define QE_RISC_ALLOCATION_FOUR_RISCS (QE_RISC_ALLOCATION_RISC1 | \ + QE_RISC_ALLOCATION_RISC2 | \ + QE_RISC_ALLOCATION_RISC3 | \ + QE_RISC_ALLOCATION_RISC4) /* QE extended filtering Table Lookup Key Size */ enum qe_fltr_tbl_lookup_key_size { @@ -668,6 +675,8 @@ struct ucc_slow_pram { #define UCC_GETH_UPSMR_RMM 0x00001000 #define UCC_GETH_UPSMR_CAM 0x00000400 #define UCC_GETH_UPSMR_BRO 0x00000200 +#define UCC_GETH_UPSMR_SMM 0x00000080 +#define UCC_GETH_UPSMR_SGMM 0x00000020 /* UCC Transmit On Demand Register (UTODR) */ #define UCC_SLOW_TOD 0x8000 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index e8018d540e8..1170267736d 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -492,11 +492,13 @@ #define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */ #define SPRN_MMCR1 798 #define SPRN_MMCRA 0x312 +#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */ #define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */ #define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */ #define MMCRA_SLOT 0x07000000UL /* SLOT bits (37-39) */ #define MMCRA_SLOT_SHIFT 24 #define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */ +#define POWER6_MMCRA_SDSYNC 0x0000080000000000ULL /* SDAR/SIAR synced */ #define POWER6_MMCRA_SIHV 0x0000040000000000ULL #define POWER6_MMCRA_SIPR 0x0000020000000000ULL #define POWER6_MMCRA_THRM 0x00000020UL @@ -743,17 +745,18 @@ asm volatile("mfmsr %0" : "=r" (rval)); rval;}) #ifdef CONFIG_PPC64 #define __mtmsrd(v, l) asm 
volatile("mtmsrd %0," __stringify(l) \ - : : "r" (v)) + : : "r" (v) : "memory") #define mtmsrd(v) __mtmsrd((v), 0) #define mtmsr(v) mtmsrd(v) #else -#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v)) +#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v) : "memory") #endif #define mfspr(rn) ({unsigned long rval; \ asm volatile("mfspr %0," __stringify(rn) \ : "=r" (rval)); rval;}) -#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)) +#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)\ + : "memory") #ifdef __powerpc64__ #ifdef CONFIG_PPC_CELL diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 601ddbc4600..6bcf364cbb2 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -389,12 +389,14 @@ #define ICCR_CACHE 1 /* Cacheable */ /* Bit definitions for L1CSR0. */ +#define L1CSR0_CPE 0x00010000 /* Data Cache Parity Enable */ #define L1CSR0_CLFC 0x00000100 /* Cache Lock Bits Flash Clear */ #define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */ #define L1CSR0_CFI 0x00000002 /* Cache Flash Invalidate */ #define L1CSR0_DCE 0x00000001 /* Data Cache Enable */ /* Bit definitions for L1CSR1. */ +#define L1CSR1_CPE 0x00010000 /* Instruction Cache Parity Enable */ #define L1CSR1_ICLFR 0x00000100 /* Instr Cache Lock Bits Flash Reset */ #define L1CSR1_ICFI 0x00000002 /* Instr Cache Flash Invalidate */ #define L1CSR1_ICE 0x00000001 /* Instr Cache Enable */ diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 01c12339b30..168fce72620 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -58,7 +58,7 @@ struct rtas_t { unsigned long entry; /* physical address pointer */ unsigned long base; /* physical address pointer */ unsigned long size; - spinlock_t lock; + raw_spinlock_t lock; struct rtas_args args; struct device_node *dev; /* virtual address pointer */ }; @@ -245,5 +245,8 @@ static inline u32 rtas_config_addr(int busno, int devfn, int reg) (devfn << 8) | (reg & 0xff); } +extern void __cpuinit rtas_give_timebase(void); +extern void __cpuinit rtas_take_timebase(void); + #endif /* __KERNEL__ */ #endif /* _POWERPC_RTAS_H */ diff --git a/arch/powerpc/include/asm/scatterlist.h b/arch/powerpc/include/asm/scatterlist.h index fcf7d55afe4..912bf597870 100644 --- a/arch/powerpc/include/asm/scatterlist.h +++ b/arch/powerpc/include/asm/scatterlist.h @@ -21,7 +21,7 @@ struct scatterlist { unsigned int offset; unsigned int length; - /* For TCE support */ + /* For TCE or SWIOTLB support */ dma_addr_t dma_address; u32 dma_length; }; @@ -34,11 +34,7 @@ struct scatterlist { * is 0. 
*/ #define sg_dma_address(sg) ((sg)->dma_address) -#ifdef __powerpc64__ #define sg_dma_len(sg) ((sg)->dma_length) -#else -#define sg_dma_len(sg) ((sg)->length) -#endif #ifdef __powerpc64__ #define ISA_DMA_THRESHOLD (~0UL) diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h index 69f709d8e8e..3eb13be11d8 100644 --- a/arch/powerpc/include/asm/signal.h +++ b/arch/powerpc/include/asm/signal.h @@ -94,7 +94,7 @@ typedef struct { #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#include <asm-generic/signal.h> +#include <asm-generic/signal-defs.h> struct old_sigaction { __sighandler_t sa_handler; diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h new file mode 100644 index 00000000000..30891d6e2bc --- /dev/null +++ b/arch/powerpc/include/asm/swiotlb.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __ASM_SWIOTLB_H +#define __ASM_SWIOTLB_H + +#include <linux/swiotlb.h> + +extern struct dma_mapping_ops swiotlb_dma_ops; +extern struct dma_mapping_ops swiotlb_pci_dma_ops; + +int swiotlb_arch_address_needs_mapping(struct device *, dma_addr_t, + size_t size); + +static inline void dma_mark_clean(void *addr, size_t size) {} + +extern unsigned int ppc_swiotlb_enable; +int __init swiotlb_setup_bus_notifier(void); + +#endif /* __ASM_SWIOTLB_H */ diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index d98a30dfd41..370600ca276 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -322,6 +322,7 @@ SYSCALL_SPU(epoll_create1) SYSCALL_SPU(dup3) SYSCALL_SPU(pipe2) SYSCALL(inotify_init1) -SYSCALL(ni_syscall) +SYSCALL_SPU(perf_counter_open) COMPAT_SYS_SPU(preadv) COMPAT_SYS_SPU(pwritev) +COMPAT_SYS(rt_tgsigqueueinfo) diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h index 2b2420a4988..bb8e006a47c 100644 --- a/arch/powerpc/include/asm/system.h +++ b/arch/powerpc/include/asm/system.h @@ -211,7 +211,7 @@ extern struct task_struct *_switch(struct thread_struct *prev, extern unsigned int rtas_data; extern int mem_init_done; /* set on boot once kmalloc can be called */ -extern int init_bootmem_done; /* set on !NUMA once bootmem is available */ +extern int init_bootmem_done; /* set once bootmem is available */ extern phys_addr_t memory_limit; extern unsigned long klimit; diff --git a/arch/powerpc/include/asm/termios.h b/arch/powerpc/include/asm/termios.h index 2c14fea07c8..a24f48704a3 100644 --- a/arch/powerpc/include/asm/termios.h +++ b/arch/powerpc/include/asm/termios.h @@ -78,7 +78,7 @@ struct termio { #ifdef __KERNEL__ -#include <asm-generic/termios.h> +#include <asm-generic/termios-base.h> #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 9aba5a38a7c..c8b32925567 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -46,15 +46,13 @@ struct thread_info { /* * macros/functions for gaining access to the thread information structure - * - * preempt_count needs to be 1 initially, until the scheduler is functional. 
*/ #define INIT_THREAD_INFO(tsk) \ { \ .task = &tsk, \ .exec_domain = &default_exec_domain, \ .cpu = 0, \ - .preempt_count = 1, \ + .preempt_count = INIT_PREEMPT_COUNT, \ .restart_block = { \ .fn = do_no_restart_syscall, \ }, \ diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h index 7ce27a52bb3..a5aea0ca34e 100644 --- a/arch/powerpc/include/asm/types.h +++ b/arch/powerpc/include/asm/types.h @@ -40,15 +40,6 @@ typedef struct { #endif /* __ASSEMBLY__ */ #ifdef __KERNEL__ -/* - * These aren't exported outside the kernel to avoid name space clashes - */ -#ifdef __powerpc64__ -#define BITS_PER_LONG 64 -#else -#define BITS_PER_LONG 32 -#endif - #ifndef __ASSEMBLY__ typedef __vector128 vector128; diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 3f06f8ec81c..cef080bfc60 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -341,12 +341,14 @@ #define __NR_dup3 316 #define __NR_pipe2 317 #define __NR_inotify_init1 318 +#define __NR_perf_counter_open 319 #define __NR_preadv 320 #define __NR_pwritev 321 +#define __NR_rt_tgsigqueueinfo 322 #ifdef __KERNEL__ -#define __NR_syscalls 322 +#define __NR_syscalls 323 #define __NR__exit __NR_exit #define NR_syscalls __NR_syscalls diff --git a/arch/powerpc/include/asm/xilinx_pci.h b/arch/powerpc/include/asm/xilinx_pci.h new file mode 100644 index 00000000000..7a8275caf6a --- /dev/null +++ b/arch/powerpc/include/asm/xilinx_pci.h @@ -0,0 +1,21 @@ +/* + * Xilinx pci external definitions + * + * Copyright 2009 Roderick Colenbrander + * Copyright 2009 Secret Lab Technologies Ltd. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef INCLUDE_XILINX_PCI +#define INCLUDE_XILINX_PCI + +#ifdef CONFIG_XILINX_PCI +extern void __init xilinx_pci_init(void); +#else +static inline void __init xilinx_pci_init(void) { return; } +#endif + +#endif /* INCLUDE_XILINX_PCI */ |
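
For reference, a short hedged sketch of how the PPC_WARN_EMULATED() counter added in asm/emulated_ops.h above gets used. It is loosely modeled on the kernel's instruction-emulation path; the function name and surrounding code are illustrative only.

```c
#include <linux/types.h>
#include <asm/emulated_ops.h>	/* PPC_WARN_EMULATED(), ppc_emulated counters */
#include <asm/ptrace.h>		/* struct pt_regs */
#include <asm/reg.h>		/* mfspr(), SPRN_PVR */

/* Illustrative: emulate "mfspr rD, PVR" for a trapped instruction word. */
static int emulate_mfpvr(struct pt_regs *regs, u32 instword)
{
	/*
	 * Bumps ppc_emulated.mfpvr.val and, when ppc_warn_emulated is set,
	 * calls ppc_warn_emulated_print() with the entry's name, exactly as
	 * the macro body in the diff above shows.
	 */
	PPC_WARN_EMULATED(mfpvr);

	/* rD is bits 21-25 of the instruction; hand it the real PVR. */
	regs->gpr[(instword >> 21) & 0x1f] = mfspr(SPRN_PVR);
	return 0;
}
```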