Diffstat (limited to 'arch/powerpc/include/asm')
31 files changed, 642 insertions, 291 deletions
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index ef8a248dfd5..1e94b07a020 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -62,6 +62,7 @@ enum powerpc_pmc_type {
 	PPC_PMC_DEFAULT = 0,
 	PPC_PMC_IBM = 1,
 	PPC_PMC_PA6T = 2,
+	PPC_PMC_G4 = 3,
 };
 
 struct pt_regs;
@@ -192,6 +193,7 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_NO_SLBIE_B		LONG_ASM_CONST(0x0008000000000000)
 #define CPU_FTR_VSX			LONG_ASM_CONST(0x0010000000000000)
 #define CPU_FTR_SAO			LONG_ASM_CONST(0x0020000000000000)
+#define CPU_FTR_CP_USE_DCBTZ		LONG_ASM_CONST(0x0040000000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -387,10 +389,11 @@ extern const char *powerpc_base_platform;
 	    CPU_FTR_MMCRA | CPU_FTR_CTRL)
 #define CPU_FTRS_POWER4	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
-	    CPU_FTR_MMCRA)
+	    CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ)
 #define CPU_FTRS_PPC970	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
-	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA)
+	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
+	    CPU_FTR_CP_USE_DCBTZ)
 #define CPU_FTRS_POWER5	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -411,7 +414,8 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_CELL	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
-	    CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_CELL_TB_BUG)
+	    CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | \
+	    CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ)
 #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
diff --git a/arch/powerpc/include/asm/dcr-regs.h b/arch/powerpc/include/asm/dcr-regs.h
index f15296cf359..828e3aa1f2f 100644
--- a/arch/powerpc/include/asm/dcr-regs.h
+++ b/arch/powerpc/include/asm/dcr-regs.h
@@ -68,6 +68,13 @@
 #define SDR0_UART3		0x0123
 #define SDR0_CUST0		0x4000
 
+/* SDR for 405EZ */
+#define DCRN_SDR_ICINTSTAT	0x4510
+#define ICINTSTAT_ICRX		0x80000000
+#define ICINTSTAT_ICTX0		0x40000000
+#define ICINTSTAT_ICTX1		0x20000000
+#define ICINTSTAT_ICTX		0x60000000
+
 /* SDRs (460EX/460GT) */
 #define SDR0_ETH_CFG		0x4103
 #define SDR0_ETH_CFG_ECS	0x00000100	/* EMAC int clk source */
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 228ab2a315b..dfd504caccc 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -16,9 +16,6 @@ struct dev_archdata {
 	/* DMA operations on that device */
 	struct dma_mapping_ops	*dma_ops;
 	void			*dma_data;
-
-	/* NUMA node if applicable */
-	int			numa_node;
 };
 
 #endif /* _ASM_POWERPC_DEVICE_H */
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index c7ca45f97dd..fddb229bd74 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -44,8 +44,6 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
 #endif /* ! CONFIG_NOT_COHERENT_CACHE */
 
-#ifdef CONFIG_PPC64
-
 static inline unsigned long device_to_mask(struct device *dev)
 {
 	if (dev->dma_mask && *dev->dma_mask)
@@ -76,8 +74,24 @@ struct dma_mapping_ops {
 				struct dma_attrs *attrs);
 	int		(*dma_supported)(struct device *dev, u64 mask);
 	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
+	dma_addr_t	(*map_page)(struct device *dev, struct page *page,
+				unsigned long offset, size_t size,
+				enum dma_data_direction direction,
+				struct dma_attrs *attrs);
+	void		(*unmap_page)(struct device *dev,
+				dma_addr_t dma_address, size_t size,
+				enum dma_data_direction direction,
+				struct dma_attrs *attrs);
 };
 
+/*
+ * Available generic sets of operations
+ */
+#ifdef CONFIG_PPC64
+extern struct dma_mapping_ops dma_iommu_ops;
+#endif
+extern struct dma_mapping_ops dma_direct_ops;
+
 static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 {
 	/* We don't handle the NULL dev case for ISA for now. We could
@@ -85,8 +99,19 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 	 * only ISA DMA device we support is the floppy and we have a hack
 	 * in the floppy driver directly to get a device for us.
 	 */
-	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
+
+	if (unlikely(dev == NULL) || dev->archdata.dma_ops == NULL) {
+#ifdef CONFIG_PPC64
 		return NULL;
+#else
+		/* Use default on 32-bit if dma_ops is not set up */
+		/* TODO: Long term, we should fix drivers so that dev and
+		 * archdata dma_ops are set up for all buses.
+		 */
+		return &dma_direct_ops;
+#endif
+	}
+
 	return dev->archdata.dma_ops;
 }
 
@@ -123,6 +148,12 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 	return 0;
 }
 
+/*
+ * TODO: map_/unmap_single will ideally go away, to be completely
+ * replaced by map/unmap_page.   Until then, we allow dma_ops to have
+ * one or the other, or both by checking to see if the specific
+ * function requested exists; and if not, falling back on the other set.
+ */
 static inline dma_addr_t dma_map_single_attrs(struct device *dev,
 					      void *cpu_addr,
 					      size_t size,
@@ -132,7 +163,14 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	return dma_ops->map_single(dev, cpu_addr, size, direction, attrs);
+
+	if (dma_ops->map_single)
+		return dma_ops->map_single(dev, cpu_addr, size, direction,
+					   attrs);
+
+	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
+				 (unsigned long)cpu_addr % PAGE_SIZE, size,
+				 direction, attrs);
 }
 
 static inline void dma_unmap_single_attrs(struct device *dev,
@@ -144,7 +182,13 @@ static inline void dma_unmap_single_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
+
+	if (dma_ops->unmap_single) {
+		dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
+		return;
+	}
+
+	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
 }
 
 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
@@ -156,8 +200,13 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
+
+	if (dma_ops->map_page)
+		return dma_ops->map_page(dev, page, offset, size, direction,
+					 attrs);
+
 	return dma_ops->map_single(dev, page_address(page) + offset, size,
-			direction, attrs);
+				   direction, attrs);
 }
 
 static inline void dma_unmap_page_attrs(struct device *dev,
@@ -169,6 +218,12 @@ static inline void dma_unmap_page_attrs(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
+
+	if (dma_ops->unmap_page) {
+		dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
+		return;
+	}
+
 	dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
 }
 
@@ -253,126 +308,6 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
 }
 
-/*
- * Available generic sets of operations
- */
-extern struct dma_mapping_ops dma_iommu_ops;
-extern struct dma_mapping_ops dma_direct_ops;
-
-#else /* CONFIG_PPC64 */
-
-#define dma_supported(dev, mask)	(1)
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t * dma_handle,
-				       gfp_t gfp)
-{
-#ifdef CONFIG_NOT_COHERENT_CACHE
-	return __dma_alloc_coherent(size, dma_handle, gfp);
-#else
-	void *ret;
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
-		gfp |= GFP_DMA;
-
-	ret = (void *)__get_free_pages(gfp, get_order(size));
-
-	if (ret != NULL) {
-		memset(ret, 0, size);
-		*dma_handle = virt_to_bus(ret);
-	}
-
-	return ret;
-#endif
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-		  dma_addr_t dma_handle)
-{
-#ifdef CONFIG_NOT_COHERENT_CACHE
-	__dma_free_coherent(size, vaddr);
-#else
-	free_pages((unsigned long)vaddr, get_order(size));
-#endif
-}
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	__dma_sync(ptr, size, direction);
-
-	return virt_to_bus(ptr);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-				    size_t size,
-				    enum dma_data_direction direction)
-{
-	/* We do nothing. */
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	__dma_sync_page(page, offset, size, direction);
-
-	return page_to_bus(page) + offset;
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-				  size_t size,
-				  enum dma_data_direction direction)
-{
-	/* We do nothing. */
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
-	   enum dma_data_direction direction)
-{
-	struct scatterlist *sg;
-	int i;
-
-	BUG_ON(direction == DMA_NONE);
-
-	for_each_sg(sgl, sg, nents, i) {
-		BUG_ON(!sg_page(sg));
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
-		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
-	}
-
-	return nents;
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-				int nhwentries,
-				enum dma_data_direction direction)
-{
-	/* We don't do anything here. */
-}
-
-#endif /* CONFIG_PPC64 */
-
 static inline void dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
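The TODO comment in dma_map_single_attrs() above states the transition rule: a dma_mapping_ops instance may provide map_single/unmap_single, map_page/unmap_page, or both, and the wrappers fall back to whichever family exists. As a minimal sketch only (the example_* names are hypothetical, not part of this patch, and a 1:1 bus mapping is assumed), an ops set that implements just the page-based hooks could look like:

	/* Illustrative only: provide just the page hooks and rely on the
	 * wrappers above to service the _single API via virt_to_page(). */
	static dma_addr_t example_map_page(struct device *dev, struct page *page,
					   unsigned long offset, size_t size,
					   enum dma_data_direction direction,
					   struct dma_attrs *attrs)
	{
		return page_to_phys(page) + offset;	/* assumes 1:1 bus mapping */
	}

	static void example_unmap_page(struct device *dev, dma_addr_t dma_address,
				       size_t size, enum dma_data_direction direction,
				       struct dma_attrs *attrs)
	{
		/* nothing to undo for a 1:1 mapping */
	}

	static struct dma_mapping_ops example_dma_ops = {
		.map_page	= example_map_page,
		.unmap_page	= example_unmap_page,
		/* .map_single/.unmap_single left NULL on purpose:
		 * dma_map_single_attrs() falls back to map_page. */
	};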
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 64c6ee22eef..d812929390e 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -232,7 +232,7 @@ typedef elf_vrregset_t elf_fpxregset_t;
 #endif /* __powerpc64__ */
 
 #ifdef __powerpc64__
-# define SET_PERSONALITY(ex, ibcs2)				\
+# define SET_PERSONALITY(ex)					\
 do {								\
 	unsigned long new_flags = 0;				\
 	if ((ex).e_ident[EI_CLASS] == ELFCLASS32)		\
@@ -256,7 +256,7 @@ do {								\
 # define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \
 		(exec_stk != EXSTACK_DISABLE_X) : 0)
 #else
-# define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
+# define SET_PERSONALITY(ex) set_personality(PER_LINUX)
 #endif /* __powerpc64__ */
 
 extern int dcache_bsize;
diff --git a/arch/powerpc/include/asm/exception.h b/arch/powerpc/include/asm/exception.h
index 329148b5acc..d3d4534e3c7 100644
--- a/arch/powerpc/include/asm/exception.h
+++ b/arch/powerpc/include/asm/exception.h
@@ -53,14 +53,8 @@
  * low halfword of the address, but for Kdump we need the whole low
  * word.
  */
-#ifdef CONFIG_CRASH_DUMP
 #define LOAD_HANDLER(reg, label)					\
-	oris	reg,reg,(label)@h;	/* virt addr of handler ... */	\
-	ori	reg,reg,(label)@l;	/* .. and the rest */
-#else
-#define LOAD_HANDLER(reg, label)					\
-	ori	reg,reg,(label)@l;	/* virt addr of handler ... */
-#endif
+	addi	reg,reg,(label)-_stext;	/* virt addr of handler ... */
 
 #define EXCEPTION_PROLOG_1(area)					\
 	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
@@ -72,37 +66,12 @@
 	std	r9,area+EX_R13(r13);					\
 	mfcr	r9
 
-/*
- * Equal to EXCEPTION_PROLOG_PSERIES, except that it forces 64bit mode.
- * The firmware calls the registered system_reset_fwnmi and
- * machine_check_fwnmi handlers in 32bit mode if the cpu happens to run
- * a 32bit application at the time of the event.
- * This firmware bug is present on POWER4 and JS20.
- */
-#define EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(area, label)		\
-	EXCEPTION_PROLOG_1(area);					\
-	clrrdi	r12,r13,32;		/* get high part of &label */	\
-	mfmsr	r10;							\
-	/* force 64bit mode */						\
-	li	r11,5;			/* MSR_SF_LG|MSR_ISF_LG */	\
-	rldimi	r10,r11,61,0;		/* insert into top 3 bits */	\
-	/* done 64bit mode */						\
-	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
-	LOAD_HANDLER(r12,label)						\
-	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
-	mtspr	SPRN_SRR0,r12;						\
-	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
-	mtspr	SPRN_SRR1,r10;						\
-	rfid;								\
-	b	.	/* prevent speculative execution */
-
 #define EXCEPTION_PROLOG_PSERIES(area, label)				\
 	EXCEPTION_PROLOG_1(area);					\
-	clrrdi	r12,r13,32;		/* get high part of &label */	\
-	mfmsr	r10;							\
+	ld	r12,PACAKBASE(r13);	/* get high part of &label */	\
+	ld	r10,PACAKMSR(r13);	/* get MSR value for kernel */	\
 	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
 	LOAD_HANDLER(r12,label)						\
-	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
 	mtspr	SPRN_SRR0,r12;						\
 	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
 	mtspr	SPRN_SRR1,r10;						\
@@ -210,11 +179,10 @@ label##_pSeries:						\
 	std	r10,PACA_EXGEN+EX_R13(r13);				\
 	std	r11,PACA_EXGEN+EX_R11(r13);				\
 	std	r12,PACA_EXGEN+EX_R12(r13);				\
-	clrrdi	r12,r13,32;		/* get high part of &label */	\
-	mfmsr	r10;							\
+	ld	r12,PACAKBASE(r13);	/* get high part of &label */	\
+	ld	r10,PACAKMSR(r13);	/* get MSR value for kernel */	\
 	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
 	LOAD_HANDLER(r12,label##_common)				\
-	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
 	mtspr	SPRN_SRR0,r12;						\
 	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
 	mtspr	SPRN_SRR1,r10;						\
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h
index 303f5484c05..63a4f779f53 100644
--- a/arch/powerpc/include/asm/fsl_lbc.h
+++ b/arch/powerpc/include/asm/fsl_lbc.h
@@ -23,9 +23,9 @@
 #ifndef __ASM_FSL_LBC_H
 #define __ASM_FSL_LBC_H
 
+#include <linux/compiler.h>
 #include <linux/types.h>
-#include <linux/spinlock.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 struct fsl_lbc_bank {
 	__be32 br;             /**< Base Register  */
@@ -227,9 +227,6 @@ struct fsl_lbc_regs {
 	u8 res8[0xF00];
 };
 
-extern struct fsl_lbc_regs __iomem *fsl_lbc_regs;
-extern spinlock_t fsl_lbc_lock;
-
 /*
  * FSL UPM routines
  */
@@ -268,44 +265,7 @@ static inline void fsl_upm_end_pattern(struct fsl_upm *upm)
 		cpu_relax();
 }
 
-/**
- * fsl_upm_run_pattern - actually run an UPM pattern
- * @upm:	pointer to the fsl_upm structure obtained via fsl_upm_find
- * @io_base:	remapped pointer to where memory access should happen
- * @mar:	MAR register content during pattern execution
- *
- * This function triggers dummy write to the memory specified by the io_base,
- * thus UPM pattern actually executed. Note that mar usage depends on the
- * pre-programmed AMX bits in the UPM RAM.
- */
-static inline int fsl_upm_run_pattern(struct fsl_upm *upm,
-				      void __iomem *io_base, u32 mar)
-{
-	int ret = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&fsl_lbc_lock, flags);
-
-	out_be32(&fsl_lbc_regs->mar, mar << (32 - upm->width));
-
-	switch (upm->width) {
-	case 8:
-		out_8(io_base, 0x0);
-		break;
-	case 16:
-		out_be16(io_base, 0x0);
-		break;
-	case 32:
-		out_be32(io_base, 0x0);
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-	spin_unlock_irqrestore(&fsl_lbc_lock, flags);
-
-	return ret;
-}
+extern int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base,
+			       u32 mar);
 
 #endif /* __ASM_FSL_LBC_H */
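fsl_upm_run_pattern() moves out of line but keeps the semantics the removed kerneldoc described: a dummy write triggers the pre-programmed UPM pattern, and mar interpretation depends on the AMX bits in the UPM RAM. A hypothetical caller for orientation (example_run_cmd is a made-up name; upm and io_base would come from fsl_upm_find() and ioremap() elsewhere):

	/* Sketch: issue one pre-programmed UPM pattern, e.g. a command cycle. */
	static int example_run_cmd(struct fsl_upm *upm, void __iomem *io_base,
				   u32 mar)
	{
		int ret;

		fsl_upm_start_pattern(upm, 0x0);	/* select pattern at offset 0 */
		ret = fsl_upm_run_pattern(upm, io_base, mar);
		fsl_upm_end_pattern(upm);

		return ret;	/* -EINVAL for unsupported machine widths */
	}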
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 5d99b6489d5..91c589520c0 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -84,7 +84,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
 #endif
-	set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
+	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
 	flush_tlb_page(NULL, vaddr);
 
 	return (void*) vaddr;
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 77c7fa025e6..08266d2728b 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -711,7 +711,7 @@ static inline void * phys_to_virt(unsigned long address)
 /*
  * Change "struct page" to physical address.
  */
-#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
+#define page_to_phys(page)	((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
 
 /* We do NOT want virtual merging, it would put too much pressure on
  * our iommu allocator. Instead, we want drivers to be smart enough
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index a372f76836c..0a5137676e1 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -236,15 +236,27 @@ extern unsigned int irq_find_mapping(struct irq_host *host,
 extern unsigned int irq_create_direct_mapping(struct irq_host *host);
 
 /**
- * irq_radix_revmap - Find a linux virq from a hw irq number.
+ * irq_radix_revmap_insert - Insert a hw irq to linux virq number mapping.
+ * @host: host owning this hardware interrupt
+ * @virq: linux irq number
+ * @hwirq: hardware irq number in that host space
+ *
+ * This is for use by irq controllers that use a radix tree reverse
+ * mapping for fast lookup.
+ */
+extern void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
+				    irq_hw_number_t hwirq);
+
+/**
+ * irq_radix_revmap_lookup - Find a linux virq from a hw irq number.
  * @host: host owning this hardware interrupt
  * @hwirq: hardware irq number in that host space
  *
 * This is a fast path, for use by irq controller code that uses radix tree
 * revmaps
 */
-extern unsigned int irq_radix_revmap(struct irq_host *host,
-				     irq_hw_number_t hwirq);
+extern unsigned int irq_radix_revmap_lookup(struct irq_host *host,
+					    irq_hw_number_t hwirq);
 
 /**
 * irq_linear_revmap - Find a linux virq from a hw irq number.
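Splitting irq_radix_revmap() into insert and lookup halves means a controller populates the radix tree once, at mapping time, and the interrupt fast path only reads it. A sketch under assumed names (my_host, my_host_map and my_cascade_handler are not from this patch):

	static struct irq_host *my_host;	/* set up at probe time */

	/* IRQ_HOST_MAP_TREE style map callback: populate the revmap once. */
	static int my_host_map(struct irq_host *host, unsigned int virq,
			       irq_hw_number_t hwirq)
	{
		irq_radix_revmap_insert(host, virq, hwirq);
		return 0;
	}

	/* Fast path: translate a hardware irq read from the controller. */
	static void my_cascade_handler(irq_hw_number_t hwirq)
	{
		unsigned int virq = irq_radix_revmap_lookup(my_host, hwirq);

		if (virq != NO_IRQ)
			generic_handle_irq(virq);
	}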
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 893aafd87fd..2740c44ff71 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -88,8 +88,6 @@ struct machdep_calls {
 	unsigned long	(*tce_get)(struct iommu_table *tbl,
 				    long index);
 	void		(*tce_flush)(struct iommu_table *tbl);
-	void		(*pci_dma_dev_setup)(struct pci_dev *dev);
-	void		(*pci_dma_bus_setup)(struct pci_bus *bus);
 
 	void __iomem *	(*ioremap)(phys_addr_t addr, unsigned long size,
 				   unsigned long flags);
@@ -101,6 +99,9 @@ struct machdep_calls {
 #endif
 #endif /* CONFIG_PPC64 */
 
+	void		(*pci_dma_dev_setup)(struct pci_dev *dev);
+	void		(*pci_dma_bus_setup)(struct pci_bus *bus);
+
 	int		(*probe)(void);
 	void		(*setup_arch)(void); /* Optional, may be NULL */
 	void		(*init_early)(void);
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 9209f755763..e7b99bac9f4 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
 
 static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
 {
-	return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : 0;
+	return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
 }
 #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index c2df53c5ceb..5a441742ffb 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -437,7 +437,7 @@ typedef struct {
 	})
 #endif /* 1 */
 
-/* This is only valid for addresses >= KERNELBASE */
+/* This is only valid for addresses >= PAGE_OFFSET */
 static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
 {
 	if (ssize == MMU_SEGSIZE_256M)
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index fe566a348a8..34d9ac433ac 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -5,6 +5,7 @@
 #include <linux/irq.h>
 #include <linux/sysdev.h>
 #include <asm/dcr.h>
+#include <asm/msi_bitmap.h>
 
 /*
  * Global registers
@@ -301,8 +302,7 @@ struct mpic
 #endif
 
 #ifdef CONFIG_PCI_MSI
-	spinlock_t		bitmap_lock;
-	unsigned long		*hwirq_bitmap;
+	struct msi_bitmap	msi_bitmap;
 #endif
 
 #ifdef CONFIG_MPIC_BROKEN_REGREAD
diff --git a/arch/powerpc/include/asm/msi_bitmap.h b/arch/powerpc/include/asm/msi_bitmap.h
new file mode 100644
index 00000000000..97ac3f46ae0
--- /dev/null
+++ b/arch/powerpc/include/asm/msi_bitmap.h
@@ -0,0 +1,35 @@
+#ifndef _POWERPC_SYSDEV_MSI_BITMAP_H
+#define _POWERPC_SYSDEV_MSI_BITMAP_H
+
+/*
+ * Copyright 2008, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/of.h>
+#include <asm/irq.h>
+
+struct msi_bitmap {
+	struct device_node	*of_node;
+	unsigned long		*bitmap;
+	spinlock_t		lock;
+	unsigned int		irq_count;
+};
+
+int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num);
+void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset,
+			    unsigned int num);
+void msi_bitmap_reserve_hwirq(struct msi_bitmap *bmp, unsigned int hwirq);
+
+int msi_bitmap_reserve_dt_hwirqs(struct msi_bitmap *bmp);
+
+int msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
+		     struct device_node *of_node);
+void msi_bitmap_free(struct msi_bitmap *bmp);
+
+#endif /* _POWERPC_SYSDEV_MSI_BITMAP_H */
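The new msi_bitmap header replaces the ad-hoc lock-plus-bitmap pair that mpic.h carried. A hedged usage sketch of the API as declared above (example_msi_init and the count of 128 are made up; error handling is trimmed):

	static struct msi_bitmap bmp;

	static int example_msi_init(struct device_node *np)
	{
		int rc, hwirq;

		rc = msi_bitmap_alloc(&bmp, 128, np);	/* back the bitmap */
		if (rc)
			return rc;

		/* honour any reservations described in the device tree */
		msi_bitmap_reserve_dt_hwirqs(&bmp);

		hwirq = msi_bitmap_alloc_hwirqs(&bmp, 1);	/* grab one MSI */
		if (hwirq < 0) {
			msi_bitmap_free(&bmp);
			return hwirq;
		}

		/* ... program the controller; on teardown: */
		msi_bitmap_free_hwirqs(&bmp, hwirq, 1);
		return 0;
	}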
diff --git a/arch/powerpc/include/asm/of_device.h b/arch/powerpc/include/asm/of_device.h
index 3c123990ca2..a64debf177d 100644
--- a/arch/powerpc/include/asm/of_device.h
+++ b/arch/powerpc/include/asm/of_device.h
@@ -24,8 +24,5 @@ extern struct of_device *of_device_alloc(struct device_node *np,
 extern int of_device_uevent(struct device *dev,
 			    struct kobj_uevent_env *env);
 
-/* This is just here during the transition */
-#include <linux/of_device.h>
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_OF_DEVICE_H */
diff --git a/arch/powerpc/include/asm/of_platform.h b/arch/powerpc/include/asm/of_platform.h
index 18659ef7213..53b46507ffd 100644
--- a/arch/powerpc/include/asm/of_platform.h
+++ b/arch/powerpc/include/asm/of_platform.h
@@ -11,9 +11,6 @@
  *
  */
 
-/* This is just here during the transition */
-#include <linux/of_platform.h>
-
 /* Platform drivers register/unregister */
 static inline int of_register_platform_driver(struct of_platform_driver *drv)
 {
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 6493a395508..082b3aedf14 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -62,6 +62,8 @@ struct paca_struct {
 	u16 paca_index;			/* Logical processor number */
 
 	u64 kernel_toc;			/* Kernel TOC address */
+	u64 kernelbase;			/* Base address of kernel */
+	u64 kernel_msr;			/* MSR while running in kernel */
 	u64 stab_real;			/* Absolute address of segment table */
 	u64 stab_addr;			/* Virtual address of segment table */
 	void *emergency_sp;		/* pointer to emergency stack */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index e088545cb3f..64e144505f6 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -71,15 +71,21 @@
 #define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
 #define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
 
-#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_FLATMEM)
+#if defined(CONFIG_RELOCATABLE)
 #ifndef __ASSEMBLY__
 extern phys_addr_t memstart_addr;
 extern phys_addr_t kernstart_addr;
 #endif
 #define PHYSICAL_START	kernstart_addr
-#define MEMORY_START	memstart_addr
 #else
 #define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
+#endif
+
+#ifdef CONFIG_PPC64
+#define MEMORY_START	0UL
+#elif defined(CONFIG_RELOCATABLE)
+#define MEMORY_START	memstart_addr
+#else
 #define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
 #endif
 
@@ -92,8 +98,8 @@ extern phys_addr_t kernstart_addr;
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
-#define __va(x) ((void *)((unsigned long)(x) - PHYSICAL_START + KERNELBASE))
-#define __pa(x) ((unsigned long)(x) + PHYSICAL_START - KERNELBASE)
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - MEMORY_START))
+#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
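The new __va()/__pa() are symmetric around PAGE_OFFSET and MEMORY_START rather than KERNELBASE and PHYSICAL_START. A standalone round-trip check, assuming the common 32-bit values PAGE_OFFSET = 0xc0000000 and MEMORY_START = 0 (both are example values, not taken from the patch):

	/* Userspace sanity sketch, not kernel code. */
	#include <assert.h>

	#define EX_PAGE_OFFSET	0xc0000000UL	/* assumed example value */
	#define EX_MEMORY_START	0x0UL		/* assumed example value */

	#define ex_va(x) ((void *)((unsigned long)(x) + EX_PAGE_OFFSET - EX_MEMORY_START))
	#define ex_pa(x) ((unsigned long)(x) - EX_PAGE_OFFSET + EX_MEMORY_START)

	int main(void)
	{
		unsigned long phys = 0x01234000UL;

		/* 0x01234000 -> virtual 0xc1234000 -> back to 0x01234000 */
		assert(ex_pa(ex_va(phys)) == phys);
		return 0;
	}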
 
 /*
  * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index ebfae530a37..d77072a32cc 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -13,10 +13,16 @@
 #define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
 #endif
 
+#ifdef CONFIG_PTE_64BIT
+#define PTE_FLAGS_OFFSET	4	/* offset of PTE flags, in bytes */
+#else
+#define PTE_FLAGS_OFFSET	0
+#endif
+
 #ifndef __ASSEMBLY__
 /*
  * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
- * physical addressing.  For now this just the IBM PPC440.
+ * physical addressing.
  */
 #ifdef CONFIG_PTE_64BIT
 typedef unsigned long long pte_basic_t;
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index a05a942b1c2..0e52c7828ea 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -60,6 +60,14 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 	return channel ? 15 : 14;
 }
 
+#ifdef CONFIG_PCI
+extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops);
+extern struct dma_mapping_ops *get_pci_dma_ops(void);
+#else	/* CONFIG_PCI */
+#define set_pci_dma_ops(d)
+#define get_pci_dma_ops()	NULL
+#endif
+
 #ifdef CONFIG_PPC64
 
 /*
@@ -70,9 +78,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 #define PCI_DISABLE_MWI
 
 #ifdef CONFIG_PCI
-extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops);
-extern struct dma_mapping_ops *get_pci_dma_ops(void);
-
 static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 					enum pci_dma_burst_strategy *strat,
 					unsigned long *strategy_parameter)
@@ -89,9 +94,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 	*strat = PCI_DMA_BURST_MULTIPLE;
 	*strategy_parameter = cacheline_size;
 }
-#else	/* CONFIG_PCI */
-#define set_pci_dma_ops(d)
-#define get_pci_dma_ops()	NULL
 #endif
 
 #else /* 32-bit */
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 6fe39e32704..6ab7c67cb5a 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -261,6 +261,7 @@ extern int icache_44x_need_flush;
 #define _PAGE_HWEXEC	0x00000004	/* H: Execute permission */
 #define _PAGE_ACCESSED	0x00000008	/* S: Page referenced */
 #define _PAGE_DIRTY	0x00000010	/* S: Page dirty */
+#define _PAGE_SPECIAL	0x00000020	/* S: Special page */
 #define _PAGE_USER	0x00000040	/* S: User page */
 #define _PAGE_ENDIAN	0x00000080	/* H: E bit */
 #define _PAGE_GUARDED	0x00000100	/* H: G bit */
@@ -276,6 +277,7 @@
 /* ERPN in a PTE never gets cleared, ignore it */
 #define _PTE_NONE_MASK	0xffffffff00000000ULL
 
+#define __HAVE_ARCH_PTE_SPECIAL
 
 #elif defined(CONFIG_FSL_BOOKE)
 /*
@@ -305,6 +307,7 @@
 #define _PAGE_COHERENT	0x00100	/* H: M bit */
 #define _PAGE_NO_CACHE	0x00200	/* H: I bit */
 #define _PAGE_WRITETHRU	0x00400	/* H: W bit */
+#define _PAGE_SPECIAL	0x00800	/* S: Special page */
 
 #ifdef CONFIG_PTE_64BIT
 /* ERPN in a PTE never gets cleared, ignore it */
@@ -315,6 +318,8 @@
 #define _PMD_PRESENT_MASK (PAGE_MASK)
 #define _PMD_BAD	(~PAGE_MASK)
 
+#define __HAVE_ARCH_PTE_SPECIAL
+
 #elif defined(CONFIG_8xx)
 /* Definitions for 8xx embedded chips. */
 #define _PAGE_PRESENT	0x0001	/* Page is valid */
@@ -362,8 +367,14 @@
 #define _PAGE_ACCESSED	0x100	/* R: page referenced */
 #define _PAGE_EXEC	0x200	/* software: i-cache coherency required */
 #define _PAGE_RW	0x400	/* software: user write access allowed */
+#define _PAGE_SPECIAL	0x800	/* software: Special page */
 
+#ifdef CONFIG_PTE_64BIT
+/* We never clear the high word of the pte */
+#define _PTE_NONE_MASK	(0xffffffff00000000ULL | _PAGE_HASHPTE)
+#else
 #define _PTE_NONE_MASK	_PAGE_HASHPTE
+#endif
 
 #define _PMD_PRESENT	0
 #define _PMD_PRESENT_MASK (PAGE_MASK)
@@ -372,6 +383,8 @@
 /* Hash table based platforms need atomic updates of the linux PTE */
 #define PTE_ATOMIC_UPDATES	1
 
+#define __HAVE_ARCH_PTE_SPECIAL
+
 #endif
 
 /*
@@ -404,6 +417,9 @@
 #ifndef _PAGE_WRITETHRU
 #define _PAGE_WRITETHRU	0
 #endif
+#ifndef _PAGE_SPECIAL
+#define _PAGE_SPECIAL	0
+#endif
 #ifndef _PMD_PRESENT_MASK
 #define _PMD_PRESENT_MASK _PMD_PRESENT
 #endif
@@ -415,11 +431,11 @@
 
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
 
-#define PAGE_PROT_BITS	__pgprot(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
-				 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
-				 _PAGE_USER | _PAGE_ACCESSED | \
-				 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
-				 _PAGE_EXEC | _PAGE_HWEXEC)
+#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
+			 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
+			 _PAGE_USER | _PAGE_ACCESSED | \
+			 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
+			 _PAGE_EXEC | _PAGE_HWEXEC)
 
 /*
  * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
  * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
@@ -517,7 +533,8 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
 
 #define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
 #define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
-#define pte_clear(mm,addr,ptep)	do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)
+#define pte_clear(mm, addr, ptep) \
+	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
 
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
@@ -533,7 +550,7 @@ static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
 static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_special(pte_t pte)	{ return 0; }
+static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
 
 static inline void pte_uncache(pte_t pte)       { pte_val(pte) |= _PAGE_NO_CACHE; }
 static inline void pte_cache(pte_t pte)         { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -552,10 +569,10 @@ static inline pte_t pte_mkdirty(pte_t pte) {
 static inline pte_t pte_mkyoung(pte_t pte) {
 	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkspecial(pte_t pte) {
-	return pte; }
-static inline unsigned long pte_pgprot(pte_t pte)
+	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
+static inline pgprot_t pte_pgprot(pte_t pte)
 {
-	return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
+	return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
 }
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
@@ -575,6 +592,10 @@ extern int flush_hash_pages(unsigned context, unsigned long va,
 extern void add_hash_page(unsigned context, unsigned long va,
 			  unsigned long pmdval);
 
+/* Flush an entry from the TLB/hash table */
+extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
+			     unsigned long address);
+
 /*
  * Atomic PTE updates.
  *
@@ -612,9 +633,6 @@ static inline unsigned long pte_update(pte_t *p,
 	return old;
 }
 #else /* CONFIG_PTE_64BIT */
-/* TODO: Change that to only modify the low word and move set_pte_at()
- * out of line
- */
 static inline unsigned long long pte_update(pte_t *p,
 					    unsigned long clr,
 					    unsigned long set)
@@ -652,14 +670,36 @@ static inline unsigned long long pte_update(pte_t *p,
  * On machines which use an MMU hash table we avoid changing the
  * _PAGE_HASHPTE bit.
  */
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
-#if _PAGE_HASHPTE != 0
+#if (_PAGE_HASHPTE != 0) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
 	pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
+#elif defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
+#if _PAGE_HASHPTE != 0
+	if (pte_val(*ptep) & _PAGE_HASHPTE)
+		flush_hash_entry(mm, ptep, addr);
+#endif
+	__asm__ __volatile__("\
+		stw%U0%X0 %2,%0\n\
+		eieio\n\
+		stw%U0%X0 %L2,%1"
+	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+	: "r" (pte) : "memory");
+#else
+	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+		      | (pte_val(pte) & ~_PAGE_HASHPTE));
+#endif
+}
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pte)
+{
+#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
+	WARN_ON(pte_present(*ptep));
 #endif
+	__set_pte_at(mm, addr, ptep, pte);
 }
 
 /*
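The inline asm added to __set_pte_at() writes the two 32-bit halves of a 64-bit PTE with an eieio between them; since PTE_FLAGS_OFFSET is 4, the word carrying the flags (and thus the valid bit) is stored last, so a concurrent walker can never pair the new valid bit with a stale page number. A pseudo-C sketch of that ordering (ordered_pte_store is illustrative, not kernel code):

	static inline void ordered_pte_store(volatile unsigned int *pte_words,
					     unsigned int pfn_half,
					     unsigned int flags_half)
	{
		pte_words[0] = pfn_half;	/* new page number first ... */
		__asm__ __volatile__("eieio" ::: "memory");	/* order the stores */
		pte_words[1] = flags_half;	/* ... word with the valid bit last */
	}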
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 4597c491e9b..4c0a8c62859 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -117,10 +117,10 @@
 #define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
 #define HAVE_PAGE_AGP
 
-#define PAGE_PROT_BITS	__pgprot(_PAGE_GUARDED | _PAGE_COHERENT | \
-				 _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
-				 _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \
-				 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
+#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | \
+			 _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
+			 _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \
+			 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
 
 /* PTEIDX nibble */
 #define _PTEIDX_SECONDARY	0x8
 #define _PTEIDX_GROUP_IX	0x7
@@ -264,9 +264,9 @@ static inline pte_t pte_mkhuge(pte_t pte) {
 	return pte; }
 static inline pte_t pte_mkspecial(pte_t pte) {
 	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
-static inline unsigned long pte_pgprot(pte_t pte)
+static inline pgprot_t pte_pgprot(pte_t pte)
 {
-	return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
+	return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
 }
 
 /* Atomic PTE updates */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 0966899d974..c4a029ccb4d 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -268,7 +268,7 @@ n:
  * Loads the value of the constant expression 'expr' into register 'rn'
  * using immediate instructions only.  Use this when it's important not
  * to reference other data (i.e. on ppc64 when the TOC pointer is not
- * valid).
+ * valid) and when 'expr' is a constant or absolute address.
  *
  * LOAD_REG_ADDR(rn, name)
  *	Loads the address of label 'name' into register 'rn'.  Use this when
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index be980f4ee49..67453766bff 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -109,6 +109,7 @@
 #define SPRN_EVPR	0x3D6	/* Exception Vector Prefix Register */
 #define SPRN_L1CSR0	0x3F2	/* L1 Cache Control and Status Register 0 */
 #define SPRN_L1CSR1	0x3F3	/* L1 Cache Control and Status Register 1 */
+#define SPRN_MMUCSR0	0x3F4	/* MMU Control and Status Register 0 */
 #define SPRN_PIT	0x3DB	/* Programmable Interval Timer */
 #define SPRN_BUCSR	0x3F5	/* Branch Unit Control and Status */
 #define SPRN_L2CSR0	0x3F9	/* L2 Data Cache Control and Status Register 0 */
@@ -410,6 +411,12 @@
 #define L2CSR0_L2LOA	0x00000080	/* L2 Cache Lock Overflow Allocate */
 #define L2CSR0_L2LO	0x00000020	/* L2 Cache Lock Overflow */
 
+/* Bit definitions for MMUCSR0 */
+#define MMUCSR0_TLB1FI	0x00000002	/* TLB1 Flash invalidate */
+#define MMUCSR0_TLB0FI	0x00000004	/* TLB0 Flash invalidate */
+#define MMUCSR0_TLB2FI	0x00000040	/* TLB2 Flash invalidate */
+#define MMUCSR0_TLB3FI	0x00000020	/* TLB3 Flash invalidate */
+
 /* Bit definitions for SGR. */
 #define SGR_NORMAL	0		/* Speculative fetching allowed. */
 #define SGR_GUARDED	1		/* Speculative fetching disallowed. */
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 07956f3e784..6fbce725c71 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -18,6 +18,12 @@ static inline int in_kernel_text(unsigned long addr)
 		return 0;
 }
 
+static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
+{
+	return start < (unsigned long)__init_end &&
+		(unsigned long)_stext < end;
+}
+
 #undef dereference_function_descriptor
 static inline void *dereference_function_descriptor(void *ptr)
 {
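overlaps_kernel_text() is the usual half-open interval-overlap test, here against [_stext, __init_end). A hypothetical caller (the function name and error code are illustrative only):

	static int example_check_region(unsigned long start, unsigned long size)
	{
		if (overlaps_kernel_text(start, start + size))
			return -EBUSY;	/* intersects [_stext, __init_end) */
		return 0;
	}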
diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h
new file mode 100644
index 00000000000..ced34f1dc8f
--- /dev/null
+++ b/arch/powerpc/include/asm/sfp-machine.h
@@ -0,0 +1,353 @@
+/* Machine-dependent software floating-point definitions.  PPC version.
+   Copyright (C) 1997 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Library General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Library General Public License for more details.
+
+   You should have received a copy of the GNU Library General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, write to the Free Software Foundation, Inc.,
+   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+   Actually, this is a PPC (32bit) version, written based on the
+   i386, sparc, and sparc64 versions, by me,
+   Peter Maydell (pmaydell@chiark.greenend.org.uk).
+   Comments are by and large also mine, although they may be inaccurate.
+
+   In picking out asm fragments I've gone with the lowest common
+   denominator, which also happens to be the hardware I have :->
+   That is, a SPARC without hardware multiply and divide.
+ */
+
+/* basic word size definitions */
+#define _FP_W_TYPE_SIZE		32
+#define _FP_W_TYPE		unsigned long
+#define _FP_WS_TYPE		signed long
+#define _FP_I_TYPE		long
+
+#define __ll_B			((UWtype) 1 << (W_TYPE_SIZE / 2))
+#define __ll_lowpart(t)		((UWtype) (t) & (__ll_B - 1))
+#define __ll_highpart(t)	((UWtype) (t) >> (W_TYPE_SIZE / 2))
+
+/* You can optionally code some things like addition in asm. For
+ * example, i386 defines __FP_FRAC_ADD_2 as asm. If you don't
+ * then you get a fragment of C code [if you change an #ifdef 0
+ * in op-2.h] or a call to add_ssaaaa (see below).
+ * Good places to look for asm fragments to use are gcc and glibc.
+ * gcc's longlong.h is useful.
+ */
+
+/* We need to know how to multiply and divide. If the host word size
+ * is >= 2*fracbits you can use FP_MUL_MEAT_n_imm(t,R,X,Y) which
+ * codes the multiply with whatever gcc does to 'a * b'.
+ * _FP_MUL_MEAT_n_wide(t,R,X,Y,f) is used when you have an asm
+ * function that can multiply two 1W values and get a 2W result.
+ * Otherwise you're stuck with _FP_MUL_MEAT_n_hard(t,R,X,Y) which
+ * does bitshifting to avoid overflow.
+ * For division there is FP_DIV_MEAT_n_imm(t,R,X,Y,f) for word size
+ * >= 2*fracbits, where f is either _FP_DIV_HELP_imm or
+ * _FP_DIV_HELP_ldiv (see op-1.h).
+ * _FP_DIV_MEAT_udiv() is if you have asm to do 2W/1W => (1W, 1W).
+ * [GCC and glibc have longlong.h which has the asm macro udiv_qrnnd
+ * to do this.]
+ * In general, 'n' is the number of words required to hold the type,
+ * and 't' is either S, D or Q for single/double/quad.
+ *           -- PMM
+ */
+/* Example: SPARC64:
+ * #define _FP_MUL_MEAT_S(R,X,Y)	_FP_MUL_MEAT_1_imm(S,R,X,Y)
+ * #define _FP_MUL_MEAT_D(R,X,Y)	_FP_MUL_MEAT_1_wide(D,R,X,Y,umul_ppmm)
+ * #define _FP_MUL_MEAT_Q(R,X,Y)	_FP_MUL_MEAT_2_wide(Q,R,X,Y,umul_ppmm)
+ *
+ * #define _FP_DIV_MEAT_S(R,X,Y)	_FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
+ * #define _FP_DIV_MEAT_D(R,X,Y)	_FP_DIV_MEAT_1_udiv(D,R,X,Y)
+ * #define _FP_DIV_MEAT_Q(R,X,Y)	_FP_DIV_MEAT_2_udiv_64(Q,R,X,Y)
+ *
+ * Example: i386:
+ * #define _FP_MUL_MEAT_S(R,X,Y)	_FP_MUL_MEAT_1_wide(S,R,X,Y,_i386_mul_32_64)
+ * #define _FP_MUL_MEAT_D(R,X,Y)	_FP_MUL_MEAT_2_wide(D,R,X,Y,_i386_mul_32_64)
+ *
+ * #define _FP_DIV_MEAT_S(R,X,Y)	_FP_DIV_MEAT_1_udiv(S,R,X,Y,_i386_div_64_32)
+ * #define _FP_DIV_MEAT_D(R,X,Y)	_FP_DIV_MEAT_2_udiv_64(D,R,X,Y)
+ */
+
+#define _FP_MUL_MEAT_S(R,X,Y)	_FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_D(R,X,Y)	_FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+
+#define _FP_DIV_MEAT_S(R,X,Y)	_FP_DIV_MEAT_1_udiv(S,R,X,Y)
+#define _FP_DIV_MEAT_D(R,X,Y)	_FP_DIV_MEAT_2_udiv(D,R,X,Y)
+
+/* These macros define what NaN looks like. They're supposed to expand to
+ * a comma-separated set of 32bit unsigned ints that encode NaN.
+ */
+#define _FP_NANFRAC_S		((_FP_QNANBIT_S << 1) - 1)
+#define _FP_NANFRAC_D		((_FP_QNANBIT_D << 1) - 1), -1
+#define _FP_NANFRAC_Q		((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
+#define _FP_NANSIGN_S		0
+#define _FP_NANSIGN_D		0
+#define _FP_NANSIGN_Q		0
+
+#define _FP_KEEPNANFRACP 1
+
+/* Exception flags.  We use the bit positions of the appropriate bits
+   in the FPSCR, which also correspond to the FE_* bits.  This makes
+   everything easier ;-).  */
+#define FP_EX_INVALID		(1 << (31 - 2))
+#define FP_EX_INVALID_SNAN	EFLAG_VXSNAN
+#define FP_EX_INVALID_ISI	EFLAG_VXISI
+#define FP_EX_INVALID_IDI	EFLAG_VXIDI
+#define FP_EX_INVALID_ZDZ	EFLAG_VXZDZ
+#define FP_EX_INVALID_IMZ	EFLAG_VXIMZ
+#define FP_EX_OVERFLOW		(1 << (31 - 3))
+#define FP_EX_UNDERFLOW		(1 << (31 - 4))
+#define FP_EX_DIVZERO		(1 << (31 - 5))
+#define FP_EX_INEXACT		(1 << (31 - 6))
+
+/* This macro appears to be called when both X and Y are NaNs, and
+ * has to choose one and copy it to R. i386 goes for the larger of the
+ * two, sparc64 just picks Y. I don't understand this at all so I'll
+ * go with sparc64 because it's shorter :->   -- PMM
+ */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP)			\
+  do {								\
+    R##_s = Y##_s;						\
+    _FP_FRAC_COPY_##wc(R,Y);					\
+    R##_c = FP_CLS_NAN;						\
+  } while (0)
+
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#define __FPU_FPSCR	(current->thread.fpscr.val)
+
+/* We only actually write to the destination register
+ * if exceptions signalled (if any) will not trap.
+ */
+#define __FPU_ENABLED_EXC					\
+({								\
+	(__FPU_FPSCR >> 3) & 0x1f;				\
+})
+
+#define __FPU_TRAP_P(bits)					\
+	((__FPU_ENABLED_EXC & (bits)) != 0)
+
+#define __FP_PACK_S(val,X)					\
+({  int __exc = _FP_PACK_CANONICAL(S,1,X);			\
+    if(!__exc || !__FPU_TRAP_P(__exc))				\
+        _FP_PACK_RAW_1_P(S,val,X);				\
+    __exc;							\
+})
+
+#define __FP_PACK_D(val,X)						\
+   do {									\
+	_FP_PACK_CANONICAL(D, 2, X);					\
+	if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS))	\
+		_FP_PACK_RAW_2_P(D, val, X);				\
+   } while (0)
+
+#define __FP_PACK_DS(val,X)						\
+   do {									\
+	FP_DECL_S(__X);							\
+	FP_CONV(S, D, 1, 2, __X, X);					\
+	_FP_PACK_CANONICAL(S, 1, __X);					\
+	if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS)) {	\
+		_FP_UNPACK_CANONICAL(S, 1, __X);			\
+		FP_CONV(D, S, 2, 1, X, __X);				\
+		_FP_PACK_CANONICAL(D, 2, X);				\
+		if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS)) \
+			_FP_PACK_RAW_2_P(D, val, X);			\
+	}								\
+   } while (0)
+
+/* Obtain the current rounding mode. */
+#define FP_ROUNDMODE							\
+({									\
+	__FPU_FPSCR & 0x3;						\
+})
+
+/* the asm fragments go here: all these are taken from glibc-2.0.5's
+ * stdlib/longlong.h
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+/* add_ssaaaa is used in op-2.h and should be equivalent to
+ * #define add_ssaaaa(sh,sl,ah,al,bh,bl) (sh = ah+bh+ (( sl = al+bl) < al))
+ * add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
+ * high_addend_2, low_addend_2) adds two UWtype integers, composed by
+ * HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
+ * respectively.  The result is placed in HIGH_SUM and LOW_SUM.  Overflow
+ * (i.e. carry out) is not stored anywhere, and is lost.
+ */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl)				\
+  do {									\
+    if (__builtin_constant_p (bh) && (bh) == 0)				\
+      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2"		\
+	     : "=r" ((USItype)(sh)),					\
+	       "=&r" ((USItype)(sl))					\
+	     : "%r" ((USItype)(ah)),					\
+	       "%r" ((USItype)(al)),					\
+	       "rI" ((USItype)(bl)));					\
+    else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0)		\
+      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2"		\
+	     : "=r" ((USItype)(sh)),					\
+	       "=&r" ((USItype)(sl))					\
+	     : "%r" ((USItype)(ah)),					\
+	       "%r" ((USItype)(al)),					\
+	       "rI" ((USItype)(bl)));					\
+    else								\
+      __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3"		\
+	     : "=r" ((USItype)(sh)),					\
+	       "=&r" ((USItype)(sl))					\
+	     : "%r" ((USItype)(ah)),					\
+	       "r" ((USItype)(bh)),					\
+	       "%r" ((USItype)(al)),					\
+	       "rI" ((USItype)(bl)));					\
+  } while (0)
+
+/* sub_ddmmss is used in op-2.h and udivmodti4.c and should be equivalent to
+ * #define sub_ddmmss(sh, sl, ah, al, bh, bl) (sh = ah-bh - ((sl = al-bl) > al))
+ * sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
+ * high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
+ * composed by HIGH_MINUEND_1 and LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and
+ * LOW_SUBTRAHEND_2 respectively.  The result is placed in HIGH_DIFFERENCE
+ * and LOW_DIFFERENCE.  Overflow (i.e. carry out) is not stored anywhere,
+ * and is lost.
+ */
+#define sub_ddmmss(sh, sl, ah, al, bh, bl)				\
+  do {									\
+    if (__builtin_constant_p (ah) && (ah) == 0)				\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2"	\
+	       : "=r" ((USItype)(sh)),					\
+		 "=&r" ((USItype)(sl))					\
+	       : "r" ((USItype)(bh)),					\
+		 "rI" ((USItype)(al)),					\
+		 "r" ((USItype)(bl)));					\
+    else if (__builtin_constant_p (ah) && (ah) ==~(USItype) 0)		\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2"	\
+	       : "=r" ((USItype)(sh)),					\
+		 "=&r" ((USItype)(sl))					\
+	       : "r" ((USItype)(bh)),					\
+		 "rI" ((USItype)(al)),					\
+		 "r" ((USItype)(bl)));					\
+    else if (__builtin_constant_p (bh) && (bh) == 0)			\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2"		\
+	       : "=r" ((USItype)(sh)),					\
+		 "=&r" ((USItype)(sl))					\
+	       : "r" ((USItype)(ah)),					\
+		 "rI" ((USItype)(al)),					\
+		 "r" ((USItype)(bl)));					\
+    else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0)		\
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2"		\
+	       : "=r" ((USItype)(sh)),					\
+		 "=&r" ((USItype)(sl))					\
+	       : "r" ((USItype)(ah)),					\
+		 "rI" ((USItype)(al)),					\
+		 "r" ((USItype)(bl)));					\
+    else								\
+      __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2"	\
+	       : "=r" ((USItype)(sh)),					\
+		 "=&r" ((USItype)(sl))					\
+	       : "r" ((USItype)(ah)),					\
+		 "r" ((USItype)(bh)),					\
+		 "rI" ((USItype)(al)),					\
+		 "r" ((USItype)(bl)));					\
+  } while (0)
+
+/* asm fragments for mul and div */
+
+/* umul_ppmm(high_prod, low_prod, multipler, multiplicand) multiplies two
+ * UWtype integers MULTIPLER and MULTIPLICAND, and generates a two UWtype
+ * word product in HIGH_PROD and LOW_PROD.
+ */
+#define umul_ppmm(ph, pl, m0, m1)					\
+  do {									\
+    USItype __m0 = (m0), __m1 = (m1);					\
+    __asm__ ("mulhwu %0,%1,%2"						\
+	     : "=r" ((USItype)(ph))					\
+	     : "%r" (__m0),						\
+	       "r" (__m1));						\
+    (pl) = __m0 * __m1;							\
+  } while (0)
+
+/* udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+ * denominator) divides a UDWtype, composed by the UWtype integers
+ * HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
+ * in QUOTIENT and the remainder in REMAINDER.  HIGH_NUMERATOR must be less
+ * than DENOMINATOR for correct operation.  If, in addition, the most
+ * significant bit of DENOMINATOR must be 1, then the pre-processor symbol
+ * UDIV_NEEDS_NORMALIZATION is defined to 1.
+ */
+#define udiv_qrnnd(q, r, n1, n0, d)					\
+  do {									\
+    UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m;			\
+    __d1 = __ll_highpart (d);						\
+    __d0 = __ll_lowpart (d);						\
+									\
+    __r1 = (n1) % __d1;							\
+    __q1 = (n1) / __d1;							\
+    __m = (UWtype) __q1 * __d0;						\
+    __r1 = __r1 * __ll_B | __ll_highpart (n0);				\
+    if (__r1 < __m)							\
+      {									\
+	__q1--, __r1 += (d);						\
+	if (__r1 >= (d)) /* we didn't get carry when adding to __r1 */	\
+	  if (__r1 < __m)						\
+	    __q1--, __r1 += (d);					\
+      }									\
+    __r1 -= __m;							\
+									\
+    __r0 = __r1 % __d1;							\
+    __q0 = __r1 / __d1;							\
+    __m = (UWtype) __q0 * __d0;						\
+    __r0 = __r0 * __ll_B | __ll_lowpart (n0);				\
+    if (__r0 < __m)							\
+      {									\
+	__q0--, __r0 += (d);						\
+	if (__r0 >= (d))						\
+	  if (__r0 < __m)						\
+	    __q0--, __r0 += (d);					\
+      }									\
+    __r0 -= __m;							\
+									\
+    (q) = (UWtype) __q1 * __ll_B | __q0;				\
+    (r) = __r0;								\
+  } while (0)
+
+#define UDIV_NEEDS_NORMALIZATION 1
+
+#define abort()								\
+	return 0
+
+#ifdef __BIG_ENDIAN
+#define __BYTE_ORDER __BIG_ENDIAN
+#else
+#define __BYTE_ORDER __LITTLE_ENDIAN
+#endif
+
+/* Exception flags. */
+#define EFLAG_INVALID		(1 << (31 - 2))
+#define EFLAG_OVERFLOW		(1 << (31 - 3))
+#define EFLAG_UNDERFLOW		(1 << (31 - 4))
+#define EFLAG_DIVZERO		(1 << (31 - 5))
+#define EFLAG_INEXACT		(1 << (31 - 6))
+
+#define EFLAG_VXSNAN		(1 << (31 - 7))
+#define EFLAG_VXISI		(1 << (31 - 8))
+#define EFLAG_VXIDI		(1 << (31 - 9))
+#define EFLAG_VXZDZ		(1 << (31 - 10))
+#define EFLAG_VXIMZ		(1 << (31 - 11))
+#define EFLAG_VXVC		(1 << (31 - 12))
+#define EFLAG_VXSOFT		(1 << (31 - 21))
+#define EFLAG_VXSQRT		(1 << (31 - 22))
+#define EFLAG_VXCVI		(1 << (31 - 23))
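The contracts documented for umul_ppmm() and udiv_qrnnd() above can be checked against plain 64-bit arithmetic. A standalone sketch (userspace C, not kernel code; all constants are arbitrary examples):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		/* umul_ppmm(ph, pl, m0, m1) splits the 64-bit product
		 * into high and low 32-bit words: */
		uint32_t m0 = 0xdeadbeef, m1 = 0x12345678;
		uint64_t prod = (uint64_t)m0 * m1;
		uint32_t ph = prod >> 32, pl = (uint32_t)prod;
		assert((((uint64_t)ph << 32) | pl) == prod);

		/* udiv_qrnnd(q, r, n1, n0, d) divides the two-word value
		 * n1:n0 by d, with the precondition n1 < d: */
		uint32_t n1 = 0x1234, n0 = 0x89abcdef, d = 0x87654321;
		uint64_t n = ((uint64_t)n1 << 32) | n0;
		uint32_t q = n / d, r = n % d;
		assert(n1 < d && (uint64_t)q * d + r == n);
		return 0;
	}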
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 4d28e1e4521..1866cec4f96 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -56,9 +56,16 @@ extern int smp_hw_index[];
 
 #define raw_smp_processor_id()	(current_thread_info()->cpu)
 #define hard_smp_processor_id() 	(smp_hw_index[smp_processor_id()])
-#define get_hard_smp_processor_id(cpu)	(smp_hw_index[(cpu)])
-#define set_hard_smp_processor_id(cpu, phys)\
-	(smp_hw_index[(cpu)] = (phys))
+
+static inline int get_hard_smp_processor_id(int cpu)
+{
+	return smp_hw_index[cpu];
+}
+
+static inline void set_hard_smp_processor_id(int cpu, int phys)
+{
+	smp_hw_index[cpu] = phys;
+}
 #endif
 
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
@@ -86,15 +93,21 @@ extern void __cpu_die(unsigned int cpu);
 #else
 /* for UP */
-#define hard_smp_processor_id()		0
+#define hard_smp_processor_id()		get_hard_smp_processor_id(0)
 #define smp_setup_cpu_maps()
 
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_PPC64
-#define get_hard_smp_processor_id(CPU) (paca[(CPU)].hw_cpu_id)
-#define set_hard_smp_processor_id(CPU, VAL) \
-	do { (paca[(CPU)].hw_cpu_id = (VAL)); } while (0)
+static inline int get_hard_smp_processor_id(int cpu)
+{
+	return paca[cpu].hw_cpu_id;
+}
+
+static inline void set_hard_smp_processor_id(int cpu, int phys)
+{
+	paca[cpu].hw_cpu_id = phys;
+}
 
 extern void smp_release_cpus(void);
 
@@ -102,10 +115,17 @@ extern void smp_release_cpus(void);
 /* 32-bit */
 #ifndef CONFIG_SMP
 extern int boot_cpuid_phys;
-#define get_hard_smp_processor_id(cpu) 	boot_cpuid_phys
-#define set_hard_smp_processor_id(cpu, phys)
-#endif
-#endif
+static inline int get_hard_smp_processor_id(int cpu)
+{
+	return boot_cpuid_phys;
+}
+
+static inline void set_hard_smp_processor_id(int cpu, int phys)
+{
+	boot_cpuid_phys = phys;
+}
+#endif /* !CONFIG_SMP */
+#endif /* !CONFIG_PPC64 */
 
 extern int smt_enabled_at_boot;
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index f6cc7a43b4f..803def23665 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -32,7 +32,7 @@ COMPAT_SYS_SPU(stime)
 COMPAT_SYS(ptrace)
 SYSCALL_SPU(alarm)
 OLDSYS(fstat)
-COMPAT_SYS(pause)
+SYSCALL(pause)
 COMPAT_SYS(utime)
 SYSCALL(ni_syscall)
 SYSCALL(ni_syscall)
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index 361cd5c7a32..a2c6bfd85fb 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -29,6 +29,9 @@
 #include <linux/mm.h>
 
 extern void _tlbie(unsigned long address, unsigned int pid);
+extern void _tlbil_all(void);
+extern void _tlbil_pid(unsigned int pid);
+extern void _tlbil_va(unsigned long address, unsigned int pid);
 
 #if defined(CONFIG_40x) || defined(CONFIG_8xx)
 #define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
 #else
 extern void _tlbia(void);
@@ -38,31 +41,31 @@ extern void _tlbia(void);
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-	_tlbia();
+	_tlbil_pid(mm->context.id);
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 				  unsigned long vmaddr)
 {
-	_tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
+	_tlbil_va(vmaddr, vma ? vma->vm_mm->context.id : 0);
 }
 
 static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
 					 unsigned long vmaddr)
 {
-	_tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
+	flush_tlb_page(vma, vmaddr);
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
-	_tlbia();
+	_tlbil_pid(vma->vm_mm->context.id);
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start,
 					  unsigned long end)
 {
-	_tlbia();
+	_tlbil_pid(0);
 }
 
 #elif defined(CONFIG_PPC32)
diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h
index d3374bc865b..c004c13f291 100644
--- a/arch/powerpc/include/asm/types.h
+++ b/arch/powerpc/include/asm/types.h
@@ -48,14 +48,7 @@ typedef struct {
 
 typedef __vector128 vector128;
 
-/* Physical address used by some IO functions */
-#if defined(CONFIG_PPC64) || defined(CONFIG_PHYS_64BIT)
-typedef u64 phys_addr_t;
-#else
-typedef u32 phys_addr_t;
-#endif
-
-#ifdef __powerpc64__
+#if defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT)
 typedef u64 dma_addr_t;
 #else
 typedef u32 dma_addr_t;