Diffstat (limited to 'arch/powerpc/kernel')
37 files changed, 1505 insertions, 1048 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 7af23c43fd4..4fe53d08ab8 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -17,11 +17,11 @@ obj-y += vdso32/
 obj-$(CONFIG_PPC64)		+= setup_64.o binfmt_elf32.o sys_ppc32.o \
 				   signal_64.o ptrace32.o \
 				   paca.o cpu_setup_ppc970.o \
-				   firmware.o sysfs.o
+				   firmware.o sysfs.o nvram_64.o
 obj-$(CONFIG_PPC64)		+= vdso64/
 obj-$(CONFIG_ALTIVEC)		+= vecemu.o vector.o
 obj-$(CONFIG_PPC_970_NAP)	+= idle_power4.o
-obj-$(CONFIG_PPC_OF)		+= of_device.o prom_parse.o
+obj-$(CONFIG_PPC_OF)		+= of_device.o of_platform.o prom_parse.o
 procfs-$(CONFIG_PPC64)		:= proc_ppc64.o
 obj-$(CONFIG_PROC_FS)		+= $(procfs-y)
 rtaspci-$(CONFIG_PPC64)		:= rtas_pci.o
@@ -32,7 +32,6 @@ obj-$(CONFIG_LPARCFG)	+= lparcfg.o
 obj-$(CONFIG_IBMVIO)		+= vio.o
 obj-$(CONFIG_IBMEBUS)		+= ibmebus.o
 obj-$(CONFIG_GENERIC_TBSYNC)	+= smp-tbsync.o
-obj64-$(CONFIG_PPC_MULTIPLATFORM) += nvram_64.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
 obj-$(CONFIG_6xx)		+= idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
 obj-$(CONFIG_TAU)		+= tau_6xx.o
@@ -59,11 +58,11 @@ obj-$(CONFIG_BOOTX_TEXT)	+= btext.o
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_PPC_UDBG_16550)	+= legacy_serial.o udbg_16550.o
+
 module-$(CONFIG_PPC64)		+= module_64.o
 obj-$(CONFIG_MODULES)		+= $(module-y)

-pci64-$(CONFIG_PPC64)		+= pci_64.o pci_dn.o pci_iommu.o \
-				   pci_direct_iommu.o iomap.o
+pci64-$(CONFIG_PPC64)		+= pci_64.o pci_dn.o
 pci32-$(CONFIG_PPC32)		:= pci_32.o
 obj-$(CONFIG_PCI)		+= $(pci64-y) $(pci32-y)
 kexec-$(CONFIG_PPC64)		:= machine_kexec_64.o
@@ -72,8 +71,12 @@ obj-$(CONFIG_KEXEC)		+= machine_kexec.o crash.o $(kexec-y)
 obj-$(CONFIG_AUDIT)		+= audit.o
 obj64-$(CONFIG_AUDIT)		+= compat_audit.o

+ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
+obj-y				+= iomap.o
+endif
+
 ifeq ($(CONFIG_PPC_ISERIES),y)
-$(obj)/head_64.o: $(obj)/lparmap.s
+extra-y += lparmap.s
 AFLAGS_head_64.o += -I$(obj)
 endif
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index d06f378597b..e96521530d2 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -118,7 +118,8 @@ int main(void)
 	DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
 	DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
 	DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
-	DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
+	DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
+	DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
 	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
 	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
 	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
diff --git a/arch/powerpc/kernel/cpu_setup_ppc970.S b/arch/powerpc/kernel/cpu_setup_ppc970.S
index 652594891d5..bf118c38575 100644
--- a/arch/powerpc/kernel/cpu_setup_ppc970.S
+++ b/arch/powerpc/kernel/cpu_setup_ppc970.S
@@ -83,6 +83,22 @@ _GLOBAL(__setup_cpu_ppc970)
 	rldimi	r0,r11,52,8	/* set NAP and DPM */
 	li	r11,0
 	rldimi	r0,r11,32,31	/* clear EN_ATTN */
+	b	load_hids	/* Jump to shared code */
+
+
+_GLOBAL(__setup_cpu_ppc970MP)
+	/* Do nothing if not running in HV mode */
+	mfmsr	r0
+	rldicl.
r0,r0,4,63 + beqlr + + mfspr r0,SPRN_HID0 + li r11,0x15 /* clear DOZE and SLEEP */ + rldimi r0,r11,52,6 /* set DEEPNAP, NAP and DPM */ + li r11,0 + rldimi r0,r11,32,31 /* clear EN_ATTN */ + +load_hids: mtspr SPRN_HID0,r0 mfspr r0,SPRN_HID0 mfspr r0,SPRN_HID0 diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index bfd499ee375..9d1614c3ce6 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -42,6 +42,7 @@ extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec); #endif /* CONFIG_PPC32 */ #ifdef CONFIG_PPC64 extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); +extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec); extern void __restore_cpu_ppc970(void); #endif /* CONFIG_PPC64 */ @@ -222,9 +223,9 @@ static struct cpu_spec cpu_specs[] = { .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, - .cpu_setup = __setup_cpu_ppc970, + .cpu_setup = __setup_cpu_ppc970MP, .cpu_restore = __restore_cpu_ppc970, - .oprofile_cpu_type = "ppc64/970", + .oprofile_cpu_type = "ppc64/970MP", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "ppc970", }, @@ -276,10 +277,45 @@ static struct cpu_spec cpu_specs[] = { .oprofile_mmcra_sipr = MMCRA_SIPR, .platform = "power5+", }, + { /* POWER6 in P5+ mode; 2.04-compliant processor */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x0f000001, + .cpu_name = "POWER5+", + .cpu_features = CPU_FTRS_POWER5, + .cpu_user_features = COMMON_USER_POWER5_PLUS, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .oprofile_cpu_type = "ppc64/power6", + .oprofile_type = PPC_OPROFILE_POWER4, + .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV, + .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR, + .oprofile_mmcra_clear = POWER6_MMCRA_THRM | + POWER6_MMCRA_OTHER, + .platform = "power5+", + }, { /* Power6 */ .pvr_mask = 0xffff0000, .pvr_value = 0x003e0000, - .cpu_name = "POWER6", + .cpu_name = "POWER6 (raw)", + .cpu_features = CPU_FTRS_POWER6, + .cpu_user_features = COMMON_USER_POWER6 | + PPC_FEATURE_POWER6_EXT, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .oprofile_cpu_type = "ppc64/power6", + .oprofile_type = PPC_OPROFILE_POWER4, + .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV, + .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR, + .oprofile_mmcra_clear = POWER6_MMCRA_THRM | + POWER6_MMCRA_OTHER, + .platform = "power6x", + }, + { /* 2.05-compliant processor, i.e. Power6 "architected" mode */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x0f000002, + .cpu_name = "POWER6 (architected)", .cpu_features = CPU_FTRS_POWER6, .cpu_user_features = COMMON_USER_POWER6, .icache_bsize = 128, @@ -303,6 +339,9 @@ static struct cpu_spec cpu_specs[] = { PPC_FEATURE_SMT, .icache_bsize = 128, .dcache_bsize = 128, + .num_pmcs = 4, + .oprofile_cpu_type = "ppc64/cell-be", + .oprofile_type = PPC_OPROFILE_CELL, .platform = "ppc-cell-be", }, { /* PA Semi PA6T */ @@ -801,6 +840,17 @@ static struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_603, .platform = "ppc603", }, + { /* e300c3 on 83xx */ + .pvr_mask = 0x7fff0000, + .pvr_value = 0x00850000, + .cpu_name = "e300c3", + .cpu_features = CPU_FTRS_E300, + .cpu_user_features = COMMON_USER, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_603, + .platform = "ppc603", + }, { /* default match, we assume split I/D cache & TB (non-601)... 
*/ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, @@ -1169,19 +1219,15 @@ static struct cpu_spec cpu_specs[] = { #endif /* CONFIG_PPC32 */ }; -struct cpu_spec *identify_cpu(unsigned long offset) +struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr) { struct cpu_spec *s = cpu_specs; struct cpu_spec **cur = &cur_cpu_spec; - unsigned int pvr = mfspr(SPRN_PVR); int i; s = PTRRELOC(s); cur = PTRRELOC(cur); - if (*cur != NULL) - return PTRRELOC(*cur); - for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) if ((pvr & s->pvr_mask) == s->pvr_value) { *cur = cpu_specs + i; diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 1af41f7616d..89b03c8da9d 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -111,7 +111,7 @@ void crash_ipi_callback(struct pt_regs *regs) if (!cpu_online(cpu)) return; - local_irq_disable(); + hard_irq_disable(); if (!cpu_isset(cpu, cpus_in_crash)) crash_save_this_cpu(regs, cpu); cpu_set(cpu, cpus_in_crash); @@ -289,7 +289,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs) * an SMP system. * The kernel is broken so disable interrupts. */ - local_irq_disable(); + hard_irq_disable(); for_each_irq(irq) { struct irq_desc *desc = irq_desc + irq; diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c index 6c168f6ea14..7b0e754383c 100644 --- a/arch/powerpc/kernel/dma_64.c +++ b/arch/powerpc/kernel/dma_64.c @@ -1,151 +1,194 @@ /* - * Copyright (C) 2004 IBM Corporation + * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation * - * Implements the generic device dma API for ppc64. Handles - * the pci and vio busses + * Provide default implementations of the DMA mapping callbacks for + * directly mapped busses and busses using the iommu infrastructure */ #include <linux/device.h> #include <linux/dma-mapping.h> -/* Include the busses we support */ -#include <linux/pci.h> -#include <asm/vio.h> -#include <asm/ibmebus.h> -#include <asm/scatterlist.h> #include <asm/bug.h> +#include <asm/iommu.h> +#include <asm/abs_addr.h> -static struct dma_mapping_ops *get_dma_ops(struct device *dev) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) - return &pci_dma_ops; -#endif -#ifdef CONFIG_IBMVIO - if (dev->bus == &vio_bus_type) - return &vio_dma_ops; -#endif -#ifdef CONFIG_IBMEBUS - if (dev->bus == &ibmebus_bus_type) - return &ibmebus_dma_ops; -#endif - return NULL; -} +/* + * Generic iommu implementation + */ -int dma_supported(struct device *dev, u64 mask) +static inline unsigned long device_to_mask(struct device *dev) { - struct dma_mapping_ops *dma_ops = get_dma_ops(dev); + if (dev->dma_mask && *dev->dma_mask) + return *dev->dma_mask; + /* Assume devices without mask can take 32 bit addresses */ + return 0xfffffffful; +} - BUG_ON(!dma_ops); - return dma_ops->dma_supported(dev, mask); +/* Allocates a contiguous real buffer and creates mappings over it. + * Returns the virtual address of the buffer and sets dma_handle + * to the dma address (mapping) of the first page. 
+ */ +static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + return iommu_alloc_coherent(dev->archdata.dma_data, size, dma_handle, + device_to_mask(dev), flag, + dev->archdata.numa_node); } -EXPORT_SYMBOL(dma_supported); -int dma_set_mask(struct device *dev, u64 dma_mask) +static void dma_iommu_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle) { -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) - return pci_set_dma_mask(to_pci_dev(dev), dma_mask); -#endif -#ifdef CONFIG_IBMVIO - if (dev->bus == &vio_bus_type) - return -EIO; -#endif /* CONFIG_IBMVIO */ -#ifdef CONFIG_IBMEBUS - if (dev->bus == &ibmebus_bus_type) - return -EIO; -#endif - BUG(); - return 0; + iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle); } -EXPORT_SYMBOL(dma_set_mask); -void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) +/* Creates TCEs for a user provided buffer. The user buffer must be + * contiguous real kernel storage (not vmalloc). The address of the buffer + * passed here is the kernel (virtual) address of the buffer. The buffer + * need not be page aligned, the dma_addr_t returned will point to the same + * byte within the page as vaddr. + */ +static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr, + size_t size, + enum dma_data_direction direction) { - struct dma_mapping_ops *dma_ops = get_dma_ops(dev); - - BUG_ON(!dma_ops); - - return dma_ops->alloc_coherent(dev, size, dma_handle, flag); + return iommu_map_single(dev->archdata.dma_data, vaddr, size, + device_to_mask(dev), direction); } -EXPORT_SYMBOL(dma_alloc_coherent); -void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t dma_handle) + +static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle, + size_t size, + enum dma_data_direction direction) { - struct dma_mapping_ops *dma_ops = get_dma_ops(dev); + iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction); +} - BUG_ON(!dma_ops); - dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); +static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, + int nelems, enum dma_data_direction direction) +{ + return iommu_map_sg(dev->archdata.dma_data, sglist, nelems, + device_to_mask(dev), direction); } -EXPORT_SYMBOL(dma_free_coherent); -dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size, - enum dma_data_direction direction) +static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist, + int nelems, enum dma_data_direction direction) { - struct dma_mapping_ops *dma_ops = get_dma_ops(dev); - - BUG_ON(!dma_ops); - - return dma_ops->map_single(dev, cpu_addr, size, direction); + iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction); } -EXPORT_SYMBOL(dma_map_single); -void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction direction) +/* We support DMA to/from any memory page via the iommu */ +static int dma_iommu_dma_supported(struct device *dev, u64 mask) { - struct dma_mapping_ops *dma_ops = get_dma_ops(dev); - - BUG_ON(!dma_ops); - - dma_ops->unmap_single(dev, dma_addr, size, direction); + struct iommu_table *tbl = dev->archdata.dma_data; + + if (!tbl || tbl->it_offset > mask) { + printk(KERN_INFO + "Warning: IOMMU offset too big for device mask\n"); + if (tbl) + printk(KERN_INFO + "mask: 0x%08lx, table offset: 0x%08lx\n", + mask, tbl->it_offset); + else + 
printk(KERN_INFO "mask: 0x%08lx, table unavailable\n", + mask); + return 0; + } else + return 1; } -EXPORT_SYMBOL(dma_unmap_single); -dma_addr_t dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - struct dma_mapping_ops *dma_ops = get_dma_ops(dev); +struct dma_mapping_ops dma_iommu_ops = { + .alloc_coherent = dma_iommu_alloc_coherent, + .free_coherent = dma_iommu_free_coherent, + .map_single = dma_iommu_map_single, + .unmap_single = dma_iommu_unmap_single, + .map_sg = dma_iommu_map_sg, + .unmap_sg = dma_iommu_unmap_sg, + .dma_supported = dma_iommu_dma_supported, +}; +EXPORT_SYMBOL(dma_iommu_ops); - BUG_ON(!dma_ops); +/* + * Generic direct DMA implementation + * + * This implementation supports a global offset that can be applied if + * the address at which memory is visible to devices is not 0. + */ +unsigned long dma_direct_offset; - return dma_ops->map_single(dev, page_address(page) + offset, size, - direction); +static void *dma_direct_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + struct page *page; + void *ret; + int node = dev->archdata.numa_node; + + /* TODO: Maybe use the numa node here too ? */ + page = alloc_pages_node(node, flag, get_order(size)); + if (page == NULL) + return NULL; + ret = page_address(page); + memset(ret, 0, size); + *dma_handle = virt_to_abs(ret) | dma_direct_offset; + + return ret; } -EXPORT_SYMBOL(dma_map_page); -void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, - enum dma_data_direction direction) +static void dma_direct_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle) { - struct dma_mapping_ops *dma_ops = get_dma_ops(dev); + free_pages((unsigned long)vaddr, get_order(size)); +} - BUG_ON(!dma_ops); +static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr, + size_t size, + enum dma_data_direction direction) +{ + return virt_to_abs(ptr) | dma_direct_offset; +} - dma_ops->unmap_single(dev, dma_address, size, direction); +static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr, + size_t size, + enum dma_data_direction direction) +{ } -EXPORT_SYMBOL(dma_unmap_page); -int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction direction) +static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction direction) { - struct dma_mapping_ops *dma_ops = get_dma_ops(dev); + int i; - BUG_ON(!dma_ops); + for (i = 0; i < nents; i++, sg++) { + sg->dma_address = (page_to_phys(sg->page) + sg->offset) | + dma_direct_offset; + sg->dma_length = sg->length; + } - return dma_ops->map_sg(dev, sg, nents, direction); + return nents; } -EXPORT_SYMBOL(dma_map_sg); -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, - enum dma_data_direction direction) +static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction direction) { - struct dma_mapping_ops *dma_ops = get_dma_ops(dev); - - BUG_ON(!dma_ops); +} - dma_ops->unmap_sg(dev, sg, nhwentries, direction); +static int dma_direct_dma_supported(struct device *dev, u64 mask) +{ + /* Could be improved to check for memory though it better be + * done via some global so platforms can set the limit in case + * they have limited DMA windows + */ + return mask >= DMA_32BIT_MASK; } -EXPORT_SYMBOL(dma_unmap_sg); + +struct dma_mapping_ops dma_direct_ops = { + 
.alloc_coherent = dma_direct_alloc_coherent, + .free_coherent = dma_direct_free_coherent, + .map_single = dma_direct_map_single, + .unmap_single = dma_direct_unmap_single, + .map_sg = dma_direct_map_sg, + .unmap_sg = dma_direct_unmap_sg, + .dma_supported = dma_direct_dma_supported, +}; +EXPORT_SYMBOL(dma_direct_ops); diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 748e74fcf54..1a3d4de197d 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -87,15 +87,19 @@ system_call_common: addi r9,r1,STACK_FRAME_OVERHEAD ld r11,exception_marker@toc(r2) std r11,-16(r9) /* "regshere" marker */ + li r10,1 + stb r10,PACASOFTIRQEN(r13) + stb r10,PACAHARDIRQEN(r13) + std r10,SOFTE(r1) #ifdef CONFIG_PPC_ISERIES BEGIN_FW_FTR_SECTION /* Hack for handling interrupts when soft-enabling on iSeries */ cmpdi cr1,r0,0x5555 /* syscall 0x5555 */ andi. r10,r12,MSR_PR /* from kernel */ crand 4*cr0+eq,4*cr1+eq,4*cr0+eq - beq hardware_interrupt_entry - lbz r10,PACAPROCENABLED(r13) - std r10,SOFTE(r1) + bne 2f + b hardware_interrupt_entry +2: END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) #endif mfmsr r11 @@ -460,9 +464,9 @@ _GLOBAL(ret_from_except_lite) #endif restore: + ld r5,SOFTE(r1) #ifdef CONFIG_PPC_ISERIES BEGIN_FW_FTR_SECTION - ld r5,SOFTE(r1) cmpdi 0,r5,0 beq 4f /* Check for pending interrupts (iSeries) */ @@ -472,21 +476,25 @@ BEGIN_FW_FTR_SECTION beq+ 4f /* skip do_IRQ if no interrupts */ li r3,0 - stb r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */ + stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */ ori r10,r10,MSR_EE mtmsrd r10 /* hard-enable again */ addi r3,r1,STACK_FRAME_OVERHEAD bl .do_IRQ b .ret_from_except_lite /* loop back and handle more */ - -4: stb r5,PACAPROCENABLED(r13) +4: END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) #endif + stb r5,PACASOFTIRQEN(r13) ld r3,_MSR(r1) andi. r0,r3,MSR_RI beq- unrecov_restore + /* extract EE bit and use it to restore paca->hard_enabled */ + rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */ + stb r4,PACAHARDIRQEN(r13) + andi. r0,r3,MSR_PR /* @@ -538,25 +546,15 @@ do_work: /* Check that preempt_count() == 0 and interrupts are enabled */ lwz r8,TI_PREEMPT(r9) cmpwi cr1,r8,0 -#ifdef CONFIG_PPC_ISERIES -BEGIN_FW_FTR_SECTION ld r0,SOFTE(r1) cmpdi r0,0 -END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) -#endif -BEGIN_FW_FTR_SECTION - andi. r0,r3,MSR_EE -END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) crandc eq,cr1*4+eq,eq bne restore /* here we are preempting the current task */ 1: -#ifdef CONFIG_PPC_ISERIES -BEGIN_FW_FTR_SECTION li r0,1 - stb r0,PACAPROCENABLED(r13) -END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) -#endif + stb r0,PACASOFTIRQEN(r13) + stb r0,PACAHARDIRQEN(r13) ori r10,r10,MSR_EE mtmsrd r10,1 /* reenable interrupts */ bl .preempt_schedule @@ -639,8 +637,7 @@ _GLOBAL(enter_rtas) /* There is no way it is acceptable to get here with interrupts enabled, * check it with the asm equivalent of WARN_ON */ - mfmsr r6 - andi. r0,r6,MSR_EE + lbz r0,PACASOFTIRQEN(r13) 1: tdnei r0,0 .section __bug_table,"a" .llong 1b,__LINE__ + 0x1000000, 1f, 2f @@ -649,7 +646,13 @@ _GLOBAL(enter_rtas) 1: .asciz __FILE__ 2: .asciz "enter_rtas" .previous - + + /* Hard-disable interrupts */ + mfmsr r6 + rldicl r7,r6,48,1 + rotldi r7,r7,16 + mtmsrd r7,1 + /* Unfortunately, the stack pointer and the MSR are also clobbered, * so they are saved in the PACA which allows us to restore * our original state after RTAS returns. 
@@ -735,8 +738,6 @@ _STATIC(rtas_restore_regs) #endif /* CONFIG_PPC_RTAS */ -#ifdef CONFIG_PPC_MULTIPLATFORM - _GLOBAL(enter_prom) mflr r0 std r0,16(r1) @@ -821,5 +822,3 @@ _GLOBAL(enter_prom) ld r0,16(r1) mtlr r0 blr - -#endif /* CONFIG_PPC_MULTIPLATFORM */ diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index e720729f3e5..71b1fe58e9e 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -35,9 +35,7 @@ #include <asm/thread_info.h> #include <asm/firmware.h> -#ifdef CONFIG_PPC_ISERIES #define DO_SOFT_DISABLE -#endif /* * We layout physical memory as follows: @@ -74,13 +72,11 @@ .text .globl _stext _stext: -#ifdef CONFIG_PPC_MULTIPLATFORM _GLOBAL(__start) /* NOP this out unconditionally */ BEGIN_FTR_SECTION b .__start_initialization_multiplatform END_FTR_SECTION(0, 1) -#endif /* CONFIG_PPC_MULTIPLATFORM */ /* Catch branch to 0 in real mode */ trap @@ -308,7 +304,9 @@ exception_marker: std r9,_LINK(r1); \ mfctr r10; /* save CTR in stackframe */ \ std r10,_CTR(r1); \ + lbz r10,PACASOFTIRQEN(r13); \ mfspr r11,SPRN_XER; /* save XER in stackframe */ \ + std r10,SOFTE(r1); \ std r11,_XER(r1); \ li r9,(n)+1; \ std r9,_TRAP(r1); /* set trap number */ \ @@ -343,6 +341,34 @@ label##_pSeries: \ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) +#define MASKABLE_EXCEPTION_PSERIES(n, label) \ + . = n; \ + .globl label##_pSeries; \ +label##_pSeries: \ + HMT_MEDIUM; \ + mtspr SPRN_SPRG1,r13; /* save r13 */ \ + mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \ + std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \ + std r10,PACA_EXGEN+EX_R10(r13); \ + lbz r10,PACASOFTIRQEN(r13); \ + mfcr r9; \ + cmpwi r10,0; \ + beq masked_interrupt; \ + mfspr r10,SPRN_SPRG1; \ + std r10,PACA_EXGEN+EX_R13(r13); \ + std r11,PACA_EXGEN+EX_R11(r13); \ + std r12,PACA_EXGEN+EX_R12(r13); \ + clrrdi r12,r13,32; /* get high part of &label */ \ + mfmsr r10; \ + mfspr r11,SPRN_SRR0; /* save SRR0 */ \ + LOAD_HANDLER(r12,label##_common) \ + ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ + mtspr SPRN_SRR0,r12; \ + mfspr r12,SPRN_SRR1; /* and SRR1 */ \ + mtspr SPRN_SRR1,r10; \ + rfid; \ + b . 
/* prevent speculative execution */ + #define STD_EXCEPTION_ISERIES(n, label, area) \ .globl label##_iSeries; \ label##_iSeries: \ @@ -358,40 +384,32 @@ label##_iSeries: \ HMT_MEDIUM; \ mtspr SPRN_SPRG1,r13; /* save r13 */ \ EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \ - lbz r10,PACAPROCENABLED(r13); \ + lbz r10,PACASOFTIRQEN(r13); \ cmpwi 0,r10,0; \ beq- label##_iSeries_masked; \ EXCEPTION_PROLOG_ISERIES_2; \ b label##_common; \ -#ifdef DO_SOFT_DISABLE +#ifdef CONFIG_PPC_ISERIES #define DISABLE_INTS \ -BEGIN_FW_FTR_SECTION; \ - lbz r10,PACAPROCENABLED(r13); \ li r11,0; \ - std r10,SOFTE(r1); \ + stb r11,PACASOFTIRQEN(r13); \ +BEGIN_FW_FTR_SECTION; \ + stb r11,PACAHARDIRQEN(r13); \ +END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \ +BEGIN_FW_FTR_SECTION; \ mfmsr r10; \ - stb r11,PACAPROCENABLED(r13); \ ori r10,r10,MSR_EE; \ mtmsrd r10,1; \ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) -#define ENABLE_INTS \ -BEGIN_FW_FTR_SECTION; \ - lbz r10,PACAPROCENABLED(r13); \ - mfmsr r11; \ - std r10,SOFTE(r1); \ - ori r11,r11,MSR_EE; \ -END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES); \ -BEGIN_FW_FTR_SECTION; \ - ld r12,_MSR(r1); \ - mfmsr r11; \ - rlwimi r11,r12,0,MSR_EE; \ -END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \ - mtmsrd r11,1 +#else +#define DISABLE_INTS \ + li r11,0; \ + stb r11,PACASOFTIRQEN(r13); \ + stb r11,PACAHARDIRQEN(r13) -#else /* hard enable/disable interrupts */ -#define DISABLE_INTS +#endif /* CONFIG_PPC_ISERIES */ #define ENABLE_INTS \ ld r12,_MSR(r1); \ @@ -399,8 +417,6 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \ rlwimi r11,r12,0,MSR_EE; \ mtmsrd r11,1 -#endif - #define STD_EXCEPTION_COMMON(trap, label, hdlr) \ .align 7; \ .globl label##_common; \ @@ -541,11 +557,11 @@ instruction_access_slb_pSeries: mfspr r12,SPRN_SRR1 /* and SRR1 */ b .slb_miss_realmode /* Rel. branch works in real mode */ - STD_EXCEPTION_PSERIES(0x500, hardware_interrupt) + MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt) STD_EXCEPTION_PSERIES(0x600, alignment) STD_EXCEPTION_PSERIES(0x700, program_check) STD_EXCEPTION_PSERIES(0x800, fp_unavailable) - STD_EXCEPTION_PSERIES(0x900, decrementer) + MASKABLE_EXCEPTION_PSERIES(0x900, decrementer) STD_EXCEPTION_PSERIES(0xa00, trap_0a) STD_EXCEPTION_PSERIES(0xb00, trap_0b) @@ -597,7 +613,24 @@ system_call_pSeries: /*** pSeries interrupt support ***/ /* moved from 0xf00 */ - STD_EXCEPTION_PSERIES(., performance_monitor) + MASKABLE_EXCEPTION_PSERIES(., performance_monitor) + +/* + * An interrupt came in while soft-disabled; clear EE in SRR1, + * clear paca->hard_enabled and return. + */ +masked_interrupt: + stb r10,PACAHARDIRQEN(r13) + mtcrf 0x80,r9 + ld r9,PACA_EXGEN+EX_R9(r13) + mfspr r10,SPRN_SRR1 + rldicl r10,r10,48,1 /* clear MSR_EE */ + rotldi r10,r10,16 + mtspr SPRN_SRR1,r10 + ld r10,PACA_EXGEN+EX_R10(r13) + mfspr r13,SPRN_SPRG1 + rfid + b . .align 7 do_stab_bolted_pSeries: @@ -792,7 +825,7 @@ system_reset_iSeries: cmpwi 0,r23,0 beq iSeries_secondary_smp_loop /* Loop until told to go */ - bne .__secondary_start /* Loop until told to go */ + bne __secondary_start /* Loop until told to go */ iSeries_secondary_smp_loop: /* Let the Hypervisor know we are alive */ /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ @@ -813,7 +846,6 @@ iSeries_secondary_smp_loop: b 1b /* If SMP not configured, secondaries * loop forever */ - .globl decrementer_iSeries_masked decrementer_iSeries_masked: /* We may not have a valid TOC pointer in here. 
*/ li r11,1 @@ -824,7 +856,6 @@ decrementer_iSeries_masked: mtspr SPRN_DEC,r12 /* fall through */ - .globl hardware_interrupt_iSeries_masked hardware_interrupt_iSeries_masked: mtcrf 0x80,r9 /* Restore regs */ ld r12,PACALPPACAPTR(r13) @@ -926,10 +957,18 @@ bad_stack: * any task or sent any task a signal, you should use * ret_from_except or ret_from_except_lite instead of this. */ +fast_exc_return_irq: /* restores irq state too */ + ld r3,SOFTE(r1) + ld r12,_MSR(r1) + stb r3,PACASOFTIRQEN(r13) /* restore paca->soft_enabled */ + rldicl r4,r12,49,63 /* get MSR_EE to LSB */ + stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */ + b 1f + .globl fast_exception_return fast_exception_return: ld r12,_MSR(r1) - ld r11,_NIP(r1) +1: ld r11,_NIP(r1) andi. r3,r12,MSR_RI /* check if RI is set */ beq- unrecov_fer @@ -952,7 +991,8 @@ fast_exception_return: REST_8GPRS(2, r1) mfmsr r10 - clrrdi r10,r10,2 /* clear RI (LE is 0 already) */ + rldicl r10,r10,48,1 /* clear EE */ + rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */ mtmsrd r10,1 mtspr SPRN_SRR1,r12 @@ -1326,6 +1366,16 @@ BEGIN_FW_FTR_SECTION * interrupts if necessary. */ beq 13f +END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) +#endif +BEGIN_FW_FTR_SECTION + /* + * Here we have interrupts hard-disabled, so it is sufficient + * to restore paca->{soft,hard}_enable and get out. + */ + beq fast_exc_return_irq /* Return from exception on success */ +END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) + /* For a hash failure, we don't bother re-enabling interrupts */ ble- 12f @@ -1337,14 +1387,6 @@ BEGIN_FW_FTR_SECTION ld r3,SOFTE(r1) bl .local_irq_restore b 11f -END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) -#endif -BEGIN_FW_FTR_SECTION - beq fast_exception_return /* Return from exception on success */ - ble- 12f /* Failure return from hash_page */ - - /* fall through */ -END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) /* Here we have a page fault that hash_page can't handle. */ handle_page_fault: @@ -1362,6 +1404,8 @@ handle_page_fault: bl .bad_page_fault b .ret_from_except +13: b .ret_from_except_lite + /* We have a page fault that hash_page could handle but HV refused * the PTE insertion */ @@ -1371,8 +1415,6 @@ handle_page_fault: bl .low_hash_fault b .ret_from_except -13: b .ret_from_except_lite - /* here we have a segment miss */ do_ste_alloc: bl .ste_allocate /* try to insert stab entry */ @@ -1560,7 +1602,7 @@ _GLOBAL(generic_secondary_smp_init) ld r1,PACAEMERGSP(r13) subi r1,r1,STACK_FRAME_OVERHEAD - b .__secondary_start + b __secondary_start #endif #ifdef CONFIG_PPC_ISERIES @@ -1595,7 +1637,6 @@ _STATIC(__start_initialization_iSeries) b .start_here_common #endif /* CONFIG_PPC_ISERIES */ -#ifdef CONFIG_PPC_MULTIPLATFORM _STATIC(__mmu_off) mfmsr r3 @@ -1621,13 +1662,11 @@ _STATIC(__mmu_off) * */ _GLOBAL(__start_initialization_multiplatform) -#ifdef CONFIG_PPC_MULTIPLATFORM /* * Are we booted from a PROM Of-type client-interface ? 
*/ cmpldi cr0,r5,0 bne .__boot_from_prom /* yes -> prom */ -#endif /* Save parameters */ mr r31,r3 @@ -1656,7 +1695,6 @@ _GLOBAL(__start_initialization_multiplatform) bl .__mmu_off b .__after_prom_start -#ifdef CONFIG_PPC_MULTIPLATFORM _STATIC(__boot_from_prom) /* Save parameters */ mr r31,r3 @@ -1696,7 +1734,6 @@ _STATIC(__boot_from_prom) bl .prom_init /* We never return */ trap -#endif /* * At this point, r3 contains the physical address we are running at, @@ -1752,8 +1789,6 @@ _STATIC(__after_prom_start) bl .copy_and_flush /* copy the rest */ b .start_here_multiplatform -#endif /* CONFIG_PPC_MULTIPLATFORM */ - /* * Copy routine used to copy the kernel to start at physical address 0 * and flush and invalidate the caches as needed. @@ -1836,7 +1871,7 @@ _GLOBAL(pmac_secondary_start) ld r1,PACAEMERGSP(r13) subi r1,r1,STACK_FRAME_OVERHEAD - b .__secondary_start + b __secondary_start #endif /* CONFIG_PPC_PMAC */ @@ -1853,7 +1888,7 @@ _GLOBAL(pmac_secondary_start) * r13 = paca virtual address * SPRG3 = paca virtual address */ -_GLOBAL(__secondary_start) +__secondary_start: /* Set thread priority to MEDIUM */ HMT_MEDIUM @@ -1877,11 +1912,16 @@ _GLOBAL(__secondary_start) /* enable MMU and jump to start_secondary */ LOAD_REG_ADDR(r3, .start_secondary_prolog) LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) -#ifdef DO_SOFT_DISABLE +#ifdef CONFIG_PPC_ISERIES BEGIN_FW_FTR_SECTION ori r4,r4,MSR_EE END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) #endif +BEGIN_FW_FTR_SECTION + stb r7,PACASOFTIRQEN(r13) + stb r7,PACAHARDIRQEN(r13) +END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) + mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 rfid @@ -1913,7 +1953,6 @@ _GLOBAL(enable_64b_mode) isync blr -#ifdef CONFIG_PPC_MULTIPLATFORM /* * This is where the main kernel code starts. */ @@ -1977,7 +2016,6 @@ _STATIC(start_here_multiplatform) mtspr SPRN_SRR1,r4 rfid b . /* prevent speculative execution */ -#endif /* CONFIG_PPC_MULTIPLATFORM */ /* This is where all platforms converge execution */ _STATIC(start_here_common) @@ -2005,15 +2043,18 @@ _STATIC(start_here_common) /* Load up the kernel context */ 5: -#ifdef DO_SOFT_DISABLE -BEGIN_FW_FTR_SECTION li r5,0 - stb r5,PACAPROCENABLED(r13) /* Soft Disabled */ + stb r5,PACASOFTIRQEN(r13) /* Soft Disabled */ +#ifdef CONFIG_PPC_ISERIES +BEGIN_FW_FTR_SECTION mfmsr r5 ori r5,r5,MSR_EE /* Hard Enabled */ mtmsrd r5 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) #endif +BEGIN_FW_FTR_SECTION + stb r5,PACAHARDIRQEN(r13) +END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) bl .start_kernel diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index 39db7a3affe..82bd2f10770 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -112,7 +112,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask) return 1; } -struct dma_mapping_ops ibmebus_dma_ops = { +static struct dma_mapping_ops ibmebus_dma_ops = { .alloc_coherent = ibmebus_alloc_coherent, .free_coherent = ibmebus_free_coherent, .map_single = ibmebus_map_single, @@ -176,6 +176,10 @@ static struct ibmebus_dev* __devinit ibmebus_register_device_common( dev->ofdev.dev.bus = &ibmebus_bus_type; dev->ofdev.dev.release = ibmebus_dev_release; + dev->ofdev.dev.archdata.of_node = dev->ofdev.node; + dev->ofdev.dev.archdata.dma_ops = &ibmebus_dma_ops; + dev->ofdev.dev.archdata.numa_node = of_node_to_nid(dev->ofdev.node); + /* An ibmebusdev is based on a of_device. We have to change the * bus type to use our own DMA mapping operations. 
*/ @@ -210,11 +214,10 @@ static struct ibmebus_dev* __devinit ibmebus_register_device_node( return NULL; } - dev = kmalloc(sizeof(struct ibmebus_dev), GFP_KERNEL); + dev = kzalloc(sizeof(struct ibmebus_dev), GFP_KERNEL); if (!dev) { return NULL; } - memset(dev, 0, sizeof(struct ibmebus_dev)); dev->ofdev.node = of_node_get(dn); diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 4180c3998b3..8994af327b4 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -39,6 +39,13 @@ #define cpu_should_die() 0 #endif +static int __init powersave_off(char *arg) +{ + ppc_md.power_save = NULL; + return 0; +} +__setup("powersave=off", powersave_off); + /* * The body of the idle task. */ diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S index 30de81da7b4..ba319547860 100644 --- a/arch/powerpc/kernel/idle_power4.S +++ b/arch/powerpc/kernel/idle_power4.S @@ -30,6 +30,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP) beqlr /* Go to NAP now */ + mfmsr r7 + rldicl r0,r7,48,1 + rotldi r0,r0,16 + mtmsrd r0,1 /* hard-disable interrupts */ + li r0,1 + stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */ + stb r0,PACAHARDIRQEN(r13) BEGIN_FTR_SECTION DSSALL sync @@ -38,7 +45,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) ld r8,TI_LOCAL_FLAGS(r9) /* set napping bit */ ori r8,r8,_TLF_NAPPING /* so when we take an exception */ std r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */ - mfmsr r7 ori r7,r7,MSR_EE oris r7,r7,MSR_POW@h 1: sync diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c index e98180686b3..34ae11494dd 100644 --- a/arch/powerpc/kernel/io.c +++ b/arch/powerpc/kernel/io.c @@ -25,13 +25,11 @@ #include <asm/firmware.h> #include <asm/bug.h> -void _insb(volatile u8 __iomem *port, void *buf, long count) +void _insb(const volatile u8 __iomem *port, void *buf, long count) { u8 *tbuf = buf; u8 tmp; - BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); - if (unlikely(count <= 0)) return; asm volatile("sync"); @@ -48,8 +46,6 @@ void _outsb(volatile u8 __iomem *port, const void *buf, long count) { const u8 *tbuf = buf; - BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); - if (unlikely(count <= 0)) return; asm volatile("sync"); @@ -60,13 +56,11 @@ void _outsb(volatile u8 __iomem *port, const void *buf, long count) } EXPORT_SYMBOL(_outsb); -void _insw_ns(volatile u16 __iomem *port, void *buf, long count) +void _insw_ns(const volatile u16 __iomem *port, void *buf, long count) { u16 *tbuf = buf; u16 tmp; - BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); - if (unlikely(count <= 0)) return; asm volatile("sync"); @@ -83,8 +77,6 @@ void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count) { const u16 *tbuf = buf; - BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); - if (unlikely(count <= 0)) return; asm volatile("sync"); @@ -95,13 +87,11 @@ void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count) } EXPORT_SYMBOL(_outsw_ns); -void _insl_ns(volatile u32 __iomem *port, void *buf, long count) +void _insl_ns(const volatile u32 __iomem *port, void *buf, long count) { u32 *tbuf = buf; u32 tmp; - BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); - if (unlikely(count <= 0)) return; asm volatile("sync"); @@ -118,8 +108,6 @@ void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count) { const u32 *tbuf = buf; - BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); - if (unlikely(count <= 0)) return; asm volatile("sync"); @@ -129,3 +117,90 @@ void _outsl_ns(volatile u32 __iomem *port, const void *buf, long 
count) asm volatile("sync"); } EXPORT_SYMBOL(_outsl_ns); + +#define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0) + +void _memset_io(volatile void __iomem *addr, int c, unsigned long n) +{ + void *p = (void __force *)addr; + u32 lc = c; + lc |= lc << 8; + lc |= lc << 16; + + __asm__ __volatile__ ("sync" : : : "memory"); + while(n && !IO_CHECK_ALIGN(p, 4)) { + *((volatile u8 *)p) = c; + p++; + n--; + } + while(n >= 4) { + *((volatile u32 *)p) = lc; + p += 4; + n -= 4; + } + while(n) { + *((volatile u8 *)p) = c; + p++; + n--; + } + __asm__ __volatile__ ("sync" : : : "memory"); +} +EXPORT_SYMBOL(_memset_io); + +void _memcpy_fromio(void *dest, const volatile void __iomem *src, + unsigned long n) +{ + void *vsrc = (void __force *) src; + + __asm__ __volatile__ ("sync" : : : "memory"); + while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) { + *((u8 *)dest) = *((volatile u8 *)vsrc); + __asm__ __volatile__ ("eieio" : : : "memory"); + vsrc++; + dest++; + n--; + } + while(n > 4) { + *((u32 *)dest) = *((volatile u32 *)vsrc); + __asm__ __volatile__ ("eieio" : : : "memory"); + vsrc += 4; + dest += 4; + n -= 4; + } + while(n) { + *((u8 *)dest) = *((volatile u8 *)vsrc); + __asm__ __volatile__ ("eieio" : : : "memory"); + vsrc++; + dest++; + n--; + } + __asm__ __volatile__ ("sync" : : : "memory"); +} +EXPORT_SYMBOL(_memcpy_fromio); + +void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n) +{ + void *vdest = (void __force *) dest; + + __asm__ __volatile__ ("sync" : : : "memory"); + while(n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) { + *((volatile u8 *)vdest) = *((u8 *)src); + src++; + vdest++; + n--; + } + while(n > 4) { + *((volatile u32 *)vdest) = *((volatile u32 *)src); + src += 4; + vdest += 4; + n-=4; + } + while(n) { + *((volatile u8 *)vdest) = *((u8 *)src); + src++; + vdest++; + n--; + } + __asm__ __volatile__ ("sync" : : : "memory"); +} +EXPORT_SYMBOL(_memcpy_toio); diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c index a13a93dfc65..c6811337105 100644 --- a/arch/powerpc/kernel/iomap.c +++ b/arch/powerpc/kernel/iomap.c @@ -106,7 +106,7 @@ EXPORT_SYMBOL(iowrite32_rep); void __iomem *ioport_map(unsigned long port, unsigned int len) { - return (void __iomem *) (port+pci_io_base); + return (void __iomem *) (port + _IO_BASE); } void ioport_unmap(void __iomem *addr) diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index ba6b7256084..95edad4faf2 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -258,9 +258,9 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, spin_unlock_irqrestore(&(tbl->it_lock), flags); } -int iommu_map_sg(struct device *dev, struct iommu_table *tbl, - struct scatterlist *sglist, int nelems, - unsigned long mask, enum dma_data_direction direction) +int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist, + int nelems, unsigned long mask, + enum dma_data_direction direction) { dma_addr_t dma_next = 0, dma_addr; unsigned long flags; diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 5e37bf14ef2..0bd8c766583 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -64,8 +64,9 @@ #include <asm/ptrace.h> #include <asm/machdep.h> #include <asm/udbg.h> -#ifdef CONFIG_PPC_ISERIES +#ifdef CONFIG_PPC64 #include <asm/paca.h> +#include <asm/firmware.h> #endif int __irq_offset_value; @@ -95,6 +96,74 @@ extern atomic_t ipi_sent; EXPORT_SYMBOL(irq_desc); int distribute_irqs = 1; + +static 
inline unsigned long get_hard_enabled(void) +{ + unsigned long enabled; + + __asm__ __volatile__("lbz %0,%1(13)" + : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled))); + + return enabled; +} + +static inline void set_soft_enabled(unsigned long enable) +{ + __asm__ __volatile__("stb %0,%1(13)" + : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); +} + +void local_irq_restore(unsigned long en) +{ + /* + * get_paca()->soft_enabled = en; + * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1? + * That was allowed before, and in such a case we do need to take care + * that gcc will set soft_enabled directly via r13, not choose to use + * an intermediate register, lest we're preempted to a different cpu. + */ + set_soft_enabled(en); + if (!en) + return; + + if (firmware_has_feature(FW_FEATURE_ISERIES)) { + /* + * Do we need to disable preemption here? Not really: in the + * unlikely event that we're preempted to a different cpu in + * between getting r13, loading its lppaca_ptr, and loading + * its any_int, we might call iseries_handle_interrupts without + * an interrupt pending on the new cpu, but that's no disaster, + * is it? And the business of preempting us off the old cpu + * would itself involve a local_irq_restore which handles the + * interrupt to that cpu. + * + * But use "local_paca->lppaca_ptr" instead of "get_lppaca()" + * to avoid any preemption checking added into get_paca(). + */ + if (local_paca->lppaca_ptr->int_dword.any_int) + iseries_handle_interrupts(); + return; + } + + /* + * if (get_paca()->hard_enabled) return; + * But again we need to take care that gcc gets hard_enabled directly + * via r13, not choose to use an intermediate register, lest we're + * preempted to a different cpu in between the two instructions. + */ + if (get_hard_enabled()) + return; + + /* + * Need to hard-enable interrupts here. Since currently disabled, + * no need to take further asm precautions against preemption; but + * use local_paca instead of get_paca() to avoid preemption checking. 
+ */ + local_paca->hard_enabled = en; + if ((int)mfspr(SPRN_DEC) < 0) + mtspr(SPRN_DEC, 1); + hard_irq_enable(); +} #endif /* CONFIG_PPC64 */ int show_interrupts(struct seq_file *p, void *v) @@ -246,7 +315,8 @@ void do_IRQ(struct pt_regs *regs) set_irq_regs(old_regs); #ifdef CONFIG_PPC_ISERIES - if (get_lppaca()->int_dword.fields.decr_int) { + if (firmware_has_feature(FW_FEATURE_ISERIES) && + get_lppaca()->int_dword.fields.decr_int) { get_lppaca()->int_dword.fields.decr_int = 0; /* Signal a fake decrementer interrupt */ timer_interrupt(regs); @@ -626,10 +696,14 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map); void irq_dispose_mapping(unsigned int virq) { - struct irq_host *host = irq_map[virq].host; + struct irq_host *host; irq_hw_number_t hwirq; unsigned long flags; + if (virq == NO_IRQ) + return; + + host = irq_map[virq].host; WARN_ON (host == NULL); if (host == NULL) return; diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c index 397c83eda20..8a06724e029 100644 --- a/arch/powerpc/kernel/of_device.c +++ b/arch/powerpc/kernel/of_device.c @@ -9,30 +9,26 @@ #include <asm/of_device.h> /** - * of_match_device - Tell if an of_device structure has a matching - * of_match structure + * of_match_node - Tell if an device_node has a matching of_match structure * @ids: array of of device match structures to search in - * @dev: the of device structure to match against + * @node: the of device structure to match against * - * Used by a driver to check whether an of_device present in the - * system is in its list of supported devices. + * Low level utility function used by device matching. */ -const struct of_device_id *of_match_device(const struct of_device_id *matches, - const struct of_device *dev) +const struct of_device_id *of_match_node(const struct of_device_id *matches, + const struct device_node *node) { - if (!dev->node) - return NULL; while (matches->name[0] || matches->type[0] || matches->compatible[0]) { int match = 1; if (matches->name[0]) - match &= dev->node->name - && !strcmp(matches->name, dev->node->name); + match &= node->name + && !strcmp(matches->name, node->name); if (matches->type[0]) - match &= dev->node->type - && !strcmp(matches->type, dev->node->type); + match &= node->type + && !strcmp(matches->type, node->type); if (matches->compatible[0]) - match &= device_is_compatible(dev->node, - matches->compatible); + match &= device_is_compatible(node, + matches->compatible); if (match) return matches; matches++; @@ -40,16 +36,21 @@ const struct of_device_id *of_match_device(const struct of_device_id *matches, return NULL; } -static int of_platform_bus_match(struct device *dev, struct device_driver *drv) +/** + * of_match_device - Tell if an of_device structure has a matching + * of_match structure + * @ids: array of of device match structures to search in + * @dev: the of device structure to match against + * + * Used by a driver to check whether an of_device present in the + * system is in its list of supported devices. 
+ */ +const struct of_device_id *of_match_device(const struct of_device_id *matches, + const struct of_device *dev) { - struct of_device * of_dev = to_of_device(dev); - struct of_platform_driver * of_drv = to_of_platform_driver(drv); - const struct of_device_id * matches = of_drv->match_table; - - if (!matches) - return 0; - - return of_match_device(matches, of_dev) != NULL; + if (!dev->node) + return NULL; + return of_match_node(matches, dev->node); } struct of_device *of_dev_get(struct of_device *dev) @@ -71,96 +72,8 @@ void of_dev_put(struct of_device *dev) put_device(&dev->dev); } - -static int of_device_probe(struct device *dev) -{ - int error = -ENODEV; - struct of_platform_driver *drv; - struct of_device *of_dev; - const struct of_device_id *match; - - drv = to_of_platform_driver(dev->driver); - of_dev = to_of_device(dev); - - if (!drv->probe) - return error; - - of_dev_get(of_dev); - - match = of_match_device(drv->match_table, of_dev); - if (match) - error = drv->probe(of_dev, match); - if (error) - of_dev_put(of_dev); - - return error; -} - -static int of_device_remove(struct device *dev) -{ - struct of_device * of_dev = to_of_device(dev); - struct of_platform_driver * drv = to_of_platform_driver(dev->driver); - - if (dev->driver && drv->remove) - drv->remove(of_dev); - return 0; -} - -static int of_device_suspend(struct device *dev, pm_message_t state) -{ - struct of_device * of_dev = to_of_device(dev); - struct of_platform_driver * drv = to_of_platform_driver(dev->driver); - int error = 0; - - if (dev->driver && drv->suspend) - error = drv->suspend(of_dev, state); - return error; -} - -static int of_device_resume(struct device * dev) -{ - struct of_device * of_dev = to_of_device(dev); - struct of_platform_driver * drv = to_of_platform_driver(dev->driver); - int error = 0; - - if (dev->driver && drv->resume) - error = drv->resume(of_dev); - return error; -} - -struct bus_type of_platform_bus_type = { - .name = "of_platform", - .match = of_platform_bus_match, - .probe = of_device_probe, - .remove = of_device_remove, - .suspend = of_device_suspend, - .resume = of_device_resume, -}; - -static int __init of_bus_driver_init(void) -{ - return bus_register(&of_platform_bus_type); -} - -postcore_initcall(of_bus_driver_init); - -int of_register_driver(struct of_platform_driver *drv) -{ - /* initialize common driver fields */ - drv->driver.name = drv->name; - drv->driver.bus = &of_platform_bus_type; - - /* register with core */ - return driver_register(&drv->driver); -} - -void of_unregister_driver(struct of_platform_driver *drv) -{ - driver_unregister(&drv->driver); -} - - -static ssize_t dev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t dev_show_devspec(struct device *dev, + struct device_attribute *attr, char *buf) { struct of_device *ofdev; @@ -208,41 +121,11 @@ void of_device_unregister(struct of_device *ofdev) device_unregister(&ofdev->dev); } -struct of_device* of_platform_device_create(struct device_node *np, - const char *bus_id, - struct device *parent) -{ - struct of_device *dev; - - dev = kmalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return NULL; - memset(dev, 0, sizeof(*dev)); - - dev->node = of_node_get(np); - dev->dma_mask = 0xffffffffUL; - dev->dev.dma_mask = &dev->dma_mask; - dev->dev.parent = parent; - dev->dev.bus = &of_platform_bus_type; - dev->dev.release = of_release_dev; - - strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE); - - if (of_device_register(dev) != 0) { - kfree(dev); - return NULL; - } - - return dev; -} 
+EXPORT_SYMBOL(of_match_node); EXPORT_SYMBOL(of_match_device); -EXPORT_SYMBOL(of_platform_bus_type); -EXPORT_SYMBOL(of_register_driver); -EXPORT_SYMBOL(of_unregister_driver); EXPORT_SYMBOL(of_device_register); EXPORT_SYMBOL(of_device_unregister); EXPORT_SYMBOL(of_dev_get); EXPORT_SYMBOL(of_dev_put); -EXPORT_SYMBOL(of_platform_device_create); EXPORT_SYMBOL(of_release_dev); diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c new file mode 100644 index 00000000000..b3189d0161b --- /dev/null +++ b/arch/powerpc/kernel/of_platform.c @@ -0,0 +1,489 @@ +/* + * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * and Arnd Bergmann, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#undef DEBUG + +#include <linux/string.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/slab.h> +#include <linux/pci.h> + +#include <asm/errno.h> +#include <asm/dcr.h> +#include <asm/of_device.h> +#include <asm/of_platform.h> +#include <asm/topology.h> +#include <asm/pci-bridge.h> +#include <asm/ppc-pci.h> +#include <asm/atomic.h> + + +/* + * The list of OF IDs below is used for matching bus types in the + * system whose devices are to be exposed as of_platform_devices. + * + * This is the default list valid for most platforms. This file provides + * functions who can take an explicit list if necessary though + * + * The search is always performed recursively looking for children of + * the provided device_node and recursively if such a children matches + * a bus type in the list + */ + +static struct of_device_id of_default_bus_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .type = "spider", }, + { .type = "axon", }, + { .type = "plb5", }, + { .type = "plb4", }, + { .type = "opb", }, + {}, +}; + +static atomic_t bus_no_reg_magic; + +/* + * + * OF platform device type definition & base infrastructure + * + */ + +static int of_platform_bus_match(struct device *dev, struct device_driver *drv) +{ + struct of_device * of_dev = to_of_device(dev); + struct of_platform_driver * of_drv = to_of_platform_driver(drv); + const struct of_device_id * matches = of_drv->match_table; + + if (!matches) + return 0; + + return of_match_device(matches, of_dev) != NULL; +} + +static int of_platform_device_probe(struct device *dev) +{ + int error = -ENODEV; + struct of_platform_driver *drv; + struct of_device *of_dev; + const struct of_device_id *match; + + drv = to_of_platform_driver(dev->driver); + of_dev = to_of_device(dev); + + if (!drv->probe) + return error; + + of_dev_get(of_dev); + + match = of_match_device(drv->match_table, of_dev); + if (match) + error = drv->probe(of_dev, match); + if (error) + of_dev_put(of_dev); + + return error; +} + +static int of_platform_device_remove(struct device *dev) +{ + struct of_device * of_dev = to_of_device(dev); + struct of_platform_driver * drv = to_of_platform_driver(dev->driver); + + if (dev->driver && drv->remove) + drv->remove(of_dev); + return 0; +} + +static int of_platform_device_suspend(struct device *dev, pm_message_t state) +{ + struct of_device * of_dev = to_of_device(dev); + struct of_platform_driver * drv = to_of_platform_driver(dev->driver); + int error = 0; + + if (dev->driver && 
drv->suspend) + error = drv->suspend(of_dev, state); + return error; +} + +static int of_platform_device_resume(struct device * dev) +{ + struct of_device * of_dev = to_of_device(dev); + struct of_platform_driver * drv = to_of_platform_driver(dev->driver); + int error = 0; + + if (dev->driver && drv->resume) + error = drv->resume(of_dev); + return error; +} + +struct bus_type of_platform_bus_type = { + .name = "of_platform", + .match = of_platform_bus_match, + .probe = of_platform_device_probe, + .remove = of_platform_device_remove, + .suspend = of_platform_device_suspend, + .resume = of_platform_device_resume, +}; +EXPORT_SYMBOL(of_platform_bus_type); + +static int __init of_bus_driver_init(void) +{ + return bus_register(&of_platform_bus_type); +} + +postcore_initcall(of_bus_driver_init); + +int of_register_platform_driver(struct of_platform_driver *drv) +{ + /* initialize common driver fields */ + drv->driver.name = drv->name; + drv->driver.bus = &of_platform_bus_type; + + /* register with core */ + return driver_register(&drv->driver); +} +EXPORT_SYMBOL(of_register_platform_driver); + +void of_unregister_platform_driver(struct of_platform_driver *drv) +{ + driver_unregister(&drv->driver); +} +EXPORT_SYMBOL(of_unregister_platform_driver); + +static void of_platform_make_bus_id(struct of_device *dev) +{ + struct device_node *node = dev->node; + char *name = dev->dev.bus_id; + const u32 *reg; + u64 addr; + long magic; + + /* + * If it's a DCR based device, use 'd' for native DCRs + * and 'D' for MMIO DCRs. + */ +#ifdef CONFIG_PPC_DCR + reg = get_property(node, "dcr-reg", NULL); + if (reg) { +#ifdef CONFIG_PPC_DCR_NATIVE + snprintf(name, BUS_ID_SIZE, "d%x.%s", + *reg, node->name); +#else /* CONFIG_PPC_DCR_NATIVE */ + addr = of_translate_dcr_address(node, *reg, NULL); + if (addr != OF_BAD_ADDR) { + snprintf(name, BUS_ID_SIZE, + "D%llx.%s", (unsigned long long)addr, + node->name); + return; + } +#endif /* !CONFIG_PPC_DCR_NATIVE */ + } +#endif /* CONFIG_PPC_DCR */ + + /* + * For MMIO, get the physical address + */ + reg = get_property(node, "reg", NULL); + if (reg) { + addr = of_translate_address(node, reg); + if (addr != OF_BAD_ADDR) { + snprintf(name, BUS_ID_SIZE, + "%llx.%s", (unsigned long long)addr, + node->name); + return; + } + } + + /* + * No BusID, use the node name and add a globally incremented + * counter (and pray...) + */ + magic = atomic_add_return(1, &bus_no_reg_magic); + snprintf(name, BUS_ID_SIZE, "%s.%d", node->name, magic - 1); +} + +struct of_device* of_platform_device_create(struct device_node *np, + const char *bus_id, + struct device *parent) +{ + struct of_device *dev; + + dev = kmalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return NULL; + memset(dev, 0, sizeof(*dev)); + + dev->node = of_node_get(np); + dev->dma_mask = 0xffffffffUL; + dev->dev.dma_mask = &dev->dma_mask; + dev->dev.parent = parent; + dev->dev.bus = &of_platform_bus_type; + dev->dev.release = of_release_dev; + dev->dev.archdata.of_node = np; + dev->dev.archdata.numa_node = of_node_to_nid(np); + + /* We do not fill the DMA ops for platform devices by default. 
+ * This is currently the responsibility of the platform code + * to do such, possibly using a device notifier + */ + + if (bus_id) + strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE); + else + of_platform_make_bus_id(dev); + + if (of_device_register(dev) != 0) { + kfree(dev); + return NULL; + } + + return dev; +} +EXPORT_SYMBOL(of_platform_device_create); + + + +/** + * of_platform_bus_create - Create an OF device for a bus node and all its + * children. Optionally recursively instanciate matching busses. + * @bus: device node of the bus to instanciate + * @matches: match table, NULL to use the default, OF_NO_DEEP_PROBE to + * disallow recursive creation of child busses + */ +static int of_platform_bus_create(struct device_node *bus, + struct of_device_id *matches, + struct device *parent) +{ + struct device_node *child; + struct of_device *dev; + int rc = 0; + + for (child = NULL; (child = of_get_next_child(bus, child)); ) { + pr_debug(" create child: %s\n", child->full_name); + dev = of_platform_device_create(child, NULL, parent); + if (dev == NULL) + rc = -ENOMEM; + else if (!of_match_node(matches, child)) + continue; + if (rc == 0) { + pr_debug(" and sub busses\n"); + rc = of_platform_bus_create(child, matches, &dev->dev); + } if (rc) { + of_node_put(child); + break; + } + } + return rc; +} + +/** + * of_platform_bus_probe - Probe the device-tree for platform busses + * @root: parent of the first level to probe or NULL for the root of the tree + * @matches: match table, NULL to use the default + * @parent: parent to hook devices from, NULL for toplevel + * + * Note that children of the provided root are not instanciated as devices + * unless the specified root itself matches the bus list and is not NULL. + */ + +int of_platform_bus_probe(struct device_node *root, + struct of_device_id *matches, + struct device *parent) +{ + struct device_node *child; + struct of_device *dev; + int rc = 0; + + if (matches == NULL) + matches = of_default_bus_ids; + if (matches == OF_NO_DEEP_PROBE) + return -EINVAL; + if (root == NULL) + root = of_find_node_by_path("/"); + else + of_node_get(root); + + pr_debug("of_platform_bus_probe()\n"); + pr_debug(" starting at: %s\n", root->full_name); + + /* Do a self check of bus type, if there's a match, create + * children + */ + if (of_match_node(matches, root)) { + pr_debug(" root match, create all sub devices\n"); + dev = of_platform_device_create(root, NULL, parent); + if (dev == NULL) { + rc = -ENOMEM; + goto bail; + } + pr_debug(" create all sub busses\n"); + rc = of_platform_bus_create(root, matches, &dev->dev); + goto bail; + } + for (child = NULL; (child = of_get_next_child(root, child)); ) { + if (!of_match_node(matches, child)) + continue; + + pr_debug(" match: %s\n", child->full_name); + dev = of_platform_device_create(child, NULL, parent); + if (dev == NULL) + rc = -ENOMEM; + else + rc = of_platform_bus_create(child, matches, &dev->dev); + if (rc) { + of_node_put(child); + break; + } + } + bail: + of_node_put(root); + return rc; +} +EXPORT_SYMBOL(of_platform_bus_probe); + +static int of_dev_node_match(struct device *dev, void *data) +{ + return to_of_device(dev)->node == data; +} + +struct of_device *of_find_device_by_node(struct device_node *np) +{ + struct device *dev; + + dev = bus_find_device(&of_platform_bus_type, + NULL, np, of_dev_node_match); + if (dev) + return to_of_device(dev); + return NULL; +} +EXPORT_SYMBOL(of_find_device_by_node); + +static int of_dev_phandle_match(struct device *dev, void *data) +{ + phandle *ph = data; + return 
to_of_device(dev)->node->linux_phandle == *ph; +} + +struct of_device *of_find_device_by_phandle(phandle ph) +{ + struct device *dev; + + dev = bus_find_device(&of_platform_bus_type, + NULL, &ph, of_dev_phandle_match); + if (dev) + return to_of_device(dev); + return NULL; +} +EXPORT_SYMBOL(of_find_device_by_phandle); + + +#ifdef CONFIG_PPC_OF_PLATFORM_PCI + +/* The probing of PCI controllers from of_platform is currently + * 64 bits only, mostly due to gratuitous differences between + * the 32 and 64 bits PCI code on PowerPC and the 32 bits one + * lacking some bits needed here. + */ + +static int __devinit of_pci_phb_probe(struct of_device *dev, + const struct of_device_id *match) +{ + struct pci_controller *phb; + + /* Check if we can do that ... */ + if (ppc_md.pci_setup_phb == NULL) + return -ENODEV; + + printk(KERN_INFO "Setting up PCI bus %s\n", dev->node->full_name); + + /* Alloc and setup PHB data structure */ + phb = pcibios_alloc_controller(dev->node); + if (!phb) + return -ENODEV; + + /* Setup parent in sysfs */ + phb->parent = &dev->dev; + + /* Setup the PHB using arch provided callback */ + if (ppc_md.pci_setup_phb(phb)) { + pcibios_free_controller(phb); + return -ENODEV; + } + + /* Process "ranges" property */ + pci_process_bridge_OF_ranges(phb, dev->node, 0); + + /* Setup IO space. + * This will not work properly for ISA IOs, something needs to be done + * about it if we ever generalize that way of probing PCI brigdes + */ + pci_setup_phb_io_dynamic(phb, 0); + + /* Init pci_dn data structures */ + pci_devs_phb_init_dynamic(phb); + + /* Register devices with EEH */ +#ifdef CONFIG_EEH + if (dev->node->child) + eeh_add_device_tree_early(dev->node); +#endif /* CONFIG_EEH */ + + /* Scan the bus */ + scan_phb(phb); + + /* Claim resources. This might need some rework as well depending + * wether we are doing probe-only or not, like assigning unassigned + * resources etc... 
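A short usage sketch for the of_find_device_by_node()/of_find_device_by_phandle() lookups above; the node pointer is a placeholder, and the put_device() reflects the usual bus_find_device() reference semantics:

/* Sketch: resolve the of_device that of_platform created for a firmware
 * node obtained elsewhere (e.g. via of_find_node_by_type()).
 */
static void example_report_of_device(struct device_node *np)
{
	struct of_device *odev = of_find_device_by_node(np);

	if (!odev)
		return;
	dev_info(&odev->dev, "backed by %s\n", np->full_name);
	put_device(&odev->dev);	/* drop the reference taken by the lookup */
}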
+ */ + pcibios_claim_one_bus(phb->bus); + + /* Finish EEH setup */ +#ifdef CONFIG_EEH + eeh_add_device_tree_late(phb->bus); +#endif + + /* Add probed PCI devices to the device model */ + pci_bus_add_devices(phb->bus); + + return 0; +} + +static struct of_device_id of_pci_phb_ids[] = { + { .type = "pci", }, + { .type = "pcix", }, + { .type = "pcie", }, + { .type = "pciex", }, + { .type = "ht", }, + {} +}; + +static struct of_platform_driver of_pci_phb_driver = { + .name = "of-pci", + .match_table = of_pci_phb_ids, + .probe = of_pci_phb_probe, + .driver = { + .multithread_probe = 1, + }, +}; + +static __init int of_pci_phb_init(void) +{ + return of_register_platform_driver(&of_pci_phb_driver); +} + +device_initcall(of_pci_phb_init); + +#endif /* CONFIG_PPC_OF_PLATFORM_PCI */ diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 0d9ff72e285..2f54cd81dea 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -12,6 +12,7 @@ #include <linux/errno.h> #include <linux/bootmem.h> #include <linux/irq.h> +#include <linux/list.h> #include <asm/processor.h> #include <asm/io.h> @@ -99,7 +100,7 @@ pcibios_fixup_resources(struct pci_dev *dev) continue; if (res->end == 0xffffffff) { DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n", - pci_name(dev), i, res->start, res->end); + pci_name(dev), i, (u64)res->start, (u64)res->end); res->end -= res->start; res->start = 0; res->flags |= IORESOURCE_UNSET; @@ -115,11 +116,9 @@ pcibios_fixup_resources(struct pci_dev *dev) if (offset != 0) { res->start += offset; res->end += offset; -#ifdef DEBUG - printk("Fixup res %d (%lx) of dev %s: %llx -> %llx\n", - i, res->flags, pci_name(dev), - res->start - offset, res->start); -#endif + DBG("Fixup res %d (%lx) of dev %s: %llx -> %llx\n", + i, res->flags, pci_name(dev), + (u64)res->start - offset, (u64)res->start); } } @@ -255,7 +254,7 @@ pcibios_allocate_bus_resources(struct list_head *bus_list) } DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n", - res->start, res->end, res->flags, pr); + (u64)res->start, (u64)res->end, res->flags, pr); if (pr) { if (request_resource(pr, res) == 0) continue; @@ -306,7 +305,7 @@ reparent_resources(struct resource *parent, struct resource *res) for (p = res->child; p != NULL; p = p->sibling) { p->parent = res; DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n", - p->name, p->start, p->end, res->name); + p->name, (u64)p->start, (u64)p->end, res->name); } return 0; } @@ -362,7 +361,7 @@ pci_relocate_bridge_resource(struct pci_bus *bus, int i) } if (request_resource(pr, res)) { DBG(KERN_ERR "PCI: huh? 
couldn't move to %llx..%llx\n", - res->start, res->end); + (u64)res->start, (u64)res->end); return -1; /* "can't happen" */ } update_bridge_base(bus, i); @@ -480,14 +479,14 @@ static inline void alloc_resource(struct pci_dev *dev, int idx) struct resource *pr, *r = &dev->resource[idx]; DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n", - pci_name(dev), idx, r->start, r->end, r->flags); + pci_name(dev), idx, (u64)r->start, (u64)r->end, r->flags); pr = pci_find_parent_resource(dev, r); if (!pr || request_resource(pr, r) < 0) { printk(KERN_ERR "PCI: Cannot allocate resource region %d" " of device %s\n", idx, pci_name(dev)); if (pr) DBG("PCI: parent is %p: %016llx-%016llx (f=%lx)\n", - pr, pr->start, pr->end, pr->flags); + pr, (u64)pr->start, (u64)pr->end, pr->flags); /* We'll assign a new address later */ r->flags |= IORESOURCE_UNSET; r->end -= r->start; @@ -960,7 +959,7 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose, res->flags = IORESOURCE_IO; res->start = ranges[2]; DBG("PCI: IO 0x%llx -> 0x%llx\n", - res->start, res->start + size - 1); + (u64)res->start, (u64)res->start + size - 1); break; case 2: /* memory space */ memno = 0; @@ -982,7 +981,7 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose, res->flags |= IORESOURCE_PREFETCH; res->start = ranges[na+2]; DBG("PCI: MEM[%d] 0x%llx -> 0x%llx\n", memno, - res->start, res->start + size - 1); + (u64)res->start, (u64)res->start + size - 1); } break; } @@ -1268,7 +1267,10 @@ pcibios_init(void) if (pci_assign_all_buses) hose->first_busno = next_busno; hose->last_busno = 0xff; - bus = pci_scan_bus(hose->first_busno, hose->ops, hose); + bus = pci_scan_bus_parented(hose->parent, hose->first_busno, + hose->ops, hose); + if (bus) + pci_bus_add_devices(bus); hose->last_busno = bus->subordinate; if (pci_assign_all_buses || next_busno <= hose->last_busno) next_busno = hose->last_busno + pcibios_assign_bus_offset; @@ -1282,10 +1284,6 @@ pcibios_init(void) if (pci_assign_all_buses && have_of) pcibios_make_OF_bus_map(); - /* Do machine dependent PCI interrupt routing */ - if (ppc_md.pci_swizzle && ppc_md.pci_map_irq) - pci_fixup_irqs(ppc_md.pci_swizzle, ppc_md.pci_map_irq); - /* Call machine dependent fixup */ if (ppc_md.pcibios_fixup) ppc_md.pcibios_fixup(); @@ -1308,25 +1306,6 @@ pcibios_init(void) subsys_initcall(pcibios_init); -unsigned char __init -common_swizzle(struct pci_dev *dev, unsigned char *pinp) -{ - struct pci_controller *hose = dev->sysdata; - - if (dev->bus->number != hose->first_busno) { - u8 pin = *pinp; - do { - pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); - /* Move up the chain of bridges. */ - dev = dev->bus->self; - } while (dev->bus->self); - *pinp = pin; - - /* The slot is the idsel of the last bridge. 
*/ - } - return PCI_SLOT(dev->devfn); -} - unsigned long resource_fixup(struct pci_dev * dev, struct resource * res, unsigned long start, unsigned long size) { @@ -1338,6 +1317,7 @@ void __init pcibios_fixup_bus(struct pci_bus *bus) struct pci_controller *hose = (struct pci_controller *) bus->sysdata; unsigned long io_offset; struct resource *res; + struct pci_dev *dev; int i; io_offset = (unsigned long)hose->io_base_virt - isa_io_base; @@ -1390,8 +1370,16 @@ void __init pcibios_fixup_bus(struct pci_bus *bus) } } + /* Platform specific bus fixups */ if (ppc_md.pcibios_fixup_bus) ppc_md.pcibios_fixup_bus(bus); + + /* Read default IRQs and fixup if necessary */ + list_for_each_entry(dev, &bus->devices, bus_list) { + pci_read_irq_line(dev); + if (ppc_md.pci_irq_fixup) + ppc_md.pci_irq_fixup(dev); + } } char __init *pcibios_setup(char *str) @@ -1571,7 +1559,7 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev, *offset += hose->pci_mem_offset; res_bit = IORESOURCE_MEM; } else { - io_offset = hose->io_base_virt - ___IO_BASE; + io_offset = hose->io_base_virt - (void __iomem *)_IO_BASE; *offset += io_offset; res_bit = IORESOURCE_IO; } @@ -1826,7 +1814,8 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar, return; if (rsrc->flags & IORESOURCE_IO) - offset = ___IO_BASE - hose->io_base_virt + hose->io_base_phys; + offset = (void __iomem *)_IO_BASE - hose->io_base_virt + + hose->io_base_phys; *start = rsrc->start + offset; *end = rsrc->end + offset; @@ -1845,35 +1834,6 @@ pci_init_resource(struct resource *res, unsigned long start, unsigned long end, res->child = NULL; } -void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) -{ - unsigned long start = pci_resource_start(dev, bar); - unsigned long len = pci_resource_len(dev, bar); - unsigned long flags = pci_resource_flags(dev, bar); - - if (!len) - return NULL; - if (max && len > max) - len = max; - if (flags & IORESOURCE_IO) - return ioport_map(start, len); - if (flags & IORESOURCE_MEM) - /* Not checking IORESOURCE_CACHEABLE because PPC does - * not currently distinguish between ioremap and - * ioremap_nocache. - */ - return ioremap(start, len); - /* What? */ - return NULL; -} - -void pci_iounmap(struct pci_dev *dev, void __iomem *addr) -{ - /* Nothing to do */ -} -EXPORT_SYMBOL(pci_iomap); -EXPORT_SYMBOL(pci_iounmap); - unsigned long pci_address_to_pio(phys_addr_t address) { struct pci_controller* hose = hose_head; diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 9bae8a5bf67..6fa9a0a5c8d 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -42,11 +42,9 @@ unsigned long pci_probe_only = 1; int pci_assign_all_buses = 0; -#ifdef CONFIG_PPC_MULTIPLATFORM static void fixup_resource(struct resource *res, struct pci_dev *dev); static void do_bus_setup(struct pci_bus *bus); static void phbs_remap_io(void); -#endif /* pci_io_base -- the base address from which io bars are offsets. 
* This is the lowest I/O base address (so bar values are always positive), @@ -63,7 +61,7 @@ void iSeries_pcibios_init(void); LIST_HEAD(hose_list); -struct dma_mapping_ops pci_dma_ops; +struct dma_mapping_ops *pci_dma_ops; EXPORT_SYMBOL(pci_dma_ops); int global_phb_number; /* Global phb counter */ @@ -212,6 +210,10 @@ struct pci_controller * pcibios_alloc_controller(struct device_node *dev) void pcibios_free_controller(struct pci_controller *phb) { + spin_lock(&hose_spinlock); + list_del(&phb->list_node); + spin_unlock(&hose_spinlock); + if (phb->is_dynamic) kfree(phb); } @@ -251,7 +253,6 @@ static void __init pcibios_claim_of_setup(void) pcibios_claim_one_bus(b); } -#ifdef CONFIG_PPC_MULTIPLATFORM static u32 get_int_prop(struct device_node *np, const char *name, u32 def) { const u32 *prop; @@ -329,7 +330,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, struct pci_dev *dev; const char *type; - dev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL); + dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL); if (!dev) return NULL; type = get_property(node, "device_type", NULL); @@ -338,7 +339,6 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, DBG(" create device, devfn: %x, type: %s\n", devfn, type); - memset(dev, 0, sizeof(struct pci_dev)); dev->bus = bus; dev->sysdata = node; dev->dev.parent = bus->bridge; @@ -506,7 +506,6 @@ void __devinit of_scan_pci_bridge(struct device_node *node, pci_scan_child_bus(bus); } EXPORT_SYMBOL(of_scan_pci_bridge); -#endif /* CONFIG_PPC_MULTIPLATFORM */ void __devinit scan_phb(struct pci_controller *hose) { @@ -517,7 +516,7 @@ void __devinit scan_phb(struct pci_controller *hose) DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>"); - bus = pci_create_bus(NULL, hose->first_busno, hose->ops, node); + bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node); if (bus == NULL) { printk(KERN_ERR "Failed to create bus for PCI domain %04x\n", hose->global_number); @@ -540,7 +539,7 @@ void __devinit scan_phb(struct pci_controller *hose) } mode = PCI_PROBE_NORMAL; -#ifdef CONFIG_PPC_MULTIPLATFORM + if (node && ppc_md.pci_probe_mode) mode = ppc_md.pci_probe_mode(bus); DBG(" probe mode: %d\n", mode); @@ -548,7 +547,7 @@ void __devinit scan_phb(struct pci_controller *hose) bus->subordinate = hose->last_busno; of_scan_bus(node, bus); } -#endif /* CONFIG_PPC_MULTIPLATFORM */ + if (mode == PCI_PROBE_NORMAL) hose->last_busno = bus->subordinate = pci_scan_child_bus(bus); } @@ -592,11 +591,9 @@ static int __init pcibios_init(void) if (ppc64_isabridge_dev != NULL) printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev)); -#ifdef CONFIG_PPC_MULTIPLATFORM if (!firmware_has_feature(FW_FEATURE_ISERIES)) /* map in PCI I/O space */ phbs_remap_io(); -#endif printk(KERN_DEBUG "PCI: Probing PCI hardware done\n"); @@ -873,8 +870,6 @@ void pcibios_add_platform_entries(struct pci_dev *pdev) device_create_file(&pdev->dev, &dev_attr_devspec); } -#ifdef CONFIG_PPC_MULTIPLATFORM - #define ISA_SPACE_MASK 0x1 #define ISA_SPACE_IO 0x1 @@ -975,11 +970,7 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, res = NULL; pci_space = ranges[0]; pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2]; - - cpu_phys_addr = ranges[3]; - if (na >= 2) - cpu_phys_addr = (cpu_phys_addr << 32) | ranges[4]; - + cpu_phys_addr = of_translate_address(dev, &ranges[3]); size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4]; ranges += np; if (size == 0) @@ -1145,7 +1136,7 @@ int unmap_bus_range(struct pci_bus *bus) if 
(get_bus_io_range(bus, &start_phys, &start_virt, &size)) return 1; - if (iounmap_explicit((void __iomem *) start_virt, size)) + if (__iounmap_explicit((void __iomem *) start_virt, size)) return 1; return 0; @@ -1213,23 +1204,52 @@ void __devinit pcibios_fixup_device_resources(struct pci_dev *dev, } EXPORT_SYMBOL(pcibios_fixup_device_resources); +void __devinit pcibios_setup_new_device(struct pci_dev *dev) +{ + struct dev_archdata *sd = &dev->dev.archdata; + + sd->of_node = pci_device_to_OF_node(dev); + + DBG("PCI device %s OF node: %s\n", pci_name(dev), + sd->of_node ? sd->of_node->full_name : "<none>"); + + sd->dma_ops = pci_dma_ops; +#ifdef CONFIG_NUMA + sd->numa_node = pcibus_to_node(dev->bus); +#else + sd->numa_node = -1; +#endif + if (ppc_md.pci_dma_dev_setup) + ppc_md.pci_dma_dev_setup(dev); +} +EXPORT_SYMBOL(pcibios_setup_new_device); static void __devinit do_bus_setup(struct pci_bus *bus) { struct pci_dev *dev; - ppc_md.iommu_bus_setup(bus); + if (ppc_md.pci_dma_bus_setup) + ppc_md.pci_dma_bus_setup(bus); list_for_each_entry(dev, &bus->devices, bus_list) - ppc_md.iommu_dev_setup(dev); + pcibios_setup_new_device(dev); - if (ppc_md.irq_bus_setup) - ppc_md.irq_bus_setup(bus); + /* Read default IRQs and fixup if necessary */ + list_for_each_entry(dev, &bus->devices, bus_list) { + pci_read_irq_line(dev); + if (ppc_md.pci_irq_fixup) + ppc_md.pci_irq_fixup(dev); + } } void __devinit pcibios_fixup_bus(struct pci_bus *bus) { struct pci_dev *dev = bus->self; + struct device_node *np; + + np = pci_bus_to_OF_node(bus); + + DBG("pcibios_fixup_bus(%s)\n", np ? np->full_name : "<???>"); if (dev && pci_probe_only && (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { @@ -1343,8 +1363,6 @@ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node) return NULL; } -#endif /* CONFIG_PPC_MULTIPLATFORM */ - unsigned long pci_address_to_pio(phys_addr_t address) { struct pci_controller *hose, *tmp; diff --git a/arch/powerpc/kernel/pci_direct_iommu.c b/arch/powerpc/kernel/pci_direct_iommu.c deleted file mode 100644 index 72ce082ce73..00000000000 --- a/arch/powerpc/kernel/pci_direct_iommu.c +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Support for DMA from PCI devices to main memory on - * machines without an iommu or with directly addressable - * RAM (typically a pmac with 2Gb of RAM or less) - * - * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
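pcibios_setup_new_device() above now seeds dev.archdata (of_node, dma_ops, numa_node) and then defers to the optional ppc_md.pci_dma_dev_setup hook. A rough sketch of what a platform override could look like; the ops structure, the vendor test and all names here are purely illustrative:

/* Hypothetical platform hook: give one class of devices its own DMA ops
 * instead of the bus default assigned by pcibios_setup_new_device().
 * "example_dma_ops" stands in for a platform-provided dma_mapping_ops.
 */
extern struct dma_mapping_ops example_dma_ops;

static void example_pci_dma_dev_setup(struct pci_dev *pdev)
{
	if (pdev->vendor == 0x1234)		/* made-up vendor id */
		pdev->dev.archdata.dma_ops = &example_dma_ops;
}

/* installed from platform setup code, e.g.:
 *	ppc_md.pci_dma_dev_setup = example_pci_dma_dev_setup;
 */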
- */ - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/string.h> -#include <linux/init.h> -#include <linux/bootmem.h> -#include <linux/mm.h> -#include <linux/dma-mapping.h> - -#include <asm/sections.h> -#include <asm/io.h> -#include <asm/prom.h> -#include <asm/pci-bridge.h> -#include <asm/machdep.h> -#include <asm/pmac_feature.h> -#include <asm/abs_addr.h> -#include <asm/ppc-pci.h> - -static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) -{ - void *ret; - - ret = (void *)__get_free_pages(flag, get_order(size)); - if (ret != NULL) { - memset(ret, 0, size); - *dma_handle = virt_to_abs(ret); - } - return ret; -} - -static void pci_direct_free_coherent(struct device *hwdev, size_t size, - void *vaddr, dma_addr_t dma_handle) -{ - free_pages((unsigned long)vaddr, get_order(size)); -} - -static dma_addr_t pci_direct_map_single(struct device *hwdev, void *ptr, - size_t size, enum dma_data_direction direction) -{ - return virt_to_abs(ptr); -} - -static void pci_direct_unmap_single(struct device *hwdev, dma_addr_t dma_addr, - size_t size, enum dma_data_direction direction) -{ -} - -static int pci_direct_map_sg(struct device *hwdev, struct scatterlist *sg, - int nents, enum dma_data_direction direction) -{ - int i; - - for (i = 0; i < nents; i++, sg++) { - sg->dma_address = page_to_phys(sg->page) + sg->offset; - sg->dma_length = sg->length; - } - - return nents; -} - -static void pci_direct_unmap_sg(struct device *hwdev, struct scatterlist *sg, - int nents, enum dma_data_direction direction) -{ -} - -static int pci_direct_dma_supported(struct device *dev, u64 mask) -{ - return mask < 0x100000000ull; -} - -static struct dma_mapping_ops pci_direct_ops = { - .alloc_coherent = pci_direct_alloc_coherent, - .free_coherent = pci_direct_free_coherent, - .map_single = pci_direct_map_single, - .unmap_single = pci_direct_unmap_single, - .map_sg = pci_direct_map_sg, - .unmap_sg = pci_direct_unmap_sg, - .dma_supported = pci_direct_dma_supported, -}; - -void __init pci_direct_iommu_init(void) -{ - pci_dma_ops = pci_direct_ops; -} diff --git a/arch/powerpc/kernel/pci_iommu.c b/arch/powerpc/kernel/pci_iommu.c deleted file mode 100644 index 0688b2534ac..00000000000 --- a/arch/powerpc/kernel/pci_iommu.c +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation - * - * Rewrite, cleanup, new allocation schemes: - * Copyright (C) 2004 Olof Johansson, IBM Corporation - * - * Dynamic DMA mapping support, platform-independent parts. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - - -#include <linux/init.h> -#include <linux/types.h> -#include <linux/slab.h> -#include <linux/mm.h> -#include <linux/spinlock.h> -#include <linux/string.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> -#include <asm/io.h> -#include <asm/prom.h> -#include <asm/iommu.h> -#include <asm/pci-bridge.h> -#include <asm/machdep.h> -#include <asm/ppc-pci.h> - -/* - * We can use ->sysdata directly and avoid the extra work in - * pci_device_to_OF_node since ->sysdata will have been initialised - * in the iommu init code for all devices. - */ -#define PCI_GET_DN(dev) ((struct device_node *)((dev)->sysdata)) - -static inline struct iommu_table *device_to_table(struct device *hwdev) -{ - struct pci_dev *pdev; - - if (!hwdev) { - pdev = ppc64_isabridge_dev; - if (!pdev) - return NULL; - } else - pdev = to_pci_dev(hwdev); - - return PCI_DN(PCI_GET_DN(pdev))->iommu_table; -} - - -static inline unsigned long device_to_mask(struct device *hwdev) -{ - struct pci_dev *pdev; - - if (!hwdev) { - pdev = ppc64_isabridge_dev; - if (!pdev) /* This is the best guess we can do */ - return 0xfffffffful; - } else - pdev = to_pci_dev(hwdev); - - if (pdev->dma_mask) - return pdev->dma_mask; - - /* Assume devices without mask can take 32 bit addresses */ - return 0xfffffffful; -} - - -/* Allocates a contiguous real buffer and creates mappings over it. - * Returns the virtual address of the buffer and sets dma_handle - * to the dma address (mapping) of the first page. - */ -static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) -{ - return iommu_alloc_coherent(device_to_table(hwdev), size, dma_handle, - device_to_mask(hwdev), flag, - pcibus_to_node(to_pci_dev(hwdev)->bus)); -} - -static void pci_iommu_free_coherent(struct device *hwdev, size_t size, - void *vaddr, dma_addr_t dma_handle) -{ - iommu_free_coherent(device_to_table(hwdev), size, vaddr, dma_handle); -} - -/* Creates TCEs for a user provided buffer. The user buffer must be - * contiguous real kernel storage (not vmalloc). The address of the buffer - * passed here is the kernel (virtual) address of the buffer. The buffer - * need not be page aligned, the dma_addr_t returned will point to the same - * byte within the page as vaddr. 
- */ -static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr, - size_t size, enum dma_data_direction direction) -{ - return iommu_map_single(device_to_table(hwdev), vaddr, size, - device_to_mask(hwdev), direction); -} - - -static void pci_iommu_unmap_single(struct device *hwdev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction direction) -{ - iommu_unmap_single(device_to_table(hwdev), dma_handle, size, direction); -} - - -static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist, - int nelems, enum dma_data_direction direction) -{ - return iommu_map_sg(pdev, device_to_table(pdev), sglist, - nelems, device_to_mask(pdev), direction); -} - -static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist, - int nelems, enum dma_data_direction direction) -{ - iommu_unmap_sg(device_to_table(pdev), sglist, nelems, direction); -} - -/* We support DMA to/from any memory page via the iommu */ -static int pci_iommu_dma_supported(struct device *dev, u64 mask) -{ - struct iommu_table *tbl = device_to_table(dev); - - if (!tbl || tbl->it_offset > mask) { - printk(KERN_INFO "Warning: IOMMU table offset too big for device mask\n"); - if (tbl) - printk(KERN_INFO "mask: 0x%08lx, table offset: 0x%08lx\n", - mask, tbl->it_offset); - else - printk(KERN_INFO "mask: 0x%08lx, table unavailable\n", - mask); - return 0; - } else - return 1; -} - -struct dma_mapping_ops pci_iommu_ops = { - .alloc_coherent = pci_iommu_alloc_coherent, - .free_coherent = pci_iommu_free_coherent, - .map_single = pci_iommu_map_single, - .unmap_single = pci_iommu_unmap_single, - .map_sg = pci_iommu_map_sg, - .unmap_sg = pci_iommu_unmap_sg, - .dma_supported = pci_iommu_dma_supported, -}; - -void pci_iommu_init(void) -{ - pci_dma_ops = pci_iommu_ops; -} diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 807193a3c78..9179f0739ea 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -49,6 +49,10 @@ #include <asm/commproc.h> #endif +#ifdef CONFIG_PPC64 +EXPORT_SYMBOL(local_irq_restore); +#endif + #ifdef CONFIG_PPC32 extern void transfer_to_handler(void); extern void do_IRQ(struct pt_regs *regs); diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index bdb412d4b74..c18dbe77fdc 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -538,35 +538,31 @@ static struct ibm_pa_feature { {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, }; -static void __init check_cpu_pa_features(unsigned long node) +static void __init scan_features(unsigned long node, unsigned char *ftrs, + unsigned long tablelen, + struct ibm_pa_feature *fp, + unsigned long ft_size) { - unsigned char *pa_ftrs; - unsigned long len, tablelen, i, bit; - - pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen); - if (pa_ftrs == NULL) - return; + unsigned long i, len, bit; /* find descriptor with type == 0 */ for (;;) { if (tablelen < 3) return; - len = 2 + pa_ftrs[0]; + len = 2 + ftrs[0]; if (tablelen < len) return; /* descriptor 0 not found */ - if (pa_ftrs[1] == 0) + if (ftrs[1] == 0) break; tablelen -= len; - pa_ftrs += len; + ftrs += len; } /* loop over bits we know about */ - for (i = 0; i < ARRAY_SIZE(ibm_pa_features); ++i) { - struct ibm_pa_feature *fp = &ibm_pa_features[i]; - - if (fp->pabyte >= pa_ftrs[0]) + for (i = 0; i < ft_size; ++i, ++fp) { + if (fp->pabyte >= ftrs[0]) continue; - bit = (pa_ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1; + bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1; if (bit 
^ fp->invert) { cur_cpu_spec->cpu_features |= fp->cpu_features; cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs; @@ -577,16 +573,59 @@ static void __init check_cpu_pa_features(unsigned long node) } } +static void __init check_cpu_pa_features(unsigned long node) +{ + unsigned char *pa_ftrs; + unsigned long tablelen; + + pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen); + if (pa_ftrs == NULL) + return; + + scan_features(node, pa_ftrs, tablelen, + ibm_pa_features, ARRAY_SIZE(ibm_pa_features)); +} + +static struct feature_property { + const char *name; + u32 min_value; + unsigned long cpu_feature; + unsigned long cpu_user_ftr; +} feature_properties[] __initdata = { +#ifdef CONFIG_ALTIVEC + {"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC}, + {"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC}, +#endif /* CONFIG_ALTIVEC */ +#ifdef CONFIG_PPC64 + {"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP}, + {"ibm,purr", 1, CPU_FTR_PURR, 0}, + {"ibm,spurr", 1, CPU_FTR_SPURR, 0}, +#endif /* CONFIG_PPC64 */ +}; + +static void __init check_cpu_feature_properties(unsigned long node) +{ + unsigned long i; + struct feature_property *fp = feature_properties; + const u32 *prop; + + for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) { + prop = of_get_flat_dt_prop(node, fp->name, NULL); + if (prop && *prop >= fp->min_value) { + cur_cpu_spec->cpu_features |= fp->cpu_feature; + cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr; + } + } +} + static int __init early_init_dt_scan_cpus(unsigned long node, const char *uname, int depth, void *data) { static int logical_cpuid = 0; char *type = of_get_flat_dt_prop(node, "device_type", NULL); -#ifdef CONFIG_ALTIVEC - u32 *prop; -#endif - u32 *intserv; + const u32 *prop; + const u32 *intserv; int i, nthreads; unsigned long len; int found = 0; @@ -643,24 +682,27 @@ static int __init early_init_dt_scan_cpus(unsigned long node, intserv[i]); boot_cpuid = logical_cpuid; set_hard_smp_processor_id(boot_cpuid, intserv[i]); - } -#ifdef CONFIG_ALTIVEC - /* Check if we have a VMX and eventually update CPU features */ - prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL); - if (prop && (*prop) > 0) { - cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; - cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; - } - - /* Same goes for Apple's "altivec" property */ - prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL); - if (prop) { - cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; - cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; + /* + * PAPR defines "logical" PVR values for cpus that + * meet various levels of the architecture: + * 0x0f000001 Architecture version 2.04 + * 0x0f000002 Architecture version 2.05 + * If the cpu-version property in the cpu node contains + * such a value, we call identify_cpu again with the + * logical PVR value in order to use the cpu feature + * bits appropriate for the architecture level. + * + * A POWER6 partition in "POWER6 architected" mode + * uses the 0x0f000002 PVR value; in POWER5+ mode + * it uses 0x0f000001. 
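To make the comment above concrete: the cpu-version test applied just after it accepts only PAPR "architected mode" values and leaves raw hardware PVRs to the normal lookup. A small illustration (the helper name is invented here):

/* Illustration only: logical PVRs defined by PAPR all live in the
 * 0x0f000000 range; anything else is a raw hardware PVR.
 */
static inline int is_logical_pvr(u32 pvr)
{
	return (pvr & 0xff000000) == 0x0f000000;
}

/*
 * is_logical_pvr(0x0f000002) -> 1   (2.05, "POWER6 architected" mode)
 * is_logical_pvr(0x0f000001) -> 1   (2.04, POWER5+ compatibility mode)
 * is_logical_pvr(0x003e0000) -> 0   (raw POWER6 PVR, normal lookup)
 */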
+ */ + prop = of_get_flat_dt_prop(node, "cpu-version", NULL); + if (prop && (*prop & 0xff000000) == 0x0f000000) + identify_cpu(0, *prop); } -#endif /* CONFIG_ALTIVEC */ + check_cpu_feature_properties(node); check_cpu_pa_features(node); #ifdef CONFIG_PPC_PSERIES @@ -1674,6 +1716,7 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread) } return NULL; } +EXPORT_SYMBOL(of_get_cpu_node); #ifdef DEBUG static struct debugfs_blob_wrapper flat_dt_blob; diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index b91761639d9..46cf32670dd 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -173,8 +173,8 @@ static unsigned long __initdata dt_string_start, dt_string_end; static unsigned long __initdata prom_initrd_start, prom_initrd_end; #ifdef CONFIG_PPC64 -static int __initdata iommu_force_on; -static int __initdata ppc64_iommu_off; +static int __initdata prom_iommu_force_on; +static int __initdata prom_iommu_off; static unsigned long __initdata prom_tce_alloc_start; static unsigned long __initdata prom_tce_alloc_end; #endif @@ -582,9 +582,9 @@ static void __init early_cmdline_parse(void) while (*opt && *opt == ' ') opt++; if (!strncmp(opt, RELOC("off"), 3)) - RELOC(ppc64_iommu_off) = 1; + RELOC(prom_iommu_off) = 1; else if (!strncmp(opt, RELOC("force"), 5)) - RELOC(iommu_force_on) = 1; + RELOC(prom_iommu_force_on) = 1; } #endif } @@ -627,6 +627,7 @@ static void __init early_cmdline_parse(void) /* Option vector 3: processor options supported */ #define OV3_FP 0x80 /* floating point */ #define OV3_VMX 0x40 /* VMX/Altivec */ +#define OV3_DFP 0x20 /* decimal FP */ /* Option vector 5: PAPR/OF options supported */ #define OV5_LPAR 0x80 /* logical partitioning supported */ @@ -642,6 +643,7 @@ static void __init early_cmdline_parse(void) static unsigned char ibm_architecture_vec[] = { W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */ W(0xffff0000), W(0x003e0000), /* POWER6 */ + W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */ W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */ 5 - 1, /* 5 option vectors */ @@ -668,7 +670,7 @@ static unsigned char ibm_architecture_vec[] = { /* option vector 3: processor options supported */ 3 - 2, /* length */ 0, /* don't ignore, don't halt */ - OV3_FP | OV3_VMX, + OV3_FP | OV3_VMX | OV3_DFP, /* option vector 4: IBM PAPR implementation */ 2 - 2, /* length */ @@ -1167,7 +1169,7 @@ static void __init prom_initialize_tce_table(void) u64 local_alloc_top, local_alloc_bottom; u64 i; - if (RELOC(ppc64_iommu_off)) + if (RELOC(prom_iommu_off)) return; prom_debug("starting prom_initialize_tce_table\n"); @@ -2283,11 +2285,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, * Fill in some infos for use by the kernel later on */ #ifdef CONFIG_PPC64 - if (RELOC(ppc64_iommu_off)) + if (RELOC(prom_iommu_off)) prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off", NULL, 0); - if (RELOC(iommu_force_on)) + if (RELOC(prom_iommu_force_on)) prom_setprop(_prom->chosen, "/chosen", "linux,iommu-force-on", NULL, 0); diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c index 603dff3ad62..0dfbe1cd28e 100644 --- a/arch/powerpc/kernel/prom_parse.c +++ b/arch/powerpc/kernel/prom_parse.c @@ -25,6 +25,12 @@ #define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \ (ns) > 0) +static struct of_bus *of_match_bus(struct device_node *np); +static int __of_address_to_resource(struct device_node *dev, + const u32 *addrp, u64 size, unsigned int flags, + 
struct resource *r); + + /* Debug utility */ #ifdef DEBUG static void of_dump_addr(const char *s, const u32 *addr, int na) @@ -101,6 +107,7 @@ static unsigned int of_bus_default_get_flags(const u32 *addr) } +#ifdef CONFIG_PCI /* * PCI bus specific translator */ @@ -153,15 +160,156 @@ static unsigned int of_bus_pci_get_flags(const u32 *addr) switch((w >> 24) & 0x03) { case 0x01: flags |= IORESOURCE_IO; + break; case 0x02: /* 32 bits */ case 0x03: /* 64 bits */ flags |= IORESOURCE_MEM; + break; } if (w & 0x40000000) flags |= IORESOURCE_PREFETCH; return flags; } +const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size, + unsigned int *flags) +{ + const u32 *prop; + unsigned int psize; + struct device_node *parent; + struct of_bus *bus; + int onesize, i, na, ns; + + /* Get parent & match bus type */ + parent = of_get_parent(dev); + if (parent == NULL) + return NULL; + bus = of_match_bus(parent); + if (strcmp(bus->name, "pci")) { + of_node_put(parent); + return NULL; + } + bus->count_cells(dev, &na, &ns); + of_node_put(parent); + if (!OF_CHECK_COUNTS(na, ns)) + return NULL; + + /* Get "reg" or "assigned-addresses" property */ + prop = get_property(dev, bus->addresses, &psize); + if (prop == NULL) + return NULL; + psize /= 4; + + onesize = na + ns; + for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) + if ((prop[0] & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) { + if (size) + *size = of_read_number(prop + na, ns); + if (flags) + *flags = bus->get_flags(prop); + return prop; + } + return NULL; +} +EXPORT_SYMBOL(of_get_pci_address); + +int of_pci_address_to_resource(struct device_node *dev, int bar, + struct resource *r) +{ + const u32 *addrp; + u64 size; + unsigned int flags; + + addrp = of_get_pci_address(dev, bar, &size, &flags); + if (addrp == NULL) + return -EINVAL; + return __of_address_to_resource(dev, addrp, size, flags, r); +} +EXPORT_SYMBOL_GPL(of_pci_address_to_resource); + +static u8 of_irq_pci_swizzle(u8 slot, u8 pin) +{ + return (((pin - 1) + slot) % 4) + 1; +} + +int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) +{ + struct device_node *dn, *ppnode; + struct pci_dev *ppdev; + u32 lspec; + u32 laddr[3]; + u8 pin; + int rc; + + /* Check if we have a device node, if yes, fallback to standard OF + * parsing + */ + dn = pci_device_to_OF_node(pdev); + if (dn) + return of_irq_map_one(dn, 0, out_irq); + + /* Ok, we don't, time to have fun. Let's start by building up an + * interrupt spec. we assume #interrupt-cells is 1, which is standard + * for PCI. If you do different, then don't use that routine. + */ + rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); + if (rc != 0) + return rc; + /* No pin, exit */ + if (pin == 0) + return -ENODEV; + + /* Now we walk up the PCI tree */ + lspec = pin; + for (;;) { + /* Get the pci_dev of our parent */ + ppdev = pdev->bus->self; + + /* Ouch, it's a host bridge... */ + if (ppdev == NULL) { +#ifdef CONFIG_PPC64 + ppnode = pci_bus_to_OF_node(pdev->bus); +#else + struct pci_controller *host; + host = pci_bus_to_host(pdev->bus); + ppnode = host ? host->arch_data : NULL; +#endif + /* No node for host bridge ? give up */ + if (ppnode == NULL) + return -EINVAL; + } else + /* We found a P2P bridge, check if it has a node */ + ppnode = pci_device_to_OF_node(ppdev); + + /* Ok, we have found a parent with a device-node, hand over to + * the OF parsing code. + * We build a unit address from the linux device to be used for + * resolution. 
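A sketch of how the of_irq_map_pci() helper being added here is used when the arch code fills in pdev->irq; pdev is assumed valid and error handling is kept minimal:

/* Sketch: resolve a PCI device's interrupt through the device tree and
 * map it onto a Linux virq.
 */
static void example_fixup_irq(struct pci_dev *pdev)
{
	struct of_irq oirq;
	unsigned int virq;

	if (of_irq_map_pci(pdev, &oirq) != 0)
		return;
	virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
				     oirq.size);
	if (virq != NO_IRQ)
		pdev->irq = virq;
}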
Note that we use the linux bus number which may + * not match your firmware bus numbering. + * Fortunately, in most cases, interrupt-map-mask doesn't include + * the bus number as part of the matching. + * You should still be careful about that though if you intend + * to rely on this function (you ship a firmware that doesn't + * create device nodes for all PCI devices). + */ + if (ppnode) + break; + + /* We can only get here if we hit a P2P bridge with no node, + * let's do standard swizzling and try again + */ + lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec); + pdev = ppdev; + } + + laddr[0] = (pdev->bus->number << 16) + | (pdev->devfn << 8); + laddr[1] = laddr[2] = 0; + return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq); +} +EXPORT_SYMBOL_GPL(of_irq_map_pci); +#endif /* CONFIG_PCI */ + /* * ISA bus specific translator */ @@ -223,6 +371,7 @@ static unsigned int of_bus_isa_get_flags(const u32 *addr) */ static struct of_bus of_busses[] = { +#ifdef CONFIG_PCI /* PCI */ { .name = "pci", @@ -233,6 +382,7 @@ static struct of_bus of_busses[] = { .translate = of_bus_pci_translate, .get_flags = of_bus_pci_get_flags, }, +#endif /* CONFIG_PCI */ /* ISA */ { .name = "isa", @@ -445,48 +595,6 @@ const u32 *of_get_address(struct device_node *dev, int index, u64 *size, } EXPORT_SYMBOL(of_get_address); -const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size, - unsigned int *flags) -{ - const u32 *prop; - unsigned int psize; - struct device_node *parent; - struct of_bus *bus; - int onesize, i, na, ns; - - /* Get parent & match bus type */ - parent = of_get_parent(dev); - if (parent == NULL) - return NULL; - bus = of_match_bus(parent); - if (strcmp(bus->name, "pci")) { - of_node_put(parent); - return NULL; - } - bus->count_cells(dev, &na, &ns); - of_node_put(parent); - if (!OF_CHECK_COUNTS(na, ns)) - return NULL; - - /* Get "reg" or "assigned-addresses" property */ - prop = get_property(dev, bus->addresses, &psize); - if (prop == NULL) - return NULL; - psize /= 4; - - onesize = na + ns; - for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) - if ((prop[0] & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) { - if (size) - *size = of_read_number(prop + na, ns); - if (flags) - *flags = bus->get_flags(prop); - return prop; - } - return NULL; -} -EXPORT_SYMBOL(of_get_pci_address); - static int __of_address_to_resource(struct device_node *dev, const u32 *addrp, u64 size, unsigned int flags, struct resource *r) @@ -529,20 +637,6 @@ int of_address_to_resource(struct device_node *dev, int index, } EXPORT_SYMBOL_GPL(of_address_to_resource); -int of_pci_address_to_resource(struct device_node *dev, int bar, - struct resource *r) -{ - const u32 *addrp; - u64 size; - unsigned int flags; - - addrp = of_get_pci_address(dev, bar, &size, &flags); - if (addrp == NULL) - return -EINVAL; - return __of_address_to_resource(dev, addrp, size, flags, r); -} -EXPORT_SYMBOL_GPL(of_pci_address_to_resource); - void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, unsigned long *busno, unsigned long *phys, unsigned long *size) { @@ -898,87 +992,3 @@ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq return res; } EXPORT_SYMBOL_GPL(of_irq_map_one); - -#ifdef CONFIG_PCI -static u8 of_irq_pci_swizzle(u8 slot, u8 pin) -{ - return (((pin - 1) + slot) % 4) + 1; -} - -int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) -{ - struct device_node *dn, *ppnode; - struct pci_dev *ppdev; - u32 lspec; - u32 laddr[3]; - u8 pin; - int 
rc; - - /* Check if we have a device node, if yes, fallback to standard OF - * parsing - */ - dn = pci_device_to_OF_node(pdev); - if (dn) - return of_irq_map_one(dn, 0, out_irq); - - /* Ok, we don't, time to have fun. Let's start by building up an - * interrupt spec. we assume #interrupt-cells is 1, which is standard - * for PCI. If you do different, then don't use that routine. - */ - rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); - if (rc != 0) - return rc; - /* No pin, exit */ - if (pin == 0) - return -ENODEV; - - /* Now we walk up the PCI tree */ - lspec = pin; - for (;;) { - /* Get the pci_dev of our parent */ - ppdev = pdev->bus->self; - - /* Ouch, it's a host bridge... */ - if (ppdev == NULL) { -#ifdef CONFIG_PPC64 - ppnode = pci_bus_to_OF_node(pdev->bus); -#else - struct pci_controller *host; - host = pci_bus_to_host(pdev->bus); - ppnode = host ? host->arch_data : NULL; -#endif - /* No node for host bridge ? give up */ - if (ppnode == NULL) - return -EINVAL; - } else - /* We found a P2P bridge, check if it has a node */ - ppnode = pci_device_to_OF_node(ppdev); - - /* Ok, we have found a parent with a device-node, hand over to - * the OF parsing code. - * We build a unit address from the linux device to be used for - * resolution. Note that we use the linux bus number which may - * not match your firmware bus numbering. - * Fortunately, in most cases, interrupt-map-mask doesn't include - * the bus number as part of the matching. - * You should still be careful about that though if you intend - * to rely on this function (you ship a firmware that doesn't - * create device nodes for all PCI devices). - */ - if (ppnode) - break; - - /* We can only get here if we hit a P2P bridge with no node, - * let's do standard swizzling and try again - */ - lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec); - pdev = ppdev; - } - - laddr[0] = (pdev->bus->number << 16) - | (pdev->devfn << 8); - laddr[1] = laddr[2] = 0; - return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq); -} -EXPORT_SYMBOL_GPL(of_irq_map_pci); -#endif /* CONFIG_PCI */ diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 6ef80d4e38d..387ed0d9ad6 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -810,9 +810,9 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs) return 0; } +#ifdef CONFIG_HOTPLUG_CPU /* This version can't take the spinlock, because it never returns */ - -struct rtas_args rtas_stop_self_args = { +static struct rtas_args rtas_stop_self_args = { /* The token is initialized for real in setup_system() */ .token = RTAS_UNKNOWN_SERVICE, .nargs = 0, @@ -834,6 +834,7 @@ void rtas_stop_self(void) panic("Alas, I survived.\n"); } +#endif /* * Call early during boot, before mem init or bootmem, to retrieve the RTAS diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 6f6fc977cb3..b9561d30051 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c @@ -681,14 +681,12 @@ static int initialize_flash_pde_data(const char *rtas_call_name, int *status; int token; - dp->data = kmalloc(buf_size, GFP_KERNEL); + dp->data = kzalloc(buf_size, GFP_KERNEL); if (dp->data == NULL) { remove_flash_pde(dp); return -ENOMEM; } - memset(dp->data, 0, buf_size); - /* * This code assumes that the status int is the first member of the * struct diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index b4a0de79c06..ace9f4c86e6 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ 
b/arch/powerpc/kernel/rtas_pci.c @@ -38,6 +38,7 @@ #include <asm/rtas.h> #include <asm/mpic.h> #include <asm/ppc-pci.h> +#include <asm/eeh.h> /* RTAS tokens */ static int read_pci_config; @@ -231,32 +232,13 @@ void __init init_pci_config_tokens (void) unsigned long __devinit get_phb_buid (struct device_node *phb) { - int addr_cells; - const unsigned int *buid_vals; - unsigned int len; - unsigned long buid; - - if (ibm_read_pci_config == -1) return 0; + struct resource r; - /* PHB's will always be children of the root node, - * or so it is promised by the current firmware. */ - if (phb->parent == NULL) + if (ibm_read_pci_config == -1) return 0; - if (phb->parent->parent) - return 0; - - buid_vals = get_property(phb, "reg", &len); - if (buid_vals == NULL) + if (of_address_to_resource(phb, 0, &r)) return 0; - - addr_cells = prom_n_addr_cells(phb); - if (addr_cells == 1) { - buid = (unsigned long) buid_vals[0]; - } else { - buid = (((unsigned long)buid_vals[0]) << 32UL) | - (((unsigned long)buid_vals[1]) & 0xffffffff); - } - return buid; + return r.start; } static int phb_set_bus_ranges(struct device_node *dev, @@ -276,8 +258,10 @@ static int phb_set_bus_ranges(struct device_node *dev, return 0; } -int __devinit setup_phb(struct device_node *dev, struct pci_controller *phb) +int __devinit rtas_setup_phb(struct pci_controller *phb) { + struct device_node *dev = phb->arch_data; + if (is_python(dev)) python_countermeasures(dev); @@ -309,7 +293,7 @@ unsigned long __init find_and_init_phbs(void) phb = pcibios_alloc_controller(node); if (!phb) continue; - setup_phb(node, phb); + rtas_setup_phb(phb); pci_process_bridge_OF_ranges(phb, node, 0); pci_setup_phb_io(phb, index == 0); index++; @@ -381,7 +365,6 @@ int pcibios_remove_root_bus(struct pci_controller *phb) } } - list_del(&phb->list_node); pcibios_free_controller(phb); return 0; diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index a4c2964a3ca..61c65d19ef0 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -63,10 +63,6 @@ unsigned int DMA_MODE_WRITE; int have_of = 1; -#ifdef CONFIG_PPC_MULTIPLATFORM -dev_t boot_dev; -#endif /* CONFIG_PPC_MULTIPLATFORM */ - #ifdef CONFIG_VGA_CONSOLE unsigned long vgacon_remap_base; #endif @@ -101,7 +97,7 @@ unsigned long __init early_init(unsigned long dt_ptr) * Identify the CPU type and fix up code sections * that depend on which cpu we have. */ - spec = identify_cpu(offset); + spec = identify_cpu(offset, mfspr(SPRN_PVR)); do_feature_fixups(spec->cpu_features, PTRRELOC(&__start___ftr_fixup), diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 16278968dab..3733de30e84 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -33,6 +33,7 @@ #include <linux/serial.h> #include <linux/serial_8250.h> #include <linux/bootmem.h> +#include <linux/pci.h> #include <asm/io.h> #include <asm/kdump.h> #include <asm/prom.h> @@ -71,7 +72,6 @@ int have_of = 1; int boot_cpuid = 0; -dev_t boot_dev; u64 ppc64_pft_size; /* Pick defaults since we might want to patch instructions @@ -171,7 +171,7 @@ void __init setup_paca(int cpu) void __init early_setup(unsigned long dt_ptr) { /* Identify CPU type */ - identify_cpu(0); + identify_cpu(0, mfspr(SPRN_PVR)); /* Assume we're on cpu 0 for now. Don't write to the paca yet! 
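The get_phb_buid() rewrite above leans on of_address_to_resource() instead of hand-parsing "reg"; the general pattern looks like the sketch below, with the node pointer and the debug print purely illustrative:

/* Sketch: translate "reg" entry 0 of a device node into a CPU-physical
 * resource -- the helper the new get_phb_buid() builds on.
 */
static void example_show_reg(struct device_node *np)
{
	struct resource r;

	if (of_address_to_resource(np, 0, &r) != 0)
		return;
	printk(KERN_DEBUG "%s: %llx..%llx\n", np->full_name,
	       (unsigned long long)r.start, (unsigned long long)r.end);
}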
*/ setup_paca(0); @@ -226,8 +226,8 @@ void early_setup_secondary(void) { struct paca_struct *lpaca = get_paca(); - /* Mark enabled in PACA */ - lpaca->proc_enabled = 0; + /* Mark interrupts enabled in PACA */ + lpaca->soft_enabled = 0; /* Initialize hash table for that CPU */ htab_initialize_secondary(); @@ -392,7 +392,8 @@ void __init setup_system(void) * setting up the hash table pointers. It also sets up some interrupt-mapping * related options that will be used by finish_device_tree() */ - ppc_md.init_early(); + if (ppc_md.init_early) + ppc_md.init_early(); /* * We can discover serial ports now since the above did setup the @@ -598,3 +599,10 @@ void __init setup_per_cpu_areas(void) } } #endif + + +#ifdef CONFIG_PPC_INDIRECT_IO +struct ppc_pci_io ppc_pci_io; +EXPORT_SYMBOL(ppc_pci_io); +#endif /* CONFIG_PPC_INDIRECT_IO */ + diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c index de59c6c31a5..bc892e69b4f 100644 --- a/arch/powerpc/kernel/smp-tbsync.c +++ b/arch/powerpc/kernel/smp-tbsync.c @@ -78,7 +78,7 @@ static int __devinit start_contest(int cmd, long offset, int num) { int i, score=0; u64 tb; - long mark; + u64 mark; tbsync->cmd = cmd; @@ -116,8 +116,7 @@ void __devinit smp_generic_give_timebase(void) printk("Synchronizing timebase\n"); /* if this fails then this kernel won't work anyway... */ - tbsync = kmalloc( sizeof(*tbsync), GFP_KERNEL ); - memset( tbsync, 0, sizeof(*tbsync) ); + tbsync = kzalloc( sizeof(*tbsync), GFP_KERNEL ); mb(); running = 1; diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 35c6309bdb7..9b28c238b6c 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -65,6 +65,7 @@ cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; EXPORT_SYMBOL(cpu_online_map); EXPORT_SYMBOL(cpu_possible_map); +EXPORT_SYMBOL(cpu_sibling_map); /* SMP operations for this machine */ struct smp_ops_t *smp_ops; diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index d15c33e9595..03a2a2f30d6 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -51,6 +51,7 @@ #include <asm/time.h> #include <asm/mmu_context.h> #include <asm/ppc-pci.h> +#include <asm/syscalls.h> /* readdir & getdents */ #define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de))) diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index d45a168bdac..22123a0d541 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -200,10 +200,9 @@ static void register_cpu_online(unsigned int cpu) struct cpu *c = &per_cpu(cpu_devices, cpu); struct sys_device *s = &c->sysdev; -#ifndef CONFIG_PPC_ISERIES - if (cpu_has_feature(CPU_FTR_SMT)) + if (!firmware_has_feature(FW_FEATURE_ISERIES) && + cpu_has_feature(CPU_FTR_SMT)) sysdev_create_file(s, &attr_smt_snooze_delay); -#endif /* PMC stuff */ @@ -242,10 +241,9 @@ static void unregister_cpu_online(unsigned int cpu) BUG_ON(c->no_control); -#ifndef CONFIG_PPC_ISERIES - if (cpu_has_feature(CPU_FTR_SMT)) + if (!firmware_has_feature(FW_FEATURE_ISERIES) && + cpu_has_feature(CPU_FTR_SMT)) sysdev_remove_file(s, &attr_smt_snooze_delay); -#endif /* PMC stuff */ @@ -299,6 +297,72 @@ static struct notifier_block __cpuinitdata sysfs_cpu_nb = { .notifier_call = sysfs_cpu_notify, }; +static DEFINE_MUTEX(cpu_mutex); + +int cpu_add_sysdev_attr(struct sysdev_attribute *attr) +{ + int cpu; + + mutex_lock(&cpu_mutex); + + for_each_possible_cpu(cpu) { + sysdev_create_file(get_cpu_sysdev(cpu), attr); + } + + 
mutex_unlock(&cpu_mutex); + return 0; +} +EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr); + +int cpu_add_sysdev_attr_group(struct attribute_group *attrs) +{ + int cpu; + struct sys_device *sysdev; + + mutex_lock(&cpu_mutex); + + for_each_possible_cpu(cpu) { + sysdev = get_cpu_sysdev(cpu); + sysfs_create_group(&sysdev->kobj, attrs); + } + + mutex_unlock(&cpu_mutex); + return 0; +} +EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr_group); + + +void cpu_remove_sysdev_attr(struct sysdev_attribute *attr) +{ + int cpu; + + mutex_lock(&cpu_mutex); + + for_each_possible_cpu(cpu) { + sysdev_remove_file(get_cpu_sysdev(cpu), attr); + } + + mutex_unlock(&cpu_mutex); +} +EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr); + +void cpu_remove_sysdev_attr_group(struct attribute_group *attrs) +{ + int cpu; + struct sys_device *sysdev; + + mutex_lock(&cpu_mutex); + + for_each_possible_cpu(cpu) { + sysdev = get_cpu_sysdev(cpu); + sysfs_remove_group(&sysdev->kobj, attrs); + } + + mutex_unlock(&cpu_mutex); +} +EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr_group); + + /* NUMA stuff */ #ifdef CONFIG_NUMA diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 46a24de36fe..f6f0c6b07c4 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -631,7 +631,8 @@ void timer_interrupt(struct pt_regs * regs) calculate_steal_time(); #ifdef CONFIG_PPC_ISERIES - get_lppaca()->int_dword.fields.decr_int = 0; + if (firmware_has_feature(FW_FEATURE_ISERIES)) + get_lppaca()->int_dword.fields.decr_int = 0; #endif while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu))) @@ -674,7 +675,7 @@ void timer_interrupt(struct pt_regs * regs) set_dec(next_dec); #ifdef CONFIG_PPC_ISERIES - if (hvlpevent_is_pending()) + if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending()) process_hvlpevents(); #endif @@ -774,7 +775,7 @@ int do_settimeofday(struct timespec *tv) * settimeofday to perform this operation. 
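The cpu_add_sysdev_attr*() helpers above let other code attach attributes to every CPU's sysdev without open-coding the loop. A hypothetical user, with the attribute name and show routine invented for the example:

/* Hypothetical per-cpu attribute that just reports the sysdev id;
 * SYSDEV_ATTR() generates "attr_example", registration uses the new helper.
 */
#include <linux/sysdev.h>

static ssize_t example_show(struct sys_device *dev, char *buf)
{
	return sprintf(buf, "%u\n", dev->id);
}
static SYSDEV_ATTR(example, 0444, example_show, NULL);

static int __init example_attr_init(void)
{
	return cpu_add_sysdev_attr(&attr_example);
}
__initcall(example_attr_init);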
*/ #ifdef CONFIG_PPC_ISERIES - if (first_settimeofday) { + if (firmware_has_feature(FW_FEATURE_ISERIES) && first_settimeofday) { iSeries_tb_recal(); first_settimeofday = 0; } diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index c66b4771ef4..0d4e203fa7a 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -53,10 +53,6 @@ #endif #include <asm/kexec.h> -#ifdef CONFIG_PPC64 /* XXX */ -#define _IO_BASE pci_io_base -#endif - #ifdef CONFIG_DEBUGGER int (*__debugger)(struct pt_regs *regs); int (*__debugger_ipi)(struct pt_regs *regs); @@ -241,7 +237,7 @@ void system_reset_exception(struct pt_regs *regs) */ static inline int check_io_access(struct pt_regs *regs) { -#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) +#ifdef CONFIG_PPC32 unsigned long msr = regs->msr; const struct exception_table_entry *entry; unsigned int *nip = (unsigned int *)regs->nip; @@ -274,7 +270,7 @@ static inline int check_io_access(struct pt_regs *regs) return 1; } } -#endif /* CONFIG_PPC_PMAC && CONFIG_PPC32 */ +#endif /* CONFIG_PPC32 */ return 0; } diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index ed007878d1b..a80f8f1d2e5 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -81,15 +81,15 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) struct iommu_table *tbl; unsigned long offset, size; - dma_window = get_property(dev->dev.platform_data, - "ibm,my-dma-window", NULL); + dma_window = get_property(dev->dev.archdata.of_node, + "ibm,my-dma-window", NULL); if (!dma_window) return NULL; tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); - of_parse_dma_window(dev->dev.platform_data, dma_window, - &tbl->it_index, &offset, &size); + of_parse_dma_window(dev->dev.archdata.of_node, dma_window, + &tbl->it_index, &offset, &size); /* TCE table size - measured in tce entries */ tbl->it_size = size >> IOMMU_PAGE_SHIFT; @@ -117,7 +117,8 @@ static const struct vio_device_id *vio_match_device( { while (ids->type[0] != '\0') { if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) && - device_is_compatible(dev->dev.platform_data, ids->compat)) + device_is_compatible(dev->dev.archdata.of_node, + ids->compat)) return ids; ids++; } @@ -198,9 +199,9 @@ EXPORT_SYMBOL(vio_unregister_driver); /* vio_dev refcount hit 0 */ static void __devinit vio_dev_release(struct device *dev) { - if (dev->platform_data) { - /* XXX free TCE table */ - of_node_put(dev->platform_data); + if (dev->archdata.of_node) { + /* XXX should free TCE table */ + of_node_put(dev->archdata.of_node); } kfree(to_vio_dev(dev)); } @@ -210,7 +211,7 @@ static void __devinit vio_dev_release(struct device *dev) * @of_node: The OF node for this device. * * Creates and initializes a vio_dev structure from the data in - * of_node (dev.platform_data) and adds it to the list of virtual devices. + * of_node and adds it to the list of virtual devices. * Returns a pointer to the created vio_dev or NULL if node has * NULL device_type or compatible fields. 
*/ @@ -240,8 +241,6 @@ struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node) if (viodev == NULL) return NULL; - viodev->dev.platform_data = of_node_get(of_node); - viodev->irq = irq_of_parse_and_map(of_node, 0); snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address); @@ -254,7 +253,10 @@ struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node) if (unit_address != NULL) viodev->unit_address = *unit_address; } - viodev->iommu_table = vio_build_iommu_table(viodev); + viodev->dev.archdata.of_node = of_node_get(of_node); + viodev->dev.archdata.dma_ops = &dma_iommu_ops; + viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev); + viodev->dev.archdata.numa_node = of_node_to_nid(of_node); /* init generic 'struct device' fields: */ viodev->dev.parent = &vio_bus_device.dev; @@ -285,10 +287,11 @@ static int __init vio_bus_init(void) #ifdef CONFIG_PPC_ISERIES if (firmware_has_feature(FW_FEATURE_ISERIES)) { iommu_vio_init(); - vio_bus_device.iommu_table = &vio_iommu_table; + vio_bus_device.dev.archdata.dma_ops = &dma_iommu_ops; + vio_bus_device.dev.archdata.dma_data = &vio_iommu_table; iSeries_vio_dev = &vio_bus_device.dev; } -#endif +#endif /* CONFIG_PPC_ISERIES */ err = bus_register(&vio_bus_type); if (err) { @@ -336,7 +339,7 @@ static ssize_t name_show(struct device *dev, static ssize_t devspec_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct device_node *of_node = dev->platform_data; + struct device_node *of_node = dev->archdata.of_node; return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none"); } @@ -353,62 +356,6 @@ void __devinit vio_unregister_device(struct vio_dev *viodev) } EXPORT_SYMBOL(vio_unregister_device); -static dma_addr_t vio_map_single(struct device *dev, void *vaddr, - size_t size, enum dma_data_direction direction) -{ - return iommu_map_single(to_vio_dev(dev)->iommu_table, vaddr, size, - ~0ul, direction); -} - -static void vio_unmap_single(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction direction) -{ - iommu_unmap_single(to_vio_dev(dev)->iommu_table, dma_handle, size, - direction); -} - -static int vio_map_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction direction) -{ - return iommu_map_sg(dev, to_vio_dev(dev)->iommu_table, sglist, - nelems, ~0ul, direction); -} - -static void vio_unmap_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction direction) -{ - iommu_unmap_sg(to_vio_dev(dev)->iommu_table, sglist, nelems, direction); -} - -static void *vio_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) -{ - return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size, - dma_handle, ~0ul, flag, -1); -} - -static void vio_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) -{ - iommu_free_coherent(to_vio_dev(dev)->iommu_table, size, vaddr, - dma_handle); -} - -static int vio_dma_supported(struct device *dev, u64 mask) -{ - return 1; -} - -struct dma_mapping_ops vio_dma_ops = { - .alloc_coherent = vio_alloc_coherent, - .free_coherent = vio_free_coherent, - .map_single = vio_map_single, - .unmap_single = vio_unmap_single, - .map_sg = vio_map_sg, - .unmap_sg = vio_unmap_sg, - .dma_supported = vio_dma_supported, -}; - static int vio_bus_match(struct device *dev, struct device_driver *drv) { const struct vio_dev *vio_dev = to_vio_dev(dev); @@ -422,13 +369,14 @@ static int vio_hotplug(struct device *dev, char 
**envp, int num_envp, char *buffer, int buffer_size) { const struct vio_dev *vio_dev = to_vio_dev(dev); - struct device_node *dn = dev->platform_data; + struct device_node *dn; const char *cp; int length; if (!num_envp) return -ENOMEM; + dn = dev->archdata.of_node; if (!dn) return -ENODEV; cp = get_property(dn, "compatible", &length); @@ -465,7 +413,7 @@ struct bus_type vio_bus_type = { */ const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length) { - return get_property(vdev->dev.platform_data, which, length); + return get_property(vdev->dev.archdata.of_node, which, length); } EXPORT_SYMBOL(vio_get_attribute); |
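With the conversion above, a vio device carries its firmware node in dev.archdata.of_node and its TCE table in archdata.dma_data, so the generic dma_iommu_ops replace the vio-private DMA ops deleted here. A sketch of a vio driver probe using vio_get_attribute(), which now reads from that archdata node; the property choice and error handling are illustrative only:

/* Sketch of a vio driver probe: vio_get_attribute() fetches a property
 * from the device's firmware node.  Reading "reg" is just an example.
 */
static int example_vio_probe(struct vio_dev *vdev,
			     const struct vio_device_id *id)
{
	int len;
	const unsigned int *reg = vio_get_attribute(vdev, "reg", &len);

	if (!reg || len < sizeof(*reg))
		return -ENODEV;
	dev_info(&vdev->dev, "unit address 0x%x\n", *reg);
	return 0;
}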