Diffstat (limited to 'arch/arm/mm')
43 files changed, 559 insertions, 212 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 83c025e72ce..9264d814cd7 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -17,7 +17,7 @@ config CPU_ARM610 select CPU_CP15_MMU select CPU_COPY_V3 if MMU select CPU_TLB_V3 if MMU - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY help The ARM610 is the successor to the ARM3 processor and was produced by VLSI Technology Inc. @@ -31,7 +31,7 @@ config CPU_ARM7TDMI depends on !MMU select CPU_32v4T select CPU_ABRT_LV4T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4 help A 32-bit RISC microprocessor based on the ARM7 processor core @@ -49,7 +49,7 @@ config CPU_ARM710 select CPU_CP15_MMU select CPU_COPY_V3 if MMU select CPU_TLB_V3 if MMU - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY help A 32-bit RISC microprocessor based on the ARM7 processor core designed by Advanced RISC Machines Ltd. The ARM710 is the @@ -64,7 +64,7 @@ config CPU_ARM720T bool "Support ARM720T processor" if ARCH_INTEGRATOR select CPU_32v4T select CPU_ABRT_LV4T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4 select CPU_CACHE_VIVT select CPU_CP15_MMU @@ -83,7 +83,7 @@ config CPU_ARM740T depends on !MMU select CPU_32v4T select CPU_ABRT_LV4T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V3 # although the core is v4t select CPU_CP15_MPU help @@ -100,7 +100,7 @@ config CPU_ARM9TDMI depends on !MMU select CPU_32v4T select CPU_ABRT_NOMMU - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4 help A 32-bit RISC microprocessor based on the ARM9 processor core @@ -114,7 +114,7 @@ config CPU_ARM920T bool "Support ARM920T processor" if ARCH_INTEGRATOR select CPU_32v4T select CPU_ABRT_EV4T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4WT select CPU_CACHE_VIVT select CPU_CP15_MMU @@ -122,10 +122,7 @@ config CPU_ARM920T select CPU_TLB_V4WBI if MMU help The ARM920T is licensed to be produced by numerous vendors, - and is used in the Maverick EP9312 and the Samsung S3C2410. - - More information on the Maverick EP9312 at - <http://linuxdevices.com/products/PD2382866068.html>. + and is used in the Cirrus EP93xx and the Samsung S3C2410. Say Y if you want support for the ARM920T processor. Otherwise, say N. 
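The CPU_PABRT_NOIFAR/CPU_PABRT_IFAR pair is replaced by per-architecture options (LEGACY, V6, V7) because each option now selects one concrete prefetch-abort handler (see the new pabort-*.S files further down) that must hand back both an abort address in r0 and an IFSR value in r1; the same rename continues through the remaining Kconfig entries below. As a standalone C sketch of that calling convention — not kernel code, with hypothetical CP15 read stubs and values:

#include <stdio.h>

struct pabort_info { unsigned long addr; unsigned int ifsr; };

/* Stubs standing in for the CP15 IFSR/IFAR reads (hypothetical values). */
static unsigned int read_ifsr(void) { return 0x005; }
static unsigned long read_ifar(void) { return 0x8000; }

/* Pre-v6 cores have no IFSR: fake a section translation fault (status 5). */
static struct pabort_info legacy_pabort(unsigned long pc)
{
	return (struct pabort_info){ .addr = pc, .ifsr = 5 };
}

/* ARMv6 has an IFSR but no IFAR: the abort address is the faulting pc. */
static struct pabort_info v6_pabort(unsigned long pc)
{
	return (struct pabort_info){ .addr = pc, .ifsr = read_ifsr() };
}

/* ARMv7 provides both IFAR and IFSR. */
static struct pabort_info v7_pabort(unsigned long pc)
{
	(void)pc;
	return (struct pabort_info){ .addr = read_ifar(), .ifsr = read_ifsr() };
}

int main(void)
{
	/* processor_functions-style dispatch: one pointer per CPU class. */
	struct pabort_info (*pabort)(unsigned long) = v7_pabort;
	struct pabort_info info = pabort(0x8000);

	printf("abort at %#lx, IFSR %#x\n", info.addr, info.ifsr);
	return 0;
}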
@@ -135,7 +132,7 @@ config CPU_ARM922T bool "Support ARM922T processor" if ARCH_INTEGRATOR select CPU_32v4T select CPU_ABRT_EV4T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4WT select CPU_CACHE_VIVT select CPU_CP15_MMU @@ -154,7 +151,7 @@ config CPU_ARM925T bool "Support ARM925T processor" if ARCH_OMAP1 select CPU_32v4T select CPU_ABRT_EV4T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4WT select CPU_CACHE_VIVT select CPU_CP15_MMU @@ -173,7 +170,7 @@ config CPU_ARM926T bool "Support ARM926T processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB select CPU_32v5 select CPU_ABRT_EV5TJ - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_COPY_V4WB if MMU @@ -191,7 +188,7 @@ config CPU_FA526 bool select CPU_32v4 select CPU_ABRT_EV4 - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_CACHE_FA @@ -210,7 +207,7 @@ config CPU_ARM940T depends on !MMU select CPU_32v4T select CPU_ABRT_NOMMU - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MPU help @@ -228,7 +225,7 @@ config CPU_ARM946E depends on !MMU select CPU_32v5 select CPU_ABRT_NOMMU - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MPU help @@ -244,7 +241,7 @@ config CPU_ARM1020 bool "Support ARM1020T (rev 0) processor" if ARCH_INTEGRATOR select CPU_32v5 select CPU_ABRT_EV4T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4WT select CPU_CACHE_VIVT select CPU_CP15_MMU @@ -262,7 +259,7 @@ config CPU_ARM1020E bool "Support ARM1020E processor" if ARCH_INTEGRATOR select CPU_32v5 select CPU_ABRT_EV4T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4WT select CPU_CACHE_VIVT select CPU_CP15_MMU @@ -275,7 +272,7 @@ config CPU_ARM1022 bool "Support ARM1022E processor" if ARCH_INTEGRATOR select CPU_32v5 select CPU_ABRT_EV4T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_COPY_V4WB if MMU # can probably do better @@ -293,7 +290,7 @@ config CPU_ARM1026 bool "Support ARM1026EJ-S processor" if ARCH_INTEGRATOR select CPU_32v5 select CPU_ABRT_EV5T # But need Jazelle, but EV5TJ ignores bit 10 - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_COPY_V4WB if MMU # can probably do better @@ -311,7 +308,7 @@ config CPU_SA110 select CPU_32v3 if ARCH_RPC select CPU_32v4 if !ARCH_RPC select CPU_ABRT_EV4 - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4WB select CPU_CACHE_VIVT select CPU_CP15_MMU @@ -331,7 +328,7 @@ config CPU_SA1100 bool select CPU_32v4 select CPU_ABRT_EV4 - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4WB select CPU_CACHE_VIVT select CPU_CP15_MMU @@ -342,7 +339,7 @@ config CPU_XSCALE bool select CPU_32v5 select CPU_ABRT_EV5T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_TLB_V4WBI if MMU @@ -352,7 +349,7 @@ config CPU_XSC3 bool select CPU_32v5 select CPU_ABRT_EV5T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_TLB_V4WBI if MMU @@ -363,7 +360,7 @@ config CPU_MOHAWK bool select CPU_32v5 select CPU_ABRT_EV5T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_TLB_V4WBI if MMU @@ -374,7 +371,7 @@ config CPU_FEROCEON bool select CPU_32v5 select CPU_ABRT_EV5T - select 
CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_COPY_FEROCEON if MMU @@ -394,7 +391,7 @@ config CPU_V6 bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX select CPU_32v6 select CPU_ABRT_EV6 - select CPU_PABRT_NOIFAR + select CPU_PABRT_V6 select CPU_CACHE_V6 select CPU_CACHE_VIPT select CPU_CP15_MMU @@ -420,7 +417,7 @@ config CPU_V7 select CPU_32v6K select CPU_32v7 select CPU_ABRT_EV7 - select CPU_PABRT_IFAR + select CPU_PABRT_V7 select CPU_CACHE_V7 select CPU_CACHE_VIPT select CPU_CP15_MMU @@ -482,10 +479,13 @@ config CPU_ABRT_EV6 config CPU_ABRT_EV7 bool -config CPU_PABRT_IFAR +config CPU_PABRT_LEGACY + bool + +config CPU_PABRT_V6 bool -config CPU_PABRT_NOIFAR +config CPU_PABRT_V7 bool # The cache model @@ -758,7 +758,7 @@ config CACHE_FEROCEON_L2_WRITETHROUGH config CACHE_L2X0 bool "Enable the L2x0 outer cache controller" depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \ - REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX + REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK default y select OUTER_CACHE help @@ -771,3 +771,8 @@ config CACHE_XSC3L2 select OUTER_CACHE help This option enables the L2 cache on XScale3. + +config ARM_L1_CACHE_SHIFT + int + default 6 if ARCH_OMAP3 + default 5 diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 63e3f6dd0e2..055cb2aa813 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -27,6 +27,10 @@ obj-$(CONFIG_CPU_ABRT_EV5TJ) += abort-ev5tj.o obj-$(CONFIG_CPU_ABRT_EV6) += abort-ev6.o obj-$(CONFIG_CPU_ABRT_EV7) += abort-ev7.o +obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o +obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o +obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o + obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 03cd27d917b..b270d6228fe 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -159,7 +159,9 @@ union offset_union { #define __get8_unaligned_check(ins,val,addr,err) \ __asm__( \ - "1: "ins" %1, [%2], #1\n" \ + ARM( "1: "ins" %1, [%2], #1\n" ) \ + THUMB( "1: "ins" %1, [%2]\n" ) \ + THUMB( " add %2, %2, #1\n" ) \ "2:\n" \ " .section .fixup,\"ax\"\n" \ " .align 2\n" \ @@ -215,7 +217,9 @@ union offset_union { do { \ unsigned int err = 0, v = val, a = addr; \ __asm__( FIRST_BYTE_16 \ - "1: "ins" %1, [%2], #1\n" \ + ARM( "1: "ins" %1, [%2], #1\n" ) \ + THUMB( "1: "ins" %1, [%2]\n" ) \ + THUMB( " add %2, %2, #1\n" ) \ " mov %1, %1, "NEXT_BYTE"\n" \ "2: "ins" %1, [%2]\n" \ "3:\n" \ @@ -245,11 +249,17 @@ union offset_union { do { \ unsigned int err = 0, v = val, a = addr; \ __asm__( FIRST_BYTE_32 \ - "1: "ins" %1, [%2], #1\n" \ + ARM( "1: "ins" %1, [%2], #1\n" ) \ + THUMB( "1: "ins" %1, [%2]\n" ) \ + THUMB( " add %2, %2, #1\n" ) \ " mov %1, %1, "NEXT_BYTE"\n" \ - "2: "ins" %1, [%2], #1\n" \ + ARM( "2: "ins" %1, [%2], #1\n" ) \ + THUMB( "2: "ins" %1, [%2]\n" ) \ + THUMB( " add %2, %2, #1\n" ) \ " mov %1, %1, "NEXT_BYTE"\n" \ - "3: "ins" %1, [%2], #1\n" \ + ARM( "3: "ins" %1, [%2], #1\n" ) \ + THUMB( "3: "ins" %1, [%2]\n" ) \ + THUMB( " add %2, %2, #1\n" ) \ " mov %1, %1, "NEXT_BYTE"\n" \ "4: "ins" %1, [%2]\n" \ "5:\n" \ diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 8f5c13f4c93..295e25dd638 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -12,6 +12,7 @@ #include 
<linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> +#include <asm/unwind.h> #include "proc-macros.S" @@ -121,11 +122,13 @@ ENTRY(v6_coherent_kern_range) * - the Icache does not read data from the write buffer */ ENTRY(v6_coherent_user_range) - + UNWIND(.fnstart ) #ifdef HARVARD_CACHE bic r0, r0, #CACHE_LINE_SIZE - 1 -1: mcr p15, 0, r0, c7, c10, 1 @ clean D line +1: + USER( mcr p15, 0, r0, c7, c10, 1 ) @ clean D line add r0, r0, #CACHE_LINE_SIZE +2: cmp r0, r1 blo 1b #endif @@ -143,6 +146,19 @@ ENTRY(v6_coherent_user_range) mov pc, lr /* + * Fault handling for the cache operation above. If the virtual address in r0 + * isn't mapped, just try the next page. + */ +9001: + mov r0, r0, lsr #12 + mov r0, r0, lsl #12 + add r0, r0, #4096 + b 2b + UNWIND(.fnend ) +ENDPROC(v6_coherent_user_range) +ENDPROC(v6_coherent_kern_range) + +/* * v6_flush_kern_dcache_page(kaddr) * * Ensure that the data held in the page kaddr is written back diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index be93ff02a98..e1bd9759617 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -13,6 +13,7 @@ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> +#include <asm/unwind.h> #include "proc-macros.S" @@ -21,7 +22,7 @@ * * Flush the whole D-cache. * - * Corrupted registers: r0-r5, r7, r9-r11 + * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode) * * - mm - mm_struct describing address space */ @@ -51,8 +52,12 @@ loop1: loop2: mov r9, r4 @ create working copy of max way size loop3: - orr r11, r10, r9, lsl r5 @ factor way and cache number into r11 - orr r11, r11, r7, lsl r2 @ factor index number into r11 + ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11 + THUMB( lsl r6, r9, r5 ) + THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11 + ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11 + THUMB( lsl r6, r7, r2 ) + THUMB( orr r11, r11, r6 ) @ factor index number into r11 mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way subs r9, r9, #1 @ decrement the way bge loop3 @@ -82,11 +87,13 @@ ENDPROC(v7_flush_dcache_all) * */ ENTRY(v7_flush_kern_cache_all) - stmfd sp!, {r4-r5, r7, r9-r11, lr} + ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} ) + THUMB( stmfd sp!, {r4-r7, r9-r11, lr} ) bl v7_flush_dcache_all mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate - ldmfd sp!, {r4-r5, r7, r9-r11, lr} + ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) + THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) mov pc, lr ENDPROC(v7_flush_kern_cache_all) @@ -147,13 +154,16 @@ ENTRY(v7_coherent_kern_range) * - the Icache does not read data from the write buffer */ ENTRY(v7_coherent_user_range) + UNWIND(.fnstart ) dcache_line_size r2, r3 sub r3, r2, #1 bic r0, r0, r3 -1: mcr p15, 0, r0, c7, c11, 1 @ clean D line to the point of unification +1: + USER( mcr p15, 0, r0, c7, c11, 1 ) @ clean D line to the point of unification dsb - mcr p15, 0, r0, c7, c5, 1 @ invalidate I line + USER( mcr p15, 0, r0, c7, c5, 1 ) @ invalidate I line add r0, r0, r2 +2: cmp r0, r1 blo 1b mov r0, #0 @@ -161,6 +171,17 @@ ENTRY(v7_coherent_user_range) dsb isb mov pc, lr + +/* + * Fault handling for the cache operation above. If the virtual address in r0 + * isn't mapped, just try the next page. 
+ */ +9001: + mov r0, r0, lsr #12 + mov r0, r0, lsl #12 + add r0, r0, #4096 + b 2b + UNWIND(.fnend ) ENDPROC(v7_coherent_kern_range) ENDPROC(v7_coherent_user_range) diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index fc84fcc7438..a9e22e31eaa 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -50,15 +50,12 @@ void __new_context(struct mm_struct *mm) isb(); flush_tlb_all(); if (icache_is_vivt_asid_tagged()) { - asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n" - "mcr p15, 0, %0, c7, c5, 6 @ flush BTAC/BTB\n" - : - : "r" (0)); + __flush_icache_all(); dsb(); } } spin_unlock(&cpu_asid_lock); - mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id()); + cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); mm->context.id = asid; } diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 510c179b0ac..b9590a7085c 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -36,7 +36,34 @@ #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT) #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT) +static u64 get_coherent_dma_mask(struct device *dev) +{ + u64 mask = ISA_DMA_THRESHOLD; + + if (dev) { + mask = dev->coherent_dma_mask; + /* + * Sanity check the DMA mask - it must be non-zero, and + * must be able to be satisfied by a DMA allocation. + */ + if (mask == 0) { + dev_warn(dev, "coherent DMA mask is unset\n"); + return 0; + } + + if ((~mask) & ISA_DMA_THRESHOLD) { + dev_warn(dev, "coherent DMA mask %#llx is smaller " + "than system GFP_DMA mask %#llx\n", + mask, (unsigned long long)ISA_DMA_THRESHOLD); + return 0; + } + } + + return mask; +} + +#ifdef CONFIG_MMU /* * These are the page tables (2MB each) covering uncached, DMA consistent allocations */ @@ -152,7 +179,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct page *page; struct arm_vm_region *c; unsigned long order; - u64 mask = ISA_DMA_THRESHOLD, limit; + u64 mask = get_coherent_dma_mask(dev); + u64 limit; if (!consistent_pte[0]) { printk(KERN_ERR "%s: not initialised\n", __func__); @@ -160,25 +188,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, return NULL; } - if (dev) { - mask = dev->coherent_dma_mask; - - /* - * Sanity check the DMA mask - it must be non-zero, and - * must be able to be satisfied by a DMA allocation. - */ - if (mask == 0) { - dev_warn(dev, "coherent DMA mask is unset\n"); - goto no_page; - } - - if ((~mask) & ISA_DMA_THRESHOLD) { - dev_warn(dev, "coherent DMA mask %#llx is smaller " - "than system GFP_DMA mask %#llx\n", - mask, (unsigned long long)ISA_DMA_THRESHOLD); - goto no_page; - } - } + if (!mask) + goto no_page; /* * Sanity check the allocation size. 
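Factoring the mask checks into get_coherent_dma_mask() leaves __dma_alloc() with a single early bail-out. Reduced to standalone C, the check logic looks like this (a sketch only: ISA_DMA_THRESHOLD is given an arbitrary example value here, whereas the kernel's is platform-defined):

#include <stdint.h>
#include <stdio.h>

#define ISA_DMA_THRESHOLD 0x00ffffffULL	/* example value only */

/* Returns the usable mask, or 0 if the device's mask cannot be satisfied. */
static uint64_t get_coherent_dma_mask(uint64_t mask)
{
	if (mask == 0) {
		fprintf(stderr, "coherent DMA mask is unset\n");
		return 0;
	}
	/* The mask must cover every address GFP_DMA may hand back. */
	if ((~mask) & ISA_DMA_THRESHOLD) {
		fprintf(stderr, "mask %#llx smaller than GFP_DMA mask %#llx\n",
			(unsigned long long)mask,
			(unsigned long long)ISA_DMA_THRESHOLD);
		return 0;
	}
	return mask;
}

int main(void)
{
	uint64_t mask = get_coherent_dma_mask(0xffffffffULL);

	/* Note the 64-bit-safe test (mask < 0xffffffffULL) used in the next
	 * hunk in place of the old mask != 0xffffffff comparison. */
	printf("mask=%#llx, GFP_DMA needed: %d\n", (unsigned long long)mask,
	       mask && mask < 0xffffffffULL);
	return 0;
}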
@@ -194,7 +205,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, order = get_order(size); - if (mask != 0xffffffff) + if (mask < 0xffffffffULL) gfp |= GFP_DMA; page = alloc_pages(gfp, order); @@ -267,6 +278,31 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, *handle = ~0; return NULL; } +#else /* !CONFIG_MMU */ +static void * +__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, + pgprot_t prot) +{ + void *virt; + u64 mask = get_coherent_dma_mask(dev); + + if (!mask) + goto error; + + if (mask < 0xffffffffULL) + gfp |= GFP_DMA; + virt = kmalloc(size, gfp); + if (!virt) + goto error; + + *handle = virt_to_dma(dev, virt); + return virt; + +error: + *handle = ~0; + return NULL; +} +#endif /* CONFIG_MMU */ /* * Allocate DMA-coherent memory space and return both the kernel remapped @@ -311,9 +347,10 @@ EXPORT_SYMBOL(dma_alloc_writecombine); static int dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size) { + int ret = -ENXIO; +#ifdef CONFIG_MMU unsigned long flags, user_size, kern_size; struct arm_vm_region *c; - int ret = -ENXIO; user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; @@ -334,6 +371,7 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma, vma->vm_page_prot); } } +#endif /* CONFIG_MMU */ return ret; } @@ -358,6 +396,7 @@ EXPORT_SYMBOL(dma_mmap_writecombine); * free a page as defined by the above mapping. * Must not be called with IRQs disabled. */ +#ifdef CONFIG_MMU void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle) { struct arm_vm_region *c; @@ -444,6 +483,14 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr __func__, cpu_addr); dump_stack(); } +#else /* !CONFIG_MMU */ +void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle) +{ + if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) + return; + kfree(cpu_addr); +} +#endif /* CONFIG_MMU */ EXPORT_SYMBOL(dma_free_coherent); /* @@ -451,10 +498,12 @@ EXPORT_SYMBOL(dma_free_coherent); */ static int __init consistent_init(void) { + int ret = 0; +#ifdef CONFIG_MMU pgd_t *pgd; pmd_t *pmd; pte_t *pte; - int ret = 0, i = 0; + int i = 0; u32 base = CONSISTENT_BASE; do { @@ -477,6 +526,7 @@ static int __init consistent_init(void) consistent_pte[i++] = pte; base += (1 << PGDIR_SHIFT); } while (base < CONSISTENT_END); +#endif /* !CONFIG_MMU */ return ret; } diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index bc0099d5ae8..d0d17b6a370 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -153,14 +153,11 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) page = pfn_to_page(pfn); mapping = page_mapping(page); - if (mapping) { #ifndef CONFIG_SMP - int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); - - if (dirty) - __flush_dcache_page(mapping, page); + if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) + __flush_dcache_page(mapping, page); #endif - + if (mapping) { if (cache_is_vivt()) make_coherent(mapping, vma, addr, pfn); else if (vma->vm_flags & VM_EXEC) diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 6fdcbb70982..10e06801afb 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -16,6 +16,8 @@ #include <linux/kprobes.h> #include <linux/uaccess.h> #include <linux/page-flags.h> +#include <linux/sched.h> +#include <linux/highmem.h> #include <asm/system.h> #include 
<asm/pgtable.h> @@ -23,6 +25,20 @@ #include "fault.h" +/* + * Fault status register encodings. We steal bit 31 for our own purposes. + */ +#define FSR_LNX_PF (1 << 31) +#define FSR_WRITE (1 << 11) +#define FSR_FS4 (1 << 10) +#define FSR_FS3_0 (15) + +static inline int fsr_fs(unsigned int fsr) +{ + return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6; +} + +#ifdef CONFIG_MMU #ifdef CONFIG_KPROBES static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr) @@ -97,6 +113,10 @@ void show_pte(struct mm_struct *mm, unsigned long addr) printk("\n"); } +#else /* CONFIG_MMU */ +void show_pte(struct mm_struct *mm, unsigned long addr) +{ } +#endif /* CONFIG_MMU */ /* * Oops. The kernel tried to access some page that wasn't present. @@ -171,21 +191,39 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs) __do_kernel_fault(mm, addr, fsr, regs); } +#ifdef CONFIG_MMU #define VM_FAULT_BADMAP 0x010000 #define VM_FAULT_BADACCESS 0x020000 -static int +/* + * Check that the permissions on the VMA allow for the fault which occurred. + * If we encountered a write fault, we must have write permission, otherwise + * we allow any permission. + */ +static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma) +{ + unsigned int mask = VM_READ | VM_WRITE | VM_EXEC; + + if (fsr & FSR_WRITE) + mask = VM_WRITE; + if (fsr & FSR_LNX_PF) + mask = VM_EXEC; + + return vma->vm_flags & mask ? false : true; +} + +static int __kprobes __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, struct task_struct *tsk) { struct vm_area_struct *vma; - int fault, mask; + int fault; vma = find_vma(mm, addr); fault = VM_FAULT_BADMAP; - if (!vma) + if (unlikely(!vma)) goto out; - if (vma->vm_start > addr) + if (unlikely(vma->vm_start > addr)) goto check_stack; /* @@ -193,47 +231,24 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, * memory access, so we can handle it. */ good_area: - if (fsr & (1 << 11)) /* write? */ - mask = VM_WRITE; - else - mask = VM_READ|VM_EXEC|VM_WRITE; - - fault = VM_FAULT_BADACCESS; - if (!(vma->vm_flags & mask)) + if (access_error(fsr, vma)) { + fault = VM_FAULT_BADACCESS; goto out; + } /* - * If for any reason at all we couldn't handle - * the fault, make sure we exit gracefully rather - * than endlessly redo the fault. + * If for any reason at all we couldn't handle the fault, make + * sure we exit gracefully rather than endlessly redo the fault. */ -survive: - fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0); - if (unlikely(fault & VM_FAULT_ERROR)) { - if (fault & VM_FAULT_OOM) - goto out_of_memory; - else if (fault & VM_FAULT_SIGBUS) - return fault; - BUG(); - } + fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? 
FAULT_FLAG_WRITE : 0); + if (unlikely(fault & VM_FAULT_ERROR)) + return fault; if (fault & VM_FAULT_MAJOR) tsk->maj_flt++; else tsk->min_flt++; return fault; -out_of_memory: - if (!is_global_init(tsk)) - goto out; - - /* - * If we are out of memory for pid1, sleep for a while and retry - */ - up_read(&mm->mmap_sem); - yield(); - down_read(&mm->mmap_sem); - goto survive; - check_stack: if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr)) goto good_area; @@ -270,6 +285,18 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc)) goto no_context; down_read(&mm->mmap_sem); + } else { + /* + * The above down_read_trylock() might have succeeded in + * which case, we'll have missed the might_sleep() from + * down_read() + */ + might_sleep(); +#ifdef CONFIG_DEBUG_VM + if (!user_mode(regs) && + !search_exception_tables(regs->ARM_pc)) + goto no_context; +#endif } fault = __do_page_fault(mm, addr, fsr, tsk); @@ -281,6 +308,16 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS)))) return 0; + if (fault & VM_FAULT_OOM) { + /* + * We ran out of memory, call the OOM killer, and return to + * userspace (which will retry the fault, or kill us if we + * got oom-killed) + */ + pagefault_out_of_memory(); + return 0; + } + /* * If we are in kernel mode at this point, we * have no context to handle this fault with. @@ -288,16 +325,6 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (!user_mode(regs)) goto no_context; - if (fault & VM_FAULT_OOM) { - /* - * We ran out of memory, or some other thing - * happened to us that made us unable to handle - * the page fault gracefully. - */ - printk("VM: killing process %s\n", tsk->comm); - do_group_exit(SIGKILL); - return 0; - } if (fault & VM_FAULT_SIGBUS) { /* * We had some memory, but were unable to @@ -322,6 +349,13 @@ no_context: __do_kernel_fault(mm, addr, fsr, regs); return 0; } +#else /* CONFIG_MMU */ +static int +do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) +{ + return 0; +} +#endif /* CONFIG_MMU */ /* * First Level Translation Fault Handler @@ -340,6 +374,7 @@ no_context: * interrupt or a critical region, and should only copy the information * from the master page table, nothing more. */ +#ifdef CONFIG_MMU static int __kprobes do_translation_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) @@ -378,6 +413,14 @@ bad_area: do_bad_area(addr, fsr, regs); return 0; } +#else /* CONFIG_MMU */ +static int +do_translation_fault(unsigned long addr, unsigned int fsr, + struct pt_regs *regs) +{ + return 0; +} +#endif /* CONFIG_MMU */ /* * Some section permission faults need to be handled gracefully. 
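Both abort paths now decode the fault status through the fsr_fs() helper introduced above: it folds FS[4] (bit 10 of the fault status register) down beside FS[3:0] to index a 32-entry table, while bit 31 (FSR_LNX_PF) is borrowed by Linux to tag prefetch aborts. The next hunk wires this into do_DataAbort() and the new ifsr_info[] table. A self-contained illustration of the decode, with an example FSR value only:

#include <stdio.h>

#define FSR_LNX_PF	(1U << 31)
#define FSR_WRITE	(1U << 11)
#define FSR_FS4		(1U << 10)
#define FSR_FS3_0	(15U)

/* Fold FS[4] down next to FS[3:0]: bit 10 shifted right by 6 becomes bit 4. */
static inline unsigned int fsr_fs(unsigned int fsr)
{
	return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
}

int main(void)
{
	/* 0x405 = FS4 set, FS[3:0] = 5 -> table index 21 */
	unsigned int fsr = 0x405;

	printf("index %u, write %d, prefetch %d\n", fsr_fs(fsr),
	       !!(fsr & FSR_WRITE), !!(fsr & FSR_LNX_PF));
	return 0;
}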
@@ -465,10 +508,10 @@ hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *) asmlinkage void __exception do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { - const struct fsr_info *inf = fsr_info + (fsr & 15) + ((fsr & (1 << 10)) >> 6); + const struct fsr_info *inf = fsr_info + fsr_fs(fsr); struct siginfo info; - if (!inf->fn(addr, fsr, regs)) + if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs)) return; printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n", @@ -481,9 +524,58 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) arm_notify_die("", regs, &info, fsr, 0); } + +static struct fsr_info ifsr_info[] = { + { do_bad, SIGBUS, 0, "unknown 0" }, + { do_bad, SIGBUS, 0, "unknown 1" }, + { do_bad, SIGBUS, 0, "debug event" }, + { do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" }, + { do_bad, SIGBUS, 0, "unknown 4" }, + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" }, + { do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" }, + { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" }, + { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, + { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" }, + { do_bad, SIGBUS, 0, "unknown 10" }, + { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" }, + { do_bad, SIGBUS, 0, "external abort on translation" }, + { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" }, + { do_bad, SIGBUS, 0, "external abort on translation" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" }, + { do_bad, SIGBUS, 0, "unknown 16" }, + { do_bad, SIGBUS, 0, "unknown 17" }, + { do_bad, SIGBUS, 0, "unknown 18" }, + { do_bad, SIGBUS, 0, "unknown 19" }, + { do_bad, SIGBUS, 0, "unknown 20" }, + { do_bad, SIGBUS, 0, "unknown 21" }, + { do_bad, SIGBUS, 0, "unknown 22" }, + { do_bad, SIGBUS, 0, "unknown 23" }, + { do_bad, SIGBUS, 0, "unknown 24" }, + { do_bad, SIGBUS, 0, "unknown 25" }, + { do_bad, SIGBUS, 0, "unknown 26" }, + { do_bad, SIGBUS, 0, "unknown 27" }, + { do_bad, SIGBUS, 0, "unknown 28" }, + { do_bad, SIGBUS, 0, "unknown 29" }, + { do_bad, SIGBUS, 0, "unknown 30" }, + { do_bad, SIGBUS, 0, "unknown 31" }, +}; + asmlinkage void __exception -do_PrefetchAbort(unsigned long addr, struct pt_regs *regs) +do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) { - do_translation_fault(addr, 0, regs); + const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr); + struct siginfo info; + + if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs)) + return; + + printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n", + inf->name, ifsr, addr); + + info.si_signo = inf->sig; + info.si_errno = 0; + info.si_code = inf->code; + info.si_addr = (void __user *)addr; + arm_notify_die("", regs, &info, ifsr, 0); } diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index c07222eb5ce..7f294f307c8 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -18,10 +18,6 @@ #include "mm.h" -#ifdef CONFIG_ARM_ERRATA_411920 -extern void v6_icache_inval_all(void); -#endif - #ifdef CONFIG_CPU_CACHE_VIPT #define ALIAS_FLUSH_START 0xffff4000 @@ -35,45 +31,35 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) flush_tlb_kernel_page(to); asm( "mcrr p15, 0, %1, %0, c14\n" - " mcr p15, 0, %2, c7, c10, 4\n" -#ifndef CONFIG_ARM_ERRATA_411920 - " mcr p15, 0, %2, c7, c5, 0\n" -#endif + " mcr p15, 0, %2, c7, c10, 4" : : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero) : "cc"); -#ifdef 
CONFIG_ARM_ERRATA_411920 - v6_icache_inval_all(); -#endif + __flush_icache_all(); } void flush_cache_mm(struct mm_struct *mm) { if (cache_is_vivt()) { - if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) __cpuc_flush_user_all(); return; } if (cache_is_vipt_aliasing()) { asm( "mcr p15, 0, %0, c7, c14, 0\n" - " mcr p15, 0, %0, c7, c10, 4\n" -#ifndef CONFIG_ARM_ERRATA_411920 - " mcr p15, 0, %0, c7, c5, 0\n" -#endif + " mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "cc"); -#ifdef CONFIG_ARM_ERRATA_411920 - v6_icache_inval_all(); -#endif + __flush_icache_all(); } } void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { if (cache_is_vivt()) { - if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end), vma->vm_flags); return; @@ -81,23 +67,18 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned if (cache_is_vipt_aliasing()) { asm( "mcr p15, 0, %0, c7, c14, 0\n" - " mcr p15, 0, %0, c7, c10, 4\n" -#ifndef CONFIG_ARM_ERRATA_411920 - " mcr p15, 0, %0, c7, c5, 0\n" -#endif + " mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "cc"); -#ifdef CONFIG_ARM_ERRATA_411920 - v6_icache_inval_all(); -#endif + __flush_icache_all(); } } void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) { if (cache_is_vivt()) { - if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) { + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { unsigned long addr = user_addr & PAGE_MASK; __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); } @@ -113,7 +94,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, unsigned long len, int write) { if (cache_is_vivt()) { - if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) { + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { unsigned long addr = (unsigned long)kaddr; __cpuc_coherent_kern_range(addr, addr + len); } @@ -126,7 +107,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, } /* VIPT non-aliasing cache */ - if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) && + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) && vma->vm_flags & VM_EXEC) { unsigned long addr = (unsigned long)kaddr; /* only flushing the kernel mapping on non-aliasing VIPT */ @@ -144,7 +125,14 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page) * page. This ensures that data in the physical page is mutually * coherent with the kernel's mapping. */ - __cpuc_flush_dcache_page(page_address(page)); +#ifdef CONFIG_HIGHMEM + /* + * kmap_atomic() doesn't set the page virtual address, and + * kunmap_atomic() takes care of cache flushing already.
+ */ + if (page_address(page)) +#endif + __cpuc_flush_dcache_page(page_address(page)); /* * If this is a page cache page, and we have an aliasing VIPT cache, diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index a34954d9df7..30f82fb5918 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c @@ -40,11 +40,18 @@ void *kmap_atomic(struct page *page, enum km_type type) { unsigned int idx; unsigned long vaddr; + void *kmap; pagefault_disable(); if (!PageHighMem(page)) return page_address(page); + debug_kmap_atomic(type); + + kmap = kmap_high_get(page); + if (kmap) + return kmap; + idx = type + KM_TYPE_NR * smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); #ifdef CONFIG_DEBUG_HIGHMEM @@ -80,6 +87,9 @@ void kunmap_atomic(void *kvaddr, enum km_type type) #else (void) idx; /* to kill a warning */ #endif + } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { + /* this address was obtained through kmap_high_get() */ + kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)])); } pagefault_enable(); } diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 3a7279c1ce5..52c40d15567 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -15,6 +15,7 @@ #include <linux/mman.h> #include <linux/nodemask.h> #include <linux/initrd.h> +#include <linux/sort.h> #include <linux/highmem.h> #include <asm/mach-types.h> @@ -272,7 +273,6 @@ static void __init bootmem_init_node(int node, struct meminfo *mi, struct membank *bank = &mi->bank[i]; if (!bank->highmem) free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank)); - memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank)); } /* @@ -349,12 +349,56 @@ static void __init bootmem_free_node(int node, struct meminfo *mi) free_area_init_node(node, zone_size, min, zhole_size); } +#ifndef CONFIG_SPARSEMEM +int pfn_valid(unsigned long pfn) +{ + struct meminfo *mi = &meminfo; + unsigned int left = 0, right = mi->nr_banks; + + do { + unsigned int mid = (right + left) / 2; + struct membank *bank = &mi->bank[mid]; + + if (pfn < bank_pfn_start(bank)) + right = mid; + else if (pfn >= bank_pfn_end(bank)) + left = mid + 1; + else + return 1; + } while (left < right); + return 0; +} +EXPORT_SYMBOL(pfn_valid); + +static void arm_memory_present(struct meminfo *mi, int node) +{ +} +#else +static void arm_memory_present(struct meminfo *mi, int node) +{ + int i; + for_each_nodebank(i, mi, node) { + struct membank *bank = &mi->bank[i]; + memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank)); + } +} +#endif + +static int __init meminfo_cmp(const void *_a, const void *_b) +{ + const struct membank *a = _a, *b = _b; + long cmp = bank_pfn_start(a) - bank_pfn_start(b); + return cmp < 0 ? -1 : cmp > 0 ? 1 : 0; +} + void __init bootmem_init(void) { struct meminfo *mi = &meminfo; unsigned long min, max_low, max_high; int node, initrd_node; + sort(&mi->bank, mi->nr_banks, sizeof(mi->bank[0]), meminfo_cmp, NULL); + /* * Locate which node contains the ramdisk image, if any. */ @@ -395,6 +439,12 @@ void __init bootmem_init(void) */ if (node == initrd_node) bootmem_reserve_initrd(node); + + /* + * Sparsemem tries to allocate bootmem in memory_present(), + * so must be done after the fixed reservations + */ + arm_memory_present(mi, node); } /* @@ -451,7 +501,7 @@ free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn) /* * Convert start_pfn/end_pfn to a struct page pointer. 
*/ - start_pg = pfn_to_page(start_pfn); + start_pg = pfn_to_page(start_pfn - 1) + 1; end_pg = pfn_to_page(end_pfn); /* @@ -564,8 +614,8 @@ void __init mem_init(void) printk(KERN_NOTICE "Memory: %luKB available (%dK code, " "%dK data, %dK init, %luK highmem)\n", - (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), - codesize >> 10, datasize >> 10, initsize >> 10, + nr_free_pages() << (PAGE_SHIFT-10), codesize >> 10, + datasize >> 10, initsize >> 10, (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))); if (PAGE_SIZE >= 16384 && num_physpages <= 128) { @@ -581,6 +631,14 @@ void __init mem_init(void) void free_initmem(void) { +#ifdef CONFIG_HAVE_TCM + extern char *__tcm_start, *__tcm_end; + + totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)), + __phys_to_pfn(__pa(__tcm_end)), + "TCM link"); +#endif + if (!machine_is_integrator() && !machine_is_cintegrator()) totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), __phys_to_pfn(__pa(__init_end)), diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index f7457fea6de..2b7996401b0 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -124,7 +124,7 @@ int valid_phys_addr_range(unsigned long addr, size_t size) { if (addr < PHYS_OFFSET) return 0; - if (addr + size >= __pa(high_memory - 1)) + if (addr + size > __pa(high_memory - 1) + 1) return 0; return 1; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 4426ee67cec..ea67be0223a 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -21,6 +21,7 @@ #include <asm/cachetype.h> #include <asm/setup.h> #include <asm/sizes.h> +#include <asm/smp_plat.h> #include <asm/tlb.h> #include <asm/highmem.h> @@ -116,6 +117,13 @@ static void __init early_cachepolicy(char **p) } if (i == ARRAY_SIZE(cache_policies)) printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n"); + /* + * This restriction is partly to do with the way we boot; it is + * unpredictable to have memory mapped using two different sets of + * memory attributes (shared, type, and cache attribs). We cannot + * change these attributes once the initial assembly has set up the + * page tables. + */ if (cpu_architecture() >= CPU_ARCH_ARMv6) { printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n"); cachepolicy = CPOLICY_WRITEBACK; @@ -709,10 +717,6 @@ static void __init sanity_check_meminfo(void) if (meminfo.nr_banks >= NR_BANKS) { printk(KERN_CRIT "NR_BANKS too low, " "ignoring high memory\n"); - } else if (cache_is_vipt_aliasing()) { - printk(KERN_CRIT "HIGHMEM is not yet supported " - "with VIPT aliasing cache, " - "ignoring high memory\n"); } else { memmove(bank + 1, bank, (meminfo.nr_banks - i) * sizeof(*bank)); @@ -726,6 +730,8 @@ static void __init sanity_check_meminfo(void) bank->size = VMALLOC_MIN - __va(bank->start); } #else + bank->highmem = highmem; + /* * Check whether this memory bank would entirely overlap * the vmalloc area. @@ -754,6 +760,38 @@ static void __init sanity_check_meminfo(void) #endif j++; } +#ifdef CONFIG_HIGHMEM + if (highmem) { + const char *reason = NULL; + + if (cache_is_vipt_aliasing()) { + /* + * Interactions between kmap and other mappings + * make highmem support with aliasing VIPT caches + * rather difficult.
+ */ + reason = "with VIPT aliasing cache"; #ifdef CONFIG_SMP + } else if (tlb_ops_need_broadcast()) { + /* + * kmap_high needs to occasionally flush TLB entries, + * however, if the TLB entries need to be broadcast + * we may deadlock: + * kmap_high(irqs off)->flush_all_zero_pkmaps-> + * flush_tlb_kernel_range->smp_call_function_many + * (must not be called with irqs off) + */ + reason = "without hardware TLB ops broadcasting"; +#endif + } + if (reason) { + printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n", + reason); + while (j > 0 && meminfo.bank[j - 1].highmem) + j--; + } + } +#endif meminfo.nr_banks = j; } diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index ad7bacc693b..900811cc913 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -12,6 +12,7 @@ #include <asm/cacheflush.h> #include <asm/sections.h> #include <asm/page.h> +#include <asm/setup.h> #include <asm/mach/arch.h> #include "mm.h" diff --git a/arch/arm/mm/pabort-legacy.S b/arch/arm/mm/pabort-legacy.S new file mode 100644 index 00000000000..87970eba88e --- /dev/null +++ b/arch/arm/mm/pabort-legacy.S @@ -0,0 +1,19 @@ +#include <linux/linkage.h> +#include <asm/assembler.h> + +/* + * Function: legacy_pabort + * + * Params : r0 = address of aborted instruction + * + * Returns : r0 = address of abort + * : r1 = Simulated IFSR with section translation fault status + * + * Purpose : obtain information about current prefetch abort. + */ + + .align 5 +ENTRY(legacy_pabort) + mov r1, #5 + mov pc, lr +ENDPROC(legacy_pabort) diff --git a/arch/arm/mm/pabort-v6.S b/arch/arm/mm/pabort-v6.S new file mode 100644 index 00000000000..06e3d1ef211 --- /dev/null +++ b/arch/arm/mm/pabort-v6.S @@ -0,0 +1,19 @@ +#include <linux/linkage.h> +#include <asm/assembler.h> + +/* + * Function: v6_pabort + * + * Params : r0 = address of aborted instruction + * + * Returns : r0 = address of abort + * : r1 = IFSR + * + * Purpose : obtain information about current prefetch abort. + */ + + .align 5 +ENTRY(v6_pabort) + mrc p15, 0, r1, c5, c0, 1 @ get IFSR + mov pc, lr +ENDPROC(v6_pabort) diff --git a/arch/arm/mm/pabort-v7.S b/arch/arm/mm/pabort-v7.S new file mode 100644 index 00000000000..a8b3b300a18 --- /dev/null +++ b/arch/arm/mm/pabort-v7.S @@ -0,0 +1,20 @@ +#include <linux/linkage.h> +#include <asm/assembler.h> + +/* + * Function: v7_pabort + * + * Params : r0 = address of aborted instruction + * + * Returns : r0 = address of abort + * : r1 = IFSR + * + * Purpose : obtain information about current prefetch abort.
+ */ + + .align 5 +ENTRY(v7_pabort) + mrc p15, 0, r0, c6, c0, 2 @ get IFAR + mrc p15, 0, r1, c5, c0, 1 @ get IFSR + mov pc, lr +ENDPROC(v7_pabort) diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index b5551bf010a..d9fb4b98c49 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -449,7 +449,7 @@ arm1020_crval: .type arm1020_processor_functions, #object arm1020_processor_functions: .word v4t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm1020_proc_init .word cpu_arm1020_proc_fin .word cpu_arm1020_reset diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 8bc6740c29e..7453b75dcea 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -430,7 +430,7 @@ arm1020e_crval: .type arm1020e_processor_functions, #object arm1020e_processor_functions: .word v4t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm1020e_proc_init .word cpu_arm1020e_proc_fin .word cpu_arm1020e_reset diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index 2cd03e66c0a..8eb72d75a8b 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -413,7 +413,7 @@ arm1022_crval: .type arm1022_processor_functions, #object arm1022_processor_functions: .word v4t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm1022_proc_init .word cpu_arm1022_proc_fin .word cpu_arm1022_reset diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index ad961a897f6..3b59f0d6713 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -408,7 +408,7 @@ arm1026_crval: .type arm1026_processor_functions, #object arm1026_processor_functions: .word v5t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm1026_proc_init .word cpu_arm1026_proc_fin .word cpu_arm1026_reset diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 80d6e1de069..3f9cd3d8f6d 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -278,7 +278,7 @@ __arm7_setup: mov r0, #0 .type arm6_processor_functions, #object ENTRY(arm6_processor_functions) .word cpu_arm6_data_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm6_proc_init .word cpu_arm6_proc_fin .word cpu_arm6_reset @@ -295,7 +295,7 @@ ENTRY(arm6_processor_functions) .type arm7_processor_functions, #object ENTRY(arm7_processor_functions) .word cpu_arm7_data_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm7_proc_init .word cpu_arm7_proc_fin .word cpu_arm7_reset diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S index 85ae18695f1..0b62de24466 100644 --- a/arch/arm/mm/proc-arm720.S +++ b/arch/arm/mm/proc-arm720.S @@ -181,7 +181,7 @@ arm720_crval: .type arm720_processor_functions, #object ENTRY(arm720_processor_functions) .word v4t_late_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm720_proc_init .word cpu_arm720_proc_fin .word cpu_arm720_reset diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index 4f95bee63e9..01860cdeb2e 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S @@ -126,7 +126,7 @@ __arm740_setup: .type arm740_processor_functions, #object ENTRY(arm740_processor_functions) .word v4t_late_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm740_proc_init .word cpu_arm740_proc_fin .word cpu_arm740_reset diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S index 93e05fa7bed..1201b986382 100644 --- a/arch/arm/mm/proc-arm7tdmi.S +++ 
b/arch/arm/mm/proc-arm7tdmi.S @@ -64,7 +64,7 @@ __arm7tdmi_setup: .type arm7tdmi_processor_functions, #object ENTRY(arm7tdmi_processor_functions) .word v4t_late_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm7tdmi_proc_init .word cpu_arm7tdmi_proc_fin .word cpu_arm7tdmi_reset diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 914d688394f..2b7c197cc58 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -395,7 +395,7 @@ arm920_crval: .type arm920_processor_functions, #object arm920_processor_functions: .word v4t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm920_proc_init .word cpu_arm920_proc_fin .word cpu_arm920_reset diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index 51c9c9859e5..06a1aa4e339 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -399,7 +399,7 @@ arm922_crval: .type arm922_processor_functions, #object arm922_processor_functions: .word v4t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm922_proc_init .word cpu_arm922_proc_fin .word cpu_arm922_reset diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 2724526d89c..cb53435a85a 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -462,7 +462,7 @@ arm925_crval: .type arm925_processor_functions, #object arm925_processor_functions: .word v4t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm925_proc_init .word cpu_arm925_proc_fin .word cpu_arm925_reset diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 54466937bff..1c4848704bb 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -415,7 +415,7 @@ arm926_crval: .type arm926_processor_functions, #object arm926_processor_functions: .word v5tj_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm926_proc_init .word cpu_arm926_proc_fin .word cpu_arm926_reset diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index f595117caf5..5b0f8464c8f 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S @@ -322,7 +322,7 @@ __arm940_setup: .type arm940_processor_functions, #object ENTRY(arm940_processor_functions) .word nommu_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm940_proc_init .word cpu_arm940_proc_fin .word cpu_arm940_reset diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index e03f6ff1fb2..40c0449a139 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -377,7 +377,7 @@ __arm946_setup: .type arm946_processor_functions, #object ENTRY(arm946_processor_functions) .word nommu_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm946_proc_init .word cpu_arm946_proc_fin .word cpu_arm946_reset diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S index be6c11d2b3f..28545c29dbc 100644 --- a/arch/arm/mm/proc-arm9tdmi.S +++ b/arch/arm/mm/proc-arm9tdmi.S @@ -64,7 +64,7 @@ __arm9tdmi_setup: .type arm9tdmi_processor_functions, #object ENTRY(arm9tdmi_processor_functions) .word nommu_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm9tdmi_proc_init .word cpu_arm9tdmi_proc_fin .word cpu_arm9tdmi_reset diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S index 08b8a955d5d..08f5ac237ad 100644 --- a/arch/arm/mm/proc-fa526.S +++ b/arch/arm/mm/proc-fa526.S @@ -191,7 +191,7 @@ fa526_cr1_set: .type fa526_processor_functions, #object fa526_processor_functions: .word v4_early_abort - .word 
pabort_noifar + .word legacy_pabort .word cpu_fa526_proc_init .word cpu_fa526_proc_fin .word cpu_fa526_reset diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index 0fe1f8fc348..d0d7795200f 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S @@ -499,7 +499,7 @@ feroceon_crval: .type feroceon_processor_functions, #object feroceon_processor_functions: .word v5t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_feroceon_proc_init .word cpu_feroceon_proc_fin .word cpu_feroceon_reset diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index 54b1f721dec..7d63beaf974 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -77,19 +77,15 @@ * Sanity check the PTE configuration for the code below - which makes * certain assumptions about how these bits are laid out. */ +#ifdef CONFIG_MMU #if L_PTE_SHARED != PTE_EXT_SHARED #error PTE shared bit mismatch #endif -#if L_PTE_BUFFERABLE != PTE_BUFFERABLE -#error PTE bufferable bit mismatch -#endif -#if L_PTE_CACHEABLE != PTE_CACHEABLE -#error PTE cacheable bit mismatch -#endif #if (L_PTE_EXEC+L_PTE_USER+L_PTE_WRITE+L_PTE_DIRTY+L_PTE_YOUNG+\ L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED #error Invalid Linux PTE bit settings #endif +#endif /* CONFIG_MMU */ /* * The ARMv6 and ARMv7 set_pte_ext translation function. diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 540f5078496..52b5fd74fbb 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -359,7 +359,7 @@ mohawk_crval: .type mohawk_processor_functions, #object mohawk_processor_functions: .word v5t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_mohawk_proc_init .word cpu_mohawk_proc_fin .word cpu_mohawk_reset diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S index 90a7e5279f2..7b706b38990 100644 --- a/arch/arm/mm/proc-sa110.S +++ b/arch/arm/mm/proc-sa110.S @@ -199,7 +199,7 @@ sa110_crval: .type sa110_processor_functions, #object ENTRY(sa110_processor_functions) .word v4_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_sa110_proc_init .word cpu_sa110_proc_fin .word cpu_sa110_reset diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 451e2d953e2..ee7700242c1 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -214,7 +214,7 @@ sa1100_crval: .type sa1100_processor_functions, #object ENTRY(sa1100_processor_functions) .word v4_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_sa1100_proc_init .word cpu_sa1100_proc_fin .word cpu_sa1100_reset diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 524ddae9259..70f75d2e3ea 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -32,8 +32,10 @@ #ifndef CONFIG_SMP #define TTB_FLAGS TTB_RGN_WBWA +#define PMD_FLAGS PMD_SECT_WB #else #define TTB_FLAGS TTB_RGN_WBWA|TTB_S +#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S #endif ENTRY(cpu_v6_proc_init) @@ -191,7 +193,7 @@ v6_crval: .type v6_processor_functions, #object ENTRY(v6_processor_functions) .word v6_early_abort - .word pabort_noifar + .word v6_pabort .word cpu_v6_proc_init .word cpu_v6_proc_fin .word cpu_v6_reset @@ -222,10 +224,9 @@ __v6_proc_info: .long 0x0007b000 .long 0x0007f000 .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ + PMD_SECT_AP_READ | \ + PMD_FLAGS .long PMD_TYPE_SECT | \ PMD_SECT_XN | \ PMD_SECT_AP_WRITE | \ diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index
180a08d03a0..3a285218fd1 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -33,9 +33,11 @@ #ifndef CONFIG_SMP /* PTWs cacheable, inner WB not shareable, outer WB not shareable */ #define TTB_FLAGS TTB_IRGN_WB|TTB_RGN_OC_WB +#define PMD_FLAGS PMD_SECT_WB #else /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */ #define TTB_FLAGS TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA +#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S #endif ENTRY(cpu_v7_proc_init) @@ -127,7 +129,9 @@ ENDPROC(cpu_v7_switch_mm) */ ENTRY(cpu_v7_set_pte_ext) #ifdef CONFIG_MMU - str r1, [r0], #-2048 @ linux version + ARM( str r1, [r0], #-2048 ) @ linux version + THUMB( str r1, [r0] ) @ linux version + THUMB( sub r0, r0, #2048 ) bic r3, r1, #0x000003f0 bic r3, r3, #PTE_TYPE_MASK @@ -182,9 +186,10 @@ cpu_v7_name: */ __v7_setup: #ifdef CONFIG_SMP - mrc p15, 0, r0, c1, c0, 1 @ Enable SMP/nAMP mode and - orr r0, r0, #(1 << 6) | (1 << 0) @ TLB ops broadcasting - mcr p15, 0, r0, c1, c0, 1 + mrc p15, 0, r0, c1, c0, 1 + tst r0, #(1 << 6) @ SMP/nAMP mode enabled? + orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and + mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting #endif adr r12, __v7_setup_stack @ the local stack stmia r12, {r0-r5, r7, r9, r11, lr} @@ -232,7 +237,6 @@ __v7_setup: mcr p15, 0, r4, c2, c0, 1 @ load TTB1 mov r10, #0x1f @ domains 0, 1 = manager mcr p15, 0, r10, c3, c0, 0 @ load domain access register -#endif /* * Memory region attributes with SCTLR.TRE=1 * @@ -265,6 +269,7 @@ __v7_setup: ldr r6, =0x40e040e0 @ NMRR mcr p15, 0, r5, c10, c2, 0 @ write PRRR mcr p15, 0, r6, c10, c2, 1 @ write NMRR +#endif adr r5, v7_crval ldmia r5, {r5, r6} #ifdef CONFIG_CPU_ENDIAN_BE8 @@ -273,6 +278,7 @@ __v7_setup: mrc p15, 0, r0, c1, c0, 0 @ read control register bic r0, r0, r5 @ clear them orr r0, r0, r6 @ set them + THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions mov pc, lr @ return to head.S:__ret ENDPROC(__v7_setup) @@ -292,7 +298,7 @@ __v7_setup_stack: .type v7_processor_functions, #object ENTRY(v7_processor_functions) .word v7_early_abort - .word pabort_ifar + .word v7_pabort .word cpu_v7_proc_init .word cpu_v7_proc_fin .word cpu_v7_reset @@ -323,10 +329,9 @@ __v7_proc_info: .long 0x000f0000 @ Required ID value .long 0x000f0000 @ Mask for ID .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ + PMD_SECT_AP_READ | \ + PMD_FLAGS .long PMD_TYPE_SECT | \ PMD_SECT_XN | \ PMD_SECT_AP_WRITE | \ diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 33515c214b9..2028f370288 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -428,7 +428,7 @@ xsc3_crval: .type xsc3_processor_functions, #object ENTRY(xsc3_processor_functions) .word v5t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_xsc3_proc_init .word cpu_xsc3_proc_fin .word cpu_xsc3_reset diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 0cce37b9393..f056c283682 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -17,7 +17,7 @@ * * 2001 Sep 08: * Completely revisited, many important fixes - * Nicolas Pitre <nico@cam.org> + * Nicolas Pitre <nico@fluxnic.net> */ #include <linux/linkage.h> @@ -511,7 +511,7 @@ xscale_crval: .type xscale_processor_functions, #object ENTRY(xscale_processor_functions) .word v5t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_xscale_proc_init .word cpu_xscale_proc_fin .word cpu_xscale_reset
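One further note on the init.c changes earlier in this diff: the new !CONFIG_SPARSEMEM pfn_valid() is a binary search, which is only correct because bootmem_init() now sorts the banks by start pfn via meminfo_cmp(). A self-contained sketch of the same search, with made-up bank ranges:

#include <stdio.h>

struct membank { unsigned long start, end; };	/* pfn range, end exclusive */

/* Banks must be sorted by start pfn, as meminfo_cmp() guarantees above. */
static int pfn_valid(const struct membank *bank, unsigned int nr,
		     unsigned long pfn)
{
	unsigned int left = 0, right = nr;

	while (left < right) {
		unsigned int mid = (left + right) / 2;

		if (pfn < bank[mid].start)
			right = mid;
		else if (pfn >= bank[mid].end)
			left = mid + 1;
		else
			return 1;
	}
	return 0;
}

int main(void)
{
	/* Two example banks with a hole between them (values hypothetical). */
	struct membank banks[] = { { 0x100, 0x200 }, { 0x400, 0x500 } };

	printf("%d %d %d\n",
	       pfn_valid(banks, 2, 0x180),	/* 1: inside bank 0 */
	       pfn_valid(banks, 2, 0x300),	/* 0: in the hole */
	       pfn_valid(banks, 2, 0x4ff));	/* 1: last page of bank 1 */
	return 0;
}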