Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig       |  5
-rw-r--r--  arch/arm/mm/cache-v6.S    | 20
-rw-r--r--  arch/arm/mm/cache-v7.S    | 19
-rw-r--r--  arch/arm/mm/context.c     |  5
-rw-r--r--  arch/arm/mm/dma-mapping.c |  4
-rw-r--r--  arch/arm/mm/fault-armv.c  |  9
-rw-r--r--  arch/arm/mm/fault.c       |  5
-rw-r--r--  arch/arm/mm/flush.c       | 31
-rw-r--r--  arch/arm/mm/highmem.c     |  2
-rw-r--r--  arch/arm/mm/init.c        | 22
-rw-r--r--  arch/arm/mm/mmu.c         |  7
-rw-r--r--  arch/arm/mm/proc-v6.S     |  7
-rw-r--r--  arch/arm/mm/proc-v7.S     | 14
13 files changed, 94 insertions, 56 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index e993140edd8..9264d814cd7 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -122,10 +122,7 @@ config CPU_ARM920T
select CPU_TLB_V4WBI if MMU
help
The ARM920T is licensed to be produced by numerous vendors,
- and is used in the Maverick EP9312 and the Samsung S3C2410.
-
- More information on the Maverick EP9312 at
- <http://linuxdevices.com/products/PD2382866068.html>.
+ and is used in the Cirrus EP93xx and the Samsung S3C2410.
Say Y if you want support for the ARM920T processor.
Otherwise, say N.
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 8f5c13f4c93..295e25dd638 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
+#include <asm/unwind.h>
#include "proc-macros.S"
@@ -121,11 +122,13 @@ ENTRY(v6_coherent_kern_range)
* - the Icache does not read data from the write buffer
*/
ENTRY(v6_coherent_user_range)
-
+ UNWIND(.fnstart )
#ifdef HARVARD_CACHE
bic r0, r0, #CACHE_LINE_SIZE - 1
-1: mcr p15, 0, r0, c7, c10, 1 @ clean D line
+1:
+ USER( mcr p15, 0, r0, c7, c10, 1 ) @ clean D line
add r0, r0, #CACHE_LINE_SIZE
+2:
cmp r0, r1
blo 1b
#endif
@@ -143,6 +146,19 @@ ENTRY(v6_coherent_user_range)
mov pc, lr
/*
+ * Fault handling for the cache operation above. If the virtual address in r0
+ * isn't mapped, just try the next page.
+ */
+9001:
+ mov r0, r0, lsr #12
+ mov r0, r0, lsl #12
+ add r0, r0, #4096
+ b 2b
+ UNWIND(.fnend )
+ENDPROC(v6_coherent_user_range)
+ENDPROC(v6_coherent_kern_range)
+
+/*
* v6_flush_kern_dcache_page(kaddr)
*
* Ensure that the data held in the page kaddr is written back
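The 9001 label is the fault fixup for the USER()-wrapped cache operation: when the D-line clean faults on an unmapped user address, execution resumes here, rounds the address down to its page base, advances one page, and rejoins the loop at label 2. The same arithmetic in C, as a sketch (the helper name is illustrative only):

	/*
	 * C rendering of the 9001 fixup above: shifting right then left
	 * by 12 clears the offset within a 4 KiB page, and adding 4096
	 * moves to the start of the next page.
	 */
	static unsigned long skip_to_next_page(unsigned long addr)
	{
		return (addr & ~0xfffUL) + 0x1000;
	}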
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index bda0ec31a4e..e1bd9759617 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -13,6 +13,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
+#include <asm/unwind.h>
#include "proc-macros.S"
@@ -153,13 +154,16 @@ ENTRY(v7_coherent_kern_range)
* - the Icache does not read data from the write buffer
*/
ENTRY(v7_coherent_user_range)
+ UNWIND(.fnstart )
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
-1: mcr p15, 0, r0, c7, c11, 1 @ clean D line to the point of unification
+1:
+ USER( mcr p15, 0, r0, c7, c11, 1 ) @ clean D line to the point of unification
dsb
- mcr p15, 0, r0, c7, c5, 1 @ invalidate I line
+ USER( mcr p15, 0, r0, c7, c5, 1 ) @ invalidate I line
add r0, r0, r2
+2:
cmp r0, r1
blo 1b
mov r0, #0
@@ -167,6 +171,17 @@ ENTRY(v7_coherent_user_range)
dsb
isb
mov pc, lr
+
+/*
+ * Fault handling for the cache operation above. If the virtual address in r0
+ * isn't mapped, just try the next page.
+ */
+9001:
+ mov r0, r0, lsr #12
+ mov r0, r0, lsl #12
+ add r0, r0, #4096
+ b 2b
+ UNWIND(.fnend )
ENDPROC(v7_coherent_kern_range)
ENDPROC(v7_coherent_user_range)
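Both files rely on the USER() wrapper to hook the mcr instructions into the kernel's exception table, which is what routes a fault on those instructions to the local 9001 fixup instead of an oops. The macro itself is not shown in this diff; on arch/arm it is presumably the usual pattern of emitting the instruction plus an __ex_table entry pairing its address with the fixup label:

	/* hedged sketch of the assembler-side USER() macro
	 * (asm/assembler.h, not part of this diff) */
	#define USER(x...)			\
	9999:	x;				\
		.section __ex_table, "a";	\
		.align	3;			\
		.long	9999b, 9001f;		\
		.previous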
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 6bda76a4319..a9e22e31eaa 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -50,10 +50,7 @@ void __new_context(struct mm_struct *mm)
isb();
flush_tlb_all();
if (icache_is_vivt_asid_tagged()) {
- asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n"
- "mcr p15, 0, %0, c7, c5, 6 @ flush BTAC/BTB\n"
- :
- : "r" (0));
+ __flush_icache_all();
dsb();
}
}
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index b30925fcbcd..b9590a7085c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -205,7 +205,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
order = get_order(size);
- if (mask != 0xffffffff)
+ if (mask < 0xffffffffULL)
gfp |= GFP_DMA;
page = alloc_pages(gfp, order);
@@ -289,7 +289,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
if (!mask)
goto error;
- if (mask != 0xffffffff)
+ if (mask < 0xffffffffULL)
gfp |= GFP_DMA;
virt = kmalloc(size, gfp);
if (!virt)
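The two dma-mapping.c hunks fix a type problem: DMA masks are 64-bit quantities, so the old inequality treated a device advertising a full 64-bit mask as needing the limited GFP_DMA zone. With the new comparison, only masks that cannot cover the whole 32-bit space force GFP_DMA. A minimal illustration (DMA_BIT_MASK is the standard kernel helper):

	u64 mask = DMA_BIT_MASK(64);	/* device can address everything */

	if (mask != 0xffffffff)		/* old test: true, wrongly forces GFP_DMA */
		gfp |= GFP_DMA;
	if (mask < 0xffffffffULL)	/* new test: false, normal allocation */
		gfp |= GFP_DMA;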
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index bc0099d5ae8..d0d17b6a370 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -153,14 +153,11 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
page = pfn_to_page(pfn);
mapping = page_mapping(page);
- if (mapping) {
#ifndef CONFIG_SMP
- int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-
- if (dirty)
- __flush_dcache_page(mapping, page);
+ if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+ __flush_dcache_page(mapping, page);
#endif
-
+ if (mapping) {
if (cache_is_vivt())
make_coherent(mapping, vma, addr, pfn);
else if (vma->vm_flags & VM_EXEC)
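For context, PG_dcache_dirty implements ARM's lazy D-cache flushing: flush_dcache_page() may defer the flush by setting the bit while a page has no user mappings, and update_mmu_cache() above performs the deferred flush once the page is actually mapped. The hunk moves the deferred-flush check ahead of the mapping test so the dirty bit is always consumed. A simplified sketch of the two sides of the protocol (not the literal kernel code):

	/* producer side, roughly flush_dcache_page(): defer while unmapped */
	if (!mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
		__flush_dcache_page(mapping, page);

	/* consumer side, update_mmu_cache() as patched above */
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
		__flush_dcache_page(mapping, page);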
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index ae0e25f5a70..10e06801afb 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -292,6 +292,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* down_read()
*/
might_sleep();
+#ifdef CONFIG_DEBUG_VM
+ if (!user_mode(regs) &&
+ !search_exception_tables(regs->ARM_pc))
+ goto no_context;
+#endif
}
fault = __do_page_fault(mm, addr, fsr, tsk);
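The CONFIG_DEBUG_VM addition asserts an invariant: kernel-mode accesses to user addresses must come from code with a registered exception fixup (the uaccess helpers arrange this via __ex_table). If search_exception_tables() finds no entry for the faulting PC, the fault is a kernel bug, and handing it to the normal retry path would only hide it; hence the jump to no_context. The lookup is the generic kernel facility:

	/* from kernel/extable.c (not part of this diff): returns the fixup
	 * entry covering @addr, or NULL if none was registered */
	const struct exception_table_entry *search_exception_tables(unsigned long addr);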
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index b27942909b2..7f294f307c8 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -18,10 +18,6 @@
#include "mm.h"
-#ifdef CONFIG_ARM_ERRATA_411920
-extern void v6_icache_inval_all(void);
-#endif
-
#ifdef CONFIG_CPU_CACHE_VIPT
#define ALIAS_FLUSH_START 0xffff4000
@@ -35,16 +31,11 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
flush_tlb_kernel_page(to);
asm( "mcrr p15, 0, %1, %0, c14\n"
- " mcr p15, 0, %2, c7, c10, 4\n"
-#ifndef CONFIG_ARM_ERRATA_411920
- " mcr p15, 0, %2, c7, c5, 0\n"
-#endif
+ " mcr p15, 0, %2, c7, c10, 4"
:
: "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
: "cc");
-#ifdef CONFIG_ARM_ERRATA_411920
- v6_icache_inval_all();
-#endif
+ __flush_icache_all();
}
void flush_cache_mm(struct mm_struct *mm)
@@ -57,16 +48,11 @@ void flush_cache_mm(struct mm_struct *mm)
if (cache_is_vipt_aliasing()) {
asm( "mcr p15, 0, %0, c7, c14, 0\n"
- " mcr p15, 0, %0, c7, c10, 4\n"
-#ifndef CONFIG_ARM_ERRATA_411920
- " mcr p15, 0, %0, c7, c5, 0\n"
-#endif
+ " mcr p15, 0, %0, c7, c10, 4"
:
: "r" (0)
: "cc");
-#ifdef CONFIG_ARM_ERRATA_411920
- v6_icache_inval_all();
-#endif
+ __flush_icache_all();
}
}
@@ -81,16 +67,11 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
if (cache_is_vipt_aliasing()) {
asm( "mcr p15, 0, %0, c7, c14, 0\n"
- " mcr p15, 0, %0, c7, c10, 4\n"
-#ifndef CONFIG_ARM_ERRATA_411920
- " mcr p15, 0, %0, c7, c5, 0\n"
-#endif
+ " mcr p15, 0, %0, c7, c10, 4"
:
: "r" (0)
: "cc");
-#ifdef CONFIG_ARM_ERRATA_411920
- v6_icache_inval_all();
-#endif
+ __flush_icache_all();
}
}
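__flush_icache_all() itself is not shown in this diff. Judging by the open-coded sequences it replaces here and in context.c, it presumably centralizes both the plain I-cache invalidate and the ARM errata 411920 workaround, along these lines:

	/* hedged sketch based on the code removed above; the real helper
	 * would live in asm/cacheflush.h */
	#ifdef CONFIG_ARM_ERRATA_411920
	extern void v6_icache_inval_all(void);
	#define __flush_icache_all()	v6_icache_inval_all()
	#else
	#define __flush_icache_all()						\
		asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache"	\
		    : : "r" (0))
	#endif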
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 73cae57fa70..30f82fb5918 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -46,6 +46,8 @@ void *kmap_atomic(struct page *page, enum km_type type)
if (!PageHighMem(page))
return page_address(page);
+ debug_kmap_atomic(type);
+
kmap = kmap_high_get(page);
if (kmap)
return kmap;
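debug_kmap_atomic() is the generic CONFIG_DEBUG_HIGHMEM sanity check, warning when an atomic kmap slot type is used from an inappropriate context (for instance an interrupt-only slot from process context); adding the call brings the ARM kmap_atomic() in line with other architectures. Its declaration, for reference:

	/* generic helper (mm/highmem.c, not part of this diff); a no-op
	 * unless CONFIG_DEBUG_HIGHMEM is enabled */
	void debug_kmap_atomic(enum km_type type);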
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 877c492f8e1..52c40d15567 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -273,7 +273,6 @@ static void __init bootmem_init_node(int node, struct meminfo *mi,
struct membank *bank = &mi->bank[i];
if (!bank->highmem)
free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
- memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
}
/*
@@ -370,6 +369,19 @@ int pfn_valid(unsigned long pfn)
return 0;
}
EXPORT_SYMBOL(pfn_valid);
+
+static void arm_memory_present(struct meminfo *mi, int node)
+{
+}
+#else
+static void arm_memory_present(struct meminfo *mi, int node)
+{
+ int i;
+ for_each_nodebank(i, mi, node) {
+ struct membank *bank = &mi->bank[i];
+ memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
+ }
+}
#endif
static int __init meminfo_cmp(const void *_a, const void *_b)
@@ -427,6 +439,12 @@ void __init bootmem_init(void)
*/
if (node == initrd_node)
bootmem_reserve_initrd(node);
+
+ /*
+ * Sparsemem tries to allocate bootmem in memory_present(),
+ * so must be done after the fixed reservations
+ */
+ arm_memory_present(mi, node);
}
/*
@@ -483,7 +501,7 @@ free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
/*
* Convert start_pfn/end_pfn to a struct page pointer.
*/
- start_pg = pfn_to_page(start_pfn);
+ start_pg = pfn_to_page(start_pfn - 1) + 1;
end_pg = pfn_to_page(end_pfn);
/*
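The free_memmap() hunk at the end avoids translating a pfn that may have no memmap behind it: start_pfn marks the first page of a hole, and with sparsemem that pfn can fall in an absent section where pfn_to_page() is undefined. Translating the last pfn of the preceding present bank and stepping one struct page forward reaches the same pointer safely:

	/* start_pfn - 1 is the last page of the present bank before the
	 * hole, so its memmap entry exists; '+ 1' then points just past it */
	start_pg = pfn_to_page(start_pfn - 1) + 1;

end_pfn needs no such treatment, since it is the first pfn of the next present bank and translates directly.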
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 02243eeccf5..ea67be0223a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -117,6 +117,13 @@ static void __init early_cachepolicy(char **p)
}
if (i == ARRAY_SIZE(cache_policies))
printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
+ /*
+ * This restriction is partly to do with the way we boot; it is
+ * unpredictable to have memory mapped using two different sets of
+ * memory attributes (shared, type, and cache attributes). We cannot
+ * change these attributes once the initial assembly has set up the
+ * page tables.
+ */
if (cpu_architecture() >= CPU_ARCH_ARMv6) {
printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
cachepolicy = CPOLICY_WRITEBACK;
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 194737d60a2..70f75d2e3ea 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -32,8 +32,10 @@
#ifndef CONFIG_SMP
#define TTB_FLAGS TTB_RGN_WBWA
+#define PMD_FLAGS PMD_SECT_WB
#else
#define TTB_FLAGS TTB_RGN_WBWA|TTB_S
+#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S
#endif
ENTRY(cpu_v6_proc_init)
@@ -222,10 +224,9 @@ __v6_proc_info:
.long 0x0007b000
.long 0x0007f000
.long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
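The new PMD_FLAGS mirrors the TTB_FLAGS choice a few lines earlier, so the initial section mappings created by the boot assembly carry the same shareability and cacheability the kernel expects at runtime, matching the restriction documented in the mmu.c comment above. The pairing, spelled out:

	     TTB_FLAGS (table walks)   PMD_FLAGS (section mappings)
	UP:  TTB_RGN_WBWA              PMD_SECT_WB
	SMP: TTB_RGN_WBWA|TTB_S        PMD_SECT_WBWA|PMD_SECT_S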
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 23ebcf6eab9..3a285218fd1 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -33,9 +33,11 @@
#ifndef CONFIG_SMP
/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
#define TTB_FLAGS TTB_IRGN_WB|TTB_RGN_OC_WB
+#define PMD_FLAGS PMD_SECT_WB
#else
/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
#define TTB_FLAGS TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
+#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S
#endif
ENTRY(cpu_v7_proc_init)
@@ -184,9 +186,10 @@ cpu_v7_name:
*/
__v7_setup:
#ifdef CONFIG_SMP
- mrc p15, 0, r0, c1, c0, 1 @ Enable SMP/nAMP mode and
- orr r0, r0, #(1 << 6) | (1 << 0) @ TLB ops broadcasting
- mcr p15, 0, r0, c1, c0, 1
+ mrc p15, 0, r0, c1, c0, 1
+ tst r0, #(1 << 6) @ SMP/nAMP mode enabled?
+ orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and
+ mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting
#endif
adr r12, __v7_setup_stack @ the local stack
stmia r12, {r0-r5, r7, r9, r11, lr}
@@ -326,10 +329,9 @@ __v7_proc_info:
.long 0x000f0000 @ Required ID value
.long 0x000f0000 @ Mask for ID
.long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
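Finally, the __v7_setup change makes enabling SMP/nAMP mode conditional: the Auxiliary Control Register is typically only writable from secure mode, so when secure firmware has already set bit 6 the kernel should not (and in non-secure mode cannot) rewrite it. In C, the tst/orreq/mcreq sequence reads roughly as follows (read_actlr/write_actlr are hypothetical stand-ins for the mrc/mcr accessors):

	u32 actlr = read_actlr();		/* mrc p15, 0, r0, c1, c0, 1 */
	if (!(actlr & (1 << 6))) {		/* SMP/nAMP mode not yet enabled? */
		actlr |= (1 << 6) | (1 << 0);	/* SMP mode + TLB ops broadcasting */
		write_actlr(actlr);		/* mcr p15, 0, r0, c1, c0, 1 */
	}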