Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Makefile | 2
-rw-r--r--  arch/arm/mm/abort-ev7.S | 1
-rw-r--r--  arch/arm/mm/abort-nommu.S | 1
-rw-r--r--  arch/arm/mm/alignment.c | 2
-rw-r--r--  arch/arm/mm/cache-l2x0.c | 2
-rw-r--r--  arch/arm/mm/cache-v7.S | 10
-rw-r--r--  arch/arm/mm/cache-xsc3l2.c | 3
-rw-r--r--  arch/arm/mm/copypage-v6.c | 1
-rw-r--r--  arch/arm/mm/dma-mapping.c (renamed from arch/arm/mm/consistent.c) | 104
-rw-r--r--  arch/arm/mm/extable.c | 2
-rw-r--r--  arch/arm/mm/fault-armv.c | 2
-rw-r--r--  arch/arm/mm/fault.c | 7
-rw-r--r--  arch/arm/mm/flush.c | 1
-rw-r--r--  arch/arm/mm/init.c | 193
-rw-r--r--  arch/arm/mm/iomap.c | 3
-rw-r--r--  arch/arm/mm/ioremap.c | 12
-rw-r--r--  arch/arm/mm/mm.h | 2
-rw-r--r--  arch/arm/mm/mmap.c | 6
-rw-r--r--  arch/arm/mm/mmu.c | 34
-rw-r--r--  arch/arm/mm/nommu.c | 18
-rw-r--r--  arch/arm/mm/proc-v7.S | 10
-rw-r--r--  arch/arm/mm/tlb-v7.S | 2
22 files changed, 316 insertions(+), 102 deletions(-)
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 2e27a8c8372..480f78a3611 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -2,7 +2,7 @@
# Makefile for the linux arm-specific parts of the memory manager.
#
-obj-y := consistent.o extable.o fault.o init.o \
+obj-y := dma-mapping.o extable.o fault.o init.o \
iomap.o
obj-$(CONFIG_MMU) += fault-armv.o flush.o ioremap.o mmap.o \
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index eb90bce38e1..2e6dc040c65 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -30,3 +30,4 @@ ENTRY(v7_early_abort)
* New designs should not need to patch up faults.
*/
mov pc, lr
+ENDPROC(v7_early_abort)
diff --git a/arch/arm/mm/abort-nommu.S b/arch/arm/mm/abort-nommu.S
index a7cc7f9ee45..625e580945b 100644
--- a/arch/arm/mm/abort-nommu.S
+++ b/arch/arm/mm/abort-nommu.S
@@ -17,3 +17,4 @@ ENTRY(nommu_early_abort)
mov r0, #0 @ clear r0, r1 (no FSR/FAR)
mov r1, #0
mov pc, lr
+ENDPROC(nommu_early_abort)
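The ENDPROC() annotations added here, and throughout the rest of this diff, give each assembly entry point an explicit ELF function type and size, which helps tools such as kallsyms and, later, Thumb-2 interworking. A rough sketch of the expansion, assuming the ARM <asm/linkage.h> of this era ('@' is the comment character in ARM gas, hence '%function') — verify against your tree:

	/* Sketch of the ENDPROC() expansion (assumption, not part of this patch). */
	#define END(name) \
		.size name, . - name		/* record the symbol's size */

	#define ENDPROC(name) \
		.type name, %function;		/* mark name as a function symbol */ \
		END(name)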
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index e162cca5917..133e65d166b 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -17,8 +17,8 @@
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
+#include <linux/uaccess.h>
-#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include "fault.h"
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 76b800a9519..b480f1d3591 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -18,9 +18,9 @@
*/
#include <linux/init.h>
#include <linux/spinlock.h>
+#include <linux/io.h>
#include <asm/cacheflush.h>
-#include <asm/io.h>
#include <asm/hardware/cache-l2x0.h>
#define CACHE_LINE_SIZE 32
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 35ffc4d9599..d19c2bec2b1 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -66,6 +66,7 @@ finished:
mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
isb
mov pc, lr
+ENDPROC(v7_flush_dcache_all)
/*
* v7_flush_cache_all()
@@ -85,6 +86,7 @@ ENTRY(v7_flush_kern_cache_all)
mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
ldmfd sp!, {r4-r5, r7, r9-r11, lr}
mov pc, lr
+ENDPROC(v7_flush_kern_cache_all)
/*
* v7_flush_cache_all()
@@ -110,6 +112,8 @@ ENTRY(v7_flush_user_cache_all)
*/
ENTRY(v7_flush_user_cache_range)
mov pc, lr
+ENDPROC(v7_flush_user_cache_all)
+ENDPROC(v7_flush_user_cache_range)
/*
* v7_coherent_kern_range(start,end)
@@ -155,6 +159,8 @@ ENTRY(v7_coherent_user_range)
dsb
isb
mov pc, lr
+ENDPROC(v7_coherent_kern_range)
+ENDPROC(v7_coherent_user_range)
/*
* v7_flush_kern_dcache_page(kaddr)
@@ -174,6 +180,7 @@ ENTRY(v7_flush_kern_dcache_page)
blo 1b
dsb
mov pc, lr
+ENDPROC(v7_flush_kern_dcache_page)
/*
* v7_dma_inv_range(start,end)
@@ -202,6 +209,7 @@ ENTRY(v7_dma_inv_range)
blo 1b
dsb
mov pc, lr
+ENDPROC(v7_dma_inv_range)
/*
* v7_dma_clean_range(start,end)
@@ -219,6 +227,7 @@ ENTRY(v7_dma_clean_range)
blo 1b
dsb
mov pc, lr
+ENDPROC(v7_dma_clean_range)
/*
* v7_dma_flush_range(start,end)
@@ -236,6 +245,7 @@ ENTRY(v7_dma_flush_range)
blo 1b
dsb
mov pc, lr
+ENDPROC(v7_dma_flush_range)
__INITDATA
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index 158bd96763d..10b1bae1a25 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -18,10 +18,11 @@
*/
#include <linux/init.h>
#include <linux/spinlock.h>
+#include <linux/io.h>
#include <asm/system.h>
+#include <asm/cputype.h>
#include <asm/cacheflush.h>
-#include <asm/io.h>
#define CR_L2 (1 << 26)
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 3adb79257f4..0e21c076758 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -16,6 +16,7 @@
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
+#include <asm/cachetype.h>
#include "mm.h"
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/dma-mapping.c
index db7b3e38ef1..67960017dc8 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/mm/consistent.c
+ * linux/arch/arm/mm/dma-mapping.c
*
* Copyright (C) 2000-2004 Russell King
*
@@ -512,3 +512,105 @@ void dma_cache_maint(const void *start, size_t size, int direction)
}
}
EXPORT_SYMBOL(dma_cache_maint);
+
+/**
+ * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * This is the scatter-gather version of the dma_map_single interface.
+ * Here the scatter gather list elements are each tagged with the
+ * appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ *
+ * Device ownership issues as mentioned for dma_map_single are the same
+ * here.
+ */
+int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i, j;
+
+ for_each_sg(sg, s, nents, i) {
+ s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+ s->length, dir);
+ if (dma_mapping_error(dev, s->dma_address))
+ goto bad_mapping;
+ }
+ return nents;
+
+ bad_mapping:
+ for_each_sg(sg, s, i, j)
+ dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+ return 0;
+}
+EXPORT_SYMBOL(dma_map_sg);
+
+/**
+ * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations. Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+}
+EXPORT_SYMBOL(dma_unmap_sg);
+
+/**
+ * dma_sync_sg_for_cpu
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i) {
+ dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+ sg_dma_len(s), dir);
+ }
+}
+EXPORT_SYMBOL(dma_sync_sg_for_cpu);
+
+/**
+ * dma_sync_sg_for_device
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i) {
+ if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
+ sg_dma_len(s), dir))
+ continue;
+
+ if (!arch_is_coherent())
+ dma_cache_maint(sg_virt(s), s->length, dir);
+ }
+}
+EXPORT_SYMBOL(dma_sync_sg_for_device);
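For driver code, a minimal sketch of how the new scatter-gather entry points are meant to be used. example_start_dma(), dev, sglist and count are hypothetical stand-ins, not part of this patch:

	#include <linux/kernel.h>
	#include <linux/errno.h>
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/* Hypothetical driver fragment: map an already-built scatterlist,
	 * program one hardware descriptor per segment, then unmap. */
	static int example_start_dma(struct device *dev,
				     struct scatterlist *sglist, int count)
	{
		struct scatterlist *s;
		int i, nents;

		/* dma_map_sg() returns 0 if any element failed to map
		 * (the partial mapping is rolled back internally, above). */
		nents = dma_map_sg(dev, sglist, count, DMA_TO_DEVICE);
		if (nents == 0)
			return -ENOMEM;

		for_each_sg(sglist, s, nents, i)
			pr_debug("seg %d: bus 0x%08lx len %u\n", i,
				 (unsigned long)sg_dma_address(s),
				 sg_dma_len(s));

		/* ... start the transfer and wait for completion ... */

		dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
		return 0;
	}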
diff --git a/arch/arm/mm/extable.c b/arch/arm/mm/extable.c
index 9592c3ee4cb..9d285626bc7 100644
--- a/arch/arm/mm/extable.c
+++ b/arch/arm/mm/extable.c
@@ -2,7 +2,7 @@
* linux/arch/arm/mm/extable.c
*/
#include <linux/module.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
int fixup_exception(struct pt_regs *regs)
{
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index a8ec97b4752..af6ed6ef9a8 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -17,7 +17,9 @@
#include <linux/init.h>
#include <linux/pagemap.h>
+#include <asm/bugs.h>
#include <asm/cacheflush.h>
+#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 28ad7ab1c0c..2df8d9facf5 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -13,11 +13,11 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kprobes.h>
+#include <linux/uaccess.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
-#include <asm/uaccess.h>
#include "fault.h"
@@ -72,9 +72,8 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
}
pmd = pmd_offset(pgd, addr);
-#if PTRS_PER_PMD != 1
- printk(", *pmd=%08lx", pmd_val(*pmd));
-#endif
+ if (PTRS_PER_PMD != 1)
+ printk(", *pmd=%08lx", pmd_val(*pmd));
if (pmd_none(*pmd))
break;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 029ee65fda2..0fa9bf388f0 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -12,6 +12,7 @@
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
+#include <asm/cachetype.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 30a69d67d67..82c4b421798 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -26,9 +26,42 @@
#include "mm.h"
-extern void _text, _etext, __data_start, _end, __init_begin, __init_end;
-extern unsigned long phys_initrd_start;
-extern unsigned long phys_initrd_size;
+static unsigned long phys_initrd_start __initdata = 0;
+static unsigned long phys_initrd_size __initdata = 0;
+
+static void __init early_initrd(char **p)
+{
+ unsigned long start, size;
+
+ start = memparse(*p, p);
+ if (**p == ',') {
+ size = memparse((*p) + 1, p);
+
+ phys_initrd_start = start;
+ phys_initrd_size = size;
+ }
+}
+__early_param("initrd=", early_initrd);
+
+static int __init parse_tag_initrd(const struct tag *tag)
+{
+ printk(KERN_WARNING "ATAG_INITRD is deprecated; "
+ "please update your bootloader.\n");
+ phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
+ phys_initrd_size = tag->u.initrd.size;
+ return 0;
+}
+
+__tagtable(ATAG_INITRD, parse_tag_initrd);
+
+static int __init parse_tag_initrd2(const struct tag *tag)
+{
+ phys_initrd_start = tag->u.initrd.start;
+ phys_initrd_size = tag->u.initrd.size;
+ return 0;
+}
+
+__tagtable(ATAG_INITRD2, parse_tag_initrd2);
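The net effect of this hunk: phys_initrd_start/phys_initrd_size become private to init.c, populated either by an initrd=start,size command-line option or by the (deprecated) ATAG_INITRD / ATAG_INITRD2 boot tags. For context, a sketch of the __early_param() mechanism it leans on, as assumed from the ARM <asm/setup.h> of this period — check your tree for the exact form:

	/* Each __early_param() use places one of these in the
	 * .early_param.init section; early setup code walks the section
	 * and, for each command-line option whose prefix matches .arg,
	 * calls .fn with the cursor positioned just past that prefix. */
	struct early_params {
		const char *arg;		/* e.g. "initrd=" */
		void (*fn)(char **p);
	};

	#define __early_param(name, fn)					\
	static struct early_params __early_##fn __used			\
	__attribute__((__section__(".early_param.init"))) = { name, fn }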
/*
* This is used to pass memory configuration data from paging_init
@@ -36,10 +69,6 @@ extern unsigned long phys_initrd_size;
*/
static struct meminfo meminfo = { 0, };
-#define for_each_nodebank(iter,mi,no) \
- for (iter = 0; iter < mi->nr_banks; iter++) \
- if (mi->bank[iter].node == no)
-
void show_mem(void)
{
int free = 0, total = 0, reserved = 0;
@@ -50,14 +79,15 @@ void show_mem(void)
show_free_areas();
for_each_online_node(node) {
pg_data_t *n = NODE_DATA(node);
- struct page *map = n->node_mem_map - n->node_start_pfn;
+ struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;
for_each_nodebank (i,mi,node) {
+ struct membank *bank = &mi->bank[i];
unsigned int pfn1, pfn2;
struct page *page, *end;
- pfn1 = __phys_to_pfn(mi->bank[i].start);
- pfn2 = __phys_to_pfn(mi->bank[i].size + mi->bank[i].start);
+ pfn1 = bank_pfn_start(bank);
+ pfn2 = bank_pfn_end(bank);
page = map + pfn1;
end = map + pfn2;
@@ -96,17 +126,17 @@ void show_mem(void)
static unsigned int __init
find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
{
- unsigned int start_pfn, bank, bootmap_pfn;
+ unsigned int start_pfn, i, bootmap_pfn;
start_pfn = PAGE_ALIGN(__pa(&_end)) >> PAGE_SHIFT;
bootmap_pfn = 0;
- for_each_nodebank(bank, mi, node) {
+ for_each_nodebank(i, mi, node) {
+ struct membank *bank = &mi->bank[i];
unsigned int start, end;
- start = mi->bank[bank].start >> PAGE_SHIFT;
- end = (mi->bank[bank].size +
- mi->bank[bank].start) >> PAGE_SHIFT;
+ start = bank_pfn_start(bank);
+ end = bank_pfn_end(bank);
if (end < start_pfn)
continue;
@@ -145,13 +175,10 @@ static int __init check_initrd(struct meminfo *mi)
initrd_node = -1;
for (i = 0; i < mi->nr_banks; i++) {
- unsigned long bank_end;
-
- bank_end = mi->bank[i].start + mi->bank[i].size;
-
- if (mi->bank[i].start <= phys_initrd_start &&
- end <= bank_end)
- initrd_node = mi->bank[i].node;
+ struct membank *bank = &mi->bank[i];
+ if (bank_phys_start(bank) <= phys_initrd_start &&
+ end <= bank_phys_end(bank))
+ initrd_node = bank->node;
}
}
@@ -171,19 +198,17 @@ static inline void map_memory_bank(struct membank *bank)
#ifdef CONFIG_MMU
struct map_desc map;
- map.pfn = __phys_to_pfn(bank->start);
- map.virtual = __phys_to_virt(bank->start);
- map.length = bank->size;
+ map.pfn = bank_pfn_start(bank);
+ map.virtual = __phys_to_virt(bank_phys_start(bank));
+ map.length = bank_phys_size(bank);
map.type = MT_MEMORY;
create_mapping(&map);
#endif
}
-static unsigned long __init
-bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
+static unsigned long __init bootmem_init_node(int node, struct meminfo *mi)
{
- unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
unsigned long start_pfn, end_pfn, boot_pfn;
unsigned int boot_pages;
pg_data_t *pgdat;
@@ -199,8 +224,8 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
struct membank *bank = &mi->bank[i];
unsigned long start, end;
- start = bank->start >> PAGE_SHIFT;
- end = (bank->start + bank->size) >> PAGE_SHIFT;
+ start = bank_pfn_start(bank);
+ end = bank_pfn_end(bank);
if (start_pfn > start)
start_pfn = start;
@@ -230,8 +255,11 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
pgdat = NODE_DATA(node);
init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);
- for_each_nodebank(i, mi, node)
- free_bootmem_node(pgdat, mi->bank[i].start, mi->bank[i].size);
+ for_each_nodebank(i, mi, node) {
+ struct membank *bank = &mi->bank[i];
+ free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
+ memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
+ }
/*
* Reserve the bootmem bitmap for this node.
@@ -239,31 +267,39 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
- /*
- * Reserve any special node zero regions.
- */
- if (node == 0)
- reserve_node_zero(pgdat);
+ return end_pfn;
+}
+static void __init bootmem_reserve_initrd(int node)
+{
#ifdef CONFIG_BLK_DEV_INITRD
- /*
- * If the initrd is in this node, reserve its memory.
- */
- if (node == initrd_node) {
- int res = reserve_bootmem_node(pgdat, phys_initrd_start,
- phys_initrd_size, BOOTMEM_EXCLUSIVE);
-
- if (res == 0) {
- initrd_start = __phys_to_virt(phys_initrd_start);
- initrd_end = initrd_start + phys_initrd_size;
- } else {
- printk(KERN_ERR
- "INITRD: 0x%08lx+0x%08lx overlaps in-use "
- "memory region - disabling initrd\n",
- phys_initrd_start, phys_initrd_size);
- }
+ pg_data_t *pgdat = NODE_DATA(node);
+ int res;
+
+ res = reserve_bootmem_node(pgdat, phys_initrd_start,
+ phys_initrd_size, BOOTMEM_EXCLUSIVE);
+
+ if (res == 0) {
+ initrd_start = __phys_to_virt(phys_initrd_start);
+ initrd_end = initrd_start + phys_initrd_size;
+ } else {
+ printk(KERN_ERR
+ "INITRD: 0x%08lx+0x%08lx overlaps in-use "
+ "memory region - disabling initrd\n",
+ phys_initrd_start, phys_initrd_size);
}
#endif
+}
+
+static void __init bootmem_free_node(int node, struct meminfo *mi)
+{
+ unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
+ unsigned long start_pfn, end_pfn;
+ pg_data_t *pgdat = NODE_DATA(node);
+ int i;
+
+ start_pfn = pgdat->bdata->node_min_pfn;
+ end_pfn = pgdat->bdata->node_low_pfn;
/*
* initialise the zones within this node.
@@ -284,7 +320,7 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
*/
zhole_size[0] = zone_size[0];
for_each_nodebank(i, mi, node)
- zhole_size[0] -= mi->bank[i].size >> PAGE_SHIFT;
+ zhole_size[0] -= bank_pfn_size(&mi->bank[i]);
/*
* Adjust the sizes according to any special requirements for
@@ -293,21 +329,12 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
arch_adjust_zones(node, zone_size, zhole_size);
free_area_init_node(node, zone_size, start_pfn, zhole_size);
-
- return end_pfn;
}
void __init bootmem_init(struct meminfo *mi)
{
unsigned long memend_pfn = 0;
- int node, initrd_node, i;
-
- /*
- * Invalidate the node number for empty or invalid memory banks
- */
- for (i = 0; i < mi->nr_banks; i++)
- if (mi->bank[i].size == 0 || mi->bank[i].node >= MAX_NUMNODES)
- mi->bank[i].node = -1;
+ int node, initrd_node;
memcpy(&meminfo, mi, sizeof(meminfo));
@@ -320,9 +347,19 @@ void __init bootmem_init(struct meminfo *mi)
* Run through each node initialising the bootmem allocator.
*/
for_each_node(node) {
- unsigned long end_pfn;
+ unsigned long end_pfn = bootmem_init_node(node, mi);
- end_pfn = bootmem_init_node(node, initrd_node, mi);
+ /*
+ * Reserve any special node zero regions.
+ */
+ if (node == 0)
+ reserve_node_zero(NODE_DATA(node));
+
+ /*
+ * If the initrd is in this node, reserve its memory.
+ */
+ if (node == initrd_node)
+ bootmem_reserve_initrd(node);
/*
* Remember the highest memory PFN.
@@ -331,6 +368,19 @@ void __init bootmem_init(struct meminfo *mi)
memend_pfn = end_pfn;
}
+ /*
+ * sparse_init() needs the bootmem allocator up and running.
+ */
+ sparse_init();
+
+ /*
+ * Now free memory in each node - free_area_init_node needs
+ * the sparse mem_map arrays initialized by sparse_init()
+ * for memmap_init_zone(), otherwise all PFNs are invalid.
+ */
+ for_each_node(node)
+ bootmem_free_node(node, mi);
+
high_memory = __va(memend_pfn << PAGE_SHIFT);
/*
@@ -401,7 +451,9 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
* information on the command line.
*/
for_each_nodebank(i, mi, node) {
- bank_start = mi->bank[i].start >> PAGE_SHIFT;
+ struct membank *bank = &mi->bank[i];
+
+ bank_start = bank_pfn_start(bank);
if (bank_start < prev_bank_end) {
printk(KERN_ERR "MEM: unordered memory banks. "
"Not freeing memmap.\n");
@@ -415,8 +467,7 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
if (prev_bank_end && prev_bank_end != bank_start)
free_memmap(node, prev_bank_end, bank_start);
- prev_bank_end = (mi->bank[i].start +
- mi->bank[i].size) >> PAGE_SHIFT;
+ prev_bank_end = bank_pfn_end(bank);
}
}
@@ -461,8 +512,8 @@ void __init mem_init(void)
num_physpages = 0;
for (i = 0; i < meminfo.nr_banks; i++) {
- num_physpages += meminfo.bank[i].size >> PAGE_SHIFT;
- printk(" %ldMB", meminfo.bank[i].size >> 20);
+ num_physpages += bank_pfn_size(&meminfo.bank[i]);
+ printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
}
printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
diff --git a/arch/arm/mm/iomap.c b/arch/arm/mm/iomap.c
index 7429f8c0101..ffad039cbb7 100644
--- a/arch/arm/mm/iomap.c
+++ b/arch/arm/mm/iomap.c
@@ -7,8 +7,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ioport.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#ifdef __io
void __iomem *ioport_map(unsigned long port, unsigned int nr)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index b81dbf9ffb7..8a41912ec7c 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -24,9 +24,10 @@
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <asm/cputype.h>
#include <asm/cacheflush.h>
-#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
@@ -332,15 +333,14 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
}
EXPORT_SYMBOL(__arm_ioremap);
-void __iounmap(volatile void __iomem *addr)
+void __iounmap(volatile void __iomem *io_addr)
{
+ void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
struct vm_struct **p, *tmp;
#endif
unsigned int section_mapping = 0;
- addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long)addr);
-
#ifndef CONFIG_SMP
/*
* If this is a section based mapping we need to handle it
@@ -351,7 +351,7 @@ void __iounmap(volatile void __iomem *addr)
*/
write_lock(&vmlist_lock);
for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
- if((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
+ if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
if (tmp->flags & VM_ARM_SECTION_MAPPING) {
*p = tmp->next;
unmap_area_sections((unsigned long)tmp->addr,
@@ -366,6 +366,6 @@ void __iounmap(volatile void __iomem *addr)
#endif
if (!section_mapping)
- vunmap((void __force *)addr);
+ vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);
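With the rewritten __iounmap(), the page offset is masked off once on entry, and the vmlist walk (non-SMP only) tears down section mappings via unmap_area_sections() rather than vunmap(). A minimal, hypothetical pairing from a driver's point of view — EXAMPLE_PHYS and EXAMPLE_SIZE are placeholders, not part of this patch:

	#include <linux/types.h>
	#include <linux/errno.h>
	#include <linux/io.h>

	#define EXAMPLE_PHYS	0x10000000UL	/* hypothetical register window */
	#define EXAMPLE_SIZE	0x1000

	static int example_probe(void)
	{
		void __iomem *regs;
		u32 id;

		regs = ioremap(EXAMPLE_PHYS, EXAMPLE_SIZE);
		if (!regs)
			return -ENOMEM;

		id = readl(regs);	/* hypothetical ID register at offset 0 */

		/* iounmap() now works whether the mapping was built from
		 * small pages or (on ARM, !SMP) from a section mapping. */
		iounmap(regs);
		return id ? 0 : -ENODEV;
	}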
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 7647c597fc5..96590104ba0 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -35,3 +35,5 @@ struct pglist_data;
void __init create_mapping(struct map_desc *md);
void __init bootmem_init(struct meminfo *mi);
void reserve_node_zero(struct pglist_data *pgdat);
+
+extern void _text, _stext, _etext, __data_start, _end, __init_begin, __init_end;
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 3f6dc40b835..5358fcc7f61 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -6,6 +6,8 @@
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
+#include <linux/io.h>
+#include <asm/cputype.h>
#include <asm/system.h>
#define COLOUR_ALIGN(addr,pgoff) \
@@ -37,8 +39,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
* caches alias. This is indicated by bits 9 and 21 of the
* cache type register.
*/
- cache_type = read_cpuid(CPUID_CACHETYPE);
- if (cache_type != read_cpuid(CPUID_ID)) {
+ cache_type = read_cpuid_cachetype();
+ if (cache_type != read_cpuid_id()) {
aliasing = (cache_type | cache_type >> 12) & (1 << 11);
if (aliasing)
do_align = filp || flags & MAP_SHARED;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 25d9a11eb61..e7af83e569d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -15,6 +15,7 @@
#include <linux/mman.h>
#include <linux/nodemask.h>
+#include <asm/cputype.h>
#include <asm/mach-types.h>
#include <asm/setup.h>
#include <asm/sizes.h>
@@ -27,9 +28,6 @@
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-extern void _stext, _etext, __data_start, _end;
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
/*
* empty_zero_page is a special page that is used for
* zero-initialized data and COW.
@@ -568,12 +566,35 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
create_mapping(io_desc + i);
}
+static unsigned long __initdata vmalloc_reserve = SZ_128M;
+
+/*
+ * vmalloc=size forces the vmalloc area to be exactly 'size'
+ * bytes. This can be used to increase (or decrease) the vmalloc
+ * area - the default is 128m.
+ */
+static void __init early_vmalloc(char **arg)
+{
+ vmalloc_reserve = memparse(*arg, arg);
+
+ if (vmalloc_reserve < SZ_16M) {
+ vmalloc_reserve = SZ_16M;
+ printk(KERN_WARNING
+ "vmalloc area too small, limiting to %luMB\n",
+ vmalloc_reserve >> 20);
+ }
+}
+__early_param("vmalloc=", early_vmalloc);
+
+#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve)
+
static int __init check_membank_valid(struct membank *mb)
{
/*
- * Check whether this memory region has non-zero size.
+ * Check whether this memory region has non-zero size or
+ * invalid node number.
*/
- if (mb->size == 0)
+ if (mb->size == 0 || mb->node >= MAX_NUMNODES)
return 0;
/*
@@ -607,8 +628,7 @@ static int __init check_membank_valid(struct membank *mb)
static void __init sanity_check_meminfo(struct meminfo *mi)
{
- int i;
- int j;
+ int i, j;
for (i = 0, j = 0; i < mi->nr_banks; i++) {
if (check_membank_valid(&mi->bank[i]))
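The new vmalloc= option is parsed with memparse(), so the usual K/M/G suffixes apply: booting with vmalloc=256M reserves a 256MB vmalloc area, clamped to the 16MB floor shown above. A sketch of the behaviour early_vmalloc() implements, with SZ_16M assumed from <asm/sizes.h>:

	#include <linux/kernel.h>	/* memparse() */

	/* Sketch (not part of this patch): "256M" -> 256 << 20,
	 * "16384K" -> 16384 << 10, and plain "0x800000" also works. */
	static unsigned long example_vmalloc_reserve(char *arg)
	{
		char *rest;
		unsigned long reserve = memparse(arg, &rest);

		if (reserve < SZ_16M)
			reserve = SZ_16M;	/* same floor the patch enforces */
		return reserve;
	}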
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 63c62fdea52..07b62b23897 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -7,16 +7,14 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
+#include <linux/io.h>
#include <asm/cacheflush.h>
-#include <asm/io.h>
#include <asm/page.h>
#include <asm/mach/arch.h>
#include "mm.h"
-extern void _stext, __data_start, _end;
-
/*
* Reserve the various regions of node 0
*/
@@ -43,12 +41,26 @@ void __init reserve_node_zero(pg_data_t *pgdat)
BOOTMEM_DEFAULT);
}
+static void __init sanity_check_meminfo(struct meminfo *mi)
+{
+ int i, j;
+
+ for (i = 0, j = 0; i < mi->nr_banks; i++) {
+ struct membank *mb = &mi->bank[i];
+
+ if (mb->size != 0 && mb->node < MAX_NUMNODES)
+ mi->bank[j++] = mi->bank[i];
+ }
+ mi->nr_banks = j;
+}
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
*/
void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
{
+ sanity_check_meminfo(mi);
bootmem_init(mi);
}
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index b49f9a4c82c..a67e26f3dce 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -25,9 +25,11 @@
ENTRY(cpu_v7_proc_init)
mov pc, lr
+ENDPROC(cpu_v7_proc_init)
ENTRY(cpu_v7_proc_fin)
mov pc, lr
+ENDPROC(cpu_v7_proc_fin)
/*
* cpu_v7_reset(loc)
@@ -43,6 +45,7 @@ ENTRY(cpu_v7_proc_fin)
.align 5
ENTRY(cpu_v7_reset)
mov pc, r0
+ENDPROC(cpu_v7_reset)
/*
* cpu_v7_do_idle()
@@ -52,8 +55,9 @@ ENTRY(cpu_v7_reset)
* IRQs are already disabled.
*/
ENTRY(cpu_v7_do_idle)
- .long 0xe320f003 @ ARM V7 WFI instruction
+ wfi
mov pc, lr
+ENDPROC(cpu_v7_do_idle)
ENTRY(cpu_v7_dcache_clean_area)
#ifndef TLB_CAN_READ_FROM_L1_CACHE
@@ -65,6 +69,7 @@ ENTRY(cpu_v7_dcache_clean_area)
dsb
#endif
mov pc, lr
+ENDPROC(cpu_v7_dcache_clean_area)
/*
* cpu_v7_switch_mm(pgd_phys, tsk)
@@ -89,6 +94,7 @@ ENTRY(cpu_v7_switch_mm)
isb
#endif
mov pc, lr
+ENDPROC(cpu_v7_switch_mm)
/*
* cpu_v7_set_pte_ext(ptep, pte)
@@ -141,6 +147,7 @@ ENTRY(cpu_v7_set_pte_ext)
mcr p15, 0, r0, c7, c10, 1 @ flush_pte
#endif
mov pc, lr
+ENDPROC(cpu_v7_set_pte_ext)
cpu_v7_name:
.ascii "ARMv7 Processor"
@@ -188,6 +195,7 @@ __v7_setup:
bic r0, r0, r5 @ clear bits them
orr r0, r0, r6 @ set them
mov pc, lr @ return to head.S:__ret
+ENDPROC(__v7_setup)
/*
* V X F I D LR
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index b56dda8052f..24ba5109f2e 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -51,6 +51,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB
dsb
mov pc, lr
+ENDPROC(v7wbi_flush_user_tlb_range)
/*
* v7wbi_flush_kern_tlb_range(start,end)
@@ -77,6 +78,7 @@ ENTRY(v7wbi_flush_kern_tlb_range)
dsb
isb
mov pc, lr
+ENDPROC(v7wbi_flush_kern_tlb_range)
.section ".text.init", #alloc, #execinstr