Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--  arch/ia64/mm/contig.c    | 113
-rw-r--r--  arch/ia64/mm/discontig.c |  50
-rw-r--r--  arch/ia64/mm/init.c      |  64
3 files changed, 126 insertions(+), 101 deletions(-)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 1e79551231b..44ce5ed9444 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -30,71 +30,73 @@ static unsigned long max_gap;
#endif
/**
- * show_mem - display a memory statistics summary
+ * show_mem - give short summary of memory stats
*
- * Just walks the pages in the system and describes where they're allocated.
+ * Shows a simple page count of reserved and used pages in the system.
+ * For discontig machines, it does this on a per-pgdat basis.
*/
-void
-show_mem (void)
+void show_mem(void)
{
- int i, total = 0, reserved = 0;
- int shared = 0, cached = 0;
+ int i, total_reserved = 0;
+ int total_shared = 0, total_cached = 0;
+ unsigned long total_present = 0;
+ pg_data_t *pgdat;
printk(KERN_INFO "Mem-info:\n");
show_free_areas();
-
printk(KERN_INFO "Free swap: %6ldkB\n",
nr_swap_pages<<(PAGE_SHIFT-10));
- i = max_mapnr;
- for (i = 0; i < max_mapnr; i++) {
- if (!pfn_valid(i)) {
+ printk(KERN_INFO "Node memory in pages:\n");
+ for_each_online_pgdat(pgdat) {
+ unsigned long present;
+ unsigned long flags;
+ int shared = 0, cached = 0, reserved = 0;
+
+ pgdat_resize_lock(pgdat, &flags);
+ present = pgdat->node_present_pages;
+ for(i = 0; i < pgdat->node_spanned_pages; i++) {
+ struct page *page;
+ if (pfn_valid(pgdat->node_start_pfn + i))
+ page = pfn_to_page(pgdat->node_start_pfn + i);
+ else {
#ifdef CONFIG_VIRTUAL_MEM_MAP
- if (max_gap < LARGE_GAP)
- continue;
- i = vmemmap_find_next_valid_pfn(0, i) - 1;
+ if (max_gap < LARGE_GAP)
+ continue;
#endif
- continue;
+ i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+ i) - 1;
+ continue;
+ }
+ if (PageReserved(page))
+ reserved++;
+ else if (PageSwapCache(page))
+ cached++;
+ else if (page_count(page))
+ shared += page_count(page)-1;
}
- total++;
- if (PageReserved(mem_map+i))
- reserved++;
- else if (PageSwapCache(mem_map+i))
- cached++;
- else if (page_count(mem_map + i))
- shared += page_count(mem_map + i) - 1;
+ pgdat_resize_unlock(pgdat, &flags);
+ total_present += present;
+ total_reserved += reserved;
+ total_cached += cached;
+ total_shared += shared;
+ printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
+ "shrd: %10d, swpd: %10d\n", pgdat->node_id,
+ present, reserved, shared, cached);
}
- printk(KERN_INFO "%d pages of RAM\n", total);
- printk(KERN_INFO "%d reserved pages\n", reserved);
- printk(KERN_INFO "%d pages shared\n", shared);
- printk(KERN_INFO "%d pages swap cached\n", cached);
- printk(KERN_INFO "%ld pages in page table cache\n",
+ printk(KERN_INFO "%ld pages of RAM\n", total_present);
+ printk(KERN_INFO "%d reserved pages\n", total_reserved);
+ printk(KERN_INFO "%d pages shared\n", total_shared);
+ printk(KERN_INFO "%d pages swap cached\n", total_cached);
+ printk(KERN_INFO "Total of %ld pages in page table cache\n",
pgtable_quicklist_total_size());
+ printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
}
+
/* physical address where the bootmem map is located */
unsigned long bootmap_start;
/**
- * find_max_pfn - adjust the maximum page number callback
- * @start: start of range
- * @end: end of range
- * @arg: address of pointer to global max_pfn variable
- *
- * Passed as a callback function to efi_memmap_walk() to determine the highest
- * available page frame number in the system.
- */
-int
-find_max_pfn (unsigned long start, unsigned long end, void *arg)
-{
- unsigned long *max_pfnp = arg, pfn;
-
- pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
- if (pfn > *max_pfnp)
- *max_pfnp = pfn;
- return 0;
-}
-
-/**
* find_bootmap_location - callback to find a memory area for the bootmap
* @start: start of region
* @end: end of region
@@ -155,9 +157,10 @@ find_memory (void)
reserve_memory();
/* first find highest page frame number */
- max_pfn = 0;
- efi_memmap_walk(find_max_pfn, &max_pfn);
-
+ min_low_pfn = ~0UL;
+ max_low_pfn = 0;
+ efi_memmap_walk(find_max_min_low_pfn, NULL);
+ max_pfn = max_low_pfn;
/* how many bytes to cover all the pages */
bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
@@ -167,7 +170,8 @@ find_memory (void)
if (bootmap_start == ~0UL)
panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
- bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);
+ bootmap_size = init_bootmem_node(NODE_DATA(0),
+ (bootmap_start >> PAGE_SHIFT), 0, max_pfn);
/* Free all available memory, then mark bootmem-map as being in use. */
efi_memmap_walk(filter_rsvd_memory, free_bootmem);
@@ -175,11 +179,6 @@ find_memory (void)
find_initrd();
-#ifdef CONFIG_CRASH_DUMP
- /* If we are doing a crash dump, we still need to know the real mem
- * size before original memory map is * reset. */
- saved_max_pfn = max_pfn;
-#endif
}
#ifdef CONFIG_SMP
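
Aside: find_memory() above now derives both pfn bounds from a single
efi_memmap_walk() over find_max_min_low_pfn() (added in init.c later in
this diff). A minimal user-space sketch of the accumulation that callback
performs per memory range; PAGE_SHIFT and the sample ranges are invented,
and the real function additionally page- or granule-aligns the endpoints:

#include <stdio.h>

#define PAGE_SHIFT 14	/* 16KB pages, a common ia64 configuration */

static unsigned long min_low_pfn = ~0UL;
static unsigned long max_low_pfn;

/* called once per memory range, like the efi_memmap_walk() callback */
static int walk_range(unsigned long start, unsigned long end)
{
	unsigned long pfn_start = start >> PAGE_SHIFT;
	unsigned long pfn_end = (end - 1) >> PAGE_SHIFT;

	if (pfn_start < min_low_pfn)
		min_low_pfn = pfn_start;
	if (pfn_end > max_low_pfn)
		max_low_pfn = pfn_end;
	return 0;
}

int main(void)
{
	walk_range(0x004000000UL, 0x008000000UL);	/* sample range 1 */
	walk_range(0x100000000UL, 0x180000000UL);	/* sample range 2 */
	printf("min_low_pfn=%#lx max_low_pfn=%#lx\n",
	       min_low_pfn, max_low_pfn);
	return 0;
}
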
@@ -237,9 +236,11 @@ paging_init (void)
num_physpages = 0;
efi_memmap_walk(count_pages, &num_physpages);
- max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+#ifdef CONFIG_ZONE_DMA
+ max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
max_zone_pfns[ZONE_DMA] = max_dma;
+#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_VIRTUAL_MEM_MAP
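
The paging_init() hunk above makes the DMA zone bound conditional on
CONFIG_ZONE_DMA. A compilable sketch of that zone-sizing step, with
made-up zone indices and pfn values standing in for the kernel's; build
with -DCONFIG_ZONE_DMA to exercise the DMA branch:

#include <stdio.h>
#include <string.h>

enum { ZONE_DMA, ZONE_NORMAL, MAX_NR_ZONES };

int main(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long max_dma = 0x100000;	/* pfn of MAX_DMA_ADDRESS, invented */
	unsigned long max_low_pfn = 0x400000;	/* highest pfn, invented */

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = max_dma;	/* only when a DMA zone exists */
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	for (int i = 0; i < MAX_NR_ZONES; i++)
		printf("zone %d: up to pfn %#lx\n", i, max_zone_pfns[i]);
	return 0;
}
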
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 96722cb1b49..872da7a2acc 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -37,7 +37,9 @@ struct early_node_data {
unsigned long pernode_size;
struct bootmem_data bootmem_data;
unsigned long num_physpages;
+#ifdef CONFIG_ZONE_DMA
unsigned long num_dma_physpages;
+#endif
unsigned long min_pfn;
unsigned long max_pfn;
};
@@ -86,9 +88,6 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
}
- min_low_pfn = min(min_low_pfn, bdp->node_boot_start>>PAGE_SHIFT);
- max_low_pfn = max(max_low_pfn, bdp->node_low_pfn);
-
return 0;
}
@@ -412,37 +411,6 @@ static void __init memory_less_nodes(void)
return;
}
-#ifdef CONFIG_SPARSEMEM
-/**
- * register_sparse_mem - notify SPARSEMEM that this memory range exists.
- * @start: physical start of range
- * @end: physical end of range
- * @arg: unused
- *
- * Simply calls SPARSEMEM to register memory section(s).
- */
-static int __init register_sparse_mem(unsigned long start, unsigned long end,
- void *arg)
-{
- int nid;
-
- start = __pa(start) >> PAGE_SHIFT;
- end = __pa(end) >> PAGE_SHIFT;
- nid = early_pfn_to_nid(start);
- memory_present(nid, start, end);
-
- return 0;
-}
-
-static void __init arch_sparse_init(void)
-{
- efi_memmap_walk(register_sparse_mem, NULL);
- sparse_init();
-}
-#else
-#define arch_sparse_init() do {} while (0)
-#endif
-
/**
* find_memory - walk the EFI memory map and setup the bootmem allocator
*
@@ -467,12 +435,16 @@ void __init find_memory(void)
/* These actually end up getting called by call_pernode_memory() */
efi_memmap_walk(filter_rsvd_memory, build_node_maps);
efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
+ efi_memmap_walk(find_max_min_low_pfn, NULL);
for_each_online_node(node)
if (mem_data[node].bootmem_data.node_low_pfn) {
node_clear(node, memory_less_mask);
mem_data[node].min_pfn = ~0UL;
}
+
+ efi_memmap_walk(register_active_ranges, NULL);
+
/*
* Initialize the boot memory maps in reverse order since that's
* what the bootmem allocator expects
@@ -654,11 +626,12 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
{
unsigned long end = start + len;
- add_active_range(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
mem_data[node].num_physpages += len >> PAGE_SHIFT;
+#ifdef CONFIG_ZONE_DMA
if (start <= __pa(MAX_DMA_ADDRESS))
mem_data[node].num_dma_physpages +=
(min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
+#endif
start = GRANULEROUNDDOWN(start);
start = ORDERROUNDDOWN(start);
end = GRANULEROUNDUP(end);
@@ -686,10 +659,11 @@ void __init paging_init(void)
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
- arch_sparse_init();
-
efi_memmap_walk(filter_rsvd_memory, count_node_pages);
+ sparse_memory_present_with_active_regions(MAX_NUMNODES);
+ sparse_init();
+
#ifdef CONFIG_VIRTUAL_MEM_MAP
vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
sizeof(struct page));
@@ -710,7 +684,9 @@ void __init paging_init(void)
}
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = max_dma;
+#endif
max_zone_pfns[ZONE_NORMAL] = max_pfn;
free_area_init_nodes(max_zone_pfns);
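
count_node_pages() above keeps a per-node count of DMA-capable pages by
clipping each range against __pa(MAX_DMA_ADDRESS). A small sketch of that
clipping arithmetic; the boundary address and PAGE_SHIFT are illustrative:

#include <stdio.h>

#define PAGE_SHIFT 14
#define MAX_DMA_PHYS 0x100000000UL	/* stand-in for __pa(MAX_DMA_ADDRESS) */

static unsigned long dma_pages(unsigned long start, unsigned long len)
{
	unsigned long end = start + len;
	unsigned long cap = end < MAX_DMA_PHYS ? end : MAX_DMA_PHYS;

	if (start > MAX_DMA_PHYS)
		return 0;	/* range lies entirely above the DMA limit */
	return (cap - start) >> PAGE_SHIFT;
}

int main(void)
{
	/* a range straddling the boundary: only the lower 1GB counts */
	printf("%lu DMA pages\n", dma_pages(0xC0000000UL, 0x80000000UL));
	return 0;
}
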
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 07d82cd7cbd..5b70241741b 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -19,6 +19,7 @@
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
+#include <linux/kexec.h>
#include <asm/a.out.h>
#include <asm/dma.h>
@@ -67,7 +68,7 @@ max_pgt_pages(void)
#ifndef CONFIG_NUMA
node_free_pages = nr_free_pages();
#else
- node_free_pages = nr_free_pages_pgdat(NODE_DATA(numa_node_id()));
+ node_free_pages = node_page_state(numa_node_id(), NR_FREE_PAGES);
#endif
max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
@@ -128,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
set_bit(PG_arch_1, &page->flags); /* mark page as clean */
}
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+void
+dma_mark_clean(void *addr, size_t size)
+{
+ unsigned long pg_addr, end;
+
+ pg_addr = PAGE_ALIGN((unsigned long) addr);
+ end = (unsigned long) addr + size;
+ while (pg_addr + PAGE_SIZE <= end) {
+ struct page *page = virt_to_page(pg_addr);
+ set_bit(PG_arch_1, &page->flags);
+ pg_addr += PAGE_SIZE;
+ }
+}
+
inline void
ia64_set_rbs_bot (void)
{
@@ -135,7 +155,7 @@ ia64_set_rbs_bot (void)
if (stack_size > MAX_USER_STACK_SIZE)
stack_size = MAX_USER_STACK_SIZE;
- current->thread.rbs_bot = STACK_TOP - stack_size;
+ current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}
/*
@@ -156,9 +176,8 @@ ia64_init_addr_space (void)
* the problem. When the process attempts to write to the register backing store
* for the first time, it will get a SEGFAULT in this case.
*/
- vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
- memset(vma, 0, sizeof(*vma));
vma->vm_mm = current->mm;
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -175,9 +194,8 @@ ia64_init_addr_space (void)
/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
if (!(current->personality & MMAP_PAGE_ZERO)) {
- vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
- memset(vma, 0, sizeof(*vma));
vma->vm_mm = current->mm;
vma->vm_end = PAGE_SIZE;
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
@@ -586,13 +604,27 @@ find_largest_hole (u64 start, u64 end, void *arg)
return 0;
}
+#endif /* CONFIG_VIRTUAL_MEM_MAP */
+
int __init
register_active_ranges(u64 start, u64 end, void *arg)
{
- add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT);
+ int nid = paddr_to_nid(__pa(start));
+
+ if (nid < 0)
+ nid = 0;
+#ifdef CONFIG_KEXEC
+ if (start > crashk_res.start && start < crashk_res.end)
+ start = crashk_res.end;
+ if (end > crashk_res.start && end < crashk_res.end)
+ end = crashk_res.start;
+#endif
+
+ if (start < end)
+ add_active_range(nid, __pa(start) >> PAGE_SHIFT,
+ __pa(end) >> PAGE_SHIFT);
return 0;
}
-#endif /* CONFIG_VIRTUAL_MEM_MAP */
static int __init
count_reserved_pages (u64 start, u64 end, void *arg)
@@ -607,6 +639,22 @@ count_reserved_pages (u64 start, u64 end, void *arg)
return 0;
}
+int
+find_max_min_low_pfn (unsigned long start, unsigned long end, void *arg)
+{
+ unsigned long pfn_start, pfn_end;
+#ifdef CONFIG_FLATMEM
+ pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
+ pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
+#else
+ pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
+ pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
+#endif
+ min_low_pfn = min(min_low_pfn, pfn_start);
+ max_low_pfn = max(max_low_pfn, pfn_end);
+ return 0;
+}
+
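
register_active_ranges() earlier in this file now clips each range against
the crash kernel reservation before registering it. A user-space sketch of
that clipping, with an invented crashk_res window:

#include <stdio.h>

struct resource { unsigned long start, end; };

static struct resource crashk_res = { 0x4000000UL, 0x8000000UL };

static void register_range(unsigned long start, unsigned long end)
{
	/* the same two tests the patch adds under CONFIG_KEXEC */
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;

	if (start < end)
		printf("register [%#lx, %#lx)\n", start, end);
	else
		printf("dropped: swallowed by crash kernel window\n");
}

int main(void)
{
	register_range(0x1000000UL, 0x5000000UL);	/* tail overlaps: trimmed */
	register_range(0x5000000UL, 0x6000000UL);	/* fully inside: dropped */
	register_range(0x9000000UL, 0xa000000UL);	/* disjoint: kept as-is */
	return 0;
}
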
/*
* Boot command-line option "nolwsys" can be used to disable the use of any light-weight
* system call handler. When this option is in effect, all fsyscalls will end up bubbling