author     Andi Kleen <ak@suse.de>                   2005-08-26 18:34:10 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>     2005-08-26 19:37:12 -0700
commit     485761bd6a72d33b3d4fa884927b2b0d983b701e (patch)
tree       c75562513489f62c8dcfd41acd467bca3d3202cc   /arch/x86_64/mm/numa.c
parent     bebf4688e9dbbfdd421736685d607bced91a3c91 (diff)
[PATCH] x86_64: Tell VM about holes in nodes
Some nodes can have large holes on x86-64. This fixes problems with the VM allowing too many dirty pages because it overestimates the amount of available RAM in a node. In extreme cases you can end up with all RAM filled with dirty pages, which can lead to deadlocks and other nasty behaviour.

This patch just tells the VM about the known holes from e820. Reserved memory (like the kernel text or mem_map) is still not taken into account, but that should amount to only a few percent of error now.

A small detail is that the flat setup now uses the NUMA free_area_init_node() too, because it offers more flexibility.

(akpm: lotsa thanks to Martin for working this problem out)

Cc: Martin Bligh <mbligh@mbligh.org>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
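For illustration, the following is a minimal, self-contained userspace sketch of the accounting idea behind this patch: count the page frames in a node's span that are not backed by RAM and report them alongside the zone sizes, so the VM can base its dirty-page limits on pages that actually exist. The ram_map table, hole_pages() helper and sample addresses are hypothetical stand-ins for the kernel's e820 map and e820_hole_size(); this is not the kernel implementation.

/*
 * Illustrative sketch only: a fake "e820" RAM map with a 1GB hole,
 * and a helper that counts pages in [start_pfn, end_pfn) that no RAM
 * range covers.  Assumes 64-bit unsigned long, as on x86-64.
 */
#include <stdio.h>

#define PAGE_SHIFT 12UL

struct ram_range { unsigned long start, end; };   /* byte addresses */

/* Pretend RAM map: 0-2GB and 3GB-4GB are RAM, 2GB-3GB is a hole. */
static const struct ram_range ram_map[] = {
	{ 0x00000000UL, 0x80000000UL },
	{ 0xc0000000UL, 0x100000000UL },
};

/* Pages in [start_pfn, end_pfn) that are not backed by any RAM range. */
static unsigned long hole_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long ram = 0;

	for (size_t i = 0; i < sizeof(ram_map) / sizeof(ram_map[0]); i++) {
		unsigned long s = ram_map[i].start >> PAGE_SHIFT;
		unsigned long e = ram_map[i].end >> PAGE_SHIFT;

		if (s < start_pfn) s = start_pfn;
		if (e > end_pfn)   e = end_pfn;
		if (s < e)
			ram += e - s;
	}
	return (end_pfn - start_pfn) - ram;
}

int main(void)
{
	unsigned long start_pfn = 0;
	unsigned long end_pfn   = 0x100000000UL >> PAGE_SHIFT;   /* node spans 0-4GB */
	unsigned long spanned   = end_pfn - start_pfn;
	unsigned long holes     = hole_pages(start_pfn, end_pfn);

	/* Without the hole count the VM would size its dirty limits on
	   'spanned'; with it, it can use the pages that really exist. */
	printf("spanned pages : %lu\n", spanned);
	printf("hole pages    : %lu\n", holes);
	printf("present pages : %lu\n", spanned - holes);
	return 0;
}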
Diffstat (limited to 'arch/x86_64/mm/numa.c')
-rw-r--r--  arch/x86_64/mm/numa.c  |  8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 70cb2904a90..6a156f5692a 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -126,9 +126,11 @@ void __init setup_node_zones(int nodeid)
 {
 	unsigned long start_pfn, end_pfn;
 	unsigned long zones[MAX_NR_ZONES];
+	unsigned long holes[MAX_NR_ZONES];
 	unsigned long dma_end_pfn;
 
 	memset(zones, 0, sizeof(unsigned long) * MAX_NR_ZONES);
+	memset(holes, 0, sizeof(unsigned long) * MAX_NR_ZONES);
 
 	start_pfn = node_start_pfn(nodeid);
 	end_pfn = node_end_pfn(nodeid);
@@ -139,13 +141,17 @@ void __init setup_node_zones(int nodeid)
 	dma_end_pfn = __pa(MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 	if (start_pfn < dma_end_pfn) {
 		zones[ZONE_DMA] = dma_end_pfn - start_pfn;
+		holes[ZONE_DMA] = e820_hole_size(start_pfn, dma_end_pfn);
 		zones[ZONE_NORMAL] = end_pfn - dma_end_pfn;
+		holes[ZONE_NORMAL] = e820_hole_size(dma_end_pfn, end_pfn);
+
 	} else {
 		zones[ZONE_NORMAL] = end_pfn - start_pfn;
+		holes[ZONE_NORMAL] = e820_hole_size(start_pfn, end_pfn);
 	}
 
 	free_area_init_node(nodeid, NODE_DATA(nodeid), zones,
-			    start_pfn, NULL);
+			    start_pfn, holes);
 }
 
 void __init numa_init_array(void)