Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/40x_mmu.c      |  16
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c  |   5
-rw-r--r--  arch/powerpc/mm/numa.c         | 130
3 files changed, 100 insertions(+), 51 deletions(-)
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index cecbbc76f62..29954dc2894 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -93,7 +93,7 @@ void __init MMU_init_hw(void)
unsigned long __init mmu_mapin_ram(void)
{
- unsigned long v, s;
+ unsigned long v, s, mapped;
phys_addr_t p;
v = KERNELBASE;
@@ -130,5 +130,17 @@ unsigned long __init mmu_mapin_ram(void)
s -= LARGE_PAGE_SIZE_4M;
}
- return total_lowmem - s;
+ mapped = total_lowmem - s;
+
+ /* If the size of RAM is not an exact power of two, we may not
+ * have covered RAM in its entirety with 16 and 4 MiB
+ * pages. Consequently, restrict the top end of RAM currently
+ * allocable so that calls to the LMB to allocate PTEs for "tail"
+ * coverage with normal-sized pages (or other reasons) do not
+ * attempt to allocate outside the allowed range.
+ */
+
+ __initial_memory_limit_addr = memstart_addr + mapped;
+
+ return mapped;
}
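
[Note on the 40x hunk: the loop above covers lowmem with 16 MiB pages while they fit, then 4 MiB pages, so a non-power-of-two RAM size leaves an unmapped tail; the new __initial_memory_limit_addr clamp keeps early LMB allocations below that tail. A minimal userspace sketch of the same arithmetic, with illustrative constants, not kernel code:]

#include <stdio.h>

#define SZ_16M  (16UL << 20)
#define SZ_4M   (4UL << 20)

/* Mirror the mmu_mapin_ram() loop: consume lowmem with large pages. */
static unsigned long map_lowmem(unsigned long total_lowmem)
{
        unsigned long s = total_lowmem;

        while (s >= SZ_16M)
                s -= SZ_16M;            /* one 16 MiB mapping */
        while (s >= SZ_4M)
                s -= SZ_4M;             /* one 4 MiB mapping */

        return total_lowmem - s;        /* bytes actually covered */
}

int main(void)
{
        unsigned long lowmem = 127UL << 20;     /* not a power of two */
        unsigned long mapped = map_lowmem(lowmem);

        /* prints: mapped 124 MiB, unmapped tail 3 MiB */
        printf("mapped %lu MiB, unmapped tail %lu MiB\n",
               mapped >> 20, (lowmem - mapped) >> 20);
        return 0;
}

[The 3 MiB tail is exactly what the clamp protects: the page tables that will map it with normal-sized pages must themselves be allocated from memory that is already mapped.]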
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a117024ab8c..f0c3b88d50f 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -507,6 +507,9 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
{
struct hstate *hstate = hstate_file(file);
int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
+
+ if (!mmu_huge_psizes[mmu_psize])
+ return -EINVAL;
return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
}
@@ -677,7 +680,7 @@ repeat:
return err;
}
-void set_huge_psize(int psize)
+static void __init set_huge_psize(int psize)
{
/* Check that it is a page size supported by the hardware and
* that it fits within pagetable limits. */
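
[Note on the hugetlbpage.c hunk: the first change adds a fail-early guard, returning -EINVAL when the MMU page-size id derived from the file's hstate was never configured in mmu_huge_psizes[], rather than handing a bogus psize to the slice allocator. A standalone sketch of the same pattern; the table and names here are illustrative, not the kernel's:]

#include <errno.h>

#define MMU_PAGE_COUNT  8

/* Nonzero entry == this huge page size was configured at boot. */
static unsigned int huge_psizes[MMU_PAGE_COUNT];

static long get_unmapped_area_sketch(int psize)
{
        /* Reject unconfigured sizes before doing any real work. */
        if (psize < 0 || psize >= MMU_PAGE_COUNT || !huge_psizes[psize])
                return -EINVAL;

        /* ... the real search would run here ... */
        return 0;
}

int main(void)
{
        /* Nothing configured yet, so any size is rejected. */
        return get_unmapped_area_sketch(4) == -EINVAL ? 0 : 1;
}

[The second change, making set_huge_psize() static __init, restricts the function to this file and lets its code be discarded after boot, since it is only needed during early setup.]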
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index eb505ad34a8..cf81049e1e5 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -865,10 +865,77 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = {
.priority = 1 /* Must run before sched domains notifier. */
};
+static void mark_reserved_regions_for_nid(int nid)
+{
+ struct pglist_data *node = NODE_DATA(nid);
+ int i;
+
+ for (i = 0; i < lmb.reserved.cnt; i++) {
+ unsigned long physbase = lmb.reserved.region[i].base;
+ unsigned long size = lmb.reserved.region[i].size;
+ unsigned long start_pfn = physbase >> PAGE_SHIFT;
+ unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT);
+ struct node_active_region node_ar;
+ unsigned long node_end_pfn = node->node_start_pfn +
+ node->node_spanned_pages;
+
+ /*
+ * Check to make sure that this lmb.reserved area is
+ * within the bounds of the node that we care about.
+ * Checking the nid of the start and end points is not
+ * sufficient because the reserved area could span the
+ * entire node.
+ */
+ if (end_pfn <= node->node_start_pfn ||
+ start_pfn >= node_end_pfn)
+ continue;
+
+ get_node_active_region(start_pfn, &node_ar);
+ while (start_pfn < end_pfn &&
+ node_ar.start_pfn < node_ar.end_pfn) {
+ unsigned long reserve_size = size;
+ /*
+ * if reserved region extends past active region
+ * then trim size to active region
+ */
+ if (end_pfn > node_ar.end_pfn)
+ reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
+ - (start_pfn << PAGE_SHIFT);
+ /*
+ * Only worry about *this* node, others may not
+ * yet have valid NODE_DATA().
+ */
+ if (node_ar.nid == nid) {
+ dbg("reserve_bootmem %lx %lx nid=%d\n",
+ physbase, reserve_size, node_ar.nid);
+ reserve_bootmem_node(NODE_DATA(node_ar.nid),
+ physbase, reserve_size,
+ BOOTMEM_DEFAULT);
+ }
+ /*
+ * if reserved region is contained in the active region
+ * then done.
+ */
+ if (end_pfn <= node_ar.end_pfn)
+ break;
+
+ /*
+ * reserved region extends past the active region
+ * get next active region that contains this
+ * reserved region
+ */
+ start_pfn = node_ar.end_pfn;
+ physbase = start_pfn << PAGE_SHIFT;
+ size = size - reserve_size;
+ get_node_active_region(start_pfn, &node_ar);
+ }
+ }
+}
+
+
void __init do_init_bootmem(void)
{
int nid;
- unsigned int i;
min_low_pfn = 0;
max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
@@ -890,7 +957,13 @@ void __init do_init_bootmem(void)
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
- /* Allocate the node structure node local if possible */
+ /*
+ * Allocate the node structure node local if possible
+ *
+ * Be careful moving this around, as it relies on all
+ * previous nodes' bootmem to be initialized and have
+ * all reserved areas marked.
+ */
NODE_DATA(nid) = careful_allocation(nid,
sizeof(struct pglist_data),
SMP_CACHE_BYTES, end_pfn);
@@ -922,53 +995,14 @@ void __init do_init_bootmem(void)
start_pfn, end_pfn);
free_bootmem_with_active_regions(nid, end_pfn);
- }
-
- /* Mark reserved regions */
- for (i = 0; i < lmb.reserved.cnt; i++) {
- unsigned long physbase = lmb.reserved.region[i].base;
- unsigned long size = lmb.reserved.region[i].size;
- unsigned long start_pfn = physbase >> PAGE_SHIFT;
- unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT);
- struct node_active_region node_ar;
-
- get_node_active_region(start_pfn, &node_ar);
- while (start_pfn < end_pfn &&
- node_ar.start_pfn < node_ar.end_pfn) {
- unsigned long reserve_size = size;
- /*
- * if reserved region extends past active region
- * then trim size to active region
- */
- if (end_pfn > node_ar.end_pfn)
- reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
- - (start_pfn << PAGE_SHIFT);
- dbg("reserve_bootmem %lx %lx nid=%d\n", physbase,
- reserve_size, node_ar.nid);
- reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase,
- reserve_size, BOOTMEM_DEFAULT);
- /*
- * if reserved region is contained in the active region
- * then done.
- */
- if (end_pfn <= node_ar.end_pfn)
- break;
-
- /*
- * reserved region extends past the active region
- * get next active region that contains this
- * reserved region
- */
- start_pfn = node_ar.end_pfn;
- physbase = start_pfn << PAGE_SHIFT;
- size = size - reserve_size;
- get_node_active_region(start_pfn, &node_ar);
- }
-
- }
-
- for_each_online_node(nid)
+ /*
+ * Be very careful about moving this around. Future
+ * calls to careful_allocation() depend on this getting
+ * done correctly.
+ */
+ mark_reserved_regions_for_nid(nid);
sparse_memory_present_with_active_regions(nid);
+ }
}
void __init paging_init(void)
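
[Note on the numa.c hunk: the new mark_reserved_regions_for_nid() walks every LMB reserved range, clips it against successive active regions, and calls reserve_bootmem_node() only for the pieces whose region belongs to the node being initialized, since other nodes may not have a valid NODE_DATA() yet. A userspace sketch of that clipping walk, with a toy node map and names, not kernel code:]

#include <stdio.h>

struct active_region {
        unsigned long start_pfn, end_pfn;
        int nid;
};

/* Toy active map: node 0 owns pfns [0, 100), node 1 owns [100, 200). */
static const struct active_region regions[] = {
        { 0, 100, 0 },
        { 100, 200, 1 },
};

static void reserve_for_nid(unsigned long start_pfn,
                            unsigned long end_pfn, int nid)
{
        for (size_t i = 0; i < sizeof(regions) / sizeof(regions[0]) &&
                        start_pfn < end_pfn; i++) {
                const struct active_region *ar = &regions[i];
                unsigned long lo, hi;

                if (end_pfn <= ar->start_pfn || start_pfn >= ar->end_pfn)
                        continue;       /* no overlap with this region */

                /* Trim the reserved range to this active region. */
                lo = start_pfn > ar->start_pfn ? start_pfn : ar->start_pfn;
                hi = end_pfn < ar->end_pfn ? end_pfn : ar->end_pfn;

                /* Only act on the node being initialized. */
                if (ar->nid == nid)
                        printf("reserve pfns [%lu, %lu) on nid %d\n",
                               lo, hi, nid);

                start_pfn = hi;         /* continue past this piece */
        }
}

int main(void)
{
        reserve_for_nid(90, 120, 0);    /* prints: reserve pfns [90, 100) on nid 0 */
        reserve_for_nid(90, 120, 1);    /* prints: reserve pfns [100, 120) on nid 1 */
        return 0;
}

[Folding this walk into the per-node loop in do_init_bootmem(), instead of running it once over all nodes as the removed code did, is what lets careful_allocation() for node N rely on nodes 0..N-1 already having their reserved areas marked, as the comments in the hunk note.]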