Diffstat (limited to 'arch/x86/mm/init_64.c')
-rw-r--r--  arch/x86/mm/init_64.c  |  13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index cc50a13ce8d..eabcaed76c2 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -354,17 +354,10 @@ static void __init find_early_table_space(unsigned long end)
 	 * need roughly 0.5KB per GB.
 	 */
 	start = 0x8000;
-	table_start = find_e820_area(start, end, tables);
+	table_start = find_e820_area(start, end, tables, PAGE_SIZE);
 	if (table_start == -1UL)
 		panic("Cannot find space for the kernel page tables");
 
-	/*
-	 * When you have a lot of RAM like 256GB, early_table will not fit
-	 * into 0x8000 range, find_e820_area() will find area after kernel
-	 * bss but the table_start is not page aligned, so need to round it
-	 * up to avoid overlap with bss:
-	 */
-	table_start = round_up(table_start, PAGE_SIZE);
 	table_start >>= PAGE_SHIFT;
 	table_end = table_start;
 
@@ -420,7 +413,9 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
 		mmu_cr4_features = read_cr4();
 	__flush_tlb_all();
 
-	reserve_early(table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
+	if (!after_bootmem)
+		reserve_early(table_start << PAGE_SHIFT,
+				table_end << PAGE_SHIFT, "PGTABLE");
 }
 
 #ifndef CONFIG_NUMA
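
The idea behind the first hunk can be illustrated with a small, self-contained user-space sketch. This is not the kernel implementation: the free range, the find_area() helper and its constants below are hypothetical stand-ins for the real e820 machinery. The point is that once the area-search routine takes the alignment as a parameter and rounds the candidate address itself, a caller such as find_early_table_space() gets back an already page-aligned address and no longer needs the round_up() workaround that the removed comment described.

#include <stdio.h>
#include <stdint.h>

/* round x up to the next multiple of the power-of-two value 'align' */
#define ROUND_UP(x, align) (((x) + (align) - 1) & ~((uint64_t)(align) - 1))

/* hypothetical stand-in for one free range in the e820 map */
static const uint64_t free_start = 0x9213;
static const uint64_t free_end   = 0x100000;

/*
 * Hypothetical analogue of find_e820_area(start, end, size, align):
 * the search helper aligns the candidate address itself, so the
 * caller receives a result that is already PAGE_SIZE aligned.
 */
static uint64_t find_area(uint64_t start, uint64_t end,
			  uint64_t size, uint64_t align)
{
	uint64_t addr = start > free_start ? start : free_start;
	uint64_t limit = end < free_end ? end : free_end;

	addr = ROUND_UP(addr, align);
	if (addr + size > limit)
		return -1ULL;		/* no suitable space */
	return addr;
}

int main(void)
{
	/* the caller passes the alignment instead of rounding up afterwards */
	uint64_t table_start = find_area(0x8000, 0x100000, 0x4000, 4096);

	if (table_start == -1ULL)
		return 1;
	printf("page tables at %#llx\n", (unsigned long long)table_start);
	return 0;
}

The second hunk is separate: reserve_early() now takes a label, and the "PGTABLE" string merely names the early reservation so it can be identified later. The !after_bootmem check is presumably there because init_memory_mapping() can be called again once the bootmem allocator is up, at which point the early-reservation list should no longer be touched.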