-rw-r--r--   arch/i386/Kconfig        |  1
-rw-r--r--   include/linux/mmzone.h   | 40
-rw-r--r--   mm/Kconfig               | 19
-rw-r--r--   mm/sparse.c              | 26
4 files changed, 49 insertions, 37 deletions
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 619d843ba23..dcb0ad098c6 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -754,6 +754,7 @@ config NUMA
 	depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI))
 	default n if X86_PC
 	default y if (X86_NUMAQ || X86_SUMMIT)
+	select SPARSEMEM_STATIC

 # Need comments to help the hapless user trying to turn on NUMA support
 comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index b97054bbc39..79cf578e21b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -487,39 +487,29 @@ struct mem_section {
 	unsigned long section_mem_map;
 };

-#ifdef CONFIG_ARCH_SPARSEMEM_EXTREME
-/*
- * Should we ever require GCC 4 or later then the flat array scheme
- * can be eliminated and a uniform solution for EXTREME and !EXTREME can
- * be arrived at.
- */
-#define SECTION_ROOT_SHIFT	(PAGE_SHIFT-3)
-#define SECTION_ROOT_MASK	((1UL<<SECTION_ROOT_SHIFT) - 1)
-#define SECTION_TO_ROOT(_sec)	((_sec) >> SECTION_ROOT_SHIFT)
-#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS >> SECTION_ROOT_SHIFT)
+#ifdef CONFIG_SPARSEMEM_EXTREME
+#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
+#else
+#define SECTIONS_PER_ROOT	1
+#endif

-extern struct mem_section *mem_section[NR_SECTION_ROOTS];
-
-static inline struct mem_section *__nr_to_section(unsigned long nr)
-{
-	if (!mem_section[SECTION_TO_ROOT(nr)])
-		return NULL;
-	return &mem_section[SECTION_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
-}
+#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
+#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
+#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

+#ifdef CONFIG_SPARSEMEM_EXTREME
+extern struct mem_section *mem_section[NR_SECTION_ROOTS];
 #else
-
-extern struct mem_section mem_section[NR_MEM_SECTIONS];
+extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
+#endif

 static inline struct mem_section *__nr_to_section(unsigned long nr)
 {
-	return &mem_section[nr];
+	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
+		return NULL;
+	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
 }

-#define sparse_index_init(_sec, _nid) do {} while (0)
-
-#endif
-
 /*
  * We use the lower bits of the mem_map pointer to store
  * a little bit of information.  There should be at least
diff --git a/mm/Kconfig b/mm/Kconfig
index fc644c5c065..4e9937ac352 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -91,10 +91,23 @@ config HAVE_MEMORY_PRESENT
 	depends on ARCH_HAVE_MEMORY_PRESENT || SPARSEMEM

 #
+# SPARSEMEM_EXTREME (which is the default) does some bootmem
+# allocations when memory_present() is called.  If this can not
+# be done on your architecture, select this option.  However,
+# statically allocating the mem_section[] array can potentially
+# consume vast quantities of .bss, so be careful.
+#
+# This option will also potentially produce smaller runtime code
+# with gcc 3.4 and later.
+#
+config SPARSEMEM_STATIC
+	def_bool n
+
+#
 # Architectecture platforms which require a two level mem_section in SPARSEMEM
 # must select this option. This is usually for architecture platforms with
 # an extremely sparse physical address space.
 #
-config ARCH_SPARSEMEM_EXTREME
-	def_bool n
-	depends on SPARSEMEM && 64BIT
+config SPARSEMEM_EXTREME
+	def_bool y
+	depends on SPARSEMEM && !SPARSEMEM_STATIC
diff --git a/mm/sparse.c b/mm/sparse.c
index b2b456bf0a5..fa01292157a 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -13,28 +13,36 @@
  *
  * 1) mem_section	- memory sections, mem_map's for valid memory
  */
-#ifdef CONFIG_ARCH_SPARSEMEM_EXTREME
+#ifdef CONFIG_SPARSEMEM_EXTREME
 struct mem_section *mem_section[NR_SECTION_ROOTS]
 	____cacheline_maxaligned_in_smp;
+#else
+struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
+	____cacheline_maxaligned_in_smp;
+#endif
+EXPORT_SYMBOL(mem_section);
+
+static void sparse_alloc_root(unsigned long root, int nid)
+{
+#ifdef CONFIG_SPARSEMEM_EXTREME
+	mem_section[root] = alloc_bootmem_node(NODE_DATA(nid), PAGE_SIZE);
+#endif
+}

 static void sparse_index_init(unsigned long section, int nid)
 {
-	unsigned long root = SECTION_TO_ROOT(section);
+	unsigned long root = SECTION_NR_TO_ROOT(section);

 	if (mem_section[root])
 		return;
-	mem_section[root] = alloc_bootmem_node(NODE_DATA(nid), PAGE_SIZE);
+
+	sparse_alloc_root(root, nid);
+
 	if (mem_section[root])
 		memset(mem_section[root], 0, PAGE_SIZE);
 	else
 		panic("memory_present: NO MEMORY\n");
 }
-#else
-struct mem_section mem_section[NR_MEM_SECTIONS]
-	____cacheline_maxaligned_in_smp;
-#endif
-EXPORT_SYMBOL(mem_section);
-
 /* Record a memory area against a node.  */
 void memory_present(int nid, unsigned long start, unsigned long end)
 {
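The SECTIONS_PER_ROOT, SECTION_NR_TO_ROOT() and SECTION_ROOT_MASK macros introduced in the mmzone.h hunk split a section number into a root index and an offset within that root's page-sized block. The standalone C sketch below only illustrates that arithmetic; the 4 KiB page size and the 8-byte struct mem_section (a single unsigned long, as in this patch) are assumptions chosen for the example, not values taken from any particular configuration.

#include <stdio.h>

#define PAGE_SIZE		4096UL	/* assumed: 4 KiB pages */

struct mem_section { unsigned long section_mem_map; };	/* 8 bytes here */

#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

int main(void)
{
	unsigned long nr = 1000;	/* an arbitrary section number */

	/* Print the root/offset pair the two-level lookup would use. */
	printf("sections per root: %lu\n", (unsigned long)SECTIONS_PER_ROOT);
	printf("section %lu -> root %lu, offset %lu\n",
	       nr,
	       (unsigned long)SECTION_NR_TO_ROOT(nr),
	       (unsigned long)(nr & SECTION_ROOT_MASK));
	return 0;
}

With these assumptions SECTIONS_PER_ROOT is 512, so the reworked __nr_to_section() would resolve section 1000 to mem_section[1][488] after checking that root 1 has been allocated.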
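To put a rough number on the ".bss" warning in the new mm/Kconfig help text, the sketch below compares the footprint of the static mem_section table against the SPARSEMEM_EXTREME root-pointer array. MAX_PHYSMEM_BITS and SECTION_SIZE_BITS are illustrative assumptions for the example, not values from this patch.

#include <stdio.h>

#define PAGE_SIZE		4096UL	/* assumed: 4 KiB pages */
#define MAX_PHYSMEM_BITS	44	/* assumed physical address width */
#define SECTION_SIZE_BITS	27	/* assumed 128 MiB sections */
#define NR_MEM_SECTIONS		(1UL << (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS))

struct mem_section { unsigned long section_mem_map; };

#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)

int main(void)
{
	/* SPARSEMEM_STATIC: the entire section table lives in .bss. */
	unsigned long static_bss = NR_MEM_SECTIONS * sizeof (struct mem_section);

	/* SPARSEMEM_EXTREME: only root pointers live in .bss; each populated
	 * root later costs one page from the bootmem allocator. */
	unsigned long extreme_bss = NR_SECTION_ROOTS * sizeof (struct mem_section *);

	printf("sections:      %lu\n", NR_MEM_SECTIONS);
	printf("static  .bss:  %lu KiB\n", static_bss >> 10);
	printf("extreme .bss:  %lu KiB (plus one page per populated root)\n",
	       extreme_bss >> 10);
	return 0;
}

Under these assumptions the static layout reserves about 1 MiB of .bss up front, while the EXTREME layout reserves only 2 KiB of pointers and defers the rest to the page-sized bootmem allocations that sparse_index_init() makes for roots that actually contain memory.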