Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c     | 24 +++++++++++++++++++++---
-rw-r--r--  mm/rmap.c           |  9 +++++----
-rw-r--r--  mm/shmem.c          |  5 +++--
-rw-r--r--  mm/slab.c           |  4 ++--
-rw-r--r--  mm/sparse-vmemmap.c | 12 +++++++++++-
5 files changed, 42 insertions(+), 12 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 12376ae3f73..b5a58d476c1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -305,7 +305,6 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
int i;
- VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
/*
* clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
* and __GFP_HIGHMEM from hard or soft interrupt context.
@@ -3266,6 +3265,16 @@ static void inline setup_usemap(struct pglist_data *pgdat,
#endif /* CONFIG_SPARSEMEM */
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
+
+/* Return a sensible default order for the pageblock size. */
+static inline int pageblock_default_order(void)
+{
+ if (HPAGE_SHIFT > PAGE_SHIFT)
+ return HUGETLB_PAGE_ORDER;
+
+ return MAX_ORDER-1;
+}
+
/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
static inline void __init set_pageblock_order(unsigned int order)
{
@@ -3281,7 +3290,16 @@ static inline void __init set_pageblock_order(unsigned int order)
}
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
-/* Defined this way to avoid accidently referencing HUGETLB_PAGE_ORDER */
+/*
+ * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
+ * and pageblock_default_order() are unused as pageblock_order is set
+ * at compile-time. See include/linux/pageblock-flags.h for the values of
+ * pageblock_order based on the kernel config
+ */
+static inline int pageblock_default_order(unsigned int order)
+{
+ return MAX_ORDER-1;
+}
#define set_pageblock_order(x) do {} while (0)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
@@ -3366,7 +3384,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
if (!size)
continue;
- set_pageblock_order(HUGETLB_PAGE_ORDER);
+ set_pageblock_order(pageblock_default_order());
setup_usemap(pgdat, zone, size);
ret = init_currently_empty_zone(zone, zone_start_pfn,
size, MEMMAP_EARLY);
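
Note: the page_alloc.c hunks drop a VM_BUG_ON from prep_zero_page() and stop hard-wiring the pageblock size to HUGETLB_PAGE_ORDER: the new pageblock_default_order() picks the huge page order only when huge pages are actually larger than base pages, and falls back to MAX_ORDER-1 otherwise. A minimal userspace sketch of that selection logic; the constant values here are assumptions, not any real configuration.

    #include <stdio.h>

    #define PAGE_SHIFT         12                          /* 4 KiB base pages (assumed) */
    #define HPAGE_SHIFT        24                          /* 16 MiB huge pages (assumed) */
    #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)  /* order of one huge page */
    #define MAX_ORDER          11                          /* buddy limit (assumed) */

    /* Mirrors the pageblock_default_order() added above. */
    static int pageblock_default_order(void)
    {
        if (HPAGE_SHIFT > PAGE_SHIFT)
            return HUGETLB_PAGE_ORDER;
        return MAX_ORDER - 1;
    }

    int main(void)
    {
        printf("pageblock order: %d\n", pageblock_default_order()); /* prints 12 here */
        return 0;
    }

On a configuration where HPAGE_SHIFT equals PAGE_SHIFT, the same function would return MAX_ORDER-1 = 10 instead.
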
diff --git a/mm/rmap.c b/mm/rmap.c
index dc3be5f5b0d..dbc2ca2057a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -471,11 +471,12 @@ int page_mkclean(struct page *page)
if (page_mapped(page)) {
struct address_space *mapping = page_mapping(page);
- if (mapping)
+ if (mapping) {
ret = page_mkclean_file(mapping, page);
- if (page_test_dirty(page)) {
- page_clear_dirty(page);
- ret = 1;
+ if (page_test_dirty(page)) {
+ page_clear_dirty(page);
+ ret = 1;
+ }
}
}
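
Note: the rmap.c hunk is a brace-scoping fix in page_mkclean(): the page_test_dirty()/page_clear_dirty() handling was meant to run only when the page still has a mapping, but it sat after an unbraced if (mapping) and therefore ran unconditionally. A contrived, self-contained C illustration of that bug shape; every name here is a hypothetical stand-in, not a kernel API.

    #include <stdbool.h>
    #include <stdio.h>

    static bool test_dirty(void) { return true; } /* pretend the hw dirty bit is set */

    /* Buggy shape: the unbraced if only guards the first statement,
     * so the dirty-bit test runs even when there is no mapping. */
    static int mkclean_buggy(bool has_mapping)
    {
        int ret = 0;

        if (has_mapping)
            ret = 0;          /* walk and clean the file mappings... */
        if (test_dirty())     /* oops: not inside if (has_mapping) */
            ret = 1;
        return ret;
    }

    /* Fixed shape, as in the patch: braces pull the dirty-bit test
     * inside the has_mapping block. */
    static int mkclean_fixed(bool has_mapping)
    {
        int ret = 0;

        if (has_mapping) {
            ret = 0;          /* walk and clean the file mappings... */
            if (test_dirty())
                ret = 1;
        }
        return ret;
    }

    int main(void)
    {
        printf("unmapped page, buggy: %d\n", mkclean_buggy(false)); /* 1, wrong */
        printf("unmapped page, fixed: %d\n", mkclean_fixed(false)); /* 0 */
        return 0;
    }
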
diff --git a/mm/shmem.c b/mm/shmem.c
index 253d205914b..51b3d6ccdda 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1072,7 +1072,7 @@ shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
pvma.vm_pgoff = idx;
pvma.vm_end = PAGE_SIZE;
- page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
+ page = alloc_page_vma(gfp, &pvma, 0);
mpol_free(pvma.vm_policy);
return page;
}
@@ -1093,7 +1093,7 @@ shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
static inline struct page *
shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
{
- return alloc_page(gfp | __GFP_ZERO);
+ return alloc_page(gfp);
}
#endif
@@ -1306,6 +1306,7 @@ repeat:
info->alloced++;
spin_unlock(&info->lock);
+ clear_highpage(filepage);
flush_dcache_page(filepage);
SetPageUptodate(filepage);
}
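
Note: the shmem.c change defers zeroing: rather than requesting pre-zeroed memory with __GFP_ZERO at allocation time, shmem now clears the page with clear_highpage() only at the point where a freshly allocated page is about to be marked uptodate. A userspace analogue of the two shapes, using calloc() versus malloc()+memset() as stand-ins for __GFP_ZERO versus clear_highpage():

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096 /* illustrative page size */

    int main(void)
    {
        /* Before: zeroed by the allocator (the __GFP_ZERO analogue). */
        unsigned char *a = calloc(1, PAGE_SIZE);

        /* After: plain allocation, cleared explicitly just before the
         * buffer is published (the clear_highpage() analogue). */
        unsigned char *b = malloc(PAGE_SIZE);
        assert(a && b);
        memset(b, 0, PAGE_SIZE);

        /* Both paths hand out a fully zeroed page. */
        assert(memcmp(a, b, PAGE_SIZE) == 0);
        free(a);
        free(b);
        return 0;
    }

Functionally both yield a zeroed page; the patch only moves where the clearing happens, onto the one path that instantiates a brand-new page.
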
diff --git a/mm/slab.c b/mm/slab.c
index c31cd3682a0..202465a193c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2881,6 +2881,8 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
unsigned int objnr;
struct slab *slabp;
+ BUG_ON(virt_to_cache(objp) != cachep);
+
objp -= obj_offset(cachep);
kfree_debugcheck(objp);
page = virt_to_head_page(objp);
@@ -3759,8 +3761,6 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
unsigned long flags;
- BUG_ON(virt_to_cache(objp) != cachep);
-
local_irq_save(flags);
debug_check_no_locks_freed(objp, obj_size(cachep));
__cache_free(cachep, objp);
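
Note: the slab.c change moves the ownership check BUG_ON(virt_to_cache(objp) != cachep) out of kmem_cache_free(), where it ran on every free, and into cache_free_debugcheck(), which is only compiled in when slab debugging is enabled, so production builds skip the lookup entirely. A self-contained sketch of that debug-only-assertion pattern; DEBUG_SLAB and all other names below are stand-ins, not the kernel's symbols.

    #include <assert.h>
    #include <stdio.h>

    struct cache { const char *name; };

    struct object {
        struct cache *owner;  /* recorded at allocation time */
        char payload[32];
    };

    #ifdef DEBUG_SLAB
    /* Debug builds verify the object really belongs to this cache,
     * mirroring the relocated BUG_ON(virt_to_cache(objp) != cachep). */
    static void free_debugcheck(struct cache *c, struct object *o)
    {
        assert(o->owner == c);
    }
    #else
    #define free_debugcheck(c, o) do { } while (0)
    #endif

    static void cache_free(struct cache *c, struct object *o)
    {
        free_debugcheck(c, o); /* compiles away without -DDEBUG_SLAB */
        /* ...return o to c's free list... */
        (void)c;
        (void)o;
    }

    int main(void)
    {
        struct cache files = { "files" };
        struct object o = { &files, { 0 } };

        cache_free(&files, &o);
        puts("freed");
        return 0;
    }
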
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 22620f6a976..cd75b21dd4c 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -34,6 +34,16 @@
* or to back the page tables that are used to create the mapping.
* Uses the main allocators if they are available, else bootmem.
*/
+
+static void * __init_refok __earlyonly_bootmem_alloc(int node,
+ unsigned long size,
+ unsigned long align,
+ unsigned long goal)
+{
+ return __alloc_bootmem_node(NODE_DATA(node), size, align, goal);
+}
+
+
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
/* If the main allocator is up use that, fallback to bootmem. */
@@ -44,7 +54,7 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
return page_address(page);
return NULL;
} else
- return __alloc_bootmem_node(NODE_DATA(node), size, size,
+ return __earlyonly_bootmem_alloc(node, size, size,
__pa(MAX_DMA_ADDRESS));
}
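
Note: the sparse-vmemmap.c change routes the bootmem call through a new wrapper, __earlyonly_bootmem_alloc(), marked __init_refok so that modpost treats this reference from __meminit code into the early-boot allocator as intentional rather than flagging a section mismatch. A rough userspace sketch of the underlying idea, a single audited gateway into early-only code; every name below is illustrative.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdlib.h>

    static bool early_boot = true; /* stand-in for "slab not yet available" */

    /* Stand-in for __alloc_bootmem_node(): only valid during startup. */
    static void *early_alloc(size_t size)
    {
        assert(early_boot);
        return malloc(size);
    }

    /* The single sanctioned call site into the early allocator,
     * analogous to the __init_refok wrapper in the patch. */
    static void *earlyonly_alloc(size_t size)
    {
        return early_alloc(size);
    }

    /* Analogue of vmemmap_alloc_block(): use the main allocator once
     * it is up, else fall back through the gateway. */
    static void *alloc_block(size_t size)
    {
        if (!early_boot)
            return malloc(size);
        return earlyonly_alloc(size);
    }

    int main(void)
    {
        void *p = alloc_block(64);   /* early path */
        early_boot = false;
        void *q = alloc_block(64);   /* normal path */
        free(p);
        free(q);
        return 0;
    }
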