Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c         32
-rw-r--r--  mm/madvise.c          3
-rw-r--r--  mm/memory_hotplug.c  14
-rw-r--r--  mm/mempolicy.c        1
-rw-r--r--  mm/migrate.c         11
-rw-r--r--  mm/oom_kill.c        71
-rw-r--r--  mm/page_alloc.c      42
-rw-r--r--  mm/shmem.c            3
-rw-r--r--  mm/slab.c            51
-rw-r--r--  mm/slob.c            10
-rw-r--r--  mm/sparse.c          16
-rw-r--r--  mm/vmscan.c           2
12 files changed, 170 insertions, 86 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 3ef20739e72..fd57442186c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -697,6 +697,38 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
return ret;
}
+/**
+ * find_get_pages_contig - gang contiguous pagecache lookup
+ * @mapping: The address_space to search
+ * @index: The starting page index
+ * @nr_pages: The maximum number of pages
+ * @pages: Where the resulting pages are placed
+ *
+ * find_get_pages_contig() works exactly like find_get_pages(), except
+ * that the returned pages are guaranteed to be contiguous.
+ *
+ * find_get_pages_contig() returns the number of pages which were found.
+ */
+unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
+ unsigned int nr_pages, struct page **pages)
+{
+ unsigned int i;
+ unsigned int ret;
+
+ read_lock_irq(&mapping->tree_lock);
+ ret = radix_tree_gang_lookup(&mapping->page_tree,
+ (void **)pages, index, nr_pages);
+ for (i = 0; i < ret; i++) {
+ if (pages[i]->mapping == NULL || pages[i]->index != index)
+ break;
+
+ page_cache_get(pages[i]);
+ index++;
+ }
+ read_unlock_irq(&mapping->tree_lock);
+ return i;
+}
+
/*
* Like find_get_pages, except we only return pages which are tagged with
* `tag'. We update *index to index the next page for the traversal.
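A minimal caller sketch for the new helper (not part of the patch; do_something_with() is a hypothetical placeholder). Each page returned by find_get_pages_contig() carries a reference taken via page_cache_get(), so the caller drops it with page_cache_release() when done:

	struct page *pages[16];
	unsigned int nr, i;

	nr = find_get_pages_contig(mapping, index, 16, pages);
	for (i = 0; i < nr; i++) {
		/* pages[i] is the page at index + i */
		do_something_with(pages[i]);		/* hypothetical */
		page_cache_release(pages[i]);		/* drop gang-lookup ref */
	}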
diff --git a/mm/madvise.c b/mm/madvise.c
index af3d573b014..4e196155a0c 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -168,6 +168,9 @@ static long madvise_remove(struct vm_area_struct *vma,
return -EINVAL;
}
+ if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
+ return -EACCES;
+
mapping = vma->vm_file->f_mapping;
offset = (loff_t)(start - vma->vm_start)
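Seen from userspace, the new check means MADV_REMOVE only succeeds on a mapping that is both shared and writable; anything else now fails with EACCES. A hedged sketch (fd and len are hypothetical):

	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);	/* shared + writable */
	if (madvise(p, len, MADV_REMOVE) != 0)
		perror("madvise");	/* EACCES for private or read-only mappings */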
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1fe76d963ac..70df5c0d957 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -69,12 +69,16 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) {
err = __add_section(zone, phys_start_pfn + i);
- if (err)
+ /* We want to keep adding the rest of the
+ * sections if the first ones already exist
+ */
+ if (err && (err != -EEXIST))
break;
}
return err;
}
+EXPORT_SYMBOL_GPL(__add_pages);
static void grow_zone_span(struct zone *zone,
unsigned long start_pfn, unsigned long end_pfn)
@@ -87,8 +91,8 @@ static void grow_zone_span(struct zone *zone,
if (start_pfn < zone->zone_start_pfn)
zone->zone_start_pfn = start_pfn;
- if (end_pfn > old_zone_end_pfn)
- zone->spanned_pages = end_pfn - zone->zone_start_pfn;
+ zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
+ zone->zone_start_pfn;
zone_span_writeunlock(zone);
}
@@ -102,8 +106,8 @@ static void grow_pgdat_span(struct pglist_data *pgdat,
if (start_pfn < pgdat->node_start_pfn)
pgdat->node_start_pfn = start_pfn;
- if (end_pfn > old_pgdat_end_pfn)
- pgdat->node_spanned_pages = end_pfn - pgdat->node_start_pfn;
+ pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
+ pgdat->node_start_pfn;
}
int online_pages(unsigned long pfn, unsigned long nr_pages)
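A worked instance of the span arithmetic above, with hypothetical pfn values:

	/*
	 * Zone currently spans pfns [4096, 8192); a section covering
	 * [7168, 9216) is hot-added:
	 *
	 *	spanned_pages = max(8192, 9216) - 4096 = 5120
	 *
	 * Re-adding an already-covered range picks the old end and leaves
	 * the span unchanged -- the same result the old
	 * "if (end_pfn > old_end)" form produced, in one expression.
	 */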
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index dec8249e972..8778f58880c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1761,7 +1761,6 @@ static void gather_stats(struct page *page, void *private, int pte_dirty)
md->mapcount_max = count;
md->node[page_to_nid(page)]++;
- cond_resched();
}
#ifdef CONFIG_HUGETLB_PAGE
diff --git a/mm/migrate.c b/mm/migrate.c
index d444229f259..1c25040693d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -439,6 +439,17 @@ redo:
goto unlock_both;
}
+ /* Make sure the dirty bit is up to date */
+ if (try_to_unmap(page, 1) == SWAP_FAIL) {
+ rc = -EPERM;
+ goto unlock_both;
+ }
+
+ if (page_mapcount(page)) {
+ rc = -EAGAIN;
+ goto unlock_both;
+ }
+
/*
* Default handling if a filesystem does not provide
* a migration function. We can only migrate clean
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 78747afad6b..042e6436c3e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -46,15 +46,25 @@
unsigned long badness(struct task_struct *p, unsigned long uptime)
{
unsigned long points, cpu_time, run_time, s;
- struct list_head *tsk;
+ struct mm_struct *mm;
+ struct task_struct *child;
- if (!p->mm)
+ task_lock(p);
+ mm = p->mm;
+ if (!mm) {
+ task_unlock(p);
return 0;
+ }
/*
* The memory size of the process is the basis for the badness.
*/
- points = p->mm->total_vm;
+ points = mm->total_vm;
+
+ /*
+ * After this unlock we can no longer dereference local variable `mm'
+ */
+ task_unlock(p);
/*
* Processes which fork a lot of child processes are likely
@@ -64,11 +74,11 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
* child is eating the vast majority of memory, adding only half
* to the parents will make the child our kill candidate of choice.
*/
- list_for_each(tsk, &p->children) {
- struct task_struct *chld;
- chld = list_entry(tsk, struct task_struct, sibling);
- if (chld->mm != p->mm && chld->mm)
- points += chld->mm->total_vm/2 + 1;
+ list_for_each_entry(child, &p->children, sibling) {
+ task_lock(child);
+ if (child->mm != mm && child->mm)
+ points += child->mm->total_vm/2 + 1;
+ task_unlock(child);
}
/*
@@ -244,17 +254,24 @@ static void __oom_kill_task(task_t *p, const char *message)
force_sig(SIGKILL, p);
}
-static struct mm_struct *oom_kill_task(task_t *p, const char *message)
+static int oom_kill_task(task_t *p, const char *message)
{
- struct mm_struct *mm = get_task_mm(p);
+ struct mm_struct *mm;
task_t * g, * q;
- if (!mm)
- return NULL;
- if (mm == &init_mm) {
- mmput(mm);
- return NULL;
- }
+ mm = p->mm;
+
+ /* WARNING: mm may not be dereferenced since we did not obtain its
+ * value from get_task_mm(p). This is OK since all we need to do is
+ * compare mm to q->mm below.
+ *
+ * Furthermore, even if mm contains a non-NULL value, p->mm may
+ * change to NULL at any time since we do not hold task_lock(p).
+ * However, this is of no concern to us.
+ */
+
+ if (mm == NULL || mm == &init_mm)
+ return 1;
__oom_kill_task(p, message);
/*
@@ -266,13 +283,12 @@ static struct mm_struct *oom_kill_task(task_t *p, const char *message)
__oom_kill_task(q, message);
while_each_thread(g, q);
- return mm;
+ return 0;
}
-static struct mm_struct *oom_kill_process(struct task_struct *p,
- unsigned long points, const char *message)
+static int oom_kill_process(struct task_struct *p, unsigned long points,
+ const char *message)
{
- struct mm_struct *mm;
struct task_struct *c;
struct list_head *tsk;
@@ -283,9 +299,8 @@ static struct mm_struct *oom_kill_process(struct task_struct *p,
c = list_entry(tsk, struct task_struct, sibling);
if (c->mm == p->mm)
continue;
- mm = oom_kill_task(c, message);
- if (mm)
- return mm;
+ if (!oom_kill_task(c, message))
+ return 0;
}
return oom_kill_task(p, message);
}
@@ -300,7 +315,6 @@ static struct mm_struct *oom_kill_process(struct task_struct *p,
*/
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
{
- struct mm_struct *mm = NULL;
task_t *p;
unsigned long points = 0;
@@ -320,12 +334,12 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
*/
switch (constrained_alloc(zonelist, gfp_mask)) {
case CONSTRAINT_MEMORY_POLICY:
- mm = oom_kill_process(current, points,
+ oom_kill_process(current, points,
"No available memory (MPOL_BIND)");
break;
case CONSTRAINT_CPUSET:
- mm = oom_kill_process(current, points,
+ oom_kill_process(current, points,
"No available memory in cpuset");
break;
@@ -347,8 +361,7 @@ retry:
panic("Out of memory and no killable processes...\n");
}
- mm = oom_kill_process(p, points, "Out of memory");
- if (!mm)
+ if (oom_kill_process(p, points, "Out of memory"))
goto retry;
break;
@@ -357,8 +370,6 @@ retry:
out:
read_unlock(&tasklist_lock);
cpuset_unlock();
- if (mm)
- mmput(mm);
/*
* Give "p" a good chance of killing itself before we
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 97d6827c7d6..253a450c400 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -39,6 +39,7 @@
#include <linux/mempolicy.h>
#include <asm/tlbflush.h>
+#include <asm/div64.h>
#include "internal.h"
/*
@@ -232,11 +233,13 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
* zone->lock is already acquired when we use these.
* So, we don't need atomic page->flags operations here.
*/
-static inline unsigned long page_order(struct page *page) {
+static inline unsigned long page_order(struct page *page)
+{
return page_private(page);
}
-static inline void set_page_order(struct page *page, int order) {
+static inline void set_page_order(struct page *page, int order)
+{
set_page_private(page, order);
__SetPageBuddy(page);
}
@@ -299,9 +302,9 @@ static inline int page_is_buddy(struct page *page, int order)
if (PageBuddy(page) && page_order(page) == order) {
BUG_ON(page_count(page) != 0);
- return 1;
+ return 1;
}
- return 0;
+ return 0;
}
/*
@@ -948,7 +951,7 @@ restart:
goto got_pg;
do {
- if (cpuset_zone_allowed(*z, gfp_mask))
+ if (cpuset_zone_allowed(*z, gfp_mask|__GFP_HARDWALL))
wakeup_kswapd(*z, order);
} while (*(++z));
@@ -967,7 +970,8 @@ restart:
alloc_flags |= ALLOC_HARDER;
if (gfp_mask & __GFP_HIGH)
alloc_flags |= ALLOC_HIGH;
- alloc_flags |= ALLOC_CPUSET;
+ if (wait)
+ alloc_flags |= ALLOC_CPUSET;
/*
* Go through the zonelist again. Let __GFP_HIGH and allocations
@@ -1960,7 +1964,7 @@ static inline void free_zone_pagesets(int cpu)
}
}
-static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
+static int pageset_cpuup_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
@@ -2121,14 +2125,22 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
/* ia64 gets its own node_mem_map, before this, without bootmem */
if (!pgdat->node_mem_map) {
- unsigned long size;
+ unsigned long size, start, end;
struct page *map;
- size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
+ /*
+ * The zone's endpoints aren't required to be MAX_ORDER
+ * aligned but the node_mem_map endpoints must be in order
+ * for the buddy allocator to function correctly.
+ */
+ start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
+ end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+ end = ALIGN(end, MAX_ORDER_NR_PAGES);
+ size = (end - start) * sizeof(struct page);
map = alloc_remap(pgdat->node_id, size);
if (!map)
map = alloc_bootmem_node(pgdat, size);
- pgdat->node_mem_map = map;
+ pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
}
#ifdef CONFIG_FLATMEM
/*
@@ -2564,9 +2576,11 @@ void setup_per_zone_pages_min(void)
}
for_each_zone(zone) {
- unsigned long tmp;
+ u64 tmp;
+
spin_lock_irqsave(&zone->lru_lock, flags);
- tmp = (pages_min * zone->present_pages) / lowmem_pages;
+ tmp = (u64)pages_min * zone->present_pages;
+ do_div(tmp, lowmem_pages);
if (is_highmem(zone)) {
/*
* __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -2593,8 +2607,8 @@ void setup_per_zone_pages_min(void)
zone->pages_min = tmp;
}
- zone->pages_low = zone->pages_min + tmp / 4;
- zone->pages_high = zone->pages_min + tmp / 2;
+ zone->pages_low = zone->pages_min + (tmp >> 2);
+ zone->pages_high = zone->pages_min + (tmp >> 1);
spin_unlock_irqrestore(&zone->lru_lock, flags);
}
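A worked instance of the node_mem_map rounding added above, with hypothetical numbers and assuming MAX_ORDER_NR_PAGES == 1024:

	/* node_start_pfn = 1000, node_spanned_pages = 3000 */
	start = 1000 & ~(1024 - 1);		/* ->    0 */
	end   = ALIGN(1000 + 3000, 1024);	/* -> 4096 */
	size  = (4096 - 0) * sizeof(struct page);
	/*
	 * node_mem_map = map + (1000 - 0), so pfn_to_page() still hits the
	 * right struct page while the map itself stays MAX_ORDER aligned
	 * for the buddy allocator.
	 */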
diff --git a/mm/shmem.c b/mm/shmem.c
index 37eaf42ed2c..4c5e68e4e9a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -46,6 +46,8 @@
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
+#include <linux/migrate.h>
+
#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>
@@ -2173,6 +2175,7 @@ static struct address_space_operations shmem_aops = {
.prepare_write = shmem_prepare_write,
.commit_write = simple_commit_write,
#endif
+ .migratepage = migrate_page,
};
static struct file_operations shmem_file_operations = {
diff --git a/mm/slab.c b/mm/slab.c
index e6ef9bd5233..f1b644eb39d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -207,11 +207,6 @@ typedef unsigned int kmem_bufctl_t;
#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2)
#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
-/* Max number of objs-per-slab for caches which use off-slab slabs.
- * Needed to avoid a possible looping condition in cache_grow().
- */
-static unsigned long offslab_limit;
-
/*
* struct slab
*
@@ -700,6 +695,14 @@ static enum {
FULL
} g_cpucache_up;
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+ return g_cpucache_up == FULL;
+}
+
static DEFINE_PER_CPU(struct work_struct, reap_work);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
@@ -979,7 +982,8 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
* That way we could avoid the overhead of putting the objects
* into the free lists and getting them back later.
*/
- transfer_objects(rl3->shared, ac, ac->limit);
+ if (rl3->shared)
+ transfer_objects(rl3->shared, ac, ac->limit);
free_block(cachep, ac->entry, ac->avail, node);
ac->avail = 0;
@@ -1036,7 +1040,7 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
#endif
-static int __devinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
@@ -1347,12 +1351,6 @@ void __init kmem_cache_init(void)
NULL, NULL);
}
- /* Inc off-slab bufctl limit until the ceiling is hit. */
- if (!(OFF_SLAB(sizes->cs_cachep))) {
- offslab_limit = sizes->cs_size - sizeof(struct slab);
- offslab_limit /= sizeof(kmem_bufctl_t);
- }
-
sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
@@ -1771,6 +1769,7 @@ static void set_up_list3s(struct kmem_cache *cachep, int index)
static size_t calculate_slab_order(struct kmem_cache *cachep,
size_t size, size_t align, unsigned long flags)
{
+ unsigned long offslab_limit;
size_t left_over = 0;
int gfporder;
@@ -1782,9 +1781,18 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
if (!num)
continue;
- /* More than offslab_limit objects will cause problems */
- if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
- break;
+ if (flags & CFLGS_OFF_SLAB) {
+ /*
+ * Max number of objs-per-slab for caches which
+ * use off-slab slabs. Needed to avoid a possible
+ * looping condition in cache_grow().
+ */
+ offslab_limit = size - sizeof(struct slab);
+ offslab_limit /= sizeof(kmem_bufctl_t);
+
+ if (num > offslab_limit)
+ break;
+ }
/* Found something acceptable - save it away */
cachep->num = num;
@@ -2191,11 +2199,14 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
check_irq_on();
for_each_online_node(node) {
l3 = cachep->nodelists[node];
- if (l3) {
+ if (l3 && l3->alien)
+ drain_alien_cache(cachep, l3->alien);
+ }
+
+ for_each_online_node(node) {
+ l3 = cachep->nodelists[node];
+ if (l3)
drain_array(cachep, l3, l3->shared, 1, node);
- if (l3->alien)
- drain_alien_cache(cachep, l3->alien);
- }
}
}
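A worked instance of the per-cache limit now computed inside calculate_slab_order(), with hypothetical sizes (assuming sizeof(struct slab) == 32 and sizeof(kmem_bufctl_t) == 4):

	/*
	 * size == 4096:
	 *	offslab_limit = (4096 - 32) / 4 = 1016
	 * so any gfporder that would pack more than 1016 objects into one
	 * off-slab slab is rejected, replacing the old global offslab_limit
	 * that was captured once during kmem_cache_init().
	 */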
diff --git a/mm/slob.c b/mm/slob.c
index 9bcc7e2cabf..a68255ba455 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -354,9 +354,7 @@ void *__alloc_percpu(size_t size)
if (!pdata)
return NULL;
- for (i = 0; i < NR_CPUS; i++) {
- if (!cpu_possible(i))
- continue;
+ for_each_possible_cpu(i) {
pdata->ptrs[i] = kmalloc(size, GFP_KERNEL);
if (!pdata->ptrs[i])
goto unwind_oom;
@@ -383,11 +381,9 @@ free_percpu(const void *objp)
int i;
struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp);
- for (i = 0; i < NR_CPUS; i++) {
- if (!cpu_possible(i))
- continue;
+ for_each_possible_cpu(i)
kfree(p->ptrs[i]);
- }
+
kfree(p);
}
EXPORT_SYMBOL(free_percpu);
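For context, the usual caller side of the per-cpu allocator touched above; a hedged sketch (struct foo_stats and its count field are hypothetical):

	struct foo_stats *stats = alloc_percpu(struct foo_stats);
	int cpu;

	if (!stats)
		return -ENOMEM;
	for_each_possible_cpu(cpu)
		per_cpu_ptr(stats, cpu)->count = 0;
	/* ... use the per-cpu counters ... */
	free_percpu(stats);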
diff --git a/mm/sparse.c b/mm/sparse.c
index 0a51f36ba3a..100040c0dfb 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -32,7 +32,10 @@ static struct mem_section *sparse_index_alloc(int nid)
unsigned long array_size = SECTIONS_PER_ROOT *
sizeof(struct mem_section);
- section = alloc_bootmem_node(NODE_DATA(nid), array_size);
+ if (slab_is_available())
+ section = kmalloc_node(array_size, GFP_KERNEL, nid);
+ else
+ section = alloc_bootmem_node(NODE_DATA(nid), array_size);
if (section)
memset(section, 0, array_size);
@@ -84,11 +87,8 @@ int __section_nr(struct mem_section* ms)
unsigned long root_nr;
struct mem_section* root;
- for (root_nr = 0;
- root_nr < NR_MEM_SECTIONS;
- root_nr += SECTIONS_PER_ROOT) {
- root = __nr_to_section(root_nr);
-
+ for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
+ root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
if (!root)
continue;
@@ -281,9 +281,9 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
ret = sparse_init_one_section(ms, section_nr, memmap);
- if (ret <= 0)
- __kfree_section_memmap(memmap, nr_pages);
out:
pgdat_resize_unlock(pgdat, &flags);
+ if (ret <= 0)
+ __kfree_section_memmap(memmap, nr_pages);
return ret;
}
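A worked view of the __section_nr() loop change above, with hypothetical configuration values (SECTIONS_PER_ROOT == 256, NR_MEM_SECTIONS == 4096, hence NR_SECTION_ROOTS == 16):

	/*
	 * Old loop: root_nr = 0, 256, 512, ..., 3840, root = __nr_to_section(root_nr)
	 * New loop: root_nr = 0, 1, 2, ..., 15,       root = __nr_to_section(root_nr * 256)
	 *
	 * Both visit the same 16 roots; the bound is now expressed in
	 * roots (NR_SECTION_ROOTS) instead of sections (NR_MEM_SECTIONS).
	 */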
diff --git a/mm/vmscan.c b/mm/vmscan.c
index acdf001d694..4649a63a8cb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1328,7 +1328,7 @@ repeat:
not required for correctness. So if the last cpu in a node goes
away, we get changed to run anywhere: as the first one comes back,
restore their cpu bindings. */
-static int __devinit cpu_callback(struct notifier_block *nfb,
+static int cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
pg_data_t *pgdat;