Diffstat (limited to 'mm')
-rw-r--r-- | mm/mempolicy.c  |  1
-rw-r--r-- | mm/oom_kill.c   | 71
-rw-r--r-- | mm/page_alloc.c | 12
-rw-r--r-- | mm/shmem.c      |  3
-rw-r--r-- | mm/slab.c       |  2
-rw-r--r-- | mm/slob.c       | 10
-rw-r--r-- | mm/vmscan.c     |  2
7 files changed, 56 insertions, 45 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index dec8249e972..8778f58880c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1761,7 +1761,6 @@ static void gather_stats(struct page *page, void *private, int pte_dirty)
 		md->mapcount_max = count;
 
 	md->node[page_to_nid(page)]++;
-	cond_resched();
 }
 
 #ifdef CONFIG_HUGETLB_PAGE
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 78747afad6b..042e6436c3e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -46,15 +46,25 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 {
 	unsigned long points, cpu_time, run_time, s;
-	struct list_head *tsk;
+	struct mm_struct *mm;
+	struct task_struct *child;
 
-	if (!p->mm)
+	task_lock(p);
+	mm = p->mm;
+	if (!mm) {
+		task_unlock(p);
 		return 0;
+	}
 
 	/*
 	 * The memory size of the process is the basis for the badness.
 	 */
-	points = p->mm->total_vm;
+	points = mm->total_vm;
+
+	/*
+	 * After this unlock we can no longer dereference local variable `mm'
+	 */
+	task_unlock(p);
 
 	/*
 	 * Processes which fork a lot of child processes are likely
@@ -64,11 +74,11 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	 * child is eating the vast majority of memory, adding only half
 	 * to the parents will make the child our kill candidate of choice.
 	 */
-	list_for_each(tsk, &p->children) {
-		struct task_struct *chld;
-		chld = list_entry(tsk, struct task_struct, sibling);
-		if (chld->mm != p->mm && chld->mm)
-			points += chld->mm->total_vm/2 + 1;
+	list_for_each_entry(child, &p->children, sibling) {
+		task_lock(child);
+		if (child->mm != mm && child->mm)
+			points += child->mm->total_vm/2 + 1;
+		task_unlock(child);
 	}
 
 	/*
@@ -244,17 +254,24 @@ static void __oom_kill_task(task_t *p, const char *message)
 	force_sig(SIGKILL, p);
 }
 
-static struct mm_struct *oom_kill_task(task_t *p, const char *message)
+static int oom_kill_task(task_t *p, const char *message)
 {
-	struct mm_struct *mm = get_task_mm(p);
+	struct mm_struct *mm;
 	task_t * g, * q;
 
-	if (!mm)
-		return NULL;
-	if (mm == &init_mm) {
-		mmput(mm);
-		return NULL;
-	}
+	mm = p->mm;
+
+	/* WARNING: mm may not be dereferenced since we did not obtain its
+	 * value from get_task_mm(p). This is OK since all we need to do is
+	 * compare mm to q->mm below.
+	 *
+	 * Furthermore, even if mm contains a non-NULL value, p->mm may
+	 * change to NULL at any time since we do not hold task_lock(p).
+	 * However, this is of no concern to us.
+	 */
+
+	if (mm == NULL || mm == &init_mm)
+		return 1;
 
 	__oom_kill_task(p, message);
 	/*
@@ -266,13 +283,12 @@ static struct mm_struct *oom_kill_task(task_t *p, const char *message)
 			__oom_kill_task(q, message);
 	while_each_thread(g, q);
 
-	return mm;
+	return 0;
 }
 
-static struct mm_struct *oom_kill_process(struct task_struct *p,
-				unsigned long points, const char *message)
+static int oom_kill_process(struct task_struct *p, unsigned long points,
+						const char *message)
 {
-	struct mm_struct *mm;
 	struct task_struct *c;
 	struct list_head *tsk;
@@ -283,9 +299,8 @@ static struct mm_struct *oom_kill_process(struct task_struct *p,
 		c = list_entry(tsk, struct task_struct, sibling);
 		if (c->mm == p->mm)
 			continue;
-		mm = oom_kill_task(c, message);
-		if (mm)
-			return mm;
+		if (!oom_kill_task(c, message))
+			return 0;
 	}
 	return oom_kill_task(p, message);
 }
@@ -300,7 +315,6 @@ static struct mm_struct *oom_kill_process(struct task_struct *p,
  */
 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
 {
-	struct mm_struct *mm = NULL;
 	task_t *p;
 	unsigned long points = 0;
 
@@ -320,12 +334,12 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
 	 */
 	switch (constrained_alloc(zonelist, gfp_mask)) {
 	case CONSTRAINT_MEMORY_POLICY:
-		mm = oom_kill_process(current, points,
+		oom_kill_process(current, points,
 				"No available memory (MPOL_BIND)");
 		break;
 
 	case CONSTRAINT_CPUSET:
-		mm = oom_kill_process(current, points,
+		oom_kill_process(current, points,
 				"No available memory in cpuset");
 		break;
 
@@ -347,8 +361,7 @@ retry:
 			panic("Out of memory and no killable processes...\n");
 		}
 
-		mm = oom_kill_process(p, points, "Out of memory");
-		if (!mm)
+		if (oom_kill_process(p, points, "Out of memory"))
 			goto retry;
 
 		break;
@@ -357,8 +370,6 @@ retry:
 out:
 	read_unlock(&tasklist_lock);
 	cpuset_unlock();
-	if (mm)
-		mmput(mm);
 
 	/*
 	 * Give "p" a good chance of killing itself before we
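[Note: the oom_kill.c hunks above all enforce one locking rule: p->mm may only be dereferenced while holding task_lock(p) (or after taking a reference via get_task_mm()), since exit_mm() can clear it at any moment. A minimal sketch of the pattern badness() now uses -- points_for() is an invented name, not a kernel function:

#include <linux/sched.h>

/*
 * Illustrative only: snapshot p->mm under task_lock(), copy out the
 * one field we need, then drop the lock.  After task_unlock() the
 * saved pointer may still be compared against other tasks' ->mm,
 * but must never be dereferenced again.
 */
static unsigned long points_for(struct task_struct *p)
{
	struct mm_struct *mm;
	unsigned long points;

	task_lock(p);			/* pins p->mm against exit_mm() */
	mm = p->mm;
	if (!mm) {
		task_unlock(p);		/* no address space, no badness */
		return 0;
	}
	points = mm->total_vm;		/* safe: lock still held */
	task_unlock(p);

	return points;			/* `mm' is compare-only from here on */
}
]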
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 97d6827c7d6..ea77c999047 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -232,11 +232,13 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
  * zone->lock is already acquired when we use these.
  * So, we don't need atomic page->flags operations here.
  */
-static inline unsigned long page_order(struct page *page) {
+static inline unsigned long page_order(struct page *page)
+{
 	return page_private(page);
 }
 
-static inline void set_page_order(struct page *page, int order) {
+static inline void set_page_order(struct page *page, int order)
+{
 	set_page_private(page, order);
 	__SetPageBuddy(page);
 }
@@ -299,9 +301,9 @@ static inline int page_is_buddy(struct page *page, int order)
 
 	if (PageBuddy(page) && page_order(page) == order) {
 		BUG_ON(page_count(page) != 0);
-	       return 1;
+		return 1;
 	}
-       return 0;
+	return 0;
 }
@@ -1960,7 +1962,7 @@ static inline void free_zone_pagesets(int cpu)
 	}
 }
 
-static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
+static int pageset_cpuup_callback(struct notifier_block *nfb,
 		unsigned long action,
 		void *hcpu)
 {
diff --git a/mm/shmem.c b/mm/shmem.c
index 37eaf42ed2c..4c5e68e4e9a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -46,6 +46,8 @@
 #include <linux/mempolicy.h>
 #include <linux/namei.h>
 #include <linux/ctype.h>
+#include <linux/migrate.h>
+
 #include <asm/uaccess.h>
 #include <asm/div64.h>
 #include <asm/pgtable.h>
@@ -2173,6 +2175,7 @@ static struct address_space_operations shmem_aops = {
 	.prepare_write	= shmem_prepare_write,
 	.commit_write	= simple_commit_write,
 #endif
+	.migratepage	= migrate_page,
 };
 
 static struct file_operations shmem_file_operations = {
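[Note: the shmem.c hunk is the one functional change on this page: plugging the generic migrate_page() helper into shmem's address_space_operations makes tmpfs pages movable by the page-migration code. Any mapping whose pages carry no buffer/private state can opt in the same way; a sketch under that assumption, with examplefs_aops as a made-up name:

#include <linux/fs.h>
#include <linux/migrate.h>

/*
 * Hypothetical filesystem opting in to page migration.  Pages with
 * no buffer/private state need no custom callback: the generic
 * migrate_page() copies the page contents and retargets the mapping.
 */
static struct address_space_operations examplefs_aops = {
	.readpage	= simple_readpage,	/* generic helper from fs/libfs.c */
	.migratepage	= migrate_page,		/* same hook shmem registers above */
};
]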
diff --git a/mm/slab.c b/mm/slab.c
index e6ef9bd5233..af5c5237e11 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1036,7 +1036,7 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 
 #endif
 
-static int __devinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
diff --git a/mm/slob.c b/mm/slob.c
index 9bcc7e2cabf..a68255ba455 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -354,9 +354,7 @@ void *__alloc_percpu(size_t size)
 	if (!pdata)
 		return NULL;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
+	for_each_possible_cpu(i) {
 		pdata->ptrs[i] = kmalloc(size, GFP_KERNEL);
 		if (!pdata->ptrs[i])
 			goto unwind_oom;
@@ -383,11 +381,9 @@ free_percpu(const void *objp)
 	int i;
 	struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp);
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
+	for_each_possible_cpu(i)
 		kfree(p->ptrs[i]);
-	}
+
 	kfree(p);
 }
 EXPORT_SYMBOL(free_percpu);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index acdf001d694..4649a63a8cb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1328,7 +1328,7 @@ repeat:
    not required for correctness.  So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
    restore their cpu bindings. */
-static int __devinit cpu_callback(struct notifier_block *nfb,
+static int cpu_callback(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
 	pg_data_t *pgdat;
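[Note: two housekeeping themes run through the remaining files. The slob.c hunks replace the open-coded NR_CPUS loops with the for_each_possible_cpu() iterator, which visits only possible CPUs by construction; the slab.c, vmscan.c and page_alloc.c hunks drop the __devinit/__cpuinit markers from CPU-hotplug notifier callbacks, since those sections can be discarded after boot while the notifier may still fire. A compressed before/after sketch of the iterator conversion -- free_cpu_ptrs() is an invented example, not kernel code:

#include <linux/cpumask.h>
#include <linux/slab.h>

/* Before: walk every NR_CPUS slot and filter by hand. */
static void free_cpu_ptrs_old(void *ptrs[])
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_possible(i))
			continue;
		kfree(ptrs[i]);
	}
}

/* After: the iterator skips impossible CPUs itself. */
static void free_cpu_ptrs(void *ptrs[])
{
	int i;

	for_each_possible_cpu(i)
		kfree(ptrs[i]);
}
]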