Diffstat (limited to 'mm')
-rw-r--r--   mm/mempool.c     |  2
-rw-r--r--   mm/oom_kill.c    | 10
-rw-r--r--   mm/page_alloc.c  |  8
-rw-r--r--   mm/slab.c        | 15
4 files changed, 18 insertions, 17 deletions
diff --git a/mm/mempool.c b/mm/mempool.c
index 9a72f7d918f..65f2957b8d5 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -205,7 +205,7 @@ void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask)
void *element;
unsigned long flags;
wait_queue_t wait;
- int gfp_temp;
+ unsigned int gfp_temp;
might_sleep_if(gfp_mask & __GFP_WAIT);
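
The gfp mask is a bitmask of allocation flags, so keeping the temporary as unsigned matches the parameter type. A minimal usage sketch, not from this patch, with a hypothetical cache, pool, and init function; it shows a mempool whose elements come from a slab cache and are allocated with a GFP bitmask such as GFP_KERNEL:

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/errno.h>

static kmem_cache_t *my_cache;	/* illustrative: created elsewhere with kmem_cache_create() */
static mempool_t *my_pool;

static void *my_pool_alloc(unsigned int gfp_mask, void *pool_data)
{
	/* pool_data is the slab cache handed to mempool_create() below */
	return kmem_cache_alloc(pool_data, gfp_mask);
}

static void my_pool_free(void *element, void *pool_data)
{
	kmem_cache_free(pool_data, element);
}

static int my_pool_init(void)
{
	/* keep at least four elements preallocated for low-memory situations */
	my_pool = mempool_create(4, my_pool_alloc, my_pool_free, my_cache);
	return my_pool ? 0 : -ENOMEM;
}

static void my_pool_example(void)
{
	void *obj = mempool_alloc(my_pool, GFP_KERNEL);

	if (obj)
		mempool_free(obj, my_pool);
}
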
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 59666d905f1..1e56076672f 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -253,14 +253,16 @@ static struct mm_struct *oom_kill_process(struct task_struct *p)
* OR try to be smart about which process to kill. Note that we
* don't have to be perfect here, we just have to be good.
*/
-void out_of_memory(unsigned int __nocast gfp_mask)
+void out_of_memory(unsigned int __nocast gfp_mask, int order)
{
struct mm_struct *mm = NULL;
task_t * p;
- printk("oom-killer: gfp_mask=0x%x\n", gfp_mask);
- /* print memory stats */
- show_mem();
+ if (printk_ratelimit()) {
+ printk("oom-killer: gfp_mask=0x%x, order=%d\n",
+ gfp_mask, order);
+ show_mem();
+ }
read_lock(&tasklist_lock);
retry:
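
The change wraps the OOM report in printk_ratelimit(), which returns nonzero only while the global printk rate limit has not been exhausted, so back-to-back OOM invocations cannot flood the console. A minimal sketch of the same pattern at a hypothetical warning site (the function and message are illustrative):

#include <linux/kernel.h>

static void report_alloc_pressure(unsigned int gfp_mask, int order)
{
	/* stay silent once the global printk rate limit has been exceeded */
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation pressure: gfp_mask=0x%x, order=%d\n",
			gfp_mask, order);
}
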
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3c9f7f88112..1d6ba6a4b59 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -897,12 +897,6 @@ rebalance:
cond_resched();
if (likely(did_some_progress)) {
- /*
- * Go through the zonelist yet one more time, keep
- * very high watermark here, this is only to catch
- * a parallel oom killing, we must fail if we're still
- * under heavy pressure.
- */
for (i = 0; (z = zones[i]) != NULL; i++) {
if (!zone_watermark_ok(z, order, z->pages_min,
classzone_idx, can_try_harder,
@@ -936,7 +930,7 @@ rebalance:
goto got_pg;
}
- out_of_memory(gfp_mask);
+ out_of_memory(gfp_mask, order);
goto restart;
}
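
The extra argument tells out_of_memory() how large the failing request was: order n means 2^n contiguous pages. A tiny illustrative helper, not part of the patch, that converts an order into bytes, for example for a log message:

#include <asm/page.h>

/* illustrative: an order-n allocation covers PAGE_SIZE << n bytes */
static inline unsigned long order_to_bytes(unsigned int order)
{
	return PAGE_SIZE << order;
}
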
diff --git a/mm/slab.c b/mm/slab.c
index 122d031baab..c9e706db463 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -584,7 +584,8 @@ static inline struct array_cache *ac_data(kmem_cache_t *cachep)
return cachep->array[smp_processor_id()];
}
-static inline kmem_cache_t *__find_general_cachep(size_t size, int gfpflags)
+static inline kmem_cache_t *__find_general_cachep(size_t size,
+ unsigned int __nocast gfpflags)
{
struct cache_sizes *csizep = malloc_sizes;
@@ -608,7 +609,8 @@ static inline kmem_cache_t *__find_general_cachep(size_t size, int gfpflags)
return csizep->cs_cachep;
}
-kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
+kmem_cache_t *kmem_find_general_cachep(size_t size,
+ unsigned int __nocast gfpflags)
{
return __find_general_cachep(size, gfpflags);
}
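
kmem_find_general_cachep() maps an allocation size to the general kmalloc cache that would serve it (the DMA variant when GFP_DMA is set); the patch only changes the flags parameter to the unsigned GFP bitmask type. A hedged caller sketch with an illustrative size:

#include <linux/slab.h>

static kmem_cache_t *cache_for_512(void)
{
	/* the cache kmalloc(512, GFP_KERNEL) would allocate from */
	return kmem_find_general_cachep(512, GFP_KERNEL);
}
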
@@ -2100,7 +2102,7 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags)
#if DEBUG
static void *
cache_alloc_debugcheck_after(kmem_cache_t *cachep,
- unsigned long flags, void *objp, void *caller)
+ unsigned int __nocast flags, void *objp, void *caller)
{
if (!objp)
return objp;
@@ -2372,6 +2374,9 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid)
struct slab *slabp;
kmem_bufctl_t next;
+ if (nodeid == -1)
+ return kmem_cache_alloc(cachep, flags);
+
for (loop = 0;;loop++) {
struct list_head *q;
@@ -2439,7 +2444,7 @@ got_slabp:
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
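
The new nodeid == -1 check means "no node preference": such calls now fall back to an ordinary kmem_cache_alloc() instead of walking the slab lists of a nonexistent node. A small caller sketch (the cache and node variable are illustrative):

#include <linux/slab.h>

static void *alloc_near(kmem_cache_t *cache, int node)
{
	/* node may legitimately be -1 here, meaning "any node" */
	return kmem_cache_alloc_node(cache, GFP_KERNEL, node);
}
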
-void *kmalloc_node(size_t size, int flags, int node)
+void *kmalloc_node(size_t size, unsigned int __nocast flags, int node)
{
kmem_cache_t *cachep;
@@ -3091,7 +3096,7 @@ unsigned int ksize(const void *objp)
* @s: the string to duplicate
* @gfp: the GFP mask used in the kmalloc() call when allocating memory
*/
-char *kstrdup(const char *s, int gfp)
+char *kstrdup(const char *s, unsigned int __nocast gfp)
{
size_t len;
char *buf;
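
kstrdup() allocates with the given GFP mask and copies the string, so the caller owns the result and must kfree() it; kmalloc_node() is the node-aware variant of kmalloc(). A combined usage sketch with illustrative names and sizes:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

static int remember_label(const char *src, int node, char **out)
{
	void *scratch;
	char *copy;

	copy = kstrdup(src, GFP_KERNEL);	/* may sleep */
	if (!copy)
		return -ENOMEM;

	/* node-aware allocation; node == -1 would mean "any node" */
	scratch = kmalloc_node(64, GFP_KERNEL, node);
	if (!scratch) {
		kfree(copy);
		return -ENOMEM;
	}

	kfree(scratch);
	*out = copy;
	return 0;
}
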