author     Jeff Garzik <jeff@garzik.org>   2006-10-11 04:59:46 -0400
committer  Jeff Garzik <jeff@garzik.org>   2006-10-11 04:59:46 -0400
commit     701328a7b58d50d8640c21ba5fdf3170b1ddac16 (patch)
tree       99a3fe44310a97e92ad1cb3a01f2ee3f6ed0d59a /mm
parent     53e36ada37cb8b01cfbf674580a79edc0bb764c7 (diff)
parent     53a5fbdc2dff55161a206ed1a1385a8fa8055c34 (diff)
Merge branch 'master' into upstream-fixes
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c    9
-rw-r--r--  mm/slab.c     35
2 files changed, 27 insertions, 17 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 9cf3f341a28..b5a4aadd961 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1086,6 +1086,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				default:
 					BUG();
 				}
+				cond_resched();
 			}
 			if (pages) {
 				pages[i] = page;
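
The hunk above adds a cond_resched() to the per-page fault loop of get_user_pages(), giving the scheduler a chance to run between pages so that pinning a very large range cannot monopolize the CPU on a non-preemptible kernel. A minimal sketch of the same pattern, with a made-up helper name (hypothetical_fault_in_page is not a kernel function):

static int touch_many_pages(struct page **pages, unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; i++) {
		/* potentially slow per-page work (faults, lookups, ...) */
		pages[i] = hypothetical_fault_in_page(i);
		if (!pages[i])
			return -EFAULT;
		/* yield between iterations so a huge nr cannot stall others */
		cond_resched();
	}
	return 0;
}
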
@@ -2169,11 +2170,13 @@ retry:
 	 * after the next truncate_count read.
 	 */
 
-	/* no page was available -- either SIGBUS or OOM */
-	if (new_page == NOPAGE_SIGBUS)
+	/* no page was available -- either SIGBUS, OOM or REFAULT */
+	if (unlikely(new_page == NOPAGE_SIGBUS))
 		return VM_FAULT_SIGBUS;
-	if (new_page == NOPAGE_OOM)
+	else if (unlikely(new_page == NOPAGE_OOM))
 		return VM_FAULT_OOM;
+	else if (unlikely(new_page == NOPAGE_REFAULT))
+		return VM_FAULT_MINOR;
 
 	/*
 	 * Should we do an early C-O-W break?
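
This second hunk introduces a third sentinel next to NOPAGE_SIGBUS and NOPAGE_OOM: a ->nopage handler may now return NOPAGE_REFAULT, which do_no_page() maps to VM_FAULT_MINOR so the faulting access is simply retried rather than failed. A hedged sketch of how a driver-side handler might use it; the example_* names are made up, only the struct page *(*nopage)(vma, address, type) shape matches the 2.6-era vm_operations API:

static struct page *example_nopage(struct vm_area_struct *vma,
				   unsigned long address, int *type)
{
	/*
	 * Hypothetical driver: if the fault was already resolved out of
	 * band (e.g. the mapping was fixed up elsewhere), ask the VM to
	 * refault instead of handing back a page.
	 */
	if (example_device_fixed_up(vma, address))	/* made-up helper */
		return NOPAGE_REFAULT;
	return NOPAGE_SIGBUS;
}
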
diff --git a/mm/slab.c b/mm/slab.c
index e9a63b5a7fb..266449d604b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1106,15 +1106,18 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	int nodeid = slabp->nodeid;
 	struct kmem_list3 *l3;
 	struct array_cache *alien = NULL;
+	int node;
+
+	node = numa_node_id();
 
 	/*
 	 * Make sure we are not freeing a object from another node to the array
 	 * cache on this cpu.
 	 */
-	if (likely(slabp->nodeid == numa_node_id()))
+	if (likely(slabp->nodeid == node))
 		return 0;
 
-	l3 = cachep->nodelists[numa_node_id()];
+	l3 = cachep->nodelists[node];
 	STATS_INC_NODEFREES(cachep);
 	if (l3->alien && l3->alien[nodeid]) {
 		alien = l3->alien[nodeid];
@@ -1325,7 +1328,6 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
 			int nodeid)
 {
 	struct kmem_list3 *ptr;
 
-	BUG_ON(cachep->nodelists[nodeid] != list);
 	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
 	BUG_ON(!ptr);
@@ -1352,6 +1354,7 @@ void __init kmem_cache_init(void)
 	struct cache_names *names;
 	int i;
 	int order;
+	int node;
 
 	for (i = 0; i < NUM_INIT_LISTS; i++) {
 		kmem_list3_init(&initkmem_list3[i]);
@@ -1386,12 +1389,14 @@ void __init kmem_cache_init(void)
 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
 	 */
 
+	node = numa_node_id();
+
 	/* 1) create the cache_cache */
 	INIT_LIST_HEAD(&cache_chain);
 	list_add(&cache_cache.next, &cache_chain);
 	cache_cache.colour_off = cache_line_size();
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-	cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];
+	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
 
 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
 					cache_line_size());
@@ -1496,19 +1501,18 @@ void __init kmem_cache_init(void)
 	}
 	/* 5) Replace the bootstrap kmem_list3's */
 	{
-		int node;
+		int nid;
+
 		/* Replace the static kmem_list3 structures for the boot cpu */
-		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE],
-			  numa_node_id());
+		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node);
 
-		for_each_online_node(node) {
+		for_each_online_node(nid) {
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
-				  &initkmem_list3[SIZE_AC + node], node);
+				  &initkmem_list3[SIZE_AC + nid], nid);
 
 			if (INDEX_AC != INDEX_L3) {
 				init_list(malloc_sizes[INDEX_L3].cs_cachep,
-					  &initkmem_list3[SIZE_L3 + node],
-					  node);
+					  &initkmem_list3[SIZE_L3 + nid], nid);
 			}
 		}
 	}
@@ -2918,6 +2922,9 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 	int batchcount;
 	struct kmem_list3 *l3;
 	struct array_cache *ac;
+	int node;
+
+	node = numa_node_id();
 
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
@@ -2931,7 +2938,7 @@ retry:
 		 */
 		batchcount = BATCHREFILL_LIMIT;
 	}
-	l3 = cachep->nodelists[numa_node_id()];
+	l3 = cachep->nodelists[node];
 
 	BUG_ON(ac->avail > 0 || !l3);
 	spin_lock(&l3->list_lock);
@@ -2961,7 +2968,7 @@ retry:
 			STATS_SET_HIGH(cachep);
 
 			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
-							    numa_node_id());
+							    node);
 		}
 		check_slabp(cachep, slabp);
@@ -2980,7 +2987,7 @@ alloc_done:
 	if (unlikely(!ac->avail)) {
 		int x;
-		x = cache_grow(cachep, flags, numa_node_id());
+		x = cache_grow(cachep, flags, node);
 
 		/* cache_grow can reenable interrupts, then ac could change. */
 		ac = cpu_cache_get(cachep);
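
Every slab.c hunk in this merge applies the same transformation: numa_node_id() is sampled once into a local (node, or nid for the loop counter inside kmem_cache_init()) and reused, instead of being re-evaluated at each use. These paths run with interrupts off or during early boot, so the node cannot change underneath them. A minimal sketch of the hoisting, using a hypothetical function name rather than the kernel's cache_alloc_refill():

static void refill_from_local_node(struct kmem_cache *cachep)
{
	/* sample the node once and reuse the local thereafter */
	int node = numa_node_id();
	struct kmem_list3 *l3 = cachep->nodelists[node];

	spin_lock(&l3->list_lock);
	/*
	 * ... pull objects from l3's slab lists, passing `node` to the
	 * helpers instead of calling numa_node_id() again ...
	 */
	spin_unlock(&l3->list_lock);
}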