author    David Woodhouse <dwmw2@infradead.org>  2007-08-01 11:23:57 +0100
committer David Woodhouse <dwmw2@infradead.org>  2007-08-01 11:23:57 +0100
commit    440fdb53b4ae58602711b5b8c3a139ace2404dbb (patch)
tree      c6fb88d6ad537ec53aeecadc75a61ab6147d4c9c /mm
parent    8b2b403ce0f1a816b7a6a4f47c8798003b26c07a (diff)
parent    8d4fbcfbe0a4bfc73e7f0297c59ae514e1f1436f (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig       |  4
-rw-r--r--  mm/bounce.c      |  4
-rw-r--r--  mm/filemap.c     |  2
-rw-r--r--  mm/hugetlb.c     |  1
-rw-r--r--  mm/migrate.c     | 24
-rw-r--r--  mm/mmap.c        | 34
-rw-r--r--  mm/oom_kill.c    |  3
-rw-r--r--  mm/page_alloc.c  | 14
-rw-r--r--  mm/slab.c        |  2
-rw-r--r--  mm/slub.c        |  3
-rw-r--r--  mm/swapfile.c    |  6
-rw-r--r--  mm/vmstat.c      |  1
12 files changed, 78 insertions, 20 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 86187221e78..e24d348083c 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -116,11 +116,11 @@ config SPARSEMEM_EXTREME
config MEMORY_HOTPLUG
bool "Allow for memory hot-add"
depends on SPARSEMEM || X86_64_ACPI_NUMA
- depends on HOTPLUG && !SOFTWARE_SUSPEND && ARCH_ENABLE_MEMORY_HOTPLUG
+ depends on HOTPLUG && !HIBERNATION && ARCH_ENABLE_MEMORY_HOTPLUG
depends on (IA64 || X86 || PPC64 || SUPERH)
comment "Memory hotplug is currently incompatible with Software Suspend"
- depends on SPARSEMEM && HOTPLUG && SOFTWARE_SUSPEND
+ depends on SPARSEMEM && HOTPLUG && HIBERNATION
config MEMORY_HOTPLUG_SPARSE
def_bool y
diff --git a/mm/bounce.c b/mm/bounce.c
index ad401fc5744..179fe38a241 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -190,7 +190,7 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
return 0;
}
-static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
+static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
mempool_t *pool)
{
struct page *page;
@@ -275,7 +275,7 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
*bio_orig = bio;
}
-void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
+void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
mempool_t *pool;
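The two bounce.c hunks above are part of a tree-wide cleanup that spells out struct request_queue instead of the old request_queue_t typedef. A minimal before/after sketch, with a hypothetical function name:

    /* Before: the typedef hides that q points to a struct. */
    typedef struct request_queue request_queue_t;
    static void example_fn(request_queue_t *q);      /* hypothetical */

    /* After: the struct keyword is explicit at every use site. */
    static void example_fn(struct request_queue *q); /* hypothetical */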
diff --git a/mm/filemap.c b/mm/filemap.c
index 49a6fe375d0..6cf700d4184 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -843,7 +843,7 @@ static void shrink_readahead_size_eio(struct file *filp,
/**
* do_generic_mapping_read - generic file read routine
* @mapping: address_space to be read
- * @_ra: file's readahead state
+ * @ra: file's readahead state
* @filp: the file to read
* @ppos: current file position
* @desc: read_descriptor
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f127940ec24..d7ca59d66c5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -84,6 +84,7 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
list_del(&page->lru);
free_huge_pages--;
free_huge_pages_node[nid]--;
+ break;
}
}
return page;
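The added break matters for correctness: dequeue_huge_page() scans a list of candidate zones, and without the break a later zone could dequeue a second page that is never returned, leaking it and decrementing the free counters twice. A minimal sketch of the fixed pattern, with hypothetical names:

    struct page *page = NULL, *candidate;

    list_for_each_entry(candidate, &free_list, lru) { /* free_list: hypothetical */
        if (suitable(candidate)) {                    /* hypothetical predicate */
            page = candidate;
            list_del(&page->lru);
            free_count--;  /* accounting must happen exactly once... */
            break;         /* ...so stop at the first match */
        }
    }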
diff --git a/mm/migrate.c b/mm/migrate.c
index 34d8ada053e..37c73b90200 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -49,9 +49,8 @@ int isolate_lru_page(struct page *page, struct list_head *pagelist)
struct zone *zone = page_zone(page);
spin_lock_irq(&zone->lru_lock);
- if (PageLRU(page)) {
+ if (PageLRU(page) && get_page_unless_zero(page)) {
ret = 0;
- get_page(page);
ClearPageLRU(page);
if (PageActive(page))
del_page_from_active_list(zone, page);
@@ -632,18 +631,35 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
goto unlock;
wait_on_page_writeback(page);
}
-
/*
- * Establish migration ptes or remove ptes
+ * After try_to_unmap(), page->mapcount goes down to 0 here. In that case,
+ * we cannot notice that the anon_vma is freed while we migrate a page.
+ * This rcu_read_lock() delays freeing the anon_vma pointer until the end
+ * of migration. File cache pages are no problem because of page_lock().
+ */
+ rcu_read_lock();
+ /*
+ * This handles a corner case:
+ * when a new swap-cache page is read in, it is linked to the LRU
+ * and treated as swapcache, but it has no rmap yet.
+ * Calling try_to_unmap() against a page whose page->mapping is NULL
+ * is a BUG, so handle that case here.
*/
+ if (!page->mapping)
+ goto rcu_unlock;
+ /* Establish migration ptes or remove ptes */
try_to_unmap(page, 1);
+
if (!page_mapped(page))
rc = move_to_new_page(newpage, page);
if (rc)
remove_migration_ptes(page, page);
+rcu_unlock:
+ rcu_read_unlock();
unlock:
+
unlock_page(page);
if (rc != -EAGAIN) {
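Two things change in migrate.c. First, isolate_lru_page() now uses get_page_unless_zero(): a plain get_page() could resurrect a page whose refcount had already hit zero and was being freed, whereas get_page_unless_zero() takes the reference only if the count is still positive. Its core is roughly the following (a simplified sketch of the existing helper in linux/mm.h):

    static inline int get_page_unless_zero(struct page *page)
    {
        /* Atomically: if (_count > 0) { _count++; return 1; } return 0; */
        return atomic_inc_not_zero(&page->_count);
    }

Second, the rcu_read_lock()/rcu_read_unlock() pair keeps the anon_vma from being freed while the page is unmapped and migrated, as the added comments explain.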
diff --git a/mm/mmap.c b/mm/mmap.c
index 7afc7a7cec6..b6537211b9c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1029,6 +1029,40 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
}
EXPORT_SYMBOL(do_mmap_pgoff);
+/*
+ * Some shared mappings will want the pages marked read-only
+ * to track write events. If so, we'll downgrade vm_page_prot
+ * to the private version (using protection_map[] without the
+ * VM_SHARED bit).
+ */
+int vma_wants_writenotify(struct vm_area_struct *vma)
+{
+ unsigned int vm_flags = vma->vm_flags;
+
+ /* If it was private or non-writable, the write bit is already clear */
+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
+ return 0;
+
+ /* The backer wishes to know when pages are first written to? */
+ if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+ return 1;
+
+ /* The open routine did something to the protections already? */
+ if (pgprot_val(vma->vm_page_prot) !=
+ pgprot_val(protection_map[vm_flags &
+ (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
+ return 0;
+
+ /* Specialty mapping? */
+ if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
+ return 0;
+
+ /* Can the mapping track the dirty pages? */
+ return vma->vm_file && vma->vm_file->f_mapping &&
+ mapping_cap_account_dirty(vma->vm_file->f_mapping);
+}
+
+
unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, unsigned long flags,
unsigned int vm_flags, unsigned long pgoff,
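vma_wants_writenotify() is a predicate for the code that finalizes a mapping's vm_page_prot: when it returns 1, the caller indexes protection_map[] without the VM_SHARED bit, so shared pages start out write-protected and the first write faults into page_mkwrite() for dirty tracking. A hedged sketch of such a call site (the exact placement in mmap_region() is an assumption):

    if (vma_wants_writenotify(vma)) {
        /* Drop VM_SHARED from the index: the COW-style entry is
         * read-only, so the first store traps and can be accounted. */
        vma->vm_page_prot =
            protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
    }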
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index a7001410ab1..f9b82ad5047 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -17,6 +17,7 @@
#include <linux/oom.h>
#include <linux/mm.h>
+#include <linux/err.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
@@ -156,7 +157,7 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
}
#ifdef DEBUG
- printk(KERN_DEBUG "OOMkill: task %d (%s) got %d points\n",
+ printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
p->pid, p->comm, points);
#endif
return points;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 40954fb8159..3da85b81dab 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -726,7 +726,7 @@ static void __drain_pages(unsigned int cpu)
}
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_HIBERNATION
void mark_free_pages(struct zone *zone)
{
@@ -772,7 +772,7 @@ void drain_local_pages(void)
__drain_pages(smp_processor_id());
local_irq_restore(flags);
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_HIBERNATION */
/*
* Free a 0-order page
@@ -1350,6 +1350,10 @@ nofail_alloc:
if (page)
goto got_pg;
+ /* The OOM killer will not help higher order allocs so fail */
+ if (order > PAGE_ALLOC_COSTLY_ORDER)
+ goto nopage;
+
out_of_memory(zonelist, gfp_mask, order);
goto restart;
}
@@ -2775,11 +2779,11 @@ unsigned long __meminit __absent_pages_in_range(int nid,
if (i == -1)
return 0;
+ prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
+
/* Account for ranges before physical memory on this node */
if (early_node_map[i].start_pfn > range_start_pfn)
- hole_pages = early_node_map[i].start_pfn - range_start_pfn;
-
- prev_end_pfn = early_node_map[i].start_pfn;
+ hole_pages = prev_end_pfn - range_start_pfn;
/* Find all holes for the zone within the node */
for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
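The min() clamp fixes hole accounting when a node's first memory range begins beyond the queried range. A worked example with assumed numbers:

    /*
     * Query covers pfns [100, 200); the node's first range starts at 250.
     *
     * Old code: hole_pages = 250 - 100 = 150, more pages than the query
     *           even spans (100).
     * New code: prev_end_pfn = min(250, 200) = 200, so
     *           hole_pages  = 200 - 100 = 100: the whole queried range
     *           is a hole, which is the correct answer.
     */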
diff --git a/mm/slab.c b/mm/slab.c
index bde271c001b..a684778b2b4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2776,7 +2776,7 @@ static int cache_grow(struct kmem_cache *cachep,
* 'nodeid'.
*/
if (!objp)
- objp = kmem_getpages(cachep, flags, nodeid);
+ objp = kmem_getpages(cachep, local_flags, nodeid);
if (!objp)
goto failed;
diff --git a/mm/slub.c b/mm/slub.c
index 9b2d6178d06..6c6d74ff069 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1131,6 +1131,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
slab_pad_check(s, page);
for_each_object(p, s, page_address(page))
check_object(s, page, p, 0);
+ ClearSlabDebug(page);
}
mod_zone_page_state(page_zone(page),
@@ -1169,7 +1170,6 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
atomic_long_dec(&n->nr_slabs);
reset_page_mapcount(page);
- ClearSlabDebug(page);
__ClearPageSlab(page);
free_slab(s, page);
}
@@ -1656,6 +1656,7 @@ static void __always_inline slab_free(struct kmem_cache *s,
unsigned long flags;
local_irq_save(flags);
+ debug_check_no_locks_freed(object, s->objsize);
if (likely(page == s->cpu_slab[smp_processor_id()] &&
!SlabDebug(page))) {
object[page->offset] = page->lockless_freelist;
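The debug_check_no_locks_freed() call lets lockdep verify that an object being freed does not contain a lock that is still held, matching the checks the other allocators already do on free. A hypothetical example of the bug class it catches:

    struct foo { spinlock_t lock; };      /* hypothetical object type */
    struct kmem_cache *foo_cache;         /* assumed created elsewhere */

    struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
    spin_lock_init(&f->lock);
    spin_lock(&f->lock);
    /* bug: freeing the object while its embedded lock is held */
    kmem_cache_free(foo_cache, f);        /* lockdep now reports this */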
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 7ff0a81c7b0..f071648e136 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -425,7 +425,7 @@ void free_swap_and_cache(swp_entry_t entry)
}
}
-#ifdef CONFIG_SOFTWARE_SUSPEND
+#ifdef CONFIG_HIBERNATION
/*
* Find the swap type that corresponds to given device (if any).
*
@@ -951,7 +951,7 @@ sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset)
}
}
-#ifdef CONFIG_SOFTWARE_SUSPEND
+#ifdef CONFIG_HIBERNATION
/*
* Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
* corresponding to given index in swap_info (swap type).
@@ -966,7 +966,7 @@ sector_t swapdev_block(int swap_type, pgoff_t offset)
sis = swap_info + swap_type;
return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0;
}
-#endif /* CONFIG_SOFTWARE_SUSPEND */
+#endif /* CONFIG_HIBERNATION */
/*
* Free all of a swapdev's extent information
diff --git a/mm/vmstat.c b/mm/vmstat.c
index fadf791cd7e..c64d169537b 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -10,6 +10,7 @@
*/
#include <linux/mm.h>
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/sched.h>