Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig      |  4
-rw-r--r--  mm/bounce.c     |  4
-rw-r--r--  mm/hugetlb.c    |  1
-rw-r--r--  mm/migrate.c    | 24
-rw-r--r--  mm/mmap.c       | 34
-rw-r--r--  mm/oom_kill.c   |  1
-rw-r--r--  mm/page_alloc.c | 10
-rw-r--r--  mm/slab.c       |  2
-rw-r--r--  mm/swapfile.c   |  6
-rw-r--r--  mm/vmstat.c     |  1
10 files changed, 70 insertions, 17 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 86187221e78..e24d348083c 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -116,11 +116,11 @@ config SPARSEMEM_EXTREME
config MEMORY_HOTPLUG
bool "Allow for memory hot-add"
depends on SPARSEMEM || X86_64_ACPI_NUMA
- depends on HOTPLUG && !SOFTWARE_SUSPEND && ARCH_ENABLE_MEMORY_HOTPLUG
+ depends on HOTPLUG && !HIBERNATION && ARCH_ENABLE_MEMORY_HOTPLUG
depends on (IA64 || X86 || PPC64 || SUPERH)
comment "Memory hotplug is currently incompatible with Software Suspend"
- depends on SPARSEMEM && HOTPLUG && SOFTWARE_SUSPEND
+ depends on SPARSEMEM && HOTPLUG && HIBERNATION
config MEMORY_HOTPLUG_SPARSE
def_bool y
diff --git a/mm/bounce.c b/mm/bounce.c
index ad401fc5744..179fe38a241 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -190,7 +190,7 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
return 0;
}
-static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
+static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
mempool_t *pool)
{
struct page *page;
@@ -275,7 +275,7 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
*bio_orig = bio;
}
-void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
+void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
mempool_t *pool;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f127940ec24..d7ca59d66c5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -84,6 +84,7 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
list_del(&page->lru);
free_huge_pages--;
free_huge_pages_node[nid]--;
+ break;
}
}
return page;
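The one-line hugetlb change above stops the free-list walk once a page has been taken. A simplified sketch of the surrounding zonelist loop (paraphrased from dequeue_huge_page(), details such as the cpuset check omitted) shows why: without the break, later zones could dequeue further pages, losing the one already removed and corrupting the free page counters.

/* Simplified sketch, not the exact mm/hugetlb.c source. */
for (z = zonelist->zones; *z; z++) {
	int nid = zone_to_nid(*z);

	if (!list_empty(&hugepage_freelists[nid])) {
		page = list_entry(hugepage_freelists[nid].next,
				  struct page, lru);
		list_del(&page->lru);
		free_huge_pages--;
		free_huge_pages_node[nid]--;
		break;	/* the fix: stop after the first page */
	}
}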
diff --git a/mm/migrate.c b/mm/migrate.c
index 34d8ada053e..37c73b90200 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -49,9 +49,8 @@ int isolate_lru_page(struct page *page, struct list_head *pagelist)
struct zone *zone = page_zone(page);
spin_lock_irq(&zone->lru_lock);
- if (PageLRU(page)) {
+ if (PageLRU(page) && get_page_unless_zero(page)) {
ret = 0;
- get_page(page);
ClearPageLRU(page);
if (PageActive(page))
del_page_from_active_list(zone, page);
@@ -632,18 +631,35 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
goto unlock;
wait_on_page_writeback(page);
}
-
/*
- * Establish migration ptes or remove ptes
+ * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
+ * we cannot notice that the anon_vma is freed while we migrate a page.
+ * This rcu_read_lock() delays freeing anon_vma pointer until the end
+ * of migration. File cache pages are no problem because of page_lock()
+ */
+ rcu_read_lock();
+ /*
+ * This is a corner case handling.
+ * When a new swap-cache page is read in, it is linked to the LRU
+ * and treated as swapcache but has no rmap yet.
+ * Calling try_to_unmap() against a page with page->mapping==NULL is
+ * a BUG. So handle it here.
*/
+ if (!page->mapping)
+ goto rcu_unlock;
+ /* Establish migration ptes or remove ptes */
try_to_unmap(page, 1);
+
if (!page_mapped(page))
rc = move_to_new_page(newpage, page);
if (rc)
remove_migration_ptes(page, page);
+rcu_unlock:
+ rcu_read_unlock();
unlock:
+
unlock_page(page);
if (rc != -EAGAIN) {
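isolate_lru_page() now uses get_page_unless_zero() so that a page whose reference count has already dropped to zero (one that is on its way back to the allocator) is never pinned again. The helper is essentially a conditional reference grab; a minimal sketch of its semantics:

/* Sketch of get_page_unless_zero() behaviour: take a reference only
 * if the page still holds one, and report whether that succeeded. */
static inline int get_page_unless_zero(struct page *page)
{
	return atomic_inc_not_zero(&page->_count);
}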
diff --git a/mm/mmap.c b/mm/mmap.c
index 7afc7a7cec6..b6537211b9c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1029,6 +1029,40 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
}
EXPORT_SYMBOL(do_mmap_pgoff);
+/*
+ * Some shared mappings will want the pages marked read-only
+ * to track write events. If so, we'll downgrade vm_page_prot
+ * to the private version (using protection_map[] without the
+ * VM_SHARED bit).
+ */
+int vma_wants_writenotify(struct vm_area_struct *vma)
+{
+ unsigned int vm_flags = vma->vm_flags;
+
+ /* If it was private or non-writable, the write bit is already clear */
+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
+ return 0;
+
+ /* The backer wishes to know when pages are first written to? */
+ if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+ return 1;
+
+ /* The open routine did something to the protections already? */
+ if (pgprot_val(vma->vm_page_prot) !=
+ pgprot_val(protection_map[vm_flags &
+ (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
+ return 0;
+
+ /* Specialty mapping? */
+ if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
+ return 0;
+
+ /* Can the mapping track the dirty pages? */
+ return vma->vm_file && vma->vm_file->f_mapping &&
+ mapping_cap_account_dirty(vma->vm_file->f_mapping);
+}
+
+
unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, unsigned long flags,
unsigned int vm_flags, unsigned long pgoff,
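vma_wants_writenotify() lets mmap_region() decide whether a shared, writable mapping should start out write-protected so the first write to each page faults and can be tracked. A hypothetical caller sketch, following the function's comment about using protection_map[] without the VM_SHARED bit (not the exact mmap_region() code):

/* Hypothetical caller sketch: downgrade a shared mapping's protection
 * to its private equivalent so the first write traps. */
if (vma_wants_writenotify(vma))
	vma->vm_page_prot =
		protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];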
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index a7001410ab1..10367654ae7 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -17,6 +17,7 @@
#include <linux/oom.h>
#include <linux/mm.h>
+#include <linux/err.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 40954fb8159..0bd4d82ddff 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -726,7 +726,7 @@ static void __drain_pages(unsigned int cpu)
}
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_HIBERNATION
void mark_free_pages(struct zone *zone)
{
@@ -772,7 +772,7 @@ void drain_local_pages(void)
__drain_pages(smp_processor_id());
local_irq_restore(flags);
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_HIBERNATION */
/*
* Free a 0-order page
@@ -2775,11 +2775,11 @@ unsigned long __meminit __absent_pages_in_range(int nid,
if (i == -1)
return 0;
+ prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
+
/* Account for ranges before physical memory on this node */
if (early_node_map[i].start_pfn > range_start_pfn)
- hole_pages = early_node_map[i].start_pfn - range_start_pfn;
-
- prev_end_pfn = early_node_map[i].start_pfn;
+ hole_pages = prev_end_pfn - range_start_pfn;
/* Find all holes for the zone within the node */
for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
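The __absent_pages_in_range() change clamps prev_end_pfn to the requested range before it is used, so a node whose first memory range begins beyond range_end_pfn can no longer report more hole pages than the range actually contains. A worked example with made-up PFNs:

/* Hypothetical numbers: range [0x0, 0x100), first early_node_map
 * entry for the node starts at 0x200.
 *   old: hole_pages = 0x200 - 0x0 = 0x200   (exceeds the range size)
 *   new: prev_end_pfn = min(0x200, 0x100) = 0x100
 *        hole_pages   = 0x100 - 0x0  = 0x100 (capped at the range)
 */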
diff --git a/mm/slab.c b/mm/slab.c
index bde271c001b..a684778b2b4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2776,7 +2776,7 @@ static int cache_grow(struct kmem_cache *cachep,
* 'nodeid'.
*/
if (!objp)
- objp = kmem_getpages(cachep, flags, nodeid);
+ objp = kmem_getpages(cachep, local_flags, nodeid);
if (!objp)
goto failed;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 7ff0a81c7b0..f071648e136 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -425,7 +425,7 @@ void free_swap_and_cache(swp_entry_t entry)
}
}
-#ifdef CONFIG_SOFTWARE_SUSPEND
+#ifdef CONFIG_HIBERNATION
/*
* Find the swap type that corresponds to given device (if any).
*
@@ -951,7 +951,7 @@ sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset)
}
}
-#ifdef CONFIG_SOFTWARE_SUSPEND
+#ifdef CONFIG_HIBERNATION
/*
* Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
* corresponding to given index in swap_info (swap type).
@@ -966,7 +966,7 @@ sector_t swapdev_block(int swap_type, pgoff_t offset)
sis = swap_info + swap_type;
return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0;
}
-#endif /* CONFIG_SOFTWARE_SUSPEND */
+#endif /* CONFIG_HIBERNATION */
/*
* Free all of a swapdev's extent information
diff --git a/mm/vmstat.c b/mm/vmstat.c
index fadf791cd7e..c64d169537b 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -10,6 +10,7 @@
*/
#include <linux/mm.h>
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/sched.h>