Diffstat (limited to 'mm')
-rw-r--r--  mm/rmap.c   |  21
-rw-r--r--  mm/vmscan.c | 226
2 files changed, 227 insertions, 20 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index d85a99d28c0..13fad5fcdf7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -52,6 +52,7 @@
 #include <linux/init.h>
 #include <linux/rmap.h>
 #include <linux/rcupdate.h>
+#include <linux/module.h>
 
 #include <asm/tlbflush.h>
 
@@ -541,7 +542,8 @@ void page_remove_rmap(struct page *page)
  * Subfunctions of try_to_unmap: try_to_unmap_one called
  * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
  */
-static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
+static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+                                int ignore_refs)
 {
         struct mm_struct *mm = vma->vm_mm;
         unsigned long address;
@@ -564,7 +566,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
          * skipped over this mm) then we should reactivate it.
          */
         if ((vma->vm_flags & VM_LOCKED) ||
-                        ptep_clear_flush_young(vma, address, pte)) {
+                        (ptep_clear_flush_young(vma, address, pte)
+                                && !ignore_refs)) {
                 ret = SWAP_FAIL;
                 goto out_unmap;
         }
@@ -698,7 +701,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
         pte_unmap_unlock(pte - 1, ptl);
 }
 
-static int try_to_unmap_anon(struct page *page)
+static int try_to_unmap_anon(struct page *page, int ignore_refs)
 {
         struct anon_vma *anon_vma;
         struct vm_area_struct *vma;
@@ -709,7 +712,7 @@ static int try_to_unmap_anon(struct page *page)
                 return ret;
 
         list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-                ret = try_to_unmap_one(page, vma);
+                ret = try_to_unmap_one(page, vma, ignore_refs);
                 if (ret == SWAP_FAIL || !page_mapped(page))
                         break;
         }
@@ -726,7 +729,7 @@
  *
  * This function is only called from try_to_unmap for object-based pages.
  */
-static int try_to_unmap_file(struct page *page)
+static int try_to_unmap_file(struct page *page, int ignore_refs)
 {
         struct address_space *mapping = page->mapping;
         pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -740,7 +743,7 @@
 
         spin_lock(&mapping->i_mmap_lock);
         vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
-                ret = try_to_unmap_one(page, vma);
+                ret = try_to_unmap_one(page, vma, ignore_refs);
                 if (ret == SWAP_FAIL || !page_mapped(page))
                         goto out;
         }
@@ -825,16 +828,16 @@ out:
  * SWAP_AGAIN   - we missed a mapping, try again later
  * SWAP_FAIL    - the page is unswappable
  */
-int try_to_unmap(struct page *page)
+int try_to_unmap(struct page *page, int ignore_refs)
 {
         int ret;
 
         BUG_ON(!PageLocked(page));
 
         if (PageAnon(page))
-                ret = try_to_unmap_anon(page);
+                ret = try_to_unmap_anon(page, ignore_refs);
         else
-                ret = try_to_unmap_file(page);
+                ret = try_to_unmap_file(page, ignore_refs);
 
         if (!page_mapped(page))
                 ret = SWAP_SUCCESS;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index aa4b80dbe3a..8f326ce2b69 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -483,7 +483,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                 if (!sc->may_swap)
                         goto keep_locked;
 
-                switch (try_to_unmap(page)) {
+                switch (try_to_unmap(page, 0)) {
                 case SWAP_FAIL:
                         goto activate_locked;
                 case SWAP_AGAIN:
@@ -623,7 +623,7 @@ static int swap_page(struct page *page)
         struct address_space *mapping = page_mapping(page);
 
         if (page_mapped(page) && mapping)
-                if (try_to_unmap(page) != SWAP_SUCCESS)
+                if (try_to_unmap(page, 0) != SWAP_SUCCESS)
                         goto unlock_retry;
 
         if (PageDirty(page)) {
@@ -659,6 +659,154 @@ unlock_retry:
 retry:
         return -EAGAIN;
 }
+
+/*
+ * Page migration was first developed in the context of the memory hotplug
+ * project. The main authors of the migration code are:
+ *
+ * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
+ * Hirokazu Takahashi <taka@valinux.co.jp>
+ * Dave Hansen <haveblue@us.ibm.com>
+ * Christoph Lameter <clameter@sgi.com>
+ */
+
+/*
+ * Remove references for a page and establish the new page with the correct
+ * basic settings to be able to stop accesses to the page.
+ */
+static int migrate_page_remove_references(struct page *newpage,
+                                struct page *page, int nr_refs)
+{
+        struct address_space *mapping = page_mapping(page);
+        struct page **radix_pointer;
+
+        /*
+         * Avoid doing any of the following work if the page count
+         * indicates that the page is in use or truncate has removed
+         * the page.
+         */
+        if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
+                return 1;
+
+        /*
+         * Establish swap ptes for anonymous pages or destroy pte
+         * maps for files.
+         *
+         * In order to reestablish file backed mappings the fault handlers
+         * will take the radix tree_lock which may then be used to stop
+         * processes from accessing this page until the new page is ready.
+         *
+         * A process accessing via a swap pte (an anonymous page) will take a
+         * page_lock on the old page which will block the process until the
+         * migration attempt is complete. At that time the PageSwapCache bit
+         * will be examined. If the page was migrated then the PageSwapCache
+         * bit will be clear and the operation to retrieve the page will be
+         * retried which will find the new page in the radix tree. Then a new
+         * direct mapping may be generated based on the radix tree contents.
+         *
+         * If the page was not migrated then the PageSwapCache bit
+         * is still set and the operation may continue.
+         */
+        try_to_unmap(page, 1);
+
+        /*
+         * Give up if we were unable to remove all mappings.
+         */
+        if (page_mapcount(page))
+                return 1;
+
+        write_lock_irq(&mapping->tree_lock);
+
+        radix_pointer = (struct page **)radix_tree_lookup_slot(
+                                                &mapping->page_tree,
+                                                page_index(page));
+
+        if (!page_mapping(page) || page_count(page) != nr_refs ||
+                        *radix_pointer != page) {
+                write_unlock_irq(&mapping->tree_lock);
+                return 1;
+        }
+
+        /*
+         * Now we know that no one else is looking at the page.
+         *
+         * Certain minimal information about a page must be available
+         * in order for other subsystems to properly handle the page if they
+         * find it through the radix tree update before we are finished
+         * copying the page.
+         */
+        get_page(newpage);
+        newpage->index = page->index;
+        newpage->mapping = page->mapping;
+        if (PageSwapCache(page)) {
+                SetPageSwapCache(newpage);
+                set_page_private(newpage, page_private(page));
+        }
+
+        *radix_pointer = newpage;
+        __put_page(page);
+        write_unlock_irq(&mapping->tree_lock);
+
+        return 0;
+}
+
+/*
+ * Copy the page to its new location
+ */
+void migrate_page_copy(struct page *newpage, struct page *page)
+{
+        copy_highpage(newpage, page);
+
+        if (PageError(page))
+                SetPageError(newpage);
+        if (PageReferenced(page))
+                SetPageReferenced(newpage);
+        if (PageUptodate(page))
+                SetPageUptodate(newpage);
+        if (PageActive(page))
+                SetPageActive(newpage);
+        if (PageChecked(page))
+                SetPageChecked(newpage);
+        if (PageMappedToDisk(page))
+                SetPageMappedToDisk(newpage);
+
+        if (PageDirty(page)) {
+                clear_page_dirty_for_io(page);
+                set_page_dirty(newpage);
+        }
+
+        ClearPageSwapCache(page);
+        ClearPageActive(page);
+        ClearPagePrivate(page);
+        set_page_private(page, 0);
+        page->mapping = NULL;
+
+        /*
+         * If any waiters have accumulated on the new page then
+         * wake them up.
+         */
+        if (PageWriteback(newpage))
+                end_page_writeback(newpage);
+}
+
+/*
+ * Common logic to directly migrate a single page suitable for
+ * pages that do not use PagePrivate.
+ *
+ * Pages are locked upon entry and exit.
+ */
+int migrate_page(struct page *newpage, struct page *page)
+{
+        BUG_ON(PageWriteback(page));    /* Writeback must be complete */
+
+        if (migrate_page_remove_references(newpage, page, 2))
+                return -EAGAIN;
+
+        migrate_page_copy(newpage, page);
+
+        return 0;
+}
+
 /*
  * migrate_pages
  *
@@ -672,11 +820,6 @@ retry:
  * are movable anymore because to has become empty
  * or no retryable pages exist anymore.
  *
- * SIMPLIFIED VERSION: This implementation of migrate_pages
- * is only swapping out pages and never touches the second
- * list. The direct migration patchset
- * extends this function to avoid the use of swap.
- *
  * Return: Number of pages not migrated when "to" ran empty.
  */
 int migrate_pages(struct list_head *from, struct list_head *to,
@@ -697,6 +840,9 @@ redo:
         retry = 0;
 
         list_for_each_entry_safe(page, page2, from, lru) {
+                struct page *newpage = NULL;
+                struct address_space *mapping;
+
                 cond_resched();
 
                 rc = 0;
@@ -704,6 +850,9 @@ redo:
                         /* page was freed from under us. So we are done. */
                         goto next;
 
+                if (to && list_empty(to))
+                        break;
+
                 /*
                  * Skip locked pages during the first two passes to give the
                  * functions holding the lock time to release the page. Later we
@@ -740,12 +889,64 @@ redo:
                         }
                 }
 
+                if (!to) {
+                        rc = swap_page(page);
+                        goto next;
+                }
+
+                newpage = lru_to_page(to);
+                lock_page(newpage);
+
                 /*
-                 * Page is properly locked and writeback is complete.
+                 * Pages are properly locked and writeback is complete.
                  * Try to migrate the page.
                  */
-                rc = swap_page(page);
-                goto next;
+                mapping = page_mapping(page);
+                if (!mapping)
+                        goto unlock_both;
+
+                /*
+                 * Trigger writeout if page is dirty
+                 */
+                if (PageDirty(page)) {
+                        switch (pageout(page, mapping)) {
+                        case PAGE_KEEP:
+                        case PAGE_ACTIVATE:
+                                goto unlock_both;
+
+                        case PAGE_SUCCESS:
+                                unlock_page(newpage);
+                                goto next;
+
+                        case PAGE_CLEAN:
+                                ; /* try to migrate the page below */
+                        }
+                }
+                /*
+                 * If we have no buffer or can release the buffer
+                 * then do a simple migration.
+                 */
+                if (!page_has_buffers(page) ||
+                                try_to_release_page(page, GFP_KERNEL)) {
+                        rc = migrate_page(newpage, page);
+                        goto unlock_both;
+                }
+
+                /*
+                 * On early passes with mapped pages simply
+                 * retry. There may be a lock held for some
+                 * buffers that may go away. Later
+                 * swap them out.
+                 */
+                if (pass > 4) {
+                        unlock_page(newpage);
+                        newpage = NULL;
+                        rc = swap_page(page);
+                        goto next;
+                }
+
+unlock_both:
+                unlock_page(newpage);
 
 unlock_page:
                 unlock_page(page);
@@ -758,7 +959,10 @@ next:
                         list_move(&page->lru, failed);
                         nr_failed++;
                 } else {
-                        /* Success */
+                        if (newpage) {
+                                /* Successful migration. Return page to LRU */
+                                move_to_lru(newpage);
+                        }
                         list_move(&page->lru, moved);
                 }
         }
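The check at the top of migrate_page_remove_references() above carries the patch's safety argument: migration only proceeds when every reference to the page is explained by its mappings plus the nr_refs references the migration path itself holds (migrate_page() passes 2). The user-space sketch below models only that accounting; struct fake_page and can_start_migration() are invented names for illustration, and only the arithmetic mirrors the kernel check.

#include <stdio.h>
#include <stdbool.h>

/* Invented stand-in for struct page; only the two counters matter here. */
struct fake_page {
        int count;      /* models page_count(): all references to the page     */
        int mapcount;   /* models page_mapcount(): references from page tables */
};

/*
 * Mirrors the test
 *      page_mapcount(page) + nr_refs != page_count(page)
 * from migrate_page_remove_references(): proceed only when the mappings plus
 * the caller's own references explain every reference that exists.
 */
static bool can_start_migration(const struct fake_page *page, int nr_refs)
{
        return page->mapcount + nr_refs == page->count;
}

int main(void)
{
        /* Two mappers plus the two references migrate_page() passes in. */
        struct fake_page quiet = { .count = 4, .mapcount = 2 };
        /* Same page, but an extra reference (say, I/O in flight) exists. */
        struct fake_page busy  = { .count = 5, .mapcount = 2 };

        printf("quiet page: %s\n", can_start_migration(&quiet, 2) ? "migrate" : "retry later");
        printf("busy page:  %s\n", can_start_migration(&busy, 2) ? "migrate" : "retry later");
        return 0;
}

A failed check simply reports the page as busy, which is why migrate_pages() keeps retrying pages over several passes rather than insisting on migrating them immediately.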
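The reworked migrate_pages() loop above makes up to ten passes over the "from" list: pages that cannot be handled yet (still locked, or with buffers that may go away) are retried on a later pass, unmovable pages are failed, and work stops early once the supply of preallocated target pages runs out. The sketch below models only that control flow in user space; try_migrate(), the page numbers and the thresholds are invented for illustration, and only the loop shape follows the kernel code.

#include <stdio.h>

enum outcome { MIGRATED, RETRY, FAILED };

/* Stand-in for one migration attempt; the real code locks, writes back
 * and remaps the page instead of looking at made-up page numbers. */
static enum outcome try_migrate(int page, int pass)
{
        if (page % 7 == 0)
                return FAILED;          /* permanently unmovable          */
        if (page % 3 == 0 && pass < 2)
                return RETRY;           /* e.g. still locked, try later   */
        return MIGRATED;
}

int main(void)
{
        int from[20], nr_from = 20;
        int nr_to = 15;                 /* preallocated target pages      */
        int nr_moved = 0, nr_failed = 0;

        for (int i = 0; i < nr_from; i++)
                from[i] = i + 1;

        for (int pass = 0; pass < 10 && nr_from && nr_to; pass++) {
                int retry[20], nr_retry = 0;

                for (int i = 0; i < nr_from; i++) {
                        if (nr_to == 0) {
                                /* out of target pages: the rest stays un-migrated */
                                retry[nr_retry++] = from[i];
                                continue;
                        }
                        switch (try_migrate(from[i], pass)) {
                        case MIGRATED:
                                nr_moved++;
                                nr_to--;
                                break;
                        case RETRY:
                                retry[nr_retry++] = from[i];
                                break;
                        case FAILED:
                                nr_failed++;
                                break;
                        }
                }
                /* Only retryable pages survive into the next pass. */
                for (int i = 0; i < nr_retry; i++)
                        from[i] = retry[i];
                nr_from = nr_retry;
        }

        printf("moved %d, failed %d, not migrated %d\n",
               nr_moved, nr_failed, nr_from);
        return 0;
}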