author     Steve French <sfrench@us.ibm.com>    2005-09-06 15:47:31 -0700
committer  Steve French <sfrench@us.ibm.com>    2005-09-06 15:47:31 -0700
commit     c08319a9d50b5c9cb4fdb33728bd16497cf4ddd3 (patch)
tree       5fbec9030029da1ec387c18b85f26f19ee50da44 /mm/rmap.c
parent     bfa0d75a1eee59f0577e3c1697ff570b77581a35 (diff)
parent     4706df3d3c42af802597d82c8b1542c3d52eab23 (diff)
Merge with /pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  29
1 file changed, 11 insertions, 18 deletions
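
The most interesting hunk below replaces the open-coded anonymous page index calculation in page_add_anon_rmap() with linear_page_index(), and only sets page->index/page->mapping for the first mapping of the page. As a rough sketch of the computation being factored out, reconstructed from the removed lines (the function name here is illustrative, not the actual kernel helper):

/*
 * Sketch only, based on the lines removed in the hunk below; the real
 * helper is linear_page_index() in the kernel headers.
 */
static pgoff_t example_linear_page_index(struct vm_area_struct *vma,
					 unsigned long address)
{
	pgoff_t index;

	index = (address - vma->vm_start) >> PAGE_SHIFT;  /* pages into the vma */
	index += vma->vm_pgoff;                           /* plus the vma's file offset */
	index >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;          /* scale to page-cache units */
	return index;
}
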
diff --git a/mm/rmap.c b/mm/rmap.c
index 08ac5c7fa91..450f5241b5a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -34,9 +34,8 @@
* anon_vma->lock
* mm->page_table_lock
* zone->lru_lock (in mark_page_accessed)
- * swap_list_lock (in swap_free etc's swap_info_get)
+ * swap_lock (in swap_duplicate, swap_info_get)
* mmlist_lock (in mmput, drain_mmlist and others)
- * swap_device_lock (in swap_duplicate, swap_info_get)
* mapping->private_lock (in __set_page_dirty_buffers)
* inode_lock (in set_page_dirty's __mark_inode_dirty)
* sb_lock (within inode_lock in fs/fs-writeback.c)
@@ -290,8 +289,6 @@ static int page_referenced_one(struct page *page,
pte_t *pte;
int referenced = 0;
- if (!get_mm_counter(mm, rss))
- goto out;
address = vma_address(page, vma);
if (address == -EFAULT)
goto out;
@@ -442,22 +439,19 @@ int page_referenced(struct page *page, int is_locked, int ignore_token)
void page_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
- struct anon_vma *anon_vma = vma->anon_vma;
- pgoff_t index;
-
BUG_ON(PageReserved(page));
- BUG_ON(!anon_vma);
inc_mm_counter(vma->vm_mm, anon_rss);
- anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
- index = (address - vma->vm_start) >> PAGE_SHIFT;
- index += vma->vm_pgoff;
- index >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
-
if (atomic_inc_and_test(&page->_mapcount)) {
- page->index = index;
+ struct anon_vma *anon_vma = vma->anon_vma;
+
+ BUG_ON(!anon_vma);
+ anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
page->mapping = (struct address_space *) anon_vma;
+
+ page->index = linear_page_index(vma, address);
+
inc_page_state(nr_mapped);
}
/* else checking page index and mapping is racy */
@@ -518,8 +512,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
pte_t pteval;
int ret = SWAP_AGAIN;
- if (!get_mm_counter(mm, rss))
- goto out;
address = vma_address(page, vma);
if (address == -EFAULT)
goto out;
@@ -532,6 +524,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
* If the page is mlock()d, we cannot swap it out.
* If it's recently referenced (perhaps page_referenced
* skipped over this mm) then we should reactivate it.
+ *
+ * Pages belonging to VM_RESERVED regions should not happen here.
*/
if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
ptep_clear_flush_young(vma, address, pte)) {
@@ -767,8 +761,7 @@ static int try_to_unmap_file(struct page *page)
if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
continue;
cursor = (unsigned long) vma->vm_private_data;
- while (get_mm_counter(vma->vm_mm, rss) &&
- cursor < max_nl_cursor &&
+ while ( cursor < max_nl_cursor &&
cursor < vma->vm_end - vma->vm_start) {
try_to_unmap_cluster(cursor, &mapcount, vma);
cursor += CLUSTER_SIZE;