Diffstat (limited to 'mm/memory.c')
 mm/memory.c | 213
 1 file changed, 142 insertions(+), 71 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 2998cfc12f5..9ab206b829a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -333,9 +333,9 @@ static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
}
/*
- * This function is called to print an error when a pte in a
- * !VM_RESERVED region is found pointing to an invalid pfn (which
- * is an error.
+ * This function is called to print an error when a bad pte
+ * is found. For example, we might have a PFN-mapped pte in
+ * a region that doesn't allow it.
*
* The calling function must still handle the error.
*/
@@ -350,6 +350,59 @@ void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
}
/*
+ * This function gets the "struct page" associated with a pte.
+ *
+ * NOTE! Some mappings do not have "struct pages". A raw PFN mapping
+ * will have each page table entry just pointing to a raw page frame
+ * number, and as far as the VM layer is concerned, those do not have
+ * pages associated with them - even if the PFN might point to memory
+ * that otherwise is perfectly fine and has a "struct page".
+ *
+ * The way we recognize those mappings is through the rules set up
+ * by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set,
+ * and the vm_pgoff will point to the first PFN mapped: thus every
+ * page that is a raw mapping will always honor the rule
+ *
+ * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
+ *
+ * and if that isn't true, the page has been COW'ed (in which case it
+ * _does_ have a "struct page" associated with it even if it is in a
+ * VM_PFNMAP range).
+ */
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+ unsigned long pfn = pte_pfn(pte);
+
+ if (vma->vm_flags & VM_PFNMAP) {
+ unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
+ if (pfn == vma->vm_pgoff + off)
+ return NULL;
+ }
+
+ /*
+ * Add some anal sanity checks for now. Eventually,
+ * we should just do "return pfn_to_page(pfn)", but
+ * in the meantime we check that we get a valid pfn,
+ * and that the resulting page looks ok.
+ *
+ * Remove this test eventually!
+ */
+ if (unlikely(!pfn_valid(pfn))) {
+ print_bad_pte(vma, pte, addr);
+ return NULL;
+ }
+
+ /*
+ * NOTE! We still have PageReserved() pages in the page
+ * tables.
+ *
+ * The ZERO_PAGE() pages and various VDSO mappings can
+ * cause them to exist.
+ */
+ return pfn_to_page(pfn);
+}
+
+/*
* copy one vm_area from one task to the other. Assumes the page tables
* already present in the new task to be cleared in the whole range
* covered by this vma.
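To make the vm_pgoff identity rule above concrete, here is a small standalone userspace illustration of the same check vm_normal_page() performs. All numbers and the PAGE_SHIFT value are invented for the example; nothing in it is part of the patch.

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4KiB pages */

int main(void)
{
	/* Invented values: a VM_PFNMAP vma created by remap_pfn_range(),
	 * starting at pfn 0x1000, mapped at user address 0x40000000. */
	unsigned long vm_start = 0x40000000UL;
	unsigned long vm_pgoff = 0x1000UL;

	/* A pte for the 4th page of the mapping. */
	unsigned long addr = vm_start + (3UL << PAGE_SHIFT);
	unsigned long pfn  = 0x1003UL;

	unsigned long off = (addr - vm_start) >> PAGE_SHIFT;
	if (pfn == vm_pgoff + off)
		printf("raw PFN mapping: vm_normal_page() would return NULL\n");
	else
		printf("the page was COW'ed: it has a struct page\n");
	return 0;
}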
@@ -363,7 +416,6 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
unsigned long vm_flags = vma->vm_flags;
pte_t pte = *src_pte;
struct page *page;
- unsigned long pfn;
/* pte contains position in swap or file, so copy. */
if (unlikely(!pte_present(pte))) {
@@ -381,23 +433,6 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
goto out_set_pte;
}
- /* If the region is VM_RESERVED, the mapping is not
- * mapped via rmap - duplicate the pte as is.
- */
- if (vm_flags & VM_RESERVED)
- goto out_set_pte;
-
- pfn = pte_pfn(pte);
- /* If the pte points outside of valid memory but
- * the region is not VM_RESERVED, we have a problem.
- */
- if (unlikely(!pfn_valid(pfn))) {
- print_bad_pte(vma, pte, addr);
- goto out_set_pte; /* try to do something sane */
- }
-
- page = pfn_to_page(pfn);
-
/*
* If it's a COW mapping, write protect it both
* in the parent and the child
@@ -414,9 +449,13 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
if (vm_flags & VM_SHARED)
pte = pte_mkclean(pte);
pte = pte_mkold(pte);
- get_page(page);
- page_dup_rmap(page);
- rss[!!PageAnon(page)]++;
+
+ page = vm_normal_page(vma, addr, pte);
+ if (page) {
+ get_page(page);
+ page_dup_rmap(page);
+ rss[!!PageAnon(page)]++;
+ }
out_set_pte:
set_pte_at(dst_mm, addr, dst_pte, pte);
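For reference, the rss[] array filled in above is indexed by !!PageAnon(page): rss[0] counts file-backed pages, rss[1] anonymous pages, and a PFN-mapped pte (where vm_normal_page() returns NULL) is copied verbatim and counted in neither. The sketch below only shows how such buckets would be folded into the mm counters with the add_mm_rss() helper named in the first hunk; fold_rss() itself is a made-up name, not part of the patch.

/* Sketch only: how the two rss[] buckets map onto the mm counters.
 * The real fold is done by the caller using the add_mm_rss() helper
 * shown in the first hunk of this diff. */
static void fold_rss(struct mm_struct *mm, int rss[2])
{
	add_mm_rss(mm, rss[0] /* file pages */, rss[1] /* anon pages */);
}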
@@ -528,7 +567,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
* readonly mappings. The tradeoff is that copy_page_range is more
* efficient than faulting.
*/
- if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_RESERVED))) {
+ if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP))) {
if (!vma->anon_vma)
return 0;
}
@@ -568,17 +607,11 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
continue;
}
if (pte_present(ptent)) {
- struct page *page = NULL;
+ struct page *page;
(*zap_work) -= PAGE_SIZE;
- if (!(vma->vm_flags & VM_RESERVED)) {
- unsigned long pfn = pte_pfn(ptent);
- if (unlikely(!pfn_valid(pfn)))
- print_bad_pte(vma, ptent, addr);
- else
- page = pfn_to_page(pfn);
- }
+ page = vm_normal_page(vma, addr, ptent);
if (unlikely(details) && page) {
/*
* unmap_shared_mapping_pages() wants to
@@ -834,7 +867,7 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
/*
* Do a quick page-table lookup for a single page.
*/
-struct page *follow_page(struct mm_struct *mm, unsigned long address,
+struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
unsigned int flags)
{
pgd_t *pgd;
@@ -842,8 +875,8 @@ struct page *follow_page(struct mm_struct *mm, unsigned long address,
pmd_t *pmd;
pte_t *ptep, pte;
spinlock_t *ptl;
- unsigned long pfn;
struct page *page;
+ struct mm_struct *mm = vma->vm_mm;
page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
if (!IS_ERR(page)) {
@@ -879,11 +912,10 @@ struct page *follow_page(struct mm_struct *mm, unsigned long address,
goto unlock;
if ((flags & FOLL_WRITE) && !pte_write(pte))
goto unlock;
- pfn = pte_pfn(pte);
- if (!pfn_valid(pfn))
+ page = vm_normal_page(vma, address, pte);
+ if (unlikely(!page))
goto unlock;
- page = pfn_to_page(pfn);
if (flags & FOLL_GET)
get_page(page);
if (flags & FOLL_TOUCH) {
@@ -956,8 +988,10 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
return i ? : -EFAULT;
}
if (pages) {
- pages[i] = pte_page(*pte);
- get_page(pages[i]);
+ struct page *page = vm_normal_page(vma, start, *pte);
+ pages[i] = page;
+ if (page)
+ get_page(page);
}
pte_unmap(pte);
if (vmas)
@@ -968,7 +1002,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
continue;
}
- if (!vma || (vma->vm_flags & (VM_IO | VM_RESERVED))
+ if (!vma || (vma->vm_flags & VM_IO)
|| !(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
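One consequence of the change above: get_user_pages() can now hand back a NULL entry in pages[] when an address falls in a raw PFN mapping, so callers must check before using or releasing the page. Below is a minimal, hypothetical caller under that assumption; the function name, locking context, and parameters are all invented.

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical get_user_pages() caller; every name here is invented.
 * It only demonstrates the NULL check that raw PFN mappings now require. */
static int pin_user_buffer(struct task_struct *tsk, struct mm_struct *mm,
			   unsigned long start, int nr_pages,
			   struct page **pages)
{
	int i, got;

	down_read(&mm->mmap_sem);
	got = get_user_pages(tsk, mm, start, nr_pages,
			     1 /* write */, 0 /* force */, pages, NULL);
	up_read(&mm->mmap_sem);

	for (i = 0; i < got; i++) {
		if (!pages[i])	/* raw PFN mapping: no struct page */
			continue;
		/* ... use pages[i], then release it with put_page() ... */
	}
	return got;
}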
@@ -992,7 +1026,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
foll_flags |= FOLL_WRITE;
cond_resched();
- while (!(page = follow_page(mm, start, foll_flags))) {
+ while (!(page = follow_page(vma, start, foll_flags))) {
int ret;
ret = __handle_mm_fault(mm, vma, start,
foll_flags & FOLL_WRITE);
@@ -1191,10 +1225,17 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
* rest of the world about it:
* VM_IO tells people not to look at these pages
* (accesses can have side effects).
- * VM_RESERVED tells the core MM not to "manage" these pages
- * (e.g. refcount, mapcount, try to swap them out).
+ * VM_RESERVED is specified all over the place, because
+ * in 2.4 it kept swapout's vma scan off this vma; but
+ * in 2.6 the LRU scan won't even find its pages, so this
+ * flag means no more than count its pages in reserved_vm,
+ * and omit it from core dumps, even when VM_IO is turned off.
+ * VM_PFNMAP tells the core MM that the base pages are just
+ * raw PFN mappings, and do not have a "struct page" associated
+ * with them.
*/
- vma->vm_flags |= VM_IO | VM_RESERVED;
+ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+ vma->vm_pgoff = pfn;
BUG_ON(addr >= end);
pfn -= addr >> PAGE_SHIFT;
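For context, remap_pfn_range() is normally called from a driver's mmap method. The sketch below is hypothetical (the mydev_* names and the physical base address are invented); it only shows the call through which a vma ends up with the new VM_PFNMAP setup.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver; mydev_phys_base is an assumed physical base
 * address, not something defined by this patch. */
static unsigned long mydev_phys_base;	/* assumed: set at probe time */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/*
	 * remap_pfn_range() now marks the vma VM_IO | VM_RESERVED |
	 * VM_PFNMAP and records the first pfn in vma->vm_pgoff, which
	 * is exactly what vm_normal_page() relies on later.
	 */
	return remap_pfn_range(vma, vma->vm_start,
			       mydev_phys_base >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}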
@@ -1249,6 +1290,26 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
return pte;
}
+static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va)
+{
+ /*
+ * If the source page was a PFN mapping, we don't have
+ * a "struct page" for it. We do a best-effort copy by
+ * just copying from the original user address. If that
+ * fails, we just zero-fill it. Live with it.
+ */
+ if (unlikely(!src)) {
+ void *kaddr = kmap_atomic(dst, KM_USER0);
+ void __user *uaddr = (void __user *)(va & PAGE_MASK);
+ unsigned long left = __copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE);
+ if (left)
+ memset(kaddr, 0, PAGE_SIZE);
+ kunmap_atomic(kaddr, KM_USER0);
+ return;
+ }
+ copy_user_highpage(dst, src, va);
+}
+
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
@@ -1271,22 +1332,14 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
spinlock_t *ptl, pte_t orig_pte)
{
- struct page *old_page, *new_page;
- unsigned long pfn = pte_pfn(orig_pte);
+ struct page *old_page, *src_page, *new_page;
pte_t entry;
int ret = VM_FAULT_MINOR;
- BUG_ON(vma->vm_flags & VM_RESERVED);
-
- if (unlikely(!pfn_valid(pfn))) {
- /*
- * Page table corrupted: show pte and kill process.
- */
- print_bad_pte(vma, orig_pte, address);
- ret = VM_FAULT_OOM;
- goto unlock;
- }
- old_page = pfn_to_page(pfn);
+ old_page = vm_normal_page(vma, address, orig_pte);
+ src_page = old_page;
+ if (!old_page)
+ goto gotten;
if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
int reuse = can_share_swap_page(old_page);
@@ -1307,11 +1360,12 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
* Ok, we need to copy. Oh, well..
*/
page_cache_get(old_page);
+gotten:
pte_unmap_unlock(page_table, ptl);
if (unlikely(anon_vma_prepare(vma)))
goto oom;
- if (old_page == ZERO_PAGE(address)) {
+ if (src_page == ZERO_PAGE(address)) {
new_page = alloc_zeroed_user_highpage(vma, address);
if (!new_page)
goto oom;
@@ -1319,7 +1373,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
if (!new_page)
goto oom;
- copy_user_highpage(new_page, old_page, address);
+ cow_user_page(new_page, src_page, address);
}
/*
@@ -1327,11 +1381,14 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
- page_remove_rmap(old_page);
- if (!PageAnon(old_page)) {
+ if (old_page) {
+ page_remove_rmap(old_page);
+ if (!PageAnon(old_page)) {
+ dec_mm_counter(mm, file_rss);
+ inc_mm_counter(mm, anon_rss);
+ }
+ } else
inc_mm_counter(mm, anon_rss);
- dec_mm_counter(mm, file_rss);
- }
- flush_cache_page(vma, address, pfn);
+ flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = mk_pte(new_page, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
@@ -1345,13 +1402,16 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
new_page = old_page;
ret |= VM_FAULT_WRITE;
}
- page_cache_release(new_page);
- page_cache_release(old_page);
+ if (new_page)
+ page_cache_release(new_page);
+ if (old_page)
+ page_cache_release(old_page);
unlock:
pte_unmap_unlock(page_table, ptl);
return ret;
oom:
- page_cache_release(old_page);
+ if (old_page)
+ page_cache_release(old_page);
return VM_FAULT_OOM;
}
@@ -1849,7 +1909,6 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
int anon = 0;
pte_unmap(page_table);
-
if (vma->vm_file) {
mapping = vma->vm_file->f_mapping;
sequence = mapping->truncate_count;
@@ -1882,7 +1941,7 @@ retry:
page = alloc_page_vma(GFP_HIGHUSER, vma, address);
if (!page)
goto oom;
- copy_user_highpage(page, new_page, address);
+ cow_user_page(page, new_page, address);
page_cache_release(new_page);
new_page = page;
anon = 1;
@@ -1924,7 +1983,7 @@ retry:
inc_mm_counter(mm, anon_rss);
lru_cache_add_active(new_page);
page_add_anon_rmap(new_page, vma, address);
- } else if (!(vma->vm_flags & VM_RESERVED)) {
+ } else {
inc_mm_counter(mm, file_rss);
page_add_file_rmap(new_page);
}
@@ -2101,6 +2160,12 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
+#else
+/* Workaround for gcc 2.96 */
+int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+ return 0;
+}
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
@@ -2129,6 +2194,12 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
+#else
+/* Workaround for gcc 2.96 */
+int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+{
+ return 0;
+}
#endif /* __PAGETABLE_PMD_FOLDED */
int make_pages_present(unsigned long addr, unsigned long end)
@@ -2203,7 +2274,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_page_prot = PAGE_READONLY;
- gate_vma.vm_flags = VM_RESERVED;
+ gate_vma.vm_flags = 0;
return 0;
}
__initcall(gate_vma_init);