From 9a840895147b12de5cdd633c600b38686840ee53 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Mon, 21 Sep 2009 17:02:01 -0700
Subject: ksm: identify PageKsm pages

KSM will need to identify its kernel merged pages unambiguously, and
/proc/kpageflags will probably like to do so too.

Since KSM will only be substituting anonymous pages, statistics are best
preserved by making a PageKsm page a special PageAnon page: one with no
anon_vma.

But KSM then needs its own page_add_ksm_rmap() - keep it in ksm.h near
PageKsm; and do_wp_page() must COW them, unlike singly mapped PageAnons.

Signed-off-by: Hugh Dickins
Signed-off-by: Chris Wright
Signed-off-by: Izik Eidus
Cc: Wu Fengguang
Cc: Andrea Arcangeli
Cc: Rik van Riel
Cc: Wu Fengguang
Cc: Balbir Singh
Cc: Hugh Dickins
Cc: KAMEZAWA Hiroyuki
Cc: Lee Schermerhorn
Cc: Avi Kivity
Cc: Nick Piggin
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 fs/proc/page.c      |  5 +++++
 include/linux/ksm.h | 29 +++++++++++++++++++++++++++++
 mm/memory.c         |  3 ++-
 3 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/fs/proc/page.c b/fs/proc/page.c
index 2707c6c7a20..2281c2cbfe2 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -2,6 +2,7 @@
 #include <linux/compiler.h>
 #include <linux/fs.h>
 #include <linux/init.h>
+#include <linux/ksm.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/proc_fs.h>
@@ -95,6 +96,8 @@ static const struct file_operations proc_kpagecount_operations = {
 #define KPF_UNEVICTABLE		18
 #define KPF_NOPAGE		20
 
+#define KPF_KSM			21
+
 /* kernel hacking assistances
  * WARNING: subject to change, never rely on them!
  */
@@ -137,6 +140,8 @@ static u64 get_uflags(struct page *page)
 		u |= 1 << KPF_MMAP;
 	if (PageAnon(page))
 		u |= 1 << KPF_ANON;
+	if (PageKsm(page))
+		u |= 1 << KPF_KSM;
 
 	/*
 	 * compound pages: export both head/tail info
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index eb2a448981e..a485c14ecd5 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -10,6 +10,7 @@
 #include <linux/bitops.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
+#include <linux/vmstat.h>
 
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
@@ -29,6 +30,27 @@ static inline void ksm_exit(struct mm_struct *mm)
 	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
 		__ksm_exit(mm);
 }
+
+/*
+ * A KSM page is one of those write-protected "shared pages" or "merged pages"
+ * which KSM maps into multiple mms, wherever identical anonymous page content
+ * is found in VM_MERGEABLE vmas.  It's a PageAnon page, with NULL anon_vma.
+ */
+static inline int PageKsm(struct page *page)
+{
+	return ((unsigned long)page->mapping == PAGE_MAPPING_ANON);
+}
+
+/*
+ * But we have to avoid the checking which page_add_anon_rmap() performs.
+ */
+static inline void page_add_ksm_rmap(struct page *page)
+{
+	if (atomic_inc_and_test(&page->_mapcount)) {
+		page->mapping = (void *) PAGE_MAPPING_ANON;
+		__inc_zone_page_state(page, NR_ANON_PAGES);
+	}
+}
 #else  /* !CONFIG_KSM */
 
 static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
@@ -45,6 +67,13 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 static inline void ksm_exit(struct mm_struct *mm)
 {
 }
+
+static inline int PageKsm(struct page *page)
+{
+	return 0;
+}
+
+/* No stub required for page_add_ksm_rmap(page) */
 #endif /* !CONFIG_KSM */
 
 #endif
diff --git a/mm/memory.c b/mm/memory.c
index 7a61a11f186..1a435b81876 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -45,6 +45,7 @@
 #include <linux/swap.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
+#include <linux/ksm.h>
 #include <linux/rmap.h>
 #include <linux/module.h>
 #include <linux/delayacct.h>
@@ -1974,7 +1975,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * Take out anonymous pages first, anonymous shared vmas are
 	 * not dirty accountable.
 	 */
-	if (PageAnon(old_page)) {
+	if (PageAnon(old_page) && !PageKsm(old_page)) {
 		if (!trylock_page(old_page)) {
 			page_cache_get(old_page);
 			pte_unmap_unlock(page_table, ptl);
-- 
cgit v1.2.3
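Note (illustration, not part of the patch): PageKsm() above piggy-backs on the
PageAnon() encoding of page->mapping - an ordinary anonymous page stores its
anon_vma pointer with the PAGE_MAPPING_ANON bit set, while a merged page has
no anon_vma, so its ->mapping holds the bare flag value. A minimal userspace
model of that tagged-pointer scheme; the struct layouts and helper names here
are made up for the sketch, not kernel definitions:

/* model of the anon/KSM encoding of page->mapping; builds with any C compiler */
#include <stdio.h>
#include <stdint.h>

#define PAGE_MAPPING_ANON 1UL

struct anon_vma { int placeholder; };	/* stand-in for the kernel struct */
struct page { void *mapping; };		/* only the field the test looks at */

/* models PageAnon(): low bit of ->mapping set */
static int page_anon(const struct page *page)
{
	return ((uintptr_t)page->mapping & PAGE_MAPPING_ANON) != 0;
}

/* models PageKsm(): anon flag present, but no anon_vma pointer */
static int page_ksm(const struct page *page)
{
	return (uintptr_t)page->mapping == PAGE_MAPPING_ANON;
}

int main(void)
{
	struct anon_vma av;
	struct page anon_page = {
		.mapping = (void *)((uintptr_t)&av | PAGE_MAPPING_ANON)
	};
	struct page ksm_page = { .mapping = (void *)PAGE_MAPPING_ANON };

	printf("anon page: PageAnon=%d PageKsm=%d\n",
	       page_anon(&anon_page), page_ksm(&anon_page));
	printf("ksm page:  PageAnon=%d PageKsm=%d\n",
	       page_anon(&ksm_page), page_ksm(&ksm_page));
	return 0;
}

This is also why do_wp_page() grows the !PageKsm() check: a merged page is a
write-protected shared page, so a write must always copy it into a fresh
anonymous page rather than reuse it in place as a singly mapped PageAnon
would be.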
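The new flag is also the user-visible side of this change: /proc/kpageflags
exports one 64-bit flags word per physical page frame, indexed by PFN, and
bit 21 is the KPF_KSM value defined above. A rough sketch of a counter for
merged page frames on a kernel carrying this patch (reading the file needs
root; the file name count_ksm.c is only a suggestion):

/* count_ksm.c - count page frames flagged as KSM in /proc/kpageflags */
#include <stdio.h>
#include <stdint.h>

#define KPF_KSM 21	/* bit index exported by fs/proc/page.c above */

int main(void)
{
	FILE *f = fopen("/proc/kpageflags", "rb");
	uint64_t flags;
	unsigned long long ksm = 0, total = 0;

	if (!f) {
		perror("/proc/kpageflags");
		return 1;
	}
	/* one u64 of flag bits per page frame, read in PFN order */
	while (fread(&flags, sizeof(flags), 1, f) == 1) {
		total++;
		if (flags & (1ULL << KPF_KSM))
			ksm++;
	}
	fclose(f);
	printf("%llu of %llu page frames are KSM merged pages\n", ksm, total);
	return 0;
}

Multiplying the KSM count by the page size gives a rough figure for how much
physical memory is currently occupied by merged pages.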