Diffstat (limited to 'fs/proc')
-rw-r--r--  fs/proc/base.c       36
-rw-r--r--  fs/proc/inode.c       3
-rw-r--r--  fs/proc/proc_misc.c   2
-rw-r--r--  fs/proc/task_mmu.c  134
-rw-r--r--  fs/proc/vmcore.c      2
5 files changed, 139 insertions, 38 deletions
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 989af5e55d1..ec158dd02b3 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -715,6 +715,40 @@ static const struct file_operations proc_oom_adjust_operations = {
.write = oom_adjust_write,
};
+static ssize_t clear_refs_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_struct *task;
+ char buffer[PROC_NUMBUF], *end;
+ struct mm_struct *mm;
+
+ memset(buffer, 0, sizeof(buffer));
+ if (count > sizeof(buffer) - 1)
+ count = sizeof(buffer) - 1;
+ if (copy_from_user(buffer, buf, count))
+ return -EFAULT;
+ if (!simple_strtol(buffer, &end, 0))
+ return -EINVAL;
+ if (*end == '\n')
+ end++;
+ task = get_proc_task(file->f_path.dentry->d_inode);
+ if (!task)
+ return -ESRCH;
+ mm = get_task_mm(task);
+ if (mm) {
+ clear_refs_smap(mm);
+ mmput(mm);
+ }
+ put_task_struct(task);
+ if (end - buffer == 0)
+ return -EIO;
+ return end - buffer;
+}
+
+static struct file_operations proc_clear_refs_operations = {
+ .write = clear_refs_write,
+};
+
#ifdef CONFIG_AUDITSYSCALL
#define TMPBUFLEN 21
static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
@@ -1851,6 +1885,7 @@ static struct pid_entry tgid_base_stuff[] = {
REG("mounts", S_IRUGO, mounts),
REG("mountstats", S_IRUSR, mountstats),
#ifdef CONFIG_MMU
+ REG("clear_refs", S_IWUSR, clear_refs),
REG("smaps", S_IRUGO, smaps),
#endif
#ifdef CONFIG_SECURITY
@@ -2132,6 +2167,7 @@ static struct pid_entry tid_base_stuff[] = {
LNK("exe", exe),
REG("mounts", S_IRUGO, mounts),
#ifdef CONFIG_MMU
+ REG("clear_refs", S_IWUSR, clear_refs),
REG("smaps", S_IRUGO, smaps),
#endif
#ifdef CONFIG_SECURITY
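
[Editorial note: the hunks above wire a new write-only clear_refs file into both the tgid and tid /proc directories. Below is a minimal userspace sketch of the intended usage, assuming the interface exactly as added by this patch; clear_refs(), print_referenced() and the sampling flow are illustrative names, not part of the kernel change. Write any non-zero value to /proc/<pid>/clear_refs to reset the accessed/referenced state, let the task run, then read the per-VMA "Referenced:" counters from smaps to see which pages were touched in the interval.]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

/* Write "1" to /proc/<pid>/clear_refs; any non-zero value triggers the clear. */
static int clear_refs(pid_t pid)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/clear_refs", (int)pid);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs("1\n", f);
	return fclose(f);
}

/* Print the per-VMA "Referenced:" lines from /proc/<pid>/smaps. */
static void print_referenced(pid_t pid)
{
	char path[64], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/smaps", (int)pid);
	f = fopen(path, "r");
	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "Referenced:", 11))
			fputs(line, stdout);
	fclose(f);
}

int main(int argc, char **argv)
{
	pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : getpid();

	if (clear_refs(pid))		/* needs write permission, hence S_IWUSR */
		perror("clear_refs");
	/* ... let the target task run its workload for a while ... */
	print_referenced(pid);		/* pages touched since the clear */
	return 0;
}
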
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index c372eb151a3..22b1158389a 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -109,8 +109,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
{
struct proc_inode *ei = (struct proc_inode *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR)
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
inode_init_once(&ei->vfs_inode);
}
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index e2c4c0a5c90..75ec6523d29 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -398,8 +398,6 @@ static const struct file_operations proc_modules_operations = {
#endif
#ifdef CONFIG_SLAB
-extern struct seq_operations slabinfo_op;
-extern ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
static int slabinfo_open(struct inode *inode, struct file *file)
{
return seq_open(file, &slabinfo_op);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 7445980c802..4008c060f7e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -120,6 +120,14 @@ struct mem_size_stats
unsigned long shared_dirty;
unsigned long private_clean;
unsigned long private_dirty;
+ unsigned long referenced;
+};
+
+struct pmd_walker {
+ struct vm_area_struct *vma;
+ void *private;
+ void (*action)(struct vm_area_struct *, pmd_t *, unsigned long,
+ unsigned long, void *);
};
static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
@@ -181,18 +189,20 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats
if (mss)
seq_printf(m,
- "Size: %8lu kB\n"
- "Rss: %8lu kB\n"
- "Shared_Clean: %8lu kB\n"
- "Shared_Dirty: %8lu kB\n"
- "Private_Clean: %8lu kB\n"
- "Private_Dirty: %8lu kB\n",
+ "Size: %8lu kB\n"
+ "Rss: %8lu kB\n"
+ "Shared_Clean: %8lu kB\n"
+ "Shared_Dirty: %8lu kB\n"
+ "Private_Clean: %8lu kB\n"
+ "Private_Dirty: %8lu kB\n"
+ "Referenced: %8lu kB\n",
(vma->vm_end - vma->vm_start) >> 10,
mss->resident >> 10,
mss->shared_clean >> 10,
mss->shared_dirty >> 10,
mss->private_clean >> 10,
- mss->private_dirty >> 10);
+ mss->private_dirty >> 10,
+ mss->referenced >> 10);
if (m->count < m->size) /* vma is copied successfully */
m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
@@ -205,15 +215,16 @@ static int show_map(struct seq_file *m, void *v)
}
static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, unsigned long end,
- struct mem_size_stats *mss)
+ unsigned long addr, unsigned long end,
+ void *private)
{
+ struct mem_size_stats *mss = private;
pte_t *pte, ptent;
spinlock_t *ptl;
struct page *page;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
- do {
+ for (; addr != end; pte++, addr += PAGE_SIZE) {
ptent = *pte;
if (!pte_present(ptent))
continue;
@@ -224,6 +235,9 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
if (!page)
continue;
+ /* Accumulate the size in pages that have been accessed. */
+ if (pte_young(ptent) || PageReferenced(page))
+ mss->referenced += PAGE_SIZE;
if (page_mapcount(page) >= 2) {
if (pte_dirty(ptent))
mss->shared_dirty += PAGE_SIZE;
@@ -235,57 +249,99 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
else
mss->private_clean += PAGE_SIZE;
}
- } while (pte++, addr += PAGE_SIZE, addr != end);
+ }
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
}
-static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
- unsigned long addr, unsigned long end,
- struct mem_size_stats *mss)
+static void clear_refs_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long addr, unsigned long end,
+ void *private)
+{
+ pte_t *pte, ptent;
+ spinlock_t *ptl;
+ struct page *page;
+
+ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ for (; addr != end; pte++, addr += PAGE_SIZE) {
+ ptent = *pte;
+ if (!pte_present(ptent))
+ continue;
+
+ page = vm_normal_page(vma, addr, ptent);
+ if (!page)
+ continue;
+
+ /* Clear accessed and referenced bits. */
+ ptep_test_and_clear_young(vma, addr, pte);
+ ClearPageReferenced(page);
+ }
+ pte_unmap_unlock(pte - 1, ptl);
+ cond_resched();
+}
+
+static inline void walk_pmd_range(struct pmd_walker *walker, pud_t *pud,
+ unsigned long addr, unsigned long end)
{
pmd_t *pmd;
unsigned long next;
- pmd = pmd_offset(pud, addr);
- do {
+ for (pmd = pmd_offset(pud, addr); addr != end;
+ pmd++, addr = next) {
next = pmd_addr_end(addr, end);
if (pmd_none_or_clear_bad(pmd))
continue;
- smaps_pte_range(vma, pmd, addr, next, mss);
- } while (pmd++, addr = next, addr != end);
+ walker->action(walker->vma, pmd, addr, next, walker->private);
+ }
}
-static inline void smaps_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
- unsigned long addr, unsigned long end,
- struct mem_size_stats *mss)
+static inline void walk_pud_range(struct pmd_walker *walker, pgd_t *pgd,
+ unsigned long addr, unsigned long end)
{
pud_t *pud;
unsigned long next;
- pud = pud_offset(pgd, addr);
- do {
+ for (pud = pud_offset(pgd, addr); addr != end;
+ pud++, addr = next) {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
- smaps_pmd_range(vma, pud, addr, next, mss);
- } while (pud++, addr = next, addr != end);
+ walk_pmd_range(walker, pud, addr, next);
+ }
}
-static inline void smaps_pgd_range(struct vm_area_struct *vma,
- unsigned long addr, unsigned long end,
- struct mem_size_stats *mss)
+/*
+ * walk_page_range - walk the page tables of a VMA with a callback
+ * @vma - VMA to walk
+ * @action - callback invoked for every bottom-level (PTE) page table
+ * @private - private data passed to the callback function
+ *
+ * Recursively walk the page table for the memory area in a VMA, calling
+ * a callback for every bottom-level (PTE) page table.
+ */
+static inline void walk_page_range(struct vm_area_struct *vma,
+ void (*action)(struct vm_area_struct *,
+ pmd_t *, unsigned long,
+ unsigned long, void *),
+ void *private)
{
+ unsigned long addr = vma->vm_start;
+ unsigned long end = vma->vm_end;
+ struct pmd_walker walker = {
+ .vma = vma,
+ .private = private,
+ .action = action,
+ };
pgd_t *pgd;
unsigned long next;
- pgd = pgd_offset(vma->vm_mm, addr);
- do {
+ for (pgd = pgd_offset(vma->vm_mm, addr); addr != end;
+ pgd++, addr = next) {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
- smaps_pud_range(vma, pgd, addr, next, mss);
- } while (pgd++, addr = next, addr != end);
+ walk_pud_range(&walker, pgd, addr, next);
+ }
}
static int show_smap(struct seq_file *m, void *v)
@@ -295,10 +351,22 @@ static int show_smap(struct seq_file *m, void *v)
memset(&mss, 0, sizeof mss);
if (vma->vm_mm && !is_vm_hugetlb_page(vma))
- smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
+ walk_page_range(vma, smaps_pte_range, &mss);
return show_map_internal(m, v, &mss);
}
+void clear_refs_smap(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma;
+
+ down_read(&mm->mmap_sem);
+ for (vma = mm->mmap; vma; vma = vma->vm_next)
+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
+ walk_page_range(vma, clear_refs_pte_range, NULL);
+ flush_tlb_mm(mm);
+ up_read(&mm->mmap_sem);
+}
+
static void *m_start(struct seq_file *m, loff_t *pos)
{
struct proc_maps_private *priv = m->private;
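
[Editorial note: the refactoring above turns the smaps page-table descent into a generic walk_page_range() driven by a per-PTE-table callback stored in struct pmd_walker; both smaps_pte_range() and the new clear_refs_pte_range() are dispatched this way. The following is a hypothetical kernel-side sketch, not part of this patch, showing a third callback that counts resident pages in a VMA; count_present_pte_range() and count_present() are made-up names, and only helpers already visible in fs/proc/task_mmu.c are used.]

static void count_present_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				    unsigned long addr, unsigned long end,
				    void *private)
{
	unsigned long *present = private;	/* caller-supplied counter */
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (pte_present(*pte))
			(*present)++;
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
}

/* Walk a single VMA and return its number of resident pages. */
static unsigned long count_present(struct vm_area_struct *vma)
{
	unsigned long present = 0;

	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma, count_present_pte_range, &present);
	return present;
}

[The callback matches the action signature kept in struct pmd_walker, so walk_page_range() invokes it for every bottom-level page table exactly as it does for the smaps and clear_refs variants.]
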
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index d96050728c4..523e1098ae8 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -514,7 +514,7 @@ static int __init parse_crash_elf64_headers(void)
/* Do some basic Verification. */
if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
(ehdr.e_type != ET_CORE) ||
- !elf_check_arch(&ehdr) ||
+ !vmcore_elf_check_arch(&ehdr) ||
ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
ehdr.e_version != EV_CURRENT ||