-rw-r--r--  include/linux/memcontrol.h    36
-rw-r--r--  include/linux/mm_types.h       6
-rw-r--r--  include/linux/sched.h          1
-rw-r--r--  kernel/fork.c                 11
-rw-r--r--  mm/memcontrol.c               57
5 files changed, 104 insertions, 7 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 3f121b27677..7d1f119c796 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -3,6 +3,9 @@
* Copyright IBM Corporation, 2007
* Author Balbir Singh <balbir@linux.vnet.ibm.com>
*
+ * Copyright 2007 OpenVZ SWsoft Inc
+ * Author: Pavel Emelianov <xemul@openvz.org>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -17,5 +20,38 @@
#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
+struct mem_cgroup;
+struct page_cgroup;
+
+#ifdef CONFIG_CGROUP_MEM_CONT
+
+extern void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p);
+extern void mm_free_cgroup(struct mm_struct *mm);
+extern void page_assign_page_cgroup(struct page *page,
+ struct page_cgroup *pc);
+extern struct page_cgroup *page_get_page_cgroup(struct page *page);
+
+#else /* CONFIG_CGROUP_MEM_CONT */
+static inline void mm_init_cgroup(struct mm_struct *mm,
+ struct task_struct *p)
+{
+}
+
+static inline void mm_free_cgroup(struct mm_struct *mm)
+{
+}
+
+static inline void page_assign_page_cgroup(struct page *page,
+ struct page_cgroup *pc)
+{
+}
+
+static inline struct page_cgroup *page_get_page_cgroup(struct page *page)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_CGROUP_MEM_CONT */
+
#endif /* _LINUX_MEMCONTROL_H */
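The header uses the usual config-stub pattern: when CONFIG_CGROUP_MEM_CONT is off, the static inline no-ops compile away, so call sites (kernel/fork.c below) need no #ifdef guards of their own. A standalone sketch of the same pattern follows, with hypothetical CONFIG_MY_FEATURE / my_feature_init names that are not part of this patch:

    /* stub_pattern.c -- illustration only; build with or without
     * -DCONFIG_MY_FEATURE and the caller stays identical. */
    #include <stdio.h>

    struct ctx { int users; };

    #ifdef CONFIG_MY_FEATURE
    void my_feature_init(struct ctx *c)              /* "real" implementation */
    {
            c->users = 1;
            printf("feature enabled, users=%d\n", c->users);
    }
    #else
    static inline void my_feature_init(struct ctx *c)    /* no-op stub */
    {
            (void)c;
    }
    #endif

    int main(void)
    {
            struct ctx c = { 0 };
            my_feature_init(&c);                     /* same call either way */
            return 0;
    }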
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index f4c03e0b355..34023c65d46 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -88,6 +88,9 @@ struct page {
void *virtual; /* Kernel virtual address (NULL if
not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
+#ifdef CONFIG_CGROUP_MEM_CONT
+ unsigned long page_cgroup;
+#endif
};
/*
@@ -219,6 +222,9 @@ struct mm_struct {
/* aio bits */
rwlock_t ioctx_list_lock;
struct kioctx *ioctx_list;
+#ifdef CONFIG_CGROUP_MEM_CONT
+ struct mem_cgroup *mem_cgroup;
+#endif
};
#endif /* _LINUX_MM_TYPES_H */
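page_cgroup is declared as an unsigned long even though it only ever holds a pointer; the accessors added to mm/memcontrol.c below simply cast in and out of the word (a common reason for a word-sized slot is to leave the pointer's low alignment bits free for flags, though this patch does not use them). A standalone sketch of that round trip, with hypothetical my_page / my_meta names:

    /* word_slot.c -- sketch of stashing a pointer in an unsigned long slot,
     * mirroring page->page_cgroup; all names here are made up. */
    #include <assert.h>
    #include <stdlib.h>

    struct my_meta { int refcnt; };

    struct my_page {
            unsigned long meta;                      /* like page->page_cgroup */
    };

    static void page_assign_meta(struct my_page *p, struct my_meta *m)
    {
            p->meta = (unsigned long)m;              /* store pointer as a word */
    }

    static struct my_meta *page_get_meta(struct my_page *p)
    {
            return (struct my_meta *)p->meta;        /* cast back on the way out */
    }

    int main(void)
    {
            struct my_page pg = { 0 };
            struct my_meta *m = malloc(sizeof(*m));

            page_assign_meta(&pg, m);
            assert(page_get_meta(&pg) == m);
            free(m);
            return 0;
    }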
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7c8ca05c3ca..8a4812c1c03 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -92,6 +92,7 @@ struct sched_param {
#include <asm/processor.h>
+struct mem_cgroup;
struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
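The sched.h hunk only adds a forward declaration: everything in this patch handles struct mem_cgroup by pointer, so the full definition can stay private to mm/memcontrol.c. A minimal sketch of that opaque-pointer pattern, with hypothetical "widget" names (in the kernel this is split across a header and a .c file):

    /* fwd_decl.c -- sketch of the opaque-pointer pattern; illustration only. */
    #include <stdlib.h>

    struct widget;                                   /* declaration only */

    struct holder {
            struct widget *w;                        /* a pointer is enough here */
    };

    struct widget {                                  /* full layout, "private" part */
            int id;
    };

    int main(void)
    {
            struct holder h;

            h.w = malloc(sizeof(struct widget));     /* definition visible by now */
            free(h.w);
            return 0;
    }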
diff --git a/kernel/fork.c b/kernel/fork.c
index 3995297567a..b2ef8e4fad7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -40,6 +40,7 @@
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
+#include <linux/memcontrol.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
@@ -340,7 +341,7 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
#include <linux/init_task.h>
-static struct mm_struct * mm_init(struct mm_struct * mm)
+static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
{
atomic_set(&mm->mm_users, 1);
atomic_set(&mm->mm_count, 1);
@@ -357,11 +358,14 @@ static struct mm_struct * mm_init(struct mm_struct * mm)
mm->ioctx_list = NULL;
mm->free_area_cache = TASK_UNMAPPED_BASE;
mm->cached_hole_size = ~0UL;
+ mm_init_cgroup(mm, p);
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
return mm;
}
+
+ mm_free_cgroup(mm);
free_mm(mm);
return NULL;
}
@@ -376,7 +380,7 @@ struct mm_struct * mm_alloc(void)
mm = allocate_mm();
if (mm) {
memset(mm, 0, sizeof(*mm));
- mm = mm_init(mm);
+ mm = mm_init(mm, current);
}
return mm;
}
@@ -390,6 +394,7 @@ void fastcall __mmdrop(struct mm_struct *mm)
{
BUG_ON(mm == &init_mm);
mm_free_pgd(mm);
+ mm_free_cgroup(mm);
destroy_context(mm);
free_mm(mm);
}
@@ -511,7 +516,7 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
mm->token_priority = 0;
mm->last_interval = 0;
- if (!mm_init(mm))
+ if (!mm_init(mm, tsk))
goto fail_nomem;
if (init_new_context(tsk, mm))
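In fork.c the two hooks pair up as a get/put on the owning cgroup: mm_init() now calls mm_init_cgroup() for every new mm (both mm_alloc() and dup_mm() go through it), and the matching mm_free_cgroup() runs either on the mm_alloc_pgd() failure path or in __mmdrop() when the mm is finally freed. A standalone sketch of that pairing, with hypothetical names and a plain counter standing in for css_get()/css_put():

    /* mm_lifetime.c -- sketch of the get/put pairing wired up above. */
    #include <assert.h>
    #include <stdbool.h>

    struct group { int refcnt; };
    struct mm    { struct group *grp; };

    static struct group root_group = { 1 };

    static void mm_init_group(struct mm *mm, struct group *g)
    {
            g->refcnt++;                             /* css_get() in the patch */
            mm->grp = g;
    }

    static void mm_free_group(struct mm *mm)
    {
            mm->grp->refcnt--;                       /* css_put() in the patch */
    }

    /* mirrors mm_init(): take the reference, back it out if pgd setup fails */
    static bool mm_init_sketch(struct mm *mm, bool pgd_alloc_ok)
    {
            mm_init_group(mm, &root_group);
            if (pgd_alloc_ok)
                    return true;
            mm_free_group(mm);                       /* new error path in mm_init() */
            return false;
    }

    /* mirrors __mmdrop(): normal teardown drops the reference too */
    static void mmdrop_sketch(struct mm *mm)
    {
            mm_free_group(mm);
    }

    int main(void)
    {
            struct mm a = { 0 }, b = { 0 };

            assert(mm_init_sketch(&a, true));        /* normal life cycle */
            mmdrop_sketch(&a);
            assert(!mm_init_sketch(&b, false));      /* pgd allocation failure */
            assert(root_group.refcnt == 1);          /* balanced either way */
            return 0;
    }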
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 80e48cd9d0c..4d4805eb37c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3,6 +3,9 @@
* Copyright IBM Corporation, 2007
* Author Balbir Singh <balbir@linux.vnet.ibm.com>
*
+ * Copyright 2007 OpenVZ SWsoft Inc
+ * Author: Pavel Emelianov <xemul@openvz.org>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -17,6 +20,7 @@
#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
+#include <linux/mm.h>
struct cgroup_subsys mem_cgroup_subsys;
@@ -35,6 +39,13 @@ struct mem_cgroup {
* the counter to account for memory usage
*/
struct res_counter res;
+ /*
+ * Per cgroup active and inactive list, similar to the
+ * per zone LRU lists.
+ * TODO: Consider making these lists per zone
+ */
+ struct list_head active_list;
+ struct list_head inactive_list;
};
/*
@@ -56,6 +67,37 @@ struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
css);
}
+static inline
+struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
+{
+ return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
+ struct mem_cgroup, css);
+}
+
+void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
+{
+ struct mem_cgroup *mem;
+
+ mem = mem_cgroup_from_task(p);
+ css_get(&mem->css);
+ mm->mem_cgroup = mem;
+}
+
+void mm_free_cgroup(struct mm_struct *mm)
+{
+ css_put(&mm->mem_cgroup->css);
+}
+
+void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
+{
+ page->page_cgroup = (unsigned long)pc;
+}
+
+struct page_cgroup *page_get_page_cgroup(struct page *page)
+{
+ return (struct page_cgroup *)page->page_cgroup;
+}
+
static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
struct file *file, char __user *userbuf, size_t nbytes,
loff_t *ppos)
@@ -91,14 +133,21 @@ static struct cftype mem_cgroup_files[] = {
},
};
+static struct mem_cgroup init_mem_cgroup;
+
static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
struct mem_cgroup *mem;
- mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
+ if (unlikely((cont->parent) == NULL)) {
+ mem = &init_mem_cgroup;
+ init_mm.mem_cgroup = mem;
+ } else
+ mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
+
+ if (mem == NULL)
+ return NULL;
res_counter_init(&mem->res);
return &mem->css;
@@ -123,5 +172,5 @@ struct cgroup_subsys mem_cgroup_subsys = {
.create = mem_cgroup_create,
.destroy = mem_cgroup_destroy,
.populate = mem_cgroup_populate,
- .early_init = 0,
+ .early_init = 1,
};
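With early_init set to 1 the subsystem is brought up during early boot, before dynamic allocation is available, which is why the root group is the static init_mem_cgroup (and init_mm is pointed at it) while every later cgroup still comes from kzalloc(). A standalone sketch of that static-root / dynamic-children split, with hypothetical "grp" names:

    /* root_vs_child.c -- sketch of the static-root / dynamic-children
     * pattern behind init_mem_cgroup; illustration only. */
    #include <stdio.h>
    #include <stdlib.h>

    struct grp { long usage; };

    static struct grp init_grp;                      /* static: no allocator needed */

    static struct grp *grp_create(struct grp *parent)
    {
            if (parent == NULL)                      /* the root, set up at early init */
                    return &init_grp;
            return calloc(1, sizeof(struct grp));    /* children, created later */
    }

    int main(void)
    {
            struct grp *root  = grp_create(NULL);
            struct grp *child = grp_create(root);

            printf("root is the static object: %d\n", root == &init_grp);
            free(child);
            return 0;
    }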