author	Balbir Singh <balbir@linux.vnet.ibm.com>	2009-01-07 18:08:05 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-08 08:31:05 -0800
commit	28dbc4b6a01fb579a9441c7b81e3d3413dc452df (patch)
tree	c45d94be6cd243f3e677ec8279bfec87855802d0 /mm
parent	52bc0d82100cd896213a9a25ec01c1ba87b939db (diff)
memcg: memory cgroup resource counters for hierarchy
Add support for building hierarchies in resource counters. Cgroups allow us to build a deep hierarchy, but we currently don't link the resource counters belonging to the memory controller control groups in the same fashion as the corresponding cgroup entries in the cgroup hierarchy. This patch provides the infrastructure for resource counters that have the same hierarchy as their cgroup counterparts.

This set of patches is based on the resource counter hierarchy patches posted by Pavel Emelianov.

NOTE: Building hierarchies is expensive; deeper hierarchies imply charging all the way up to the root. It is known that hierarchies are expensive, so the user needs to be careful and aware of the trade-offs before creating very deep ones.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
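To illustrate the idea behind linking counters to their parents, here is a minimal user-space sketch. It is not the kernel's actual res_counter implementation: the field layout, the charge_one/uncharge_one helpers and the absence of locking are simplifications assumed for this example. A charge against a child counter propagates to every ancestor; if any ancestor would exceed its limit, the partial charges are rolled back and the failing counter is reported, mirroring the new fail_res argument visible in the diff below.

/*
 * Minimal user-space sketch of hierarchical resource counter charging.
 * Names and details are simplified and do not match the in-kernel
 * res_counter API exactly.
 */
#include <stdio.h>
#include <stddef.h>

struct res_counter {
	unsigned long usage;		/* bytes currently charged */
	unsigned long limit;		/* hard limit in bytes */
	struct res_counter *parent;	/* NULL for the root counter */
};

static void res_counter_init(struct res_counter *c, struct res_counter *parent,
			     unsigned long limit)
{
	c->usage = 0;
	c->limit = limit;
	c->parent = parent;
}

/* Try to charge a single counter; fail if its limit would be exceeded. */
static int charge_one(struct res_counter *c, unsigned long val)
{
	if (c->usage + val > c->limit)
		return -1;
	c->usage += val;
	return 0;
}

static void uncharge_one(struct res_counter *c, unsigned long val)
{
	c->usage -= val;
}

/*
 * Charge 'val' against 'c' and every ancestor up to the root.  On failure,
 * unwind the charges already applied and report which counter failed.
 */
static int res_counter_charge(struct res_counter *c, unsigned long val,
			      struct res_counter **fail_res)
{
	struct res_counter *cur, *undo;

	for (cur = c; cur != NULL; cur = cur->parent) {
		if (charge_one(cur, val)) {
			if (fail_res)
				*fail_res = cur;
			/* roll back the counters charged so far */
			for (undo = c; undo != cur; undo = undo->parent)
				uncharge_one(undo, val);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct res_counter root, child;
	struct res_counter *fail = NULL;

	res_counter_init(&root, NULL, 8192);	/* root: 8 KB limit */
	res_counter_init(&child, &root, 4096);	/* child: 4 KB limit */

	/* First page fits in both the child and the root. */
	printf("charge 1: %d\n", res_counter_charge(&child, 4096, &fail));
	/* Second page exceeds the child's limit; nothing stays charged. */
	printf("charge 2: %d (failed at %s)\n",
	       res_counter_charge(&child, 4096, &fail),
	       fail == &child ? "child" : "root");
	printf("usage: child=%lu root=%lu\n", child.usage, root.usage);
	return 0;
}

Charging eagerly all the way to the root is the design choice that makes deep hierarchies more expensive, which is why the commit message above warns about creating very deep ones.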
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c | 20
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9846f617115..e72fb2b4a7d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -471,6 +471,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
{
struct mem_cgroup *mem;
int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
+ struct res_counter *fail_res;
/*
* We always charge the cgroup the mm_struct belongs to.
* The mm_struct's mem_cgroup changes on task migration if the
@@ -499,11 +500,12 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
int ret;
bool noswap = false;
- ret = res_counter_charge(&mem->res, PAGE_SIZE);
+ ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
if (likely(!ret)) {
if (!do_swap_account)
break;
- ret = res_counter_charge(&mem->memsw, PAGE_SIZE);
+ ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
+ &fail_res);
if (likely(!ret))
break;
/* mem+swap counter fails */
@@ -1709,22 +1711,26 @@ static void __init enable_swap_cgroup(void)
static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
- struct mem_cgroup *mem;
+ struct mem_cgroup *mem, *parent;
int node;
mem = mem_cgroup_alloc();
if (!mem)
return ERR_PTR(-ENOMEM);
- res_counter_init(&mem->res);
- res_counter_init(&mem->memsw);
-
for_each_node_state(node, N_POSSIBLE)
if (alloc_mem_cgroup_per_zone_info(mem, node))
goto free_out;
/* root ? */
- if (cont->parent == NULL)
+ if (cont->parent == NULL) {
enable_swap_cgroup();
+ parent = NULL;
+ } else
+ parent = mem_cgroup_from_cont(cont->parent);
+
+ res_counter_init(&mem->res, parent ? &parent->res : NULL);
+ res_counter_init(&mem->memsw, parent ? &parent->memsw : NULL);
+
return &mem->css;
free_out: