Diffstat (limited to 'kernel')
-rw-r--r--   kernel/Makefile       |   1
-rw-r--r--   kernel/cgroup.c       | 318
-rw-r--r--   kernel/cpuset.c       | 394
-rw-r--r--   kernel/fork.c         |  11
-rw-r--r--   kernel/kexec.c        |  18
-rw-r--r--   kernel/pid.c          |   1
-rw-r--r--   kernel/res_counter.c  | 134
-rw-r--r--   kernel/sysctl.c       |   9
8 files changed, 631 insertions, 255 deletions
diff --git a/kernel/Makefile b/kernel/Makefile index 135a1b94344..685697c0a18 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -43,6 +43,7 @@ obj-$(CONFIG_CGROUP_DEBUG) += cgroup_debug.o obj-$(CONFIG_CPUSETS) += cpuset.o obj-$(CONFIG_CGROUP_NS) += ns_cgroup.o obj-$(CONFIG_IKCONFIG) += configs.o +obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o obj-$(CONFIG_STOP_MACHINE) += stop_machine.o obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o obj-$(CONFIG_AUDIT) += audit.o auditfilter.o diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 1a3c23936d4..4766bb65e4d 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -141,7 +141,7 @@ enum { ROOT_NOPREFIX, /* mounted subsystems have no named prefix */ }; -inline int cgroup_is_releasable(const struct cgroup *cgrp) +static int cgroup_is_releasable(const struct cgroup *cgrp) { const int bits = (1 << CGRP_RELEASABLE) | @@ -149,7 +149,7 @@ inline int cgroup_is_releasable(const struct cgroup *cgrp) return (cgrp->flags & bits) == bits; } -inline int notify_on_release(const struct cgroup *cgrp) +static int notify_on_release(const struct cgroup *cgrp) { return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); } @@ -489,7 +489,7 @@ static struct css_set *find_css_set( * Any task can increment and decrement the count field without lock. * So in general, code holding cgroup_mutex can't rely on the count * field not changing. However, if the count goes to zero, then only - * attach_task() can increment it again. Because a count of zero + * cgroup_attach_task() can increment it again. Because a count of zero * means that no tasks are currently attached, therefore there is no * way a task attached to that cgroup can fork (the other way to * increment the count). So code holding cgroup_mutex can safely @@ -520,17 +520,17 @@ static struct css_set *find_css_set( * The task_lock() exception * * The need for this exception arises from the action of - * attach_task(), which overwrites one tasks cgroup pointer with + * cgroup_attach_task(), which overwrites one tasks cgroup pointer with * another. It does so using cgroup_mutexe, however there are * several performance critical places that need to reference * task->cgroup without the expense of grabbing a system global * mutex. Therefore except as noted below, when dereferencing or, as - * in attach_task(), modifying a task'ss cgroup pointer we use + * in cgroup_attach_task(), modifying a task'ss cgroup pointer we use * task_lock(), which acts on a spinlock (task->alloc_lock) already in * the task_struct routinely used for such matters. * * P.S. One more locking exception. RCU is used to guard the - * update of a tasks cgroup pointer by attach_task() + * update of a tasks cgroup pointer by cgroup_attach_task() */ /** @@ -586,11 +586,27 @@ static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb) return inode; } +/* + * Call subsys's pre_destroy handler. + * This is called before css refcnt check. + */ + +static void cgroup_call_pre_destroy(struct cgroup *cgrp) +{ + struct cgroup_subsys *ss; + for_each_subsys(cgrp->root, ss) + if (ss->pre_destroy && cgrp->subsys[ss->subsys_id]) + ss->pre_destroy(ss, cgrp); + return; +} + + static void cgroup_diput(struct dentry *dentry, struct inode *inode) { /* is dentry a directory ? 
if so, kfree() associated cgroup */ if (S_ISDIR(inode->i_mode)) { struct cgroup *cgrp = dentry->d_fsdata; + struct cgroup_subsys *ss; BUG_ON(!(cgroup_is_removed(cgrp))); /* It's possible for external users to be holding css * reference counts on a cgroup; css_put() needs to @@ -599,6 +615,23 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode) * queue the cgroup to be handled by the release * agent */ synchronize_rcu(); + + mutex_lock(&cgroup_mutex); + /* + * Release the subsystem state objects. + */ + for_each_subsys(cgrp->root, ss) { + if (cgrp->subsys[ss->subsys_id]) + ss->destroy(ss, cgrp); + } + + cgrp->root->number_of_cgroups--; + mutex_unlock(&cgroup_mutex); + + /* Drop the active superblock reference that we took when we + * created the cgroup */ + deactivate_super(cgrp->root->sb); + kfree(cgrp); } iput(inode); @@ -1161,7 +1194,7 @@ static void get_first_subsys(const struct cgroup *cgrp, * Call holding cgroup_mutex. May take task_lock of * the task 'pid' during call. */ -static int attach_task(struct cgroup *cgrp, struct task_struct *tsk) +int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) { int retval = 0; struct cgroup_subsys *ss; @@ -1181,9 +1214,8 @@ static int attach_task(struct cgroup *cgrp, struct task_struct *tsk) for_each_subsys(root, ss) { if (ss->can_attach) { retval = ss->can_attach(ss, cgrp, tsk); - if (retval) { + if (retval) return retval; - } } } @@ -1192,9 +1224,8 @@ static int attach_task(struct cgroup *cgrp, struct task_struct *tsk) * based on its final set of cgroups */ newcg = find_css_set(cg, cgrp); - if (!newcg) { + if (!newcg) return -ENOMEM; - } task_lock(tsk); if (tsk->flags & PF_EXITING) { @@ -1214,9 +1245,8 @@ static int attach_task(struct cgroup *cgrp, struct task_struct *tsk) write_unlock(&css_set_lock); for_each_subsys(root, ss) { - if (ss->attach) { + if (ss->attach) ss->attach(ss, cgrp, oldcgrp, tsk); - } } set_bit(CGRP_RELEASABLE, &oldcgrp->flags); synchronize_rcu(); @@ -1239,7 +1269,7 @@ static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf) if (pid) { rcu_read_lock(); - tsk = find_task_by_pid(pid); + tsk = find_task_by_vpid(pid); if (!tsk || tsk->flags & PF_EXITING) { rcu_read_unlock(); return -ESRCH; @@ -1257,7 +1287,7 @@ static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf) get_task_struct(tsk); } - ret = attach_task(cgrp, tsk); + ret = cgroup_attach_task(cgrp, tsk); put_task_struct(tsk); return ret; } @@ -1329,9 +1359,14 @@ static ssize_t cgroup_common_file_write(struct cgroup *cgrp, goto out1; } buffer[nbytes] = 0; /* nul-terminate */ + strstrip(buffer); /* strip -just- trailing whitespace */ mutex_lock(&cgroup_mutex); + /* + * This was already checked for in cgroup_file_write(), but + * check again now we're holding cgroup_mutex. 
+ */ if (cgroup_is_removed(cgrp)) { retval = -ENODEV; goto out2; @@ -1349,24 +1384,9 @@ static ssize_t cgroup_common_file_write(struct cgroup *cgrp, clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); break; case FILE_RELEASE_AGENT: - { - struct cgroupfs_root *root = cgrp->root; - /* Strip trailing newline */ - if (nbytes && (buffer[nbytes-1] == '\n')) { - buffer[nbytes-1] = 0; - } - if (nbytes < sizeof(root->release_agent_path)) { - /* We never write anything other than '\0' - * into the last char of release_agent_path, - * so it always remains a NUL-terminated - * string */ - strncpy(root->release_agent_path, buffer, nbytes); - root->release_agent_path[nbytes] = 0; - } else { - retval = -ENOSPC; - } + BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); + strcpy(cgrp->root->release_agent_path, buffer); break; - } default: retval = -EINVAL; goto out2; @@ -1387,7 +1407,7 @@ static ssize_t cgroup_file_write(struct file *file, const char __user *buf, struct cftype *cft = __d_cft(file->f_dentry); struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); - if (!cft) + if (!cft || cgroup_is_removed(cgrp)) return -ENODEV; if (cft->write) return cft->write(cgrp, cft, file, buf, nbytes, ppos); @@ -1457,7 +1477,7 @@ static ssize_t cgroup_file_read(struct file *file, char __user *buf, struct cftype *cft = __d_cft(file->f_dentry); struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); - if (!cft) + if (!cft || cgroup_is_removed(cgrp)) return -ENODEV; if (cft->read) @@ -1675,6 +1695,29 @@ static void cgroup_advance_iter(struct cgroup *cgrp, it->task = cg->tasks.next; } +/* + * To reduce the fork() overhead for systems that are not actually + * using their cgroups capability, we don't maintain the lists running + * through each css_set to its tasks until we see the list actually + * used - in other words after the first call to cgroup_iter_start(). + * + * The tasklist_lock is not held here, as do_each_thread() and + * while_each_thread() are protected by RCU. + */ +void cgroup_enable_task_cg_lists(void) +{ + struct task_struct *p, *g; + write_lock(&css_set_lock); + use_task_css_set_links = 1; + do_each_thread(g, p) { + task_lock(p); + if (list_empty(&p->cg_list)) + list_add(&p->cg_list, &p->cgroups->tasks); + task_unlock(p); + } while_each_thread(g, p); + write_unlock(&css_set_lock); +} + void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it) { /* @@ -1682,18 +1725,9 @@ void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it) * we need to enable the list linking each css_set to its * tasks, and fix up all existing tasks. 
*/ - if (!use_task_css_set_links) { - struct task_struct *p, *g; - write_lock(&css_set_lock); - use_task_css_set_links = 1; - do_each_thread(g, p) { - task_lock(p); - if (list_empty(&p->cg_list)) - list_add(&p->cg_list, &p->cgroups->tasks); - task_unlock(p); - } while_each_thread(g, p); - write_unlock(&css_set_lock); - } + if (!use_task_css_set_links) + cgroup_enable_task_cg_lists(); + read_lock(&css_set_lock); it->cg_link = &cgrp->css_sets; cgroup_advance_iter(cgrp, it); @@ -1726,6 +1760,166 @@ void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it) read_unlock(&css_set_lock); } +static inline int started_after_time(struct task_struct *t1, + struct timespec *time, + struct task_struct *t2) +{ + int start_diff = timespec_compare(&t1->start_time, time); + if (start_diff > 0) { + return 1; + } else if (start_diff < 0) { + return 0; + } else { + /* + * Arbitrarily, if two processes started at the same + * time, we'll say that the lower pointer value + * started first. Note that t2 may have exited by now + * so this may not be a valid pointer any longer, but + * that's fine - it still serves to distinguish + * between two tasks started (effectively) simultaneously. + */ + return t1 > t2; + } +} + +/* + * This function is a callback from heap_insert() and is used to order + * the heap. + * In this case we order the heap in descending task start time. + */ +static inline int started_after(void *p1, void *p2) +{ + struct task_struct *t1 = p1; + struct task_struct *t2 = p2; + return started_after_time(t1, &t2->start_time, t2); +} + +/** + * cgroup_scan_tasks - iterate though all the tasks in a cgroup + * @scan: struct cgroup_scanner containing arguments for the scan + * + * Arguments include pointers to callback functions test_task() and + * process_task(). + * Iterate through all the tasks in a cgroup, calling test_task() for each, + * and if it returns true, call process_task() for it also. + * The test_task pointer may be NULL, meaning always true (select all tasks). + * Effectively duplicates cgroup_iter_{start,next,end}() + * but does not lock css_set_lock for the call to process_task(). + * The struct cgroup_scanner may be embedded in any structure of the caller's + * creation. + * It is guaranteed that process_task() will act on every task that + * is a member of the cgroup for the duration of this call. This + * function may or may not call process_task() for tasks that exit + * or move to a different cgroup during the call, or are forked or + * move into the cgroup during the call. + * + * Note that test_task() may be called with locks held, and may in some + * situations be called multiple times for the same task, so it should + * be cheap. + * If the heap pointer in the struct cgroup_scanner is non-NULL, a heap has been + * pre-allocated and will be used for heap operations (and its "gt" member will + * be overwritten), else a temporary heap will be used (allocation of which + * may cause this function to fail). 
+ */ +int cgroup_scan_tasks(struct cgroup_scanner *scan) +{ + int retval, i; + struct cgroup_iter it; + struct task_struct *p, *dropped; + /* Never dereference latest_task, since it's not refcounted */ + struct task_struct *latest_task = NULL; + struct ptr_heap tmp_heap; + struct ptr_heap *heap; + struct timespec latest_time = { 0, 0 }; + + if (scan->heap) { + /* The caller supplied our heap and pre-allocated its memory */ + heap = scan->heap; + heap->gt = &started_after; + } else { + /* We need to allocate our own heap memory */ + heap = &tmp_heap; + retval = heap_init(heap, PAGE_SIZE, GFP_KERNEL, &started_after); + if (retval) + /* cannot allocate the heap */ + return retval; + } + + again: + /* + * Scan tasks in the cgroup, using the scanner's "test_task" callback + * to determine which are of interest, and using the scanner's + * "process_task" callback to process any of them that need an update. + * Since we don't want to hold any locks during the task updates, + * gather tasks to be processed in a heap structure. + * The heap is sorted by descending task start time. + * If the statically-sized heap fills up, we overflow tasks that + * started later, and in future iterations only consider tasks that + * started after the latest task in the previous pass. This + * guarantees forward progress and that we don't miss any tasks. + */ + heap->size = 0; + cgroup_iter_start(scan->cg, &it); + while ((p = cgroup_iter_next(scan->cg, &it))) { + /* + * Only affect tasks that qualify per the caller's callback, + * if he provided one + */ + if (scan->test_task && !scan->test_task(p, scan)) + continue; + /* + * Only process tasks that started after the last task + * we processed + */ + if (!started_after_time(p, &latest_time, latest_task)) + continue; + dropped = heap_insert(heap, p); + if (dropped == NULL) { + /* + * The new task was inserted; the heap wasn't + * previously full + */ + get_task_struct(p); + } else if (dropped != p) { + /* + * The new task was inserted, and pushed out a + * different task + */ + get_task_struct(p); + put_task_struct(dropped); + } + /* + * Else the new task was newer than anything already in + * the heap and wasn't inserted + */ + } + cgroup_iter_end(scan->cg, &it); + + if (heap->size) { + for (i = 0; i < heap->size; i++) { + struct task_struct *p = heap->ptrs[i]; + if (i == 0) { + latest_time = p->start_time; + latest_task = p; + } + /* Process the task per the caller's callback */ + scan->process_task(p, scan); + put_task_struct(p); + } + /* + * If we had to process any tasks at all, scan again + * in case some of them were in the middle of forking + * children that didn't get processed. + * Not the most efficient way to do it, but it avoids + * having to take callback_mutex in the fork path + */ + goto again; + } + if (heap == &tmp_heap) + heap_free(&tmp_heap); + return 0; +} + /* * Stuff for reading the 'tasks' file. * @@ -1761,7 +1955,7 @@ static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cgrp) while ((tsk = cgroup_iter_next(cgrp, &it))) { if (unlikely(n == npids)) break; - pidarray[n++] = task_pid_nr(tsk); + pidarray[n++] = task_pid_vnr(tsk); } cgroup_iter_end(cgrp, &it); return n; @@ -2126,9 +2320,8 @@ static inline int cgroup_has_css_refs(struct cgroup *cgrp) * matter, since it can only happen if the cgroup * has been deleted and hence no longer needs the * release agent to be called anyway. 
*/ - if (css && atomic_read(&css->refcnt)) { + if (css && atomic_read(&css->refcnt)) return 1; - } } return 0; } @@ -2138,7 +2331,6 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) struct cgroup *cgrp = dentry->d_fsdata; struct dentry *d; struct cgroup *parent; - struct cgroup_subsys *ss; struct super_block *sb; struct cgroupfs_root *root; @@ -2157,17 +2349,19 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) parent = cgrp->parent; root = cgrp->root; sb = root->sb; + /* + * Call pre_destroy handlers of subsys + */ + cgroup_call_pre_destroy(cgrp); + /* + * Notify subsyses that rmdir() request comes. + */ if (cgroup_has_css_refs(cgrp)) { mutex_unlock(&cgroup_mutex); return -EBUSY; } - for_each_subsys(root, ss) { - if (cgrp->subsys[ss->subsys_id]) - ss->destroy(ss, cgrp); - } - spin_lock(&release_list_lock); set_bit(CGRP_REMOVED, &cgrp->flags); if (!list_empty(&cgrp->release_list)) @@ -2182,15 +2376,11 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) cgroup_d_remove_dir(d); dput(d); - root->number_of_cgroups--; set_bit(CGRP_RELEASABLE, &parent->flags); check_for_release(parent); mutex_unlock(&cgroup_mutex); - /* Drop the active superblock reference that we took when we - * created the cgroup */ - deactivate_super(sb); return 0; } @@ -2324,7 +2514,7 @@ out: * - Used for /proc/<pid>/cgroup. * - No need to task_lock(tsk) on this tsk->cgroup reference, as it * doesn't really matter if tsk->cgroup changes after we read it, - * and we take cgroup_mutex, keeping attach_task() from changing it + * and we take cgroup_mutex, keeping cgroup_attach_task() from changing it * anyway. No need to check that tsk->cgroup != NULL, thanks to * the_top_cgroup_hack in cgroup_exit(), which sets an exiting tasks * cgroup to top_cgroup. @@ -2435,7 +2625,7 @@ static struct file_operations proc_cgroupstats_operations = { * A pointer to the shared css_set was automatically copied in * fork.c by dup_task_struct(). However, we ignore that copy, since * it was not made under the protection of RCU or cgroup_mutex, so - * might no longer be a valid cgroup pointer. attach_task() might + * might no longer be a valid cgroup pointer. cgroup_attach_task() might * have already changed current->cgroups, allowing the previously * referenced cgroup group to be removed and freed. * @@ -2514,8 +2704,8 @@ void cgroup_post_fork(struct task_struct *child) * attach us to a different cgroup, decrementing the count on * the first cgroup that we never incremented. But in this case, * top_cgroup isn't going away, and either task has PF_EXITING set, - * which wards off any attach_task() attempts, or task is a failed - * fork, never visible to attach_task. + * which wards off any cgroup_attach_task() attempts, or task is a failed + * fork, never visible to cgroup_attach_task. * */ void cgroup_exit(struct task_struct *tsk, int run_callbacks) @@ -2655,7 +2845,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys) } /* All seems fine. 
Finish by moving the task into the new cgroup */ - ret = attach_task(child, tsk); + ret = cgroup_attach_task(child, tsk); mutex_unlock(&cgroup_mutex); out_release: diff --git a/kernel/cpuset.c b/kernel/cpuset.c index cfaf6419d81..67b2bfe2781 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -38,7 +38,6 @@ #include <linux/mount.h> #include <linux/namei.h> #include <linux/pagemap.h> -#include <linux/prio_heap.h> #include <linux/proc_fs.h> #include <linux/rcupdate.h> #include <linux/sched.h> @@ -56,6 +55,8 @@ #include <asm/atomic.h> #include <linux/mutex.h> #include <linux/kfifo.h> +#include <linux/workqueue.h> +#include <linux/cgroup.h> /* * Tracks how many cpusets are currently defined in system. @@ -64,7 +65,7 @@ */ int number_of_cpusets __read_mostly; -/* Retrieve the cpuset from a cgroup */ +/* Forward declare cgroup structures */ struct cgroup_subsys cpuset_subsys; struct cpuset; @@ -96,6 +97,9 @@ struct cpuset { /* partition number for rebuild_sched_domains() */ int pn; + + /* used for walking a cpuset heirarchy */ + struct list_head stack_list; }; /* Retrieve the cpuset for a cgroup */ @@ -111,7 +115,10 @@ static inline struct cpuset *task_cs(struct task_struct *task) return container_of(task_subsys_state(task, cpuset_subsys_id), struct cpuset, css); } - +struct cpuset_hotplug_scanner { + struct cgroup_scanner scan; + struct cgroup *to; +}; /* bits in struct cpuset flags field */ typedef enum { @@ -160,17 +167,17 @@ static inline int is_spread_slab(const struct cpuset *cs) * number, and avoid having to lock and reload mems_allowed unless * the cpuset they're using changes generation. * - * A single, global generation is needed because attach_task() could + * A single, global generation is needed because cpuset_attach_task() could * reattach a task to a different cpuset, which must not have its * generation numbers aliased with those of that tasks previous cpuset. * * Generations are needed for mems_allowed because one task cannot - * modify anothers memory placement. So we must enable every task, + * modify another's memory placement. So we must enable every task, * on every visit to __alloc_pages(), to efficiently check whether * its current->cpuset->mems_allowed has changed, requiring an update * of its current->mems_allowed. * - * Since cpuset_mems_generation is guarded by manage_mutex, + * Since writes to cpuset_mems_generation are guarded by the cgroup lock * there is no need to mark it atomic. */ static int cpuset_mems_generation; @@ -182,17 +189,20 @@ static struct cpuset top_cpuset = { }; /* - * We have two global cpuset mutexes below. They can nest. - * It is ok to first take manage_mutex, then nest callback_mutex. We also - * require taking task_lock() when dereferencing a tasks cpuset pointer. - * See "The task_lock() exception", at the end of this comment. + * There are two global mutexes guarding cpuset structures. The first + * is the main control groups cgroup_mutex, accessed via + * cgroup_lock()/cgroup_unlock(). The second is the cpuset-specific + * callback_mutex, below. They can nest. It is ok to first take + * cgroup_mutex, then nest callback_mutex. We also require taking + * task_lock() when dereferencing a task's cpuset pointer. See "The + * task_lock() exception", at the end of this comment. * * A task must hold both mutexes to modify cpusets. 
If a task - * holds manage_mutex, then it blocks others wanting that mutex, + * holds cgroup_mutex, then it blocks others wanting that mutex, * ensuring that it is the only task able to also acquire callback_mutex * and be able to modify cpusets. It can perform various checks on * the cpuset structure first, knowing nothing will change. It can - * also allocate memory while just holding manage_mutex. While it is + * also allocate memory while just holding cgroup_mutex. While it is * performing these checks, various callback routines can briefly * acquire callback_mutex to query cpusets. Once it is ready to make * the changes, it takes callback_mutex, blocking everyone else. @@ -208,60 +218,16 @@ static struct cpuset top_cpuset = { * The task_struct fields mems_allowed and mems_generation may only * be accessed in the context of that task, so require no locks. * - * Any task can increment and decrement the count field without lock. - * So in general, code holding manage_mutex or callback_mutex can't rely - * on the count field not changing. However, if the count goes to - * zero, then only attach_task(), which holds both mutexes, can - * increment it again. Because a count of zero means that no tasks - * are currently attached, therefore there is no way a task attached - * to that cpuset can fork (the other way to increment the count). - * So code holding manage_mutex or callback_mutex can safely assume that - * if the count is zero, it will stay zero. Similarly, if a task - * holds manage_mutex or callback_mutex on a cpuset with zero count, it - * knows that the cpuset won't be removed, as cpuset_rmdir() needs - * both of those mutexes. - * * The cpuset_common_file_write handler for operations that modify - * the cpuset hierarchy holds manage_mutex across the entire operation, + * the cpuset hierarchy holds cgroup_mutex across the entire operation, * single threading all such cpuset modifications across the system. * * The cpuset_common_file_read() handlers only hold callback_mutex across * small pieces of code, such as when reading out possibly multi-word * cpumasks and nodemasks. * - * The fork and exit callbacks cpuset_fork() and cpuset_exit(), don't - * (usually) take either mutex. These are the two most performance - * critical pieces of code here. The exception occurs on cpuset_exit(), - * when a task in a notify_on_release cpuset exits. Then manage_mutex - * is taken, and if the cpuset count is zero, a usermode call made - * to /sbin/cpuset_release_agent with the name of the cpuset (path - * relative to the root of cpuset file system) as the argument. - * - * A cpuset can only be deleted if both its 'count' of using tasks - * is zero, and its list of 'children' cpusets is empty. Since all - * tasks in the system use _some_ cpuset, and since there is always at - * least one task in the system (init), therefore, top_cpuset - * always has either children cpusets and/or using tasks. So we don't - * need a special hack to ensure that top_cpuset cannot be deleted. - * - * The above "Tale of Two Semaphores" would be complete, but for: - * - * The task_lock() exception - * - * The need for this exception arises from the action of attach_task(), - * which overwrites one tasks cpuset pointer with another. It does - * so using both mutexes, however there are several performance - * critical places that need to reference task->cpuset without the - * expense of grabbing a system global mutex. 
Therefore except as - * noted below, when dereferencing or, as in attach_task(), modifying - * a tasks cpuset pointer we use task_lock(), which acts on a spinlock - * (task->alloc_lock) already in the task_struct routinely used for - * such matters. - * - * P.S. One more locking exception. RCU is used to guard the - * update of a tasks cpuset pointer by attach_task() and the - * access of task->cpuset->mems_generation via that pointer in - * the routine cpuset_update_task_memory_state(). + * Accessing a task's cpuset should be done in accordance with the + * guidelines for accessing subsystem state in kernel/cgroup.c */ static DEFINE_MUTEX(callback_mutex); @@ -354,15 +320,14 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask) * Do not call this routine if in_interrupt(). * * Call without callback_mutex or task_lock() held. May be - * called with or without manage_mutex held. Thanks in part to - * 'the_top_cpuset_hack', the tasks cpuset pointer will never + * called with or without cgroup_mutex held. Thanks in part to + * 'the_top_cpuset_hack', the task's cpuset pointer will never * be NULL. This routine also might acquire callback_mutex and * current->mm->mmap_sem during call. * * Reading current->cpuset->mems_generation doesn't need task_lock * to guard the current->cpuset derefence, because it is guarded - * from concurrent freeing of current->cpuset by attach_task(), - * using RCU. + * from concurrent freeing of current->cpuset using RCU. * * The rcu_dereference() is technically probably not needed, * as I don't actually mind if I see a new cpuset pointer but @@ -424,7 +389,7 @@ void cpuset_update_task_memory_state(void) * * One cpuset is a subset of another if all its allowed CPUs and * Memory Nodes are a subset of the other, and its exclusive flags - * are only set if the other's are set. Call holding manage_mutex. + * are only set if the other's are set. Call holding cgroup_mutex. */ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) @@ -442,7 +407,7 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) * If we replaced the flag and mask values of the current cpuset * (cur) with those values in the trial cpuset (trial), would * our various subset and exclusive rules still be valid? Presumes - * manage_mutex held. + * cgroup_mutex held. * * 'cur' is the address of an actual, in-use cpuset. Operations * such as list traversal that depend on the actual address of the @@ -476,7 +441,10 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial) if (!is_cpuset_subset(trial, par)) return -EACCES; - /* If either I or some sibling (!= me) is exclusive, we can't overlap */ + /* + * If either I or some sibling (!= me) is exclusive, we can't + * overlap + */ list_for_each_entry(cont, &par->css.cgroup->children, sibling) { c = cgroup_cs(cont); if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && @@ -732,22 +700,50 @@ static inline int started_after(void *p1, void *p2) return started_after_time(t1, &t2->start_time, t2); } -/* - * Call with manage_mutex held. May take callback_mutex during call. +/** + * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's + * @tsk: task to test + * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner + * + * Call with cgroup_mutex held. May take callback_mutex during call. + * Called for each task in a cgroup by cgroup_scan_tasks(). 
+ * Return nonzero if this tasks's cpus_allowed mask should be changed (in other + * words, if its mask is not equal to its cpuset's mask). + */ +int cpuset_test_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan) +{ + return !cpus_equal(tsk->cpus_allowed, + (cgroup_cs(scan->cg))->cpus_allowed); +} + +/** + * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's + * @tsk: task to test + * @scan: struct cgroup_scanner containing the cgroup of the task + * + * Called by cgroup_scan_tasks() for each task in a cgroup whose + * cpus_allowed mask needs to be changed. + * + * We don't need to re-check for the cgroup/cpuset membership, since we're + * holding cgroup_lock() at this point. */ +void cpuset_change_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan) +{ + set_cpus_allowed(tsk, (cgroup_cs(scan->cg))->cpus_allowed); +} +/** + * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it + * @cs: the cpuset to consider + * @buf: buffer of cpu numbers written to this cpuset + */ static int update_cpumask(struct cpuset *cs, char *buf) { struct cpuset trialcs; - int retval, i; - int is_load_balanced; - struct cgroup_iter it; - struct cgroup *cgrp = cs->css.cgroup; - struct task_struct *p, *dropped; - /* Never dereference latest_task, since it's not refcounted */ - struct task_struct *latest_task = NULL; + struct cgroup_scanner scan; struct ptr_heap heap; - struct timespec latest_time = { 0, 0 }; + int retval; + int is_load_balanced; /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */ if (cs == &top_cpuset) @@ -756,7 +752,7 @@ static int update_cpumask(struct cpuset *cs, char *buf) trialcs = *cs; /* - * An empty cpus_allowed is ok iff there are no tasks in the cpuset. + * An empty cpus_allowed is ok only if the cpuset has no tasks. * Since cpulist_parse() fails on an empty mask, we special case * that parsing. The validate_change() call ensures that cpusets * with tasks have cpus. @@ -777,6 +773,7 @@ static int update_cpumask(struct cpuset *cs, char *buf) /* Nothing to do if the cpus didn't change */ if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed)) return 0; + retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, &started_after); if (retval) return retval; @@ -787,62 +784,19 @@ static int update_cpumask(struct cpuset *cs, char *buf) cs->cpus_allowed = trialcs.cpus_allowed; mutex_unlock(&callback_mutex); - again: /* * Scan tasks in the cpuset, and update the cpumasks of any - * that need an update. Since we can't call set_cpus_allowed() - * while holding tasklist_lock, gather tasks to be processed - * in a heap structure. If the statically-sized heap fills up, - * overflow tasks that started later, and in future iterations - * only consider tasks that started after the latest task in - * the previous pass. This guarantees forward progress and - * that we don't miss any tasks + * that need an update. 
*/ - heap.size = 0; - cgroup_iter_start(cgrp, &it); - while ((p = cgroup_iter_next(cgrp, &it))) { - /* Only affect tasks that don't have the right cpus_allowed */ - if (cpus_equal(p->cpus_allowed, cs->cpus_allowed)) - continue; - /* - * Only process tasks that started after the last task - * we processed - */ - if (!started_after_time(p, &latest_time, latest_task)) - continue; - dropped = heap_insert(&heap, p); - if (dropped == NULL) { - get_task_struct(p); - } else if (dropped != p) { - get_task_struct(p); - put_task_struct(dropped); - } - } - cgroup_iter_end(cgrp, &it); - if (heap.size) { - for (i = 0; i < heap.size; i++) { - struct task_struct *p = heap.ptrs[i]; - if (i == 0) { - latest_time = p->start_time; - latest_task = p; - } - set_cpus_allowed(p, cs->cpus_allowed); - put_task_struct(p); - } - /* - * If we had to process any tasks at all, scan again - * in case some of them were in the middle of forking - * children that didn't notice the new cpumask - * restriction. Not the most efficient way to do it, - * but it avoids having to take callback_mutex in the - * fork path - */ - goto again; - } + scan.cg = cs->css.cgroup; + scan.test_task = cpuset_test_cpumask; + scan.process_task = cpuset_change_cpumask; + scan.heap = &heap; + cgroup_scan_tasks(&scan); heap_free(&heap); + if (is_load_balanced) rebuild_sched_domains(); - return 0; } @@ -854,11 +808,11 @@ static int update_cpumask(struct cpuset *cs, char *buf) * Temporarilly set tasks mems_allowed to target nodes of migration, * so that the migration code can allocate pages on these nodes. * - * Call holding manage_mutex, so our current->cpuset won't change - * during this call, as manage_mutex holds off any attach_task() + * Call holding cgroup_mutex, so current's cpuset won't change + * during this call, as manage_mutex holds off any cpuset_attach() * calls. Therefore we don't need to take task_lock around the * call to guarantee_online_mems(), as we know no one is changing - * our tasks cpuset. + * our task's cpuset. * * Hold callback_mutex around the two modifications of our tasks * mems_allowed to synchronize with cpuset_mems_allowed(). @@ -903,7 +857,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, * the cpuset is marked 'memory_migrate', migrate the tasks * pages to the new memory. * - * Call with manage_mutex held. May take callback_mutex during call. + * Call with cgroup_mutex held. May take callback_mutex during call. * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, * lock each such tasks mm->mmap_sem, scan its vma's and rebind * their mempolicies to the cpusets new mems_allowed. @@ -1016,7 +970,7 @@ static int update_nodemask(struct cpuset *cs, char *buf) * tasklist_lock. Forks can happen again now - the mpol_copy() * cpuset_being_rebound check will catch such forks, and rebind * their vma mempolicies too. Because we still hold the global - * cpuset manage_mutex, we know that no other rebind effort will + * cgroup_mutex, we know that no other rebind effort will * be contending for the global variable cpuset_being_rebound. * It's ok if we rebind the same mm twice; mpol_rebind_mm() * is idempotent. Also migrate pages in each mm to new nodes. @@ -1031,7 +985,7 @@ static int update_nodemask(struct cpuset *cs, char *buf) mmput(mm); } - /* We're done rebinding vma's to this cpusets new mems_allowed. */ + /* We're done rebinding vmas to this cpuset's new mems_allowed. 
*/ kfree(mmarray); cpuset_being_rebound = NULL; retval = 0; @@ -1045,7 +999,7 @@ int current_cpuset_is_being_rebound(void) } /* - * Call with manage_mutex held. + * Call with cgroup_mutex held. */ static int update_memory_pressure_enabled(struct cpuset *cs, char *buf) @@ -1066,7 +1020,7 @@ static int update_memory_pressure_enabled(struct cpuset *cs, char *buf) * cs: the cpuset to update * buf: the buffer where we read the 0 or 1 * - * Call with manage_mutex held. + * Call with cgroup_mutex held. */ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf) @@ -1200,6 +1154,7 @@ static int fmeter_getrate(struct fmeter *fmp) return val; } +/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont, struct task_struct *tsk) { @@ -1547,7 +1502,8 @@ static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont) * If this becomes a problem for some users who wish to * allow that scenario, then cpuset_post_clone() could be * changed to grant parent->cpus_allowed-sibling_cpus_exclusive - * (and likewise for mems) to the new cgroup. + * (and likewise for mems) to the new cgroup. Called with cgroup_mutex + * held. */ static void cpuset_post_clone(struct cgroup_subsys *ss, struct cgroup *cgroup) @@ -1571,11 +1527,8 @@ static void cpuset_post_clone(struct cgroup_subsys *ss, /* * cpuset_create - create a cpuset - * parent: cpuset that will be parent of the new cpuset. - * name: name of the new cpuset. Will be strcpy'ed. - * mode: mode to set on new inode - * - * Must be called with the mutex on the parent inode held + * ss: cpuset cgroup subsystem + * cont: control group that the new cpuset will be part of */ static struct cgroup_subsys_state *cpuset_create( @@ -1687,53 +1640,140 @@ int __init cpuset_init(void) return 0; } +/** + * cpuset_do_move_task - move a given task to another cpuset + * @tsk: pointer to task_struct the task to move + * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner + * + * Called by cgroup_scan_tasks() for each task in a cgroup. + * Return nonzero to stop the walk through the tasks. + */ +void cpuset_do_move_task(struct task_struct *tsk, struct cgroup_scanner *scan) +{ + struct cpuset_hotplug_scanner *chsp; + + chsp = container_of(scan, struct cpuset_hotplug_scanner, scan); + cgroup_attach_task(chsp->to, tsk); +} + +/** + * move_member_tasks_to_cpuset - move tasks from one cpuset to another + * @from: cpuset in which the tasks currently reside + * @to: cpuset to which the tasks will be moved + * + * Called with cgroup_mutex held + * callback_mutex must not be held, as cpuset_attach() will take it. + * + * The cgroup_scan_tasks() function will scan all the tasks in a cgroup, + * calling callback functions for each. + */ +static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to) +{ + struct cpuset_hotplug_scanner scan; + + scan.scan.cg = from->css.cgroup; + scan.scan.test_task = NULL; /* select all tasks in cgroup */ + scan.scan.process_task = cpuset_do_move_task; + scan.scan.heap = NULL; + scan.to = to->css.cgroup; + + if (cgroup_scan_tasks((struct cgroup_scanner *)&scan)) + printk(KERN_ERR "move_member_tasks_to_cpuset: " + "cgroup_scan_tasks failed\n"); +} + /* * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs * or memory nodes, we need to walk over the cpuset hierarchy, * removing that CPU or node from all cpusets. 
If this removes the - * last CPU or node from a cpuset, then the guarantee_online_cpus() - * or guarantee_online_mems() code will use that emptied cpusets - * parent online CPUs or nodes. Cpusets that were already empty of - * CPUs or nodes are left empty. + * last CPU or node from a cpuset, then move the tasks in the empty + * cpuset to its next-highest non-empty parent. * - * This routine is intentionally inefficient in a couple of regards. - * It will check all cpusets in a subtree even if the top cpuset of - * the subtree has no offline CPUs or nodes. It checks both CPUs and - * nodes, even though the caller could have been coded to know that - * only one of CPUs or nodes needed to be checked on a given call. - * This was done to minimize text size rather than cpu cycles. + * Called with cgroup_mutex held + * callback_mutex must not be held, as cpuset_attach() will take it. + */ +static void remove_tasks_in_empty_cpuset(struct cpuset *cs) +{ + struct cpuset *parent; + + /* + * The cgroup's css_sets list is in use if there are tasks + * in the cpuset; the list is empty if there are none; + * the cs->css.refcnt seems always 0. + */ + if (list_empty(&cs->css.cgroup->css_sets)) + return; + + /* + * Find its next-highest non-empty parent, (top cpuset + * has online cpus, so can't be empty). + */ + parent = cs->parent; + while (cpus_empty(parent->cpus_allowed) || + nodes_empty(parent->mems_allowed)) + parent = parent->parent; + + move_member_tasks_to_cpuset(cs, parent); +} + +/* + * Walk the specified cpuset subtree and look for empty cpusets. + * The tasks of such cpuset must be moved to a parent cpuset. + * + * Called with cgroup_mutex held. We take callback_mutex to modify + * cpus_allowed and mems_allowed. * - * Call with both manage_mutex and callback_mutex held. + * This walk processes the tree from top to bottom, completing one layer + * before dropping down to the next. It always processes a node before + * any of its children. * - * Recursive, on depth of cpuset subtree. + * For now, since we lack memory hot unplug, we'll never see a cpuset + * that has tasks along with an empty 'mems'. But if we did see such + * a cpuset, we'd handle it just like we do if its 'cpus' was empty. */ - -static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur) +static void scan_for_empty_cpusets(const struct cpuset *root) { + struct cpuset *cp; /* scans cpusets being updated */ + struct cpuset *child; /* scans child cpusets of cp */ + struct list_head queue; struct cgroup *cont; - struct cpuset *c; - /* Each of our child cpusets mems must be online */ - list_for_each_entry(cont, &cur->css.cgroup->children, sibling) { - c = cgroup_cs(cont); - guarantee_online_cpus_mems_in_subtree(c); - if (!cpus_empty(c->cpus_allowed)) - guarantee_online_cpus(c, &c->cpus_allowed); - if (!nodes_empty(c->mems_allowed)) - guarantee_online_mems(c, &c->mems_allowed); + INIT_LIST_HEAD(&queue); + + list_add_tail((struct list_head *)&root->stack_list, &queue); + + while (!list_empty(&queue)) { + cp = container_of(queue.next, struct cpuset, stack_list); + list_del(queue.next); + list_for_each_entry(cont, &cp->css.cgroup->children, sibling) { + child = cgroup_cs(cont); + list_add_tail(&child->stack_list, &queue); + } + cont = cp->css.cgroup; + + /* Continue past cpusets with all cpus, mems online */ + if (cpus_subset(cp->cpus_allowed, cpu_online_map) && + nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY])) + continue; + + /* Remove offline cpus and mems from this cpuset. 
*/ + mutex_lock(&callback_mutex); + cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map); + nodes_and(cp->mems_allowed, cp->mems_allowed, + node_states[N_HIGH_MEMORY]); + mutex_unlock(&callback_mutex); + + /* Move tasks from the empty cpuset to a parent */ + if (cpus_empty(cp->cpus_allowed) || + nodes_empty(cp->mems_allowed)) + remove_tasks_in_empty_cpuset(cp); } } /* * The cpus_allowed and mems_allowed nodemasks in the top_cpuset track * cpu_online_map and node_states[N_HIGH_MEMORY]. Force the top cpuset to - * track what's online after any CPU or memory node hotplug or unplug - * event. - * - * To ensure that we don't remove a CPU or node from the top cpuset - * that is currently in use by a child cpuset (which would violate - * the rule that cpusets must be subsets of their parent), we first - * call the recursive routine guarantee_online_cpus_mems_in_subtree(). + * track what's online after any CPU or memory node hotplug or unplug event. * * Since there are two callers of this routine, one for CPU hotplug * events and one for memory node hotplug events, we could have coded @@ -1744,13 +1784,11 @@ static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur) static void common_cpu_mem_hotplug_unplug(void) { cgroup_lock(); - mutex_lock(&callback_mutex); - guarantee_online_cpus_mems_in_subtree(&top_cpuset); top_cpuset.cpus_allowed = cpu_online_map; top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; + scan_for_empty_cpusets(&top_cpuset); - mutex_unlock(&callback_mutex); cgroup_unlock(); } @@ -1826,7 +1864,7 @@ cpumask_t cpuset_cpus_allowed(struct task_struct *tsk) /** * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset. - * Must be called with callback_mutex held. + * Must be called with callback_mutex held. **/ cpumask_t cpuset_cpus_allowed_locked(struct task_struct *tsk) { @@ -2163,10 +2201,8 @@ void __cpuset_memory_pressure_bump(void) * - Used for /proc/<pid>/cpuset. * - No need to task_lock(tsk) on this tsk->cpuset reference, as it * doesn't really matter if tsk->cpuset changes after we read it, - * and we take manage_mutex, keeping attach_task() from changing it - * anyway. No need to check that tsk->cpuset != NULL, thanks to - * the_top_cpuset_hack in cpuset_exit(), which sets an exiting tasks - * cpuset to top_cpuset. + * and we take cgroup_mutex, keeping cpuset_attach() from changing it + * anyway. 
*/ static int proc_cpuset_show(struct seq_file *m, void *unused_v) { diff --git a/kernel/fork.c b/kernel/fork.c index 3995297567a..b2ef8e4fad7 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -40,6 +40,7 @@ #include <linux/ptrace.h> #include <linux/mount.h> #include <linux/audit.h> +#include <linux/memcontrol.h> #include <linux/profile.h> #include <linux/rmap.h> #include <linux/acct.h> @@ -340,7 +341,7 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock); #include <linux/init_task.h> -static struct mm_struct * mm_init(struct mm_struct * mm) +static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) { atomic_set(&mm->mm_users, 1); atomic_set(&mm->mm_count, 1); @@ -357,11 +358,14 @@ static struct mm_struct * mm_init(struct mm_struct * mm) mm->ioctx_list = NULL; mm->free_area_cache = TASK_UNMAPPED_BASE; mm->cached_hole_size = ~0UL; + mm_init_cgroup(mm, p); if (likely(!mm_alloc_pgd(mm))) { mm->def_flags = 0; return mm; } + + mm_free_cgroup(mm); free_mm(mm); return NULL; } @@ -376,7 +380,7 @@ struct mm_struct * mm_alloc(void) mm = allocate_mm(); if (mm) { memset(mm, 0, sizeof(*mm)); - mm = mm_init(mm); + mm = mm_init(mm, current); } return mm; } @@ -390,6 +394,7 @@ void fastcall __mmdrop(struct mm_struct *mm) { BUG_ON(mm == &init_mm); mm_free_pgd(mm); + mm_free_cgroup(mm); destroy_context(mm); free_mm(mm); } @@ -511,7 +516,7 @@ static struct mm_struct *dup_mm(struct task_struct *tsk) mm->token_priority = 0; mm->last_interval = 0; - if (!mm_init(mm)) + if (!mm_init(mm, tsk)) goto fail_nomem; if (init_new_context(tsk, mm)) diff --git a/kernel/kexec.c b/kernel/kexec.c index 9a26eec9eb0..06a0e277565 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -1361,8 +1361,8 @@ unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void) static int __init crash_save_vmcoreinfo_init(void) { - vmcoreinfo_append_str("OSRELEASE=%s\n", init_uts_ns.name.release); - vmcoreinfo_append_str("PAGESIZE=%ld\n", PAGE_SIZE); + VMCOREINFO_OSRELEASE(init_uts_ns.name.release); + VMCOREINFO_PAGESIZE(PAGE_SIZE); VMCOREINFO_SYMBOL(init_uts_ns); VMCOREINFO_SYMBOL(node_online_map); @@ -1376,15 +1376,15 @@ static int __init crash_save_vmcoreinfo_init(void) #ifdef CONFIG_SPARSEMEM VMCOREINFO_SYMBOL(mem_section); VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS); - VMCOREINFO_SIZE(mem_section); + VMCOREINFO_STRUCT_SIZE(mem_section); VMCOREINFO_OFFSET(mem_section, section_mem_map); #endif - VMCOREINFO_SIZE(page); - VMCOREINFO_SIZE(pglist_data); - VMCOREINFO_SIZE(zone); - VMCOREINFO_SIZE(free_area); - VMCOREINFO_SIZE(list_head); - VMCOREINFO_TYPEDEF_SIZE(nodemask_t); + VMCOREINFO_STRUCT_SIZE(page); + VMCOREINFO_STRUCT_SIZE(pglist_data); + VMCOREINFO_STRUCT_SIZE(zone); + VMCOREINFO_STRUCT_SIZE(free_area); + VMCOREINFO_STRUCT_SIZE(list_head); + VMCOREINFO_SIZE(nodemask_t); VMCOREINFO_OFFSET(page, flags); VMCOREINFO_OFFSET(page, _count); VMCOREINFO_OFFSET(page, mapping); diff --git a/kernel/pid.c b/kernel/pid.c index f815455431b..3b30bccdfcd 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -368,6 +368,7 @@ struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type) } return result; } +EXPORT_SYMBOL(pid_task); /* * Must be called under rcu_read_lock() or with tasklist_lock read-held. 
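kernel/res_counter.c, added below, introduces the generic resource counter that controllers can build their accounting on. A rough, hypothetical sketch of how a controller might use it (only struct res_counter and the res_counter_* calls come from this patch; the foo_* names are made up):

#include <linux/res_counter.h>

struct foo_cgroup {
	struct res_counter res;		/* usage, limit and failcnt for this group */
};

static void foo_group_init(struct foo_cgroup *foo)
{
	res_counter_init(&foo->res);	/* limit starts out at LLONG_MAX */
}

static int foo_charge(struct foo_cgroup *foo, unsigned long nr_bytes)
{
	/* fails with -ENOMEM and bumps failcnt once usage would exceed limit */
	return res_counter_charge(&foo->res, nr_bytes);
}

static void foo_uncharge(struct foo_cgroup *foo, unsigned long nr_bytes)
{
	res_counter_uncharge(&foo->res, nr_bytes);
}

res_counter_read() and res_counter_write() would then back the cgroup control files that expose usage, limit and failcnt.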
diff --git a/kernel/res_counter.c b/kernel/res_counter.c new file mode 100644 index 00000000000..16cbec2d5d6 --- /dev/null +++ b/kernel/res_counter.c @@ -0,0 +1,134 @@ +/* + * resource cgroups + * + * Copyright 2007 OpenVZ SWsoft Inc + * + * Author: Pavel Emelianov <xemul@openvz.org> + * + */ + +#include <linux/types.h> +#include <linux/parser.h> +#include <linux/fs.h> +#include <linux/res_counter.h> +#include <linux/uaccess.h> + +void res_counter_init(struct res_counter *counter) +{ + spin_lock_init(&counter->lock); + counter->limit = (unsigned long long)LLONG_MAX; +} + +int res_counter_charge_locked(struct res_counter *counter, unsigned long val) +{ + if (counter->usage + val > counter->limit) { + counter->failcnt++; + return -ENOMEM; + } + + counter->usage += val; + return 0; +} + +int res_counter_charge(struct res_counter *counter, unsigned long val) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&counter->lock, flags); + ret = res_counter_charge_locked(counter, val); + spin_unlock_irqrestore(&counter->lock, flags); + return ret; +} + +void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val) +{ + if (WARN_ON(counter->usage < val)) + val = counter->usage; + + counter->usage -= val; +} + +void res_counter_uncharge(struct res_counter *counter, unsigned long val) +{ + unsigned long flags; + + spin_lock_irqsave(&counter->lock, flags); + res_counter_uncharge_locked(counter, val); + spin_unlock_irqrestore(&counter->lock, flags); +} + + +static inline unsigned long long * +res_counter_member(struct res_counter *counter, int member) +{ + switch (member) { + case RES_USAGE: + return &counter->usage; + case RES_LIMIT: + return &counter->limit; + case RES_FAILCNT: + return &counter->failcnt; + }; + + BUG(); + return NULL; +} + +ssize_t res_counter_read(struct res_counter *counter, int member, + const char __user *userbuf, size_t nbytes, loff_t *pos, + int (*read_strategy)(unsigned long long val, char *st_buf)) +{ + unsigned long long *val; + char buf[64], *s; + + s = buf; + val = res_counter_member(counter, member); + if (read_strategy) + s += read_strategy(*val, s); + else + s += sprintf(s, "%llu\n", *val); + return simple_read_from_buffer((void __user *)userbuf, nbytes, + pos, buf, s - buf); +} + +ssize_t res_counter_write(struct res_counter *counter, int member, + const char __user *userbuf, size_t nbytes, loff_t *pos, + int (*write_strategy)(char *st_buf, unsigned long long *val)) +{ + int ret; + char *buf, *end; + unsigned long flags; + unsigned long long tmp, *val; + + buf = kmalloc(nbytes + 1, GFP_KERNEL); + ret = -ENOMEM; + if (buf == NULL) + goto out; + + buf[nbytes] = '\0'; + ret = -EFAULT; + if (copy_from_user(buf, userbuf, nbytes)) + goto out_free; + + ret = -EINVAL; + + if (write_strategy) { + if (write_strategy(buf, &tmp)) { + goto out_free; + } + } else { + tmp = simple_strtoull(buf, &end, 10); + if (*end != '\0') + goto out_free; + } + spin_lock_irqsave(&counter->lock, flags); + val = res_counter_member(counter, member); + *val = tmp; + spin_unlock_irqrestore(&counter->lock, flags); + ret = nbytes; +out_free: + kfree(buf); +out: + return ret; +} diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 86daaa26d12..8c98d8147d8 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -67,6 +67,7 @@ extern int sysctl_overcommit_memory; extern int sysctl_overcommit_ratio; extern int sysctl_panic_on_oom; extern int sysctl_oom_kill_allocating_task; +extern int sysctl_oom_dump_tasks; extern int max_threads; extern int core_uses_pid; extern int suid_dumpable; 
@@ -871,6 +872,14 @@ static struct ctl_table vm_table[] = { .proc_handler = &proc_dointvec, }, { + .ctl_name = CTL_UNNUMBERED, + .procname = "oom_dump_tasks", + .data = &sysctl_oom_dump_tasks, + .maxlen = sizeof(sysctl_oom_dump_tasks), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { .ctl_name = VM_OVERCOMMIT_RATIO, .procname = "overcommit_ratio", .data = &sysctl_overcommit_ratio,
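For reference, the cgroup_scan_tasks() helper added to kernel/cgroup.c earlier in this diff is driven through a struct cgroup_scanner, as the cpuset update_cpumask() conversion in this patch does. A minimal sketch of that callback pattern (the bar_* names are hypothetical; the fields and calling rules come from the patch):

static int bar_test_task(struct task_struct *tsk, struct cgroup_scanner *scan)
{
	/* nonzero means process_task() should run for tsk; may be called with locks held */
	return 1;
}

static void bar_process_task(struct task_struct *tsk, struct cgroup_scanner *scan)
{
	/* act on tsk; css_set_lock is not held here */
}

static int bar_update_tasks(struct cgroup *cgrp)
{
	struct cgroup_scanner scan;

	scan.cg = cgrp;
	scan.test_task = bar_test_task;		/* NULL would select every task */
	scan.process_task = bar_process_task;
	scan.heap = NULL;			/* a temporary heap is allocated internally */

	return cgroup_scan_tasks(&scan);	/* call with cgroup_mutex held */
}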