author		Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>	2008-01-25 21:08:00 +0100
committer	Ingo Molnar <mingo@elte.hu>			2008-01-25 21:08:00 +0100
commit		a183561567b5446d3362b4839bd4f744f4b2af1e (patch)
tree		7bfa46fd0bf4a96e96500732d188f1ef4b04454d /kernel
parent		58e2d4ca581167c2a079f4ee02be2f0bc52e8729 (diff)
sched: introduce a mutex and corresponding API to serialize access to doms_cur[] array

The doms_cur[] array represents the various scheduling domains, which are mutually exclusive. Currently the cpusets code can modify this array (by calling partition_sched_domains()) as a result of a user modifying the sched_load_balance flag of various cpusets.

This patch introduces a mutex and a corresponding API (only when CONFIG_FAIR_GROUP_SCHED is defined) which allows a reader to safely read the doms_cur[] array without worrying about concurrent modifications to the array.

The fair group scheduler code (introduced in the next patch of this series) makes use of this mutex to walk through the doms_cur[] array while rebalancing shares of task groups across CPUs.

Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
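For illustration only, a minimal sketch of how a reader of doms_cur[] is expected to use the new API. walk_doms_cur() is a hypothetical caller and is not part of this patch; lock_doms_cur(), unlock_doms_cur(), doms_cur[] and ndoms_cur are the symbols introduced or used by the patch below.

	/*
	 * Hypothetical reader-side sketch: hold doms_cur_mutex (via the new
	 * API) while walking the doms_cur[] array, so that a concurrent
	 * partition_sched_domains() cannot replace the array underneath us.
	 */
	static void walk_doms_cur(void)
	{
		int i;

		lock_doms_cur();	/* excludes partition_sched_domains() */
		for (i = 0; i < ndoms_cur; i++) {
			/*
			 * doms_cur[i] is the cpumask of one scheduling
			 * domain; examine or rebalance within it here.
			 */
		}
		unlock_doms_cur();
	}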
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	| 19
1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c915f3e6e59..d9585f15043 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -185,6 +185,9 @@ static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
*/
static DEFINE_MUTEX(task_group_mutex);
+/* doms_cur_mutex serializes access to doms_cur[] array */
+static DEFINE_MUTEX(doms_cur_mutex);
+
/* Default task group.
* Every task in system belong to this group at bootup.
*/
@@ -234,11 +237,23 @@ static inline void unlock_task_group_list(void)
mutex_unlock(&task_group_mutex);
}
+static inline void lock_doms_cur(void)
+{
+ mutex_lock(&doms_cur_mutex);
+}
+
+static inline void unlock_doms_cur(void)
+{
+ mutex_unlock(&doms_cur_mutex);
+}
+
#else
static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { }
static inline void lock_task_group_list(void) { }
static inline void unlock_task_group_list(void) { }
+static inline void lock_doms_cur(void) { }
+static inline void unlock_doms_cur(void) { }
#endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -6543,6 +6558,8 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new)
{
int i, j;
+ lock_doms_cur();
+
/* always unregister in case we don't destroy any domains */
unregister_sched_domain_sysctl();
@@ -6583,6 +6600,8 @@ match2:
ndoms_cur = ndoms_new;
register_sched_domain_sysctl();
+
+ unlock_doms_cur();
}
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)