author     Ingo Molnar <mingo@elte.hu>    2007-08-10 23:05:11 +0200
committer  Ingo Molnar <mingo@elte.hu>    2007-08-10 23:05:11 +0200
commit     e56f31aad9d8c0102bc074cdab4e3ee76b38600d
tree       3ff937cbd564cc57249f736c2a58ae477e4fbef5
parent     529c77261bccd9d37f110f58b0753d95beaa9fa2
sched: fix typo in the FAIR_GROUP_SCHED branch
While there's no in-tree way to turn on group scheduling at the moment, fix a typo in it nevertheless.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
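The typo sits inside the CONFIG_FAIR_GROUP_SCHED branch: the variable was declared as "imbalances" but assigned as "imbalance", so that branch could not compile; nothing noticed because the option has no in-tree way of being enabled. The following is a minimal, standalone C sketch of that situation, not the kernel routine itself (the function name group_imbalance and the weight values are invented for illustration); it shows the fixed spelling and how such a mistake hides behind a configuration that is never built.

/*
 * Standalone sketch, not kernel/sched_fair.c. Build either way:
 *   gcc -Wall sketch.c                             (branch that was always built)
 *   gcc -Wall -DCONFIG_FAIR_GROUP_SCHED sketch.c   (branch the patch fixes)
 * Before the fix the declaration read "long imbalances;" while the
 * assignment used "imbalance", so only the second build would break.
 */
#include <stdio.h>

static long group_imbalance(long busiest_weight, long this_weight)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	long imbalance;		/* fixed spelling; the typo was "imbalances" */

	imbalance = busiest_weight - this_weight;
	/* don't pull if this side already carries more load than busiest */
	if (imbalance <= 0)
		return 0;
	return imbalance;
#else
	/* without group scheduling there is no per-group weight to compare */
	return busiest_weight;
#endif
}

int main(void)
{
	printf("imbalance: %ld\n", group_imbalance(2048, 1024));
	return 0;
}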
-rw-r--r--  kernel/sched_fair.c  7
1 file changed, 3 insertions, 4 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e91db32cadf..c5af38948a1 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -959,13 +959,12 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 		struct cfs_rq *this_cfs_rq;
-		long imbalances;
+		long imbalance;
 		unsigned long maxload;
 
 		this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
 
-		imbalance = busy_cfs_rq->load.weight -
-				this_cfs_rq->load.weight;
+		imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
 		/* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
 		if (imbalance <= 0)
 			continue;
@@ -976,7 +975,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		*this_best_prio = cfs_rq_best_prio(this_cfs_rq);
 #else
-#define maxload rem_load_move
+# define maxload rem_load_move
 #endif
 		/* pass busy_cfs_rq argument into
 		 * load_balance_[start|next]_fair iterators
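A note on the second hunk: it only inserts a space after the "#", a style sometimes used for preprocessor directives nested inside an #if/#else block. The macro itself is the interesting part: when group scheduling is compiled out, "maxload" is simply made an alias for "rem_load_move" so the common code after the #endif reads the same in both configurations. Below is a hedged, self-contained illustration of that aliasing pattern with invented names (move_load, budget, remaining, WITH_GROUPS); it is not the kernel's code.

/*
 * Illustration of the #else aliasing trick (invented names, not kernel code):
 * when the optional path is compiled out, "budget" is just another name for
 * "remaining", so the shared code is identical in both builds.
 * Compile with or without -DWITH_GROUPS.
 */
#include <stdio.h>

static long move_load(long remaining, long imbalance)
{
#ifdef WITH_GROUPS
	long budget = imbalance < remaining ? imbalance : remaining;
#else
# define budget remaining	/* same idea as "# define maxload rem_load_move" */
#endif
	/* common code: the amount moved is bounded by "budget" */
	return budget;
#ifndef WITH_GROUPS
# undef budget			/* keep the alias local to this function */
#endif
}

int main(void)
{
	printf("moved: %ld\n", move_load(100, 40));
	return 0;
}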