author	Nick Piggin <npiggin@suse.de>	2007-02-12 00:53:51 -0800
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-02-12 09:48:37 -0800
commit	ff91691bccdb741efb2df0489058a4961fa79598 (patch)
tree	eeef6ce3d48df86a7b2c1178a9ba54210b8b8981 /kernel/sched.c
parent	0a9ac38246b11892ad20a1eb9deb67adf8c0db2f (diff)
[PATCH] sched: avoid div in rebalance_tick
Avoid the expensive integer divide 3 times per CPU per tick.

A userspace test of this loop went from 26ns down to 19ns on a G5, and from
123ns down to 28ns on a P3. (Also avoid a variable bit shift, as suggested
by Alan. The effect of this wasn't noticeable on the CPUs I tested with.)

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	8
1 files changed, 5 insertions, 3 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 1fd67e16cd3..08f86178aa3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2897,14 +2897,16 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 static void update_load(struct rq *this_rq)
 {
 	unsigned long this_load;
-	int i, scale;
+	unsigned int i, scale;
 
 	this_load = this_rq->raw_weighted_load;
 
 	/* Update our load: */
-	for (i = 0, scale = 1; i < 3; i++, scale <<= 1) {
+	for (i = 0, scale = 1; i < 3; i++, scale += scale) {
 		unsigned long old_load, new_load;
 
+		/* scale is effectively 1 << i now, and >> i divides by scale */
+
 		old_load = this_rq->cpu_load[i];
 		new_load = this_load;
 		/*
@@ -2914,7 +2916,7 @@ static void update_load(struct rq *this_rq)
 		 */
 		if (new_load > old_load)
 			new_load += scale-1;
-		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) / scale;
+		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
 	}
 }
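
For illustration only, below is a minimal standalone userspace sketch of the same averaging step the patch touches. The cpu_load[] array, the sample values, and the main() harness here are hypothetical, not taken from the kernel; the point is that because scale doubles each iteration, scale is always 1 << i, so the right shift by i yields exactly what the old division by scale did, while "scale - 1" still rounds up when the load is rising.

#include <stdio.h>

/* Hypothetical standalone model of the 3-entry cpu_load[] decay above. */
static unsigned long cpu_load[3];

static void update_load(unsigned long this_load)
{
	unsigned int i, scale;

	/* scale += scale doubles it each pass, so scale == 1 << i throughout. */
	for (i = 0, scale = 1; i < 3; i++, scale += scale) {
		unsigned long old_load = cpu_load[i];
		unsigned long new_load = this_load;

		/* Round up the averaging if load is increasing, as in the patch. */
		if (new_load > old_load)
			new_load += scale - 1;

		/* >> i replaces the old "/ scale": same result, no integer divide. */
		cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
	}
}

int main(void)
{
	unsigned long samples[] = { 10, 10, 10, 4, 4, 12 };
	unsigned int t, i;

	for (t = 0; t < sizeof(samples) / sizeof(samples[0]); t++) {
		update_load(samples[t]);
		printf("tick %u:", t);
		for (i = 0; i < 3; i++)
			printf(" %lu", cpu_load[i]);
		printf("\n");
	}
	return 0;
}

Running this shows the higher-indexed entries decaying more slowly toward each new sample, which is the behaviour the original "/ scale" version produced as well.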