author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2007-10-15 17:00:10 +0200
committer Ingo Molnar <mingo@elte.hu>              2007-10-15 17:00:10 +0200
commit    ddc972975091ba5f839bf24d0f9ef54fe90ee741 (patch)
tree      438406a2930bdcb36c8a1bc6778aab49e1fba36d /kernel
parent    d822cecedad88b69a7d68aa8d49e1f238aa320c7 (diff)
sched debug: check spread
debug feature: check how well we schedule within a reasonable vruntime 'spread' range. (note that CPU overload can increase the spread, so this is not a hard condition, but normal loads should be within the spread.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
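To make the check concrete: an entity counts as "over spread" when its vruntime deviates from the queue's min_vruntime by more than three scheduling-latency periods in either direction. Below is a standalone sketch of that condition, using an assumed 20ms value for sysctl_sched_latency (in the kernel it is a runtime tunable, in nanoseconds); this is an illustration, not kernel code.

#include <stdio.h>

typedef long long s64;

/* Assumed 20ms latency period, in nanoseconds; the real value is the
 * sysctl_sched_latency tunable. */
static const s64 sched_latency = 20000000LL;

/* Mirrors the patch's check_spread() condition: absolute deviation of
 * an entity's vruntime from the queue minimum, compared against a
 * window of three latency periods. */
static int over_spread(s64 vruntime, s64 min_vruntime)
{
	s64 d = vruntime - min_vruntime;

	if (d < 0)
		d = -d;

	return d > 3 * sched_latency;
}

int main(void)
{
	/* 50ms ahead of min_vruntime: inside the 60ms window, not counted */
	printf("%d\n", over_spread(150000000LL, 100000000LL));
	/* 70ms ahead: outside the window, would bump nr_spread_over */
	printf("%d\n", over_spread(170000000LL, 100000000LL));
	return 0;
}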
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c        3
-rw-r--r--  kernel/sched_debug.c  2
-rw-r--r--  kernel/sched_fair.c   17
3 files changed, 22 insertions, 0 deletions
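The new counter is bumped via schedstat_inc() in the sched_fair.c hunk below. As a rough sketch of what that macro does in kernels of this era (the real definition lives elsewhere in the scheduler code and is guarded by CONFIG_SCHEDSTATS; treat this as an approximation, not the exact source):

/* Approximate shape of schedstat_inc(): increment a statistics field
 * on the given object when schedstats are compiled in, else a no-op. */
#ifdef CONFIG_SCHEDSTATS
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
#else
# define schedstat_inc(rq, field)	do { } while (0)
#endif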
diff --git a/kernel/sched.c b/kernel/sched.c
index fe1165b226a..213294fdcd0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -250,6 +250,9 @@ struct cfs_rq {
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
 	struct sched_entity *curr;
+
+	unsigned long nr_spread_over;
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index b24f17de19e..4659c90c341 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -140,6 +140,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, " .%-30s: %ld\n", "bkl_cnt",
 			rq->bkl_cnt);
 #endif
+	SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over",
+			cfs_rq->nr_spread_over);
 }
 
 static void print_cpu(struct seq_file *m, int cpu)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 87acc5cedd2..8ea4c9b3e41 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -447,6 +447,19 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 #endif
 }
 
+static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+#ifdef CONFIG_SCHED_DEBUG
+	s64 d = se->vruntime - cfs_rq->min_vruntime;
+
+	if (d < 0)
+		d = -d;
+
+	if (d > 3*sysctl_sched_latency)
+		schedstat_inc(cfs_rq, nr_spread_over);
+#endif
+}
+
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
@@ -494,6 +507,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	}
 
 	update_stats_enqueue(cfs_rq, se);
+	check_spread(cfs_rq, se);
 	if (se != cfs_rq->curr)
 		__enqueue_entity(cfs_rq, se);
 	account_entity_enqueue(cfs_rq, se);
@@ -587,6 +601,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 	update_stats_curr_end(cfs_rq, prev);
+	check_spread(cfs_rq, prev);
 	if (prev->on_rq) {
 		update_stats_wait_start(cfs_rq, prev);
 		/* Put 'current' back into the tree. */
 		__enqueue_entity(cfs_rq, prev);
 	}
@@ -996,6 +1011,8 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	}
 
 	update_stats_enqueue(cfs_rq, se);
+	check_spread(cfs_rq, se);
+	check_spread(cfs_rq, curr);
 	__enqueue_entity(cfs_rq, se);
 	account_entity_enqueue(cfs_rq, se);
 	resched_task(rq->curr);
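
Once applied, the print_cfs_rq() hook above exposes the counter through the scheduler debug file (requires CONFIG_SCHED_DEBUG). Below is a small userspace sketch that scans /proc/sched_debug for the per-runqueue values; the path and field name follow the patch, everything else is illustrative.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/sched_debug", "r");

	if (!f) {
		perror("fopen /proc/sched_debug");
		return 1;
	}
	/* Echo every line carrying the counter added by this patch,
	 * one per cfs_rq, e.g. " .nr_spread_over : 0" */
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "nr_spread_over"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}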