From 2070ee01d314ecec8a570c07647ccf4ced6340bb Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 21 Mar 2008 16:43:47 +0100
Subject: sched: cleanup old and rarely used 'debug' features.

TREE_AVG and APPROX_AVG are initial task placement policies that have been
disabled for a long while.. time to remove them.

Signed-off-by: Peter Zijlstra
CC: Srivatsa Vaddagiri
Signed-off-by: Ingo Molnar
---
 kernel/sched.c      |  8 ++------
 kernel/sched_fair.c | 14 --------------
 2 files changed, 2 insertions(+), 20 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched.c b/kernel/sched.c
index 3f7c5eb254e..366a90923a3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -594,18 +594,14 @@ enum {
 	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 1,
 	SCHED_FEAT_WAKEUP_PREEMPT	= 2,
 	SCHED_FEAT_START_DEBIT		= 4,
-	SCHED_FEAT_TREE_AVG		= 8,
-	SCHED_FEAT_APPROX_AVG		= 16,
-	SCHED_FEAT_HRTICK		= 32,
-	SCHED_FEAT_DOUBLE_TICK		= 64,
+	SCHED_FEAT_HRTICK		= 8,
+	SCHED_FEAT_DOUBLE_TICK		= 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_NEW_FAIR_SLEEPERS	* 1 |
 		SCHED_FEAT_WAKEUP_PREEMPT	* 1 |
 		SCHED_FEAT_START_DEBIT		* 1 |
-		SCHED_FEAT_TREE_AVG		* 0 |
-		SCHED_FEAT_APPROX_AVG		* 0 |
 		SCHED_FEAT_HRTICK		* 1 |
 		SCHED_FEAT_DOUBLE_TICK		* 0;
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index b85cac4b5e2..86a93376282 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -302,11 +302,6 @@ static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
 	return vslice;
 }
 
-static u64 sched_vslice(struct cfs_rq *cfs_rq)
-{
-	return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
-}
-
 static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	return __sched_vslice(cfs_rq->load.weight + se->load.weight,
@@ -504,15 +499,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	} else
 		vruntime = cfs_rq->min_vruntime;
 
-	if (sched_feat(TREE_AVG)) {
-		struct sched_entity *last = __pick_last_entity(cfs_rq);
-		if (last) {
-			vruntime += last->vruntime;
-			vruntime >>= 1;
-		}
-	} else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
-		vruntime += sched_vslice(cfs_rq)/2;
-
 	/*
 	 * The 'current' period is already promised to the current tasks,
 	 * however the extra weight of the new task will slow them down a
-- 
cgit v1.2.3

From 23e3c3cd2e39a3c9d07ee07d882c8cf6ddd61c86 Mon Sep 17 00:00:00 2001
From: Roel Kluin <12o3l@tiscali.nl>
Date: Thu, 13 Mar 2008 17:41:59 +0100
Subject: sched: remove double unlikely from schedule()

Combine two unlikely's

Signed-off-by: Roel Kluin <12o3l@tiscali.nl>
Signed-off-by: Ingo Molnar
---
 kernel/sched.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/sched.c b/kernel/sched.c
index 366a90923a3..573179eb553 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3882,7 +3882,7 @@ need_resched_nonpreemptible:
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
-				unlikely(signal_pending(prev)))) {
+				signal_pending(prev))) {
 			prev->state = TASK_RUNNING;
 		} else {
 			deactivate_task(rq, prev, 1);
-- 
cgit v1.2.3

From 9aefd0abd8610e8f3bb097debf3afb73f8b7b210 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Wed, 12 Mar 2008 18:31:58 +0100
Subject: sched: add exported arch_reinit_sched_domains() to header file.

Needed so it can be called from outside of sched.c.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
Signed-off-by: Ingo Molnar
---
 kernel/sched.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/sched.c b/kernel/sched.c
index 573179eb553..78482e51b58 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6920,7 +6920,7 @@ match2:
 }
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-static int arch_reinit_sched_domains(void)
+int arch_reinit_sched_domains(void)
 {
 	int err;
 
-- 
cgit v1.2.3

From 22e52b072dd87faa9b2559fe89d4e8f2370f81ca Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Wed, 12 Mar 2008 18:31:59 +0100
Subject: sched: add arch_update_cpu_topology hook.

Will be called each time the scheduling domains are rebuild.
Needed for architectures that don't have a static cpu topology.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
Signed-off-by: Ingo Molnar
---
 kernel/sched.c | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'kernel')

diff --git a/kernel/sched.c b/kernel/sched.c
index 78482e51b58..28c73f07efb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6807,6 +6807,10 @@ static int ndoms_cur;	/* number of sched domains in 'doms_cur' */
  */
 static cpumask_t fallback_doms;
 
+void __attribute__((weak)) arch_update_cpu_topology(void)
+{
+}
+
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
@@ -6816,6 +6820,7 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map)
 {
 	int err;
 
+	arch_update_cpu_topology();
 	ndoms_cur = 1;
 	doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
 	if (!doms_cur)
-- 
cgit v1.2.3
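
Note on the last patch: it relies on weak-symbol linkage. kernel/sched.c provides an
empty __attribute__((weak)) default for arch_update_cpu_topology(), and an architecture
with a non-static CPU topology can supply a strong definition that the linker picks
instead, with no #ifdefs in the generic code. The sketch below is a minimal, hypothetical
userspace illustration of that override pattern, not the kernel's actual arch code; the
file names core.c/arch.c and the demo functions are invented for illustration.

/* core.c - stands in for the generic code: a weak, empty default hook that is
 * called before the "domain rebuild". */
#include <stdio.h>

/* Weak default: used only if no other object file defines this symbol. */
void __attribute__((weak)) arch_update_cpu_topology(void)
{
        /* static-topology case: nothing to refresh */
}

static void rebuild_domains_demo(void)
{
        arch_update_cpu_topology();     /* hook runs (or is a no-op) first */
        printf("building scheduler domains\n");
}

int main(void)
{
        rebuild_domains_demo();
        return 0;
}

/* arch.c - stands in for an architecture with a dynamic topology; its strong
 * definition silently replaces the weak default at link time. */
#include <stdio.h>

void arch_update_cpu_topology(void)
{
        printf("re-reading CPU topology before the rebuild\n");
}

Linking only core.c prints just the rebuild message; linking both files
(e.g. cc core.c arch.c) makes the override run first, which mirrors how the weak hook
added to sched.c is replaced by an architecture-specific implementation.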