author     Ingo Molnar <mingo@elte.hu>  2007-08-02 17:41:40 +0200
committer  Ingo Molnar <mingo@elte.hu>  2007-08-02 17:41:40 +0200
commit     cad60d93e18ba52b6f069b2edb031c89bf603b07 (patch)
tree       dfe74c165e7607c233d223614ef400163c6ba44c
parent     4e6f96f313561d86d248edf0eaff2336d8217e1b (diff)
[PATCH] sched: ->task_new cleanup
Make sched_class.task_new == NULL a 'default method'; this
allows the removal of task_new_rt().
Signed-off-by: Ingo Molnar <mingo@elte.hu>
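
The 'default method' amounts to a NULL check in the caller: wake_up_new_task()
falls back to plain activation when a scheduling class provides no ->task_new
hook. A minimal sketch of that dispatch (simplified from the wake_up_new_task()
hunk below; it omits the sysctl_sched_child_runs_first and CPU-affinity
conditions that the real code also tests):

	/* Sketch only: a NULL ->task_new means "use the default path". */
	if (!p->sched_class->task_new) {
		/* default: just activate the freshly forked task */
		activate_task(rq, p, 0);
	} else {
		/* class-specific new-task startup, e.g. CFS's task_new_fair() */
		p->sched_class->task_new(rq, p, now);
		inc_nr_running(p, rq, now);
	}

Since the RT class needed nothing beyond the default activation, its
task_new_rt() hook can simply be dropped.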
-rw-r--r--  include/linux/sched.h |  2 +-
-rw-r--r--  kernel/sched.c        | 11 ++++++++---
-rw-r--r--  kernel/sched_fair.c   |  4 +---
-rw-r--r--  kernel/sched_rt.c     | 10 ----------
4 files changed, 10 insertions(+), 17 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 81eec7e36c8..c9e0c2a6a95 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -874,7 +874,7 @@ struct sched_class {
 
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p);
-	void (*task_new) (struct rq *rq, struct task_struct *p);
+	void (*task_new) (struct rq *rq, struct task_struct *p, u64 now);
 };
 
 struct load_weight {
diff --git a/kernel/sched.c b/kernel/sched.c
index 7bed2c58b98..915c75e5a27 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1641,22 +1641,27 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	unsigned long flags;
 	struct rq *rq;
 	int this_cpu;
+	u64 now;
 
 	rq = task_rq_lock(p, &flags);
 	BUG_ON(p->state != TASK_RUNNING);
 	this_cpu = smp_processor_id(); /* parent's CPU */
+	now = rq_clock(rq);
 
 	p->prio = effective_prio(p);
 
-	if (!sysctl_sched_child_runs_first || (clone_flags & CLONE_VM) ||
-			task_cpu(p) != this_cpu || !current->se.on_rq) {
+	if (!p->sched_class->task_new || !sysctl_sched_child_runs_first ||
+			(clone_flags & CLONE_VM) || task_cpu(p) != this_cpu ||
+			!current->se.on_rq) {
+
 		activate_task(rq, p, 0);
 	} else {
 		/*
 		 * Let the scheduling class do new task startup
 		 * management (if any):
 		 */
-		p->sched_class->task_new(rq, p);
+		p->sched_class->task_new(rq, p, now);
+		inc_nr_running(p, rq, now);
 	}
 	check_preempt_curr(rq, p);
 	task_rq_unlock(rq, &flags);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 6971db0a716..243da6cae71 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1041,11 +1041,10 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
  * monopolize the CPU. Note: the parent runqueue is locked,
  * the child is not running yet.
  */
-static void task_new_fair(struct rq *rq, struct task_struct *p)
+static void task_new_fair(struct rq *rq, struct task_struct *p, u64 now)
 {
 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
 	struct sched_entity *se = &p->se;
-	u64 now = rq_clock(rq);
 
 	sched_info_queued(p);
 
@@ -1072,7 +1071,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	p->se.wait_runtime = -(sysctl_sched_granularity / 2);
 
 	__enqueue_entity(cfs_rq, se);
-	inc_nr_running(p, rq, now);
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 1192a2741b9..ade20dc422f 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -229,15 +229,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
 		requeue_task_rt(rq, p);
 }
 
-/*
- * No parent/child timeslice management necessary for RT tasks,
- * just activate them:
- */
-static void task_new_rt(struct rq *rq, struct task_struct *p)
-{
-	activate_task(rq, p, 1);
-}
-
 static struct sched_class rt_sched_class __read_mostly = {
 	.enqueue_task		= enqueue_task_rt,
 	.dequeue_task		= dequeue_task_rt,
@@ -251,5 +242,4 @@
 	.load_balance		= load_balance_rt,
 
 	.task_tick		= task_tick_rt,
-	.task_new		= task_new_rt,
 };