author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-12-18 08:11:01 -0800
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-12-18 08:11:01 -0800
commit		51dad801e271f3754a728e5b9a2ef974576490cc
tree		5296e5a91ededf39ba38de54f21de1e2479f8057
parent		3c615e19a4c518e349cc81a7f43223c7ec5f9e9a
parent		6cbf1c126cf6a727287d61b122fde00a8b827bfe
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
sched: do not hurt SCHED_BATCH on wakeup
sched: touch softlockup watchdog after idling
sched: sysctl, proc_dointvec_minmax() expects int values for
sched: mark rwsem functions as __sched for wchan/profiling
sched: fix crash on ia64, introduce task_current()
-rw-r--r--	kernel/rwsem.c		5
-rw-r--r--	kernel/sched.c		18
-rw-r--r--	kernel/sched_fair.c	3
-rw-r--r--	kernel/sysctl.c		8
-rw-r--r--	lib/rwsem.c		2

5 files changed, 21 insertions, 15 deletions
diff --git a/kernel/rwsem.c b/kernel/rwsem.c
index 1ec620c0306..cae050b05f5 100644
--- a/kernel/rwsem.c
+++ b/kernel/rwsem.c
@@ -6,6 +6,7 @@
 
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/rwsem.h>
@@ -15,7 +16,7 @@
 /*
  * lock for reading
  */
-void down_read(struct rw_semaphore *sem)
+void __sched down_read(struct rw_semaphore *sem)
 {
 	might_sleep();
 	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
@@ -42,7 +43,7 @@ EXPORT_SYMBOL(down_read_trylock);
 /*
  * lock for writing
  */
-void down_write(struct rw_semaphore *sem)
+void __sched down_write(struct rw_semaphore *sem)
 {
 	might_sleep();
 	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
diff --git a/kernel/sched.c b/kernel/sched.c
index c6e551de795..3df84ea6aba 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -508,10 +508,15 @@ EXPORT_SYMBOL_GPL(cpu_clock);
 # define finish_arch_switch(prev)	do { } while (0)
 #endif
 
+static inline int task_current(struct rq *rq, struct task_struct *p)
+{
+	return rq->curr == p;
+}
+
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline int task_running(struct rq *rq, struct task_struct *p)
 {
-	return rq->curr == p;
+	return task_current(rq, p);
 }
 
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
@@ -540,7 +545,7 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
 #ifdef CONFIG_SMP
 	return p->oncpu;
 #else
-	return rq->curr == p;
+	return task_current(rq, p);
 #endif
 }
 
@@ -663,6 +668,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 	struct rq *rq = cpu_rq(smp_processor_id());
 	u64 now = sched_clock();
 
+	touch_softlockup_watchdog();
 	rq->idle_clock += delta_ns;
 	/*
 	 * Override the previous timestamp and ignore all
@@ -3334,7 +3340,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 
 	rq = task_rq_lock(p, &flags);
 	ns = p->se.sum_exec_runtime;
-	if (rq->curr == p) {
+	if (task_current(rq, p)) {
 		update_rq_clock(rq);
 		delta_exec = rq->clock - p->se.exec_start;
 		if ((s64)delta_exec > 0)
@@ -4021,7 +4027,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	oldprio = p->prio;
 	on_rq = p->se.on_rq;
-	running = task_running(rq, p);
+	running = task_current(rq, p);
 	if (on_rq) {
 		dequeue_task(rq, p, 0);
 		if (running)
@@ -4332,7 +4338,7 @@ recheck:
 	}
 	update_rq_clock(rq);
 	on_rq = p->se.on_rq;
-	running = task_running(rq, p);
+	running = task_current(rq, p);
 	if (on_rq) {
 		deactivate_task(rq, p, 0);
 		if (running)
@@ -7101,7 +7107,7 @@ void sched_move_task(struct task_struct *tsk)
 
 	update_rq_clock(rq);
 
-	running = task_running(rq, tsk);
+	running = task_current(rq, tsk);
 	on_rq = tsk->se.on_rq;
 
 	if (on_rq) {
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c33f0ceb3de..da7c061e720 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -511,8 +511,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 
 	if (!initial) {
 		/* sleeps upto a single latency don't count. */
-		if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se) &&
-				task_of(se)->policy != SCHED_BATCH)
+		if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se))
 			vruntime -= sysctl_sched_latency;
 
 		/* ensure we never gain time by being placed backwards. */
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 1135de73087..c68f68dcc60 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -225,10 +225,10 @@ static struct ctl_table root_table[] = {
 };
 
 #ifdef CONFIG_SCHED_DEBUG
-static unsigned long min_sched_granularity_ns = 100000;		/* 100 usecs */
-static unsigned long max_sched_granularity_ns = NSEC_PER_SEC;	/* 1 second */
-static unsigned long min_wakeup_granularity_ns;			/* 0 usecs */
-static unsigned long max_wakeup_granularity_ns = NSEC_PER_SEC;	/* 1 second */
+static int min_sched_granularity_ns = 100000;			/* 100 usecs */
+static int max_sched_granularity_ns = NSEC_PER_SEC;		/* 1 second */
+static int min_wakeup_granularity_ns;				/* 0 usecs */
+static int max_wakeup_granularity_ns = NSEC_PER_SEC;		/* 1 second */
 #endif
 
 static struct ctl_table kern_table[] = {
diff --git a/lib/rwsem.c b/lib/rwsem.c
index cdb4e3d0560..7d02700a4b0 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -146,7 +146,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 /*
  * wait for a lock to be granted
  */
-static struct rw_semaphore *
+static struct rw_semaphore __sched *
 rwsem_down_failed_common(struct rw_semaphore *sem,
 			 struct rwsem_waiter *waiter, signed long adjustment)
 {
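The kernel/sched_fair.c hunk stops excluding SCHED_BATCH tasks from the NEW_FAIR_SLEEPERS bonus: a waking batch task now has its vruntime pulled back by up to one sched_latency, just like a SCHED_NORMAL task. A toy user-space sketch of that placement arithmetic (the function name and the constants are illustrative, not the kernel's):

#include <stdio.h>

typedef unsigned long long u64;

/* Illustrative stand-in for place_entity()'s !initial path: a waking
 * sleeper is placed one latency before min_vruntime, but never earlier
 * than where it already was, so it cannot gain time by sleeping. */
static u64 place_sleeper(u64 min_vruntime, u64 se_vruntime, u64 sched_latency)
{
	u64 vruntime = min_vruntime - sched_latency;

	/* "ensure we never gain time by being placed backwards" */
	return vruntime > se_vruntime ? vruntime : se_vruntime;
}

int main(void)
{
	/* Assumed numbers: min_vruntime 100ms, latency 20ms (in ns). */
	printf("%llu\n", place_sleeper(100000000ULL, 30000000ULL, 20000000ULL));
	/* Prints 80000000: the sleeper starts ~20ms of virtual time early. */
	return 0;
}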
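The one-line change in sched_clock_idle_wakeup_event() exists because the softlockup detector compares the current time against a per-CPU touch timestamp that is not advanced while the CPU sits in NOHZ idle; without touching the watchdog on idle exit, a long idle period looks like a hang. A simplified user-space model of that check (the ~10s threshold and the names approximate the 2.6.24-era detector):

#include <stdio.h>

/* Loose model of softlockup_tick(): if the watchdog thread has not
 * touched its timestamp for more than ~10s, a lockup is reported.
 * A CPU waking after 30s of NOHZ idle trips this unless the wakeup
 * path calls touch_softlockup_watchdog() first. */
static void softlockup_tick_model(unsigned long now, unsigned long touch_ts)
{
	if (now > touch_ts + 10)
		printf("BUG: soft lockup detected! (false positive after idle)\n");
}

int main(void)
{
	unsigned long touch_ts = 100;	/* last touch, in seconds */

	softlockup_tick_model(130, touch_ts);	/* fires: 30s idle looked like a hang */
	touch_ts = 130;				/* touch_softlockup_watchdog() on wakeup */
	softlockup_tick_model(130, touch_ts);	/* silent, as intended */
	return 0;
}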
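The kernel/sysctl.c change is a type fix, not a tuning change: proc_dointvec_minmax() dereferences a table's extra1/extra2 bounds as int *, so declaring the bounds as unsigned long means a 64-bit kernel reads only half of each value, and which half depends on endianness. A small user-space sketch of the mismatch (ordinary C, not the kernel helper itself):

#include <stdio.h>

int main(void)
{
	unsigned long max_ns = 1000000000UL;	/* NSEC_PER_SEC */

	/* proc_dointvec_minmax() effectively does this with extra2: */
	int *as_int = (int *)&max_ns;

	/* On 64-bit little-endian this happens to print 1000000000;
	 * on 64-bit big-endian it prints 0, so the sysctl's maximum
	 * collapses to zero. Declaring the bound as int is the fix. */
	printf("bound seen through an int pointer: %d\n", *as_int);
	return 0;
}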
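Marking the rwsem entry points __sched matters for /proc/<pid>/wchan and profiling: __sched links a function into the .sched.text section, which the stack walker skips when reporting where a blocked task is waiting, so a task sleeping in down_read() is attributed to down_read()'s caller instead of to the locking primitive. The definition, approximately as it appears in include/linux/sched.h of this era:

/* Attach to functions that should be ignored in wchan output; the
 * linker collects them into .sched.text, and in_sched_functions()
 * treats anything between __sched_text_start and __sched_text_end
 * as scheduler-internal when walking a sleeping task's stack. */
#define __sched		__attribute__((__section__(".sched.text")))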
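Finally, the task_current() helper exists because "p is on a CPU" and "p is this runqueue's current task" are different questions on architectures that define __ARCH_WANT_UNLOCKED_CTXSW, such as ia64: there rq->lock is dropped across the context switch and task_running() tests p->oncpu, which can still be true for a task that is no longer rq->curr. Paths like rt_mutex_setprio() and sched_setscheduler() that decide whether to requeue the current task must test rq->curr directly, which is what the patch makes them do. The two predicates, condensed from the patched kernel/sched.c (the #ifdef branches are merged here for brevity):

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;		/* "is p what this rq considers current?" */
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#if defined(__ARCH_WANT_UNLOCKED_CTXSW) && defined(CONFIG_SMP)
	return p->oncpu;		/* may stay 1 while rq->curr has already changed */
#else
	return task_current(rq, p);
#endif
}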