author     John W. Linville <linville@tuxdriver.com>   2006-09-11 14:08:41 -0400
committer  John W. Linville <linville@tuxdriver.com>   2006-09-11 14:08:41 -0400
commit     623b3e1d645e42030897d18d37db10133d763006 (patch)
tree       090ff481cd72b47ff87d50615461712a33ba41e6 /kernel
parent     c576af479162c0a11d4e2691ebc97354958d9285 (diff)
parent     38f5745c5a90641079fd5b48600ae63f7ab6edcd (diff)
Merge branch 'from-linus' into upstream
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/delayacct.c     16
-rw-r--r--  kernel/exit.c           3
-rw-r--r--  kernel/fork.c           6
-rw-r--r--  kernel/futex.c         84
-rw-r--r--  kernel/irq/handle.c     5
-rw-r--r--  kernel/panic.c          2
-rw-r--r--  kernel/power/Kconfig    6
-rw-r--r--  kernel/spinlock.c       2
8 files changed, 28 insertions(+), 96 deletions(-)
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 57ca3730205..36752f124c6 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -41,24 +41,11 @@ void delayacct_init(void)
void __delayacct_tsk_init(struct task_struct *tsk)
{
- spin_lock_init(&tsk->delays_lock);
- /* No need to acquire tsk->delays_lock for allocation here unless
- __delayacct_tsk_init called after tsk is attached to tasklist
- */
tsk->delays = kmem_cache_zalloc(delayacct_cache, SLAB_KERNEL);
if (tsk->delays)
spin_lock_init(&tsk->delays->lock);
}
-void __delayacct_tsk_exit(struct task_struct *tsk)
-{
- struct task_delay_info *delays = tsk->delays;
- spin_lock(&tsk->delays_lock);
- tsk->delays = NULL;
- spin_unlock(&tsk->delays_lock);
- kmem_cache_free(delayacct_cache, delays);
-}
-
/*
* Start accounting for a delay statistic using
* its starting timestamp (@start)
@@ -118,8 +105,6 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
struct timespec ts;
unsigned long t1,t2,t3;
- spin_lock(&tsk->delays_lock);
-
/* Though tsk->delays accessed later, early exit avoids
* unnecessary returning of other data
*/
@@ -161,7 +146,6 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
spin_unlock(&tsk->delays->lock);
done:
- spin_unlock(&tsk->delays_lock);
return 0;
}
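
With __delayacct_tsk_exit() and the per-task delays_lock gone, tsk->delays is now freed only once the last reference to the task_struct is dropped (see the kernel/fork.c hunk below). A minimal sketch of the companion delayacct_tsk_free() helper, assuming it stays an inline in include/linux/delayacct.h; the actual body is not part of this hunk:

static inline void delayacct_tsk_free(struct task_struct *tsk)
{
	/* No locking needed: this runs from __put_task_struct() (or the fork
	 * error path) after the last reference is gone, so no reader can
	 * still be walking tsk->delays. */
	if (tsk->delays)
		kmem_cache_free(delayacct_cache, tsk->delays);
	tsk->delays = NULL;
}
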
diff --git a/kernel/exit.c b/kernel/exit.c
index dba194a8d41..d891883420f 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -908,7 +908,6 @@ fastcall NORET_TYPE void do_exit(long code)
audit_free(tsk);
taskstats_exit_send(tsk, tidstats, group_dead, mycpu);
taskstats_exit_free(tidstats);
- delayacct_tsk_exit(tsk);
exit_mm(tsk);
@@ -1054,7 +1053,7 @@ static int eligible_child(pid_t pid, int options, struct task_struct *p)
* Do not consider thread group leaders that are
* in a non-empty thread group:
*/
- if (current->tgid != p->tgid && delay_group_leader(p))
+ if (delay_group_leader(p))
return 2;
if (security_task_wait(p))
diff --git a/kernel/fork.c b/kernel/fork.c
index aa36c43783c..f9b014e3e70 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -117,6 +117,7 @@ void __put_task_struct(struct task_struct *tsk)
security_task_free(tsk);
free_uid(tsk->user);
put_group_info(tsk->group_info);
+ delayacct_tsk_free(tsk);
if (!profile_handoff_task(tsk))
free_task(tsk);
@@ -1011,7 +1012,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
retval = -EFAULT;
if (clone_flags & CLONE_PARENT_SETTID)
if (put_user(p->pid, parent_tidptr))
- goto bad_fork_cleanup;
+ goto bad_fork_cleanup_delays_binfmt;
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
@@ -1277,7 +1278,8 @@ bad_fork_cleanup_policy:
bad_fork_cleanup_cpuset:
#endif
cpuset_exit(p);
-bad_fork_cleanup:
+bad_fork_cleanup_delays_binfmt:
+ delayacct_tsk_free(p);
if (p->binfmt)
module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
diff --git a/kernel/futex.c b/kernel/futex.c
index b9b8aea5389..9d260e838cf 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1120,9 +1120,10 @@ static int futex_wait(u32 __user *uaddr, u32 val, unsigned long time)
* if there are waiters then it will block, it does PI, etc. (Due to
* races the kernel might see a 0 value of the futex too.)
*/
-static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
- struct hrtimer_sleeper *to)
+static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
+ long nsec, int trylock)
{
+ struct hrtimer_sleeper timeout, *to = NULL;
struct task_struct *curr = current;
struct futex_hash_bucket *hb;
u32 uval, newval, curval;
@@ -1132,6 +1133,13 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
if (refill_pi_state_cache())
return -ENOMEM;
+ if (sec != MAX_SCHEDULE_TIMEOUT) {
+ to = &timeout;
+ hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
+ hrtimer_init_sleeper(to, current);
+ to->timer.expires = ktime_set(sec, nsec);
+ }
+
q.pi_state = NULL;
retry:
down_read(&curr->mm->mmap_sem);
@@ -1307,7 +1315,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
if (!detect && ret == -EDEADLK && 0)
force_sig(SIGKILL, current);
- return ret;
+ return ret != -EINTR ? ret : -ERESTARTNOINTR;
out_unlock_release_sem:
queue_unlock(&q, hb);
@@ -1342,76 +1350,6 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
}
/*
- * Restart handler
- */
-static long futex_lock_pi_restart(struct restart_block *restart)
-{
- struct hrtimer_sleeper timeout, *to = NULL;
- int ret;
-
- restart->fn = do_no_restart_syscall;
-
- if (restart->arg2 || restart->arg3) {
- to = &timeout;
- hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
- hrtimer_init_sleeper(to, current);
- to->timer.expires.tv64 = ((u64)restart->arg1 << 32) |
- (u64) restart->arg0;
- }
-
- pr_debug("lock_pi restart: %p, %d (%d)\n",
- (u32 __user *)restart->arg0, current->pid);
-
- ret = do_futex_lock_pi((u32 __user *)restart->arg0, restart->arg1,
- 0, to);
-
- if (ret != -EINTR)
- return ret;
-
- restart->fn = futex_lock_pi_restart;
-
- /* The other values are filled in */
- return -ERESTART_RESTARTBLOCK;
-}
-
-/*
- * Called from the syscall entry below.
- */
-static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
- long nsec, int trylock)
-{
- struct hrtimer_sleeper timeout, *to = NULL;
- struct restart_block *restart;
- int ret;
-
- if (sec != MAX_SCHEDULE_TIMEOUT) {
- to = &timeout;
- hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
- hrtimer_init_sleeper(to, current);
- to->timer.expires = ktime_set(sec, nsec);
- }
-
- ret = do_futex_lock_pi(uaddr, detect, trylock, to);
-
- if (ret != -EINTR)
- return ret;
-
- pr_debug("lock_pi interrupted: %p, %d (%d)\n", uaddr, current->pid);
-
- restart = &current_thread_info()->restart_block;
- restart->fn = futex_lock_pi_restart;
- restart->arg0 = (unsigned long) uaddr;
- restart->arg1 = detect;
- if (to) {
- restart->arg2 = to->timer.expires.tv64 & 0xFFFFFFFF;
- restart->arg3 = to->timer.expires.tv64 >> 32;
- } else
- restart->arg2 = restart->arg3 = 0;
-
- return -ERESTART_RESTARTBLOCK;
-}
-
-/*
* Userspace attempted a TID -> 0 atomic transition, and failed.
* This is the in-kernel slowpath: we look up the PI state (if any),
* and do the rt-mutex unlock.
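
The removed restart-block plumbing is replaced by returning -ERESTARTNOINTR from futex_lock_pi(), so an interrupted caller is transparently restarted with its original absolute timeout. From userspace nothing changes; a hedged sketch of a caller (lock_word, the 5-second deadline and the unlock fast path are illustrative assumptions, not part of this patch):

#include <linux/futex.h>	/* FUTEX_LOCK_PI, FUTEX_UNLOCK_PI */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t lock_word;	/* 0 = free, otherwise the owner's TID */

int main(void)
{
	struct timespec deadline;

	/* FUTEX_LOCK_PI takes an absolute CLOCK_REALTIME timeout; the kernel
	 * splits it into the sec/nsec pair handed to futex_lock_pi(). */
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 5;			/* illustrative 5s deadline */

	if (syscall(SYS_futex, &lock_word, FUTEX_LOCK_PI, 0,
		    &deadline, NULL, 0) == -1) {
		perror("FUTEX_LOCK_PI");	/* ETIMEDOUT, EDEADLK, ... */
		return 1;
	}

	/* ... critical section: lock_word now holds our TID ... */

	/* Fast-path unlock is the TID -> 0 transition; if it fails because
	 * the kernel set FUTEX_WAITERS, take the in-kernel slowpath. */
	if (!__sync_bool_compare_and_swap(&lock_word,
					  (uint32_t)syscall(SYS_gettid), 0))
		syscall(SYS_futex, &lock_word, FUTEX_UNLOCK_PI, 0,
			NULL, NULL, 0);
	return 0;
}
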
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index fc4e906aedb..48a53f68af9 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -20,6 +20,11 @@
/**
* handle_bad_irq - handle spurious and unhandled irqs
+ * @irq: the interrupt number
+ * @desc: description of the interrupt
+ * @regs: pointer to a register structure
+ *
+ * Handles spurious and unhandled IRQs. It also prints a debug message.
*/
void fastcall
handle_bad_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
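
The new kernel-doc spells out when handle_bad_irq() runs: it is the flow handler of last resort, bumping the spurious/unhandled counters and printing a debug message. A hedged sketch of a cascade demultiplexer falling back to it; read_pending_child() and the installation details are illustrative assumptions, not from this patch:

#include <linux/irq.h>

extern unsigned int read_pending_child(void);	/* assumed hardware helper */

/* Hypothetical demultiplexer for a secondary interrupt controller, to be
 * installed on the parent line with set_irq_chained_handler(). */
static void fastcall demux_cascade_irq(unsigned int irq, struct irq_desc *desc,
				       struct pt_regs *regs)
{
	unsigned int child = read_pending_child();

	if (child >= NR_IRQS) {
		/* Nothing sensible to dispatch: account it as bad/spurious. */
		handle_bad_irq(irq, desc, regs);
		return;
	}
	desc = irq_desc + child;
	desc->handle_irq(child, desc, regs);
}
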
diff --git a/kernel/panic.c b/kernel/panic.c
index 9b8dcfd1ca9..8010b9b17ac 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -173,7 +173,7 @@ const char *print_tainted(void)
void add_taint(unsigned flag)
{
- debug_locks_off(); /* can't trust the integrity of the kernel anymore */
+ debug_locks = 0; /* can't trust the integrity of the kernel anymore */
tainted |= flag;
}
EXPORT_SYMBOL(add_taint);
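
add_taint() just records the flag and turns lock debugging off, since lockdep output is meaningless once the kernel's integrity is suspect. A hedged caller sketch; the corrected-MCE scenario and mce_bank_is_bad() are illustrative assumptions, not a real in-tree API:

#include <linux/kernel.h>	/* add_taint(), TAINT_MACHINE_CHECK, printk() */

extern int mce_bank_is_bad(int bank);	/* assumed helper */

/* Hypothetical: keep running after a corrected machine check, but mark the
 * kernel tainted so later oopses and bug reports carry the flag. */
static void note_corrected_mce(int bank)
{
	if (mce_bank_is_bad(bank)) {
		add_taint(TAINT_MACHINE_CHECK);
		printk(KERN_WARNING "MCE: bank %d reported errors, "
		       "kernel tainted\n", bank);
	}
}
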
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index ae44a70aae8..619ecabf7c5 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -56,7 +56,7 @@ config PM_TRACE
config SOFTWARE_SUSPEND
bool "Software Suspend"
- depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)
+ depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP) && !X86_PAE) || ((FRV || PPC32) && !SMP))
---help---
Enable the possibility of suspending the machine.
It doesn't need ACPI or APM.
@@ -78,6 +78,10 @@ config SOFTWARE_SUSPEND
For more information take a look at <file:Documentation/power/swsusp.txt>.
+ (For now, swsusp is incompatible with PAE aka HIGHMEM_64G on i386:
+ we need identity mapping for resume to work, and that is trivial
+ to get with 4MB pages, but less than trivial on PAE.)
+
config PM_STD_PARTITION
string "Default resume partition"
depends on SOFTWARE_SUSPEND
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index bfd6ad9c033..fb524b009ee 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -72,7 +72,7 @@ EXPORT_SYMBOL(_write_trylock);
* not re-enabled during lock-acquire (which the preempt-spin-ops do):
*/
#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \
- defined(CONFIG_PROVE_LOCKING)
+ defined(CONFIG_DEBUG_LOCK_ALLOC)
void __lockfunc _read_lock(rwlock_t *lock)
{