 arch/mips/kernel/irixsig.c |   4
 arch/um/kernel/smp.c       |   1
 drivers/char/tty_io.c      |  20
 fs/exec.c                  |  30
 include/linux/init_task.h  |   2
 include/linux/pid.h        |   2
 include/linux/sched.h      |  51
 include/linux/signal.h     |   2
 include/linux/slab.h       |   1
 kernel/exit.c              | 129
 kernel/fork.c              | 121
 kernel/kmod.c              |   2
 kernel/pid.c               |  40
 kernel/ptrace.c            |   8
 kernel/signal.c            | 344
 kernel/sys.c               |  73
 16 files changed, 309 insertions(+), 521 deletions(-)
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
index 08273a2a501..8150f071f80 100644
--- a/arch/mips/kernel/irixsig.c
+++ b/arch/mips/kernel/irixsig.c
@@ -603,7 +603,7 @@ repeat:
/* move to end of parent's list to avoid starvation */
write_lock_irq(&tasklist_lock);
remove_parent(p);
- add_parent(p, p->parent);
+ add_parent(p);
write_unlock_irq(&tasklist_lock);
retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
if (retval)
@@ -643,7 +643,7 @@ repeat:
write_lock_irq(&tasklist_lock);
remove_parent(p);
p->parent = p->real_parent;
- add_parent(p, p->parent);
+ add_parent(p);
do_notify_parent(p, SIGCHLD);
write_unlock_irq(&tasklist_lock);
} else
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index c8d8d0ac1a7..511116aebaf 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -143,7 +143,6 @@ void smp_prepare_cpus(unsigned int maxcpus)
idle = idle_thread(cpu);
init_idle(idle, cpu);
- unhash_process(idle);
waittime = 200000000;
while (waittime-- && !cpu_isset(cpu, cpu_callin_map))
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 811dadb9ce3..0bfd1b63662 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1094,8 +1094,8 @@ static void do_tty_hangup(void *data)
p->signal->tty = NULL;
if (!p->signal->leader)
continue;
- send_group_sig_info(SIGHUP, SEND_SIG_PRIV, p);
- send_group_sig_info(SIGCONT, SEND_SIG_PRIV, p);
+ group_send_sig_info(SIGHUP, SEND_SIG_PRIV, p);
+ group_send_sig_info(SIGCONT, SEND_SIG_PRIV, p);
if (tty->pgrp > 0)
p->signal->tty_old_pgrp = tty->pgrp;
} while_each_task_pid(tty->session, PIDTYPE_SID, p);
@@ -2672,7 +2672,7 @@ static void __do_SAK(void *arg)
tty_hangup(tty);
#else
struct tty_struct *tty = arg;
- struct task_struct *p;
+ struct task_struct *g, *p;
int session;
int i;
struct file *filp;
@@ -2693,8 +2693,18 @@ static void __do_SAK(void *arg)
tty->driver->flush_buffer(tty);
read_lock(&tasklist_lock);
+ /* Kill the entire session */
do_each_task_pid(session, PIDTYPE_SID, p) {
- if (p->signal->tty == tty || session > 0) {
+ printk(KERN_NOTICE "SAK: killed process %d"
+ " (%s): p->signal->session==tty->session\n",
+ p->pid, p->comm);
+ send_sig(SIGKILL, p, 1);
+ } while_each_task_pid(session, PIDTYPE_SID, p);
+ /* Now kill any processes that happen to have the
+ * tty open.
+ */
+ do_each_thread(g, p) {
+ if (p->signal->tty == tty) {
printk(KERN_NOTICE "SAK: killed process %d"
" (%s): p->signal->session==tty->session\n",
p->pid, p->comm);
@@ -2721,7 +2731,7 @@ static void __do_SAK(void *arg)
rcu_read_unlock();
}
task_unlock(p);
- } while_each_task_pid(session, PIDTYPE_SID, p);
+ } while_each_thread(g, p);
read_unlock(&tasklist_lock);
#endif
}
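
The reworked __do_SAK() now makes two passes: first it kills every task hashed in the session via the PIDTYPE_SID pid hash, then it walks every thread in the system to catch tasks that merely hold the tty open without belonging to the session. A minimal sketch of the two iterator shapes it relies on (sak_kill() is a hypothetical illustration, not part of the patch; both iterations assume tasklist_lock is read-held):

	static void sak_kill(struct tty_struct *tty, int session)
	{
		struct task_struct *g, *p;

		read_lock(&tasklist_lock);
		/* pass 1: every task hashed under this session id */
		do_each_task_pid(session, PIDTYPE_SID, p) {
			send_sig(SIGKILL, p, 1);
		} while_each_task_pid(session, PIDTYPE_SID, p);

		/* pass 2: every thread in the system, filtered by hand */
		do_each_thread(g, p) {
			if (p->signal->tty == tty)
				send_sig(SIGKILL, p, 1);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}
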
diff --git a/fs/exec.c b/fs/exec.c
index c7397c46ad6..950ebd43cdc 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -616,6 +616,15 @@ static int de_thread(struct task_struct *tsk)
kmem_cache_free(sighand_cachep, newsighand);
return -EAGAIN;
}
+
+ /*
+ * child_reaper ignores SIGKILL, change it now.
+ * Reparenting needs write_lock on tasklist_lock,
+ * so it is safe to do it under read_lock.
+ */
+ if (unlikely(current->group_leader == child_reaper))
+ child_reaper = current;
+
zap_other_threads(current);
read_unlock(&tasklist_lock);
@@ -699,22 +708,30 @@ static int de_thread(struct task_struct *tsk)
remove_parent(current);
remove_parent(leader);
- switch_exec_pids(leader, current);
+
+ /* Become a process group leader with the old leader's pid.
+ * Note: The old leader also uses this pid until release_task
+ * is called. Odd but simple and correct.
+ */
+ detach_pid(current, PIDTYPE_PID);
+ current->pid = leader->pid;
+ attach_pid(current, PIDTYPE_PID, current->pid);
+ attach_pid(current, PIDTYPE_PGID, current->signal->pgrp);
+ attach_pid(current, PIDTYPE_SID, current->signal->session);
+ list_add_tail(&current->tasks, &init_task.tasks);
current->parent = current->real_parent = leader->real_parent;
leader->parent = leader->real_parent = child_reaper;
current->group_leader = current;
leader->group_leader = leader;
- add_parent(current, current->parent);
- add_parent(leader, leader->parent);
+ add_parent(current);
+ add_parent(leader);
if (ptrace) {
current->ptrace = ptrace;
__ptrace_link(current, parent);
}
- list_del(&current->tasks);
- list_add_tail(&current->tasks, &init_task.tasks);
current->exit_signal = SIGCHLD;
BUG_ON(leader->exit_state != EXIT_ZOMBIE);
@@ -751,7 +768,6 @@ no_thread_group:
/*
* Move our state over to newsighand and switch it in.
*/
- spin_lock_init(&newsighand->siglock);
atomic_set(&newsighand->count, 1);
memcpy(newsighand->action, oldsighand->action,
sizeof(newsighand->action));
@@ -768,7 +784,7 @@ no_thread_group:
write_unlock_irq(&tasklist_lock);
if (atomic_dec_and_test(&oldsighand->count))
- sighand_free(oldsighand);
+ kmem_cache_free(sighand_cachep, oldsighand);
}
BUG_ON(!thread_group_leader(current));
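
With switch_exec_pids() gone (see kernel/pid.c below), the pid takeover in de_thread() is now spelled out inline. Condensed to its essentials, the surviving non-leader thread does the following under write-locked tasklist_lock (a summary of the hunk above, not additional code):

	detach_pid(current, PIDTYPE_PID);	/* drop our own pid */
	current->pid = leader->pid;		/* adopt the zombie leader's pid */
	attach_pid(current, PIDTYPE_PID, current->pid);
	attach_pid(current, PIDTYPE_PGID, current->signal->pgrp);
	attach_pid(current, PIDTYPE_SID, current->signal->session);
	list_add_tail(&current->tasks, &init_task.tasks);	/* we are a leader now */

The old leader stays hashed under the same pid number until release_task() runs; as the comment says, odd but simple and correct, since it is already EXIT_ZOMBIE and about to be reaped.
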
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 92146f3b742..41ecbb847f3 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -62,6 +62,8 @@
.posix_timers = LIST_HEAD_INIT(sig.posix_timers), \
.cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \
.rlim = INIT_RLIMITS, \
+ .pgrp = 1, \
+ .session = 1, \
}
#define INIT_SIGHAND(sighand) { \
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 5b2fcb19d2d..5b9082cc600 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -4,7 +4,6 @@
enum pid_type
{
PIDTYPE_PID,
- PIDTYPE_TGID,
PIDTYPE_PGID,
PIDTYPE_SID,
PIDTYPE_MAX
@@ -38,7 +37,6 @@ extern struct pid *FASTCALL(find_pid(enum pid_type, int));
extern int alloc_pidmap(void);
extern void FASTCALL(free_pidmap(int));
-extern void switch_exec_pids(struct task_struct *leader, struct task_struct *thread);
#define do_each_task_pid(who, type, task) \
if ((task = find_task_by_pid_type(type, who))) { \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 20b4f0372e4..d04186d8cc6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -355,16 +355,8 @@ struct sighand_struct {
atomic_t count;
struct k_sigaction action[_NSIG];
spinlock_t siglock;
- struct rcu_head rcu;
};
-extern void sighand_free_cb(struct rcu_head *rhp);
-
-static inline void sighand_free(struct sighand_struct *sp)
-{
- call_rcu(&sp->rcu, sighand_free_cb);
-}
-
/*
* NOTE! "signal_struct" does not have it's own
* locking, because a shared signal_struct always
@@ -760,6 +752,7 @@ struct task_struct {
/* PID/PID hash table linkage. */
struct pid pids[PIDTYPE_MAX];
+ struct list_head thread_group;
struct completion *vfork_done; /* for vfork() */
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
@@ -1101,7 +1094,6 @@ extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern void zap_other_threads(struct task_struct *p);
extern int kill_pg(pid_t, int, int);
-extern int kill_sl(pid_t, int, int);
extern int kill_proc(pid_t, int, int);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
@@ -1158,10 +1150,8 @@ extern void flush_thread(void);
extern void exit_thread(void);
extern void exit_files(struct task_struct *);
-extern void exit_signal(struct task_struct *);
-extern void __exit_signal(struct task_struct *);
-extern void exit_sighand(struct task_struct *);
-extern void __exit_sighand(struct task_struct *);
+extern void __cleanup_signal(struct signal_struct *);
+extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);
extern NORET_TYPE void do_group_exit(int);
@@ -1185,19 +1175,7 @@ extern void wait_task_inactive(task_t * p);
#endif
#define remove_parent(p) list_del_init(&(p)->sibling)
-#define add_parent(p, parent) list_add_tail(&(p)->sibling,&(parent)->children)
-
-#define REMOVE_LINKS(p) do { \
- if (thread_group_leader(p)) \
- list_del_init(&(p)->tasks); \
- remove_parent(p); \
- } while (0)
-
-#define SET_LINKS(p) do { \
- if (thread_group_leader(p)) \
- list_add_tail(&(p)->tasks,&init_task.tasks); \
- add_parent(p, (p)->parent); \
- } while (0)
+#define add_parent(p) list_add_tail(&(p)->sibling,&(p)->parent->children)
#define next_task(p) list_entry((p)->tasks.next, struct task_struct, tasks)
#define prev_task(p) list_entry((p)->tasks.prev, struct task_struct, tasks)
@@ -1215,20 +1193,22 @@ extern void wait_task_inactive(task_t * p);
#define while_each_thread(g, t) \
while ((t = next_thread(t)) != g)
-extern task_t * FASTCALL(next_thread(const task_t *p));
-
#define thread_group_leader(p) (p->pid == p->tgid)
+static inline task_t *next_thread(task_t *p)
+{
+ return list_entry(rcu_dereference(p->thread_group.next),
+ task_t, thread_group);
+}
+
static inline int thread_group_empty(task_t *p)
{
- return list_empty(&p->pids[PIDTYPE_TGID].pid_list);
+ return list_empty(&p->thread_group);
}
#define delay_group_leader(p) \
(thread_group_leader(p) && !thread_group_empty(p))
-extern void unhash_process(struct task_struct *p);
-
/*
* Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
* subscriptions and synchronises with wait4(). Also used in procfs. Also
@@ -1248,6 +1228,15 @@ static inline void task_unlock(struct task_struct *p)
spin_unlock(&p->alloc_lock);
}
+extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+ unsigned long *flags);
+
+static inline void unlock_task_sighand(struct task_struct *tsk,
+ unsigned long *flags)
+{
+ spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
+}
+
#ifndef __HAVE_THREAD_FUNCTIONS
#define task_thread_info(task) (task)->thread_info
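
PIDTYPE_TGID is gone: thread-group membership is now a plain circular list_head (task_struct.thread_group) anchored at the group leader, and next_thread() is an RCU-safe next-pointer chase. A toy userspace sketch of the same traversal shape (stand-in types, not the kernel's):

	#include <stdio.h>
	#include <stddef.h>

	struct list_head { struct list_head *next, *prev; };

	struct task {
		int pid;
		struct list_head thread_group;	/* circular: leader -> t1 -> t2 -> leader */
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static struct task *next_thread(struct task *p)
	{
		/* the kernel wraps this load in rcu_dereference() */
		return container_of(p->thread_group.next, struct task, thread_group);
	}

	int main(void)
	{
		struct task leader = { .pid = 100 }, t1 = { .pid = 101 }, t2 = { .pid = 102 };

		leader.thread_group.next = &t1.thread_group;
		t1.thread_group.next = &t2.thread_group;
		t2.thread_group.next = &leader.thread_group;

		/* the while_each_thread(g, t) pattern: walk until we wrap around */
		struct task *t = &leader;
		do {
			printf("pid %d\n", t->pid);
		} while ((t = next_thread(t)) != &leader);
		return 0;
	}

A single-threaded process is simply an empty list, which is why thread_group_empty() reduces to list_empty(&p->thread_group).
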
diff --git a/include/linux/signal.h b/include/linux/signal.h
index b7d093520bb..162a8fd10b2 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -249,6 +249,8 @@ static inline void init_sigpending(struct sigpending *sig)
INIT_LIST_HEAD(&sig->list);
}
+extern void flush_sigqueue(struct sigpending *queue);
+
/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
static inline int valid_signal(unsigned long sig)
{
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 15e1d9736b1..3af03b19c98 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -210,7 +210,6 @@ extern kmem_cache_t *names_cachep;
extern kmem_cache_t *files_cachep;
extern kmem_cache_t *filp_cachep;
extern kmem_cache_t *fs_cachep;
-extern kmem_cache_t *signal_cachep;
extern kmem_cache_t *sighand_cachep;
extern kmem_cache_t *bio_cachep;
diff --git a/kernel/exit.c b/kernel/exit.c
index a8c7efc7a68..bc0ec674d3f 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -29,6 +29,7 @@
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
+#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
@@ -50,15 +51,80 @@ static void __unhash_process(struct task_struct *p)
{
nr_threads--;
detach_pid(p, PIDTYPE_PID);
- detach_pid(p, PIDTYPE_TGID);
if (thread_group_leader(p)) {
detach_pid(p, PIDTYPE_PGID);
detach_pid(p, PIDTYPE_SID);
- if (p->pid)
- __get_cpu_var(process_counts)--;
+
+ list_del_init(&p->tasks);
+ __get_cpu_var(process_counts)--;
+ }
+ list_del_rcu(&p->thread_group);
+ remove_parent(p);
+}
+
+/*
+ * This function expects the tasklist_lock write-locked.
+ */
+static void __exit_signal(struct task_struct *tsk)
+{
+ struct signal_struct *sig = tsk->signal;
+ struct sighand_struct *sighand;
+
+ BUG_ON(!sig);
+ BUG_ON(!atomic_read(&sig->count));
+
+ rcu_read_lock();
+ sighand = rcu_dereference(tsk->sighand);
+ spin_lock(&sighand->siglock);
+
+ posix_cpu_timers_exit(tsk);
+ if (atomic_dec_and_test(&sig->count))
+ posix_cpu_timers_exit_group(tsk);
+ else {
+ /*
+ * If there is any task waiting for the group exit
+ * then notify it:
+ */
+ if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
+ wake_up_process(sig->group_exit_task);
+ sig->group_exit_task = NULL;
+ }
+ if (tsk == sig->curr_target)
+ sig->curr_target = next_thread(tsk);
+ /*
+ * Accumulate here the counters for all threads but the
+ * group leader as they die, so they can be added into
+ * the process-wide totals when those are taken.
+ * The group leader stays around as a zombie as long
+ * as there are other threads. When it gets reaped,
+ * the exit.c code will add its counts into these totals.
+ * We won't ever get here for the group leader, since it
+ * will have been the last reference on the signal_struct.
+ */
+ sig->utime = cputime_add(sig->utime, tsk->utime);
+ sig->stime = cputime_add(sig->stime, tsk->stime);
+ sig->min_flt += tsk->min_flt;
+ sig->maj_flt += tsk->maj_flt;
+ sig->nvcsw += tsk->nvcsw;
+ sig->nivcsw += tsk->nivcsw;
+ sig->sched_time += tsk->sched_time;
+ sig = NULL; /* Marker for below. */
}
- REMOVE_LINKS(p);
+ __unhash_process(tsk);
+
+ tsk->signal = NULL;
+ tsk->sighand = NULL;
+ spin_unlock(&sighand->siglock);
+ rcu_read_unlock();
+
+ __cleanup_sighand(sighand);
+ clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
+ flush_sigqueue(&tsk->pending);
+ if (sig) {
+ flush_sigqueue(&sig->shared_pending);
+ __cleanup_signal(sig);
+ }
}
void release_task(struct task_struct * p)
@@ -67,21 +133,14 @@ void release_task(struct task_struct * p)
task_t *leader;
struct dentry *proc_dentry;
-repeat:
+repeat:
atomic_dec(&p->user->processes);
spin_lock(&p->proc_lock);
proc_dentry = proc_pid_unhash(p);
write_lock_irq(&tasklist_lock);
- if (unlikely(p->ptrace))
- __ptrace_unlink(p);
+ ptrace_unlink(p);
BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
__exit_signal(p);
- /*
- * Note that the fastpath in sys_times depends on __exit_signal having
- * updated the counters before a task is removed from the tasklist of
- * the process by __unhash_process.
- */
- __unhash_process(p);
/*
* If we are the last non-leader member of the thread
@@ -116,21 +175,6 @@ repeat:
goto repeat;
}
-/* we are using it only for SMP init */
-
-void unhash_process(struct task_struct *p)
-{
- struct dentry *proc_dentry;
-
- spin_lock(&p->proc_lock);
- proc_dentry = proc_pid_unhash(p);
- write_lock_irq(&tasklist_lock);
- __unhash_process(p);
- write_unlock_irq(&tasklist_lock);
- spin_unlock(&p->proc_lock);
- proc_pid_flush(proc_dentry);
-}
-
/*
* This checks not only the pgrp, but falls back on the pid if no
* satisfactory pgrp is found. I dunno - gdb doesn't work correctly
@@ -238,10 +282,10 @@ static void reparent_to_init(void)
ptrace_unlink(current);
/* Reparent to init */
- REMOVE_LINKS(current);
+ remove_parent(current);
current->parent = child_reaper;
current->real_parent = child_reaper;
- SET_LINKS(current);
+ add_parent(current);
/* Set the exit signal to SIGCHLD so we signal init on exit */
current->exit_signal = SIGCHLD;
@@ -538,13 +582,13 @@ static void exit_mm(struct task_struct * tsk)
mmput(mm);
}
-static inline void choose_new_parent(task_t *p, task_t *reaper, task_t *child_reaper)
+static inline void choose_new_parent(task_t *p, task_t *reaper)
{
/*
* Make sure we're not reparenting to ourselves and that
* the parent is not a zombie.
*/
- BUG_ON(p == reaper || reaper->exit_state >= EXIT_ZOMBIE);
+ BUG_ON(p == reaper || reaper->exit_state);
p->real_parent = reaper;
}
@@ -569,9 +613,9 @@ static void reparent_thread(task_t *p, task_t *father, int traced)
* anyway, so let go of it.
*/
p->ptrace = 0;
- list_del_init(&p->sibling);
+ remove_parent(p);
p->parent = p->real_parent;
- list_add_tail(&p->sibling, &p->parent->children);
+ add_parent(p);
/* If we'd notified the old parent about this child's death,
* also notify the new parent.
@@ -645,7 +689,7 @@ static void forget_original_parent(struct task_struct * father,
if (father == p->real_parent) {
/* reparent with a reaper, real father is us */
- choose_new_parent(p, reaper, child_reaper);
+ choose_new_parent(p, reaper);
reparent_thread(p, father, 0);
} else {
/* reparent ptraced task to its real parent */
@@ -666,7 +710,7 @@ static void forget_original_parent(struct task_struct * father,
}
list_for_each_safe(_p, _n, &father->ptrace_children) {
p = list_entry(_p,struct task_struct,ptrace_list);
- choose_new_parent(p, reaper, child_reaper);
+ choose_new_parent(p, reaper);
reparent_thread(p, father, 1);
}
}
@@ -807,7 +851,7 @@ fastcall NORET_TYPE void do_exit(long code)
panic("Aiee, killing interrupt handler!");
if (unlikely(!tsk->pid))
panic("Attempted to kill the idle task!");
- if (unlikely(tsk->pid == 1))
+ if (unlikely(tsk == child_reaper))
panic("Attempted to kill init!");
if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
@@ -920,13 +964,6 @@ asmlinkage long sys_exit(int error_code)
do_exit((error_code&0xff)<<8);
}
-task_t fastcall *next_thread(const task_t *p)
-{
- return pid_task(p->pids[PIDTYPE_TGID].pid_list.next, PIDTYPE_TGID);
-}
-
-EXPORT_SYMBOL(next_thread);
-
/*
* Take down every thread in the group. This is called by fatal signals
* as well as by sys_exit_group (below).
@@ -941,7 +978,6 @@ do_group_exit(int exit_code)
else if (!thread_group_empty(current)) {
struct signal_struct *const sig = current->signal;
struct sighand_struct *const sighand = current->sighand;
- read_lock(&tasklist_lock);
spin_lock_irq(&sighand->siglock);
if (sig->flags & SIGNAL_GROUP_EXIT)
/* Another thread got here before we took the lock. */
@@ -951,7 +987,6 @@ do_group_exit(int exit_code)
zap_other_threads(current);
}
spin_unlock_irq(&sighand->siglock);
- read_unlock(&tasklist_lock);
}
do_exit(exit_code);
@@ -1281,7 +1316,7 @@ bail_ref:
/* move to end of parent's list to avoid starvation */
remove_parent(p);
- add_parent(p, p->parent);
+ add_parent(p);
write_unlock_irq(&tasklist_lock);
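
Worth noting in the consolidated __exit_signal() above is the sig = NULL marker: whether this was the last thread is decided under siglock, but the actual signal_struct teardown is deferred until after the unlock. Reduced to its shape (a condensed view of the function above, not patch code):

	struct signal_struct *sig = tsk->signal;

	spin_lock(&sighand->siglock);
	if (!atomic_dec_and_test(&sig->count))
		sig = NULL;		/* other threads remain: nothing to free */
	/* ... accumulate counters, unhash tsk, clear tsk->signal, still locked ... */
	spin_unlock(&sighand->siglock);

	if (sig) {			/* we were the last thread */
		flush_sigqueue(&sig->shared_pending);
		__cleanup_signal(sig);
	}
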
diff --git a/kernel/fork.c b/kernel/fork.c
index c49bd193b05..b3f7a1bb5e5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -84,7 +84,7 @@ static kmem_cache_t *task_struct_cachep;
#endif
/* SLAB cache for signal_struct structures (tsk->signal) */
-kmem_cache_t *signal_cachep;
+static kmem_cache_t *signal_cachep;
/* SLAB cache for sighand_struct structures (tsk->sighand) */
kmem_cache_t *sighand_cachep;
@@ -786,14 +786,6 @@ int unshare_files(void)
EXPORT_SYMBOL(unshare_files);
-void sighand_free_cb(struct rcu_head *rhp)
-{
- struct sighand_struct *sp;
-
- sp = container_of(rhp, struct sighand_struct, rcu);
- kmem_cache_free(sighand_cachep, sp);
-}
-
static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
struct sighand_struct *sig;
@@ -806,12 +798,17 @@ static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
rcu_assign_pointer(tsk->sighand, sig);
if (!sig)
return -ENOMEM;
- spin_lock_init(&sig->siglock);
atomic_set(&sig->count, 1);
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
return 0;
}
+void __cleanup_sighand(struct sighand_struct *sighand)
+{
+ if (atomic_dec_and_test(&sighand->count))
+ kmem_cache_free(sighand_cachep, sighand);
+}
+
static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
{
struct signal_struct *sig;
@@ -881,6 +878,22 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
return 0;
}
+void __cleanup_signal(struct signal_struct *sig)
+{
+ exit_thread_group_keys(sig);
+ kmem_cache_free(signal_cachep, sig);
+}
+
+static inline void cleanup_signal(struct task_struct *tsk)
+{
+ struct signal_struct *sig = tsk->signal;
+
+ atomic_dec(&sig->live);
+
+ if (atomic_dec_and_test(&sig->count))
+ __cleanup_signal(sig);
+}
+
static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
unsigned long new_flags = p->flags;
@@ -1095,6 +1108,7 @@ static task_t *copy_process(unsigned long clone_flags,
* We dont wake it up yet.
*/
p->group_leader = p;
+ INIT_LIST_HEAD(&p->thread_group);
INIT_LIST_HEAD(&p->ptrace_children);
INIT_LIST_HEAD(&p->ptrace_list);
@@ -1118,16 +1132,6 @@ static task_t *copy_process(unsigned long clone_flags,
!cpu_online(task_cpu(p))))
set_task_cpu(p, smp_processor_id());
- /*
- * Check for pending SIGKILL! The new thread should not be allowed
- * to slip out of an OOM kill. (or normal SIGKILL.)
- */
- if (sigismember(&current->pending.signal, SIGKILL)) {
- write_unlock_irq(&tasklist_lock);
- retval = -EINTR;
- goto bad_fork_cleanup_namespace;
- }
-
/* CLONE_PARENT re-uses the old parent */
if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
p->real_parent = current->real_parent;
@@ -1136,6 +1140,23 @@ static task_t *copy_process(unsigned long clone_flags,
p->parent = p->real_parent;
spin_lock(&current->sighand->siglock);
+
+ /*
+ * Process group and session signals need to be delivered to just the
+ * parent before the fork or both the parent and the child after the
+ * fork. Restart if a signal comes in before we add the new process to
+ * it's process group.
+ * A fatal signal pending means that current will exit, so the new
+ * thread can't slip out of an OOM kill (or normal SIGKILL).
+ */
+ recalc_sigpending();
+ if (signal_pending(current)) {
+ spin_unlock(&current->sighand->siglock);
+ write_unlock_irq(&tasklist_lock);
+ retval = -ERESTARTNOINTR;
+ goto bad_fork_cleanup_namespace;
+ }
+
if (clone_flags & CLONE_THREAD) {
/*
* Important: if an exit-all has been started then
@@ -1148,17 +1169,9 @@ static task_t *copy_process(unsigned long clone_flags,
retval = -EAGAIN;
goto bad_fork_cleanup_namespace;
}
- p->group_leader = current->group_leader;
- if (current->signal->group_stop_count > 0) {
- /*
- * There is an all-stop in progress for the group.
- * We ourselves will stop as soon as we check signals.
- * Make the new thread part of that group stop too.
- */
- current->signal->group_stop_count++;
- set_tsk_thread_flag(p, TIF_SIGPENDING);
- }
+ p->group_leader = current->group_leader;
+ list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
if (!cputime_eq(current->signal->it_virt_expires,
cputime_zero) ||
@@ -1181,23 +1194,25 @@ static task_t *copy_process(unsigned long clone_flags,
*/
p->ioprio = current->ioprio;
- SET_LINKS(p);
- if (unlikely(p->ptrace & PT_PTRACED))
- __ptrace_link(p, current->parent);
-
- if (thread_group_leader(p)) {
- p->signal->tty = current->signal->tty;
- p->signal->pgrp = process_group(current);
- p->signal->session = current->signal->session;
- attach_pid(p, PIDTYPE_PGID, process_group(p));
- attach_pid(p, PIDTYPE_SID, p->signal->session);
- if (p->pid)
+ if (likely(p->pid)) {
+ add_parent(p);
+ if (unlikely(p->ptrace & PT_PTRACED))
+ __ptrace_link(p, current->parent);
+
+ if (thread_group_leader(p)) {
+ p->signal->tty = current->signal->tty;
+ p->signal->pgrp = process_group(current);
+ p->signal->session = current->signal->session;
+ attach_pid(p, PIDTYPE_PGID, process_group(p));
+ attach_pid(p, PIDTYPE_SID, p->signal->session);
+
+ list_add_tail(&p->tasks, &init_task.tasks);
__get_cpu_var(process_counts)++;
+ }
+ attach_pid(p, PIDTYPE_PID, p->pid);
+ nr_threads++;
}
- attach_pid(p, PIDTYPE_TGID, p->tgid);
- attach_pid(p, PIDTYPE_PID, p->pid);
- nr_threads++;
total_forks++;
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
@@ -1212,9 +1227,9 @@ bad_fork_cleanup_mm:
if (p->mm)
mmput(p->mm);
bad_fork_cleanup_signal:
- exit_signal(p);
+ cleanup_signal(p);
bad_fork_cleanup_sighand:
- exit_sighand(p);
+ __cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
exit_fs(p); /* blocking */
bad_fork_cleanup_files:
@@ -1261,7 +1276,7 @@ task_t * __devinit fork_idle(int cpu)
if (!task)
return ERR_PTR(-ENOMEM);
init_idle(task, cpu);
- unhash_process(task);
+
return task;
}
@@ -1353,11 +1368,21 @@ long do_fork(unsigned long clone_flags,
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
+static void sighand_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
+{
+ struct sighand_struct *sighand = data;
+
+ if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
+ SLAB_CTOR_CONSTRUCTOR)
+ spin_lock_init(&sighand->siglock);
+}
+
void __init proc_caches_init(void)
{
sighand_cachep = kmem_cache_create("sighand_cache",
sizeof(struct sighand_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
+ sighand_ctor, NULL);
signal_cachep = kmem_cache_create("signal_cache",
sizeof(struct signal_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
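
The sighand_ctor()/SLAB_DESTROY_BY_RCU combination is what makes lock_task_sighand() (kernel/signal.c below) legal. With SLAB_DESTROY_BY_RCU the memory of a freed sighand_struct cannot leave the cache until a grace period passes, so a reader that looked the pointer up under rcu_read_lock() always spins on a real, initialized spinlock, even if the object was freed and immediately reused for another sighand_struct. That is also why siglock is now initialized in the constructor, once when the object enters the slab, instead of in copy_sighand() on every allocation: re-initializing it on reuse would corrupt a lock a reader may already be spinning on. The reader side, simplified (one-shot, without the retry loop lock_task_sighand() adds):

	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	if (sighand) {
		spin_lock_irqsave(&sighand->siglock, flags);
		if (likely(sighand == tsk->sighand)) {
			/* tsk->sighand is now pinned until we unlock */
		}
		spin_unlock_irqrestore(&sighand->siglock, flags);
	}
	rcu_read_unlock();
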
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 51a892063aa..20a997c73c3 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -170,7 +170,7 @@ static int wait_for_helper(void *data)
sa.sa.sa_handler = SIG_IGN;
sa.sa.sa_flags = 0;
siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
- do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);
+ do_sigaction(SIGCHLD, &sa, NULL);
allow_signal(SIGCHLD);
pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
diff --git a/kernel/pid.c b/kernel/pid.c
index 1acc0724699..a9f2dfd006d 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -218,36 +218,6 @@ task_t *find_task_by_pid_type(int type, int nr)
EXPORT_SYMBOL(find_task_by_pid_type);
/*
- * This function switches the PIDs if a non-leader thread calls
- * sys_execve() - this must be done without releasing the PID.
- * (which a detach_pid() would eventually do.)
- */
-void switch_exec_pids(task_t *leader, task_t *thread)
-{
- __detach_pid(leader, PIDTYPE_PID);
- __detach_pid(leader, PIDTYPE_TGID);
- __detach_pid(leader, PIDTYPE_PGID);
- __detach_pid(leader, PIDTYPE_SID);
-
- __detach_pid(thread, PIDTYPE_PID);
- __detach_pid(thread, PIDTYPE_TGID);
-
- leader->pid = leader->tgid = thread->pid;
- thread->pid = thread->tgid;
-
- attach_pid(thread, PIDTYPE_PID, thread->pid);
- attach_pid(thread, PIDTYPE_TGID, thread->tgid);
- attach_pid(thread, PIDTYPE_PGID, thread->signal->pgrp);
- attach_pid(thread, PIDTYPE_SID, thread->signal->session);
- list_add_tail(&thread->tasks, &init_task.tasks);
-
- attach_pid(leader, PIDTYPE_PID, leader->pid);
- attach_pid(leader, PIDTYPE_TGID, leader->tgid);
- attach_pid(leader, PIDTYPE_PGID, leader->signal->pgrp);
- attach_pid(leader, PIDTYPE_SID, leader->signal->session);
-}
-
-/*
* The pid hash table is scaled according to the amount of memory in the
* machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or
* more.
@@ -277,16 +247,8 @@ void __init pidhash_init(void)
void __init pidmap_init(void)
{
- int i;
-
pidmap_array->page = (void *)get_zeroed_page(GFP_KERNEL);
+ /* Reserve PID 0. We never call free_pidmap(0) */
set_bit(0, pidmap_array->page);
atomic_dec(&pidmap_array->nr_free);
-
- /*
- * Allocate PID 0, and hash it via all PID types:
- */
-
- for (i = 0; i < PIDTYPE_MAX; i++)
- attach_pid(current, i, 0);
}
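
pidmap_init() now merely reserves bit 0 of the bitmap instead of hashing the swapper under every pid type; pid 0 is never handed out and never freed. A toy model of that reservation (hypothetical simplified allocator, not the kernel's):

	#include <string.h>

	#define PID_MAX 4096
	static unsigned char pidmap[PID_MAX / 8];

	static void toy_pidmap_init(void)
	{
		memset(pidmap, 0, sizeof(pidmap));
		pidmap[0] |= 1;		/* reserve pid 0: never allocated, never freed */
	}

	static int toy_alloc_pidmap(void)
	{
		for (int pid = 1; pid < PID_MAX; pid++)
			if (!(pidmap[pid / 8] & (1u << (pid % 8)))) {
				pidmap[pid / 8] |= 1u << (pid % 8);
				return pid;
			}
		return -1;		/* out of pids */
	}
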
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index d95a72c9279..86a7f6c60cb 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -35,9 +35,9 @@ void __ptrace_link(task_t *child, task_t *new_parent)
if (child->parent == new_parent)
return;
list_add(&child->ptrace_list, &child->parent->ptrace_children);
- REMOVE_LINKS(child);
+ remove_parent(child);
child->parent = new_parent;
- SET_LINKS(child);
+ add_parent(child);
}
/*
@@ -77,9 +77,9 @@ void __ptrace_unlink(task_t *child)
child->ptrace = 0;
if (!list_empty(&child->ptrace_list)) {
list_del_init(&child->ptrace_list);
- REMOVE_LINKS(child);
+ remove_parent(child);
child->parent = child->real_parent;
- SET_LINKS(child);
+ add_parent(child);
}
ptrace_untrace(child);
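
These two hunks are pure substitution: the old REMOVE_LINKS/SET_LINKS pair additionally unlinked and relinked group leaders on init_task.tasks, but reparenting never changes who a group leader is, so that half was effectively a no-op here (it only moved a leader to the tail of the list) and just the sibling-list half survives, as now defined in include/linux/sched.h:

	#define remove_parent(p)	list_del_init(&(p)->sibling)
	#define add_parent(p)		list_add_tail(&(p)->sibling, &(p)->parent->children)
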
diff --git a/kernel/signal.c b/kernel/signal.c
index 75f7341b0c3..4922928d91f 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,7 +22,6 @@
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
-#include <linux/posix-timers.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/capability.h>
@@ -147,6 +146,8 @@ static kmem_cache_t *sigqueue_cachep;
#define sig_kernel_stop(sig) \
(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
+#define sig_needs_tasklist(sig) ((sig) == SIGCONT)
+
#define sig_user_defined(t, signr) \
(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
@@ -292,7 +293,7 @@ static void __sigqueue_free(struct sigqueue *q)
kmem_cache_free(sigqueue_cachep, q);
}
-static void flush_sigqueue(struct sigpending *queue)
+void flush_sigqueue(struct sigpending *queue)
{
struct sigqueue *q;
@@ -307,9 +308,7 @@ static void flush_sigqueue(struct sigpending *queue)
/*
* Flush all pending signals for a task.
*/
-
-void
-flush_signals(struct task_struct *t)
+void flush_signals(struct task_struct *t)
{
unsigned long flags;
@@ -321,109 +320,6 @@ flush_signals(struct task_struct *t)
}
/*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_sighand(struct task_struct *tsk)
-{
- struct sighand_struct * sighand = tsk->sighand;
-
- /* Ok, we're done with the signal handlers */
- tsk->sighand = NULL;
- if (atomic_dec_and_test(&sighand->count))
- sighand_free(sighand);
-}
-
-void exit_sighand(struct task_struct *tsk)
-{
- write_lock_irq(&tasklist_lock);
- rcu_read_lock();
- if (tsk->sighand != NULL) {
- struct sighand_struct *sighand = rcu_dereference(tsk->sighand);
- spin_lock(&sighand->siglock);
- __exit_sighand(tsk);
- spin_unlock(&sighand->siglock);
- }
- rcu_read_unlock();
- write_unlock_irq(&tasklist_lock);
-}
-
-/*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_signal(struct task_struct *tsk)
-{
- struct signal_struct * sig = tsk->signal;
- struct sighand_struct * sighand;
-
- if (!sig)
- BUG();
- if (!atomic_read(&sig->count))
- BUG();
- rcu_read_lock();
- sighand = rcu_dereference(tsk->sighand);
- spin_lock(&sighand->siglock);
- posix_cpu_timers_exit(tsk);
- if (atomic_dec_and_test(&sig->count)) {
- posix_cpu_timers_exit_group(tsk);
- tsk->signal = NULL;
- __exit_sighand(tsk);
- spin_unlock(&sighand->siglock);
- flush_sigqueue(&sig->shared_pending);
- } else {
- /*
- * If there is any task waiting for the group exit
- * then notify it:
- */
- if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
- wake_up_process(sig->group_exit_task);
- sig->group_exit_task = NULL;
- }
- if (tsk == sig->curr_target)
- sig->curr_target = next_thread(tsk);
- tsk->signal = NULL;
- /*
- * Accumulate here the counters for all threads but the
- * group leader as they die, so they can be added into
- * the process-wide totals when those are taken.
- * The group leader stays around as a zombie as long
- * as there are other threads. When it gets reaped,
- * the exit.c code will add its counts into these totals.
- * We won't ever get here for the group leader, since it
- * will have been the last reference on the signal_struct.
- */
- sig->utime = cputime_add(sig->utime, tsk->utime);
- sig->stime = cputime_add(sig->stime, tsk->stime);
- sig->min_flt += tsk->min_flt;
- sig->maj_flt += tsk->maj_flt;
- sig->nvcsw += tsk->nvcsw;
- sig->nivcsw += tsk->nivcsw;
- sig->sched_time += tsk->sched_time;
- __exit_sighand(tsk);
- spin_unlock(&sighand->siglock);
- sig = NULL; /* Marker for below. */
- }
- rcu_read_unlock();
- clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
- flush_sigqueue(&tsk->pending);
- if (sig) {
- /*
- * We are cleaning up the signal_struct here.
- */
- exit_thread_group_keys(sig);
- kmem_cache_free(signal_cachep, sig);
- }
-}
-
-void exit_signal(struct task_struct *tsk)
-{
- atomic_dec(&tsk->signal->live);
-
- write_lock_irq(&tasklist_lock);
- __exit_signal(tsk);
- write_unlock_irq(&tasklist_lock);
-}
-
-/*
* Flush all handlers for a task.
*/
@@ -695,9 +591,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
}
/* forward decl */
-static void do_notify_parent_cldstop(struct task_struct *tsk,
- int to_self,
- int why);
+static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
/*
* Handle magic process-wide effects of stop/continue signals.
@@ -747,7 +641,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
p->signal->group_stop_count = 0;
p->signal->flags = SIGNAL_STOP_CONTINUED;
spin_unlock(&p->sighand->siglock);
- do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
+ do_notify_parent_cldstop(p, CLD_STOPPED);
spin_lock(&p->sighand->siglock);
}
rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
@@ -788,7 +682,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
p->signal->flags = SIGNAL_STOP_CONTINUED;
p->signal->group_exit_code = 0;
spin_unlock(&p->sighand->siglock);
- do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
+ do_notify_parent_cldstop(p, CLD_CONTINUED);
spin_lock(&p->sighand->siglock);
} else {
/*
@@ -1120,27 +1014,37 @@ void zap_other_threads(struct task_struct *p)
/*
* Must be called under rcu_read_lock() or with tasklist_lock read-held.
*/
+struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
+{
+ struct sighand_struct *sighand;
+
+ for (;;) {
+ sighand = rcu_dereference(tsk->sighand);
+ if (unlikely(sighand == NULL))
+ break;
+
+ spin_lock_irqsave(&sighand->siglock, *flags);
+ if (likely(sighand == tsk->sighand))
+ break;
+ spin_unlock_irqrestore(&sighand->siglock, *flags);
+ }
+
+ return sighand;
+}
+
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
unsigned long flags;
- struct sighand_struct *sp;
int ret;
-retry:
ret = check_kill_permission(sig, info, p);
- if (!ret && sig && (sp = rcu_dereference(p->sighand))) {
- spin_lock_irqsave(&sp->siglock, flags);
- if (p->sighand != sp) {
- spin_unlock_irqrestore(&sp->siglock, flags);
- goto retry;
- }
- if ((atomic_read(&sp->count) == 0) ||
- (atomic_read(&p->usage) == 0)) {
- spin_unlock_irqrestore(&sp->siglock, flags);
- return -ESRCH;
+
+ if (!ret && sig) {
+ ret = -ESRCH;
+ if (lock_task_sighand(p, &flags)) {
+ ret = __group_send_sig_info(sig, info, p);
+ unlock_task_sighand(p, &flags);
}
- ret = __group_send_sig_info(sig, info, p);
- spin_unlock_irqrestore(&sp->siglock, flags);
}
return ret;
@@ -1189,7 +1093,7 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
struct task_struct *p;
rcu_read_lock();
- if (unlikely(sig_kernel_stop(sig) || sig == SIGCONT)) {
+ if (unlikely(sig_needs_tasklist(sig))) {
read_lock(&tasklist_lock);
acquired_tasklist_lock = 1;
}
@@ -1405,12 +1309,10 @@ void sigqueue_free(struct sigqueue *q)
__sigqueue_free(q);
}
-int
-send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
+int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
unsigned long flags;
int ret = 0;
- struct sighand_struct *sh;
BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
@@ -1424,48 +1326,17 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
*/
rcu_read_lock();
- if (unlikely(p->flags & PF_EXITING)) {
+ if (!likely(lock_task_sighand(p, &flags))) {
ret = -1;
goto out_err;
}
-retry:
- sh = rcu_dereference(p->sighand);
-
- spin_lock_irqsave(&sh->siglock, flags);
- if (p->sighand != sh) {
- /* We raced with exec() in a multithreaded process... */
- spin_unlock_irqrestore(&sh->siglock, flags);
- goto retry;
- }
-
- /*
- * We do the check here again to handle the following scenario:
- *
- * CPU 0 CPU 1
- * send_sigqueue
- * check PF_EXITING
- * interrupt exit code running
- * __exit_signal
- * lock sighand->siglock
- * unlock sighand->siglock
- * lock sh->siglock
- * add(tsk->pending) flush_sigqueue(tsk->pending)
- *
- */
-
- if (unlikely(p->flags & PF_EXITING)) {
- ret = -1;
- goto out;
- }
-
if (unlikely(!list_empty(&q->list))) {
/*
* If an SI_TIMER entry is already queued, just increment
* the overrun count.
*/
- if (q->info.si_code != SI_TIMER)
- BUG();
+ BUG_ON(q->info.si_code != SI_TIMER);
q->info.si_overrun++;
goto out;
}
@@ -1481,7 +1352,7 @@ retry:
signal_wake_up(p, sig == SIGKILL);
out:
- spin_unlock_irqrestore(&sh->siglock, flags);
+ unlock_task_sighand(p, &flags);
out_err:
rcu_read_unlock();
@@ -1613,14 +1484,14 @@ void do_notify_parent(struct task_struct *tsk, int sig)
spin_unlock_irqrestore(&psig->siglock, flags);
}
-static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
+static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
struct siginfo info;
unsigned long flags;
struct task_struct *parent;
struct sighand_struct *sighand;
- if (to_self)
+ if (tsk->ptrace & PT_PTRACED)
parent = tsk->parent;
else {
tsk = tsk->group_leader;
@@ -1695,7 +1566,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
!(current->ptrace & PT_ATTACHED)) &&
(likely(current->parent->signal != current->signal) ||
!unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
- do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
+ do_notify_parent_cldstop(current, CLD_TRAPPED);
read_unlock(&tasklist_lock);
schedule();
} else {
@@ -1744,25 +1615,17 @@ void ptrace_notify(int exit_code)
static void
finish_stop(int stop_count)
{
- int to_self;
-
/*
* If there are no other threads in the group, or if there is
* a group stop in progress and we are the last to stop,
* report to the parent. When ptraced, every thread reports itself.
*/
- if (stop_count < 0 || (current->ptrace & PT_PTRACED))
- to_self = 1;
- else if (stop_count == 0)
- to_self = 0;
- else
- goto out;
-
- read_lock(&tasklist_lock);
- do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
- read_unlock(&tasklist_lock);
+ if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
+ read_lock(&tasklist_lock);
+ do_notify_parent_cldstop(current, CLD_STOPPED);
+ read_unlock(&tasklist_lock);
+ }
-out:
schedule();
/*
* Now we don't run again until continued.
@@ -1776,12 +1639,10 @@ out:
* Returns nonzero if we've actually stopped and released the siglock.
* Returns zero if we didn't stop and still hold the siglock.
*/
-static int
-do_signal_stop(int signr)
+static int do_signal_stop(int signr)
{
struct signal_struct *sig = current->signal;
- struct sighand_struct *sighand = current->sighand;
- int stop_count = -1;
+ int stop_count;
if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
return 0;
@@ -1791,86 +1652,37 @@ do_signal_stop(int signr)
* There is a group stop in progress. We don't need to
* start another one.
*/
- signr = sig->group_exit_code;
stop_count = --sig->group_stop_count;
- current->exit_code = signr;
- set_current_state(TASK_STOPPED);
- if (stop_count == 0)
- sig->flags = SIGNAL_STOP_STOPPED;
- spin_unlock_irq(&sighand->siglock);
- }
- else if (thread_group_empty(current)) {
- /*
- * Lock must be held through transition to stopped state.
- */
- current->exit_code = current->signal->group_exit_code = signr;
- set_current_state(TASK_STOPPED);
- sig->flags = SIGNAL_STOP_STOPPED;
- spin_unlock_irq(&sighand->siglock);
- }
- else {
+ } else {
/*
* There is no group stop already in progress.
- * We must initiate one now, but that requires
- * dropping siglock to get both the tasklist lock
- * and siglock again in the proper order. Note that
- * this allows an intervening SIGCONT to be posted.
- * We need to check for that and bail out if necessary.
+ * We must initiate one now.
*/
struct task_struct *t;
- spin_unlock_irq(&sighand->siglock);
-
- /* signals can be posted during this window */
+ sig->group_exit_code = signr;
- read_lock(&tasklist_lock);
- spin_lock_irq(&sighand->siglock);
-
- if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
+ stop_count = 0;
+ for (t = next_thread(current); t != current; t = next_thread(t))
/*
- * Another stop or continue happened while we
- * didn't have the lock. We can just swallow this
- * signal now. If we raced with a SIGCONT, that
- * should have just cleared it now. If we raced
- * with another processor delivering a stop signal,
- * then the SIGCONT that wakes us up should clear it.
+ * Setting state to TASK_STOPPED for a group
+ * stop is always done with the siglock held,
+ * so this check has no races.
*/
- read_unlock(&tasklist_lock);
- return 0;
- }
-
- if (sig->group_stop_count == 0) {
- sig->group_exit_code = signr;
- stop_count = 0;
- for (t = next_thread(current); t != current;
- t = next_thread(t))
- /*
- * Setting state to TASK_STOPPED for a group
- * stop is always done with the siglock held,
- * so this check has no races.
- */
- if (!t->exit_state &&
- !(t->state & (TASK_STOPPED|TASK_TRACED))) {
- stop_count++;
- signal_wake_up(t, 0);
- }
- sig->group_stop_count = stop_count;
- }
- else {
- /* A race with another thread while unlocked. */
- signr = sig->group_exit_code;
- stop_count = --sig->group_stop_count;
- }
-
- current->exit_code = signr;
- set_current_state(TASK_STOPPED);
- if (stop_count == 0)
- sig->flags = SIGNAL_STOP_STOPPED;
-
- spin_unlock_irq(&sighand->siglock);
- read_unlock(&tasklist_lock);
+ if (!t->exit_state &&
+ !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+ stop_count++;
+ signal_wake_up(t, 0);
+ }
+ sig->group_stop_count = stop_count;
}
+ if (stop_count == 0)
+ sig->flags = SIGNAL_STOP_STOPPED;
+ current->exit_code = sig->group_exit_code;
+ __set_current_state(TASK_STOPPED);
+
+ spin_unlock_irq(&current->sighand->siglock);
finish_stop(stop_count);
return 1;
}
@@ -1990,7 +1802,7 @@ relock:
continue;
/* Init gets no signals it doesn't want. */
- if (current->pid == 1)
+ if (current == child_reaper)
continue;
if (sig_kernel_stop(signr)) {
@@ -2430,8 +2242,7 @@ sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
return kill_proc_info(sig, &info, pid);
}
-int
-do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
+int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
struct k_sigaction *k;
sigset_t mask;
@@ -2457,6 +2268,7 @@ do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
if (act) {
sigdelsetmask(&act->sa.sa_mask,
sigmask(SIGKILL) | sigmask(SIGSTOP));
+ *k = *act;
/*
* POSIX 3.3.1.3:
* "Setting a signal action to SIG_IGN for a signal that is
@@ -2469,19 +2281,8 @@ do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
* be discarded, whether or not it is blocked"
*/
if (act->sa.sa_handler == SIG_IGN ||
- (act->sa.sa_handler == SIG_DFL &&
- sig_kernel_ignore(sig))) {
- /*
- * This is a fairly rare case, so we only take the
- * tasklist_lock once we're sure we'll need it.
- * Now we must do this little unlock and relock
- * dance to maintain the lock hierarchy.
- */
+ (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
struct task_struct *t = current;
- spin_unlock_irq(&t->sighand->siglock);
- read_lock(&tasklist_lock);
- spin_lock_irq(&t->sighand->siglock);
- *k = *act;
sigemptyset(&mask);
sigaddset(&mask, sig);
rm_from_queue_full(&mask, &t->signal->shared_pending);
@@ -2490,12 +2291,7 @@ do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
recalc_sigpending_tsk(t);
t = next_thread(t);
} while (t != current);
- spin_unlock_irq(&current->sighand->siglock);
- read_unlock(&tasklist_lock);
- return 0;
}
-
- *k = *act;
}
spin_unlock_irq(&current->sighand->siglock);
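
The do_signal_stop() rewrite removes the drop-siglock/take-tasklist_lock/retake-siglock dance entirely: because setting TASK_STOPPED for a group stop is always done under siglock, the whole decision now fits in one critical section and no SIGCONT can slip in between. Condensed (a summary of the hunk above; the function is entered with siglock already held):

	struct signal_struct *sig = current->signal;
	struct task_struct *t;
	int stop_count;

	if (sig->group_stop_count > 0) {
		/* join the group stop already in progress */
		stop_count = --sig->group_stop_count;
	} else {
		/* initiate one: count and wake every running thread */
		sig->group_exit_code = signr;
		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			if (!t->exit_state &&
			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}
	if (stop_count == 0)			/* we are the last to stop */
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
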
diff --git a/kernel/sys.c b/kernel/sys.c
index c93d37f71ae..7ef7f6054c2 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1202,69 +1202,24 @@ asmlinkage long sys_times(struct tms __user * tbuf)
*/
if (tbuf) {
struct tms tmp;
+ struct task_struct *tsk = current;
+ struct task_struct *t;
cputime_t utime, stime, cutime, cstime;
-#ifdef CONFIG_SMP
- if (thread_group_empty(current)) {
- /*
- * Single thread case without the use of any locks.
- *
- * We may race with release_task if two threads are
- * executing. However, release task first adds up the
- * counters (__exit_signal) before removing the task
- * from the process tasklist (__unhash_process).
- * __exit_signal also acquires and releases the
- * siglock which results in the proper memory ordering
- * so that the list modifications are always visible
- * after the counters have been updated.
- *
- * If the counters have been updated by the second thread
- * but the thread has not yet been removed from the list
- * then the other branch will be executing which will
- * block on tasklist_lock until the exit handling of the
- * other task is finished.
- *
- * This also implies that the sighand->siglock cannot
- * be held by another processor. So we can also
- * skip acquiring that lock.
- */
- utime = cputime_add(current->signal->utime, current->utime);
- stime = cputime_add(current->signal->utime, current->stime);
- cutime = current->signal->cutime;
- cstime = current->signal->cstime;
- } else
-#endif
- {
+ spin_lock_irq(&tsk->sighand->siglock);
+ utime = tsk->signal->utime;
+ stime = tsk->signal->stime;
+ t = tsk;
+ do {
+ utime = cputime_add(utime, t->utime);
+ stime = cputime_add(stime, t->stime);
+ t = next_thread(t);
+ } while (t != tsk);
- /* Process with multiple threads */
- struct task_struct *tsk = current;
- struct task_struct *t;
+ cutime = tsk->signal->cutime;
+ cstime = tsk->signal->cstime;
+ spin_unlock_irq(&tsk->sighand->siglock);
- read_lock(&tasklist_lock);
- utime = tsk->signal->utime;
- stime = tsk->signal->stime;
- t = tsk;
- do {
- utime = cputime_add(utime, t->utime);
- stime = cputime_add(stime, t->stime);
- t = next_thread(t);
- } while (t != tsk);
-
- /*
- * While we have tasklist_lock read-locked, no dying thread
- * can be updating current->signal->[us]time. Instead,
- * we got their counts included in the live thread loop.
- * However, another thread can come in right now and
- * do a wait call that updates current->signal->c[us]time.
- * To make sure we always see that pair updated atomically,
- * we take the siglock around fetching them.
- */
- spin_lock_irq(&tsk->sighand->siglock);
- cutime = tsk->signal->cutime;
- cstime = tsk->signal->cstime;
- spin_unlock_irq(&tsk->sighand->siglock);
- read_unlock(&tasklist_lock);
- }
tmp.tms_utime = cputime_to_clock_t(utime);
tmp.tms_stime = cputime_to_clock_t(stime);
tmp.tms_cutime = cputime_to_clock_t(cutime);