Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c             |   6
-rw-r--r--  kernel/auditsc.c           |   2
-rw-r--r--  kernel/exit.c              |   1
-rw-r--r--  kernel/kexec.c             |   7
-rw-r--r--  kernel/posix-cpu-timers.c  | 110
-rw-r--r--  kernel/posix-timers.c      |   2
-rw-r--r--  kernel/power/swsusp.c      |   2
-rw-r--r--  kernel/sched.c             |   1
-rw-r--r--  kernel/signal.c            |  14
9 files changed, 75 insertions(+), 70 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index aefa73a8a58..0c56320d38d 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -133,7 +133,7 @@ struct audit_buffer {
struct list_head list;
struct sk_buff *skb; /* formatted skb ready to send */
struct audit_context *ctx; /* NULL or associated context */
- int gfp_mask;
+ gfp_t gfp_mask;
};
static void audit_set_pid(struct audit_buffer *ab, pid_t pid)
@@ -647,7 +647,7 @@ static inline void audit_get_stamp(struct audit_context *ctx,
* will be written at syscall exit. If there is no associated task, tsk
* should be NULL. */
-struct audit_buffer *audit_log_start(struct audit_context *ctx, int gfp_mask,
+struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
int type)
{
struct audit_buffer *ab = NULL;
@@ -879,7 +879,7 @@ void audit_log_end(struct audit_buffer *ab)
/* Log an audit record. This is a convenience function that calls
* audit_log_start, audit_log_vformat, and audit_log_end. It may be
* called in any context. */
-void audit_log(struct audit_context *ctx, int gfp_mask, int type,
+void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
const char *fmt, ...)
{
struct audit_buffer *ab;
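[Annotation: the audit.c hunks above, and the matching auditsc.c, kexec.c, and swsusp.c hunks below, are part of the tree-wide switch of allocation-flag parameters from plain integers to the typed gfp_t. A hypothetical sketch of what the distinct type buys, assuming gfp_t's __bitwise declaration and a sparse run; the demo_ names are invented, not from this commit.]

/* Hypothetical sketch, not part of this commit.  gfp_t is declared
 * __bitwise, so `sparse` treats it as incompatible with plain
 * integers and flags the swapped call below; with the old
 * `int gfp_mask` parameters it compiled silently. */
#include <linux/slab.h>
#include <linux/gfp.h>

static void *demo_alloc(size_t len, gfp_t gfp_mask)
{
        return kmalloc(len, gfp_mask);
}

static void demo(void)
{
        void *p = demo_alloc(128, GFP_KERNEL);  /* fine */
        /* demo_alloc(GFP_KERNEL, 128);  <-- sparse warns: restricted
         * gfp_t passed as size, plain int passed as flags. */
        kfree(p);
}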
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 88696f639aa..d8a68509e72 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -803,7 +803,7 @@ static void audit_log_task_info(struct audit_buffer *ab)
up_read(&mm->mmap_sem);
}
-static void audit_log_exit(struct audit_context *context, unsigned int gfp_mask)
+static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask)
{
int i;
struct audit_buffer *ab;
diff --git a/kernel/exit.c b/kernel/exit.c
index 43077732619..3b25b182d2b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -843,6 +843,7 @@ fastcall NORET_TYPE void do_exit(long code)
group_dead = atomic_dec_and_test(&tsk->signal->live);
if (group_dead) {
del_timer_sync(&tsk->signal->real_timer);
+ exit_itimers(tsk->signal);
acct_process(code);
}
exit_mm(tsk);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index cdd4dcd8fb6..36c5d9cd4cc 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -90,7 +90,7 @@ int kexec_should_crash(struct task_struct *p)
static int kimage_is_destination_range(struct kimage *image,
unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
- unsigned int gfp_mask,
+ gfp_t gfp_mask,
unsigned long dest);
static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
@@ -326,8 +326,7 @@ static int kimage_is_destination_range(struct kimage *image,
return 0;
}
-static struct page *kimage_alloc_pages(unsigned int gfp_mask,
- unsigned int order)
+static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
struct page *pages;
@@ -654,7 +653,7 @@ static kimage_entry_t *kimage_dst_used(struct kimage *image,
}
static struct page *kimage_alloc_page(struct kimage *image,
- unsigned int gfp_mask,
+ gfp_t gfp_mask,
unsigned long destination)
{
/*
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index b3f3edc475d..bf374fceb39 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -91,7 +91,7 @@ static inline union cpu_time_count cpu_time_sub(clockid_t which_clock,
* Update expiry time from increment, and increase overrun count,
* given the current clock sample.
*/
-static inline void bump_cpu_timer(struct k_itimer *timer,
+static void bump_cpu_timer(struct k_itimer *timer,
union cpu_time_count now)
{
int i;
@@ -110,7 +110,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer,
for (i = 0; incr < delta - incr; i++)
incr = incr << 1;
for (; i >= 0; incr >>= 1, i--) {
- if (delta <= incr)
+ if (delta < incr)
continue;
timer->it.cpu.expires.sched += incr;
timer->it_overrun += 1 << i;
@@ -128,7 +128,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer,
for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
incr = cputime_add(incr, incr);
for (; i >= 0; incr = cputime_halve(incr), i--) {
- if (cputime_le(delta, incr))
+ if (cputime_lt(delta, incr))
continue;
timer->it.cpu.expires.cpu =
cputime_add(timer->it.cpu.expires.cpu, incr);
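[Annotation: the two one-character changes above fix an off-by-one in bump_cpu_timer(): with `delta <= incr`, a sample landing exactly on an expiry boundary was skipped during the halving pass, which could leave the recomputed expiry at (rather than past) the current time, so the timer fired again immediately. A userspace rendition of the CPUCLOCK_SCHED branch, with hypothetical names, that compiles and runs standalone:]

#include <stdio.h>

/* `period` plays the role of timer->it.cpu.incr.sched. */
static unsigned long long bump(unsigned long long expires,
                               unsigned long long period,
                               unsigned long long now,
                               int *overrun)
{
        unsigned long long incr = period;
        unsigned long long delta;
        int i;

        if (now < expires)
                return expires;         /* not expired yet */

        delta = now - expires + period;
        /* Double the step until one more doubling would overshoot. */
        for (i = 0; incr < delta - incr; i++)
                incr <<= 1;
        /* Walk back down, taking every step that still fits. */
        for (; i >= 0; incr >>= 1, i--) {
                if (delta < incr)       /* the fix: was `delta <= incr` */
                        continue;
                expires += incr;
                *overrun += 1 << i;
                delta -= incr;
        }
        return expires;
}

int main(void)
{
        int overrun = 0;

        /* Expiry 100, period 10, sampled exactly at 120: the new test
         * yields expiry 130 / overrun 3; the old `<=` test stopped at
         * 120 == now with overrun 2, leaving the timer still expired. */
        printf("%llu %d\n", bump(100, 10, 120, &overrun), overrun);
        return 0;
}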
@@ -380,28 +380,31 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
int posix_cpu_timer_del(struct k_itimer *timer)
{
struct task_struct *p = timer->it.cpu.task;
+ int ret = 0;
- if (timer->it.cpu.firing)
- return TIMER_RETRY;
-
- if (unlikely(p == NULL))
- return 0;
+ if (likely(p != NULL)) {
+ read_lock(&tasklist_lock);
+ if (unlikely(p->signal == NULL)) {
+ /*
+ * We raced with the reaping of the task.
+ * The deletion should have cleared us off the list.
+ */
+ BUG_ON(!list_empty(&timer->it.cpu.entry));
+ } else {
+ spin_lock(&p->sighand->siglock);
+ if (timer->it.cpu.firing)
+ ret = TIMER_RETRY;
+ else
+ list_del(&timer->it.cpu.entry);
+ spin_unlock(&p->sighand->siglock);
+ }
+ read_unlock(&tasklist_lock);
- spin_lock(&p->sighand->siglock);
- if (!list_empty(&timer->it.cpu.entry)) {
- /*
- * Take us off the task's timer list. We don't need to
- * take tasklist_lock and check for the task being reaped.
- * If it was reaped, it already called posix_cpu_timers_exit
- * and posix_cpu_timers_exit_group to clear all the timers
- * that pointed to it.
- */
- list_del(&timer->it.cpu.entry);
- put_task_struct(p);
+ if (!ret)
+ put_task_struct(p);
}
- spin_unlock(&p->sighand->siglock);
- return 0;
+ return ret;
}
/*
@@ -418,8 +421,6 @@ static void cleanup_timers(struct list_head *head,
cputime_t ptime = cputime_add(utime, stime);
list_for_each_entry_safe(timer, next, head, entry) {
- put_task_struct(timer->task);
- timer->task = NULL;
list_del_init(&timer->entry);
if (cputime_lt(timer->expires.cpu, ptime)) {
timer->expires.cpu = cputime_zero;
@@ -431,8 +432,6 @@ static void cleanup_timers(struct list_head *head,
++head;
list_for_each_entry_safe(timer, next, head, entry) {
- put_task_struct(timer->task);
- timer->task = NULL;
list_del_init(&timer->entry);
if (cputime_lt(timer->expires.cpu, utime)) {
timer->expires.cpu = cputime_zero;
@@ -444,8 +443,6 @@ static void cleanup_timers(struct list_head *head,
++head;
list_for_each_entry_safe(timer, next, head, entry) {
- put_task_struct(timer->task);
- timer->task = NULL;
list_del_init(&timer->entry);
if (timer->expires.sched < sched_time) {
timer->expires.sched = 0;
@@ -489,6 +486,9 @@ static void process_timer_rebalance(struct task_struct *p,
struct task_struct *t = p;
unsigned int nthreads = atomic_read(&p->signal->live);
+ if (!nthreads)
+ return;
+
switch (clock_idx) {
default:
BUG();
@@ -497,7 +497,7 @@ static void process_timer_rebalance(struct task_struct *p,
left = cputime_div(cputime_sub(expires.cpu, val.cpu),
nthreads);
do {
- if (!unlikely(t->exit_state)) {
+ if (!unlikely(t->flags & PF_EXITING)) {
ticks = cputime_add(prof_ticks(t), left);
if (cputime_eq(t->it_prof_expires,
cputime_zero) ||
@@ -512,7 +512,7 @@ static void process_timer_rebalance(struct task_struct *p,
left = cputime_div(cputime_sub(expires.cpu, val.cpu),
nthreads);
do {
- if (!unlikely(t->exit_state)) {
+ if (!unlikely(t->flags & PF_EXITING)) {
ticks = cputime_add(virt_ticks(t), left);
if (cputime_eq(t->it_virt_expires,
cputime_zero) ||
@@ -527,7 +527,7 @@ static void process_timer_rebalance(struct task_struct *p,
nsleft = expires.sched - val.sched;
do_div(nsleft, nthreads);
do {
- if (!unlikely(t->exit_state)) {
+ if (!unlikely(t->flags & PF_EXITING)) {
ns = t->sched_time + nsleft;
if (t->it_sched_expires == 0 ||
t->it_sched_expires > ns) {
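[Annotation: two themes in the process_timer_rebalance() hunks above. The new `if (!nthreads) return;` guards the per-thread divisions below it, since signal->live can already be zero while the last thread is exiting; and the skip test changes from exit_state to PF_EXITING, which is set earlier on the exit path, so exiting threads stop receiving a share sooner. A trivial standalone illustration of the division guard, with hypothetical names:]

#include <stdio.h>

/* Stand-in for the cputime_div()/do_div() calls in
 * process_timer_rebalance(): divide only when someone is left
 * to receive a share. */
static unsigned long long split_evenly(unsigned long long total,
                                       unsigned int nthreads)
{
        if (!nthreads)          /* the new early return in the patch */
                return 0;
        return total / nthreads;
}

int main(void)
{
        printf("%llu\n", split_evenly(300, 3)); /* 100 */
        printf("%llu\n", split_evenly(300, 0)); /* 0, not a fault */
        return 0;
}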
@@ -566,6 +566,9 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
struct cpu_timer_list *next;
unsigned long i;
+ if (CPUCLOCK_PERTHREAD(timer->it_clock) && (p->flags & PF_EXITING))
+ return;
+
head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
p->cpu_timers : p->signal->cpu_timers);
head += CPUCLOCK_WHICH(timer->it_clock);
@@ -576,17 +579,15 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
listpos = head;
if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
list_for_each_entry(next, head, entry) {
- if (next->expires.sched > nt->expires.sched) {
- listpos = &next->entry;
+ if (next->expires.sched > nt->expires.sched)
break;
- }
+ listpos = &next->entry;
}
} else {
list_for_each_entry(next, head, entry) {
- if (cputime_gt(next->expires.cpu, nt->expires.cpu)) {
- listpos = &next->entry;
+ if (cputime_gt(next->expires.cpu, nt->expires.cpu))
break;
- }
+ listpos = &next->entry;
}
}
list_add(&nt->entry, listpos);
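[Annotation: the listpos rework in arm_timer() fixes where a new timer lands in the sorted expiry list. list_add() inserts *after* its second argument, so the cursor must track the last entry expiring no later than the new one and stop at the first later entry; the old code instead advanced the cursor onto that later entry, inserting one slot too far down. A compilable userspace analog with a singly linked list and the same insert-after semantics (all names hypothetical):]

#include <stdio.h>

struct node {
        unsigned long long expires;
        struct node *next;
};

/* Insert nt after `pos`, mimicking list_add(new, pos). */
static void insert_sorted(struct node *head, struct node *nt)
{
        struct node *pos = head;        /* sentinel, like listpos = head */
        struct node *n;

        for (n = head->next; n; n = n->next) {
                if (n->expires > nt->expires)
                        break;          /* first later entry: stop here */
                pos = n;                /* last entry expiring no later */
        }
        nt->next = pos->next;
        pos->next = nt;
}

int main(void)
{
        struct node head = { 0, NULL };
        struct node a = { 10, NULL }, b = { 30, NULL }, c = { 20, NULL };
        struct node *n;

        insert_sorted(&head, &a);
        insert_sorted(&head, &b);
        insert_sorted(&head, &c);
        for (n = head.next; n; n = n->next)
                printf("%llu ", n->expires);    /* prints: 10 20 30 */
        printf("\n");
        return 0;
}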
@@ -730,9 +731,15 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
* Disarm any old timer after extracting its expiry time.
*/
BUG_ON(!irqs_disabled());
+
+ ret = 0;
spin_lock(&p->sighand->siglock);
old_expires = timer->it.cpu.expires;
- list_del_init(&timer->it.cpu.entry);
+ if (unlikely(timer->it.cpu.firing)) {
+ timer->it.cpu.firing = -1;
+ ret = TIMER_RETRY;
+ } else
+ list_del_init(&timer->it.cpu.entry);
spin_unlock(&p->sighand->siglock);
/*
@@ -780,7 +787,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
}
}
- if (unlikely(timer->it.cpu.firing)) {
+ if (unlikely(ret)) {
/*
* We are colliding with the timer actually firing.
* Punt after filling in the timer's old value, and
@@ -788,8 +795,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
* it as an overrun (thanks to bump_cpu_timer above).
*/
read_unlock(&tasklist_lock);
- timer->it.cpu.firing = -1;
- ret = TIMER_RETRY;
goto out;
}
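[Annotation: posix_cpu_timer_del() and posix_cpu_timer_set() now resolve collisions with a concurrently firing timer the same way: decide under sighand->siglock, and if it.cpu.firing is set, record the collision (firing = -1 in the set path) and return TIMER_RETRY so the caller retries after the firing pass finishes, instead of unlinking an entry the firing code still holds. A minimal pthread sketch of that shape; TIMER_RETRY's value of 1 is assumed from the posix-timers code of this era, and every demo_ name is invented:]

#include <pthread.h>
#include <stdio.h>

#define TIMER_RETRY 1   /* assumed kernel value, for illustration */

struct demo_timer {
        pthread_mutex_t lock;   /* stands in for sighand->siglock */
        int firing;             /* set by the expiry path while it runs */
        int armed;
};

static int demo_timer_set(struct demo_timer *t, int arm)
{
        int ret = 0;

        pthread_mutex_lock(&t->lock);
        if (t->firing) {
                t->firing = -1;         /* tell the firing path we collided */
                ret = TIMER_RETRY;      /* caller retries after firing ends */
        } else {
                t->armed = arm;
        }
        pthread_mutex_unlock(&t->lock);
        return ret;
}

int main(void)
{
        struct demo_timer t = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

        printf("%d\n", demo_timer_set(&t, 1)); /* 0: armed normally */
        t.firing = 1;
        printf("%d\n", demo_timer_set(&t, 1)); /* 1: TIMER_RETRY */
        return 0;
}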
@@ -955,14 +960,16 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
static void check_thread_timers(struct task_struct *tsk,
struct list_head *firing)
{
+ int maxfire;
struct list_head *timers = tsk->cpu_timers;
+ maxfire = 20;
tsk->it_prof_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_entry(timers->next,
struct cpu_timer_list,
entry);
- if (cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
+ if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
tsk->it_prof_expires = t->expires.cpu;
break;
}
@@ -971,12 +978,13 @@ static void check_thread_timers(struct task_struct *tsk,
}
++timers;
+ maxfire = 20;
tsk->it_virt_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_entry(timers->next,
struct cpu_timer_list,
entry);
- if (cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
+ if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
tsk->it_virt_expires = t->expires.cpu;
break;
}
@@ -985,12 +993,13 @@ static void check_thread_timers(struct task_struct *tsk,
}
++timers;
+ maxfire = 20;
tsk->it_sched_expires = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_entry(timers->next,
struct cpu_timer_list,
entry);
- if (tsk->sched_time < t->expires.sched) {
+ if (!--maxfire || tsk->sched_time < t->expires.sched) {
tsk->it_sched_expires = t->expires.sched;
break;
}
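[Annotation: the new maxfire counter in check_thread_timers(), and in check_process_timers() below, caps each drain pass at 19 fired timers per list: the `!--maxfire` test trips on the 20th entry, which is then recorded as the next expiry to re-arm rather than fired, bounding the work done per tick under siglock. A standalone userspace sketch of the capped-drain pattern over a sorted array (names hypothetical):]

#include <stdio.h>

#define MAXFIRE 20      /* same cap the patch hardcodes */

struct entry {
        unsigned long long expires;
};

/* Drain expired entries from a list sorted by `expires`: fire at
 * most MAXFIRE - 1 of them, and return the expiry of the first
 * entry left behind (0 if exhausted) so the caller can re-arm. */
static unsigned long long drain(struct entry *e, int n,
                                unsigned long long now)
{
        int maxfire = MAXFIRE;
        int i;

        for (i = 0; i < n; i++) {
                if (!--maxfire || e[i].expires > now)
                        return e[i].expires;    /* next wakeup */
                printf("fire %llu\n", e[i].expires);
        }
        return 0;
}

int main(void)
{
        struct entry e[3] = { { 5 }, { 10 }, { 40 } };

        /* now = 20: fires 5 and 10, re-arms at 40. */
        printf("next: %llu\n", drain(e, 3, 20));
        return 0;
}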
@@ -1007,6 +1016,7 @@ static void check_thread_timers(struct task_struct *tsk,
static void check_process_timers(struct task_struct *tsk,
struct list_head *firing)
{
+ int maxfire;
struct signal_struct *const sig = tsk->signal;
cputime_t utime, stime, ptime, virt_expires, prof_expires;
unsigned long long sched_time, sched_expires;
@@ -1039,12 +1049,13 @@ static void check_process_timers(struct task_struct *tsk,
} while (t != tsk);
ptime = cputime_add(utime, stime);
+ maxfire = 20;
prof_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_entry(timers->next,
struct cpu_timer_list,
entry);
- if (cputime_lt(ptime, t->expires.cpu)) {
+ if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) {
prof_expires = t->expires.cpu;
break;
}
@@ -1053,12 +1064,13 @@ static void check_process_timers(struct task_struct *tsk,
}
++timers;
+ maxfire = 20;
virt_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_entry(timers->next,
struct cpu_timer_list,
entry);
- if (cputime_lt(utime, t->expires.cpu)) {
+ if (!--maxfire || cputime_lt(utime, t->expires.cpu)) {
virt_expires = t->expires.cpu;
break;
}
@@ -1067,12 +1079,13 @@ static void check_process_timers(struct task_struct *tsk,
}
++timers;
+ maxfire = 20;
sched_expires = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_entry(timers->next,
struct cpu_timer_list,
entry);
- if (sched_time < t->expires.sched) {
+ if (!--maxfire || sched_time < t->expires.sched) {
sched_expires = t->expires.sched;
break;
}
@@ -1155,6 +1168,9 @@ static void check_process_timers(struct task_struct *tsk,
unsigned long long sched_left, sched;
const unsigned int nthreads = atomic_read(&sig->live);
+ if (!nthreads)
+ return;
+
prof_left = cputime_sub(prof_expires, utime);
prof_left = cputime_sub(prof_left, stime);
prof_left = cputime_div(prof_left, nthreads);
@@ -1191,7 +1207,7 @@ static void check_process_timers(struct task_struct *tsk,
do {
t = next_thread(t);
- } while (unlikely(t->exit_state));
+ } while (unlikely(t->flags & PF_EXITING));
} while (t != tsk);
}
}
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index b7b532acd9f..dda3cda73c7 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -1157,7 +1157,7 @@ retry_delete:
}
/*
- * This is called by __exit_signal, only when there are no more
+ * This is called by do_exit or de_thread, only when there are no more
* references to the shared signal_struct.
*/
void exit_itimers(struct signal_struct *sig)
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 2d5c4567644..10bc5ec496d 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -1095,7 +1095,7 @@ static inline void eat_page(void *page)
*eaten_memory = c;
}
-unsigned long get_usable_page(unsigned gfp_mask)
+unsigned long get_usable_page(gfp_t gfp_mask)
{
unsigned long m;
diff --git a/kernel/sched.c b/kernel/sched.c
index 1f31a528fdb..1e5cafdf4e2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3879,6 +3879,7 @@ EXPORT_SYMBOL(cpu_present_map);
#ifndef CONFIG_SMP
cpumask_t cpu_online_map = CPU_MASK_ALL;
+EXPORT_SYMBOL_GPL(cpu_online_map);
cpumask_t cpu_possible_map = CPU_MASK_ALL;
#endif
diff --git a/kernel/signal.c b/kernel/signal.c
index 50c99264377..f2b96b08fb4 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -397,20 +397,8 @@ void __exit_signal(struct task_struct *tsk)
flush_sigqueue(&tsk->pending);
if (sig) {
/*
- * We are cleaning up the signal_struct here. We delayed
- * calling exit_itimers until after flush_sigqueue, just in
- * case our thread-local pending queue contained a queued
- * timer signal that would have been cleared in
- * exit_itimers. When that called sigqueue_free, it would
- * attempt to re-take the tasklist_lock and deadlock. This
- * can never happen if we ensure that all queues the
- * timer's signal might be queued on have been flushed
- * first. The shared_pending queue, and our own pending
- * queue are the only queues the timer could be on, since
- * there are no other threads left in the group and timer
- * signals are constrained to threads inside the group.
+ * We are cleaning up the signal_struct here.
*/
- exit_itimers(sig);
exit_thread_group_keys(sig);
kmem_cache_free(signal_cachep, sig);
}