Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c               8
-rw-r--r--  kernel/auditsc.c             2
-rw-r--r--  kernel/cpuset.c             13
-rw-r--r--  kernel/exit.c                3
-rw-r--r--  kernel/fork.c                2
-rw-r--r--  kernel/kexec.c               7
-rw-r--r--  kernel/kfifo.c               4
-rw-r--r--  kernel/params.c             10
-rw-r--r--  kernel/posix-cpu-timers.c   91
-rw-r--r--  kernel/posix-timers.c        2
-rw-r--r--  kernel/power/Kconfig         2
-rw-r--r--  kernel/power/disk.c          6
-rw-r--r--  kernel/power/power.h         7
-rw-r--r--  kernel/power/swsusp.c       36
-rw-r--r--  kernel/rcupdate.c           13
-rw-r--r--  kernel/sched.c               1
-rw-r--r--  kernel/signal.c             87
-rw-r--r--  kernel/sys.c                52
-rw-r--r--  kernel/time.c                1
19 files changed, 220 insertions, 127 deletions
diff --git a/kernel/audit.c b/kernel/audit.c
index 83096b67510..0c56320d38d 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -133,7 +133,7 @@ struct audit_buffer {
struct list_head list;
struct sk_buff *skb; /* formatted skb ready to send */
struct audit_context *ctx; /* NULL or associated context */
- int gfp_mask;
+ gfp_t gfp_mask;
};
static void audit_set_pid(struct audit_buffer *ab, pid_t pid)
@@ -560,7 +560,7 @@ static void audit_buffer_free(struct audit_buffer *ab)
}
static struct audit_buffer * audit_buffer_alloc(struct audit_context *ctx,
- unsigned int __nocast gfp_mask, int type)
+ gfp_t gfp_mask, int type)
{
unsigned long flags;
struct audit_buffer *ab = NULL;
@@ -647,7 +647,7 @@ static inline void audit_get_stamp(struct audit_context *ctx,
* will be written at syscall exit. If there is no associated task, tsk
* should be NULL. */
-struct audit_buffer *audit_log_start(struct audit_context *ctx, int gfp_mask,
+struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
int type)
{
struct audit_buffer *ab = NULL;
@@ -879,7 +879,7 @@ void audit_log_end(struct audit_buffer *ab)
/* Log an audit record. This is a convenience function that calls
* audit_log_start, audit_log_vformat, and audit_log_end. It may be
* called in any context. */
-void audit_log(struct audit_context *ctx, int gfp_mask, int type,
+void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
const char *fmt, ...)
{
struct audit_buffer *ab;
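
[Editor's note: the audit.c hunks are one mechanical change repeated four times: the allocation paths take a gfp_t rather than a bare int or the older `unsigned int __nocast`. The same conversion recurs below in auditsc.c, cpuset.c, kexec.c, kfifo.c, signal.c, and swsusp.c. The payoff of the dedicated type is that sparse can flag callers that pass raw integers. A minimal userspace sketch of the mechanism, with an illustrative flag value rather than the kernel's real one:]

#include <stdlib.h>

#ifdef __CHECKER__                      /* defined when sparse is running */
# define __bitwise __attribute__((bitwise))
# define __force   __attribute__((force))
#else                                   /* ordinary compilers see nothing */
# define __bitwise
# define __force
#endif

typedef unsigned int __bitwise gfp_t;   /* same shape as the kernel's gfp_t */

#define TOY_GFP_KERNEL ((__force gfp_t)0x10u)   /* illustrative bit only */

/* Passing a bare int here is a sparse warning instead of a silent bug. */
static void *toy_alloc(size_t n, gfp_t gfp_mask)
{
        (void)gfp_mask;                 /* a real allocator would act on it */
        return malloc(n);
}

int main(void)
{
        void *p = toy_alloc(64, TOY_GFP_KERNEL);        /* type-correct */
        /* toy_alloc(64, 0x10);  <- sparse: incorrect type in argument 2 */
        free(p);
        return 0;
}

[Under a plain compiler the annotations vanish, so the typedef costs nothing at runtime.]
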
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 88696f639aa..d8a68509e72 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -803,7 +803,7 @@ static void audit_log_task_info(struct audit_buffer *ab)
up_read(&mm->mmap_sem);
}
-static void audit_log_exit(struct audit_context *context, unsigned int gfp_mask)
+static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask)
{
int i;
struct audit_buffer *ab;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 79866bc6b3a..28176d083f7 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -968,8 +968,6 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
char *page;
ssize_t retval = 0;
char *s;
- char *start;
- size_t n;
if (!(page = (char *)__get_free_page(GFP_KERNEL)))
return -ENOMEM;
@@ -999,14 +997,7 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
*s++ = '\n';
*s = '\0';
- /* Do nothing if *ppos is at the eof or beyond the eof. */
- if (s - page <= *ppos)
- return 0;
-
- start = page + *ppos;
- n = s - start;
- retval = n - copy_to_user(buf, start, min(n, nbytes));
- *ppos += retval;
+ retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
out:
free_page((unsigned long)page);
return retval;
@@ -1679,7 +1670,7 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
* GFP_USER - only nodes in current tasks mems allowed ok.
**/
-int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask)
+int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
int node; /* node that zone z is on */
const struct cpuset *cs; /* current cpuset ancestors */
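
[Editor's note: the cpuset_common_file_read() hunk swaps an open-coded copy-out for simple_read_from_buffer(). The removed code was also buggy: the early `return 0` at EOF skipped the `out:` label and leaked the page. A userspace model of what the helper does, with memcpy standing in for copy_to_user() and a local stand-in for loff_t:]

#include <string.h>
#include <sys/types.h>

typedef long long toy_loff_t;           /* stand-in for the kernel's loff_t */

/* Model of simple_read_from_buffer(): copy at most nbytes from
 * from + *ppos, clamped to the bytes available, then advance *ppos.
 * The kernel helper uses copy_to_user() and may return -EFAULT;
 * this toy cannot fail. */
static ssize_t toy_read_from_buffer(char *to, size_t nbytes, toy_loff_t *ppos,
                                    const char *from, size_t available)
{
        toy_loff_t pos = *ppos;
        size_t n;

        if (pos < 0 || (size_t)pos >= available)
                return 0;               /* at or past EOF */
        n = available - (size_t)pos;
        if (n > nbytes)
                n = nbytes;
        memcpy(to, from + pos, n);
        *ppos = pos + (toy_loff_t)n;
        return (ssize_t)n;
}

[With the helper, cpuset_common_file_read() keeps a single exit path through `out:`, so the page is always freed.]
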
diff --git a/kernel/exit.c b/kernel/exit.c
index ee6d8b8abef..3b25b182d2b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -843,6 +843,7 @@ fastcall NORET_TYPE void do_exit(long code)
group_dead = atomic_dec_and_test(&tsk->signal->live);
if (group_dead) {
del_timer_sync(&tsk->signal->real_timer);
+ exit_itimers(tsk->signal);
acct_process(code);
}
exit_mm(tsk);
@@ -1203,7 +1204,7 @@ static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap,
exit_code = p->exit_code;
if (unlikely(!exit_code) ||
- unlikely(p->state > TASK_STOPPED))
+ unlikely(p->state & TASK_TRACED))
goto bail_ref;
return wait_noreap_copyout(p, pid, uid,
why, (exit_code << 8) | 0x7f,
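
[Editor's note: exit.c carries two separate fixes. First, exit_itimers() moves here from __exit_signal(), paired with the kernel/signal.c and kernel/posix-timers.c hunks below, so POSIX timers are torn down by do_exit() while the task is still intact. Second, wait_task_stopped() stops inferring "traced" from `p->state > TASK_STOPPED`, which only worked by accident of the constants' numeric order; it now tests the TASK_TRACED bit. A sketch using the 2.6-era values, shown purely for illustration:]

/* Task states are bit flags; comparing them as ordinals breaks as soon
 * as a new state lands numerically above TASK_STOPPED. */
#define TASK_RUNNING            0
#define TASK_INTERRUPTIBLE      1
#define TASK_UNINTERRUPTIBLE    2
#define TASK_STOPPED            4
#define TASK_TRACED             8

static int toy_is_traced(long state)
{
        return (state & TASK_TRACED) != 0;  /* explicit, order-independent */
}
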
diff --git a/kernel/fork.c b/kernel/fork.c
index 533ce27f4b2..280bd44ac44 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -848,7 +848,7 @@ static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
unsigned long new_flags = p->flags;
- new_flags &= ~PF_SUPERPRIV;
+ new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
new_flags |= PF_FORKNOEXEC;
if (!(clone_flags & CLONE_PTRACE))
p->ptrace = 0;
diff --git a/kernel/kexec.c b/kernel/kexec.c
index cdd4dcd8fb6..36c5d9cd4cc 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -90,7 +90,7 @@ int kexec_should_crash(struct task_struct *p)
static int kimage_is_destination_range(struct kimage *image,
unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
- unsigned int gfp_mask,
+ gfp_t gfp_mask,
unsigned long dest);
static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
@@ -326,8 +326,7 @@ static int kimage_is_destination_range(struct kimage *image,
return 0;
}
-static struct page *kimage_alloc_pages(unsigned int gfp_mask,
- unsigned int order)
+static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
struct page *pages;
@@ -654,7 +653,7 @@ static kimage_entry_t *kimage_dst_used(struct kimage *image,
}
static struct page *kimage_alloc_page(struct kimage *image,
- unsigned int gfp_mask,
+ gfp_t gfp_mask,
unsigned long destination)
{
/*
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 179baafcdd9..64ab045c3d9 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -36,7 +36,7 @@
* struct kfifo with kfree().
*/
struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
- unsigned int __nocast gfp_mask, spinlock_t *lock)
+ gfp_t gfp_mask, spinlock_t *lock)
{
struct kfifo *fifo;
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(kfifo_init);
*
* The size will be rounded-up to a power of 2.
*/
-struct kfifo *kfifo_alloc(unsigned int size, unsigned int __nocast gfp_mask, spinlock_t *lock)
+struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock)
{
unsigned char *buffer;
struct kfifo *ret;
diff --git a/kernel/params.c b/kernel/params.c
index fbf173215fd..1a8614bac5d 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -80,8 +80,6 @@ static char *next_arg(char *args, char **param, char **val)
int in_quote = 0, quoted = 0;
char *next;
- /* Chew any extra spaces */
- while (*args == ' ') args++;
if (*args == '"') {
args++;
in_quote = 1;
@@ -121,6 +119,10 @@ static char *next_arg(char *args, char **param, char **val)
next = args + i + 1;
} else
next = args + i;
+
+ /* Chew up trailing spaces. */
+ while (*next == ' ')
+ next++;
return next;
}
@@ -135,6 +137,10 @@ int parse_args(const char *name,
DEBUGP("Parsing ARGS: %s\n", args);
+ /* Chew leading spaces */
+ while (*args == ' ')
+ args++;
+
while (*args) {
int ret;
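
[Editor's note: the params.c hunks relocate the space-skipping: leading spaces are chewed once before the parse_args() loop and trailing spaces after each token, so next_arg() can assume it starts on a non-space character and callers never see empty tokens from runs of blanks. A toy tokenizer with the same shape (quote and value handling omitted):]

#include <stdio.h>

/* Split on spaces only; quoting and '=' parsing are left out. */
static char *toy_next_arg(char *args, char **param)
{
        char *next;
        int i;

        *param = args;
        for (i = 0; args[i] && args[i] != ' '; i++)
                ;
        if (args[i]) {
                args[i] = '\0';
                next = args + i + 1;
        } else
                next = args + i;

        while (*next == ' ')            /* chew trailing spaces (the patch) */
                next++;
        return next;                    /* always on a non-space or '\0' */
}

int main(void)
{
        char buf[] = "  foo=1  bar=2 ";
        char *args = buf, *param;

        while (*args == ' ')            /* chew leading spaces once */
                args++;
        while (*args) {
                args = toy_next_arg(args, &param);
                printf("token: '%s'\n", param);
        }
        return 0;
}

[Running it prints token: 'foo=1' then token: 'bar=2', with no empty tokens in between.]
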
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index ad85d3f0dcc..bf374fceb39 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -91,7 +91,7 @@ static inline union cpu_time_count cpu_time_sub(clockid_t which_clock,
* Update expiry time from increment, and increase overrun count,
* given the current clock sample.
*/
-static inline void bump_cpu_timer(struct k_itimer *timer,
+static void bump_cpu_timer(struct k_itimer *timer,
union cpu_time_count now)
{
int i;
@@ -110,7 +110,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer,
for (i = 0; incr < delta - incr; i++)
incr = incr << 1;
for (; i >= 0; incr >>= 1, i--) {
- if (delta <= incr)
+ if (delta < incr)
continue;
timer->it.cpu.expires.sched += incr;
timer->it_overrun += 1 << i;
@@ -128,7 +128,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer,
for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
incr = cputime_add(incr, incr);
for (; i >= 0; incr = cputime_halve(incr), i--) {
- if (cputime_le(delta, incr))
+ if (cputime_lt(delta, incr))
continue;
timer->it.cpu.expires.cpu =
cputime_add(timer->it.cpu.expires.cpu, incr);
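
[Editor's note: the bump_cpu_timer() hunks drop the inline and fix the catch-up comparisons from <= to <. The loop doubles the increment so many missed periods are skipped in O(log n) steps, then walks back down applying the power-of-two multiples that fit. When delta equals incr exactly, that increment must still be applied; with <= it was skipped, leaving the new expiry at `now` rather than one full period past it, so the timer fired again immediately. A worked integer sketch of the fixed sched-clock branch:]

/* expires/incr/now are in one unit (nanoseconds for the sched clock). */
static void toy_bump(unsigned long long *expires, unsigned long long incr,
                     unsigned long long now, unsigned long *overrun)
{
        unsigned long long delta;
        int i;

        if (now < *expires)
                return;                 /* not expired: nothing to do */
        delta = now + incr - *expires;
        /* double incr until one more doubling would overshoot delta */
        for (i = 0; incr < delta - incr; i++)
                incr <<= 1;
        /* walk back down, applying each power-of-two multiple that fits */
        for (; i >= 0; incr >>= 1, i--) {
                if (delta < incr)       /* the fix: `<=` skipped delta==incr */
                        continue;
                *expires += incr;
                *overrun += 1UL << i;
                delta -= incr;
        }
}

/* Example: expires=10, incr=5, now=10 gives delta=5; with `<` the expiry
 * becomes 15, one period past now; with the old `<=` it stayed at 10. */
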
@@ -380,14 +380,9 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
int posix_cpu_timer_del(struct k_itimer *timer)
{
struct task_struct *p = timer->it.cpu.task;
+ int ret = 0;
- if (timer->it.cpu.firing)
- return TIMER_RETRY;
-
- if (unlikely(p == NULL))
- return 0;
-
- if (!list_empty(&timer->it.cpu.entry)) {
+ if (likely(p != NULL)) {
read_lock(&tasklist_lock);
if (unlikely(p->signal == NULL)) {
/*
@@ -396,18 +391,20 @@ int posix_cpu_timer_del(struct k_itimer *timer)
*/
BUG_ON(!list_empty(&timer->it.cpu.entry));
} else {
- /*
- * Take us off the task's timer list.
- */
spin_lock(&p->sighand->siglock);
- list_del(&timer->it.cpu.entry);
+ if (timer->it.cpu.firing)
+ ret = TIMER_RETRY;
+ else
+ list_del(&timer->it.cpu.entry);
spin_unlock(&p->sighand->siglock);
}
read_unlock(&tasklist_lock);
+
+ if (!ret)
+ put_task_struct(p);
}
- put_task_struct(p);
- return 0;
+ return ret;
}
/*
@@ -424,7 +421,6 @@ static void cleanup_timers(struct list_head *head,
cputime_t ptime = cputime_add(utime, stime);
list_for_each_entry_safe(timer, next, head, entry) {
- timer->task = NULL;
list_del_init(&timer->entry);
if (cputime_lt(timer->expires.cpu, ptime)) {
timer->expires.cpu = cputime_zero;
@@ -436,7 +432,6 @@ static void cleanup_timers(struct list_head *head,
++head;
list_for_each_entry_safe(timer, next, head, entry) {
- timer->task = NULL;
list_del_init(&timer->entry);
if (cputime_lt(timer->expires.cpu, utime)) {
timer->expires.cpu = cputime_zero;
@@ -448,7 +443,6 @@ static void cleanup_timers(struct list_head *head,
++head;
list_for_each_entry_safe(timer, next, head, entry) {
- timer->task = NULL;
list_del_init(&timer->entry);
if (timer->expires.sched < sched_time) {
timer->expires.sched = 0;
@@ -492,6 +486,9 @@ static void process_timer_rebalance(struct task_struct *p,
struct task_struct *t = p;
unsigned int nthreads = atomic_read(&p->signal->live);
+ if (!nthreads)
+ return;
+
switch (clock_idx) {
default:
BUG();
@@ -500,7 +497,7 @@ static void process_timer_rebalance(struct task_struct *p,
left = cputime_div(cputime_sub(expires.cpu, val.cpu),
nthreads);
do {
- if (!unlikely(t->exit_state)) {
+ if (!unlikely(t->flags & PF_EXITING)) {
ticks = cputime_add(prof_ticks(t), left);
if (cputime_eq(t->it_prof_expires,
cputime_zero) ||
@@ -515,7 +512,7 @@ static void process_timer_rebalance(struct task_struct *p,
left = cputime_div(cputime_sub(expires.cpu, val.cpu),
nthreads);
do {
- if (!unlikely(t->exit_state)) {
+ if (!unlikely(t->flags & PF_EXITING)) {
ticks = cputime_add(virt_ticks(t), left);
if (cputime_eq(t->it_virt_expires,
cputime_zero) ||
@@ -530,7 +527,7 @@ static void process_timer_rebalance(struct task_struct *p,
nsleft = expires.sched - val.sched;
do_div(nsleft, nthreads);
do {
- if (!unlikely(t->exit_state)) {
+ if (!unlikely(t->flags & PF_EXITING)) {
ns = t->sched_time + nsleft;
if (t->it_sched_expires == 0 ||
t->it_sched_expires > ns) {
@@ -569,6 +566,9 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
struct cpu_timer_list *next;
unsigned long i;
+ if (CPUCLOCK_PERTHREAD(timer->it_clock) && (p->flags & PF_EXITING))
+ return;
+
head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
p->cpu_timers : p->signal->cpu_timers);
head += CPUCLOCK_WHICH(timer->it_clock);
@@ -579,17 +579,15 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
listpos = head;
if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
list_for_each_entry(next, head, entry) {
- if (next->expires.sched > nt->expires.sched) {
- listpos = &next->entry;
+ if (next->expires.sched > nt->expires.sched)
break;
- }
+ listpos = &next->entry;
}
} else {
list_for_each_entry(next, head, entry) {
- if (cputime_gt(next->expires.cpu, nt->expires.cpu)) {
- listpos = &next->entry;
+ if (cputime_gt(next->expires.cpu, nt->expires.cpu))
break;
- }
+ listpos = &next->entry;
}
}
list_add(&nt->entry, listpos);
@@ -733,9 +731,15 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
* Disarm any old timer after extracting its expiry time.
*/
BUG_ON(!irqs_disabled());
+
+ ret = 0;
spin_lock(&p->sighand->siglock);
old_expires = timer->it.cpu.expires;
- list_del_init(&timer->it.cpu.entry);
+ if (unlikely(timer->it.cpu.firing)) {
+ timer->it.cpu.firing = -1;
+ ret = TIMER_RETRY;
+ } else
+ list_del_init(&timer->it.cpu.entry);
spin_unlock(&p->sighand->siglock);
/*
@@ -783,7 +787,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
}
}
- if (unlikely(timer->it.cpu.firing)) {
+ if (unlikely(ret)) {
/*
* We are colliding with the timer actually firing.
* Punt after filling in the timer's old value, and
@@ -791,8 +795,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
* it as an overrun (thanks to bump_cpu_timer above).
*/
read_unlock(&tasklist_lock);
- timer->it.cpu.firing = -1;
- ret = TIMER_RETRY;
goto out;
}
@@ -958,14 +960,16 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
static void check_thread_timers(struct task_struct *tsk,
struct list_head *firing)
{
+ int maxfire;
struct list_head *timers = tsk->cpu_timers;
+ maxfire = 20;
tsk->it_prof_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_entry(timers->next,
struct cpu_timer_list,
entry);
- if (cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
+ if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
tsk->it_prof_expires = t->expires.cpu;
break;
}
@@ -974,12 +978,13 @@ static void check_thread_timers(struct task_struct *tsk,
}
++timers;
+ maxfire = 20;
tsk->it_virt_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_entry(timers->next,
struct cpu_timer_list,
entry);
- if (cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
+ if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
tsk->it_virt_expires = t->expires.cpu;
break;
}
@@ -988,12 +993,13 @@ static void check_thread_timers(struct task_struct *tsk,
}
++timers;
+ maxfire = 20;
tsk->it_sched_expires = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_entry(timers->next,
struct cpu_timer_list,
entry);
- if (tsk->sched_time < t->expires.sched) {
+ if (!--maxfire || tsk->sched_time < t->expires.sched) {
tsk->it_sched_expires = t->expires.sched;
break;
}
@@ -1010,6 +1016,7 @@ static void check_thread_timers(struct task_struct *tsk,
static void check_process_timers(struct task_struct *tsk,
struct list_head *firing)
{
+ int maxfire;
struct signal_struct *const sig = tsk->signal;
cputime_t utime, stime, ptime, virt_expires, prof_expires;
unsigned long long sched_time, sched_expires;
@@ -1042,12 +1049,13 @@ static void check_process_timers(struct task_struct *tsk,
} while (t != tsk);
ptime = cputime_add(utime, stime);
+ maxfire = 20;
prof_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_entry(timers->next,
struct cpu_timer_list,
entry);
- if (cputime_lt(ptime, t->expires.cpu)) {
+ if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) {
prof_expires = t->expires.cpu;
break;
}
@@ -1056,12 +1064,13 @@ static void check_process_timers(struct task_struct *tsk,
}
++timers;
+ maxfire = 20;
virt_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_entry(timers->next,
struct cpu_timer_list,
entry);
- if (cputime_lt(utime, t->expires.cpu)) {
+ if (!--maxfire || cputime_lt(utime, t->expires.cpu)) {
virt_expires = t->expires.cpu;
break;
}
@@ -1070,12 +1079,13 @@ static void check_process_timers(struct task_struct *tsk,
}
++timers;
+ maxfire = 20;
sched_expires = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_entry(timers->next,
struct cpu_timer_list,
entry);
- if (sched_time < t->expires.sched) {
+ if (!--maxfire || sched_time < t->expires.sched) {
sched_expires = t->expires.sched;
break;
}
@@ -1158,6 +1168,9 @@ static void check_process_timers(struct task_struct *tsk,
unsigned long long sched_left, sched;
const unsigned int nthreads = atomic_read(&sig->live);
+ if (!nthreads)
+ return;
+
prof_left = cputime_sub(prof_expires, utime);
prof_left = cputime_sub(prof_left, stime);
prof_left = cputime_div(prof_left, nthreads);
@@ -1194,7 +1207,7 @@ static void check_process_timers(struct task_struct *tsk,
do {
t = next_thread(t);
- } while (unlikely(t->exit_state));
+ } while (unlikely(t->flags & PF_EXITING));
} while (t != tsk);
}
}
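
[Editor's note: the remaining posix-cpu-timers.c changes share three themes: test PF_EXITING rather than exit_state when deciding whether a thread can still host a timer, make deletion and re-arming return TIMER_RETRY instead of racing a concurrently firing timer, and bound each expiry-list drain with `maxfire = 20` so a pile of already-expired timers cannot livelock the tick path. A toy model of the maxfire pattern, with a plain singly linked list in place of struct cpu_timer_list:]

struct toy_timer {
        unsigned long expires;
        struct toy_timer *next;         /* list kept sorted by expires */
};

/* Fire at most maxfire - 1 expired timers per call; the bounded exit
 * re-reports the head's expiry, much as check_thread_timers() now
 * stores it back into tsk->it_*_expires so the next tick resumes. */
static unsigned long toy_drain_expired(struct toy_timer **head,
                                       unsigned long now, int maxfire)
{
        while (*head) {
                struct toy_timer *t = *head;

                if (!--maxfire || now < t->expires)
                        return t->expires;      /* next expiry to arm */
                *head = t->next;                /* "fire" this timer */
        }
        return 0;                               /* nothing left armed */
}

[Capping at 20 still makes forward progress: the surviving head's expiry is recorded, and the drain resumes on the next tick.]
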
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index b7b532acd9f..dda3cda73c7 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -1157,7 +1157,7 @@ retry_delete:
}
/*
- * This is called by __exit_signal, only when there are no more
+ * This is called by do_exit or de_thread, only when there are no more
* references to the shared signal_struct.
*/
void exit_itimers(struct signal_struct *sig)
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 396c7873e80..46a5e5acff9 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -29,7 +29,7 @@ config PM_DEBUG
config SOFTWARE_SUSPEND
bool "Software Suspend"
- depends on PM && SWAP && (X86 || ((FVR || PPC32) && !SMP))
+ depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FVR || PPC32) && !SMP)
---help---
Enable the possibility of suspending the machine.
It doesn't need APM.
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 2d8bf054d03..761956e813f 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -17,12 +17,12 @@
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/mount.h>
+#include <linux/pm.h>
#include "power.h"
extern suspend_disk_method_t pm_disk_mode;
-extern struct pm_ops * pm_ops;
extern int swsusp_suspend(void);
extern int swsusp_write(void);
@@ -49,13 +49,11 @@ dev_t swsusp_resume_device;
static void power_down(suspend_disk_method_t mode)
{
- unsigned long flags;
int error = 0;
- local_irq_save(flags);
switch(mode) {
case PM_DISK_PLATFORM:
- device_shutdown();
+ kernel_power_off_prepare();
error = pm_ops->enter(PM_SUSPEND_DISK);
break;
case PM_DISK_SHUTDOWN:
diff --git a/kernel/power/power.h b/kernel/power/power.h
index cd6a3493cc0..6748de23e83 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -1,7 +1,7 @@
#include <linux/suspend.h>
#include <linux/utsname.h>
-/* With SUSPEND_CONSOLE defined, it suspend looks *really* cool, but
+/* With SUSPEND_CONSOLE defined suspend looks *really* cool, but
we probably do not take enough locks for switching consoles, etc,
so bad things might happen.
*/
@@ -9,6 +9,9 @@
#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
#endif
+#define MAX_PBES ((PAGE_SIZE - sizeof(struct new_utsname) \
+ - 4 - 3*sizeof(unsigned long) - sizeof(int) \
+ - sizeof(void *)) / sizeof(swp_entry_t))
struct swsusp_info {
struct new_utsname uts;
@@ -18,7 +21,7 @@ struct swsusp_info {
unsigned long image_pages;
unsigned long pagedir_pages;
suspend_pagedir_t * suspend_pagedir;
- swp_entry_t pagedir[768];
+ swp_entry_t pagedir[MAX_PBES];
} __attribute__((aligned(PAGE_SIZE)));
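
[Editor's note: the new MAX_PBES sizes the pagedir[] array so struct swsusp_info still fills exactly one page whatever the word size: page size minus the fixed members, divided by the size of a swap entry. Reading the subtraction against the struct, the bare 4 is presumably a u32 version field, the three longs and the int are page counts and a CPU count, and the pointer is suspend_pagedir. The old fixed 768 was tuned for 4-byte entries; 768 8-byte entries alone outgrow a 4 KB page. A userspace re-run of the arithmetic:]

#include <stdio.h>

#define TOY_PAGE_SIZE 4096UL

struct new_utsname {                    /* 6 x 65 bytes, as in the kernel */
        char sysname[65], nodename[65], release[65],
             version[65], machine[65], domainname[65];
};
typedef struct { unsigned long val; } swp_entry_t;

#define MAX_PBES ((TOY_PAGE_SIZE - sizeof(struct new_utsname) \
                   - 4 - 3*sizeof(unsigned long) - sizeof(int) \
                   - sizeof(void *)) / sizeof(swp_entry_t))

int main(void)
{
        /* 920 with 4-byte longs/pointers, 458 with 8-byte ones; the old
         * 768 needed 6144 bytes of 8-byte entries, more than the page. */
        printf("MAX_PBES here: %lu\n", (unsigned long)MAX_PBES);
        return 0;
}
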
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index d967e875ee8..10bc5ec496d 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -363,7 +363,7 @@ static void lock_swapdevices(void)
}
/**
- * write_swap_page - Write one page to a fresh swap location.
+ * write_page - Write one page to a fresh swap location.
* @addr: Address we're writing.
* @loc: Place to store the entry we used.
*
@@ -402,15 +402,14 @@ static int write_page(unsigned long addr, swp_entry_t * loc)
static void data_free(void)
{
swp_entry_t entry;
- int i;
+ struct pbe * p;
- for (i = 0; i < nr_copy_pages; i++) {
- entry = (pagedir_nosave + i)->swap_address;
+ for_each_pbe(p, pagedir_nosave) {
+ entry = p->swap_address;
if (entry.val)
swap_free(entry);
else
break;
- (pagedir_nosave + i)->swap_address = (swp_entry_t){0};
}
}
@@ -863,6 +862,9 @@ static int alloc_image_pages(void)
return 0;
}
+/* Free pages we allocated for suspend. Suspend pages are allocated
+ * before atomic copy, so we need to free them after resume.
+ */
void swsusp_free(void)
{
BUG_ON(PageNosave(virt_to_page(pagedir_save)));
@@ -918,6 +920,7 @@ static int swsusp_alloc(void)
pagedir_nosave = NULL;
nr_copy_pages = calc_nr(nr_copy_pages);
+ nr_copy_pages_check = nr_copy_pages;
pr_debug("suspend: (pages needed: %d + %d free: %d)\n",
nr_copy_pages, PAGES_FOR_IO, nr_free_pages());
@@ -928,6 +931,10 @@ static int swsusp_alloc(void)
if (!enough_swap())
return -ENOSPC;
+ if (MAX_PBES < nr_copy_pages / PBES_PER_PAGE +
+ !!(nr_copy_pages % PBES_PER_PAGE))
+ return -ENOSPC;
+
if (!(pagedir_save = alloc_pagedir(nr_copy_pages))) {
printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
return -ENOMEM;
@@ -940,7 +947,6 @@ static int swsusp_alloc(void)
return error;
}
- nr_copy_pages_check = nr_copy_pages;
return 0;
}
@@ -1089,7 +1095,7 @@ static inline void eat_page(void *page)
*eaten_memory = c;
}
-static unsigned long get_usable_page(unsigned gfp_mask)
+unsigned long get_usable_page(gfp_t gfp_mask)
{
unsigned long m;
@@ -1103,7 +1109,7 @@ static unsigned long get_usable_page(unsigned gfp_mask)
return m;
}
-static void free_eaten_memory(void)
+void free_eaten_memory(void)
{
unsigned long m;
void **c;
@@ -1213,8 +1219,9 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
free_pagedir(pblist);
free_eaten_memory();
pblist = NULL;
- }
- else
+ /* Is this even worth handling? It should never ever happen, and we
+ have just lost user's state, anyway... */
+ } else
printk("swsusp: Relocated %d pages\n", rel);
return pblist;
@@ -1434,9 +1441,9 @@ static int read_pagedir(struct pbe *pblist)
}
if (error)
- free_page((unsigned long)pblist);
-
- BUG_ON(i != swsusp_info.pagedir_pages);
+ free_pagedir(pblist);
+ else
+ BUG_ON(i != swsusp_info.pagedir_pages);
return error;
}
@@ -1474,11 +1481,12 @@ static int read_suspend_image(void)
/* Allocate memory for the image and read the data from swap */
error = check_pagedir(pagedir_nosave);
- free_eaten_memory();
+
if (!error)
error = data_read(pagedir_nosave);
if (error) { /* We fail cleanly */
+ free_eaten_memory();
for_each_pbe (p, pagedir_nosave)
if (p->address) {
free_page(p->address);
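
[Editor's note: elsewhere in swsusp.c, data_free() switches from indexing pagedir_nosave as one flat array to walking it with for_each_pbe; the page backup entries live in page-sized chunks chained together, so pointer-chasing is the correct traversal. A sketch of the iterator as it looked in this era, with the struct layout approximated:]

typedef struct { unsigned long val; } swp_entry_t;

struct pbe {                            /* layout approximated */
        unsigned long address;          /* address of the copy */
        unsigned long orig_address;     /* address of the original page */
        swp_entry_t swap_address;       /* swap slot the copy went to */
        struct pbe *next;               /* chains entries across pages */
};

/* Follow the chain; never assume one contiguous array. */
#define for_each_pbe(pbe, pblist) \
        for ((pbe) = (pblist); (pbe); (pbe) = (pbe)->next)
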
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index bef3b6901b7..2559d4b8f23 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -71,7 +71,7 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
-static int maxbatch = 10;
+static int maxbatch = 10000;
#ifndef __HAVE_ARCH_CMPXCHG
/*
@@ -109,6 +109,10 @@ void fastcall call_rcu(struct rcu_head *head,
rdp = &__get_cpu_var(rcu_data);
*rdp->nxttail = head;
rdp->nxttail = &head->next;
+
+ if (unlikely(++rdp->count > 10000))
+ set_need_resched();
+
local_irq_restore(flags);
}
@@ -140,6 +144,12 @@ void fastcall call_rcu_bh(struct rcu_head *head,
rdp = &__get_cpu_var(rcu_bh_data);
*rdp->nxttail = head;
rdp->nxttail = &head->next;
+ rdp->count++;
+/*
+ * Should we directly call rcu_do_batch() here ?
+ * if (unlikely(rdp->count > 10000))
+ * rcu_do_batch(rdp);
+ */
local_irq_restore(flags);
}
@@ -157,6 +167,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
next = rdp->donelist = list->next;
list->func(list);
list = next;
+ rdp->count--;
if (++count >= maxbatch)
break;
}
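
[Editor's note: the rcupdate.c changes raise maxbatch from 10 to 10000 and add per-CPU callback accounting: call_rcu() increments rdp->count and, past a backlog of 10000, pokes the scheduler so grace periods can advance, while rcu_do_batch() decrements as it invokes callbacks. call_rcu_bh() only counts for now; the commented-out question asks whether it should batch directly. A toy single-CPU model of the bookkeeping:]

#include <stdio.h>

struct toy_rcu_data {
        long count;                     /* queued, not yet invoked */
        int  need_resched;              /* stand-in for set_need_resched() */
};

static void toy_call_rcu(struct toy_rcu_data *rdp)
{
        if (++rdp->count > 10000)       /* same threshold as the patch */
                rdp->need_resched = 1;  /* let grace periods advance */
}

static void toy_do_batch(struct toy_rcu_data *rdp, int maxbatch)
{
        while (rdp->count > 0 && maxbatch-- > 0)
                rdp->count--;           /* "invoke" one callback */
}

int main(void)
{
        struct toy_rcu_data rdp = { 0, 0 };
        long i;

        for (i = 0; i < 20000; i++)
                toy_call_rcu(&rdp);
        toy_do_batch(&rdp, 10000);
        printf("backlog=%ld resched=%d\n", rdp.count, rdp.need_resched);
        return 0;
}
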
diff --git a/kernel/sched.c b/kernel/sched.c
index 1f31a528fdb..1e5cafdf4e2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3879,6 +3879,7 @@ EXPORT_SYMBOL(cpu_present_map);
#ifndef CONFIG_SMP
cpumask_t cpu_online_map = CPU_MASK_ALL;
+EXPORT_SYMBOL_GPL(cpu_online_map);
cpumask_t cpu_possible_map = CPU_MASK_ALL;
#endif
diff --git a/kernel/signal.c b/kernel/signal.c
index b92c3c9f8b9..f2b96b08fb4 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -262,7 +262,7 @@ next_signal(struct sigpending *pending, sigset_t *mask)
return sig;
}
-static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags,
+static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
int override_rlimit)
{
struct sigqueue *q = NULL;
@@ -397,20 +397,8 @@ void __exit_signal(struct task_struct *tsk)
flush_sigqueue(&tsk->pending);
if (sig) {
/*
- * We are cleaning up the signal_struct here. We delayed
- * calling exit_itimers until after flush_sigqueue, just in
- * case our thread-local pending queue contained a queued
- * timer signal that would have been cleared in
- * exit_itimers. When that called sigqueue_free, it would
- * attempt to re-take the tasklist_lock and deadlock. This
- * can never happen if we ensure that all queues the
- * timer's signal might be queued on have been flushed
- * first. The shared_pending queue, and our own pending
- * queue are the only queues the timer could be on, since
- * there are no other threads left in the group and timer
- * signals are constrained to threads inside the group.
+ * We are cleaning up the signal_struct here.
*/
- exit_itimers(sig);
exit_thread_group_keys(sig);
kmem_cache_free(signal_cachep, sig);
}
@@ -578,7 +566,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
* is to alert stop-signal processing code when another
* processor has come along and cleared the flag.
*/
- tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
+ if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
+ tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
}
if ( signr &&
((info->si_code & __SI_MASK) == __SI_TIMER) &&
@@ -936,34 +925,31 @@ force_sig_specific(int sig, struct task_struct *t)
* as soon as they're available, so putting the signal on the shared queue
* will be equivalent to sending it to one such thread.
*/
-#define wants_signal(sig, p, mask) \
- (!sigismember(&(p)->blocked, sig) \
- && !((p)->state & mask) \
- && !((p)->flags & PF_EXITING) \
- && (task_curr(p) || !signal_pending(p)))
-
+static inline int wants_signal(int sig, struct task_struct *p)
+{
+ if (sigismember(&p->blocked, sig))
+ return 0;
+ if (p->flags & PF_EXITING)
+ return 0;
+ if (sig == SIGKILL)
+ return 1;
+ if (p->state & (TASK_STOPPED | TASK_TRACED))
+ return 0;
+ return task_curr(p) || !signal_pending(p);
+}
static void
__group_complete_signal(int sig, struct task_struct *p)
{
- unsigned int mask;
struct task_struct *t;
/*
- * Don't bother traced and stopped tasks (but
- * SIGKILL will punch through that).
- */
- mask = TASK_STOPPED | TASK_TRACED;
- if (sig == SIGKILL)
- mask = 0;
-
- /*
* Now find a thread we can wake up to take the signal off the queue.
*
* If the main thread wants the signal, it gets first crack.
* Probably the least surprising to the average bear.
*/
- if (wants_signal(sig, p, mask))
+ if (wants_signal(sig, p))
t = p;
else if (thread_group_empty(p))
/*
@@ -981,7 +967,7 @@ __group_complete_signal(int sig, struct task_struct *p)
t = p->signal->curr_target = p;
BUG_ON(t->tgid != p->tgid);
- while (!wants_signal(sig, t, mask)) {
+ while (!wants_signal(sig, t)) {
t = next_thread(t);
if (t == p->signal->curr_target)
/*
@@ -1195,6 +1181,40 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
return error;
}
+/* like kill_proc_info(), but doesn't use uid/euid of "current" */
+int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
+ uid_t uid, uid_t euid)
+{
+ int ret = -EINVAL;
+ struct task_struct *p;
+
+ if (!valid_signal(sig))
+ return ret;
+
+ read_lock(&tasklist_lock);
+ p = find_task_by_pid(pid);
+ if (!p) {
+ ret = -ESRCH;
+ goto out_unlock;
+ }
+ if ((!info || ((unsigned long)info != 1 &&
+ (unsigned long)info != 2 && SI_FROMUSER(info)))
+ && (euid != p->suid) && (euid != p->uid)
+ && (uid != p->suid) && (uid != p->uid)) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+ if (sig && p->sighand) {
+ unsigned long flags;
+ spin_lock_irqsave(&p->sighand->siglock, flags);
+ ret = __group_send_sig_info(sig, info, p);
+ spin_unlock_irqrestore(&p->sighand->siglock, flags);
+ }
+out_unlock:
+ read_unlock(&tasklist_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
/*
* kill_something_info() interprets pid in interesting ways just like kill(2).
@@ -1766,7 +1786,8 @@ do_signal_stop(int signr)
* stop is always done with the siglock held,
* so this check has no races.
*/
- if (t->state < TASK_STOPPED) {
+ if (!t->exit_state &&
+ !(t->state & (TASK_STOPPED|TASK_TRACED))) {
stop_count++;
signal_wake_up(t, 0);
}
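
[Editor's note: the new kill_proc_info_as_uid() lets a caller signal a task using explicit credentials instead of current's. Delivery is permitted when the siginfo originates in the kernel, or when either supplied id matches the target's uid or saved uid. A boolean model of that test, with field names standing in for the task_struct ones:]

/* Model of the EPERM check above; t_uid/t_suid stand in for the
 * target's p->uid and p->suid. */
static int toy_sig_allowed(unsigned int uid, unsigned int euid,
                           unsigned int t_uid, unsigned int t_suid,
                           int from_kernel)
{
        if (from_kernel)                /* !info, or not SI_FROMUSER */
                return 1;
        return euid == t_suid || euid == t_uid ||
               uid  == t_suid || uid  == t_uid;
}
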
diff --git a/kernel/sys.c b/kernel/sys.c
index f723522e698..2fa1ed18123 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -361,17 +361,35 @@ out_unlock:
return retval;
}
+/**
+ * emergency_restart - reboot the system
+ *
+ * Without shutting down any hardware or taking any locks
+ * reboot the system. This is called when we know we are in
+ * trouble so this is our best effort to reboot. This is
+ * safe to call in interrupt context.
+ */
void emergency_restart(void)
{
machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);
-void kernel_restart(char *cmd)
+/**
+ * kernel_restart - reboot the system
+ *
+ * Shutdown everything and perform a clean reboot.
+ * This is not safe to call in interrupt context.
+ */
+void kernel_restart_prepare(char *cmd)
{
notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
system_state = SYSTEM_RESTART;
device_shutdown();
+}
+void kernel_restart(char *cmd)
+{
+ kernel_restart_prepare(cmd);
if (!cmd) {
printk(KERN_EMERG "Restarting system.\n");
} else {
@@ -382,6 +400,12 @@ void kernel_restart(char *cmd)
}
EXPORT_SYMBOL_GPL(kernel_restart);
+/**
+ * kernel_kexec - reboot the system
+ *
+ * Move into place and start executing a preloaded standalone
+ * executable. If nothing was preloaded return an error.
+ */
void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
@@ -390,9 +414,7 @@ void kernel_kexec(void)
if (!image) {
return;
}
- notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
- system_state = SYSTEM_RESTART;
- device_shutdown();
+ kernel_restart_prepare(NULL);
printk(KERN_EMERG "Starting new kernel\n");
machine_shutdown();
machine_kexec(image);
@@ -400,21 +422,39 @@ void kernel_kexec(void)
}
EXPORT_SYMBOL_GPL(kernel_kexec);
-void kernel_halt(void)
+/**
+ * kernel_halt - halt the system
+ *
+ * Shutdown everything and perform a clean system halt.
+ */
+void kernel_halt_prepare(void)
{
notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL);
system_state = SYSTEM_HALT;
device_shutdown();
+}
+void kernel_halt(void)
+{
+ kernel_halt_prepare();
printk(KERN_EMERG "System halted.\n");
machine_halt();
}
EXPORT_SYMBOL_GPL(kernel_halt);
-void kernel_power_off(void)
+/**
+ * kernel_power_off - power_off the system
+ *
+ * Shutdown everything and perform a clean system power_off.
+ */
+void kernel_power_off_prepare(void)
{
notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL);
system_state = SYSTEM_POWER_OFF;
device_shutdown();
+}
+void kernel_power_off(void)
+{
+ kernel_power_off_prepare();
printk(KERN_EMERG "Power down.\n");
machine_power_off();
}
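
[Editor's note: the sys.c reshuffle splits each shutdown flavor into a *_prepare() half (reboot notifiers, system_state, device_shutdown()) and the final machine_*() call, letting kernel_kexec() here and power_down() in kernel/power/disk.c reuse the preparation rather than duplicate it. The resulting call structure, reduced to a skeleton:]

#include <stdio.h>

static void toy_restart_prepare(const char *cmd)
{
        /* notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
         * system_state = SYSTEM_RESTART;
         * device_shutdown(); */
        printf("prepared%s%s\n", cmd ? " for " : "", cmd ? cmd : "");
}

static void toy_kernel_restart(const char *cmd)
{
        toy_restart_prepare(cmd);
        /* machine_restart(cmd); */
}

static void toy_kernel_kexec(void)
{
        toy_restart_prepare(0);         /* reuse instead of copy-paste */
        /* machine_shutdown(); machine_kexec(image); */
}

int main(void)
{
        toy_kernel_restart("illustration");
        toy_kernel_kexec();
        return 0;
}
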
diff --git a/kernel/time.c b/kernel/time.c
index dd5ae1162a8..40c2410ac99 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -570,6 +570,7 @@ void getnstimeofday(struct timespec *tv)
tv->tv_sec = x.tv_sec;
tv->tv_nsec = x.tv_usec * NSEC_PER_USEC;
}
+EXPORT_SYMBOL_GPL(getnstimeofday);
#endif
#if (BITS_PER_LONG < 64)