Diffstat (limited to 'arch/blackfin/kernel/process.c')
-rw-r--r--	arch/blackfin/kernel/process.c	69
1 file changed, 45 insertions(+), 24 deletions(-)
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 6b8459c6616..be9fdd00d7c 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -32,6 +32,8 @@
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
#include <linux/fs.h>
#include <linux/err.h>
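
Both new headers are consumed further down in this patch: <linux/tick.h> declares the NOHZ hooks used in cpu_idle(), and <linux/sched.h> covers need_resched() and schedule(). A quick annotated reading of the two added lines (the comments are mine, not part of the patch):

#include <linux/sched.h>	/* need_resched(), schedule() */
#include <linux/tick.h>		/* tick_nohz_stop_sched_tick(), tick_nohz_restart_sched_tick() */
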
@@ -69,33 +71,44 @@ EXPORT_SYMBOL(pm_power_off);
* The idle loop on BFIN
*/
#ifdef CONFIG_IDLE_L1
-void default_idle(void)__attribute__((l1_text));
+static void default_idle(void)__attribute__((l1_text));
void cpu_idle(void)__attribute__((l1_text));
#endif
-void default_idle(void)
+/*
+ * This is our default idle handler. We need to disable
+ * interrupts here to ensure we don't miss a wakeup call.
+ */
+static void default_idle(void)
{
-	while (!need_resched()) {
-		local_irq_disable();
-		if (likely(!need_resched()))
-			idle_with_irq_disabled();
-		local_irq_enable();
-	}
-}
+	local_irq_disable();
+	if (!need_resched())
+		idle_with_irq_disabled();
-void (*idle)(void) = default_idle;
+	local_irq_enable();
+}
/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
+ * The idle thread. We try to conserve power, while trying to keep
+ * overall latency low. The architecture specific idle is passed
+ * a value to indicate the level of "idleness" of the system.
*/
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
-		idle();
+		void (*idle)(void) = pm_idle;
+
+#ifdef CONFIG_HOTPLUG_CPU
+		if (cpu_is_offline(smp_processor_id()))
+			cpu_die();
+#endif
+		if (!idle)
+			idle = default_idle;
+		tick_nohz_stop_sched_tick();
+		while (!need_resched())
+			idle();
+		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
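
Taken together, this hunk replaces the old self-contained loop with the common pm_idle arrangement: cpu_idle() uses the handler installed in pm_idle if there is one, falls back to default_idle(), and brackets the inner need_resched() loop with the NOHZ tick hooks; default_idle() re-checks need_resched() with interrupts off so a wakeup cannot slip in between the check and the idle instruction. A rough reconstruction of the resulting functions, pieced together from the + lines above (indentation and blank lines are mine, not the verbatim file):

static void default_idle(void)
{
	/* Interrupts are off for the final check, so a wakeup IRQ cannot
	 * arrive between need_resched() and the idle instruction. */
	local_irq_disable();
	if (!need_resched())
		idle_with_irq_disabled();

	local_irq_enable();
}

void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		void (*idle)(void) = pm_idle;

#ifdef CONFIG_HOTPLUG_CPU
		if (cpu_is_offline(smp_processor_id()))
			cpu_die();
#endif
		if (!idle)
			idle = default_idle;

		/* keep the periodic tick stopped for the whole idle stretch */
		tick_nohz_stop_sched_tick();
		while (!need_resched())
			idle();
		tick_nohz_restart_sched_tick();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
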
@@ -189,7 +202,7 @@ copy_thread(int nr, unsigned long clone_flags,
* sys_execve() executes a new program.
*/
-asmlinkage int sys_execve(char *name, char **argv, char **envp)
+asmlinkage int sys_execve(char __user *name, char __user * __user *argv, char __user * __user *envp)
{
	int error;
	char *filename;
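
The signature change above is an annotation fix rather than a behavioural one: __user is a sparse address-space attribute marking pointers that refer to user memory, which must be accessed through the uaccess helpers rather than dereferenced directly. A minimal sketch of what the annotation buys (read_user_int() is a hypothetical helper, not part of this file):

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical example: sparse accepts the checked access below, but would
 * warn about dereferencing uptr directly, because of the __user attribute. */
static int read_user_int(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* checked copy from user space */
		return -EFAULT;
	return val;
}
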
@@ -232,23 +245,25 @@ unsigned long get_wchan(struct task_struct *p)
void finish_atomic_sections (struct pt_regs *regs)
{
+	int __user *up0 = (int __user *)&regs->p0;
+
	if (regs->pc < ATOMIC_SEQS_START || regs->pc >= ATOMIC_SEQS_END)
		return;
	switch (regs->pc) {
	case ATOMIC_XCHG32 + 2:
-		put_user(regs->r1, (int *)regs->p0);
+		put_user(regs->r1, up0);
		regs->pc += 2;
		break;
	case ATOMIC_CAS32 + 2:
	case ATOMIC_CAS32 + 4:
		if (regs->r0 == regs->r1)
-			put_user(regs->r2, (int *)regs->p0);
+			put_user(regs->r2, up0);
		regs->pc = ATOMIC_CAS32 + 8;
		break;
	case ATOMIC_CAS32 + 6:
-		put_user(regs->r2, (int *)regs->p0);
+		put_user(regs->r2, up0);
		regs->pc += 2;
		break;
@@ -256,7 +271,7 @@ void finish_atomic_sections (struct pt_regs *regs)
		regs->r0 = regs->r1 + regs->r0;
		/* fall through */
	case ATOMIC_ADD32 + 4:
-		put_user(regs->r0, (int *)regs->p0);
+		put_user(regs->r0, up0);
		regs->pc = ATOMIC_ADD32 + 6;
		break;
@@ -264,7 +279,7 @@ void finish_atomic_sections (struct pt_regs *regs)
		regs->r0 = regs->r1 - regs->r0;
		/* fall through */
	case ATOMIC_SUB32 + 4:
-		put_user(regs->r0, (int *)regs->p0);
+		put_user(regs->r0, up0);
		regs->pc = ATOMIC_SUB32 + 6;
		break;
@@ -272,7 +287,7 @@ void finish_atomic_sections (struct pt_regs *regs)
		regs->r0 = regs->r1 | regs->r0;
		/* fall through */
	case ATOMIC_IOR32 + 4:
-		put_user(regs->r0, (int *)regs->p0);
+		put_user(regs->r0, up0);
		regs->pc = ATOMIC_IOR32 + 6;
		break;
@@ -280,7 +295,7 @@ void finish_atomic_sections (struct pt_regs *regs)
		regs->r0 = regs->r1 & regs->r0;
		/* fall through */
	case ATOMIC_AND32 + 4:
-		put_user(regs->r0, (int *)regs->p0);
+		put_user(regs->r0, up0);
		regs->pc = ATOMIC_AND32 + 6;
		break;
@@ -288,7 +303,7 @@ void finish_atomic_sections (struct pt_regs *regs)
		regs->r0 = regs->r1 ^ regs->r0;
		/* fall through */
	case ATOMIC_XOR32 + 4:
-		put_user(regs->r0, (int *)regs->p0);
+		put_user(regs->r0, up0);
		regs->pc = ATOMIC_XOR32 + 6;
		break;
	}
@@ -309,6 +324,12 @@ int _access_ok(unsigned long addr, unsigned long size)
		return 1;
	if (addr >= memory_mtd_end && (addr + size) <= physical_mem_end)
		return 1;
+
+#ifdef CONFIG_ROMFS_MTD_FS
+	/* For XIP, allow user space to use pointers within the ROMFS. */
+	if (addr >= memory_mtd_start && (addr + size) <= memory_mtd_end)
+		return 1;
+#endif
#else
	if (addr >= memory_start && (addr + size) <= physical_mem_end)
		return 1;
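
The new CONFIG_ROMFS_MTD_FS case follows the same shape as every other test in _access_ok(): a user-supplied range is accepted only if [addr, addr + size) sits entirely inside one allowed window, here the MTD region that a ROMFS image is executed from in place (XIP). A small sketch of that range test, using the names from the hunk above (range_ok() itself is hypothetical, added only for illustration):

/* Hypothetical helper mirroring the checks in _access_ok(): accept the
 * user range only if it lies completely inside [start, end). */
static inline int range_ok(unsigned long addr, unsigned long size,
			   unsigned long start, unsigned long end)
{
	return addr >= start && (addr + size) <= end;
}

/* The added XIP/ROMFS window would then read:
 *	if (range_ok(addr, size, memory_mtd_start, memory_mtd_end))
 *		return 1;
 */
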