Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/8250-platform.c       47
-rw-r--r--  arch/mips/kernel/Makefile              19
-rw-r--r--  arch/mips/kernel/cpu-probe.c           30
-rw-r--r--  arch/mips/kernel/entry.S                2
-rw-r--r--  arch/mips/kernel/genex.S                2
-rw-r--r--  arch/mips/kernel/head.S                10
-rw-r--r--  arch/mips/kernel/irq-mv6434x.c        111
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c      176
-rw-r--r--  arch/mips/kernel/mips-mt.c            205
-rw-r--r--  arch/mips/kernel/pcspeaker.c (renamed from arch/mips/kernel/i8253.c)  0
-rw-r--r--  arch/mips/kernel/proc.c                 2
-rw-r--r--  arch/mips/kernel/process.c              4
-rw-r--r--  arch/mips/kernel/r4k_switch.S           7
-rw-r--r--  arch/mips/kernel/setup.c               16
-rw-r--r--  arch/mips/kernel/smp.c                  2
-rw-r--r--  arch/mips/kernel/smtc.c                 2
-rw-r--r--  arch/mips/kernel/syscall.c              5
-rw-r--r--  arch/mips/kernel/traps.c               79
-rw-r--r--  arch/mips/kernel/unaligned.c           41
19 files changed, 400 insertions(+), 360 deletions(-)
diff --git a/arch/mips/kernel/8250-platform.c b/arch/mips/kernel/8250-platform.c
new file mode 100644
index 00000000000..cbf3fe20ad1
--- /dev/null
+++ b/arch/mips/kernel/8250-platform.c
@@ -0,0 +1,47 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/serial_8250.h>
+
+#define PORT(base, int) \
+{ \
+ .iobase = base, \
+ .irq = int, \
+ .uartclk = 1843200, \
+ .iotype = UPIO_PORT, \
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
+ .regshift = 0, \
+}
+
+static struct plat_serial8250_port uart8250_data[] = {
+ PORT(0x3F8, 4),
+ PORT(0x2F8, 3),
+ PORT(0x3E8, 4),
+ PORT(0x2E8, 3),
+ { },
+};
+
+static struct platform_device uart8250_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = uart8250_data,
+ },
+};
+
+static int __init uart8250_init(void)
+{
+ return platform_device_register(&uart8250_device);
+}
+
+module_init(uart8250_init);
+
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Generic 8250 UART probe driver");
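The new file above wires the four standard PC COM ports into the generic serial8250 platform driver by legacy I/O port address. Boards whose 16550-compatible UART is memory-mapped rather than reachable through PC port I/O can reuse the same plat_serial8250_port / platform_device pattern with UPIO_MEM; the sketch below only illustrates that variant — the base address, IRQ, clock rate and function names are made-up placeholders, not values taken from this patch.

/* Hypothetical board file: one memory-mapped 16550 at a placeholder address. */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>

static struct plat_serial8250_port board_uart_data[] = {
	{
		.mapbase  = 0x1f000900,		/* placeholder physical base */
		.irq      = 5,			/* placeholder IRQ line */
		.uartclk  = 3686400,		/* placeholder input clock */
		.iotype   = UPIO_MEM,		/* memory-mapped, not port I/O */
		.regshift = 2,			/* registers on 32-bit strides */
		.flags    = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP,
	},
	{ },
};

static struct platform_device board_uart_device = {
	.name = "serial8250",
	.id   = PLAT8250_DEV_PLATFORM,
	.dev  = {
		.platform_data = board_uart_data,
	},
};

static int __init board_uart_init(void)
{
	return platform_device_register(&board_uart_device);
}
device_initcall(board_uart_init);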
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 49246264cc7..5c8085b6d7a 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -14,14 +14,15 @@ binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_MODULES) += mips_ksyms.o module.o
+obj-$(CONFIG_CPU_LOONGSON2) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_MIPS32) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_MIPS64) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o
-obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
-obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R4000) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R4300) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R4X00) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R5000) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R5432) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R8000) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_RM7000) += r4k_fpu.o r4k_switch.o
@@ -29,13 +30,14 @@ obj-$(CONFIG_CPU_RM9000) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_NEVADA) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R10000) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_SB1) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_MIPS32) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_MIPS64) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
+obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_MIPS_MT) += mips-mt.o
+obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
@@ -47,7 +49,6 @@ obj-$(CONFIG_I8259) += i8259.o
obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o
-obj-$(CONFIG_IRQ_MV64340) += irq-mv6434x.o
obj-$(CONFIG_MIPS_BOARDS_GEN) += irq-msc01.o
obj-$(CONFIG_32BIT) += scall32-o32.o
@@ -62,9 +63,11 @@ obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_64BIT) += cpu-bugs64.o
-obj-$(CONFIG_I8253) += i8253.o
+obj-$(CONFIG_PCSPEAKER) += pcspeaker.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
+
+obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index b12eeee0e97..c6b8b074a81 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -186,9 +186,29 @@ static inline void check_wait(void)
}
}
+static inline void check_errata(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ switch (c->cputype) {
+ case CPU_34K:
+ /*
+ * Erratum "RPS May Cause Incorrect Instruction Execution"
+ * This code only handles VPE0; any SMP/SMTC/RTOS code
+ * making use of VPE1 will be responsible for that VPE.
+ */
+ if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2)
+ write_c0_config7(read_c0_config7() | MIPS_CONF7_RPS);
+ break;
+ default:
+ break;
+ }
+}
+
void __init check_bugs32(void)
{
check_wait();
+ check_errata();
}
/*
@@ -485,6 +505,14 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
+ case PRID_IMP_LOONGSON2:
+ c->cputype = CPU_LOONGSON2;
+ c->isa_level = MIPS_CPU_ISA_III;
+ c->options = R4K_OPTS |
+ MIPS_CPU_FPU | MIPS_CPU_LLSC |
+ MIPS_CPU_32FPR;
+ c->tlbsize = 64;
+ break;
}
}
@@ -588,6 +616,8 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
c->options |= MIPS_CPU_VEIC;
if (config3 & MIPS_CONF3_MT)
c->ases |= MIPS_ASE_MIPSMT;
+ if (config3 & MIPS_CONF3_ULRI)
+ c->options |= MIPS_CPU_ULRI;
return config3 & MIPS_CONF_M;
}
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 686249c5c32..e29598ae939 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -84,6 +84,7 @@ FEXPORT(restore_all) # restore full frame
LONG_S sp, TI_REGS($28)
jal deferred_smtc_ipi
LONG_S s0, TI_REGS($28)
+#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
/* Re-arm any temporarily masked interrupts not explicitly "acked" */
mfc0 v0, CP0_TCSTATUS
ori v1, v0, TCSTATUS_IXMT
@@ -110,6 +111,7 @@ FEXPORT(restore_all) # restore full frame
_ehb
xor t0, t0, t3
mtc0 t0, CP0_TCCONTEXT
+#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
#endif /* CONFIG_MIPS_MT_SMTC */
.set noat
RESTORE_TEMP
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 297bd56c234..c0f19d638b9 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -243,9 +243,11 @@ NESTED(except_vec_vi_handler, 0, sp)
*/
mfc0 t1, CP0_STATUS
and t0, a0, t1
+#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
mfc0 t2, CP0_TCCONTEXT
or t0, t0, t2
mtc0 t0, CP0_TCCONTEXT
+#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
xor t1, t1, t0
mtc0 t1, CP0_STATUS
_ehb
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 6f57ca44291..f78538eceef 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/threads.h>
+#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
@@ -129,24 +130,25 @@
#endif
.endm
+#ifndef CONFIG_NO_EXCEPT_FILL
/*
* Reserved space for exception handlers.
* Necessary for machines which link their kernels at KSEG0.
*/
.fill 0x400
+#endif
EXPORT(stext) # used for profiling
EXPORT(_stext)
-#ifdef CONFIG_MIPS_SIM
+#ifdef CONFIG_BOOT_RAW
/*
* Give us a fighting chance of running if execution begins at the
* kernel load address. This is needed because this platform does
* not have an ELF loader yet.
*/
- j kernel_entry
-#endif
__INIT
+#endif
NESTED(kernel_entry, 16, sp) # kernel entry point
@@ -197,9 +199,7 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
j start_kernel
END(kernel_entry)
-#ifdef CONFIG_QEMU
__INIT
-#endif
#ifdef CONFIG_SMP
/*
diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c
deleted file mode 100644
index 3dd561832e4..00000000000
--- a/arch/mips/kernel/irq-mv6434x.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright 2002 Momentum Computer
- * Author: mdharm@momenco.com
- * Copyright (C) 2004, 06 Ralf Baechle <ralf@linux-mips.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/mv643xx.h>
-#include <linux/sched.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/marvell.h>
-
-static unsigned int irq_base;
-
-static inline int ls1bit32(unsigned int x)
-{
- int b = 31, s;
-
- s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s;
- s = 8; if (x << 8 == 0) s = 0; b -= s; x <<= s;
- s = 4; if (x << 4 == 0) s = 0; b -= s; x <<= s;
- s = 2; if (x << 2 == 0) s = 0; b -= s; x <<= s;
- s = 1; if (x << 1 == 0) s = 0; b -= s;
-
- return b;
-}
-
-/* mask off an interrupt -- 1 is enable, 0 is disable */
-static inline void mask_mv64340_irq(unsigned int irq)
-{
- uint32_t value;
-
- if (irq < (irq_base + 32)) {
- value = MV_READ(MV64340_INTERRUPT0_MASK_0_LOW);
- value &= ~(1 << (irq - irq_base));
- MV_WRITE(MV64340_INTERRUPT0_MASK_0_LOW, value);
- } else {
- value = MV_READ(MV64340_INTERRUPT0_MASK_0_HIGH);
- value &= ~(1 << (irq - irq_base - 32));
- MV_WRITE(MV64340_INTERRUPT0_MASK_0_HIGH, value);
- }
-}
-
-/* unmask an interrupt -- 1 is enable, 0 is disable */
-static inline void unmask_mv64340_irq(unsigned int irq)
-{
- uint32_t value;
-
- if (irq < (irq_base + 32)) {
- value = MV_READ(MV64340_INTERRUPT0_MASK_0_LOW);
- value |= 1 << (irq - irq_base);
- MV_WRITE(MV64340_INTERRUPT0_MASK_0_LOW, value);
- } else {
- value = MV_READ(MV64340_INTERRUPT0_MASK_0_HIGH);
- value |= 1 << (irq - irq_base - 32);
- MV_WRITE(MV64340_INTERRUPT0_MASK_0_HIGH, value);
- }
-}
-
-/*
- * Interrupt handler for interrupts coming from the Marvell chip.
- * It could be built in ethernet ports etc...
- */
-void ll_mv64340_irq(void)
-{
- unsigned int irq_src_low, irq_src_high;
- unsigned int irq_mask_low, irq_mask_high;
-
- /* read the interrupt status registers */
- irq_mask_low = MV_READ(MV64340_INTERRUPT0_MASK_0_LOW);
- irq_mask_high = MV_READ(MV64340_INTERRUPT0_MASK_0_HIGH);
- irq_src_low = MV_READ(MV64340_MAIN_INTERRUPT_CAUSE_LOW);
- irq_src_high = MV_READ(MV64340_MAIN_INTERRUPT_CAUSE_HIGH);
-
- /* mask for just the interrupts we want */
- irq_src_low &= irq_mask_low;
- irq_src_high &= irq_mask_high;
-
- if (irq_src_low)
- do_IRQ(ls1bit32(irq_src_low) + irq_base);
- else
- do_IRQ(ls1bit32(irq_src_high) + irq_base + 32);
-}
-
-struct irq_chip mv64340_irq_type = {
- .name = "MV-64340",
- .ack = mask_mv64340_irq,
- .mask = mask_mv64340_irq,
- .mask_ack = mask_mv64340_irq,
- .unmask = unmask_mv64340_irq,
-};
-
-void __init mv64340_irq_init(unsigned int base)
-{
- int i;
-
- for (i = base; i < base + 64; i++)
- set_irq_chip_and_handler(i, &mv64340_irq_type,
- handle_level_irq);
-
- irq_base = base;
-}
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
new file mode 100644
index 00000000000..ede5d73d652
--- /dev/null
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -0,0 +1,176 @@
+/*
+ * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
+ * Copyright (C) 2005 Mips Technologies, Inc
+ */
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/security.h>
+#include <linux/types.h>
+#include <asm/uaccess.h>
+
+/*
+ * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
+ */
+cpumask_t mt_fpu_cpumask;
+
+static int fpaff_threshold = -1;
+unsigned long mt_fpemul_threshold = 0;
+
+/*
+ * Replacement functions for the sys_sched_setaffinity() and
+ * sys_sched_getaffinity() system calls, so that we can integrate
+ * FPU affinity with the user's requested processor affinity.
+ * This code is 98% identical with the sys_sched_setaffinity()
+ * and sys_sched_getaffinity() system calls, and should be
+ * updated when kernel/sched.c changes.
+ */
+
+/*
+ * find_process_by_pid - find a process with a matching PID value.
+ * used in sys_sched_set/getaffinity() in kernel/sched.c, so
+ * cloned here.
+ */
+static inline struct task_struct *find_process_by_pid(pid_t pid)
+{
+ return pid ? find_task_by_pid(pid) : current;
+}
+
+
+/*
+ * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
+ */
+asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
+ unsigned long __user *user_mask_ptr)
+{
+ cpumask_t new_mask;
+ cpumask_t effective_mask;
+ int retval;
+ struct task_struct *p;
+
+ if (len < sizeof(new_mask))
+ return -EINVAL;
+
+ if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
+ return -EFAULT;
+
+ lock_cpu_hotplug();
+ read_lock(&tasklist_lock);
+
+ p = find_process_by_pid(pid);
+ if (!p) {
+ read_unlock(&tasklist_lock);
+ unlock_cpu_hotplug();
+ return -ESRCH;
+ }
+
+ /*
+ * It is not safe to call set_cpus_allowed with the
+ * tasklist_lock held. We will bump the task_struct's
+ * usage count and drop tasklist_lock before invoking
+ * set_cpus_allowed.
+ */
+ get_task_struct(p);
+
+ retval = -EPERM;
+ if ((current->euid != p->euid) && (current->euid != p->uid) &&
+ !capable(CAP_SYS_NICE)) {
+ read_unlock(&tasklist_lock);
+ goto out_unlock;
+ }
+
+ retval = security_task_setscheduler(p, 0, NULL);
+ if (retval)
+ goto out_unlock;
+
+ /* Record new user-specified CPU set for future reference */
+ p->thread.user_cpus_allowed = new_mask;
+
+ /* Unlock the task list */
+ read_unlock(&tasklist_lock);
+
+ /* Compute new global allowed CPU set if necessary */
+ if ((p->thread.mflags & MF_FPUBOUND)
+ && cpus_intersects(new_mask, mt_fpu_cpumask)) {
+ cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
+ retval = set_cpus_allowed(p, effective_mask);
+ } else {
+ p->thread.mflags &= ~MF_FPUBOUND;
+ retval = set_cpus_allowed(p, new_mask);
+ }
+
+
+out_unlock:
+ put_task_struct(p);
+ unlock_cpu_hotplug();
+ return retval;
+}
+
+/*
+ * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
+ */
+asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
+ unsigned long __user *user_mask_ptr)
+{
+ unsigned int real_len;
+ cpumask_t mask;
+ int retval;
+ struct task_struct *p;
+
+ real_len = sizeof(mask);
+ if (len < real_len)
+ return -EINVAL;
+
+ lock_cpu_hotplug();
+ read_lock(&tasklist_lock);
+
+ retval = -ESRCH;
+ p = find_process_by_pid(pid);
+ if (!p)
+ goto out_unlock;
+ retval = security_task_getscheduler(p);
+ if (retval)
+ goto out_unlock;
+
+ cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
+
+out_unlock:
+ read_unlock(&tasklist_lock);
+ unlock_cpu_hotplug();
+ if (retval)
+ return retval;
+ if (copy_to_user(user_mask_ptr, &mask, real_len))
+ return -EFAULT;
+ return real_len;
+}
+
+
+static int __init fpaff_thresh(char *str)
+{
+ get_option(&str, &fpaff_threshold);
+ return 1;
+}
+__setup("fpaff=", fpaff_thresh);
+
+/*
+ * FPU Use Factor empirically derived from experiments on 34K
+ */
+#define FPUSEFACTOR 333
+
+static __init int mt_fp_affinity_init(void)
+{
+ if (fpaff_threshold >= 0) {
+ mt_fpemul_threshold = fpaff_threshold;
+ } else {
+ mt_fpemul_threshold =
+ (FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
+ }
+ printk(KERN_DEBUG "FPU Affinity set after %ld emulations\n",
+ mt_fpemul_threshold);
+
+ return 0;
+}
+arch_initcall(mt_fp_affinity_init);
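For a sense of scale, the default threshold computed in mt_fp_affinity_init() can be read as FPUSEFACTOR * BogoMIPS / HZ, since loops_per_jiffy/(500000/HZ) is the same expression the boot-time delay calibration uses for its integer BogoMIPS figure. As a worked example with assumed numbers (not measurements from this patch): a 34K core reporting about 300 BogoMIPS with HZ=100 gives 333 * 300 / 100 = 999, i.e. roughly a thousand emulated FP instructions before a task gets bound to the FPU-capable CPU set.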
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index ba01800b601..1a7d8923129 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -6,7 +6,6 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/security.h>
@@ -23,149 +22,6 @@
#include <asm/cacheflush.h>
/*
- * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
- */
-
-cpumask_t mt_fpu_cpumask;
-
-#ifdef CONFIG_MIPS_MT_FPAFF
-
-#include <linux/cpu.h>
-#include <linux/delay.h>
-#include <asm/uaccess.h>
-
-unsigned long mt_fpemul_threshold = 0;
-
-/*
- * Replacement functions for the sys_sched_setaffinity() and
- * sys_sched_getaffinity() system calls, so that we can integrate
- * FPU affinity with the user's requested processor affinity.
- * This code is 98% identical with the sys_sched_setaffinity()
- * and sys_sched_getaffinity() system calls, and should be
- * updated when kernel/sched.c changes.
- */
-
-/*
- * find_process_by_pid - find a process with a matching PID value.
- * used in sys_sched_set/getaffinity() in kernel/sched.c, so
- * cloned here.
- */
-static inline struct task_struct *find_process_by_pid(pid_t pid)
-{
- return pid ? find_task_by_pid(pid) : current;
-}
-
-
-/*
- * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
- */
-asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
- unsigned long __user *user_mask_ptr)
-{
- cpumask_t new_mask;
- cpumask_t effective_mask;
- int retval;
- struct task_struct *p;
-
- if (len < sizeof(new_mask))
- return -EINVAL;
-
- if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
- return -EFAULT;
-
- lock_cpu_hotplug();
- read_lock(&tasklist_lock);
-
- p = find_process_by_pid(pid);
- if (!p) {
- read_unlock(&tasklist_lock);
- unlock_cpu_hotplug();
- return -ESRCH;
- }
-
- /*
- * It is not safe to call set_cpus_allowed with the
- * tasklist_lock held. We will bump the task_struct's
- * usage count and drop tasklist_lock before invoking
- * set_cpus_allowed.
- */
- get_task_struct(p);
-
- retval = -EPERM;
- if ((current->euid != p->euid) && (current->euid != p->uid) &&
- !capable(CAP_SYS_NICE)) {
- read_unlock(&tasklist_lock);
- goto out_unlock;
- }
-
- retval = security_task_setscheduler(p, 0, NULL);
- if (retval)
- goto out_unlock;
-
- /* Record new user-specified CPU set for future reference */
- p->thread.user_cpus_allowed = new_mask;
-
- /* Unlock the task list */
- read_unlock(&tasklist_lock);
-
- /* Compute new global allowed CPU set if necessary */
- if( (p->thread.mflags & MF_FPUBOUND)
- && cpus_intersects(new_mask, mt_fpu_cpumask)) {
- cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
- retval = set_cpus_allowed(p, effective_mask);
- } else {
- p->thread.mflags &= ~MF_FPUBOUND;
- retval = set_cpus_allowed(p, new_mask);
- }
-
-
-out_unlock:
- put_task_struct(p);
- unlock_cpu_hotplug();
- return retval;
-}
-
-/*
- * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
- */
-asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
- unsigned long __user *user_mask_ptr)
-{
- unsigned int real_len;
- cpumask_t mask;
- int retval;
- struct task_struct *p;
-
- real_len = sizeof(mask);
- if (len < real_len)
- return -EINVAL;
-
- lock_cpu_hotplug();
- read_lock(&tasklist_lock);
-
- retval = -ESRCH;
- p = find_process_by_pid(pid);
- if (!p)
- goto out_unlock;
- retval = security_task_getscheduler(p);
- if (retval)
- goto out_unlock;
-
- cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
-
-out_unlock:
- read_unlock(&tasklist_lock);
- unlock_cpu_hotplug();
- if (retval)
- return retval;
- if (copy_to_user(user_mask_ptr, &mask, real_len))
- return -EFAULT;
- return real_len;
-}
-
-#endif /* CONFIG_MIPS_MT_FPAFF */
-
-/*
* Dump new MIPS MT state for the core. Does not leave TCs halted.
* Takes an argument which taken to be a pre-call MVPControl value.
*/
@@ -195,27 +51,31 @@ void mips_mt_regdump(unsigned long mvpctl)
nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
printk("-- per-VPE State --\n");
- for(i = 0; i < nvpe; i++) {
- for(tc = 0; tc < ntc; tc++) {
+ for (i = 0; i < nvpe; i++) {
+ for (tc = 0; tc < ntc; tc++) {
settc(tc);
- if((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
- printk(" VPE %d\n", i);
- printk(" VPEControl : %08lx\n", read_vpe_c0_vpecontrol());
- printk(" VPEConf0 : %08lx\n", read_vpe_c0_vpeconf0());
- printk(" VPE%d.Status : %08lx\n",
- i, read_vpe_c0_status());
- printk(" VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc());
- printk(" VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause());
- printk(" VPE%d.Config7 : %08lx\n",
- i, read_vpe_c0_config7());
- break; /* Next VPE */
+ if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
+ printk(" VPE %d\n", i);
+ printk(" VPEControl : %08lx\n",
+ read_vpe_c0_vpecontrol());
+ printk(" VPEConf0 : %08lx\n",
+ read_vpe_c0_vpeconf0());
+ printk(" VPE%d.Status : %08lx\n",
+ i, read_vpe_c0_status());
+ printk(" VPE%d.EPC : %08lx\n",
+ i, read_vpe_c0_epc());
+ printk(" VPE%d.Cause : %08lx\n",
+ i, read_vpe_c0_cause());
+ printk(" VPE%d.Config7 : %08lx\n",
+ i, read_vpe_c0_config7());
+ break; /* Next VPE */
+ }
}
- }
}
printk("-- per-TC State --\n");
- for(tc = 0; tc < ntc; tc++) {
+ for (tc = 0; tc < ntc; tc++) {
settc(tc);
- if(read_tc_c0_tcbind() == read_c0_tcbind()) {
+ if (read_tc_c0_tcbind() == read_c0_tcbind()) {
/* Are we dumping ourself? */
haltval = 0; /* Then we're not halted, and mustn't be */
tcstatval = flags; /* And pre-dump TCStatus is flags */
@@ -310,17 +170,6 @@ static int __init ndflush(char *s)
return 1;
}
__setup("ndflush=", ndflush);
-#ifdef CONFIG_MIPS_MT_FPAFF
-static int fpaff_threshold = -1;
-
-static int __init fpaff_thresh(char *str)
-{
- get_option(&str, &fpaff_threshold);
- return 1;
-}
-
-__setup("fpaff=", fpaff_thresh);
-#endif /* CONFIG_MIPS_MT_FPAFF */
static unsigned int itc_base = 0;
@@ -376,20 +225,6 @@ void mips_mt_set_cpuoptions(void)
if (mt_n_dflushes != 1)
printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);
-#ifdef CONFIG_MIPS_MT_FPAFF
- /* FPU Use Factor empirically derived from experiments on 34K */
-#define FPUSEFACTOR 333
-
- if (fpaff_threshold >= 0) {
- mt_fpemul_threshold = fpaff_threshold;
- } else {
- mt_fpemul_threshold =
- (FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
- }
- printk("FPU Affinity set after %ld emulations\n",
- mt_fpemul_threshold);
-#endif /* CONFIG_MIPS_MT_FPAFF */
-
if (itc_base != 0) {
/*
* Configure ITC mapping. This code is very
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/pcspeaker.c
index 475df690421..475df690421 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/pcspeaker.c
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 5ddc2e9deec..ec04f5a1a5e 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -14,7 +14,6 @@
#include <asm/cpu-features.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
-#include <asm/watch.h>
unsigned int vced_count, vcei_count;
@@ -84,6 +83,7 @@ static const char *cpu_name[] = {
[CPU_VR4181A] = "NEC VR4181A",
[CPU_SR71000] = "Sandcraft SR71000",
[CPU_PR4450] = "Philips PR4450",
+ [CPU_LOONGSON2] = "ICT Loongson-2",
};
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 6bdfb5a9fa1..8f4cf27c715 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -46,7 +46,7 @@
* power and have a low exit latency (ie sit in a loop waiting for somebody to
* say that they'd like to reschedule)
*/
-ATTRIB_NORET void cpu_idle(void)
+void __noreturn cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
@@ -213,7 +213,7 @@ int dump_task_fpu (struct task_struct *t, elf_fpregset_t *fpr)
/*
* Create a kernel thread
*/
-static ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
+static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *))
{
do_exit(fn(arg));
}
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 06729596812..d9bfae53c43 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -85,12 +85,7 @@
move $28, a2
cpu_restore_nonscratch a1
-#if (_THREAD_SIZE - 32) < 0x10000
- PTR_ADDIU t0, $28, _THREAD_SIZE - 32
-#else
- PTR_LI t0, _THREAD_SIZE - 32
- PTR_ADDU t0, $28
-#endif
+ PTR_ADDU t0, $28, _THREAD_SIZE - 32
set_saved_sp t0, t1, t2
#ifdef CONFIG_MIPS_MT_SMTC
/* Read-modify-writes of Status must be atomic on a VPE */
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 4975da0bfb6..316685fca05 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -20,6 +20,7 @@
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
+#include <linux/debugfs.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
@@ -574,3 +575,18 @@ __setup("nodsp", dsp_disable);
unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
+
+#ifdef CONFIG_DEBUG_FS
+struct dentry *mips_debugfs_dir;
+static int __init debugfs_mips(void)
+{
+ struct dentry *d;
+
+ d = debugfs_create_dir("mips", NULL);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
+ mips_debugfs_dir = d;
+ return 0;
+}
+arch_initcall(debugfs_mips);
+#endif
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index a1b017f2dbb..be7362bc2c9 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);
extern void __init calibrate_delay(void);
-extern ATTRIB_NORET void cpu_idle(void);
+extern void cpu_idle(void);
/*
* First C code run on the secondary CPUs after being started up by
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 046b03b1705..342d873b2ec 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1104,7 +1104,7 @@ void smtc_idle_loop_hook(void)
mtflags = dmt();
pdb_msg = &id_ho_db_msg[0];
im = read_c0_status();
- vpe = cpu_data[smp_processor_id()].vpe_id;
+ vpe = current_cpu_data.vpe_id;
for (bit = 0; bit < 8; bit++) {
/*
* In current prototype, I/O interrupts
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 9dd5a2df8ea..b947c61c0cc 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -272,9 +272,8 @@ asmlinkage int sys_set_thread_area(unsigned long addr)
struct thread_info *ti = task_thread_info(current);
ti->tp_value = addr;
-
- /* If some future MIPS implementation has this register in hardware,
- * we will need to update it here (and in context switches). */
+ if (cpu_has_userlocal)
+ write_c0_userlocal(addr);
return 0;
}
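Writing the TLS pointer into the UserLocal register here pairs with the MIPS_CONF3_ULRI detection added in cpu-probe.c and the extra HWREna bit enabled in traps.c below: once all three are in place, user space can fetch its thread pointer with a single rdhwr from hardware register $29 instead of making a system call. A minimal user-side sketch, assuming a MIPS32R2-aware assembler (the helper name is ours, not part of this patch):

/* Hypothetical user-space helper: read the thread pointer from UserLocal. */
static inline unsigned long read_tls_pointer(void)
{
	unsigned long tp;

	__asm__ __volatile__(
		".set	push\n\t"
		".set	mips32r2\n\t"
		"rdhwr	%0, $29\n\t"	/* hardware register 29 = UserLocal */
		".set	pop"
		: "=r" (tp));

	return tp;
}

On cores without UserLocal the rdhwr traps as a reserved instruction and is typically emulated by the kernel from the saved tp_value, so the same user code keeps working, just more slowly.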
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 3ea7863c451..5e9fa83c4ef 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -39,7 +39,6 @@
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
-#include <asm/watch.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
@@ -70,6 +69,7 @@ extern asmlinkage void handle_reserved(void);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu);
+void (*board_watchpoint_handler)(struct pt_regs *regs);
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
@@ -311,7 +311,7 @@ void show_registers(struct pt_regs *regs)
static DEFINE_SPINLOCK(die_lock);
-NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
+void __noreturn die(const char * str, struct pt_regs * regs)
{
static int die_counter;
#ifdef CONFIG_MIPS_MT_SMTC
@@ -373,7 +373,7 @@ asmlinkage void do_be(struct pt_regs *regs)
action = MIPS_BE_FIXUP;
if (board_be_handler)
- action = board_be_handler(regs, fixup != 0);
+ action = board_be_handler(regs, fixup != NULL);
switch (action) {
case MIPS_BE_DISCARD:
@@ -753,6 +753,33 @@ asmlinkage void do_ri(struct pt_regs *regs)
force_sig(SIGILL, current);
}
+/*
+ * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
+ * emulated more than some threshold number of instructions, force migration to
+ * a "CPU" that has FP support.
+ */
+static void mt_ase_fp_affinity(void)
+{
+#ifdef CONFIG_MIPS_MT_FPAFF
+ if (mt_fpemul_threshold > 0 &&
+ ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
+ /*
+ * If there's no FPU present, or if the application has already
+ * restricted the allowed set to exclude any CPUs with FPUs,
+ * we'll skip the procedure.
+ */
+ if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
+ cpumask_t tmask;
+
+ cpus_and(tmask, current->thread.user_cpus_allowed,
+ mt_fpu_cpumask);
+ set_cpus_allowed(current, tmask);
+ current->thread.mflags |= MF_FPUBOUND;
+ }
+ }
+#endif /* CONFIG_MIPS_MT_FPAFF */
+}
+
asmlinkage void do_cpu(struct pt_regs *regs)
{
unsigned int cpid;
@@ -786,36 +813,8 @@ asmlinkage void do_cpu(struct pt_regs *regs)
&current->thread.fpu, 0);
if (sig)
force_sig(sig, current);
-#ifdef CONFIG_MIPS_MT_FPAFF
- else {
- /*
- * MIPS MT processors may have fewer FPU contexts
- * than CPU threads. If we've emulated more than
- * some threshold number of instructions, force
- * migration to a "CPU" that has FP support.
- */
- if(mt_fpemul_threshold > 0
- && ((current->thread.emulated_fp++
- > mt_fpemul_threshold))) {
- /*
- * If there's no FPU present, or if the
- * application has already restricted
- * the allowed set to exclude any CPUs
- * with FPUs, we'll skip the procedure.
- */
- if (cpus_intersects(current->cpus_allowed,
- mt_fpu_cpumask)) {
- cpumask_t tmask;
-
- cpus_and(tmask,
- current->thread.user_cpus_allowed,
- mt_fpu_cpumask);
- set_cpus_allowed(current, tmask);
- current->thread.mflags |= MF_FPUBOUND;
- }
- }
- }
-#endif /* CONFIG_MIPS_MT_FPAFF */
+ else
+ mt_ase_fp_affinity();
}
return;
@@ -835,6 +834,11 @@ asmlinkage void do_mdmx(struct pt_regs *regs)
asmlinkage void do_watch(struct pt_regs *regs)
{
+ if (board_watchpoint_handler) {
+ (*board_watchpoint_handler)(regs);
+ return;
+ }
+
/*
* We use the watch exception where available to detect stack
* overflows.
@@ -1343,7 +1347,14 @@ void __init per_cpu_trap_init(void)
set_c0_status(ST0_MX);
#ifdef CONFIG_CPU_MIPSR2
- write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */
+ if (cpu_has_mips_r2) {
+ unsigned int enable = 0x0000000f;
+
+ if (cpu_has_userlocal)
+ enable |= (1 << 29);
+
+ write_c0_hwrena(enable);
+ }
#endif
#ifdef CONFIG_MIPS_MT_SMTC
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 18c4a3c45a3..8b9c34ffae1 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -77,6 +77,7 @@
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
+#include <linux/debugfs.h>
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
@@ -87,9 +88,18 @@
#define STR(x) __STR(x)
#define __STR(x) #x
-#ifdef CONFIG_PROC_FS
-unsigned long unaligned_instructions;
+enum {
+ UNALIGNED_ACTION_QUIET,
+ UNALIGNED_ACTION_SIGNAL,
+ UNALIGNED_ACTION_SHOW,
+};
+#ifdef CONFIG_DEBUG_FS
+static u32 unaligned_instructions;
+static u32 unaligned_action;
+#else
+#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
+extern void show_registers(struct pt_regs *regs);
static inline int emulate_load_store_insn(struct pt_regs *regs,
void __user *addr, unsigned int __user *pc,
@@ -459,7 +469,7 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
goto sigill;
}
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_DEBUG_FS
unaligned_instructions++;
#endif
@@ -516,6 +526,10 @@ asmlinkage void do_ade(struct pt_regs *regs)
pc = (unsigned int __user *) exception_epc(regs);
if (user_mode(regs) && (current->thread.mflags & MF_FIXADE) == 0)
goto sigbus;
+ if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
+ goto sigbus;
+ else if (unaligned_action == UNALIGNED_ACTION_SHOW)
+ show_registers(regs);
/*
* Do branch emulation only if we didn't forward the exception.
@@ -546,3 +560,24 @@ sigbus:
* XXX On return from the signal handler we should advance the epc
*/
}
+
+#ifdef CONFIG_DEBUG_FS
+extern struct dentry *mips_debugfs_dir;
+static int __init debugfs_unaligned(void)
+{
+ struct dentry *d;
+
+ if (!mips_debugfs_dir)
+ return -ENODEV;
+ d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
+ mips_debugfs_dir, &unaligned_instructions);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
+ d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
+ mips_debugfs_dir, &unaligned_action);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
+ return 0;
+}
+__initcall(debugfs_unaligned);
+#endif
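With the old CONFIG_PROC_FS counter gone, the statistics and the behaviour knob now live under the "mips" debugfs directory created in setup.c above: unaligned_instructions counts emulated accesses, and unaligned_action selects QUIET (0), SIGNAL (1) or SHOW (2) per the enum added earlier. A small sketch of reading the counter from user space, assuming debugfs is mounted at the conventional /sys/kernel/debug (the path and program are illustrative, not part of this patch):

/* Hypothetical user-space reader for the new debugfs counter. */
#include <stdio.h>

int main(void)
{
	unsigned int count;
	FILE *f = fopen("/sys/kernel/debug/mips/unaligned_instructions", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%u", &count) == 1)
		printf("unaligned instructions emulated: %u\n", count);
	fclose(f);
	return 0;
}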