Diffstat (limited to 'arch')
 arch/ia64/include/asm/sections.h       |   3
 arch/ia64/kernel/head.S                |   9
 arch/ia64/kernel/vmlinux.lds.S         |   8
 arch/ia64/mm/contig.c                  |   2
 arch/ia64/mm/discontig.c               |   2
 arch/mips/Kconfig                      |  53
 arch/mips/kernel/Makefile              |   1
 arch/mips/kernel/cevt-r4k.c            | 173
 arch/mips/kernel/cevt-smtc.c           | 321
 arch/mips/kernel/cpu-probe.c           |  10
 arch/mips/kernel/entry.S               |  10
 arch/mips/kernel/genex.S               |   4
 arch/mips/kernel/head.S                |   1
 arch/mips/kernel/mips-mt-fpaff.c       |   2
 arch/mips/kernel/process.c             |  19
 arch/mips/kernel/ptrace.c              |   2
 arch/mips/kernel/smtc.c                | 260
 arch/mips/kernel/traps.c               |   6
 arch/mips/mti-malta/Makefile           |   2
 arch/mips/mti-malta/malta-smtc.c       |   9
 arch/mips/sibyte/swarm/Makefile        |   3
 arch/mips/sibyte/swarm/platform.c      |  85
 arch/mn10300/kernel/irq.c              |  71
 arch/mn10300/unit-asb2303/unit-init.c  |   2
 arch/mn10300/unit-asb2305/unit-init.c  |   2
 arch/powerpc/boot/dts/holly.dts        | 106
 arch/powerpc/kernel/idle.c             |   6
 arch/powerpc/platforms/fsl_uli1575.c   |  12
 arch/s390/kernel/time.c                |   2
 arch/s390/lib/delay.c                  |  88
 arch/x86/boot/compressed/relocs.c      |   2
 arch/x86/kernel/acpi/boot.c            |  16
 arch/x86/kernel/cpu/mtrr/main.c        |   2
 arch/x86/kernel/kgdb.c                 |   7
 arch/x86/kernel/pci-gart_64.c          |  20
 arch/x86/kernel/vmi_32.c               |   2
 36 files changed, 866 insertions(+), 457 deletions(-)
diff --git a/arch/ia64/include/asm/sections.h b/arch/ia64/include/asm/sections.h
index f6679989103..1a873b36a4a 100644
--- a/arch/ia64/include/asm/sections.h
+++ b/arch/ia64/include/asm/sections.h
@@ -11,6 +11,9 @@
#include <asm-generic/sections.h>
extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
+#ifdef CONFIG_SMP
+extern char __cpu0_per_cpu[];
+#endif
extern char __start___vtop_patchlist[], __end___vtop_patchlist[];
extern char __start___rse_patchlist[], __end___rse_patchlist[];
extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[];
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 8bdea8eb62e..66e491d8baa 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -367,16 +367,17 @@ start_ap:
;;
#else
(isAP) br.few 2f
- mov r20=r19
- sub r19=r19,r18
+ movl r20=__cpu0_per_cpu
;;
shr.u r18=r18,3
1:
- ld8 r21=[r20],8;;
- st8[r19]=r21,8
+ ld8 r21=[r19],8;;
+ st8[r20]=r21,8
adds r18=-1,r18;;
cmp4.lt p7,p6=0,r18
(p7) br.cond.dptk.few 1b
+ mov r19=r20
+ ;;
2:
#endif
tpa r19=r19
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index de71da811cd..10a7d47e851 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -215,9 +215,6 @@ SECTIONS
/* Per-cpu data: */
percpu : { } :percpu
. = ALIGN(PERCPU_PAGE_SIZE);
-#ifdef CONFIG_SMP
- . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
-#endif
__phys_per_cpu_start = .;
.data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
{
@@ -233,6 +230,11 @@ SECTIONS
data : { } :data
.data : AT(ADDR(.data) - LOAD_OFFSET)
{
+#ifdef CONFIG_SMP
+ . = ALIGN(PERCPU_PAGE_SIZE);
+ __cpu0_per_cpu = .;
+ . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
+#endif
DATA_DATA
*(.data1)
*(.gnu.linkonce.d*)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index e566ff43884..0ee085efbe2 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -163,7 +163,7 @@ per_cpu_init (void)
* get_zeroed_page().
*/
if (first_time) {
- void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE;
+ void *cpu0_data = __cpu0_per_cpu;
first_time=0;
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 78026aabaa7..d8c5fcd89e5 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -144,7 +144,7 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
for_each_possible_early_cpu(cpu) {
if (cpu == 0) {
- void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE;
+ void *cpu0_data = __cpu0_per_cpu;
__per_cpu_offset[cpu] = (char*)cpu0_data -
__per_cpu_start;
} else if (node == node_cpuid[cpu].nid) {
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 49896a2a1d7..1e06d233fa8 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -211,6 +211,7 @@ config MIPS_MALTA
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_MIPS_CMP if BROKEN # because SYNC_R4K is broken
select SYS_SUPPORTS_MULTITHREADING
select SYS_SUPPORTS_SMARTMIPS
help
@@ -1403,7 +1404,6 @@ config MIPS_MT_SMTC
depends on CPU_MIPS32_R2
#depends on CPU_MIPS64_R2 # once there is hardware ...
depends on SYS_SUPPORTS_MULTITHREADING
- select GENERIC_CLOCKEVENTS_BROADCAST
select CPU_MIPSR2_IRQ_VI
select CPU_MIPSR2_IRQ_EI
select MIPS_MT
@@ -1451,32 +1451,17 @@ config MIPS_VPE_LOADER
Includes a loader for loading an elf relocatable object
onto another VPE and running it.
-config MIPS_MT_SMTC_INSTANT_REPLAY
- bool "Low-latency Dispatch of Deferred SMTC IPIs"
- depends on MIPS_MT_SMTC && !PREEMPT
- default y
- help
- SMTC pseudo-interrupts between TCs are deferred and queued
- if the target TC is interrupt-inhibited (IXMT). In the first
- SMTC prototypes, these queued IPIs were serviced on return
- to user mode, or on entry into the kernel idle loop. The
- INSTANT_REPLAY option dispatches them as part of local_irq_restore()
- processing, which adds runtime overhead (hence the option to turn
- it off), but ensures that IPIs are handled promptly even under
- heavy I/O interrupt load.
-
config MIPS_MT_SMTC_IM_BACKSTOP
bool "Use per-TC register bits as backstop for inhibited IM bits"
depends on MIPS_MT_SMTC
- default y
+ default n
help
To support multiple TC microthreads acting as "CPUs" within
a VPE, VPE-wide interrupt mask bits must be specially manipulated
during interrupt handling. To support legacy drivers and interrupt
controller management code, SMTC has a "backstop" to track and
if necessary restore the interrupt mask. This has some performance
- impact on interrupt service overhead. Disable it only if you know
- what you are doing.
+ impact on interrupt service overhead.
config MIPS_MT_SMTC_IRQAFF
bool "Support IRQ affinity API"
@@ -1486,10 +1471,8 @@ config MIPS_MT_SMTC_IRQAFF
Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.)
for SMTC Linux kernel. Requires platform support, of which
an example can be found in the MIPS kernel i8259 and Malta
- platform code. It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY
- be enabled if MIPS_MT_SMTC_IRQAFF is used. Adds overhead to
- interrupt dispatch, and should be used only if you know what
- you are doing.
+ platform code. Adds some overhead to interrupt dispatch, and
+ should be used only if you know what you are doing.
config MIPS_VPE_LOADER_TOM
bool "Load VPE program into memory hidden from linux"
@@ -1517,6 +1500,18 @@ config MIPS_APSP_KSPD
"exit" syscall notifying other kernel modules the SP program is
exiting. You probably want to say yes here.
+config MIPS_CMP
+ bool "MIPS CMP framework support"
+ depends on SYS_SUPPORTS_MIPS_CMP
+ select SYNC_R4K if BROKEN
+ select SYS_SUPPORTS_SMP
+ select SYS_SUPPORTS_SCHED_SMT if SMP
+ select WEAK_ORDERING
+ default n
+ help
+ This is a placeholder option for the GCMP work. It will need to
+ be handled differently...
+
config SB1_PASS_1_WORKAROUNDS
bool
depends on CPU_SB1_PASS_1
@@ -1693,6 +1688,9 @@ config SMP
config SMP_UP
bool
+config SYS_SUPPORTS_MIPS_CMP
+ bool
+
config SYS_SUPPORTS_SMP
bool
@@ -1740,17 +1738,6 @@ config NR_CPUS
performance should round up your number of processors to the next
power of two.
-config MIPS_CMP
- bool "MIPS CMP framework support"
- depends on SMP
- select SYNC_R4K
- select SYS_SUPPORTS_SCHED_SMT
- select WEAK_ORDERING
- default n
- help
- This is a placeholder option for the GCMP work. It will need to
- be handled differently...
-
source "kernel/time/Kconfig"
#
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 706f9397479..25775cb5400 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -10,6 +10,7 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
+obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o
obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 24a2d907aa0..4a4c59f2737 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -12,6 +12,14 @@
#include <asm/smtc_ipi.h>
#include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+/*
+ * The SMTC Kernel for the 34K, 1004K, et. al. replaces several
+ * of these routines with SMTC-specific variants.
+ */
+
+#ifndef CONFIG_MIPS_MT_SMTC
static int mips_next_event(unsigned long delta,
struct clock_event_device *evt)
@@ -19,60 +27,27 @@ static int mips_next_event(unsigned long delta,
unsigned int cnt;
int res;
-#ifdef CONFIG_MIPS_MT_SMTC
- {
- unsigned long flags, vpflags;
- local_irq_save(flags);
- vpflags = dvpe();
-#endif
cnt = read_c0_count();
cnt += delta;
write_c0_compare(cnt);
res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
-#ifdef CONFIG_MIPS_MT_SMTC
- evpe(vpflags);
- local_irq_restore(flags);
- }
-#endif
return res;
}
-static void mips_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+void mips_set_clock_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
{
/* Nothing to do ... */
}
-static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
-static int cp0_timer_irq_installed;
+DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+int cp0_timer_irq_installed;
-/*
- * Timer ack for an R4k-compatible timer of a known frequency.
- */
-static void c0_timer_ack(void)
-{
- write_c0_compare(read_c0_compare());
-}
+#ifndef CONFIG_MIPS_MT_SMTC
-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq(int r2)
-{
- /*
- * The performance counter overflow interrupt may be shared with the
- * timer interrupt (cp0_perfcount_irq < 0). If it is and a
- * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
- * and we can't reliably determine if a counter interrupt has also
- * happened (!r2) then don't check for a timer interrupt.
- */
- return (cp0_perfcount_irq < 0) &&
- perf_irq() == IRQ_HANDLED &&
- !r2;
-}
-
-static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
const int r2 = cpu_has_mips_r2;
struct clock_event_device *cd;
@@ -93,12 +68,8 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
* interrupt. Being the paranoiacs we are we check anyway.
*/
if (!r2 || (read_c0_cause() & (1 << 30))) {
- c0_timer_ack();
-#ifdef CONFIG_MIPS_MT_SMTC
- if (cpu_data[cpu].vpe_id)
- goto out;
- cpu = 0;
-#endif
+ /* Clear Count/Compare Interrupt */
+ write_c0_compare(read_c0_compare());
cd = &per_cpu(mips_clockevent_device, cpu);
cd->event_handler(cd);
}
@@ -107,65 +78,16 @@ out:
return IRQ_HANDLED;
}
-static struct irqaction c0_compare_irqaction = {
+#endif /* Not CONFIG_MIPS_MT_SMTC */
+
+struct irqaction c0_compare_irqaction = {
.handler = c0_compare_interrupt,
-#ifdef CONFIG_MIPS_MT_SMTC
- .flags = IRQF_DISABLED,
-#else
.flags = IRQF_DISABLED | IRQF_PERCPU,
-#endif
.name = "timer",
};
-#ifdef CONFIG_MIPS_MT_SMTC
-DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
-
-static void smtc_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
-}
-
-static void mips_broadcast(cpumask_t mask)
-{
- unsigned int cpu;
-
- for_each_cpu_mask(cpu, mask)
- smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
-}
-
-static void setup_smtc_dummy_clockevent_device(void)
-{
- //uint64_t mips_freq = mips_hpt_^frequency;
- unsigned int cpu = smp_processor_id();
- struct clock_event_device *cd;
- cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
-
- cd->name = "SMTC";
- cd->features = CLOCK_EVT_FEAT_DUMMY;
-
- /* Calculate the min / max delta */
- cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
- cd->shift = 0; //32;
- cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd);
- cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd);
-
- cd->rating = 200;
- cd->irq = 17; //-1;
-// if (cpu)
-// cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
-// else
- cd->cpumask = cpumask_of_cpu(cpu);
-
- cd->set_mode = smtc_set_mode;
-
- cd->broadcast = mips_broadcast;
-
- clockevents_register_device(cd);
-}
-#endif
-
-static void mips_event_handler(struct clock_event_device *dev)
+void mips_event_handler(struct clock_event_device *dev)
{
}
@@ -177,7 +99,23 @@ static int c0_compare_int_pending(void)
return (read_c0_cause() >> cp0_compare_irq) & 0x100;
}
-static int c0_compare_int_usable(void)
+/*
+ * Compare interrupt can be routed and latched outside the core,
+ * so a single execution hazard barrier may not be enough to give
+ * it time to clear as seen in the Cause register. 4 times the
+ * pipeline depth seems reasonably conservative, and empirically
+ * works better in configurations with high CPU/bus clock ratios.
+ */
+
+#define compare_change_hazard() \
+ do { \
+ irq_disable_hazard(); \
+ irq_disable_hazard(); \
+ irq_disable_hazard(); \
+ irq_disable_hazard(); \
+ } while (0)
+
+int c0_compare_int_usable(void)
{
unsigned int delta;
unsigned int cnt;
@@ -187,7 +125,7 @@ static int c0_compare_int_usable(void)
*/
if (c0_compare_int_pending()) {
write_c0_compare(read_c0_count());
- irq_disable_hazard();
+ compare_change_hazard();
if (c0_compare_int_pending())
return 0;
}
@@ -196,7 +134,7 @@ static int c0_compare_int_usable(void)
cnt = read_c0_count();
cnt += delta;
write_c0_compare(cnt);
- irq_disable_hazard();
+ compare_change_hazard();
if ((int)(read_c0_count() - cnt) < 0)
break;
/* increase delta if the timer was already expired */
@@ -205,11 +143,12 @@ static int c0_compare_int_usable(void)
while ((int)(read_c0_count() - cnt) <= 0)
; /* Wait for expiry */
+ compare_change_hazard();
if (!c0_compare_int_pending())
return 0;
write_c0_compare(read_c0_count());
- irq_disable_hazard();
+ compare_change_hazard();
if (c0_compare_int_pending())
return 0;
@@ -219,6 +158,8 @@ static int c0_compare_int_usable(void)
return 1;
}
+#ifndef CONFIG_MIPS_MT_SMTC
+
int __cpuinit mips_clockevent_init(void)
{
uint64_t mips_freq = mips_hpt_frequency;
@@ -229,17 +170,6 @@ int __cpuinit mips_clockevent_init(void)
if (!cpu_has_counter || !mips_hpt_frequency)
return -ENXIO;
-#ifdef CONFIG_MIPS_MT_SMTC
- setup_smtc_dummy_clockevent_device();
-
- /*
- * On SMTC we only register VPE0's compare interrupt as clockevent
- * device.
- */
- if (cpu)
- return 0;
-#endif
-
if (!c0_compare_int_usable())
return -ENXIO;
@@ -265,13 +195,9 @@ int __cpuinit mips_clockevent_init(void)
cd->rating = 300;
cd->irq = irq;
-#ifdef CONFIG_MIPS_MT_SMTC
- cd->cpumask = CPU_MASK_ALL;
-#else
cd->cpumask = cpumask_of_cpu(cpu);
-#endif
cd->set_next_event = mips_next_event;
- cd->set_mode = mips_set_mode;
+ cd->set_mode = mips_set_clock_mode;
cd->event_handler = mips_event_handler;
clockevents_register_device(cd);
@@ -281,12 +207,9 @@ int __cpuinit mips_clockevent_init(void)
cp0_timer_irq_installed = 1;
-#ifdef CONFIG_MIPS_MT_SMTC
-#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
- setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
-#else
setup_irq(irq, &c0_compare_irqaction);
-#endif
return 0;
}
+
+#endif /* Not CONFIG_MIPS_MT_SMTC */
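
Note: the compare_change_hazard() comment above is the crux of this hunk. After Compare is rewritten, the pending bit in Cause can take several cycles to clear, so re-reading it immediately can misreport a perfectly good timer as stuck. Below is a rough userspace simulation of that timing; every name and the 4-cycle settle delay are invented stand-ins for illustration, not the real coprocessor-0 accessors.

#include <stdio.h>
#include <stdbool.h>

static unsigned int count, compare;
static bool pending;
static int settle;                /* cycles until the latch reflects the Compare write */

static void tick(void)            /* one simulated CPU cycle */
{
	count++;
	if (settle > 0 && --settle == 0)
		pending = false;  /* the "ack" write is finally visible in Cause */
	if (count == compare)
		pending = true;   /* timer fires when Count reaches Compare */
}

static void write_compare(unsigned int v)
{
	compare = v;
	settle = 4;               /* clearing the latch takes a few cycles */
}

int main(void)
{
	pending = true;           /* assume a stale interrupt is latched */
	write_compare(count);     /* "ack" it, as c0_compare_int_usable() does */

	if (pending)
		printf("checked immediately: looks stuck (false alarm)\n");

	for (int i = 0; i < 4; i++)   /* analogue of compare_change_hazard() */
		tick();

	printf("after settling: pending=%d (interrupt is usable)\n", pending);
	return 0;
}
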
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
new file mode 100644
index 00000000000..5162fe4b595
--- /dev/null
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -0,0 +1,321 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+
+#include <asm/smtc_ipi.h>
+#include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+/*
+ * Variant clock event timer support for SMTC on MIPS 34K, 1004K
+ * or other MIPS MT cores.
+ *
+ * Notes on SMTC Support:
+ *
+ * SMTC has multiple microthread TCs pretending to be Linux CPUs.
+ * But there's only one Count/Compare pair per VPE, and Compare
+ * interrupts are taken opportunistically by available TCs
+ * bound to the VPE with the Count register. The new timer
+ * framework provides for global broadcasts, but we really
+ * want VPE-level multicasts for best behavior. So instead
+ * of invoking the high-level clock-event broadcast code,
+ * this version of SMTC support uses the historical SMTC
+ * multicast mechanisms "under the hood", appearing to the
+ * generic clock layer as if the interrupts are per-CPU.
+ *
+ * The approach taken here is to maintain a set of NR_CPUS
+ * virtual timers, and track which "CPU" needs to be alerted
+ * at each event.
+ *
+ * It's unlikely that we'll see a MIPS MT core with more than
+ * 2 VPEs, but we *know* that we won't need to handle more
+ * VPEs than we have "CPUs". So NR_CPUS arrays of NR_CPUS elements
+ * are always going to be overkill, but always going to be enough.
+ */
+
+unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
+static int smtc_nextinvpe[NR_CPUS];
+
+/*
+ * Timestamps stored are absolute values to be programmed
+ * into Count register. Valid timestamps will never be zero.
+ * If a Zero Count value is actually calculated, it is converted
+ * to be a 1, which will introduce one or two CPU cycles of error
+ * roughly once every four billion events, which at 1000 HZ means
+ * about once every 50 days. If that's actually a problem, one
+ * could alternate squashing 0 to 1 and to -1.
+ */
+
+#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
+#define ISVALID(x) ((x) != 0L)
+
+/*
+ * Time comparison is subtle, as it's really truncated
+ * modular arithmetic.
+ */
+
+#define IS_SOONER(a, b, reference) \
+ (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
+
+/*
+ * CATCHUP_INCREMENT, used when the programmed Compare value falls behind
+ * the Count register. Could be an increasing function instead of a constant.
+ */
+
+#define CATCHUP_INCREMENT 64
+
+static int mips_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ unsigned long flags;
+ unsigned int mtflags;
+ unsigned long timestamp, reference, previous;
+ unsigned long nextcomp = 0L;
+ int vpe = current_cpu_data.vpe_id;
+ int cpu = smp_processor_id();
+ local_irq_save(flags);
+ mtflags = dmt();
+
+ /*
+ * Maintain the per-TC virtual timer
+ * and program the per-VPE shared Count register
+ * as appropriate here...
+ */
+ reference = (unsigned long)read_c0_count();
+ timestamp = MAKEVALID(reference + delta);
+ /*
+ * To really model the clock, we have to catch the case
+ * where the current next-in-VPE timestamp is the old
+ * timestamp for the calling CPE, but the new value is
+ * in fact later. In that case, we have to do a full
+ * scan and discover the new next-in-VPE CPU id and
+ * timestamp.
+ */
+ previous = smtc_nexttime[vpe][cpu];
+ if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
+ && IS_SOONER(previous, timestamp, reference)) {
+ int i;
+ int soonest = cpu;
+
+ /*
+ * Update timestamp array here, so that new
+ * value gets considered along with those of
+ * other virtual CPUs on the VPE.
+ */
+ smtc_nexttime[vpe][cpu] = timestamp;
+ for_each_online_cpu(i) {
+ if (ISVALID(smtc_nexttime[vpe][i])
+ && IS_SOONER(smtc_nexttime[vpe][i],
+ smtc_nexttime[vpe][soonest], reference)) {
+ soonest = i;
+ }
+ }
+ smtc_nextinvpe[vpe] = soonest;
+ nextcomp = smtc_nexttime[vpe][soonest];
+ /*
+ * Otherwise, we don't have to process the whole array rank,
+ * we just have to see if the event horizon has gotten closer.
+ */
+ } else {
+ if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
+ IS_SOONER(timestamp,
+ smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
+ smtc_nextinvpe[vpe] = cpu;
+ nextcomp = timestamp;
+ }
+ /*
+ * Since next-in-VPE may be the same as the executing
+ * virtual CPU, we update the array *after* checking
+ * its value.
+ */
+ smtc_nexttime[vpe][cpu] = timestamp;
+ }
+
+ /*
+ * It may be that, in fact, we don't need to update Compare,
+ * but if we do, we want to make sure we didn't fall into
+ * a crack just behind Count.
+ */
+ if (ISVALID(nextcomp)) {
+ write_c0_compare(nextcomp);
+ ehb();
+ /*
+ * We never return an error, we just make sure
+ * that we trigger the handlers as quickly as
+ * we can if we fell behind.
+ */
+ while ((nextcomp - (unsigned long)read_c0_count())
+ > (unsigned long)LONG_MAX) {
+ nextcomp += CATCHUP_INCREMENT;
+ write_c0_compare(nextcomp);
+ ehb();
+ }
+ }
+ emt(mtflags);
+ local_irq_restore(flags);
+ return 0;
+}
+
+
+void smtc_distribute_timer(int vpe)
+{
+ unsigned long flags;
+ unsigned int mtflags;
+ int cpu;
+ struct clock_event_device *cd;
+ unsigned long nextstamp = 0L;
+ unsigned long reference;
+
+
+repeat:
+ for_each_online_cpu(cpu) {
+ /*
+ * Find virtual CPUs within the current VPE who have
+ * unserviced timer requests whose time is now past.
+ */
+ local_irq_save(flags);
+ mtflags = dmt();
+ if (cpu_data[cpu].vpe_id == vpe &&
+ ISVALID(smtc_nexttime[vpe][cpu])) {
+ reference = (unsigned long)read_c0_count();
+ if ((smtc_nexttime[vpe][cpu] - reference)
+ > (unsigned long)LONG_MAX) {
+ smtc_nexttime[vpe][cpu] = 0L;
+ emt(mtflags);
+ local_irq_restore(flags);
+ /*
+ * We don't send IPIs to ourself.
+ */
+ if (cpu != smp_processor_id()) {
+ smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
+ } else {
+ cd = &per_cpu(mips_clockevent_device, cpu);
+ cd->event_handler(cd);
+ }
+ } else {
+ /* Local to VPE but Valid Time not yet reached. */
+ if (!ISVALID(nextstamp) ||
+ IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
+ reference)) {
+ smtc_nextinvpe[vpe] = cpu;
+ nextstamp = smtc_nexttime[vpe][cpu];
+ }
+ emt(mtflags);
+ local_irq_restore(flags);
+ }
+ } else {
+ emt(mtflags);
+ local_irq_restore(flags);
+
+ }
+ }
+ /* Reprogram for interrupt at next soonest timestamp for VPE */
+ if (ISVALID(nextstamp)) {
+ write_c0_compare(nextstamp);
+ ehb();
+ if ((nextstamp - (unsigned long)read_c0_count())
+ > (unsigned long)LONG_MAX)
+ goto repeat;
+ }
+}
+
+
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+{
+ int cpu = smp_processor_id();
+
+ /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
+ handle_perf_irq(1);
+
+ if (read_c0_cause() & (1 << 30)) {
+ /* Clear Count/Compare Interrupt */
+ write_c0_compare(read_c0_compare());
+ smtc_distribute_timer(cpu_data[cpu].vpe_id);
+ }
+ return IRQ_HANDLED;
+}
+
+
+int __cpuinit mips_clockevent_init(void)
+{
+ uint64_t mips_freq = mips_hpt_frequency;
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+ unsigned int irq;
+ int i;
+ int j;
+
+ if (!cpu_has_counter || !mips_hpt_frequency)
+ return -ENXIO;
+ if (cpu == 0) {
+ for (i = 0; i < num_possible_cpus(); i++) {
+ smtc_nextinvpe[i] = 0;
+ for (j = 0; j < num_possible_cpus(); j++)
+ smtc_nexttime[i][j] = 0L;
+ }
+ /*
+ * SMTC also can't have the usability test
+ * run by secondary TCs once Compare is in use.
+ */
+ if (!c0_compare_int_usable())
+ return -ENXIO;
+ }
+
+ /*
+ * With vectored interrupts things are getting platform specific.
+ * get_c0_compare_int is a hook to allow a platform to return the
+ * interrupt number of its liking.
+ */
+ irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+ if (get_c0_compare_int)
+ irq = get_c0_compare_int();
+
+ cd = &per_cpu(mips_clockevent_device, cpu);
+
+ cd->name = "MIPS";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT;
+
+ /* Calculate the min / max delta */
+ cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+ cd->shift = 32;
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+
+ cd->rating = 300;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of_cpu(cpu);
+ cd->set_next_event = mips_next_event;
+ cd->set_mode = mips_set_clock_mode;
+ cd->event_handler = mips_event_handler;
+
+ clockevents_register_device(cd);
+
+ /*
+ * On SMTC we only want to do the data structure
+ * initialization and IRQ setup once.
+ */
+ if (cpu)
+ return 0;
+ /*
+ * And we need the hwmask associated with the c0_compare
+ * vector to be initialized.
+ */
+ irq_hwmask[irq] = (0x100 << cp0_compare_irq);
+ if (cp0_timer_irq_installed)
+ return 0;
+
+ cp0_timer_irq_installed = 1;
+
+ setup_irq(irq, &c0_compare_irqaction);
+
+ return 0;
+}
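
Note: two details of the new cevt-smtc.c bookkeeping are easy to miss. A timestamp of zero is reserved to mean "no event pending" (MAKEVALID squashes a computed zero to one), and "sooner" is judged as an unsigned distance from the current Count, so a 32-bit wraparound between two timestamps cannot invert their order. A minimal standalone sketch of just that selection logic follows; the values are made up and uint32_t stands in for the 32-bit Count/Compare registers.

#include <stdio.h>
#include <stdint.h>

/* Same idea as MAKEVALID/ISVALID/IS_SOONER in cevt-smtc.c, rewritten against
 * uint32_t so the 32-bit wraparound is explicit in userspace. */
#define MAKEVALID(x)	(((x) == 0u) ? 1u : (x))
#define ISVALID(x)	((x) != 0u)
#define IS_SOONER(a, b, ref)	((uint32_t)((a) - (ref)) < (uint32_t)((b) - (ref)))

int main(void)
{
	uint32_t reference = 0xfffffff0u;                       /* Count about to wrap */
	uint32_t next[4] = { 0, 0xfffffff8u, 0x00000010u, 0 };  /* 0 == no event */
	int soonest = -1;

	for (int cpu = 0; cpu < 4; cpu++) {
		if (!ISVALID(next[cpu]))
			continue;
		if (soonest < 0 || IS_SOONER(next[cpu], next[soonest], reference))
			soonest = cpu;
	}

	/* cpu 1 is 8 ticks away; cpu 2 is 0x20 ticks away even though its raw
	 * timestamp is numerically smaller, so the modular compare picks cpu 1. */
	printf("soonest = cpu %d, program Compare = %#x\n",
	       soonest, (unsigned)MAKEVALID(next[soonest]));
	return 0;
}
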
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 11c92dc5379..e621fda8ab3 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -54,14 +54,18 @@ extern void r4k_wait(void);
* interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
* using this version a gamble.
*/
-static void r4k_wait_irqoff(void)
+void r4k_wait_irqoff(void)
{
local_irq_disable();
if (!need_resched())
- __asm__(" .set mips3 \n"
+ __asm__(" .set push \n"
+ " .set mips3 \n"
" wait \n"
- " .set mips0 \n");
+ " .set pop \n");
local_irq_enable();
+ __asm__(" .globl __pastwait \n"
+ "__pastwait: \n");
+ return;
}
/*
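
Note: the __pastwait label added here only pays off in the smtc_send_ipi() change further down. If a halted TC's saved restart PC lies between r4k_wait_irqoff and __pastwait, the sender knows the target is idling with interrupts masked and can bump it past the wait instead of queueing the IPI. A toy sketch of that decision follows; the addresses are invented stand-ins for the real symbols.

#include <stdio.h>

/* Invented addresses standing in for the start of r4k_wait_irqoff() and the
 * __pastwait label that this patch places right after the wait sequence. */
static const unsigned long wait_start = 0x80102000UL;
static const unsigned long past_wait  = 0x80102020UL;

/* Decide what smtc_send_ipi() should do with a halted, IXMT-inhibited TC. */
static void dispatch_ipi(unsigned long tcrestart)
{
	if (tcrestart >= wait_start && tcrestart < past_wait)
		/* TC is parked in the irq-off wait: bump its restart PC past
		 * the wait, clear IXMT, and post the IPI directly. */
		printf("PC %#lx inside wait window -> redirect to %#lx\n",
		       tcrestart, past_wait);
	else
		/* TC is doing real work with interrupts masked: queue the IPI
		 * for it to replay on its next local_irq_restore(). */
		printf("PC %#lx outside wait window -> enqueue for replay\n",
		       tcrestart);
}

int main(void)
{
	dispatch_ipi(0x80102010UL);   /* halted inside the wait loop */
	dispatch_ipi(0x801ffc00UL);   /* halted elsewhere in the kernel */
	return 0;
}
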
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index e29598ae939..ffa331029e0 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -79,11 +79,6 @@ FEXPORT(syscall_exit)
FEXPORT(restore_all) # restore full frame
#ifdef CONFIG_MIPS_MT_SMTC
-/* Detect and execute deferred IPI "interrupts" */
- LONG_L s0, TI_REGS($28)
- LONG_S sp, TI_REGS($28)
- jal deferred_smtc_ipi
- LONG_S s0, TI_REGS($28)
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
/* Re-arm any temporarily masked interrupts not explicitly "acked" */
mfc0 v0, CP0_TCSTATUS
@@ -112,6 +107,11 @@ FEXPORT(restore_all) # restore full frame
xor t0, t0, t3
mtc0 t0, CP0_TCCONTEXT
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
+/* Detect and execute deferred IPI "interrupts" */
+ LONG_L s0, TI_REGS($28)
+ LONG_S sp, TI_REGS($28)
+ jal deferred_smtc_ipi
+ LONG_S s0, TI_REGS($28)
#endif /* CONFIG_MIPS_MT_SMTC */
.set noat
RESTORE_TEMP
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index f886dd7f708..01dcbe38fa0 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -282,8 +282,8 @@ NESTED(except_vec_vi_handler, 0, sp)
and t0, a0, t1
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
mfc0 t2, CP0_TCCONTEXT
- or t0, t0, t2
- mtc0 t0, CP0_TCCONTEXT
+ or t2, t0, t2
+ mtc0 t2, CP0_TCCONTEXT
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
xor t1, t1, t0
mtc0 t1, CP0_STATUS
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 361364501d3..492a0a8d70f 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -22,6 +22,7 @@
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/page.h>
+#include <asm/pgtable-bits.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index df4d3f2f740..dc9eb72ed9d 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -159,7 +159,7 @@ __setup("fpaff=", fpaff_thresh);
/*
* FPU Use Factor empirically derived from experiments on 34K
*/
-#define FPUSEFACTOR 333
+#define FPUSEFACTOR 2000
static __init int mt_fp_affinity_init(void)
{
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index ce7684335a4..22fc19bbe87 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -55,7 +55,7 @@ void __noreturn cpu_idle(void)
while (1) {
tick_nohz_stop_sched_tick(1);
while (!need_resched()) {
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_MIPS_MT_SMTC
extern void smtc_idle_loop_hook(void);
smtc_idle_loop_hook();
@@ -145,19 +145,18 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
*/
p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
+
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC restores TCStatus after Status, and the CU bits
+ * are aliased there.
+ */
+ childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
+#endif
clear_tsk_thread_flag(p, TIF_USEDFPU);
#ifdef CONFIG_MIPS_MT_FPAFF
clear_tsk_thread_flag(p, TIF_FPUBOUND);
-
- /*
- * FPU affinity support is cleaner if we track the
- * user-visible CPU affinity from the very beginning.
- * The generic cpus_allowed mask will already have
- * been copied from the parent before copy_thread
- * is invoked.
- */
- p->thread.user_cpus_allowed = p->cpus_allowed;
#endif /* CONFIG_MIPS_MT_FPAFF */
if (clone_flags & CLONE_SETTLS)
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 35234b92b9a..96ffc9c6d19 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -238,7 +238,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case FPC_EIR: { /* implementation / version register */
unsigned int flags;
#ifdef CONFIG_MIPS_MT_SMTC
- unsigned int irqflags;
+ unsigned long irqflags;
unsigned int mtflags;
#endif /* CONFIG_MIPS_MT_SMTC */
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index a516286532a..897fb2b4751 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1,4 +1,21 @@
-/* Copyright (C) 2004 Mips Technologies, Inc */
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2004 Mips Technologies, Inc
+ * Copyright (C) 2008 Kevin D. Kissell
+ */
#include <linux/clockchips.h>
#include <linux/kernel.h>
@@ -21,7 +38,6 @@
#include <asm/time.h>
#include <asm/addrspace.h>
#include <asm/smtc.h>
-#include <asm/smtc_ipi.h>
#include <asm/smtc_proc.h>
/*
@@ -58,11 +74,6 @@ unsigned long irq_hwmask[NR_IRQS];
asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
-/*
- * Clock interrupt "latch" buffers, per "CPU"
- */
-
-static atomic_t ipi_timer_latch[NR_CPUS];
/*
* Number of InterProcessor Interrupt (IPI) message buffers to allocate
@@ -70,7 +81,7 @@ static atomic_t ipi_timer_latch[NR_CPUS];
#define IPIBUF_PER_CPU 4
-static struct smtc_ipi_q IPIQ[NR_CPUS];
+struct smtc_ipi_q IPIQ[NR_CPUS];
static struct smtc_ipi_q freeIPIq;
@@ -282,7 +293,7 @@ static void smtc_configure_tlb(void)
* phys_cpu_present_map and the logical/physical mappings.
*/
-int __init mipsmt_build_cpu_map(int start_cpu_slot)
+int __init smtc_build_cpu_map(int start_cpu_slot)
{
int i, ntcs;
@@ -325,7 +336,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
write_tc_c0_tcstatus((read_tc_c0_tcstatus()
& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
| TCSTATUS_A);
- write_tc_c0_tccontext(0);
+ /*
+ * TCContext gets an offset from the base of the IPIQ array
+ * to be used in low-level code to detect the presence of
+ * an active IPI queue
+ */
+ write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
/* Bind tc to vpe */
write_tc_c0_tcbind(vpe);
/* In general, all TCs should have the same cpu_data indications */
@@ -336,10 +352,18 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
cpu_data[cpu].options &= ~MIPS_CPU_FPU;
cpu_data[cpu].vpe_id = vpe;
cpu_data[cpu].tc_id = tc;
+ /* Multi-core SMTC hasn't been tested, but be prepared */
+ cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
}
+/*
+ * Tweak to get Count registers in as close a sync as possible.
+ * Value seems good for 34K-class cores.
+ */
+
+#define CP0_SKEW 8
-void mipsmt_prepare_cpus(void)
+void smtc_prepare_cpus(int cpus)
{
int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
unsigned long flags;
@@ -363,13 +387,13 @@ void mipsmt_prepare_cpus(void)
IPIQ[i].head = IPIQ[i].tail = NULL;
spin_lock_init(&IPIQ[i].lock);
IPIQ[i].depth = 0;
- atomic_set(&ipi_timer_latch[i], 0);
}
/* cpu_data index starts at zero */
cpu = 0;
cpu_data[cpu].vpe_id = 0;
cpu_data[cpu].tc_id = 0;
+ cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
cpu++;
/* Report on boot-time options */
@@ -484,7 +508,8 @@ void mipsmt_prepare_cpus(void)
write_vpe_c0_compare(0);
/* Propagate Config7 */
write_vpe_c0_config7(read_c0_config7());
- write_vpe_c0_count(read_c0_count());
+ write_vpe_c0_count(read_c0_count() + CP0_SKEW);
+ ehb();
}
/* enable multi-threading within VPE */
write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
@@ -556,7 +581,7 @@ void mipsmt_prepare_cpus(void)
void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
{
extern u32 kernelsp[NR_CPUS];
- long flags;
+ unsigned long flags;
int mtflags;
LOCK_MT_PRA();
@@ -585,24 +610,22 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
void smtc_init_secondary(void)
{
- /*
- * Start timer on secondary VPEs if necessary.
- * plat_timer_setup has already have been invoked by init/main
- * on "boot" TC. Like per_cpu_trap_init() hack, this assumes that
- * SMTC init code assigns TCs consdecutively and in ascending order
- * to across available VPEs.
- */
- if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
- ((read_c0_tcbind() & TCBIND_CURVPE)
- != cpu_data[smp_processor_id() - 1].vpe_id)){
- write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
- }
-
local_irq_enable();
}
void smtc_smp_finish(void)
{
+ int cpu = smp_processor_id();
+
+ /*
+ * Lowest-numbered CPU per VPE starts a clock tick.
+ * Like the per_cpu_trap_init() hack, this assumes that
+ * SMTC init code assigns TCs consecutively and
+ * in ascending order across available VPEs.
+ */
+ if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
+ write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
+
printk("TC %d going on-line as CPU %d\n",
cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}
@@ -753,8 +776,10 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
{
int tcstatus;
struct smtc_ipi *pipi;
- long flags;
+ unsigned long flags;
int mtflags;
+ unsigned long tcrestart;
+ extern void r4k_wait_irqoff(void), __pastwait(void);
if (cpu == smp_processor_id()) {
printk("Cannot Send IPI to self!\n");
@@ -771,8 +796,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
pipi->arg = (void *)action;
pipi->dest = cpu;
if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
- if (type == SMTC_CLOCK_TICK)
- atomic_inc(&ipi_timer_latch[cpu]);
/* If not on same VPE, enqueue and send cross-VPE interrupt */
smtc_ipi_nq(&IPIQ[cpu], pipi);
LOCK_CORE_PRA();
@@ -800,22 +823,29 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
if ((tcstatus & TCSTATUS_IXMT) != 0) {
/*
- * Spin-waiting here can deadlock,
- * so we queue the message for the target TC.
+ * If we're in the irq-off version of the wait
+ * loop, we need to force exit from the wait and
+ * do a direct post of the IPI.
+ */
+ if (cpu_wait == r4k_wait_irqoff) {
+ tcrestart = read_tc_c0_tcrestart();
+ if (tcrestart >= (unsigned long)r4k_wait_irqoff
+ && tcrestart < (unsigned long)__pastwait) {
+ write_tc_c0_tcrestart(__pastwait);
+ tcstatus &= ~TCSTATUS_IXMT;
+ write_tc_c0_tcstatus(tcstatus);
+ goto postdirect;
+ }
+ }
+ /*
+ * Otherwise we queue the message for the target TC
+ * to pick up when it does a local_irq_restore()
*/
write_tc_c0_tchalt(0);
UNLOCK_CORE_PRA();
- /* Try to reduce redundant timer interrupt messages */
- if (type == SMTC_CLOCK_TICK) {
- if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){
- smtc_ipi_nq(&freeIPIq, pipi);
- return;
- }
- }
smtc_ipi_nq(&IPIQ[cpu], pipi);
} else {
- if (type == SMTC_CLOCK_TICK)
- atomic_inc(&ipi_timer_latch[cpu]);
+postdirect:
post_direct_ipi(cpu, pipi);
write_tc_c0_tchalt(0);
UNLOCK_CORE_PRA();
@@ -883,7 +913,7 @@ static void ipi_call_interrupt(void)
smp_call_function_interrupt();
}
-DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
+DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
void ipi_decode(struct smtc_ipi *pipi)
{
@@ -891,20 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi)
struct clock_event_device *cd;
void *arg_copy = pipi->arg;
int type_copy = pipi->type;
- int ticks;
-
smtc_ipi_nq(&freeIPIq, pipi);
switch (type_copy) {
case SMTC_CLOCK_TICK:
irq_enter();
kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
- cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
- ticks = atomic_read(&ipi_timer_latch[cpu]);
- atomic_sub(ticks, &ipi_timer_latch[cpu]);
- while (ticks) {
- cd->event_handler(cd);
- ticks--;
- }
+ cd = &per_cpu(mips_clockevent_device, cpu);
+ cd->event_handler(cd);
irq_exit();
break;
@@ -937,24 +960,48 @@ void ipi_decode(struct smtc_ipi *pipi)
}
}
+/*
+ * Similar to smtc_ipi_replay(), but invoked from context restore,
+ * so it reuses the current exception frame rather than set up a
+ * new one with self_ipi.
+ */
+
void deferred_smtc_ipi(void)
{
- struct smtc_ipi *pipi;
- unsigned long flags;
-/* DEBUG */
- int q = smp_processor_id();
+ int cpu = smp_processor_id();
/*
* Test is not atomic, but much faster than a dequeue,
* and the vast majority of invocations will have a null queue.
+ * If irqs were disabled when this was called, then any IPIs queued
+ * after our last test will be taken on the next irq_enable/restore.
+ * If interrupts were enabled, then any IPIs added after the
+ * last test will be taken directly.
*/
- if (IPIQ[q].head != NULL) {
- while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
- /* ipi_decode() should be called with interrupts off */
- local_irq_save(flags);
+
+ while (IPIQ[cpu].head != NULL) {
+ struct smtc_ipi_q *q = &IPIQ[cpu];
+ struct smtc_ipi *pipi;
+ unsigned long flags;
+
+ /*
+ * It may be possible we'll come in with interrupts
+ * already enabled.
+ */
+ local_irq_save(flags);
+
+ spin_lock(&q->lock);
+ pipi = __smtc_ipi_dq(q);
+ spin_unlock(&q->lock);
+ if (pipi != NULL)
ipi_decode(pipi);
- local_irq_restore(flags);
- }
+ /*
+ * The use of the __raw_local restore isn't
+ * as obviously necessary here as in smtc_ipi_replay(),
+ * but it's more efficient, given that we're already
+ * running down the IPI queue.
+ */
+ __raw_local_irq_restore(flags);
}
}
@@ -975,7 +1022,7 @@ static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
struct smtc_ipi *pipi;
unsigned long tcstatus;
int sent;
- long flags;
+ unsigned long flags;
unsigned int mtflags;
unsigned int vpflags;
@@ -1066,55 +1113,53 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe)
/*
* SMTC-specific hacks invoked from elsewhere in the kernel.
- *
- * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
- * called with interrupts disabled. We do rely on interrupts being disabled
- * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
- * result in a recursive call to raw_local_irq_restore().
*/
-static void __smtc_ipi_replay(void)
+ /*
+ * smtc_ipi_replay is called from raw_local_irq_restore
+ */
+
+void smtc_ipi_replay(void)
{
unsigned int cpu = smp_processor_id();
/*
* To the extent that we've ever turned interrupts off,
* we may have accumulated deferred IPIs. This is subtle.
- * If we use the smtc_ipi_qdepth() macro, we'll get an
- * exact number - but we'll also disable interrupts
- * and create a window of failure where a new IPI gets
- * queued after we test the depth but before we re-enable
- * interrupts. So long as IXMT never gets set, however,
* we should be OK: If we pick up something and dispatch
* it here, that's great. If we see nothing, but concurrent
* with this operation, another TC sends us an IPI, IXMT
* is clear, and we'll handle it as a real pseudo-interrupt
- * and not a pseudo-pseudo interrupt.
+ * and not a pseudo-pseudo interrupt. The important thing
+ * is to do the last check for queued message *after* the
+ * re-enabling of interrupts.
*/
- if (IPIQ[cpu].depth > 0) {
- while (1) {
- struct smtc_ipi_q *q = &IPIQ[cpu];
- struct smtc_ipi *pipi;
- extern void self_ipi(struct smtc_ipi *);
-
- spin_lock(&q->lock);
- pipi = __smtc_ipi_dq(q);
- spin_unlock(&q->lock);
- if (!pipi)
- break;
+ while (IPIQ[cpu].head != NULL) {
+ struct smtc_ipi_q *q = &IPIQ[cpu];
+ struct smtc_ipi *pipi;
+ unsigned long flags;
+
+ /*
+ * It's just possible we'll come in with interrupts
+ * already enabled.
+ */
+ local_irq_save(flags);
+
+ spin_lock(&q->lock);
+ pipi = __smtc_ipi_dq(q);
+ spin_unlock(&q->lock);
+ /*
+ * But use a raw restore here to avoid recursion.
+ */
+ __raw_local_irq_restore(flags);
+ if (pipi) {
self_ipi(pipi);
smtc_cpu_stats[cpu].selfipis++;
}
}
}
-void smtc_ipi_replay(void)
-{
- raw_local_irq_disable();
- __smtc_ipi_replay();
-}
-
EXPORT_SYMBOL(smtc_ipi_replay);
void smtc_idle_loop_hook(void)
@@ -1193,40 +1238,13 @@ void smtc_idle_loop_hook(void)
}
}
- /*
- * Now that we limit outstanding timer IPIs, check for hung TC
- */
- for (tc = 0; tc < NR_CPUS; tc++) {
- /* Don't check ourself - we'll dequeue IPIs just below */
- if ((tc != smp_processor_id()) &&
- atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
- if (clock_hang_reported[tc] == 0) {
- pdb_msg += sprintf(pdb_msg,
- "TC %d looks hung with timer latch at %d\n",
- tc, atomic_read(&ipi_timer_latch[tc]));
- clock_hang_reported[tc]++;
- }
- }
- }
emt(mtflags);
local_irq_restore(flags);
if (pdb_msg != &id_ho_db_msg[0])
printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
- /*
- * Replay any accumulated deferred IPIs. If "Instant Replay"
- * is in use, there should never be any.
- */
-#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
- {
- unsigned long flags;
-
- local_irq_save(flags);
- __smtc_ipi_replay();
- local_irq_restore(flags);
- }
-#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
+ smtc_ipi_replay();
}
void smtc_soft_dump(void)
@@ -1242,10 +1260,6 @@ void smtc_soft_dump(void)
printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
}
smtc_ipi_qdump();
- printk("Timer IPI Backlogs:\n");
- for (i=0; i < NR_CPUS; i++) {
- printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
- }
printk("%d Recoveries of \"stolen\" FPU\n",
atomic_read(&smtc_fpu_recoveries));
}
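
Note: the replay/deferred-IPI rework above boils down to one loop shape. Test the queue head cheaply without the lock, dequeue under the lock with interrupts saved, and restore with the raw variant so that handling the IPI cannot recurse back into the replay hook (the ordinary restore would itself call smtc_ipi_replay()); repeating the head test on each iteration, after interrupts have been restored, closes the window the old depth-based test left open. A plain-C sketch of that shape, modelled on smtc_ipi_replay(): a pthread mutex stands in for the IPIQ spinlock and the irq calls are only marked in comments.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ipi {
	struct ipi *next;
	int type;
};

static struct ipi *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct ipi *dequeue(void)
{
	pthread_mutex_lock(&lock);
	struct ipi *p = head;
	if (p)
		head = p->next;
	pthread_mutex_unlock(&lock);
	return p;
}

static void replay_queued_ipis(void)
{
	/* Test head without the lock: cheap, and almost always NULL. */
	while (head != NULL) {
		/* local_irq_save() would go here in the kernel version. */
		struct ipi *p = dequeue();
		/* __raw_local_irq_restore() here, before handling, so the
		 * handler cannot recurse into this replay path. */
		if (p) {
			printf("handling deferred IPI type %d\n", p->type);
			free(p);
		}
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {       /* queue a few fake IPIs */
		struct ipi *p = malloc(sizeof(*p));
		p->type = i;
		p->next = head;
		head = p;
	}
	replay_queued_ipis();
	return 0;
}
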
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 5fd0cd020af..b602ac6eb47 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -825,8 +825,10 @@ static void mt_ase_fp_affinity(void)
if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
cpumask_t tmask;
- cpus_and(tmask, current->thread.user_cpus_allowed,
- mt_fpu_cpumask);
+ current->thread.user_cpus_allowed
+ = current->cpus_allowed;
+ cpus_and(tmask, current->cpus_allowed,
+ mt_fpu_cpumask);
set_cpus_allowed(current, tmask);
set_thread_flag(TIF_FPUBOUND);
}
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile
index 3b7dd722c32..cef2db8d222 100644
--- a/arch/mips/mti-malta/Makefile
+++ b/arch/mips/mti-malta/Makefile
@@ -15,6 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK) += malta-console.o
obj-$(CONFIG_PCI) += malta-pci.o
# FIXME FIXME FIXME
-obj-$(CONFIG_MIPS_MT_SMTC) += malta_smtc.o
+obj-$(CONFIG_MIPS_MT_SMTC) += malta-smtc.o
EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index 5ea705e4945..f84a46a8ae6 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -84,12 +84,17 @@ static void msmtc_cpus_done(void)
static void __init msmtc_smp_setup(void)
{
- mipsmt_build_cpu_map(0);
+ /*
+ * we won't get the definitive value until
+ * we've run smtc_prepare_cpus later, but
+ * we would appear to need an upper bound now.
+ */
+ smp_num_siblings = smtc_build_cpu_map(0);
}
static void __init msmtc_prepare_cpus(unsigned int max_cpus)
{
- mipsmt_prepare_cpus();
+ smtc_prepare_cpus(max_cpus);
}
struct plat_smp_ops msmtc_smp_ops = {
diff --git a/arch/mips/sibyte/swarm/Makefile b/arch/mips/sibyte/swarm/Makefile
index f18ba9201bb..7b45f199d92 100644
--- a/arch/mips/sibyte/swarm/Makefile
+++ b/arch/mips/sibyte/swarm/Makefile
@@ -1,3 +1,4 @@
-obj-y := setup.o rtc_xicor1241.o rtc_m41t81.o
+obj-y := platform.o setup.o rtc_xicor1241.o \
+ rtc_m41t81.o
obj-$(CONFIG_I2C_BOARDINFO) += swarm-i2c.o
diff --git a/arch/mips/sibyte/swarm/platform.c b/arch/mips/sibyte/swarm/platform.c
new file mode 100644
index 00000000000..54847fe1e56
--- /dev/null
+++ b/arch/mips/sibyte/swarm/platform.c
@@ -0,0 +1,85 @@
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
+
+#include <asm/sibyte/board.h>
+#include <asm/sibyte/sb1250_genbus.h>
+#include <asm/sibyte/sb1250_regs.h>
+
+#if defined(CONFIG_SIBYTE_SWARM) || defined(CONFIG_SIBYTE_LITTLESUR)
+
+#define DRV_NAME "pata-swarm"
+
+#define SWARM_IDE_SHIFT 5
+#define SWARM_IDE_BASE 0x1f0
+#define SWARM_IDE_CTRL 0x3f6
+
+static struct resource swarm_pata_resource[] = {
+ {
+ .name = "Swarm GenBus IDE",
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "Swarm GenBus IDE",
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "Swarm GenBus IDE",
+ .flags = IORESOURCE_IRQ,
+ .start = K_INT_GB_IDE,
+ .end = K_INT_GB_IDE,
+ },
+};
+
+static struct pata_platform_info pata_platform_data = {
+ .ioport_shift = SWARM_IDE_SHIFT,
+};
+
+static struct platform_device swarm_pata_device = {
+ .name = "pata_platform",
+ .id = -1,
+ .resource = swarm_pata_resource,
+ .num_resources = ARRAY_SIZE(swarm_pata_resource),
+ .dev = {
+ .platform_data = &pata_platform_data,
+ .coherent_dma_mask = ~0, /* grumble */
+ },
+};
+
+static int __init swarm_pata_init(void)
+{
+ u8 __iomem *base;
+ phys_t offset, size;
+ struct resource *r;
+
+ if (!SIBYTE_HAVE_IDE)
+ return -ENODEV;
+
+ base = ioremap(A_IO_EXT_BASE, 0x800);
+ offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS));
+ size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS));
+ iounmap(base);
+
+ offset = G_IO_START_ADDR(offset) << S_IO_ADDRBASE;
+ size = (G_IO_MULT_SIZE(size) + 1) << S_IO_REGSIZE;
+ if (offset < A_PHYS_GENBUS || offset >= A_PHYS_GENBUS_END) {
+ pr_info(DRV_NAME ": PATA interface at GenBus disabled\n");
+
+ return -EBUSY;
+ }
+
+ pr_info(DRV_NAME ": PATA interface at GenBus slot %i\n", IDE_CS);
+
+ r = swarm_pata_resource;
+ r[0].start = offset + (SWARM_IDE_BASE << SWARM_IDE_SHIFT);
+ r[0].end = offset + ((SWARM_IDE_BASE + 8) << SWARM_IDE_SHIFT) - 1;
+ r[1].start = offset + (SWARM_IDE_CTRL << SWARM_IDE_SHIFT);
+ r[1].end = offset + ((SWARM_IDE_CTRL + 1) << SWARM_IDE_SHIFT) - 1;
+
+ return platform_device_register(&swarm_pata_device);
+}
+
+device_initcall(swarm_pata_init);
+
+#endif /* defined(CONFIG_SIBYTE_SWARM) || defined(CONFIG_SIBYTE_LITTLESUR) */
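
Note: the resource setup in swarm_pata_init() is mostly address arithmetic: legacy IDE port numbers are scaled by the GenBus ioport shift and added to the chip-select window read from the I/O extension registers. The same arithmetic is pulled out on its own below, with an invented window base; only the shifts and port numbers come from the code above.

#include <stdio.h>

#define SWARM_IDE_SHIFT	5          /* GenBus spaces registers 32 bytes apart */
#define SWARM_IDE_BASE	0x1f0      /* legacy IDE command block ports */
#define SWARM_IDE_CTRL	0x3f6      /* legacy IDE control port */

int main(void)
{
	unsigned long long offset = 0x100a0000ULL;   /* pretend GenBus CS window */

	unsigned long long cmd_start  = offset + ((unsigned long long)SWARM_IDE_BASE << SWARM_IDE_SHIFT);
	unsigned long long cmd_end    = offset + (((unsigned long long)SWARM_IDE_BASE + 8) << SWARM_IDE_SHIFT) - 1;
	unsigned long long ctrl_start = offset + ((unsigned long long)SWARM_IDE_CTRL << SWARM_IDE_SHIFT);
	unsigned long long ctrl_end   = offset + (((unsigned long long)SWARM_IDE_CTRL + 1) << SWARM_IDE_SHIFT) - 1;

	/* Eight command-block registers, each strided by 1 << ioport_shift. */
	printf("cmd  : %#llx-%#llx (%llu bytes)\n", cmd_start, cmd_end, cmd_end - cmd_start + 1);
	printf("ctrl : %#llx-%#llx (%llu bytes)\n", ctrl_start, ctrl_end, ctrl_end - ctrl_start + 1);
	return 0;
}
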
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index 761c434a248..56c64ccc9c2 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -20,22 +20,8 @@ EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);
atomic_t irq_err_count;
/*
- * MN10300 INTC controller operations
+ * MN10300 interrupt controller operations
*/
-static void mn10300_cpupic_disable(unsigned int irq)
-{
- u16 tmp = GxICR(irq);
- GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
- tmp = GxICR(irq);
-}
-
-static void mn10300_cpupic_enable(unsigned int irq)
-{
- u16 tmp = GxICR(irq);
- GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
- tmp = GxICR(irq);
-}
-
static void mn10300_cpupic_ack(unsigned int irq)
{
u16 tmp;
@@ -60,26 +46,54 @@ static void mn10300_cpupic_mask_ack(unsigned int irq)
static void mn10300_cpupic_unmask(unsigned int irq)
{
u16 tmp = GxICR(irq);
- GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
+ GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
tmp = GxICR(irq);
}
-static void mn10300_cpupic_end(unsigned int irq)
+static void mn10300_cpupic_unmask_clear(unsigned int irq)
{
+ /* the MN10300 PIC latches its interrupt request bit, even after the
+ * device has ceased to assert its interrupt line and the interrupt
+ * channel has been disabled in the PIC, so for level-triggered
+ * interrupts we need to clear the request bit when we re-enable */
u16 tmp = GxICR(irq);
- GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
+ GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
tmp = GxICR(irq);
}
-static struct irq_chip mn10300_cpu_pic = {
- .name = "cpu",
- .disable = mn10300_cpupic_disable,
- .enable = mn10300_cpupic_enable,
+/*
+ * MN10300 PIC level-triggered IRQ handling.
+ *
+ * The PIC has no 'ACK' function per se. It is possible to clear individual
+ * channel latches, but each latch relatches whether or not the channel is
+ * masked, so we need to clear the latch when we unmask the channel.
+ *
+ * Also for this reason, we don't supply an ack() op (it's unused anyway if
+ * mask_ack() is provided), and mask_ack() just masks.
+ */
+static struct irq_chip mn10300_cpu_pic_level = {
+ .name = "cpu_l",
+ .disable = mn10300_cpupic_mask,
+ .enable = mn10300_cpupic_unmask_clear,
+ .ack = NULL,
+ .mask = mn10300_cpupic_mask,
+ .mask_ack = mn10300_cpupic_mask,
+ .unmask = mn10300_cpupic_unmask_clear,
+};
+
+/*
+ * MN10300 PIC edge-triggered IRQ handling.
+ *
+ * We use the latch clearing function of the PIC as the 'ACK' function.
+ */
+static struct irq_chip mn10300_cpu_pic_edge = {
+ .name = "cpu_e",
+ .disable = mn10300_cpupic_mask,
+ .enable = mn10300_cpupic_unmask,
.ack = mn10300_cpupic_ack,
.mask = mn10300_cpupic_mask,
.mask_ack = mn10300_cpupic_mask_ack,
.unmask = mn10300_cpupic_unmask,
- .end = mn10300_cpupic_end,
};
/*
@@ -114,7 +128,8 @@ void set_intr_level(int irq, u16 level)
*/
void set_intr_postackable(int irq)
{
- set_irq_handler(irq, handle_level_irq);
+ set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
+ handle_level_irq);
}
/*
@@ -126,8 +141,12 @@ void __init init_IRQ(void)
for (irq = 0; irq < NR_IRQS; irq++)
if (irq_desc[irq].chip == &no_irq_type)
- set_irq_chip_and_handler(irq, &mn10300_cpu_pic,
- handle_edge_irq);
+ /* due to the PIC latching interrupt requests, even
+ * when the IRQ is disabled, IRQ_PENDING is superfluous
+ * and we can use handle_level_irq() for edge-triggered
+ * interrupts */
+ set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
+ handle_level_irq);
unit_init_IRQ();
}
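
Note: the comments above describe why the level-triggered chip needs its own unmask. The PIC's request latch survives masking, so re-enabling a channel without also writing GxICR_DETECT can replay a request the device stopped asserting long ago. A toy model of that latch behaviour follows; the bits and names are simplified stand-ins, not the real GxICR layout.

#include <stdio.h>
#include <stdbool.h>

struct channel {
	bool enabled;    /* GxICR_ENABLE */
	bool latched;    /* request latch: set by the device, survives masking */
	bool line;       /* current state of the device's interrupt line */
};

static void device_pulse(struct channel *ch)
{
	ch->line = true;
	ch->latched = true;      /* latch captures the request even while masked */
	ch->line = false;        /* device stops asserting (handled elsewhere) */
}

static void unmask_plain(struct channel *ch)       /* old-style unmask */
{
	ch->enabled = true;
}

static void unmask_clear(struct channel *ch)       /* new ..._unmask_clear */
{
	ch->latched = ch->line;  /* GxICR_DETECT: re-sample the line, drop stale latch */
	ch->enabled = true;
}

static void check(const char *what, struct channel *ch)
{
	printf("%s: %s\n", what,
	       ch->enabled && ch->latched ? "spurious interrupt!" : "quiet");
}

int main(void)
{
	struct channel a = { .enabled = false }, b = { .enabled = false };

	device_pulse(&a);        /* request arrives while the channel is masked */
	device_pulse(&b);

	unmask_plain(&a);        /* stale latch still set -> bogus re-delivery */
	unmask_clear(&b);        /* latch cleared against the now-idle line */

	check("plain unmask", &a);
	check("unmask+clear", &b);
	return 0;
}
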
diff --git a/arch/mn10300/unit-asb2303/unit-init.c b/arch/mn10300/unit-asb2303/unit-init.c
index 14b2c817cff..70e8cb4ea26 100644
--- a/arch/mn10300/unit-asb2303/unit-init.c
+++ b/arch/mn10300/unit-asb2303/unit-init.c
@@ -51,7 +51,7 @@ void __init unit_init_IRQ(void)
switch (GET_XIRQ_TRIGGER(extnum)) {
case XIRQ_TRIGGER_HILEVEL:
case XIRQ_TRIGGER_LOWLEVEL:
- set_irq_handler(XIRQ2IRQ(extnum), handle_level_irq);
+ set_intr_postackable(XIRQ2IRQ(extnum));
break;
default:
break;
diff --git a/arch/mn10300/unit-asb2305/unit-init.c b/arch/mn10300/unit-asb2305/unit-init.c
index 6a352414a35..72812a9439a 100644
--- a/arch/mn10300/unit-asb2305/unit-init.c
+++ b/arch/mn10300/unit-asb2305/unit-init.c
@@ -52,7 +52,7 @@ void __init unit_init_IRQ(void)
switch (GET_XIRQ_TRIGGER(extnum)) {
case XIRQ_TRIGGER_HILEVEL:
case XIRQ_TRIGGER_LOWLEVEL:
- set_irq_handler(XIRQ2IRQ(extnum), handle_level_irq);
+ set_intr_postackable(XIRQ2IRQ(extnum));
break;
default:
break;
diff --git a/arch/powerpc/boot/dts/holly.dts b/arch/powerpc/boot/dts/holly.dts
index f87fe7b9ced..c6e11ebeceb 100644
--- a/arch/powerpc/boot/dts/holly.dts
+++ b/arch/powerpc/boot/dts/holly.dts
@@ -133,61 +133,61 @@
reg = <0x00007400 0x00000400>;
big-endian;
};
+ };
- pci@1000 {
- device_type = "pci";
- compatible = "tsi109-pci", "tsi108-pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0x00001000 0x00001000>;
- bus-range = <0x0 0x0>;
- /*----------------------------------------------------+
- | PCI memory range.
- | 01 denotes I/O space
- | 02 denotes 32-bit memory space
- +----------------------------------------------------*/
- ranges = <0x02000000 0x00000000 0x40000000 0x40000000 0x00000000 0x10000000
- 0x01000000 0x00000000 0x00000000 0x7e000000 0x00000000 0x00010000>;
- clock-frequency = <133333332>;
- interrupt-parent = <&MPIC>;
+ pci@c0001000 {
+ device_type = "pci";
+ compatible = "tsi109-pci", "tsi108-pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0xc0001000 0x00001000>;
+ bus-range = <0x0 0x0>;
+ /*----------------------------------------------------+
+ | PCI memory range.
+ | 01 denotes I/O space
+ | 02 denotes 32-bit memory space
+ +----------------------------------------------------*/
+ ranges = <0x02000000 0x00000000 0x40000000 0x40000000 0x00000000 0x10000000
+ 0x01000000 0x00000000 0x00000000 0x7e000000 0x00000000 0x00010000>;
+ clock-frequency = <133333332>;
+ interrupt-parent = <&MPIC>;
+ interrupts = <0x17 0x2>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ /*----------------------------------------------------+
+ | The INTA, INTB, INTC, INTD are shared.
+ +----------------------------------------------------*/
+ interrupt-map = <
+ 0x800 0x0 0x0 0x1 &RT0 0x24 0x0
+ 0x800 0x0 0x0 0x2 &RT0 0x25 0x0
+ 0x800 0x0 0x0 0x3 &RT0 0x26 0x0
+ 0x800 0x0 0x0 0x4 &RT0 0x27 0x0
+
+ 0x1000 0x0 0x0 0x1 &RT0 0x25 0x0
+ 0x1000 0x0 0x0 0x2 &RT0 0x26 0x0
+ 0x1000 0x0 0x0 0x3 &RT0 0x27 0x0
+ 0x1000 0x0 0x0 0x4 &RT0 0x24 0x0
+
+ 0x1800 0x0 0x0 0x1 &RT0 0x26 0x0
+ 0x1800 0x0 0x0 0x2 &RT0 0x27 0x0
+ 0x1800 0x0 0x0 0x3 &RT0 0x24 0x0
+ 0x1800 0x0 0x0 0x4 &RT0 0x25 0x0
+
+ 0x2000 0x0 0x0 0x1 &RT0 0x27 0x0
+ 0x2000 0x0 0x0 0x2 &RT0 0x24 0x0
+ 0x2000 0x0 0x0 0x3 &RT0 0x25 0x0
+ 0x2000 0x0 0x0 0x4 &RT0 0x26 0x0
+ >;
+
+ RT0: router@1180 {
+ device_type = "pic-router";
+ interrupt-controller;
+ big-endian;
+ clock-frequency = <0>;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
interrupts = <0x17 0x2>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- /*----------------------------------------------------+
- | The INTA, INTB, INTC, INTD are shared.
- +----------------------------------------------------*/
- interrupt-map = <
- 0x800 0x0 0x0 0x1 &RT0 0x24 0x0
- 0x800 0x0 0x0 0x2 &RT0 0x25 0x0
- 0x800 0x0 0x0 0x3 &RT0 0x26 0x0
- 0x800 0x0 0x0 0x4 &RT0 0x27 0x0
-
- 0x1000 0x0 0x0 0x1 &RT0 0x25 0x0
- 0x1000 0x0 0x0 0x2 &RT0 0x26 0x0
- 0x1000 0x0 0x0 0x3 &RT0 0x27 0x0
- 0x1000 0x0 0x0 0x4 &RT0 0x24 0x0
-
- 0x1800 0x0 0x0 0x1 &RT0 0x26 0x0
- 0x1800 0x0 0x0 0x2 &RT0 0x27 0x0
- 0x1800 0x0 0x0 0x3 &RT0 0x24 0x0
- 0x1800 0x0 0x0 0x4 &RT0 0x25 0x0
-
- 0x2000 0x0 0x0 0x1 &RT0 0x27 0x0
- 0x2000 0x0 0x0 0x2 &RT0 0x24 0x0
- 0x2000 0x0 0x0 0x3 &RT0 0x25 0x0
- 0x2000 0x0 0x0 0x4 &RT0 0x26 0x0
- >;
-
- RT0: router@1180 {
- device_type = "pic-router";
- interrupt-controller;
- big-endian;
- clock-frequency = <0>;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- interrupts = <0x17 0x2>;
- interrupt-parent = <&MPIC>;
- };
+ interrupt-parent = <&MPIC>;
};
};
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index d308a9f70f1..31982d05d81 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -34,11 +34,7 @@
#include <asm/smp.h>
#ifdef CONFIG_HOTPLUG_CPU
-/* this is used for software suspend, and that shuts down
- * CPUs even while the system is still booting... */
-#define cpu_should_die() (cpu_is_offline(smp_processor_id()) && \
- (system_state == SYSTEM_RUNNING \
- || system_state == SYSTEM_BOOTING))
+#define cpu_should_die() cpu_is_offline(smp_processor_id())
#else
#define cpu_should_die() 0
#endif
diff --git a/arch/powerpc/platforms/fsl_uli1575.c b/arch/powerpc/platforms/fsl_uli1575.c
index ef74a0763ec..8c619963bec 100644
--- a/arch/powerpc/platforms/fsl_uli1575.c
+++ b/arch/powerpc/platforms/fsl_uli1575.c
@@ -219,11 +219,21 @@ static void __devinit quirk_final_uli5249(struct pci_dev *dev)
int i;
u8 *dummy;
struct pci_bus *bus = dev->bus;
+ resource_size_t end = 0;
+
+ for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCES+3; i++) {
+ unsigned long flags = pci_resource_flags(dev, i);
+ if ((flags & (IORESOURCE_MEM|IORESOURCE_PREFETCH)) == IORESOURCE_MEM)
+ end = pci_resource_end(dev, i);
+ }
for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
if ((bus->resource[i]) &&
(bus->resource[i]->flags & IORESOURCE_MEM)) {
- dummy = ioremap(bus->resource[i]->end - 3, 0x4);
+ if (bus->resource[i]->end == end)
+ dummy = ioremap(bus->resource[i]->start, 0x4);
+ else
+ dummy = ioremap(bus->resource[i]->end - 3, 0x4);
if (dummy) {
in_8(dummy);
iounmap(dummy);
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index ca114fe46ff..06acb1a18bb 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -169,6 +169,8 @@ void init_cpu_timer(void)
static void clock_comparator_interrupt(__u16 code)
{
+ if (S390_lowcore.clock_comparator == -1ULL)
+ set_clock_comparator(S390_lowcore.clock_comparator);
}
static void etr_timing_alert(struct etr_irq_parm *);
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index fc6ab6094df..0953cee05ef 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -1,14 +1,9 @@
/*
- * arch/s390/lib/delay.c
* Precise Delay Loops for S390
*
- * S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- *
- * Derived from "arch/i386/lib/delay.c"
- * Copyright (C) 1993 Linus Torvalds
- * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ * Copyright IBM Corp. 1999,2008
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ * Heiko Carstens <heiko.carstens@de.ibm.com>,
*/
#include <linux/sched.h>
@@ -29,30 +24,31 @@ void __delay(unsigned long loops)
asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1));
}
-/*
- * Waits for 'usecs' microseconds using the TOD clock comparator.
- */
-void __udelay(unsigned long usecs)
+static void __udelay_disabled(unsigned long usecs)
{
- u64 end, time, old_cc = 0;
- unsigned long flags, cr0, mask, dummy;
- int irq_context;
+ unsigned long mask, cr0, cr0_saved;
+ u64 clock_saved;
- irq_context = in_interrupt();
- if (!irq_context)
- local_bh_disable();
- local_irq_save(flags);
- if (raw_irqs_disabled_flags(flags)) {
- old_cc = local_tick_disable();
- S390_lowcore.clock_comparator = -1ULL;
- __ctl_store(cr0, 0, 0);
- dummy = (cr0 & 0xffff00e0) | 0x00000800;
- __ctl_load(dummy , 0, 0);
- mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
- } else
- mask = psw_kernel_bits | PSW_MASK_WAIT |
- PSW_MASK_EXT | PSW_MASK_IO;
+ clock_saved = local_tick_disable();
+ set_clock_comparator(get_clock() + ((u64) usecs << 12));
+ __ctl_store(cr0_saved, 0, 0);
+ cr0 = (cr0_saved & 0xffff00e0) | 0x00000800;
+ __ctl_load(cr0 , 0, 0);
+ mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
+ trace_hardirqs_on();
+ __load_psw_mask(mask);
+ local_irq_disable();
+ __ctl_load(cr0_saved, 0, 0);
+ local_tick_enable(clock_saved);
+ set_clock_comparator(S390_lowcore.clock_comparator);
+}
+static void __udelay_enabled(unsigned long usecs)
+{
+ unsigned long mask;
+ u64 end, time;
+
+ mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT | PSW_MASK_IO;
end = get_clock() + ((u64) usecs << 12);
do {
time = end < S390_lowcore.clock_comparator ?
@@ -62,13 +58,37 @@ void __udelay(unsigned long usecs)
__load_psw_mask(mask);
local_irq_disable();
} while (get_clock() < end);
+ set_clock_comparator(S390_lowcore.clock_comparator);
+}
- if (raw_irqs_disabled_flags(flags)) {
- __ctl_load(cr0, 0, 0);
- local_tick_enable(old_cc);
+/*
+ * Waits for 'usecs' microseconds using the TOD clock comparator.
+ */
+void __udelay(unsigned long usecs)
+{
+ unsigned long flags;
+
+ preempt_disable();
+ local_irq_save(flags);
+ if (in_irq()) {
+ __udelay_disabled(usecs);
+ goto out;
+ }
+ if (in_softirq()) {
+ if (raw_irqs_disabled_flags(flags))
+ __udelay_disabled(usecs);
+ else
+ __udelay_enabled(usecs);
+ goto out;
}
- if (!irq_context)
+ if (raw_irqs_disabled_flags(flags)) {
+ local_bh_disable();
+ __udelay_disabled(usecs);
_local_bh_enable();
- set_clock_comparator(S390_lowcore.clock_comparator);
+ goto out;
+ }
+ __udelay_enabled(usecs);
+out:
local_irq_restore(flags);
+ preempt_enable();
}
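
Note (not part of the patch): the rewritten __udelay() dispatches on calling context: hard-irq context always takes the fully disabled path, softirq context chooses based on whether irqs are already off, and process context with irqs off additionally brackets the disabled path with local_bh_disable()/_local_bh_enable(). A user-space model of that dispatch; the enum and helper names are illustrative, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

enum context { CTX_PROCESS, CTX_SOFTIRQ, CTX_HARDIRQ };

/* Stand-ins for __udelay_disabled()/__udelay_enabled(). */
static void udelay_disabled(unsigned long usecs) { printf("disabled path: %lu us\n", usecs); }
static void udelay_enabled(unsigned long usecs)  { printf("enabled path: %lu us\n", usecs); }

static void model_udelay(unsigned long usecs, enum context ctx, bool irqs_off)
{
	if (ctx == CTX_HARDIRQ) {		/* in_irq() */
		udelay_disabled(usecs);
		return;
	}
	if (ctx == CTX_SOFTIRQ) {		/* in_softirq() */
		if (irqs_off)
			udelay_disabled(usecs);
		else
			udelay_enabled(usecs);
		return;
	}
	if (irqs_off) {
		/* the kernel wraps this call in local_bh_disable()/_local_bh_enable() */
		udelay_disabled(usecs);
		return;
	}
	udelay_enabled(usecs);
}

int main(void)
{
	model_udelay(100, CTX_SOFTIRQ, true);
	model_udelay(100, CTX_PROCESS, false);
	return 0;
}
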
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
index a1310c52fc0..857e492c571 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/boot/compressed/relocs.c
@@ -492,7 +492,7 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
continue;
}
sh_symtab = sec_symtab->symtab;
- sym_strtab = sec->link->strtab;
+ sym_strtab = sec_symtab->link->strtab;
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
Elf32_Rel *rel;
Elf32_Sym *sym;
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index bfd10fd211c..c102af85df9 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1605,6 +1605,14 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
*/
{
.callback = dmi_ignore_irq0_timer_override,
+ .ident = "HP nx6115 laptop",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6115"),
+ },
+ },
+ {
+ .callback = dmi_ignore_irq0_timer_override,
.ident = "HP NX6125 laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
@@ -1619,6 +1627,14 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
},
},
+ {
+ .callback = dmi_ignore_irq0_timer_override,
+ .ident = "HP 6715b laptop",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
+ },
+ },
{}
};
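
Note (not part of the patch): the new entries extend the DMI quirk table so the BIOS IRQ0 timer override is also ignored on the nx6115 and 6715b. A rough user-space model of how such a table fires when both match strings are found in the DMI fields; the struct and helper names are assumptions, not the kernel's DMI API:

/* Sketch only: substring matching over a vendor/product quirk table. */
#include <stdio.h>
#include <string.h>

struct dmi_quirk {
	const char *ident;
	const char *vendor;
	const char *product;
	void (*callback)(const struct dmi_quirk *q);
};

static void ignore_irq0_override(const struct dmi_quirk *q)
{
	printf("%s: ignoring BIOS IRQ0 timer override\n", q->ident);
}

static const struct dmi_quirk quirks[] = {
	{ "HP nx6115 laptop", "Hewlett-Packard", "HP Compaq nx6115", ignore_irq0_override },
	{ "HP 6715b laptop",  "Hewlett-Packard", "HP Compaq 6715b",  ignore_irq0_override },
	{ NULL, NULL, NULL, NULL },
};

static void check_dmi(const char *vendor, const char *product)
{
	const struct dmi_quirk *q;

	for (q = quirks; q->ident; q++)
		if (strstr(vendor, q->vendor) && strstr(product, q->product))
			q->callback(q);
}

int main(void)
{
	check_dmi("Hewlett-Packard", "HP Compaq 6715b");
	return 0;
}
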
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index b117d7f8a56..885c8265e6b 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -834,7 +834,7 @@ static int __init enable_mtrr_cleanup_setup(char *str)
enable_mtrr_cleanup = 1;
return 0;
}
-early_param("enble_mtrr_cleanup", enable_mtrr_cleanup_setup);
+early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup);
struct var_mtrr_state {
unsigned long range_startk;
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 8282a213968..10435a120d2 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -455,12 +455,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
return NOTIFY_DONE;
case DIE_NMI_IPI:
- if (atomic_read(&kgdb_active) != -1) {
- /* KGDB CPU roundup */
- kgdb_nmicallback(raw_smp_processor_id(), regs);
- was_in_debug_nmi[raw_smp_processor_id()] = 1;
- touch_nmi_watchdog();
- }
+ /* Just ignore, we will handle the roundup on DIE_NMI. */
return NOTIFY_DONE;
case DIE_NMIUNKNOWN:
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 49285f8fd4d..be33a5442d8 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -626,7 +626,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
struct pci_dev *dev;
void *gatt;
int i, error;
- unsigned long start_pfn, end_pfn;
printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
aper_size = aper_base = info->aper_size = 0;
@@ -672,12 +671,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
aper_base, aper_size>>10);
- /* need to map that range */
- end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
- if (end_pfn > max_low_pfn_mapped) {
- start_pfn = (aper_base>>PAGE_SHIFT);
- init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
- }
return 0;
nommu:
@@ -727,7 +720,8 @@ void __init gart_iommu_init(void)
{
struct agp_kern_info info;
unsigned long iommu_start;
- unsigned long aper_size;
+ unsigned long aper_base, aper_size;
+ unsigned long start_pfn, end_pfn;
unsigned long scratch;
long i;
@@ -765,8 +759,16 @@ void __init gart_iommu_init(void)
return;
}
+ /* need to map that range */
+ aper_size = info.aper_size << 20;
+ aper_base = info.aper_base;
+ end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
+ if (end_pfn > max_low_pfn_mapped) {
+ start_pfn = (aper_base>>PAGE_SHIFT);
+ init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
+ }
+
printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
- aper_size = info.aper_size * 1024 * 1024;
iommu_size = check_iommu_size(info.aper_base, aper_size);
iommu_pages = iommu_size >> PAGE_SHIFT;
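
Note (not part of the patch): the hunks above move the aperture-mapping step from init_k8_gatt() into gart_iommu_init(); the step itself is plain pfn arithmetic on the aperture base and size. A standalone model of that check; PAGE_SHIFT and the sample numbers are assumptions for illustration:

/* Sketch only: decide whether the GART aperture still needs mapping. */
#include <stdio.h>

#define PAGE_SHIFT 12

static void map_aperture(unsigned long aper_base, unsigned long aper_size_mb,
			 unsigned long max_low_pfn_mapped)
{
	unsigned long aper_size = aper_size_mb << 20;
	unsigned long start_pfn = aper_base >> PAGE_SHIFT;
	unsigned long end_pfn = start_pfn + (aper_size >> PAGE_SHIFT);

	if (end_pfn > max_low_pfn_mapped)
		printf("would map pfns 0x%lx-0x%lx\n", start_pfn, end_pfn);
	else
		printf("already covered by the direct mapping\n");
}

int main(void)
{
	map_aperture(0xd0000000UL, 64, 0xc0000UL);
	return 0;
}
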
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 6ca515d6db5..edfb09f3047 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -235,7 +235,7 @@ static void vmi_write_ldt_entry(struct desc_struct *dt, int entry,
const void *desc)
{
u32 *ldt_entry = (u32 *)desc;
- vmi_ops.write_idt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
+ vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
}
static void vmi_load_sp0(struct tss_struct *tss,