author    Kevin D. Kissell <kevink@paralogos.com>    2008-09-09 21:48:52 +0200
committer Ralf Baechle <ralf@linux-mips.org>         2008-10-03 17:58:58 +0100
commit    8531a35e5e275b17c57c39b7911bc2b37025f28c (patch)
tree      c593e23c875d0639a8f422c0ceb8b2a7738d143e /include/asm-mips
parent    d2bb01b042a38219fbddaafc214c5beb96248d2f (diff)
[MIPS] SMTC: Fix SMTC dyntick support.
Rework of SMTC support to make it work with the new clock event system,
allowing "tickless" operation, and to make it compatible with the use of
the "wait_irqoff" idle loop. The new clocking scheme means that the
previously optional IPI instant replay mechanism is now required, and it
has been made more robust.

Signed-off-by: Kevin D. Kissell <kevink@paralogos.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'include/asm-mips')
-rw-r--r--  include/asm-mips/cevt-r4k.h   46
-rw-r--r--  include/asm-mips/irqflags.h   26
-rw-r--r--  include/asm-mips/smtc.h        8
3 files changed, 74 insertions, 6 deletions
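The "IPI instant replay" mechanism the commit message refers to is easiest to see in the irqflags.h hunk further down: smtc_ipi_replay() is now called unconditionally from raw_local_irq_enable() and raw_local_irq_restore(), so IPIs that were queued while a TC ran with interrupts masked are delivered in software before interrupts are reopened. The stand-alone C sketch below is not part of the patch and only illustrates that idea; the fake_* names, the fixed-size queue, and the printf handler are assumptions made for the illustration, not the kernel's data structures.

#include <stdio.h>

#define MAX_PENDING 8

struct fake_ipi { int type; int arg; };          /* stand-in for struct smtc_ipi        */

static struct fake_ipi pending[MAX_PENDING];     /* stand-in for the per-CPU IPI queue  */
static int npending;
static int irqs_enabled;

static void fake_handle_ipi(struct fake_ipi *m)
{
        printf("replaying IPI type=%d arg=%d\n", m->type, m->arg);
}

/* Core of the "instant replay" idea: drain every IPI that had to be queued
 * while this CPU ran with interrupts masked, before actually unmasking. */
static void fake_ipi_replay(void)
{
        while (npending > 0)
                fake_handle_ipi(&pending[--npending]);
}

static void fake_local_irq_enable(void)
{
        fake_ipi_replay();        /* software replay of deferred IPIs   */
        irqs_enabled = 1;         /* stands in for the real unmask asm  */
}

int main(void)
{
        /* Two IPIs arrive while interrupts are masked and get queued ... */
        pending[npending++] = (struct fake_ipi){ .type = 1, .arg = 10 };
        pending[npending++] = (struct fake_ipi){ .type = 2, .arg = 20 };

        /* ... and are replayed the moment interrupts are re-enabled. */
        fake_local_irq_enable();
        return 0;
}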
diff --git a/include/asm-mips/cevt-r4k.h b/include/asm-mips/cevt-r4k.h
new file mode 100644
index 00000000000..fa4328f9124
--- /dev/null
+++ b/include/asm-mips/cevt-r4k.h
@@ -0,0 +1,46 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Kevin D. Kissell
+ */
+
+/*
+ * Definitions used for common event timer implementation
+ * for MIPS 4K-type processors and their MIPS MT variants.
+ * Avoids unsightly extern declarations in C files.
+ */
+#ifndef __ASM_CEVT_R4K_H
+#define __ASM_CEVT_R4K_H
+
+DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+
+void mips_event_handler(struct clock_event_device *dev);
+int c0_compare_int_usable(void);
+void mips_set_clock_mode(enum clock_event_mode, struct clock_event_device *);
+irqreturn_t c0_compare_interrupt(int, void *);
+
+extern struct irqaction c0_compare_irqaction;
+extern int cp0_timer_irq_installed;
+
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked.
+ */
+
+static inline int handle_perf_irq(int r2)
+{
+ /*
+ * The performance counter overflow interrupt may be shared with the
+ * timer interrupt (cp0_perfcount_irq < 0). If it is and a
+ * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+ * and we can't reliably determine if a counter interrupt has also
+ * happened (!r2) then don't check for a timer interrupt.
+ */
+ return (cp0_perfcount_irq < 0) &&
+ perf_irq() == IRQ_HANDLED &&
+ !r2;
+}
+
+#endif /* __ASM_CEVT_R4K_H */
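A small, self-contained sketch of how the handle_perf_irq() test above behaves; it is not part of the patch, and the fake_* names plus the flag standing in for the performance-counter state are assumptions. The timer check is skipped only when the perf-counter interrupt shares the timer line (cp0_perfcount_irq < 0), the counter really overflowed, and the core gives no reliable Cause.TI indication (!r2).

#include <stdio.h>

static int fake_cp0_perfcount_irq = -1;   /* < 0: perf IRQ shares the timer line  */
static int fake_perf_overflowed;          /* pretend perf-counter overflow state  */

static int fake_perf_irq(void)
{
        return fake_perf_overflowed;      /* 1 plays the role of IRQ_HANDLED */
}

/* Same predicate as handle_perf_irq() in the hunk above. */
static int fake_handle_perf_irq(int r2)
{
        return fake_cp0_perfcount_irq < 0 &&
               fake_perf_irq() &&
               !r2;
}

int main(void)
{
        fake_perf_overflowed = 1;

        /* Pre-R2 core (no Cause.TI): cannot tell whether the timer also fired,
         * so the timer check is skipped. */
        printf("r2=0: skip timer check = %d\n", fake_handle_perf_irq(0));

        /* R2 core: Cause.TI can be consulted, so the timer check still runs. */
        printf("r2=1: skip timer check = %d\n", fake_handle_perf_irq(1));
        return 0;
}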
diff --git a/include/asm-mips/irqflags.h b/include/asm-mips/irqflags.h
index 881e8866501..701ec0ba8fa 100644
--- a/include/asm-mips/irqflags.h
+++ b/include/asm-mips/irqflags.h
@@ -38,8 +38,17 @@ __asm__(
" .set pop \n"
" .endm");
+extern void smtc_ipi_replay(void);
+
static inline void raw_local_irq_enable(void)
{
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC kernel needs to do a software replay of queued
+ * IPIs, at the cost of call overhead on each local_irq_enable()
+ */
+ smtc_ipi_replay();
+#endif
__asm__ __volatile__(
"raw_local_irq_enable"
: /* no outputs */
@@ -47,6 +56,7 @@ static inline void raw_local_irq_enable(void)
: "memory");
}
+
/*
* For cli() we have to insert nops to make sure that the new value
* has actually arrived in the status register before the end of this
@@ -185,15 +195,14 @@ __asm__(
" .set pop \n"
" .endm \n");
-extern void smtc_ipi_replay(void);
static inline void raw_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
-#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
+#ifdef CONFIG_MIPS_MT_SMTC
/*
- * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred
+ * SMTC kernel needs to do a software replay of queued
* IPIs, at the cost of branch and call overhead on each
* local_irq_restore()
*/
@@ -208,6 +217,17 @@ static inline void raw_local_irq_restore(unsigned long flags)
: "memory");
}
+static inline void __raw_local_irq_restore(unsigned long flags)
+{
+ unsigned long __tmp1;
+
+ __asm__ __volatile__(
+ "raw_local_irq_restore\t%0"
+ : "=r" (__tmp1)
+ : "0" (flags)
+ : "memory");
+}
+
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
#ifdef CONFIG_MIPS_MT_SMTC
diff --git a/include/asm-mips/smtc.h b/include/asm-mips/smtc.h
index 3639b28f80d..ea60bf08dcb 100644
--- a/include/asm-mips/smtc.h
+++ b/include/asm-mips/smtc.h
@@ -6,6 +6,7 @@
*/
#include <asm/mips_mt.h>
+#include <asm/smtc_ipi.h>
/*
* System-wide SMTC status information
@@ -38,14 +39,15 @@ struct mm_struct;
struct task_struct;
void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
-
+void self_ipi(struct smtc_ipi *);
void smtc_flush_tlb_asid(unsigned long asid);
-extern int mipsmt_build_cpu_map(int startslot);
-extern void mipsmt_prepare_cpus(void);
+extern int smtc_build_cpu_map(int startslot);
+extern void smtc_prepare_cpus(int cpus);
extern void smtc_smp_finish(void);
extern void smtc_boot_secondary(int cpu, struct task_struct *t);
extern void smtc_cpus_done(void);
+
/*
* Sharing the TLB between multiple VPEs means that the
* "random" index selection function is not allowed to