author	Kevin D. Kissell <kevink@mips.com>	2007-08-03 19:38:03 +0200
committer	Ralf Baechle <ralf@linux-mips.org>	2007-10-11 23:45:57 +0100
commit	f571eff0a24ed97a919f2b61bb4afdeab4b43002 (patch)
tree	e9d6c597fafca02720f000cf795a37f2d163f10f /include
parent	bbf25010f1a6b761914430f5fca081ec8c7accd1 (diff)
[MIPS] IRQ Affinity Support for SMTC on Malta Platform
Signed-off-by: Kevin D. Kissell <kevink@mips.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'include')
-rw-r--r--	include/asm-mips/irq.h	| 67
-rw-r--r--	include/asm-mips/smtc_ipi.h	| 1
2 files changed, 66 insertions(+), 2 deletions(-)
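
This diff covers only include/; the Malta-side implementation lives elsewhere in the commit. As a rough illustration of the contract the new extern establishes, a platform's plat_set_irq_affinity() is expected to sanitize the mask requested through /proc/irq/*/smp_affinity and record it in irq_desc[irq].affinity, which IRQ_AFFINITY_HOOK later compares against smp_processor_id() at dispatch time. The fragment below is only a minimal sketch of that contract, assuming the simplest "drop offline CPUs" policy; it is not the Malta code, which may apply additional VPE-aware filtering.

/*
 * Minimal sketch of a plat_set_irq_affinity() implementation (policy
 * here is an assumption, not the Malta code from this commit).
 */
void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
	cpumask_t tmask = affinity;
	int cpu;

	/* Drop CPUs that cannot currently service the interrupt. */
	for_each_cpu_mask(cpu, affinity) {
		if (!cpu_online(cpu))
			cpu_clear(cpu, tmask);
	}

	/* Record the mask that IRQ_AFFINITY_HOOK will consult at dispatch. */
	irq_desc[irq].affinity = tmask;
}
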
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index 2cb52cf8bd4..a58f0eecc68 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -46,6 +46,38 @@ static inline void smtc_im_ack_irq(unsigned int irq)
#endif /* CONFIG_MIPS_MT_SMTC */
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+#include <linux/cpumask.h>
+
+extern void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity);
+extern void smtc_forward_irq(unsigned int irq);
+
+/*
+ * IRQ affinity hook invoked at the beginning of interrupt dispatch
+ * if option is enabled.
+ *
+ * Up through Linux 2.6.22 (at least) cpumask operations are very
+ * inefficient on MIPS. Initial prototypes of SMTC IRQ affinity
+ * used a "fast path" per-IRQ-descriptor cache of affinity information
+ * to reduce latency. As there is a project afoot to optimize the
+ * cpumask implementations, this version is optimistically assuming
+ * that cpumask.h macro overhead is reasonable during interrupt dispatch.
+ */
+#define IRQ_AFFINITY_HOOK(irq) \
+do { \
+ if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) { \
+ smtc_forward_irq(irq); \
+ irq_exit(); \
+ return; \
+ } \
+} while (0)
+
+#else /* Not doing SMTC affinity */
+
+#define IRQ_AFFINITY_HOOK(irq) do { } while (0)
+
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
/*
@@ -56,13 +88,27 @@ static inline void smtc_im_ack_irq(unsigned int irq)
*/
#define __DO_IRQ_SMTC_HOOK(irq) \
do { \
+ IRQ_AFFINITY_HOOK(irq); \
if (irq_hwmask[irq] & 0x0000ff00) \
write_c0_tccontext(read_c0_tccontext() & \
- ~(irq_hwmask[irq] & 0x0000ff00)); \
+ ~(irq_hwmask[irq] & 0x0000ff00)); \
+} while (0)
+
+#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq) \
+do { \
+ if (irq_hwmask[irq] & 0x0000ff00) \
+ write_c0_tccontext(read_c0_tccontext() & \
+ ~(irq_hwmask[irq] & 0x0000ff00)); \
} while (0)
+
#else
-#define __DO_IRQ_SMTC_HOOK(irq) do { } while (0)
+#define __DO_IRQ_SMTC_HOOK(irq) \
+do { \
+ IRQ_AFFINITY_HOOK(irq); \
+} while (0)
+#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq) do { } while (0)
+
#endif
/*
@@ -81,6 +127,23 @@ do { \
irq_exit(); \
} while (0)
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+/*
+ * To avoid inefficient and in some cases pathological re-checking of
+ * IRQ affinity, we have this variant that skips the affinity check.
+ */
+
+
+#define do_IRQ_no_affinity(irq) \
+do { \
+ irq_enter(); \
+ __NO_AFFINITY_IRQ_SMTC_HOOK(irq); \
+ generic_handle_irq(irq); \
+ irq_exit(); \
+} while (0)
+
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+
extern void arch_init_irq(void);
extern void spurious_interrupt(void);
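
The forwarding path referenced by IRQ_AFFINITY_HOOK pairs with the new IPI message type added in the smtc_ipi.h hunk below. As a rough sketch only (the smtc.c logic from this commit is not reproduced here), smtc_forward_irq() would pick a CPU that is in the IRQ's affinity mask and re-inject the interrupt there through the existing smtc_send_ipi() primitive; the target-selection policy shown is an assumption.

/*
 * Sketch of the forwarding path, assuming the simplest possible target
 * choice (first CPU in the recorded affinity mask).
 */
void smtc_forward_irq(unsigned int irq)
{
	int target;

	/* Pick a CPU allowed by the mask recorded by plat_set_irq_affinity(). */
	target = first_cpu(irq_desc[irq].affinity);
	if (target >= NR_CPUS)
		return;		/* Empty mask: nowhere to forward. */

	/* Hand the interrupt over using the new IPI message type. */
	smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
}
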
diff --git a/include/asm-mips/smtc_ipi.h b/include/asm-mips/smtc_ipi.h
index a52a4a7a36e..e09131a6127 100644
--- a/include/asm-mips/smtc_ipi.h
+++ b/include/asm-mips/smtc_ipi.h
@@ -34,6 +34,7 @@ struct smtc_ipi {
#define LINUX_SMP_IPI 1
#define SMTC_CLOCK_TICK 2
+#define IRQ_AFFINITY_IPI 3
/*
* A queue of IPI messages