author     Ingo Molnar <mingo@elte.hu>  2006-07-03 00:24:45 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-07-03 15:27:03 -0700
commit     6375e2b74c620794e1a27a26e4338aec2e41346a
tree       f966478bc214af98e9ca6235506377b5dd872526 /include
parent     2601e64d262ee5ed4d4a5737345803800d9c4db3
[PATCH] lockdep: irqtrace cleanup of include/asm-x86_64/irqflags.h
Clean up the x86-64 irqflags.h file:

 - macro => inline function transformation
 - simplifications
 - style fixes

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
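To make the shape of the conversion concrete, the sketch below condenses the central pattern from the hunks that follow; it is illustrative only, and the authoritative code is in the diff itself. The flags-reading macro becomes a static inline helper that returns the value, and the old macro name is kept as a thin wrapper so existing callers that pass an lvalue keep compiling:

#if 0	/* before: a macro that writes into its argument (condensed from the removed lines) */
#define raw_local_save_flags(x) \
	do { __asm__ __volatile__("pushfq ; popq %q0" \
		: "=g" (x) : /* no input */ : "memory"); } while (0)
#else	/* after: a typed inline helper plus a thin compatibility wrapper */
static inline unsigned long __raw_local_save_flags(void)
{
	unsigned long flags;

	__asm__ __volatile__("pushfq ; popq %q0"
		: "=g" (flags) : /* no input */ : "memory");
	return flags;
}

#define raw_local_save_flags(flags) \
	do { (flags) = __raw_local_save_flags(); } while (0)
#endif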
Diffstat (limited to 'include')
-rw-r--r--  include/asm-x86_64/irqflags.h | 156
1 files changed, 118 insertions, 38 deletions
diff --git a/include/asm-x86_64/irqflags.h b/include/asm-x86_64/irqflags.h
index 22f3c06b247..cce6937e87c 100644
--- a/include/asm-x86_64/irqflags.h
+++ b/include/asm-x86_64/irqflags.h
@@ -5,57 +5,137 @@
*
* This file gets included from lowlevel asm headers too, to provide
* wrapped versions of the local_irq_*() APIs, based on the
- * raw_local_irq_*() macros from the lowlevel headers.
+ * raw_local_irq_*() functions from the lowlevel headers.
*/
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H
#ifndef __ASSEMBLY__
+/*
+ * Interrupt control:
+ */
+
+static inline unsigned long __raw_local_save_flags(void)
+{
+ unsigned long flags;
+
+ __asm__ __volatile__(
+ "# __raw_save_flags\n\t"
+ "pushfq ; popq %q0"
+ : "=g" (flags)
+ : /* no input */
+ : "memory"
+ );
-/* interrupt control.. */
-#define raw_local_save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
-#define raw_local_irq_restore(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
+ return flags;
+}
+
+#define raw_local_save_flags(flags) \
+ do { (flags) = __raw_local_save_flags(); } while (0)
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+ __asm__ __volatile__(
+ "pushq %0 ; popfq"
+ : /* no output */
+ :"g" (flags)
+ :"memory", "cc"
+ );
+}
#ifdef CONFIG_X86_VSMP
-/* Interrupt control for VSMP architecture */
-#define raw_local_irq_disable() do { unsigned long flags; raw_local_save_flags(flags); raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0)
-#define raw_local_irq_enable() do { unsigned long flags; raw_local_save_flags(flags); raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0)
-
-#define raw_irqs_disabled_flags(flags) \
-({ \
- (flags & (1<<18)) || !(flags & (1<<9)); \
-})
-
-/* For spinlocks etc */
-#define raw_local_irq_save(x) do { raw_local_save_flags(x); raw_local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0)
-#else /* CONFIG_X86_VSMP */
-#define raw_local_irq_disable() __asm__ __volatile__("cli": : :"memory")
-#define raw_local_irq_enable() __asm__ __volatile__("sti": : :"memory")
-
-#define raw_irqs_disabled_flags(flags) \
-({ \
- !(flags & (1<<9)); \
-})
-
-/* For spinlocks etc */
-#define raw_local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# raw_local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
+
+/*
+ * Interrupt control for the VSMP architecture:
+ */
+
+static inline void raw_local_irq_disable(void)
+{
+ unsigned long flags = __raw_local_save_flags();
+
+ raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18));
+}
+
+static inline void raw_local_irq_enable(void)
+{
+ unsigned long flags = __raw_local_save_flags();
+
+ raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18));
+}
+
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & (1<<9)) || (flags & (1 << 18));
+}
+
+#else /* CONFIG_X86_VSMP */
+
+static inline void raw_local_irq_disable(void)
+{
+ __asm__ __volatile__("cli" : : : "memory");
+}
+
+static inline void raw_local_irq_enable(void)
+{
+ __asm__ __volatile__("sti" : : : "memory");
+}
+
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & (1 << 9));
+}
+
#endif
-#define raw_irqs_disabled() \
-({ \
- unsigned long flags; \
- raw_local_save_flags(flags); \
- raw_irqs_disabled_flags(flags); \
-})
+/*
+ * For spinlocks, etc.:
+ */
+
+static inline unsigned long __raw_local_irq_save(void)
+{
+ unsigned long flags = __raw_local_save_flags();
+
+ raw_local_irq_disable();
+
+ return flags;
+}
-/* used in the idle loop; sti takes one instruction cycle to complete */
-#define raw_safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
-/* used when interrupts are already enabled or to shutdown the processor */
-#define halt() __asm__ __volatile__("hlt": : :"memory")
+#define raw_local_irq_save(flags) \
+ do { (flags) = __raw_local_irq_save(); } while (0)
+
+static inline int raw_irqs_disabled(void)
+{
+ unsigned long flags = __raw_local_save_flags();
+
+ return raw_irqs_disabled_flags(flags);
+}
+
+/*
+ * Used in the idle loop; sti takes one instruction cycle
+ * to complete:
+ */
+static inline void raw_safe_halt(void)
+{
+ __asm__ __volatile__("sti; hlt" : : : "memory");
+}
+
+/*
+ * Used when interrupts are already enabled or to
+ * shutdown the processor:
+ */
+static inline void halt(void)
+{
+ __asm__ __volatile__("hlt": : :"memory");
+}
#else /* __ASSEMBLY__: */
-# define TRACE_IRQS_ON
-# define TRACE_IRQS_OFF
+# ifdef CONFIG_TRACE_IRQFLAGS
+# define TRACE_IRQS_ON call trace_hardirqs_on_thunk
+# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk
+# else
+# define TRACE_IRQS_ON
+# define TRACE_IRQS_OFF
+# endif
#endif
#endif
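
For context on how these primitives are consumed (none of this is part of the patch): as the header comment above says, the wrapped local_irq_*() APIs are built on top of the raw_local_irq_*() functions, and a typical caller saves the current flags, disables interrupts, and later restores the exact previous state. On VSMP, "interrupts disabled" is encoded as EFLAGS.AC (bit 18) set or EFLAGS.IF (bit 9) clear, which is why the VSMP raw_irqs_disabled_flags() tests both bits. A minimal, hypothetical caller sketch:

static void example_critical_section(void)	/* hypothetical, for illustration only */
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* flags = __raw_local_irq_save(): save, then disable */
	/* ... work that must not be interrupted ... */
	raw_local_irq_restore(flags);	/* re-enables only if interrupts were on before */
}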