From 79f12614a6537cc3ac9ca4d1ea26f6e4f4a34aee Mon Sep 17 00:00:00 2001
From: Ravikiran G Thirumalai
Date: Wed, 11 Jan 2006 22:46:18 +0100
Subject: [PATCH] x86_64: Inclusion of ScaleMP vSMP architecture patches - vsmp_arch

Introduce the vSMP architecture to the kernel.

This patch:
1. Adds CONFIG_X86_VSMP
2. Adds machine-specific macros for local_irq_disable, local_irq_enable
   and irqs_disabled
3. Writes to the vSMP CTL device to indicate the kernel was compiled with
   CONFIG_X86_VSMP

Signed-off-by: Ravikiran Thirumalai
Signed-off-by: Shai Fultheim
Signed-off-by: Andi Kleen
Signed-off-by: Linus Torvalds
---
 include/asm-x86_64/system.h | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 80272190570..38c1e8a69c9 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -326,8 +326,25 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 /* interrupt control.. */
 #define local_save_flags(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
 #define local_irq_restore(x)	__asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
+
+#ifdef CONFIG_X86_VSMP
+/* Interrupt control for VSMP architecture */
+#define local_irq_disable()	do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0)
+#define local_irq_enable()	do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0)
+
+#define irqs_disabled()					\
+({							\
+	unsigned long flags;				\
+	local_save_flags(flags);			\
+	(flags & (1<<18)) || !(flags & (1<<9));		\
+})
+
+/* For spinlocks etc */
+#define local_irq_save(x)	do { local_save_flags(x); local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0)
+#else /* CONFIG_X86_VSMP */
 #define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
 #define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
+
 /* used in the idle loop; sti takes one instruction cycle to complete */
 #define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
 /* used when interrupts are already enabled or to shutdown the processor */
@@ -342,6 +359,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 
 /* For spinlocks etc */
 #define local_irq_save(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
+#endif
 
 void cpu_idle_wait(void);
 
-- 
cgit v1.2.3
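Note on the flag convention used above: the vSMP variants never execute cli/sti. Instead they rewrite two EFLAGS bits through local_save_flags/local_irq_restore: IF (bit 9, the ordinary hardware interrupt-enable flag) and AC (bit 18, alignment check), which the vSMP platform repurposes as an "interrupts logically disabled" marker. The sketch below is a minimal, hypothetical userspace model (not kernel code) of that bit arithmetic, useful only for checking the convention in isolation; the FLAG_* constants and vsmp_* helper names are invented for illustration and do not exist in the kernel.

/* Hypothetical userspace model of the vSMP flag convention.
 * Only the bit arithmetic is mirrored; no pushfq/popfq and no
 * vSMP CTL-device access is performed.
 */
#include <assert.h>
#include <stdio.h>

#define FLAG_IF (1UL << 9)   /* EFLAGS.IF: hardware interrupt enable     */
#define FLAG_AC (1UL << 18)  /* EFLAGS.AC: repurposed as "IRQs disabled" */

/* Mirrors: local_irq_restore((flags & ~(1 << 9)) | (1 << 18)) */
static unsigned long vsmp_irq_disable(unsigned long flags)
{
	return (flags & ~FLAG_IF) | FLAG_AC;
}

/* Mirrors: local_irq_restore((flags | (1 << 9)) & ~(1 << 18)) */
static unsigned long vsmp_irq_enable(unsigned long flags)
{
	return (flags | FLAG_IF) & ~FLAG_AC;
}

/* Mirrors: (flags & (1<<18)) || !(flags & (1<<9)) */
static int vsmp_irqs_disabled(unsigned long flags)
{
	return (flags & FLAG_AC) || !(flags & FLAG_IF);
}

int main(void)
{
	unsigned long flags = FLAG_IF;        /* start: IF set, AC clear   */

	assert(!vsmp_irqs_disabled(flags));   /* interrupts enabled        */

	flags = vsmp_irq_disable(flags);      /* like local_irq_disable()  */
	assert(vsmp_irqs_disabled(flags));

	flags = vsmp_irq_enable(flags);       /* like local_irq_enable()   */
	assert(!vsmp_irqs_disabled(flags));

	printf("vSMP flag convention behaves as expected\n");
	return 0;
}

The disabled test treats either a set AC bit or a cleared IF bit as "disabled", so it reports the right answer whether the flags word came from the vSMP macros or from a path that cleared IF directly.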