path: root/include/asm-x86/smp_64.h
author    Thomas Gleixner <tglx@linutronix.de>    2008-01-30 13:30:36 +0100
committer Ingo Molnar <mingo@elte.hu>             2008-01-30 13:30:36 +0100
commit    ae9d983be1eefac4b5efad69a188e7ac89a75797 (patch)
tree      ecdf7ad736e1fe98dff2277649b573135d1381fd /include/asm-x86/smp_64.h
parent    c2805aa1d8ae51c7582d2ccbd736afa545cf5cc4 (diff)
x86: cleanup smp.h variants
Bring the smp.h variants into sync to prepare merging and paravirt support.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'include/asm-x86/smp_64.h')
-rw-r--r--  include/asm-x86/smp_64.h  133
1 file changed, 51 insertions, 82 deletions
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
index ab612b0ff27..2feddda91e1 100644
--- a/include/asm-x86/smp_64.h
+++ b/include/asm-x86/smp_64.h
@@ -1,130 +1,99 @@
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
-/*
- * We need the APIC definitions automatically as part of 'smp.h'
- */
-#include <linux/threads.h>
#include <linux/cpumask.h>
-#include <linux/bitops.h>
#include <linux/init.h>
-extern int disable_apic;
-#include <asm/mpspec.h>
+/*
+ * We need the APIC definitions automatically as part of 'smp.h'
+ */
#include <asm/apic.h>
#include <asm/io_apic.h>
-#include <asm/thread_info.h>
-
-#ifdef CONFIG_SMP
-
+#include <asm/mpspec.h>
#include <asm/pda.h>
+#include <asm/thread_info.h>
-struct pt_regs;
-
-extern cpumask_t cpu_present_mask;
-extern cpumask_t cpu_possible_map;
-extern cpumask_t cpu_online_map;
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_initialized;
-/*
- * Private routines/data
- */
-
+extern int smp_num_siblings;
+extern unsigned int num_processors;
+
extern void smp_alloc_memory(void);
-extern volatile unsigned long smp_invalidate_needed;
extern void lock_ipi_call_lock(void);
extern void unlock_ipi_call_lock(void);
-extern int smp_num_siblings;
-extern void smp_send_reschedule(int cpu);
+
extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
void *info, int wait);
-/*
- * cpu_sibling_map and cpu_core_map now live
- * in the per cpu area
- *
- * extern cpumask_t cpu_sibling_map[NR_CPUS];
- * extern cpumask_t cpu_core_map[NR_CPUS];
- */
+extern u8 __initdata x86_cpu_to_apicid_init[];
+extern void *x86_cpu_to_apicid_ptr;
+extern u8 bios_cpu_apicid[];
+
DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_t, cpu_core_map);
DECLARE_PER_CPU(u8, cpu_llc_id);
+DECLARE_PER_CPU(u8, x86_cpu_to_apicid);
-#define SMP_TRAMPOLINE_BASE 0x6000
-
-/*
- * On x86 all CPUs are mapped 1:1 to the APIC space.
- * This simplifies scheduling and IPI sending and
- * compresses data structures.
- */
-
-static inline int num_booting_cpus(void)
+static inline int cpu_present_to_apicid(int mps_cpu)
{
- return cpus_weight(cpu_callout_map);
+ if (mps_cpu < NR_CPUS)
+ return (int)bios_cpu_apicid[mps_cpu];
+ else
+ return BAD_APICID;
}
-#define raw_smp_processor_id() read_pda(cpunumber)
+#ifdef CONFIG_SMP
+
+#define SMP_TRAMPOLINE_BASE 0x6000
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern void prefill_possible_map(void);
-extern unsigned num_processors;
extern unsigned __cpuinitdata disabled_cpus;
-#define NO_PROC_ID 0xFF /* No processor magic marker */
-
-#endif /* CONFIG_SMP */
-
-#define safe_smp_processor_id() smp_processor_id()
+#define raw_smp_processor_id() read_pda(cpunumber)
+#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
-static inline int hard_smp_processor_id(void)
-{
- /* we don't want to mark this access volatile - bad code generation */
- return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
-}
+#define stack_smp_processor_id() \
+ ({ \
+ struct thread_info *ti; \
+ __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
+ ti->cpu; \
+})
/*
- * Some lowlevel functions might want to know about
- * the real APIC ID <-> CPU # mapping.
+ * On x86 all CPUs are mapped 1:1 to the APIC space. This simplifies
+ * scheduling and IPI sending and compresses data structures.
*/
-extern u8 __initdata x86_cpu_to_apicid_init[];
-extern void *x86_cpu_to_apicid_ptr;
-DECLARE_PER_CPU(u8, x86_cpu_to_apicid); /* physical ID */
-extern u8 bios_cpu_apicid[];
-
-static inline int cpu_present_to_apicid(int mps_cpu)
+static inline int num_booting_cpus(void)
{
- if (mps_cpu < NR_CPUS)
- return (int)bios_cpu_apicid[mps_cpu];
- else
- return BAD_APICID;
+ return cpus_weight(cpu_callout_map);
}
-#ifndef CONFIG_SMP
+extern void smp_send_reschedule(int cpu);
+
+#else /* CONFIG_SMP */
+
+extern unsigned int boot_cpu_id;
+#define cpu_physical_id(cpu) boot_cpu_id
#define stack_smp_processor_id() 0
-#define cpu_logical_map(x) (x)
-#else
-#include <asm/thread_info.h>
-#define stack_smp_processor_id() \
-({ \
- struct thread_info *ti; \
- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
- ti->cpu; \
-})
-#endif
+
+#endif /* !CONFIG_SMP */
+
+#define safe_smp_processor_id() smp_processor_id()
static __inline int logical_smp_processor_id(void)
{
/* we don't want to mark this access volatile - bad code generation */
- return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+ return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
+}
+
+static inline int hard_smp_processor_id(void)
+{
+ /* we don't want to mark this access volatile - bad code generation */
+ return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID));
}
-#ifdef CONFIG_SMP
-#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
-#else
-extern unsigned int boot_cpu_id;
-#define cpu_physical_id(cpu) boot_cpu_id
-#endif /* !CONFIG_SMP */
#endif
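
A note on the stack_smp_processor_id() macro carried over above: x86-64
kernel stacks are THREAD_SIZE bytes long and THREAD_SIZE-aligned, with
struct thread_info stored at the stack's lowest address, so masking the
current stack pointer with CURRENT_MASK (defined elsewhere in the kernel
as ~(THREAD_SIZE - 1)) recovers the thread_info of the running task
without going through the PDA. Below is a minimal user-space C sketch of
the same trick; THREAD_SIZE, the simulated stack, and the single cpu
field are illustrative stand-ins, not the kernel's actual definitions.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE  8192UL                 /* power-of-two stack size */
#define CURRENT_MASK (~(THREAD_SIZE - 1))   /* clears the offset bits  */

struct thread_info {
	int cpu;                            /* CPU this task runs on   */
};

int main(void)
{
	/* Simulate one kernel stack: THREAD_SIZE bytes, THREAD_SIZE-
	 * aligned, with thread_info at the lowest address. */
	void *stack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
	if (!stack)
		return 1;
	struct thread_info *ti = stack;
	ti->cpu = 3;

	/* Pretend %rsp points somewhere inside that stack ... */
	uintptr_t rsp = (uintptr_t)stack + THREAD_SIZE - 200;

	/* ... then the andq in the macro recovers thread_info. */
	struct thread_info *found = (void *)(rsp & CURRENT_MASK);
	printf("cpu = %d\n", found->cpu);   /* prints "cpu = 3" */

	free(stack);
	return 0;
}

Because the trick relies only on stack alignment, it works even when the
PDA cannot be used, which is presumably why the header keeps it alongside
the PDA-based raw_smp_processor_id().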