Diffstat (limited to 'arch/x86/mach-voyager/voyager_smp.c')
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c | 31
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index b87f8548e75..69371434b0c 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -29,15 +29,15 @@
#include <asm/arch_hooks.h>
/* TLB state -- visible externally, indexed physically */
-DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 };
/* CPU IRQ affinity -- set to all ones initially */
static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = ~0UL };
/* per CPU data structure (for /proc/cpuinfo et al), visible externally
* indexed physically */
-struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_data);
+DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
+EXPORT_PER_CPU_SYMBOL(cpu_info);
/* physical ID of the CPU used to boot the system */
unsigned char boot_cpu_id;
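
This first hunk moves two externally visible per-CPU structures off hand-rolled __cacheline_aligned arrays and onto the kernel's per-CPU infrastructure. A minimal sketch of the 2.6.24-era API being adopted (not part of the patch; TLBSTATE_OK comes from asm/tlbflush.h):

/* DEFINE_PER_CPU_SHARED_ALIGNED places the variable in the
 * cacheline-aligned per-CPU section, so each CPU's copy sits on
 * its own cache line and remote readers don't false-share it. */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 };

static void tlbstate_example(int cpu)
{
	/* per_cpu() reaches any CPU's instance by index ... */
	per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
	/* ... while __get_cpu_var() names the local CPU's own copy */
	__get_cpu_var(cpu_tlbstate).active_mm = &init_mm;
}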
@@ -389,7 +389,7 @@ find_smp_config(void)
/* The boot CPU must be extended */
voyager_extended_vic_processors = 1<<boot_cpu_id;
- /* initially, all of the first 8 cpu's can boot */
+ /* initially, all of the first 8 CPUs can boot */
voyager_allowed_boot_processors = 0xff;
/* set up everything for just this CPU, we can alter
* this as we start the other CPUs later */
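
Both masks here are bitmaps indexed by physical CPU id, so 0xff (bits 0-7) matches the comment that the first 8 CPUs may boot. A hypothetical helper, purely to illustrate the encoding:

static int cpu_may_boot(__u8 phys_id)
{
	/* bit N of the mask stands for physical CPU N */
	return (voyager_allowed_boot_processors >> phys_id) & 1;
}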
@@ -430,7 +430,7 @@ find_smp_config(void)
void __init
smp_store_cpu_info(int id)
{
- struct cpuinfo_x86 *c=&cpu_data[id];
+ struct cpuinfo_x86 *c = &cpu_data(id);
*c = boot_cpu_data;
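
With cpu_data converted from an array to per-CPU data, cpu_data(id) is now an accessor macro rather than array indexing. Roughly how the matching asm/processor header wires it up in this patch series (a sketch, not this file):

#ifdef CONFIG_SMP
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#define current_cpu_data	__get_cpu_var(cpu_info)
#else
#define cpu_data(cpu)		boot_cpu_data
#define current_cpu_data	boot_cpu_data
#endif

On UP builds everything collapses onto boot_cpu_data, which is why the callers below can use cpu_data(cpu) unconditionally.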
@@ -442,8 +442,8 @@ static __u32 __init
setup_trampoline(void)
{
/* these two are global symbols in trampoline.S */
- extern __u8 trampoline_end[];
- extern __u8 trampoline_data[];
+ extern const __u8 trampoline_end[];
+ extern const __u8 trampoline_data[];
memcpy((__u8 *)trampoline_base, trampoline_data,
trampoline_end - trampoline_data);
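
trampoline_data and trampoline_end are section labels defined in trampoline.S; const-qualifying the declarations documents that the kernel only ever reads the master copy, relocating it with memcpy(). A sketch of the size computation the copy relies on (helper name hypothetical):

static size_t __init trampoline_size(void)
{
	/* the two labels are addresses bracketing the real-mode
	 * blob, so their difference is its size in bytes */
	return trampoline_end - trampoline_data;
}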
@@ -634,7 +634,7 @@ do_boot_cpu(__u8 cpu)
cpu, smp_processor_id()));
printk("CPU%d: ", cpu);
- print_cpu_info(&cpu_data[cpu]);
+ print_cpu_info(&cpu_data(cpu));
wmb();
cpu_set(cpu, cpu_callout_map);
cpu_set(cpu, cpu_present_map);
@@ -683,7 +683,7 @@ smp_boot_cpus(void)
*/
smp_store_cpu_info(boot_cpu_id);
printk("CPU%d: ", boot_cpu_id);
- print_cpu_info(&cpu_data[boot_cpu_id]);
+ print_cpu_info(&cpu_data(boot_cpu_id));
if(is_cpu_quad()) {
/* booting on a Quad CPU */
@@ -714,7 +714,7 @@ smp_boot_cpus(void)
unsigned long bogosum = 0;
for (i = 0; i < NR_CPUS; i++)
if (cpu_isset(i, cpu_online_map))
- bogosum += cpu_data[i].loops_per_jiffy;
+ bogosum += cpu_data(i).loops_per_jiffy;
printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
cpucount+1,
bogosum/(500000/HZ),
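
The hunk cuts the printk mid-argument; the visible part is the integer BogoMIPS conversion. loops_per_jiffy counts delay-loop iterations per timer tick, and one BogoMIPS is defined as 500000 iterations per second, hence the division. A sketch, assuming the truncated fractional argument follows the usual x86 pattern (an assumption, since the hunk cuts it off):

unsigned long whole = bogosum / (500000 / HZ);		/* integer part */
unsigned long frac  = (bogosum / (5000 / HZ)) % 100;	/* two decimal places */

For example, bogosum = 2500000 at HZ = 250 gives 2500000 / 2000 = 1250 with a zero remainder term, printed as "1250.00 BogoMIPS".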
@@ -1010,7 +1010,7 @@ static struct call_data_struct * call_data;
/* execute a thread on a new CPU. The function to be called must be
* previously set up. This is used to schedule a function for
- * execution on all CPU's - set up the function then broadcast a
+ * execution on all CPUs - set up the function then broadcast a
* function_interrupt CPI to come here on each CPU */
static void
smp_call_function_interrupt(void)
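
The comment above describes the broadcast protocol: the sender fills in a shared descriptor, then fires a function_interrupt CPI at every target CPU. The descriptor's shape, as used by x86 SMP code of this vintage (a sketch; this file's definition may differ slightly):

struct call_data_struct {
	void (*func)(void *info);	/* what each CPU should run */
	void *info;			/* its argument */
	atomic_t started;		/* CPUs that have entered func */
	atomic_t finished;		/* CPUs that returned, if waiting */
	int wait;			/* sender blocks until all finish */
};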
@@ -1037,6 +1037,7 @@ smp_call_function_interrupt(void)
*/
irq_enter();
(*func)(info);
+ __get_cpu_var(irq_stat).irq_call_count++;
irq_exit();
if (wait) {
mb();
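
The one added line makes Voyager account function-call CPIs the way the default x86 subarchitecture does: irq_stat is a per-CPU irq_cpustat_t, and each IPI flavour bumps its own counter inside the irq_enter()/irq_exit() window. The pattern, sketched with a hypothetical handler name:

static void some_ipi_handler(void)
{
	irq_enter();
	__get_cpu_var(irq_stat).irq_call_count++;	/* per-CPU statistic */
	/* ... actual IPI work ... */
	irq_exit();
}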
@@ -1094,7 +1095,7 @@ voyager_smp_call_function_mask (cpumask_t cpumask,
* CPI here. We don't use this actually for counting so losing
* ticks doesn't matter
*
- * FIXME: For those CPU's which actually have a local APIC, we could
+ * FIXME: For those CPUs which actually have a local APIC, we could
* try to use it to trigger this interrupt instead of having to
* broadcast the timer tick. Unfortunately, all my pentium DYADs have
* no local APIC, so I can't do this
@@ -1286,7 +1287,7 @@ smp_local_timer_interrupt(void)
/*
* We take the 'long' return path, and there every subsystem
- * grabs the apropriate locks (kernel lock/ irq lock).
+ * grabs the appropriate locks (kernel lock/ irq lock).
*
* we might want to decouple profiling from the 'long path',
* and do the profiling totally in assembly.
@@ -1758,7 +1759,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
if(cpus_addr(mask)[0] == 0)
- /* can't have no cpu's to accept the interrupt -- extremely
+ /* can't have no CPUs to accept the interrupt -- extremely
* bad things will happen */
return;
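
cpus_addr() exposes the unsigned long array backing a cpumask_t (the old cpumask API); Voyager's NR_CPUS is small enough that word 0 covers every CPU bit, so a zero word means no CPU could accept the interrupt. The guard in isolation, as a sketch:

cpumask_t mask = CPU_MASK_NONE;
if (cpus_addr(mask)[0] == 0)
	return;		/* an interrupt must have at least one target CPU */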
@@ -1790,7 +1791,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
}
/* this is magic, we now have the correct affinity maps, so
* enable the interrupt. This will send an enable CPI to
- * those cpu's who need to enable it in their local masks,
+ * those CPUs who need to enable it in their local masks,
* causing them to correct for the new affinity. If the
* interrupt is currently globally disabled, it will simply be
* disabled again as it comes in (voyager lazy disable). If