Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/irqinit_32.c         | 12
-rw-r--r--  arch/x86/mach-default/setup.c        | 12
-rw-r--r--  arch/x86/mach-voyager/setup.c        | 12
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c  | 25
4 files changed, 35 insertions(+), 26 deletions(-)
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 1507ad4e674..10a09c2f182 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -78,15 +78,6 @@ void __init init_ISA_irqs(void)
 	}
 }
 
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
-	.handler = no_action,
-	.mask = CPU_MASK_NONE,
-	.name = "cascade",
-};
-
 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
 	[0 ... IRQ0_VECTOR - 1] = -1,
 	[IRQ0_VECTOR] = 0,
@@ -178,9 +169,6 @@ void __init native_init_IRQ(void)
 	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 #endif
 
-	if (!acpi_ioapic)
-		setup_irq(2, &irq2);
-
 	/* setup after call gates are initialised (usually add in
 	 * the architecture specific gates)
 	 */
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c
index df167f26562..a265a7c6319 100644
--- a/arch/x86/mach-default/setup.c
+++ b/arch/x86/mach-default/setup.c
@@ -38,6 +38,15 @@ void __init pre_intr_init_hook(void)
 	init_ISA_irqs();
 }
 
+/*
+ * IRQ2 is cascade interrupt to second interrupt controller
+ */
+static struct irqaction irq2 = {
+	.handler = no_action,
+	.mask = CPU_MASK_NONE,
+	.name = "cascade",
+};
+
 /**
  * intr_init_hook - post gate setup interrupt initialisation
  *
@@ -53,6 +62,9 @@ void __init intr_init_hook(void)
 		if (x86_quirks->arch_intr_init())
 			return;
 	}
+	if (!acpi_ioapic)
+		setup_irq(2, &irq2);
+
 }
 
 /**
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
index a580b9562e7..d914a7996a6 100644
--- a/arch/x86/mach-voyager/setup.c
+++ b/arch/x86/mach-voyager/setup.c
@@ -33,13 +33,23 @@ void __init intr_init_hook(void)
 	setup_irq(2, &irq2);
 }
 
-void __init pre_setup_arch_hook(void)
+static void voyager_disable_tsc(void)
 {
 	/* Voyagers run their CPUs from independent clocks, so disable
 	 * the TSC code because we can't sync them */
 	setup_clear_cpu_cap(X86_FEATURE_TSC);
 }
 
+void __init pre_setup_arch_hook(void)
+{
+	voyager_disable_tsc();
+}
+
+void __init pre_time_init_hook(void)
+{
+	voyager_disable_tsc();
+}
+
 void __init trap_init_hook(void)
 {
 }
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 9840b7ec749..7ffcdeec463 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -81,7 +81,7 @@ static void enable_local_vic_irq(unsigned int irq);
 static void disable_local_vic_irq(unsigned int irq);
 static void before_handle_vic_irq(unsigned int irq);
 static void after_handle_vic_irq(unsigned int irq);
-static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
+static void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask);
 static void ack_vic_irq(unsigned int irq);
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);
@@ -211,8 +211,6 @@ static __u32 cpu_booted_map;
 static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
 /* This is for the new dynamic CPU boot code */
-cpumask_t cpu_callin_map = CPU_MASK_NONE;
-cpumask_t cpu_callout_map = CPU_MASK_NONE;
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
@@ -378,7 +376,7 @@ void __init find_smp_config(void)
 	    cpus_addr(phys_cpu_present_map)[0] |=
 	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
 				       3) << 24;
-	cpu_possible_map = phys_cpu_present_map;
+	init_cpu_possible(&phys_cpu_present_map);
 	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
 	       cpus_addr(phys_cpu_present_map)[0]);
 	/* Here we set up the VIC to enable SMP */
@@ -1599,16 +1597,16 @@ static void after_handle_vic_irq(unsigned int irq)
  * change the mask and then do an interrupt enable CPI to re-enable on
  * the selected processors */
 
-void set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
+void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	/* Only extended processors handle interrupts */
 	unsigned long real_mask;
 	unsigned long irq_mask = 1 << irq;
 	int cpu;
 
-	real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
+	real_mask = cpus_addr(*mask)[0] & voyager_extended_vic_processors;
 
-	if (cpus_addr(mask)[0] == 0)
+	if (cpus_addr(*mask)[0] == 0)
 		/* can't have no CPUs to accept the interrupt -- extremely
 		 * bad things will happen */
 		return;
@@ -1750,10 +1748,11 @@ static void __cpuinit voyager_smp_prepare_boot_cpu(void)
 	init_gdt(smp_processor_id());
 	switch_to_new_gdt();
 
-	cpu_set(smp_processor_id(), cpu_online_map);
-	cpu_set(smp_processor_id(), cpu_callout_map);
-	cpu_set(smp_processor_id(), cpu_possible_map);
-	cpu_set(smp_processor_id(), cpu_present_map);
+	cpu_online_map = cpumask_of_cpu(smp_processor_id());
+	cpu_callout_map = cpumask_of_cpu(smp_processor_id());
+	cpu_callin_map = CPU_MASK_NONE;
+	cpu_present_map = cpumask_of_cpu(smp_processor_id());
+
 }
 
 static int __cpuinit voyager_cpu_up(unsigned int cpu)
@@ -1783,9 +1782,9 @@ void __init smp_setup_processor_id(void)
 	x86_write_percpu(cpu_number, hard_smp_processor_id());
 }
 
-static void voyager_send_call_func(cpumask_t callmask)
+static void voyager_send_call_func(const struct cpumask *callmask)
 {
-	__u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+	__u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id());
 	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
 }
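Note: the recurring change in voyager_smp.c, where cpumask_t parameters become const struct cpumask *, follows the kernel-wide cpumask conversion that avoids copying a potentially large CPU bitmap on every call. Below is a minimal standalone sketch of why the pointer form is preferred; demo_cpumask, send_cpi_byval, and send_cpi_byptr are illustrative names for this sketch only, not kernel code.

#include <stdio.h>
#include <string.h>

#define DEMO_NR_CPUS 64

/* Illustrative stand-in for the kernel's struct cpumask: one bit per
 * possible CPU.  With a large NR_CPUS this bitmap gets big, which is
 * why passing it by value on every call is wasteful. */
struct demo_cpumask {
	unsigned long bits[DEMO_NR_CPUS / (8 * sizeof(unsigned long))];
};

/* Old style (what the patch removes): the whole bitmap is copied
 * onto the callee's stack at every call. */
static void send_cpi_byval(struct demo_cpumask mask)
{
	printf("by value:   word 0 = 0x%lx\n", mask.bits[0]);
}

/* New style (what the patch introduces): only a pointer crosses the
 * call, and const documents that the callee must not modify the mask.
 * The diff's cpus_addr(*mask)[0] dereference corresponds to
 * mask->bits[0] here. */
static void send_cpi_byptr(const struct demo_cpumask *mask)
{
	printf("by pointer: word 0 = 0x%lx\n", mask->bits[0]);
}

int main(void)
{
	struct demo_cpumask mask;

	memset(&mask, 0, sizeof(mask));
	mask.bits[0] = 0x5;		/* CPUs 0 and 2 */

	send_cpi_byval(mask);		/* copies sizeof(mask) bytes */
	send_cpi_byptr(&mask);		/* copies one pointer */
	return 0;
}

The same reasoning applies to set_vic_irq_affinity and voyager_send_call_func above: with large NR_CPUS configurations a cpumask_t can run to hundreds of bytes, so each by-value call copied that much stack.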