Diffstat (limited to 'arch/s390/kernel/smp.c')
-rw-r--r-- | arch/s390/kernel/smp.c | 575
1 file changed, 422 insertions(+), 153 deletions(-)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 264ea906db4..aa37fa15451 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -42,6 +42,7 @@
 #include <asm/tlbflush.h>
 #include <asm/timer.h>
 #include <asm/lowcore.h>
+#include <asm/sclp.h>
 #include <asm/cpu.h>
 
 /*
@@ -53,11 +54,27 @@ EXPORT_SYMBOL(lowcore_ptr);
 cpumask_t cpu_online_map = CPU_MASK_NONE;
 EXPORT_SYMBOL(cpu_online_map);
 
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
+cpumask_t cpu_possible_map = CPU_MASK_ALL;
 EXPORT_SYMBOL(cpu_possible_map);
 
 static struct task_struct *current_set[NR_CPUS];
 
+static u8 smp_cpu_type;
+static int smp_use_sigp_detection;
+
+enum s390_cpu_state {
+	CPU_STATE_STANDBY,
+	CPU_STATE_CONFIGURED,
+};
+
+#ifdef CONFIG_HOTPLUG_CPU
+static DEFINE_MUTEX(smp_cpu_state_mutex);
+#endif
+static int smp_cpu_state[NR_CPUS];
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
+
 static void smp_ext_bitcall(int, ec_bit_sig);
 
 /*
@@ -193,6 +210,33 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on.  Must not include the current cpu.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int
+smp_call_function_mask(cpumask_t mask,
+			void (*func)(void *), void *info,
+			int wait)
+{
+	preempt_disable();
+	__smp_call_function_map(func, info, 0, wait, mask);
+	preempt_enable();
+	return 0;
+}
+EXPORT_SYMBOL(smp_call_function_mask);
+
 void smp_send_stop(void)
 {
 	int cpu, rc;
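The new smp_call_function_mask() mirrors smp_call_function_single() but takes an explicit cpumask of target cpus. A minimal caller sketch (hypothetical code, not part of this patch; it assumes only the cpumask helpers of this kernel generation):

	static void drain_local_work(void *info)
	{
		/* Runs on each target cpu from the external call
		 * interrupt: must be fast and non-blocking. */
	}

	static void drain_remote_work(void)
	{
		cpumask_t mask;

		preempt_disable();	/* keep our cpu id stable */
		mask = cpu_online_map;
		cpu_clear(smp_processor_id(), mask);	/* must not include self */
		smp_call_function_mask(mask, drain_local_work, NULL, 1);
		preempt_enable();
	}

With wait=1 the caller spins until every target cpu has run the function, which is why the kernel-doc forbids calling this with interrupts disabled or from interrupt context.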
@@ -217,33 +261,6 @@ void smp_send_stop(void)
 }
 
 /*
- * Reboot, halt and power_off routines for SMP.
- */
-void machine_restart_smp(char *__unused)
-{
-	smp_send_stop();
-	do_reipl();
-}
-
-void machine_halt_smp(void)
-{
-	smp_send_stop();
-	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
-		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
-	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
-	for (;;);
-}
-
-void machine_power_off_smp(void)
-{
-	smp_send_stop();
-	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
-		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
-	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
-	for (;;);
-}
-
-/*
  * This is the main routine where commands issued by other
  * cpus are handled.
  */
@@ -355,6 +372,13 @@ void smp_ctl_clear_bit(int cr, int bit)
 }
 EXPORT_SYMBOL(smp_ctl_clear_bit);
 
+/*
+ * In early ipl state a temporary logical cpu number is needed, so the sigp
+ * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
+ * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
+ */
+#define CPU_INIT_NO	1
+
 #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
 
 /*
@@ -375,9 +399,10 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
 		       "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
 		return;
 	}
-	zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area));
-	__cpu_logical_map[1] = (__u16) phy_cpu;
-	while (signal_processor(1, sigp_stop_and_store_status) == sigp_busy)
+	zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
+	__cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
+	while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
+	       sigp_busy)
 		cpu_relax();
 	memcpy(zfcpdump_save_areas[cpu],
 	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
@@ -397,32 +422,155 @@
 static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
 #endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */
 
-/*
- * Lets check how many CPUs we have.
- */
-static unsigned int __init smp_count_cpus(void)
+static int cpu_stopped(int cpu)
 {
-	unsigned int cpu, num_cpus;
-	__u16 boot_cpu_addr;
+	__u32 status;
 
-	/*
-	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
-	 */
+	/* Check for stopped state */
+	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
+	    sigp_status_stored) {
+		if (status & 0x40)
+			return 1;
+	}
+	return 0;
+}
+
+static int cpu_known(int cpu_id)
+{
+	int cpu;
+
+	for_each_present_cpu(cpu) {
+		if (__cpu_logical_map[cpu] == cpu_id)
+			return 1;
+	}
+	return 0;
+}
+
+static int smp_rescan_cpus_sigp(cpumask_t avail)
+{
+	int cpu_id, logical_cpu;
+
+	logical_cpu = first_cpu(avail);
+	if (logical_cpu == NR_CPUS)
+		return 0;
+	for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
+		if (cpu_known(cpu_id))
+			continue;
+		__cpu_logical_map[logical_cpu] = cpu_id;
+		if (!cpu_stopped(logical_cpu))
+			continue;
+		cpu_set(logical_cpu, cpu_present_map);
+		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
+		logical_cpu = next_cpu(logical_cpu, avail);
+		if (logical_cpu == NR_CPUS)
+			break;
+	}
+	return 0;
+}
+
+static int smp_rescan_cpus_sclp(cpumask_t avail)
+{
+	struct sclp_cpu_info *info;
+	int cpu_id, logical_cpu, cpu;
+	int rc;
+
+	logical_cpu = first_cpu(avail);
+	if (logical_cpu == NR_CPUS)
+		return 0;
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+	rc = sclp_get_cpu_info(info);
+	if (rc)
+		goto out;
+	for (cpu = 0; cpu < info->combined; cpu++) {
+		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
+			continue;
+		cpu_id = info->cpu[cpu].address;
+		if (cpu_known(cpu_id))
+			continue;
+		__cpu_logical_map[logical_cpu] = cpu_id;
+		cpu_set(logical_cpu, cpu_present_map);
+		if (cpu >= info->configured)
+			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
+		else
+			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
+		logical_cpu = next_cpu(logical_cpu, avail);
+		if (logical_cpu == NR_CPUS)
+			break;
+	}
+out:
+	kfree(info);
+	return rc;
+}
+
+static int smp_rescan_cpus(void)
+{
+	cpumask_t avail;
+
+	cpus_xor(avail, cpu_possible_map, cpu_present_map);
+	if (smp_use_sigp_detection)
+		return smp_rescan_cpus_sigp(avail);
+	else
+		return smp_rescan_cpus_sclp(avail);
+}
+
+static void __init smp_detect_cpus(void)
+{
+	unsigned int cpu, c_cpus, s_cpus;
+	struct sclp_cpu_info *info;
+	u16 boot_cpu_addr, cpu_addr;
+
+	c_cpus = 1;
+	s_cpus = 0;
 	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
-	current_thread_info()->cpu = 0;
-	num_cpus = 1;
-	for (cpu = 0; cpu <= 65535; cpu++) {
-		if ((__u16) cpu == boot_cpu_addr)
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		panic("smp_detect_cpus failed to allocate memory\n");
+	/* Use sigp detection algorithm if sclp doesn't work. */
+	if (sclp_get_cpu_info(info)) {
+		smp_use_sigp_detection = 1;
+		for (cpu = 0; cpu <= 65535; cpu++) {
+			if (cpu == boot_cpu_addr)
+				continue;
+			__cpu_logical_map[CPU_INIT_NO] = cpu;
+			if (!cpu_stopped(CPU_INIT_NO))
+				continue;
+			smp_get_save_area(c_cpus, cpu);
+			c_cpus++;
+		}
+		goto out;
+	}
+
+	if (info->has_cpu_type) {
+		for (cpu = 0; cpu < info->combined; cpu++) {
+			if (info->cpu[cpu].address == boot_cpu_addr) {
+				smp_cpu_type = info->cpu[cpu].type;
+				break;
+			}
+		}
+	}
+
+	for (cpu = 0; cpu < info->combined; cpu++) {
+		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
+			continue;
+		cpu_addr = info->cpu[cpu].address;
+		if (cpu_addr == boot_cpu_addr)
 			continue;
-		__cpu_logical_map[1] = (__u16) cpu;
-		if (signal_processor(1, sigp_sense) == sigp_not_operational)
+		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
+		if (!cpu_stopped(CPU_INIT_NO)) {
+			s_cpus++;
 			continue;
-		smp_get_save_area(num_cpus, cpu);
-		num_cpus++;
+		}
+		smp_get_save_area(c_cpus, cpu_addr);
+		c_cpus++;
 	}
-	printk("Detected %d CPU's\n", (int) num_cpus);
-	printk("Boot cpu address %2X\n", boot_cpu_addr);
-	return num_cpus;
+out:
+	kfree(info);
+	printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus);
+	get_online_cpus();
+	smp_rescan_cpus();
+	put_online_cpus();
 }
 
 /*
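Note the split between detection and rescan: smp_detect_cpus() only senses what is out there (preferring sclp, which also reports standby cpus and the cpu type, and falling back to a sigp sense loop over all 65536 cpu addresses, which can only find stopped cpus), while smp_rescan_cpus() binds each newly found physical cpu address to a free logical slot. The free slots are computed with cpus_xor(avail, cpu_possible_map, cpu_present_map): with cpu_possible_map now defaulting to CPU_MASK_ALL and only the boot cpu present, avail initially covers logical cpus 1..NR_CPUS-1.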
@@ -453,8 +601,6 @@ int __cpuinit start_secondary(void *cpuvoid)
 	return 0;
 }
 
-DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
-
 static void __init smp_create_idle(unsigned int cpu)
 {
 	struct task_struct *p;
@@ -470,37 +616,82 @@ static void __init smp_create_idle(unsigned int cpu)
 	spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock);
 }
 
-static int cpu_stopped(int cpu)
+static int __cpuinit smp_alloc_lowcore(int cpu)
 {
-	__u32 status;
+	unsigned long async_stack, panic_stack;
+	struct _lowcore *lowcore;
+	int lc_order;
+
+	lc_order = sizeof(long) == 8 ? 1 : 0;
+	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
+	if (!lowcore)
+		return -ENOMEM;
+	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+	if (!async_stack)
+		goto out_async_stack;
+	panic_stack = __get_free_page(GFP_KERNEL);
+	if (!panic_stack)
+		goto out_panic_stack;
+
+	*lowcore = S390_lowcore;
+	lowcore->async_stack = async_stack + ASYNC_SIZE;
+	lowcore->panic_stack = panic_stack + PAGE_SIZE;
 
-	/* Check for stopped state */
-	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
-	    sigp_status_stored) {
-		if (status & 0x40)
-			return 1;
+#ifndef CONFIG_64BIT
+	if (MACHINE_HAS_IEEE) {
+		unsigned long save_area;
+
+		save_area = get_zeroed_page(GFP_KERNEL);
+		if (!save_area)
+			goto out_save_area;
+		lowcore->extended_save_area_addr = (u32) save_area;
 	}
+#endif
+	lowcore_ptr[cpu] = lowcore;
 	return 0;
+
+#ifndef CONFIG_64BIT
+out_save_area:
+	free_page(panic_stack);
+#endif
+out_panic_stack:
+	free_pages(async_stack, ASYNC_ORDER);
+out_async_stack:
+	free_pages((unsigned long) lowcore, lc_order);
+	return -ENOMEM;
 }
 
-/* Upping and downing of CPUs */
+#ifdef CONFIG_HOTPLUG_CPU
+static void smp_free_lowcore(int cpu)
+{
+	struct _lowcore *lowcore;
+	int lc_order;
+
+	lc_order = sizeof(long) == 8 ? 1 : 0;
+	lowcore = lowcore_ptr[cpu];
+#ifndef CONFIG_64BIT
+	if (MACHINE_HAS_IEEE)
+		free_page((unsigned long) lowcore->extended_save_area_addr);
+#endif
+	free_page(lowcore->panic_stack - PAGE_SIZE);
+	free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
+	free_pages((unsigned long) lowcore, lc_order);
+	lowcore_ptr[cpu] = NULL;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
 
-int __cpu_up(unsigned int cpu)
+/* Upping and downing of CPUs */
+int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle;
 	struct _lowcore *cpu_lowcore;
 	struct stack_frame *sf;
 	sigp_ccode ccode;
-	int curr_cpu;
 
-	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
-		__cpu_logical_map[cpu] = (__u16) curr_cpu;
-		if (cpu_stopped(cpu))
-			break;
-	}
-
-	if (!cpu_stopped(cpu))
-		return -ENODEV;
+	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
+		return -EIO;
+	if (smp_alloc_lowcore(cpu))
+		return -ENOMEM;
 
 	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
 				   cpu, sigp_set_prefix);
@@ -515,6 +706,7 @@ int __cpu_up(unsigned int cpu)
 	cpu_lowcore = lowcore_ptr[cpu];
 	cpu_lowcore->kernel_stack = (unsigned long)
 		task_stack_page(idle) + THREAD_SIZE;
+	cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
 	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
 				     - sizeof(struct pt_regs)
 				     - sizeof(struct stack_frame));
@@ -528,6 +720,8 @@ int __cpu_up(unsigned int cpu)
 	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
 	cpu_lowcore->current_task = (unsigned long) idle;
 	cpu_lowcore->cpu_data.cpu_nr = cpu;
+	cpu_lowcore->softirq_pending = 0;
+	cpu_lowcore->ext_call_fast = 0;
 	eieio();
 
 	while (signal_processor(cpu, sigp_restart) == sigp_busy)
@@ -538,44 +732,20 @@ int __cpu_up(unsigned int cpu)
 	return 0;
 }
 
-static unsigned int __initdata additional_cpus;
-static unsigned int __initdata possible_cpus;
-
-void __init smp_setup_cpu_possible_map(void)
+static int __init setup_possible_cpus(char *s)
 {
-	unsigned int phy_cpus, pos_cpus, cpu;
-
-	phy_cpus = smp_count_cpus();
-	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);
-
-	if (possible_cpus)
-		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);
+	int pcpus, cpu;
 
-	for (cpu = 0; cpu < pos_cpus; cpu++)
+	pcpus = simple_strtoul(s, NULL, 0);
+	cpu_possible_map = cpumask_of_cpu(0);
+	for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++)
 		cpu_set(cpu, cpu_possible_map);
-
-	phy_cpus = min(phy_cpus, pos_cpus);
-
-	for (cpu = 0; cpu < phy_cpus; cpu++)
-		cpu_set(cpu, cpu_present_map);
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-static int __init setup_additional_cpus(char *s)
-{
-	additional_cpus = simple_strtoul(s, NULL, 0);
-	return 0;
-}
-early_param("additional_cpus", setup_additional_cpus);
-
-static int __init setup_possible_cpus(char *s)
-{
-	possible_cpus = simple_strtoul(s, NULL, 0);
 	return 0;
 }
 early_param("possible_cpus", setup_possible_cpus);
 
+#ifdef CONFIG_HOTPLUG_CPU
+
 int __cpu_disable(void)
 {
 	struct ec_creg_mask_parms cr_parms;
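Since cpu_possible_map now defaults to CPU_MASK_ALL, the former additional_cpus= parameter is gone and possible_cpus= caps the map directly. Booting with, for example:

	possible_cpus=4

leaves logical cpus 0-3 possible, i.e. at most three further cpus (configured or standby) can ever be added next to the boot cpu.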
@@ -612,7 +782,8 @@ void __cpu_die(unsigned int cpu)
 	/* Wait until target cpu is down */
 	while (!smp_cpu_not_running(cpu))
 		cpu_relax();
-	printk("Processor %d spun down\n", cpu);
+	smp_free_lowcore(cpu);
+	printk(KERN_INFO "Processor %d spun down\n", cpu);
 }
 
 void cpu_die(void)
@@ -625,49 +796,19 @@ void cpu_die(void)
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
-/*
- * Cycle through the processors and setup structures.
- */
-
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-	unsigned long stack;
 	unsigned int cpu;
-	int i;
+
+	smp_detect_cpus();
 
 	/* request the 0x1201 emergency signal external interrupt */
 	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
 		panic("Couldn't request external interrupt 0x1201");
 	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
-	/*
-	 * Initialize prefix pages and stacks for all possible cpus
-	 */
 	print_cpu_info(&S390_lowcore.cpu_data);
+	smp_alloc_lowcore(smp_processor_id());
 
-	for_each_possible_cpu(i) {
-		lowcore_ptr[i] = (struct _lowcore *)
-			__get_free_pages(GFP_KERNEL | GFP_DMA,
-					 sizeof(void*) == 8 ? 1 : 0);
-		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
-		if (!lowcore_ptr[i] || !stack)
-			panic("smp_boot_cpus failed to allocate memory\n");
-
-		*(lowcore_ptr[i]) = S390_lowcore;
-		lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
-		stack = __get_free_pages(GFP_KERNEL, 0);
-		if (!stack)
-			panic("smp_boot_cpus failed to allocate memory\n");
-		lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
-#ifndef CONFIG_64BIT
-		if (MACHINE_HAS_IEEE) {
-			lowcore_ptr[i]->extended_save_area_addr =
-				(__u32) __get_free_pages(GFP_KERNEL, 0);
-			if (!lowcore_ptr[i]->extended_save_area_addr)
-				panic("smp_boot_cpus failed to "
-				      "allocate memory\n");
-		}
-#endif
-	}
 #ifndef CONFIG_64BIT
 	if (MACHINE_HAS_IEEE)
 		ctl_set_bit(14, 29); /* enable extended save area */
@@ -683,15 +824,17 @@ void __init smp_prepare_boot_cpu(void)
 {
 	BUG_ON(smp_processor_id() != 0);
 
+	current_thread_info()->cpu = 0;
+	cpu_set(0, cpu_present_map);
 	cpu_set(0, cpu_online_map);
 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
 	current_set[0] = current;
+	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
 	spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-	cpu_present_map = cpu_possible_map;
 }
 
 /*
@@ -705,7 +848,79 @@ int setup_profiling_timer(unsigned int multiplier)
 	return 0;
 }
 
-static DEFINE_PER_CPU(struct cpu, cpu_devices);
+#ifdef CONFIG_HOTPLUG_CPU
+static ssize_t cpu_configure_show(struct sys_device *dev, char *buf)
+{
+	ssize_t count;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
+	mutex_unlock(&smp_cpu_state_mutex);
+	return count;
+}
+
+static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf,
+				   size_t count)
+{
+	int cpu = dev->id;
+	int val, rc;
+	char delim;
+
+	if (sscanf(buf, "%d %c", &val, &delim) != 1)
+		return -EINVAL;
+	if (val != 0 && val != 1)
+		return -EINVAL;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	get_online_cpus();
+	rc = -EBUSY;
+	if (cpu_online(cpu))
+		goto out;
+	rc = 0;
+	switch (val) {
+	case 0:
+		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
+			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
+			if (!rc)
+				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
+		}
+		break;
+	case 1:
+		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
+			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
+			if (!rc)
+				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
+		}
+		break;
+	default:
+		break;
+	}
+out:
+	put_online_cpus();
+	mutex_unlock(&smp_cpu_state_mutex);
+	return rc ? rc : count;
+}
+static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static ssize_t show_cpu_address(struct sys_device *dev, char *buf)
+{
+	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
+}
+static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
+
+
+static struct attribute *cpu_common_attrs[] = {
+#ifdef CONFIG_HOTPLUG_CPU
+	&attr_configure.attr,
+#endif
+	&attr_address.attr,
+	NULL,
+};
+
+static struct attribute_group cpu_common_attr_group = {
+	.attrs = cpu_common_attrs,
+};
 
 static ssize_t show_capability(struct sys_device *dev, char *buf)
 {
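With the usual sysdev layout, the configure attribute appears per cpu as /sys/devices/system/cpu/cpuN/configure: reading it returns 1 (configured) or 0 (standby), and writing 0 or 1 calls sclp_cpu_deconfigure()/sclp_cpu_configure() on the cpu's physical address and updates smp_cpu_state. A cpu has to be taken offline first, hence the -EBUSY check, e.g.:

	echo 0 > /sys/devices/system/cpu/cpu1/online
	echo 0 > /sys/devices/system/cpu/cpu1/configure

The read-only address attribute next to it exposes __cpu_logical_map[N], the physical cpu address backing logical cpu N.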
@@ -750,15 +965,15 @@ static ssize_t show_idle_time(struct sys_device *dev, char *buf)
 }
 static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
 
-static struct attribute *cpu_attrs[] = {
+static struct attribute *cpu_online_attrs[] = {
 	&attr_capability.attr,
 	&attr_idle_count.attr,
 	&attr_idle_time_us.attr,
 	NULL,
 };
 
-static struct attribute_group cpu_attr_group = {
-	.attrs = cpu_attrs,
+static struct attribute_group cpu_online_attr_group = {
+	.attrs = cpu_online_attrs,
 };
 
 static int __cpuinit smp_cpu_notify(struct notifier_block *self,
@@ -778,12 +993,12 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
 		idle->idle_time = 0;
 		idle->idle_count = 0;
 		spin_unlock_irq(&idle->lock);
-		if (sysfs_create_group(&s->kobj, &cpu_attr_group))
+		if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
 			return NOTIFY_BAD;
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		sysfs_remove_group(&s->kobj, &cpu_attr_group);
+		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
 		break;
 	}
 	return NOTIFY_OK;
@@ -793,6 +1008,62 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = {
 	.notifier_call = smp_cpu_notify,
 };
 
+static int smp_add_present_cpu(int cpu)
+{
+	struct cpu *c = &per_cpu(cpu_devices, cpu);
+	struct sys_device *s = &c->sysdev;
+	int rc;
+
+	c->hotpluggable = 1;
+	rc = register_cpu(c, cpu);
+	if (rc)
+		goto out;
+	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
+	if (rc)
+		goto out_cpu;
+	if (!cpu_online(cpu))
+		goto out;
+	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
+	if (!rc)
+		return 0;
+	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
+out_cpu:
+#ifdef CONFIG_HOTPLUG_CPU
+	unregister_cpu(c);
+#endif
+out:
+	return rc;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static ssize_t rescan_store(struct sys_device *dev, const char *buf,
+			    size_t count)
+{
+	cpumask_t newcpus;
+	int cpu;
+	int rc;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	get_online_cpus();
+	newcpus = cpu_present_map;
+	rc = smp_rescan_cpus();
+	if (rc)
+		goto out;
+	cpus_andnot(newcpus, cpu_present_map, newcpus);
+	for_each_cpu_mask(cpu, newcpus) {
+		rc = smp_add_present_cpu(cpu);
+		if (rc)
+			cpu_clear(cpu, cpu_present_map);
+	}
+	rc = 0;
+out:
+	put_online_cpus();
+	mutex_unlock(&smp_cpu_state_mutex);
+	return rc ? rc : count;
+}
+static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
+#endif /* CONFIG_HOTPLUG_CPU */
+
 static int __init topology_init(void)
 {
 	int cpu;
@@ -800,16 +1071,14 @@ static int __init topology_init(void)
 
 	register_cpu_notifier(&smp_cpu_nb);
 
-	for_each_possible_cpu(cpu) {
-		struct cpu *c = &per_cpu(cpu_devices, cpu);
-		struct sys_device *s = &c->sysdev;
-
-		c->hotpluggable = 1;
-		register_cpu(c, cpu);
-		if (!cpu_online(cpu))
-			continue;
-		s = &c->sysdev;
-		rc = sysfs_create_group(&s->kobj, &cpu_attr_group);
+#ifdef CONFIG_HOTPLUG_CPU
+	rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+			       &attr_rescan.attr);
+	if (rc)
+		return rc;
+#endif
+	for_each_present_cpu(cpu) {
+		rc = smp_add_present_cpu(cpu);
 		if (rc)
 			return rc;
 	}
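The class-level rescan attribute is write-only and, with the standard sysdev paths, lands at /sys/devices/system/cpu/rescan. Writing anything to it, e.g.:

	echo 1 > /sys/devices/system/cpu/rescan

runs smp_rescan_cpus() and calls smp_add_present_cpu() for every newly appearing cpu, so new standby cpus show up with their configure and address attributes and can then be configured and brought online.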