Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                       1
-rw-r--r--  arch/ia64/kernel/mca.c                  6
-rw-r--r--  arch/ia64/kernel/palinfo.c              2
-rw-r--r--  arch/ia64/kernel/perfmon.c              6
-rw-r--r--  arch/ia64/kernel/process.c              2
-rw-r--r--  arch/ia64/kernel/smp.c                254
-rw-r--r--  arch/ia64/kernel/smpboot.c              6
-rw-r--r--  arch/ia64/kernel/uncached.c             5
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c     2
9 files changed, 30 insertions, 254 deletions
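
Every call-site change below follows the same pattern: the generic SMP helpers drop
the unused 'nonatomic' argument, so smp_call_function(), smp_call_function_single(),
and on_each_cpu() shrink by one parameter each. A minimal sketch of the conversion,
using a hypothetical func/info callback pair:

	/* Before: ia64's private implementation carried an unused 'nonatomic' flag. */
	smp_call_function_single(cpu, func, info, 0 /* nonatomic */, 1 /* wait */);
	on_each_cpu(func, info, 1 /* nonatomic */, 0 /* don't wait */);

	/* After: the generic helpers take just (func, info, wait). */
	smp_call_function_single(cpu, func, info, 1 /* wait */);
	on_each_cpu(func, info, 0 /* don't wait */);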
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 16be41446b5..18bcc10903b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -303,6 +303,7 @@ config VIRT_CPU_ACCOUNTING
config SMP
bool "Symmetric multi-processing support"
+ select USE_GENERIC_SMP_HELPERS
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, say N. If you have a system with more
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 705176b434b..7dd96c12717 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -707,7 +707,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
static void
ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
{
- on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
+ on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
}
/*
@@ -719,7 +719,7 @@ ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
static void
ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
{
- on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
+ on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
}
/*
@@ -1881,7 +1881,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust,
- NULL, 1, 0);
+ NULL, 0);
break;
}
return NOTIFY_OK;
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 9dc00f7fe10..e5c57f413ca 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -921,7 +921,7 @@ int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
/* will send IPI to other CPU and wait for completion of remote call */
- if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 0, 1))) {
+ if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) {
printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
"error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
return 0;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 7714a97b010..19d4493c619 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1820,7 +1820,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
int ret;
DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
- ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
+ ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
}
#endif /* CONFIG_SMP */
@@ -6508,7 +6508,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
}
/* save the current system wide pmu states */
- ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
+ ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
if (ret) {
DPRINT(("on_each_cpu() failed: %d\n", ret));
goto cleanup_reserve;
@@ -6553,7 +6553,7 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
pfm_alt_intr_handler = NULL;
- ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
+ ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
if (ret) {
DPRINT(("on_each_cpu() failed: %d\n", ret));
}
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index a3a34b4eb03..fabaf08d9a6 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -286,7 +286,7 @@ void cpu_idle_wait(void)
{
smp_mb();
/* kick all the CPUs so that they exit out of pm_idle */
- smp_call_function(do_nothing, NULL, 0, 1);
+ smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 983296f1c81..3676468612b 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -60,25 +60,9 @@ static struct local_tlb_flush_counts {
static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise static memory
- * requirements. It also looks cleaner.
- */
-static __cacheline_aligned DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
- void (*func) (void *info);
- void *info;
- long wait;
- atomic_t started;
- atomic_t finished;
-};
-
-static volatile struct call_data_struct *call_data;
-
#define IPI_CALL_FUNC 0
#define IPI_CPU_STOP 1
+#define IPI_CALL_FUNC_SINGLE 2
#define IPI_KDUMP_CPU_STOP 3
/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
@@ -86,43 +70,6 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
extern void cpu_halt (void);
-void
-lock_ipi_calllock(void)
-{
- spin_lock_irq(&call_lock);
-}
-
-void
-unlock_ipi_calllock(void)
-{
- spin_unlock_irq(&call_lock);
-}
-
-static inline void
-handle_call_data(void)
-{
- struct call_data_struct *data;
- void (*func)(void *info);
- void *info;
- int wait;
-
- /* release the 'pointer lock' */
- data = (struct call_data_struct *)call_data;
- func = data->func;
- info = data->info;
- wait = data->wait;
-
- mb();
- atomic_inc(&data->started);
- /* At this point the structure may be gone unless wait is true. */
- (*func)(info);
-
- /* Notify the sending CPU that the task is done. */
- mb();
- if (wait)
- atomic_inc(&data->finished);
-}
-
static void
stop_this_cpu(void)
{
@@ -163,13 +110,15 @@ handle_IPI (int irq, void *dev_id)
ops &= ~(1 << which);
switch (which) {
- case IPI_CALL_FUNC:
- handle_call_data();
- break;
-
case IPI_CPU_STOP:
stop_this_cpu();
break;
+ case IPI_CALL_FUNC:
+ generic_smp_call_function_interrupt();
+ break;
+ case IPI_CALL_FUNC_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
#ifdef CONFIG_KEXEC
case IPI_KDUMP_CPU_STOP:
unw_init_running(kdump_cpu_freeze, NULL);
@@ -187,6 +136,8 @@ handle_IPI (int irq, void *dev_id)
return IRQ_HANDLED;
}
+
+
/*
* Called with preemption disabled.
*/
@@ -334,7 +285,7 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask)
void
smp_flush_tlb_all (void)
{
- on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+ on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
}
void
@@ -357,193 +308,18 @@ smp_flush_tlb_mm (struct mm_struct *mm)
* anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
* rather trivial.
*/
- on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+ on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
}
-/*
- * Run a function on a specific CPU
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <nonatomic> Currently unused.
- * <wait> If true, wait until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
- */
-
-int
-smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
- int wait)
-{
- struct call_data_struct data;
- int cpus = 1;
- int me = get_cpu(); /* prevent preemption and reschedule on another processor */
-
- if (cpuid == me) {
- local_irq_disable();
- func(info);
- local_irq_enable();
- put_cpu();
- return 0;
- }
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- spin_lock_bh(&call_lock);
-
- call_data = &data;
- mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
- send_IPI_single(cpuid, IPI_CALL_FUNC);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- cpu_relax();
-
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- cpu_relax();
- call_data = NULL;
-
- spin_unlock_bh(&call_lock);
- put_cpu();
- return 0;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * <mask> The set of cpus to run on. Must not include the current cpu.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <wait> If true, wait (atomically) until function
- * has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function_mask(cpumask_t mask,
- void (*func)(void *), void *info,
- int wait)
+void arch_send_call_function_single_ipi(int cpu)
{
- struct call_data_struct data;
- cpumask_t allbutself;
- int cpus;
-
- spin_lock(&call_lock);
- allbutself = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutself);
-
- cpus_and(mask, mask, allbutself);
- cpus = cpus_weight(mask);
- if (!cpus) {
- spin_unlock(&call_lock);
- return 0;
- }
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- call_data = &data;
- mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC*/
-
- /* Send a message to other CPUs */
- if (cpus_equal(mask, allbutself))
- send_IPI_allbutself(IPI_CALL_FUNC);
- else
- send_IPI_mask(mask, IPI_CALL_FUNC);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- cpu_relax();
-
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- cpu_relax();
- call_data = NULL;
-
- spin_unlock(&call_lock);
- return 0;
-
+ send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
}
-EXPORT_SYMBOL(smp_call_function_mask);
-/*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
-
-/*
- * [SUMMARY] Run a function on all other CPUs.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <nonatomic> currently unused.
- * <wait> If true, wait (atomically) until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func> or are or have
- * executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int
-smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
{
- struct call_data_struct data;
- int cpus;
-
- spin_lock(&call_lock);
- cpus = num_online_cpus() - 1;
- if (!cpus) {
- spin_unlock(&call_lock);
- return 0;
- }
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- call_data = &data;
- mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
- send_IPI_allbutself(IPI_CALL_FUNC);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- cpu_relax();
-
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- cpu_relax();
- call_data = NULL;
-
- spin_unlock(&call_lock);
- return 0;
+ send_IPI_mask(mask, IPI_CALL_FUNC);
}
-EXPORT_SYMBOL(smp_call_function);
/*
* this function calls the 'stop' function on all other CPUs in the system.
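
With the private call_data machinery deleted, arch/ia64/kernel/smp.c only has to
provide the two transport hooks that kernel/smp.c expects and route the two IPI
vectors to the generic handlers. A sketch of that contract, with an illustrative
dispatch fragment (the real one lives in handle_IPI() above):

	/* Transport hooks the generic code calls to raise an IPI: */
	void arch_send_call_function_single_ipi(int cpu);  /* one CPU: IPI_CALL_FUNC_SINGLE */
	void arch_send_call_function_ipi(cpumask_t mask);  /* a cpumask: IPI_CALL_FUNC */

	/* On the receiving side, the IPI handler hands off to the generic code,
	 * which dequeues and runs the pending callbacks: */
	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;
	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;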
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index d7ad42b77d4..9d1d429c6c5 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -317,7 +317,7 @@ ia64_sync_itc (unsigned int master)
go[MASTER] = 1;
- if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
+ if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
return;
}
@@ -395,14 +395,14 @@ smp_callin (void)
fix_b0_for_bsp();
- lock_ipi_calllock();
+ ipi_call_lock_irq();
spin_lock(&vector_lock);
/* Setup the per cpu irq handling data structures */
__setup_vector_irq(cpuid);
cpu_set(cpuid, cpu_online_map);
per_cpu(cpu_state, cpuid) = CPU_ONLINE;
spin_unlock(&vector_lock);
- unlock_ipi_calllock();
+ ipi_call_unlock_irq();
smp_setup_percpu_timer();
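
smp_callin() used to take the private call_lock via lock_ipi_calllock() while
marking the CPU online; with the generic helpers that serialization moves to
ipi_call_lock_irq()/ipi_call_unlock_irq(), preserving the same guarantee: a CPU
cannot appear in cpu_online_map, and thus be targeted by smp_call_function(),
before its IPI handling is set up. The bring-up ordering, reduced to a sketch:

	ipi_call_lock_irq();             /* generic helper lock, was lock_ipi_calllock() */
	cpu_set(cpuid, cpu_online_map);  /* now visible to smp_call_function() callers */
	ipi_call_unlock_irq();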
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index e77995a6e3e..8eff8c1d40a 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -123,8 +123,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
atomic_set(&uc_pool->status, 0);
- status = smp_call_function(uncached_ipi_visibility, uc_pool,
- 0, 1);
+ status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
if (status || atomic_read(&uc_pool->status))
goto failed;
} else if (status != PAL_VISIBILITY_OK)
@@ -146,7 +145,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
if (status != PAL_STATUS_SUCCESS)
goto failed;
atomic_set(&uc_pool->status, 0);
- status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
+ status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
if (status || atomic_read(&uc_pool->status))
goto failed;
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 8cc0c4753d8..636588e7e06 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -629,7 +629,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
if (use_ipi) {
/* use an interprocessor interrupt to call SAL */
smp_call_function_single(cpu, sn_hwperf_call_sal,
- op_info, 1, 1);
+ op_info, 1);
}
else {
/* migrate the task before calling SAL */