author | Heiko Carstens <heiko.carstens@de.ibm.com> | 2007-02-12 15:47:04 +0100
---|---|---
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2007-02-12 15:47:04 +0100
commit | 0ec67667ab414b18a0518d5b11c842fd342e9cb1 (patch) |
tree | a68f29d957fc4dc24145c22b040242406cee8054 /arch/s390/kernel |
parent | 509cb37e173d4e39cec47238397e91b718730794 (diff) |
[S390] smp_call_function/smp_call_function_on locking.
smp_call_function and smp_call_function_on share the same lock, and
smp_call_function_on disables softirqs so that it can also be called
from softirq context. Hence smp_call_function must disable softirqs
as well to avoid deadlocks.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
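For illustration (not part of the commit): a minimal kernel-style sketch of the rule the fix enforces. A lock that is also taken from softirq context must be acquired with the _bh spinlock variants in process context, otherwise a softirq arriving while the lock is held can spin forever on the same CPU. The lock, state, and handler names below are hypothetical stand-ins for call_lock and the call_function path.

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical, stands in for call_lock */
static int example_state;

/* Runs in softirq context, e.g. as an (old-style) tasklet handler. */
static void example_softirq_handler(unsigned long data)
{
	/* Softirqs are already disabled here, so a plain spin_lock() is fine. */
	spin_lock(&example_lock);
	example_state++;
	spin_unlock(&example_lock);
}

/* Runs in process context. */
static void example_process_path(void)
{
	/*
	 * A plain spin_lock() here would let a softirq interrupt this CPU
	 * while example_lock is held; the handler above would then spin
	 * forever on the lock. spin_lock_bh() disables softirqs first,
	 * which is what the patch makes smp_call_function() do.
	 */
	spin_lock_bh(&example_lock);
	example_state++;
	spin_unlock_bh(&example_lock);
}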
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r-- | arch/s390/kernel/smp.c | 15 |
1 file changed, 9 insertions, 6 deletions
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 65b52320d14..83a4ea6e3d6 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -57,7 +57,7 @@ static void smp_ext_bitcall(int, ec_bit_sig);
 static void smp_ext_bitcall_others(ec_bit_sig);
 
 /*
-5B * Structure and data for smp_call_function(). This is designed to minimise
+ * Structure and data for smp_call_function(). This is designed to minimise
  * static memory requirements. It also looks cleaner.
  */
 static DEFINE_SPINLOCK(call_lock);
@@ -104,7 +104,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
  * remote CPUs are nearly ready to execute <<func>> or are or have executed.
  *
  * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
+ * hardware interrupt handler.
  */
 {
 	struct call_data_struct data;
@@ -113,8 +113,8 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 	if (cpus <= 0)
 		return 0;
 
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
+	/* Can deadlock when interrupts are disabled or if in wrong context */
+	WARN_ON(irqs_disabled() || in_irq());
 
 	data.func = func;
 	data.info = info;
@@ -123,7 +123,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 	if (wait)
 		atomic_set(&data.finished, 0);
 
-	spin_lock(&call_lock);
+	spin_lock_bh(&call_lock);
 	call_data = &data;
 	/* Send a message to all other CPUs and wait for them to respond */
 	smp_ext_bitcall_others(ec_call_function);
@@ -135,7 +135,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 	if (wait)
 		while (atomic_read(&data.finished) != cpus)
 			cpu_relax();
-	spin_unlock(&call_lock);
+	spin_unlock_bh(&call_lock);
 	return 0;
 }
 
@@ -159,6 +159,9 @@ int smp_call_function_on(void (*func) (void *info), void *info,
 	if (!cpu_online(cpu))
 		return -EINVAL;
 
+	/* Can deadlock when interrupts are disabled or if in wrong context */
+	WARN_ON(irqs_disabled() || in_irq());
+
 	/* disable preemption for local function call */
 	curr_cpu = get_cpu();