author	Ingo Molnar <mingo@elte.hu>	2009-04-02 16:33:42 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-04-02 16:33:51 +0200
commit	83f2f0ed715eb15a8e13c07df479d65bbc10d8d5 (patch)
tree	e47d7edefa210b328f8cfba43d170fc67d6e0671 /arch/sparc/kernel/smp_64.c
parent	3de46fda4c104deef17ec70f85361f5c6b84ce0e (diff)
parent	4fe70410d9a219dabb47328effccae7e7f2a6e26 (diff)
Merge branch 'linus' into x86/urgent
Merge needed to go past commit 7ca43e756 (mm: use debug_kmap_atomic) and fix it.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/sparc/kernel/smp_64.c')
-rw-r--r--	arch/sparc/kernel/smp_64.c	18
1 file changed, 9 insertions, 9 deletions
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 6cd1a5b6506..708e12a26b0 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -808,9 +808,9 @@ static void smp_start_sync_tick_client(int cpu)
extern unsigned long xcall_call_function;
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
- xcall_deliver((u64) &xcall_call_function, 0, 0, &mask);
+ xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}
extern unsigned long xcall_call_function_single;
@@ -850,7 +850,7 @@ static void tsb_sync(void *info)
void smp_tsb_sync(struct mm_struct *mm)
{
- smp_call_function_mask(mm->cpu_vm_mask, tsb_sync, mm, 1);
+ smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}
extern unsigned long xcall_flush_tlb_mm;
@@ -1031,7 +1031,7 @@ void smp_fetch_global_regs(void)
* If the address space is non-shared (ie. mm->count == 1) we avoid
* cross calls when we want to flush the currently running process's
* tlb state. This is done by clearing all cpu bits except the current
- * processor's in current->active_mm->cpu_vm_mask and performing the
+ * processor's in current->mm->cpu_vm_mask and performing the
* flush locally only. This will force any subsequent cpus which run
* this task to flush the context from the local tlb if the process
* migrates to another cpu (again).
@@ -1055,13 +1055,13 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
int cpu = get_cpu();
if (atomic_read(&mm->mm_users) == 1) {
- mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+ cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
goto local_flush_and_out;
}
smp_cross_call_masked(&xcall_flush_tlb_mm,
ctx, 0, 0,
- &mm->cpu_vm_mask);
+ mm_cpumask(mm));
local_flush_and_out:
__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
@@ -1074,12 +1074,12 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
u32 ctx = CTX_HWBITS(mm->context);
int cpu = get_cpu();
- if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
- mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+ if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
+ cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
else
smp_cross_call_masked(&xcall_flush_tlb_pending,
ctx, nr, (unsigned long) vaddrs,
- &mm->cpu_vm_mask);
+ mm_cpumask(mm));
__flush_tlb_pending(ctx, nr, vaddrs);
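
The hunks above follow the generic cpumask conversion: rather than assigning a cpumask_t by value (mm->cpu_vm_mask = cpumask_of_cpu(cpu)), callers reach the per-mm mask through the mm_cpumask() accessor and update it with cpumask_copy(), and the cross-call helpers take a const struct cpumask * instead of a mask passed by value. Below is a minimal standalone sketch of that accessor/copy pattern; it is not kernel code, and the *_model types and helpers are invented stand-ins for illustration only.

/* sketch: simplified stand-ins for the kernel's cpumask/mm types */
#include <stdio.h>
#include <string.h>

struct cpumask_model {
	unsigned long bits;		/* one bit per CPU, 64 CPUs max */
};

struct mm_model {
	int mm_users;
	struct cpumask_model cpu_vm_mask;
};

/* accessor in the spirit of mm_cpumask(): callers never touch the field */
static struct cpumask_model *mm_cpumask_model(struct mm_model *mm)
{
	return &mm->cpu_vm_mask;
}

static void cpumask_copy_model(struct cpumask_model *dst,
			       const struct cpumask_model *src)
{
	memcpy(dst, src, sizeof(*dst));
}

/* counterpart of cpumask_of(cpu): a mask with only 'cpu' set */
static struct cpumask_model cpumask_of_model(int cpu)
{
	struct cpumask_model m = { .bits = 1UL << cpu };
	return m;
}

int main(void)
{
	struct mm_model mm = { .mm_users = 1, .cpu_vm_mask = { .bits = ~0UL } };
	int cpu = 3;

	/* single-user address space: restrict the mask to the local CPU,
	 * mirroring cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)) */
	if (mm.mm_users == 1) {
		struct cpumask_model self = cpumask_of_model(cpu);
		cpumask_copy_model(mm_cpumask_model(&mm), &self);
	}

	printf("mask now 0x%lx (only CPU %d set)\n", mm.cpu_vm_mask.bits, cpu);
	return 0;
}

In the kernel the real cpumask_copy(), mm_cpumask() and cpumask_of() helpers play these roles; the point of the conversion is that callers stop depending on the mask being an embedded cpumask_t they can assign or pass by value.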