Diffstat (limited to 'include/asm-sparc64')
 include/asm-sparc64/mmu_context.h | 50 ++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 38 insertions(+), 12 deletions(-)
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index ca36ea96f64..e7974321d05 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -42,7 +42,7 @@ static inline void tsb_context_switch(struct mm_struct *mm)
__pa(&mm->context.tsb_descr));
}
-extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss, gfp_t gfp_flags);
+extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss);
#ifdef CONFIG_SMP
extern void smp_tsb_sync(struct mm_struct *mm);
#else
@@ -74,18 +74,43 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
ctx_valid = CTX_VALID(mm->context);
if (!ctx_valid)
get_new_mmu_context(mm);
- spin_unlock_irqrestore(&mm->context.lock, flags);
- if (!ctx_valid || (old_mm != mm)) {
- load_secondary_context(mm);
- tsb_context_switch(mm);
- }
+ /* We have to be extremely careful here or else we will miss
+ * a TSB grow if we switch back and forth between a kernel
+ * thread and an address space which has its TSB size increased
+ * on another processor.
+ *
+ * It is possible to play some games in order to optimize the
+ * switch, but the safest thing to do is to unconditionally
+ * perform the secondary context load and the TSB context switch.
+ *
+ * For reference the bad case is, for address space "A":
+ *
+ *         CPU 0                          CPU 1
+ *     run address space A
+ *     set cpu0's bits in cpu_vm_mask
+ *     switch to kernel thread, borrow
+ *     address space A via enter_lazy_tlb
+ *                                        run address space A
+ *                                        set cpu1's bit in cpu_vm_mask
+ *                                        flush_tlb_pending()
+ *                                        reset cpu_vm_mask to just cpu1
+ *                                        TSB grow
+ *     run address space A
+ *     context was valid, so skip
+ *     TSB context switch
+ *
+ * At that point cpu0 continues to use a stale TSB, the one from
+ * before the TSB grow performed on cpu1. cpu1 did not cross-call
+ * cpu0 to update its TSB because at that point the cpu_vm_mask
+ * only had cpu1 set in it.
+ */
+ load_secondary_context(mm);
+ tsb_context_switch(mm);
- /* Even if (mm == old_mm) we _must_ check
- * the cpu_vm_mask. If we do not we could
- * corrupt the TLB state because of how
- * smp_flush_tlb_{page,range,mm} on sparc64
- * and lazy tlb switches work. -DaveM
+ /* Any time a processor runs a context on an address space
+ * for the first time, we must flush that context out of the
+ * local TLB.
*/
cpu = smp_processor_id();
if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
@@ -93,6 +118,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
__flush_tlb_mm(CTX_HWBITS(mm->context),
SECONDARY_CONTEXT);
}
+ spin_unlock_irqrestore(&mm->context.lock, flags);
}
#define deactivate_mm(tsk,mm) do { } while (0)
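For orientation, the following is a minimal sketch of how switch_mm() reads with the hunks above applied. It is assembled from the added lines and the context shown in this diff; the lines the hunks elide (the opening spin_lock_irqsave() on mm->context.lock and the cpu_set() inside the flush branch) are assumed rather than quoted, so treat it as an illustration of the new ordering, not the verbatim file contents.

/* Assumed reconstruction of switch_mm() after this patch, for illustration.
 * Two properties matter: the secondary context load and the TSB context
 * switch are now unconditional, and mm->context.lock stays held until after
 * the local TLB flush, so a tsb_grow() racing on another CPU cannot be missed.
 */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
			     struct task_struct *tsk)
{
	unsigned long ctx_valid, flags;
	int cpu;

	spin_lock_irqsave(&mm->context.lock, flags);	/* unchanged line, assumed */
	ctx_valid = CTX_VALID(mm->context);
	if (!ctx_valid)
		get_new_mmu_context(mm);

	/* Never skip these, even when old_mm == mm (returning from a
	 * lazy-TLB kernel thread); skipping is what left cpu0 on the
	 * stale TSB in the scenario described in the comment above.
	 */
	load_secondary_context(mm);
	tsb_context_switch(mm);

	/* First time this CPU runs this context: flush it from the local TLB. */
	cpu = smp_processor_id();
	if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
		cpu_set(cpu, mm->cpu_vm_mask);	/* unchanged line, assumed */
		__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
	}
	spin_unlock_irqrestore(&mm->context.lock, flags);	/* now released last */
}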
@@ -109,11 +135,11 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
cpu = smp_processor_id();
if (!cpu_isset(cpu, mm->cpu_vm_mask))
cpu_set(cpu, mm->cpu_vm_mask);
- spin_unlock_irqrestore(&mm->context.lock, flags);
load_secondary_context(mm);
__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
tsb_context_switch(mm);
+ spin_unlock_irqrestore(&mm->context.lock, flags);
}
#endif /* !(__ASSEMBLY__) */
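The activate_mm() hunk applies the same rule. A short sketch of the resulting function follows; the opening lines not shown in the hunk (the spin_lock_irqsave() and the context-validity check) are assumptions based on the surrounding code, not part of this diff.

/* Assumed reconstruction of activate_mm() after this patch, for illustration.
 * As in switch_mm(), mm->context.lock is released only after the secondary
 * context load, the local TLB flush, and the TSB context switch.
 */
static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&mm->context.lock, flags);	/* unchanged line, assumed */
	if (!CTX_VALID(mm->context))			/* unchanged lines, assumed */
		get_new_mmu_context(mm);

	cpu = smp_processor_id();
	if (!cpu_isset(cpu, mm->cpu_vm_mask))
		cpu_set(cpu, mm->cpu_vm_mask);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
	tsb_context_switch(mm);
	spin_unlock_irqrestore(&mm->context.lock, flags);	/* now released last */
}

In both functions the design choice is the same: keep mm->context.lock held until this CPU has loaded the secondary context and switched to the current TSB, so the stale-TSB window described in the new comment cannot reopen.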