Diffstat (limited to 'include/asm-s390/tlbflush.h')
-rw-r--r--  include/asm-s390/tlbflush.h  159
1 file changed, 56 insertions, 103 deletions
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index 66793f55c8b..a69bd2490d5 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -6,69 +6,19 @@
 #include <asm/pgalloc.h>
 
 /*
- * TLB flushing:
- *
- *  - flush_tlb() flushes the current mm struct TLBs
- *  - flush_tlb_all() flushes all processes TLBs
- *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
- *  - flush_tlb_page(vma, vmaddr) flushes one page
- *  - flush_tlb_range(vma, start, end) flushes a range of pages
- *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
- */
-
-/*
- * S/390 has three ways of flushing TLBs
- * 'ptlb' does a flush of the local processor
- * 'csp' flushes the TLBs on all PUs of a SMP
- * 'ipte' invalidates a pte in a page table and flushes that out of
- * the TLBs of all PUs of a SMP
+ * Flush all tlb entries on the local cpu.
  */
-
-#define local_flush_tlb() \
-do { asm volatile("ptlb": : :"memory"); } while (0)
-
-#ifndef CONFIG_SMP
-
-/*
- * We always need to flush, since s390 does not flush tlb
- * on each context switch
- */
-
-static inline void flush_tlb(void)
-{
-	local_flush_tlb();
-}
-static inline void flush_tlb_all(void)
-{
-	local_flush_tlb();
-}
-static inline void flush_tlb_mm(struct mm_struct *mm)
+static inline void __tlb_flush_local(void)
 {
-	local_flush_tlb();
+	asm volatile("ptlb" : : : "memory");
 }
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long addr)
-{
-	local_flush_tlb();
-}
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	local_flush_tlb();
-}
 
-#define flush_tlb_kernel_range(start, end) \
-	local_flush_tlb();
-
-#else
-
-#include <asm/smp.h>
-
-extern void smp_ptlb_all(void);
-static inline void global_flush_tlb(void)
+/*
+ * Flush all tlb entries on all cpus.
+ */
+static inline void __tlb_flush_global(void)
 {
+	extern void smp_ptlb_all(void);
 	register unsigned long reg2 asm("2");
 	register unsigned long reg3 asm("3");
 	register unsigned long reg4 asm("4");
@@ -90,72 +40,75 @@ static inline void global_flush_tlb(void)
 }
 
 /*
- * We only have to do global flush of tlb if process run since last
- * flush on any other pu than current.
- * If we have threads (mm->count > 1) we always do a global flush,
- * since the process runs on more than one processor at the same time.
+ * Flush all tlb entries of a page table on all cpus.
  */
+static inline void __tlb_flush_idte(pgd_t *pgd)
+{
+	asm volatile(
+		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
+		: : "a" (2048), "a" (__pa(pgd) & PAGE_MASK) : "cc" );
+}
 
-static inline void __flush_tlb_mm(struct mm_struct * mm)
+static inline void __tlb_flush_mm(struct mm_struct * mm)
 {
 	cpumask_t local_cpumask;
 
 	if (unlikely(cpus_empty(mm->cpu_vm_mask)))
 		return;
+	/*
+	 * If the machine has IDTE we prefer to do a per mm flush
+	 * on all cpus instead of doing a local flush if the mm
+	 * only ran on the local cpu.
+	 */
 	if (MACHINE_HAS_IDTE) {
-		pgd_t *shadow_pgd = get_shadow_pgd(mm->pgd);
+		pgd_t *shadow_pgd = get_shadow_table(mm->pgd);
 
-		if (shadow_pgd) {
-			asm volatile(
-				"	.insn	rrf,0xb98e0000,0,%0,%1,0"
-				: : "a" (2048),
-				"a" (__pa(shadow_pgd) & PAGE_MASK) : "cc" );
-		}
-		asm volatile(
-			"	.insn	rrf,0xb98e0000,0,%0,%1,0"
-			: : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc");
+		if (shadow_pgd)
+			__tlb_flush_idte(shadow_pgd);
+		__tlb_flush_idte(mm->pgd);
 		return;
 	}
 	preempt_disable();
+	/*
+	 * If the process only ran on the local cpu, do a local flush.
+	 */
 	local_cpumask = cpumask_of_cpu(smp_processor_id());
 	if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
-		local_flush_tlb();
+		__tlb_flush_local();
 	else
-		global_flush_tlb();
+		__tlb_flush_global();
 	preempt_enable();
 }
 
-static inline void flush_tlb(void)
-{
-	__flush_tlb_mm(current->mm);
-}
-static inline void flush_tlb_all(void)
-{
-	global_flush_tlb();
-}
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-	__flush_tlb_mm(mm);
-}
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long addr)
+static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
 {
-	__flush_tlb_mm(vma->vm_mm);
+	if (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm)
+		__tlb_flush_mm(mm);
 }
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	__flush_tlb_mm(vma->vm_mm);
-}
 
-#define flush_tlb_kernel_range(start, end) global_flush_tlb()
-#endif
+/*
+ * TLB flushing:
+ *  flush_tlb() - flushes the current mm struct TLBs
+ *  flush_tlb_all() - flushes all processes TLBs
+ *  flush_tlb_mm(mm) - flushes the specified mm context TLB's
+ *  flush_tlb_page(vma, vmaddr) - flushes one page
+ *  flush_tlb_range(vma, start, end) - flushes a range of pages
+ *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
+ */
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	/* S/390 does not keep any page table caches in TLB */
-}
+/*
+ * flush_tlb_mm goes together with ptep_set_wrprotect for the
+ * copy_page_range operation and flush_tlb_range is related to
+ * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
+ * ptep_get_and_clear do not flush the TLBs directly if the mm has
+ * only one user. At the end of the update the flush_tlb_mm and
+ * flush_tlb_range functions need to do the flush.
+ */
+#define flush_tlb()				do { } while (0)
+#define flush_tlb_all()				do { } while (0)
+#define flush_tlb_mm(mm)			__tlb_flush_mm_cond(mm)
+#define flush_tlb_page(vma, addr)		do { } while (0)
+#define flush_tlb_range(vma, start, end)	__tlb_flush_mm_cond(mm)
#define flush_tlb_kernel_range(start, end)	__tlb_flush_mm(&init_mm)
 
 #endif /* _S390_TLBFLUSH_H */
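The rewritten __tlb_flush_mm picks between three mechanisms: an IDTE flush of the mm's page table entries on all cpus when the machine supports it, a local 'ptlb' when the mm only ever ran on the current cpu, and a global flush otherwise. The stand-alone C sketch below models only that decision logic; struct mm, MACHINE_HAS_IDTE, this_cpu and the printed labels are illustrative stand-ins, not the kernel API, and the real code additionally disables preemption around the cpumask comparison.

/* Model of the flush choice made by __tlb_flush_mm; all names here
 * are illustrative stand-ins, not kernel interfaces. */
#include <stdio.h>

#define MACHINE_HAS_IDTE 0		/* set to 1 to model an IDTE machine */

struct mm {
	unsigned long cpu_vm_mask;	/* one bit per cpu the mm ran on */
};

static int this_cpu = 2;		/* stands in for smp_processor_id() */

static void tlb_flush_mm(struct mm *mm)
{
	if (mm->cpu_vm_mask == 0)
		return;			/* mm never ran: nothing to flush */
	if (MACHINE_HAS_IDTE) {
		/* flush this page table's entries on all cpus at once */
		puts("idte flush");
		return;
	}
	if (mm->cpu_vm_mask == (1UL << this_cpu))
		puts("ptlb: local flush");	/* __tlb_flush_local */
	else
		puts("global flush");		/* __tlb_flush_global */
}

int main(void)
{
	struct mm mm = { .cpu_vm_mask = 1UL << this_cpu };

	tlb_flush_mm(&mm);		/* ran only here -> local flush */
	mm.cpu_vm_mask |= 1UL << 5;
	tlb_flush_mm(&mm);		/* ran elsewhere too -> global */
	return 0;
}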
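The closing comment describes a contract between the pte primitives and the flush functions: when an mm has a single user, ptep_set_wrprotect and ptep_get_and_clear skip the per-pte flush, and the batch operation (copy_page_range, change_protection) finishes with one flush_tlb_mm or flush_tlb_range. A minimal user-space sketch of that contract follows; pte_wrprotect, tlb_flush_mm_cond and the struct fields are invented stand-ins for the kernel primitives, kept only to show why one conditional flush at the end replaces a flush per pte.

/* Models the deferred-flush contract from the header comment; the
 * names and types are stand-ins, not the kernel API. */
#include <stdio.h>

struct mm {
	int mm_users;	/* users of the address space */
	int active;	/* stands in for mm == current->active_mm */
};

static int flushes;	/* counts simulated TLB flushes */

/* Like ptep_set_wrprotect: a single-user mm defers the flush. */
static void pte_wrprotect(struct mm *mm, unsigned *pte)
{
	*pte &= ~1u;		/* clear a pretend write bit */
	if (mm->mm_users > 1)
		flushes++;	/* shared mm: must flush immediately */
}

/* Like __tlb_flush_mm_cond: one flush, only when still needed. */
static void tlb_flush_mm_cond(struct mm *mm)
{
	if (mm->mm_users <= 1 && mm->active)
		flushes++;
}

int main(void)
{
	struct mm mm = { .mm_users = 1, .active = 1 };
	unsigned ptes[64];
	int i;

	for (i = 0; i < 64; i++)
		ptes[i] = 1;
	/* copy_page_range-style batch: protect everything, flush once */
	for (i = 0; i < 64; i++)
		pte_wrprotect(&mm, &ptes[i]);
	tlb_flush_mm_cond(&mm);

	printf("flushes for 64 ptes: %d\n", flushes);	/* prints 1 */
	return 0;
}

With more than one user every pte operation flushes eagerly, which is presumably why flush_tlb() and flush_tlb_page() can become no-ops in this scheme.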