author		Thomas Gleixner <tglx@linutronix.de>	2008-01-30 13:30:35 +0100
committer	Ingo Molnar <mingo@elte.hu>		2008-01-30 13:30:35 +0100
commit		0b9c99b6f21c2e9e00938e9c57942ed71bfe4d21
tree		3d14168b8a58d03f91870b985e3dc9bf5d8aa2cc
parent		1075cf7a959f72833e54dd2d4f885617e58e3e0a
x86: cleanup tlbflush.h variants
Bring the tlbflush.h variants into sync to prepare merging and
paravirt support.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
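What "into sync" means concretely: after this patch, both headers route the
low-level flush operations through the same optional paravirt layer, falling
back to identical native inline functions. Condensed from the diff below
(this is the shared dispatch block, not new code):

	#ifdef CONFIG_PARAVIRT
	#include <asm/paravirt.h>	/* paravirt supplies __flush_tlb() etc. */
	#else
	#define __flush_tlb()			__native_flush_tlb()
	#define __flush_tlb_global()		__native_flush_tlb_global()
	#define __flush_tlb_single(addr)	__native_flush_tlb_single(addr)
	#endif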
 arch/x86/kernel/smp_64.c            |  12
 arch/x86/mach-voyager/voyager_smp.c |   7
 arch/x86/mm/boot_ioremap_32.c       |   2
 include/asm-x86/cpufeature.h        |   9
 include/asm-x86/tlbflush_32.h       | 120
 include/asm-x86/tlbflush_64.h       |  78
6 files changed, 128 insertions(+), 100 deletions(-)
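Most of the line count is the i386 header trading its statement macros with
hand-written inline assembly for the type-checked inline functions x86_64
already uses. A condensed before/after sketch (the asm is abbreviated from
the removed macro, and __native_flush_tlb_old is a made-up name for
illustration only):

	/* Old i386 style: statement macro, open-coded CR3 reload. */
	#define __native_flush_tlb_old()				\
		do {							\
			unsigned int tmpreg;				\
			__asm__ __volatile__("movl %%cr3, %0\n\t"	\
					     "movl %0, %%cr3"		\
					     : "=r" (tmpreg) :: "memory"); \
		} while (0)

	/* New shared style: an inline function over the register
	 * accessors. Reloading CR3 flushes all non-global TLB entries. */
	static inline void __native_flush_tlb(void)
	{
		write_cr3(read_cr3());
	}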
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c
index 62b0f2a1b1e..7142447b566 100644
--- a/arch/x86/kernel/smp_64.c
+++ b/arch/x86/kernel/smp_64.c
@@ -55,7 +55,6 @@ union smp_flush_state {
 		cpumask_t flush_cpumask;
 		struct mm_struct *flush_mm;
 		unsigned long flush_va;
-#define FLUSH_ALL	-1ULL
 		spinlock_t tlbstate_lock;
 	};
 	char pad[SMP_CACHE_BYTES];
@@ -153,7 +152,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 
 	if (f->flush_mm == read_pda(active_mm)) {
 		if (read_pda(mmu_state) == TLBSTATE_OK) {
-			if (f->flush_va == FLUSH_ALL)
+			if (f->flush_va == TLB_FLUSH_ALL)
 				local_flush_tlb();
 			else
 				__flush_tlb_one(f->flush_va);
@@ -166,11 +165,12 @@ out:
 	add_pda(irq_tlb_count, 1);
 }
 
-static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
-						unsigned long va)
+void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
+			     unsigned long va)
 {
 	int sender;
 	union smp_flush_state *f;
+	cpumask_t cpumask = *cpumaskp;
 
 	/* Caller has disabled preemption */
 	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
@@ -223,7 +223,7 @@ void flush_tlb_current_task(void)
 
 	local_flush_tlb();
 	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }
 
@@ -242,7 +242,7 @@ void flush_tlb_mm (struct mm_struct * mm)
 			leave_mm(smp_processor_id());
 	}
 	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 981def2b4e9..b472a2df0b7 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -800,7 +800,6 @@ static void smp_reschedule_interrupt(void)
 static struct mm_struct *flush_mm;
 static unsigned long flush_va;
 static DEFINE_SPINLOCK(tlbstate_lock);
-#define FLUSH_ALL	0xffffffff
 
 /*
  * We cannot call mmdrop() because we are in interrupt context,
@@ -834,7 +833,7 @@ static void smp_invalidate_interrupt(void)
 
 	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
 		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
-			if (flush_va == FLUSH_ALL)
+			if (flush_va == TLB_FLUSH_ALL)
 				local_flush_tlb();
 			else
 				__flush_tlb_one(flush_va);
@@ -903,7 +902,7 @@ void flush_tlb_current_task(void)
 	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
 	local_flush_tlb();
 	if (cpu_mask)
-		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }
@@ -923,7 +922,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 			leave_mm(smp_processor_id());
 	}
 	if (cpu_mask)
-		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }
diff --git a/arch/x86/mm/boot_ioremap_32.c b/arch/x86/mm/boot_ioremap_32.c
index f14da2a53ec..b20f74a2770 100644
--- a/arch/x86/mm/boot_ioremap_32.c
+++ b/arch/x86/mm/boot_ioremap_32.c
@@ -57,7 +57,7 @@ static void __boot_ioremap(unsigned long phys_addr, unsigned long nrpages,
 	pte = boot_vaddr_to_pte(virtual_source);
 	for (i=0; i < nrpages; i++, phys_addr += PAGE_SIZE, pte++) {
 		set_pte(pte, pfn_pte(phys_addr>>PAGE_SHIFT, PAGE_KERNEL));
-		__flush_tlb_one(&vaddr[i*PAGE_SIZE]);
+		__flush_tlb_one((unsigned long) &vaddr[i*PAGE_SIZE]);
 	}
 }
 
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 4c7875554d0..acbf6681740 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -163,6 +163,12 @@
 #define cpu_has_clflush		boot_cpu_has(X86_FEATURE_CLFLSH)
 #define cpu_has_bts		boot_cpu_has(X86_FEATURE_BTS)
 
+#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
+# define cpu_has_invlpg		1
+#else
+# define cpu_has_invlpg		(boot_cpu_data.x86 > 3)
+#endif
+
 #ifdef CONFIG_X86_64
 
 #undef  cpu_has_vme
@@ -183,6 +189,9 @@
 #undef  cpu_has_centaur_mcr
 #define cpu_has_centaur_mcr	0
 
+#undef  cpu_has_pge
+#define cpu_has_pge		1
+
 #endif /* CONFIG_X86_64 */
 
 #endif /* _ASM_X86_CPUFEATURE_H */
diff --git a/include/asm-x86/tlbflush_32.h b/include/asm-x86/tlbflush_32.h
index 2bd5b95e204..9e07cc8f2d9 100644
--- a/include/asm-x86/tlbflush_32.h
+++ b/include/asm-x86/tlbflush_32.h
@@ -1,8 +1,11 @@
-#ifndef _I386_TLBFLUSH_H
-#define _I386_TLBFLUSH_H
+#ifndef _X86_TLBFLUSH_H
+#define _X86_TLBFLUSH_H
 
 #include <linux/mm.h>
+#include <linux/sched.h>
+
 #include <asm/processor.h>
+#include <asm/system.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
@@ -12,62 +15,41 @@
 #define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
 #endif
 
-#define __native_flush_tlb()						\
-	do {								\
-		unsigned int tmpreg;					\
-									\
-		__asm__ __volatile__(					\
-			"movl %%cr3, %0;              \n"		\
-			"movl %0, %%cr3;  # flush TLB \n"		\
-			: "=r" (tmpreg)					\
-			:: "memory");					\
-	} while (0)
+static inline void __native_flush_tlb(void)
+{
+	write_cr3(read_cr3());
+}
 
-/*
- * Global pages have to be flushed a bit differently. Not a real
- * performance problem because this does not happen often.
- */
-#define __native_flush_tlb_global()					\
-	do {								\
-		unsigned int tmpreg, cr4, cr4_orig;			\
-									\
-		__asm__ __volatile__(					\
-			"movl %%cr4, %2;  # turn off PGE     \n"	\
-			"movl %2, %1;                        \n"	\
-			"andl %3, %1;                        \n"	\
-			"movl %1, %%cr4;                     \n"	\
-			"movl %%cr3, %0;                     \n"	\
-			"movl %0, %%cr3;  # flush TLB        \n"	\
-			"movl %2, %%cr4;  # turn PGE back on \n"	\
-			: "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig)	\
-			: "i" (~X86_CR4_PGE)				\
-			: "memory");					\
-	} while (0)
-
-#define __native_flush_tlb_single(addr)					\
-	__asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
-
-# define __flush_tlb_all()						\
-	do {								\
-		if (cpu_has_pge)					\
-			__flush_tlb_global();				\
-		else							\
-			__flush_tlb();					\
-	} while (0)
-
-#define cpu_has_invlpg	(boot_cpu_data.x86 > 3)
-
-#ifdef CONFIG_X86_INVLPG
-# define __flush_tlb_one(addr) __flush_tlb_single(addr)
-#else
-# define __flush_tlb_one(addr)						\
-	do {								\
-		if (cpu_has_invlpg)					\
-			__flush_tlb_single(addr);			\
-		else							\
-			__flush_tlb();					\
-	} while (0)
-#endif
+static inline void __native_flush_tlb_global(void)
+{
+	unsigned long cr4 = read_cr4();
+
+	/* clear PGE */
+	write_cr4(cr4 & ~X86_CR4_PGE);
+	/* write old PGE again and flush TLBs */
+	write_cr4(cr4);
+}
+
+static inline void __native_flush_tlb_single(unsigned long addr)
+{
+	__asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory");
+}
+
+static inline void __flush_tlb_all(void)
+{
+	if (cpu_has_pge)
+		__flush_tlb_global();
+	else
+		__flush_tlb();
+}
+
+static inline void __flush_tlb_one(unsigned long addr)
+{
+	if (cpu_has_invlpg)
+		__flush_tlb_single(addr);
+	else
+		__flush_tlb();
+}
 
 /*
  * TLB flushing:
@@ -86,11 +68,8 @@
 
 #define TLB_FLUSH_ALL	0xffffffff
 
-
 #ifndef CONFIG_SMP
 
-#include <linux/sched.h>
-
 #define flush_tlb() __flush_tlb()
 #define flush_tlb_all() __flush_tlb_all()
 #define local_flush_tlb() __flush_tlb()
@@ -102,21 +81,22 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
-	unsigned long addr)
+				  unsigned long addr)
 {
 	if (vma->vm_mm == current->active_mm)
 		__flush_tlb_one(addr);
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
-	unsigned long start, unsigned long end)
+				   unsigned long start, unsigned long end)
 {
 	if (vma->vm_mm == current->active_mm)
 		__flush_tlb();
 }
 
 static inline void native_flush_tlb_others(const cpumask_t *cpumask,
-			struct mm_struct *mm, unsigned long va)
+					   struct mm_struct *mm,
+					   unsigned long va)
 {
 }
 
@@ -124,8 +104,7 @@ static inline void native_flush_tlb_others(const cpumask_t *cpumask,
 
 #include <asm/smp.h>
 
-#define local_flush_tlb() \
-	__flush_tlb()
+#define local_flush_tlb() __flush_tlb()
 
 extern void flush_tlb_all(void);
 extern void flush_tlb_current_task(void);
@@ -134,7 +113,8 @@ extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
 
 #define flush_tlb() flush_tlb_current_task()
 
-static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
 {
 	flush_tlb_mm(vma->vm_mm);
 }
@@ -152,17 +132,17 @@ struct tlb_state
 	char __cacheline_padding[L1_CACHE_BYTES-8];
 };
 DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+
 #endif	/* SMP */
 
 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va)		\
-	native_flush_tlb_others(&mask, mm, va)
+#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va)
 #endif
 
 static inline void flush_tlb_kernel_range(unsigned long start,
-					unsigned long end)
+					  unsigned long end)
 {
 	flush_tlb_all();
 }
 
-#endif /* _I386_TLBFLUSH_H */
+#endif /* _X86_TLBFLUSH_H */
diff --git a/include/asm-x86/tlbflush_64.h b/include/asm-x86/tlbflush_64.h
index 7731fd23d57..0bed440ba9f 100644
--- a/include/asm-x86/tlbflush_64.h
+++ b/include/asm-x86/tlbflush_64.h
@@ -1,26 +1,55 @@
-#ifndef _X8664_TLBFLUSH_H
-#define _X8664_TLBFLUSH_H
+#ifndef _X86_TLBFLUSH_H
+#define _X86_TLBFLUSH_H
 
 #include <linux/mm.h>
 #include <linux/sched.h>
+
 #include <asm/processor.h>
 #include <asm/system.h>
 
-static inline void __flush_tlb(void)
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define __flush_tlb() __native_flush_tlb()
+#define __flush_tlb_global() __native_flush_tlb_global()
+#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
+#endif
+
+static inline void __native_flush_tlb(void)
 {
 	write_cr3(read_cr3());
 }
 
-static inline void __flush_tlb_all(void)
+static inline void __native_flush_tlb_global(void)
 {
 	unsigned long cr4 = read_cr4();
-	write_cr4(cr4 & ~X86_CR4_PGE);	/* clear PGE */
-	write_cr4(cr4);			/* write old PGE again and flush TLBs */
+
+	/* clear PGE */
+	write_cr4(cr4 & ~X86_CR4_PGE);
+	/* write old PGE again and flush TLBs */
+	write_cr4(cr4);
 }
 
-#define __flush_tlb_one(addr) \
-	__asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")
+static inline void __native_flush_tlb_single(unsigned long addr)
+{
+	__asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory");
+}
+
+static inline void __flush_tlb_all(void)
+{
+	if (cpu_has_pge)
+		__flush_tlb_global();
+	else
+		__flush_tlb();
+}
+static inline void __flush_tlb_one(unsigned long addr)
+{
+	if (cpu_has_invlpg)
+		__flush_tlb_single(addr);
+	else
+		__flush_tlb();
+}
 
 
 /*
 * TLB flushing:
@@ -37,6 +66,8 @@ static inline void __flush_tlb_all(void)
 * range a few INVLPGs in a row are a win.
 */
 
+#define TLB_FLUSH_ALL	-1ULL
+
 #ifndef CONFIG_SMP
 
 #define flush_tlb() __flush_tlb()
@@ -50,25 +81,30 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
-	unsigned long addr)
+				  unsigned long addr)
 {
 	if (vma->vm_mm == current->active_mm)
 		__flush_tlb_one(addr);
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
-	unsigned long start, unsigned long end)
+				   unsigned long start, unsigned long end)
 {
 	if (vma->vm_mm == current->active_mm)
 		__flush_tlb();
 }
 
-#else
+static inline void native_flush_tlb_others(const cpumask_t *cpumask,
+					   struct mm_struct *mm,
+					   unsigned long va)
+{
+}
+
+#else	/* SMP */
 
 #include <asm/smp.h>
 
-#define local_flush_tlb() \
-	__flush_tlb()
+#define local_flush_tlb() __flush_tlb()
 
 extern void flush_tlb_all(void);
 extern void flush_tlb_current_task(void);
@@ -77,24 +113,28 @@ extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
 
 #define flush_tlb() flush_tlb_current_task()
 
-static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
 {
 	flush_tlb_mm(vma->vm_mm);
 }
 
+void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
+			     unsigned long va);
+
 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2
 
-/* Roughly an IPI every 20MB with 4k pages for freeing page table
-   ranges. Cost is about 42k of memory for each CPU. */
-#define ARCH_FREE_PTE_NR 5350
+#endif	/* SMP */
 
+#ifndef CONFIG_PARAVIRT
+#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va)
 #endif
 
 static inline void flush_tlb_kernel_range(unsigned long start,
-					unsigned long end)
+					  unsigned long end)
 {
 	flush_tlb_all();
 }
 
-#endif /* _X8664_TLBFLUSH_H */
+#endif /* _X86_TLBFLUSH_H */
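For a sense of how the unified sentinel is used, here is a sketch of a flush
path mirroring flush_tlb_current_task() from the diff above; the wrapper name
flush_whole_mm() is hypothetical:

	/* Hypothetical caller: ask every other CPU running this mm to
	 * flush its whole TLB rather than one address.  TLB_FLUSH_ALL
	 * replaces the per-file FLUSH_ALL defines (0xffffffff on 32-bit,
	 * -1ULL on 64-bit). */
	static void flush_whole_mm(struct mm_struct *mm)
	{
		cpumask_t cpu_mask;

		preempt_disable();
		cpu_mask = mm->cpu_vm_mask;
		cpu_clear(smp_processor_id(), cpu_mask);

		local_flush_tlb();
		if (!cpus_empty(cpu_mask))
			flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
		preempt_enable();
	}

Because flush_tlb_others() is itself a hook point (it expands to
native_flush_tlb_others(&mask, mm, va) when CONFIG_PARAVIRT is off), a
paravirt backend can later batch or elide the cross-CPU shootdown.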