diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2008-01-30 13:34:08 +0100 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-30 13:34:08 +0100 |
commit | 3b233e52f70bf102078b2c0c3f7f86a441689056 (patch) | |
tree | f9c65948016a3bffd3a3b0c8d327d28fc34a7ea8 /arch/x86/mm | |
parent | cd8ddf1a2800026dd58433333cce7a65cbc6c6d2 (diff) |
x86: optimize clflush
It is sufficient to issue clflush on a single CPU; the invalidation is
broadcast throughout the coherence domain.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r-- | arch/x86/mm/pageattr.c | 22 |
1 file changed, 8 insertions, 14 deletions
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 90b658ac39c..bbfc8e2466a 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -64,35 +64,29 @@ static void cpa_flush_all(void) on_each_cpu(__cpa_flush_all, NULL, 1, 1); } -struct clflush_data { - unsigned long addr; - int numpages; -}; - static void __cpa_flush_range(void *arg) { - struct clflush_data *cld = arg; - /* * We could optimize that further and do individual per page * tlb invalidates for a low number of pages. Caveat: we must * flush the high aliases on 64bit as well. */ __flush_tlb_all(); - - clflush_cache_range((void *) cld->addr, cld->numpages * PAGE_SIZE); } static void cpa_flush_range(unsigned long addr, int numpages) { - struct clflush_data cld; - BUG_ON(irqs_disabled()); - cld.addr = addr; - cld.numpages = numpages; + on_each_cpu(__cpa_flush_range, NULL, 1, 1); - on_each_cpu(__cpa_flush_range, &cld, 1, 1); + /* + * We only need to flush on one CPU, + * clflush is a MESI-coherent instruction that + * will cause all other CPUs to flush the same + * cachelines: + */ + clflush_cache_range((void *) addr, numpages * PAGE_SIZE); } /* |