author     Ingo Molnar <mingo@elte.hu>   2008-01-30 13:34:09 +0100
committer  Ingo Molnar <mingo@elte.hu>   2008-01-30 13:34:09 +0100
commit     4c61afcdb2cd4be299c1442b33adf312b695e2d7
tree       8f51b96e2f6520c63b7c54dd84f4840ab9157590 /arch/x86
parent     3b233e52f70bf102078b2c0c3f7f86a441689056
x86: fix clflush_page_range logic
Only present ptes must be flushed.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
 arch/x86/mm/pageattr.c | 31 ++++++++++++++++++++++++-------
 1 file changed, 24 insertions(+), 7 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index bbfc8e2466a..97ec9e7d29d 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -26,7 +26,6 @@ within(unsigned long addr, unsigned long start, unsigned long end)
 /*
  * Flushing functions
  */
-
 /**
  * clflush_cache_range - flush a cache range with clflush
  * @addr:   virtual start address
@@ -35,13 +34,19 @@ within(unsigned long addr, unsigned long start, unsigned long end)
  * clflush is an unordered instruction which needs fencing with mfence
  * to avoid ordering issues.
  */
-void clflush_cache_range(void *addr, int size)
+void clflush_cache_range(void *vaddr, unsigned int size)
 {
-        int i;
+        void *vend = vaddr + size - 1;

         mb();
-        for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
-                clflush(addr+i);
+
+        for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
+                clflush(vaddr);
+        /*
+         * Flush any possible final partial cacheline:
+         */
+        clflush(vend);
+
         mb();
 }
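
The rewrite above is worth a worked example. With the old loop, a range that starts mid-cacheline could leave its tail unflushed: for addr = 0x1020 and size = 64 on a 64-byte line, the old code issued a single clflush at 0x1020 (covering only the line at 0x1000), while the bytes at 0x1040..0x105f in the next line were never flushed. The new code additionally flushes vend = vaddr + size - 1 = 0x105f, which covers that final partial line. A minimal userspace sketch of the same logic, assuming a fixed 64-byte line size and the SSE2 intrinsics in place of the kernel's clflush()/mb() helpers (flush_range is a hypothetical name; the kernel reads boot_cpu_data.x86_clflush_size instead of hardcoding the line size):

#include <emmintrin.h>  /* _mm_clflush(), _mm_mfence() (SSE2) */

static void flush_range(void *vaddr, unsigned int size)
{
        const unsigned int line = 64;   /* assumed clflush line size */
        char *p    = vaddr;
        char *vend = p + size - 1;      /* last byte of the range */

        _mm_mfence();                   /* clflush is unordered: fence before */
        for (; p < vend; p += line)
                _mm_clflush(p);
        _mm_clflush(vend);              /* catch a final partial cacheline */
        _mm_mfence();                   /* fence after */
}

Note that when the range happens to end exactly on a line boundary, the extra clflush(vend) merely re-flushes the last line, which is harmless.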
@@ -74,9 +79,13 @@ static void __cpa_flush_range(void *arg)
         __flush_tlb_all();
 }

-static void cpa_flush_range(unsigned long addr, int numpages)
+static void cpa_flush_range(unsigned long start, int numpages)
 {
+        unsigned int i, level;
+        unsigned long addr;
+
         BUG_ON(irqs_disabled());
+        WARN_ON(PAGE_ALIGN(start) != start);

         on_each_cpu(__cpa_flush_range, NULL, 1, 1);
@@ -86,7 +95,15 @@ static void cpa_flush_range(unsigned long addr, int numpages)
          * will cause all other CPUs to flush the same
          * cachelines:
          */
-        clflush_cache_range((void *) addr, numpages * PAGE_SIZE);
+        for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
+                pte_t *pte = lookup_address(addr, &level);
+
+                /*
+                 * Only flush present addresses:
+                 */
+                if (pte && pte_present(*pte))
+                        clflush_cache_range((void *) addr, PAGE_SIZE);
+        }
 }

 /*
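
One detail the diff does not spell out: clflush takes a memory operand, so issuing it against an address whose pte is no longer present can fault, and change_page_attr()-style callers can legitimately leave parts of the range unmapped. That is why the single clflush_cache_range() over the whole range is replaced by a per-page check. Restated outside the diff as a sketch, assuming kernel context; lookup_address() and pte_present() are the existing helpers used in the patch, and the function name here is hypothetical:

/*
 * Sketch (kernel context assumed): flush only pages that are
 * actually mapped, since clflush on a non-present address can
 * raise a page fault.
 */
static void flush_present_range(unsigned long start, int numpages)
{
        unsigned long addr = start;
        unsigned int i, level;

        for (i = 0; i < numpages; i++, addr += PAGE_SIZE) {
                pte_t *pte = lookup_address(addr, &level); /* NULL if unmapped */

                if (pte && pte_present(*pte))
                        clflush_cache_range((void *) addr, PAGE_SIZE);
        }
}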