author     Thomas Gleixner <tglx@linutronix.de>    2008-01-30 13:34:07 +0100
committer  Ingo Molnar <mingo@elte.hu>             2008-01-30 13:34:07 +0100
commit     d7c8f21a8cad0228c7c5ce2bb6dbd95d1ee49d13 (patch)
tree       d1e305bec62022a0bec82a3499a372c2c7c40583 /arch/x86/mm
parent     d1028a154c65d7fadd1b2d0276c077014d401ec7 (diff)
x86: cpa: move flush to cpa
The set_memory_*() and set_pages_*() family of APIs currently requires
callers to do a global TLB flush after the function call; forgetting this
is a very nasty deathtrap. This patch moves the global TLB flush out of the
callers and into the CPA code itself, so set_memory_*() and set_pages_*()
now flush on behalf of their callers.
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
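
Caller-side, the contract change looks like this (a minimal illustrative sketch
mirroring the mark_rodata_ro() hunks below, not an excerpt from the patch):

	/* Before this patch: every caller had to remember the flush itself. */
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
	global_flush_tlb();	/* forgetting this left stale TLB entries */

	/* After this patch: set_memory_ro() flushes internally; global_flush_tlb()
	 * becomes static to pageattr.c and is no longer exported. */
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);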
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--   arch/x86/mm/init_32.c   |  14
-rw-r--r--   arch/x86/mm/init_64.c   |  10
-rw-r--r--   arch/x86/mm/ioremap.c   |   2
-rw-r--r--   arch/x86/mm/pageattr.c  | 137
4 files changed, 71 insertions, 92 deletions
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index f7b941c3b2c..0d3369b900e 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -752,15 +752,11 @@ void mark_rodata_ro(void)
 		printk("Write protecting the kernel text: %luk\n", size >> 10);
 
 #ifdef CONFIG_CPA_DEBUG
-		global_flush_tlb();
-
 		printk("Testing CPA: Reverting %lx-%lx\n", start, start+size);
 		set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
-		global_flush_tlb();
 
 		printk("Testing CPA: write protecting again\n");
 		set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
-		global_flush_tlb();
 #endif
 	}
 #endif
@@ -770,22 +766,12 @@ void mark_rodata_ro(void)
 	printk("Write protecting the kernel read-only data: %luk\n",
 		size >> 10);
 
-	/*
-	 * set_pages_*() requires a global_flush_tlb() call after it.
-	 * We do this after the printk so that if something went wrong in the
-	 * change, the printk gets out at least to give a better debug hint
-	 * of who is the culprit.
-	 */
-	global_flush_tlb();
-
 #ifdef CONFIG_CPA_DEBUG
 	printk("Testing CPA: undo %lx-%lx\n", start, start + size);
 	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
-	global_flush_tlb();
 
 	printk("Testing CPA: write protecting again\n");
 	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
-	global_flush_tlb();
 #endif
 }
 #endif
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 4757be7b5e5..9b69fa54a83 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -610,22 +610,12 @@ void mark_rodata_ro(void)
 	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
 	       (end - start) >> 10);
 
-	/*
-	 * set_memory_*() requires a global_flush_tlb() call after it.
-	 * We do this after the printk so that if something went wrong in the
-	 * change, the printk gets out at least to give a better debug hint
-	 * of who is the culprit.
-	 */
-	global_flush_tlb();
-
 #ifdef CONFIG_CPA_DEBUG
 	printk("Testing CPA: undo %lx-%lx\n", start, end);
 	set_memory_rw(start, (end-start) >> PAGE_SHIFT);
-	global_flush_tlb();
 
 	printk("Testing CPA: again\n");
 	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
-	global_flush_tlb();
 #endif
 }
 #endif
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index b86f66fa518..6a9a1418bc9 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -96,8 +96,6 @@ static int ioremap_change_attr(unsigned long paddr, unsigned long size,
 		err = set_memory_wb(vaddr, nrpages);
 		break;
 	}
-	if (!err)
-		global_flush_tlb();
 
 	return err;
 }
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e4d2b6930e6..a2d747c0614 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -23,6 +23,36 @@ within(unsigned long addr, unsigned long start, unsigned long end)
 }
 
 /*
+ * Flushing functions
+ */
+void clflush_cache_range(void *addr, int size)
+{
+	int i;
+
+	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
+		clflush(addr+i);
+}
+
+static void flush_kernel_map(void *arg)
+{
+	/*
+	 * Flush all to work around Errata in early athlons regarding
+	 * large page flushing.
+	 */
+	__flush_tlb_all();
+
+	if (boot_cpu_data.x86_model >= 4)
+		wbinvd();
+}
+
+static void global_flush_tlb(void)
+{
+	BUG_ON(irqs_disabled());
+
+	on_each_cpu(flush_kernel_map, NULL, 1, 1);
+}
+
+/*
  * Certain areas of memory on x86 require very specific protection flags,
  * for example the BIOS area or kernel text. Callers don't always get this
  * right (again, ioremap() on BIOS memory is not uncommon) so this function
@@ -328,149 +358,124 @@ static int change_page_attr_clear(unsigned long addr, int numpages,
 
 int set_memory_uc(unsigned long addr, int numpages)
 {
-	pgprot_t uncached;
+	int err;
 
-	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
-	return change_page_attr_set(addr, numpages, uncached);
+	err = change_page_attr_set(addr, numpages,
+				   __pgprot(_PAGE_PCD | _PAGE_PWT));
+	global_flush_tlb();
+	return err;
 }
 EXPORT_SYMBOL(set_memory_uc);
 
 int set_memory_wb(unsigned long addr, int numpages)
 {
-	pgprot_t uncached;
+	int err;
 
-	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
-	return change_page_attr_clear(addr, numpages, uncached);
+	err = change_page_attr_clear(addr, numpages,
+				     __pgprot(_PAGE_PCD | _PAGE_PWT));
+	global_flush_tlb();
+	return err;
 }
 EXPORT_SYMBOL(set_memory_wb);
 
 int set_memory_x(unsigned long addr, int numpages)
 {
-	pgprot_t nx;
+	int err;
 
-	pgprot_val(nx) = _PAGE_NX;
-	return change_page_attr_clear(addr, numpages, nx);
+	err = change_page_attr_clear(addr, numpages,
+				     __pgprot(_PAGE_NX));
+	global_flush_tlb();
+	return err;
 }
 EXPORT_SYMBOL(set_memory_x);
 
 int set_memory_nx(unsigned long addr, int numpages)
 {
-	pgprot_t nx;
+	int err;
 
-	pgprot_val(nx) = _PAGE_NX;
-	return change_page_attr_set(addr, numpages, nx);
+	err = change_page_attr_set(addr, numpages,
+				   __pgprot(_PAGE_NX));
+	global_flush_tlb();
+	return err;
 }
 EXPORT_SYMBOL(set_memory_nx);
 
 int set_memory_ro(unsigned long addr, int numpages)
 {
-	pgprot_t rw;
+	int err;
 
-	pgprot_val(rw) = _PAGE_RW;
-	return change_page_attr_clear(addr, numpages, rw);
+	err = change_page_attr_clear(addr, numpages,
+				     __pgprot(_PAGE_RW));
+	global_flush_tlb();
+	return err;
 }
 
 int set_memory_rw(unsigned long addr, int numpages)
 {
-	pgprot_t rw;
+	int err;
 
-	pgprot_val(rw) = _PAGE_RW;
-	return change_page_attr_set(addr, numpages, rw);
+	err = change_page_attr_set(addr, numpages,
+				   __pgprot(_PAGE_RW));
+	global_flush_tlb();
+	return err;
 }
 
 int set_memory_np(unsigned long addr, int numpages)
 {
-	pgprot_t present;
+	int err;
 
-	pgprot_val(present) = _PAGE_PRESENT;
-	return change_page_attr_clear(addr, numpages, present);
+	err = change_page_attr_clear(addr, numpages,
+				     __pgprot(_PAGE_PRESENT));
+	global_flush_tlb();
+	return err;
 }
 
 int set_pages_uc(struct page *page, int numpages)
 {
 	unsigned long addr = (unsigned long)page_address(page);
-	pgprot_t uncached;
 
-	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
-	return change_page_attr_set(addr, numpages, uncached);
+	return set_memory_uc(addr, numpages);
 }
 EXPORT_SYMBOL(set_pages_uc);
 
 int set_pages_wb(struct page *page, int numpages)
 {
 	unsigned long addr = (unsigned long)page_address(page);
-	pgprot_t uncached;
 
-	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
-	return change_page_attr_clear(addr, numpages, uncached);
+	return set_memory_wb(addr, numpages);
 }
 EXPORT_SYMBOL(set_pages_wb);
 
 int set_pages_x(struct page *page, int numpages)
 {
 	unsigned long addr = (unsigned long)page_address(page);
-	pgprot_t nx;
 
-	pgprot_val(nx) = _PAGE_NX;
-	return change_page_attr_clear(addr, numpages, nx);
+	return set_memory_x(addr, numpages);
 }
 EXPORT_SYMBOL(set_pages_x);
 
 int set_pages_nx(struct page *page, int numpages)
 {
 	unsigned long addr = (unsigned long)page_address(page);
-	pgprot_t nx;
 
-	pgprot_val(nx) = _PAGE_NX;
-	return change_page_attr_set(addr, numpages, nx);
+	return set_memory_nx(addr, numpages);
 }
 EXPORT_SYMBOL(set_pages_nx);
 
 int set_pages_ro(struct page *page, int numpages)
 {
 	unsigned long addr = (unsigned long)page_address(page);
-	pgprot_t rw;
 
-	pgprot_val(rw) = _PAGE_RW;
-	return change_page_attr_clear(addr, numpages, rw);
+	return set_memory_ro(addr, numpages);
 }
 
 int set_pages_rw(struct page *page, int numpages)
 {
 	unsigned long addr = (unsigned long)page_address(page);
-	pgprot_t rw;
-
-	pgprot_val(rw) = _PAGE_RW;
-	return change_page_attr_set(addr, numpages, rw);
-}
-
-void clflush_cache_range(void *addr, int size)
-{
-	int i;
-
-	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
-		clflush(addr+i);
-}
 
-static void flush_kernel_map(void *arg)
-{
-	/*
-	 * Flush all to work around Errata in early athlons regarding
-	 * large page flushing.
-	 */
-	__flush_tlb_all();
-
-	if (boot_cpu_data.x86_model >= 4)
-		wbinvd();
+	return set_memory_rw(addr, numpages);
 }
 
-void global_flush_tlb(void)
-{
-	BUG_ON(irqs_disabled());
-
-	on_each_cpu(flush_kernel_map, NULL, 1, 1);
-}
-EXPORT_SYMBOL(global_flush_tlb);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC