| author | Nicolas Pitre <nico@cam.org> | 2009-06-12 03:09:29 +0100 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2009-06-14 11:20:37 +0100 |
| commit | 73be1591579084a8103a7005dd3172f3e9dd7362 (patch) | |
| tree | 0f9dc996b9effc48728b2e25f51c41eded8cb518 /arch/arm/include | |
| parent | 44b7532b8b464f606053562400719c9c21276037 (diff) | |
[ARM] 5545/2: add flush_kernel_dcache_page() for ARM
Without this, the default implementation is a no-op, which is completely
wrong with a VIVT cache, and usage of sg_copy_buffer() produces
unpredictable results.
Tested-by: Sebastian Andrzej Siewior <bigeasy@breakpoint.cc>
CC: stable@kernel.org
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
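For context (not part of this patch): when an architecture does not define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE, the generic header supplies an empty inline, so the call compiles away entirely. A rough sketch of that fallback, approximately as it appears in include/linux/highmem.h of this era:

```c
/*
 * Generic fallback (approximate, for context only): without
 * ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE the call does nothing, so dirty
 * cache lines in the kernel mapping are never written back, which is
 * wrong on a VIVT or aliasing VIPT cache.
 */
#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
#endif
```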
Diffstat (limited to 'arch/arm/include')
-rw-r--r-- arch/arm/include/asm/cacheflush.h | 8
1 file changed, 8 insertions, 0 deletions
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index bb7d695f390..1a711ea8418 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -429,6 +429,14 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 		__flush_anon_page(vma, page, vmaddr);
 }
 
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+	/* highmem pages are always flushed upon kunmap already */
+	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
+		__cpuc_flush_dcache_page(page_address(page));
+}
+
 #define flush_dcache_mmap_lock(mapping) \
 	spin_lock_irq(&(mapping)->tree_lock)
 #define flush_dcache_mmap_unlock(mapping) \
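For illustration, a minimal sketch (not part of the commit; example_copy_to_page() is a hypothetical helper) of the write-then-flush pattern that sg_copy_buffer() relies on: data is written to a page through its kernel mapping, then flush_kernel_dcache_page() pushes the dirty lines out so that other virtual aliases of the page, such as a user-space mapping on a VIVT cache, see the new data.

```c
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical helper, for illustration only; assumes len <= PAGE_SIZE. */
static void example_copy_to_page(struct page *page, const void *src, size_t len)
{
	void *vaddr = kmap(page);	/* kernel alias of the page */

	memcpy(vaddr, src, len);

	/*
	 * Write back the dirty lines from the kernel alias before the
	 * mapping is dropped.  With the generic no-op this step did
	 * nothing on ARM, leaving stale data visible through other
	 * aliases on VIVT and aliasing VIPT caches.
	 */
	flush_kernel_dcache_page(page);
	kunmap(page);
}
```

With the implementation added above, the flush reduces to nothing on non-aliasing caches and skips highmem pages, which are already flushed when they are kunmapped.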