Diffstat (limited to 'include')
 include/asm-mips/cacheflush.h       |  1 +
 include/asm-mips/mach-generic/ide.h | 46 ++++++++++++++++++++++++++++++++++--
 2 files changed, 45 insertions(+), 2 deletions(-)
diff --git a/include/asm-mips/cacheflush.h b/include/asm-mips/cacheflush.h
index aeae9fabf4a..47bc8f6c20d 100644
--- a/include/asm-mips/cacheflush.h
+++ b/include/asm-mips/cacheflush.h
@@ -74,6 +74,7 @@ static inline void copy_from_user_page(struct vm_area_struct *vma,
extern void (*flush_cache_sigtramp)(unsigned long addr);
extern void (*flush_icache_all)(void);
+extern void (*local_flush_data_cache_page)(void *addr);
extern void (*flush_data_cache_page)(unsigned long addr);
/*
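For context, a hedged sketch of how the two hooks can relate on SMP. The names local_flush_dcache_page_ipi() and smp_flush_data_cache_page() below are hypothetical; the real wiring lives in the arch/mips/mm cache code. The point is that flush_data_cache_page() must reach every CPU's dcache (typically via smp_call_function()), while the new local_flush_data_cache_page() only flushes on the executing CPU and therefore never sends an IPI.

/* Hypothetical illustration only -- not the kernel's actual implementation. */
static void local_flush_dcache_page_ipi(void *addr)
{
	local_flush_data_cache_page(addr);
}

static void smp_flush_data_cache_page(unsigned long addr)
{
	/* 2.6-era smp_call_function() signature: func, info, retry, wait. */
	smp_call_function(local_flush_dcache_page_ipi, (void *)addr, 1, 1);
	local_flush_data_cache_page((void *)addr);
}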
diff --git a/include/asm-mips/mach-generic/ide.h b/include/asm-mips/mach-generic/ide.h
index 550979a9ea9..e3315359500 100644
--- a/include/asm-mips/mach-generic/ide.h
+++ b/include/asm-mips/mach-generic/ide.h
@@ -104,65 +104,107 @@ static __inline__ unsigned long ide_default_io_base(int index)
#endif
/* MIPS port and memory-mapped I/O string operations. */
+static inline void __ide_flush_prologue(void)
+{
+#ifdef CONFIG_SMP
+ if (cpu_has_dc_aliases)
+ preempt_disable();
+#endif
+}
+
+static inline void __ide_flush_epilogue(void)
+{
+#ifdef CONFIG_SMP
+ if (cpu_has_dc_aliases)
+ preempt_enable();
+#endif
+}
static inline void __ide_flush_dcache_range(unsigned long addr, unsigned long size)
{
if (cpu_has_dc_aliases) {
unsigned long end = addr + size;
- for (; addr < end; addr += PAGE_SIZE)
- flush_dcache_page(virt_to_page(addr));
+
+ while (addr < end) {
+ local_flush_data_cache_page((void *)addr);
+ addr += PAGE_SIZE;
+ }
}
}
+/*
+ * insw() and friends may be called with interrupts disabled, so we can't
+ * send IPIs for flushing due to the potential for deadlocks; see the
+ * comment above smp_call_function() in arch/mips/kernel/smp.c.  We work
+ * around the problem by disabling preemption, so the flush is guaranteed
+ * to run on the processor that actually holds the lines to be flushed,
+ * which is hopefully better for performance anyway.
+ */
static inline void __ide_insw(unsigned long port, void *addr,
unsigned int count)
{
+ __ide_flush_prologue();
insw(port, addr, count);
__ide_flush_dcache_range((unsigned long)addr, count * 2);
+ __ide_flush_epilogue();
}
static inline void __ide_insl(unsigned long port, void *addr, unsigned int count)
{
+ __ide_flush_prologue();
insl(port, addr, count);
__ide_flush_dcache_range((unsigned long)addr, count * 4);
+ __ide_flush_epilogue();
}
static inline void __ide_outsw(unsigned long port, const void *addr,
unsigned long count)
{
+ __ide_flush_prologue();
outsw(port, addr, count);
__ide_flush_dcache_range((unsigned long)addr, count * 2);
+ __ide_flush_epilogue();
}
static inline void __ide_outsl(unsigned long port, const void *addr,
unsigned long count)
{
+ __ide_flush_prologue();
outsl(port, addr, count);
__ide_flush_dcache_range((unsigned long)addr, count * 4);
+ __ide_flush_epilogue();
}
static inline void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
{
+ __ide_flush_prologue();
readsw(port, addr, count);
__ide_flush_dcache_range((unsigned long)addr, count * 2);
+ __ide_flush_epilogue();
}
static inline void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
{
+ __ide_flush_prologue();
readsl(port, addr, count);
__ide_flush_dcache_range((unsigned long)addr, count * 4);
+ __ide_flush_epilogue();
}
static inline void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
{
+ __ide_flush_prologue();
writesw(port, addr, count);
__ide_flush_dcache_range((unsigned long)addr, count * 2);
+ __ide_flush_epilogue();
}
static inline void __ide_mm_outsl(void __iomem * port, void *addr, u32 count)
{
+ __ide_flush_prologue();
writesl(port, addr, count);
__ide_flush_dcache_range((unsigned long)addr, count * 4);
+ __ide_flush_epilogue();
}
/* ide_insw calls insw, not __ide_insw. Why? */
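Reduced to its essentials, the pattern the helpers above implement looks like the hedged sketch below. pio_read_buffer() and device_read_words() are hypothetical names; a real version would also check cpu_has_dc_aliases and walk the buffer page by page, as __ide_flush_dcache_range() does.

/* Hedged sketch of the prologue/transfer/flush/epilogue pattern. */
static void pio_read_buffer(unsigned long port, void *buf, unsigned int words)
{
	preempt_disable();			/* pin the task to this CPU */
	device_read_words(port, buf, words);	/* PIO fills dcache lines locally */
	local_flush_data_cache_page(buf);	/* flush on that same CPU */
	preempt_enable();
}

Because the flush never leaves the local CPU, the sequence stays safe even when the caller runs with interrupts disabled, which is precisely the situation the comment above __ide_insw() warns about.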