Diffstat (limited to 'include/asm-sh/io.h')
-rw-r--r--  include/asm-sh/io.h  38
1 file changed, 22 insertions(+), 16 deletions(-)
diff --git a/include/asm-sh/io.h b/include/asm-sh/io.h
index 6ed34d8eac5..556aabe844c 100644
--- a/include/asm-sh/io.h
+++ b/include/asm-sh/io.h
@@ -243,12 +243,20 @@ static inline void ctrl_outl(unsigned int b, unsigned long addr)
static inline void ctrl_delay(void)
{
+#ifdef P2SEG
	ctrl_inw(P2SEG);
+#endif
}
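
As a hedged illustration (not part of the patch) of how ctrl_delay() is
typically paired with the ctrl_out*() accessors, with a made-up register
address standing in for a real on-chip register:

#include <asm/io.h>

#define DEMO_CTRL_REG	0xa4000100UL	/* hypothetical control register */

static void demo_ctrl_write(unsigned int val)
{
	ctrl_outl(val, DEMO_CTRL_REG);
	ctrl_delay();	/* dummy uncached read; now compiled out when P2SEG is undefined */
}
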
#define IO_SPACE_LIMIT 0xffffffff
-#ifdef CONFIG_MMU
+#if !defined(CONFIG_MMU)
+#define virt_to_phys(address) ((unsigned long)(address))
+#define phys_to_virt(address) ((void *)(address))
+#elif defined(CONFIG_SUPERH64)
+#define virt_to_phys(address) (__pa(address))
+#define phys_to_virt(address) (__va(address))
+#else
/*
* Change virtual addresses to physical addresses and vv.
* These are trivial on the 1:1 Linux/SuperH mapping
@@ -262,28 +270,24 @@ static inline void *phys_to_virt(unsigned long address)
{
	return (void *)P1SEGADDR(address);
}
-#else
-#define phys_to_virt(address) ((void *)(address))
-#define virt_to_phys(address) ((unsigned long)(address))
#endif
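
A hedged sketch of how the three branches above resolve the same call; the
helper is hypothetical, and the per-configuration behaviour follows the
definitions in this hunk (identity on nommu, __pa()/__va() on CONFIG_SUPERH64,
P1-segment translation on 32-bit MMU kernels):

#include <asm/io.h>

static unsigned long demo_buf_phys(void *buf)
{
	/* identity, __pa(), or P1-segment stripping, depending on config */
	return virt_to_phys(buf);
}
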
/*
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the x86 architecture, we just read/write the
- * memory location directly.
+ * On 32-bit SH, we traditionally have the whole physical address space
+ * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
+ * not need to do anything but place the address in the proper segment.
+ * This is true for P1 and P2 addresses, as well as some P3 ones.
+ * However, most of the P3 addresses and newer cores using extended
+ * addressing need to map through page tables, so the ioremap()
+ * implementation becomes a bit more complicated.
*
- * On SH, we traditionally have the whole physical address space mapped
- * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not
- * need to do anything but place the address in the proper segment. This
- * is true for P1 and P2 addresses, as well as some P3 ones. However,
- * most of the P3 addresses and newer cores using extended addressing
- * need to map through page tables, so the ioremap() implementation
- * becomes a bit more complicated. See arch/sh/mm/ioremap.c for
- * additional notes on this.
+ * See arch/sh/mm/ioremap.c for additional notes on this.
*
* We cheat a bit and always return uncachable areas until we've fixed
* the drivers to handle caching properly.
+ *
+ * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
+ * doesn't exist, so everything must go through page tables.
*/
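
For context, a hedged example of what the scheme described above looks like
from a driver; the device base address, size and register offset are invented
for illustration:

#include <linux/errno.h>
#include <asm/io.h>

#define DEMO_DEV_BASE	0xfe600000UL	/* hypothetical device registers */
#define DEMO_DEV_SIZE	0x100

static void __iomem *demo_regs;

static int demo_map(void)
{
	/* may come back as a P2 segment address or as a page-table mapping */
	demo_regs = ioremap(DEMO_DEV_BASE, DEMO_DEV_SIZE);
	if (!demo_regs)
		return -ENOMEM;

	writel(0x1, demo_regs + 0x04);	/* the accessors behave the same either way */
	return 0;
}

static void demo_unmap(void)
{
	iounmap(demo_regs);
}
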
#ifdef CONFIG_MMU
void __iomem *__ioremap(unsigned long offset, unsigned long size,
@@ -297,6 +301,7 @@ void __iounmap(void __iomem *addr);
static inline void __iomem *
__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
{
+#ifdef CONFIG_SUPERH32
	unsigned long last_addr = offset + size - 1;
	/*
@@ -311,6 +316,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
		return (void __iomem *)P2SEGADDR(offset);
	}
+#endif
	return __ioremap(offset, size, flags);
}
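
And a hedged illustration of the fast path that the new CONFIG_SUPERH32 guard
wraps; the helper name is hypothetical, and the range check elided between the
hunks is assumed to be the usual P1/P2 segment test:

#include <asm/io.h>
#include <asm/page.h>

static void __iomem *demo_map_uncached(unsigned long phys)
{
	/*
	 * On CONFIG_SUPERH32, a range that passes the segment check is
	 * returned directly as a P2SEGADDR(); anything else (and all of
	 * SH-5, which has no PXSEG segmentation) falls through to
	 * __ioremap() and a real mapping.
	 */
	return __ioremap_mode(phys, PAGE_SIZE, 0);
}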