Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/ioremap.c | 99
1 file changed, 81 insertions, 18 deletions
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index e794e27a72f..96fa4a999e2 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -6,13 +6,19 @@
* 640k-1MB IO memory area on PC's
*
* (C) Copyright 1995 1996 Linus Torvalds
+ * (C) Copyright 2005, 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
*/
-
#include <linux/vmalloc.h>
+#include <linux/module.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
+#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
@@ -80,9 +86,15 @@ int remap_area_pages(unsigned long address, unsigned long phys_addr,
if (address >= end)
BUG();
do {
+ pud_t *pud;
pmd_t *pmd;
- pmd = pmd_alloc(&init_mm, dir, address);
+
error = -ENOMEM;
+
+ pud = pud_alloc(&init_mm, dir, address);
+ if (!pud)
+ break;
+ pmd = pmd_alloc(&init_mm, pud, address);
if (!pmd)
break;
if (remap_area_pmd(pmd, address, end - address,
@@ -97,10 +109,6 @@ int remap_area_pages(unsigned long address, unsigned long phys_addr,
}
/*
- * Generic mapping function (not visible outside):
- */
-
-/*
* Remap an arbitrary physical address space into the kernel virtual
* address space. Needed when the kernel wants to access high addresses
* directly.
@@ -109,11 +117,11 @@ int remap_area_pages(unsigned long address, unsigned long phys_addr,
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
-void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+ unsigned long flags)
{
- void * addr;
struct vm_struct * area;
- unsigned long offset, last_addr;
+ unsigned long offset, last_addr, addr, orig_addr;
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
@@ -124,7 +132,7 @@ void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long fla
* Don't remap the low PCI/ISA area, it's always mapped..
*/
if (phys_addr >= 0xA0000 && last_addr < 0x100000)
- return phys_to_virt(phys_addr);
+ return (void __iomem *)phys_to_virt(phys_addr);
/*
* Don't allow anybody to remap normal RAM that we're using..
@@ -146,16 +154,71 @@ void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long fla
if (!area)
return NULL;
area->phys_addr = phys_addr;
- addr = area->addr;
- if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
- vunmap(addr);
- return NULL;
+ orig_addr = addr = (unsigned long)area->addr;
+
+#ifdef CONFIG_32BIT
+ /*
+ * First try to remap through the PMB once a valid VMA has been
+ * established. Smaller allocations (or the rest of the size
+ * remaining after a PMB mapping due to the size not being
+ * perfectly aligned on a PMB size boundary) are then mapped
+ * through the UTLB using conventional page tables.
+ *
+ * PMB entries are all pre-faulted.
+ */
+ if (unlikely(size >= 0x1000000)) {
+ unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
+
+ if (likely(mapped)) {
+ addr += mapped;
+ phys_addr += mapped;
+ size -= mapped;
+ }
}
- return (void *) (offset + (char *)addr);
+#endif
+
+ if (likely(size))
+ if (remap_area_pages(addr, phys_addr, size, flags)) {
+ vunmap((void *)orig_addr);
+ return NULL;
+ }
+
+ return (void __iomem *)(offset + (char *)orig_addr);
}
+EXPORT_SYMBOL(__ioremap);
-void p3_iounmap(void *addr)
+void __iounmap(void __iomem *addr)
{
- if (addr > high_memory)
- vfree((void *)(PAGE_MASK & (unsigned long)addr));
+ unsigned long vaddr = (unsigned long __force)addr;
+ struct vm_struct *p;
+
+ if (PXSEG(vaddr) < P3SEG)
+ return;
+
+#ifdef CONFIG_32BIT
+ /*
+ * Purge any PMB entries that may have been established for this
+ * mapping, then proceed with conventional VMA teardown.
+ *
+ * XXX: Note that due to the way that remove_vm_area() does
+ * matching of the resultant VMA, we aren't able to fast-forward
+ * the address past the PMB space until the end of the VMA where
+ * the page tables reside. As such, unmap_vm_area() will be
+ * forced to linearly scan over the area until it finds the page
+ * tables where PTEs that need to be unmapped actually reside,
+ * which is far from optimal. Perhaps we need to use a separate
+ * VMA for the PMB mappings?
+ * -- PFM.
+ */
+ pmb_unmap(vaddr);
+#endif
+
+ p = remove_vm_area((void *)(vaddr & PAGE_MASK));
+ if (!p) {
+ printk(KERN_ERR "%s: bad address %p\n", __FUNCTION__, addr);
+ return;
+ }
+
+ kfree(p);
}
+EXPORT_SYMBOL(__iounmap);
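
For reference, the sub-page offset handling that the comment above __ioremap()
alludes to works out as follows. This is a standalone userspace sketch of the
arithmetic only, with hypothetical addresses; the kernel's actual alignment
code lives in context lines this diff does not show.

#include <stdio.h>

#define PAGE_SIZE      4096UL
#define PAGE_MASK      (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        /* Hypothetical, non-page-aligned register block. */
        unsigned long phys_addr = 0xfd000123UL;
        unsigned long size      = 0x100UL;

        unsigned long last_addr = phys_addr + size - 1;

        /* Remember the sub-page offset, then page-align the request. */
        unsigned long offset = phys_addr & ~PAGE_MASK;   /* 0x123 */
        phys_addr &= PAGE_MASK;                          /* 0xfd000000 */
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;    /* 0x1000 */

        /*
         * __ioremap() maps [phys_addr, phys_addr + size) into a fresh
         * VMA and folds the offset back into the returned pointer,
         * which is why the caller never sees the alignment games.
         */
        printf("map %#lx bytes at %#lx, return base + %#lx\n",
               size, phys_addr, offset);
        return 0;
}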
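
The CONFIG_32BIT branch splits a large request between the PMB and
conventional page tables. Below is a standalone sketch of that split for a
hypothetical 20 MiB request, assuming pmb_remap() manages a pre-faulted
16 MiB prefix; how much is actually mapped depends on the PMB entry sizes
available.

#include <stdio.h>

int main(void)
{
        unsigned long addr      = 0xb0000000UL;  /* hypothetical P3 vaddr */
        unsigned long phys_addr = 0x10000000UL;  /* hypothetical phys base */
        unsigned long size      = 20UL << 20;    /* 20 MiB request */

        if (size >= 0x1000000) {
                /* Suppose the PMB covered a pre-faulted 16 MiB prefix. */
                unsigned long mapped = 0x1000000UL;

                addr      += mapped;
                phys_addr += mapped;
                size      -= mapped;
        }

        /*
         * The 4 MiB tail falls through to remap_area_pages() and is
         * mapped with conventional page tables, faulted via the UTLB.
         */
        printf("page-table tail: %lu MiB, %#lx -> %#lx\n",
               size >> 20, addr, phys_addr);
        return 0;
}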
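
With p3_ioremap()/p3_iounmap() renamed to __ioremap()/__iounmap() and
exported, modules reach this code through the arch's usual ioremap() and
iounmap() wrappers. A hedged driver-side sketch; the device name, physical
base, and register offset are all hypothetical:

#include <linux/module.h>
#include <linux/init.h>
#include <asm/io.h>

#define FOO_PHYS  0xa4000000UL  /* hypothetical register block */
#define FOO_SIZE  0x1000UL

static void __iomem *foo_regs;

static int __init foo_init(void)
{
        foo_regs = ioremap(FOO_PHYS, FOO_SIZE);
        if (!foo_regs)
                return -ENOMEM;

        writel(1, foo_regs + 0x10);  /* hypothetical enable register */
        return 0;
}

static void __exit foo_exit(void)
{
        iounmap(foo_regs);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");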