Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/Kconfig          |   6
-rw-r--r--  arch/sh/mm/Makefile_32      |   3
-rw-r--r--  arch/sh/mm/Makefile_64      |   3
-rw-r--r--  arch/sh/mm/asids-debugfs.c  |  79
-rw-r--r--  arch/sh/mm/cache-debugfs.c  |   6
-rw-r--r--  arch/sh/mm/cache-sh2a.c     |   8
-rw-r--r--  arch/sh/mm/cache-sh4.c      |   2
-rw-r--r--  arch/sh/mm/consistent.c     |  19
-rw-r--r--  arch/sh/mm/fault_32.c       |  74
-rw-r--r--  arch/sh/mm/init.c           |  71
-rw-r--r--  arch/sh/mm/ioremap_32.c     |   3
-rw-r--r--  arch/sh/mm/mmap.c           | 125
-rw-r--r--  arch/sh/mm/pg-nommu.c       |   1
-rw-r--r--  arch/sh/mm/pg-sh4.c         |  17
-rw-r--r--  arch/sh/mm/pmb.c            |   2
-rw-r--r--  arch/sh/mm/tlb-nommu.c      |   1
16 files changed, 351 insertions, 69 deletions
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 8a03926ea84..555ec9714b9 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -132,7 +132,11 @@ config ARCH_SELECT_MEMORY_MODEL
config ARCH_ENABLE_MEMORY_HOTPLUG
def_bool y
- depends on SPARSEMEM
+ depends on SPARSEMEM && MMU
+
+config ARCH_ENABLE_MEMORY_HOTREMOVE
+ def_bool y
+ depends on SPARSEMEM && MMU
config ARCH_MEMORY_PROBE
def_bool y
diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32
index 70e0906023c..cb2f3f29959 100644
--- a/arch/sh/mm/Makefile_32
+++ b/arch/sh/mm/Makefile_32
@@ -2,7 +2,7 @@
# Makefile for the Linux SuperH-specific parts of the memory manager.
#
-obj-y := init.o extable_32.o consistent.o
+obj-y := init.o extable_32.o consistent.o mmap.o
ifndef CONFIG_CACHE_OFF
cache-$(CONFIG_CPU_SH2) := cache-sh2.o
@@ -18,6 +18,7 @@ mmu-y := tlb-nommu.o pg-nommu.o
mmu-$(CONFIG_MMU) := fault_32.o tlbflush_32.o ioremap_32.o
obj-y += $(mmu-y)
+obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o
ifdef CONFIG_DEBUG_FS
obj-$(CONFIG_CPU_SH4) += cache-debugfs.o
diff --git a/arch/sh/mm/Makefile_64 b/arch/sh/mm/Makefile_64
index 0d92a8a3ac9..2863ffb7006 100644
--- a/arch/sh/mm/Makefile_64
+++ b/arch/sh/mm/Makefile_64
@@ -2,7 +2,7 @@
# Makefile for the Linux SuperH-specific parts of the memory manager.
#
-obj-y := init.o consistent.o
+obj-y := init.o consistent.o mmap.o
mmu-y := tlb-nommu.o pg-nommu.o extable_32.o
mmu-$(CONFIG_MMU) := fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o \
@@ -13,6 +13,7 @@ obj-y += cache-sh5.o
endif
obj-y += $(mmu-y)
+obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_NUMA) += numa.o
diff --git a/arch/sh/mm/asids-debugfs.c b/arch/sh/mm/asids-debugfs.c
new file mode 100644
index 00000000000..8e912a15e94
--- /dev/null
+++ b/arch/sh/mm/asids-debugfs.c
@@ -0,0 +1,79 @@
+/*
+ * debugfs ops for process ASIDs
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 - 2008 Paul Mundt
+ * Copyright (C) 2003, 2004 Richard Curnow
+ *
+ * Provides a debugfs file that lists out the ASIDs currently associated
+ * with the processes.
+ *
+ * In the SH-5 case, if the DM.PC register is examined through the debug
+ * link, this shows ASID + PC. To make use of this, the PID->ASID
+ * relationship needs to be known. This is primarily for debugging.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+
+static int asids_seq_show(struct seq_file *file, void *iter)
+{
+ struct task_struct *p;
+
+ read_lock(&tasklist_lock);
+
+ for_each_process(p) {
+ int pid = p->pid;
+
+ if (unlikely(!pid))
+ continue;
+
+ if (p->mm)
+ seq_printf(file, "%5d : %02lx\n", pid,
+ cpu_asid(smp_processor_id(), p->mm));
+ else
+ seq_printf(file, "%5d : (none)\n", pid);
+ }
+
+ read_unlock(&tasklist_lock);
+
+ return 0;
+}
+
+static int asids_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, asids_seq_show, inode->i_private);
+}
+
+static const struct file_operations asids_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = asids_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init asids_debugfs_init(void)
+{
+ struct dentry *asids_dentry;
+
+ asids_dentry = debugfs_create_file("asids", S_IRUSR, sh_debugfs_root,
+ NULL, &asids_debugfs_fops);
+ if (!asids_dentry)
+ return -ENOMEM;
+ if (IS_ERR(asids_dentry))
+ return PTR_ERR(asids_dentry);
+
+ return 0;
+}
+module_init(asids_debugfs_init);
+
+MODULE_LICENSE("GPL v2");
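The debugfs_create_file() call in asids_debugfs_init() above, like the reworked calls in cache-debugfs.c and pmb.c further down, checks both a NULL return (allocation failure) and an ERR_PTR return (for instance when debugfs support is unavailable), since this kernel's debugfs can report failure either way. A minimal sketch of that pattern factored into a standalone helper; the helper name is hypothetical and not part of the patch:

static int __init sh_debugfs_create(const char *name, mode_t mode,
				    struct dentry *parent, void *data,
				    const struct file_operations *fops,
				    struct dentry **dentryp)
{
	struct dentry *dentry;

	dentry = debugfs_create_file(name, mode, parent, data, fops);
	if (!dentry)
		return -ENOMEM;			/* out of memory */
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);		/* e.g. debugfs not available */

	*dentryp = dentry;
	return 0;
}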
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 0e189ccd4a7..5ba067b2659 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -130,12 +130,18 @@ static int __init cache_debugfs_init(void)
dcache_dentry = debugfs_create_file("dcache", S_IRUSR, sh_debugfs_root,
(unsigned int *)CACHE_TYPE_DCACHE,
&cache_debugfs_fops);
+ if (!dcache_dentry)
+ return -ENOMEM;
if (IS_ERR(dcache_dentry))
return PTR_ERR(dcache_dentry);
icache_dentry = debugfs_create_file("icache", S_IRUSR, sh_debugfs_root,
(unsigned int *)CACHE_TYPE_ICACHE,
&cache_debugfs_fops);
+ if (!icache_dentry) {
+ debugfs_remove(dcache_dentry);
+ return -ENOMEM;
+ }
if (IS_ERR(icache_dentry)) {
debugfs_remove(dcache_dentry);
return PTR_ERR(icache_dentry);
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c
index 62c0c5f3512..24d86a79406 100644
--- a/arch/sh/mm/cache-sh2a.c
+++ b/arch/sh/mm/cache-sh2a.c
@@ -59,7 +59,7 @@ void __flush_purge_region(void *start, int size)
for (v = begin; v < end; v+=L1_CACHE_BYTES) {
ctrl_outl((v & CACHE_PHYSADDR_MASK),
- CACHE_OC_ADDRESS_ARRAY | (v & 0x000003f0) | 0x00000008);
+ CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008);
}
back_to_cached();
local_irq_restore(flags);
@@ -82,14 +82,14 @@ void __flush_invalidate_region(void *start, int size)
/* I-cache invalidate */
for (v = begin; v < end; v+=L1_CACHE_BYTES) {
ctrl_outl((v & CACHE_PHYSADDR_MASK),
- CACHE_IC_ADDRESS_ARRAY | (v & 0x000003f0) | 0x00000008);
+ CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008);
}
#else
for (v = begin; v < end; v+=L1_CACHE_BYTES) {
ctrl_outl((v & CACHE_PHYSADDR_MASK),
- CACHE_IC_ADDRESS_ARRAY | (v & 0x000003f0) | 0x00000008);
+ CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008);
ctrl_outl((v & CACHE_PHYSADDR_MASK),
- CACHE_OC_ADDRESS_ARRAY | (v & 0x000003f0) | 0x00000008);
+ CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008);
}
#endif
back_to_cached();
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 1fdc8d90254..5cfe08dbb59 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -261,7 +261,7 @@ void flush_dcache_page(struct page *page)
}
/* TODO: Selective icache invalidation through IC address array.. */
-static inline void __uses_jump_to_uncached flush_icache_all(void)
+static void __uses_jump_to_uncached flush_icache_all(void)
{
unsigned long flags, ccr;
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 64b8f7f96f9..edcd5fbf965 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -16,14 +16,6 @@
#include <asm/addrspace.h>
#include <asm/io.h>
-struct dma_coherent_mem {
- void *virt_base;
- u32 device_base;
- int size;
- int flags;
- unsigned long *bitmap;
-};
-
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
@@ -44,12 +36,14 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
*/
dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);
- ret_nocache = ioremap_nocache(virt_to_phys(ret), size);
+ ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
if (!ret_nocache) {
free_pages((unsigned long)ret, order);
return NULL;
}
+ split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
+
*dma_handle = virt_to_phys(ret);
return ret_nocache;
}
@@ -58,13 +52,14 @@ EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
- struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
int order = get_order(size);
+ unsigned long pfn = dma_handle >> PAGE_SHIFT;
+ int k;
if (!dma_release_from_coherent(dev, order, vaddr)) {
WARN_ON(irqs_disabled()); /* for portability */
- BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
- free_pages((unsigned long)phys_to_virt(dma_handle), order);
+ for (k = 0; k < (1 << order); k++)
+ __free_pages(pfn_to_page(pfn + k), 0);
iounmap(vaddr);
}
}
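The reworked free path depends on the split_page() call added to dma_alloc_coherent(): __get_free_pages() hands back a single order-N unit, and split_page() breaks it into 2^N independent order-0 pages so that dma_free_coherent() can return each one with __free_pages(page, 0). A small sketch of that allocate/split/free pattern in isolation, with an arbitrary order chosen for illustration:

/* Sketch only: order-2 allocation (4 pages), split, then freed page by page. */
unsigned long buf = __get_free_pages(GFP_KERNEL, 2);

if (buf) {
	struct page *page = virt_to_page((void *)buf);
	int i;

	split_page(page, 2);			/* now 4 separate order-0 pages */
	for (i = 0; i < (1 << 2); i++)
		__free_pages(page + i, 0);	/* free each page individually */
}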
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 0c776fdfbdd..31a33ebdef6 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -2,7 +2,7 @@
* Page fault handler for SH with an MMU.
*
* Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 - 2007 Paul Mundt
+ * Copyright (C) 2003 - 2008 Paul Mundt
*
* Based on linux/arch/i386/mm/fault.c:
* Copyright (C) 1995 Linus Torvalds
@@ -15,11 +15,11 @@
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
+#include <linux/marker.h>
#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
-#include <asm/kgdb.h>
/*
* This routine handles page faults. It determines the address,
@@ -37,10 +37,10 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
int fault;
siginfo_t info;
-#ifdef CONFIG_SH_KGDB
- if (kgdb_nofault && kgdb_bus_err_hook)
- kgdb_bus_err_hook();
-#endif
+ /*
+ * We don't bother with any notifier callbacks here, as they are
+ * all handled through the __do_page_fault() fast-path.
+ */
tsk = current;
si_code = SEGV_MAPERR;
@@ -61,7 +61,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
pgd = get_TTB() + offset;
pgd_k = swapper_pg_dir + offset;
- /* This will never happen with the folded page table. */
if (!pgd_present(*pgd)) {
if (!pgd_present(*pgd_k))
goto bad_area_nosemaphore;
@@ -71,9 +70,13 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
pud = pud_offset(pgd, address);
pud_k = pud_offset(pgd_k, address);
- if (pud_present(*pud) || !pud_present(*pud_k))
- goto bad_area_nosemaphore;
- set_pud(pud, *pud_k);
+
+ if (!pud_present(*pud)) {
+ if (!pud_present(*pud_k))
+ goto bad_area_nosemaphore;
+ set_pud(pud, *pud_k);
+ return;
+ }
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
@@ -242,17 +245,25 @@ do_sigbus:
goto no_context;
}
-#ifdef CONFIG_SH_STORE_QUEUES
-/*
- * This is a special case for the SH-4 store queues, as pages for this
- * space still need to be faulted in before it's possible to flush the
- * store queue cache for writeout to the remapped region.
- */
-#define P3_ADDR_MAX (P4SEG_STORE_QUE + 0x04000000)
-#else
-#define P3_ADDR_MAX P4SEG
+static inline int notify_page_fault(struct pt_regs *regs, int trap)
+{
+ int ret = 0;
+
+ trace_mark(kernel_arch_trap_entry, "trap_id %d ip #p%ld",
+ trap >> 5, instruction_pointer(regs));
+
+#ifdef CONFIG_KPROBES
+ if (!user_mode(regs)) {
+ preempt_disable();
+ if (kprobe_running() && kprobe_fault_handler(regs, trap))
+ ret = 1;
+ preempt_enable();
+ }
#endif
+ return ret;
+}
+
/*
* Called with interrupts disabled.
*/
@@ -265,11 +276,12 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
pmd_t *pmd;
pte_t *pte;
pte_t entry;
+ int ret = 0;
-#ifdef CONFIG_SH_KGDB
- if (kgdb_nofault && kgdb_bus_err_hook)
- kgdb_bus_err_hook();
-#endif
+ if (notify_page_fault(regs, lookup_exception_vector()))
+ goto out;
+
+ ret = 1;
/*
* We don't take page faults for P1, P2, and parts of P4, these
@@ -280,24 +292,23 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
pgd = pgd_offset_k(address);
} else {
if (unlikely(address >= TASK_SIZE || !current->mm))
- return 1;
+ goto out;
pgd = pgd_offset(current->mm, address);
}
pud = pud_offset(pgd, address);
if (pud_none_or_clear_bad(pud))
- return 1;
+ goto out;
pmd = pmd_offset(pud, address);
if (pmd_none_or_clear_bad(pmd))
- return 1;
-
+ goto out;
pte = pte_offset_kernel(pmd, address);
entry = *pte;
if (unlikely(pte_none(entry) || pte_not_present(entry)))
- return 1;
+ goto out;
if (unlikely(writeaccess && !pte_write(entry)))
- return 1;
+ goto out;
if (writeaccess)
entry = pte_mkdirty(entry);
@@ -314,5 +325,8 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
set_pte(pte, entry);
update_mmu_cache(NULL, address, entry);
- return 0;
+ ret = 0;
+out:
+ trace_mark(kernel_arch_trap_exit, MARK_NOARGS);
+ return ret;
}
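The pgd/pud fixup near the top of do_page_fault() follows the usual "vmalloc fault" approach: a kernel-space fault may only mean that the faulting context's page table has not yet picked up an entry that already exists in the reference table (swapper_pg_dir), in which case the entry is copied across and the access retried; otherwise it is a genuine bad access. Roughly, as a hypothetical helper not present in the patch:

static inline int sync_kernel_pud(pgd_t *pgd, pgd_t *pgd_k, unsigned long address)
{
	pud_t *pud = pud_offset(pgd, address);
	pud_t *pud_k = pud_offset(pgd_k, address);

	if (!pud_present(*pud_k))
		return -EFAULT;		/* no reference entry: genuine fault */
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);	/* lazily adopt the kernel mapping */
	return 0;
}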
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index b75a7acd62f..6cbef8caeb5 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -23,7 +23,19 @@
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];
-unsigned long cached_to_uncached = 0;
+
+#ifdef CONFIG_SUPERH32
+/*
+ * Handle trivial transitions between cached and uncached
+ * segments, making use of the 1:1 mapping relationship in
+ * 512MB lowmem.
+ *
+ * This is the offset of the uncached section from its cached alias.
+ * Default value only valid in 29 bit mode, in 32bit mode will be
+ * overridden in pmb_init.
+ */
+unsigned long cached_to_uncached = P2SEG - P1SEG;
+#endif
#ifdef CONFIG_MMU
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
@@ -58,9 +70,7 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
}
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
-
- if (cached_to_uncached)
- flush_tlb_one(get_asid(), addr);
+ flush_tlb_one(get_asid(), addr);
}
/*
@@ -113,7 +123,6 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
if (!pmd_present(*pmd)) {
pte_t *pte_table;
pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
- memset(pte_table, 0, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd, pte_table);
}
@@ -128,6 +137,7 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
+ unsigned long vaddr;
int nid;
/* We don't need to map the kernel through the TLB, as
@@ -139,10 +149,15 @@ void __init paging_init(void)
* check for a null value. */
set_TTB(swapper_pg_dir);
- /* Populate the relevant portions of swapper_pg_dir so that
+ /*
+ * Populate the relevant portions of swapper_pg_dir so that
* we can use the fixmap entries without calling kmalloc.
- * pte's will be filled in by __set_fixmap(). */
- page_table_range_init(FIXADDR_START, FIXADDR_TOP, swapper_pg_dir);
+ * pte's will be filled in by __set_fixmap().
+ */
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+ page_table_range_init(vaddr, 0, swapper_pg_dir);
+
+ kmap_coherent_init();
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
@@ -165,15 +180,6 @@ void __init paging_init(void)
#ifdef CONFIG_SUPERH32
/* Set up the uncached fixmap */
set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
-
-#ifdef CONFIG_29BIT
- /*
- * Handle trivial transitions between cached and uncached
- * segments, making use of the 1:1 mapping relationship in
- * 512MB lowmem.
- */
- cached_to_uncached = P2SEG - P1SEG;
-#endif
#endif
}
@@ -265,6 +271,35 @@ void free_initrd_mem(unsigned long start, unsigned long end)
}
#endif
+#if THREAD_SHIFT < PAGE_SHIFT
+static struct kmem_cache *thread_info_cache;
+
+struct thread_info *alloc_thread_info(struct task_struct *tsk)
+{
+ struct thread_info *ti;
+
+ ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
+ if (unlikely(ti == NULL))
+ return NULL;
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ memset(ti, 0, THREAD_SIZE);
+#endif
+ return ti;
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+ kmem_cache_free(thread_info_cache, ti);
+}
+
+void thread_info_cache_init(void)
+{
+ thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+ THREAD_SIZE, 0, NULL);
+ BUG_ON(thread_info_cache == NULL);
+}
+#endif /* THREAD_SHIFT < PAGE_SHIFT */
+
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
@@ -292,4 +327,4 @@ int memory_add_physaddr_to_nid(u64 addr)
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
-#endif
+#endif /* CONFIG_MEMORY_HOTPLUG */
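The new cached_to_uncached default encodes the fixed distance between the cached (P1) and uncached (P2) aliases of lowmem in 29-bit mode; with the conventional segment bases P1SEG = 0x80000000 and P2SEG = 0xa0000000 the offset works out to 0x20000000, and pmb_init() recomputes it for 32-bit/PMB configurations. A hypothetical helper showing how such an offset would be applied, for illustration only:

/* Given a P1 (cached) lowmem address, return its P2 (uncached) alias. */
static inline void *uncached_alias(void *cached)
{
	return (void *)((unsigned long)cached + cached_to_uncached);
}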
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
index 882a32ebc6b..32946fba123 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap_32.c
@@ -116,9 +116,10 @@ EXPORT_SYMBOL(__ioremap);
void __iounmap(void __iomem *addr)
{
unsigned long vaddr = (unsigned long __force)addr;
+ unsigned long seg = PXSEG(vaddr);
struct vm_struct *p;
- if (PXSEG(vaddr) < P3SEG || is_pci_memaddr(vaddr))
+ if (seg < P3SEG || seg >= P3_ADDR_MAX || is_pci_memaddr(vaddr))
return;
#ifdef CONFIG_32BIT
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
new file mode 100644
index 00000000000..931f4d003fa
--- /dev/null
+++ b/arch/sh/mm/mmap.c
@@ -0,0 +1,125 @@
+/*
+ * arch/sh/mm/mmap.c
+ *
+ * Copyright (C) 2008 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/module.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+
+#ifdef CONFIG_MMU
+unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
+EXPORT_SYMBOL(shm_align_mask);
+
+/*
+ * To avoid cache aliases, we map the shared page with same color.
+ */
+#define COLOUR_ALIGN(addr, pgoff) \
+ ((((addr) + shm_align_mask) & ~shm_align_mask) + \
+ (((pgoff) << PAGE_SHIFT) & shm_align_mask))
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long start_addr;
+ int do_colour_align;
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+ if ((flags & MAP_SHARED) && (addr & shm_align_mask))
+ return -EINVAL;
+ return addr;
+ }
+
+ if (unlikely(len > TASK_SIZE))
+ return -ENOMEM;
+
+ do_colour_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_colour_align = 1;
+
+ if (addr) {
+ if (do_colour_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+
+ if (len > mm->cached_hole_size) {
+ start_addr = addr = mm->free_area_cache;
+ } else {
+ mm->cached_hole_size = 0;
+ start_addr = addr = TASK_UNMAPPED_BASE;
+ }
+
+full_search:
+ if (do_colour_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(mm->free_area_cache);
+
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (unlikely(TASK_SIZE - len < addr)) {
+ /*
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+ if (start_addr != TASK_UNMAPPED_BASE) {
+ start_addr = addr = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+ if (likely(!vma || addr + len <= vma->vm_start)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+ mm->free_area_cache = addr + len;
+ return addr;
+ }
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ addr = vma->vm_end;
+ if (do_colour_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ }
+}
+#endif /* CONFIG_MMU */
+
+/*
+ * You really shouldn't be using read() or write() on /dev/mem. This
+ * might go away in the future.
+ */
+int valid_phys_addr_range(unsigned long addr, size_t count)
+{
+ if (addr < __MEMORY_START)
+ return 0;
+ if (addr + count > __pa(high_memory))
+ return 0;
+
+ return 1;
+}
+
+int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
+{
+ return 1;
+}
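COLOUR_ALIGN() above first rounds the candidate address up to the aliasing boundary and then adds in the colour bits of the file offset, so a shared mapping lands on the same cache colour as the page cache pages backing it. A worked example with assumed values (a 16KB alias span and 4KB pages, neither taken from this patch):

#define EX_SHM_ALIGN_MASK	0x3fffUL	/* assumed 16KB alias span */
#define EX_PAGE_SHIFT		12		/* assumed 4KB pages */

static unsigned long ex_colour_align(unsigned long addr, unsigned long pgoff)
{
	return ((addr + EX_SHM_ALIGN_MASK) & ~EX_SHM_ALIGN_MASK) +
	       ((pgoff << EX_PAGE_SHIFT) & EX_SHM_ALIGN_MASK);
}

/*
 * ex_colour_align(0x10001000, 3):
 *   round up to alias span    -> 0x10004000
 *   + colour bits of offset   -> 0x10004000 + 0x3000 = 0x10007000
 * The low 14 bits (0x3000) match those of the file offset, so the user
 * mapping and the page cache share a cache colour.
 */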
diff --git a/arch/sh/mm/pg-nommu.c b/arch/sh/mm/pg-nommu.c
index 677dd57f087..91ed4e695ff 100644
--- a/arch/sh/mm/pg-nommu.c
+++ b/arch/sh/mm/pg-nommu.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/page.h>
+#include <asm/uaccess.h>
void copy_page(void *to, void *from)
{
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 38870e0fc18..2fe14da1f83 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -7,6 +7,7 @@
* Released under the terms of the GNU GPL v2.0.
*/
#include <linux/mm.h>
+#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
@@ -16,6 +17,20 @@
#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
+#define kmap_get_fixmap_pte(vaddr) \
+ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
+
+static pte_t *kmap_coherent_pte;
+
+void __init kmap_coherent_init(void)
+{
+ unsigned long vaddr;
+
+ /* cache the first coherent kmap pte */
+ vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
+ kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
+}
+
static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
enum fixed_addresses idx;
@@ -34,6 +49,8 @@ static inline void *kmap_coherent(struct page *page, unsigned long addr)
update_mmu_cache(NULL, vaddr, pte);
+ set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
+
return (void *)vaddr;
}
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index cef727669c8..84241676265 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -394,6 +394,8 @@ static int __init pmb_debugfs_init(void)
dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
sh_debugfs_root, NULL, &pmb_debugfs_fops);
+ if (!dentry)
+ return -ENOMEM;
if (IS_ERR(dentry))
return PTR_ERR(dentry);
diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/tlb-nommu.c
index 15111bc7ddd..71c742b5aee 100644
--- a/arch/sh/mm/tlb-nommu.c
+++ b/arch/sh/mm/tlb-nommu.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
/*
* Nothing too terribly exciting here ..