author     Jeff Garzik <jeff@garzik.org>	2006-09-27 18:16:47 -0400
committer  Jeff Garzik <jeff@garzik.org>	2006-09-27 18:16:47 -0400
commit     3b9f6cb8a1ec791be79c6c7595fea922f12d1e64 (patch)
tree       2393a448add846e6c2ed12f68106c3018b72c6a9 /arch/sh/mm
parent     c38778c3a9aeadcd1ee319cfc8ea5a9cbf8cdafa (diff)
parent     a77c64c1a641950626181b4857abb701d8f38ccc (diff)
Merge branch 'master' into upstream
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--   arch/sh/mm/Kconfig          |  78
-rw-r--r--   arch/sh/mm/Makefile         |  16
-rw-r--r--   arch/sh/mm/cache-debugfs.c  | 147
-rw-r--r--   arch/sh/mm/cache-sh4.c      | 685
-rw-r--r--   arch/sh/mm/cache-sh7705.c   |  19
-rw-r--r--   arch/sh/mm/clear_page.S     |  99
-rw-r--r--   arch/sh/mm/consistent.c     |   2
-rw-r--r--   arch/sh/mm/fault.c          | 207
-rw-r--r--   arch/sh/mm/hugetlbpage.c    |  52
-rw-r--r--   arch/sh/mm/init.c           |  32
-rw-r--r--   arch/sh/mm/ioremap.c        |  17
-rw-r--r--   arch/sh/mm/pg-nommu.c       |  17
-rw-r--r--   arch/sh/mm/pg-sh4.c         |  24
-rw-r--r--   arch/sh/mm/pmb.c            | 400
-rw-r--r--   arch/sh/mm/tlb-flush.c      | 134
-rw-r--r--   arch/sh/mm/tlb-sh4.c        |   8
16 files changed, 1454 insertions, 483 deletions
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index fb586b1cf8b..9dd606464d2 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -20,7 +20,10 @@ config CPU_SH4
 config CPU_SH4A
 	bool
 	select CPU_SH4
-	select CPU_HAS_INTC2_IRQ
+
+config CPU_SH4AL_DSP
+	bool
+	select CPU_SH4A
 
 config CPU_SUBTYPE_ST40
 	bool
@@ -48,6 +51,12 @@ config CPU_SUBTYPE_SH7705
 	select CPU_SH3
 	select CPU_HAS_PINT_IRQ
 
+config CPU_SUBTYPE_SH7706
+	bool "Support SH7706 processor"
+	select CPU_SH3
+	help
+	  Select SH7706 if you have a 133 MHz SH-3 HD6417706 CPU.
+
 config CPU_SUBTYPE_SH7707
 	bool "Support SH7707 processor"
 	select CPU_SH3
@@ -69,6 +78,12 @@ config CPU_SUBTYPE_SH7709
 	help
 	  Select SH7709 if you have an 80 MHz SH-3 HD6417709 CPU.
 
+config CPU_SUBTYPE_SH7710
+	bool "Support SH7710 processor"
+	select CPU_SH3
+	help
+	  Select SH7710 if you have an SH3-DSP SH7710 CPU.
+
 comment "SH-4 Processor Support"
 
 config CPU_SUBTYPE_SH7750
@@ -133,10 +148,6 @@ config CPU_SUBTYPE_ST40GX1
 
 comment "SH-4A Processor Support"
 
-config CPU_SUBTYPE_SH73180
-	bool "Support SH73180 processor"
-	select CPU_SH4A
-
 config CPU_SUBTYPE_SH7770
 	bool "Support SH7770 processor"
 	select CPU_SH4A
@@ -144,6 +155,17 @@ config CPU_SUBTYPE_SH7770
 config CPU_SUBTYPE_SH7780
 	bool "Support SH7780 processor"
 	select CPU_SH4A
+	select CPU_HAS_INTC2_IRQ
+
+comment "SH4AL-DSP Processor Support"
+
+config CPU_SUBTYPE_SH73180
+	bool "Support SH73180 processor"
+	select CPU_SH4AL_DSP
+
+config CPU_SUBTYPE_SH7343
+	bool "Support SH7343 processor"
+	select CPU_SH4AL_DSP
 
 endmenu
 
@@ -161,15 +183,59 @@ config MMU
 	  turning this off will boot the kernel on these machines with the
 	  MMU implicitly switched off.
 
+config PAGE_OFFSET
+	hex
+	default "0x80000000" if MMU
+	default "0x00000000"
+
+config MEMORY_START
+	hex "Physical memory start address"
+	default "0x08000000"
+	---help---
+	  Computers built with Hitachi SuperH processors always
+	  map the ROM starting at address zero. But the processor
+	  does not specify the range that RAM takes.
+
+	  The physical memory (RAM) start address will be automatically
+	  set to 0x08000000. Other platforms, such as the Solution Engine
+	  boards, typically map RAM at 0x0C000000.
+
+	  Tweak this only when porting to a new machine which does not
+	  already have a defconfig. Changing it from the known correct
+	  value on any of the known systems will only lead to disaster.
+
+config MEMORY_SIZE
+	hex "Physical memory size"
+	default "0x00400000"
+	help
+	  This sets the default memory size assumed by your SH kernel. It can
+	  be overridden as normal by the 'mem=' argument on the kernel command
+	  line. If unsure, consult your board specifications or just leave it
+	  as 0x00400000, which was the default value before this became
+	  configurable.
+
 config 32BIT
 	bool "Support 32-bit physical addressing through PMB"
-	depends on CPU_SH4A
+	depends on CPU_SH4A && MMU
 	default y
 	help
 	  If you say Y here, physical addressing will be extended to
 	  32 bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
+
+config VSYSCALL
+	bool "Support vsyscall page"
+	depends on MMU
+	default y
+	help
+	  This will enable support for the kernel mapping a vDSO page
+	  in process space, and subsequently handing down the entry point
+	  to the libc through the ELF auxiliary vector.
+
+	  From the kernel side this is used for the signal trampoline.
+	  For systems with an MMU that can afford to give up a page,
+	  say Y (the default).
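The VSYSCALL help text above says the vDSO entry point is handed down to libc through the ELF auxiliary vector. As an illustration only, not part of this patch: a user-space program can locate the kernel-mapped vDSO page through that vector, here via the modern glibc getauxval(3) helper.

/*
 * Minimal user-space sketch: find the vDSO page the kernel advertises
 * via the ELF auxiliary vector. Assumes a libc providing getauxval(3);
 * this is not part of the kernel patch above.
 */
#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
	/* getauxval() returns 0 if the kernel did not map a vDSO page. */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (vdso)
		printf("vDSO ELF header mapped at %#lx\n", vdso);
	else
		printf("no vDSO page provided by this kernel\n");

	return 0;
}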
+
 choice
 	prompt "HugeTLB page size"
 	depends on HUGETLB_PAGE && CPU_SH4 && MMU
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 9489a142464..3ffd7f68c0a 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -6,20 +6,26 @@ obj-y			:= init.o extable.o consistent.o
 
 obj-$(CONFIG_CPU_SH2)	+= cache-sh2.o
 obj-$(CONFIG_CPU_SH3)	+= cache-sh3.o
-obj-$(CONFIG_CPU_SH4)	+= cache-sh4.o pg-sh4.o
+obj-$(CONFIG_CPU_SH4)	+= cache-sh4.o
 
 obj-$(CONFIG_DMA_PAGE_OPS) += pg-dma.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 
 mmu-y			:= fault-nommu.o tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o
+mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o tlb-flush.o \
+			   ioremap.o
 
 obj-y			+= $(mmu-y)
 
+ifdef CONFIG_DEBUG_FS
+obj-$(CONFIG_CPU_SH4)	+= cache-debugfs.o
+endif
+
 ifdef CONFIG_MMU
-obj-$(CONFIG_CPU_SH3)	+= tlb-sh3.o
-obj-$(CONFIG_CPU_SH4)	+= tlb-sh4.o ioremap.o
+obj-$(CONFIG_CPU_SH3)	+= tlb-sh3.o
+obj-$(CONFIG_CPU_SH4)	+= tlb-sh4.o pg-sh4.o
 obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh7705.o
 endif
 
-obj-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o
+obj-$(CONFIG_SH7705_CACHE_32KB)	+= cache-sh7705.o
+obj-$(CONFIG_32BIT)	+= pmb.o
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
new file mode 100644
index 00000000000..a22d914e4d1
--- /dev/null
+++ b/arch/sh/mm/cache-debugfs.c
@@ -0,0 +1,147 @@
+/*
+ * debugfs ops for the L1 cache
+ *
+ * Copyright (C) 2006  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/cache.h>
+#include <asm/io.h>
+
+enum cache_type {
+	CACHE_TYPE_ICACHE,
+	CACHE_TYPE_DCACHE,
+	CACHE_TYPE_UNIFIED,
+};
+
+static int cache_seq_show(struct seq_file *file, void *iter)
+{
+	unsigned int cache_type = (unsigned int)file->private;
+	struct cache_info *cache;
+	unsigned int waysize, way, cache_size;
+	unsigned long ccr, base;
+	static unsigned long addrstart = 0;
+
+	/*
+	 * Go uncached immediately so we don't skew the results any
+	 * more than we already are..
+	 */
+	jump_to_P2();
+
+	ccr = ctrl_inl(CCR);
+	if ((ccr & CCR_CACHE_ENABLE) == 0) {
+		back_to_P1();
+
+		seq_printf(file, "disabled\n");
+		return 0;
+	}
+
+	if (cache_type == CACHE_TYPE_DCACHE) {
+		base = CACHE_OC_ADDRESS_ARRAY;
+		cache = &cpu_data->dcache;
+	} else {
+		base = CACHE_IC_ADDRESS_ARRAY;
+		cache = &cpu_data->icache;
+	}
+
+	/*
+	 * Due to the amount of data written out (depending on the cache size),
+	 * we may be iterated over multiple times. In this case, keep track of
+	 * the entry position in addrstart, and rewind it when we've hit the
+	 * end of the cache.
+	 *
+	 * Likewise, the same code is used for multiple caches, so care must
+	 * be taken for bouncing addrstart back and forth so the appropriate
+	 * cache is hit.
+	 */
+	cache_size = cache->ways * cache->sets * cache->linesz;
+	if (((addrstart & 0xff000000) != base) ||
+	     (addrstart & 0x00ffffff) > cache_size)
+		addrstart = base;
+
+	waysize = cache->sets;
+
+	/*
+	 * If the OC is already in RAM mode, we only have
+	 * half of the entries to consider..
+ */ + if ((ccr & CCR_CACHE_ORA) && cache_type == CACHE_TYPE_DCACHE) + waysize >>= 1; + + waysize <<= cache->entry_shift; + + for (way = 0; way < cache->ways; way++) { + unsigned long addr; + unsigned int line; + + seq_printf(file, "-----------------------------------------\n"); + seq_printf(file, "Way %d\n", way); + seq_printf(file, "-----------------------------------------\n"); + + for (addr = addrstart, line = 0; + addr < addrstart + waysize; + addr += cache->linesz, line++) { + unsigned long data = ctrl_inl(addr); + + /* Check the V bit, ignore invalid cachelines */ + if ((data & 1) == 0) + continue; + + /* U: Dirty, cache tag is 10 bits up */ + seq_printf(file, "%3d: %c 0x%lx\n", + line, data & 2 ? 'U' : ' ', + data & 0x1ffffc00); + } + + addrstart += cache->way_incr; + } + + back_to_P1(); + + return 0; +} + +static int cache_debugfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, cache_seq_show, inode->u.generic_ip); +} + +static struct file_operations cache_debugfs_fops = { + .owner = THIS_MODULE, + .open = cache_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int __init cache_debugfs_init(void) +{ + struct dentry *dcache_dentry, *icache_dentry; + + dcache_dentry = debugfs_create_file("dcache", S_IRUSR, NULL, + (unsigned int *)CACHE_TYPE_DCACHE, + &cache_debugfs_fops); + if (IS_ERR(dcache_dentry)) + return PTR_ERR(dcache_dentry); + + icache_dentry = debugfs_create_file("icache", S_IRUSR, NULL, + (unsigned int *)CACHE_TYPE_ICACHE, + &cache_debugfs_fops); + if (IS_ERR(icache_dentry)) { + debugfs_remove(dcache_dentry); + return PTR_ERR(icache_dentry); + } + + return 0; +} +module_init(cache_debugfs_init); + +MODULE_LICENSE("GPL v2"); diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index 524cea5b47f..e48cc22724d 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c @@ -2,49 +2,120 @@ * arch/sh/mm/cache-sh4.c * * Copyright (C) 1999, 2000, 2002 Niibe Yutaka - * Copyright (C) 2001, 2002, 2003, 2004 Paul Mundt + * Copyright (C) 2001 - 2006 Paul Mundt * Copyright (C) 2003 Richard Curnow * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ - #include <linux/init.h> -#include <linux/mman.h> #include <linux/mm.h> -#include <linux/threads.h> #include <asm/addrspace.h> -#include <asm/page.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/cache.h> #include <asm/io.h> -#include <asm/uaccess.h> #include <asm/pgalloc.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> -extern void __flush_cache_4096_all(unsigned long start); -static void __flush_cache_4096_all_ex(unsigned long start); -extern void __flush_dcache_all(void); -static void __flush_dcache_all_ex(void); +/* + * The maximum number of pages we support up to when doing ranged dcache + * flushing. Anything exceeding this will simply flush the dcache in its + * entirety. + */ +#define MAX_DCACHE_PAGES 64 /* XXX: Tune for ways */ + +static void __flush_dcache_segment_1way(unsigned long start, + unsigned long extent); +static void __flush_dcache_segment_2way(unsigned long start, + unsigned long extent); +static void __flush_dcache_segment_4way(unsigned long start, + unsigned long extent); + +static void __flush_cache_4096(unsigned long addr, unsigned long phys, + unsigned long exec_offset); + +/* + * This is initialised here to ensure that it is not placed in the BSS. 
If + * that were to happen, note that cache_init gets called before the BSS is + * cleared, so this would get nulled out which would be hopeless. + */ +static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) = + (void (*)(unsigned long, unsigned long))0xdeadbeef; + +static void compute_alias(struct cache_info *c) +{ + c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1); + c->n_aliases = (c->alias_mask >> PAGE_SHIFT) + 1; +} + +static void __init emit_cache_params(void) +{ + printk("PVR=%08x CVR=%08x PRR=%08x\n", + ctrl_inl(CCN_PVR), + ctrl_inl(CCN_CVR), + ctrl_inl(CCN_PRR)); + printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n", + cpu_data->icache.ways, + cpu_data->icache.sets, + cpu_data->icache.way_incr); + printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", + cpu_data->icache.entry_mask, + cpu_data->icache.alias_mask, + cpu_data->icache.n_aliases); + printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n", + cpu_data->dcache.ways, + cpu_data->dcache.sets, + cpu_data->dcache.way_incr); + printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", + cpu_data->dcache.entry_mask, + cpu_data->dcache.alias_mask, + cpu_data->dcache.n_aliases); + + if (!__flush_dcache_segment_fn) + panic("unknown number of cache ways\n"); +} /* * SH-4 has virtually indexed and physically tagged cache. */ -struct semaphore p3map_sem[4]; +/* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */ +#define MAX_P3_SEMAPHORES 16 + +struct semaphore p3map_sem[MAX_P3_SEMAPHORES]; void __init p3_cache_init(void) { - if (remap_area_pages(P3SEG, 0, PAGE_SIZE*4, _PAGE_CACHABLE)) + int i; + + compute_alias(&cpu_data->icache); + compute_alias(&cpu_data->dcache); + + switch (cpu_data->dcache.ways) { + case 1: + __flush_dcache_segment_fn = __flush_dcache_segment_1way; + break; + case 2: + __flush_dcache_segment_fn = __flush_dcache_segment_2way; + break; + case 4: + __flush_dcache_segment_fn = __flush_dcache_segment_4way; + break; + default: + __flush_dcache_segment_fn = NULL; + break; + } + + emit_cache_params(); + + if (remap_area_pages(P3SEG, 0, PAGE_SIZE * 4, _PAGE_CACHABLE)) panic("%s failed.", __FUNCTION__); - sema_init (&p3map_sem[0], 1); - sema_init (&p3map_sem[1], 1); - sema_init (&p3map_sem[2], 1); - sema_init (&p3map_sem[3], 1); + for (i = 0; i < cpu_data->dcache.n_aliases; i++) + sema_init(&p3map_sem[i], 1); } /* @@ -89,7 +160,6 @@ void __flush_purge_region(void *start, int size) } } - /* * No write back please */ @@ -108,40 +178,6 @@ void __flush_invalidate_region(void *start, int size) } } -static void __flush_dcache_all_ex(void) -{ - unsigned long addr, end_addr, entry_offset; - - end_addr = CACHE_OC_ADDRESS_ARRAY + (cpu_data->dcache.sets << cpu_data->dcache.entry_shift) * cpu_data->dcache.ways; - entry_offset = 1 << cpu_data->dcache.entry_shift; - for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; addr += entry_offset) { - ctrl_outl(0, addr); - } -} - -static void __flush_cache_4096_all_ex(unsigned long start) -{ - unsigned long addr, entry_offset; - int i; - - entry_offset = 1 << cpu_data->dcache.entry_shift; - for (i = 0; i < cpu_data->dcache.ways; i++, start += cpu_data->dcache.way_incr) { - for (addr = CACHE_OC_ADDRESS_ARRAY + start; - addr < CACHE_OC_ADDRESS_ARRAY + 4096 + start; - addr += entry_offset) { - ctrl_outl(0, addr); - } - } -} - -void flush_cache_4096_all(unsigned long start) -{ - if (cpu_data->dcache.ways == 1) - __flush_cache_4096_all(start); - else - __flush_cache_4096_all_ex(start); -} - /* * Write back the range 
of D-cache, and purge the I-cache. * @@ -153,14 +189,14 @@ void flush_icache_range(unsigned long start, unsigned long end) } /* - * Write back the D-cache and purge the I-cache for signal trampoline. + * Write back the D-cache and purge the I-cache for signal trampoline. * .. which happens to be the same behavior as flush_icache_range(). * So, we simply flush out a line. */ void flush_cache_sigtramp(unsigned long addr) { unsigned long v, index; - unsigned long flags; + unsigned long flags; int i; v = addr & ~(L1_CACHE_BYTES-1); @@ -172,30 +208,33 @@ void flush_cache_sigtramp(unsigned long addr) local_irq_save(flags); jump_to_P2(); - for(i = 0; i < cpu_data->icache.ways; i++, index += cpu_data->icache.way_incr) + + for (i = 0; i < cpu_data->icache.ways; + i++, index += cpu_data->icache.way_incr) ctrl_outl(0, index); /* Clear out Valid-bit */ + back_to_P1(); + wmb(); local_irq_restore(flags); } static inline void flush_cache_4096(unsigned long start, unsigned long phys) { - unsigned long flags; - extern void __flush_cache_4096(unsigned long addr, unsigned long phys, unsigned long exec_offset); + unsigned long flags, exec_offset = 0; /* - * SH7751, SH7751R, and ST40 have no restriction to handle cache. - * (While SH7750 must do that at P2 area.) + * All types of SH-4 require PC to be in P2 to operate on the I-cache. + * Some types of SH-4 require PC to be in P2 to operate on the D-cache. */ - if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) - || start < CACHE_OC_ADDRESS_ARRAY) { - local_irq_save(flags); - __flush_cache_4096(start | SH_CACHE_ASSOC, P1SEGADDR(phys), 0x20000000); - local_irq_restore(flags); - } else { - __flush_cache_4096(start | SH_CACHE_ASSOC, P1SEGADDR(phys), 0); - } + if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) || + (start < CACHE_OC_ADDRESS_ARRAY)) + exec_offset = 0x20000000; + + local_irq_save(flags); + __flush_cache_4096(start | SH_CACHE_ASSOC, + P1SEGADDR(phys), exec_offset); + local_irq_restore(flags); } /* @@ -206,15 +245,19 @@ void flush_dcache_page(struct page *page) { if (test_bit(PG_mapped, &page->flags)) { unsigned long phys = PHYSADDR(page_address(page)); + unsigned long addr = CACHE_OC_ADDRESS_ARRAY; + int i, n; /* Loop all the D-cache */ - flush_cache_4096(CACHE_OC_ADDRESS_ARRAY, phys); - flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x1000, phys); - flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x2000, phys); - flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x3000, phys); + n = cpu_data->dcache.n_aliases; + for (i = 0; i < n; i++, addr += PAGE_SIZE) + flush_cache_4096(addr, phys); } + + wmb(); } +/* TODO: Selective icache invalidation through IC address array.. */ static inline void flush_icache_all(void) { unsigned long flags, ccr; @@ -227,34 +270,142 @@ static inline void flush_icache_all(void) ccr |= CCR_CACHE_ICI; ctrl_outl(ccr, CCR); + /* + * back_to_P1() will take care of the barrier for us, don't add + * another one! 
+ */ + back_to_P1(); local_irq_restore(flags); } +void flush_dcache_all(void) +{ + (*__flush_dcache_segment_fn)(0UL, cpu_data->dcache.way_size); + wmb(); +} + void flush_cache_all(void) { - if (cpu_data->dcache.ways == 1) - __flush_dcache_all(); - else - __flush_dcache_all_ex(); + flush_dcache_all(); flush_icache_all(); } +static void __flush_cache_mm(struct mm_struct *mm, unsigned long start, + unsigned long end) +{ + unsigned long d = 0, p = start & PAGE_MASK; + unsigned long alias_mask = cpu_data->dcache.alias_mask; + unsigned long n_aliases = cpu_data->dcache.n_aliases; + unsigned long select_bit; + unsigned long all_aliases_mask; + unsigned long addr_offset; + pgd_t *dir; + pmd_t *pmd; + pud_t *pud; + pte_t *pte; + int i; + + dir = pgd_offset(mm, p); + pud = pud_offset(dir, p); + pmd = pmd_offset(pud, p); + end = PAGE_ALIGN(end); + + all_aliases_mask = (1 << n_aliases) - 1; + + do { + if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) { + p &= PMD_MASK; + p += PMD_SIZE; + pmd++; + + continue; + } + + pte = pte_offset_kernel(pmd, p); + + do { + unsigned long phys; + pte_t entry = *pte; + + if (!(pte_val(entry) & _PAGE_PRESENT)) { + pte++; + p += PAGE_SIZE; + continue; + } + + phys = pte_val(entry) & PTE_PHYS_MASK; + + if ((p ^ phys) & alias_mask) { + d |= 1 << ((p & alias_mask) >> PAGE_SHIFT); + d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT); + + if (d == all_aliases_mask) + goto loop_exit; + } + + pte++; + p += PAGE_SIZE; + } while (p < end && ((unsigned long)pte & ~PAGE_MASK)); + pmd++; + } while (p < end); + +loop_exit: + addr_offset = 0; + select_bit = 1; + + for (i = 0; i < n_aliases; i++) { + if (d & select_bit) { + (*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE); + wmb(); + } + + select_bit <<= 1; + addr_offset += PAGE_SIZE; + } +} + +/* + * Note : (RPC) since the caches are physically tagged, the only point + * of flush_cache_mm for SH-4 is to get rid of aliases from the + * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that + * lines can stay resident so long as the virtual address they were + * accessed with (hence cache set) is in accord with the physical + * address (i.e. tag). It's no different here. So I reckon we don't + * need to flush the I-cache, since aliases don't matter for that. We + * should try that. + * + * Caller takes mm->mmap_sem. + */ void flush_cache_mm(struct mm_struct *mm) { - /* Is there any good way? */ - /* XXX: possibly call flush_cache_range for each vm area */ - /* - * FIXME: Really, the optimal solution here would be able to flush out - * individual lines created by the specified context, but this isn't - * feasible for a number of architectures (such as MIPS, and some - * SPARC) .. is this possible for SuperH? - * - * In the meantime, we'll just flush all of the caches.. this - * seems to be the simplest way to avoid at least a few wasted - * cache flushes. -Lethal + /* + * If cache is only 4k-per-way, there are never any 'aliases'. Since + * the cache is physically tagged, the data can just be left in there. */ - flush_cache_all(); + if (cpu_data->dcache.n_aliases == 0) + return; + + /* + * Don't bother groveling around the dcache for the VMA ranges + * if there are too many PTEs to make it worthwhile. + */ + if (mm->nr_ptes >= MAX_DCACHE_PAGES) + flush_dcache_all(); + else { + struct vm_area_struct *vma; + + /* + * In this case there are reasonably sized ranges to flush, + * iterate through the VMA list and take care of any aliases. 
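+		 *
+		 * (Worked example, not in the original patch: assuming a
+		 * 16 KiB direct-mapped D-cache with 32-byte lines and 4 KiB
+		 * pages, compute_alias() yields alias_mask == 0x3000 and
+		 * n_aliases == 4, so each physical page has four possible
+		 * virtual colours that may need flushing here.)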
+ */ + for (vma = mm->mmap; vma; vma = vma->vm_next) + __flush_cache_mm(mm, vma->vm_start, vma->vm_end); + } + + /* Only touch the icache if one of the VMAs has VM_EXEC set. */ + if (mm->exec_vm) + flush_icache_all(); } /* @@ -263,27 +414,40 @@ void flush_cache_mm(struct mm_struct *mm) * ADDR: Virtual Address (U0 address) * PFN: Physical page number */ -void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long pfn) +void flush_cache_page(struct vm_area_struct *vma, unsigned long address, + unsigned long pfn) { unsigned long phys = pfn << PAGE_SHIFT; + unsigned int alias_mask; + + alias_mask = cpu_data->dcache.alias_mask; /* We only need to flush D-cache when we have alias */ - if ((address^phys) & CACHE_ALIAS) { + if ((address^phys) & alias_mask) { /* Loop 4K of the D-cache */ flush_cache_4096( - CACHE_OC_ADDRESS_ARRAY | (address & CACHE_ALIAS), + CACHE_OC_ADDRESS_ARRAY | (address & alias_mask), phys); /* Loop another 4K of the D-cache */ flush_cache_4096( - CACHE_OC_ADDRESS_ARRAY | (phys & CACHE_ALIAS), + CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask), phys); } - if (vma->vm_flags & VM_EXEC) - /* Loop 4K (half) of the I-cache */ + alias_mask = cpu_data->icache.alias_mask; + if (vma->vm_flags & VM_EXEC) { + /* + * Evict entries from the portion of the cache from which code + * may have been executed at this address (virtual). There's + * no need to evict from the portion corresponding to the + * physical address as for the D-cache, because we know the + * kernel has never executed the code through its identity + * translation. + */ flush_cache_4096( - CACHE_IC_ADDRESS_ARRAY | (address & 0x1000), + CACHE_IC_ADDRESS_ARRAY | (address & alias_mask), phys); + } } /* @@ -298,52 +462,31 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigne void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - unsigned long p = start & PAGE_MASK; - pgd_t *dir; - pmd_t *pmd; - pte_t *pte; - pte_t entry; - unsigned long phys; - unsigned long d = 0; - - dir = pgd_offset(vma->vm_mm, p); - pmd = pmd_offset(dir, p); + /* + * If cache is only 4k-per-way, there are never any 'aliases'. Since + * the cache is physically tagged, the data can just be left in there. + */ + if (cpu_data->dcache.n_aliases == 0) + return; - do { - if (pmd_none(*pmd) || pmd_bad(*pmd)) { - p &= ~((1 << PMD_SHIFT) -1); - p += (1 << PMD_SHIFT); - pmd++; - continue; - } - pte = pte_offset_kernel(pmd, p); - do { - entry = *pte; - if ((pte_val(entry) & _PAGE_PRESENT)) { - phys = pte_val(entry)&PTE_PHYS_MASK; - if ((p^phys) & CACHE_ALIAS) { - d |= 1 << ((p & CACHE_ALIAS)>>12); - d |= 1 << ((phys & CACHE_ALIAS)>>12); - if (d == 0x0f) - goto loop_exit; - } - } - pte++; - p += PAGE_SIZE; - } while (p < end && ((unsigned long)pte & ~PAGE_MASK)); - pmd++; - } while (p < end); - loop_exit: - if (d & 1) - flush_cache_4096_all(0); - if (d & 2) - flush_cache_4096_all(0x1000); - if (d & 4) - flush_cache_4096_all(0x2000); - if (d & 8) - flush_cache_4096_all(0x3000); - if (vma->vm_flags & VM_EXEC) + /* + * Don't bother with the lookup and alias check if we have a + * wide range to cover, just blow away the dcache in its + * entirety instead. -- PFM. + */ + if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES) + flush_dcache_all(); + else + __flush_cache_mm(vma->vm_mm, start, end); + + if (vma->vm_flags & VM_EXEC) { + /* + * TODO: Is this required??? Need to look at how I-cache + * coherency is assured when new programs are loaded to see if + * this matters. 
+ */ flush_icache_all(); + } } /* @@ -357,5 +500,273 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, unsigned long addr, int len) { flush_cache_page(vma, addr, page_to_pfn(page)); + mb(); +} + +/** + * __flush_cache_4096 + * + * @addr: address in memory mapped cache array + * @phys: P1 address to flush (has to match tags if addr has 'A' bit + * set i.e. associative write) + * @exec_offset: set to 0x20000000 if flush has to be executed from P2 + * region else 0x0 + * + * The offset into the cache array implied by 'addr' selects the + * 'colour' of the virtual address range that will be flushed. The + * operation (purge/write-back) is selected by the lower 2 bits of + * 'phys'. + */ +static void __flush_cache_4096(unsigned long addr, unsigned long phys, + unsigned long exec_offset) +{ + int way_count; + unsigned long base_addr = addr; + struct cache_info *dcache; + unsigned long way_incr; + unsigned long a, ea, p; + unsigned long temp_pc; + + dcache = &cpu_data->dcache; + /* Write this way for better assembly. */ + way_count = dcache->ways; + way_incr = dcache->way_incr; + + /* + * Apply exec_offset (i.e. branch to P2 if required.). + * + * FIXME: + * + * If I write "=r" for the (temp_pc), it puts this in r6 hence + * trashing exec_offset before it's been added on - why? Hence + * "=&r" as a 'workaround' + */ + asm volatile("mov.l 1f, %0\n\t" + "add %1, %0\n\t" + "jmp @%0\n\t" + "nop\n\t" + ".balign 4\n\t" + "1: .long 2f\n\t" + "2:\n" : "=&r" (temp_pc) : "r" (exec_offset)); + + /* + * We know there will be >=1 iteration, so write as do-while to avoid + * pointless nead-of-loop check for 0 iterations. + */ + do { + ea = base_addr + PAGE_SIZE; + a = base_addr; + p = phys; + + do { + *(volatile unsigned long *)a = p; + /* + * Next line: intentionally not p+32, saves an add, p + * will do since only the cache tag bits need to + * match. + */ + *(volatile unsigned long *)(a+32) = p; + a += 64; + p += 64; + } while (a < ea); + + base_addr += way_incr; + } while (--way_count != 0); } +/* + * Break the 1, 2 and 4 way variants of this out into separate functions to + * avoid nearly all the overhead of having the conditional stuff in the function + * bodies (+ the 1 and 2 way cases avoid saving any registers too). + */ +static void __flush_dcache_segment_1way(unsigned long start, + unsigned long extent_per_way) +{ + unsigned long orig_sr, sr_with_bl; + unsigned long base_addr; + unsigned long way_incr, linesz, way_size; + struct cache_info *dcache; + register unsigned long a0, a0e; + + asm volatile("stc sr, %0" : "=r" (orig_sr)); + sr_with_bl = orig_sr | (1<<28); + base_addr = ((unsigned long)&empty_zero_page[0]); + + /* + * The previous code aligned base_addr to 16k, i.e. the way_size of all + * existing SH-4 D-caches. Whilst I don't see a need to have this + * aligned to any better than the cache line size (which it will be + * anyway by construction), let's align it to at least the way_size of + * any existing or conceivable SH-4 D-cache. 
-- RPC + */ + base_addr = ((base_addr >> 16) << 16); + base_addr |= start; + + dcache = &cpu_data->dcache; + linesz = dcache->linesz; + way_incr = dcache->way_incr; + way_size = dcache->way_size; + + a0 = base_addr; + a0e = base_addr + extent_per_way; + do { + asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); + asm volatile("movca.l r0, @%0\n\t" + "ocbi @%0" : : "r" (a0)); + a0 += linesz; + asm volatile("movca.l r0, @%0\n\t" + "ocbi @%0" : : "r" (a0)); + a0 += linesz; + asm volatile("movca.l r0, @%0\n\t" + "ocbi @%0" : : "r" (a0)); + a0 += linesz; + asm volatile("movca.l r0, @%0\n\t" + "ocbi @%0" : : "r" (a0)); + asm volatile("ldc %0, sr" : : "r" (orig_sr)); + a0 += linesz; + } while (a0 < a0e); +} + +static void __flush_dcache_segment_2way(unsigned long start, + unsigned long extent_per_way) +{ + unsigned long orig_sr, sr_with_bl; + unsigned long base_addr; + unsigned long way_incr, linesz, way_size; + struct cache_info *dcache; + register unsigned long a0, a1, a0e; + + asm volatile("stc sr, %0" : "=r" (orig_sr)); + sr_with_bl = orig_sr | (1<<28); + base_addr = ((unsigned long)&empty_zero_page[0]); + + /* See comment under 1-way above */ + base_addr = ((base_addr >> 16) << 16); + base_addr |= start; + + dcache = &cpu_data->dcache; + linesz = dcache->linesz; + way_incr = dcache->way_incr; + way_size = dcache->way_size; + + a0 = base_addr; + a1 = a0 + way_incr; + a0e = base_addr + extent_per_way; + do { + asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); + asm volatile("movca.l r0, @%0\n\t" + "movca.l r0, @%1\n\t" + "ocbi @%0\n\t" + "ocbi @%1" : : + "r" (a0), "r" (a1)); + a0 += linesz; + a1 += linesz; + asm volatile("movca.l r0, @%0\n\t" + "movca.l r0, @%1\n\t" + "ocbi @%0\n\t" + "ocbi @%1" : : + "r" (a0), "r" (a1)); + a0 += linesz; + a1 += linesz; + asm volatile("movca.l r0, @%0\n\t" + "movca.l r0, @%1\n\t" + "ocbi @%0\n\t" + "ocbi @%1" : : + "r" (a0), "r" (a1)); + a0 += linesz; + a1 += linesz; + asm volatile("movca.l r0, @%0\n\t" + "movca.l r0, @%1\n\t" + "ocbi @%0\n\t" + "ocbi @%1" : : + "r" (a0), "r" (a1)); + asm volatile("ldc %0, sr" : : "r" (orig_sr)); + a0 += linesz; + a1 += linesz; + } while (a0 < a0e); +} + +static void __flush_dcache_segment_4way(unsigned long start, + unsigned long extent_per_way) +{ + unsigned long orig_sr, sr_with_bl; + unsigned long base_addr; + unsigned long way_incr, linesz, way_size; + struct cache_info *dcache; + register unsigned long a0, a1, a2, a3, a0e; + + asm volatile("stc sr, %0" : "=r" (orig_sr)); + sr_with_bl = orig_sr | (1<<28); + base_addr = ((unsigned long)&empty_zero_page[0]); + + /* See comment under 1-way above */ + base_addr = ((base_addr >> 16) << 16); + base_addr |= start; + + dcache = &cpu_data->dcache; + linesz = dcache->linesz; + way_incr = dcache->way_incr; + way_size = dcache->way_size; + + a0 = base_addr; + a1 = a0 + way_incr; + a2 = a1 + way_incr; + a3 = a2 + way_incr; + a0e = base_addr + extent_per_way; + do { + asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); + asm volatile("movca.l r0, @%0\n\t" + "movca.l r0, @%1\n\t" + "movca.l r0, @%2\n\t" + "movca.l r0, @%3\n\t" + "ocbi @%0\n\t" + "ocbi @%1\n\t" + "ocbi @%2\n\t" + "ocbi @%3\n\t" : : + "r" (a0), "r" (a1), "r" (a2), "r" (a3)); + a0 += linesz; + a1 += linesz; + a2 += linesz; + a3 += linesz; + asm volatile("movca.l r0, @%0\n\t" + "movca.l r0, @%1\n\t" + "movca.l r0, @%2\n\t" + "movca.l r0, @%3\n\t" + "ocbi @%0\n\t" + "ocbi @%1\n\t" + "ocbi @%2\n\t" + "ocbi @%3\n\t" : : + "r" (a0), "r" (a1), "r" (a2), "r" (a3)); + a0 += linesz; + a1 += linesz; + a2 += linesz; + a3 += linesz; + asm 
volatile("movca.l r0, @%0\n\t" + "movca.l r0, @%1\n\t" + "movca.l r0, @%2\n\t" + "movca.l r0, @%3\n\t" + "ocbi @%0\n\t" + "ocbi @%1\n\t" + "ocbi @%2\n\t" + "ocbi @%3\n\t" : : + "r" (a0), "r" (a1), "r" (a2), "r" (a3)); + a0 += linesz; + a1 += linesz; + a2 += linesz; + a3 += linesz; + asm volatile("movca.l r0, @%0\n\t" + "movca.l r0, @%1\n\t" + "movca.l r0, @%2\n\t" + "movca.l r0, @%3\n\t" + "ocbi @%0\n\t" + "ocbi @%1\n\t" + "ocbi @%2\n\t" + "ocbi @%3\n\t" : : + "r" (a0), "r" (a1), "r" (a2), "r" (a3)); + asm volatile("ldc %0, sr" : : "r" (orig_sr)); + a0 += linesz; + a1 += linesz; + a2 += linesz; + a3 += linesz; + } while (a0 < a0e); +} diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c index bf94eedb0a8..045abdf078f 100644 --- a/arch/sh/mm/cache-sh7705.c +++ b/arch/sh/mm/cache-sh7705.c @@ -9,7 +9,6 @@ * for more details. * */ - #include <linux/init.h> #include <linux/mman.h> #include <linux/mm.h> @@ -25,14 +24,10 @@ #include <asm/mmu_context.h> #include <asm/cacheflush.h> -/* The 32KB cache on the SH7705 suffers from the same synonym problem - * as SH4 CPUs */ - -#define __pte_offset(address) \ - ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) -#define pte_offset(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \ - __pte_offset(address)) - +/* + * The 32KB cache on the SH7705 suffers from the same synonym problem + * as SH4 CPUs + */ static inline void cache_wback_all(void) { unsigned long ways, waysize, addrstart; @@ -73,7 +68,6 @@ void flush_icache_range(unsigned long start, unsigned long end) __flush_wback_region((void *)start, end - start); } - /* * Writeback&Invalidate the D-cache of the page */ @@ -128,7 +122,6 @@ static void __flush_dcache_page(unsigned long phys) local_irq_restore(flags); } - /* * Write back & invalidate the D-cache of the page. * (To avoid "alias" issues) @@ -186,7 +179,8 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, * * ADDRESS: Virtual Address (U0 address) */ -void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long pfn) +void flush_cache_page(struct vm_area_struct *vma, unsigned long address, + unsigned long pfn) { __flush_dcache_page(pfn << PAGE_SHIFT); } @@ -203,4 +197,3 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page) { __flush_purge_region(page_address(page), PAGE_SIZE); } - diff --git a/arch/sh/mm/clear_page.S b/arch/sh/mm/clear_page.S index 08acead7b2a..7b96425ae27 100644 --- a/arch/sh/mm/clear_page.S +++ b/arch/sh/mm/clear_page.S @@ -193,102 +193,5 @@ ENTRY(__clear_user_page) nop .L4096: .word 4096 -ENTRY(__flush_cache_4096) - mov.l 1f,r3 - add r6,r3 - mov r4,r0 - mov #64,r2 - shll r2 - mov #64,r6 - jmp @r3 - mov #96,r7 - .align 2 -1: .long 2f -2: - .rept 32 - mov.l r5,@r0 - mov.l r5,@(32,r0) - mov.l r5,@(r0,r6) - mov.l r5,@(r0,r7) - add r2,r5 - add r2,r0 - .endr - nop - nop - nop - nop - nop - nop - nop - rts - nop - -ENTRY(__flush_dcache_all) - mov.l 2f,r0 - mov.l 3f,r4 - and r0,r4 ! r4 = (unsigned long)&empty_zero_page[0] & ~0xffffc000 - stc sr,r1 ! save SR - mov.l 4f,r2 - or r1,r2 - mov #32,r3 - shll2 r3 -1: - ldc r2,sr ! set BL bit - movca.l r0,@r4 - ocbi @r4 - add #32,r4 - movca.l r0,@r4 - ocbi @r4 - add #32,r4 - movca.l r0,@r4 - ocbi @r4 - add #32,r4 - movca.l r0,@r4 - ocbi @r4 - ldc r1,sr ! restore SR - dt r3 - bf/s 1b - add #32,r4 - - rts - nop - .align 2 -2: .long 0xffffc000 -3: .long empty_zero_page -4: .long 0x10000000 ! 
BL bit - -/* __flush_cache_4096_all(unsigned long addr) */ -ENTRY(__flush_cache_4096_all) - mov.l 2f,r0 - mov.l 3f,r2 - and r0,r2 - or r2,r4 ! r4 = addr | (unsigned long)&empty_zero_page[0] & ~0x3fff - stc sr,r1 ! save SR - mov.l 4f,r2 - or r1,r2 - mov #32,r3 -1: - ldc r2,sr ! set BL bit - movca.l r0,@r4 - ocbi @r4 - add #32,r4 - movca.l r0,@r4 - ocbi @r4 - add #32,r4 - movca.l r0,@r4 - ocbi @r4 - add #32,r4 - movca.l r0,@r4 - ocbi @r4 - ldc r1,sr ! restore SR - dt r3 - bf/s 1b - add #32,r4 - - rts - nop - .align 2 -2: .long 0xffffc000 -3: .long empty_zero_page -4: .long 0x10000000 ! BL bit #endif + diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index ee73e30263a..c81e6b67ad3 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c @@ -9,6 +9,8 @@ */ #include <linux/mm.h> #include <linux/dma-mapping.h> +#include <asm/cacheflush.h> +#include <asm/addrspace.h> #include <asm/io.h> void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle) diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index 775f86cd3fe..c69fd603226 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -1,33 +1,22 @@ -/* $Id: fault.c,v 1.14 2004/01/13 05:52:11 kkojima Exp $ +/* + * Page fault handler for SH with an MMU. * - * linux/arch/sh/mm/fault.c * Copyright (C) 1999 Niibe Yutaka * Copyright (C) 2003 Paul Mundt * * Based on linux/arch/i386/mm/fault.c: * Copyright (C) 1995 Linus Torvalds + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. */ - -#include <linux/signal.h> -#include <linux/sched.h> #include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/types.h> -#include <linux/ptrace.h> -#include <linux/mman.h> #include <linux/mm.h> -#include <linux/smp.h> -#include <linux/smp_lock.h> -#include <linux/interrupt.h> -#include <linux/module.h> - +#include <linux/hardirq.h> +#include <linux/kprobes.h> #include <asm/system.h> -#include <asm/io.h> -#include <asm/uaccess.h> -#include <asm/pgalloc.h> #include <asm/mmu_context.h> -#include <asm/cacheflush.h> #include <asm/kgdb.h> extern void die(const char *,struct pt_regs *,long); @@ -187,18 +176,30 @@ do_sigbus: goto no_context; } +#ifdef CONFIG_SH_STORE_QUEUES /* - * Called with interrupt disabled. + * This is a special case for the SH-4 store queues, as pages for this + * space still need to be faulted in before it's possible to flush the + * store queue cache for writeout to the remapped region. */ -asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess, - unsigned long address) +#define P3_ADDR_MAX (P4SEG_STORE_QUE + 0x04000000) +#else +#define P3_ADDR_MAX P4SEG +#endif + +/* + * Called with interrupts disabled. 
+ */ +asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs, + unsigned long writeaccess, + unsigned long address) { - unsigned long addrmax = P4SEG; pgd_t *pgd; + pud_t *pud; pmd_t *pmd; pte_t *pte; pte_t entry; - struct mm_struct *mm; + struct mm_struct *mm = current->mm; spinlock_t *ptl; int ret = 1; @@ -207,31 +208,37 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess, kgdb_bus_err_hook(); #endif -#ifdef CONFIG_SH_STORE_QUEUES - addrmax = P4SEG_STORE_QUE + 0x04000000; -#endif - - if (address >= P3SEG && address < addrmax) { + /* + * We don't take page faults for P1, P2, and parts of P4, these + * are always mapped, whether it be due to legacy behaviour in + * 29-bit mode, or due to PMB configuration in 32-bit mode. + */ + if (address >= P3SEG && address < P3_ADDR_MAX) { pgd = pgd_offset_k(address); mm = NULL; - } else if (address >= TASK_SIZE) - return 1; - else if (!(mm = current->mm)) - return 1; - else + } else { + if (unlikely(address >= TASK_SIZE || !mm)) + return 1; + pgd = pgd_offset(mm, address); + } - pmd = pmd_offset(pgd, address); + pud = pud_offset(pgd, address); + if (pud_none_or_clear_bad(pud)) + return 1; + pmd = pmd_offset(pud, address); if (pmd_none_or_clear_bad(pmd)) return 1; + if (mm) pte = pte_offset_map_lock(mm, pmd, address, &ptl); else pte = pte_offset_kernel(pmd, address); entry = *pte; - if (pte_none(entry) || pte_not_present(entry) - || (writeaccess && !pte_write(entry))) + if (unlikely(pte_none(entry) || pte_not_present(entry))) + goto unlock; + if (unlikely(writeaccess && !pte_write(entry))) goto unlock; if (writeaccess) @@ -243,13 +250,7 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess, * ITLB is not affected by "ldtlb" instruction. * So, we need to flush the entry by ourselves. 
*/ - - { - unsigned long flags; - local_irq_save(flags); - __flush_tlb_page(get_asid(), address&PAGE_MASK); - local_irq_restore(flags); - } + __flush_tlb_page(get_asid(), address & PAGE_MASK); #endif set_pte(pte, entry); @@ -260,121 +261,3 @@ unlock: pte_unmap_unlock(pte, ptl); return ret; } - -void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) -{ - if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) { - unsigned long flags; - unsigned long asid; - unsigned long saved_asid = MMU_NO_ASID; - - asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK; - page &= PAGE_MASK; - - local_irq_save(flags); - if (vma->vm_mm != current->mm) { - saved_asid = get_asid(); - set_asid(asid); - } - __flush_tlb_page(asid, page); - if (saved_asid != MMU_NO_ASID) - set_asid(saved_asid); - local_irq_restore(flags); - } -} - -void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, - unsigned long end) -{ - struct mm_struct *mm = vma->vm_mm; - - if (mm->context != NO_CONTEXT) { - unsigned long flags; - int size; - - local_irq_save(flags); - size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; - if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */ - mm->context = NO_CONTEXT; - if (mm == current->mm) - activate_context(mm); - } else { - unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK; - unsigned long saved_asid = MMU_NO_ASID; - - start &= PAGE_MASK; - end += (PAGE_SIZE - 1); - end &= PAGE_MASK; - if (mm != current->mm) { - saved_asid = get_asid(); - set_asid(asid); - } - while (start < end) { - __flush_tlb_page(asid, start); - start += PAGE_SIZE; - } - if (saved_asid != MMU_NO_ASID) - set_asid(saved_asid); - } - local_irq_restore(flags); - } -} - -void flush_tlb_kernel_range(unsigned long start, unsigned long end) -{ - unsigned long flags; - int size; - - local_irq_save(flags); - size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; - if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */ - flush_tlb_all(); - } else { - unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK; - unsigned long saved_asid = get_asid(); - - start &= PAGE_MASK; - end += (PAGE_SIZE - 1); - end &= PAGE_MASK; - set_asid(asid); - while (start < end) { - __flush_tlb_page(asid, start); - start += PAGE_SIZE; - } - set_asid(saved_asid); - } - local_irq_restore(flags); -} - -void flush_tlb_mm(struct mm_struct *mm) -{ - /* Invalidate all TLB of this process. */ - /* Instead of invalidating each TLB, we get new MMU context. */ - if (mm->context != NO_CONTEXT) { - unsigned long flags; - - local_irq_save(flags); - mm->context = NO_CONTEXT; - if (mm == current->mm) - activate_context(mm); - local_irq_restore(flags); - } -} - -void flush_tlb_all(void) -{ - unsigned long flags, status; - - /* - * Flush all the TLB. - * - * Write to the MMU control register's bit: - * TF-bit for SH-3, TI-bit for SH-4. - * It's same position, bit #2. 
- */ - local_irq_save(flags); - status = ctrl_inl(MMUCR); - status |= 0x04; - ctrl_outl(status, MMUCR); - local_irq_restore(flags); -} diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c index 2a85bc15a41..329059d6b54 100644 --- a/arch/sh/mm/hugetlbpage.c +++ b/arch/sh/mm/hugetlbpage.c @@ -26,61 +26,41 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; + pud_t *pud; pmd_t *pmd; pte_t *pte = NULL; pgd = pgd_offset(mm, addr); if (pgd) { - pmd = pmd_alloc(mm, pgd, addr); - if (pmd) - pte = pte_alloc_map(mm, pmd, addr); + pud = pud_alloc(mm, pgd, addr); + if (pud) { + pmd = pmd_alloc(mm, pud, addr); + if (pmd) + pte = pte_alloc_map(mm, pmd, addr); + } } + return pte; } pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; + pud_t *pud; pmd_t *pmd; pte_t *pte = NULL; pgd = pgd_offset(mm, addr); if (pgd) { - pmd = pmd_offset(pgd, addr); - if (pmd) - pte = pte_offset_map(pmd, addr); - } - return pte; -} - -void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t entry) -{ - int i; - - for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { - set_pte_at(mm, addr, ptep, entry); - ptep++; - addr += PAGE_SIZE; - pte_val(entry) += PAGE_SIZE; + pud = pud_offset(pgd, addr); + if (pud) { + pmd = pmd_offset(pud, addr); + if (pmd) + pte = pte_offset_map(pmd, addr); + } } -} - -pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) -{ - pte_t entry; - int i; - - entry = *ptep; - for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { - pte_clear(mm, addr, ptep); - addr += PAGE_SIZE; - ptep++; - } - - return entry; + return pte; } struct page *follow_huge_addr(struct mm_struct *mm, diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 8ea27ca4b70..7154d1ce978 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -24,7 +24,7 @@ #include <linux/highmem.h> #include <linux/bootmem.h> #include <linux/pagemap.h> - +#include <linux/proc_fs.h> #include <asm/processor.h> #include <asm/system.h> #include <asm/uaccess.h> @@ -80,6 +80,7 @@ void show_mem(void) static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) { pgd_t *pgd; + pud_t *pud; pmd_t *pmd; pte_t *pte; @@ -89,7 +90,17 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) return; } - pmd = pmd_offset(pgd, addr); + pud = pud_offset(pgd, addr); + if (pud_none(*pud)) { + pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC); + set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER)); + if (pmd != pmd_offset(pud, 0)) { + pud_ERROR(*pud); + return; + } + } + + pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) { pte = (pte_t *)get_zeroed_page(GFP_ATOMIC); set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER)); @@ -212,6 +223,8 @@ void __init paging_init(void) free_area_init_node(0, NODE_DATA(0), zones_size, __MEMORY_START >> PAGE_SHIFT, 0); } +static struct kcore_list kcore_mem, kcore_vmalloc; + void __init mem_init(void) { extern unsigned long empty_zero_page[1024]; @@ -237,8 +250,13 @@ void __init mem_init(void) * Setup wrappers for copy/clear_page(), these will get overridden * later in the boot process if a better method is available. 
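 	 *
 	 * (Illustrative note: copy_page and clear_page are function
 	 * pointers on SH; a faster implementation, such as the DMA-based
 	 * page ops built for CONFIG_DMA_PAGE_OPS in the Makefile above,
 	 * can be installed over the slow versions later in boot.)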
*/ +#ifdef CONFIG_MMU copy_page = copy_page_slow; clear_page = clear_page_slow; +#else + copy_page = copy_page_nommu; + clear_page = clear_page_nommu; +#endif /* this will put all low memory onto the freelists */ totalram_pages += free_all_bootmem_node(NODE_DATA(0)); @@ -254,7 +272,12 @@ void __init mem_init(void) datasize = (unsigned long) &_edata - (unsigned long) &_etext; initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; - printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", + kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); + kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, + VMALLOC_END - VMALLOC_START); + + printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " + "%dk reserved, %dk data, %dk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), max_mapnr << (PAGE_SHIFT-10), codesize >> 10, @@ -263,6 +286,9 @@ void __init mem_init(void) initsize >> 10); p3_cache_init(); + + /* Initialize the vDSO */ + vsyscall_init(); } void free_initmem(void) diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c index 96fa4a999e2..a9fe80cfc23 100644 --- a/arch/sh/mm/ioremap.c +++ b/arch/sh/mm/ioremap.c @@ -15,6 +15,7 @@ #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/mm.h> +#include <linux/pci.h> #include <asm/io.h> #include <asm/page.h> #include <asm/pgalloc.h> @@ -135,6 +136,20 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size, return (void __iomem *)phys_to_virt(phys_addr); /* + * If we're on an SH7751 or SH7780 PCI controller, PCI memory is + * mapped at the end of the address space (typically 0xfd000000) + * in a non-translatable area, so mapping through page tables for + * this area is not only pointless, but also fundamentally + * broken. Just return the physical address instead. + * + * For boards that map a small PCI memory aperture somewhere in + * P1/P2 space, ioremap() will already do the right thing, + * and we'll never get this far. + */ + if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr)) + return (void __iomem *)phys_addr; + + /* * Don't allow anybody to remap normal RAM that we're using.. 
*/ if (phys_addr < virt_to_phys(high_memory)) @@ -192,7 +207,7 @@ void __iounmap(void __iomem *addr) unsigned long vaddr = (unsigned long __force)addr; struct vm_struct *p; - if (PXSEG(vaddr) < P3SEG) + if (PXSEG(vaddr) < P3SEG || is_pci_memaddr(vaddr)) return; #ifdef CONFIG_32BIT diff --git a/arch/sh/mm/pg-nommu.c b/arch/sh/mm/pg-nommu.c index 8f9165a4e33..d15221beaa1 100644 --- a/arch/sh/mm/pg-nommu.c +++ b/arch/sh/mm/pg-nommu.c @@ -14,23 +14,24 @@ #include <linux/string.h> #include <asm/page.h> -static void copy_page_nommu(void *to, void *from) +void copy_page_nommu(void *to, void *from) { memcpy(to, from, PAGE_SIZE); } -static void clear_page_nommu(void *to) +void clear_page_nommu(void *to) { memset(to, 0, PAGE_SIZE); } -static int __init pg_nommu_init(void) +__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n) { - copy_page = copy_page_nommu; - clear_page = clear_page_nommu; - + memcpy(to, from, n); return 0; } -subsys_initcall(pg_nommu_init); - +__kernel_size_t __clear_user(void *to, __kernel_size_t n) +{ + memset(to, 0, n); + return 0; +} diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c index c776b60fc25..07371ed7a31 100644 --- a/arch/sh/mm/pg-sh4.c +++ b/arch/sh/mm/pg-sh4.c @@ -2,7 +2,7 @@ * arch/sh/mm/pg-sh4.c * * Copyright (C) 1999, 2000, 2002 Niibe Yutaka - * Copyright (C) 2002 Paul Mundt + * Copyright (C) 2002 - 2005 Paul Mundt * * Released under the terms of the GNU GPL v2.0. */ @@ -23,6 +23,8 @@ extern struct semaphore p3map_sem[]; +#define CACHE_ALIAS (cpu_data->dcache.alias_mask) + /* * clear_user_page * @to: P1 address @@ -35,14 +37,15 @@ void clear_user_page(void *to, unsigned long address, struct page *page) if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) clear_page(to); else { - pgprot_t pgprot = __pgprot(_PAGE_PRESENT | + pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | - _PAGE_DIRTY | _PAGE_ACCESSED | + _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD); unsigned long phys_addr = PHYSADDR(to); unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS); - pgd_t *dir = pgd_offset_k(p3_addr); - pmd_t *pmd = pmd_offset(dir, p3_addr); + pgd_t *pgd = pgd_offset_k(p3_addr); + pud_t *pud = pud_offset(pgd, p3_addr); + pmd_t *pmd = pmd_offset(pud, p3_addr); pte_t *pte = pte_offset_kernel(pmd, p3_addr); pte_t entry; unsigned long flags; @@ -67,21 +70,22 @@ void clear_user_page(void *to, unsigned long address, struct page *page) * @address: U0 address to be mapped * @page: page (virt_to_page(to)) */ -void copy_user_page(void *to, void *from, unsigned long address, +void copy_user_page(void *to, void *from, unsigned long address, struct page *page) { __set_bit(PG_mapped, &page->flags); if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) copy_page(to, from); else { - pgprot_t pgprot = __pgprot(_PAGE_PRESENT | + pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | - _PAGE_DIRTY | _PAGE_ACCESSED | + _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD); unsigned long phys_addr = PHYSADDR(to); unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS); - pgd_t *dir = pgd_offset_k(p3_addr); - pmd_t *pmd = pmd_offset(dir, p3_addr); + pgd_t *pgd = pgd_offset_k(p3_addr); + pud_t *pud = pud_offset(pgd, p3_addr); + pmd_t *pmd = pmd_offset(pud, p3_addr); pte_t *pte = pte_offset_kernel(pmd, p3_addr); pte_t entry; unsigned long flags; diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c new file mode 100644 index 00000000000..92e745341e4 --- /dev/null +++ b/arch/sh/mm/pmb.c @@ -0,0 +1,400 @@ +/* 
+ * arch/sh/mm/pmb.c + * + * Privileged Space Mapping Buffer (PMB) Support. + * + * Copyright (C) 2005, 2006 Paul Mundt + * + * P1/P2 Section mapping definitions from map32.h, which was: + * + * Copyright 2003 (c) Lineo Solutions,Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/bitops.h> +#include <linux/debugfs.h> +#include <linux/fs.h> +#include <linux/seq_file.h> +#include <linux/err.h> +#include <asm/system.h> +#include <asm/uaccess.h> +#include <asm/pgtable.h> +#include <asm/mmu.h> +#include <asm/io.h> + +#define NR_PMB_ENTRIES 16 + +static kmem_cache_t *pmb_cache; +static unsigned long pmb_map; + +static struct pmb_entry pmb_init_map[] = { + /* vpn ppn flags (ub/sz/c/wt) */ + + /* P1 Section Mappings */ + { 0x80000000, 0x00000000, PMB_SZ_64M | PMB_C, }, + { 0x84000000, 0x04000000, PMB_SZ_64M | PMB_C, }, + { 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, }, + { 0x90000000, 0x10000000, PMB_SZ_64M | PMB_C, }, + { 0x94000000, 0x14000000, PMB_SZ_64M | PMB_C, }, + { 0x98000000, 0x18000000, PMB_SZ_64M | PMB_C, }, + + /* P2 Section Mappings */ + { 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, + { 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, + { 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, }, + { 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, + { 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, + { 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, +}; + +static inline unsigned long mk_pmb_entry(unsigned int entry) +{ + return (entry & PMB_E_MASK) << PMB_E_SHIFT; +} + +static inline unsigned long mk_pmb_addr(unsigned int entry) +{ + return mk_pmb_entry(entry) | PMB_ADDR; +} + +static inline unsigned long mk_pmb_data(unsigned int entry) +{ + return mk_pmb_entry(entry) | PMB_DATA; +} + +struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, + unsigned long flags) +{ + struct pmb_entry *pmbe; + + pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL); + if (!pmbe) + return ERR_PTR(-ENOMEM); + + pmbe->vpn = vpn; + pmbe->ppn = ppn; + pmbe->flags = flags; + + return pmbe; +} + +void pmb_free(struct pmb_entry *pmbe) +{ + kmem_cache_free(pmb_cache, pmbe); +} + +/* + * Must be in P2 for __set_pmb_entry() + */ +int __set_pmb_entry(unsigned long vpn, unsigned long ppn, + unsigned long flags, int *entry) +{ + unsigned int pos = *entry; + + if (unlikely(pos == PMB_NO_ENTRY)) + pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); + +repeat: + if (unlikely(pos > NR_PMB_ENTRIES)) + return -ENOSPC; + + if (test_and_set_bit(pos, &pmb_map)) { + pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); + goto repeat; + } + + ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos)); + +#ifdef CONFIG_SH_WRITETHROUGH + /* + * When we are in 32-bit address extended mode, CCR.CB becomes + * invalid, so care must be taken to manually adjust cacheable + * translations. 
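+	 *
+	 * (Clarifying note: under CONFIG_SH_WRITETHROUGH the adjustment
+	 * below simply forces the write-through bit on for any cacheable
+	 * mapping, since CCR.CB no longer governs PMB-translated
+	 * accesses in 32-bit mode.)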
+ */ + if (likely(flags & PMB_C)) + flags |= PMB_WT; +#endif + + ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos)); + + *entry = pos; + + return 0; +} + +int set_pmb_entry(struct pmb_entry *pmbe) +{ + int ret; + + jump_to_P2(); + ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry); + back_to_P1(); + + return ret; +} + +void clear_pmb_entry(struct pmb_entry *pmbe) +{ + unsigned int entry = pmbe->entry; + unsigned long addr; + + /* + * Don't allow clearing of wired init entries, P1 or P2 access + * without a corresponding mapping in the PMB will lead to reset + * by the TLB. + */ + if (unlikely(entry < ARRAY_SIZE(pmb_init_map) || + entry >= NR_PMB_ENTRIES)) + return; + + jump_to_P2(); + + /* Clear V-bit */ + addr = mk_pmb_addr(entry); + ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr); + + addr = mk_pmb_data(entry); + ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr); + + back_to_P1(); + + clear_bit(entry, &pmb_map); +} + +static DEFINE_SPINLOCK(pmb_list_lock); +static struct pmb_entry *pmb_list; + +static inline void pmb_list_add(struct pmb_entry *pmbe) +{ + struct pmb_entry **p, *tmp; + + p = &pmb_list; + while ((tmp = *p) != NULL) + p = &tmp->next; + + pmbe->next = tmp; + *p = pmbe; +} + +static inline void pmb_list_del(struct pmb_entry *pmbe) +{ + struct pmb_entry **p, *tmp; + + for (p = &pmb_list; (tmp = *p); p = &tmp->next) + if (tmp == pmbe) { + *p = tmp->next; + return; + } +} + +static struct { + unsigned long size; + int flag; +} pmb_sizes[] = { + { .size = 0x20000000, .flag = PMB_SZ_512M, }, + { .size = 0x08000000, .flag = PMB_SZ_128M, }, + { .size = 0x04000000, .flag = PMB_SZ_64M, }, + { .size = 0x01000000, .flag = PMB_SZ_16M, }, +}; + +long pmb_remap(unsigned long vaddr, unsigned long phys, + unsigned long size, unsigned long flags) +{ + struct pmb_entry *pmbp; + unsigned long wanted; + int pmb_flags, i; + + /* Convert typical pgprot value to the PMB equivalent */ + if (flags & _PAGE_CACHABLE) { + if (flags & _PAGE_WT) + pmb_flags = PMB_WT; + else + pmb_flags = PMB_C; + } else + pmb_flags = PMB_WT | PMB_UB; + + pmbp = NULL; + wanted = size; + +again: + for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { + struct pmb_entry *pmbe; + int ret; + + if (size < pmb_sizes[i].size) + continue; + + pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag); + if (IS_ERR(pmbe)) + return PTR_ERR(pmbe); + + ret = set_pmb_entry(pmbe); + if (ret != 0) { + pmb_free(pmbe); + return -EBUSY; + } + + phys += pmb_sizes[i].size; + vaddr += pmb_sizes[i].size; + size -= pmb_sizes[i].size; + + /* + * Link adjacent entries that span multiple PMB entries + * for easier tear-down. 
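+		 *
+		 * (Worked example, not in the original patch: with the
+		 * pmb_sizes table above, a 192 MB remap request is satisfied
+		 * greedily as one 128 MB entry followed by one 64 MB entry;
+		 * the first entry's ->link points at the second so that
+		 * pmb_unmap() can tear down the whole chain from a single
+		 * virtual address.)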
+
+void pmb_unmap(unsigned long addr)
+{
+	struct pmb_entry **p, *pmbe;
+
+	for (p = &pmb_list; (pmbe = *p); p = &pmbe->next)
+		if (pmbe->vpn == addr)
+			break;
+
+	if (unlikely(!pmbe))
+		return;
+
+	WARN_ON(!test_bit(pmbe->entry, &pmb_map));
+
+	do {
+		struct pmb_entry *pmblink = pmbe;
+
+		clear_pmb_entry(pmbe);
+		pmbe = pmblink->link;
+
+		pmb_free(pmblink);
+	} while (pmbe);
+}
+
+static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
+{
+	struct pmb_entry *pmbe = pmb;
+
+	memset(pmb, 0, sizeof(struct pmb_entry));
+
+	spin_lock_irq(&pmb_list_lock);
+
+	pmbe->entry = PMB_NO_ENTRY;
+	pmb_list_add(pmbe);
+
+	spin_unlock_irq(&pmb_list_lock);
+}
+
+static void pmb_cache_dtor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
+{
+	spin_lock_irq(&pmb_list_lock);
+	pmb_list_del(pmb);
+	spin_unlock_irq(&pmb_list_lock);
+}
+
+static int __init pmb_init(void)
+{
+	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
+	unsigned int entry;
+
+	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
+
+	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry),
+				      0, 0, pmb_cache_ctor, pmb_cache_dtor);
+	BUG_ON(!pmb_cache);
+
+	jump_to_P2();
+
+	/*
+	 * Ordering is important: P2 must be mapped in the PMB before we
+	 * can set PMB.SE, and P1 must be mapped before we jump back to
+	 * P1 space.
+	 */
+	for (entry = 0; entry < nr_entries; entry++) {
+		struct pmb_entry *pmbe = pmb_init_map + entry;
+
+		__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry);
+	}
+
+	ctrl_outl(0, PMB_IRMCR);
+
+	/* PMB.SE and UB[7] */
+	ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR);
+
+	back_to_P1();
+
+	return 0;
+}
+arch_initcall(pmb_init);
+
+static int pmb_seq_show(struct seq_file *file, void *iter)
+{
+	int i;
+
+	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
+			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
+	seq_printf(file, "ety   vpn  ppn  size   flags\n");
+
+	for (i = 0; i < NR_PMB_ENTRIES; i++) {
+		unsigned long addr, data;
+		unsigned int size;
+		char *sz_str = NULL;
+
+		addr = ctrl_inl(mk_pmb_addr(i));
+		data = ctrl_inl(mk_pmb_data(i));
+
+		size = data & PMB_SZ_MASK;
+		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
+			 (size == PMB_SZ_64M)  ? " 64MB":
+			 (size == PMB_SZ_128M) ? "128MB":
+						 "512MB";
+
+		/* 02: V 0x88 0x08 128MB C CB B */
+		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
+			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
+			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
+			   sz_str, (data & PMB_C) ? 'C' : ' ',
+			   (data & PMB_WT) ? "WT" : "CB",
+			   (data & PMB_UB) ? "UB" : " B");
+	}
+
+	return 0;
+}
+
+static int pmb_debugfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmb_seq_show, NULL);
+}
+
+static struct file_operations pmb_debugfs_fops = {
+	.owner		= THIS_MODULE,
+	.open		= pmb_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int __init pmb_debugfs_init(void)
+{
+	struct dentry *dentry;
+
+	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
+				     NULL, NULL, &pmb_debugfs_fops);
+	if (IS_ERR(dentry))
+		return PTR_ERR(dentry);
+
+	return 0;
+}
+postcore_initcall(pmb_debugfs_init);
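[Editor's illustration, not part of the patch.] A hypothetical in-kernel caller of the
pmb_remap()/pmb_unmap() interface defined above. The function name, addresses, and sizes
are invented for illustration; the declarations are assumed to come from <asm/mmu.h>, and
the virtual range 0x9c000000 is assumed free because the wired P1 sections in
pmb_init_map[] end there.

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>

/* Cover 64MB of physical memory at 0x1c000000 with a cached section. */
static int __init example_pmb_user(void)
{
	long mapped;

	mapped = pmb_remap(0x9c000000, 0x1c000000,
			   0x04000000, _PAGE_CACHABLE);
	if (mapped < 0)
		return (int)mapped;	/* pmb_alloc() failure or -EBUSY */

	/* Partial coverage: any remainder below 16MB stays unmapped. */
	if (mapped < 0x04000000) {
		pmb_unmap(0x9c000000);
		return -ENOMEM;
	}

	printk(KERN_INFO "mapped %ld bytes via the PMB\n", mapped);
	return 0;
}

The resulting hardware state is visible through the debugfs file registered above
("pmb"), whose per-entry format is shown in the comment inside pmb_seq_show().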
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
new file mode 100644
index 00000000000..73ec7f6084f
--- /dev/null
+++ b/arch/sh/mm/tlb-flush.c
@@ -0,0 +1,134 @@
+/*
+ * TLB flushing operations for SH with an MMU.
+ *
+ * Copyright (C) 1999  Niibe Yutaka
+ * Copyright (C) 2003  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+	if (vma->vm_mm && vma->vm_mm->context.id != NO_CONTEXT) {
+		unsigned long flags;
+		unsigned long asid;
+		unsigned long saved_asid = MMU_NO_ASID;
+
+		asid = vma->vm_mm->context.id & MMU_CONTEXT_ASID_MASK;
+		page &= PAGE_MASK;
+
+		local_irq_save(flags);
+		if (vma->vm_mm != current->mm) {
+			saved_asid = get_asid();
+			set_asid(asid);
+		}
+		__flush_tlb_page(asid, page);
+		if (saved_asid != MMU_NO_ASID)
+			set_asid(saved_asid);
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm->context.id != NO_CONTEXT) {
+		unsigned long flags;
+		int size;
+
+		local_irq_save(flags);
+		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */
+			mm->context.id = NO_CONTEXT;
+			if (mm == current->mm)
+				activate_context(mm);
+		} else {
+			unsigned long asid;
+			unsigned long saved_asid = MMU_NO_ASID;
+
+			asid = mm->context.id & MMU_CONTEXT_ASID_MASK;
+			start &= PAGE_MASK;
+			end += (PAGE_SIZE - 1);
+			end &= PAGE_MASK;
+			if (mm != current->mm) {
+				saved_asid = get_asid();
+				set_asid(asid);
+			}
+			while (start < end) {
+				__flush_tlb_page(asid, start);
+				start += PAGE_SIZE;
+			}
+			if (saved_asid != MMU_NO_ASID)
+				set_asid(saved_asid);
+		}
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned long flags;
+	int size;
+
+	local_irq_save(flags);
+	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */
+		flush_tlb_all();
+	} else {
+		unsigned long asid;
+		unsigned long saved_asid = get_asid();
+
+		asid = init_mm.context.id & MMU_CONTEXT_ASID_MASK;
+		start &= PAGE_MASK;
+		end += (PAGE_SIZE - 1);
+		end &= PAGE_MASK;
+		set_asid(asid);
+		while (start < end) {
+			__flush_tlb_page(asid, start);
+			start += PAGE_SIZE;
+		}
+		set_asid(saved_asid);
+	}
+	local_irq_restore(flags);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	/*
+	 * Invalidate all TLB entries of this process by getting a new
+	 * MMU context, rather than invalidating each entry.
+	 */
+	if (mm->context.id != NO_CONTEXT) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		mm->context.id = NO_CONTEXT;
+		if (mm == current->mm)
+			activate_context(mm);
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_all(void)
+{
+	unsigned long flags, status;
+
+	/*
+	 * Flush the entire TLB.
+	 *
+	 * Write to the MMU control register's bit:
+	 *	TF-bit for SH-3, TI-bit for SH-4.
+	 * It's the same position, bit #2.
+	 */
+	local_irq_save(flags);
+	status = ctrl_inl(MMUCR);
+	status |= 0x04;
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+	local_irq_restore(flags);
+}
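[Editor's illustration, not part of the patch.] flush_tlb_range() and
flush_tlb_kernel_range() above share a heuristic: once a range spans more than a quarter
of the TLB, it is cheaper to drop the whole context (or flush everything) than to flush
page by page. A stand-alone sketch of just that decision; the 4KB page size and the
64-entry MMU_NTLB_ENTRIES value are assumptions made for the example.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define MMU_NTLB_ENTRIES 64	/* assumed UTLB size for this sketch */

/* Mirror the size test used by the flush_tlb_*range() paths above. */
static const char *flush_strategy(unsigned long start, unsigned long end)
{
	int size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

	return (size > MMU_NTLB_ENTRIES / 4) ? "flush everything"
					     : "flush page by page";
}

int main(void)
{
	/* 8 pages: below the threshold, flush individually. */
	printf("32KB : %s\n", flush_strategy(0x10000000, 0x10008000));
	/* 64 pages: more than a quarter of the TLB, wipe it all. */
	printf("256KB: %s\n", flush_strategy(0x10000000, 0x10040000));
	return 0;
}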
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 115b1b6be40..812b2d567de 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -36,7 +36,6 @@ void update_mmu_cache(struct vm_area_struct * vma,
 	unsigned long vpn;
 	struct page *page;
 	unsigned long pfn;
-	unsigned long ptea;
 
 	/* Ptrace may call this routine. */
 	if (vma && current->active_mm != vma->vm_mm)
@@ -59,10 +58,11 @@ void update_mmu_cache(struct vm_area_struct * vma,
 	ctrl_outl(vpn, MMU_PTEH);
 
 	pteval = pte_val(pte);
+
 	/* Set PTEA register */
-	/* TODO: make this look less hacky */
-	ptea = ((pteval >> 28) & 0xe) | (pteval & 0x1);
-	ctrl_outl(ptea, MMU_PTEA);
+	if (cpu_data->flags & CPU_HAS_PTEA)
+		/* TODO: make this look less hacky */
+		ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
 
 	/* Set PTEL register */
 	pteval &= _PAGE_FLAGS_HARDWARE_MASK;	/* drop software flags */
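[Editor's illustration, not part of the patch.] The PTEA value written above is built by
bit packing: PTE bits 31-29 are shifted down to PTEA bits 3-1 (the "& 0xe" drops what
would land in bit 0, i.e. PTE bit 28), and PTE bit 0 is carried through unchanged. A tiny
stand-alone check of that bit surgery:

#include <stdio.h>

/* Same packing as the ctrl_outl(..., MMU_PTEA) argument above. */
static unsigned long mk_ptea(unsigned long pteval)
{
	return ((pteval >> 28) & 0xe) | (pteval & 0x1);
}

int main(void)
{
	/* Bits 31-29 set, bit 28 set (discarded), bit 0 set. */
	unsigned long pteval = 0xf0000001UL;

	/* Expect 0xf: PTE[31:29] -> PTEA[3:1], PTE[0] -> PTEA[0]. */
	printf("PTEA = 0x%lx\n", mk_ptea(pteval));
	return 0;
}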