author | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-06-28 23:27:48 -0400
---|---|---
committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-06-28 23:27:48 -0400
commit | 9f2fa466383ce100b90fe52cb4489d7a26bf72a9 (patch) |
tree | 7b72b1fae85137435d5b98f4614df2195f612acc | /arch/arm
parent | 607f31e80b6f982d7c0dd7a5045377fc368fe507 (diff) |
parent | 0a6047eef1c465c38aacfbdab193161b3f0cd144 (diff) |
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/
Diffstat (limited to 'arch/arm')
47 files changed, 870 insertions, 117 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 3d1a3fb7d5f..f123c7c9fc9 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -188,23 +188,27 @@ config ARCH_IMX config ARCH_IOP3XX bool "IOP3xx-based" + depends on MMU select PCI help Support for Intel's IOP3XX (XScale) family of processors. config ARCH_IXP4XX bool "IXP4xx-based" + depends on MMU help Support for Intel's IXP4XX (XScale) family of processors. config ARCH_IXP2000 bool "IXP2400/2800-based" + depends on MMU select PCI help Support for Intel's IXP2400/2800 (XScale) family of processors. config ARCH_IXP23XX bool "IXP23XX-based" + depends on MMU select PCI help Support for Intel's IXP23xx (XScale) family of processors. @@ -229,6 +233,7 @@ config ARCH_PNX4008 config ARCH_PXA bool "PXA2xx-based" + depends on MMU select ARCH_MTD_XIP help Support for Intel's PXA2XX processor line. @@ -339,6 +344,10 @@ config XSCALE_PMU depends on CPU_XSCALE && !XSCALE_PMU_TIMER default y +if !MMU +source "arch/arm/Kconfig-nommu" +endif + endmenu source "arch/arm/common/Kconfig" diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index a601b8b55f3..7cffbaef064 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -22,6 +22,9 @@ obj-$(CONFIG_PCI) += bios32.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o +obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o +AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312 + obj-$(CONFIG_IWMMXT) += iwmmxt.o AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index c49b5d4d7fc..da69e660574 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -109,11 +109,13 @@ EXPORT_SYMBOL(memchr); EXPORT_SYMBOL(__memzero); /* user mem (segment) */ -EXPORT_SYMBOL(__arch_copy_from_user); -EXPORT_SYMBOL(__arch_copy_to_user); -EXPORT_SYMBOL(__arch_clear_user); -EXPORT_SYMBOL(__arch_strnlen_user); -EXPORT_SYMBOL(__arch_strncpy_from_user); +EXPORT_SYMBOL(__strnlen_user); +EXPORT_SYMBOL(__strncpy_from_user); + +#ifdef CONFIG_MMU +EXPORT_SYMBOL(__copy_from_user); +EXPORT_SYMBOL(__copy_to_user); +EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(__get_user_1); EXPORT_SYMBOL(__get_user_2); @@ -123,6 +125,7 @@ EXPORT_SYMBOL(__put_user_1); EXPORT_SYMBOL(__put_user_2); EXPORT_SYMBOL(__put_user_4); EXPORT_SYMBOL(__put_user_8); +#endif /* crypto hash */ EXPORT_SYMBOL(sha_transform); diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 396efba9bac..447ede5143a 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -60,6 +60,9 @@ int main(void) #ifdef CONFIG_IWMMXT DEFINE(TI_IWMMXT_STATE, offsetof(struct thread_info, fpstate.iwmmxt)); #endif +#ifdef CONFIG_CRUNCH + DEFINE(TI_CRUNCH_STATE, offsetof(struct thread_info, crunchstate)); +#endif BLANK(); DEFINE(S_R0, offsetof(struct pt_regs, ARM_r0)); DEFINE(S_R1, offsetof(struct pt_regs, ARM_r1)); diff --git a/arch/arm/kernel/crunch-bits.S b/arch/arm/kernel/crunch-bits.S new file mode 100644 index 00000000000..a26886758c6 --- /dev/null +++ b/arch/arm/kernel/crunch-bits.S @@ -0,0 +1,305 @@ +/* + * arch/arm/kernel/crunch-bits.S + * Cirrus MaverickCrunch context switching and handling + * + * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> + * + * Shamelessly stolen from the iWMMXt code by Nicolas Pitre, which is + * Copyright (c) 2003-2004, MontaVista Software, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/linkage.h> +#include <asm/ptrace.h> +#include <asm/thread_info.h> +#include <asm/asm-offsets.h> +#include <asm/arch/ep93xx-regs.h> + +/* + * We can't use hex constants here due to a bug in gas. + */ +#define CRUNCH_MVDX0 0 +#define CRUNCH_MVDX1 8 +#define CRUNCH_MVDX2 16 +#define CRUNCH_MVDX3 24 +#define CRUNCH_MVDX4 32 +#define CRUNCH_MVDX5 40 +#define CRUNCH_MVDX6 48 +#define CRUNCH_MVDX7 56 +#define CRUNCH_MVDX8 64 +#define CRUNCH_MVDX9 72 +#define CRUNCH_MVDX10 80 +#define CRUNCH_MVDX11 88 +#define CRUNCH_MVDX12 96 +#define CRUNCH_MVDX13 104 +#define CRUNCH_MVDX14 112 +#define CRUNCH_MVDX15 120 +#define CRUNCH_MVAX0L 128 +#define CRUNCH_MVAX0M 132 +#define CRUNCH_MVAX0H 136 +#define CRUNCH_MVAX1L 140 +#define CRUNCH_MVAX1M 144 +#define CRUNCH_MVAX1H 148 +#define CRUNCH_MVAX2L 152 +#define CRUNCH_MVAX2M 156 +#define CRUNCH_MVAX2H 160 +#define CRUNCH_MVAX3L 164 +#define CRUNCH_MVAX3M 168 +#define CRUNCH_MVAX3H 172 +#define CRUNCH_DSPSC 176 + +#define CRUNCH_SIZE 184 + + .text + +/* + * Lazy switching of crunch coprocessor context + * + * r10 = struct thread_info pointer + * r9 = ret_from_exception + * lr = undefined instr exit + * + * called from prefetch exception handler with interrupts disabled + */ +ENTRY(crunch_task_enable) + ldr r8, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr + + ldr r1, [r8, #0x80] + tst r1, #0x00800000 @ access to crunch enabled? + movne pc, lr @ if so no business here + mov r3, #0xaa @ unlock syscon swlock + str r3, [r8, #0xc0] + orr r1, r1, #0x00800000 @ enable access to crunch + str r1, [r8, #0x80] + + ldr r3, =crunch_owner + add r0, r10, #TI_CRUNCH_STATE @ get task crunch save area + ldr r2, [sp, #60] @ current task pc value + ldr r1, [r3] @ get current crunch owner + str r0, [r3] @ this task now owns crunch + sub r2, r2, #4 @ adjust pc back + str r2, [sp, #60] + + ldr r2, [r8, #0x80] + mov r2, r2 @ flush out enable (@@@) + + teq r1, #0 @ test for last ownership + mov lr, r9 @ normal exit from exception + beq crunch_load @ no owner, skip save + +crunch_save: + cfstr64 mvdx0, [r1, #CRUNCH_MVDX0] @ save 64b registers + cfstr64 mvdx1, [r1, #CRUNCH_MVDX1] + cfstr64 mvdx2, [r1, #CRUNCH_MVDX2] + cfstr64 mvdx3, [r1, #CRUNCH_MVDX3] + cfstr64 mvdx4, [r1, #CRUNCH_MVDX4] + cfstr64 mvdx5, [r1, #CRUNCH_MVDX5] + cfstr64 mvdx6, [r1, #CRUNCH_MVDX6] + cfstr64 mvdx7, [r1, #CRUNCH_MVDX7] + cfstr64 mvdx8, [r1, #CRUNCH_MVDX8] + cfstr64 mvdx9, [r1, #CRUNCH_MVDX9] + cfstr64 mvdx10, [r1, #CRUNCH_MVDX10] + cfstr64 mvdx11, [r1, #CRUNCH_MVDX11] + cfstr64 mvdx12, [r1, #CRUNCH_MVDX12] + cfstr64 mvdx13, [r1, #CRUNCH_MVDX13] + cfstr64 mvdx14, [r1, #CRUNCH_MVDX14] + cfstr64 mvdx15, [r1, #CRUNCH_MVDX15] + +#ifdef __ARMEB__ +#error fix me for ARMEB +#endif + + cfmv32al mvfx0, mvax0 @ save 72b accumulators + cfstr32 mvfx0, [r1, #CRUNCH_MVAX0L] + cfmv32am mvfx0, mvax0 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX0M] + cfmv32ah mvfx0, mvax0 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX0H] + cfmv32al mvfx0, mvax1 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX1L] + cfmv32am mvfx0, mvax1 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX1M] + cfmv32ah mvfx0, mvax1 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX1H] + cfmv32al mvfx0, mvax2 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX2L] + cfmv32am mvfx0, mvax2 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX2M] + cfmv32ah mvfx0, mvax2 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX2H] + cfmv32al mvfx0, mvax3 + cfstr32 mvfx0, 
[r1, #CRUNCH_MVAX3L] + cfmv32am mvfx0, mvax3 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX3M] + cfmv32ah mvfx0, mvax3 + cfstr32 mvfx0, [r1, #CRUNCH_MVAX3H] + + cfmv32sc mvdx0, dspsc @ save status word + cfstr64 mvdx0, [r1, #CRUNCH_DSPSC] + + teq r0, #0 @ anything to load? + cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0] @ mvdx0 was clobbered + moveq pc, lr + +crunch_load: + cfldr64 mvdx0, [r0, #CRUNCH_DSPSC] @ load status word + cfmvsc32 dspsc, mvdx0 + + cfldr32 mvfx0, [r0, #CRUNCH_MVAX0L] @ load 72b accumulators + cfmval32 mvax0, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX0M] + cfmvam32 mvax0, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX0H] + cfmvah32 mvax0, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX1L] + cfmval32 mvax1, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX1M] + cfmvam32 mvax1, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX1H] + cfmvah32 mvax1, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX2L] + cfmval32 mvax2, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX2M] + cfmvam32 mvax2, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX2H] + cfmvah32 mvax2, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX3L] + cfmval32 mvax3, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX3M] + cfmvam32 mvax3, mvfx0 + cfldr32 mvfx0, [r0, #CRUNCH_MVAX3H] + cfmvah32 mvax3, mvfx0 + + cfldr64 mvdx0, [r0, #CRUNCH_MVDX0] @ load 64b registers + cfldr64 mvdx1, [r0, #CRUNCH_MVDX1] + cfldr64 mvdx2, [r0, #CRUNCH_MVDX2] + cfldr64 mvdx3, [r0, #CRUNCH_MVDX3] + cfldr64 mvdx4, [r0, #CRUNCH_MVDX4] + cfldr64 mvdx5, [r0, #CRUNCH_MVDX5] + cfldr64 mvdx6, [r0, #CRUNCH_MVDX6] + cfldr64 mvdx7, [r0, #CRUNCH_MVDX7] + cfldr64 mvdx8, [r0, #CRUNCH_MVDX8] + cfldr64 mvdx9, [r0, #CRUNCH_MVDX9] + cfldr64 mvdx10, [r0, #CRUNCH_MVDX10] + cfldr64 mvdx11, [r0, #CRUNCH_MVDX11] + cfldr64 mvdx12, [r0, #CRUNCH_MVDX12] + cfldr64 mvdx13, [r0, #CRUNCH_MVDX13] + cfldr64 mvdx14, [r0, #CRUNCH_MVDX14] + cfldr64 mvdx15, [r0, #CRUNCH_MVDX15] + + mov pc, lr + +/* + * Back up crunch regs to save area and disable access to them + * (mainly for gdb or sleep mode usage) + * + * r0 = struct thread_info pointer of target task or NULL for any + */ +ENTRY(crunch_task_disable) + stmfd sp!, {r4, r5, lr} + + mrs ip, cpsr + orr r2, ip, #PSR_I_BIT @ disable interrupts + msr cpsr_c, r2 + + ldr r4, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr + + ldr r3, =crunch_owner + add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area + ldr r1, [r3] @ get current crunch owner + teq r1, #0 @ any current owner? + beq 1f @ no: quit + teq r0, #0 @ any owner? + teqne r1, r2 @ or specified one? + bne 1f @ no: quit + + ldr r5, [r4, #0x80] @ enable access to crunch + mov r2, #0xaa + str r2, [r4, #0xc0] + orr r5, r5, #0x00800000 + str r5, [r4, #0x80] + + mov r0, #0 @ nothing to load + str r0, [r3] @ no more current owner + ldr r2, [r4, #0x80] @ flush out enable (@@@) + mov r2, r2 + bl crunch_save + + mov r2, #0xaa @ disable access to crunch + str r2, [r4, #0xc0] + bic r5, r5, #0x00800000 + str r5, [r4, #0x80] + ldr r5, [r4, #0x80] @ flush out enable (@@@) + mov r5, r5 + +1: msr cpsr_c, ip @ restore interrupt mode + ldmfd sp!, {r4, r5, pc} + +/* + * Copy crunch state to given memory address + * + * r0 = struct thread_info pointer of target task + * r1 = memory address where to store crunch state + * + * this is called mainly in the creation of signal stack frames + */ +ENTRY(crunch_task_copy) + mrs ip, cpsr + orr r2, ip, #PSR_I_BIT @ disable interrupts + msr cpsr_c, r2 + + ldr r3, =crunch_owner + add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area + ldr r3, [r3] @ get current crunch owner + teq r2, r3 @ does this task own it... 
+ beq 1f + + @ current crunch values are in the task save area + msr cpsr_c, ip @ restore interrupt mode + mov r0, r1 + mov r1, r2 + mov r2, #CRUNCH_SIZE + b memcpy + +1: @ this task owns crunch regs -- grab a copy from there + mov r0, #0 @ nothing to load + mov r3, lr @ preserve return address + bl crunch_save + msr cpsr_c, ip @ restore interrupt mode + mov pc, r3 + +/* + * Restore crunch state from given memory address + * + * r0 = struct thread_info pointer of target task + * r1 = memory address where to get crunch state from + * + * this is used to restore crunch state when unwinding a signal stack frame + */ +ENTRY(crunch_task_restore) + mrs ip, cpsr + orr r2, ip, #PSR_I_BIT @ disable interrupts + msr cpsr_c, r2 + + ldr r3, =crunch_owner + add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area + ldr r3, [r3] @ get current crunch owner + teq r2, r3 @ does this task own it... + beq 1f + + @ this task doesn't own crunch regs -- use its save area + msr cpsr_c, ip @ restore interrupt mode + mov r0, r2 + mov r2, #CRUNCH_SIZE + b memcpy + +1: @ this task owns crunch regs -- load them directly + mov r0, r1 + mov r1, #0 @ nothing to save + mov r3, lr @ preserve return address + bl crunch_load + msr cpsr_c, ip @ restore interrupt mode + mov pc, r3 diff --git a/arch/arm/kernel/crunch.c b/arch/arm/kernel/crunch.c new file mode 100644 index 00000000000..748175921f9 --- /dev/null +++ b/arch/arm/kernel/crunch.c @@ -0,0 +1,83 @@ +/* + * arch/arm/kernel/crunch.c + * Cirrus MaverickCrunch context switching and handling + * + * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/config.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <asm/arch/ep93xx-regs.h> +#include <asm/thread_notify.h> +#include <asm/io.h> + +struct crunch_state *crunch_owner; + +void crunch_task_release(struct thread_info *thread) +{ + local_irq_disable(); + if (crunch_owner == &thread->crunchstate) + crunch_owner = NULL; + local_irq_enable(); +} + +static int crunch_enabled(u32 devcfg) +{ + return !!(devcfg & EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE); +} + +static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t) +{ + struct thread_info *thread = (struct thread_info *)t; + struct crunch_state *crunch_state; + u32 devcfg; + + crunch_state = &thread->crunchstate; + + switch (cmd) { + case THREAD_NOTIFY_FLUSH: + memset(crunch_state, 0, sizeof(*crunch_state)); + + /* + * FALLTHROUGH: Ensure we don't try to overwrite our newly + * initialised state information on the first fault. 
+ */ + + case THREAD_NOTIFY_RELEASE: + crunch_task_release(thread); + break; + + case THREAD_NOTIFY_SWITCH: + devcfg = __raw_readl(EP93XX_SYSCON_DEVICE_CONFIG); + if (crunch_enabled(devcfg) || crunch_owner == crunch_state) { + devcfg ^= EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE; + __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); + __raw_writel(devcfg, EP93XX_SYSCON_DEVICE_CONFIG); + } + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block crunch_notifier_block = { + .notifier_call = crunch_do, +}; + +static int __init crunch_init(void) +{ + thread_register_notifier(&crunch_notifier_block); + + return 0; +} + +late_initcall(crunch_init); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 86c92523a34..6423a38839b 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -492,9 +492,15 @@ call_fpe: b do_fpe @ CP#1 (FPE) b do_fpe @ CP#2 (FPE) mov pc, lr @ CP#3 +#ifdef CONFIG_CRUNCH + b crunch_task_enable @ CP#4 (MaverickCrunch) + b crunch_task_enable @ CP#5 (MaverickCrunch) + b crunch_task_enable @ CP#6 (MaverickCrunch) +#else mov pc, lr @ CP#4 mov pc, lr @ CP#5 mov pc, lr @ CP#6 +#endif mov pc, lr @ CP#7 mov pc, lr @ CP#8 mov pc, lr @ CP#9 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index a1d1b2906e8..c40bdc77005 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -634,6 +634,32 @@ static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp) #endif +#ifdef CONFIG_CRUNCH +/* + * Get the child Crunch state. + */ +static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp) +{ + struct thread_info *thread = task_thread_info(tsk); + + crunch_task_disable(thread); /* force it to ram */ + return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE) + ? -EFAULT : 0; +} + +/* + * Set the child Crunch state. + */ +static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp) +{ + struct thread_info *thread = task_thread_info(tsk); + + crunch_task_release(thread); /* force a reload */ + return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE) + ? 
-EFAULT : 0; +} +#endif + long arch_ptrace(struct task_struct *child, long request, long addr, long data) { unsigned long tmp; @@ -765,6 +791,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) child->ptrace_message = data; break; +#ifdef CONFIG_CRUNCH + case PTRACE_GETCRUNCHREGS: + ret = ptrace_getcrunchregs(child, (void __user *)data); + break; + + case PTRACE_SETCRUNCHREGS: + ret = ptrace_setcrunchregs(child, (void __user *)data); + break; +#endif + default: ret = ptrace_request(child, request, addr, data); break; diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 1ce05ec086c..83a8d3c95eb 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -132,6 +132,37 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, return ret; } +#ifdef CONFIG_CRUNCH +static int preserve_crunch_context(struct crunch_sigframe *frame) +{ + char kbuf[sizeof(*frame) + 8]; + struct crunch_sigframe *kframe; + + /* the crunch context must be 64 bit aligned */ + kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7); + kframe->magic = CRUNCH_MAGIC; + kframe->size = CRUNCH_STORAGE_SIZE; + crunch_task_copy(current_thread_info(), &kframe->storage); + return __copy_to_user(frame, kframe, sizeof(*frame)); +} + +static int restore_crunch_context(struct crunch_sigframe *frame) +{ + char kbuf[sizeof(*frame) + 8]; + struct crunch_sigframe *kframe; + + /* the crunch context must be 64 bit aligned */ + kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7); + if (__copy_from_user(kframe, frame, sizeof(*frame))) + return -1; + if (kframe->magic != CRUNCH_MAGIC || + kframe->size != CRUNCH_STORAGE_SIZE) + return -1; + crunch_task_restore(current_thread_info(), &kframe->storage); + return 0; +} +#endif + #ifdef CONFIG_IWMMXT static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame) @@ -214,6 +245,10 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) err |= !valid_user_regs(regs); aux = (struct aux_sigframe __user *) sf->uc.uc_regspace; +#ifdef CONFIG_CRUNCH + if (err == 0) + err |= restore_crunch_context(&aux->crunch); +#endif #ifdef CONFIG_IWMMXT if (err == 0 && test_thread_flag(TIF_USING_IWMMXT)) err |= restore_iwmmxt_context(&aux->iwmmxt); @@ -333,6 +368,10 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set)); aux = (struct aux_sigframe __user *) sf->uc.uc_regspace; +#ifdef CONFIG_CRUNCH + if (err == 0) + err |= preserve_crunch_context(&aux->crunch); +#endif #ifdef CONFIG_IWMMXT if (err == 0 && test_thread_flag(TIF_USING_IWMMXT)) err |= preserve_iwmmxt_context(&aux->iwmmxt); diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 2b254e88595..2df9688a702 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -80,6 +80,10 @@ SECTIONS *(.exit.text) *(.exit.data) *(.exitcall.exit) +#ifndef CONFIG_MMU + *(.fixup) + *(__ex_table) +#endif } .text : { /* Real text segment */ @@ -87,7 +91,9 @@ SECTIONS *(.text) SCHED_TEXT LOCK_TEXT +#ifdef CONFIG_MMU *(.fixup) +#endif *(.gnu.warning) *(.rodata) *(.rodata.*) @@ -142,7 +148,9 @@ SECTIONS */ . 
= ALIGN(32); __start___ex_table = .; +#ifdef CONFIG_MMU *(__ex_table) +#endif __stop___ex_table = .; /* diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile index 7b726b627ea..30351cd4560 100644 --- a/arch/arm/lib/Makefile +++ b/arch/arm/lib/Makefile @@ -6,28 +6,31 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \ csumpartialcopy.o csumpartialcopyuser.o clearbit.o \ - copy_page.o delay.o findbit.o memchr.o memcpy.o \ + delay.o findbit.o memchr.o memcpy.o \ memmove.o memset.o memzero.o setbit.o \ strncpy_from_user.o strnlen_user.o \ strchr.o strrchr.o \ testchangebit.o testclearbit.o testsetbit.o \ - getuser.o putuser.o clear_user.o \ ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \ ucmpdi2.o lib1funcs.o div64.o sha1.o \ io-readsb.o io-writesb.o io-readsl.o io-writesl.o +mmu-y := clear_user.o copy_page.o getuser.o putuser.o + # the code in uaccess.S is not preemption safe and # probably faster on ARMv3 only ifeq ($(CONFIG_PREEMPT),y) - lib-y += copy_from_user.o copy_to_user.o + mmu-y += copy_from_user.o copy_to_user.o else ifneq ($(CONFIG_CPU_32v3),y) - lib-y += copy_from_user.o copy_to_user.o + mmu-y += copy_from_user.o copy_to_user.o else - lib-y += uaccess.o + mmu-y += uaccess.o endif endif +lib-$(CONFIG_MMU) += $(mmu-y) + ifeq ($(CONFIG_CPU_32v3),y) lib-y += io-readsw-armv3.o io-writesw-armv3.o else diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S index 058b80d72aa..91f993f2e9d 100644 --- a/arch/arm/lib/backtrace.S +++ b/arch/arm/lib/backtrace.S @@ -97,16 +97,13 @@ ENTRY(c_backtrace) b 1007f /* - * Fixup for LDMDB + * Fixup for LDMDB. Note that this must not be in the fixup section. */ - .section .fixup,"ax" - .align 0 1007: ldr r0, =.Lbad mov r1, frame bl printk ldmfd sp!, {r4 - r8, pc} .ltorg - .previous .section __ex_table,"a" .align 3 diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S index ea435ae2e4a..ecb28dcdaf7 100644 --- a/arch/arm/lib/clear_user.S +++ b/arch/arm/lib/clear_user.S @@ -12,13 +12,13 @@ .text -/* Prototype: int __arch_clear_user(void *addr, size_t sz) +/* Prototype: int __clear_user(void *addr, size_t sz) * Purpose : clear some user memory * Params : addr - user memory address to clear * : sz - number of bytes to clear * Returns : number of bytes NOT cleared */ -ENTRY(__arch_clear_user) +ENTRY(__clear_user) stmfd sp!, {r1, lr} mov r2, #0 cmp r1, #4 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S index 7497393a0e8..6b7363ce749 100644 --- a/arch/arm/lib/copy_from_user.S +++ b/arch/arm/lib/copy_from_user.S @@ -16,7 +16,7 @@ /* * Prototype: * - * size_t __arch_copy_from_user(void *to, const void *from, size_t n) + * size_t __copy_from_user(void *to, const void *from, size_t n) * * Purpose: * @@ -83,7 +83,7 @@ .text -ENTRY(__arch_copy_from_user) +ENTRY(__copy_from_user) #include "copy_template.S" diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S index 4a6d8ea1402..5224d94688d 100644 --- a/arch/arm/lib/copy_to_user.S +++ b/arch/arm/lib/copy_to_user.S @@ -16,7 +16,7 @@ /* * Prototype: * - * size_t __arch_copy_to_user(void *to, const void *from, size_t n) + * size_t __copy_to_user(void *to, const void *from, size_t n) * * Purpose: * @@ -86,7 +86,7 @@ .text -ENTRY(__arch_copy_to_user) +ENTRY(__copy_to_user) #include "copy_template.S" diff --git a/arch/arm/lib/strncpy_from_user.S b/arch/arm/lib/strncpy_from_user.S index 35649f04fca..36e3741a377 100644 --- a/arch/arm/lib/strncpy_from_user.S +++ b/arch/arm/lib/strncpy_from_user.S @@ -20,7 +20,7 @@ * returns the number of characters 
copied (strlen of copied string), * -EFAULT on exception, or "len" if we fill the whole buffer */ -ENTRY(__arch_strncpy_from_user) +ENTRY(__strncpy_from_user) mov ip, r1 1: subs r2, r2, #1 USER( ldrplbt r3, [r1], #1) diff --git a/arch/arm/lib/strnlen_user.S b/arch/arm/lib/strnlen_user.S index 3668a15991e..18d8fa4f925 100644 --- a/arch/arm/lib/strnlen_user.S +++ b/arch/arm/lib/strnlen_user.S @@ -14,13 +14,13 @@ .text .align 5 -/* Prototype: unsigned long __arch_strnlen_user(const char *str, long n) +/* Prototype: unsigned long __strnlen_user(const char *str, long n) * Purpose : get length of a string in user memory * Params : str - address of string in user memory * Returns : length of string *including terminator* * or zero on exception, or n + 1 if too long */ -ENTRY(__arch_strnlen_user) +ENTRY(__strnlen_user) mov r2, r0 1: USER( ldrbt r3, [r0], #1) diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S index 1f1545d737b..b48bd6d5fd8 100644 --- a/arch/arm/lib/uaccess.S +++ b/arch/arm/lib/uaccess.S @@ -19,7 +19,7 @@ #define PAGE_SHIFT 12 -/* Prototype: int __arch_copy_to_user(void *to, const char *from, size_t n) +/* Prototype: int __copy_to_user(void *to, const char *from, size_t n) * Purpose : copy a block to user memory from kernel memory * Params : to - user memory * : from - kernel memory @@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault sub r2, r2, ip b .Lc2u_dest_aligned -ENTRY(__arch_copy_to_user) +ENTRY(__copy_to_user) stmfd sp!, {r2, r4 - r7, lr} cmp r2, #4 blt .Lc2u_not_enough @@ -283,7 +283,7 @@ USER( strgtbt r3, [r0], #1) @ May fault 9001: ldmfd sp!, {r0, r4 - r7, pc} .previous -/* Prototype: unsigned long __arch_copy_from_user(void *to,const void *from,unsigned long n); +/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n); * Purpose : copy a block from user memory to kernel memory * Params : to - kernel memory * : from - user memory @@ -302,7 +302,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault sub r2, r2, ip b .Lcfu_dest_aligned -ENTRY(__arch_copy_from_user) +ENTRY(__copy_from_user) stmfd sp!, {r0, r2, r4 - r7, lr} cmp r2, #4 blt .Lcfu_not_enough diff --git a/arch/arm/mach-ep93xx/Kconfig b/arch/arm/mach-ep93xx/Kconfig index cec5a21ca4e..e15e4c54a25 100644 --- a/arch/arm/mach-ep93xx/Kconfig +++ b/arch/arm/mach-ep93xx/Kconfig @@ -2,8 +2,19 @@ if ARCH_EP93XX menu "Cirrus EP93xx Implementation Options" +config CRUNCH + bool "Support for MaverickCrunch" + help + Enable kernel support for MaverickCrunch. + comment "EP93xx Platforms" +config MACH_EDB9315 + bool "Support Cirrus Logic EDB9315" + help + Say 'Y' here if you want your kernel to support the Cirrus + Logic EDB9315 Evaluation Board. + config MACH_GESBC9312 bool "Support Glomation GESBC-9312-sx" help diff --git a/arch/arm/mach-ep93xx/Makefile b/arch/arm/mach-ep93xx/Makefile index 05a48a21038..dfa7e2e8a18 100644 --- a/arch/arm/mach-ep93xx/Makefile +++ b/arch/arm/mach-ep93xx/Makefile @@ -6,5 +6,6 @@ obj-m := obj-n := obj- := +obj-$(CONFIG_MACH_EDB9315) += edb9315.o obj-$(CONFIG_MACH_GESBC9312) += gesbc9312.o obj-$(CONFIG_MACH_TS72XX) += ts72xx.o diff --git a/arch/arm/mach-ep93xx/edb9315.c b/arch/arm/mach-ep93xx/edb9315.c new file mode 100644 index 00000000000..ef7482faad8 --- /dev/null +++ b/arch/arm/mach-ep93xx/edb9315.c @@ -0,0 +1,62 @@ +/* + * arch/arm/mach-ep93xx/edb9315.c + * Cirrus Logic EDB9315 support. 
+ * + * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/mtd/physmap.h> +#include <linux/platform_device.h> +#include <asm/io.h> +#include <asm/hardware.h> +#include <asm/mach-types.h> +#include <asm/mach/arch.h> + +static struct physmap_flash_data edb9315_flash_data = { + .width = 4, +}; + +static struct resource edb9315_flash_resource = { + .start = 0x60000000, + .end = 0x61ffffff, + .flags = IORESOURCE_MEM, +}; + +static struct platform_device edb9315_flash = { + .name = "physmap-flash", + .id = 0, + .dev = { + .platform_data = &edb9315_flash_data, + }, + .num_resources = 1, + .resource = &edb9315_flash_resource, +}; + +static void __init edb9315_init_machine(void) +{ + ep93xx_init_devices(); + platform_device_register(&edb9315_flash); +} + +MACHINE_START(EDB9315, "Cirrus Logic EDB9315 Evaluation Board") + /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */ + .phys_io = EP93XX_APB_PHYS_BASE, + .io_pg_offst = ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc, + .boot_params = 0x00000100, + .map_io = ep93xx_map_io, + .init_irq = ep93xx_init_irq, + .timer = &ep93xx_timer, + .init_machine = edb9315_init_machine, +MACHINE_END diff --git a/arch/arm/mach-ep93xx/gesbc9312.c b/arch/arm/mach-ep93xx/gesbc9312.c index 47cc6c8b7c7..2c28d66d260 100644 --- a/arch/arm/mach-ep93xx/gesbc9312.c +++ b/arch/arm/mach-ep93xx/gesbc9312.c @@ -30,7 +30,7 @@ static struct physmap_flash_data gesbc9312_flash_data = { static struct resource gesbc9312_flash_resource = { .start = 0x60000000, - .end = 0x60800000, + .end = 0x607fffff, .flags = IORESOURCE_MEM, }; diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c index 6e5a56cd5ae..0b3b875b187 100644 --- a/arch/arm/mach-ep93xx/ts72xx.c +++ b/arch/arm/mach-ep93xx/ts72xx.c @@ -118,7 +118,7 @@ static struct physmap_flash_data ts72xx_flash_data = { static struct resource ts72xx_flash_resource = { .start = TS72XX_NOR_PHYS_BASE, - .end = TS72XX_NOR_PHYS_BASE + 0x01000000, + .end = TS72XX_NOR_PHYS_BASE + 0x00ffffff, .flags = IORESOURCE_MEM, }; diff --git a/arch/arm/mach-ixp23xx/espresso.c b/arch/arm/mach-ixp23xx/espresso.c index dc5e489c70b..357351fbb1e 100644 --- a/arch/arm/mach-ixp23xx/espresso.c +++ b/arch/arm/mach-ixp23xx/espresso.c @@ -59,7 +59,7 @@ static struct physmap_flash_data espresso_flash_data = { static struct resource espresso_flash_resource = { .start = 0x90000000, - .end = 0x92000000, + .end = 0x91ffffff, .flags = IORESOURCE_MEM, }; diff --git a/arch/arm/mach-ixp23xx/ixdp2351.c b/arch/arm/mach-ixp23xx/ixdp2351.c index 535b334ee04..e0886871cc7 100644 --- a/arch/arm/mach-ixp23xx/ixdp2351.c +++ b/arch/arm/mach-ixp23xx/ixdp2351.c @@ -304,7 +304,7 @@ static struct physmap_flash_data ixdp2351_flash_data = { static struct resource ixdp2351_flash_resource = { .start = 0x90000000, - .end = 0x94000000, + .end = 0x93ffffff, .flags = IORESOURCE_MEM, }; diff --git a/arch/arm/mach-ixp23xx/roadrunner.c b/arch/arm/mach-ixp23xx/roadrunner.c index b9f5d13fcfe..92ad18f4125 100644 --- a/arch/arm/mach-ixp23xx/roadrunner.c +++ b/arch/arm/mach-ixp23xx/roadrunner.c @@ -143,7 +143,7 @@ 
static struct physmap_flash_data roadrunner_flash_data = { static struct resource roadrunner_flash_resource = { .start = 0x90000000, - .end = 0x94000000, + .end = 0x93ffffff, .flags = IORESOURCE_MEM, }; diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c index 539b596005f..d9635ff4b10 100644 --- a/arch/arm/mach-pxa/irq.c +++ b/arch/arm/mach-pxa/irq.c @@ -88,8 +88,8 @@ static int pxa_gpio_irq_type(unsigned int irq, unsigned int type) if (type == IRQT_PROBE) { /* Don't mess with enabled GPIOs using preconfigured edges or - GPIOs set to alternate function during probe */ - if ((GPIO_IRQ_rising_edge[idx] | GPIO_IRQ_falling_edge[idx]) & + GPIOs set to alternate function or to output during probe */ + if ((GPIO_IRQ_rising_edge[idx] | GPIO_IRQ_falling_edge[idx] | GPDR(gpio)) & GPIO_bit(gpio)) return 0; if (GAFR(gpio) & (0x3 << (((gpio) & 0xf)*2))) diff --git a/arch/arm/mach-s3c2410/s3c244x.c b/arch/arm/mach-s3c2410/s3c244x.c index 838bc525e83..9a2258270de 100644 --- a/arch/arm/mach-s3c2410/s3c244x.c +++ b/arch/arm/mach-s3c2410/s3c244x.c @@ -69,6 +69,7 @@ void __init s3c244x_map_io(struct map_desc *mach_desc, int size) s3c_device_i2c.name = "s3c2440-i2c"; s3c_device_nand.name = "s3c2440-nand"; + s3c_device_usbgadget.name = "s3c2440-usbgadget"; } void __init s3c244x_init_clocks(int xtal) diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index ecf5e232a6f..c4bca753165 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -15,8 +15,8 @@ config CPU_ARM610 select CPU_32v3 select CPU_CACHE_V3 select CPU_CACHE_VIVT - select CPU_COPY_V3 - select CPU_TLB_V3 + select CPU_COPY_V3 if MMU + select CPU_TLB_V3 if MMU help The ARM610 is the successor to the ARM3 processor and was produced by VLSI Technology Inc. @@ -31,8 +31,8 @@ config CPU_ARM710 select CPU_32v3 select CPU_CACHE_V3 select CPU_CACHE_VIVT - select CPU_COPY_V3 - select CPU_TLB_V3 + select CPU_COPY_V3 if MMU + select CPU_TLB_V3 if MMU help A 32-bit RISC microprocessor based on the ARM7 processor core designed by Advanced RISC Machines Ltd. The ARM710 is the @@ -50,8 +50,8 @@ config CPU_ARM720T select CPU_ABRT_LV4T select CPU_CACHE_V4 select CPU_CACHE_VIVT - select CPU_COPY_V4WT - select CPU_TLB_V4WT + select CPU_COPY_V4WT if MMU + select CPU_TLB_V4WT if MMU help A 32-bit RISC processor with 8kByte Cache, Write Buffer and MMU built around an ARM7TDMI core. @@ -68,8 +68,8 @@ config CPU_ARM920T select CPU_ABRT_EV4T select CPU_CACHE_V4WT select CPU_CACHE_VIVT - select CPU_COPY_V4WB - select CPU_TLB_V4WBI + select CPU_COPY_V4WB if MMU + select CPU_TLB_V4WBI if MMU help The ARM920T is licensed to be produced by numerous vendors, and is used in the Maverick EP9312 and the Samsung S3C2410. @@ -89,8 +89,8 @@ config CPU_ARM922T select CPU_ABRT_EV4T select CPU_CACHE_V4WT select CPU_CACHE_VIVT - select CPU_COPY_V4WB - select CPU_TLB_V4WBI + select CPU_COPY_V4WB if MMU + select CPU_TLB_V4WBI if MMU help The ARM922T is a version of the ARM920T, but with smaller instruction and data caches. It is used in Altera's @@ -108,8 +108,8 @@ config CPU_ARM925T select CPU_ABRT_EV4T select CPU_CACHE_V4WT select CPU_CACHE_VIVT - select CPU_COPY_V4WB - select CPU_TLB_V4WBI + select CPU_COPY_V4WB if MMU + select CPU_TLB_V4WBI if MMU help The ARM925T is a mix between the ARM920T and ARM926T, but with different instruction and data caches. 
It is used in TI's OMAP @@ -126,8 +126,8 @@ config CPU_ARM926T select CPU_32v5 select CPU_ABRT_EV5TJ select CPU_CACHE_VIVT - select CPU_COPY_V4WB - select CPU_TLB_V4WBI + select CPU_COPY_V4WB if MMU + select CPU_TLB_V4WBI if MMU help This is a variant of the ARM920. It has slightly different instruction sequences for cache and TLB operations. Curiously, @@ -144,8 +144,8 @@ config CPU_ARM1020 select CPU_ABRT_EV4T select CPU_CACHE_V4WT select CPU_CACHE_VIVT - select CPU_COPY_V4WB - select CPU_TLB_V4WBI + select CPU_COPY_V4WB if MMU + select CPU_TLB_V4WBI if MMU help The ARM1020 is the 32K cached version of the ARM10 processor, with an addition of a floating-point unit. @@ -161,8 +161,8 @@ config CPU_ARM1020E select CPU_ABRT_EV4T select CPU_CACHE_V4WT select CPU_CACHE_VIVT - select CPU_COPY_V4WB - select CPU_TLB_V4WBI + select CPU_COPY_V4WB if MMU + select CPU_TLB_V4WBI if MMU depends on n # ARM1022E @@ -172,8 +172,8 @@ config CPU_ARM1022 select CPU_32v5 select CPU_ABRT_EV4T select CPU_CACHE_VIVT - select CPU_COPY_V4WB # can probably do better - select CPU_TLB_V4WBI + select CPU_COPY_V4WB if MMU # can probably do better + select CPU_TLB_V4WBI if MMU help The ARM1022E is an implementation of the ARMv5TE architecture based upon the ARM10 integer core with a 16KiB L1 Harvard cache, @@ -189,8 +189,8 @@ config CPU_ARM1026 select CPU_32v5 select CPU_ABRT_EV5T # But need Jazelle, but EV5TJ ignores bit 10 select CPU_CACHE_VIVT - select CPU_COPY_V4WB # can probably do better - select CPU_TLB_V4WBI + select CPU_COPY_V4WB if MMU # can probably do better + select CPU_TLB_V4WBI if MMU help The ARM1026EJ-S is an implementation of the ARMv5TEJ architecture based upon the ARM10 integer core. @@ -207,8 +207,8 @@ config CPU_SA110 select CPU_ABRT_EV4 select CPU_CACHE_V4WB select CPU_CACHE_VIVT - select CPU_COPY_V4WB - select CPU_TLB_V4WB + select CPU_COPY_V4WB if MMU + select CPU_TLB_V4WB if MMU help The Intel StrongARM(R) SA-110 is a 32-bit microprocessor and is available at five speeds ranging from 100 MHz to 233 MHz. @@ -227,7 +227,7 @@ config CPU_SA1100 select CPU_ABRT_EV4 select CPU_CACHE_V4WB select CPU_CACHE_VIVT - select CPU_TLB_V4WB + select CPU_TLB_V4WB if MMU # XScale config CPU_XSCALE @@ -237,7 +237,7 @@ config CPU_XSCALE select CPU_32v5 select CPU_ABRT_EV5T select CPU_CACHE_VIVT - select CPU_TLB_V4WBI + select CPU_TLB_V4WBI if MMU # XScale Core Version 3 config CPU_XSC3 @@ -247,7 +247,7 @@ config CPU_XSC3 select CPU_32v5 select CPU_ABRT_EV5T select CPU_CACHE_VIVT - select CPU_TLB_V4WBI + select CPU_TLB_V4WBI if MMU select IO_36 # ARMv6 @@ -258,8 +258,8 @@ config CPU_V6 select CPU_ABRT_EV6 select CPU_CACHE_V6 select CPU_CACHE_VIPT - select CPU_COPY_V6 - select CPU_TLB_V6 + select CPU_COPY_V6 if MMU + select CPU_TLB_V6 if MMU # ARMv6k config CPU_32v6K @@ -277,17 +277,17 @@ config CPU_32v6K # This defines the compiler instruction set which depends on the machine type. 
config CPU_32v3 bool - select TLS_REG_EMUL if SMP + select TLS_REG_EMUL if SMP || !MMU select NEEDS_SYSCALL_FOR_CMPXCHG if SMP config CPU_32v4 bool - select TLS_REG_EMUL if SMP + select TLS_REG_EMUL if SMP || !MMU select NEEDS_SYSCALL_FOR_CMPXCHG if SMP config CPU_32v5 bool - select TLS_REG_EMUL if SMP + select TLS_REG_EMUL if SMP || !MMU select NEEDS_SYSCALL_FOR_CMPXCHG if SMP config CPU_32v6 @@ -334,6 +334,7 @@ config CPU_CACHE_VIVT config CPU_CACHE_VIPT bool +if MMU # The copy-page model config CPU_COPY_V3 bool @@ -372,6 +373,8 @@ config CPU_TLB_V4WBI config CPU_TLB_V6 bool +endif + # # CPU supports 36-bit I/O # diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 07a53850578..21a2770226e 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -2,10 +2,16 @@ # Makefile for the linux arm-specific parts of the memory manager. # -obj-y := consistent.o extable.o fault-armv.o \ - fault.o flush.o init.o ioremap.o mmap.o \ +obj-y := consistent.o extable.o fault.o init.o \ + iomap.o + +obj-$(CONFIG_MMU) += fault-armv.o flush.o ioremap.o mmap.o \ mm-armv.o +ifneq ($(CONFIG_MMU),y) +obj-y += nommu.o +endif + obj-$(CONFIG_MODULES) += proc-syms.o obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 9ea1f87a707..989fd681c82 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -26,8 +26,6 @@ #include <asm/mach/arch.h> #include <asm/mach/map.h> -#define TABLE_SIZE (2 * PTRS_PER_PTE * sizeof(pte_t)) - DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; diff --git a/arch/arm/mm/iomap.c b/arch/arm/mm/iomap.c new file mode 100644 index 00000000000..62066f3020c --- /dev/null +++ b/arch/arm/mm/iomap.c @@ -0,0 +1,55 @@ +/* + * linux/arch/arm/mm/iomap.c + * + * Map IO port and PCI memory spaces so that {read,write}[bwl] can + * be used to access this memory. 
+ */ +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/ioport.h> + +#include <asm/io.h> + +#ifdef __io +void __iomem *ioport_map(unsigned long port, unsigned int nr) +{ + return __io(port); +} +EXPORT_SYMBOL(ioport_map); + +void ioport_unmap(void __iomem *addr) +{ +} +EXPORT_SYMBOL(ioport_unmap); +#endif + +#ifdef CONFIG_PCI +void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) +{ + unsigned long start = pci_resource_start(dev, bar); + unsigned long len = pci_resource_len(dev, bar); + unsigned long flags = pci_resource_flags(dev, bar); + + if (!len || !start) + return NULL; + if (maxlen && len > maxlen) + len = maxlen; + if (flags & IORESOURCE_IO) + return ioport_map(start, len); + if (flags & IORESOURCE_MEM) { + if (flags & IORESOURCE_CACHEABLE) + return ioremap(start, len); + return ioremap_nocache(start, len); + } + return NULL; +} +EXPORT_SYMBOL(pci_iomap); + +void pci_iounmap(struct pci_dev *dev, void __iomem *addr) +{ + if ((unsigned long)addr >= VMALLOC_START && + (unsigned long)addr < VMALLOC_END) + iounmap(addr); +} +EXPORT_SYMBOL(pci_iounmap); +#endif diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index c1f7180c7be..7691cfdba56 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -176,50 +176,3 @@ void __iounmap(void __iomem *addr) vunmap((void *)(PAGE_MASK & (unsigned long)addr)); } EXPORT_SYMBOL(__iounmap); - -#ifdef __io -void __iomem *ioport_map(unsigned long port, unsigned int nr) -{ - return __io(port); -} -EXPORT_SYMBOL(ioport_map); - -void ioport_unmap(void __iomem *addr) -{ -} -EXPORT_SYMBOL(ioport_unmap); -#endif - -#ifdef CONFIG_PCI -#include <linux/pci.h> -#include <linux/ioport.h> - -void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) -{ - unsigned long start = pci_resource_start(dev, bar); - unsigned long len = pci_resource_len(dev, bar); - unsigned long flags = pci_resource_flags(dev, bar); - - if (!len || !start) - return NULL; - if (maxlen && len > maxlen) - len = maxlen; - if (flags & IORESOURCE_IO) - return ioport_map(start, len); - if (flags & IORESOURCE_MEM) { - if (flags & IORESOURCE_CACHEABLE) - return ioremap(start, len); - return ioremap_nocache(start, len); - } - return NULL; -} -EXPORT_SYMBOL(pci_iomap); - -void pci_iounmap(struct pci_dev *dev, void __iomem *addr) -{ - if ((unsigned long)addr >= VMALLOC_START && - (unsigned long)addr < VMALLOC_END) - iounmap(addr); -} -EXPORT_SYMBOL(pci_iounmap); -#endif diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c new file mode 100644 index 00000000000..1464ed817b5 --- /dev/null +++ b/arch/arm/mm/nommu.c @@ -0,0 +1,39 @@ +/* + * linux/arch/arm/mm/nommu.c + * + * ARM uCLinux supporting functions. 
+ */ +#include <linux/module.h> +#include <linux/mm.h> +#include <linux/pagemap.h> + +#include <asm/cacheflush.h> +#include <asm/io.h> +#include <asm/page.h> + +void flush_dcache_page(struct page *page) +{ + __cpuc_flush_dcache_page(page_address(page)); +} +EXPORT_SYMBOL(flush_dcache_page); + +void __iomem *__ioremap_pfn(unsigned long pfn, unsigned long offset, + size_t size, unsigned long flags) +{ + if (pfn >= (0x100000000ULL >> PAGE_SHIFT)) + return NULL; + return (void __iomem *) (offset + (pfn << PAGE_SHIFT)); +} +EXPORT_SYMBOL(__ioremap_pfn); + +void __iomem *__ioremap(unsigned long phys_addr, size_t size, + unsigned long flags) +{ + return (void __iomem *)phys_addr; +} +EXPORT_SYMBOL(__ioremap); + +void __iounmap(void __iomem *addr) +{ +} +EXPORT_SYMBOL(__iounmap); diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index 959588884fa..b9abbafca81 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -3,6 +3,7 @@ * * Copyright (C) 2000 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. + * hacked for non-paged-MM by Hyok S. Choi, 2003. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -101,7 +102,9 @@ ENTRY(cpu_arm1020_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB +#ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs +#endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ @@ -359,6 +362,7 @@ ENTRY(cpu_arm1020_dcache_clean_area) */ .align 5 ENTRY(cpu_arm1020_switch_mm) +#ifdef CONFIG_MMU #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r3, c7, c10, 4 mov r1, #0xF @ 16 segments @@ -383,6 +387,7 @@ ENTRY(cpu_arm1020_switch_mm) mcr p15, 0, r1, c7, c10, 4 @ drain WB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs +#endif /* CONFIG_MMU */ mov pc, lr /* @@ -392,6 +397,7 @@ ENTRY(cpu_arm1020_switch_mm) */ .align 5 ENTRY(cpu_arm1020_set_pte) +#ifdef CONFIG_MMU str r1, [r0], #-2048 @ linux version eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY @@ -421,6 +427,7 @@ ENTRY(cpu_arm1020_set_pte) mcr p15, 0, r0, c7, c10, 1 @ clean D entry #endif mcr p15, 0, r0, c7, c10, 4 @ drain WB +#endif /* CONFIG_MMU */ mov pc, lr __INIT @@ -430,7 +437,9 @@ __arm1020_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 +#ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 +#endif mrc p15, 0, r0, c1, c0 @ get control register v4 ldr r5, arm1020_cr1_clear bic r0, r0, r5 diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index be6d081ff2b..bcd5ee022e0 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -3,6 +3,7 @@ * * Copyright (C) 2000 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. + * hacked for non-paged-MM by Hyok S. Choi, 2003. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -101,7 +102,9 @@ ENTRY(cpu_arm1020e_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB +#ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs +#endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ 
@@ -344,6 +347,7 @@ ENTRY(cpu_arm1020e_dcache_clean_area) */ .align 5 ENTRY(cpu_arm1020e_switch_mm) +#ifdef CONFIG_MMU #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r3, c7, c10, 4 mov r1, #0xF @ 16 segments @@ -367,6 +371,7 @@ ENTRY(cpu_arm1020e_switch_mm) mcr p15, 0, r1, c7, c10, 4 @ drain WB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs +#endif mov pc, lr /* @@ -376,6 +381,7 @@ ENTRY(cpu_arm1020e_switch_mm) */ .align 5 ENTRY(cpu_arm1020e_set_pte) +#ifdef CONFIG_MMU str r1, [r0], #-2048 @ linux version eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY @@ -403,6 +409,7 @@ ENTRY(cpu_arm1020e_set_pte) #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r0, c7, c10, 1 @ clean D entry #endif +#endif /* CONFIG_MMU */ mov pc, lr __INIT @@ -412,7 +419,9 @@ __arm1020e_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 +#ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 +#endif mrc p15, 0, r0, c1, c0 @ get control register v4 ldr r5, arm1020e_cr1_clear bic r0, r0, r5 diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index f778545d57a..b0ccff4fadd 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -3,6 +3,7 @@ * * Copyright (C) 2000 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. + * hacked for non-paged-MM by Hyok S. Choi, 2003. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -90,7 +91,9 @@ ENTRY(cpu_arm1022_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB +#ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs +#endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ @@ -333,6 +336,7 @@ ENTRY(cpu_arm1022_dcache_clean_area) */ .align 5 ENTRY(cpu_arm1022_switch_mm) +#ifdef CONFIG_MMU #ifndef CONFIG_CPU_DCACHE_DISABLE mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries @@ -349,6 +353,7 @@ ENTRY(cpu_arm1022_switch_mm) mcr p15, 0, r1, c7, c10, 4 @ drain WB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs +#endif mov pc, lr /* @@ -358,6 +363,7 @@ ENTRY(cpu_arm1022_switch_mm) */ .align 5 ENTRY(cpu_arm1022_set_pte) +#ifdef CONFIG_MMU str r1, [r0], #-2048 @ linux version eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY @@ -385,6 +391,7 @@ ENTRY(cpu_arm1022_set_pte) #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r0, c7, c10, 1 @ clean D entry #endif +#endif /* CONFIG_MMU */ mov pc, lr __INIT @@ -394,7 +401,9 @@ __arm1022_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 +#ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 +#endif mrc p15, 0, r0, c1, c0 @ get control register v4 ldr r5, arm1022_cr1_clear bic r0, r0, r5 diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 148c111fde7..abe850c9a64 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -3,6 +3,7 @@ * * Copyright (C) 2000 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. + * hacked for non-paged-MM by Hyok S. Choi, 2003. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -90,7 +91,9 @@ ENTRY(cpu_arm1026_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c10, 4 @ drain WB +#ifdef CONFIG_MMU mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs +#endif mrc p15, 0, ip, c1, c0, 0 @ ctrl register bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x1100 @ ...i...s........ @@ -327,6 +330,7 @@ ENTRY(cpu_arm1026_dcache_clean_area) */ .align 5 ENTRY(cpu_arm1026_switch_mm) +#ifdef CONFIG_MMU mov r1, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE 1: mrc p15, 0, r15, c7, c14, 3 @ test, clean, invalidate @@ -338,6 +342,7 @@ ENTRY(cpu_arm1026_switch_mm) mcr p15, 0, r1, c7, c10, 4 @ drain WB mcr p15, 0, r0, c2, c0, 0 @ load page table pointer mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs +#endif mov pc, lr /* @@ -347,6 +352,7 @@ ENTRY(cpu_arm1026_switch_mm) */ .align 5 ENTRY(cpu_arm1026_set_pte) +#ifdef CONFIG_MMU str r1, [r0], #-2048 @ linux version eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY @@ -374,6 +380,7 @@ ENTRY(cpu_arm1026_set_pte) #ifndef CONFIG_CPU_DCACHE_DISABLE mcr p15, 0, r0, c7, c10, 1 @ clean D entry #endif +#endif /* CONFIG_MMU */ mov pc, lr @@ -384,8 +391,10 @@ __arm1026_setup: mov r0, #0 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 +#ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 mcr p15, 0, r4, c2, c0 @ load page table pointer +#endif #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH mov r0, #4 @ explicitly disable writeback mcr p15, 7, r0, c15, c0, 0 diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 540359b475d..7a705edfa4b 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -2,6 +2,7 @@ * linux/arch/arm/mm/proc-arm6,7.S * * Copyright (C) 1997-2000 Russell King + * hacked for non-paged-MM by Hyok S. Choi, 2003. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -199,10 +200,12 @@ ENTRY(cpu_arm7_do_idle) */ ENTRY(cpu_arm6_switch_mm) ENTRY(cpu_arm7_switch_mm) +#ifdef CONFIG_MMU mov r1, #0 mcr p15, 0, r1, c7, c0, 0 @ flush cache mcr p15, 0, r0, c2, c0, 0 @ update page table ptr mcr p15, 0, r1, c5, c0, 0 @ flush TLBs +#endif mov pc, lr /* @@ -214,6 +217,7 @@ ENTRY(cpu_arm7_switch_mm) .align 5 ENTRY(cpu_arm6_set_pte) ENTRY(cpu_arm7_set_pte) +#ifdef CONFIG_MMU str r1, [r0], #-2048 @ linux version eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY @@ -232,6 +236,7 @@ ENTRY(cpu_arm7_set_pte) movne r2, #0 str r2, [r0] @ hardware version +#endif /* CONFIG_MMU */ mov pc, lr /* @@ -243,7 +248,9 @@ ENTRY(cpu_arm6_reset) ENTRY(cpu_arm7_reset) mov r1, #0 mcr p15, 0, r1, c7, c0, 0 @ flush cache +#ifdef CONFIG_MMU mcr p15, 0, r1, c5, c0, 0 @ flush TLB +#endif mov r1, #0x30 mcr p15, 0, r1, c1, c0, 0 @ turn off MMU etc mov pc, r0 @@ -253,19 +260,27 @@ ENTRY(cpu_arm7_reset) .type __arm6_setup, #function __arm6_setup: mov r0, #0 mcr p15, 0, r0, c7, c0 @ flush caches on v3 +#ifdef CONFIG_MMU mcr p15, 0, r0, c5, c0 @ flush TLBs on v3 mov r0, #0x3d @ . ..RS BLDP WCAM orr r0, r0, #0x100 @ . ..01 0011 1101 +#else + mov r0, #0x3c @ . ..RS BLDP WCA. +#endif mov pc, lr .size __arm6_setup, . 
- __arm6_setup
 .type __arm7_setup, #function
 __arm7_setup: mov r0, #0
 mcr p15, 0, r0, c7, c0 @ flush caches on v3
+#ifdef CONFIG_MMU
 mcr p15, 0, r0, c5, c0 @ flush TLBs on v3
 mcr p15, 0, r0, c3, c0 @ load domain access register
 mov r0, #0x7d @ . ..RS BLDP WCAM
 orr r0, r0, #0x100 @ . ..01 0111 1101
+#else
+ mov r0, #0x7c @ . ..RS BLDP WCA.
+#endif
 mov pc, lr
 .size __arm7_setup, . - __arm7_setup
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 26f00ee2ad9..86102467d37 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -4,6 +4,7 @@
 * Copyright (C) 2000 Steve Hill (sjhill@cotw.com)
 *                    Rob Scott (rscott@mtrob.fdns.net)
 * Copyright (C) 2000 ARM Limited, Deep Blue Solutions Ltd.
+ * hacked for non-paged-MM by Hyok S. Choi, 2004.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -29,6 +30,7 @@
 *   out of 'proc-arm6,7.S' per RMK discussion
 *   07-25-2000 SJH Added idle function.
 *   08-25-2000 DBS Updated for integration of ARM Ltd version.
+ *   04-20-2004 HSC modified for non-paged memory management mode.
 */
 #include <linux/linkage.h>
 #include <linux/init.h>
@@ -75,10 +77,12 @@ ENTRY(cpu_arm720_do_idle)
 * the new.
 */
 ENTRY(cpu_arm720_switch_mm)
+#ifdef CONFIG_MMU
 mov r1, #0
 mcr p15, 0, r1, c7, c7, 0 @ invalidate cache
 mcr p15, 0, r0, c2, c0, 0 @ update page table ptr
 mcr p15, 0, r1, c8, c7, 0 @ flush TLB (v4)
+#endif
 mov pc, lr
 /*
@@ -89,6 +93,7 @@ ENTRY(cpu_arm720_switch_mm)
 */
 .align 5
 ENTRY(cpu_arm720_set_pte)
+#ifdef CONFIG_MMU
 str r1, [r0], #-2048 @ linux version
 eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -107,6 +112,7 @@ ENTRY(cpu_arm720_set_pte)
 movne r2, #0
 str r2, [r0] @ hardware version
+#endif
 mov pc, lr
 /*
@@ -117,7 +123,9 @@ ENTRY(cpu_arm720_set_pte)
 ENTRY(cpu_arm720_reset)
 mov ip, #0
 mcr p15, 0, ip, c7, c7, 0 @ invalidate cache
+#ifdef CONFIG_MMU
 mcr p15, 0, ip, c8, c7, 0 @ flush TLB (v4)
+#endif
 mrc p15, 0, ip, c1, c0, 0 @ get ctrl register
 bic ip, ip, #0x000f @ ............wcam
 bic ip, ip, #0x2100 @ ..v....s........
@@ -130,7 +138,9 @@ ENTRY(cpu_arm720_reset)
 __arm710_setup: mov r0, #0
 mcr p15, 0, r0, c7, c7, 0 @ invalidate caches
+#ifdef CONFIG_MMU
 mcr p15, 0, r0, c8, c7, 0 @ flush TLB (v4)
+#endif
 mrc p15, 0, r0, c1, c0 @ get control register
 ldr r5, arm710_cr1_clear
 bic r0, r0, r5
@@ -156,7 +166,9 @@ arm710_cr1_set:
 __arm720_setup: mov r0, #0
 mcr p15, 0, r0, c7, c7, 0 @ invalidate caches
+#ifdef CONFIG_MMU
 mcr p15, 0, r0, c8, c7, 0 @ flush TLB (v4)
+#endif
 mrc p15, 0, r0, c1, c0 @ get control register
 ldr r5, arm720_cr1_clear
 bic r0, r0, r5
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index a17f79e0199..31dc839ba07 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -3,6 +3,7 @@
 *
 * Copyright (C) 1999,2000 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -97,7 +98,9 @@ ENTRY(cpu_arm920_reset)
 mov ip, #0
 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
 mcr p15, 0, ip, c7, c10, 4 @ drain WB
+#ifdef CONFIG_MMU
 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
 bic ip, ip, #0x000f @ ............wcam
 bic ip, ip, #0x1100 @ ...i...s........
@@ -317,6 +320,7 @@ ENTRY(cpu_arm920_dcache_clean_area)
 */
 .align 5
 ENTRY(cpu_arm920_switch_mm)
+#ifdef CONFIG_MMU
 mov ip, #0
 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
@@ -337,6 +341,7 @@ ENTRY(cpu_arm920_switch_mm)
 mcr p15, 0, ip, c7, c10, 4 @ drain WB
 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
 mov pc, lr
 /*
@@ -346,6 +351,7 @@ ENTRY(cpu_arm920_switch_mm)
 */
 .align 5
 ENTRY(cpu_arm920_set_pte)
+#ifdef CONFIG_MMU
 str r1, [r0], #-2048 @ linux version
 eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -372,6 +378,7 @@ ENTRY(cpu_arm920_set_pte)
 mov r0, r0
 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 mcr p15, 0, r0, c7, c10, 4 @ drain WB
+#endif /* CONFIG_MMU */
 mov pc, lr
 __INIT
@@ -381,7 +388,9 @@ __arm920_setup:
 mov r0, #0
 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
+#ifdef CONFIG_MMU
 mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
+#endif
 mrc p15, 0, r0, c1, c0 @ get control register v4
 ldr r5, arm920_cr1_clear
 bic r0, r0, r5
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index bbde4a024a4..9e57c34f5c0 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -4,6 +4,7 @@
 * Copyright (C) 1999,2000 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * Copyright (C) 2001 Altera Corporation
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -99,7 +100,9 @@ ENTRY(cpu_arm922_reset)
 mov ip, #0
 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
 mcr p15, 0, ip, c7, c10, 4 @ drain WB
+#ifdef CONFIG_MMU
 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
 bic ip, ip, #0x000f @ ............wcam
 bic ip, ip, #0x1100 @ ...i...s........
@@ -321,6 +324,7 @@ ENTRY(cpu_arm922_dcache_clean_area)
 */
 .align 5
 ENTRY(cpu_arm922_switch_mm)
+#ifdef CONFIG_MMU
 mov ip, #0
 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
@@ -341,6 +345,7 @@ ENTRY(cpu_arm922_switch_mm)
 mcr p15, 0, ip, c7, c10, 4 @ drain WB
 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
 mov pc, lr
 /*
@@ -350,6 +355,7 @@ ENTRY(cpu_arm922_switch_mm)
 */
 .align 5
 ENTRY(cpu_arm922_set_pte)
+#ifdef CONFIG_MMU
 str r1, [r0], #-2048 @ linux version
 eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -376,6 +382,7 @@ ENTRY(cpu_arm922_set_pte)
 mov r0, r0
 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 mcr p15, 0, r0, c7, c10, 4 @ drain WB
+#endif /* CONFIG_MMU */
 mov pc, lr
 __INIT
@@ -385,7 +392,9 @@ __arm922_setup:
 mov r0, #0
 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
+#ifdef CONFIG_MMU
 mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
+#endif
 mrc p15, 0, r0, c1, c0 @ get control register v4
 ldr r5, arm922_cr1_clear
 bic r0, r0, r5
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 224ce226a01..8d47c9f3f93 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -9,6 +9,8 @@
 * Update for Linux-2.6 and cache flush improvements
 * Copyright (C) 2004 Nokia Corporation by Tony Lindgren <tony@atomide.com>
 *
+ * hacked for non-paged-MM by Hyok S. Choi, 2004.
+ *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
@@ -122,7 +124,9 @@ ENTRY(cpu_arm925_reset)
 mov ip, #0
 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
 mcr p15, 0, ip, c7, c10, 4 @ drain WB
+#ifdef CONFIG_MMU
 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
 bic ip, ip, #0x000f @ ............wcam
 bic ip, ip, #0x1100 @ ...i...s........
@@ -369,6 +373,7 @@ ENTRY(cpu_arm925_dcache_clean_area)
 */
 .align 5
 ENTRY(cpu_arm925_switch_mm)
+#ifdef CONFIG_MMU
 mov ip, #0
 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
@@ -383,6 +388,7 @@ ENTRY(cpu_arm925_switch_mm)
 mcr p15, 0, ip, c7, c10, 4 @ drain WB
 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
 mov pc, lr
 /*
@@ -392,6 +398,7 @@ ENTRY(cpu_arm925_switch_mm)
 */
 .align 5
 ENTRY(cpu_arm925_set_pte)
+#ifdef CONFIG_MMU
 str r1, [r0], #-2048 @ linux version
 eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -420,6 +427,7 @@ ENTRY(cpu_arm925_set_pte)
 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 #endif
 mcr p15, 0, r0, c7, c10, 4 @ drain WB
+#endif /* CONFIG_MMU */
 mov pc, lr
 __INIT
@@ -438,7 +446,9 @@ __arm925_setup:
 mov r0, #0
 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
+#ifdef CONFIG_MMU
 mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
+#endif
 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
 mov r0, #4 @ disable write-back on caches explicitly
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 4e2a087cf38..cb4d8f33d2a 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -3,6 +3,7 @@
 *
 * Copyright (C) 1999-2001 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -85,7 +86,9 @@ ENTRY(cpu_arm926_reset)
 mov ip, #0
 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
 mcr p15, 0, ip, c7, c10, 4 @ drain WB
+#ifdef CONFIG_MMU
 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
 bic ip, ip, #0x000f @ ............wcam
 bic ip, ip, #0x1100 @ ...i...s........
@@ -329,6 +332,7 @@ ENTRY(cpu_arm926_dcache_clean_area)
 */
 .align 5
 ENTRY(cpu_arm926_switch_mm)
+#ifdef CONFIG_MMU
 mov ip, #0
 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
@@ -341,6 +345,7 @@ ENTRY(cpu_arm926_switch_mm)
 mcr p15, 0, ip, c7, c10, 4 @ drain WB
 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
 mov pc, lr
 /*
@@ -350,6 +355,7 @@ ENTRY(cpu_arm926_switch_mm)
 */
 .align 5
 ENTRY(cpu_arm926_set_pte)
+#ifdef CONFIG_MMU
 str r1, [r0], #-2048 @ linux version
 eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -378,6 +384,7 @@ ENTRY(cpu_arm926_set_pte)
 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 #endif
 mcr p15, 0, r0, c7, c10, 4 @ drain WB
+#endif
 mov pc, lr
 __INIT
@@ -387,7 +394,9 @@ __arm926_setup:
 mov r0, #0
 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
+#ifdef CONFIG_MMU
 mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
+#endif
 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index a2dd5ae1077..5a760a2c629 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -2,6 +2,7 @@
 * linux/arch/arm/mm/proc-sa110.S
 *
 * Copyright (C) 1997-2002 Russell King
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
@@ -67,7 +68,9 @@ ENTRY(cpu_sa110_reset)
 mov ip, #0
 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
 mcr p15, 0, ip, c7, c10, 4 @ drain WB
+#ifdef CONFIG_MMU
 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
 bic ip, ip, #0x000f @ ............wcam
 bic ip, ip, #0x1100 @ ...i...s........
@@ -130,11 +133,15 @@ ENTRY(cpu_sa110_dcache_clean_area)
 */
 .align 5
 ENTRY(cpu_sa110_switch_mm)
+#ifdef CONFIG_MMU
 str lr, [sp, #-4]!
 bl v4wb_flush_kern_cache_all @ clears IP
 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
 ldr pc, [sp], #4
+#else
+ mov pc, lr
+#endif
 /*
 * cpu_sa110_set_pte(ptep, pte)
@@ -143,6 +150,7 @@ ENTRY(cpu_sa110_switch_mm)
 */
 .align 5
 ENTRY(cpu_sa110_set_pte)
+#ifdef CONFIG_MMU
 str r1, [r0], #-2048 @ linux version
 eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -164,6 +172,7 @@ ENTRY(cpu_sa110_set_pte)
 mov r0, r0
 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 mcr p15, 0, r0, c7, c10, 4 @ drain WB
+#endif
 mov pc, lr
 __INIT
@@ -173,7 +182,9 @@ __sa110_setup:
 mov r10, #0
 mcr p15, 0, r10, c7, c7 @ invalidate I,D caches on v4
 mcr p15, 0, r10, c7, c10, 4 @ drain write buffer on v4
+#ifdef CONFIG_MMU
 mcr p15, 0, r10, c8, c7 @ invalidate I,D TLBs on v4
+#endif
 mrc p15, 0, r0, c1, c0 @ get control register v4
 ldr r5, sa110_cr1_clear
 bic r0, r0, r5
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 777ad99c143..0a2107ad4c3 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -2,6 +2,7 @@
 * linux/arch/arm/mm/proc-sa1100.S
 *
 * Copyright (C) 1997-2002 Russell King
+ * hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
@@ -77,7 +78,9 @@ ENTRY(cpu_sa1100_reset)
 mov ip, #0
 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
 mcr p15, 0, ip, c7, c10, 4 @ drain WB
+#ifdef CONFIG_MMU
 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
+#endif
 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
 bic ip, ip, #0x000f @ ............wcam
 bic ip, ip, #0x1100 @ ...i...s........
@@ -142,12 +145,16 @@ ENTRY(cpu_sa1100_dcache_clean_area)
 */
 .align 5
 ENTRY(cpu_sa1100_switch_mm)
+#ifdef CONFIG_MMU
 str lr, [sp, #-4]!
 bl v4wb_flush_kern_cache_all @ clears IP
 mcr p15, 0, ip, c9, c0, 0 @ invalidate RB
 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
 ldr pc, [sp], #4
+#else
+ mov pc, lr
+#endif
 /*
 * cpu_sa1100_set_pte(ptep, pte)
@@ -156,6 +163,7 @@ ENTRY(cpu_sa1100_switch_mm)
 */
 .align 5
 ENTRY(cpu_sa1100_set_pte)
+#ifdef CONFIG_MMU
 str r1, [r0], #-2048 @ linux version
 eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -177,6 +185,7 @@ ENTRY(cpu_sa1100_set_pte)
 mov r0, r0
 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 mcr p15, 0, r0, c7, c10, 4 @ drain WB
+#endif
 mov pc, lr
 __INIT
@@ -186,7 +195,9 @@ __sa1100_setup:
 mov r0, #0
 mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
+#ifdef CONFIG_MMU
 mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
+#endif
 mrc p15, 0, r0, c1, c0 @ get control register v4
 ldr r5, sa1100_cr1_clear
 bic r0, r0, r5
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 09b1a41a6de..ca13d4d05f6 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -2,6 +2,7 @@
 * linux/arch/arm/mm/proc-v6.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
+ * Modified by Catalin Marinas for noMMU support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
@@ -88,6 +89,7 @@ ENTRY(cpu_v6_dcache_clean_area)
 * - we are not using split page tables
 */
 ENTRY(cpu_v6_switch_mm)
+#ifdef CONFIG_MMU
 mov r2, #0
 ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
 #ifdef CONFIG_SMP
@@ -97,6 +99,7 @@ ENTRY(cpu_v6_switch_mm)
 mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
 mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
 mcr p15, 0, r1, c13, c0, 1 @ set context ID
+#endif
 mov pc, lr
 /*
@@ -119,6 +122,7 @@ ENTRY(cpu_v6_switch_mm)
 * 1111 0 1 1 r/w r/w
 */
 ENTRY(cpu_v6_set_pte)
+#ifdef CONFIG_MMU
 str r1, [r0], #-2048 @ linux version
 bic r2, r1, #0x000003f0
@@ -145,6 +149,7 @@ ENTRY(cpu_v6_set_pte)
 str r2, [r0]
 mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+#endif
 mov pc, lr
@@ -194,12 +199,14 @@ __v6_setup:
 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache
 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
+#ifdef CONFIG_MMU
 mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
 mcr p15, 0, r0, c2, c0, 2 @ TTB control register
 #ifdef CONFIG_SMP
 orr r4, r4, #TTB_RGN_WBWA|TTB_S @ mark PTWs shared, outer cacheable
 #endif
 mcr p15, 0, r4, c2, c0, 1 @ load TTB1
+#endif /* CONFIG_MMU */
 #ifdef CONFIG_VFP
 mrc p15, 0, r0, c1, c0, 2
 orr r0, r0, #(0xf << 20)