From e31c243f984628d02f045dc4b622f1e2827860dc Mon Sep 17 00:00:00 2001
From: David Howells
Date: Thu, 10 Apr 2008 16:10:55 +0100
Subject: FRV: Add support for emulation of userspace atomic ops [try #2]

Use traps 120-126 to emulate atomic cmpxchg32, xchg32, and XOR-, OR-,
AND-, SUB- and ADD-to-memory operations for userspace.

Signed-off-by: David Howells
Signed-off-by: Linus Torvalds
---
 arch/frv/kernel/entry-table.S |   8 +-
 arch/frv/kernel/entry.S       |  20 ++++
 arch/frv/kernel/traps.c       | 227 ++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 254 insertions(+), 1 deletion(-)
(limited to 'arch/frv')

diff --git a/arch/frv/kernel/entry-table.S b/arch/frv/kernel/entry-table.S
index d3b9253d862..bf35f33e48c 100644
--- a/arch/frv/kernel/entry-table.S
+++ b/arch/frv/kernel/entry-table.S
@@ -316,8 +316,14 @@ __trap_fixup_kernel_data_tlb_miss:
 	.section .trap.vector
 	.org	TBR_TT_TRAP0 >> 2
 	.long	system_call
-	.rept	126
+	.rept	119
 	.long	__entry_unsupported_trap
 	.endr
+
+	# userspace atomic op emulation, traps 120-126
+	.rept	7
+	.long	__entry_atomic_op
+	.endr
+
 	.org	TBR_TT_BREAK >> 2
 	.long	__entry_debug_exception

diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index f36d7f4a7c2..b8a4b94779b 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -654,6 +654,26 @@ __entry_debug_exception:
 	movgs		gr4,psr
 	jmpl		@(gr5,gr0)	; call ill_insn(esfr1,epcr0,esr0)
 
+###############################################################################
+#
+# handle atomic operation emulation for userspace
+#
+###############################################################################
+	.globl		__entry_atomic_op
+__entry_atomic_op:
+	LEDS		0x6012
+	sethi.p		%hi(atomic_operation),gr5
+	setlo		%lo(atomic_operation),gr5
+	movsg		esfr1,gr8
+	movsg		epcr0,gr9
+	movsg		esr0,gr10
+
+	# now that we've accessed the exception regs, we can enable exceptions
+	movsg		psr,gr4
+	ori		gr4,#PSR_ET,gr4
+	movgs		gr4,psr
+	jmpl		@(gr5,gr0)	; call atomic_operation(esfr1,epcr0,esr0)
+
 ###############################################################################
 #
 # handle media exception

diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c
index 2e6098c8557..2f7e66877f3 100644
--- a/arch/frv/kernel/traps.c
+++ b/arch/frv/kernel/traps.c
@@ -100,6 +100,233 @@ asmlinkage void illegal_instruction(unsigned long esfr1, unsigned long epcr0, un
 	force_sig_info(info.si_signo, &info, current);
 } /* end illegal_instruction() */
 
+/*****************************************************************************/
+/*
+ * handle atomic operations with errors
+ * - arguments in gr8, gr9, gr10
+ * - original memory value placed in gr5
+ * - replacement memory value placed in gr9
+ */
+asmlinkage void atomic_operation(unsigned long esfr1, unsigned long epcr0,
+				 unsigned long esr0)
+{
+	static DEFINE_SPINLOCK(atomic_op_lock);
+	unsigned long x, y, z, *p;
+	mm_segment_t oldfs;
+	siginfo_t info;
+	int ret;
+
+	y = 0;
+	z = 0;
+
+	oldfs = get_fs();
+	if (!user_mode(__frame))
+		set_fs(KERNEL_DS);
+
+	switch (__frame->tbr & TBR_TT) {
+		/* TIRA gr0,#120
+		 * u32 __atomic_user_cmpxchg32(u32 *ptr, u32 test, u32 new)
+		 */
+	case TBR_TT_ATOMIC_CMPXCHG32:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+		y = __frame->gr10;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			if (z != x)
+				goto done;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				if (z != x)
+					goto done2;
+
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#121
+		 * u32 __atomic_kernel_xchg32(void *v, u32 new)
+		 */
+	case TBR_TT_ATOMIC_XCHG32:
+		p = (unsigned long *) __frame->gr8;
+		y = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#122
+		 * ulong __atomic_kernel_XOR_return(ulong i, ulong *v)
+		 */
+	case TBR_TT_ATOMIC_XOR:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = x ^ z;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#123
+		 * ulong __atomic_kernel_OR_return(ulong i, ulong *v)
+		 */
+	case TBR_TT_ATOMIC_OR:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = x | z;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#124
+		 * ulong __atomic_kernel_AND_return(ulong i, ulong *v)
+		 */
+	case TBR_TT_ATOMIC_AND:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = x & z;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#125
+		 * int __atomic_user_sub_return(atomic_t *v, int i)
+		 */
+	case TBR_TT_ATOMIC_SUB:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = z - x;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#126
+		 * int __atomic_user_add_return(atomic_t *v, int i)
+		 */
+	case TBR_TT_ATOMIC_ADD:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = z + x;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+	default:
+		BUG();
+	}
+
+done2:
+	spin_unlock_irq(&atomic_op_lock);
+done:
+	if (!user_mode(__frame))
+		set_fs(oldfs);
+	__frame->gr5 = z;
+	__frame->gr9 = y;
+	return;
+
+error2:
+	spin_unlock_irq(&atomic_op_lock);
+error:
+	if (!user_mode(__frame))
+		set_fs(oldfs);
+	__frame->pc -= 4;
+
+	die_if_kernel("-- Atomic Op Error --\n");
+
+	info.si_signo	= SIGSEGV;
+	info.si_code	= SEGV_ACCERR;
+	info.si_errno	= 0;
+	info.si_addr	= (void *) __frame->pc;
+
+	force_sig_info(info.si_signo, &info, current);
+}
+
 /*****************************************************************************/
 /*
  *
--
cgit v1.2.3
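
For illustration, the emulation above is reached from userspace by a TIRA
instruction with the operands preloaded into gr8-gr10.  The wrapper below is a
hedged sketch only, not part of the patch: the function name user_cmpxchg32()
and the exact inline-asm constraints are assumptions; the register roles
(arguments in gr8-gr10, original memory value returned in gr5, replacement
value in gr9) are taken from atomic_operation()'s comments.

	/* Hypothetical userspace caller for trap #120 (cmpxchg32 emulation).
	 * Register roles follow the handler's comments above. */
	static inline unsigned int user_cmpxchg32(volatile unsigned int *ptr,
						  unsigned int test,
						  unsigned int new)
	{
		register volatile unsigned int *gr8 asm("gr8") = ptr;
		register unsigned int gr9 asm("gr9") = test;	/* handler writes the replacement value back here */
		register unsigned int gr10 asm("gr10") = new;
		register unsigned int gr5 asm("gr5");		/* handler returns the original memory value here */

		asm volatile("tira gr0,#120"
			     : "=r"(gr5), "+r"(gr9)
			     : "r"(gr8), "r"(gr10)
			     : "memory");

		return gr5;	/* equals `test' iff the exchange happened */
	}

On success the returned value equals `test'; if the access faults, the handler
instead backs the PC up over the TIRA (__frame->pc -= 4) and raises SIGSEGV.
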
From ed9b949f55bc8a6bb6083ce0eddb53d06aee302a Mon Sep 17 00:00:00 2001
From: David Howells
Date: Thu, 10 Apr 2008 16:11:00 +0100
Subject: FRV: Make NOMMU-mode work with base addresses other than 0xC0000000
 [try #2]

Make NOMMU-mode work with base addresses other than 0xC0000000 by:

 (1) Giving the code that sets up the protection registers the right address
     in __sdram_base.  Rather than being hard-coded to 0xC0000000,
     __sdram_base now takes its value from the __page_offset symbol supplied
     by the linker script.

 (2) Eliminating the check in __switch_to() that verifies the current thread
     info is in the 0xCxxxxxxx region.

Signed-off-by: David Howells
Signed-off-by: Linus Torvalds
---
 arch/frv/kernel/head.inc    | 2 +-
 arch/frv/kernel/switch_to.S | 7 -------
 2 files changed, 1 insertion(+), 8 deletions(-)
(limited to 'arch/frv')

diff --git a/arch/frv/kernel/head.inc b/arch/frv/kernel/head.inc
index d424cd2eb21..bff66628b99 100644
--- a/arch/frv/kernel/head.inc
+++ b/arch/frv/kernel/head.inc
@@ -46,5 +46,5 @@
 #ifdef CONFIG_MMU
 __sdram_base = 0x00000000	/* base address to which SDRAM relocated */
 #else
-__sdram_base = 0xc0000000	/* base address to which SDRAM relocated */
+__sdram_base = __page_offset	/* base address to which SDRAM relocated */
 #endif

diff --git a/arch/frv/kernel/switch_to.S b/arch/frv/kernel/switch_to.S
index b5275fa9cd0..b06668670fc 100644
--- a/arch/frv/kernel/switch_to.S
+++ b/arch/frv/kernel/switch_to.S
@@ -102,13 +102,6 @@ __switch_to:
 	movgs	gr14,lr
 	bar
 
-	srli	gr15,#28,gr5
-	subicc	gr5,#0xc,gr0,icc0
-	beq	icc0,#0,111f
-	break
-	nop
-111:
-
 	# jump to __switch_back or ret_from_fork as appropriate
 	# - move prev to GR8
 	movgs	gr4,psr
--
cgit v1.2.3
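
The second patch relies on the linker script exporting __page_offset.  As a
hedged sketch, not taken from the patch or from the real FRV linker script
(whose wording may differ), a board whose SDRAM sits at 0x40000000 could
relocate the kernel base with a definition along these lines:

	/* Hypothetical linker-script excerpt: supply __page_offset for a
	 * board with SDRAM at 0x40000000; head.inc then derives
	 * __sdram_base = __page_offset from it. */
	__page_offset = 0x40000000;

	SECTIONS
	{
		. = __page_offset;	/* lay out the kernel at the new base */
	}

With this arrangement only the linker script names the base address; the
protection-register setup follows it through __sdram_base, and __switch_to()
no longer hard-wires the 0xCxxxxxxx assumption.
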