| author | Hirokazu Takata <takata@linux-m32r.org> | 2006-02-20 18:28:17 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-02-20 20:00:12 -0800 |
| commit | cf535ea52e68e3ee6f4a90cc383faa1ee857f14d (patch) | |
| tree | 1217d6b27e99c81b4e9cf003ddd4f71a93ad0c2b /arch | |
| parent | b04ec261bd64f927bf3fce5cf9eeb0225557939d (diff) | |
[PATCH] m32r: update sys_tas() routine
This patch updates and fixes the sys_tas() routine for m32r.
In the previous implementation, a lockup could occasionally occur in the
sys_tas() routine in an SMP environment.
> > The problem is that touching *addr will generate an oops if that page isn't
> > paged in. If we convert it to use get_user() then that's an improvement,
> > but we must not run get_user() under spinlock or local_irq_disable().
I rewrote the sys_tas() routine using "lock -> unlock" instructions,
taking advantage of the m32r's interrupt-handling characteristics: the
m32r processor accepts interrupts only at 32-bit instruction boundaries,
so the "unlock" instruction is executed immediately after the "lock"
instruction without any interruption in between.
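For reference, the operation that the "lock -> unlock" pair performs atomically is a plain exchange. A minimal userspace sketch of those semantics, with GCC's __atomic_exchange_n builtin standing in for the m32r instruction pair (tas_semantics is just an illustrative name, not part of the patch):

```c
#include <stdio.h>

/* Sketch of the semantics sys_tas() provides: atomically read the old
 * value and store 1. __atomic_exchange_n stands in here for the m32r
 * "lock -> unlock" instruction pair used in the patch.
 */
static int tas_semantics(int *addr)
{
	return __atomic_exchange_n(addr, 1, __ATOMIC_SEQ_CST);
}

int main(void)
{
	int lockword = 0;

	printf("first tas:  %d\n", tas_semantics(&lockword)); /* 0: we got it */
	printf("second tas: %d\n", tas_semantics(&lockword)); /* 1: already taken */
	return 0;
}
```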
In addition, to handle such a page-fault case, I use fixup code similar
to get_user().
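The fixup in the patch uses the kernel's __ex_table mechanism (see the diff below). As a loose userspace analogue of the same idea, namely "recover from a faulting access and return -EFAULT instead of oopsing", here is a hypothetical sketch that catches SIGSEGV and jumps back to a recovery point (read_int_checked and segv_handler are illustrative names):

```c
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* Loose userspace analogue of the kernel fixup idea: a faulting access
 * lands in a handler that redirects control to recovery code, which
 * returns an error instead of crashing the program.
 */
static sigjmp_buf recover;

static void segv_handler(int sig)
{
	(void)sig;
	siglongjmp(recover, 1);		/* "jump to the fixup code" */
}

/* Returns 0 on success, -1 (think -EFAULT) if the address faults. */
static int read_int_checked(int *addr, int *val)
{
	if (sigsetjmp(recover, 1))
		return -1;		/* reached via the fault handler */
	*val = *addr;
	return 0;
}

int main(void)
{
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sigemptyset(&sa.sa_mask);
	sa.sa_handler = segv_handler;
	sigaction(SIGSEGV, &sa, NULL);

	/* A page we are not allowed to touch, to force a fault. */
	int *bad = mmap(NULL, 4096, PROT_NONE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int good = 42, val;

	printf("good addr: %d\n", read_int_checked(&good, &val) ? -1 : val);
	printf("bad addr:  %s\n",
	       read_int_checked(bad, &val) ? "fault handled" : "ok");
	return 0;
}
```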
As for the kernel lockup problem, we found that calling do_page_fault()
with interrupts disabled could cause a lockup in flush_tlb_others(),
because the completion of the IPI handlers' operations was checked
inside a spin-locked critical section.
Therefore, by using the "lock -> unlock" code, we can implement the
sys_tas() routine without explicitly disabling interrupts, so no lockups
should occur in flush_tlb_others().
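The lockup pattern above, reduced to a hypothetical two-thread sketch: one side spin-waits for an acknowledgement (like the CPU waiting in flush_tlb_others()), while the side that would acknowledge cannot run because it is stuck behind a lock the waiter holds (like a CPU sitting in a spin-locked, interrupts-disabled section). The names cpu_b and acked are illustrative only:

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical reduction of the lockup: thread A holds a lock and
 * spin-waits for B's acknowledgement; B must take the same lock before
 * it can acknowledge, so neither side makes progress.
 */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int acked;

static void *cpu_b(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);	/* analogue of the irq-disabled section */
	atomic_store(&acked, 1);	/* the "IPI handler" that never runs */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t b;

	pthread_mutex_lock(&lock);	/* A enters its critical section */
	pthread_create(&b, NULL, cpu_b, NULL);

	/* A waits for B's ack without releasing the lock: deadlock.
	 * A real kernel would hang here; this demo gives up after ~2 s.
	 */
	for (int i = 0; i < 20 && !atomic_load(&acked); i++)
		usleep(100 * 1000);

	puts(atomic_load(&acked) ? "acked (unexpected)"
				 : "no ack: this is the lockup pattern");

	pthread_mutex_unlock(&lock);
	pthread_join(b, NULL);
	return 0;
}
```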
A compile check and some functional testing in an SMP environment have been done.
Signed-off-by: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/m32r/kernel/sys_m32r.c | 61 |
1 file changed, 34 insertions, 27 deletions
diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c
index fe55b28d372..670cb49210a 100644
--- a/arch/m32r/kernel/sys_m32r.c
+++ b/arch/m32r/kernel/sys_m32r.c
@@ -29,28 +29,7 @@
 
 /*
  * sys_tas() - test-and-set
- *	linuxthreads testing version
  */
-#ifndef CONFIG_SMP
-asmlinkage int sys_tas(int *addr)
-{
-	int oldval;
-	unsigned long flags;
-
-	if (!access_ok(VERIFY_WRITE, addr, sizeof (int)))
-		return -EFAULT;
-	local_irq_save(flags);
-	oldval = *addr;
-	if (!oldval)
-		*addr = 1;
-	local_irq_restore(flags);
-	return oldval;
-}
-#else /* CONFIG_SMP */
-#include <linux/spinlock.h>
-
-static DEFINE_SPINLOCK(tas_lock);
-
 asmlinkage int sys_tas(int *addr)
 {
 	int oldval;
@@ -58,15 +37,43 @@ asmlinkage int sys_tas(int *addr)
 
 	if (!access_ok(VERIFY_WRITE, addr, sizeof (int)))
 		return -EFAULT;
-	_raw_spin_lock(&tas_lock);
-	oldval = *addr;
-	if (!oldval)
-		*addr = 1;
-	_raw_spin_unlock(&tas_lock);
+	/* atomic operation:
+	 *	oldval = *addr; *addr = 1;
+	 */
+	__asm__ __volatile__ (
+		DCACHE_CLEAR("%0", "r4", "%1")
+		"	.fillinsn\n"
+		"1:\n"
+		"	lock	%0, @%1  ->  unlock	%2, @%1\n"
+		"2:\n"
+		/* NOTE:
+		 *   The m32r processor can accept interrupts only
+		 *   at the 32-bit instruction boundary.
+		 *   So, in the above code, the "unlock" instruction
+		 *   can be executed continuously after the "lock"
+		 *   instruction execution without any interruptions.
+		 */
+		".section .fixup,\"ax\"\n"
+		"	.balign 4\n"
+		"3:	ldi	%0, #%3\n"
+		"	seth	r14, #high(2b)\n"
+		"	or3	r14, r14, #low(2b)\n"
+		"	jmp	r14\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+		"	.balign 4\n"
+		"	.long 1b,3b\n"
+		".previous\n"
+		: "=&r" (oldval)
+		: "r" (addr), "r" (1), "i"(-EFAULT)
+		: "r14", "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		, "r4"
+#endif /* CONFIG_CHIP_M32700_TS1 */
+	);
 
 	return oldval;
 }
-#endif /* CONFIG_SMP */
 
 /*
  * sys_pipe() is the normal C calling standard for creating
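For context, the removed comment mentions linuxthreads: a test-and-set primitive with the sys_tas() contract (return the old value, leave 1 behind) is typically consumed by a userspace locking layer as a spinlock. A hypothetical caller-side sketch, with a GCC builtin standing in for the actual syscall so it runs anywhere:

```c
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

/* Hypothetical caller-side sketch: a simple spinlock built on a
 * test-and-set primitive. tas() models the sys_tas() contract;
 * __atomic_exchange_n stands in for the real syscall.
 */
static int tas(int *addr)
{
	return __atomic_exchange_n(addr, 1, __ATOMIC_ACQUIRE);
}

static int lockword;
static long counter;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		while (tas(&lockword))		/* old value 1: already owned */
			sched_yield();
		counter++;			/* critical section */
		__atomic_store_n(&lockword, 0, __ATOMIC_RELEASE);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	printf("counter = %ld (expect 400000)\n", counter);
	return 0;
}
```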