author     David S. Miller <davem@davemloft.net>         2006-01-31 18:34:51 -0800
committer  David S. Miller <davem@sunset.davemloft.net>  2006-03-20 01:11:29 -0800
commit     86b818687d4894063ecd1190e54717a0cce8c009
tree       d2951295358502c88f7fe0c02517d729cff4eb9a /arch
parent     9954863975910a1b9372b7d5006a6cba43bdd288
[SPARC64]: Fix race in LOAD_PER_CPU_BASE()
Since we use %g5 itself as a temporary, it can get clobbered
if we take an interrupt mid-stream, and we can thus end up with
the final %g5 value too early as a result of rtrap processing.
Set %g5 at the very end, atomically, to avoid this problem.
Signed-off-by: David S. Miller <davem@davemloft.net>
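
For readers unfamiliar with the macro, the sketch below shows the shape of
the race and of the fix.  It is an illustrative reconstruction from the
call-site changes in this diff, not the verbatim contents of
include/asm-sparc64/cpudata.h, and the exact instruction sequence in the
tree may differ.  The macro computes the per-cpu area base of the current
cpu (found via the thread_info pointer in %g6) into %g5.  With only two
scratch registers, the old version had to press %g5 itself into service
mid-sequence; a trap taken between the two writes to %g5 returns through
rtrap, which restores the fully resolved per-cpu base into %g5 and corrupts
the in-flight calculation.  The fixed version takes a third scratch
register and writes %g5 exactly once, in the last instruction:

    /* Illustrative sketch only, not the verbatim kernel macro.
     * %g6 holds the current thread_info pointer on entry.
     */

    /* Before: two temporaries force %g5 into use as scratch, so %g5
     * is written more than once.  A trap between the two writes lets
     * rtrap reload the finished per-cpu base into %g5 underneath us.
     */
    #define LOAD_PER_CPU_BASE(REG1, REG2)			\
    	ldub	[%g6 + TI_CPU], REG1;			\
    	sethi	%hi(__per_cpu_shift), REG2;		\
    	ldx	[REG2 + %lo(__per_cpu_shift)], REG2;	\
    	sllx	REG1, REG2, %g5;	/* early write to %g5 */ \
    	sethi	%hi(__per_cpu_base), REG2;		\
    	ldx	[REG2 + %lo(__per_cpu_base)], REG2;	\
    	add	%g5, REG2, %g5;		/* final write to %g5 */

    /* After: a third temporary carries the intermediate value, and the
     * final add is the only write to %g5.  A trap arriving at any point
     * before that can clobber %g5 without harm.
     */
    #define LOAD_PER_CPU_BASE(REG1, REG2, REG3)		\
    	ldub	[%g6 + TI_CPU], REG1;			\
    	sethi	%hi(__per_cpu_shift), REG3;		\
    	sethi	%hi(__per_cpu_base), REG2;		\
    	ldx	[REG3 + %lo(__per_cpu_shift)], REG3;	\
    	ldx	[REG2 + %lo(__per_cpu_base)], REG2;	\
    	sllx	REG1, REG3, REG3;			\
    	add	REG3, REG2, %g5;	/* sole write to %g5 */

Each call site accordingly passes one extra scratch register that is dead
at that point, which is all the hunks below change: %l1 in etrap.S,
%i0-%i2 in rtrap.S, and %g3 in winfixup.S.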
Diffstat (limited to 'arch')
 arch/sparc64/kernel/etrap.S    | 4 ++--
 arch/sparc64/kernel/rtrap.S    | 2 +-
 arch/sparc64/kernel/winfixup.S | 6 +++---
 3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
index db768101729..d974d18b15b 100644
--- a/arch/sparc64/kernel/etrap.S
+++ b/arch/sparc64/kernel/etrap.S
@@ -100,7 +100,7 @@ etrap_irq:
 		stx	%i7, [%sp + PTREGS_OFF + PT_V9_I7]
 		wrpr	%g0, ETRAP_PSTATE2, %pstate
 		mov	%l6, %g6
-		LOAD_PER_CPU_BASE(%g4, %g3)
+		LOAD_PER_CPU_BASE(%g4, %g3, %l1)
 		jmpl	%l2 + 0x4, %g0
 		 ldx	[%g6 + TI_TASK], %g4
@@ -250,7 +250,7 @@ scetrap:
 		stx	%i6, [%sp + PTREGS_OFF + PT_V9_I6]
 		mov	%l6, %g6
 		stx	%i7, [%sp + PTREGS_OFF + PT_V9_I7]
-		LOAD_PER_CPU_BASE(%g4, %g3)
+		LOAD_PER_CPU_BASE(%g4, %g3, %l1)
 		ldx	[%g6 + TI_TASK], %g4
 		done
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index 89794ebdcbc..64bc03610bc 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -226,7 +226,7 @@ rt_continue:	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
 		brz,pt	%l3, 1f
 		 nop
 		/* Must do this before thread reg is clobbered below. */
-		LOAD_PER_CPU_BASE(%g6, %g7)
+		LOAD_PER_CPU_BASE(%i0, %i1, %i2)
 1:
 		ldx	[%sp + PTREGS_OFF + PT_V9_G6], %g6
 		ldx	[%sp + PTREGS_OFF + PT_V9_G7], %g7
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
index c0545d089c9..ade991b7d07 100644
--- a/arch/sparc64/kernel/winfixup.S
+++ b/arch/sparc64/kernel/winfixup.S
@@ -86,7 +86,7 @@ fill_fixup:
 	wrpr	%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
 	mov	%o7, %g6
 	ldx	[%g6 + TI_TASK], %g4
-	LOAD_PER_CPU_BASE(%g1, %g2)
+	LOAD_PER_CPU_BASE(%g1, %g2, %g3)
 
 	/* This is the same as below, except we handle this a bit special
 	 * since we must preserve %l5 and %l6, see comment above.
@@ -209,7 +209,7 @@ fill_fixup_mna:
 	wrpr	%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
 	mov	%o7, %g6		! Get current back.
 	ldx	[%g6 + TI_TASK], %g4	! Finish it.
-	LOAD_PER_CPU_BASE(%g1, %g2)
+	LOAD_PER_CPU_BASE(%g1, %g2, %g3)
 	call	mem_address_unaligned
 	 add	%sp, PTREGS_OFF, %o0
@@ -312,7 +312,7 @@ fill_fixup_dax:
 	wrpr	%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
 	mov	%o7, %g6		! Get current back.
 	ldx	[%g6 + TI_TASK], %g4	! Finish it.
-	LOAD_PER_CPU_BASE(%g1, %g2)
+	LOAD_PER_CPU_BASE(%g1, %g2, %g3)
 	call	spitfire_data_access_exception
 	 add	%sp, PTREGS_OFF, %o0