author	David S. Miller <davem@sunset.davemloft.net>	2006-02-01 15:55:21 -0800
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 01:11:32 -0800
commit	517af33237ecfc3c8a93b335365fa61e741ceca4 (patch)
tree	58eff40eb4c517c4fd49fd347d38273ee1e1ee4b /arch/sparc64/kernel/tsb.S
parent	b0fd4e49aea8a460afab7bc67cd618e2d19291d4 (diff)
[SPARC64]: Access TSB with physical addresses when possible.
This way we don't need to lock the TSB into the TLB.  The trick is that
every TSB load/store is registered into a special instruction patch
section.  The default uses virtual addresses, and the patch instructions
use physical address load/stores.

We can't do this on all chips because only cheetah+ and later have the
physical variant of the atomic quad load.

Signed-off-by: David S. Miller <davem@davemloft.net>
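To illustrate the patching scheme the message describes, here is a
minimal sketch of what such a patchable accessor could look like as an
assembler macro.  TSB_STORE and ASI_PHYS_USE_EC appear in the diff
below; the section name .tsb_phys_patch, ASI_N as the virtual-address
ASI, and the entry layout are assumptions inferred from the commit
message, not necessarily the exact kernel macros:

	/* Sketch only: emit the default (virtual-address) store inline,
	 * and record in a dedicated section the address of that
	 * instruction together with its physical-address replacement,
	 * so early boot code can patch it in on chips that support it.
	 */
	#define TSB_STORE(ADDR, VAL)			\
	661:	stxa	VAL, [ADDR] ASI_N;		\
		.section .tsb_phys_patch, "ax";		\
		.word	661b;				\
		stxa	VAL, [ADDR] ASI_PHYS_USE_EC;	\
		.previous;

Each expansion costs one extra {address, instruction} pair in the patch
section, but adds no runtime overhead when the default is kept.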
Diffstat (limited to 'arch/sparc64/kernel/tsb.S')
-rw-r--r--	arch/sparc64/kernel/tsb.S	35
1 file changed, 30 insertions(+), 5 deletions(-)
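The boot-time side of the scheme can be sketched in C, under assumed
names: the struct layout, the section-bound symbols, and the function
name below are illustrative, not the kernel's exact code.  The idea is
to walk the patch section and overwrite each recorded instruction with
its physical-address variant, flushing the instruction cache as we go:

	/* Hypothetical sketch of the boot-time patcher implied by the
	 * commit message; intended to run early, and only on cheetah+
	 * or later chips, which have the physical variant of the
	 * atomic quad load.
	 */
	struct tsb_phys_patch_entry {
		unsigned int	addr;	/* instruction to patch */
		unsigned int	insn;	/* physical-address replacement */
	};

	/* Section bounds provided by the linker script (assumed names). */
	extern struct tsb_phys_patch_entry __tsb_phys_patch,
					   __tsb_phys_patch_end;

	static void __init tsb_phys_patch(void)
	{
		struct tsb_phys_patch_entry *p = &__tsb_phys_patch;

		while (p < &__tsb_phys_patch_end) {
			unsigned long addr = p->addr;

			/* Overwrite the virtual-address instruction in
			 * place, then flush the I-cache line so the new
			 * instruction is fetched.
			 */
			*(unsigned int *) addr = p->insn;
			wmb();
			__asm__ __volatile__("flush	%0"
					     : /* no outputs */
					     : "r" (addr));
			p++;
		}
	}

Keeping both variants in the binary and choosing once at boot avoids
any per-access check on a path as hot as the TSB miss handlers.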
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index e1dd37f5e53..ff6a79beb98 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -53,7 +53,7 @@ tsb_reload:
/* Load and check PTE. */
ldxa [%g5] ASI_PHYS_USE_EC, %g5
brgez,a,pn %g5, tsb_do_fault
- stx %g0, [%g1]
+ TSB_STORE(%g1, %g0)

/* If it is larger than the base page size, don't
* bother putting it into the TSB.
@@ -64,7 +64,7 @@ tsb_reload:
and %g2, %g4, %g2
cmp %g2, %g7
bne,a,pn %xcc, tsb_tlb_reload
- stx %g0, [%g1]
+ TSB_STORE(%g1, %g0)

TSB_WRITE(%g1, %g5, %g6)
@@ -131,13 +131,13 @@ winfix_trampoline:
/* Insert an entry into the TSB.
*
- * %o0: TSB entry pointer
+ * %o0: TSB entry pointer (virt or phys address)
* %o1: tag
* %o2: pte
*/
.align 32
- .globl tsb_insert
-tsb_insert:
+ .globl __tsb_insert
+__tsb_insert:
rdpr %pstate, %o5
wrpr %o5, PSTATE_IE, %pstate
TSB_LOCK_TAG(%o0, %g2, %g3)
@@ -146,6 +146,31 @@ tsb_insert:
retl
nop

+ /* Flush the given TSB entry if it has the matching
+ * tag.
+ *
+ * %o0: TSB entry pointer (virt or phys address)
+ * %o1: tag
+ */
+ .align 32
+ .globl tsb_flush
+tsb_flush:
+ sethi %hi(TSB_TAG_LOCK_HIGH), %g2
+1: TSB_LOAD_TAG(%o0, %g1)
+ srlx %g1, 32, %o3
+ andcc %o3, %g2, %g0
+ bne,pn %icc, 1b
+ membar #LoadLoad
+ cmp %g1, %o1
+ bne,pt %xcc, 2f
+ clr %o3
+ TSB_CAS_TAG(%o0, %g1, %o3)
+ cmp %g1, %o3
+ bne,pn %xcc, 1b
+ nop
+2: retl
+ TSB_MEMBAR
+
/* Reload MMU related context switch state at
* schedule() time.
*