author     David S. Miller <davem@sunset.davemloft.net>   2006-02-11 00:29:34 -0800
committer  David S. Miller <davem@sunset.davemloft.net>   2006-03-20 01:12:16 -0800
commit     36a68e77c554f1ef1c206fd618e6daf82d3e38a3 (patch)
tree       59a4591b76ef18e54b5b1b2687268e0dd6bb69ef /arch
parent     12eaa328f9fb2d3fcb5afb682c762690d05a3cd8 (diff)
[SPARC64]: Simplify sun4v TLB handling using macros.
There was also a bug in sun4v_itlb_miss: it loaded the MMU Fault Status base into %g3 instead of %g2.

This pointed out a fast path for TSB miss processing: since we have the MMU Fault Status base in %g2, we can use it to quickly load up the PGD physical address.

Signed-off-by: David S. Miller <davem@davemloft.net>
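As a rough illustration of that fast path, here is a minimal C sketch. The struct layout and names (sketch_trap_per_cpu, load_pgd_paddr_fastpath) are made up for this example; only the "step back by the fault-info offset, then load the cached PGD physical address" idea comes from the patch:

#include <stddef.h>

struct sketch_trap_per_cpu {
	unsigned long pgd_paddr;	/* stand-in for the TRAP_PER_CPU_PGD_PADDR slot */
	unsigned long fault_info[16];	/* stand-in for the TRAP_PER_CPU_FAULT_INFO area */
};

/* Treats the fault-status physical address as a plain pointer purely for
 * illustration; the real handler works on physical addresses with asm offsets.
 */
static unsigned long load_pgd_paddr_fastpath(unsigned long fault_status_base)
{
	/* sub  %g2, TRAP_PER_CPU_FAULT_INFO, %g2 */
	struct sketch_trap_per_cpu *tpc = (struct sketch_trap_per_cpu *)
		(fault_status_base - offsetof(struct sketch_trap_per_cpu, fault_info));

	/* ldx  [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7 */
	return tpc->pgd_paddr;
}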
Diffstat (limited to 'arch')
-rw-r--r--  arch/sparc64/kernel/sun4v_tlb_miss.S  | 130
-rw-r--r--  arch/sparc64/kernel/tsb.S             |  18
2 files changed, 61 insertions(+), 87 deletions(-)
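The diff below folds two open-coded computations into the COMPUTE_TAG_TARGET and COMPUTE_TSB_PTR macros. For reference, the same arithmetic written out in plain C, taken from the formulas in the handler's comments; SKETCH_PAGE_SHIFT and the function names are illustrative assumptions, not kernel definitions:

#include <stdint.h>

#define SKETCH_PAGE_SHIFT 13	/* assumes 8K base pages; stand-in for PAGE_SHIFT */

/* COMPUTE_TAG_TARGET: tag target = (ctx << 48) | (vaddr >> 22);
 * a context of zero indicates a kernel TLB miss.
 */
static uint64_t sketch_tag_target(uint64_t vaddr, uint64_t ctx)
{
	return (ctx << 48) | (vaddr >> 22);
}

/* COMPUTE_TSB_PTR: the TSB register packs the base address (upper bits)
 * with a size field in its low three bits:
 *
 *	index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
 *	tsb_base   = tsb_reg & ~0x7UL;
 *	tsb_index  = (vaddr >> PAGE_SHIFT) & index_mask;
 *	tsb_ptr    = tsb_base + (tsb_index * 16);	(16-byte TSB entries)
 */
static uint64_t sketch_tsb_ptr(uint64_t tsb_reg, uint64_t vaddr)
{
	uint64_t index_mask = (512UL << (tsb_reg & 0x7UL)) - 1UL;
	uint64_t tsb_base   = tsb_reg & ~0x7UL;
	uint64_t tsb_index  = (vaddr >> SKETCH_PAGE_SHIFT) & index_mask;

	return tsb_base + tsb_index * 16;
}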
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
index f6222623de3..f7129137f9a 100644
--- a/arch/sparc64/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -6,48 +6,55 @@
.text
.align 32
-sun4v_itlb_miss:
- /* Load MMU Miss base into %g2. */
- ldxa [%g0] ASI_SCRATCHPAD, %g3
-
- /* Load UTSB reg into %g1. */
- mov SCRATCHPAD_UTSBREG1, %g1
- ldxa [%g1] ASI_SCRATCHPAD, %g1
+ /* Load ITLB fault information into VADDR and CTX, using BASE. */
+#define LOAD_ITLB_INFO(BASE, VADDR, CTX) \
+ ldx [BASE + HV_FAULT_I_ADDR_OFFSET], VADDR; \
+ ldx [BASE + HV_FAULT_I_CTX_OFFSET], CTX;
+
+ /* Load DTLB fault information into VADDR and CTX, using BASE. */
+#define LOAD_DTLB_INFO(BASE, VADDR, CTX) \
+ ldx [BASE + HV_FAULT_D_ADDR_OFFSET], VADDR; \
+ ldx [BASE + HV_FAULT_D_CTX_OFFSET], CTX;
- /* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6.
- * Branch if kernel TLB miss. The kernel TSB and user TSB miss
- * code wants the missing virtual address in %g4, so that value
- * cannot be modified through the entirety of this handler.
+ /* DEST = (CTX << 48) | (VADDR >> 22)
+ *
+ * Branch to ZERO_CTX_LABEL if context is zero.
*/
- ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4
- ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5
- srlx %g4, 22, %g3
- sllx %g5, 48, %g6
- or %g6, %g3, %g6
- brz,pn %g5, kvmap_itlb_4v
- nop
+#define COMPUTE_TAG_TARGET(DEST, VADDR, CTX, TMP, ZERO_CTX_LABEL) \
+ srlx VADDR, 22, TMP; \
+ sllx CTX, 48, DEST; \
+ brz,pn CTX, ZERO_CTX_LABEL; \
+ or DEST, TMP, DEST;
/* Create TSB pointer. This is something like:
*
* index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
* tsb_base = tsb_reg & ~0x7UL;
- */
- and %g1, 0x7, %g3
- andn %g1, 0x7, %g1
- mov 512, %g7
- sllx %g7, %g3, %g7
- sub %g7, 1, %g7
-
- /* TSB index mask is in %g7, tsb base is in %g1. Compute
- * the TSB entry pointer into %g1:
- *
* tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
* tsb_ptr = tsb_base + (tsb_index * 16);
*/
- srlx %g4, PAGE_SHIFT, %g3
- and %g3, %g7, %g3
- sllx %g3, 4, %g3
- add %g1, %g3, %g1
+#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, TMP1, TMP2) \
+ and TSB_PTR, 0x7, TMP1; \
+ mov 512, TMP2; \
+ andn TSB_PTR, 0x7, TSB_PTR; \
+ sllx TMP2, TMP1, TMP2; \
+ srlx VADDR, PAGE_SHIFT, TMP1; \
+ sub TMP2, 1, TMP2; \
+ and TMP1, TMP2, TMP1; \
+ sllx TMP1, 4, TMP1; \
+ add TSB_PTR, TMP1, TSB_PTR;
+
+sun4v_itlb_miss:
+ /* Load MMU Miss base into %g2. */
+ ldxa [%g0] ASI_SCRATCHPAD, %g2
+
+ /* Load UTSB reg into %g1. */
+ mov SCRATCHPAD_UTSBREG1, %g1
+ ldxa [%g1] ASI_SCRATCHPAD, %g1
+
+ LOAD_ITLB_INFO(%g2, %g4, %g5)
+ COMPUTE_TAG_TARGET(%g6, %g4, %g5, %g3, kvmap_itlb_4v)
+ COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
ldda [%g1] ASI_QUAD_LDD_PHYS, %g2
@@ -91,40 +98,9 @@ sun4v_dtlb_miss:
mov SCRATCHPAD_UTSBREG1, %g1
ldxa [%g1 + %g1] ASI_SCRATCHPAD, %g1
- /* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6.
- * Branch if kernel TLB miss. The kernel TSB and user TSB miss
- * code wants the missing virtual address in %g4, so that value
- * cannot be modified through the entirety of this handler.
- */
- ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
- ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5
- srlx %g4, 22, %g3
- sllx %g5, 48, %g6
- or %g6, %g3, %g6
- brz,pn %g5, kvmap_dtlb_4v
- nop
-
- /* Create TSB pointer. This is something like:
- *
- * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
- * tsb_base = tsb_reg & ~0x7UL;
- */
- and %g1, 0x7, %g3
- andn %g1, 0x7, %g1
- mov 512, %g7
- sllx %g7, %g3, %g7
- sub %g7, 1, %g7
-
- /* TSB index mask is in %g7, tsb base is in %g1. Compute
- * the TSB entry pointer into %g1:
- *
- * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
- * tsb_ptr = tsb_base + (tsb_index * 16);
- */
- srlx %g4, PAGE_SHIFT, %g3
- and %g3, %g7, %g3
- sllx %g3, 4, %g3
- add %g1, %g3, %g1
+ LOAD_DTLB_INFO(%g2, %g4, %g5)
+ COMPUTE_TAG_TARGET(%g6, %g4, %g5, %g3, kvmap_dtlb_4v)
+ COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
ldda [%g1] ASI_QUAD_LDD_PHYS, %g2
@@ -169,7 +145,8 @@ sun4v_dtlb_prot:
mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
/* Called from trap table with TAG TARGET placed into
- * %g6 and SCRATCHPAD_UTSBREG1 contents in %g1.
+ * %g6, SCRATCHPAD_UTSBREG1 contents in %g1, and
+ * SCRATCHPAD_MMU_MISS contents in %g2.
*/
sun4v_itsb_miss:
ba,pt %xcc, sun4v_tsb_miss_common
@@ -189,16 +166,15 @@ sun4v_dtsb_miss:
* tsb_ptr = tsb_base + (tsb_index * 16);
*/
sun4v_tsb_miss_common:
- and %g1, 0x7, %g2
- andn %g1, 0x7, %g1
- mov 512, %g7
- sllx %g7, %g2, %g7
- sub %g7, 1, %g7
- srlx %g4, PAGE_SHIFT, %g2
- and %g2, %g7, %g2
- sllx %g2, 4, %g2
- ba,pt %xcc, tsb_miss_page_table_walk
- add %g1, %g2, %g1
+ COMPUTE_TSB_PTR(%g1, %g4, %g5, %g7)
+
+ /* Branch directly to page table lookup. We have SCRATCHPAD_MMU_MISS
+ * still in %g2, so it's quite trivial to get at the PGD PHYS value
+ * so we can preload it into %g7.
+ */
+ sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2
+ ba,pt %xcc, tsb_miss_page_table_walk_sun4v_fastpath
+ ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7
/* Instruction Access Exception, tl0. */
sun4v_iacc:
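For context on the "Load TSB tag/pte into %g2/%g3 and compare the tag" step above, a hedged C sketch of the TSB entry check follows. The 16-byte tag/TTE entry layout is inferred from the comments in this file; the type and function names are illustrative only:

#include <stdint.h>

/* One 16-byte TSB entry: tag word followed by the TTE (pte) word. */
struct sketch_tsb_entry {
	uint64_t tag;
	uint64_t tte;
};

/* ldda [%g1] ASI_QUAD_LDD_PHYS, %g2 loads the tag/TTE pair in one go; the
 * handler then compares the tag against the TAG TARGET in %g6.  On a match
 * the TTE can be installed directly; on a mismatch control falls through to
 * the TSB-miss path.
 */
static int sketch_tsb_hit(const struct sketch_tsb_entry *ent,
			  uint64_t tag_target, uint64_t *tte_out)
{
	if (ent->tag != tag_target)
		return 0;		/* miss: go walk the page tables */
	*tte_out = ent->tte;		/* hit: TTE ready for the TLB */
	return 1;
}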
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 8a9351258af..667dcb077be 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -25,26 +25,24 @@
*/
tsb_miss_dtlb:
mov TLB_TAG_ACCESS, %g4
- ldxa [%g4] ASI_DMMU, %g4
ba,pt %xcc, tsb_miss_page_table_walk
- nop
+ ldxa [%g4] ASI_DMMU, %g4
tsb_miss_itlb:
mov TLB_TAG_ACCESS, %g4
- ldxa [%g4] ASI_IMMU, %g4
ba,pt %xcc, tsb_miss_page_table_walk
- nop
+ ldxa [%g4] ASI_IMMU, %g4
- /* The sun4v TLB miss handlers jump directly here instead
- * of tsb_miss_{d,i}tlb with registers setup as follows:
- *
- * %g4: missing virtual address
- * %g1: TSB entry address loaded
- * %g6: TAG TARGET ((vaddr >> 22) | (ctx << 48))
+ /* At this point we have:
+ * %g4 -- missing virtual address
+ * %g1 -- TSB entry address
+ * %g6 -- TAG TARGET ((vaddr >> 22) | (ctx << 48))
*/
tsb_miss_page_table_walk:
TRAP_LOAD_PGD_PHYS(%g7, %g5)
+ /* And now we have the PGD base physical address in %g7. */
+tsb_miss_page_table_walk_sun4v_fastpath:
USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
tsb_reload: