author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/sparc64/lib/U3memcpy.S
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/sparc64/lib/U3memcpy.S')
-rw-r--r--	arch/sparc64/lib/U3memcpy.S	422
1 file changed, 422 insertions(+), 0 deletions(-)
diff --git a/arch/sparc64/lib/U3memcpy.S b/arch/sparc64/lib/U3memcpy.S
new file mode 100644
index 00000000000..7cae9cc6a20
--- /dev/null
+++ b/arch/sparc64/lib/U3memcpy.S
@@ -0,0 +1,422 @@
+/* U3memcpy.S: UltraSparc-III optimized memcpy.
+ *
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ */
+
+#ifdef __KERNEL__
+#include <asm/visasm.h>
+#include <asm/asi.h>
+#define GLOBAL_SPARE %g7
+#else
+#define ASI_BLK_P 0xf0
+#define FPRS_FEF 0x04
+#ifdef MEMCPY_DEBUG
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
+ clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#else
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#endif
+#define GLOBAL_SPARE %g5
+#endif
+
+#ifndef EX_LD
+#define EX_LD(x) x
+#endif
+
+#ifndef EX_ST
+#define EX_ST(x) x
+#endif
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x) x
+#endif
+
+#ifndef LOAD
+#define LOAD(type,addr,dest) type [addr], dest
+#endif
+
+#ifndef STORE
+#define STORE(type,src,addr) type src, [addr]
+#endif
+
+#ifndef STORE_BLK
+#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_P
+#endif
+
+#ifndef FUNC_NAME
+#define FUNC_NAME U3memcpy
+#endif
+
+#ifndef PREAMBLE
+#define PREAMBLE
+#endif
+
+#ifndef XCC
+#define XCC xcc
+#endif
+
+ .register %g2,#scratch
+ .register %g3,#scratch
+
+ /* Special/non-trivial issues of this code:
+ *
+ * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
+ * 2) Only low 32 FPU registers are used so that only the
+ * lower half of the FPU register set is dirtied by this
+ * code. This is especially important in the kernel.
+ * 3) This code never prefetches cachelines past the end
+ * of the source buffer.
+ */
+
+ .text
+ .align 64
+
+ /* The cheetah's flexible spine, oversized liver, enlarged heart,
+ * slender muscular body, and claws make it the swiftest hunter
+ * in Africa and the fastest animal on land. Can reach speeds
+ * of up to 2.4GB per second.
+ */
+
+ .globl FUNC_NAME
+ .type FUNC_NAME,#function
+FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
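+	/* Sanity check: software trap 5 fires if bit 31 or above is
+	 * set in 'len'; a copy that large is assumed to be a bug.
+	 */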
+ srlx %o2, 31, %g2
+ cmp %g2, 0
+ tne %xcc, 5
+ PREAMBLE
+ mov %o0, %o4
+ cmp %o2, 0
+ be,pn %XCC, 85f
+ or %o0, %o1, %o3
+ cmp %o2, 16
+ blu,a,pn %XCC, 80f
+ or %o3, %o2, %o3
+
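+	/* Copies shorter than 3 cachelines (192 bytes) are not worth
+	 * the VIS setup cost; they take the simpler loops at 70f.
+	 */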
+ cmp %o2, (3 * 64)
+ blu,pt %XCC, 70f
+ andcc %o3, 0x7, %g0
+
+ /* Clobbers o5/g1/g2/g3/g7/icc/xcc. We must preserve
+ * o5 from here until we hit VISExitHalf.
+ */
+ VISEntryHalf
+
+	/* Is 'dst' already aligned on a 64-byte boundary? */
+ andcc %o0, 0x3f, %g2
+ be,pt %XCC, 2f
+
+ /* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
+ * of bytes to copy to make 'dst' 64-byte aligned. We pre-
+ * subtract this from 'len'.
+ */
+ sub %o0, %o1, GLOBAL_SPARE
+ sub %g2, 0x40, %g2
+ sub %g0, %g2, %g2
+ sub %o2, %g2, %o2
+ andcc %g2, 0x7, %g1
+ be,pt %icc, 2f
+ and %g2, 0x38, %g2
+
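+	/* Copy the sub-word piece of the prefix (%g1 = prefix & 7) a
+	 * byte at a time; the remaining 8-byte multiples (%g2) are
+	 * shuffled into place with faligndata at 2f below.
+	 */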
+1: subcc %g1, 0x1, %g1
+ EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
+ EX_ST(STORE(stb, %o3, %o1 + GLOBAL_SPARE))
+ bgu,pt %XCC, 1b
+ add %o1, 0x1, %o1
+
+ add %o1, GLOBAL_SPARE, %o0
+
+2: cmp %g2, 0x0
+ and %o1, 0x7, %g1
+ be,pt %icc, 3f
+ alignaddr %o1, %g0, %o1
+
+ EX_LD(LOAD(ldd, %o1, %f4))
+1: EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
+ add %o1, 0x8, %o1
+ subcc %g2, 0x8, %g2
+ faligndata %f4, %f6, %f0
+ EX_ST(STORE(std, %f0, %o0))
+ be,pn %icc, 3f
+ add %o0, 0x8, %o0
+
+ EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
+ add %o1, 0x8, %o1
+ subcc %g2, 0x8, %g2
+ faligndata %f6, %f4, %f2
+ EX_ST(STORE(std, %f2, %o0))
+ bne,pt %icc, 1b
+ add %o0, 0x8, %o0
+
+3: LOAD(prefetch, %o1 + 0x000, #one_read)
+ LOAD(prefetch, %o1 + 0x040, #one_read)
+ andn %o2, (0x40 - 1), GLOBAL_SPARE
+ LOAD(prefetch, %o1 + 0x080, #one_read)
+ LOAD(prefetch, %o1 + 0x0c0, #one_read)
+ LOAD(prefetch, %o1 + 0x100, #one_read)
+ EX_LD(LOAD(ldd, %o1 + 0x000, %f0))
+ LOAD(prefetch, %o1 + 0x140, #one_read)
+ EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
+ LOAD(prefetch, %o1 + 0x180, #one_read)
+ EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
+ LOAD(prefetch, %o1 + 0x1c0, #one_read)
+ faligndata %f0, %f2, %f16
+ EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
+ faligndata %f2, %f4, %f18
+ EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
+ faligndata %f4, %f6, %f20
+ EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
+ faligndata %f6, %f8, %f22
+
+ EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
+ faligndata %f8, %f10, %f24
+ EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
+ faligndata %f10, %f12, %f26
+ EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
+
+ subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE
+ add %o1, 0x40, %o1
+ bgu,pt %XCC, 1f
+ srl GLOBAL_SPARE, 6, %o3
+ ba,pt %xcc, 2f
+ nop
+
+ .align 64
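+	/* Main unrolled loop: each pass loads the next 64 bytes of src
+	 * with 8-byte ldd's, rotates them into dst alignment with
+	 * faligndata, emits the previous 64-byte block via a block
+	 * store, and prefetches 0x1c0 bytes ahead of the loads.
+	 */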
+1:
+ EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
+ faligndata %f12, %f14, %f28
+ EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
+ faligndata %f14, %f0, %f30
+ EX_ST(STORE_BLK(%f16, %o0))
+ EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
+ faligndata %f0, %f2, %f16
+ add %o0, 0x40, %o0
+
+ EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
+ faligndata %f2, %f4, %f18
+ EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
+ faligndata %f4, %f6, %f20
+ EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
+ subcc %o3, 0x01, %o3
+ faligndata %f6, %f8, %f22
+ EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
+
+ faligndata %f8, %f10, %f24
+ EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
+ LOAD(prefetch, %o1 + 0x1c0, #one_read)
+ faligndata %f10, %f12, %f26
+ bg,pt %XCC, 1b
+ add %o1, 0x40, %o1
+
+ /* Finally we copy the last full 64-byte block. */
+2:
+ EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
+ faligndata %f12, %f14, %f28
+ EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
+ faligndata %f14, %f0, %f30
+ EX_ST(STORE_BLK(%f16, %o0))
+ EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
+ faligndata %f0, %f2, %f16
+ EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
+ faligndata %f2, %f4, %f18
+ EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
+ faligndata %f4, %f6, %f20
+ EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
+ faligndata %f6, %f8, %f22
+ EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
+ faligndata %f8, %f10, %f24
+ cmp %g1, 0
+ be,pt %XCC, 1f
+ add %o0, 0x40, %o0
+ EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
+1: faligndata %f10, %f12, %f26
+ faligndata %f12, %f14, %f28
+ faligndata %f14, %f0, %f30
+ EX_ST(STORE_BLK(%f16, %o0))
+ add %o0, 0x40, %o0
+ add %o1, 0x40, %o1
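+	/* Block stores are weakly ordered; membar #Sync ensures they
+	 * have completed before the FPU state is handed back below.
+	 */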
+ membar #Sync
+
+ /* Now we copy the (len modulo 64) bytes at the end.
+ * Note how we borrow the %f0 loaded above.
+ *
+ * Also notice how this code is careful not to perform a
+ * load past the end of the src buffer.
+ */
+ and %o2, 0x3f, %o2
+ andcc %o2, 0x38, %g2
+ be,pn %XCC, 2f
+ subcc %g2, 0x8, %g2
+ be,pn %XCC, 2f
+ cmp %g1, 0
+
+ sub %o2, %g2, %o2
+ be,a,pt %XCC, 1f
+ EX_LD(LOAD(ldd, %o1 + 0x00, %f0))
+
+1: EX_LD(LOAD(ldd, %o1 + 0x08, %f2))
+ add %o1, 0x8, %o1
+ subcc %g2, 0x8, %g2
+ faligndata %f0, %f2, %f8
+ EX_ST(STORE(std, %f8, %o0))
+ be,pn %XCC, 2f
+ add %o0, 0x8, %o0
+ EX_LD(LOAD(ldd, %o1 + 0x08, %f0))
+ add %o1, 0x8, %o1
+ subcc %g2, 0x8, %g2
+ faligndata %f2, %f0, %f8
+ EX_ST(STORE(std, %f8, %o0))
+ bne,pn %XCC, 1b
+ add %o0, 0x8, %o0
+
+	/* If anything is left, we copy it one byte at a time.
+	 * Note that %g1 is (src & 0x7) saved above before the
+	 * alignaddr was performed.
+	 */
+2:
+ cmp %o2, 0
+ add %o1, %g1, %o1
+ VISExitHalf
+ be,pn %XCC, 85f
+ sub %o0, %o1, %o3
+
+ andcc %g1, 0x7, %g0
+ bne,pn %icc, 90f
+ andcc %o2, 0x8, %g0
+ be,pt %icc, 1f
+ nop
+ EX_LD(LOAD(ldx, %o1, %o5))
+ EX_ST(STORE(stx, %o5, %o1 + %o3))
+ add %o1, 0x8, %o1
+
+1: andcc %o2, 0x4, %g0
+ be,pt %icc, 1f
+ nop
+ EX_LD(LOAD(lduw, %o1, %o5))
+ EX_ST(STORE(stw, %o5, %o1 + %o3))
+ add %o1, 0x4, %o1
+
+1: andcc %o2, 0x2, %g0
+ be,pt %icc, 1f
+ nop
+ EX_LD(LOAD(lduh, %o1, %o5))
+ EX_ST(STORE(sth, %o5, %o1 + %o3))
+ add %o1, 0x2, %o1
+
+1: andcc %o2, 0x1, %g0
+ be,pt %icc, 85f
+ nop
+ EX_LD(LOAD(ldub, %o1, %o5))
+ ba,pt %xcc, 85f
+ EX_ST(STORE(stb, %o5, %o1 + %o3))
+
+ .align 64
+70:	/* 16 <= len < 192 */
+ bne,pn %XCC, 75f
+ sub %o0, %o1, %o3
+
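+	/* dst and src are both 8-byte aligned here: move 16 bytes per
+	 * iteration with paired ldx/stx, then mop up the 8- and 4-byte
+	 * remainders at 73f and below.
+	 */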
+72:
+ andn %o2, 0xf, GLOBAL_SPARE
+ and %o2, 0xf, %o2
+1: subcc GLOBAL_SPARE, 0x10, GLOBAL_SPARE
+ EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
+ EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
+ EX_ST(STORE(stx, %o5, %o1 + %o3))
+ add %o1, 0x8, %o1
+ EX_ST(STORE(stx, %g1, %o1 + %o3))
+ bgu,pt %XCC, 1b
+ add %o1, 0x8, %o1
+73: andcc %o2, 0x8, %g0
+ be,pt %XCC, 1f
+ nop
+ sub %o2, 0x8, %o2
+ EX_LD(LOAD(ldx, %o1, %o5))
+ EX_ST(STORE(stx, %o5, %o1 + %o3))
+ add %o1, 0x8, %o1
+1: andcc %o2, 0x4, %g0
+ be,pt %XCC, 1f
+ nop
+ sub %o2, 0x4, %o2
+ EX_LD(LOAD(lduw, %o1, %o5))
+ EX_ST(STORE(stw, %o5, %o1 + %o3))
+ add %o1, 0x4, %o1
+1: cmp %o2, 0
+ be,pt %XCC, 85f
+ nop
+ ba,pt %xcc, 90f
+ nop
+
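+	/* dst or src is not 8-byte aligned: advance dst to an 8-byte
+	 * boundary one byte at a time, then rejoin 72b/73b, or fall
+	 * through to 8f if src remains misaligned relative to dst.
+	 */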
+75:
+ andcc %o0, 0x7, %g1
+ sub %g1, 0x8, %g1
+ be,pn %icc, 2f
+ sub %g0, %g1, %g1
+ sub %o2, %g1, %o2
+
+1: subcc %g1, 1, %g1
+ EX_LD(LOAD(ldub, %o1, %o5))
+ EX_ST(STORE(stb, %o5, %o1 + %o3))
+ bgu,pt %icc, 1b
+ add %o1, 1, %o1
+
+2: add %o1, %o3, %o0
+ andcc %o1, 0x7, %g1
+ bne,pt %icc, 8f
+ sll %g1, 3, %g1
+
+ cmp %o2, 16
+ bgeu,pt %icc, 72b
+ nop
+ ba,a,pt %xcc, 73b
+
+8: mov 64, %o3
+ andn %o1, 0x7, %o1
+ EX_LD(LOAD(ldx, %o1, %g2))
+ sub %o3, %g1, %o3
+ andn %o2, 0x7, GLOBAL_SPARE
+ sllx %g2, %g1, %g2
+1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
+ subcc GLOBAL_SPARE, 0x8, GLOBAL_SPARE
+ add %o1, 0x8, %o1
+ srlx %g3, %o3, %o5
+ or %o5, %g2, %o5
+ EX_ST(STORE(stx, %o5, %o0))
+ add %o0, 0x8, %o0
+ bgu,pt %icc, 1b
+ sllx %g3, %g1, %g2
+
+ srl %g1, 3, %g1
+ andcc %o2, 0x7, %o2
+ be,pn %icc, 85f
+ add %o1, %g1, %o1
+ ba,pt %xcc, 90f
+ sub %o0, %o1, %o3
+
+ .align 64
+80:	/* 0 < len < 16 */
+ andcc %o3, 0x3, %g0
+ bne,pn %XCC, 90f
+ sub %o0, %o1, %o3
+
+1:
+ subcc %o2, 4, %o2
+ EX_LD(LOAD(lduw, %o1, %g1))
+ EX_ST(STORE(stw, %g1, %o1 + %o3))
+ bgu,pt %XCC, 1b
+ add %o1, 4, %o1
+
+85: retl
+ mov EX_RETVAL(%o4), %o0
+
+ .align 32
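+	/* Final fallback: copy the leftovers one byte at a time. */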
+90:
+ subcc %o2, 1, %o2
+ EX_LD(LOAD(ldub, %o1, %g1))
+ EX_ST(STORE(stb, %g1, %o1 + %o3))
+ bgu,pt %XCC, 90b
+ add %o1, 1, %o1
+ retl
+ mov EX_RETVAL(%o4), %o0
+
+ .size FUNC_NAME, .-FUNC_NAME
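
As a reading aid, the overall shape of the routine can be modeled in
portable C. This is an illustrative sketch only: the function and
variable names are hypothetical, and it omits everything that makes the
real code fast (VIS faligndata, 64-byte block stores, prefetching, and
the 8/4/2/1-byte tail splitting).

	#include <stddef.h>
	#include <stdint.h>

	static void byte_copy(unsigned char **d, const unsigned char **s,
	                      size_t n)
	{
		while (n--)
			*(*d)++ = *(*s)++;
	}

	void *u3memcpy_model(void *dst, const void *src, size_t len)
	{
		unsigned char *d = dst;
		const unsigned char *s = src;

		if (len >= 3 * 64) {
			/* abs((dst & 0x3f) - 0x40): bytes needed to reach
			 * the next 64-byte boundary, pre-subtracted from
			 * len exactly as the assembly does. */
			size_t head = (0x40 - ((uintptr_t)d & 0x3f)) & 0x3f;

			byte_copy(&d, &s, head);
			len -= head;

			/* Bulk phase: one 64-byte cacheline per pass; the
			 * real loop never loads past src + len. */
			for (size_t blocks = len >> 6; blocks; blocks--) {
				for (int i = 0; i < 64; i++)
					d[i] = s[i];
				d += 64;
				s += 64;
			}
			len &= 0x3f;
		}

		byte_copy(&d, &s, len);	/* sub-cacheline tail */
		return dst;
	}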