Diffstat (limited to 'arch/mips/lib/csum_partial.S')
-rw-r--r--  arch/mips/lib/csum_partial.S | 293
1 file changed, 154 insertions, 139 deletions
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 15611d9df7a..9db357294be 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -12,43 +12,66 @@
#include <asm/regdef.h>
#ifdef CONFIG_64BIT
-#define T0 ta0
-#define T1 ta1
-#define T2 ta2
-#define T3 ta3
-#define T4 t0
-#define T7 t3
-#else
-#define T0 t0
-#define T1 t1
-#define T2 t2
-#define T3 t3
-#define T4 t4
-#define T7 t7
+/*
+ * As we share this code base with the mips32 tree (which uses the o32 ABI
+ * register definitions), we need to redefine the registers from their
+ * n64 ABI names to the o32 ABI names.
+ */
+#undef t0
+#undef t1
+#undef t2
+#undef t3
+#define t0 $8
+#define t1 $9
+#define t2 $10
+#define t3 $11
+#define t4 $12
+#define t5 $13
+#define t6 $14
+#define t7 $15
+
+#define USE_DOUBLE
#endif
+#ifdef USE_DOUBLE
+
+#define LOAD ld
+#define ADD daddu
+#define NBYTES 8
+
+#else
+
+#define LOAD lw
+#define ADD addu
+#define NBYTES 4
+
+#endif /* USE_DOUBLE */
+
+#define UNIT(unit) ((unit)*NBYTES)
+
#define ADDC(sum,reg) \
- addu sum, reg; \
+ ADD sum, reg; \
sltu v1, sum, reg; \
- addu sum, v1
+ ADD sum, v1
-#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \
- lw _t0, (offset + 0x00)(src); \
- lw _t1, (offset + 0x04)(src); \
- lw _t2, (offset + 0x08)(src); \
- lw _t3, (offset + 0x0c)(src); \
- ADDC(sum, _t0); \
- ADDC(sum, _t1); \
- ADDC(sum, _t2); \
- ADDC(sum, _t3); \
- lw _t0, (offset + 0x10)(src); \
- lw _t1, (offset + 0x14)(src); \
- lw _t2, (offset + 0x18)(src); \
- lw _t3, (offset + 0x1c)(src); \
+#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \
+ LOAD _t0, (offset + UNIT(0))(src); \
+ LOAD _t1, (offset + UNIT(1))(src); \
+ LOAD _t2, (offset + UNIT(2))(src); \
+ LOAD _t3, (offset + UNIT(3))(src); \
ADDC(sum, _t0); \
ADDC(sum, _t1); \
ADDC(sum, _t2); \
- ADDC(sum, _t3); \
+ ADDC(sum, _t3)
+
+#ifdef USE_DOUBLE
+#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \
+ CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)
+#else
+#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \
+ CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3); \
+ CSUM_BIGCHUNK1(src, offset + 0x10, sum, _t0, _t1, _t2, _t3)
+#endif
/*
* a0: source address
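
In plain C terms, the ADDC macro above is an end-around-carry add, and CSUM_BIGCHUNK simply applies it to a run of consecutive words. A minimal sketch of the 32-bit (no USE_DOUBLE) case, with hypothetical helper names rather than kernel API:

	/* Illustrative sketch only; helper names are hypothetical. */
	#include <stdint.h>
	#include <string.h>

	static uint32_t addc32(uint32_t sum, uint32_t v)
	{
		sum += v;
		if (sum < v)		/* carry out of bit 31: fold it back in, as ADDC does */
			sum++;
		return sum;
	}

	/* Rough equivalent of CSUM_BIGCHUNK1 with NBYTES == 4: add four consecutive words. */
	static uint32_t csum_chunk16(const unsigned char *src, uint32_t sum)
	{
		uint32_t w;
		int i;

		for (i = 0; i < 4; i++) {
			memcpy(&w, src + 4 * i, 4);	/* stands in for the aligned lw */
			sum = addc32(sum, w);
		}
		return sum;
	}

The move_128bytes loop further down is this pattern unrolled: four CSUM_BIGCHUNK invocations of 32 bytes each per iteration, on both the 32-bit and the USE_DOUBLE variant.
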
@@ -61,86 +84,27 @@
.text
.set noreorder
-
-/* unknown src alignment and < 8 bytes to go */
-small_csumcpy:
- move a1, T2
-
- andi T0, a1, 4
- beqz T0, 1f
- andi T0, a1, 2
-
- /* Still a full word to go */
- ulw T1, (src)
- PTR_ADDIU src, 4
- ADDC(sum, T1)
-
-1: move T1, zero
- beqz T0, 1f
- andi T0, a1, 1
-
- /* Still a halfword to go */
- ulhu T1, (src)
- PTR_ADDIU src, 2
-
-1: beqz T0, 1f
- sll T1, T1, 16
-
- lbu T2, (src)
- nop
-
-#ifdef __MIPSEB__
- sll T2, T2, 8
-#endif
- or T1, T2
-
-1: ADDC(sum, T1)
-
- /* fold checksum */
- sll v1, sum, 16
- addu sum, v1
- sltu v1, sum, v1
- srl sum, sum, 16
- addu sum, v1
-
- /* odd buffer alignment? */
- beqz T7, 1f
- nop
- sll v1, sum, 8
- srl sum, sum, 8
- or sum, v1
- andi sum, 0xffff
-1:
- .set reorder
- /* Add the passed partial csum. */
- ADDC(sum, a2)
- jr ra
- .set noreorder
-
-/* ------------------------------------------------------------------------- */
-
.align 5
LEAF(csum_partial)
move sum, zero
- move T7, zero
+ move t7, zero
sltiu t8, a1, 0x8
bnez t8, small_csumcpy /* < 8 bytes to copy */
- move T2, a1
+ move t2, a1
- beqz a1, out
- andi T7, src, 0x1 /* odd buffer? */
+ andi t7, src, 0x1 /* odd buffer? */
hword_align:
- beqz T7, word_align
+ beqz t7, word_align
andi t8, src, 0x2
- lbu T0, (src)
+ lbu t0, (src)
LONG_SUBU a1, a1, 0x1
#ifdef __MIPSEL__
- sll T0, T0, 8
+ sll t0, t0, 8
#endif
- ADDC(sum, T0)
+ ADDC(sum, t0)
PTR_ADDU src, src, 0x1
andi t8, src, 0x2
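
The hword_align block in this hunk handles a buffer that starts on an odd address: the leading byte is summed on its own (shifted into the high half on little-endian builds) and the odd start is recorded in t7 so the folded result can be byte-swapped at the end. Roughly, in C (the helper name and the little-endian assumption are mine, not the kernel's):

	#include <stddef.h>
	#include <stdint.h>

	/* Sketch of the odd-address prologue; little-endian assumed (__MIPSEL__ branch). */
	static uint32_t csum_odd_prologue(const unsigned char **srcp, size_t *lenp,
					  int *odd, uint32_t sum)
	{
		const unsigned char *src = *srcp;

		if ((uintptr_t)src & 1) {
			uint32_t b = (uint32_t)*src++ << 8;	/* lbu, then sll 8 on __MIPSEL__ */

			sum += b;				/* ADDC(sum, t0) */
			if (sum < b)
				sum++;
			(*lenp)--;
			*odd = 1;	/* the asm records this in t7 for the final byte swap */
		}
		*srcp = src;
		return sum;
	}
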
@@ -148,9 +112,9 @@ word_align:
beqz t8, dword_align
sltiu t8, a1, 56
- lhu T0, (src)
+ lhu t0, (src)
LONG_SUBU a1, a1, 0x2
- ADDC(sum, T0)
+ ADDC(sum, t0)
sltiu t8, a1, 56
PTR_ADDU src, src, 0x2
@@ -162,9 +126,9 @@ dword_align:
beqz t8, qword_align
andi t8, src, 0x8
- lw T0, 0x00(src)
+ lw t0, 0x00(src)
LONG_SUBU a1, a1, 0x4
- ADDC(sum, T0)
+ ADDC(sum, t0)
PTR_ADDU src, src, 0x4
andi t8, src, 0x8
@@ -172,11 +136,17 @@ qword_align:
beqz t8, oword_align
andi t8, src, 0x10
- lw T0, 0x00(src)
- lw T1, 0x04(src)
+#ifdef USE_DOUBLE
+ ld t0, 0x00(src)
+ LONG_SUBU a1, a1, 0x8
+ ADDC(sum, t0)
+#else
+ lw t0, 0x00(src)
+ lw t1, 0x04(src)
LONG_SUBU a1, a1, 0x8
- ADDC(sum, T0)
- ADDC(sum, T1)
+ ADDC(sum, t0)
+ ADDC(sum, t1)
+#endif
PTR_ADDU src, src, 0x8
andi t8, src, 0x10
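
With USE_DOUBLE the same alignment step needs a single ld instead of two lw: the checksum is accumulated in a 64-bit register (daddu in ADDC) and its high half is folded in only at the very end. A hedged sketch of one such 8-byte step, again with hypothetical helper names:

	#include <stdint.h>
	#include <string.h>

	/* 64-bit variant of ADDC: daddu plus end-around carry. */
	static uint64_t addc64(uint64_t sum, uint64_t v)
	{
		sum += v;
		if (sum < v)
			sum++;
		return sum;
	}

	/* One USE_DOUBLE step: a single 8-byte load instead of two 4-byte ones. */
	static uint64_t csum_step8(const unsigned char *src, uint64_t sum)
	{
		uint64_t v;

		memcpy(&v, src, 8);	/* stands in for the aligned ld */
		return addc64(sum, v);
	}
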
@@ -184,75 +154,120 @@ oword_align:
beqz t8, begin_movement
LONG_SRL t8, a1, 0x7
- lw T3, 0x08(src)
- lw T4, 0x0c(src)
- lw T0, 0x00(src)
- lw T1, 0x04(src)
- ADDC(sum, T3)
- ADDC(sum, T4)
- ADDC(sum, T0)
- ADDC(sum, T1)
+#ifdef USE_DOUBLE
+ ld t0, 0x00(src)
+ ld t1, 0x08(src)
+ ADDC(sum, t0)
+ ADDC(sum, t1)
+#else
+ CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4)
+#endif
LONG_SUBU a1, a1, 0x10
PTR_ADDU src, src, 0x10
LONG_SRL t8, a1, 0x7
begin_movement:
beqz t8, 1f
- andi T2, a1, 0x40
+ andi t2, a1, 0x40
move_128bytes:
- CSUM_BIGCHUNK(src, 0x00, sum, T0, T1, T3, T4)
- CSUM_BIGCHUNK(src, 0x20, sum, T0, T1, T3, T4)
- CSUM_BIGCHUNK(src, 0x40, sum, T0, T1, T3, T4)
- CSUM_BIGCHUNK(src, 0x60, sum, T0, T1, T3, T4)
+ CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
LONG_SUBU t8, t8, 0x01
bnez t8, move_128bytes
PTR_ADDU src, src, 0x80
1:
- beqz T2, 1f
- andi T2, a1, 0x20
+ beqz t2, 1f
+ andi t2, a1, 0x20
move_64bytes:
- CSUM_BIGCHUNK(src, 0x00, sum, T0, T1, T3, T4)
- CSUM_BIGCHUNK(src, 0x20, sum, T0, T1, T3, T4)
+ CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
PTR_ADDU src, src, 0x40
1:
- beqz T2, do_end_words
+ beqz t2, do_end_words
andi t8, a1, 0x1c
move_32bytes:
- CSUM_BIGCHUNK(src, 0x00, sum, T0, T1, T3, T4)
+ CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
andi t8, a1, 0x1c
PTR_ADDU src, src, 0x20
do_end_words:
- beqz t8, maybe_end_cruft
- LONG_SRL t8, t8, 0x2
+ beqz t8, small_csumcpy
+ andi t2, a1, 0x3
+ LONG_SRL t8, t8, 0x2
end_words:
- lw T0, (src)
+ lw t0, (src)
LONG_SUBU t8, t8, 0x1
- ADDC(sum, T0)
+ ADDC(sum, t0)
bnez t8, end_words
PTR_ADDU src, src, 0x4
-maybe_end_cruft:
- andi T2, a1, 0x3
+/* unknown src alignment and < 8 bytes to go */
+small_csumcpy:
+ move a1, t2
-small_memcpy:
- j small_csumcpy; move a1, T2 /* XXX ??? */
- beqz t2, out
- move a1, T2
+ andi t0, a1, 4
+ beqz t0, 1f
+ andi t0, a1, 2
-end_bytes:
- lb T0, (src)
- LONG_SUBU a1, a1, 0x1
- bnez a2, end_bytes
- PTR_ADDU src, src, 0x1
+ /* Still a full word to go */
+ ulw t1, (src)
+ PTR_ADDIU src, 4
+ ADDC(sum, t1)
+
+1: move t1, zero
+ beqz t0, 1f
+ andi t0, a1, 1
+
+ /* Still a halfword to go */
+ ulhu t1, (src)
+ PTR_ADDIU src, 2
+
+1: beqz t0, 1f
+ sll t1, t1, 16
+
+ lbu t2, (src)
+ nop
-out:
+#ifdef __MIPSEB__
+ sll t2, t2, 8
+#endif
+ or t1, t2
+
+1: ADDC(sum, t1)
+
+ /* fold checksum */
+#ifdef USE_DOUBLE
+ dsll32 v1, sum, 0
+ daddu sum, v1
+ sltu v1, sum, v1
+ dsra32 sum, sum, 0
+ addu sum, v1
+#endif
+ sll v1, sum, 16
+ addu sum, v1
+ sltu v1, sum, v1
+ srl sum, sum, 16
+ addu sum, v1
+
+ /* odd buffer alignment? */
+ beqz t7, 1f
+ nop
+ sll v1, sum, 8
+ srl sum, sum, 8
+ or sum, v1
+ andi sum, 0xffff
+1:
+ .set reorder
+ /* Add the passed partial csum. */
+ ADDC(sum, a2)
jr ra
- move v0, sum
+ .set noreorder
END(csum_partial)
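
For reference, the epilogue of the routine, that is the fold from 64 to 32 to 16 bits, the byte swap for odd-aligned buffers and the final ADDC of the partial checksum passed in a2, corresponds roughly to the following C sketch (hypothetical names, USE_DOUBLE assumed):

	#include <stdint.h>

	/* Fold the accumulator to 16 bits, undo odd alignment, add the passed partial csum. */
	static uint32_t csum_epilogue(uint64_t sum64, int odd, uint32_t partial)
	{
		uint32_t sum;

		/* dsll32/daddu/dsra32: fold the high 32 bits into the low 32 bits. */
		sum = (uint32_t)sum64 + (uint32_t)(sum64 >> 32);
		if (sum < (uint32_t)(sum64 >> 32))
			sum++;

		/* sll/srl 16 pair: fold down to 16 bits with end-around carry. */
		sum = (sum >> 16) + (sum & 0xffff);
		sum += sum >> 16;
		sum &= 0xffff;

		/* Odd starting address: every byte landed in the other half, so swap. */
		if (odd)
			sum = ((sum & 0xff) << 8) | (sum >> 8);

		/* ADDC(sum, a2): add the partial checksum supplied by the caller. */
		sum += partial;
		if (sum < partial)
			sum++;
		return sum;
	}
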