path: root/include/asm-sparc/xor_32.h
author    Sam Ravnborg <sam@ravnborg.org>    2008-07-17 21:55:51 -0700
committer David S. Miller <davem@davemloft.net>    2008-07-17 21:55:51 -0700
commit    f5e706ad886b6a5eb59637830110b09ccebf01c5 (patch)
tree      ea043a0a28e16a2ac6395c35d737f52698a165b7    /include/asm-sparc/xor_32.h
parent    5e3609f60c09f0f15f71f80c6d7933b2c7be71a6 (diff)
sparc: join the remaining header files
With this commit all sparc64 header files are moved to asm-sparc.

The remaining files (71 files) were too different to be trivially merged,
so they are split into a _32.h and a _64.h file, both included from the
file with no bit-size suffix.

The following script was used:

cd include
FILES=`wc -l asm-sparc64/*h | grep -v '^ 1' | cut -b 20-`

for FILE in ${FILES}; do
    echo $FILE:
    BASE=`echo $FILE | cut -d '.' -f 1`
    FN32=${BASE}_32.h
    FN64=${BASE}_64.h
    GUARD=___ASM_SPARC_`echo $BASE | tr '-' '_' | tr [:lower:] [:upper:]`_H
    git mv asm-sparc/$FILE asm-sparc/$FN32
    git mv asm-sparc64/$FILE asm-sparc/$FN64
    echo git mv done
    printf "#ifndef %s\n" $GUARD                             >  asm-sparc/$FILE
    printf "#define %s\n" $GUARD                             >> asm-sparc/$FILE
    printf "#if defined(__sparc__) && defined(__arch64__)\n" >> asm-sparc/$FILE
    printf "#include <asm-sparc/%s>\n" $FN64                 >> asm-sparc/$FILE
    printf "#else\n"                                         >> asm-sparc/$FILE
    printf "#include <asm-sparc/%s>\n" $FN32                 >> asm-sparc/$FILE
    printf "#endif\n"                                        >> asm-sparc/$FILE
    printf "#endif\n"                                        >> asm-sparc/$FILE
    git add asm-sparc/$FILE
    echo new file done
    printf "#include <asm-sparc/%s>\n" $FILE > asm-sparc64/$FILE
    git add asm-sparc64/$FILE
    echo sparc64 file done
done

The guard contains three '_' to avoid conflicts with existing guards.

In addition, the two Kbuild files are emptied to avoid breaking the
headers_* targets. We will reintroduce the exported header files when
the necessary kbuild changes are merged.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
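As an illustration (not part of the commit itself), running the script on
xor.h emits a small dispatch header in asm-sparc and a one-line forwarding
stub in asm-sparc64, roughly:

    /* asm-sparc/xor.h -- dispatch header generated by the script (illustrative) */
    #ifndef ___ASM_SPARC_XOR_H
    #define ___ASM_SPARC_XOR_H
    #if defined(__sparc__) && defined(__arch64__)
    #include <asm-sparc/xor_64.h>
    #else
    #include <asm-sparc/xor_32.h>
    #endif
    #endif

    /* asm-sparc64/xor.h -- forwarding stub (illustrative) */
    #include <asm-sparc/xor.h>

64-bit builds take the __arch64__ branch and pick up xor_64.h, while 32-bit
builds fall through to the xor_32.h added below.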
Diffstat (limited to 'include/asm-sparc/xor_32.h')
-rw-r--r--   include/asm-sparc/xor_32.h   269
1 file changed, 269 insertions(+), 0 deletions(-)
diff --git a/include/asm-sparc/xor_32.h b/include/asm-sparc/xor_32.h
new file mode 100644
index 00000000000..f34b2cfa820
--- /dev/null
+++ b/include/asm-sparc/xor_32.h
@@ -0,0 +1,269 @@
+/*
+ * include/asm-sparc/xor.h
+ *
+ * Optimized RAID-5 checksumming functions for 32-bit Sparc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * High speed xor_block operation for RAID4/5 utilizing the
+ * ldd/std SPARC instructions.
+ *
+ * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+static void
+sparc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ int lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ __asm__ __volatile__(
+ "ldd [%0 + 0x00], %%g2\n\t"
+ "ldd [%0 + 0x08], %%g4\n\t"
+ "ldd [%0 + 0x10], %%o0\n\t"
+ "ldd [%0 + 0x18], %%o2\n\t"
+ "ldd [%1 + 0x00], %%o4\n\t"
+ "ldd [%1 + 0x08], %%l0\n\t"
+ "ldd [%1 + 0x10], %%l2\n\t"
+ "ldd [%1 + 0x18], %%l4\n\t"
+ "xor %%g2, %%o4, %%g2\n\t"
+ "xor %%g3, %%o5, %%g3\n\t"
+ "xor %%g4, %%l0, %%g4\n\t"
+ "xor %%g5, %%l1, %%g5\n\t"
+ "xor %%o0, %%l2, %%o0\n\t"
+ "xor %%o1, %%l3, %%o1\n\t"
+ "xor %%o2, %%l4, %%o2\n\t"
+ "xor %%o3, %%l5, %%o3\n\t"
+ "std %%g2, [%0 + 0x00]\n\t"
+ "std %%g4, [%0 + 0x08]\n\t"
+ "std %%o0, [%0 + 0x10]\n\t"
+ "std %%o2, [%0 + 0x18]\n"
+ :
+ : "r" (p1), "r" (p2)
+ : "g2", "g3", "g4", "g5",
+ "o0", "o1", "o2", "o3", "o4", "o5",
+ "l0", "l1", "l2", "l3", "l4", "l5");
+ p1 += 8;
+ p2 += 8;
+ } while (--lines > 0);
+}
+
+static void
+sparc_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ int lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ __asm__ __volatile__(
+ "ldd [%0 + 0x00], %%g2\n\t"
+ "ldd [%0 + 0x08], %%g4\n\t"
+ "ldd [%0 + 0x10], %%o0\n\t"
+ "ldd [%0 + 0x18], %%o2\n\t"
+ "ldd [%1 + 0x00], %%o4\n\t"
+ "ldd [%1 + 0x08], %%l0\n\t"
+ "ldd [%1 + 0x10], %%l2\n\t"
+ "ldd [%1 + 0x18], %%l4\n\t"
+ "xor %%g2, %%o4, %%g2\n\t"
+ "xor %%g3, %%o5, %%g3\n\t"
+ "ldd [%2 + 0x00], %%o4\n\t"
+ "xor %%g4, %%l0, %%g4\n\t"
+ "xor %%g5, %%l1, %%g5\n\t"
+ "ldd [%2 + 0x08], %%l0\n\t"
+ "xor %%o0, %%l2, %%o0\n\t"
+ "xor %%o1, %%l3, %%o1\n\t"
+ "ldd [%2 + 0x10], %%l2\n\t"
+ "xor %%o2, %%l4, %%o2\n\t"
+ "xor %%o3, %%l5, %%o3\n\t"
+ "ldd [%2 + 0x18], %%l4\n\t"
+ "xor %%g2, %%o4, %%g2\n\t"
+ "xor %%g3, %%o5, %%g3\n\t"
+ "xor %%g4, %%l0, %%g4\n\t"
+ "xor %%g5, %%l1, %%g5\n\t"
+ "xor %%o0, %%l2, %%o0\n\t"
+ "xor %%o1, %%l3, %%o1\n\t"
+ "xor %%o2, %%l4, %%o2\n\t"
+ "xor %%o3, %%l5, %%o3\n\t"
+ "std %%g2, [%0 + 0x00]\n\t"
+ "std %%g4, [%0 + 0x08]\n\t"
+ "std %%o0, [%0 + 0x10]\n\t"
+ "std %%o2, [%0 + 0x18]\n"
+ :
+ : "r" (p1), "r" (p2), "r" (p3)
+ : "g2", "g3", "g4", "g5",
+ "o0", "o1", "o2", "o3", "o4", "o5",
+ "l0", "l1", "l2", "l3", "l4", "l5");
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ } while (--lines > 0);
+}
+
+static void
+sparc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ int lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ __asm__ __volatile__(
+ "ldd [%0 + 0x00], %%g2\n\t"
+ "ldd [%0 + 0x08], %%g4\n\t"
+ "ldd [%0 + 0x10], %%o0\n\t"
+ "ldd [%0 + 0x18], %%o2\n\t"
+ "ldd [%1 + 0x00], %%o4\n\t"
+ "ldd [%1 + 0x08], %%l0\n\t"
+ "ldd [%1 + 0x10], %%l2\n\t"
+ "ldd [%1 + 0x18], %%l4\n\t"
+ "xor %%g2, %%o4, %%g2\n\t"
+ "xor %%g3, %%o5, %%g3\n\t"
+ "ldd [%2 + 0x00], %%o4\n\t"
+ "xor %%g4, %%l0, %%g4\n\t"
+ "xor %%g5, %%l1, %%g5\n\t"
+ "ldd [%2 + 0x08], %%l0\n\t"
+ "xor %%o0, %%l2, %%o0\n\t"
+ "xor %%o1, %%l3, %%o1\n\t"
+ "ldd [%2 + 0x10], %%l2\n\t"
+ "xor %%o2, %%l4, %%o2\n\t"
+ "xor %%o3, %%l5, %%o3\n\t"
+ "ldd [%2 + 0x18], %%l4\n\t"
+ "xor %%g2, %%o4, %%g2\n\t"
+ "xor %%g3, %%o5, %%g3\n\t"
+ "ldd [%3 + 0x00], %%o4\n\t"
+ "xor %%g4, %%l0, %%g4\n\t"
+ "xor %%g5, %%l1, %%g5\n\t"
+ "ldd [%3 + 0x08], %%l0\n\t"
+ "xor %%o0, %%l2, %%o0\n\t"
+ "xor %%o1, %%l3, %%o1\n\t"
+ "ldd [%3 + 0x10], %%l2\n\t"
+ "xor %%o2, %%l4, %%o2\n\t"
+ "xor %%o3, %%l5, %%o3\n\t"
+ "ldd [%3 + 0x18], %%l4\n\t"
+ "xor %%g2, %%o4, %%g2\n\t"
+ "xor %%g3, %%o5, %%g3\n\t"
+ "xor %%g4, %%l0, %%g4\n\t"
+ "xor %%g5, %%l1, %%g5\n\t"
+ "xor %%o0, %%l2, %%o0\n\t"
+ "xor %%o1, %%l3, %%o1\n\t"
+ "xor %%o2, %%l4, %%o2\n\t"
+ "xor %%o3, %%l5, %%o3\n\t"
+ "std %%g2, [%0 + 0x00]\n\t"
+ "std %%g4, [%0 + 0x08]\n\t"
+ "std %%o0, [%0 + 0x10]\n\t"
+ "std %%o2, [%0 + 0x18]\n"
+ :
+ : "r" (p1), "r" (p2), "r" (p3), "r" (p4)
+ : "g2", "g3", "g4", "g5",
+ "o0", "o1", "o2", "o3", "o4", "o5",
+ "l0", "l1", "l2", "l3", "l4", "l5");
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ } while (--lines > 0);
+}
+
+static void
+sparc_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ int lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ __asm__ __volatile__(
+ "ldd [%0 + 0x00], %%g2\n\t"
+ "ldd [%0 + 0x08], %%g4\n\t"
+ "ldd [%0 + 0x10], %%o0\n\t"
+ "ldd [%0 + 0x18], %%o2\n\t"
+ "ldd [%1 + 0x00], %%o4\n\t"
+ "ldd [%1 + 0x08], %%l0\n\t"
+ "ldd [%1 + 0x10], %%l2\n\t"
+ "ldd [%1 + 0x18], %%l4\n\t"
+ "xor %%g2, %%o4, %%g2\n\t"
+ "xor %%g3, %%o5, %%g3\n\t"
+ "ldd [%2 + 0x00], %%o4\n\t"
+ "xor %%g4, %%l0, %%g4\n\t"
+ "xor %%g5, %%l1, %%g5\n\t"
+ "ldd [%2 + 0x08], %%l0\n\t"
+ "xor %%o0, %%l2, %%o0\n\t"
+ "xor %%o1, %%l3, %%o1\n\t"
+ "ldd [%2 + 0x10], %%l2\n\t"
+ "xor %%o2, %%l4, %%o2\n\t"
+ "xor %%o3, %%l5, %%o3\n\t"
+ "ldd [%2 + 0x18], %%l4\n\t"
+ "xor %%g2, %%o4, %%g2\n\t"
+ "xor %%g3, %%o5, %%g3\n\t"
+ "ldd [%3 + 0x00], %%o4\n\t"
+ "xor %%g4, %%l0, %%g4\n\t"
+ "xor %%g5, %%l1, %%g5\n\t"
+ "ldd [%3 + 0x08], %%l0\n\t"
+ "xor %%o0, %%l2, %%o0\n\t"
+ "xor %%o1, %%l3, %%o1\n\t"
+ "ldd [%3 + 0x10], %%l2\n\t"
+ "xor %%o2, %%l4, %%o2\n\t"
+ "xor %%o3, %%l5, %%o3\n\t"
+ "ldd [%3 + 0x18], %%l4\n\t"
+ "xor %%g2, %%o4, %%g2\n\t"
+ "xor %%g3, %%o5, %%g3\n\t"
+ "ldd [%4 + 0x00], %%o4\n\t"
+ "xor %%g4, %%l0, %%g4\n\t"
+ "xor %%g5, %%l1, %%g5\n\t"
+ "ldd [%4 + 0x08], %%l0\n\t"
+ "xor %%o0, %%l2, %%o0\n\t"
+ "xor %%o1, %%l3, %%o1\n\t"
+ "ldd [%4 + 0x10], %%l2\n\t"
+ "xor %%o2, %%l4, %%o2\n\t"
+ "xor %%o3, %%l5, %%o3\n\t"
+ "ldd [%4 + 0x18], %%l4\n\t"
+ "xor %%g2, %%o4, %%g2\n\t"
+ "xor %%g3, %%o5, %%g3\n\t"
+ "xor %%g4, %%l0, %%g4\n\t"
+ "xor %%g5, %%l1, %%g5\n\t"
+ "xor %%o0, %%l2, %%o0\n\t"
+ "xor %%o1, %%l3, %%o1\n\t"
+ "xor %%o2, %%l4, %%o2\n\t"
+ "xor %%o3, %%l5, %%o3\n\t"
+ "std %%g2, [%0 + 0x00]\n\t"
+ "std %%g4, [%0 + 0x08]\n\t"
+ "std %%o0, [%0 + 0x10]\n\t"
+ "std %%o2, [%0 + 0x18]\n"
+ :
+ : "r" (p1), "r" (p2), "r" (p3), "r" (p4), "r" (p5)
+ : "g2", "g3", "g4", "g5",
+ "o0", "o1", "o2", "o3", "o4", "o5",
+ "l0", "l1", "l2", "l3", "l4", "l5");
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ p5 += 8;
+ } while (--lines > 0);
+}
+
+static struct xor_block_template xor_block_SPARC = {
+ .name = "SPARC",
+ .do_2 = sparc_2,
+ .do_3 = sparc_3,
+ .do_4 = sparc_4,
+ .do_5 = sparc_5,
+};
+
+/* For grins, also test the generic routines. */
+#include <asm-generic/xor.h>
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+ do { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_32regs); \
+ xor_speed(&xor_block_SPARC); \
+ } while (0)
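
Each sparc_N routine above XORs its second through Nth buffers into p1,
processing one 32-byte "line" (eight 32-bit longs) per loop iteration with
paired 64-bit ldd/std accesses. For reference only, a minimal portable C
sketch of what sparc_2 computes, using a hypothetical helper name xor_2_ref
and ignoring the double-word load/store scheduling, would be:

    static void xor_2_ref(unsigned long bytes, unsigned long *p1,
                          unsigned long *p2)
    {
    	int lines = bytes / (sizeof(long)) / 8;	/* 32 bytes per line on sparc32 */

    	do {
    		/* XOR one eight-long line of p2 into p1 */
    		p1[0] ^= p2[0]; p1[1] ^= p2[1];
    		p1[2] ^= p2[2]; p1[3] ^= p2[3];
    		p1[4] ^= p2[4]; p1[5] ^= p2[5];
    		p1[6] ^= p2[6]; p1[7] ^= p2[7];
    		p1 += 8;
    		p2 += 8;
    	} while (--lines > 0);
    }

XOR_TRY_TEMPLATES is expanded by the kernel's xor core when it benchmarks
the available routines, so on sparc32 the generic 8regs and 32regs loops
from asm-generic/xor.h are timed alongside xor_block_SPARC and the fastest
implementation is selected.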