author     Ingo Molnar <mingo@elte.hu>  2008-10-12 12:39:30 +0200
committer  Ingo Molnar <mingo@elte.hu>  2008-10-12 12:39:50 +0200
commit     4c7145a1ec1bb789d5f07e47510e8bda546a7c4a (patch)
tree       e2767b77e5413473a3bba302237f4669a203f183 /arch/mips/include/asm/barrier.h
parent     74e91604b2452c15bbe72d77b37cf47ed0310d13 (diff)
parent     fd048088306656824958e7783ffcee27e241b361 (diff)
Merge branch 'linus' into x86/spinlocks
Done to prevent this failure of an Octopus merge:

  Added arch/arm/include/asm/byteorder.h in both, but differently.
  ERROR: Merge conflict in arch/arm/include/asm/byteorder.h
  Auto-merging include/asm-x86/spinlock.h
  ERROR: Merge conflict in include/asm-x86/spinlock.h
  fatal: merge program failed
Diffstat (limited to 'arch/mips/include/asm/barrier.h')
-rw-r--r--  arch/mips/include/asm/barrier.h | 155
1 file changed, 155 insertions(+), 0 deletions(-)
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
new file mode 100644
index 00000000000..8e9ac313ca3
--- /dev/null
+++ b/arch/mips/include/asm/barrier.h
@@ -0,0 +1,155 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+/*
+ * read_barrier_depends - Flush all pending reads that subsequent reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data returned by
+ * any of the preceding reads. This primitive is much lighter weight
+ * than rmb() on most CPUs, and is never heavier weight than rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ * CPU 0 CPU 1
+ *
+ * b = 2;
+ * memory_barrier();
+ * p = &b; q = p;
+ * read_barrier_depends();
+ * d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ * CPU 0 CPU 1
+ *
+ * a = 2;
+ * memory_barrier();
+ * b = 3; y = b;
+ * read_barrier_depends();
+ * x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ */
+
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
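+
+/*
+ * Illustrative only: a minimal consumer-side sketch of the pointer-
+ * publication pattern these macros exist for.  The names "gp" and
+ * "reader" are hypothetical.  On MIPS the barrier expands to nothing,
+ * but the call still marks the data dependency for architectures
+ * (such as Alpha) where it does not.
+ *
+ * <programlisting>
+ * struct foo { int a; };
+ * struct foo *gp;
+ *
+ * int reader(void)
+ * {
+ *         struct foo *q = gp;
+ *
+ *         smp_read_barrier_depends();
+ *         return q->a;
+ * }
+ * </programlisting>
+ */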
+
+#ifdef CONFIG_CPU_HAS_SYNC
+#define __sync() \
+ __asm__ __volatile__( \
+ ".set push\n\t" \
+ ".set noreorder\n\t" \
+ ".set mips2\n\t" \
+ "sync\n\t" \
+ ".set pop" \
+ : /* no output */ \
+ : /* no input */ \
+ : "memory")
+#else
+#define __sync() do { } while (0)
+#endif
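+
+/*
+ * For illustration only, a hypothetical publisher that orders two
+ * stores with __sync().  On CONFIG_CPU_HAS_SYNC kernels the macro
+ * emits the MIPS II "sync" instruction; otherwise it compiles to
+ * nothing.
+ *
+ * <programlisting>
+ * int data, flag;
+ *
+ * void publish(int v)
+ * {
+ *         data = v;
+ *         __sync();
+ *         flag = 1;
+ * }
+ * </programlisting>
+ */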
+
+#define __fast_iob() \
+ __asm__ __volatile__( \
+ ".set push\n\t" \
+ ".set noreorder\n\t" \
+ "lw $0,%0\n\t" \
+ "nop\n\t" \
+ ".set pop" \
+ : /* no output */ \
+ : "m" (*(int *)CKSEG1) \
+ : "memory")
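+
+/*
+ * The dummy "lw" above reads through CKSEG1, the unmapped uncached
+ * segment, so the load has to go out to the bus and cannot complete
+ * until writes buffered ahead of it have drained; that is what lets
+ * a plain load act as an I/O barrier here.
+ */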
+
+#define fast_wmb() __sync()
+#define fast_rmb() __sync()
+#define fast_mb() __sync()
+#ifdef CONFIG_SGI_IP28
+#define fast_iob() \
+ __asm__ __volatile__( \
+ ".set push\n\t" \
+ ".set noreorder\n\t" \
+ "lw $0,%0\n\t" \
+ "sync\n\t" \
+ "lw $0,%0\n\t" \
+ ".set pop" \
+ : /* no output */ \
+ : "m" (*(int *)CKSEG1ADDR(0x1fa00004)) \
+ : "memory")
+#else
+#define fast_iob() \
+ do { \
+ __sync(); \
+ __fast_iob(); \
+ } while (0)
+#endif
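+
+/*
+ * A usage sketch with hypothetical device names: post a command to a
+ * memory-mapped register, then use iob() to make sure the write has
+ * reached the device before polling for completion.
+ *
+ * <programlisting>
+ * writel(CMD_START, dev->regs + REG_CMD);
+ * iob();
+ * while (!(readl(dev->regs + REG_STATUS) & STATUS_DONE))
+ *         cpu_relax();
+ * </programlisting>
+ */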
+
+#ifdef CONFIG_CPU_HAS_WB
+
+#include <asm/wbflush.h>
+
+#define wmb() fast_wmb()
+#define rmb() fast_rmb()
+#define mb() wbflush()
+#define iob() wbflush()
+
+#else /* !CONFIG_CPU_HAS_WB */
+
+#define wmb() fast_wmb()
+#define rmb() fast_rmb()
+#define mb() fast_mb()
+#define iob() fast_iob()
+
+#endif /* !CONFIG_CPU_HAS_WB */
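+
+/*
+ * In short: on CPUs with an external write buffer (CONFIG_CPU_HAS_WB),
+ * mb() and iob() go through wbflush() to drain it, while wmb() and
+ * rmb() rely on __sync() alone; on everything else all four map onto
+ * the fast_*() variants above.
+ */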
+
+#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
+#define __WEAK_ORDERING_MB " sync \n"
+#else
+#define __WEAK_ORDERING_MB " \n"
+#endif
+#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
+#define __WEAK_LLSC_MB " sync \n"
+#else
+#define __WEAK_LLSC_MB " \n"
+#endif
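+
+/*
+ * The two strings above become the bodies of the SMP barriers below:
+ * on weakly ordered SMP kernels they expand to a real "sync",
+ * otherwise to nothing.  CONFIG_WEAK_REORDERING_BEYOND_LLSC covers
+ * CPUs that can also reorder accesses past ll/sc sequences, which is
+ * why atomics use the separate smp_llsc_*() variants.
+ */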
+
+#define smp_mb() __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
+#define smp_rmb() __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
+#define smp_wmb() __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
+
+#define set_mb(var, value) \
+ do { var = value; smp_mb(); } while (0)
+
+#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#define smp_llsc_rmb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#define smp_llsc_wmb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
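+
+/*
+ * A minimal SMP pairing sketch (variable names are hypothetical):
+ * the producer orders its stores with smp_wmb(), the consumer orders
+ * its loads with smp_rmb(), so CPU 1 can never observe "ready" set
+ * while "data" is still stale.
+ *
+ * <programlisting>
+ * CPU 0                        CPU 1
+ *
+ * data = 42;                   while (!ready)
+ * smp_wmb();                           cpu_relax();
+ * ready = 1;                   smp_rmb();
+ *                              x = data;  (x must be 42)
+ * </programlisting>
+ */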
+
+#endif /* __ASM_BARRIER_H */