author:    Ralf Baechle <ralf@linux-mips.org>  2013-01-22 15:59:30 +0400
committer: Ralf Baechle <ralf@linux-mips.org>  2013-02-01 13:00:22 +0400
commit:    7034228792cc561e79ff8600f02884bd4c80e287 (patch)
tree:      89b77af37d087d9de236fc5d21f60bf552d0a2c6 /arch/mips/include/asm/barrier.h
parent:    405ab01c70e18058d9c01a1256769a61fc65413e (diff)
download:  linux-7034228792cc561e79ff8600f02884bd4c80e287.tar.xz
MIPS: Whitespace cleanup.
Having received another series of whitespace patches I decided to do this once and for all rather than dealing with this kind of patches trickling in forever.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/include/asm/barrier.h')
-rw-r--r--  arch/mips/include/asm/barrier.h  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index f7fdc24e972d..314ab5532019 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -18,7 +18,7 @@
* over this barrier. All reads preceding this primitive are guaranteed
* to access memory (but not necessarily other CPUs' caches) before any
* reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
+ * any of the preceding reads. This primitive is much lighter weight than
* rmb() on most CPUs, and is never heavier weight than is
* rmb().
*
@@ -43,7 +43,7 @@
* </programlisting>
*
* because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
+ * two reads are separated by a read_barrier_depends(). However,
* the following code, with the same initial values for "a" and "b":
*
* <programlisting>
@@ -57,7 +57,7 @@
* </programlisting>
*
* does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
*/
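To make the comment above concrete, here is a minimal userspace sketch of the two cases it describes. read_barrier_depends() and rmb() are kernel primitives with no direct userspace spelling, so the GCC __atomic builtins (consume/acquire/release) stand in for them here; the thread scaffolding and variable names are illustrative only, not part of this patch.

/*
 * Sketch of the two cases from the comment above, using __atomic
 * builtins as stand-ins for the kernel barrier primitives.
 */
#include <pthread.h>
#include <stdio.h>

static int a = 1, b = 2;
static int *p = &a;

static void *writer(void *unused)
{
	(void)unused;
	b = 3;
	/* release store: analogue of memory_barrier() before "p = &b" */
	__atomic_store_n(&p, &b, __ATOMIC_RELEASE);
	return NULL;
}

static void *dependent_reader(void *unused)
{
	(void)unused;
	/* consume load: analogue of read_barrier_depends() after "q = p".
	 * The read of *q depends on the value read from p, so it is
	 * ordered: prints 1 for the old pointer or 3 for the new one,
	 * never a stale 2. */
	int *q = __atomic_load_n(&p, __ATOMIC_CONSUME);
	printf("*q = %d\n", *q);
	return NULL;
}

static void *independent_reader(void *unused)
{
	(void)unused;
	/* second case above: no data dependency between the reads of b
	 * and a, so an explicit acquire fence (the rmb() analogue) is
	 * required to order them. */
	int y = __atomic_load_n(&b, __ATOMIC_RELAXED);
	__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* ~ rmb() */
	int x = __atomic_load_n(&a, __ATOMIC_RELAXED);
	printf("x = %d, y = %d\n", x, y);
	return NULL;
}

int main(void)
{
	pthread_t w, r1, r2;
	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r1, NULL, dependent_reader, NULL);
	pthread_create(&r2, NULL, independent_reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r1, NULL);
	pthread_join(r2, NULL);
	return 0;
}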
@@ -92,7 +92,7 @@
: "memory")
#ifdef CONFIG_CPU_CAVIUM_OCTEON
# define OCTEON_SYNCW_STR ".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n"
-# define __syncw() __asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")
+# define __syncw() __asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")
# define fast_wmb() __syncw()
# define fast_rmb() barrier()
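For context on the hunk above: on Octeon, fast_wmb() expands to the doubled syncw sequence while fast_rmb() collapses to a compiler barrier(). A minimal sketch of how a caller ends up using the expansion follows; the macros are copied from the hunk, publish() and its arguments are invented for illustration, and the asm only assembles for an Octeon/MIPS target.

/* Macros copied from the hunk above (Octeon-only MIPS asm). */
#define OCTEON_SYNCW_STR ".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n"
#define __syncw() __asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")
#define fast_wmb() __syncw()

/* Hypothetical producer showing where the barrier sits: the payload
 * store must become visible before the flag store. */
static void publish(volatile int *flag, int *data)
{
	*data = 42;	/* payload write */
	fast_wmb();	/* doubled syncw, mirroring the macro above */
	*flag = 1;	/* consumer polls this flag */
}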
@@ -158,7 +158,7 @@
#endif
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
-#define __WEAK_LLSC_MB " sync \n"
+#define __WEAK_LLSC_MB " sync \n"
#else
#define __WEAK_LLSC_MB " \n"
#endif
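The __WEAK_LLSC_MB string is meant to be spliced into LL/SC asm sequences (and, later in this file, wrapped as smp_llsc_mb()) so that cores which reorder beyond the LL/SC pair get a sync after the store-conditional. Below is a hedged sketch of that placement, loosely modeled on the MIPS xchg loops; the function name and constraints are illustrative, and it assembles only for a MIPS target.

#ifndef __WEAK_LLSC_MB
#define __WEAK_LLSC_MB "	sync	\n"	/* SMP definition from above */
#endif

/* Illustrative xchg-style loop showing where __WEAK_LLSC_MB is spliced
 * into the asm string; not a drop-in kernel implementation. */
static inline int sketch_xchg(volatile int *m, int val)
{
	int old, tmp;

	__asm__ __volatile__(
	"1:	ll	%0, %2		\n"	/* load-linked the old value */
	"	move	%1, %3		\n"
	"	sc	%1, %2		\n"	/* store-conditional the new one */
	"	beqz	%1, 1b		\n"	/* retry if the sc failed */
	__WEAK_LLSC_MB				/* sync on weakly ordered cores */
	: "=&r" (old), "=&r" (tmp), "+m" (*m)
	: "r" (val)
	: "memory");

	return old;
}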