author     Maciej W. Rozycki <macro@codesourcery.com>   2013-11-01 23:47:05 +0000
committer  Ralf Baechle <ralf@linux-mips.org>           2013-11-04 18:19:20 +0100
commit     edf7b938321e5a8c170044e186afdf757bc597fa (patch)
tree       7ffa1603ce0ac714230eaa9c489c96cf80029900 /arch/mips
parent     dc73e4c1b6df2795830a398192aaba7fb3c5cbb7 (diff)
download   talos-op-linux-edf7b938321e5a8c170044e186afdf757bc597fa.tar.gz
           talos-op-linux-edf7b938321e5a8c170044e186afdf757bc597fa.zip
MIPS: Random whitespace clean-ups
Another whitespace clean-up, this removes tabs from between sentences
in some comments.

Signed-off-by: Maciej W. Rozycki <macro@codesourcery.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/6103/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/include/asm/addrspace.h | 4
-rw-r--r--  arch/mips/include/asm/atomic.h    | 2
-rw-r--r--  arch/mips/include/asm/barrier.h   | 6
3 files changed, 6 insertions, 6 deletions
diff --git a/arch/mips/include/asm/addrspace.h b/arch/mips/include/asm/addrspace.h
index 13d61c002e4f..3f745459fdb5 100644
--- a/arch/mips/include/asm/addrspace.h
+++ b/arch/mips/include/asm/addrspace.h
@@ -58,7 +58,7 @@
/*
* Memory segments (64bit kernel mode addresses)
- * The compatibility segments use the full 64-bit sign extended value.	Note
+ * The compatibility segments use the full 64-bit sign extended value.  Note
* the R8000 doesn't have them so don't reference these in generic MIPS code.
*/
#define XKUSEG _CONST64_(0x0000000000000000)
@@ -131,7 +131,7 @@
/*
* The ultimate limited of the 64-bit MIPS architecture: 2 bits for selecting
- * the region, 3 bits for the CCA mode.	This leaves 59 bits of which the
+ * the region, 3 bits for the CCA mode.  This leaves 59 bits of which the
* R8000 implements most with its 48-bit physical address space.
*/
#define TO_PHYS_MASK _CONST64_(0x07ffffffffffffff) /* 2^^59 - 1 */
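
The arithmetic in the comment of the hunk above is easy to check: taking 2 region bits and 3 CCA bits out of a 64-bit address leaves 59 bits, so TO_PHYS_MASK is exactly 2^59 - 1. A minimal stand-alone check (illustrative only, not part of this patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* the value quoted in the hunk above */
	uint64_t to_phys_mask = UINT64_C(0x07ffffffffffffff);

	/* 64 address bits - 2 region bits - 3 CCA bits = 59 usable bits */
	assert(to_phys_mask == (UINT64_C(1) << 59) - 1);
	return 0;
}
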
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 08b607969a16..7eed2f261710 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -1,5 +1,5 @@
/*
- * Atomic operations that C can't guarantee us.	Useful for
+ * Atomic operations that C can't guarantee us.  Useful for
* resource counting etc..
*
* But use these as seldom as possible since they are much more slower
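
The "resource counting" this comment refers to is the usual atomic_t reference-count idiom. A small kernel-style sketch of that pattern (the function names are made up for illustration, and it builds only in kernel context, not as part of this patch):

#include <linux/atomic.h>
#include <linux/printk.h>

static atomic_t nr_users = ATOMIC_INIT(0);

static void example_get(void)
{
	atomic_inc(&nr_users);			/* take a reference */
}

static void example_put(void)
{
	/* atomic_dec_and_test() returns true when the count reaches zero */
	if (atomic_dec_and_test(&nr_users))
		pr_info("last user gone, resource may be released\n");
}
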
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index 314ab5532019..f26d8e1bf3c3 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -18,7 +18,7 @@
* over this barrier. All reads preceding this primitive are guaranteed
* to access memory (but not necessarily other CPUs' caches) before any
* reads following this primitive that depend on the data return by
- * any of the preceding reads.	This primitive is much lighter weight than
+ * any of the preceding reads.  This primitive is much lighter weight than
* rmb() on most CPUs, and is never heavier weight than is
* rmb().
*
@@ -43,7 +43,7 @@
* </programlisting>
*
* because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().	However,
+ * two reads are separated by a read_barrier_depends().  However,
* the following code, with the same initial values for "a" and "b":
*
* <programlisting>
@@ -57,7 +57,7 @@
* </programlisting>
*
* does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
*/
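
The <programlisting> fragments this comment refers to are the classic publish/consume example: one CPU initialises data and then publishes a pointer to it, the other reads the pointer and then dereferences it, with read_barrier_depends() between the two dependent reads. A rough user-space rendering of the first, ordering-enforced case, using GCC __atomic builtins in place of the kernel primitives (the names and pthread scaffolding are illustrative, not part of this patch):

#include <pthread.h>
#include <stdio.h>

static int b;
static int *p;				/* shared pointer, initially NULL */

static void *writer(void *unused)
{
	b = 2;					/* initialise the data */
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* plays the role of wmb() */
	__atomic_store_n(&p, &b, __ATOMIC_RELAXED);	/* publish the pointer */
	return NULL;
}

static void *reader(void *unused)
{
	int *q = __atomic_load_n(&p, __ATOMIC_RELAXED);	/* read of "p" */

	if (q) {
		/*
		 * The read of "*q" depends on the read of "p"; this is the
		 * point where the comment says read_barrier_depends() is
		 * needed on CPUs such as Alpha (an acquire fence is used
		 * here as a conservative stand-in).
		 */
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		printf("d = %d\n", *q);
	}
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

In the second example from the comment there is no data dependency between the two reads on the consuming CPU, which is why only a full rmb() (not read_barrier_depends()) restores the ordering there.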