author      Paul Mackerras <paulus@ozlabs.org>      2016-11-03 16:15:42 +1100
committer   Michael Ellerman <mpe@ellerman.id.au>   2017-01-25 13:34:18 +1100
commit      d4fde568a34a93897dfb9ae64cfe9dda9d5c908c (patch)
tree        88beaa27d42bc3b780167930f79129ecd29fab38 /arch/powerpc/lib/checksum_64.S
parent      b492f7e4e07a28e706db26cf4943bb0911435426 (diff)
download    talos-op-linux-d4fde568a34a93897dfb9ae64cfe9dda9d5c908c.tar.gz
            talos-op-linux-d4fde568a34a93897dfb9ae64cfe9dda9d5c908c.zip
powerpc/64: Use optimized checksum routines on little-endian
Currently we have optimized hand-coded assembly checksum routines for big-endian 64-bit systems, but for little-endian we use the generic C routines. This modifies the optimized routines to work for little-endian. With this, we no longer need to enable CONFIG_GENERIC_CSUM. This also fixes a couple of comments in checksum_64.S so they accurately reflect what the associated instruction does.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
[mpe: Use the more common __BIG_ENDIAN__]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
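Background note on the endianness handling added below: the checksum is a 16-bit ones' complement sum accumulated with native-endian loads, so the only byte-order-dependent step is a trailing odd byte. That byte occupies the high byte lane of its 16-bit word on big-endian, but the low byte lane on little-endian, which is what the new #ifdef __BIG_ENDIAN__ blocks in the diff express (__BIG_ENDIAN__ being the compiler-predefined macro the patch itself tests). The C fragment below is only an illustrative sketch of that rule; it is not part of the patch, and partial_sum is a made-up name rather than a kernel function.

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    static uint32_t partial_sum(const uint8_t *p, size_t len)
    {
            uint32_t sum = 0;
            uint16_t w;

            while (len >= 2) {              /* native-endian 16-bit loads */
                    memcpy(&w, p, sizeof(w));
                    sum += w;
                    p += 2;
                    len -= 2;
            }
            if (len) {                      /* trailing odd byte */
    #ifdef __BIG_ENDIAN__
                    sum += (uint32_t)*p << 8;   /* pad the byte out to 16 bits */
    #else
                    sum += *p;                  /* byte is the low half on LE */
    #endif
            }
            while (sum >> 16)               /* fold carries back into 16 bits */
                    sum = (sum & 0xffff) + (sum >> 16);
            return sum;
    }

The real routines accumulate 64 bits at a time with add-with-carry, but the trailing-byte rule they apply is the same as in this sketch.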
Diffstat (limited to 'arch/powerpc/lib/checksum_64.S')
-rw-r--r--   arch/powerpc/lib/checksum_64.S   |   12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index d0d311e108ff..d7f1a966136e 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -36,7 +36,7 @@ _GLOBAL(__csum_partial)
          * work to calculate the correct checksum, we ignore that case
          * and take the potential slowdown of unaligned loads.
          */
-        rldicl. r6,r3,64-1,64-2         /* r6 = (r3 & 0x3) >> 1 */
+        rldicl. r6,r3,64-1,64-2         /* r6 = (r3 >> 1) & 0x3 */
         beq     .Lcsum_aligned
 
         li      r7,4
@@ -168,8 +168,12 @@ _GLOBAL(__csum_partial)
         beq     .Lcsum_finish
 
         lbz     r6,0(r3)
+#ifdef __BIG_ENDIAN__
         sldi    r9,r6,8                 /* Pad the byte out to 16 bits */
         adde    r0,r0,r9
+#else
+        adde    r0,r0,r6
+#endif
 
 .Lcsum_finish:
         addze   r0,r0                   /* add in final carry */
@@ -224,7 +228,7 @@ _GLOBAL(csum_partial_copy_generic)
          * If the source and destination are relatively unaligned we only
          * align the source. This keeps things simple.
          */
-        rldicl. r6,r3,64-1,64-2         /* r6 = (r3 & 0x3) >> 1 */
+        rldicl. r6,r3,64-1,64-2         /* r6 = (r3 >> 1) & 0x3 */
         beq     .Lcopy_aligned
 
         li      r9,4
@@ -386,8 +390,12 @@ dstnr; sth r6,0(r4)
         beq     .Lcopy_finish
 
 srcnr;  lbz     r6,0(r3)
+#ifdef __BIG_ENDIAN__
         sldi    r9,r6,8                 /* Pad the byte out to 16 bits */
         adde    r0,r0,r9
+#else
+        adde    r0,r0,r6
+#endif
 dstnr;  stb     r6,0(r4)
 
 .Lcopy_finish:
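Note on the comment fix in the hunks above (my illustration, not text from the patch): rldicl. r6,r3,64-1,64-2 rotates r3 right by one bit and keeps only the low two bits of the result, i.e. it extracts address bits 1 and 2, so the following branch is taken when those bits are clear. The corrected comment, (r3 >> 1) & 0x3, describes exactly that, whereas the old comment, (r3 & 0x3) >> 1, would only have tested bit 1. A C equivalent, using a hypothetical helper name:

    /* Hypothetical helper, illustration only: C equivalent of
     * "rldicl. r6,r3,64-1,64-2" -- rotate right by one bit and keep the
     * low two bits, i.e. extract address bits 1 and 2. */
    static inline unsigned long addr_bits_1_2(unsigned long addr)
    {
            return (addr >> 1) & 0x3;   /* matches the corrected comment */
    }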