author     Chad Rosier <mcrosier@codeaurora.org>  2014-08-04 21:20:25 +0000
committer  Chad Rosier <mcrosier@codeaurora.org>  2014-08-04 21:20:25 +0000
commit     5908ab4dd6595a7bcbe3d4952a3ca5630b262088 (patch)
tree       9d7b7502a5e4d47f3c28983d9db5084b8be55948 /llvm
parent     35487d8e50a4d2807f066939c68f6efa0d36af2c (diff)
[AArch64] Extend the number of scalar instructions supported in the AdvSIMD
scalar integer instruction pass.

This is a patch I had lying around from a few months ago. The pass is
currently disabled by default, so nothing too interesting.

llvm-svn: 214779
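For context, getTransformOpcode (modified below) maps a 64-bit GPR opcode to
its AdvSIMD scalar equivalent and returns the input opcode unchanged when no
equivalent exists. The following is a minimal, standalone C++ sketch of that
table-lookup pattern with the three entries added by this patch; the enum and
driver here are illustrative stand-ins, not the pass's actual types.

// Illustrative sketch only: mirrors the lookup pattern of the pass's
// getTransformOpcode with the opcodes added in this patch.
#include <cstdio>

enum Opcode { ANDXrr, EORXrr, ORRXrr, MULXrr, ANDv8i8, EORv8i8, ORRv8i8 };

static Opcode getTransformOpcode(Opcode Opc) {
  switch (Opc) {
  case ANDXrr: return ANDv8i8; // 64-bit GPR AND -> AdvSIMD scalar AND
  case EORXrr: return EORv8i8; // 64-bit GPR EOR -> AdvSIMD scalar EOR
  case ORRXrr: return ORRv8i8; // 64-bit GPR ORR -> AdvSIMD scalar ORR
  default:     return Opc;     // no AdvSIMD equivalent
  }
}

int main() {
  // A rewrite is only possible when the lookup yields a different opcode.
  std::printf("ANDXrr has an AdvSIMD form: %d\n",
              getTransformOpcode(ANDXrr) != ANDXrr);
  std::printf("MULXrr has an AdvSIMD form: %d\n",
              getTransformOpcode(MULXrr) != MULXrr);
  return 0;
}

The tests added below exercise exactly these three new mappings at the IR
level, checking both the short-form (and.8b) and generic (and v0.8b, ...)
assembly syntax.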
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp |  6
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll    | 41
2 files changed, 47 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp b/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
index 780e0750241..4f782b62edd 100644
--- a/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
@@ -166,6 +166,12 @@ static int getTransformOpcode(unsigned Opc) {
return AArch64::ADDv1i64;
case AArch64::SUBXrr:
return AArch64::SUBv1i64;
+ case AArch64::ANDXrr:
+ return AArch64::ANDv8i8;
+ case AArch64::EORXrr:
+ return AArch64::EORv8i8;
+ case AArch64::ORRXrr:
+ return AArch64::ORRv8i8;
}
// No AdvSIMD equivalent, so just return the original opcode.
return Opc;
diff --git a/llvm/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll b/llvm/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
index c4597d5a481..a3d7727c8ae 100644
--- a/llvm/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
@@ -65,3 +65,44 @@ define double @add_sub_su64(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
%retval = bitcast i64 %sub.i to double
ret double %retval
}
+define double @and_su64(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
+; CHECK-LABEL: and_su64:
+; CHECK: and.8b v0, v1, v0
+; CHECK-NEXT: ret
+; GENERIC-LABEL: and_su64:
+; GENERIC: and v0.8b, v1.8b, v0.8b
+; GENERIC-NEXT: ret
+ %vecext = extractelement <2 x i64> %a, i32 0
+ %vecext1 = extractelement <2 x i64> %b, i32 0
+ %or.i = and i64 %vecext1, %vecext
+ %retval = bitcast i64 %or.i to double
+ ret double %retval
+}
+
+define double @orr_su64(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
+; CHECK-LABEL: orr_su64:
+; CHECK: orr.8b v0, v1, v0
+; CHECK-NEXT: ret
+; GENERIC-LABEL: orr_su64:
+; GENERIC: orr v0.8b, v1.8b, v0.8b
+; GENERIC-NEXT: ret
+ %vecext = extractelement <2 x i64> %a, i32 0
+ %vecext1 = extractelement <2 x i64> %b, i32 0
+ %or.i = or i64 %vecext1, %vecext
+ %retval = bitcast i64 %or.i to double
+ ret double %retval
+}
+
+define double @xorr_su64(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
+; CHECK-LABEL: xorr_su64:
+; CHECK: eor.8b v0, v1, v0
+; CHECK-NEXT: ret
+; GENERIC-LABEL: xorr_su64:
+; GENERIC: eor v0.8b, v1.8b, v0.8b
+; GENERIC-NEXT: ret
+ %vecext = extractelement <2 x i64> %a, i32 0
+ %vecext1 = extractelement <2 x i64> %b, i32 0
+ %xor.i = xor i64 %vecext1, %vecext
+ %retval = bitcast i64 %xor.i to double
+ ret double %retval
+}