summaryrefslogtreecommitdiffstats
path: root/llvm/test/Bitcode/ssse3_palignr.ll
diff options
context:
space:
mode:
authorChad Rosier <mcrosier@apple.com>2011-10-25 01:22:20 +0000
committerChad Rosier <mcrosier@apple.com>2011-10-25 01:22:20 +0000
commit48d436618d13ccf0e88f27db89678d0bc89bf107 (patch)
tree0faf679c189abb93f104b13bffd14790d73dcf2f /llvm/test/Bitcode/ssse3_palignr.ll
parente042bbfdf8f68f869fafeb172af7cbf7f9b8bc5e (diff)
downloadbcm5719-llvm-48d436618d13ccf0e88f27db89678d0bc89bf107.tar.gz
bcm5719-llvm-48d436618d13ccf0e88f27db89678d0bc89bf107.zip
Fix these test cases to not use .bc files. Otherwise, we run into issues with
bitcode reader/writer backward compatibility. llvm-svn: 142896
Diffstat (limited to 'llvm/test/Bitcode/ssse3_palignr.ll')
-rw-r--r-- llvm/test/Bitcode/ssse3_palignr.ll  82
1 file changed, 81 insertions(+), 1 deletion(-)
diff --git a/llvm/test/Bitcode/ssse3_palignr.ll b/llvm/test/Bitcode/ssse3_palignr.ll
index f62ca118c1b..eb844497d9d 100644
--- a/llvm/test/Bitcode/ssse3_palignr.ll
+++ b/llvm/test/Bitcode/ssse3_palignr.ll
@@ -1,2 +1,82 @@
-; RUN: llvm-dis < %s.bc | FileCheck %s
+; RUN: opt < %s | llvm-dis | FileCheck %s
; CHECK-NOT: {@llvm\.palign}
+
+define <4 x i32> @align1(<4 x i32> %a, <4 x i32> %b) nounwind readnone ssp {
+entry:
+ %0 = bitcast <4 x i32> %b to <2 x i64> ; <<2 x i64>> [#uses=1]
+ %1 = bitcast <4 x i32> %a to <2 x i64> ; <<2 x i64>> [#uses=1]
+ %2 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %1, <2 x i64> %0, i8 15) ; <<2 x i64>> [#uses=1]
+ %3 = bitcast <2 x i64> %2 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ ret <4 x i32> %3
+}
+
+define double @align8(<2 x i32> %a, <2 x i32> %b) nounwind readnone ssp {
+entry:
+ %0 = bitcast <2 x i32> %b to <1 x i64> ; <<1 x i64>> [#uses=1]
+ %1 = bitcast <2 x i32> %a to <1 x i64> ; <<1 x i64>> [#uses=1]
+ %2 = tail call <1 x i64> @llvm.x86.ssse3.palign.r(<1 x i64> %1, <1 x i64> %0, i8 7) ; <<1 x i64>> [#uses=1]
+ %3 = extractelement <1 x i64> %2, i32 0 ; <i64> [#uses=1]
+ %retval12 = bitcast i64 %3 to double ; <double> [#uses=1]
+ ret double %retval12
+}
+
+declare <1 x i64> @llvm.x86.ssse3.palign.r(<1 x i64>, <1 x i64>, i8) nounwind readnone
+
+define double @align7(<2 x i32> %a, <2 x i32> %b) nounwind readnone ssp {
+entry:
+ %0 = bitcast <2 x i32> %b to <1 x i64> ; <<1 x i64>> [#uses=1]
+ %1 = bitcast <2 x i32> %a to <1 x i64> ; <<1 x i64>> [#uses=1]
+ %2 = tail call <1 x i64> @llvm.x86.ssse3.palign.r(<1 x i64> %1, <1 x i64> %0, i8 16) ; <<1 x i64>> [#uses=1]
+ %3 = extractelement <1 x i64> %2, i32 0 ; <i64> [#uses=1]
+ %retval12 = bitcast i64 %3 to double ; <double> [#uses=1]
+ ret double %retval12
+}
+
+define double @align6(<2 x i32> %a, <2 x i32> %b) nounwind readnone ssp {
+entry:
+ %0 = bitcast <2 x i32> %b to <1 x i64> ; <<1 x i64>> [#uses=1]
+ %1 = bitcast <2 x i32> %a to <1 x i64> ; <<1 x i64>> [#uses=1]
+ %2 = tail call <1 x i64> @llvm.x86.ssse3.palign.r(<1 x i64> %1, <1 x i64> %0, i8 9) ; <<1 x i64>> [#uses=1]
+ %3 = extractelement <1 x i64> %2, i32 0 ; <i64> [#uses=1]
+ %retval12 = bitcast i64 %3 to double ; <double> [#uses=1]
+ ret double %retval12
+}
+
+define double @align5(<2 x i32> %a, <2 x i32> %b) nounwind readnone ssp {
+entry:
+ %0 = bitcast <2 x i32> %b to <1 x i64> ; <<1 x i64>> [#uses=1]
+ %1 = bitcast <2 x i32> %a to <1 x i64> ; <<1 x i64>> [#uses=1]
+ %2 = tail call <1 x i64> @llvm.x86.ssse3.palign.r(<1 x i64> %1, <1 x i64> %0, i8 8) ; <<1 x i64>> [#uses=1]
+ %3 = extractelement <1 x i64> %2, i32 0 ; <i64> [#uses=1]
+ %retval12 = bitcast i64 %3 to double ; <double> [#uses=1]
+ ret double %retval12
+}
+
+define <4 x i32> @align4(<4 x i32> %a, <4 x i32> %b) nounwind readnone ssp {
+entry:
+ %0 = bitcast <4 x i32> %b to <2 x i64> ; <<2 x i64>> [#uses=1]
+ %1 = bitcast <4 x i32> %a to <2 x i64> ; <<2 x i64>> [#uses=1]
+ %2 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %1, <2 x i64> %0, i8 32) ; <<2 x i64>> [#uses=1]
+ %3 = bitcast <2 x i64> %2 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ ret <4 x i32> %3
+}
+
+declare <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64>, <2 x i64>, i8) nounwind readnone
+
+define <4 x i32> @align3(<4 x i32> %a, <4 x i32> %b) nounwind readnone ssp {
+entry:
+ %0 = bitcast <4 x i32> %b to <2 x i64> ; <<2 x i64>> [#uses=1]
+ %1 = bitcast <4 x i32> %a to <2 x i64> ; <<2 x i64>> [#uses=1]
+ %2 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %1, <2 x i64> %0, i8 17) ; <<2 x i64>> [#uses=1]
+ %3 = bitcast <2 x i64> %2 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ ret <4 x i32> %3
+}
+
+define <4 x i32> @align2(<4 x i32> %a, <4 x i32> %b) nounwind readnone ssp {
+entry:
+ %0 = bitcast <4 x i32> %b to <2 x i64> ; <<2 x i64>> [#uses=1]
+ %1 = bitcast <4 x i32> %a to <2 x i64> ; <<2 x i64>> [#uses=1]
+ %2 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %1, <2 x i64> %0, i8 16) ; <<2 x i64>> [#uses=1]
+ %3 = bitcast <2 x i64> %2 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ ret <4 x i32> %3
+}
OpenPOWER on IntegriCloud