Diffstat (limited to 'llvm/test/CodeGen/PowerPC/buildvec_canonicalize.ll')
-rw-r--r-- | llvm/test/CodeGen/PowerPC/buildvec_canonicalize.ll | 21 |
1 file changed, 10 insertions, 11 deletions
diff --git a/llvm/test/CodeGen/PowerPC/buildvec_canonicalize.ll b/llvm/test/CodeGen/PowerPC/buildvec_canonicalize.ll
index e06090c1925..66428c77e45 100644
--- a/llvm/test/CodeGen/PowerPC/buildvec_canonicalize.ll
+++ b/llvm/test/CodeGen/PowerPC/buildvec_canonicalize.ll
@@ -1,27 +1,26 @@
 ; There should be exactly one vxor here.
-; RUN: llvm-upgrade < %s | llvm-as | \
+; RUN: llvm-as < %s | \
 ; RUN: llc -march=ppc32 -mcpu=g5 --enable-unsafe-fp-math | \
 ; RUN: grep vxor | count 1
 
 ; There should be exactly one vsplti here.
-; RUN: llvm-upgrade < %s | llvm-as | \
+; RUN: llvm-as < %s | \
 ; RUN: llc -march=ppc32 -mcpu=g5 --enable-unsafe-fp-math | \
 ; RUN: grep vsplti | count 1
 
-
-void %VXOR(<4 x float>* %P1, <4 x int>* %P2, <4 x float>* %P3) {
-        %tmp = load <4 x float>* %P3
-        %tmp3 = load <4 x float>* %P1
-        %tmp4 = mul <4 x float> %tmp, %tmp3
+define void @VXOR(<4 x float>* %P1, <4 x i32>* %P2, <4 x float>* %P3) {
+        %tmp = load <4 x float>* %P3            ; <<4 x float>> [#uses=1]
+        %tmp3 = load <4 x float>* %P1           ; <<4 x float>> [#uses=1]
+        %tmp4 = mul <4 x float> %tmp, %tmp3     ; <<4 x float>> [#uses=1]
         store <4 x float> %tmp4, <4 x float>* %P3
         store <4 x float> zeroinitializer, <4 x float>* %P1
-        store <4 x int> zeroinitializer, <4 x int>* %P2
+        store <4 x i32> zeroinitializer, <4 x i32>* %P2
         ret void
 }
 
-void %VSPLTI(<4 x int>* %P2, <8 x short>* %P3) {
-        store <4 x int> cast (<16 x sbyte> < sbyte -1, sbyte -1, sbyte -1, sbyte -1, sbyte -1, sbyte -1, sbyte -1, sbyte -1, sbyte -1, sbyte -1, sbyte -1, sbyte -1, sbyte -1, sbyte -1, sbyte -1, sbyte -1 > to <4 x int>), <4 x int>* %P2
-        store <8 x short> < short -1, short -1, short -1, short -1, short -1, short -1, short -1, short -1 >, <8 x short>* %P3
+define void @VSPLTI(<4 x i32>* %P2, <8 x i16>* %P3) {
+        store <4 x i32> bitcast (<16 x i8> < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 > to <4 x i32>), <4 x i32>* %P2
+        store <8 x i16> < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >, <8 x i16>* %P3
         ret void
 }