Diffstat (limited to 'llvm/test/CodeGen/X86/combine-64bit-vec-binop.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/combine-64bit-vec-binop.ll | 23
1 file changed, 1 insertion(+), 22 deletions(-)
diff --git a/llvm/test/CodeGen/X86/combine-64bit-vec-binop.ll b/llvm/test/CodeGen/X86/combine-64bit-vec-binop.ll
index 2842cb1d9b6..2935a2095bb 100644
--- a/llvm/test/CodeGen/X86/combine-64bit-vec-binop.ll
+++ b/llvm/test/CodeGen/X86/combine-64bit-vec-binop.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s --check-prefix=SSE41
 
 define double @test1_add(double %A, double %B) {
@@ -6,7 +6,6 @@ define double @test1_add(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: paddd %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <2 x i32>
   %2 = bitcast double %B to <2 x i32>
   %add = add <2 x i32> %1, %2
@@ -19,7 +18,6 @@ define double @test2_add(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: paddw %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <4 x i16>
   %2 = bitcast double %B to <4 x i16>
   %add = add <4 x i16> %1, %2
@@ -32,7 +30,6 @@ define double @test3_add(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: paddb %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <8 x i8>
   %2 = bitcast double %B to <8 x i8>
   %add = add <8 x i8> %1, %2
@@ -45,7 +42,6 @@ define double @test1_sub(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: psubd %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <2 x i32>
   %2 = bitcast double %B to <2 x i32>
   %sub = sub <2 x i32> %1, %2
@@ -58,7 +54,6 @@ define double @test2_sub(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: psubw %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <4 x i16>
   %2 = bitcast double %B to <4 x i16>
   %sub = sub <4 x i16> %1, %2
@@ -71,7 +66,6 @@ define double @test3_sub(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: psubb %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <8 x i8>
   %2 = bitcast double %B to <8 x i8>
   %sub = sub <8 x i8> %1, %2
@@ -84,7 +78,6 @@ define double @test1_mul(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: pmulld %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <2 x i32>
   %2 = bitcast double %B to <2 x i32>
   %mul = mul <2 x i32> %1, %2
@@ -97,7 +90,6 @@ define double @test2_mul(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: pmullw %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <4 x i16>
   %2 = bitcast double %B to <4 x i16>
   %mul = mul <4 x i16> %1, %2
@@ -114,7 +106,6 @@ define double @test3_mul(double %A, double %B) {
 ; SSE41-NEXT: pmullw %xmm2, %xmm0
 ; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <8 x i8>
   %2 = bitcast double %B to <8 x i8>
   %mul = mul <8 x i8> %1, %2
@@ -127,7 +118,6 @@ define double @test1_and(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: andps %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <2 x i32>
   %2 = bitcast double %B to <2 x i32>
   %and = and <2 x i32> %1, %2
@@ -140,7 +130,6 @@ define double @test2_and(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: andps %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <4 x i16>
   %2 = bitcast double %B to <4 x i16>
   %and = and <4 x i16> %1, %2
@@ -153,7 +142,6 @@ define double @test3_and(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: andps %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <8 x i8>
   %2 = bitcast double %B to <8 x i8>
   %and = and <8 x i8> %1, %2
@@ -166,7 +154,6 @@ define double @test1_or(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: orps %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <2 x i32>
   %2 = bitcast double %B to <2 x i32>
   %or = or <2 x i32> %1, %2
@@ -179,7 +166,6 @@ define double @test2_or(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: orps %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <4 x i16>
   %2 = bitcast double %B to <4 x i16>
   %or = or <4 x i16> %1, %2
@@ -192,7 +178,6 @@ define double @test3_or(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: orps %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <8 x i8>
   %2 = bitcast double %B to <8 x i8>
   %or = or <8 x i8> %1, %2
@@ -205,7 +190,6 @@ define double @test1_xor(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: xorps %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <2 x i32>
   %2 = bitcast double %B to <2 x i32>
   %xor = xor <2 x i32> %1, %2
@@ -218,7 +202,6 @@ define double @test2_xor(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: xorps %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <4 x i16>
   %2 = bitcast double %B to <4 x i16>
   %xor = xor <4 x i16> %1, %2
@@ -231,7 +214,6 @@ define double @test3_xor(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: xorps %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <8 x i8>
   %2 = bitcast double %B to <8 x i8>
   %xor = xor <8 x i8> %1, %2
@@ -244,7 +226,6 @@ define double @test_fadd(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: addps %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <2 x float>
   %2 = bitcast double %B to <2 x float>
   %add = fadd <2 x float> %1, %2
@@ -257,7 +238,6 @@ define double @test_fsub(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: subps %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <2 x float>
   %2 = bitcast double %B to <2 x float>
   %sub = fsub <2 x float> %1, %2
@@ -270,7 +250,6 @@ define double @test_fmul(double %A, double %B) {
 ; SSE41: # BB#0:
 ; SSE41-NEXT: mulps %xmm1, %xmm0
 ; SSE41-NEXT: retq
-;
   %1 = bitcast double %A to <2 x float>
   %2 = bitcast double %B to <2 x float>
   %mul = fmul <2 x float> %1, %2
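
For reference, the RUN line above can be reproduced by hand, and the SSE41 assertions can be regenerated with the script named in the updated NOTE line. The commands below are a minimal sketch, not taken from this commit: they assume the working directory is an llvm-project checkout root and that a locally built llc and FileCheck are on PATH; paths and script options may differ between LLVM revisions.

# Run the test manually, substituting the file path for %s in the RUN line:
llc < llvm/test/CodeGen/X86/combine-64bit-vec-binop.ll -mtriple=x86_64-unknown-unknown -mattr=sse4.1 \
  | FileCheck llvm/test/CodeGen/X86/combine-64bit-vec-binop.ll --check-prefix=SSE41

# Regenerate the autogenerated check lines in place (assumes 'llc' is found on PATH):
python llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/combine-64bit-vec-binop.ll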