-rw-r--r--   llvm/test/CodeGen/X86/vec_partial.ll     |  36
-rw-r--r--   llvm/test/CodeGen/X86/vec_reassociate.ll | 160
2 files changed, 135 insertions, 61 deletions
diff --git a/llvm/test/CodeGen/X86/vec_partial.ll b/llvm/test/CodeGen/X86/vec_partial.ll
index e5ac81add7f..ee15c2af6dd 100644
--- a/llvm/test/CodeGen/X86/vec_partial.ll
+++ b/llvm/test/CodeGen/X86/vec_partial.ll
@@ -1,12 +1,18 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
 
 ; PR11580
 define <3 x float> @addf3(<3 x float> %x) {
-; CHECK-LABEL: addf3:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: addps {{.*}}(%rip), %xmm0
-; CHECK-NEXT: retq
+; X86-LABEL: addf3:
+; X86: # BB#0: # %entry
+; X86-NEXT: addps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: addf3:
+; X64: # BB#0: # %entry
+; X64-NEXT: addps {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
 entry:
   %add = fadd <3 x float> %x, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
   ret <3 x float> %add
@@ -14,9 +20,13 @@ entry:
 
 ; PR11580
 define <4 x float> @cvtf3_f4(<3 x float> %x) {
-; CHECK-LABEL: cvtf3_f4:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: retq
+; X86-LABEL: cvtf3_f4:
+; X86: # BB#0: # %entry
+; X86-NEXT: retl
+;
+; X64-LABEL: cvtf3_f4:
+; X64: # BB#0: # %entry
+; X64-NEXT: retq
 entry:
   %extractVec = shufflevector <3 x float> %x, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
   ret <4 x float> %extractVec
@@ -24,9 +34,13 @@ entry:
 
 ; PR11580
 define <3 x float> @cvtf4_f3(<4 x float> %x) {
-; CHECK-LABEL: cvtf4_f3:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: retq
+; X86-LABEL: cvtf4_f3:
+; X86: # BB#0: # %entry
+; X86-NEXT: retl
+;
+; X64-LABEL: cvtf4_f3:
+; X64: # BB#0: # %entry
+; X64-NEXT: retq
 entry:
   %extractVec = shufflevector <4 x float> %x, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
   ret <3 x float> %extractVec
diff --git a/llvm/test/CodeGen/X86/vec_reassociate.ll b/llvm/test/CodeGen/X86/vec_reassociate.ll
index 0d3373528f5..5234b0c8a77 100644
--- a/llvm/test/CodeGen/X86/vec_reassociate.ll
+++ b/llvm/test/CodeGen/X86/vec_reassociate.ll
@@ -1,10 +1,17 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64
 
 define <4 x i32> @add_4i32(<4 x i32> %a0, <4 x i32> %a1) {
-  ;CHECK-LABEL: @add_4i32
-  ;CHECK: # BB#0:
-  ;CHECK-NEXT: paddd %xmm1, %xmm0
-  ;CHECK-NEXT: retq
+; X86-LABEL: add_4i32:
+; X86: # BB#0:
+; X86-NEXT: paddd %xmm1, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: add_4i32:
+; X64: # BB#0:
+; X64-NEXT: paddd %xmm1, %xmm0
+; X64-NEXT: retq
   %1 = add <4 x i32> %a0, <i32 1, i32 -2, i32 3, i32 -4>
   %2 = add <4 x i32> %a1, <i32 -1, i32 2, i32 -3, i32 4>
   %3 = add <4 x i32> %1, %2
@@ -12,10 +19,15 @@ define <4 x i32> @add_4i32(<4 x i32> %a0, <4 x i32> %a1) {
 }
 
 define <4 x i32> @add_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
-  ;CHECK-LABEL: @add_4i32_commute
-  ;CHECK: # BB#0:
-  ;CHECK-NEXT: paddd %xmm1, %xmm0
-  ;CHECK-NEXT: retq
+; X86-LABEL: add_4i32_commute:
+; X86: # BB#0:
+; X86-NEXT: paddd %xmm1, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: add_4i32_commute:
+; X64: # BB#0:
+; X64-NEXT: paddd %xmm1, %xmm0
+; X64-NEXT: retq
   %1 = add <4 x i32> <i32 1, i32 -2, i32 3, i32 -4>, %a0
   %2 = add <4 x i32> <i32 -1, i32 2, i32 -3, i32 4>, %a1
   %3 = add <4 x i32> %1, %2
@@ -23,11 +35,17 @@ define <4 x i32> @add_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
 }
 
 define <4 x i32> @mul_4i32(<4 x i32> %a0, <4 x i32> %a1) {
-  ;CHECK-LABEL: @mul_4i32
-  ;CHECK: # BB#0:
-  ;CHECK-NEXT: pmulld %xmm1, %xmm0
-  ;CHECK-NEXT: pmulld .LCPI2_0(%rip), %xmm0
-  ;CHECK-NEXT: retq
+; X86-LABEL: mul_4i32:
+; X86: # BB#0:
+; X86-NEXT: pmulld %xmm1, %xmm0
+; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_4i32:
+; X64: # BB#0:
+; X64-NEXT: pmulld %xmm1, %xmm0
+; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
   %1 = mul <4 x i32> %a0, <i32 1, i32 2, i32 3, i32 4>
   %2 = mul <4 x i32> %a1, <i32 4, i32 3, i32 2, i32 1>
   %3 = mul <4 x i32> %1, %2
@@ -35,11 +53,17 @@ define <4 x i32> @mul_4i32(<4 x i32> %a0, <4 x i32> %a1) {
 }
 
 define <4 x i32> @mul_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
-  ;CHECK-LABEL: @mul_4i32_commute
-  ;CHECK: # BB#0:
-  ;CHECK-NEXT: pmulld %xmm1, %xmm0
-  ;CHECK-NEXT: pmulld .LCPI3_0(%rip), %xmm0
-  ;CHECK-NEXT: retq
+; X86-LABEL: mul_4i32_commute:
+; X86: # BB#0:
+; X86-NEXT: pmulld %xmm1, %xmm0
+; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_4i32_commute:
+; X64: # BB#0:
+; X64-NEXT: pmulld %xmm1, %xmm0
+; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
   %1 = mul <4 x i32> <i32 1, i32 2, i32 3, i32 4>, %a0
   %2 = mul <4 x i32> <i32 4, i32 3, i32 2, i32 1>, %a1
   %3 = mul <4 x i32> %1, %2
@@ -47,11 +71,17 @@ define <4 x i32> @mul_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
 }
 
 define <4 x i32> @and_4i32(<4 x i32> %a0, <4 x i32> %a1) {
-  ;CHECK-LABEL: @and_4i32
-  ;CHECK: # BB#0:
-  ;CHECK-NEXT: andps %xmm1, %xmm0
-  ;CHECK-NEXT: andps .LCPI4_0(%rip), %xmm0
-  ;CHECK-NEXT: retq
+; X86-LABEL: and_4i32:
+; X86: # BB#0:
+; X86-NEXT: andps %xmm1, %xmm0
+; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: and_4i32:
+; X64: # BB#0:
+; X64-NEXT: andps %xmm1, %xmm0
+; X64-NEXT: andps {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
   %1 = and <4 x i32> %a0, <i32 -2, i32 -2, i32 3, i32 3>
   %2 = and <4 x i32> %a1, <i32 -1, i32 -1, i32 1, i32 1>
   %3 = and <4 x i32> %1, %2
@@ -59,11 +89,17 @@ define <4 x i32> @and_4i32(<4 x i32> %a0, <4 x i32> %a1) {
 }
 
 define <4 x i32> @and_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
-  ;CHECK-LABEL: @and_4i32_commute
-  ;CHECK: # BB#0:
-  ;CHECK-NEXT: andps %xmm1, %xmm0
-  ;CHECK-NEXT: andps .LCPI5_0(%rip), %xmm0
-  ;CHECK-NEXT: retq
+; X86-LABEL: and_4i32_commute:
+; X86: # BB#0:
+; X86-NEXT: andps %xmm1, %xmm0
+; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: and_4i32_commute:
+; X64: # BB#0:
+; X64-NEXT: andps %xmm1, %xmm0
+; X64-NEXT: andps {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
   %1 = and <4 x i32> <i32 -2, i32 -2, i32 3, i32 3>, %a0
   %2 = and <4 x i32> <i32 -1, i32 -1, i32 1, i32 1>, %a1
   %3 = and <4 x i32> %1, %2
@@ -71,11 +107,17 @@ define <4 x i32> @and_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
 }
 
 define <4 x i32> @or_4i32(<4 x i32> %a0, <4 x i32> %a1) {
-  ;CHECK-LABEL: @or_4i32
-  ;CHECK: # BB#0:
-  ;CHECK-NEXT: orps %xmm1, %xmm0
-  ;CHECK-NEXT: orps .LCPI6_0(%rip), %xmm0
-  ;CHECK-NEXT: retq
+; X86-LABEL: or_4i32:
+; X86: # BB#0:
+; X86-NEXT: orps %xmm1, %xmm0
+; X86-NEXT: orps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: or_4i32:
+; X64: # BB#0:
+; X64-NEXT: orps %xmm1, %xmm0
+; X64-NEXT: orps {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
   %1 = or <4 x i32> %a0, <i32 -2, i32 -2, i32 3, i32 3>
   %2 = or <4 x i32> %a1, <i32 -1, i32 -1, i32 1, i32 1>
   %3 = or <4 x i32> %1, %2
@@ -83,23 +125,35 @@ define <4 x i32> @or_4i32(<4 x i32> %a0, <4 x i32> %a1) {
 }
 
 define <4 x i32> @or_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
-  ;CHECK-LABEL: @or_4i32_commute
-  ;CHECK: # BB#0:
-  ;CHECK-NEXT: orps %xmm1, %xmm0
-  ;CHECK-NEXT: orps .LCPI7_0(%rip), %xmm0
-  ;CHECK-NEXT: retq
-  %1 = or <4 x i32> <i32 -2, i32 -2, i32 3, i32 3>, %a0
+; X86-LABEL: or_4i32_commute:
+; X86: # BB#0:
+; X86-NEXT: orps %xmm1, %xmm0
+; X86-NEXT: orps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: or_4i32_commute:
+; X64: # BB#0:
+; X64-NEXT: orps %xmm1, %xmm0
+; X64-NEXT: orps {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
+  %1 = or <4 x i32> <i32 -2, i32 -2, i32 3, i32 3>, %a0
   %2 = or <4 x i32> <i32 -1, i32 -1, i32 1, i32 1>, %a1
   %3 = or <4 x i32> %1, %2
   ret <4 x i32> %3
 }
 
 define <4 x i32> @xor_4i32(<4 x i32> %a0, <4 x i32> %a1) {
-  ;CHECK-LABEL: @xor_4i32
-  ;CHECK: # BB#0:
-  ;CHECK-NEXT: xorps %xmm1, %xmm0
-  ;CHECK-NEXT: xorps .LCPI8_0(%rip), %xmm0
-  ;CHECK-NEXT: retq
+; X86-LABEL: xor_4i32:
+; X86: # BB#0:
+; X86-NEXT: xorps %xmm1, %xmm0
+; X86-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: xor_4i32:
+; X64: # BB#0:
+; X64-NEXT: xorps %xmm1, %xmm0
+; X64-NEXT: xorps {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
   %1 = xor <4 x i32> %a0, <i32 -2, i32 -2, i32 3, i32 3>
   %2 = xor <4 x i32> %a1, <i32 -1, i32 -1, i32 1, i32 1>
   %3 = xor <4 x i32> %1, %2
@@ -107,11 +161,17 @@ define <4 x i32> @xor_4i32(<4 x i32> %a0, <4 x i32> %a1) {
 }
 
 define <4 x i32> @xor_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
-  ;CHECK-LABEL: @xor_4i32_commute
-  ;CHECK: # BB#0:
-  ;CHECK-NEXT: xorps %xmm1, %xmm0
-  ;CHECK-NEXT: xorps .LCPI9_0(%rip), %xmm0
-  ;CHECK-NEXT: retq
+; X86-LABEL: xor_4i32_commute:
+; X86: # BB#0:
+; X86-NEXT: xorps %xmm1, %xmm0
+; X86-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: xor_4i32_commute:
+; X64: # BB#0:
+; X64-NEXT: xorps %xmm1, %xmm0
+; X64-NEXT: xorps {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
   %1 = xor <4 x i32> <i32 -2, i32 -2, i32 3, i32 3>, %a0
   %2 = xor <4 x i32> <i32 -1, i32 -1, i32 1, i32 1>, %a1
   %3 = xor <4 x i32> %1, %2