| field | value | date |
|---|---|---|
| author | Craig Topper <craig.topper@intel.com> | 2019-11-11 15:51:00 -0800 |
| committer | Craig Topper <craig.topper@intel.com> | 2019-11-11 15:56:47 -0800 |
| commit | 9e5116f756f05b68e8394e392027dca7bc574559 (patch) | |
| tree | b76fe4fa719b1510693460fe7c869b8dabf9a712 | |
| parent | 774e829c29017d35e8af3b854f21c792caf30181 (diff) | |
| download | bcm5719-llvm-9e5116f756f05b68e8394e392027dca7bc574559.tar.gz, bcm5719-llvm-9e5116f756f05b68e8394e392027dca7bc574559.zip | |
[X86] Add avx512 command lines to vector-constrained-fp-intrinsics.ll. NFC
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll | 345 |

1 file changed, 234 insertions, 111 deletions
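The CHECK/AVX1/AVX512 blocks in the diff below are machine-generated, as the NOTE line at the top of the test says. A minimal sketch of regenerating them after adding the new RUN lines (the `build/bin/llc` path is an assumption about a local build directory):

```sh
# Rerun every RUN line in the test and rewrite its FileCheck assertions in place.
# build/bin/llc is a hypothetical path to a locally built llc.
python3 llvm/utils/update_llc_test_checks.py \
  --llc-binary build/bin/llc \
  llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
```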
```diff
diff --git a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
index a742d4aa2e7..2e01b10b320 100644
--- a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s
-; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx < %s | FileCheck --check-prefix=AVX %s
+; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512f < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512dq < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512DQ
 
 define <1 x float> @constrained_vector_fdiv_v1f32() #0 {
 ; CHECK-LABEL: constrained_vector_fdiv_v1f32:
@@ -119,11 +121,18 @@ define <4 x double> @constrained_vector_fdiv_v4f64() #0 {
 ; CHECK-NEXT: divpd %xmm2, %xmm1
 ; CHECK-NEXT: retq
 ;
-; AVX-LABEL: constrained_vector_fdiv_v4f64:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovapd {{.*#+}} ymm0 = [1.0E+0,2.0E+0,3.0E+0,4.0E+0]
-; AVX-NEXT: vdivpd {{.*}}(%rip), %ymm0, %ymm0
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fdiv_v4f64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vmovapd {{.*#+}} ymm0 = [1.0E+0,2.0E+0,3.0E+0,4.0E+0]
+; AVX1-NEXT: vdivpd {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fdiv_v4f64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vbroadcastsd {{.*#+}} ymm0 = [1.0E+1,1.0E+1,1.0E+1,1.0E+1]
+; AVX512-NEXT: vmovapd {{.*#+}} ymm1 = [1.0E+0,2.0E+0,3.0E+0,4.0E+0]
+; AVX512-NEXT: vdivpd %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
 entry:
   %div = call <4 x double> @llvm.experimental.constrained.fdiv.v4f64(
            <4 x double> <double 1.000000e+00, double 2.000000e+00,
@@ -504,11 +513,17 @@ define <4 x double> @constrained_vector_fmul_v4f64() #0 {
 ; CHECK-NEXT: mulpd {{.*}}(%rip), %xmm1
 ; CHECK-NEXT: retq
 ;
-; AVX-LABEL: constrained_vector_fmul_v4f64:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovapd {{.*#+}} ymm0 = [1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308]
-; AVX-NEXT: vmulpd {{.*}}(%rip), %ymm0, %ymm0
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fmul_v4f64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vmovapd {{.*#+}} ymm0 = [1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308]
+; AVX1-NEXT: vmulpd {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fmul_v4f64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vbroadcastsd {{.*#+}} ymm0 = [1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308]
+; AVX512-NEXT: vmulpd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: retq
 entry:
   %mul = call <4 x double> @llvm.experimental.constrained.fmul.v4f64(
            <4 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF,
@@ -4024,17 +4039,29 @@ define <3 x i64> @constrained_vector_fptosi_v3i64_v3f32() #0 {
 ; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rcx
 ; CHECK-NEXT: retq
 ;
-; AVX-LABEL: constrained_vector_fptosi_v3i64_v3f32:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm0
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm1
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm1
-; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptosi_v3i64_v3f32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm0
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptosi_v3i64_v3f32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm0
+; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
 entry:
   %result = call <3 x i64> @llvm.experimental.constrained.fptosi.v3i64.v3f32(
            <3 x float><float 42.0, float 43.0,
@@ -4058,20 +4085,35 @@ define <4 x i64> @constrained_vector_fptosi_v4i64_v4f32() #0 {
 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; CHECK-NEXT: retq
 ;
-; AVX-LABEL: constrained_vector_fptosi_v4i64_v4f32:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm0
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm1
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm1
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm2
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptosi_v4i64_v4f32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm0
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm2
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptosi_v4i64_v4f32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm0
+; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm2
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
 entry:
   %result = call <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(
            <4 x float><float 42.0, float 43.0,
@@ -4237,17 +4279,29 @@ define <3 x i64> @constrained_vector_fptosi_v3i64_v3f64() #0 {
 ; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rcx
 ; CHECK-NEXT: retq
 ;
-; AVX-LABEL: constrained_vector_fptosi_v3i64_v3f64:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm0
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm1
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm1
-; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptosi_v3i64_v3f64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm0
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptosi_v3i64_v3f64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm0
+; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
 entry:
   %result = call <3 x i64> @llvm.experimental.constrained.fptosi.v3i64.v3f64(
            <3 x double><double 42.1, double 42.2,
@@ -4271,20 +4325,35 @@ define <4 x i64> @constrained_vector_fptosi_v4i64_v4f64() #0 {
 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; CHECK-NEXT: retq
 ;
-; AVX-LABEL: constrained_vector_fptosi_v4i64_v4f64:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm0
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm1
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm1
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm2
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptosi_v4i64_v4f64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm0
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm2
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptosi_v4i64_v4f64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm0
+; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm2
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
 entry:
   %result = call <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f64(
            <4 x double><double 42.1, double 42.2,
@@ -4449,17 +4518,29 @@ define <3 x i64> @constrained_vector_fptoui_v3i64_v3f32() #0 {
 ; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rcx
 ; CHECK-NEXT: retq
 ;
-; AVX-LABEL: constrained_vector_fptoui_v3i64_v3f32:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm0
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm1
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm1
-; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptoui_v3i64_v3f32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm0
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v3i64_v3f32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm0
+; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
 entry:
   %result = call <3 x i64> @llvm.experimental.constrained.fptoui.v3i64.v3f32(
            <3 x float><float 42.0, float 43.0,
@@ -4483,20 +4564,35 @@ define <4 x i64> @constrained_vector_fptoui_v4i64_v4f32() #0 {
 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; CHECK-NEXT: retq
 ;
-; AVX-LABEL: constrained_vector_fptoui_v4i64_v4f32:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm0
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm1
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm1
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm2
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptoui_v4i64_v4f32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm0
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm2
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v4i64_v4f32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm0
+; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm2
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
 entry:
   %result = call <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(
            <4 x float><float 42.0, float 43.0,
@@ -4661,17 +4757,29 @@ define <3 x i64> @constrained_vector_fptoui_v3i64_v3f64() #0 {
 ; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rcx
 ; CHECK-NEXT: retq
 ;
-; AVX-LABEL: constrained_vector_fptoui_v3i64_v3f64:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm0
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm1
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm1
-; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptoui_v3i64_v3f64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm0
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v3i64_v3f64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm0
+; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
 entry:
   %result = call <3 x i64> @llvm.experimental.constrained.fptoui.v3i64.v3f64(
            <3 x double><double 42.1, double 42.2,
@@ -4695,20 +4803,35 @@ define <4 x i64> @constrained_vector_fptoui_v4i64_v4f64() #0 {
 ; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; CHECK-NEXT: retq
 ;
-; AVX-LABEL: constrained_vector_fptoui_v4i64_v4f64:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm0
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm1
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm1
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm2
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptoui_v4i64_v4f64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm0
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm2
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v4i64_v4f64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm0
+; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm2
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
 entry:
   %result = call <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f64(
            <4 x double><double 42.1, double 42.2,
```
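For reference, any one of the new RUN lines can be reproduced outside of lit by expanding `%s` to the test path by hand; a sketch assuming `llc` and `FileCheck` from a local LLVM build are on `PATH`:

```sh
# Mirror the added avx512f RUN line: compile with llc, then let FileCheck
# verify the assembly against the AVX/AVX512/AVX512F check lines in the test.
llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512f \
    < llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll \
  | FileCheck llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll \
      --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
```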

