path: root/clang/test
author    Tomasz Krupa <tomasz.krupa@intel.com>    2018-06-15 18:05:59 +0000
committer Tomasz Krupa <tomasz.krupa@intel.com>    2018-06-15 18:05:59 +0000
commit  f1792bb3d6494c936a1d16483cee97688d473ac3 (patch)
tree    c42dc0f7f29c08e04a2c5c776de88307c874af1f /clang/test
parent  bcaab53d479e7005ee69e06321bbb493f9b7f5e6 (diff)
[X86] Lowering sqrt intrinsics to native IR
Reviewers: craig.topper, spatel, RKSimon, igorb, uriel.k
Reviewed By: craig.topper
Subscribers: tkrupa, cfe-commits
Differential Revision: https://reviews.llvm.org/D41168
llvm-svn: 334850
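
In practice, clang now emits the target-independent @llvm.sqrt.* intrinsics for these builtins: a plain vector call for the unmasked forms, a call followed by a mask select for the masked forms, and an extract/insert pair around a scalar @llvm.sqrt.f32/f64 call for the ss/sd forms, replacing the x86-specific @llvm.x86.*.sqrt.* intrinsics. A minimal sketch of the IR shape the updated tests check for (value names and helper function names are illustrative, not verbatim clang output; the masked form assumes a target with -mavx512f -mavx512vl):

/* sqrt_lowering_sketch.c -- illustrative only; the IR in the comments
   mirrors the CHECK patterns below, with made-up value names. */
#include <immintrin.h>

__m128d masked_vector_sqrt(__m128d src, __mmask8 k, __m128d a) {
  /* Before: call to the x86-specific masked sqrt intrinsic.
     After:  %s = call <2 x double> @llvm.sqrt.v2f64(<2 x double> %a)
             %r = select <2 x i1> %k2, <2 x double> %s, <2 x double> %src */
  return _mm_mask_sqrt_pd(src, k, a);
}

__m128 scalar_sqrt(__m128 x) {
  /* Before: call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %x)
     After:  %e = extractelement <4 x float> %x, i64 0
             %s = call float @llvm.sqrt.f32(float %e)
             %r = insertelement <4 x float> %x, float %s, i64 0 */
  return _mm_sqrt_ss(x);
}
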
Diffstat (limited to 'clang/test')
-rw-r--r--  clang/test/CodeGen/avx-builtins.c       |   4
-rw-r--r--  clang/test/CodeGen/avx512f-builtins.c   | 124
-rw-r--r--  clang/test/CodeGen/avx512vl-builtins.c  |  16
-rw-r--r--  clang/test/CodeGen/sse-builtins.c       |   6
-rw-r--r--  clang/test/CodeGen/sse2-builtins.c      |  10
5 files changed, 120 insertions(+), 40 deletions(-)
diff --git a/clang/test/CodeGen/avx-builtins.c b/clang/test/CodeGen/avx-builtins.c
index 5743e36c1de..bd72dcaae54 100644
--- a/clang/test/CodeGen/avx-builtins.c
+++ b/clang/test/CodeGen/avx-builtins.c
@@ -1116,13 +1116,13 @@ __m256 test_mm256_shuffle_ps(__m256 A, __m256 B) {
__m256d test_mm256_sqrt_pd(__m256d A) {
// CHECK-LABEL: test_mm256_sqrt_pd
- // CHECK: call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %{{.*}})
+ // CHECK: call <4 x double> @llvm.sqrt.v4f64(<4 x double> %{{.*}})
return _mm256_sqrt_pd(A);
}
__m256 test_mm256_sqrt_ps(__m256 A) {
// CHECK-LABEL: test_mm256_sqrt_ps
- // CHECK: call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %{{.*}})
+ // CHECK: call <8 x float> @llvm.sqrt.v8f32(<8 x float> %{{.*}})
return _mm256_sqrt_ps(A);
}
diff --git a/clang/test/CodeGen/avx512f-builtins.c b/clang/test/CodeGen/avx512f-builtins.c
index 2d5b95940fb..3942daa5c53 100644
--- a/clang/test/CodeGen/avx512f-builtins.c
+++ b/clang/test/CodeGen/avx512f-builtins.c
@@ -5,84 +5,100 @@
__m512d test_mm512_sqrt_pd(__m512d a)
{
// CHECK-LABEL: @test_mm512_sqrt_pd
- // CHECK: @llvm.x86.avx512.mask.sqrt.pd.512
+ // CHECK: call <8 x double> @llvm.sqrt.v8f64(<8 x double> %{{.*}})
return _mm512_sqrt_pd(a);
}
__m512d test_mm512_mask_sqrt_pd (__m512d __W, __mmask8 __U, __m512d __A)
{
// CHECK-LABEL: @test_mm512_mask_sqrt_pd
- // CHECK: @llvm.x86.avx512.mask.sqrt.pd.512
+ // CHECK: call <8 x double> @llvm.sqrt.v8f64(<8 x double> %{{.*}})
+ // CHECK: bitcast i8 %{{.*}} to <8 x i1>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_sqrt_pd (__W,__U,__A);
}
__m512d test_mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A)
{
// CHECK-LABEL: @test_mm512_maskz_sqrt_pd
- // CHECK: @llvm.x86.avx512.mask.sqrt.pd.512
+ // CHECK: call <8 x double> @llvm.sqrt.v8f64(<8 x double> %{{.*}})
+ // CHECK: bitcast i8 %{{.*}} to <8 x i1>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> {{.*}}
return _mm512_maskz_sqrt_pd (__U,__A);
}
__m512d test_mm512_mask_sqrt_round_pd(__m512d __W,__mmask8 __U,__m512d __A)
{
// CHECK-LABEL: @test_mm512_mask_sqrt_round_pd
- // CHECK: @llvm.x86.avx512.mask.sqrt.pd.512
+ // CHECK: call <8 x double> @llvm.sqrt.v8f64(<8 x double> %{{.*}})
+ // CHECK: bitcast i8 %{{.*}} to <8 x i1>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_sqrt_round_pd(__W,__U,__A,_MM_FROUND_CUR_DIRECTION);
}
__m512d test_mm512_maskz_sqrt_round_pd(__mmask8 __U,__m512d __A)
{
// CHECK-LABEL: @test_mm512_maskz_sqrt_round_pd
- // CHECK: @llvm.x86.avx512.mask.sqrt.pd.512
+ // CHECK: call <8 x double> @llvm.sqrt.v8f64(<8 x double> %{{.*}})
+ // CHECK: bitcast i8 %{{.*}} to <8 x i1>
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> {{.*}}
return _mm512_maskz_sqrt_round_pd(__U,__A,_MM_FROUND_CUR_DIRECTION);
}
__m512d test_mm512_sqrt_round_pd(__m512d __A)
{
// CHECK-LABEL: @test_mm512_sqrt_round_pd
- // CHECK: @llvm.x86.avx512.mask.sqrt.pd.512
+ // CHECK: call <8 x double> @llvm.sqrt.v8f64(<8 x double> %{{.*}})
return _mm512_sqrt_round_pd(__A,_MM_FROUND_CUR_DIRECTION);
}
__m512 test_mm512_sqrt_ps(__m512 a)
{
// CHECK-LABEL: @test_mm512_sqrt_ps
- // CHECK: @llvm.x86.avx512.mask.sqrt.ps.512
+ // CHECK: call <16 x float> @llvm.sqrt.v16f32(<16 x float> %{{.*}})
return _mm512_sqrt_ps(a);
}
__m512 test_mm512_mask_sqrt_ps(__m512 __W, __mmask16 __U, __m512 __A)
{
// CHECK-LABEL: @test_mm512_mask_sqrt_ps
- // CHECK: @llvm.x86.avx512.mask.sqrt.ps.512
+ // CHECK: call <16 x float> @llvm.sqrt.v16f32(<16 x float> %{{.*}})
+ // CHECK: bitcast i16 %{{.*}} to <16 x i1>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_sqrt_ps( __W, __U, __A);
}
__m512 test_mm512_maskz_sqrt_ps( __mmask16 __U, __m512 __A)
{
// CHECK-LABEL: @test_mm512_maskz_sqrt_ps
- // CHECK: @llvm.x86.avx512.mask.sqrt.ps.512
+ // CHECK: call <16 x float> @llvm.sqrt.v16f32(<16 x float> %{{.*}})
+ // CHECK: bitcast i16 %{{.*}} to <16 x i1>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> {{.*}}
return _mm512_maskz_sqrt_ps(__U ,__A);
}
__m512 test_mm512_mask_sqrt_round_ps(__m512 __W,__mmask16 __U,__m512 __A)
{
// CHECK-LABEL: @test_mm512_mask_sqrt_round_ps
- // CHECK: @llvm.x86.avx512.mask.sqrt.ps.512
+ // CHECK: call <16 x float> @llvm.sqrt.v16f32(<16 x float> %{{.*}})
+ // CHECK: bitcast i16 %{{.*}} to <16 x i1>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_sqrt_round_ps(__W,__U,__A,_MM_FROUND_CUR_DIRECTION);
}
__m512 test_mm512_maskz_sqrt_round_ps(__mmask16 __U,__m512 __A)
{
// CHECK-LABEL: @test_mm512_maskz_sqrt_round_ps
- // CHECK: @llvm.x86.avx512.mask.sqrt.ps.512
+ // CHECK: call <16 x float> @llvm.sqrt.v16f32(<16 x float> %{{.*}})
+ // CHECK: bitcast i16 %{{.*}} to <16 x i1>
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> {{.*}}
return _mm512_maskz_sqrt_round_ps(__U,__A,_MM_FROUND_CUR_DIRECTION);
}
__m512 test_mm512_sqrt_round_ps(__m512 __A)
{
// CHECK-LABEL: @test_mm512_sqrt_round_ps
- // CHECK: @llvm.x86.avx512.mask.sqrt.ps.512
+ // CHECK: call <16 x float> @llvm.sqrt.v16f32(<16 x float> %{{.*}})
return _mm512_sqrt_round_ps(__A,_MM_FROUND_CUR_DIRECTION);
}
@@ -5088,53 +5104,117 @@ __m512 test_mm512_maskz_shuffle_ps(__mmask16 __U, __m512 __M, __m512 __V) {
__m128d test_mm_sqrt_round_sd(__m128d __A, __m128d __B) {
// CHECK-LABEL: @test_mm_sqrt_round_sd
- // CHECK: @llvm.x86.avx512.mask.sqrt.sd
+ // CHECK: extractelement <2 x double> %{{.*}}, i64 0
+ // CHECK: extractelement <2 x double> %{{.*}}, i64 0
+ // CHECK: call double @llvm.sqrt.f64(double %{{.*}})
+ // CHECK: select i1 {{.*}}, double {{.*}}, double {{.*}}
+ // CHECK: insertelement <2 x double> %{{.*}}, double {{.*}}, i64 0
return _mm_sqrt_round_sd(__A, __B, 4);
}
__m128d test_mm_mask_sqrt_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){
- // CHECK: @llvm.x86.avx512.mask.sqrt.sd
+ // CHECK-LABEL: @test_mm_mask_sqrt_sd
+ // CHECK: extractelement <2 x double> %{{.*}}, i64 0
+ // CHECK: extractelement <2 x double> %{{.*}}, i64 0
+ // CHECK: bitcast i8 %{{.*}} to <8 x i1>
+ // CHECK: extractelement <8 x i1> %{{.*}}, i64 0
+ // CHECK: call double @llvm.sqrt.f64(double %{{.*}})
+ // CHECK: select i1 {{.*}}, double {{.*}}, double {{.*}}
+ // CHECK: insertelement <2 x double> %{{.*}}, double {{.*}}, i64 0
return _mm_mask_sqrt_sd(__W,__U,__A,__B);
}
__m128d test_mm_mask_sqrt_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){
- // CHECK: @llvm.x86.avx512.mask.sqrt.sd
+ // CHECK-LABEL: @test_mm_mask_sqrt_round_sd
+ // CHECK: extractelement <2 x double> %{{.*}}, i64 0
+ // CHECK: extractelement <2 x double> %{{.*}}, i64 0
+ // CHECK: bitcast i8 %{{.*}} to <8 x i1>
+ // CHECK: extractelement <8 x i1> %{{.*}}, i64 0
+ // CHECK: call double @llvm.sqrt.f64(double %{{.*}})
+ // CHECK: select i1 {{.*}}, double {{.*}}, double {{.*}}
+ // CHECK: insertelement <2 x double> %{{.*}}, double {{.*}}, i64 0
return _mm_mask_sqrt_round_sd(__W,__U,__A,__B,_MM_FROUND_CUR_DIRECTION);
}
__m128d test_mm_maskz_sqrt_sd(__mmask8 __U, __m128d __A, __m128d __B){
- // CHECK: @llvm.x86.avx512.mask.sqrt.sd
+ // CHECK-LABEL: @test_mm_maskz_sqrt_sd
+ // CHECK: extractelement <2 x double> %{{.*}}, i64 0
+ // CHECK: extractelement <2 x double> %{{.*}}, i64 0
+ // CHECK: bitcast i8 %{{.*}} to <8 x i1>
+ // CHECK: extractelement <8 x i1> %{{.*}}, i64 0
+ // CHECK: call double @llvm.sqrt.f64(double %{{.*}})
+ // CHECK: select i1 {{.*}}, double {{.*}}, double {{.*}}
+ // CHECK: insertelement <2 x double> %{{.*}}, double {{.*}}, i64 0
return _mm_maskz_sqrt_sd(__U,__A,__B);
}
__m128d test_mm_maskz_sqrt_round_sd(__mmask8 __U, __m128d __A, __m128d __B){
- // CHECK: @llvm.x86.avx512.mask.sqrt.sd
+ // CHECK-LABEL: @test_mm_maskz_sqrt_round_sd
+ // CHECK: extractelement <2 x double> %{{.*}}, i64 0
+ // CHECK: extractelement <2 x double> %{{.*}}, i64 0
+ // CHECK: bitcast i8 %{{.*}} to <8 x i1>
+ // CHECK: extractelement <8 x i1> %{{.*}}, i64 0
+ // CHECK: call double @llvm.sqrt.f64(double %{{.*}})
+ // CHECK: select i1 {{.*}}, double {{.*}}, double {{.*}}
+ // CHECK: insertelement <2 x double> %{{.*}}, double {{.*}}, i64 0
return _mm_maskz_sqrt_round_sd(__U,__A,__B,_MM_FROUND_CUR_DIRECTION);
}
__m128 test_mm_sqrt_round_ss(__m128 __A, __m128 __B) {
// CHECK-LABEL: @test_mm_sqrt_round_ss
- // CHECK: @llvm.x86.avx512.mask.sqrt.ss
+ // CHECK: extractelement <4 x float> %{{.*}}, i64 0
+ // CHECK: extractelement <4 x float> %{{.*}}, i64 0
+ // CHECK: call float @llvm.sqrt.f32(float %{{.*}})
+ // CHECK: select i1 {{.*}}, float {{.*}}, float {{.*}}
+ // CHECK: insertelement <4 x float> %{{.*}}, float {{.*}}, i64 0
return _mm_sqrt_round_ss(__A, __B, 4);
}
__m128 test_mm_mask_sqrt_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){
- // CHECK: @llvm.x86.avx512.mask.sqrt.ss
+ // CHECK-LABEL: @test_mm_mask_sqrt_ss
+ // CHECK: extractelement <4 x float> %{{.*}}, i64 0
+ // CHECK: extractelement <4 x float> %{{.*}}, i64 0
+ // CHECK: bitcast i8 %{{.*}} to <8 x i1>
+ // CHECK: extractelement <8 x i1> %{{.*}}, i64 0
+ // CHECK: call float @llvm.sqrt.f32(float %{{.*}})
+ // CHECK: select i1 {{.*}}, float {{.*}}, float {{.*}}
+ // CHECK: insertelement <4 x float> %{{.*}}, float {{.*}}, i64 0
return _mm_mask_sqrt_ss(__W,__U,__A,__B);
}
__m128 test_mm_mask_sqrt_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){
- // CHECK: @llvm.x86.avx512.mask.sqrt.ss
+ // CHECK-LABEL: @test_mm_mask_sqrt_round_ss
+ // CHECK: extractelement <4 x float> %{{.*}}, i64 0
+ // CHECK: extractelement <4 x float> %{{.*}}, i64 0
+ // CHECK: bitcast i8 %{{.*}} to <8 x i1>
+ // CHECK: extractelement <8 x i1> %{{.*}}, i64 0
+ // CHECK: call float @llvm.sqrt.f32(float %{{.*}})
+ // CHECK: select i1 {{.*}}, float {{.*}}, float {{.*}}
+ // CHECK: insertelement <4 x float> %{{.*}}, float {{.*}}, i64 0
return _mm_mask_sqrt_round_ss(__W,__U,__A,__B,_MM_FROUND_CUR_DIRECTION);
}
__m128 test_mm_maskz_sqrt_ss(__mmask8 __U, __m128 __A, __m128 __B){
- // CHECK: @llvm.x86.avx512.mask.sqrt.ss
+ // CHECK-LABEL: @test_mm_maskz_sqrt_ss
+ // CHECK: extractelement <4 x float> %{{.*}}, i64 0
+ // CHECK: extractelement <4 x float> %{{.*}}, i64 0
+ // CHECK: bitcast i8 %{{.*}} to <8 x i1>
+ // CHECK: extractelement <8 x i1> %{{.*}}, i64 0
+ // CHECK: call float @llvm.sqrt.f32(float %{{.*}})
+ // CHECK: select i1 {{.*}}, float {{.*}}, float {{.*}}
+ // CHECK: insertelement <4 x float> %{{.*}}, float {{.*}}, i64 0
return _mm_maskz_sqrt_ss(__U,__A,__B);
}
__m128 test_mm_maskz_sqrt_round_ss(__mmask8 __U, __m128 __A, __m128 __B){
- // CHECK: @llvm.x86.avx512.mask.sqrt.ss
+ // CHECK-LABEL: @test_mm_maskz_sqrt_round_ss
+ // CHECK: extractelement <4 x float> %{{.*}}, i64 0
+ // CHECK: extractelement <4 x float> %{{.*}}, i64 0
+ // CHECK: bitcast i8 %{{.*}} to <8 x i1>
+ // CHECK: extractelement <8 x i1> %{{.*}}, i64 0
+ // CHECK: call float @llvm.sqrt.f32(float %{{.*}})
+ // CHECK: select i1 {{.*}}, float {{.*}}, float {{.*}}
+ // CHECK: insertelement <4 x float> %{{.*}}, float {{.*}}, i64 0
return _mm_maskz_sqrt_round_ss(__U,__A,__B,_MM_FROUND_CUR_DIRECTION);
}
diff --git a/clang/test/CodeGen/avx512vl-builtins.c b/clang/test/CodeGen/avx512vl-builtins.c
index 3cf63b17bc4..295796089f3 100644
--- a/clang/test/CodeGen/avx512vl-builtins.c
+++ b/clang/test/CodeGen/avx512vl-builtins.c
@@ -3506,49 +3506,49 @@ void test_mm256_mask_i32scatter_epi32(int *__addr, __mmask8 __mask, __m256i __i
}
__m128d test_mm_mask_sqrt_pd(__m128d __W, __mmask8 __U, __m128d __A) {
// CHECK-LABEL: @test_mm_mask_sqrt_pd
- // CHECK: @llvm.x86.sse2.sqrt.pd
+ // CHECK: @llvm.sqrt.v2f64
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_sqrt_pd(__W,__U,__A);
}
__m128d test_mm_maskz_sqrt_pd(__mmask8 __U, __m128d __A) {
// CHECK-LABEL: @test_mm_maskz_sqrt_pd
- // CHECK: @llvm.x86.sse2.sqrt.pd
+ // CHECK: @llvm.sqrt.v2f64
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_sqrt_pd(__U,__A);
}
__m256d test_mm256_mask_sqrt_pd(__m256d __W, __mmask8 __U, __m256d __A) {
// CHECK-LABEL: @test_mm256_mask_sqrt_pd
- // CHECK: @llvm.x86.avx.sqrt.pd.256
+ // CHECK: @llvm.sqrt.v4f64
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_sqrt_pd(__W,__U,__A);
}
__m256d test_mm256_maskz_sqrt_pd(__mmask8 __U, __m256d __A) {
// CHECK-LABEL: @test_mm256_maskz_sqrt_pd
- // CHECK: @llvm.x86.avx.sqrt.pd.256
+ // CHECK: @llvm.sqrt.v4f64
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_sqrt_pd(__U,__A);
}
__m128 test_mm_mask_sqrt_ps(__m128 __W, __mmask8 __U, __m128 __A) {
// CHECK-LABEL: @test_mm_mask_sqrt_ps
- // CHECK: @llvm.x86.sse.sqrt.ps
+ // CHECK: @llvm.sqrt.v4f32
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_sqrt_ps(__W,__U,__A);
}
__m128 test_mm_maskz_sqrt_ps(__mmask8 __U, __m128 __A) {
// CHECK-LABEL: @test_mm_maskz_sqrt_ps
- // CHECK: @llvm.x86.sse.sqrt.ps
+ // CHECK: @llvm.sqrt.v4f32
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_sqrt_ps(__U,__A);
}
__m256 test_mm256_mask_sqrt_ps(__m256 __W, __mmask8 __U, __m256 __A) {
// CHECK-LABEL: @test_mm256_mask_sqrt_ps
- // CHECK: @llvm.x86.avx.sqrt.ps.256
+ // CHECK: @llvm.sqrt.v8f32
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_sqrt_ps(__W,__U,__A);
}
__m256 test_mm256_maskz_sqrt_ps(__mmask8 __U, __m256 __A) {
// CHECK-LABEL: @test_mm256_maskz_sqrt_ps
- // CHECK: @llvm.x86.avx.sqrt.ps.256
+ // CHECK: @llvm.sqrt.v8f32
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_sqrt_ps(__U,__A);
}
diff --git a/clang/test/CodeGen/sse-builtins.c b/clang/test/CodeGen/sse-builtins.c
index 18e51349fb5..b7c7a7fc7e4 100644
--- a/clang/test/CodeGen/sse-builtins.c
+++ b/clang/test/CodeGen/sse-builtins.c
@@ -639,13 +639,15 @@ __m128 test_mm_shuffle_ps(__m128 A, __m128 B) {
__m128 test_mm_sqrt_ps(__m128 x) {
// CHECK-LABEL: test_mm_sqrt_ps
- // CHECK: call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> {{.*}})
+ // CHECK: call <4 x float> @llvm.sqrt.v4f32(<4 x float> {{.*}})
return _mm_sqrt_ps(x);
}
__m128 test_sqrt_ss(__m128 x) {
// CHECK: define {{.*}} @test_sqrt_ss
- // CHECK: call <4 x float> @llvm.x86.sse.sqrt.ss
+ // CHECK: extractelement <4 x float> {{.*}}, i64 0
+ // CHECK: call float @llvm.sqrt.f32(float {{.*}})
+ // CHECK: insertelement <4 x float> {{.*}}, float {{.*}}, i64 0
return _mm_sqrt_ss(x);
}
diff --git a/clang/test/CodeGen/sse2-builtins.c b/clang/test/CodeGen/sse2-builtins.c
index 0d79aabbafb..35554bc1876 100644
--- a/clang/test/CodeGen/sse2-builtins.c
+++ b/clang/test/CodeGen/sse2-builtins.c
@@ -1188,17 +1188,15 @@ __m128i test_mm_slli_si128_2(__m128i A) {
__m128d test_mm_sqrt_pd(__m128d A) {
// CHECK-LABEL: test_mm_sqrt_pd
- // CHECK: call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %{{.*}})
+ // CHECK: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{.*}})
return _mm_sqrt_pd(A);
}
__m128d test_mm_sqrt_sd(__m128d A, __m128d B) {
// CHECK-LABEL: test_mm_sqrt_sd
- // CHECK: call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %{{.*}})
- // CHECK: extractelement <2 x double> %{{.*}}, i32 0
- // CHECK: insertelement <2 x double> undef, double %{{.*}}, i32 0
- // CHECK: extractelement <2 x double> %{{.*}}, i32 1
- // CHECK: insertelement <2 x double> %{{.*}}, double %{{.*}}, i32 1
+ // CHECK: extractelement <2 x double> %{{.*}}, i64 0
+ // CHECK: call double @llvm.sqrt.f64(double {{.*}})
+ // CHECK: insertelement <2 x double> %{{.*}}, double %{{.*}}, i64 0
return _mm_sqrt_sd(A, B);
}