author    Craig Topper <craig.topper@gmail.com>  2016-08-01 04:29:16 +0000
committer Craig Topper <craig.topper@gmail.com>  2016-08-01 04:29:16 +0000
commit    9161e4ec220478ef03d78e5e1979cf5f0c8da0a6 (patch)
tree      d523965137fed5659576d92bb609f7d5058b3a6b /llvm/test
parent    394617631474d0dea5b6ada812e68fd37a3e138c (diff)
[AVX512] Replace scalar fp arithmetic intrinsics with native IR in an AVX512 test. The intrinsics aren't lowered to AVX512 instructions.

The intrinsics really should be removed and autoupgraded.

llvm-svn: 277320
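For context, every hunk below applies the same rewrite: the target-specific scalar intrinsic call is replaced with an extractelement / scalar fp op / insertelement sequence, which the X86 backend can match to the scalar AVX512 instruction and fold from the stack (see the vaddsd/vsubsd/vmulsd/vdivsd CHECK lines). A minimal sketch of the before/after pattern, using illustrative function names (@old_pattern and @new_pattern are not from the test):

; Before: scalar add expressed via the SSE2 intrinsic, which is not lowered
; to an AVX512 instruction.
declare <2 x double> @llvm.x86.sse2.add.sd(<2 x double>, <2 x double>) nounwind readnone

define <2 x double> @old_pattern(<2 x double> %a0, <2 x double> %a1) {
  %r = call <2 x double> @llvm.x86.sse2.add.sd(<2 x double> %a0, <2 x double> %a1)
  ret <2 x double> %r
}

; After: the same operation as native IR; the backend selects vaddsd and can
; fold the memory operand.
define <2 x double> @new_pattern(<2 x double> %a0, <2 x double> %a1) {
  %lo0 = extractelement <2 x double> %a0, i32 0
  %lo1 = extractelement <2 x double> %a1, i32 0
  %sum = fadd double %lo0, %lo1
  %res = insertelement <2 x double> %a0, double %sum, i32 0
  ret <2 x double> %res
}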
Diffstat (limited to 'llvm/test')
-rw-r--r-- llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll | 64
1 file changed, 40 insertions(+), 24 deletions(-)
diff --git a/llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll b/llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
index 5dec0b8fa11..e9aefc62d8b 100644
--- a/llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
@@ -52,10 +52,12 @@ define <2 x double> @stack_fold_addsd_int(<2 x double> %a0, <2 x double> %a1) {
;CHECK-LABEL: stack_fold_addsd_int
;CHECK: vaddsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <2 x double> @llvm.x86.sse2.add.sd(<2 x double> %a0, <2 x double> %a1)
- ret <2 x double> %2
+ %2 = extractelement <2 x double> %a0, i32 0
+ %3 = extractelement <2 x double> %a1, i32 0
+ %4 = fadd double %2, %3
+ %5 = insertelement <2 x double> %a0, double %4, i32 0
+ ret <2 x double> %5
}
-declare <2 x double> @llvm.x86.sse2.add.sd(<2 x double>, <2 x double>) nounwind readnone
define float @stack_fold_addss(float %a0, float %a1) {
;CHECK-LABEL: stack_fold_addss
@@ -69,10 +71,12 @@ define <4 x float> @stack_fold_addss_int(<4 x float> %a0, <4 x float> %a1) {
;CHECK-LABEL: stack_fold_addss_int
;CHECK: vaddss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x float> @llvm.x86.sse.add.ss(<4 x float> %a0, <4 x float> %a1)
- ret <4 x float> %2
+ %2 = extractelement <4 x float> %a0, i32 0
+ %3 = extractelement <4 x float> %a1, i32 0
+ %4 = fadd float %2, %3
+ %5 = insertelement <4 x float> %a0, float %4, i32 0
+ ret <4 x float> %5
}
-declare <4 x float> @llvm.x86.sse.add.ss(<4 x float>, <4 x float>) nounwind readnone
define <2 x double> @stack_fold_andpd(<2 x double> %a0, <2 x double> %a1) {
;CHECK-LABEL: stack_fold_andpd
@@ -130,10 +134,12 @@ define <2 x double> @stack_fold_divsd_int(<2 x double> %a0, <2 x double> %a1) {
;CHECK-LABEL: stack_fold_divsd_int
;CHECK: vdivsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <2 x double> @llvm.x86.sse2.div.sd(<2 x double> %a0, <2 x double> %a1)
- ret <2 x double> %2
+ %2 = extractelement <2 x double> %a0, i32 0
+ %3 = extractelement <2 x double> %a1, i32 0
+ %4 = fdiv double %2, %3
+ %5 = insertelement <2 x double> %a0, double %4, i32 0
+ ret <2 x double> %5
}
-declare <2 x double> @llvm.x86.sse2.div.sd(<2 x double>, <2 x double>) nounwind readnone
define float @stack_fold_divss(float %a0, float %a1) {
;CHECK-LABEL: stack_fold_divss
@@ -147,10 +153,12 @@ define <4 x float> @stack_fold_divss_int(<4 x float> %a0, <4 x float> %a1) {
;CHECK-LABEL: stack_fold_divss_int
;CHECK: vdivss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x float> @llvm.x86.sse.div.ss(<4 x float> %a0, <4 x float> %a1)
- ret <4 x float> %2
+ %2 = extractelement <4 x float> %a0, i32 0
+ %3 = extractelement <4 x float> %a1, i32 0
+ %4 = fdiv float %2, %3
+ %5 = insertelement <4 x float> %a0, float %4, i32 0
+ ret <4 x float> %5
}
-declare <4 x float> @llvm.x86.sse.div.ss(<4 x float>, <4 x float>) nounwind readnone
define <4 x float> @stack_fold_insertps(<4 x float> %a0, <4 x float> %a1) {
;CHECK-LABEL: stack_fold_insertps
@@ -174,10 +182,12 @@ define <2 x double> @stack_fold_mulsd_int(<2 x double> %a0, <2 x double> %a1) {
;CHECK-LABEL: stack_fold_mulsd_int
;CHECK: vmulsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <2 x double> @llvm.x86.sse2.mul.sd(<2 x double> %a0, <2 x double> %a1)
- ret <2 x double> %2
+ %2 = extractelement <2 x double> %a0, i32 0
+ %3 = extractelement <2 x double> %a1, i32 0
+ %4 = fmul double %2, %3
+ %5 = insertelement <2 x double> %a0, double %4, i32 0
+ ret <2 x double> %5
}
-declare <2 x double> @llvm.x86.sse2.mul.sd(<2 x double>, <2 x double>) nounwind readnone
define float @stack_fold_mulss(float %a0, float %a1) {
;CHECK-LABEL: stack_fold_mulss
@@ -191,10 +201,12 @@ define <4 x float> @stack_fold_mulss_int(<4 x float> %a0, <4 x float> %a1) {
;CHECK-LABEL: stack_fold_mulss_int
;CHECK: vmulss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x float> @llvm.x86.sse.mul.ss(<4 x float> %a0, <4 x float> %a1)
- ret <4 x float> %2
+ %2 = extractelement <4 x float> %a0, i32 0
+ %3 = extractelement <4 x float> %a1, i32 0
+ %4 = fmul float %2, %3
+ %5 = insertelement <4 x float> %a0, float %4, i32 0
+ ret <4 x float> %5
}
-declare <4 x float> @llvm.x86.sse.mul.ss(<4 x float>, <4 x float>) nounwind readnone
define <2 x double> @stack_fold_orpd(<2 x double> %a0, <2 x double> %a1) {
;CHECK-LABEL: stack_fold_orpd
@@ -292,10 +304,12 @@ define <2 x double> @stack_fold_subsd_int(<2 x double> %a0, <2 x double> %a1) {
;CHECK-LABEL: stack_fold_subsd_int
;CHECK: vsubsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <2 x double> @llvm.x86.sse2.sub.sd(<2 x double> %a0, <2 x double> %a1)
- ret <2 x double> %2
+ %2 = extractelement <2 x double> %a0, i32 0
+ %3 = extractelement <2 x double> %a1, i32 0
+ %4 = fsub double %2, %3
+ %5 = insertelement <2 x double> %a0, double %4, i32 0
+ ret <2 x double> %5
}
-declare <2 x double> @llvm.x86.sse2.sub.sd(<2 x double>, <2 x double>) nounwind readnone
define float @stack_fold_subss(float %a0, float %a1) {
;CHECK-LABEL: stack_fold_subss
@@ -309,10 +323,12 @@ define <4 x float> @stack_fold_subss_int(<4 x float> %a0, <4 x float> %a1) {
;CHECK-LABEL: stack_fold_subss_int
;CHECK: vsubss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x float> @llvm.x86.sse.sub.ss(<4 x float> %a0, <4 x float> %a1)
- ret <4 x float> %2
+ %2 = extractelement <4 x float> %a0, i32 0
+ %3 = extractelement <4 x float> %a1, i32 0
+ %4 = fsub float %2, %3
+ %5 = insertelement <4 x float> %a0, float %4, i32 0
+ ret <4 x float> %5
}
-declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>) nounwind readnone
define <2 x double> @stack_fold_xorpd(<2 x double> %a0, <2 x double> %a1) {
;CHECK-LABEL: stack_fold_xorpd