Diffstat (limited to 'llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll')
-rw-r--r--  llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll  109
1 file changed, 73 insertions(+), 36 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
index ec564d13558..c0d7252ddb7 100644
--- a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
@@ -544,14 +544,19 @@ entry:
define <2 x double> @constrained_vector_fadd_v2f64() {
; CHECK-LABEL: constrained_vector_fadd_v2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
-; CHECK-NEXT: addpd {{.*}}(%rip), %xmm0
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: addsd %xmm0, %xmm1
+; CHECK-NEXT: addsd {{.*}}(%rip), %xmm0
+; CHECK-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fadd_v2f64:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
-; AVX-NEXT: vaddpd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm1
+; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
entry:
%add = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(
@@ -598,22 +603,24 @@ entry:
define <3 x double> @constrained_vector_fadd_v3f64() {
; CHECK-LABEL: constrained_vector_fadd_v3f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
-; CHECK-NEXT: addpd {{.*}}(%rip), %xmm0
-; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: xorpd %xmm2, %xmm2
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: addsd %xmm1, %xmm2
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: addsd %xmm1, %xmm0
; CHECK-NEXT: addsd {{.*}}(%rip), %xmm1
-; CHECK-NEXT: movsd %xmm1, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movapd %xmm0, %xmm1
-; CHECK-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; CHECK-NEXT: movsd %xmm2, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fadd_v3f64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vxorpd %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vmovapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
-; AVX-NEXT: vaddpd {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm1, %xmm2
+; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
@@ -629,16 +636,28 @@ entry:
define <4 x double> @constrained_vector_fadd_v4f64() {
; CHECK-LABEL: constrained_vector_fadd_v4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
-; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.0E+0,1.0000000000000001E-1]
-; CHECK-NEXT: addpd %xmm1, %xmm0
-; CHECK-NEXT: addpd {{.*}}(%rip), %xmm1
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: addsd %xmm1, %xmm2
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: addsd %xmm1, %xmm0
+; CHECK-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; CHECK-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT: addsd %xmm1, %xmm2
+; CHECK-NEXT: addsd {{.*}}(%rip), %xmm1
+; CHECK-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fadd_v4f64:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovapd {{.*#+}} ymm0 = [1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308]
-; AVX-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm1
+; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm2
+; AVX-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm2
+; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: retq
entry:
%add = call <4 x double> @llvm.experimental.constrained.fadd.v4f64(
@@ -675,14 +694,19 @@ entry:
define <2 x double> @constrained_vector_fsub_v2f64() {
; CHECK-LABEL: constrained_vector_fsub_v2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movapd {{.*#+}} xmm0 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
-; CHECK-NEXT: subpd {{.*}}(%rip), %xmm0
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movapd %xmm0, %xmm1
+; CHECK-NEXT: subsd {{.*}}(%rip), %xmm1
+; CHECK-NEXT: subsd {{.*}}(%rip), %xmm0
+; CHECK-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fsub_v2f64:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovapd {{.*#+}} xmm0 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
-; AVX-NEXT: vsubpd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vsubsd {{.*}}(%rip), %xmm0, %xmm1
+; AVX-NEXT: vsubsd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
entry:
%sub = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(
@@ -732,12 +756,12 @@ define <3 x double> @constrained_vector_fsub_v3f64() {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorpd %xmm0, %xmm0
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: subsd %xmm0, %xmm1
-; CHECK-NEXT: movapd {{.*#+}} xmm0 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
-; CHECK-NEXT: subpd {{.*}}(%rip), %xmm0
-; CHECK-NEXT: movsd %xmm1, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movapd %xmm0, %xmm1
-; CHECK-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; CHECK-NEXT: movapd %xmm1, %xmm2
+; CHECK-NEXT: subsd %xmm0, %xmm2
+; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: subsd {{.*}}(%rip), %xmm0
+; CHECK-NEXT: subsd {{.*}}(%rip), %xmm1
+; CHECK-NEXT: movsd %xmm2, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT: retq
;
@@ -746,8 +770,9 @@ define <3 x double> @constrained_vector_fsub_v3f64() {
; AVX-NEXT: vxorpd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vmovapd {{.*#+}} xmm1 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
-; AVX-NEXT: vsubpd {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vsubsd {{.*}}(%rip), %xmm1, %xmm2
+; AVX-NEXT: vsubsd {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: retq
entry:
@@ -763,16 +788,28 @@ entry:
define <4 x double> @constrained_vector_fsub_v4f64() {
; CHECK-LABEL: constrained_vector_fsub_v4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movapd {{.*#+}} xmm1 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movapd %xmm1, %xmm2
+; CHECK-NEXT: subsd {{.*}}(%rip), %xmm2
; CHECK-NEXT: movapd %xmm1, %xmm0
-; CHECK-NEXT: subpd {{.*}}(%rip), %xmm0
-; CHECK-NEXT: subpd {{.*}}(%rip), %xmm1
+; CHECK-NEXT: subsd {{.*}}(%rip), %xmm0
+; CHECK-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; CHECK-NEXT: movapd %xmm1, %xmm2
+; CHECK-NEXT: subsd {{.*}}(%rip), %xmm2
+; CHECK-NEXT: subsd {{.*}}(%rip), %xmm1
+; CHECK-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fsub_v4f64:
; AVX: # %bb.0: # %entry
-; AVX-NEXT: vmovapd {{.*#+}} ymm0 = [-1.7976931348623157E+308,-1.7976931348623157E+308,-1.7976931348623157E+308,-1.7976931348623157E+308]
-; AVX-NEXT: vsubpd {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vsubsd {{.*}}(%rip), %xmm0, %xmm1
+; AVX-NEXT: vsubsd {{.*}}(%rip), %xmm0, %xmm2
+; AVX-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX-NEXT: vsubsd {{.*}}(%rip), %xmm0, %xmm2
+; AVX-NEXT: vsubsd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: retq
entry:
%sub = call <4 x double> @llvm.experimental.constrained.fsub.v4f64(
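
Note: the updated CHECK lines above reflect a scalarized lowering of the constrained vector FP intrinsics (per-element addsd/subsd followed by unpcklpd/vinsertf128 to rebuild the vector), replacing the earlier output in which the packed addpd/subpd operands had been pre-folded into constants. The diff context truncates the intrinsic calls; as a reference, a minimal sketch of such a call in the LLVM IR of this era is shown below. The operand values here are illustrative placeholders, not the test's elided constants:

  %sub = call <4 x double> @llvm.experimental.constrained.fsub.v4f64(
           <4 x double> <double 1.0, double 2.0, double 3.0, double 4.0>,
           <4 x double> <double 1.0e-1, double 1.0e-1, double 1.0e-1, double 1.0e-1>,
           metadata !"round.dynamic",     ; rounding mode is read from the environment
           metadata !"fpexcept.strict")   ; FP exception behavior must be preserved

  declare <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double>, <4 x double>, metadata, metadata)

The metadata operands are what prevent the backend from constant-folding these operations: with dynamic rounding and strict exception semantics, each element must be computed at run time, which is why the codegen falls back to scalar adds and subtracts here.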