Diffstat (limited to 'llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll')
-rw-r--r--  llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll  219
1 file changed, 219 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
index 4477d0daa5f..ec564d13558 100644
--- a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
@@ -3831,6 +3831,217 @@ entry:
ret <4 x double> %min
}
+define <1 x float> @constrained_vector_fptrunc_v1f64() {
+; CHECK-LABEL: constrained_vector_fptrunc_v1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: cvtsd2ss %xmm0, %xmm0
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptrunc_v1f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %result = call <1 x float> @llvm.experimental.constrained.fptrunc.v1f32.v1f64(
+ <1 x double><double 42.1>,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict")
+ ret <1 x float> %result
+}
+
+define <2 x float> @constrained_vector_fptrunc_v2f64() {
+; CHECK-LABEL: constrained_vector_fptrunc_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: cvtsd2ss %xmm0, %xmm1
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: cvtsd2ss %xmm0, %xmm0
+; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptrunc_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT: retq
+entry:
+ %result = call <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(
+ <2 x double><double 42.1, double 42.2>,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict")
+ ret <2 x float> %result
+}
+
+define <3 x float> @constrained_vector_fptrunc_v3f64() {
+; CHECK-LABEL: constrained_vector_fptrunc_v3f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: cvtsd2ss %xmm0, %xmm1
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: cvtsd2ss %xmm0, %xmm0
+; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: cvtsd2ss %xmm1, %xmm1
+; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptrunc_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; AVX-NEXT: retq
+entry:
+ %result = call <3 x float> @llvm.experimental.constrained.fptrunc.v3f32.v3f64(
+ <3 x double><double 42.1, double 42.2,
+ double 42.3>,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict")
+ ret <3 x float> %result
+}
+
+define <4 x float> @constrained_vector_fptrunc_v4f64() {
+; CHECK-LABEL: constrained_vector_fptrunc_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: cvtsd2ss %xmm0, %xmm0
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: cvtsd2ss %xmm1, %xmm1
+; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: cvtsd2ss %xmm0, %xmm2
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: cvtsd2ss %xmm0, %xmm0
+; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fptrunc_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvtpd2psy {{.*}}(%rip), %xmm0
+; AVX-NEXT: retq
+entry:
+ %result = call <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(
+ <4 x double><double 42.1, double 42.2,
+ double 42.3, double 42.4>,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict")
+ ret <4 x float> %result
+}
+
+define <1 x double> @constrained_vector_fpext_v1f32() {
+; CHECK-LABEL: constrained_vector_fpext_v1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: cvtss2sd %xmm0, %xmm0
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fpext_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
+entry:
+ %result = call <1 x double> @llvm.experimental.constrained.fpext.v1f64.v1f32(
+ <1 x float><float 42.0>,
+ metadata !"fpexcept.strict")
+ ret <1 x double> %result
+}
+
+define <2 x double> @constrained_vector_fpext_v2f32() {
+; CHECK-LABEL: constrained_vector_fpext_v2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: cvtss2sd %xmm0, %xmm1
+; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: cvtss2sd %xmm0, %xmm0
+; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fpext_v2f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: retq
+entry:
+ %result = call <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(
+ <2 x float><float 42.0, float 43.0>,
+ metadata !"fpexcept.strict")
+ ret <2 x double> %result
+}
+
+define <3 x double> @constrained_vector_fpext_v3f32() {
+; CHECK-LABEL: constrained_vector_fpext_v3f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: cvtss2sd %xmm0, %xmm0
+; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: cvtss2sd %xmm1, %xmm1
+; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: cvtss2sd %xmm2, %xmm2
+; CHECK-NEXT: movsd %xmm2, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: fldl -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fpext_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+entry:
+ %result = call <3 x double> @llvm.experimental.constrained.fpext.v3f64.v3f32(
+ <3 x float><float 42.0, float 43.0,
+ float 44.0>,
+ metadata !"fpexcept.strict")
+ ret <3 x double> %result
+}
+
+define <4 x double> @constrained_vector_fpext_v4f32() {
+; CHECK-LABEL: constrained_vector_fpext_v4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: cvtss2sd %xmm0, %xmm1
+; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: cvtss2sd %xmm0, %xmm0
+; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: cvtss2sd %xmm1, %xmm2
+; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: cvtss2sd %xmm1, %xmm1
+; CHECK-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fpext_v4f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvtps2pd {{.*}}(%rip), %ymm0
+; AVX-NEXT: retq
+entry:
+ %result = call <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(
+ <4 x float><float 42.0, float 43.0,
+ float 44.0, float 45.0>,
+ metadata !"fpexcept.strict")
+ ret <4 x double> %result
+}
+
define <1 x float> @constrained_vector_ceil_v1f32() {
; CHECK-LABEL: constrained_vector_ceil_v1f32:
; CHECK: # %bb.0: # %entry
@@ -4413,6 +4624,8 @@ declare <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>, met
declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.maxnum.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.minnum.v2f64(<2 x double>, <2 x double>, metadata, metadata)
+declare <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float>, metadata)
declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata, metadata)
@@ -4438,6 +4651,8 @@ declare <1 x float> @llvm.experimental.constrained.rint.v1f32(<1 x float>, metad
declare <1 x float> @llvm.experimental.constrained.nearbyint.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.maxnum.v1f32(<1 x float>, <1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.minnum.v1f32(<1 x float>, <1 x float>, metadata, metadata)
+declare <1 x float> @llvm.experimental.constrained.fptrunc.v1f32.v1f64(<1 x double>, metadata, metadata)
+declare <1 x double> @llvm.experimental.constrained.fpext.v1f64.v1f32(<1 x float>, metadata)
declare <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.floor.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.round.v1f32(<1 x float>, metadata, metadata)
@@ -4482,6 +4697,8 @@ declare <3 x float> @llvm.experimental.constrained.maxnum.v3f32(<3 x float>, <3
declare <3 x double> @llvm.experimental.constrained.maxnum.v3f64(<3 x double>, <3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.minnum.v3f32(<3 x float>, <3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.minnum.v3f64(<3 x double>, <3 x double>, metadata, metadata)
+declare <3 x float> @llvm.experimental.constrained.fptrunc.v3f32.v3f64(<3 x double>, metadata, metadata)
+declare <3 x double> @llvm.experimental.constrained.fpext.v3f64.v3f32(<3 x float>, metadata)
declare <3 x float> @llvm.experimental.constrained.ceil.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.ceil.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.floor.v3f32(<3 x float>, metadata, metadata)
@@ -4511,6 +4728,8 @@ declare <4 x double> @llvm.experimental.constrained.rint.v4f64(<4 x double>, met
declare <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.maxnum.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.minnum.v4f64(<4 x double>, <4 x double>, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(<4 x double>, metadata, metadata)
+declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float>, metadata)
declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double>, metadata, metadata)
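
For context, constrained-FP tests like the ones added above are exercised by piping the IR through llc and checking the emitted assembly with FileCheck. The sketch below is a minimal standalone reduction of the new v1f64 fptrunc case, not part of this commit; the RUN lines and check prefix are assumptions modeled on typical x86 constrained-FP tests, since the file's real RUN lines live outside this hunk.

; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s
; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx < %s | FileCheck --check-prefix=AVX %s

; The constrained intrinsic carries the rounding mode and exception
; behavior as metadata operands, so the conversion has to reach the
; backend instead of being constant-folded away.
; (Current LLVM additionally requires the strictfp attribute on such
; functions; the tests at this revision predate that requirement.)
define <1 x float> @fptrunc_v1f64_sketch() {
entry:
  %result = call <1 x float> @llvm.experimental.constrained.fptrunc.v1f32.v1f64(
                               <1 x double> <double 42.1>,
                               metadata !"round.dynamic",
                               metadata !"fpexcept.strict")
  ret <1 x float> %result
}

declare <1 x float> @llvm.experimental.constrained.fptrunc.v1f32.v1f64(<1 x double>, metadata, metadata)

CHECK/AVX assertions such as those in the diff are typically regenerated with llvm/utils/update_llc_test_checks.py rather than written by hand.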