-rw-r--r--  llvm/test/CodeGen/PowerPC/fp-intrinsics-fptosi-legal.ll  3
-rw-r--r--  llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll  478
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-add-01.ll  52
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-add-02.ll  52
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-add-03.ll  4
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-add-04.ll  4
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-alias.ll  41
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-conv-01.ll  26
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-conv-02.ll  25
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-conv-03.ll  25
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-conv-04.ll  25
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-conv-09.ll  14
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-conv-10.ll  14
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-conv-11.ll  14
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-conv-12.ll  14
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-conv-14.ll  26
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-conv-15.ll  17
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-conv-16.ll  18
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-div-01.ll  52
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-div-02.ll  52
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-div-03.ll  4
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-div-04.ll  4
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-mul-01.ll  52
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-mul-02.ll  70
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-mul-03.ll  52
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-mul-04.ll  70
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-mul-05.ll  4
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-mul-06.ll  34
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-mul-07.ll  34
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-mul-08.ll  34
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-mul-09.ll  34
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-mul-10.ll  17
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-mul-11.ll  9
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-round-01.ll  73
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-round-02.ll  73
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-round-03.ll  73
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-sqrt-01.ll  25
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-sqrt-02.ll  25
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-sqrt-03.ll  4
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-sqrt-04.ll  4
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-sub-01.ll  52
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-sub-02.ll  52
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-sub-03.ll  4
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-sub-04.ll  4
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-strict-add-01.ll  8
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-strict-add-02.ll  8
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-strict-conv-01.ll  25
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-strict-conv-03.ll  9
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-strict-div-01.ll  10
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-strict-div-02.ll  10
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-strict-max-01.ll  21
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-strict-min-01.ll  21
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-strict-mul-01.ll  10
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-strict-mul-02.ll  10
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-strict-mul-03.ll  10
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-strict-mul-04.ll  10
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-strict-mul-05.ll  18
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-strict-round-01.ll  50
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-strict-round-02.ll  50
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-strict-sqrt-01.ll  10
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-strict-sqrt-02.ll  10
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-strict-sub-01.ll  9
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-strict-sub-02.ll  10
-rw-r--r--  llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll  478
-rw-r--r--  llvm/test/CodeGen/X86/constrained-fp80-trunc-ext.ll  18
-rw-r--r--  llvm/test/CodeGen/X86/fp-intrinsics.ll  94
-rw-r--r--  llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-fma.ll  30
-rw-r--r--  llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll  605
-rw-r--r--  llvm/test/Feature/fp-intrinsics.ll  90
-rw-r--r--  llvm/test/Transforms/DCE/calls-errno.ll  17
-rw-r--r--  llvm/test/Transforms/InstCombine/constant-fold-libfunc.ll  2
-rw-r--r--  llvm/test/Transforms/InstCombine/memcpy-1.ll  2
-rw-r--r--  llvm/test/Verifier/fp-intrinsics.ll  26
73 files changed, 1771 insertions, 1663 deletions
diff --git a/llvm/test/CodeGen/PowerPC/fp-intrinsics-fptosi-legal.ll b/llvm/test/CodeGen/PowerPC/fp-intrinsics-fptosi-legal.ll
index 98f2f36db74..dff47786e38 100644
--- a/llvm/test/CodeGen/PowerPC/fp-intrinsics-fptosi-legal.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-intrinsics-fptosi-legal.ll
@@ -8,10 +8,11 @@
; Verify that no gross errors happen.
; CHECK-LABEL: @f20
; COMMON: cfdctsiz
-define i32 @f20(double %a) {
+define i32 @f20(double %a) strictfp {
entry:
%result = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double 42.1,
metadata !"fpexcept.strict")
+ strictfp
ret i32 %result
}
diff --git a/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll
index ac939583f8f..0b4defcd88a 100644
--- a/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll
@@ -2,7 +2,7 @@
; RUN: llc -O3 -mtriple=powerpc64le-linux-gnu < %s | FileCheck --check-prefix=PC64LE %s
; RUN: llc -O3 -mtriple=powerpc64le-linux-gnu -mcpu=pwr9 < %s | FileCheck --check-prefix=PC64LE9 %s
-define <1 x float> @constrained_vector_fdiv_v1f32() nounwind {
+define <1 x float> @constrained_vector_fdiv_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_fdiv_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI0_0@toc@ha
@@ -29,11 +29,11 @@ entry:
<1 x float> <float 1.000000e+00>,
<1 x float> <float 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %div
}
-define <2 x double> @constrained_vector_fdiv_v2f64() nounwind {
+define <2 x double> @constrained_vector_fdiv_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_fdiv_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI1_0@toc@ha
@@ -62,11 +62,11 @@ entry:
<2 x double> <double 1.000000e+00, double 2.000000e+00>,
<2 x double> <double 1.000000e+01, double 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %div
}
-define <3 x float> @constrained_vector_fdiv_v3f32() nounwind {
+define <3 x float> @constrained_vector_fdiv_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_fdiv_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI2_0@toc@ha
@@ -123,11 +123,11 @@ entry:
<3 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>,
<3 x float> <float 1.000000e+01, float 1.000000e+01, float 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %div
}
-define <3 x double> @constrained_vector_fdiv_v3f64() nounwind {
+define <3 x double> @constrained_vector_fdiv_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_fdiv_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI3_2@toc@ha
@@ -172,11 +172,11 @@ entry:
<3 x double> <double 1.000000e+00, double 2.000000e+00, double 3.000000e+00>,
<3 x double> <double 1.000000e+01, double 1.000000e+01, double 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %div
}
-define <4 x double> @constrained_vector_fdiv_v4f64() nounwind {
+define <4 x double> @constrained_vector_fdiv_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_fdiv_v4f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI4_0@toc@ha
@@ -216,11 +216,11 @@ entry:
<4 x double> <double 1.000000e+01, double 1.000000e+01,
double 1.000000e+01, double 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %div
}
-define <1 x float> @constrained_vector_frem_v1f32() nounwind {
+define <1 x float> @constrained_vector_frem_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_frem_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -261,11 +261,11 @@ entry:
<1 x float> <float 1.000000e+00>,
<1 x float> <float 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %rem
}
-define <2 x double> @constrained_vector_frem_v2f64() nounwind {
+define <2 x double> @constrained_vector_frem_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_frem_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -330,11 +330,11 @@ entry:
<2 x double> <double 1.000000e+00, double 2.000000e+00>,
<2 x double> <double 1.000000e+01, double 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %rem
}
-define <3 x float> @constrained_vector_frem_v3f32() nounwind {
+define <3 x float> @constrained_vector_frem_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_frem_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -431,11 +431,11 @@ entry:
<3 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>,
<3 x float> <float 1.000000e+01, float 1.000000e+01, float 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %rem
}
-define <3 x double> @constrained_vector_frem_v3f64() nounwind {
+define <3 x double> @constrained_vector_frem_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_frem_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -522,11 +522,11 @@ entry:
<3 x double> <double 1.000000e+00, double 2.000000e+00, double 3.000000e+00>,
<3 x double> <double 1.000000e+01, double 1.000000e+01, double 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %rem
}
-define <4 x double> @constrained_vector_frem_v4f64() nounwind {
+define <4 x double> @constrained_vector_frem_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_frem_v4f64:
; PC64LE: # %bb.0:
; PC64LE-NEXT: mflr 0
@@ -632,11 +632,11 @@ define <4 x double> @constrained_vector_frem_v4f64() nounwind {
<4 x double> <double 1.000000e+01, double 1.000000e+01,
double 1.000000e+01, double 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %rem
}
-define <1 x float> @constrained_vector_fmul_v1f32() nounwind {
+define <1 x float> @constrained_vector_fmul_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_fmul_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI10_0@toc@ha
@@ -663,11 +663,11 @@ entry:
<1 x float> <float 0x7FF0000000000000>,
<1 x float> <float 2.000000e+00>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %mul
}
-define <2 x double> @constrained_vector_fmul_v2f64() nounwind {
+define <2 x double> @constrained_vector_fmul_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_fmul_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI11_0@toc@ha
@@ -696,11 +696,11 @@ entry:
<2 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
<2 x double> <double 2.000000e+00, double 3.000000e+00>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %mul
}
-define <3 x float> @constrained_vector_fmul_v3f32() nounwind {
+define <3 x float> @constrained_vector_fmul_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_fmul_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI12_1@toc@ha
@@ -758,11 +758,11 @@ entry:
float 0x7FF0000000000000>,
<3 x float> <float 1.000000e+00, float 1.000000e+01, float 1.000000e+02>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %mul
}
-define <3 x double> @constrained_vector_fmul_v3f64() nounwind {
+define <3 x double> @constrained_vector_fmul_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_fmul_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI13_2@toc@ha
@@ -808,11 +808,11 @@ entry:
double 0x7FEFFFFFFFFFFFFF>,
<3 x double> <double 1.000000e+00, double 1.000000e+01, double 1.000000e+02>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %mul
}
-define <4 x double> @constrained_vector_fmul_v4f64() nounwind {
+define <4 x double> @constrained_vector_fmul_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_fmul_v4f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI14_0@toc@ha
@@ -852,11 +852,11 @@ entry:
<4 x double> <double 2.000000e+00, double 3.000000e+00,
double 4.000000e+00, double 5.000000e+00>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %mul
}
-define <1 x float> @constrained_vector_fadd_v1f32() nounwind {
+define <1 x float> @constrained_vector_fadd_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_fadd_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI15_0@toc@ha
@@ -883,11 +883,11 @@ entry:
<1 x float> <float 0x7FF0000000000000>,
<1 x float> <float 1.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %add
}
-define <2 x double> @constrained_vector_fadd_v2f64() nounwind {
+define <2 x double> @constrained_vector_fadd_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_fadd_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI16_0@toc@ha
@@ -916,11 +916,11 @@ entry:
<2 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
<2 x double> <double 1.000000e+00, double 1.000000e-01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %add
}
-define <3 x float> @constrained_vector_fadd_v3f32() nounwind {
+define <3 x float> @constrained_vector_fadd_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_fadd_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI17_0@toc@ha
@@ -976,11 +976,11 @@ entry:
float 0xFFFFFFFFE0000000>,
<3 x float> <float 2.0, float 1.0, float 0.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %add
}
-define <3 x double> @constrained_vector_fadd_v3f64() nounwind {
+define <3 x double> @constrained_vector_fadd_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_fadd_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI18_1@toc@ha
@@ -1024,11 +1024,11 @@ entry:
double 0x7FEFFFFFFFFFFFFF>,
<3 x double> <double 2.0, double 1.0, double 0.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %add
}
-define <4 x double> @constrained_vector_fadd_v4f64() nounwind {
+define <4 x double> @constrained_vector_fadd_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_fadd_v4f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI19_0@toc@ha
@@ -1068,11 +1068,11 @@ entry:
<4 x double> <double 1.000000e+00, double 1.000000e-01,
double 2.000000e+00, double 2.000000e-01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %add
}
-define <1 x float> @constrained_vector_fsub_v1f32() nounwind {
+define <1 x float> @constrained_vector_fsub_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_fsub_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI20_0@toc@ha
@@ -1099,11 +1099,11 @@ entry:
<1 x float> <float 0x7FF0000000000000>,
<1 x float> <float 1.000000e+00>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %sub
}
-define <2 x double> @constrained_vector_fsub_v2f64() nounwind {
+define <2 x double> @constrained_vector_fsub_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_fsub_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI21_0@toc@ha
@@ -1132,11 +1132,11 @@ entry:
<2 x double> <double 0xFFEFFFFFFFFFFFFF, double 0xFFEFFFFFFFFFFFFF>,
<2 x double> <double 1.000000e+00, double 1.000000e-01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %sub
}
-define <3 x float> @constrained_vector_fsub_v3f32() nounwind {
+define <3 x float> @constrained_vector_fsub_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_fsub_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI22_0@toc@ha
@@ -1192,11 +1192,11 @@ entry:
float 0xFFFFFFFFE0000000>,
<3 x float> <float 2.0, float 1.0, float 0.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %sub
}
-define <3 x double> @constrained_vector_fsub_v3f64() nounwind {
+define <3 x double> @constrained_vector_fsub_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_fsub_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI23_1@toc@ha
@@ -1240,11 +1240,11 @@ entry:
double 0xFFEFFFFFFFFFFFFF>,
<3 x double> <double 2.0, double 1.0, double 0.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %sub
}
-define <4 x double> @constrained_vector_fsub_v4f64() nounwind {
+define <4 x double> @constrained_vector_fsub_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_fsub_v4f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI24_0@toc@ha
@@ -1284,11 +1284,11 @@ entry:
<4 x double> <double 1.000000e+00, double 1.000000e-01,
double 2.000000e+00, double 2.000000e-01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %sub
}
-define <1 x float> @constrained_vector_sqrt_v1f32() nounwind {
+define <1 x float> @constrained_vector_sqrt_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_sqrt_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI25_0@toc@ha
@@ -1310,11 +1310,11 @@ entry:
%sqrt = call <1 x float> @llvm.experimental.constrained.sqrt.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %sqrt
}
-define <2 x double> @constrained_vector_sqrt_v2f64() nounwind {
+define <2 x double> @constrained_vector_sqrt_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_sqrt_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI26_0@toc@ha
@@ -1335,11 +1335,11 @@ entry:
%sqrt = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %sqrt
}
-define <3 x float> @constrained_vector_sqrt_v3f32() nounwind {
+define <3 x float> @constrained_vector_sqrt_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_sqrt_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI27_2@toc@ha
@@ -1391,11 +1391,11 @@ entry:
%sqrt = call <3 x float> @llvm.experimental.constrained.sqrt.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %sqrt
}
-define <3 x double> @constrained_vector_sqrt_v3f64() nounwind {
+define <3 x double> @constrained_vector_sqrt_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_sqrt_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI28_1@toc@ha
@@ -1428,11 +1428,11 @@ entry:
%sqrt = call <3 x double> @llvm.experimental.constrained.sqrt.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %sqrt
}
-define <4 x double> @constrained_vector_sqrt_v4f64() nounwind {
+define <4 x double> @constrained_vector_sqrt_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_sqrt_v4f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI29_0@toc@ha
@@ -1463,11 +1463,11 @@ define <4 x double> @constrained_vector_sqrt_v4f64() nounwind {
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %sqrt
}
-define <1 x float> @constrained_vector_pow_v1f32() nounwind {
+define <1 x float> @constrained_vector_pow_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_pow_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -1508,11 +1508,11 @@ entry:
<1 x float> <float 42.0>,
<1 x float> <float 3.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %pow
}
-define <2 x double> @constrained_vector_pow_v2f64() nounwind {
+define <2 x double> @constrained_vector_pow_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_pow_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -1577,11 +1577,11 @@ entry:
<2 x double> <double 42.1, double 42.2>,
<2 x double> <double 3.0, double 3.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %pow
}
-define <3 x float> @constrained_vector_pow_v3f32() nounwind {
+define <3 x float> @constrained_vector_pow_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_pow_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -1678,11 +1678,11 @@ entry:
<3 x float> <float 42.0, float 43.0, float 44.0>,
<3 x float> <float 3.0, float 3.0, float 3.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %pow
}
-define <3 x double> @constrained_vector_pow_v3f64() nounwind {
+define <3 x double> @constrained_vector_pow_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_pow_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -1769,11 +1769,11 @@ entry:
<3 x double> <double 42.0, double 42.1, double 42.2>,
<3 x double> <double 3.0, double 3.0, double 3.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %pow
}
-define <4 x double> @constrained_vector_pow_v4f64() nounwind {
+define <4 x double> @constrained_vector_pow_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_pow_v4f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -1880,11 +1880,11 @@ entry:
<4 x double> <double 3.0, double 3.0,
double 3.0, double 3.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %pow
}
-define <1 x float> @constrained_vector_powi_v1f32() nounwind {
+define <1 x float> @constrained_vector_powi_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_powi_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -1923,11 +1923,11 @@ entry:
<1 x float> <float 42.0>,
i32 3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %powi
}
-define <2 x double> @constrained_vector_powi_v2f64() nounwind {
+define <2 x double> @constrained_vector_powi_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_powi_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -1984,11 +1984,11 @@ entry:
<2 x double> <double 42.1, double 42.2>,
i32 3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %powi
}
-define <3 x float> @constrained_vector_powi_v3f32() nounwind {
+define <3 x float> @constrained_vector_powi_v3f32() #0 {
;
;
; PC64LE-LABEL: constrained_vector_powi_v3f32:
@@ -2079,11 +2079,11 @@ entry:
<3 x float> <float 42.0, float 43.0, float 44.0>,
i32 3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %powi
}
-define <3 x double> @constrained_vector_powi_v3f64() nounwind {
+define <3 x double> @constrained_vector_powi_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_powi_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -2162,11 +2162,11 @@ entry:
<3 x double> <double 42.0, double 42.1, double 42.2>,
i32 3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %powi
}
-define <4 x double> @constrained_vector_powi_v4f64() nounwind {
+define <4 x double> @constrained_vector_powi_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_powi_v4f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -2264,11 +2264,11 @@ entry:
double 42.3, double 42.4>,
i32 3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %powi
}
-define <1 x float> @constrained_vector_sin_v1f32() nounwind {
+define <1 x float> @constrained_vector_sin_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_sin_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -2304,11 +2304,11 @@ entry:
%sin = call <1 x float> @llvm.experimental.constrained.sin.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %sin
}
-define <2 x double> @constrained_vector_sin_v2f64() nounwind {
+define <2 x double> @constrained_vector_sin_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_sin_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -2360,11 +2360,11 @@ entry:
%sin = call <2 x double> @llvm.experimental.constrained.sin.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %sin
}
-define <3 x float> @constrained_vector_sin_v3f32() nounwind {
+define <3 x float> @constrained_vector_sin_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_sin_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -2446,11 +2446,11 @@ entry:
%sin = call <3 x float> @llvm.experimental.constrained.sin.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %sin
}
-define <3 x double> @constrained_vector_sin_v3f64() nounwind {
+define <3 x double> @constrained_vector_sin_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_sin_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -2522,11 +2522,11 @@ entry:
%sin = call <3 x double> @llvm.experimental.constrained.sin.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %sin
}
-define <4 x double> @constrained_vector_sin_v4f64() nounwind {
+define <4 x double> @constrained_vector_sin_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_sin_v4f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -2615,11 +2615,11 @@ entry:
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %sin
}
-define <1 x float> @constrained_vector_cos_v1f32() nounwind {
+define <1 x float> @constrained_vector_cos_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_cos_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -2655,11 +2655,11 @@ entry:
%cos = call <1 x float> @llvm.experimental.constrained.cos.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %cos
}
-define <2 x double> @constrained_vector_cos_v2f64() nounwind {
+define <2 x double> @constrained_vector_cos_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_cos_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -2711,11 +2711,11 @@ entry:
%cos = call <2 x double> @llvm.experimental.constrained.cos.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %cos
}
-define <3 x float> @constrained_vector_cos_v3f32() nounwind {
+define <3 x float> @constrained_vector_cos_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_cos_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -2797,11 +2797,11 @@ entry:
%cos = call <3 x float> @llvm.experimental.constrained.cos.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %cos
}
-define <3 x double> @constrained_vector_cos_v3f64() nounwind {
+define <3 x double> @constrained_vector_cos_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_cos_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -2873,11 +2873,11 @@ entry:
%cos = call <3 x double> @llvm.experimental.constrained.cos.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %cos
}
-define <4 x double> @constrained_vector_cos_v4f64() nounwind {
+define <4 x double> @constrained_vector_cos_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_cos_v4f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -2966,11 +2966,11 @@ entry:
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %cos
}
-define <1 x float> @constrained_vector_exp_v1f32() nounwind {
+define <1 x float> @constrained_vector_exp_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_exp_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -3006,11 +3006,11 @@ entry:
%exp = call <1 x float> @llvm.experimental.constrained.exp.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %exp
}
-define <2 x double> @constrained_vector_exp_v2f64() nounwind {
+define <2 x double> @constrained_vector_exp_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_exp_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -3062,11 +3062,11 @@ entry:
%exp = call <2 x double> @llvm.experimental.constrained.exp.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %exp
}
-define <3 x float> @constrained_vector_exp_v3f32() nounwind {
+define <3 x float> @constrained_vector_exp_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_exp_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -3148,11 +3148,11 @@ entry:
%exp = call <3 x float> @llvm.experimental.constrained.exp.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %exp
}
-define <3 x double> @constrained_vector_exp_v3f64() nounwind {
+define <3 x double> @constrained_vector_exp_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_exp_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -3224,11 +3224,11 @@ entry:
%exp = call <3 x double> @llvm.experimental.constrained.exp.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %exp
}
-define <4 x double> @constrained_vector_exp_v4f64() nounwind {
+define <4 x double> @constrained_vector_exp_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_exp_v4f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -3317,11 +3317,11 @@ entry:
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %exp
}
-define <1 x float> @constrained_vector_exp2_v1f32() nounwind {
+define <1 x float> @constrained_vector_exp2_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_exp2_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -3357,11 +3357,11 @@ entry:
%exp2 = call <1 x float> @llvm.experimental.constrained.exp2.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %exp2
}
-define <2 x double> @constrained_vector_exp2_v2f64() nounwind {
+define <2 x double> @constrained_vector_exp2_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_exp2_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -3413,11 +3413,11 @@ entry:
%exp2 = call <2 x double> @llvm.experimental.constrained.exp2.v2f64(
<2 x double> <double 42.1, double 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %exp2
}
-define <3 x float> @constrained_vector_exp2_v3f32() nounwind {
+define <3 x float> @constrained_vector_exp2_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_exp2_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -3499,11 +3499,11 @@ entry:
%exp2 = call <3 x float> @llvm.experimental.constrained.exp2.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %exp2
}
-define <3 x double> @constrained_vector_exp2_v3f64() nounwind {
+define <3 x double> @constrained_vector_exp2_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_exp2_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -3575,11 +3575,11 @@ entry:
%exp2 = call <3 x double> @llvm.experimental.constrained.exp2.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %exp2
}
-define <4 x double> @constrained_vector_exp2_v4f64() nounwind {
+define <4 x double> @constrained_vector_exp2_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_exp2_v4f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -3668,11 +3668,11 @@ entry:
<4 x double> <double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %exp2
}
-define <1 x float> @constrained_vector_log_v1f32() nounwind {
+define <1 x float> @constrained_vector_log_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_log_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -3708,11 +3708,11 @@ entry:
%log = call <1 x float> @llvm.experimental.constrained.log.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %log
}
-define <2 x double> @constrained_vector_log_v2f64() nounwind {
+define <2 x double> @constrained_vector_log_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_log_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -3764,11 +3764,11 @@ entry:
%log = call <2 x double> @llvm.experimental.constrained.log.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %log
}
-define <3 x float> @constrained_vector_log_v3f32() nounwind {
+define <3 x float> @constrained_vector_log_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_log_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -3850,11 +3850,11 @@ entry:
%log = call <3 x float> @llvm.experimental.constrained.log.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %log
}
-define <3 x double> @constrained_vector_log_v3f64() nounwind {
+define <3 x double> @constrained_vector_log_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_log_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -3926,11 +3926,11 @@ entry:
%log = call <3 x double> @llvm.experimental.constrained.log.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %log
}
-define <4 x double> @constrained_vector_log_v4f64() nounwind {
+define <4 x double> @constrained_vector_log_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_log_v4f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -4019,11 +4019,11 @@ entry:
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %log
}
-define <1 x float> @constrained_vector_log10_v1f32() nounwind {
+define <1 x float> @constrained_vector_log10_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_log10_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -4059,11 +4059,11 @@ entry:
%log10 = call <1 x float> @llvm.experimental.constrained.log10.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %log10
}
-define <2 x double> @constrained_vector_log10_v2f64() nounwind {
+define <2 x double> @constrained_vector_log10_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_log10_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -4115,11 +4115,11 @@ entry:
%log10 = call <2 x double> @llvm.experimental.constrained.log10.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %log10
}
-define <3 x float> @constrained_vector_log10_v3f32() nounwind {
+define <3 x float> @constrained_vector_log10_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_log10_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -4201,11 +4201,11 @@ entry:
%log10 = call <3 x float> @llvm.experimental.constrained.log10.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %log10
}
-define <3 x double> @constrained_vector_log10_v3f64() nounwind {
+define <3 x double> @constrained_vector_log10_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_log10_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -4277,11 +4277,11 @@ entry:
%log10 = call <3 x double> @llvm.experimental.constrained.log10.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %log10
}
-define <4 x double> @constrained_vector_log10_v4f64() nounwind {
+define <4 x double> @constrained_vector_log10_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_log10_v4f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -4370,11 +4370,11 @@ entry:
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %log10
}
-define <1 x float> @constrained_vector_log2_v1f32() nounwind {
+define <1 x float> @constrained_vector_log2_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_log2_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -4410,11 +4410,11 @@ entry:
%log2 = call <1 x float> @llvm.experimental.constrained.log2.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %log2
}
-define <2 x double> @constrained_vector_log2_v2f64() nounwind {
+define <2 x double> @constrained_vector_log2_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_log2_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -4466,11 +4466,11 @@ entry:
%log2 = call <2 x double> @llvm.experimental.constrained.log2.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %log2
}
-define <3 x float> @constrained_vector_log2_v3f32() nounwind {
+define <3 x float> @constrained_vector_log2_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_log2_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -4552,11 +4552,11 @@ entry:
%log2 = call <3 x float> @llvm.experimental.constrained.log2.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %log2
}
-define <3 x double> @constrained_vector_log2_v3f64() nounwind {
+define <3 x double> @constrained_vector_log2_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_log2_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -4628,11 +4628,11 @@ entry:
%log2 = call <3 x double> @llvm.experimental.constrained.log2.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %log2
}
-define <4 x double> @constrained_vector_log2_v4f64() nounwind {
+define <4 x double> @constrained_vector_log2_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_log2_v4f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -4721,11 +4721,11 @@ entry:
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %log2
}
-define <1 x float> @constrained_vector_rint_v1f32() nounwind {
+define <1 x float> @constrained_vector_rint_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_rint_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -4761,11 +4761,11 @@ entry:
%rint = call <1 x float> @llvm.experimental.constrained.rint.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %rint
}
-define <2 x double> @constrained_vector_rint_v2f64() nounwind {
+define <2 x double> @constrained_vector_rint_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_rint_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -4817,11 +4817,11 @@ entry:
%rint = call <2 x double> @llvm.experimental.constrained.rint.v2f64(
<2 x double> <double 42.1, double 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %rint
}
-define <3 x float> @constrained_vector_rint_v3f32() nounwind {
+define <3 x float> @constrained_vector_rint_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_rint_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -4903,11 +4903,11 @@ define <3 x float> @constrained_vector_rint_v3f32() nounwind {
%rint = call <3 x float> @llvm.experimental.constrained.rint.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %rint
}
-define <3 x double> @constrained_vector_rint_v3f64() nounwind {
+define <3 x double> @constrained_vector_rint_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_rint_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -4979,11 +4979,11 @@ entry:
%rint = call <3 x double> @llvm.experimental.constrained.rint.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %rint
}
-define <4 x double> @constrained_vector_rint_v4f64() nounwind {
+define <4 x double> @constrained_vector_rint_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_rint_v4f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -5072,11 +5072,11 @@ entry:
<4 x double> <double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %rint
}
-define <1 x float> @constrained_vector_nearbyint_v1f32() nounwind {
+define <1 x float> @constrained_vector_nearbyint_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_nearbyint_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -5112,11 +5112,11 @@ entry:
%nearby = call <1 x float> @llvm.experimental.constrained.nearbyint.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %nearby
}
-define <2 x double> @constrained_vector_nearbyint_v2f64() nounwind {
+define <2 x double> @constrained_vector_nearbyint_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_nearbyint_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -5168,11 +5168,11 @@ entry:
%nearby = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(
<2 x double> <double 42.1, double 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %nearby
}
-define <3 x float> @constrained_vector_nearbyint_v3f32() nounwind {
+define <3 x float> @constrained_vector_nearbyint_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_nearbyint_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -5254,11 +5254,11 @@ entry:
%nearby = call <3 x float> @llvm.experimental.constrained.nearbyint.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %nearby
}
-define <3 x double> @constrained_vector_nearby_v3f64() nounwind {
+define <3 x double> @constrained_vector_nearby_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_nearby_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -5330,11 +5330,11 @@ entry:
%nearby = call <3 x double> @llvm.experimental.constrained.nearbyint.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %nearby
}
-define <4 x double> @constrained_vector_nearbyint_v4f64() nounwind {
+define <4 x double> @constrained_vector_nearbyint_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_nearbyint_v4f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -5423,11 +5423,11 @@ entry:
<4 x double> <double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %nearby
}
-define <1 x float> @constrained_vector_maxnum_v1f32() nounwind {
+define <1 x float> @constrained_vector_maxnum_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_maxnum_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -5467,11 +5467,11 @@ entry:
%max = call <1 x float> @llvm.experimental.constrained.maxnum.v1f32(
<1 x float> <float 42.0>, <1 x float> <float 41.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %max
}
-define <2 x double> @constrained_vector_maxnum_v2f64() nounwind {
+define <2 x double> @constrained_vector_maxnum_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_maxnum_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -5532,11 +5532,11 @@ entry:
<2 x double> <double 43.0, double 42.0>,
<2 x double> <double 41.0, double 40.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %max
}
-define <3 x float> @constrained_vector_maxnum_v3f32() nounwind {
+define <3 x float> @constrained_vector_maxnum_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_maxnum_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -5635,11 +5635,11 @@ entry:
<3 x float> <float 43.0, float 44.0, float 45.0>,
<3 x float> <float 41.0, float 42.0, float 43.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %max
}
-define <3 x double> @constrained_vector_max_v3f64() nounwind {
+define <3 x double> @constrained_vector_max_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_max_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -5724,11 +5724,11 @@ entry:
<3 x double> <double 43.0, double 44.0, double 45.0>,
<3 x double> <double 40.0, double 41.0, double 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %max
}
-define <4 x double> @constrained_vector_maxnum_v4f64() nounwind {
+define <4 x double> @constrained_vector_maxnum_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_maxnum_v4f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -5835,11 +5835,11 @@ entry:
<4 x double> <double 40.0, double 41.0,
double 42.0, double 43.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %max
}
-define <1 x float> @constrained_vector_minnum_v1f32() nounwind {
+define <1 x float> @constrained_vector_minnum_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_minnum_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -5879,11 +5879,11 @@ define <1 x float> @constrained_vector_minnum_v1f32() nounwind {
%min = call <1 x float> @llvm.experimental.constrained.minnum.v1f32(
<1 x float> <float 42.0>, <1 x float> <float 41.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %min
}
-define <2 x double> @constrained_vector_minnum_v2f64() nounwind {
+define <2 x double> @constrained_vector_minnum_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_minnum_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -5944,11 +5944,11 @@ entry:
<2 x double> <double 43.0, double 42.0>,
<2 x double> <double 41.0, double 40.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %min
}
-define <3 x float> @constrained_vector_minnum_v3f32() nounwind {
+define <3 x float> @constrained_vector_minnum_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_minnum_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -6047,11 +6047,11 @@ entry:
<3 x float> <float 43.0, float 44.0, float 45.0>,
<3 x float> <float 41.0, float 42.0, float 43.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %min
}
-define <3 x double> @constrained_vector_min_v3f64() nounwind {
+define <3 x double> @constrained_vector_min_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_min_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -6136,11 +6136,11 @@ entry:
<3 x double> <double 43.0, double 44.0, double 45.0>,
<3 x double> <double 40.0, double 41.0, double 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %min
}
-define <4 x double> @constrained_vector_minnum_v4f64() nounwind {
+define <4 x double> @constrained_vector_minnum_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_minnum_v4f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: mflr 0
@@ -6247,11 +6247,11 @@ entry:
<4 x double> <double 40.0, double 41.0,
double 42.0, double 43.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %min
}
-define <1 x float> @constrained_vector_fptrunc_v1f64() nounwind {
+define <1 x float> @constrained_vector_fptrunc_v1f64() #0 {
; PC64LE-LABEL: constrained_vector_fptrunc_v1f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI95_0@toc@ha
@@ -6273,11 +6273,11 @@ entry:
%result = call <1 x float> @llvm.experimental.constrained.fptrunc.v1f32.v1f64(
<1 x double><double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %result
}
-define <2 x float> @constrained_vector_fptrunc_v2f64() nounwind {
+define <2 x float> @constrained_vector_fptrunc_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_fptrunc_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI96_0@toc@ha
@@ -6311,11 +6311,11 @@ entry:
%result = call <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(
<2 x double><double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x float> %result
}
-define <3 x float> @constrained_vector_fptrunc_v3f64() nounwind {
+define <3 x float> @constrained_vector_fptrunc_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_fptrunc_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI97_0@toc@ha
@@ -6368,11 +6368,11 @@ entry:
<3 x double><double 42.1, double 42.2,
double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %result
}
-define <4 x float> @constrained_vector_fptrunc_v4f64() nounwind {
+define <4 x float> @constrained_vector_fptrunc_v4f64() #0 {
; PC64LE-LABEL: constrained_vector_fptrunc_v4f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI98_0@toc@ha
@@ -6411,11 +6411,11 @@ entry:
<4 x double><double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x float> %result
}
-define <1 x double> @constrained_vector_fpext_v1f32() nounwind {
+define <1 x double> @constrained_vector_fpext_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_fpext_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI99_0@toc@ha
@@ -6432,11 +6432,11 @@ define <1 x double> @constrained_vector_fpext_v1f32() nounwind {
entry:
%result = call <1 x double> @llvm.experimental.constrained.fpext.v1f64.v1f32(
<1 x float><float 42.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x double> %result
}
-define <2 x double> @constrained_vector_fpext_v2f32() nounwind {
+define <2 x double> @constrained_vector_fpext_v2f32() #0 {
; PC64LE-LABEL: constrained_vector_fpext_v2f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI100_0@toc@ha
@@ -6457,11 +6457,11 @@ define <2 x double> @constrained_vector_fpext_v2f32() nounwind {
entry:
%result = call <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(
<2 x float><float 42.0, float 43.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %result
}
-define <3 x double> @constrained_vector_fpext_v3f32() nounwind {
+define <3 x double> @constrained_vector_fpext_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_fpext_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI101_0@toc@ha
@@ -6485,11 +6485,11 @@ entry:
%result = call <3 x double> @llvm.experimental.constrained.fpext.v3f64.v3f32(
<3 x float><float 42.0, float 43.0,
float 44.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %result
}
-define <4 x double> @constrained_vector_fpext_v4f32() nounwind {
+define <4 x double> @constrained_vector_fpext_v4f32() #0 {
; PC64LE-LABEL: constrained_vector_fpext_v4f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI102_0@toc@ha
@@ -6521,11 +6521,11 @@ entry:
%result = call <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(
<4 x float><float 42.0, float 43.0,
float 44.0, float 45.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <4 x double> %result
}
-define <1 x float> @constrained_vector_ceil_v1f32() nounwind {
+define <1 x float> @constrained_vector_ceil_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_ceil_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI103_0@toc@ha
@@ -6547,11 +6547,11 @@ entry:
%ceil = call <1 x float> @llvm.experimental.constrained.ceil.v1f32(
<1 x float> <float 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %ceil
}
-define <2 x double> @constrained_vector_ceil_v2f64() nounwind {
+define <2 x double> @constrained_vector_ceil_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_ceil_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI104_0@toc@ha
@@ -6572,11 +6572,11 @@ entry:
%ceil = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(
<2 x double> <double 1.1, double 1.9>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %ceil
}
-define <3 x float> @constrained_vector_ceil_v3f32() nounwind {
+define <3 x float> @constrained_vector_ceil_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_ceil_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI105_2@toc@ha
@@ -6628,11 +6628,11 @@ entry:
%ceil = call <3 x float> @llvm.experimental.constrained.ceil.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %ceil
}
-define <3 x double> @constrained_vector_ceil_v3f64() nounwind {
+define <3 x double> @constrained_vector_ceil_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_ceil_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI106_1@toc@ha
@@ -6665,11 +6665,11 @@ entry:
%ceil = call <3 x double> @llvm.experimental.constrained.ceil.v3f64(
<3 x double> <double 1.1, double 1.9, double 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %ceil
}
-define <1 x float> @constrained_vector_floor_v1f32() nounwind {
+define <1 x float> @constrained_vector_floor_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_floor_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI107_0@toc@ha
@@ -6691,12 +6691,12 @@ entry:
%floor = call <1 x float> @llvm.experimental.constrained.floor.v1f32(
<1 x float> <float 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %floor
}
-define <2 x double> @constrained_vector_floor_v2f64() nounwind {
+define <2 x double> @constrained_vector_floor_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_floor_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI108_0@toc@ha
@@ -6717,11 +6717,11 @@ entry:
%floor = call <2 x double> @llvm.experimental.constrained.floor.v2f64(
<2 x double> <double 1.1, double 1.9>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %floor
}
-define <3 x float> @constrained_vector_floor_v3f32() nounwind {
+define <3 x float> @constrained_vector_floor_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_floor_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI109_2@toc@ha
@@ -6773,11 +6773,11 @@ entry:
%floor = call <3 x float> @llvm.experimental.constrained.floor.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %floor
}
-define <3 x double> @constrained_vector_floor_v3f64() nounwind {
+define <3 x double> @constrained_vector_floor_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_floor_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI110_1@toc@ha
@@ -6810,11 +6810,11 @@ entry:
%floor = call <3 x double> @llvm.experimental.constrained.floor.v3f64(
<3 x double> <double 1.1, double 1.9, double 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %floor
}
-define <1 x float> @constrained_vector_round_v1f32() nounwind {
+define <1 x float> @constrained_vector_round_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_round_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI111_0@toc@ha
@@ -6836,11 +6836,11 @@ entry:
%round = call <1 x float> @llvm.experimental.constrained.round.v1f32(
<1 x float> <float 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %round
}
-define <2 x double> @constrained_vector_round_v2f64() nounwind {
+define <2 x double> @constrained_vector_round_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_round_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI112_0@toc@ha
@@ -6861,11 +6861,11 @@ entry:
%round = call <2 x double> @llvm.experimental.constrained.round.v2f64(
<2 x double> <double 1.1, double 1.9>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %round
}
-define <3 x float> @constrained_vector_round_v3f32() nounwind {
+define <3 x float> @constrained_vector_round_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_round_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI113_2@toc@ha
@@ -6917,12 +6917,12 @@ entry:
%round = call <3 x float> @llvm.experimental.constrained.round.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %round
}
-define <3 x double> @constrained_vector_round_v3f64() nounwind {
+define <3 x double> @constrained_vector_round_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_round_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI114_1@toc@ha
@@ -6955,11 +6955,11 @@ entry:
%round = call <3 x double> @llvm.experimental.constrained.round.v3f64(
<3 x double> <double 1.1, double 1.9, double 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %round
}
-define <1 x float> @constrained_vector_trunc_v1f32() nounwind {
+define <1 x float> @constrained_vector_trunc_v1f32() #0 {
; PC64LE-LABEL: constrained_vector_trunc_v1f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI115_0@toc@ha
@@ -6981,11 +6981,11 @@ entry:
%trunc = call <1 x float> @llvm.experimental.constrained.trunc.v1f32(
<1 x float> <float 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <1 x float> %trunc
}
-define <2 x double> @constrained_vector_trunc_v2f64() nounwind {
+define <2 x double> @constrained_vector_trunc_v2f64() #0 {
; PC64LE-LABEL: constrained_vector_trunc_v2f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI116_0@toc@ha
@@ -7006,11 +7006,11 @@ entry:
%trunc = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
<2 x double> <double 1.1, double 1.9>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <2 x double> %trunc
}
-define <3 x float> @constrained_vector_trunc_v3f32() nounwind {
+define <3 x float> @constrained_vector_trunc_v3f32() #0 {
; PC64LE-LABEL: constrained_vector_trunc_v3f32:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI117_2@toc@ha
@@ -7062,11 +7062,11 @@ entry:
%trunc = call <3 x float> @llvm.experimental.constrained.trunc.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x float> %trunc
}
-define <3 x double> @constrained_vector_trunc_v3f64() nounwind {
+define <3 x double> @constrained_vector_trunc_v3f64() #0 {
; PC64LE-LABEL: constrained_vector_trunc_v3f64:
; PC64LE: # %bb.0: # %entry
; PC64LE-NEXT: addis 3, 2, .LCPI118_1@toc@ha
@@ -7099,10 +7099,12 @@ entry:
%trunc = call <3 x double> @llvm.experimental.constrained.trunc.v3f64(
<3 x double> <double 1.1, double 1.9, double 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #1
ret <3 x double> %trunc
}
+attributes #0 = { nounwind strictfp noimplicitfloat }
+attributes #1 = { strictfp }
; Single width declarations
declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata)
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-add-01.ll b/llvm/test/CodeGen/SystemZ/fp-strict-add-01.ll
index 57f75cc871d..d95ab9331e8 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-add-01.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-add-01.ll
@@ -8,19 +8,19 @@ declare float @foo()
declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
; Check register addition.
-define float @f1(float %f1, float %f2) {
+define float @f1(float %f1, float %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: aebr %f0, %f2
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.fadd.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check the low end of the AEB range.
-define float @f2(float %f1, float *%ptr) {
+define float @f2(float %f1, float *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: aeb %f0, 0(%r2)
; CHECK: br %r14
@@ -28,12 +28,12 @@ define float @f2(float %f1, float *%ptr) {
%res = call float @llvm.experimental.constrained.fadd.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check the high end of the aligned AEB range.
-define float @f3(float %f1, float *%base) {
+define float @f3(float %f1, float *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: aeb %f0, 4092(%r2)
; CHECK: br %r14
@@ -42,13 +42,13 @@ define float @f3(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fadd.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
-define float @f4(float %f1, float *%base) {
+define float @f4(float %f1, float *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: aeb %f0, 0(%r2)
@@ -58,12 +58,12 @@ define float @f4(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fadd.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check negative displacements, which also need separate address logic.
-define float @f5(float %f1, float *%base) {
+define float @f5(float %f1, float *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: aeb %f0, 0(%r2)
@@ -73,12 +73,12 @@ define float @f5(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fadd.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check that AEB allows indices.
-define float @f6(float %f1, float *%base, i64 %index) {
+define float @f6(float %f1, float *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: aeb %f0, 400(%r1,%r2)
@@ -89,12 +89,12 @@ define float @f6(float %f1, float *%base, i64 %index) {
%res = call float @llvm.experimental.constrained.fadd.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check that additions of spilled values can use AEB rather than AEBR.
-define float @f7(float *%ptr0) {
+define float @f7(float *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: aeb %f0, 16{{[04]}}(%r15)
@@ -122,52 +122,54 @@ define float @f7(float *%ptr0) {
%val9 = load float, float *%ptr9
%val10 = load float, float *%ptr10
- %ret = call float @foo()
+ %ret = call float @foo() #0
%add0 = call float @llvm.experimental.constrained.fadd.f32(
float %ret, float %val0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add1 = call float @llvm.experimental.constrained.fadd.f32(
float %add0, float %val1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add2 = call float @llvm.experimental.constrained.fadd.f32(
float %add1, float %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add3 = call float @llvm.experimental.constrained.fadd.f32(
float %add2, float %val3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add4 = call float @llvm.experimental.constrained.fadd.f32(
float %add3, float %val4,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add5 = call float @llvm.experimental.constrained.fadd.f32(
float %add4, float %val5,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add6 = call float @llvm.experimental.constrained.fadd.f32(
float %add5, float %val6,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add7 = call float @llvm.experimental.constrained.fadd.f32(
float %add6, float %val7,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add8 = call float @llvm.experimental.constrained.fadd.f32(
float %add7, float %val8,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add9 = call float @llvm.experimental.constrained.fadd.f32(
float %add8, float %val9,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add10 = call float @llvm.experimental.constrained.fadd.f32(
float %add9, float %val10,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %add10
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-add-02.ll b/llvm/test/CodeGen/SystemZ/fp-strict-add-02.ll
index 739290969af..2693aef7c7f 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-add-02.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-add-02.ll
@@ -7,19 +7,19 @@ declare double @foo()
declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
; Check register addition.
-define double @f1(double %f1, double %f2) {
+define double @f1(double %f1, double %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: adbr %f0, %f2
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.fadd.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the low end of the ADB range.
-define double @f2(double %f1, double *%ptr) {
+define double @f2(double %f1, double *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: adb %f0, 0(%r2)
; CHECK: br %r14
@@ -27,12 +27,12 @@ define double @f2(double %f1, double *%ptr) {
%res = call double @llvm.experimental.constrained.fadd.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the high end of the aligned ADB range.
-define double @f3(double %f1, double *%base) {
+define double @f3(double %f1, double *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: adb %f0, 4088(%r2)
; CHECK: br %r14
@@ -41,13 +41,13 @@ define double @f3(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fadd.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
-define double @f4(double %f1, double *%base) {
+define double @f4(double %f1, double *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: adb %f0, 0(%r2)
@@ -57,12 +57,12 @@ define double @f4(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fadd.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check negative displacements, which also need separate address logic.
-define double @f5(double %f1, double *%base) {
+define double @f5(double %f1, double *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -8
; CHECK: adb %f0, 0(%r2)
@@ -72,12 +72,12 @@ define double @f5(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fadd.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check that ADB allows indices.
-define double @f6(double %f1, double *%base, i64 %index) {
+define double @f6(double %f1, double *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: adb %f0, 800(%r1,%r2)
@@ -88,12 +88,12 @@ define double @f6(double %f1, double *%base, i64 %index) {
%res = call double @llvm.experimental.constrained.fadd.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check that additions of spilled values can use ADB rather than ADBR.
-define double @f7(double *%ptr0) {
+define double @f7(double *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: adb %f0, 160(%r15)
@@ -121,52 +121,54 @@ define double @f7(double *%ptr0) {
%val9 = load double, double *%ptr9
%val10 = load double, double *%ptr10
- %ret = call double @foo()
+ %ret = call double @foo() #0
%add0 = call double @llvm.experimental.constrained.fadd.f64(
double %ret, double %val0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add1 = call double @llvm.experimental.constrained.fadd.f64(
double %add0, double %val1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add2 = call double @llvm.experimental.constrained.fadd.f64(
double %add1, double %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add3 = call double @llvm.experimental.constrained.fadd.f64(
double %add2, double %val3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add4 = call double @llvm.experimental.constrained.fadd.f64(
double %add3, double %val4,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add5 = call double @llvm.experimental.constrained.fadd.f64(
double %add4, double %val5,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add6 = call double @llvm.experimental.constrained.fadd.f64(
double %add5, double %val6,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add7 = call double @llvm.experimental.constrained.fadd.f64(
double %add6, double %val7,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add8 = call double @llvm.experimental.constrained.fadd.f64(
double %add7, double %val8,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add9 = call double @llvm.experimental.constrained.fadd.f64(
double %add8, double %val9,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%add10 = call double @llvm.experimental.constrained.fadd.f64(
double %add9, double %val10,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %add10
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-add-03.ll b/llvm/test/CodeGen/SystemZ/fp-strict-add-03.ll
index d2535c9f0b0..0aeef7c2545 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-add-03.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-add-03.ll
@@ -5,7 +5,7 @@
declare fp128 @llvm.experimental.constrained.fadd.f128(fp128, fp128, metadata, metadata)
; There is no memory form of 128-bit addition.
-define void @f1(fp128 *%ptr, float %f2) {
+define void @f1(fp128 *%ptr, float %f2) strictfp {
; CHECK-LABEL: f1:
; CHECK-DAG: lxebr %f0, %f0
; CHECK-DAG: ld %f1, 0(%r2)
@@ -19,7 +19,7 @@ define void @f1(fp128 *%ptr, float %f2) {
%sum = call fp128 @llvm.experimental.constrained.fadd.f128(
fp128 %f1, fp128 %f2x,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") strictfp
store fp128 %sum, fp128 *%ptr
ret void
}
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-add-04.ll b/llvm/test/CodeGen/SystemZ/fp-strict-add-04.ll
index d4ec5fc6854..98a3454a898 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-add-04.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-add-04.ll
@@ -4,7 +4,7 @@
declare fp128 @llvm.experimental.constrained.fadd.f128(fp128, fp128, metadata, metadata)
-define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
+define void @f1(fp128 *%ptr1, fp128 *%ptr2) strictfp {
; CHECK-LABEL: f1:
; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
@@ -16,7 +16,7 @@ define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
%sum = call fp128 @llvm.experimental.constrained.fadd.f128(
fp128 %f1, fp128 %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") strictfp
store fp128 %sum, fp128 *%ptr1
ret void
}
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-alias.ll b/llvm/test/CodeGen/SystemZ/fp-strict-alias.ll
index fe27b61c20b..4d675cba4ce 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-alias.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-alias.ll
@@ -30,7 +30,7 @@ define void @f1(float %f1, float %f2, float *%ptr1, float *%ptr2) {
ret void
}
-define void @f2(float %f1, float %f2, float *%ptr1, float *%ptr2) {
+define void @f2(float %f1, float %f2, float *%ptr1, float *%ptr2) #0 {
; CHECK-LABEL: f2:
; CHECK: sqebr
; CHECK: ste
@@ -41,11 +41,11 @@ define void @f2(float %f1, float %f2, float *%ptr1, float *%ptr2) {
%sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
float %f1,
metadata !"round.dynamic",
- metadata !"fpexcept.ignore")
+ metadata !"fpexcept.ignore") #0
%sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.ignore")
+ metadata !"fpexcept.ignore") #0
store float %sqrt1, float *%ptr1
store float %sqrt2, float *%ptr2
@@ -53,7 +53,7 @@ define void @f2(float %f1, float %f2, float *%ptr1, float *%ptr2) {
ret void
}
-define void @f3(float %f1, float %f2, float *%ptr1, float *%ptr2) {
+define void @f3(float %f1, float %f2, float *%ptr1, float *%ptr2) #0 {
; CHECK-LABEL: f3:
; CHECK: sqebr
; CHECK: ste
@@ -64,11 +64,11 @@ define void @f3(float %f1, float %f2, float *%ptr1, float *%ptr2) {
%sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
float %f1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store float %sqrt1, float *%ptr1
store float %sqrt2, float *%ptr2
@@ -98,7 +98,7 @@ define void @f4(float %f1, float %f2, float *%ptr1, float *%ptr2) {
ret void
}
-define void @f5(float %f1, float %f2, float *%ptr1, float *%ptr2) {
+define void @f5(float %f1, float %f2, float *%ptr1, float *%ptr2) #0 {
; CHECK-LABEL: f5:
; CHECK: sqebr
; CHECK: ste
@@ -109,11 +109,11 @@ define void @f5(float %f1, float %f2, float *%ptr1, float *%ptr2) {
%sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
float %f1,
metadata !"round.dynamic",
- metadata !"fpexcept.ignore")
+ metadata !"fpexcept.ignore") #0
%sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.ignore")
+ metadata !"fpexcept.ignore") #0
store volatile float %sqrt1, float *%ptr1
store volatile float %sqrt2, float *%ptr2
@@ -121,7 +121,7 @@ define void @f5(float %f1, float %f2, float *%ptr1, float *%ptr2) {
ret void
}
-define void @f6(float %f1, float %f2, float *%ptr1, float *%ptr2) {
+define void @f6(float %f1, float %f2, float *%ptr1, float *%ptr2) #0 {
; CHECK-LABEL: f6:
; CHECK: sqebr
; CHECK: sqebr
@@ -132,11 +132,11 @@ define void @f6(float %f1, float %f2, float *%ptr1, float *%ptr2) {
%sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
float %f1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store volatile float %sqrt1, float *%ptr1
store volatile float %sqrt2, float *%ptr2
@@ -166,7 +166,7 @@ define void @f7(float %f1, float %f2, float *%ptr1, float *%ptr2) {
ret void
}
-define void @f8(float %f1, float %f2, float *%ptr1, float *%ptr2) {
+define void @f8(float %f1, float %f2, float *%ptr1, float *%ptr2) #0 {
; CHECK-LABEL: f8:
; CHECK: sqebr
; CHECK: sqebr
@@ -177,13 +177,13 @@ define void @f8(float %f1, float %f2, float *%ptr1, float *%ptr2) {
%sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
float %f1,
metadata !"round.dynamic",
- metadata !"fpexcept.ignore")
+ metadata !"fpexcept.ignore") #0
%sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.ignore")
+ metadata !"fpexcept.ignore") #0
- call void @llvm.s390.sfpc(i32 0)
+ call void @llvm.s390.sfpc(i32 0) #0
store float %sqrt1, float *%ptr1
store float %sqrt2, float *%ptr2
@@ -191,7 +191,7 @@ define void @f8(float %f1, float %f2, float *%ptr1, float *%ptr2) {
ret void
}
-define void @f9(float %f1, float %f2, float *%ptr1, float *%ptr2) {
+define void @f9(float %f1, float %f2, float *%ptr1, float *%ptr2) #0 {
; CHECK-LABEL: f9:
; CHECK: sqebr
; CHECK: sqebr
@@ -202,13 +202,13 @@ define void @f9(float %f1, float %f2, float *%ptr1, float *%ptr2) {
%sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
float %f1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
- call void @llvm.s390.sfpc(i32 0)
+ call void @llvm.s390.sfpc(i32 0) #0
store float %sqrt1, float *%ptr1
store float %sqrt2, float *%ptr2
@@ -216,3 +216,4 @@ define void @f9(float %f1, float %f2, float *%ptr1, float *%ptr2) {
ret void
}
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-conv-01.ll b/llvm/test/CodeGen/SystemZ/fp-strict-conv-01.ll
index b20ab71808b..45dc51ea56b 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-conv-01.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-conv-01.ll
@@ -13,7 +13,7 @@ declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, me
declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
; Test f64->f32.
-define float @f1(double %d1, double %d2) {
+define float @f1(double %d1, double %d2) #0 {
; CHECK-LABEL: f1:
; CHECK-SCALAR: ledbr %f0, %f2
; CHECK-VECTOR: ledbra %f0, 0, %f2, 0
@@ -21,12 +21,12 @@ define float @f1(double %d1, double %d2) {
%res = call float @llvm.experimental.constrained.fptrunc.f32.f64(
double %d2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test f128->f32.
-define float @f2(fp128 *%ptr) {
+define float @f2(fp128 *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: lexbr %f0, %f0
; CHECK: br %r14
@@ -34,13 +34,13 @@ define float @f2(fp128 *%ptr) {
%res = call float @llvm.experimental.constrained.fptrunc.f32.f128(
fp128 %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Make sure that we don't use %f0 as the destination of LEXBR when %f2
; is still live.
-define void @f3(float *%dst, fp128 *%ptr, float %d1, float %d2) {
+define void @f3(float *%dst, fp128 *%ptr, float %d1, float %d2) #0 {
; CHECK-LABEL: f3:
; CHECK: lexbr %f1, %f1
; CHECK: aebr %f1, %f2
@@ -50,17 +50,17 @@ define void @f3(float *%dst, fp128 *%ptr, float %d1, float %d2) {
%conv = call float @llvm.experimental.constrained.fptrunc.f32.f128(
fp128 %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%res = call float @llvm.experimental.constrained.fadd.f32(
float %conv, float %d2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store float %res, float *%dst
ret void
}
; Test f128->f64.
-define double @f4(fp128 *%ptr) {
+define double @f4(fp128 *%ptr) #0 {
; CHECK-LABEL: f4:
; CHECK: ldxbr %f0, %f0
; CHECK: br %r14
@@ -68,12 +68,12 @@ define double @f4(fp128 *%ptr) {
%res = call double @llvm.experimental.constrained.fptrunc.f64.f128(
fp128 %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Like f3, but for f128->f64.
-define void @f5(double *%dst, fp128 *%ptr, double %d1, double %d2) {
+define void @f5(double *%dst, fp128 *%ptr, double %d1, double %d2) #0 {
; CHECK-LABEL: f5:
; CHECK: ldxbr %f1, %f1
; CHECK-SCALAR: adbr %f1, %f2
@@ -85,11 +85,13 @@ define void @f5(double *%dst, fp128 *%ptr, double %d1, double %d2) {
%conv = call double @llvm.experimental.constrained.fptrunc.f64.f128(
fp128 %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%res = call double @llvm.experimental.constrained.fadd.f64(
double %conv, double %d2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store double %res, double *%dst
ret void
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-conv-02.ll b/llvm/test/CodeGen/SystemZ/fp-strict-conv-02.ll
index 0f24b91e266..4cada62d003 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-conv-02.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-conv-02.ll
@@ -5,41 +5,41 @@
declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)
; Check register extension.
-define double @f1(float %val) {
+define double @f1(float %val) #0 {
; CHECK-LABEL: f1:
; CHECK: ldebr %f0, %f0
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.fpext.f64.f32(float %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the low end of the LDEB range.
-define double @f2(float *%ptr) {
+define double @f2(float *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: ldeb %f0, 0(%r2)
; CHECK: br %r14
%val = load float, float *%ptr
%res = call double @llvm.experimental.constrained.fpext.f64.f32(float %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the high end of the aligned LDEB range.
-define double @f3(float *%base) {
+define double @f3(float *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: ldeb %f0, 4092(%r2)
; CHECK: br %r14
%ptr = getelementptr float, float *%base, i64 1023
%val = load float, float *%ptr
%res = call double @llvm.experimental.constrained.fpext.f64.f32(float %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
-define double @f4(float *%base) {
+define double @f4(float *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: ldeb %f0, 0(%r2)
@@ -47,12 +47,12 @@ define double @f4(float *%base) {
%ptr = getelementptr float, float *%base, i64 1024
%val = load float, float *%ptr
%res = call double @llvm.experimental.constrained.fpext.f64.f32(float %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check negative displacements, which also need separate address logic.
-define double @f5(float *%base) {
+define double @f5(float *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: ldeb %f0, 0(%r2)
@@ -60,12 +60,12 @@ define double @f5(float *%base) {
%ptr = getelementptr float, float *%base, i64 -1
%val = load float, float *%ptr
%res = call double @llvm.experimental.constrained.fpext.f64.f32(float %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check that LDEB allows indices.
-define double @f6(float *%base, i64 %index) {
+define double @f6(float *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: ldeb %f0, 400(%r1,%r2)
@@ -74,7 +74,8 @@ define double @f6(float *%base, i64 %index) {
%ptr2 = getelementptr float, float *%ptr1, i64 100
%val = load float, float *%ptr2
%res = call double @llvm.experimental.constrained.fpext.f64.f32(float %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-conv-03.ll b/llvm/test/CodeGen/SystemZ/fp-strict-conv-03.ll
index b3fbac975a1..7a8a7a88e28 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-conv-03.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-conv-03.ll
@@ -5,20 +5,20 @@
declare fp128 @llvm.experimental.constrained.fpext.f128.f32(float, metadata)
; Check register extension.
-define void @f1(fp128 *%dst, float %val) {
+define void @f1(fp128 *%dst, float %val) #0 {
; CHECK-LABEL: f1:
; CHECK: lxebr %f0, %f0
; CHECK: std %f0, 0(%r2)
; CHECK: std %f2, 8(%r2)
; CHECK: br %r14
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check the low end of the LXEB range.
-define void @f2(fp128 *%dst, float *%ptr) {
+define void @f2(fp128 *%dst, float *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: lxeb %f0, 0(%r3)
; CHECK: std %f0, 0(%r2)
@@ -26,13 +26,13 @@ define void @f2(fp128 *%dst, float *%ptr) {
; CHECK: br %r14
%val = load float, float *%ptr
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check the high end of the aligned LXEB range.
-define void @f3(fp128 *%dst, float *%base) {
+define void @f3(fp128 *%dst, float *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: lxeb %f0, 4092(%r3)
; CHECK: std %f0, 0(%r2)
@@ -41,14 +41,14 @@ define void @f3(fp128 *%dst, float *%base) {
%ptr = getelementptr float, float *%base, i64 1023
%val = load float, float *%ptr
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
-define void @f4(fp128 *%dst, float *%base) {
+define void @f4(fp128 *%dst, float *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r3, 4096
; CHECK: lxeb %f0, 0(%r3)
@@ -58,13 +58,13 @@ define void @f4(fp128 *%dst, float *%base) {
%ptr = getelementptr float, float *%base, i64 1024
%val = load float, float *%ptr
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check negative displacements, which also need separate address logic.
-define void @f5(fp128 *%dst, float *%base) {
+define void @f5(fp128 *%dst, float *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r3, -4
; CHECK: lxeb %f0, 0(%r3)
@@ -74,13 +74,13 @@ define void @f5(fp128 *%dst, float *%base) {
%ptr = getelementptr float, float *%base, i64 -1
%val = load float, float *%ptr
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check that LXEB allows indices.
-define void @f6(fp128 *%dst, float *%base, i64 %index) {
+define void @f6(fp128 *%dst, float *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r4, 2
; CHECK: lxeb %f0, 400(%r1,%r3)
@@ -91,8 +91,9 @@ define void @f6(fp128 *%dst, float *%base, i64 %index) {
%ptr2 = getelementptr float, float *%ptr1, i64 100
%val = load float, float *%ptr2
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-conv-04.ll b/llvm/test/CodeGen/SystemZ/fp-strict-conv-04.ll
index 657cdcdfd50..4ddfe1031c3 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-conv-04.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-conv-04.ll
@@ -5,20 +5,20 @@
declare fp128 @llvm.experimental.constrained.fpext.f128.f64(double, metadata)
; Check register extension.
-define void @f1(fp128 *%dst, double %val) {
+define void @f1(fp128 *%dst, double %val) #0 {
; CHECK-LABEL: f1:
; CHECK: lxdbr %f0, %f0
; CHECK: std %f0, 0(%r2)
; CHECK: std %f2, 8(%r2)
; CHECK: br %r14
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check the low end of the LXDB range.
-define void @f2(fp128 *%dst, double *%ptr) {
+define void @f2(fp128 *%dst, double *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: lxdb %f0, 0(%r3)
; CHECK: std %f0, 0(%r2)
@@ -26,13 +26,13 @@ define void @f2(fp128 *%dst, double *%ptr) {
; CHECK: br %r14
%val = load double, double *%ptr
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check the high end of the aligned LXDB range.
-define void @f3(fp128 *%dst, double *%base) {
+define void @f3(fp128 *%dst, double *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: lxdb %f0, 4088(%r3)
; CHECK: std %f0, 0(%r2)
@@ -41,14 +41,14 @@ define void @f3(fp128 *%dst, double *%base) {
%ptr = getelementptr double, double *%base, i64 511
%val = load double, double *%ptr
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
-define void @f4(fp128 *%dst, double *%base) {
+define void @f4(fp128 *%dst, double *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r3, 4096
; CHECK: lxdb %f0, 0(%r3)
@@ -58,13 +58,13 @@ define void @f4(fp128 *%dst, double *%base) {
%ptr = getelementptr double, double *%base, i64 512
%val = load double, double *%ptr
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check negative displacements, which also need separate address logic.
-define void @f5(fp128 *%dst, double *%base) {
+define void @f5(fp128 *%dst, double *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r3, -8
; CHECK: lxdb %f0, 0(%r3)
@@ -74,13 +74,13 @@ define void @f5(fp128 *%dst, double *%base) {
%ptr = getelementptr double, double *%base, i64 -1
%val = load double, double *%ptr
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check that LXDB allows indices.
-define void @f6(fp128 *%dst, double *%base, i64 %index) {
+define void @f6(fp128 *%dst, double *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r4, 3
; CHECK: lxdb %f0, 800(%r1,%r3)
@@ -91,8 +91,9 @@ define void @f6(fp128 *%dst, double *%base, i64 %index) {
%ptr2 = getelementptr double, double *%ptr1, i64 100
%val = load double, double *%ptr2
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-conv-09.ll b/llvm/test/CodeGen/SystemZ/fp-strict-conv-09.ll
index b1a11e66e2a..abdb865d4df 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-conv-09.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-conv-09.ll
@@ -7,27 +7,27 @@ declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128, metadata)
; Test f32->i32.
-define i32 @f1(float %f) {
+define i32 @f1(float %f) #0 {
; CHECK-LABEL: f1:
; CHECK: cfebr %r2, 5, %f0
; CHECK: br %r14
%conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i32 %conv
}
; Test f64->i32.
-define i32 @f2(double %f) {
+define i32 @f2(double %f) #0 {
; CHECK-LABEL: f2:
; CHECK: cfdbr %r2, 5, %f0
; CHECK: br %r14
%conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i32 %conv
}
; Test f128->i32.
-define i32 @f3(fp128 *%src) {
+define i32 @f3(fp128 *%src) #0 {
; CHECK-LABEL: f3:
; CHECK: ld %f0, 0(%r2)
; CHECK: ld %f2, 8(%r2)
@@ -35,6 +35,8 @@ define i32 @f3(fp128 *%src) {
; CHECK: br %r14
%f = load fp128, fp128 *%src
%conv = call i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128 %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i32 %conv
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-conv-10.ll b/llvm/test/CodeGen/SystemZ/fp-strict-conv-10.ll
index cc6450a6c04..aef02cd8cdb 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-conv-10.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-conv-10.ll
@@ -14,7 +14,7 @@ declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128, metadata)
; Test f32->i32.
-define i32 @f1(float %f) {
+define i32 @f1(float %f) #0 {
; CHECK-LABEL: f1:
; CHECK: # %bb.0:
; CHECK-NEXT: larl %r1, .LCPI0_0
@@ -30,12 +30,12 @@ define i32 @f1(float %f) {
; CHECK-NEXT: xr %r2, %r0
; CHECK-NEXT: br %r14
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i32 %conv
}
; Test f64->i32.
-define i32 @f2(double %f) {
+define i32 @f2(double %f) #0 {
; CHECK-LABEL: f2:
; CHECK: # %bb.0:
; CHECK-NEXT: larl %r1, .LCPI1_0
@@ -51,12 +51,12 @@ define i32 @f2(double %f) {
; CHECK-NEXT: xr %r2, %r0
; CHECK-NEXT: br %r14
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i32 %conv
}
; Test f128->i32.
-define i32 @f3(fp128 *%src) {
+define i32 @f3(fp128 *%src) #0 {
; CHECK-LABEL: f3:
; CHECK: # %bb.0:
; CHECK-NEXT: ld %f0, 0(%r2)
@@ -75,6 +75,8 @@ define i32 @f3(fp128 *%src) {
; CHECK-NEXT: br %r14
%f = load fp128, fp128 *%src
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128 %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i32 %conv
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-conv-11.ll b/llvm/test/CodeGen/SystemZ/fp-strict-conv-11.ll
index bc3e9b9b8a5..d9cc33700e7 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-conv-11.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-conv-11.ll
@@ -7,27 +7,27 @@ declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128, metadata)
; Test f32->i64.
-define i64 @f1(float %f) {
+define i64 @f1(float %f) #0 {
; CHECK-LABEL: f1:
; CHECK: cgebr %r2, 5, %f0
; CHECK: br %r14
%conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i64 %conv
}
; Test f64->i64.
-define i64 @f2(double %f) {
+define i64 @f2(double %f) #0 {
; CHECK-LABEL: f2:
; CHECK: cgdbr %r2, 5, %f0
; CHECK: br %r14
%conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i64 %conv
}
; Test f128->i64.
-define i64 @f3(fp128 *%src) {
+define i64 @f3(fp128 *%src) #0 {
; CHECK-LABEL: f3:
; CHECK: ld %f0, 0(%r2)
; CHECK: ld %f2, 8(%r2)
@@ -35,6 +35,8 @@ define i64 @f3(fp128 *%src) {
; CHECK: br %r14
%f = load fp128, fp128 *%src
%conv = call i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128 %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i64 %conv
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-conv-12.ll b/llvm/test/CodeGen/SystemZ/fp-strict-conv-12.ll
index e9811cf2fd9..2d7618f4996 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-conv-12.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-conv-12.ll
@@ -13,7 +13,7 @@ declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128, metadata)
; Test f32->i64.
-define i64 @f1(float %f) {
+define i64 @f1(float %f) #0 {
; CHECK-LABEL: f1:
; CHECK: # %bb.0:
; CHECK-NEXT: larl %r1, .LCPI0_0
@@ -29,12 +29,12 @@ define i64 @f1(float %f) {
; CHECK-NEXT: xgr %r2, %r0
; CHECK-NEXT: br %r14
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i64 %conv
}
; Test f64->i64.
-define i64 @f2(double %f) {
+define i64 @f2(double %f) #0 {
; CHECK-LABEL: f2:
; CHECK: # %bb.0:
; CHECK-NEXT: larl %r1, .LCPI1_0
@@ -50,12 +50,12 @@ define i64 @f2(double %f) {
; CHECK-NEXT: xgr %r2, %r0
; CHECK-NEXT: br %r14
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i64 %conv
}
; Test f128->i64.
-define i64 @f3(fp128 *%src) {
+define i64 @f3(fp128 *%src) #0 {
; CHECK-LABEL: f3:
; CHECK: # %bb.0:
; CHECK-NEXT: ld %f0, 0(%r2)
@@ -74,6 +74,8 @@ define i64 @f3(fp128 *%src) {
; CHECK-NEXT: br %r14
%f = load fp128, fp128 *%src
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128 %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i64 %conv
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-conv-14.ll b/llvm/test/CodeGen/SystemZ/fp-strict-conv-14.ll
index 70a02c55799..83478bab74d 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-conv-14.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-conv-14.ll
@@ -11,27 +11,27 @@ declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128, metadata)
; Test f32->i32.
-define i32 @f1(float %f) {
+define i32 @f1(float %f) #0 {
; CHECK-LABEL: f1:
; CHECK: clfebr %r2, 5, %f0, 0
; CHECK: br %r14
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i32 %conv
}
; Test f64->i32.
-define i32 @f2(double %f) {
+define i32 @f2(double %f) #0 {
; CHECK-LABEL: f2:
; CHECK: clfdbr %r2, 5, %f0, 0
; CHECK: br %r14
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i32 %conv
}
; Test f128->i32.
-define i32 @f3(fp128 *%src) {
+define i32 @f3(fp128 *%src) #0 {
; CHECK-LABEL: f3:
; CHECK-DAG: ld %f0, 0(%r2)
; CHECK-DAG: ld %f2, 8(%r2)
@@ -39,32 +39,32 @@ define i32 @f3(fp128 *%src) {
; CHECK: br %r14
%f = load fp128, fp128 *%src
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128 %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i32 %conv
}
; Test f32->i64.
-define i64 @f4(float %f) {
+define i64 @f4(float %f) #0 {
; CHECK-LABEL: f4:
; CHECK: clgebr %r2, 5, %f0, 0
; CHECK: br %r14
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i64 %conv
}
; Test f64->i64.
-define i64 @f5(double %f) {
+define i64 @f5(double %f) #0 {
; CHECK-LABEL: f5:
; CHECK: clgdbr %r2, 5, %f0, 0
; CHECK: br %r14
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i64 %conv
}
; Test f128->i64.
-define i64 @f6(fp128 *%src) {
+define i64 @f6(fp128 *%src) #0 {
; CHECK-LABEL: f6:
; CHECK-DAG: ld %f0, 0(%r2)
; CHECK-DAG: ld %f2, 8(%r2)
@@ -72,6 +72,8 @@ define i64 @f6(fp128 *%src) {
; CHECK: br %r14
%f = load fp128, fp128 *%src
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128 %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i64 %conv
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-conv-15.ll b/llvm/test/CodeGen/SystemZ/fp-strict-conv-15.ll
index 64a82c32fbe..9b080c14e18 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-conv-15.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-conv-15.ll
@@ -9,7 +9,7 @@ declare fp128 @llvm.experimental.constrained.fpext.f128.f32(float, metadata)
declare fp128 @llvm.experimental.constrained.fpext.f128.f64(double, metadata)
; Test f128->f64.
-define double @f1(fp128 *%ptr) {
+define double @f1(fp128 *%ptr) #0 {
; CHECK-LABEL: f1:
; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
; CHECK: wflrx %f0, [[REG]], 0, 0
@@ -18,12 +18,12 @@ define double @f1(fp128 *%ptr) {
%res = call double @llvm.experimental.constrained.fptrunc.f64.f128(
fp128 %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Test f128->f32.
-define float @f2(fp128 *%ptr) {
+define float @f2(fp128 *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
; CHECK: wflrx %f0, [[REG]], 0, 3
@@ -33,32 +33,33 @@ define float @f2(fp128 *%ptr) {
%res = call float @llvm.experimental.constrained.fptrunc.f32.f128(
fp128 %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test f64->f128.
-define void @f3(fp128 *%dst, double %val) {
+define void @f3(fp128 *%dst, double %val) #0 {
; CHECK-LABEL: f3:
; CHECK: wflld [[RES:%v[0-9]+]], %f0
; CHECK: vst [[RES]], 0(%r2)
; CHECK: br %r14
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Test f32->f128.
-define void @f4(fp128 *%dst, float %val) {
+define void @f4(fp128 *%dst, float %val) #0 {
; CHECK-LABEL: f4:
; CHECK: ldebr %f0, %f0
; CHECK: wflld [[RES:%v[0-9]+]], %f0
; CHECK: vst [[RES]], 0(%r2)
; CHECK: br %r14
%res = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %val,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-conv-16.ll b/llvm/test/CodeGen/SystemZ/fp-strict-conv-16.ll
index fbbb608ac7c..f2e4121d1f2 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-conv-16.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-conv-16.ll
@@ -11,7 +11,7 @@ declare i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128, metadata)
; Test signed f128->i32.
-define i32 @f5(fp128 *%src) {
+define i32 @f5(fp128 *%src) #0 {
; CHECK-LABEL: f5:
; CHECK: vl %v0, 0(%r2)
; CHECK: vrepg %v2, %v0, 1
@@ -19,12 +19,12 @@ define i32 @f5(fp128 *%src) {
; CHECK: br %r14
%f = load fp128, fp128 *%src
%conv = call i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128 %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i32 %conv
}
; Test signed f128->i64.
-define i64 @f6(fp128 *%src) {
+define i64 @f6(fp128 *%src) #0 {
; CHECK-LABEL: f6:
; CHECK: vl %v0, 0(%r2)
; CHECK: vrepg %v2, %v0, 1
@@ -32,12 +32,12 @@ define i64 @f6(fp128 *%src) {
; CHECK: br %r14
%f = load fp128, fp128 *%src
%conv = call i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128 %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i64 %conv
}
; Test unsigned f128->i32.
-define i32 @f7(fp128 *%src) {
+define i32 @f7(fp128 *%src) #0 {
; CHECK-LABEL: f7:
; CHECK: vl %v0, 0(%r2)
; CHECK: vrepg %v2, %v0, 1
@@ -45,12 +45,12 @@ define i32 @f7(fp128 *%src) {
; CHECK: br %r14
%f = load fp128, fp128 *%src
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128 %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i32 %conv
}
; Test unsigned f128->i64.
-define i64 @f8(fp128 *%src) {
+define i64 @f8(fp128 *%src) #0 {
; CHECK-LABEL: f8:
; CHECK: vl %v0, 0(%r2)
; CHECK: vrepg %v2, %v0, 1
@@ -58,6 +58,8 @@ define i64 @f8(fp128 *%src) {
; CHECK: br %r14
%f = load fp128, fp128 *%src
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128 %f,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i64 %conv
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-div-01.ll b/llvm/test/CodeGen/SystemZ/fp-strict-div-01.ll
index 7f9a4fce17e..27cb70cd06f 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-div-01.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-div-01.ll
@@ -8,19 +8,19 @@ declare float @foo()
declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata)
; Check register division.
-define float @f1(float %f1, float %f2) {
+define float @f1(float %f1, float %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: debr %f0, %f2
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.fdiv.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check the low end of the DEB range.
-define float @f2(float %f1, float *%ptr) {
+define float @f2(float %f1, float *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: deb %f0, 0(%r2)
; CHECK: br %r14
@@ -28,12 +28,12 @@ define float @f2(float %f1, float *%ptr) {
%res = call float @llvm.experimental.constrained.fdiv.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check the high end of the aligned DEB range.
-define float @f3(float %f1, float *%base) {
+define float @f3(float %f1, float *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: deb %f0, 4092(%r2)
; CHECK: br %r14
@@ -42,13 +42,13 @@ define float @f3(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fdiv.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
-define float @f4(float %f1, float *%base) {
+define float @f4(float %f1, float *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: deb %f0, 0(%r2)
@@ -58,12 +58,12 @@ define float @f4(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fdiv.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check negative displacements, which also need separate address logic.
-define float @f5(float %f1, float *%base) {
+define float @f5(float %f1, float *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: deb %f0, 0(%r2)
@@ -73,12 +73,12 @@ define float @f5(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fdiv.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check that DEB allows indices.
-define float @f6(float %f1, float *%base, i64 %index) {
+define float @f6(float %f1, float *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: deb %f0, 400(%r1,%r2)
@@ -89,12 +89,12 @@ define float @f6(float %f1, float *%base, i64 %index) {
%res = call float @llvm.experimental.constrained.fdiv.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check that divisions of spilled values can use DEB rather than DEBR.
-define float @f7(float *%ptr0) {
+define float @f7(float *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: deb %f0, 16{{[04]}}(%r15)
@@ -122,52 +122,54 @@ define float @f7(float *%ptr0) {
%val9 = load float, float *%ptr9
%val10 = load float, float *%ptr10
- %ret = call float @foo()
+ %ret = call float @foo() #0
%div0 = call float @llvm.experimental.constrained.fdiv.f32(
float %ret, float %val0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div1 = call float @llvm.experimental.constrained.fdiv.f32(
float %div0, float %val1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div2 = call float @llvm.experimental.constrained.fdiv.f32(
float %div1, float %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div3 = call float @llvm.experimental.constrained.fdiv.f32(
float %div2, float %val3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div4 = call float @llvm.experimental.constrained.fdiv.f32(
float %div3, float %val4,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div5 = call float @llvm.experimental.constrained.fdiv.f32(
float %div4, float %val5,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div6 = call float @llvm.experimental.constrained.fdiv.f32(
float %div5, float %val6,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div7 = call float @llvm.experimental.constrained.fdiv.f32(
float %div6, float %val7,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div8 = call float @llvm.experimental.constrained.fdiv.f32(
float %div7, float %val8,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div9 = call float @llvm.experimental.constrained.fdiv.f32(
float %div8, float %val9,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div10 = call float @llvm.experimental.constrained.fdiv.f32(
float %div9, float %val10,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %div10
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-div-02.ll b/llvm/test/CodeGen/SystemZ/fp-strict-div-02.ll
index 850af2172f0..d8fc2bd7430 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-div-02.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-div-02.ll
@@ -8,19 +8,19 @@ declare double @foo()
declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
; Check register division.
-define double @f1(double %f1, double %f2) {
+define double @f1(double %f1, double %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: ddbr %f0, %f2
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.fdiv.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the low end of the DDB range.
-define double @f2(double %f1, double *%ptr) {
+define double @f2(double %f1, double *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: ddb %f0, 0(%r2)
; CHECK: br %r14
@@ -28,12 +28,12 @@ define double @f2(double %f1, double *%ptr) {
%res = call double @llvm.experimental.constrained.fdiv.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the high end of the aligned DDB range.
-define double @f3(double %f1, double *%base) {
+define double @f3(double %f1, double *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: ddb %f0, 4088(%r2)
; CHECK: br %r14
@@ -42,13 +42,13 @@ define double @f3(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fdiv.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
-define double @f4(double %f1, double *%base) {
+define double @f4(double %f1, double *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: ddb %f0, 0(%r2)
@@ -58,12 +58,12 @@ define double @f4(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fdiv.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check negative displacements, which also need separate address logic.
-define double @f5(double %f1, double *%base) {
+define double @f5(double %f1, double *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -8
; CHECK: ddb %f0, 0(%r2)
@@ -73,12 +73,12 @@ define double @f5(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fdiv.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check that DDB allows indices.
-define double @f6(double %f1, double *%base, i64 %index) {
+define double @f6(double %f1, double *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: ddb %f0, 800(%r1,%r2)
@@ -89,12 +89,12 @@ define double @f6(double %f1, double *%base, i64 %index) {
%res = call double @llvm.experimental.constrained.fdiv.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check that divisions of spilled values can use DDB rather than DDBR.
-define double @f7(double *%ptr0) {
+define double @f7(double *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: ddb %f0, 160(%r15)
@@ -122,52 +122,54 @@ define double @f7(double *%ptr0) {
%val9 = load double, double *%ptr9
%val10 = load double, double *%ptr10
- %ret = call double @foo()
+ %ret = call double @foo() #0
%div0 = call double @llvm.experimental.constrained.fdiv.f64(
double %ret, double %val0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div1 = call double @llvm.experimental.constrained.fdiv.f64(
double %div0, double %val1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div2 = call double @llvm.experimental.constrained.fdiv.f64(
double %div1, double %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div3 = call double @llvm.experimental.constrained.fdiv.f64(
double %div2, double %val3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div4 = call double @llvm.experimental.constrained.fdiv.f64(
double %div3, double %val4,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div5 = call double @llvm.experimental.constrained.fdiv.f64(
double %div4, double %val5,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div6 = call double @llvm.experimental.constrained.fdiv.f64(
double %div5, double %val6,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div7 = call double @llvm.experimental.constrained.fdiv.f64(
double %div6, double %val7,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div8 = call double @llvm.experimental.constrained.fdiv.f64(
double %div7, double %val8,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div9 = call double @llvm.experimental.constrained.fdiv.f64(
double %div8, double %val9,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%div10 = call double @llvm.experimental.constrained.fdiv.f64(
double %div9, double %val10,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %div10
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-div-03.ll b/llvm/test/CodeGen/SystemZ/fp-strict-div-03.ll
index 860f91d8ed8..fcd2184ac4f 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-div-03.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-div-03.ll
@@ -5,7 +5,7 @@
declare fp128 @llvm.experimental.constrained.fdiv.f128(fp128, fp128, metadata, metadata)
; There is no memory form of 128-bit division.
-define void @f1(fp128 *%ptr, float %f2) {
+define void @f1(fp128 *%ptr, float %f2) strictfp {
; CHECK-LABEL: f1:
; CHECK-DAG: lxebr %f0, %f0
; CHECK-DAG: ld %f1, 0(%r2)
@@ -19,7 +19,7 @@ define void @f1(fp128 *%ptr, float %f2) {
%sum = call fp128 @llvm.experimental.constrained.fdiv.f128(
fp128 %f1, fp128 %f2x,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") strictfp
store fp128 %sum, fp128 *%ptr
ret void
}
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-div-04.ll b/llvm/test/CodeGen/SystemZ/fp-strict-div-04.ll
index e3f117515aa..a43eebb5f0a 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-div-04.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-div-04.ll
@@ -4,7 +4,7 @@
declare fp128 @llvm.experimental.constrained.fdiv.f128(fp128, fp128, metadata, metadata)
-define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
+define void @f1(fp128 *%ptr1, fp128 *%ptr2) strictfp {
; CHECK-LABEL: f1:
; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
@@ -16,7 +16,7 @@ define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
%sum = call fp128 @llvm.experimental.constrained.fdiv.f128(
fp128 %f1, fp128 %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") strictfp
store fp128 %sum, fp128 *%ptr1
ret void
}
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-mul-01.ll b/llvm/test/CodeGen/SystemZ/fp-strict-mul-01.ll
index 3e07091bf3c..623ef9007f6 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-mul-01.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-mul-01.ll
@@ -8,19 +8,19 @@ declare float @foo()
declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata)
; Check register multiplication.
-define float @f1(float %f1, float %f2) {
+define float @f1(float %f1, float %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: meebr %f0, %f2
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.fmul.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check the low end of the MEEB range.
-define float @f2(float %f1, float *%ptr) {
+define float @f2(float %f1, float *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: meeb %f0, 0(%r2)
; CHECK: br %r14
@@ -28,12 +28,12 @@ define float @f2(float %f1, float *%ptr) {
%res = call float @llvm.experimental.constrained.fmul.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check the high end of the aligned MEEB range.
-define float @f3(float %f1, float *%base) {
+define float @f3(float %f1, float *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: meeb %f0, 4092(%r2)
; CHECK: br %r14
@@ -42,13 +42,13 @@ define float @f3(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fmul.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
-define float @f4(float %f1, float *%base) {
+define float @f4(float %f1, float *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: meeb %f0, 0(%r2)
@@ -58,12 +58,12 @@ define float @f4(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fmul.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check negative displacements, which also need separate address logic.
-define float @f5(float %f1, float *%base) {
+define float @f5(float %f1, float *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: meeb %f0, 0(%r2)
@@ -73,12 +73,12 @@ define float @f5(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fmul.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check that MEEB allows indices.
-define float @f6(float %f1, float *%base, i64 %index) {
+define float @f6(float %f1, float *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: meeb %f0, 400(%r1,%r2)
@@ -89,12 +89,12 @@ define float @f6(float %f1, float *%base, i64 %index) {
%res = call float @llvm.experimental.constrained.fmul.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check that multiplications of spilled values can use MEEB rather than MEEBR.
-define float @f7(float *%ptr0) {
+define float @f7(float *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: meeb %f0, 16{{[04]}}(%r15)
@@ -122,52 +122,54 @@ define float @f7(float *%ptr0) {
%val9 = load float, float *%ptr9
%val10 = load float, float *%ptr10
- %ret = call float @foo()
+ %ret = call float @foo() #0
%mul0 = call float @llvm.experimental.constrained.fmul.f32(
float %ret, float %val0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul1 = call float @llvm.experimental.constrained.fmul.f32(
float %mul0, float %val1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul2 = call float @llvm.experimental.constrained.fmul.f32(
float %mul1, float %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul3 = call float @llvm.experimental.constrained.fmul.f32(
float %mul2, float %val3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul4 = call float @llvm.experimental.constrained.fmul.f32(
float %mul3, float %val4,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul5 = call float @llvm.experimental.constrained.fmul.f32(
float %mul4, float %val5,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul6 = call float @llvm.experimental.constrained.fmul.f32(
float %mul5, float %val6,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul7 = call float @llvm.experimental.constrained.fmul.f32(
float %mul6, float %val7,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul8 = call float @llvm.experimental.constrained.fmul.f32(
float %mul7, float %val8,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul9 = call float @llvm.experimental.constrained.fmul.f32(
float %mul8, float %val9,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul10 = call float @llvm.experimental.constrained.fmul.f32(
float %mul9, float %val10,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %mul10
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-mul-02.ll b/llvm/test/CodeGen/SystemZ/fp-strict-mul-02.ll
index 6f080f6e4ff..7acabef29f4 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-mul-02.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-mul-02.ll
@@ -7,7 +7,7 @@ declare float @foo()
declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)
; Check register multiplication.
-define double @f1(float %f1, float %f2) {
+define double @f1(float %f1, float %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: mdebr %f0, %f2
; CHECK: br %r14
@@ -16,12 +16,12 @@ define double @f1(float %f1, float %f2) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1x, double %f2x,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the low end of the MDEB range.
-define double @f2(float %f1, float *%ptr) {
+define double @f2(float %f1, float *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: mdeb %f0, 0(%r2)
; CHECK: br %r14
@@ -31,12 +31,12 @@ define double @f2(float %f1, float *%ptr) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1x, double %f2x,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the high end of the aligned MDEB range.
-define double @f3(float %f1, float *%base) {
+define double @f3(float %f1, float *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: mdeb %f0, 4092(%r2)
; CHECK: br %r14
@@ -47,13 +47,13 @@ define double @f3(float %f1, float *%base) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1x, double %f2x,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
-define double @f4(float %f1, float *%base) {
+define double @f4(float %f1, float *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: mdeb %f0, 0(%r2)
@@ -65,12 +65,12 @@ define double @f4(float %f1, float *%base) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1x, double %f2x,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check negative displacements, which also need separate address logic.
-define double @f5(float %f1, float *%base) {
+define double @f5(float %f1, float *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: mdeb %f0, 0(%r2)
@@ -82,12 +82,12 @@ define double @f5(float %f1, float *%base) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1x, double %f2x,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check that MDEB allows indices.
-define double @f6(float %f1, float *%base, i64 %index) {
+define double @f6(float %f1, float *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: mdeb %f0, 400(%r1,%r2)
@@ -100,12 +100,12 @@ define double @f6(float %f1, float *%base, i64 %index) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1x, double %f2x,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check that multiplications of spilled values can use MDEB rather than MDEBR.
-define float @f7(float *%ptr0) {
+define float @f7(float *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK: mdeb %f0, 16{{[04]}}(%r15)
@@ -157,18 +157,18 @@ define float @f7(float *%ptr0) {
store float %frob9, float *%ptr9
store float %frob10, float *%ptr10
- %ret = call float @foo()
+ %ret = call float @foo() #0
%accext0 = fpext float %ret to double
%ext0 = fpext float %frob0 to double
%mul0 = call double @llvm.experimental.constrained.fmul.f64(
double %accext0, double %ext0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%extra0 = call double @llvm.experimental.constrained.fmul.f64(
double %mul0, double 1.01,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc0 = fptrunc double %extra0 to float
%accext1 = fpext float %trunc0 to double
@@ -176,11 +176,11 @@ define float @f7(float *%ptr0) {
%mul1 = call double @llvm.experimental.constrained.fmul.f64(
double %accext1, double %ext1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%extra1 = call double @llvm.experimental.constrained.fmul.f64(
double %mul1, double 1.11,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc1 = fptrunc double %extra1 to float
%accext2 = fpext float %trunc1 to double
@@ -188,11 +188,11 @@ define float @f7(float *%ptr0) {
%mul2 = call double @llvm.experimental.constrained.fmul.f64(
double %accext2, double %ext2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%extra2 = call double @llvm.experimental.constrained.fmul.f64(
double %mul2, double 1.21,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc2 = fptrunc double %extra2 to float
%accext3 = fpext float %trunc2 to double
@@ -200,11 +200,11 @@ define float @f7(float *%ptr0) {
%mul3 = call double @llvm.experimental.constrained.fmul.f64(
double %accext3, double %ext3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%extra3 = call double @llvm.experimental.constrained.fmul.f64(
double %mul3, double 1.31,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc3 = fptrunc double %extra3 to float
%accext4 = fpext float %trunc3 to double
@@ -212,11 +212,11 @@ define float @f7(float *%ptr0) {
%mul4 = call double @llvm.experimental.constrained.fmul.f64(
double %accext4, double %ext4,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%extra4 = call double @llvm.experimental.constrained.fmul.f64(
double %mul4, double 1.41,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc4 = fptrunc double %extra4 to float
%accext5 = fpext float %trunc4 to double
@@ -224,11 +224,11 @@ define float @f7(float *%ptr0) {
%mul5 = call double @llvm.experimental.constrained.fmul.f64(
double %accext5, double %ext5,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%extra5 = call double @llvm.experimental.constrained.fmul.f64(
double %mul5, double 1.51,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc5 = fptrunc double %extra5 to float
%accext6 = fpext float %trunc5 to double
@@ -236,11 +236,11 @@ define float @f7(float *%ptr0) {
%mul6 = call double @llvm.experimental.constrained.fmul.f64(
double %accext6, double %ext6,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%extra6 = call double @llvm.experimental.constrained.fmul.f64(
double %mul6, double 1.61,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc6 = fptrunc double %extra6 to float
%accext7 = fpext float %trunc6 to double
@@ -248,11 +248,11 @@ define float @f7(float *%ptr0) {
%mul7 = call double @llvm.experimental.constrained.fmul.f64(
double %accext7, double %ext7,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%extra7 = call double @llvm.experimental.constrained.fmul.f64(
double %mul7, double 1.71,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc7 = fptrunc double %extra7 to float
%accext8 = fpext float %trunc7 to double
@@ -260,11 +260,11 @@ define float @f7(float *%ptr0) {
%mul8 = call double @llvm.experimental.constrained.fmul.f64(
double %accext8, double %ext8,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%extra8 = call double @llvm.experimental.constrained.fmul.f64(
double %mul8, double 1.81,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc8 = fptrunc double %extra8 to float
%accext9 = fpext float %trunc8 to double
@@ -272,12 +272,14 @@ define float @f7(float *%ptr0) {
%mul9 = call double @llvm.experimental.constrained.fmul.f64(
double %accext9, double %ext9,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%extra9 = call double @llvm.experimental.constrained.fmul.f64(
double %mul9, double 1.91,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc9 = fptrunc double %extra9 to float
ret float %trunc9
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-mul-03.ll b/llvm/test/CodeGen/SystemZ/fp-strict-mul-03.ll
index 736bd2e506f..edfc5d46ba9 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-mul-03.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-mul-03.ll
@@ -8,19 +8,19 @@ declare double @foo()
declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)
; Check register multiplication.
-define double @f1(double %f1, double %f2) {
+define double @f1(double %f1, double %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: mdbr %f0, %f2
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the low end of the MDB range.
-define double @f2(double %f1, double *%ptr) {
+define double @f2(double %f1, double *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: mdb %f0, 0(%r2)
; CHECK: br %r14
@@ -28,12 +28,12 @@ define double @f2(double %f1, double *%ptr) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the high end of the aligned MDB range.
-define double @f3(double %f1, double *%base) {
+define double @f3(double %f1, double *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: mdb %f0, 4088(%r2)
; CHECK: br %r14
@@ -42,13 +42,13 @@ define double @f3(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
-define double @f4(double %f1, double *%base) {
+define double @f4(double %f1, double *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: mdb %f0, 0(%r2)
@@ -58,12 +58,12 @@ define double @f4(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check negative displacements, which also need separate address logic.
-define double @f5(double %f1, double *%base) {
+define double @f5(double %f1, double *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -8
; CHECK: mdb %f0, 0(%r2)
@@ -73,12 +73,12 @@ define double @f5(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check that MDB allows indices.
-define double @f6(double %f1, double *%base, i64 %index) {
+define double @f6(double %f1, double *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: mdb %f0, 800(%r1,%r2)
@@ -89,12 +89,12 @@ define double @f6(double %f1, double *%base, i64 %index) {
%res = call double @llvm.experimental.constrained.fmul.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check that multiplications of spilled values can use MDB rather than MDBR.
-define double @f7(double *%ptr0) {
+define double @f7(double *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: mdb %f0, 160(%r15)
@@ -122,52 +122,54 @@ define double @f7(double *%ptr0) {
%val9 = load double, double *%ptr9
%val10 = load double, double *%ptr10
- %ret = call double @foo()
+ %ret = call double @foo() #0
%mul0 = call double @llvm.experimental.constrained.fmul.f64(
double %ret, double %val0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul1 = call double @llvm.experimental.constrained.fmul.f64(
double %mul0, double %val1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul2 = call double @llvm.experimental.constrained.fmul.f64(
double %mul1, double %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul3 = call double @llvm.experimental.constrained.fmul.f64(
double %mul2, double %val3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul4 = call double @llvm.experimental.constrained.fmul.f64(
double %mul3, double %val4,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul5 = call double @llvm.experimental.constrained.fmul.f64(
double %mul4, double %val5,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul6 = call double @llvm.experimental.constrained.fmul.f64(
double %mul5, double %val6,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul7 = call double @llvm.experimental.constrained.fmul.f64(
double %mul6, double %val7,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul8 = call double @llvm.experimental.constrained.fmul.f64(
double %mul7, double %val8,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul9 = call double @llvm.experimental.constrained.fmul.f64(
double %mul8, double %val9,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul10 = call double @llvm.experimental.constrained.fmul.f64(
double %mul9, double %val10,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %mul10
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-mul-04.ll b/llvm/test/CodeGen/SystemZ/fp-strict-mul-04.ll
index a613030792e..924845a99d7 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-mul-04.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-mul-04.ll
@@ -10,7 +10,7 @@ declare double @foo()
; Check register multiplication. "mxdbr %f0, %f2" is not valid from LLVM's
; point of view, because %f2 is the low register of the FP128 %f0. Pass the
; multiplier in %f4 instead.
-define void @f1(double %f1, double %dummy, double %f2, fp128 *%dst) {
+define void @f1(double %f1, double %dummy, double %f2, fp128 *%dst) #0 {
; CHECK-LABEL: f1:
; CHECK: mxdbr %f0, %f4
; CHECK: std %f0, 0(%r2)
@@ -21,13 +21,13 @@ define void @f1(double %f1, double %dummy, double %f2, fp128 *%dst) {
%res = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %f1x, fp128 %f2x,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check the low end of the MXDB range.
-define void @f2(double %f1, double *%ptr, fp128 *%dst) {
+define void @f2(double %f1, double *%ptr, fp128 *%dst) #0 {
; CHECK-LABEL: f2:
; CHECK: mxdb %f0, 0(%r2)
; CHECK: std %f0, 0(%r3)
@@ -39,13 +39,13 @@ define void @f2(double %f1, double *%ptr, fp128 *%dst) {
%res = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %f1x, fp128 %f2x,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check the high end of the aligned MXDB range.
-define void @f3(double %f1, double *%base, fp128 *%dst) {
+define void @f3(double %f1, double *%base, fp128 *%dst) #0 {
; CHECK-LABEL: f3:
; CHECK: mxdb %f0, 4088(%r2)
; CHECK: std %f0, 0(%r3)
@@ -58,14 +58,14 @@ define void @f3(double %f1, double *%base, fp128 *%dst) {
%res = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %f1x, fp128 %f2x,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
-define void @f4(double %f1, double *%base, fp128 *%dst) {
+define void @f4(double %f1, double *%base, fp128 *%dst) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: mxdb %f0, 0(%r2)
@@ -79,13 +79,13 @@ define void @f4(double %f1, double *%base, fp128 *%dst) {
%res = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %f1x, fp128 %f2x,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check negative displacements, which also need separate address logic.
-define void @f5(double %f1, double *%base, fp128 *%dst) {
+define void @f5(double %f1, double *%base, fp128 *%dst) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -8
; CHECK: mxdb %f0, 0(%r2)
@@ -99,13 +99,13 @@ define void @f5(double %f1, double *%base, fp128 *%dst) {
%res = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %f1x, fp128 %f2x,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check that MXDB allows indices.
-define void @f6(double %f1, double *%base, i64 %index, fp128 *%dst) {
+define void @f6(double %f1, double *%base, i64 %index, fp128 *%dst) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: mxdb %f0, 800(%r1,%r2)
@@ -120,13 +120,13 @@ define void @f6(double %f1, double *%base, i64 %index, fp128 *%dst) {
%res = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %f1x, fp128 %f2x,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
; Check that multiplications of spilled values can use MXDB rather than MXDBR.
-define double @f7(double *%ptr0) {
+define double @f7(double *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK: mxdb %f0, 160(%r15)
@@ -178,19 +178,19 @@ define double @f7(double *%ptr0) {
store double %frob9, double *%ptr9
store double %frob10, double *%ptr10
- %ret = call double @foo()
+ %ret = call double @foo() #0
%accext0 = fpext double %ret to fp128
%ext0 = fpext double %frob0 to fp128
%mul0 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext0, fp128 %ext0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%const0 = fpext double 1.01 to fp128
%extra0 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul0, fp128 %const0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc0 = fptrunc fp128 %extra0 to double
%accext1 = fpext double %trunc0 to fp128
@@ -198,12 +198,12 @@ define double @f7(double *%ptr0) {
%mul1 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext1, fp128 %ext1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%const1 = fpext double 1.11 to fp128
%extra1 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul1, fp128 %const1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc1 = fptrunc fp128 %extra1 to double
%accext2 = fpext double %trunc1 to fp128
@@ -211,12 +211,12 @@ define double @f7(double *%ptr0) {
%mul2 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext2, fp128 %ext2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%const2 = fpext double 1.21 to fp128
%extra2 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul2, fp128 %const2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc2 = fptrunc fp128 %extra2 to double
%accext3 = fpext double %trunc2 to fp128
@@ -224,12 +224,12 @@ define double @f7(double *%ptr0) {
%mul3 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext3, fp128 %ext3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%const3 = fpext double 1.31 to fp128
%extra3 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul3, fp128 %const3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc3 = fptrunc fp128 %extra3 to double
%accext4 = fpext double %trunc3 to fp128
@@ -237,12 +237,12 @@ define double @f7(double *%ptr0) {
%mul4 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext4, fp128 %ext4,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%const4 = fpext double 1.41 to fp128
%extra4 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul4, fp128 %const4,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc4 = fptrunc fp128 %extra4 to double
%accext5 = fpext double %trunc4 to fp128
@@ -250,12 +250,12 @@ define double @f7(double *%ptr0) {
%mul5 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext5, fp128 %ext5,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%const5 = fpext double 1.51 to fp128
%extra5 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul5, fp128 %const5,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc5 = fptrunc fp128 %extra5 to double
%accext6 = fpext double %trunc5 to fp128
@@ -263,12 +263,12 @@ define double @f7(double *%ptr0) {
%mul6 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext6, fp128 %ext6,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%const6 = fpext double 1.61 to fp128
%extra6 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul6, fp128 %const6,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc6 = fptrunc fp128 %extra6 to double
%accext7 = fpext double %trunc6 to fp128
@@ -276,12 +276,12 @@ define double @f7(double *%ptr0) {
%mul7 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext7, fp128 %ext7,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%const7 = fpext double 1.71 to fp128
%extra7 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul7, fp128 %const7,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc7 = fptrunc fp128 %extra7 to double
%accext8 = fpext double %trunc7 to fp128
@@ -289,12 +289,12 @@ define double @f7(double *%ptr0) {
%mul8 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext8, fp128 %ext8,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%const8 = fpext double 1.81 to fp128
%extra8 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul8, fp128 %const8,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc8 = fptrunc fp128 %extra8 to double
%accext9 = fpext double %trunc8 to fp128
@@ -302,13 +302,15 @@ define double @f7(double *%ptr0) {
%mul9 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %accext9, fp128 %ext9,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%const9 = fpext double 1.91 to fp128
%extra9 = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %mul9, fp128 %const9,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%trunc9 = fptrunc fp128 %extra9 to double
ret double %trunc9
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-mul-05.ll b/llvm/test/CodeGen/SystemZ/fp-strict-mul-05.ll
index 32bab44bff3..0a8ee0bf7bd 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-mul-05.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-mul-05.ll
@@ -5,7 +5,7 @@
declare fp128 @llvm.experimental.constrained.fmul.f128(fp128, fp128, metadata, metadata)
; There is no memory form of 128-bit multiplication.
-define void @f1(fp128 *%ptr, float %f2) {
+define void @f1(fp128 *%ptr, float %f2) strictfp {
; CHECK-LABEL: f1:
; CHECK-DAG: lxebr %f0, %f0
; CHECK-DAG: ld %f1, 0(%r2)
@@ -19,7 +19,7 @@ define void @f1(fp128 *%ptr, float %f2) {
%diff = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %f1, fp128 %f2x,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") strictfp
store fp128 %diff, fp128 *%ptr
ret void
}
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-mul-06.ll b/llvm/test/CodeGen/SystemZ/fp-strict-mul-06.ll
index 0de99aeedc9..0f640545916 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-mul-06.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-mul-06.ll
@@ -5,7 +5,7 @@
declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata)
-define float @f1(float %f1, float %f2, float %acc) {
+define float @f1(float %f1, float %f2, float %acc) #0 {
; CHECK-LABEL: f1:
; CHECK-SCALAR: maebr %f4, %f0, %f2
; CHECK-SCALAR: ler %f0, %f4
@@ -14,11 +14,11 @@ define float @f1(float %f1, float %f2, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %acc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
-define float @f2(float %f1, float *%ptr, float %acc) {
+define float @f2(float %f1, float *%ptr, float %acc) #0 {
; CHECK-LABEL: f2:
; CHECK: maeb %f2, %f0, 0(%r2)
; CHECK-SCALAR: ler %f0, %f2
@@ -28,11 +28,11 @@ define float @f2(float %f1, float *%ptr, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %acc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
-define float @f3(float %f1, float *%base, float %acc) {
+define float @f3(float %f1, float *%base, float %acc) #0 {
; CHECK-LABEL: f3:
; CHECK: maeb %f2, %f0, 4092(%r2)
; CHECK-SCALAR: ler %f0, %f2
@@ -43,11 +43,11 @@ define float @f3(float %f1, float *%base, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %acc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
-define float @f4(float %f1, float *%base, float %acc) {
+define float @f4(float %f1, float *%base, float %acc) #0 {
; The important thing here is that we don't generate an out-of-range
; displacement. Other sequences besides this one would be OK.
;
@@ -62,11 +62,11 @@ define float @f4(float %f1, float *%base, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %acc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
-define float @f5(float %f1, float *%base, float %acc) {
+define float @f5(float %f1, float *%base, float %acc) #0 {
; Here too the important thing is that we don't generate an out-of-range
; displacement. Other sequences besides this one would be OK.
;
@@ -81,11 +81,11 @@ define float @f5(float %f1, float *%base, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %acc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
-define float @f6(float %f1, float *%base, i64 %index, float %acc) {
+define float @f6(float %f1, float *%base, i64 %index, float %acc) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: maeb %f2, %f0, 0(%r1,%r2)
@@ -97,11 +97,11 @@ define float @f6(float %f1, float *%base, i64 %index, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %acc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
-define float @f7(float %f1, float *%base, i64 %index, float %acc) {
+define float @f7(float %f1, float *%base, i64 %index, float %acc) #0 {
; CHECK-LABEL: f7:
; CHECK: sllg %r1, %r3, 2
; CHECK: maeb %f2, %f0, 4092({{%r1,%r2|%r2,%r1}})
@@ -114,11 +114,11 @@ define float @f7(float %f1, float *%base, i64 %index, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %acc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
-define float @f8(float %f1, float *%base, i64 %index, float %acc) {
+define float @f8(float %f1, float *%base, i64 %index, float %acc) #0 {
; CHECK-LABEL: f8:
; CHECK: sllg %r1, %r3, 2
; CHECK: lay %r1, 4096({{%r1,%r2|%r2,%r1}})
@@ -132,6 +132,8 @@ define float @f8(float %f1, float *%base, i64 %index, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %acc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-mul-07.ll b/llvm/test/CodeGen/SystemZ/fp-strict-mul-07.ll
index b088aae16ad..d929fbba0ff 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-mul-07.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-mul-07.ll
@@ -5,7 +5,7 @@
declare double @llvm.experimental.constrained.fma.f64(double %f1, double %f2, double %f3, metadata, metadata)
-define double @f1(double %f1, double %f2, double %acc) {
+define double @f1(double %f1, double %f2, double %acc) #0 {
; CHECK-LABEL: f1:
; CHECK-SCALAR: madbr %f4, %f0, %f2
; CHECK-SCALAR: ldr %f0, %f4
@@ -14,11 +14,11 @@ define double @f1(double %f1, double %f2, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %acc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
-define double @f2(double %f1, double *%ptr, double %acc) {
+define double @f2(double %f1, double *%ptr, double %acc) #0 {
; CHECK-LABEL: f2:
; CHECK: madb %f2, %f0, 0(%r2)
; CHECK: ldr %f0, %f2
@@ -27,11 +27,11 @@ define double @f2(double %f1, double *%ptr, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %acc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
-define double @f3(double %f1, double *%base, double %acc) {
+define double @f3(double %f1, double *%base, double %acc) #0 {
; CHECK-LABEL: f3:
; CHECK: madb %f2, %f0, 4088(%r2)
; CHECK: ldr %f0, %f2
@@ -41,11 +41,11 @@ define double @f3(double %f1, double *%base, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %acc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
-define double @f4(double %f1, double *%base, double %acc) {
+define double @f4(double %f1, double *%base, double %acc) #0 {
; The important thing here is that we don't generate an out-of-range
; displacement. Other sequences besides this one would be OK.
;
@@ -59,11 +59,11 @@ define double @f4(double %f1, double *%base, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %acc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
-define double @f5(double %f1, double *%base, double %acc) {
+define double @f5(double %f1, double *%base, double %acc) #0 {
; Here too the important thing is that we don't generate an out-of-range
; displacement. Other sequences besides this one would be OK.
;
@@ -77,11 +77,11 @@ define double @f5(double %f1, double *%base, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %acc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
-define double @f6(double %f1, double *%base, i64 %index, double %acc) {
+define double @f6(double %f1, double *%base, i64 %index, double %acc) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: madb %f2, %f0, 0(%r1,%r2)
@@ -92,11 +92,11 @@ define double @f6(double %f1, double *%base, i64 %index, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %acc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
-define double @f7(double %f1, double *%base, i64 %index, double %acc) {
+define double @f7(double %f1, double *%base, i64 %index, double %acc) #0 {
; CHECK-LABEL: f7:
; CHECK: sllg %r1, %r3, 3
; CHECK: madb %f2, %f0, 4088({{%r1,%r2|%r2,%r1}})
@@ -108,11 +108,11 @@ define double @f7(double %f1, double *%base, i64 %index, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %acc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
-define double @f8(double %f1, double *%base, i64 %index, double %acc) {
+define double @f8(double %f1, double *%base, i64 %index, double %acc) #0 {
; CHECK-LABEL: f8:
; CHECK: sllg %r1, %r3, 3
; CHECK: lay %r1, 4096({{%r1,%r2|%r2,%r1}})
@@ -125,6 +125,8 @@ define double @f8(double %f1, double *%base, i64 %index, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %acc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-mul-08.ll b/llvm/test/CodeGen/SystemZ/fp-strict-mul-08.ll
index ea3b7a1dba4..4c5101a795c 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-mul-08.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-mul-08.ll
@@ -5,7 +5,7 @@
declare float @llvm.experimental.constrained.fma.f32(float %f1, float %f2, float %f3, metadata, metadata)
-define float @f1(float %f1, float %f2, float %acc) {
+define float @f1(float %f1, float %f2, float %acc) #0 {
; CHECK-LABEL: f1:
; CHECK-SCALAR: msebr %f4, %f0, %f2
; CHECK-SCALAR: ler %f0, %f4
@@ -15,11 +15,11 @@ define float @f1(float %f1, float %f2, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %negacc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
-define float @f2(float %f1, float *%ptr, float %acc) {
+define float @f2(float %f1, float *%ptr, float %acc) #0 {
; CHECK-LABEL: f2:
; CHECK: mseb %f2, %f0, 0(%r2)
; CHECK-SCALAR: ler %f0, %f2
@@ -30,11 +30,11 @@ define float @f2(float %f1, float *%ptr, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %negacc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
-define float @f3(float %f1, float *%base, float %acc) {
+define float @f3(float %f1, float *%base, float %acc) #0 {
; CHECK-LABEL: f3:
; CHECK: mseb %f2, %f0, 4092(%r2)
; CHECK-SCALAR: ler %f0, %f2
@@ -46,11 +46,11 @@ define float @f3(float %f1, float *%base, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %negacc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
-define float @f4(float %f1, float *%base, float %acc) {
+define float @f4(float %f1, float *%base, float %acc) #0 {
; The important thing here is that we don't generate an out-of-range
; displacement. Other sequences besides this one would be OK.
;
@@ -66,11 +66,11 @@ define float @f4(float %f1, float *%base, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %negacc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
-define float @f5(float %f1, float *%base, float %acc) {
+define float @f5(float %f1, float *%base, float %acc) #0 {
; Here too the important thing is that we don't generate an out-of-range
; displacement. Other sequences besides this one would be OK.
;
@@ -86,11 +86,11 @@ define float @f5(float %f1, float *%base, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %negacc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
-define float @f6(float %f1, float *%base, i64 %index, float %acc) {
+define float @f6(float %f1, float *%base, i64 %index, float %acc) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: mseb %f2, %f0, 0(%r1,%r2)
@@ -103,11 +103,11 @@ define float @f6(float %f1, float *%base, i64 %index, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %negacc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
-define float @f7(float %f1, float *%base, i64 %index, float %acc) {
+define float @f7(float %f1, float *%base, i64 %index, float %acc) #0 {
; CHECK-LABEL: f7:
; CHECK: sllg %r1, %r3, 2
; CHECK: mseb %f2, %f0, 4092({{%r1,%r2|%r2,%r1}})
@@ -121,11 +121,11 @@ define float @f7(float %f1, float *%base, i64 %index, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %negacc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
-define float @f8(float %f1, float *%base, i64 %index, float %acc) {
+define float @f8(float %f1, float *%base, i64 %index, float %acc) #0 {
; CHECK-LABEL: f8:
; CHECK: sllg %r1, %r3, 2
; CHECK: lay %r1, 4096({{%r1,%r2|%r2,%r1}})
@@ -140,6 +140,8 @@ define float @f8(float %f1, float *%base, i64 %index, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %negacc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-mul-09.ll b/llvm/test/CodeGen/SystemZ/fp-strict-mul-09.ll
index e8f6eeb4165..357148c3b01 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-mul-09.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-mul-09.ll
@@ -5,7 +5,7 @@
declare double @llvm.experimental.constrained.fma.f64(double %f1, double %f2, double %f3, metadata, metadata)
-define double @f1(double %f1, double %f2, double %acc) {
+define double @f1(double %f1, double %f2, double %acc) #0 {
; CHECK-LABEL: f1:
; CHECK-SCALAR: msdbr %f4, %f0, %f2
; CHECK-SCALAR: ldr %f0, %f4
@@ -15,11 +15,11 @@ define double @f1(double %f1, double %f2, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %negacc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
-define double @f2(double %f1, double *%ptr, double %acc) {
+define double @f2(double %f1, double *%ptr, double %acc) #0 {
; CHECK-LABEL: f2:
; CHECK: msdb %f2, %f0, 0(%r2)
; CHECK: ldr %f0, %f2
@@ -29,11 +29,11 @@ define double @f2(double %f1, double *%ptr, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %negacc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
-define double @f3(double %f1, double *%base, double %acc) {
+define double @f3(double %f1, double *%base, double %acc) #0 {
; CHECK-LABEL: f3:
; CHECK: msdb %f2, %f0, 4088(%r2)
; CHECK: ldr %f0, %f2
@@ -44,11 +44,11 @@ define double @f3(double %f1, double *%base, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %negacc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
-define double @f4(double %f1, double *%base, double %acc) {
+define double @f4(double %f1, double *%base, double %acc) #0 {
; The important thing here is that we don't generate an out-of-range
; displacement. Other sequences besides this one would be OK.
;
@@ -63,11 +63,11 @@ define double @f4(double %f1, double *%base, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %negacc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
-define double @f5(double %f1, double *%base, double %acc) {
+define double @f5(double %f1, double *%base, double %acc) #0 {
; Here too the important thing is that we don't generate an out-of-range
; displacement. Other sequences besides this one would be OK.
;
@@ -82,11 +82,11 @@ define double @f5(double %f1, double *%base, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %negacc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
-define double @f6(double %f1, double *%base, i64 %index, double %acc) {
+define double @f6(double %f1, double *%base, i64 %index, double %acc) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: msdb %f2, %f0, 0(%r1,%r2)
@@ -98,11 +98,11 @@ define double @f6(double %f1, double *%base, i64 %index, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %negacc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
-define double @f7(double %f1, double *%base, i64 %index, double %acc) {
+define double @f7(double %f1, double *%base, i64 %index, double %acc) #0 {
; CHECK-LABEL: f7:
; CHECK: sllg %r1, %r3, 3
; CHECK: msdb %f2, %f0, 4088({{%r1,%r2|%r2,%r1}})
@@ -115,11 +115,11 @@ define double @f7(double %f1, double *%base, i64 %index, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %negacc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
-define double @f8(double %f1, double *%base, i64 %index, double %acc) {
+define double @f8(double %f1, double *%base, i64 %index, double %acc) #0 {
; CHECK-LABEL: f8:
; CHECK: sllg %r1, %r3, 3
; CHECK: lay %r1, 4096({{%r1,%r2|%r2,%r1}})
@@ -133,6 +133,8 @@ define double @f8(double %f1, double *%base, i64 %index, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %negacc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-mul-10.ll b/llvm/test/CodeGen/SystemZ/fp-strict-mul-10.ll
index dc0a4bbccbd..a4291ec8d37 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-mul-10.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-mul-10.ll
@@ -3,19 +3,19 @@
declare double @llvm.experimental.constrained.fma.f64(double %f1, double %f2, double %f3, metadata, metadata)
declare float @llvm.experimental.constrained.fma.f32(float %f1, float %f2, float %f3, metadata, metadata)
-define double @f1(double %f1, double %f2, double %acc) {
+define double @f1(double %f1, double %f2, double %acc) #0 {
; CHECK-LABEL: f1:
; CHECK: wfnmadb %f0, %f0, %f2, %f4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %acc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%negres = fsub double -0.0, %res
ret double %negres
}
-define double @f2(double %f1, double %f2, double %acc) {
+define double @f2(double %f1, double %f2, double %acc) #0 {
; CHECK-LABEL: f2:
; CHECK: wfnmsdb %f0, %f0, %f2, %f4
; CHECK: br %r14
@@ -23,24 +23,24 @@ define double @f2(double %f1, double %f2, double %acc) {
%res = call double @llvm.experimental.constrained.fma.f64 (
double %f1, double %f2, double %negacc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%negres = fsub double -0.0, %res
ret double %negres
}
-define float @f3(float %f1, float %f2, float %acc) {
+define float @f3(float %f1, float %f2, float %acc) #0 {
; CHECK-LABEL: f3:
; CHECK: wfnmasb %f0, %f0, %f2, %f4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %acc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%negres = fsub float -0.0, %res
ret float %negres
}
-define float @f4(float %f1, float %f2, float %acc) {
+define float @f4(float %f1, float %f2, float %acc) #0 {
; CHECK-LABEL: f4:
; CHECK: wfnmssb %f0, %f0, %f2, %f4
; CHECK: br %r14
@@ -48,8 +48,9 @@ define float @f4(float %f1, float %f2, float %acc) {
%res = call float @llvm.experimental.constrained.fma.f32 (
float %f1, float %f2, float %negacc,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%negres = fsub float -0.0, %res
ret float %negres
}
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-mul-11.ll b/llvm/test/CodeGen/SystemZ/fp-strict-mul-11.ll
index a8133ad2e3e..58e5bc453e6 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-mul-11.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-mul-11.ll
@@ -4,7 +4,7 @@
declare fp128 @llvm.experimental.constrained.fmul.f128(fp128, fp128, metadata, metadata)
-define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
+define void @f1(fp128 *%ptr1, fp128 *%ptr2) #0 {
; CHECK-LABEL: f1:
; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
@@ -16,12 +16,12 @@ define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
%sum = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %f1, fp128 %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %sum, fp128 *%ptr1
ret void
}
-define void @f2(double %f1, double %f2, fp128 *%dst) {
+define void @f2(double %f1, double %f2, fp128 *%dst) #0 {
; CHECK-LABEL: f2:
; CHECK-DAG: wflld [[REG1:%v[0-9]+]], %f0
; CHECK-DAG: wflld [[REG2:%v[0-9]+]], %f2
@@ -33,8 +33,9 @@ define void @f2(double %f1, double %f2, fp128 *%dst) {
%res = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %f1x, fp128 %f2x,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%dst
ret void
}
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-round-01.ll b/llvm/test/CodeGen/SystemZ/fp-strict-round-01.ll
index dfd2c07dca5..92b5bdc65f5 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-round-01.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-round-01.ll
@@ -4,33 +4,33 @@
; Test rint for f32.
declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
-define float @f1(float %f) {
+define float @f1(float %f) #0 {
; CHECK-LABEL: f1:
; CHECK: fiebr %f0, 0, %f0
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.rint.f32(
float %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test rint for f64.
declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
-define double @f2(double %f) {
+define double @f2(double %f) #0 {
; CHECK-LABEL: f2:
; CHECK: fidbr %f0, 0, %f0
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.rint.f64(
double %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Test rint for f128.
declare fp128 @llvm.experimental.constrained.rint.f128(fp128, metadata, metadata)
-define void @f3(fp128 *%ptr) {
+define void @f3(fp128 *%ptr) #0 {
; CHECK-LABEL: f3:
; CHECK: fixbr %f0, 0, %f0
; CHECK: br %r14
@@ -38,40 +38,40 @@ define void @f3(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.rint.f128(
fp128 %src,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test nearbyint for f32.
declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
-define float @f4(float %f) {
+define float @f4(float %f) #0 {
; CHECK-LABEL: f4:
; CHECK: brasl %r14, nearbyintf@PLT
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.nearbyint.f32(
float %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test nearbyint for f64.
declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
-define double @f5(double %f) {
+define double @f5(double %f) #0 {
; CHECK-LABEL: f5:
; CHECK: brasl %r14, nearbyint@PLT
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.nearbyint.f64(
double %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Test nearbyint for f128.
declare fp128 @llvm.experimental.constrained.nearbyint.f128(fp128, metadata, metadata)
-define void @f6(fp128 *%ptr) {
+define void @f6(fp128 *%ptr) #0 {
; CHECK-LABEL: f6:
; CHECK: brasl %r14, nearbyintl@PLT
; CHECK: br %r14
@@ -79,40 +79,40 @@ define void @f6(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.nearbyint.f128(
fp128 %src,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test floor for f32.
declare float @llvm.experimental.constrained.floor.f32(float, metadata, metadata)
-define float @f7(float %f) {
+define float @f7(float %f) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, floorf@PLT
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.floor.f32(
float %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test floor for f64.
declare double @llvm.experimental.constrained.floor.f64(double, metadata, metadata)
-define double @f8(double %f) {
+define double @f8(double %f) #0 {
; CHECK-LABEL: f8:
; CHECK: brasl %r14, floor@PLT
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.floor.f64(
double %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Test floor for f128.
declare fp128 @llvm.experimental.constrained.floor.f128(fp128, metadata, metadata)
-define void @f9(fp128 *%ptr) {
+define void @f9(fp128 *%ptr) #0 {
; CHECK-LABEL: f9:
; CHECK: brasl %r14, floorl@PLT
; CHECK: br %r14
@@ -120,40 +120,40 @@ define void @f9(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.floor.f128(
fp128 %src,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test ceil for f32.
declare float @llvm.experimental.constrained.ceil.f32(float, metadata, metadata)
-define float @f10(float %f) {
+define float @f10(float %f) #0 {
; CHECK-LABEL: f10:
; CHECK: brasl %r14, ceilf@PLT
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.ceil.f32(
float %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test ceil for f64.
declare double @llvm.experimental.constrained.ceil.f64(double, metadata, metadata)
-define double @f11(double %f) {
+define double @f11(double %f) #0 {
; CHECK-LABEL: f11:
; CHECK: brasl %r14, ceil@PLT
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.ceil.f64(
double %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Test ceil for f128.
declare fp128 @llvm.experimental.constrained.ceil.f128(fp128, metadata, metadata)
-define void @f12(fp128 *%ptr) {
+define void @f12(fp128 *%ptr) #0 {
; CHECK-LABEL: f12:
; CHECK: brasl %r14, ceill@PLT
; CHECK: br %r14
@@ -161,40 +161,40 @@ define void @f12(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.ceil.f128(
fp128 %src,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test trunc for f32.
declare float @llvm.experimental.constrained.trunc.f32(float, metadata, metadata)
-define float @f13(float %f) {
+define float @f13(float %f) #0 {
; CHECK-LABEL: f13:
; CHECK: brasl %r14, truncf@PLT
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.trunc.f32(
float %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test trunc for f64.
declare double @llvm.experimental.constrained.trunc.f64(double, metadata, metadata)
-define double @f14(double %f) {
+define double @f14(double %f) #0 {
; CHECK-LABEL: f14:
; CHECK: brasl %r14, trunc@PLT
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.trunc.f64(
double %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Test trunc for f128.
declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata, metadata)
-define void @f15(fp128 *%ptr) {
+define void @f15(fp128 *%ptr) #0 {
; CHECK-LABEL: f15:
; CHECK: brasl %r14, truncl@PLT
; CHECK: br %r14
@@ -202,40 +202,40 @@ define void @f15(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.trunc.f128(
fp128 %src,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test round for f32.
declare float @llvm.experimental.constrained.round.f32(float, metadata, metadata)
-define float @f16(float %f) {
+define float @f16(float %f) #0 {
; CHECK-LABEL: f16:
; CHECK: brasl %r14, roundf@PLT
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.round.f32(
float %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test round for f64.
declare double @llvm.experimental.constrained.round.f64(double, metadata, metadata)
-define double @f17(double %f) {
+define double @f17(double %f) #0 {
; CHECK-LABEL: f17:
; CHECK: brasl %r14, round@PLT
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.round.f64(
double %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Test round for f128.
declare fp128 @llvm.experimental.constrained.round.f128(fp128, metadata, metadata)
-define void @f18(fp128 *%ptr) {
+define void @f18(fp128 *%ptr) #0 {
; CHECK-LABEL: f18:
; CHECK: brasl %r14, roundl@PLT
; CHECK: br %r14
@@ -243,8 +243,9 @@ define void @f18(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.round.f128(
fp128 %src,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-round-02.ll b/llvm/test/CodeGen/SystemZ/fp-strict-round-02.ll
index fe1be576993..223e1a076bb 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-round-02.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-round-02.ll
@@ -7,20 +7,20 @@
; Test rint for f32.
declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
-define float @f1(float %f) {
+define float @f1(float %f) #0 {
; CHECK-LABEL: f1:
; CHECK: fiebr %f0, 0, %f0
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.rint.f32(
float %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test rint for f64.
declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
-define double @f2(double %f) {
+define double @f2(double %f) #0 {
; CHECK-LABEL: f2:
; CHECK-SCALAR: fidbr %f0, 0, %f0
; CHECK-VECTOR: fidbra %f0, 0, %f0, 0
@@ -28,13 +28,13 @@ define double @f2(double %f) {
%res = call double @llvm.experimental.constrained.rint.f64(
double %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Test rint for f128.
declare fp128 @llvm.experimental.constrained.rint.f128(fp128, metadata, metadata)
-define void @f3(fp128 *%ptr) {
+define void @f3(fp128 *%ptr) #0 {
; CHECK-LABEL: f3:
; CHECK: fixbr %f0, 0, %f0
; CHECK: br %r14
@@ -42,40 +42,40 @@ define void @f3(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.rint.f128(
fp128 %src,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test nearbyint for f32.
declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
-define float @f4(float %f) {
+define float @f4(float %f) #0 {
; CHECK-LABEL: f4:
; CHECK: fiebra %f0, 0, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.nearbyint.f32(
float %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test nearbyint for f64.
declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
-define double @f5(double %f) {
+define double @f5(double %f) #0 {
; CHECK-LABEL: f5:
; CHECK: fidbra %f0, 0, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.nearbyint.f64(
double %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Test nearbyint for f128.
declare fp128 @llvm.experimental.constrained.nearbyint.f128(fp128, metadata, metadata)
-define void @f6(fp128 *%ptr) {
+define void @f6(fp128 *%ptr) #0 {
; CHECK-LABEL: f6:
; CHECK: fixbra %f0, 0, %f0, 4
; CHECK: br %r14
@@ -83,40 +83,40 @@ define void @f6(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.nearbyint.f128(
fp128 %src,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test floor for f32.
declare float @llvm.experimental.constrained.floor.f32(float, metadata, metadata)
-define float @f7(float %f) {
+define float @f7(float %f) #0 {
; CHECK-LABEL: f7:
; CHECK: fiebra %f0, 7, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.floor.f32(
float %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test floor for f64.
declare double @llvm.experimental.constrained.floor.f64(double, metadata, metadata)
-define double @f8(double %f) {
+define double @f8(double %f) #0 {
; CHECK-LABEL: f8:
; CHECK: fidbra %f0, 7, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.floor.f64(
double %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Test floor for f128.
declare fp128 @llvm.experimental.constrained.floor.f128(fp128, metadata, metadata)
-define void @f9(fp128 *%ptr) {
+define void @f9(fp128 *%ptr) #0 {
; CHECK-LABEL: f9:
; CHECK: fixbra %f0, 7, %f0, 4
; CHECK: br %r14
@@ -124,40 +124,40 @@ define void @f9(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.floor.f128(
fp128 %src,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test ceil for f32.
declare float @llvm.experimental.constrained.ceil.f32(float, metadata, metadata)
-define float @f10(float %f) {
+define float @f10(float %f) #0 {
; CHECK-LABEL: f10:
; CHECK: fiebra %f0, 6, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.ceil.f32(
float %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test ceil for f64.
declare double @llvm.experimental.constrained.ceil.f64(double, metadata, metadata)
-define double @f11(double %f) {
+define double @f11(double %f) #0 {
; CHECK-LABEL: f11:
; CHECK: fidbra %f0, 6, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.ceil.f64(
double %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Test ceil for f128.
declare fp128 @llvm.experimental.constrained.ceil.f128(fp128, metadata, metadata)
-define void @f12(fp128 *%ptr) {
+define void @f12(fp128 *%ptr) #0 {
; CHECK-LABEL: f12:
; CHECK: fixbra %f0, 6, %f0, 4
; CHECK: br %r14
@@ -165,40 +165,40 @@ define void @f12(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.ceil.f128(
fp128 %src,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test trunc for f32.
declare float @llvm.experimental.constrained.trunc.f32(float, metadata, metadata)
-define float @f13(float %f) {
+define float @f13(float %f) #0 {
; CHECK-LABEL: f13:
; CHECK: fiebra %f0, 5, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.trunc.f32(
float %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test trunc for f64.
declare double @llvm.experimental.constrained.trunc.f64(double, metadata, metadata)
-define double @f14(double %f) {
+define double @f14(double %f) #0 {
; CHECK-LABEL: f14:
; CHECK: fidbra %f0, 5, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.trunc.f64(
double %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Test trunc for f128.
declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata, metadata)
-define void @f15(fp128 *%ptr) {
+define void @f15(fp128 *%ptr) #0 {
; CHECK-LABEL: f15:
; CHECK: fixbra %f0, 5, %f0, 4
; CHECK: br %r14
@@ -206,40 +206,40 @@ define void @f15(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.trunc.f128(
fp128 %src,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test round for f32.
declare float @llvm.experimental.constrained.round.f32(float, metadata, metadata)
-define float @f16(float %f) {
+define float @f16(float %f) #0 {
; CHECK-LABEL: f16:
; CHECK: fiebra %f0, 1, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.round.f32(
float %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test round for f64.
declare double @llvm.experimental.constrained.round.f64(double, metadata, metadata)
-define double @f17(double %f) {
+define double @f17(double %f) #0 {
; CHECK-LABEL: f17:
; CHECK: fidbra %f0, 1, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.round.f64(
double %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Test round for f128.
declare fp128 @llvm.experimental.constrained.round.f128(fp128, metadata, metadata)
-define void @f18(fp128 *%ptr) {
+define void @f18(fp128 *%ptr) #0 {
; CHECK-LABEL: f18:
; CHECK: fixbra %f0, 1, %f0, 4
; CHECK: br %r14
@@ -247,8 +247,9 @@ define void @f18(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.round.f128(
fp128 %src,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-round-03.ll b/llvm/test/CodeGen/SystemZ/fp-strict-round-03.ll
index e1f634b9ad4..811fe8340f1 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-round-03.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-round-03.ll
@@ -4,33 +4,33 @@
; Test rint for f32.
declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
-define float @f1(float %f) {
+define float @f1(float %f) #0 {
; CHECK-LABEL: f1:
; CHECK: fiebra %f0, 0, %f0, 0
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.rint.f32(
float %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test rint for f64.
declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
-define double @f2(double %f) {
+define double @f2(double %f) #0 {
; CHECK-LABEL: f2:
; CHECK: fidbra %f0, 0, %f0, 0
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.rint.f64(
double %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Test rint for f128.
declare fp128 @llvm.experimental.constrained.rint.f128(fp128, metadata, metadata)
-define void @f3(fp128 *%ptr) {
+define void @f3(fp128 *%ptr) #0 {
; CHECK-LABEL: f3:
; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 0, 0
@@ -40,40 +40,40 @@ define void @f3(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.rint.f128(
fp128 %src,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test nearbyint for f32.
declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
-define float @f4(float %f) {
+define float @f4(float %f) #0 {
; CHECK-LABEL: f4:
; CHECK: fiebra %f0, 0, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.nearbyint.f32(
float %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test nearbyint for f64.
declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
-define double @f5(double %f) {
+define double @f5(double %f) #0 {
; CHECK-LABEL: f5:
; CHECK: fidbra %f0, 0, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.nearbyint.f64(
double %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Test nearbyint for f128.
declare fp128 @llvm.experimental.constrained.nearbyint.f128(fp128, metadata, metadata)
-define void @f6(fp128 *%ptr) {
+define void @f6(fp128 *%ptr) #0 {
; CHECK-LABEL: f6:
; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 0
@@ -83,40 +83,40 @@ define void @f6(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.nearbyint.f128(
fp128 %src,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test floor for f32.
declare float @llvm.experimental.constrained.floor.f32(float, metadata, metadata)
-define float @f7(float %f) {
+define float @f7(float %f) #0 {
; CHECK-LABEL: f7:
; CHECK: fiebra %f0, 7, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.floor.f32(
float %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test floor for f64.
declare double @llvm.experimental.constrained.floor.f64(double, metadata, metadata)
-define double @f8(double %f) {
+define double @f8(double %f) #0 {
; CHECK-LABEL: f8:
; CHECK: fidbra %f0, 7, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.floor.f64(
double %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Test floor for f128.
declare fp128 @llvm.experimental.constrained.floor.f128(fp128, metadata, metadata)
-define void @f9(fp128 *%ptr) {
+define void @f9(fp128 *%ptr) #0 {
; CHECK-LABEL: f9:
; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 7
@@ -126,40 +126,40 @@ define void @f9(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.floor.f128(
fp128 %src,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test ceil for f32.
declare float @llvm.experimental.constrained.ceil.f32(float, metadata, metadata)
-define float @f10(float %f) {
+define float @f10(float %f) #0 {
; CHECK-LABEL: f10:
; CHECK: fiebra %f0, 6, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.ceil.f32(
float %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test ceil for f64.
declare double @llvm.experimental.constrained.ceil.f64(double, metadata, metadata)
-define double @f11(double %f) {
+define double @f11(double %f) #0 {
; CHECK-LABEL: f11:
; CHECK: fidbra %f0, 6, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.ceil.f64(
double %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Test ceil for f128.
declare fp128 @llvm.experimental.constrained.ceil.f128(fp128, metadata, metadata)
-define void @f12(fp128 *%ptr) {
+define void @f12(fp128 *%ptr) #0 {
; CHECK-LABEL: f12:
; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 6
@@ -169,40 +169,40 @@ define void @f12(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.ceil.f128(
fp128 %src,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test trunc for f32.
declare float @llvm.experimental.constrained.trunc.f32(float, metadata, metadata)
-define float @f13(float %f) {
+define float @f13(float %f) #0 {
; CHECK-LABEL: f13:
; CHECK: fiebra %f0, 5, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.trunc.f32(
float %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test trunc for f64.
declare double @llvm.experimental.constrained.trunc.f64(double, metadata, metadata)
-define double @f14(double %f) {
+define double @f14(double %f) #0 {
; CHECK-LABEL: f14:
; CHECK: fidbra %f0, 5, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.trunc.f64(
double %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Test trunc for f128.
declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata, metadata)
-define void @f15(fp128 *%ptr) {
+define void @f15(fp128 *%ptr) #0 {
; CHECK-LABEL: f15:
; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 5
@@ -212,40 +212,40 @@ define void @f15(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.trunc.f128(
fp128 %src,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
; Test round for f32.
declare float @llvm.experimental.constrained.round.f32(float, metadata, metadata)
-define float @f16(float %f) {
+define float @f16(float %f) #0 {
; CHECK-LABEL: f16:
; CHECK: fiebra %f0, 1, %f0, 4
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.round.f32(
float %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Test round for f64.
declare double @llvm.experimental.constrained.round.f64(double, metadata, metadata)
-define double @f17(double %f) {
+define double @f17(double %f) #0 {
; CHECK-LABEL: f17:
; CHECK: fidbra %f0, 1, %f0, 4
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.round.f64(
double %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Test round for f128.
declare fp128 @llvm.experimental.constrained.round.f128(fp128, metadata, metadata)
-define void @f18(fp128 *%ptr) {
+define void @f18(fp128 *%ptr) #0 {
; CHECK-LABEL: f18:
; CHECK: vl [[REG:%v[0-9]+]], 0(%r2)
; CHECK: wfixb [[RES:%v[0-9]+]], [[REG]], 4, 1
@@ -255,8 +255,9 @@ define void @f18(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.round.f128(
fp128 %src,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128 *%ptr
ret void
}
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-sqrt-01.ll b/llvm/test/CodeGen/SystemZ/fp-strict-sqrt-01.ll
index 158308c49be..cff83950617 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-sqrt-01.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-sqrt-01.ll
@@ -7,19 +7,19 @@
declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)
; Check register square root.
-define float @f1(float %val) {
+define float @f1(float %val) #0 {
; CHECK-LABEL: f1:
; CHECK: sqebr %f0, %f0
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.sqrt.f32(
float %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check the low end of the SQEB range.
-define float @f2(float *%ptr) {
+define float @f2(float *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: sqeb %f0, 0(%r2)
; CHECK: br %r14
@@ -27,12 +27,12 @@ define float @f2(float *%ptr) {
%res = call float @llvm.experimental.constrained.sqrt.f32(
float %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check the high end of the aligned SQEB range.
-define float @f3(float *%base) {
+define float @f3(float *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: sqeb %f0, 4092(%r2)
; CHECK: br %r14
@@ -41,13 +41,13 @@ define float @f3(float *%base) {
%res = call float @llvm.experimental.constrained.sqrt.f32(
float %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
-define float @f4(float *%base) {
+define float @f4(float *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: sqeb %f0, 0(%r2)
@@ -57,12 +57,12 @@ define float @f4(float *%base) {
%res = call float @llvm.experimental.constrained.sqrt.f32(
float %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check negative displacements, which also need separate address logic.
-define float @f5(float *%base) {
+define float @f5(float *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: sqeb %f0, 0(%r2)
@@ -72,12 +72,12 @@ define float @f5(float *%base) {
%res = call float @llvm.experimental.constrained.sqrt.f32(
float %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check that SQEB allows indices.
-define float @f6(float *%base, i64 %index) {
+define float @f6(float *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: sqeb %f0, 400(%r1,%r2)
@@ -88,7 +88,8 @@ define float @f6(float *%base, i64 %index) {
%res = call float @llvm.experimental.constrained.sqrt.f32(
float %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-sqrt-02.ll b/llvm/test/CodeGen/SystemZ/fp-strict-sqrt-02.ll
index 4e90939d2e7..791c39301e4 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-sqrt-02.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-sqrt-02.ll
@@ -7,19 +7,19 @@
declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)
; Check register square root.
-define double @f1(double %val) {
+define double @f1(double %val) #0 {
; CHECK-LABEL: f1:
; CHECK: sqdbr %f0, %f0
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.sqrt.f64(
double %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the low end of the SQDB range.
-define double @f2(double *%ptr) {
+define double @f2(double *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: sqdb %f0, 0(%r2)
; CHECK: br %r14
@@ -27,12 +27,12 @@ define double @f2(double *%ptr) {
%res = call double @llvm.experimental.constrained.sqrt.f64(
double %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the high end of the aligned SQDB range.
-define double @f3(double *%base) {
+define double @f3(double *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: sqdb %f0, 4088(%r2)
; CHECK: br %r14
@@ -41,13 +41,13 @@ define double @f3(double *%base) {
%res = call double @llvm.experimental.constrained.sqrt.f64(
double %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
-define double @f4(double *%base) {
+define double @f4(double *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: sqdb %f0, 0(%r2)
@@ -57,12 +57,12 @@ define double @f4(double *%base) {
%res = call double @llvm.experimental.constrained.sqrt.f64(
double %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check negative displacements, which also need separate address logic.
-define double @f5(double *%base) {
+define double @f5(double *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -8
; CHECK: sqdb %f0, 0(%r2)
@@ -72,12 +72,12 @@ define double @f5(double *%base) {
%res = call double @llvm.experimental.constrained.sqrt.f64(
double %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check that SQDB allows indices.
-define double @f6(double *%base, i64 %index) {
+define double @f6(double *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: sqdb %f0, 800(%r1,%r2)
@@ -88,7 +88,8 @@ define double @f6(double *%base, i64 %index) {
%res = call double @llvm.experimental.constrained.sqrt.f64(
double %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-sqrt-03.ll b/llvm/test/CodeGen/SystemZ/fp-strict-sqrt-03.ll
index 9bcceb74f71..0f2f2729362 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-sqrt-03.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-sqrt-03.ll
@@ -5,7 +5,7 @@
declare fp128 @llvm.experimental.constrained.sqrt.f128(fp128, metadata, metadata)
; There's no memory form of SQXBR.
-define void @f1(fp128 *%ptr) {
+define void @f1(fp128 *%ptr) strictfp {
; CHECK-LABEL: f1:
; CHECK: ld %f0, 0(%r2)
; CHECK: ld %f2, 8(%r2)
@@ -17,7 +17,7 @@ define void @f1(fp128 *%ptr) {
%sqrt = call fp128 @llvm.experimental.constrained.sqrt.f128(
fp128 %orig,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") strictfp
store fp128 %sqrt, fp128 *%ptr
ret void
}
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-sqrt-04.ll b/llvm/test/CodeGen/SystemZ/fp-strict-sqrt-04.ll
index f24f958bcb0..0667aeb281f 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-sqrt-04.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-sqrt-04.ll
@@ -4,7 +4,7 @@
declare fp128 @llvm.experimental.constrained.sqrt.f128(fp128, metadata, metadata)
-define void @f1(fp128 *%ptr) {
+define void @f1(fp128 *%ptr) strictfp {
; CHECK-LABEL: f1:
; CHECK-DAG: vl [[REG:%v[0-9]+]], 0(%r2)
; CHECK: wfsqxb [[RES:%v[0-9]+]], [[REG]]
@@ -14,7 +14,7 @@ define void @f1(fp128 *%ptr) {
%res = call fp128 @llvm.experimental.constrained.sqrt.f128(
fp128 %f,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") strictfp
store fp128 %res, fp128 *%ptr
ret void
}
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-sub-01.ll b/llvm/test/CodeGen/SystemZ/fp-strict-sub-01.ll
index a4c485a2f95..82156e4856b 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-sub-01.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-sub-01.ll
@@ -8,19 +8,19 @@ declare float @foo()
declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata)
; Check register subtraction.
-define float @f1(float %f1, float %f2) {
+define float @f1(float %f1, float %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: sebr %f0, %f2
; CHECK: br %r14
%res = call float @llvm.experimental.constrained.fsub.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check the low end of the SEB range.
-define float @f2(float %f1, float *%ptr) {
+define float @f2(float %f1, float *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: seb %f0, 0(%r2)
; CHECK: br %r14
@@ -28,12 +28,12 @@ define float @f2(float %f1, float *%ptr) {
%res = call float @llvm.experimental.constrained.fsub.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check the high end of the aligned SEB range.
-define float @f3(float %f1, float *%base) {
+define float @f3(float %f1, float *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: seb %f0, 4092(%r2)
; CHECK: br %r14
@@ -42,13 +42,13 @@ define float @f3(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fsub.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
-define float @f4(float %f1, float *%base) {
+define float @f4(float %f1, float *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: seb %f0, 0(%r2)
@@ -58,12 +58,12 @@ define float @f4(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fsub.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check negative displacements, which also need separate address logic.
-define float @f5(float %f1, float *%base) {
+define float @f5(float %f1, float *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: seb %f0, 0(%r2)
@@ -73,12 +73,12 @@ define float @f5(float %f1, float *%base) {
%res = call float @llvm.experimental.constrained.fsub.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check that SEB allows indices.
-define float @f6(float %f1, float *%base, i64 %index) {
+define float @f6(float %f1, float *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: seb %f0, 400(%r1,%r2)
@@ -89,12 +89,12 @@ define float @f6(float %f1, float *%base, i64 %index) {
%res = call float @llvm.experimental.constrained.fsub.f32(
float %f1, float %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
; Check that subtractions of spilled values can use SEB rather than SEBR.
-define float @f7(float *%ptr0) {
+define float @f7(float *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: seb %f0, 16{{[04]}}(%r15)
@@ -122,52 +122,54 @@ define float @f7(float *%ptr0) {
%val9 = load float, float *%ptr9
%val10 = load float, float *%ptr10
- %ret = call float @foo()
+ %ret = call float @foo() #0
%sub0 = call float @llvm.experimental.constrained.fsub.f32(
float %ret, float %val0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub1 = call float @llvm.experimental.constrained.fsub.f32(
float %sub0, float %val1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub2 = call float @llvm.experimental.constrained.fsub.f32(
float %sub1, float %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub3 = call float @llvm.experimental.constrained.fsub.f32(
float %sub2, float %val3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub4 = call float @llvm.experimental.constrained.fsub.f32(
float %sub3, float %val4,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub5 = call float @llvm.experimental.constrained.fsub.f32(
float %sub4, float %val5,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub6 = call float @llvm.experimental.constrained.fsub.f32(
float %sub5, float %val6,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub7 = call float @llvm.experimental.constrained.fsub.f32(
float %sub6, float %val7,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub8 = call float @llvm.experimental.constrained.fsub.f32(
float %sub7, float %val8,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub9 = call float @llvm.experimental.constrained.fsub.f32(
float %sub8, float %val9,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub10 = call float @llvm.experimental.constrained.fsub.f32(
float %sub9, float %val10,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %sub10
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-sub-02.ll b/llvm/test/CodeGen/SystemZ/fp-strict-sub-02.ll
index 0d3cdd35103..6184d88d83a 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-sub-02.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-sub-02.ll
@@ -8,19 +8,19 @@ declare double @foo()
declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata)
; Check register subtraction.
-define double @f1(double %f1, double %f2) {
+define double @f1(double %f1, double %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: sdbr %f0, %f2
; CHECK: br %r14
%res = call double @llvm.experimental.constrained.fsub.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the low end of the SDB range.
-define double @f2(double %f1, double *%ptr) {
+define double @f2(double %f1, double *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: sdb %f0, 0(%r2)
; CHECK: br %r14
@@ -28,12 +28,12 @@ define double @f2(double %f1, double *%ptr) {
%res = call double @llvm.experimental.constrained.fsub.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the high end of the aligned SDB range.
-define double @f3(double %f1, double *%base) {
+define double @f3(double %f1, double *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: sdb %f0, 4088(%r2)
; CHECK: br %r14
@@ -42,13 +42,13 @@ define double @f3(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fsub.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
-define double @f4(double %f1, double *%base) {
+define double @f4(double %f1, double *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: sdb %f0, 0(%r2)
@@ -58,12 +58,12 @@ define double @f4(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fsub.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check negative displacements, which also need separate address logic.
-define double @f5(double %f1, double *%base) {
+define double @f5(double %f1, double *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -8
; CHECK: sdb %f0, 0(%r2)
@@ -73,12 +73,12 @@ define double @f5(double %f1, double *%base) {
%res = call double @llvm.experimental.constrained.fsub.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check that SDB allows indices.
-define double @f6(double %f1, double *%base, i64 %index) {
+define double @f6(double %f1, double *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: sdb %f0, 800(%r1,%r2)
@@ -89,12 +89,12 @@ define double @f6(double %f1, double *%base, i64 %index) {
%res = call double @llvm.experimental.constrained.fsub.f64(
double %f1, double %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
; Check that subtractions of spilled values can use SDB rather than SDBR.
-define double @f7(double *%ptr0) {
+define double @f7(double *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: sdb %f0, 16{{[04]}}(%r15)
@@ -122,52 +122,54 @@ define double @f7(double *%ptr0) {
%val9 = load double, double *%ptr9
%val10 = load double, double *%ptr10
- %ret = call double @foo()
+ %ret = call double @foo() #0
%sub0 = call double @llvm.experimental.constrained.fsub.f64(
double %ret, double %val0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub1 = call double @llvm.experimental.constrained.fsub.f64(
double %sub0, double %val1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub2 = call double @llvm.experimental.constrained.fsub.f64(
double %sub1, double %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub3 = call double @llvm.experimental.constrained.fsub.f64(
double %sub2, double %val3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub4 = call double @llvm.experimental.constrained.fsub.f64(
double %sub3, double %val4,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub5 = call double @llvm.experimental.constrained.fsub.f64(
double %sub4, double %val5,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub6 = call double @llvm.experimental.constrained.fsub.f64(
double %sub5, double %val6,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub7 = call double @llvm.experimental.constrained.fsub.f64(
double %sub6, double %val7,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub8 = call double @llvm.experimental.constrained.fsub.f64(
double %sub7, double %val8,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub9 = call double @llvm.experimental.constrained.fsub.f64(
double %sub8, double %val9,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%sub10 = call double @llvm.experimental.constrained.fsub.f64(
double %sub9, double %val10,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %sub10
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-sub-03.ll b/llvm/test/CodeGen/SystemZ/fp-strict-sub-03.ll
index 63bb2fdd5bc..cc3ee09e3a2 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-sub-03.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-sub-03.ll
@@ -5,7 +5,7 @@
declare fp128 @llvm.experimental.constrained.fsub.f128(fp128, fp128, metadata, metadata)
; There is no memory form of 128-bit subtraction.
-define void @f1(fp128 *%ptr, float %f2) {
+define void @f1(fp128 *%ptr, float %f2) strictfp {
; CHECK-LABEL: f1:
; CHECK-DAG: lxebr %f0, %f0
; CHECK-DAG: ld %f1, 0(%r2)
@@ -19,7 +19,7 @@ define void @f1(fp128 *%ptr, float %f2) {
%sum = call fp128 @llvm.experimental.constrained.fsub.f128(
fp128 %f1, fp128 %f2x,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") strictfp
store fp128 %sum, fp128 *%ptr
ret void
}
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-sub-04.ll b/llvm/test/CodeGen/SystemZ/fp-strict-sub-04.ll
index 1e8326847f2..0eaf5f3afef 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-sub-04.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-sub-04.ll
@@ -4,7 +4,7 @@
declare fp128 @llvm.experimental.constrained.fsub.f128(fp128, fp128, metadata, metadata)
-define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
+define void @f1(fp128 *%ptr1, fp128 *%ptr2) strictfp {
; CHECK-LABEL: f1:
; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
@@ -16,7 +16,7 @@ define void @f1(fp128 *%ptr1, fp128 *%ptr2) {
%sum = call fp128 @llvm.experimental.constrained.fsub.f128(
fp128 %f1, fp128 %f2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") strictfp
store fp128 %sum, fp128 *%ptr1
ret void
}
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-add-01.ll b/llvm/test/CodeGen/SystemZ/vec-strict-add-01.ll
index d1270b9d2a0..1ac2e190d1d 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-add-01.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-add-01.ll
@@ -7,19 +7,19 @@ declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2
; Test a v2f64 addition.
define <2 x double> @f5(<2 x double> %dummy, <2 x double> %val1,
- <2 x double> %val2) {
+ <2 x double> %val2) strictfp {
; CHECK-LABEL: f5:
; CHECK: vfadb %v24, %v26, %v28
; CHECK: br %r14
%ret = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(
<2 x double> %val1, <2 x double> %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") strictfp
ret <2 x double> %ret
}
; Test an f64 addition that uses vector registers.
-define double @f6(<2 x double> %val1, <2 x double> %val2) {
+define double @f6(<2 x double> %val1, <2 x double> %val2) strictfp {
; CHECK-LABEL: f6:
; CHECK: wfadb %f0, %v24, %v26
; CHECK: br %r14
@@ -28,6 +28,6 @@ define double @f6(<2 x double> %val1, <2 x double> %val2) {
%ret = call double @llvm.experimental.constrained.fadd.f64(
double %scalar1, double %scalar2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") strictfp
ret double %ret
}
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-add-02.ll b/llvm/test/CodeGen/SystemZ/vec-strict-add-02.ll
index 4aee31aee45..7cdd6383178 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-add-02.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-add-02.ll
@@ -7,19 +7,19 @@ declare <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float>, <4 x
; Test a v4f32 addition.
define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val1,
- <4 x float> %val2) {
+ <4 x float> %val2) strictfp {
; CHECK-LABEL: f1:
; CHECK: vfasb %v24, %v26, %v28
; CHECK: br %r14
%ret = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(
<4 x float> %val1, <4 x float> %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") strictfp
ret <4 x float> %ret
}
; Test an f32 addition that uses vector registers.
-define float @f2(<4 x float> %val1, <4 x float> %val2) {
+define float @f2(<4 x float> %val1, <4 x float> %val2) strictfp {
; CHECK-LABEL: f2:
; CHECK: wfasb %f0, %v24, %v26
; CHECK: br %r14
@@ -28,6 +28,6 @@ define float @f2(<4 x float> %val1, <4 x float> %val2) {
%ret = call float @llvm.experimental.constrained.fadd.f32(
float %scalar1, float %scalar2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") strictfp
ret float %ret
}
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-conv-01.ll b/llvm/test/CodeGen/SystemZ/vec-strict-conv-01.ll
index a5fa0066b14..045e2b2344c 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-conv-01.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-conv-01.ll
@@ -14,54 +14,55 @@ declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f32(<2 x float>,
declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f32(<2 x float>, metadata)
; Test conversion of f64s to signed i64s.
-define <2 x i64> @f1(<2 x double> %doubles) {
+define <2 x i64> @f1(<2 x double> %doubles) #0 {
; CHECK-LABEL: f1:
; CHECK: vcgdb %v24, %v24, 0, 5
; CHECK: br %r14
%dwords = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> %doubles,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x i64> %dwords
}
; Test conversion of f64s to unsigned i64s.
-define <2 x i64> @f2(<2 x double> %doubles) {
+define <2 x i64> @f2(<2 x double> %doubles) #0 {
; CHECK-LABEL: f2:
; CHECK: vclgdb %v24, %v24, 0, 5
; CHECK: br %r14
%dwords = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> %doubles,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x i64> %dwords
}
; Test conversion of f64s to signed i32s, which must compile.
-define void @f5(<2 x double> %doubles, <2 x i32> *%ptr) {
+define void @f5(<2 x double> %doubles, <2 x i32> *%ptr) #0 {
%words = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double> %doubles,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <2 x i32> %words, <2 x i32> *%ptr
ret void
}
; Test conversion of f64s to unsigned i32s, which must compile.
-define void @f6(<2 x double> %doubles, <2 x i32> *%ptr) {
+define void @f6(<2 x double> %doubles, <2 x i32> *%ptr) #0 {
%words = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double> %doubles,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <2 x i32> %words, <2 x i32> *%ptr
ret void
}
; Test conversion of f32s to signed i64s, which must compile.
-define <2 x i64> @f9(<2 x float> *%ptr) {
+define <2 x i64> @f9(<2 x float> *%ptr) #0 {
%floats = load <2 x float>, <2 x float> *%ptr
%dwords = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f32(<2 x float> %floats,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x i64> %dwords
}
; Test conversion of f32s to unsigned i64s, which must compile.
-define <2 x i64> @f10(<2 x float> *%ptr) {
+define <2 x i64> @f10(<2 x float> *%ptr) #0 {
%floats = load <2 x float>, <2 x float> *%ptr
%dwords = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f32(<2 x float> %floats,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x i64> %dwords
}
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-conv-03.ll b/llvm/test/CodeGen/SystemZ/vec-strict-conv-03.ll
index 6b9bc80cbde..ace2e1ec42e 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-conv-03.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-conv-03.ll
@@ -8,22 +8,23 @@ declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float>,
declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float>, metadata)
; Test conversion of f32s to signed i32s.
-define <4 x i32> @f1(<4 x float> %floats) {
+define <4 x i32> @f1(<4 x float> %floats) #0 {
; CHECK-LABEL: f1:
; CHECK: vcfeb %v24, %v24, 0, 5
; CHECK: br %r14
%words = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float> %floats,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x i32> %words
}
; Test conversion of f32s to unsigned i32s.
-define <4 x i32> @f2(<4 x float> %floats) {
+define <4 x i32> @f2(<4 x float> %floats) #0 {
; CHECK-LABEL: f2:
; CHECK: vclfeb %v24, %v24, 0, 5
; CHECK: br %r14
%words = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float> %floats,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x i32> %words
}
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-div-01.ll b/llvm/test/CodeGen/SystemZ/vec-strict-div-01.ll
index ec54776254a..bb15c0d40ce 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-div-01.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-div-01.ll
@@ -7,19 +7,19 @@ declare <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double>, <2
; Test a v2f64 division.
define <2 x double> @f5(<2 x double> %dummy, <2 x double> %val1,
- <2 x double> %val2) {
+ <2 x double> %val2) #0 {
; CHECK-LABEL: f5:
; CHECK: vfddb %v24, %v26, %v28
; CHECK: br %r14
%ret = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(
<2 x double> %val1, <2 x double> %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %ret
}
; Test an f64 division that uses vector registers.
-define double @f6(<2 x double> %val1, <2 x double> %val2) {
+define double @f6(<2 x double> %val1, <2 x double> %val2) #0 {
; CHECK-LABEL: f6:
; CHECK: wfddb %f0, %v24, %v26
; CHECK: br %r14
@@ -28,6 +28,8 @@ define double @f6(<2 x double> %val1, <2 x double> %val2) {
%ret = call double @llvm.experimental.constrained.fdiv.f64(
double %scalar1, double %scalar2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %ret
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-div-02.ll b/llvm/test/CodeGen/SystemZ/vec-strict-div-02.ll
index 0fce46295eb..b791d67b324 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-div-02.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-div-02.ll
@@ -7,19 +7,19 @@ declare <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float>, <4 x
; Test a v4f32 division.
define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val1,
- <4 x float> %val2) {
+ <4 x float> %val2) #0 {
; CHECK-LABEL: f1:
; CHECK: vfdsb %v24, %v26, %v28
; CHECK: br %r14
%ret = call <4 x float> @llvm.experimental.constrained.fdiv.v4f32(
<4 x float> %val1, <4 x float> %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x float> %ret
}
; Test an f32 division that uses vector registers.
-define float @f2(<4 x float> %val1, <4 x float> %val2) {
+define float @f2(<4 x float> %val1, <4 x float> %val2) #0 {
; CHECK-LABEL: f2:
; CHECK: wfdsb %f0, %v24, %v26
; CHECK: br %r14
@@ -28,6 +28,8 @@ define float @f2(<4 x float> %val1, <4 x float> %val2) {
%ret = call float @llvm.experimental.constrained.fdiv.f32(
float %scalar1, float %scalar2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %ret
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-max-01.ll b/llvm/test/CodeGen/SystemZ/vec-strict-max-01.ll
index 82e7c32c0ef..c734a6aa5fe 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-max-01.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-max-01.ll
@@ -11,57 +11,57 @@ declare <4 x float> @llvm.experimental.constrained.maxnum.v4f32(<4 x float>, <4
declare fp128 @llvm.experimental.constrained.maxnum.f128(fp128, fp128, metadata, metadata)
; Test the f64 maxnum intrinsic.
-define double @f1(double %dummy, double %val1, double %val2) {
+define double @f1(double %dummy, double %val1, double %val2) #0 {
; CHECK-LABEL: f1:
; CHECK: wfmaxdb %f0, %f2, %f4, 4
; CHECK: br %r14
%ret = call double @llvm.experimental.constrained.maxnum.f64(
double %val1, double %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %ret
}
; Test the v2f64 maxnum intrinsic.
define <2 x double> @f2(<2 x double> %dummy, <2 x double> %val1,
- <2 x double> %val2) {
+ <2 x double> %val2) #0 {
; CHECK-LABEL: f2:
; CHECK: vfmaxdb %v24, %v26, %v28, 4
; CHECK: br %r14
%ret = call <2 x double> @llvm.experimental.constrained.maxnum.v2f64(
<2 x double> %val1, <2 x double> %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %ret
}
; Test the f32 maxnum intrinsic.
-define float @f3(float %dummy, float %val1, float %val2) {
+define float @f3(float %dummy, float %val1, float %val2) #0 {
; CHECK-LABEL: f3:
; CHECK: wfmaxsb %f0, %f2, %f4, 4
; CHECK: br %r14
%ret = call float @llvm.experimental.constrained.maxnum.f32(
float %val1, float %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %ret
}
; Test the v4f32 maxnum intrinsic.
define <4 x float> @f4(<4 x float> %dummy, <4 x float> %val1,
- <4 x float> %val2) {
+ <4 x float> %val2) #0 {
; CHECK-LABEL: f4:
; CHECK: vfmaxsb %v24, %v26, %v28, 4
; CHECK: br %r14
%ret = call <4 x float> @llvm.experimental.constrained.maxnum.v4f32(
<4 x float> %val1, <4 x float> %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x float> %ret
}
; Test the f128 maxnum intrinsic.
-define void @f5(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) {
+define void @f5(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) #0 {
; CHECK-LABEL: f5:
; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
@@ -73,8 +73,9 @@ define void @f5(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) {
%res = call fp128 @llvm.experimental.constrained.maxnum.f128(
fp128 %val1, fp128 %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128* %dst
ret void
}
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-min-01.ll b/llvm/test/CodeGen/SystemZ/vec-strict-min-01.ll
index 641b9c33475..25882568bdc 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-min-01.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-min-01.ll
@@ -11,57 +11,57 @@ declare <4 x float> @llvm.experimental.constrained.minnum.v4f32(<4 x float>, <4
declare fp128 @llvm.experimental.constrained.minnum.f128(fp128, fp128, metadata, metadata)
; Test the f64 minnum intrinsic.
-define double @f1(double %dummy, double %val1, double %val2) {
+define double @f1(double %dummy, double %val1, double %val2) #0 {
; CHECK-LABEL: f1:
; CHECK: wfmindb %f0, %f2, %f4, 4
; CHECK: br %r14
%ret = call double @llvm.experimental.constrained.minnum.f64(
double %val1, double %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %ret
}
; Test the v2f64 minnum intrinsic.
define <2 x double> @f2(<2 x double> %dummy, <2 x double> %val1,
- <2 x double> %val2) {
+ <2 x double> %val2) #0 {
; CHECK-LABEL: f2:
; CHECK: vfmindb %v24, %v26, %v28, 4
; CHECK: br %r14
%ret = call <2 x double> @llvm.experimental.constrained.minnum.v2f64(
<2 x double> %val1, <2 x double> %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %ret
}
; Test the f32 minnum intrinsic.
-define float @f3(float %dummy, float %val1, float %val2) {
+define float @f3(float %dummy, float %val1, float %val2) #0 {
; CHECK-LABEL: f3:
; CHECK: wfminsb %f0, %f2, %f4, 4
; CHECK: br %r14
%ret = call float @llvm.experimental.constrained.minnum.f32(
float %val1, float %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %ret
}
; Test the v4f32 minnum intrinsic.
define <4 x float> @f4(<4 x float> %dummy, <4 x float> %val1,
- <4 x float> %val2) {
+ <4 x float> %val2) #0 {
; CHECK-LABEL: f4:
; CHECK: vfminsb %v24, %v26, %v28, 4
; CHECK: br %r14
%ret = call <4 x float> @llvm.experimental.constrained.minnum.v4f32(
<4 x float> %val1, <4 x float> %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x float> %ret
}
; Test the f128 minnum intrinsic.
-define void @f5(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) {
+define void @f5(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) #0 {
; CHECK-LABEL: f5:
; CHECK-DAG: vl [[REG1:%v[0-9]+]], 0(%r2)
; CHECK-DAG: vl [[REG2:%v[0-9]+]], 0(%r3)
@@ -73,8 +73,9 @@ define void @f5(fp128 *%ptr1, fp128 *%ptr2, fp128 *%dst) {
%res = call fp128 @llvm.experimental.constrained.minnum.f128(
fp128 %val1, fp128 %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store fp128 %res, fp128* %dst
ret void
}
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-mul-01.ll b/llvm/test/CodeGen/SystemZ/vec-strict-mul-01.ll
index 0bb10188bac..52c40f4cb73 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-mul-01.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-mul-01.ll
@@ -7,19 +7,19 @@ declare <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double>, <2
; Test a v2f64 multiplication.
define <2 x double> @f5(<2 x double> %dummy, <2 x double> %val1,
- <2 x double> %val2) {
+ <2 x double> %val2) #0 {
; CHECK-LABEL: f5:
; CHECK: vfmdb %v24, %v26, %v28
; CHECK: br %r14
%ret = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(
<2 x double> %val1, <2 x double> %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %ret
}
; Test an f64 multiplication that uses vector registers.
-define double @f6(<2 x double> %val1, <2 x double> %val2) {
+define double @f6(<2 x double> %val1, <2 x double> %val2) #0 {
; CHECK-LABEL: f6:
; CHECK: wfmdb %f0, %v24, %v26
; CHECK: br %r14
@@ -28,6 +28,8 @@ define double @f6(<2 x double> %val1, <2 x double> %val2) {
%ret = call double @llvm.experimental.constrained.fmul.f64(
double %scalar1, double %scalar2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %ret
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-mul-02.ll b/llvm/test/CodeGen/SystemZ/vec-strict-mul-02.ll
index 61447aa9b0d..fc9c1575952 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-mul-02.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-mul-02.ll
@@ -6,7 +6,7 @@ declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x
; Test a v2f64 multiply-and-add.
define <2 x double> @f4(<2 x double> %dummy, <2 x double> %val1,
- <2 x double> %val2, <2 x double> %val3) {
+ <2 x double> %val2, <2 x double> %val3) #0 {
; CHECK-LABEL: f4:
; CHECK: vfmadb %v24, %v26, %v28, %v30
; CHECK: br %r14
@@ -15,13 +15,13 @@ define <2 x double> @f4(<2 x double> %dummy, <2 x double> %val1,
<2 x double> %val2,
<2 x double> %val3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %ret
}
; Test a v2f64 multiply-and-subtract.
define <2 x double> @f5(<2 x double> %dummy, <2 x double> %val1,
- <2 x double> %val2, <2 x double> %val3) {
+ <2 x double> %val2, <2 x double> %val3) #0 {
; CHECK-LABEL: f5:
; CHECK: vfmsdb %v24, %v26, %v28, %v30
; CHECK: br %r14
@@ -31,6 +31,8 @@ define <2 x double> @f5(<2 x double> %dummy, <2 x double> %val1,
<2 x double> %val2,
<2 x double> %negval3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %ret
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-mul-03.ll b/llvm/test/CodeGen/SystemZ/vec-strict-mul-03.ll
index a61d55913a1..a05ed27d4c6 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-mul-03.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-mul-03.ll
@@ -7,19 +7,19 @@ declare <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float>, <4 x
; Test a v4f32 multiplication.
define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val1,
- <4 x float> %val2) {
+ <4 x float> %val2) #0 {
; CHECK-LABEL: f1:
; CHECK: vfmsb %v24, %v26, %v28
; CHECK: br %r14
%ret = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(
<4 x float> %val1, <4 x float> %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x float> %ret
}
; Test an f32 multiplication that uses vector registers.
-define float @f2(<4 x float> %val1, <4 x float> %val2) {
+define float @f2(<4 x float> %val1, <4 x float> %val2) #0 {
; CHECK-LABEL: f2:
; CHECK: wfmsb %f0, %v24, %v26
; CHECK: br %r14
@@ -28,6 +28,8 @@ define float @f2(<4 x float> %val1, <4 x float> %val2) {
%ret = call float @llvm.experimental.constrained.fmul.f32(
float %scalar1, float %scalar2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %ret
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-mul-04.ll b/llvm/test/CodeGen/SystemZ/vec-strict-mul-04.ll
index e24c3895906..3a4b1448d46 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-mul-04.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-mul-04.ll
@@ -6,7 +6,7 @@ declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x f
; Test a v4f32 multiply-and-add.
define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val1,
- <4 x float> %val2, <4 x float> %val3) {
+ <4 x float> %val2, <4 x float> %val3) #0 {
; CHECK-LABEL: f1:
; CHECK: vfmasb %v24, %v26, %v28, %v30
; CHECK: br %r14
@@ -15,13 +15,13 @@ define <4 x float> @f1(<4 x float> %dummy, <4 x float> %val1,
<4 x float> %val2,
<4 x float> %val3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x float> %ret
}
; Test a v4f32 multiply-and-subtract.
define <4 x float> @f2(<4 x float> %dummy, <4 x float> %val1,
- <4 x float> %val2, <4 x float> %val3) {
+ <4 x float> %val2, <4 x float> %val3) #0 {
; CHECK-LABEL: f2:
; CHECK: vfmssb %v24, %v26, %v28, %v30
; CHECK: br %r14
@@ -32,6 +32,8 @@ define <4 x float> @f2(<4 x float> %dummy, <4 x float> %val1,
<4 x float> %val2,
<4 x float> %negval3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x float> %ret
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-mul-05.ll b/llvm/test/CodeGen/SystemZ/vec-strict-mul-05.ll
index 9fdefc505b0..b99378bc7b7 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-mul-05.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-mul-05.ll
@@ -7,7 +7,7 @@ declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x f
; Test a v2f64 negative multiply-and-add.
define <2 x double> @f1(<2 x double> %dummy, <2 x double> %val1,
- <2 x double> %val2, <2 x double> %val3) {
+ <2 x double> %val2, <2 x double> %val3) #0 {
; CHECK-LABEL: f1:
; CHECK: vfnmadb %v24, %v26, %v28, %v30
; CHECK: br %r14
@@ -16,14 +16,14 @@ define <2 x double> @f1(<2 x double> %dummy, <2 x double> %val1,
<2 x double> %val2,
<2 x double> %val3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%negret = fsub <2 x double> <double -0.0, double -0.0>, %ret
ret <2 x double> %negret
}
; Test a v2f64 negative multiply-and-subtract.
define <2 x double> @f2(<2 x double> %dummy, <2 x double> %val1,
- <2 x double> %val2, <2 x double> %val3) {
+ <2 x double> %val2, <2 x double> %val3) #0 {
; CHECK-LABEL: f2:
; CHECK: vfnmsdb %v24, %v26, %v28, %v30
; CHECK: br %r14
@@ -33,14 +33,14 @@ define <2 x double> @f2(<2 x double> %dummy, <2 x double> %val1,
<2 x double> %val2,
<2 x double> %negval3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%negret = fsub <2 x double> <double -0.0, double -0.0>, %ret
ret <2 x double> %negret
}
; Test a v4f32 negative multiply-and-add.
define <4 x float> @f3(<4 x float> %dummy, <4 x float> %val1,
- <4 x float> %val2, <4 x float> %val3) {
+ <4 x float> %val2, <4 x float> %val3) #0 {
; CHECK-LABEL: f3:
; CHECK: vfnmasb %v24, %v26, %v28, %v30
; CHECK: br %r14
@@ -49,7 +49,7 @@ define <4 x float> @f3(<4 x float> %dummy, <4 x float> %val1,
<4 x float> %val2,
<4 x float> %val3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%negret = fsub <4 x float> <float -0.0, float -0.0,
float -0.0, float -0.0>, %ret
ret <4 x float> %negret
@@ -57,7 +57,7 @@ define <4 x float> @f3(<4 x float> %dummy, <4 x float> %val1,
; Test a v4f32 negative multiply-and-subtract.
define <4 x float> @f4(<4 x float> %dummy, <4 x float> %val1,
- <4 x float> %val2, <4 x float> %val3) {
+ <4 x float> %val2, <4 x float> %val3) #0 {
; CHECK-LABEL: f4:
; CHECK: vfnmssb %v24, %v26, %v28, %v30
; CHECK: br %r14
@@ -68,8 +68,10 @@ define <4 x float> @f4(<4 x float> %dummy, <4 x float> %val1,
<4 x float> %val2,
<4 x float> %negval3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%negret = fsub <4 x float> <float -0.0, float -0.0,
float -0.0, float -0.0>, %ret
ret <4 x float> %negret
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-round-01.ll b/llvm/test/CodeGen/SystemZ/vec-strict-round-01.ll
index e86357bb139..9f73c73c212 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-round-01.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-round-01.ll
@@ -15,73 +15,73 @@ declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, met
declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata, metadata)
-define <2 x double> @f1(<2 x double> %val) {
+define <2 x double> @f1(<2 x double> %val) #0 {
; CHECK-LABEL: f1:
; CHECK: vfidb %v24, %v24, 0, 0
; CHECK: br %r14
%res = call <2 x double> @llvm.experimental.constrained.rint.v2f64(
<2 x double> %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %res
}
-define <2 x double> @f2(<2 x double> %val) {
+define <2 x double> @f2(<2 x double> %val) #0 {
; CHECK-LABEL: f2:
; CHECK: vfidb %v24, %v24, 4, 0
; CHECK: br %r14
%res = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(
<2 x double> %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %res
}
-define <2 x double> @f3(<2 x double> %val) {
+define <2 x double> @f3(<2 x double> %val) #0 {
; CHECK-LABEL: f3:
; CHECK: vfidb %v24, %v24, 4, 7
; CHECK: br %r14
%res = call <2 x double> @llvm.experimental.constrained.floor.v2f64(
<2 x double> %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %res
}
-define <2 x double> @f4(<2 x double> %val) {
+define <2 x double> @f4(<2 x double> %val) #0 {
; CHECK-LABEL: f4:
; CHECK: vfidb %v24, %v24, 4, 6
; CHECK: br %r14
%res = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(
<2 x double> %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %res
}
-define <2 x double> @f5(<2 x double> %val) {
+define <2 x double> @f5(<2 x double> %val) #0 {
; CHECK-LABEL: f5:
; CHECK: vfidb %v24, %v24, 4, 5
; CHECK: br %r14
%res = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
<2 x double> %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %res
}
-define <2 x double> @f6(<2 x double> %val) {
+define <2 x double> @f6(<2 x double> %val) #0 {
; CHECK-LABEL: f6:
; CHECK: vfidb %v24, %v24, 4, 1
; CHECK: br %r14
%res = call <2 x double> @llvm.experimental.constrained.round.v2f64(
<2 x double> %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %res
}
-define double @f7(<2 x double> %val) {
+define double @f7(<2 x double> %val) #0 {
; CHECK-LABEL: f7:
; CHECK: wfidb %f0, %v24, 0, 0
; CHECK: br %r14
@@ -89,11 +89,11 @@ define double @f7(<2 x double> %val) {
%res = call double @llvm.experimental.constrained.rint.f64(
double %scalar,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
-define double @f8(<2 x double> %val) {
+define double @f8(<2 x double> %val) #0 {
; CHECK-LABEL: f8:
; CHECK: wfidb %f0, %v24, 4, 0
; CHECK: br %r14
@@ -101,11 +101,11 @@ define double @f8(<2 x double> %val) {
%res = call double @llvm.experimental.constrained.nearbyint.f64(
double %scalar,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
-define double @f9(<2 x double> %val) {
+define double @f9(<2 x double> %val) #0 {
; CHECK-LABEL: f9:
; CHECK: wfidb %f0, %v24, 4, 7
; CHECK: br %r14
@@ -113,12 +113,12 @@ define double @f9(<2 x double> %val) {
%res = call double @llvm.experimental.constrained.floor.f64(
double %scalar,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
-define double @f10(<2 x double> %val) {
+define double @f10(<2 x double> %val) #0 {
; CHECK-LABEL: f10:
; CHECK: wfidb %f0, %v24, 4, 6
; CHECK: br %r14
@@ -126,11 +126,11 @@ define double @f10(<2 x double> %val) {
%res = call double @llvm.experimental.constrained.ceil.f64(
double %scalar,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
-define double @f11(<2 x double> %val) {
+define double @f11(<2 x double> %val) #0 {
; CHECK-LABEL: f11:
; CHECK: wfidb %f0, %v24, 4, 5
; CHECK: br %r14
@@ -138,11 +138,11 @@ define double @f11(<2 x double> %val) {
%res = call double @llvm.experimental.constrained.trunc.f64(
double %scalar,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
-define double @f12(<2 x double> %val) {
+define double @f12(<2 x double> %val) #0 {
; CHECK-LABEL: f12:
; CHECK: wfidb %f0, %v24, 4, 1
; CHECK: br %r14
@@ -150,6 +150,8 @@ define double @f12(<2 x double> %val) {
%res = call double @llvm.experimental.constrained.round.f64(
double %scalar,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %res
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-round-02.ll b/llvm/test/CodeGen/SystemZ/vec-strict-round-02.ll
index 2ee9b10ccdf..9eec926f4cb 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-round-02.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-round-02.ll
@@ -15,73 +15,73 @@ declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metad
declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata, metadata)
-define <4 x float> @f1(<4 x float> %val) {
+define <4 x float> @f1(<4 x float> %val) #0 {
; CHECK-LABEL: f1:
; CHECK: vfisb %v24, %v24, 0, 0
; CHECK: br %r14
%res = call <4 x float> @llvm.experimental.constrained.rint.v4f32(
<4 x float> %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x float> %res
}
-define <4 x float> @f2(<4 x float> %val) {
+define <4 x float> @f2(<4 x float> %val) #0 {
; CHECK-LABEL: f2:
; CHECK: vfisb %v24, %v24, 4, 0
; CHECK: br %r14
%res = call <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(
<4 x float> %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x float> %res
}
-define <4 x float> @f3(<4 x float> %val) {
+define <4 x float> @f3(<4 x float> %val) #0 {
; CHECK-LABEL: f3:
; CHECK: vfisb %v24, %v24, 4, 7
; CHECK: br %r14
%res = call <4 x float> @llvm.experimental.constrained.floor.v4f32(
<4 x float> %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x float> %res
}
-define <4 x float> @f4(<4 x float> %val) {
+define <4 x float> @f4(<4 x float> %val) #0 {
; CHECK-LABEL: f4:
; CHECK: vfisb %v24, %v24, 4, 6
; CHECK: br %r14
%res = call <4 x float> @llvm.experimental.constrained.ceil.v4f32(
<4 x float> %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x float> %res
}
-define <4 x float> @f5(<4 x float> %val) {
+define <4 x float> @f5(<4 x float> %val) #0 {
; CHECK-LABEL: f5:
; CHECK: vfisb %v24, %v24, 4, 5
; CHECK: br %r14
%res = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(
<4 x float> %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x float> %res
}
-define <4 x float> @f6(<4 x float> %val) {
+define <4 x float> @f6(<4 x float> %val) #0 {
; CHECK-LABEL: f6:
; CHECK: vfisb %v24, %v24, 4, 1
; CHECK: br %r14
%res = call <4 x float> @llvm.experimental.constrained.round.v4f32(
<4 x float> %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x float> %res
}
-define float @f7(<4 x float> %val) {
+define float @f7(<4 x float> %val) #0 {
; CHECK-LABEL: f7:
; CHECK: wfisb %f0, %v24, 0, 0
; CHECK: br %r14
@@ -89,11 +89,11 @@ define float @f7(<4 x float> %val) {
%res = call float @llvm.experimental.constrained.rint.f32(
float %scalar,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
-define float @f8(<4 x float> %val) {
+define float @f8(<4 x float> %val) #0 {
; CHECK-LABEL: f8:
; CHECK: wfisb %f0, %v24, 4, 0
; CHECK: br %r14
@@ -101,11 +101,11 @@ define float @f8(<4 x float> %val) {
%res = call float @llvm.experimental.constrained.nearbyint.f32(
float %scalar,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
-define float @f9(<4 x float> %val) {
+define float @f9(<4 x float> %val) #0 {
; CHECK-LABEL: f9:
; CHECK: wfisb %f0, %v24, 4, 7
; CHECK: br %r14
@@ -113,11 +113,11 @@ define float @f9(<4 x float> %val) {
%res = call float @llvm.experimental.constrained.floor.f32(
float %scalar,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
-define float @f10(<4 x float> %val) {
+define float @f10(<4 x float> %val) #0 {
; CHECK-LABEL: f10:
; CHECK: wfisb %f0, %v24, 4, 6
; CHECK: br %r14
@@ -125,11 +125,11 @@ define float @f10(<4 x float> %val) {
%res = call float @llvm.experimental.constrained.ceil.f32(
float %scalar,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
-define float @f11(<4 x float> %val) {
+define float @f11(<4 x float> %val) #0 {
; CHECK-LABEL: f11:
; CHECK: wfisb %f0, %v24, 4, 5
; CHECK: br %r14
@@ -137,11 +137,11 @@ define float @f11(<4 x float> %val) {
%res = call float @llvm.experimental.constrained.trunc.f32(
float %scalar,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
-define float @f12(<4 x float> %val) {
+define float @f12(<4 x float> %val) #0 {
; CHECK-LABEL: f12:
; CHECK: wfisb %f0, %v24, 4, 1
; CHECK: br %r14
@@ -149,6 +149,8 @@ define float @f12(<4 x float> %val) {
%res = call float @llvm.experimental.constrained.round.f32(
float %scalar,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %res
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-sqrt-01.ll b/llvm/test/CodeGen/SystemZ/vec-strict-sqrt-01.ll
index f59558f11ac..f7fee09af3f 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-sqrt-01.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-sqrt-01.ll
@@ -5,18 +5,18 @@
declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double>, metadata, metadata)
-define <2 x double> @f1(<2 x double> %val) {
+define <2 x double> @f1(<2 x double> %val) #0 {
; CHECK-LABEL: f1:
; CHECK: vfsqdb %v24, %v24
; CHECK: br %r14
%ret = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(
<2 x double> %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %ret
}
-define double @f2(<2 x double> %val) {
+define double @f2(<2 x double> %val) #0 {
; CHECK-LABEL: f2:
; CHECK: wfsqdb %f0, %v24
; CHECK: br %r14
@@ -24,6 +24,8 @@ define double @f2(<2 x double> %val) {
%ret = call double @llvm.experimental.constrained.sqrt.f64(
double %scalar,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %ret
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-sqrt-02.ll b/llvm/test/CodeGen/SystemZ/vec-strict-sqrt-02.ll
index 591c2c48cdb..8c60bd3f685 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-sqrt-02.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-sqrt-02.ll
@@ -5,18 +5,18 @@
declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float>, metadata, metadata)
-define <4 x float> @f1(<4 x float> %val) {
+define <4 x float> @f1(<4 x float> %val) #0 {
; CHECK-LABEL: f1:
; CHECK: vfsqsb %v24, %v24
; CHECK: br %r14
%ret = call <4 x float> @llvm.experimental.constrained.sqrt.v4f32(
<4 x float> %val,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x float> %ret
}
-define float @f2(<4 x float> %val) {
+define float @f2(<4 x float> %val) #0 {
; CHECK-LABEL: f2:
; CHECK: wfsqsb %f0, %v24
; CHECK: br %r14
@@ -24,6 +24,8 @@ define float @f2(<4 x float> %val) {
%ret = call float @llvm.experimental.constrained.sqrt.f32(
float %scalar,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %ret
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-sub-01.ll b/llvm/test/CodeGen/SystemZ/vec-strict-sub-01.ll
index 8564d227529..a379613c389 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-sub-01.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-sub-01.ll
@@ -7,19 +7,19 @@ declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2
; Test a v2f64 subtraction.
define <2 x double> @f6(<2 x double> %dummy, <2 x double> %val1,
- <2 x double> %val2) {
+ <2 x double> %val2) #0 {
; CHECK-LABEL: f6:
; CHECK: vfsdb %v24, %v26, %v28
; CHECK: br %r14
%ret = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(
<2 x double> %val1, <2 x double> %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %ret
}
; Test an f64 subtraction that uses vector registers.
-define double @f7(<2 x double> %val1, <2 x double> %val2) {
+define double @f7(<2 x double> %val1, <2 x double> %val2) #0 {
; CHECK-LABEL: f7:
; CHECK: wfsdb %f0, %v24, %v26
; CHECK: br %r14
@@ -28,7 +28,8 @@ define double @f7(<2 x double> %val1, <2 x double> %val2) {
%ret = call double @llvm.experimental.constrained.fsub.f64(
double %scalar1, double %scalar2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %ret
}
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/vec-strict-sub-02.ll b/llvm/test/CodeGen/SystemZ/vec-strict-sub-02.ll
index 1843678d23c..fc93e6a0918 100644
--- a/llvm/test/CodeGen/SystemZ/vec-strict-sub-02.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-strict-sub-02.ll
@@ -7,19 +7,19 @@ declare <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float>, <4 x
; Test a v4f32 subtraction.
define <4 x float> @f6(<4 x float> %dummy, <4 x float> %val1,
- <4 x float> %val2) {
+ <4 x float> %val2) #0 {
; CHECK-LABEL: f6:
; CHECK: vfssb %v24, %v26, %v28
; CHECK: br %r14
%ret = call <4 x float> @llvm.experimental.constrained.fsub.v4f32(
<4 x float> %val1, <4 x float> %val2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x float> %ret
}
; Test an f32 subtraction that uses vector registers.
-define float @f7(<4 x float> %val1, <4 x float> %val2) {
+define float @f7(<4 x float> %val1, <4 x float> %val2) #0 {
; CHECK-LABEL: f7:
; CHECK: wfssb %f0, %v24, %v26
; CHECK: br %r14
@@ -28,6 +28,8 @@ define float @f7(<4 x float> %val1, <4 x float> %val2) {
%ret = call float @llvm.experimental.constrained.fsub.f32(
float %scalar1, float %scalar2,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %ret
}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
index 8ab4c6db255..64097eea38f 100644
--- a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
@@ -2,7 +2,7 @@
; RUN: llc -O3 -mtriple=s390x-linux-gnu < %s | FileCheck --check-prefix=S390X %s
; RUN: llc -O3 -mtriple=s390x-linux-gnu -mcpu=z13 < %s | FileCheck --check-prefix=SZ13 %s
-define <1 x float> @constrained_vector_fdiv_v1f32() {
+define <1 x float> @constrained_vector_fdiv_v1f32() #0 {
; S390X-LABEL: constrained_vector_fdiv_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI0_0
@@ -23,11 +23,11 @@ entry:
<1 x float> <float 1.000000e+00>,
<1 x float> <float 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %div
}
-define <2 x double> @constrained_vector_fdiv_v2f64() {
+define <2 x double> @constrained_vector_fdiv_v2f64() #0 {
; S390X-LABEL: constrained_vector_fdiv_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI1_0
@@ -53,11 +53,11 @@ entry:
<2 x double> <double 1.000000e+00, double 2.000000e+00>,
<2 x double> <double 1.000000e+01, double 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %div
}
-define <3 x float> @constrained_vector_fdiv_v3f32() {
+define <3 x float> @constrained_vector_fdiv_v3f32() #0 {
; S390X-LABEL: constrained_vector_fdiv_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI2_0
@@ -93,11 +93,11 @@ entry:
<3 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>,
<3 x float> <float 1.000000e+01, float 1.000000e+01, float 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %div
}
-define void @constrained_vector_fdiv_v3f64(<3 x double>* %a) {
+define void @constrained_vector_fdiv_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_fdiv_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: ld %f0, 16(%r2)
@@ -134,12 +134,12 @@ entry:
<3 x double> <double 1.000000e+00, double 2.000000e+00, double 3.000000e+00>,
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %div, <3 x double>* %a
ret void
}
-define <4 x double> @constrained_vector_fdiv_v4f64() {
+define <4 x double> @constrained_vector_fdiv_v4f64() #0 {
; S390X-LABEL: constrained_vector_fdiv_v4f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI4_0
@@ -176,11 +176,11 @@ entry:
<4 x double> <double 1.000000e+01, double 1.000000e+01,
double 1.000000e+01, double 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %div
}
-define <1 x float> @constrained_vector_frem_v1f32() {
+define <1 x float> @constrained_vector_frem_v1f32() #0 {
; S390X-LABEL: constrained_vector_frem_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -216,11 +216,11 @@ entry:
<1 x float> <float 1.000000e+00>,
<1 x float> <float 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %rem
}
-define <2 x double> @constrained_vector_frem_v2f64() {
+define <2 x double> @constrained_vector_frem_v2f64() #0 {
; S390X-LABEL: constrained_vector_frem_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -281,11 +281,11 @@ entry:
<2 x double> <double 1.000000e+00, double 2.000000e+00>,
<2 x double> <double 1.000000e+01, double 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %rem
}
-define <3 x float> @constrained_vector_frem_v3f32() {
+define <3 x float> @constrained_vector_frem_v3f32() #0 {
; S390X-LABEL: constrained_vector_frem_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -365,11 +365,11 @@ entry:
<3 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>,
<3 x float> <float 1.000000e+01, float 1.000000e+01, float 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %rem
}
-define void @constrained_vector_frem_v3f64(<3 x double>* %a) {
+define void @constrained_vector_frem_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_frem_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r13, %r15, 104(%r15)
@@ -456,12 +456,12 @@ entry:
<3 x double> <double 1.000000e+00, double 2.000000e+00, double 3.000000e+00>,
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %rem, <3 x double>* %a
ret void
}
-define <4 x double> @constrained_vector_frem_v4f64() {
+define <4 x double> @constrained_vector_frem_v4f64() #0 {
; S390X-LABEL: constrained_vector_frem_v4f64:
; S390X: # %bb.0:
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -558,11 +558,11 @@ define <4 x double> @constrained_vector_frem_v4f64() {
<4 x double> <double 1.000000e+01, double 1.000000e+01,
double 1.000000e+01, double 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %rem
}
-define <1 x float> @constrained_vector_fmul_v1f32() {
+define <1 x float> @constrained_vector_fmul_v1f32() #0 {
; S390X-LABEL: constrained_vector_fmul_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI10_0
@@ -583,11 +583,11 @@ entry:
<1 x float> <float 0x7FF0000000000000>,
<1 x float> <float 2.000000e+00>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %mul
}
-define <2 x double> @constrained_vector_fmul_v2f64() {
+define <2 x double> @constrained_vector_fmul_v2f64() #0 {
; S390X-LABEL: constrained_vector_fmul_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI11_0
@@ -613,11 +613,11 @@ entry:
<2 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
<2 x double> <double 2.000000e+00, double 3.000000e+00>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %mul
}
-define <3 x float> @constrained_vector_fmul_v3f32() {
+define <3 x float> @constrained_vector_fmul_v3f32() #0 {
; S390X-LABEL: constrained_vector_fmul_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI12_0
@@ -652,11 +652,11 @@ entry:
float 0x7FF0000000000000>,
<3 x float> <float 1.000000e+00, float 1.000000e+01, float 1.000000e+02>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %mul
}
-define void @constrained_vector_fmul_v3f64(<3 x double>* %a) {
+define void @constrained_vector_fmul_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_fmul_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: ld %f0, 8(%r2)
@@ -691,12 +691,12 @@ entry:
double 0x7FEFFFFFFFFFFFFF>,
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %mul, <3 x double>* %a
ret void
}
-define <4 x double> @constrained_vector_fmul_v4f64() {
+define <4 x double> @constrained_vector_fmul_v4f64() #0 {
; S390X-LABEL: constrained_vector_fmul_v4f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI14_0
@@ -733,11 +733,11 @@ entry:
<4 x double> <double 2.000000e+00, double 3.000000e+00,
double 4.000000e+00, double 5.000000e+00>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %mul
}
-define <1 x float> @constrained_vector_fadd_v1f32() {
+define <1 x float> @constrained_vector_fadd_v1f32() #0 {
; S390X-LABEL: constrained_vector_fadd_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI15_0
@@ -758,11 +758,11 @@ entry:
<1 x float> <float 0x7FF0000000000000>,
<1 x float> <float 1.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %add
}
-define <2 x double> @constrained_vector_fadd_v2f64() {
+define <2 x double> @constrained_vector_fadd_v2f64() #0 {
; S390X-LABEL: constrained_vector_fadd_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI16_0
@@ -788,11 +788,11 @@ entry:
<2 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
<2 x double> <double 1.000000e+00, double 1.000000e-01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %add
}
-define <3 x float> @constrained_vector_fadd_v3f32() {
+define <3 x float> @constrained_vector_fadd_v3f32() #0 {
; S390X-LABEL: constrained_vector_fadd_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI17_0
@@ -825,11 +825,11 @@ entry:
float 0xFFFFFFFFE0000000>,
<3 x float> <float 2.0, float 1.0, float 0.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %add
}
-define void @constrained_vector_fadd_v3f64(<3 x double>* %a) {
+define void @constrained_vector_fadd_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_fadd_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: ld %f0, 8(%r2)
@@ -864,12 +864,12 @@ entry:
double 0x7FEFFFFFFFFFFFFF>,
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %add, <3 x double>* %a
ret void
}
-define <4 x double> @constrained_vector_fadd_v4f64() {
+define <4 x double> @constrained_vector_fadd_v4f64() #0 {
; S390X-LABEL: constrained_vector_fadd_v4f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI19_0
@@ -906,11 +906,11 @@ entry:
<4 x double> <double 1.000000e+00, double 1.000000e-01,
double 2.000000e+00, double 2.000000e-01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %add
}
-define <1 x float> @constrained_vector_fsub_v1f32() {
+define <1 x float> @constrained_vector_fsub_v1f32() #0 {
; S390X-LABEL: constrained_vector_fsub_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI20_0
@@ -931,11 +931,11 @@ entry:
<1 x float> <float 0x7FF0000000000000>,
<1 x float> <float 1.000000e+00>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %sub
}
-define <2 x double> @constrained_vector_fsub_v2f64() {
+define <2 x double> @constrained_vector_fsub_v2f64() #0 {
; S390X-LABEL: constrained_vector_fsub_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI21_0
@@ -960,11 +960,11 @@ entry:
<2 x double> <double 0xFFEFFFFFFFFFFFFF, double 0xFFEFFFFFFFFFFFFF>,
<2 x double> <double 1.000000e+00, double 1.000000e-01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %sub
}
-define <3 x float> @constrained_vector_fsub_v3f32() {
+define <3 x float> @constrained_vector_fsub_v3f32() #0 {
; S390X-LABEL: constrained_vector_fsub_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI22_0
@@ -1000,11 +1000,11 @@ entry:
float 0xFFFFFFFFE0000000>,
<3 x float> <float 2.0, float 1.0, float 0.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %sub
}
-define void @constrained_vector_fsub_v3f64(<3 x double>* %a) {
+define void @constrained_vector_fsub_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_fsub_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI23_0
@@ -1038,12 +1038,12 @@ entry:
double 0xFFEFFFFFFFFFFFFF>,
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %sub, <3 x double>* %a
ret void
}
-define <4 x double> @constrained_vector_fsub_v4f64() {
+define <4 x double> @constrained_vector_fsub_v4f64() #0 {
; S390X-LABEL: constrained_vector_fsub_v4f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI24_0
@@ -1080,11 +1080,11 @@ entry:
<4 x double> <double 1.000000e+00, double 1.000000e-01,
double 2.000000e+00, double 2.000000e-01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %sub
}
-define <1 x float> @constrained_vector_sqrt_v1f32() {
+define <1 x float> @constrained_vector_sqrt_v1f32() #0 {
; S390X-LABEL: constrained_vector_sqrt_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI25_0
@@ -1101,11 +1101,11 @@ entry:
%sqrt = call <1 x float> @llvm.experimental.constrained.sqrt.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %sqrt
}
-define <2 x double> @constrained_vector_sqrt_v2f64() {
+define <2 x double> @constrained_vector_sqrt_v2f64() #0 {
; S390X-LABEL: constrained_vector_sqrt_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI26_0
@@ -1125,11 +1125,11 @@ entry:
%sqrt = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %sqrt
}
-define <3 x float> @constrained_vector_sqrt_v3f32() {
+define <3 x float> @constrained_vector_sqrt_v3f32() #0 {
; S390X-LABEL: constrained_vector_sqrt_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI27_0
@@ -1156,11 +1156,11 @@ entry:
%sqrt = call <3 x float> @llvm.experimental.constrained.sqrt.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %sqrt
}
-define void @constrained_vector_sqrt_v3f64(<3 x double>* %a) {
+define void @constrained_vector_sqrt_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_sqrt_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: ld %f0, 8(%r2)
@@ -1186,12 +1186,12 @@ entry:
%sqrt = call <3 x double> @llvm.experimental.constrained.sqrt.v3f64(
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %sqrt, <3 x double>* %a
ret void
}
-define <4 x double> @constrained_vector_sqrt_v4f64() {
+define <4 x double> @constrained_vector_sqrt_v4f64() #0 {
; S390X-LABEL: constrained_vector_sqrt_v4f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI29_0
@@ -1219,11 +1219,11 @@ define <4 x double> @constrained_vector_sqrt_v4f64() {
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %sqrt
}
-define <1 x float> @constrained_vector_pow_v1f32() {
+define <1 x float> @constrained_vector_pow_v1f32() #0 {
; S390X-LABEL: constrained_vector_pow_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -1260,11 +1260,11 @@ entry:
<1 x float> <float 42.0>,
<1 x float> <float 3.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %pow
}
-define <2 x double> @constrained_vector_pow_v2f64() {
+define <2 x double> @constrained_vector_pow_v2f64() #0 {
; S390X-LABEL: constrained_vector_pow_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -1327,11 +1327,11 @@ entry:
<2 x double> <double 42.1, double 42.2>,
<2 x double> <double 3.0, double 3.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %pow
}
-define <3 x float> @constrained_vector_pow_v3f32() {
+define <3 x float> @constrained_vector_pow_v3f32() #0 {
; S390X-LABEL: constrained_vector_pow_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -1413,11 +1413,11 @@ entry:
<3 x float> <float 42.0, float 43.0, float 44.0>,
<3 x float> <float 3.0, float 3.0, float 3.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %pow
}
-define void @constrained_vector_pow_v3f64(<3 x double>* %a) {
+define void @constrained_vector_pow_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_pow_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r13, %r15, 104(%r15)
@@ -1508,12 +1508,12 @@ entry:
<3 x double> %b,
<3 x double> <double 3.0, double 3.0, double 3.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %pow, <3 x double>* %a
ret void
}
-define <4 x double> @constrained_vector_pow_v4f64() {
+define <4 x double> @constrained_vector_pow_v4f64() #0 {
; S390X-LABEL: constrained_vector_pow_v4f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -1613,11 +1613,11 @@ entry:
<4 x double> <double 3.0, double 3.0,
double 3.0, double 3.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %pow
}
-define <1 x float> @constrained_vector_powi_v1f32() {
+define <1 x float> @constrained_vector_powi_v1f32() #0 {
; S390X-LABEL: constrained_vector_powi_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -1652,11 +1652,11 @@ entry:
<1 x float> <float 42.0>,
i32 3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %powi
}
-define <2 x double> @constrained_vector_powi_v2f64() {
+define <2 x double> @constrained_vector_powi_v2f64() #0 {
; S390X-LABEL: constrained_vector_powi_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -1709,11 +1709,11 @@ entry:
<2 x double> <double 42.1, double 42.2>,
i32 3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %powi
}
-define <3 x float> @constrained_vector_powi_v3f32() {
+define <3 x float> @constrained_vector_powi_v3f32() #0 {
; S390X-LABEL: constrained_vector_powi_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -1785,11 +1785,11 @@ entry:
<3 x float> <float 42.0, float 43.0, float 44.0>,
i32 3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %powi
}
-define void @constrained_vector_powi_v3f64(<3 x double>* %a) {
+define void @constrained_vector_powi_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_powi_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r13, %r15, 104(%r15)
@@ -1865,12 +1865,12 @@ entry:
<3 x double> <double 42.0, double 42.1, double 42.2>,
i32 3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %powi, <3 x double>* %a
ret void
}
-define <4 x double> @constrained_vector_powi_v4f64() {
+define <4 x double> @constrained_vector_powi_v4f64() #0 {
; S390X-LABEL: constrained_vector_powi_v4f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -1959,11 +1959,11 @@ entry:
double 42.3, double 42.4>,
i32 3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %powi
}
-define <1 x float> @constrained_vector_sin_v1f32() {
+define <1 x float> @constrained_vector_sin_v1f32() #0 {
; S390X-LABEL: constrained_vector_sin_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -1995,11 +1995,11 @@ entry:
%sin = call <1 x float> @llvm.experimental.constrained.sin.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %sin
}
-define <2 x double> @constrained_vector_sin_v2f64() {
+define <2 x double> @constrained_vector_sin_v2f64() #0 {
; S390X-LABEL: constrained_vector_sin_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -2047,11 +2047,11 @@ entry:
%sin = call <2 x double> @llvm.experimental.constrained.sin.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %sin
}
-define <3 x float> @constrained_vector_sin_v3f32() {
+define <3 x float> @constrained_vector_sin_v3f32() #0 {
; S390X-LABEL: constrained_vector_sin_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -2116,11 +2116,11 @@ entry:
%sin = call <3 x float> @llvm.experimental.constrained.sin.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %sin
}
-define void @constrained_vector_sin_v3f64(<3 x double>* %a) {
+define void @constrained_vector_sin_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_sin_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r13, %r15, 104(%r15)
@@ -2194,12 +2194,12 @@ entry:
%sin = call <3 x double> @llvm.experimental.constrained.sin.v3f64(
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %sin, <3 x double>* %a
ret void
}
-define <4 x double> @constrained_vector_sin_v4f64() {
+define <4 x double> @constrained_vector_sin_v4f64() #0 {
; S390X-LABEL: constrained_vector_sin_v4f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -2279,11 +2279,11 @@ entry:
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %sin
}
-define <1 x float> @constrained_vector_cos_v1f32() {
+define <1 x float> @constrained_vector_cos_v1f32() #0 {
; S390X-LABEL: constrained_vector_cos_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -2315,11 +2315,11 @@ entry:
%cos = call <1 x float> @llvm.experimental.constrained.cos.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %cos
}
-define <2 x double> @constrained_vector_cos_v2f64() {
+define <2 x double> @constrained_vector_cos_v2f64() #0 {
; S390X-LABEL: constrained_vector_cos_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -2367,11 +2367,11 @@ entry:
%cos = call <2 x double> @llvm.experimental.constrained.cos.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %cos
}
-define <3 x float> @constrained_vector_cos_v3f32() {
+define <3 x float> @constrained_vector_cos_v3f32() #0 {
; S390X-LABEL: constrained_vector_cos_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -2436,11 +2436,11 @@ entry:
%cos = call <3 x float> @llvm.experimental.constrained.cos.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %cos
}
-define void @constrained_vector_cos_v3f64(<3 x double>* %a) {
+define void @constrained_vector_cos_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_cos_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r13, %r15, 104(%r15)
@@ -2514,12 +2514,12 @@ entry:
%cos = call <3 x double> @llvm.experimental.constrained.cos.v3f64(
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %cos, <3 x double>* %a
ret void
}
-define <4 x double> @constrained_vector_cos_v4f64() {
+define <4 x double> @constrained_vector_cos_v4f64() #0 {
; S390X-LABEL: constrained_vector_cos_v4f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -2599,11 +2599,11 @@ entry:
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %cos
}
-define <1 x float> @constrained_vector_exp_v1f32() {
+define <1 x float> @constrained_vector_exp_v1f32() #0 {
; S390X-LABEL: constrained_vector_exp_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -2635,11 +2635,11 @@ entry:
%exp = call <1 x float> @llvm.experimental.constrained.exp.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %exp
}
-define <2 x double> @constrained_vector_exp_v2f64() {
+define <2 x double> @constrained_vector_exp_v2f64() #0 {
; S390X-LABEL: constrained_vector_exp_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -2687,11 +2687,11 @@ entry:
%exp = call <2 x double> @llvm.experimental.constrained.exp.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %exp
}
-define <3 x float> @constrained_vector_exp_v3f32() {
+define <3 x float> @constrained_vector_exp_v3f32() #0 {
; S390X-LABEL: constrained_vector_exp_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -2756,11 +2756,11 @@ entry:
%exp = call <3 x float> @llvm.experimental.constrained.exp.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %exp
}
-define void @constrained_vector_exp_v3f64(<3 x double>* %a) {
+define void @constrained_vector_exp_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_exp_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r13, %r15, 104(%r15)
@@ -2834,12 +2834,12 @@ entry:
%exp = call <3 x double> @llvm.experimental.constrained.exp.v3f64(
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %exp, <3 x double>* %a
ret void
}
-define <4 x double> @constrained_vector_exp_v4f64() {
+define <4 x double> @constrained_vector_exp_v4f64() #0 {
; S390X-LABEL: constrained_vector_exp_v4f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -2919,11 +2919,11 @@ entry:
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %exp
}
-define <1 x float> @constrained_vector_exp2_v1f32() {
+define <1 x float> @constrained_vector_exp2_v1f32() #0 {
; S390X-LABEL: constrained_vector_exp2_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -2955,11 +2955,11 @@ entry:
%exp2 = call <1 x float> @llvm.experimental.constrained.exp2.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %exp2
}
-define <2 x double> @constrained_vector_exp2_v2f64() {
+define <2 x double> @constrained_vector_exp2_v2f64() #0 {
; S390X-LABEL: constrained_vector_exp2_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -3007,11 +3007,11 @@ entry:
%exp2 = call <2 x double> @llvm.experimental.constrained.exp2.v2f64(
<2 x double> <double 42.1, double 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %exp2
}
-define <3 x float> @constrained_vector_exp2_v3f32() {
+define <3 x float> @constrained_vector_exp2_v3f32() #0 {
; S390X-LABEL: constrained_vector_exp2_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -3076,11 +3076,11 @@ entry:
%exp2 = call <3 x float> @llvm.experimental.constrained.exp2.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %exp2
}
-define void @constrained_vector_exp2_v3f64(<3 x double>* %a) {
+define void @constrained_vector_exp2_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_exp2_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r13, %r15, 104(%r15)
@@ -3154,12 +3154,12 @@ entry:
%exp2 = call <3 x double> @llvm.experimental.constrained.exp2.v3f64(
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %exp2, <3 x double>* %a
ret void
}
-define <4 x double> @constrained_vector_exp2_v4f64() {
+define <4 x double> @constrained_vector_exp2_v4f64() #0 {
; S390X-LABEL: constrained_vector_exp2_v4f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -3239,11 +3239,11 @@ entry:
<4 x double> <double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %exp2
}
-define <1 x float> @constrained_vector_log_v1f32() {
+define <1 x float> @constrained_vector_log_v1f32() #0 {
; S390X-LABEL: constrained_vector_log_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -3275,11 +3275,11 @@ entry:
%log = call <1 x float> @llvm.experimental.constrained.log.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %log
}
-define <2 x double> @constrained_vector_log_v2f64() {
+define <2 x double> @constrained_vector_log_v2f64() #0 {
; S390X-LABEL: constrained_vector_log_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -3327,11 +3327,11 @@ entry:
%log = call <2 x double> @llvm.experimental.constrained.log.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %log
}
-define <3 x float> @constrained_vector_log_v3f32() {
+define <3 x float> @constrained_vector_log_v3f32() #0 {
; S390X-LABEL: constrained_vector_log_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -3396,11 +3396,11 @@ entry:
%log = call <3 x float> @llvm.experimental.constrained.log.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %log
}
-define void @constrained_vector_log_v3f64(<3 x double>* %a) {
+define void @constrained_vector_log_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_log_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r13, %r15, 104(%r15)
@@ -3474,12 +3474,12 @@ entry:
%log = call <3 x double> @llvm.experimental.constrained.log.v3f64(
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %log, <3 x double>* %a
ret void
}
-define <4 x double> @constrained_vector_log_v4f64() {
+define <4 x double> @constrained_vector_log_v4f64() #0 {
; S390X-LABEL: constrained_vector_log_v4f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -3559,11 +3559,11 @@ entry:
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %log
}
-define <1 x float> @constrained_vector_log10_v1f32() {
+define <1 x float> @constrained_vector_log10_v1f32() #0 {
; S390X-LABEL: constrained_vector_log10_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -3595,11 +3595,11 @@ entry:
%log10 = call <1 x float> @llvm.experimental.constrained.log10.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %log10
}
-define <2 x double> @constrained_vector_log10_v2f64() {
+define <2 x double> @constrained_vector_log10_v2f64() #0 {
; S390X-LABEL: constrained_vector_log10_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -3647,11 +3647,11 @@ entry:
%log10 = call <2 x double> @llvm.experimental.constrained.log10.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %log10
}
-define <3 x float> @constrained_vector_log10_v3f32() {
+define <3 x float> @constrained_vector_log10_v3f32() #0 {
; S390X-LABEL: constrained_vector_log10_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -3716,11 +3716,11 @@ entry:
%log10 = call <3 x float> @llvm.experimental.constrained.log10.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %log10
}
-define void @constrained_vector_log10_v3f64(<3 x double>* %a) {
+define void @constrained_vector_log10_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_log10_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r13, %r15, 104(%r15)
@@ -3794,12 +3794,12 @@ entry:
%log10 = call <3 x double> @llvm.experimental.constrained.log10.v3f64(
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %log10, <3 x double>* %a
ret void
}
-define <4 x double> @constrained_vector_log10_v4f64() {
+define <4 x double> @constrained_vector_log10_v4f64() #0 {
; S390X-LABEL: constrained_vector_log10_v4f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -3879,11 +3879,11 @@ entry:
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %log10
}
-define <1 x float> @constrained_vector_log2_v1f32() {
+define <1 x float> @constrained_vector_log2_v1f32() #0 {
; S390X-LABEL: constrained_vector_log2_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -3915,11 +3915,11 @@ entry:
%log2 = call <1 x float> @llvm.experimental.constrained.log2.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %log2
}
-define <2 x double> @constrained_vector_log2_v2f64() {
+define <2 x double> @constrained_vector_log2_v2f64() #0 {
; S390X-LABEL: constrained_vector_log2_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -3967,11 +3967,11 @@ entry:
%log2 = call <2 x double> @llvm.experimental.constrained.log2.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %log2
}
-define <3 x float> @constrained_vector_log2_v3f32() {
+define <3 x float> @constrained_vector_log2_v3f32() #0 {
; S390X-LABEL: constrained_vector_log2_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -4036,11 +4036,11 @@ entry:
%log2 = call <3 x float> @llvm.experimental.constrained.log2.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %log2
}
-define void @constrained_vector_log2_v3f64(<3 x double>* %a) {
+define void @constrained_vector_log2_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_log2_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r13, %r15, 104(%r15)
@@ -4114,12 +4114,12 @@ entry:
%log2 = call <3 x double> @llvm.experimental.constrained.log2.v3f64(
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %log2, <3 x double>* %a
ret void
}
-define <4 x double> @constrained_vector_log2_v4f64() {
+define <4 x double> @constrained_vector_log2_v4f64() #0 {
; S390X-LABEL: constrained_vector_log2_v4f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -4199,11 +4199,11 @@ entry:
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %log2
}
-define <1 x float> @constrained_vector_rint_v1f32() {
+define <1 x float> @constrained_vector_rint_v1f32() #0 {
; S390X-LABEL: constrained_vector_rint_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI75_0
@@ -4222,11 +4222,11 @@ entry:
%rint = call <1 x float> @llvm.experimental.constrained.rint.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %rint
}
-define <2 x double> @constrained_vector_rint_v2f64() {
+define <2 x double> @constrained_vector_rint_v2f64() #0 {
; S390X-LABEL: constrained_vector_rint_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI76_0
@@ -4247,11 +4247,11 @@ entry:
%rint = call <2 x double> @llvm.experimental.constrained.rint.v2f64(
<2 x double> <double 42.1, double 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %rint
}
-define <3 x float> @constrained_vector_rint_v3f32() {
+define <3 x float> @constrained_vector_rint_v3f32() #0 {
; S390X-LABEL: constrained_vector_rint_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI77_0
@@ -4284,11 +4284,11 @@ define <3 x float> @constrained_vector_rint_v3f32() {
%rint = call <3 x float> @llvm.experimental.constrained.rint.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %rint
}
-define void @constrained_vector_rint_v3f64(<3 x double>* %a) {
+define void @constrained_vector_rint_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_rint_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: ld %f0, 0(%r2)
@@ -4316,12 +4316,12 @@ entry:
%rint = call <3 x double> @llvm.experimental.constrained.rint.v3f64(
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %rint, <3 x double>* %a
ret void
}
-define <4 x double> @constrained_vector_rint_v4f64() {
+define <4 x double> @constrained_vector_rint_v4f64() #0 {
; S390X-LABEL: constrained_vector_rint_v4f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI79_0
@@ -4352,11 +4352,11 @@ entry:
<4 x double> <double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %rint
}
-define <1 x float> @constrained_vector_nearbyint_v1f32() {
+define <1 x float> @constrained_vector_nearbyint_v1f32() #0 {
; S390X-LABEL: constrained_vector_nearbyint_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -4381,11 +4381,11 @@ entry:
%nearby = call <1 x float> @llvm.experimental.constrained.nearbyint.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %nearby
}
-define <2 x double> @constrained_vector_nearbyint_v2f64() {
+define <2 x double> @constrained_vector_nearbyint_v2f64() #0 {
; S390X-LABEL: constrained_vector_nearbyint_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -4419,11 +4419,11 @@ entry:
%nearby = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(
<2 x double> <double 42.1, double 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %nearby
}
-define <3 x float> @constrained_vector_nearbyint_v3f32() {
+define <3 x float> @constrained_vector_nearbyint_v3f32() #0 {
; S390X-LABEL: constrained_vector_nearbyint_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -4475,11 +4475,11 @@ entry:
%nearby = call <3 x float> @llvm.experimental.constrained.nearbyint.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %nearby
}
-define void @constrained_vector_nearbyint_v3f64(<3 x double>* %a) {
+define void @constrained_vector_nearbyint_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_nearbyint_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r13, %r15, 104(%r15)
@@ -4528,12 +4528,12 @@ entry:
%nearby = call <3 x double> @llvm.experimental.constrained.nearbyint.v3f64(
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %nearby, <3 x double>* %a
ret void
}
-define <4 x double> @constrained_vector_nearbyint_v4f64() {
+define <4 x double> @constrained_vector_nearbyint_v4f64() #0 {
; S390X-LABEL: constrained_vector_nearbyint_v4f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -4589,11 +4589,11 @@ entry:
<4 x double> <double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %nearby
}
-define <1 x float> @constrained_vector_maxnum_v1f32() {
+define <1 x float> @constrained_vector_maxnum_v1f32() #0 {
; S390X-LABEL: constrained_vector_maxnum_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -4629,11 +4629,11 @@ entry:
%max = call <1 x float> @llvm.experimental.constrained.maxnum.v1f32(
<1 x float> <float 42.0>, <1 x float> <float 41.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %max
}
-define <2 x double> @constrained_vector_maxnum_v2f64() {
+define <2 x double> @constrained_vector_maxnum_v2f64() #0 {
; S390X-LABEL: constrained_vector_maxnum_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -4690,11 +4690,11 @@ entry:
<2 x double> <double 43.0, double 42.0>,
<2 x double> <double 41.0, double 40.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %max
}
-define <3 x float> @constrained_vector_maxnum_v3f32() {
+define <3 x float> @constrained_vector_maxnum_v3f32() #0 {
; S390X-LABEL: constrained_vector_maxnum_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -4778,11 +4778,11 @@ entry:
<3 x float> <float 43.0, float 44.0, float 45.0>,
<3 x float> <float 41.0, float 42.0, float 43.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %max
}
-define void @constrained_vector_log10_maxnum_v3f64(<3 x double>* %a) {
+define void @constrained_vector_log10_maxnum_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_log10_maxnum_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r13, %r15, 104(%r15)
@@ -4869,12 +4869,12 @@ entry:
<3 x double> %b,
<3 x double> <double 40.0, double 41.0, double 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %max, <3 x double>* %a
ret void
}
-define <4 x double> @constrained_vector_maxnum_v4f64() {
+define <4 x double> @constrained_vector_maxnum_v4f64() #0 {
; S390X-LABEL: constrained_vector_maxnum_v4f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -4972,11 +4972,11 @@ entry:
<4 x double> <double 40.0, double 41.0,
double 42.0, double 43.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %max
}
-define <1 x float> @constrained_vector_minnum_v1f32() {
+define <1 x float> @constrained_vector_minnum_v1f32() #0 {
; S390X-LABEL: constrained_vector_minnum_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -5012,11 +5012,11 @@ define <1 x float> @constrained_vector_minnum_v1f32() {
%min = call <1 x float> @llvm.experimental.constrained.minnum.v1f32(
<1 x float> <float 42.0>, <1 x float> <float 41.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %min
}
-define <2 x double> @constrained_vector_minnum_v2f64() {
+define <2 x double> @constrained_vector_minnum_v2f64() #0 {
; S390X-LABEL: constrained_vector_minnum_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -5073,11 +5073,11 @@ entry:
<2 x double> <double 43.0, double 42.0>,
<2 x double> <double 41.0, double 40.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %min
}
-define <3 x float> @constrained_vector_minnum_v3f32() {
+define <3 x float> @constrained_vector_minnum_v3f32() #0 {
; S390X-LABEL: constrained_vector_minnum_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -5161,11 +5161,11 @@ entry:
<3 x float> <float 43.0, float 44.0, float 45.0>,
<3 x float> <float 41.0, float 42.0, float 43.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %min
}
-define void @constrained_vector_minnum_v3f64(<3 x double>* %a) {
+define void @constrained_vector_minnum_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_minnum_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r13, %r15, 104(%r15)
@@ -5256,12 +5256,12 @@ entry:
<3 x double> %b,
<3 x double> <double 3.0, double 3.0, double 3.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %min, <3 x double>* %a
ret void
}
-define <4 x double> @constrained_vector_minnum_v4f64() {
+define <4 x double> @constrained_vector_minnum_v4f64() #0 {
; S390X-LABEL: constrained_vector_minnum_v4f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -5359,11 +5359,11 @@ entry:
<4 x double> <double 40.0, double 41.0,
double 42.0, double 43.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %min
}
-define <1 x float> @constrained_vector_fptrunc_v1f64() {
+define <1 x float> @constrained_vector_fptrunc_v1f64() #0 {
; S390X-LABEL: constrained_vector_fptrunc_v1f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI95_0
@@ -5381,11 +5381,11 @@ entry:
%result = call <1 x float> @llvm.experimental.constrained.fptrunc.v1f32.v1f64(
<1 x double><double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %result
}
-define <2 x float> @constrained_vector_fptrunc_v2f64() {
+define <2 x float> @constrained_vector_fptrunc_v2f64() #0 {
; S390X-LABEL: constrained_vector_fptrunc_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI96_0
@@ -5411,11 +5411,11 @@ entry:
%result = call <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(
<2 x double><double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x float> %result
}
-define void @constrained_vector_fptrunc_v3f64(<3 x double>* %src, <3 x float>* %dest) {
+define void @constrained_vector_fptrunc_v3f64(<3 x double>* %src, <3 x float>* %dest) #0 {
; S390X-LABEL: constrained_vector_fptrunc_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: ld %f0, 0(%r2)
@@ -5451,12 +5451,12 @@ entry:
%result = call <3 x float> @llvm.experimental.constrained.fptrunc.v3f32.v3f64(
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x float> %result, <3 x float>* %dest
ret void
}
-define <4 x float> @constrained_vector_fptrunc_v4f64() {
+define <4 x float> @constrained_vector_fptrunc_v4f64() #0 {
; S390X-LABEL: constrained_vector_fptrunc_v4f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI98_0
@@ -5496,11 +5496,11 @@ entry:
<4 x double><double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x float> %result
}
-define <1 x double> @constrained_vector_fpext_v1f32() {
+define <1 x double> @constrained_vector_fpext_v1f32() #0 {
; S390X-LABEL: constrained_vector_fpext_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI99_0
@@ -5516,11 +5516,11 @@ define <1 x double> @constrained_vector_fpext_v1f32() {
entry:
%result = call <1 x double> @llvm.experimental.constrained.fpext.v1f64.v1f32(
<1 x float><float 42.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x double> %result
}
-define <2 x double> @constrained_vector_fpext_v2f32() {
+define <2 x double> @constrained_vector_fpext_v2f32() #0 {
; S390X-LABEL: constrained_vector_fpext_v2f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI100_0
@@ -5540,11 +5540,11 @@ define <2 x double> @constrained_vector_fpext_v2f32() {
entry:
%result = call <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(
<2 x float><float 42.0, float 43.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %result
}
-define void @constrained_vector_fpext_v3f64(<3 x float>* %src, <3 x double>* %dest) {
+define void @constrained_vector_fpext_v3f64(<3 x float>* %src, <3 x double>* %dest) #0 {
; S390X-LABEL: constrained_vector_fpext_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: lg %r0, 0(%r2)
@@ -5576,12 +5576,12 @@ entry:
%b = load <3 x float>, <3 x float>* %src
%result = call <3 x double> @llvm.experimental.constrained.fpext.v3f64.v3f32(
<3 x float> %b,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %result, <3 x double>* %dest
ret void
}
-define <4 x double> @constrained_vector_fpext_v4f32() {
+define <4 x double> @constrained_vector_fpext_v4f32() #0 {
; S390X-LABEL: constrained_vector_fpext_v4f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: larl %r1, .LCPI102_0
@@ -5611,11 +5611,11 @@ entry:
%result = call <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(
<4 x float><float 42.0, float 43.0,
float 44.0, float 45.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %result
}
-define <1 x float> @constrained_vector_ceil_v1f32() {
+define <1 x float> @constrained_vector_ceil_v1f32() #0 {
; S390X-LABEL: constrained_vector_ceil_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -5639,11 +5639,11 @@ entry:
%ceil = call <1 x float> @llvm.experimental.constrained.ceil.v1f32(
<1 x float> <float 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %ceil
}
-define <2 x double> @constrained_vector_ceil_v2f64() {
+define <2 x double> @constrained_vector_ceil_v2f64() #0 {
; S390X-LABEL: constrained_vector_ceil_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -5677,11 +5677,11 @@ entry:
%ceil = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(
<2 x double> <double 1.1, double 1.9>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %ceil
}
-define <3 x float> @constrained_vector_ceil_v3f32() {
+define <3 x float> @constrained_vector_ceil_v3f32() #0 {
; S390X-LABEL: constrained_vector_ceil_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -5732,11 +5732,11 @@ entry:
%ceil = call <3 x float> @llvm.experimental.constrained.ceil.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %ceil
}
-define void @constrained_vector_ceil_v3f64(<3 x double>* %a) {
+define void @constrained_vector_ceil_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_ceil_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r13, %r15, 104(%r15)
@@ -5785,12 +5785,12 @@ entry:
%ceil = call <3 x double> @llvm.experimental.constrained.ceil.v3f64(
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %ceil, <3 x double>* %a
ret void
}
-define <1 x float> @constrained_vector_floor_v1f32() {
+define <1 x float> @constrained_vector_floor_v1f32() #0 {
; S390X-LABEL: constrained_vector_floor_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -5814,12 +5814,12 @@ entry:
%floor = call <1 x float> @llvm.experimental.constrained.floor.v1f32(
<1 x float> <float 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %floor
}
-define <2 x double> @constrained_vector_floor_v2f64() {
+define <2 x double> @constrained_vector_floor_v2f64() #0 {
; S390X-LABEL: constrained_vector_floor_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -5853,11 +5853,11 @@ entry:
%floor = call <2 x double> @llvm.experimental.constrained.floor.v2f64(
<2 x double> <double 1.1, double 1.9>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %floor
}
-define <3 x float> @constrained_vector_floor_v3f32() {
+define <3 x float> @constrained_vector_floor_v3f32() #0 {
; S390X-LABEL: constrained_vector_floor_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -5908,11 +5908,11 @@ entry:
%floor = call <3 x float> @llvm.experimental.constrained.floor.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %floor
}
-define void @constrained_vector_floor_v3f64(<3 x double>* %a) {
+define void @constrained_vector_floor_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_floor_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r13, %r15, 104(%r15)
@@ -5961,12 +5961,12 @@ entry:
%floor = call <3 x double> @llvm.experimental.constrained.floor.v3f64(
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %floor, <3 x double>* %a
ret void
}
-define <1 x float> @constrained_vector_round_v1f32() {
+define <1 x float> @constrained_vector_round_v1f32() #0 {
; S390X-LABEL: constrained_vector_round_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -5990,11 +5990,11 @@ entry:
%round = call <1 x float> @llvm.experimental.constrained.round.v1f32(
<1 x float> <float 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %round
}
-define <2 x double> @constrained_vector_round_v2f64() {
+define <2 x double> @constrained_vector_round_v2f64() #0 {
; S390X-LABEL: constrained_vector_round_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -6028,11 +6028,11 @@ entry:
%round = call <2 x double> @llvm.experimental.constrained.round.v2f64(
<2 x double> <double 1.1, double 1.9>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %round
}
-define <3 x float> @constrained_vector_round_v3f32() {
+define <3 x float> @constrained_vector_round_v3f32() #0 {
; S390X-LABEL: constrained_vector_round_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -6083,12 +6083,12 @@ entry:
%round = call <3 x float> @llvm.experimental.constrained.round.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %round
}
-define void @constrained_vector_round_v3f64(<3 x double>* %a) {
+define void @constrained_vector_round_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_round_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r13, %r15, 104(%r15)
@@ -6137,12 +6137,12 @@ entry:
%round = call <3 x double> @llvm.experimental.constrained.round.v3f64(
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %round, <3 x double>* %a
ret void
}
-define <1 x float> @constrained_vector_trunc_v1f32() {
+define <1 x float> @constrained_vector_trunc_v1f32() #0 {
; S390X-LABEL: constrained_vector_trunc_v1f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -6166,11 +6166,11 @@ entry:
%trunc = call <1 x float> @llvm.experimental.constrained.trunc.v1f32(
<1 x float> <float 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %trunc
}
-define <2 x double> @constrained_vector_trunc_v2f64() {
+define <2 x double> @constrained_vector_trunc_v2f64() #0 {
; S390X-LABEL: constrained_vector_trunc_v2f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -6204,11 +6204,11 @@ entry:
%trunc = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
<2 x double> <double 1.1, double 1.9>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %trunc
}
-define <3 x float> @constrained_vector_trunc_v3f32() {
+define <3 x float> @constrained_vector_trunc_v3f32() #0 {
; S390X-LABEL: constrained_vector_trunc_v3f32:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r14, %r15, 112(%r15)
@@ -6259,11 +6259,11 @@ entry:
%trunc = call <3 x float> @llvm.experimental.constrained.trunc.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %trunc
}
-define void @constrained_vector_trunc_v3f64(<3 x double>* %a) {
+define void @constrained_vector_trunc_v3f64(<3 x double>* %a) #0 {
; S390X-LABEL: constrained_vector_trunc_v3f64:
; S390X: # %bb.0: # %entry
; S390X-NEXT: stmg %r13, %r15, 104(%r15)
@@ -6312,11 +6312,13 @@ entry:
%trunc = call <3 x double> @llvm.experimental.constrained.trunc.v3f64(
<3 x double> %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
store <3 x double> %trunc, <3 x double>* %a
ret void
}
+attributes #0 = { strictfp }
+
declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double>, <2 x double>, metadata, metadata)
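Taken together, the hunks in this file all follow one pattern: each test function definition gains the #0 attribute group, each constrained intrinsic call site is marked #0, and the file ends with attributes #0 = { strictfp }. Below is a minimal sketch (not part of the patch) of the annotated form the hunks converge on, using the constrained fadd intrinsic declared just above; the function name is illustrative only.

; Sketch only: both the defining function and the constrained call site
; carry #0, which resolves to the strictfp attribute group at file end.
define <2 x double> @sketch_constrained_fadd_v2f64(<2 x double> %x, <2 x double> %y) #0 {
entry:
  %sum = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(
                             <2 x double> %x, <2 x double> %y,
                             metadata !"round.dynamic",
                             metadata !"fpexcept.strict") #0
  ret <2 x double> %sum
}

declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata)

attributes #0 = { strictfp }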
diff --git a/llvm/test/CodeGen/X86/constrained-fp80-trunc-ext.ll b/llvm/test/CodeGen/X86/constrained-fp80-trunc-ext.ll
index ae07f84343d..9c408c70cfb 100644
--- a/llvm/test/CodeGen/X86/constrained-fp80-trunc-ext.ll
+++ b/llvm/test/CodeGen/X86/constrained-fp80-trunc-ext.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 -mtriple=x86_64-gnu-linux < %s | FileCheck %s
-define x86_fp80 @constrained_fpext_f32_as_fp80(float %mem) {
+define x86_fp80 @constrained_fpext_f32_as_fp80(float %mem) #0 {
; CHECK-LABEL: constrained_fpext_f32_as_fp80:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss %xmm0, -{{[0-9]+}}(%rsp)
@@ -10,11 +10,11 @@ define x86_fp80 @constrained_fpext_f32_as_fp80(float %mem) {
entry:
%ext = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f32(
float %mem,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret x86_fp80 %ext
}
-define float @constrained_fptrunc_f80_to_f32(x86_fp80 %reg) {
+define float @constrained_fptrunc_f80_to_f32(x86_fp80 %reg) #0 {
; CHECK-LABEL: constrained_fptrunc_f80_to_f32:
; CHECK: # %bb.0:
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
@@ -24,11 +24,11 @@ define float @constrained_fptrunc_f80_to_f32(x86_fp80 %reg) {
%trunc = call float @llvm.experimental.constrained.fptrunc.f32.f80(
x86_fp80 %reg,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %trunc
}
-define x86_fp80 @constrained_fpext_f64_to_f80(double %mem) {
+define x86_fp80 @constrained_fpext_f64_to_f80(double %mem) #0 {
; CHECK-LABEL: constrained_fpext_f64_to_f80:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd %xmm0, -{{[0-9]+}}(%rsp)
@@ -37,11 +37,11 @@ define x86_fp80 @constrained_fpext_f64_to_f80(double %mem) {
entry:
%ext = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f64(
double %mem,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret x86_fp80 %ext
}
-define double @constrained_fptrunc_f80_to_f64(x86_fp80 %reg) {
+define double @constrained_fptrunc_f80_to_f64(x86_fp80 %reg) #0 {
; CHECK-LABEL: constrained_fptrunc_f80_to_f64:
; CHECK: # %bb.0:
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
@@ -51,10 +51,12 @@ define double @constrained_fptrunc_f80_to_f64(x86_fp80 %reg) {
%trunc = call double @llvm.experimental.constrained.fptrunc.f64.f80(
x86_fp80 %reg,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %trunc
}
+attributes #0 = { strictfp }
+
declare x86_fp80 @llvm.experimental.constrained.fpext.f80.f32(float, metadata)
declare x86_fp80 @llvm.experimental.constrained.fpext.f80.f64(double, metadata)
declare float @llvm.experimental.constrained.fptrunc.f32.f80(x86_fp80, metadata, metadata)
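One detail visible in the declarations just above: the constrained fpext intrinsic takes only the exception-behavior metadata, while fptrunc also takes a rounding-mode argument, so the two kinds of call sites in this file differ in arity. A small sketch under the same #0 = strictfp convention (function names are illustrative, not part of the patch):

; Sketch only: fpext carries a single metadata argument (exception behavior);
; fptrunc carries two (rounding mode, then exception behavior).
define x86_fp80 @sketch_fpext_f32_to_f80(float %f) #0 {
entry:
  %ext = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f32(
                          float %f,
                          metadata !"fpexcept.strict") #0
  ret x86_fp80 %ext
}

define float @sketch_fptrunc_f80_to_f32(x86_fp80 %x) #0 {
entry:
  %trunc = call float @llvm.experimental.constrained.fptrunc.f32.f80(
                         x86_fp80 %x,
                         metadata !"round.dynamic",
                         metadata !"fpexcept.strict") #0
  ret float %trunc
}

declare x86_fp80 @llvm.experimental.constrained.fpext.f80.f32(float, metadata)
declare float @llvm.experimental.constrained.fptrunc.f32.f80(x86_fp80, metadata, metadata)

attributes #0 = { strictfp }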
diff --git a/llvm/test/CodeGen/X86/fp-intrinsics.ll b/llvm/test/CodeGen/X86/fp-intrinsics.ll
index cbb1c386a62..a41bd9249c1 100644
--- a/llvm/test/CodeGen/X86/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/fp-intrinsics.ll
@@ -11,13 +11,13 @@
;
; CHECK-LABEL: f1
; COMMON: divsd
-define double @f1() {
+define double @f1() #0 {
entry:
%div = call double @llvm.experimental.constrained.fdiv.f64(
double 1.000000e+00,
double 1.000000e+01,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %div
}
@@ -31,13 +31,13 @@ entry:
;
; CHECK-LABEL: f2
; COMMON: subsd
-define double @f2(double %a) {
+define double @f2(double %a) #0 {
entry:
%sub = call double @llvm.experimental.constrained.fsub.f64(
double %a,
double 0.000000e+00,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %sub
}
@@ -54,21 +54,21 @@ entry:
; COMMON: subsd
; COMMON: mulsd
; COMMON: subsd
-define double @f3(double %a, double %b) {
+define double @f3(double %a, double %b) #0 {
entry:
%sub = call double @llvm.experimental.constrained.fsub.f64(
double -0.000000e+00, double %a,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul = call double @llvm.experimental.constrained.fmul.f64(
double %sub, double %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%ret = call double @llvm.experimental.constrained.fsub.f64(
double -0.000000e+00,
double %mul,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %ret
}
@@ -87,7 +87,7 @@ entry:
; COMMON: testl
; COMMON: jle
; COMMON: addsd
-define double @f4(i32 %n, double %a) {
+define double @f4(i32 %n, double %a) #0 {
entry:
%cmp = icmp sgt i32 %n, 0
br i1 %cmp, label %if.then, label %if.end
@@ -96,7 +96,7 @@ if.then:
%add = call double @llvm.experimental.constrained.fadd.f64(
double 1.000000e+00, double %a,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
br label %if.end
if.end:
@@ -107,112 +107,112 @@ if.end:
; Verify that sqrt(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f5
; COMMON: sqrtsd
-define double @f5() {
+define double @f5() #0 {
entry:
%result = call double @llvm.experimental.constrained.sqrt.f64(double 42.0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that pow(42.1, 3.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f6
; COMMON: pow
-define double @f6() {
+define double @f6() #0 {
entry:
%result = call double @llvm.experimental.constrained.pow.f64(double 42.1,
double 3.0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that powi(42.1, 3) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f7
; COMMON: powi
-define double @f7() {
+define double @f7() #0 {
entry:
%result = call double @llvm.experimental.constrained.powi.f64(double 42.1,
i32 3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that sin(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f8
; COMMON: sin
-define double @f8() {
+define double @f8() #0 {
entry:
%result = call double @llvm.experimental.constrained.sin.f64(double 42.0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that cos(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f9
; COMMON: cos
-define double @f9() {
+define double @f9() #0 {
entry:
%result = call double @llvm.experimental.constrained.cos.f64(double 42.0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that exp(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f10
; COMMON: exp
-define double @f10() {
+define double @f10() #0 {
entry:
%result = call double @llvm.experimental.constrained.exp.f64(double 42.0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that exp2(42.1) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f11
; COMMON: exp2
-define double @f11() {
+define double @f11() #0 {
entry:
%result = call double @llvm.experimental.constrained.exp2.f64(double 42.1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that log(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f12
; COMMON: log
-define double @f12() {
+define double @f12() #0 {
entry:
%result = call double @llvm.experimental.constrained.log.f64(double 42.0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that log10(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f13
; COMMON: log10
-define double @f13() {
+define double @f13() #0 {
entry:
%result = call double @llvm.experimental.constrained.log10.f64(double 42.0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that log2(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f14
; COMMON: log2
-define double @f14() {
+define double @f14() #0 {
entry:
%result = call double @llvm.experimental.constrained.log2.f64(double 42.0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
@@ -220,11 +220,11 @@ entry:
; CHECK-LABEL: f15
; NO-FMA: rint
; HAS-FMA: vroundsd
-define double @f15() {
+define double @f15() #0 {
entry:
%result = call double @llvm.experimental.constrained.rint.f64(double 42.1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
@@ -233,12 +233,12 @@ entry:
; CHECK-LABEL: f16
; NO-FMA: nearbyint
; HAS-FMA: vroundsd
-define double @f16() {
+define double @f16() #0 {
entry:
%result = call double @llvm.experimental.constrained.nearbyint.f64(
double 42.1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
@@ -247,14 +247,14 @@ entry:
; CHECK-LABEL: f17
; FMACALL32: jmp fmaf # TAILCALL
; FMA32: vfmadd213ss
-define float @f17() {
+define float @f17() #0 {
entry:
%result = call float @llvm.experimental.constrained.fma.f32(
float 3.5,
float 3.5,
float 3.5,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %result
}
@@ -263,26 +263,26 @@ entry:
; CHECK-LABEL: f18
; FMACALL64: jmp fma # TAILCALL
; FMA64: vfmadd213sd
-define double @f18() {
+define double @f18() #0 {
entry:
%result = call double @llvm.experimental.constrained.fma.f64(
double 42.1,
double 42.1,
double 42.1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; CHECK-LABEL: f19
; COMMON: fmod
-define double @f19() {
+define double @f19() #0 {
entry:
%rem = call double @llvm.experimental.constrained.frem.f64(
double 1.000000e+00,
double 1.000000e+01,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %rem
}
@@ -312,10 +312,10 @@ entry:
; HAS-FMA: setae
; HAS-FMA: shll
; HAS-FMA: xorl
-define i32 @f20u(double %x) {
+define i32 @f20u(double %x) #0 {
entry:
%result = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i32 %result
}
@@ -324,24 +324,26 @@ entry:
; Verify that no gross errors happen.
; CHECK-LABEL: @f21
; COMMON: cvtsd2ss
-define float @f21() {
+define float @f21() #0 {
entry:
%result = call float @llvm.experimental.constrained.fptrunc.f32.f64(
double 42.1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %result
}
; CHECK-LABEL: @f22
; COMMON: cvtss2sd
-define double @f22(float %x) {
+define double @f22(float %x) #0 {
entry:
%result = call double @llvm.experimental.constrained.fpext.f64.f32(float %x,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
+attributes #0 = { strictfp }
+
@llvm.fp.env = thread_local global i8 zeroinitializer, section "llvm.metadata"
declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata)
diff --git a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-fma.ll b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-fma.ll
index e35e76d2f38..5f39f33dd6a 100644
--- a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-fma.ll
+++ b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-fma.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+fma < %s | FileCheck %s
-define <1 x float> @constrained_vector_fma_v1f32() {
+define <1 x float> @constrained_vector_fma_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fma_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -14,11 +14,11 @@ entry:
<1 x float> <float 2.5>,
<1 x float> <float 4.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %fma
}
-define <2 x double> @constrained_vector_fma_v2f64() {
+define <2 x double> @constrained_vector_fma_v2f64() #0 {
; CHECK-LABEL: constrained_vector_fma_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovapd {{.*#+}} xmm1 = [1.5E+0,5.0E-1]
@@ -31,11 +31,11 @@ entry:
<2 x double> <double 3.5, double 2.5>,
<2 x double> <double 5.5, double 4.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %fma
}
-define <3 x float> @constrained_vector_fma_v3f32() {
+define <3 x float> @constrained_vector_fma_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fma_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -56,11 +56,11 @@ entry:
<3 x float> <float 5.5, float 4.5, float 3.5>,
<3 x float> <float 8.5, float 7.5, float 6.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %fma
}
-define <3 x double> @constrained_vector_fma_v3f64() {
+define <3 x double> @constrained_vector_fma_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fma_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -77,11 +77,11 @@ entry:
<3 x double> <double 5.5, double 4.5, double 3.5>,
<3 x double> <double 8.5, double 7.5, double 6.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %fma
}
-define <4 x double> @constrained_vector_fma_v4f64() {
+define <4 x double> @constrained_vector_fma_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fma_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovapd {{.*#+}} ymm1 = [3.5E+0,2.5E+0,1.5E+0,5.0E-1]
@@ -94,11 +94,11 @@ entry:
<4 x double> <double 7.5, double 6.5, double 5.5, double 4.5>,
<4 x double> <double 11.5, double 10.5, double 9.5, double 8.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %fma
}
-define <4 x float> @constrained_vector_fma_v4f32() {
+define <4 x float> @constrained_vector_fma_v4f32() #0 {
; CHECK-LABEL: constrained_vector_fma_v4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovaps {{.*#+}} xmm1 = [3.5E+0,2.5E+0,1.5E+0,5.0E-1]
@@ -111,11 +111,11 @@ entry:
<4 x float> <float 7.5, float 6.5, float 5.5, float 4.5>,
<4 x float> <float 11.5, float 10.5, float 9.5, float 8.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x float> %fma
}
-define <8 x float> @constrained_vector_fma_v8f32() {
+define <8 x float> @constrained_vector_fma_v8f32() #0 {
; CHECK-LABEL: constrained_vector_fma_v8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [3.5E+0,2.5E+0,1.5E+0,5.0E-1,7.5E+0,6.5E+0,5.5E+0,4.5E+0]
@@ -131,10 +131,12 @@ entry:
<8 x float> <float 11.5, float 10.5, float 9.5, float 8.5,
float 15.5, float 14.5, float 13.5, float 12.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <8 x float> %fma
}
+attributes #0 = { strictfp }
+
; Single width declarations
declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata)
diff --git a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
index 19b2b4864ea..a742d4aa2e7 100644
--- a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
@@ -2,7 +2,7 @@
; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s
; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx < %s | FileCheck --check-prefix=AVX %s
-define <1 x float> @constrained_vector_fdiv_v1f32() {
+define <1 x float> @constrained_vector_fdiv_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fdiv_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -19,11 +19,11 @@ entry:
<1 x float> <float 1.000000e+00>,
<1 x float> <float 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %div
}
-define <2 x double> @constrained_vector_fdiv_v2f64() {
+define <2 x double> @constrained_vector_fdiv_v2f64() #0 {
; CHECK-LABEL: constrained_vector_fdiv_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
@@ -40,11 +40,11 @@ entry:
<2 x double> <double 1.000000e+00, double 2.000000e+00>,
<2 x double> <double 1.000000e+01, double 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %div
}
-define <3 x float> @constrained_vector_fdiv_v3f32() {
+define <3 x float> @constrained_vector_fdiv_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fdiv_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -75,11 +75,11 @@ entry:
<3 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>,
<3 x float> <float 1.000000e+01, float 1.000000e+01, float 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %div
}
-define <3 x double> @constrained_vector_fdiv_v3f64() {
+define <3 x double> @constrained_vector_fdiv_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fdiv_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
@@ -105,11 +105,11 @@ entry:
<3 x double> <double 1.000000e+00, double 2.000000e+00, double 3.000000e+00>,
<3 x double> <double 1.000000e+01, double 1.000000e+01, double 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %div
}
-define <4 x double> @constrained_vector_fdiv_v4f64() {
+define <4 x double> @constrained_vector_fdiv_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fdiv_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm2 = [1.0E+1,1.0E+1]
@@ -131,11 +131,11 @@ entry:
<4 x double> <double 1.000000e+01, double 1.000000e+01,
double 1.000000e+01, double 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %div
}
-define <1 x float> @constrained_vector_frem_v1f32() {
+define <1 x float> @constrained_vector_frem_v1f32() #0 {
; CHECK-LABEL: constrained_vector_frem_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
@@ -162,11 +162,11 @@ entry:
<1 x float> <float 1.000000e+00>,
<1 x float> <float 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %rem
}
-define <2 x double> @constrained_vector_frem_v2f64() {
+define <2 x double> @constrained_vector_frem_v2f64() #0 {
; CHECK-LABEL: constrained_vector_frem_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -205,11 +205,11 @@ entry:
<2 x double> <double 1.000000e+00, double 2.000000e+00>,
<2 x double> <double 1.000000e+01, double 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %rem
}
-define <3 x float> @constrained_vector_frem_v3f32() {
+define <3 x float> @constrained_vector_frem_v3f32() #0 {
; CHECK-LABEL: constrained_vector_frem_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -261,11 +261,11 @@ entry:
<3 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>,
<3 x float> <float 1.000000e+01, float 1.000000e+01, float 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %rem
}
-define <3 x double> @constrained_vector_frem_v3f64() {
+define <3 x double> @constrained_vector_frem_v3f64() #0 {
; CHECK-LABEL: constrained_vector_frem_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -319,11 +319,11 @@ entry:
<3 x double> <double 1.000000e+00, double 2.000000e+00, double 3.000000e+00>,
<3 x double> <double 1.000000e+01, double 1.000000e+01, double 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %rem
}
-define <4 x double> @constrained_vector_frem_v4f64() {
+define <4 x double> @constrained_vector_frem_v4f64() #0 {
; CHECK-LABEL: constrained_vector_frem_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: subq $40, %rsp
@@ -386,11 +386,11 @@ define <4 x double> @constrained_vector_frem_v4f64() {
<4 x double> <double 1.000000e+01, double 1.000000e+01,
double 1.000000e+01, double 1.000000e+01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %rem
}
-define <1 x float> @constrained_vector_fmul_v1f32() {
+define <1 x float> @constrained_vector_fmul_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fmul_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -407,11 +407,11 @@ entry:
<1 x float> <float 0x7FF0000000000000>,
<1 x float> <float 2.000000e+00>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %mul
}
-define <2 x double> @constrained_vector_fmul_v2f64() {
+define <2 x double> @constrained_vector_fmul_v2f64() #0 {
; CHECK-LABEL: constrained_vector_fmul_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
@@ -428,11 +428,11 @@ entry:
<2 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
<2 x double> <double 2.000000e+00, double 3.000000e+00>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %mul
}
-define <3 x float> @constrained_vector_fmul_v3f32() {
+define <3 x float> @constrained_vector_fmul_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fmul_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -460,11 +460,11 @@ entry:
float 0x7FF0000000000000>,
<3 x float> <float 1.000000e+00, float 1.000000e+01, float 1.000000e+02>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %mul
}
-define <3 x double> @constrained_vector_fmul_v3f64() {
+define <3 x double> @constrained_vector_fmul_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fmul_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
@@ -491,11 +491,11 @@ entry:
double 0x7FEFFFFFFFFFFFFF>,
<3 x double> <double 1.000000e+00, double 1.000000e+01, double 1.000000e+02>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %mul
}
-define <4 x double> @constrained_vector_fmul_v4f64() {
+define <4 x double> @constrained_vector_fmul_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fmul_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
@@ -516,11 +516,11 @@ entry:
<4 x double> <double 2.000000e+00, double 3.000000e+00,
double 4.000000e+00, double 5.000000e+00>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %mul
}
-define <1 x float> @constrained_vector_fadd_v1f32() {
+define <1 x float> @constrained_vector_fadd_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fadd_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -537,11 +537,11 @@ entry:
<1 x float> <float 0x7FF0000000000000>,
<1 x float> <float 1.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %add
}
-define <2 x double> @constrained_vector_fadd_v2f64() {
+define <2 x double> @constrained_vector_fadd_v2f64() #0 {
; CHECK-LABEL: constrained_vector_fadd_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -563,11 +563,11 @@ entry:
<2 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
<2 x double> <double 1.000000e+00, double 1.000000e-01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %add
}
-define <3 x float> @constrained_vector_fadd_v3f32() {
+define <3 x float> @constrained_vector_fadd_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fadd_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorps %xmm1, %xmm1
@@ -596,11 +596,11 @@ entry:
float 0xFFFFFFFFE0000000>,
<3 x float> <float 2.0, float 1.0, float 0.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %add
}
-define <3 x double> @constrained_vector_fadd_v3f64() {
+define <3 x double> @constrained_vector_fadd_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fadd_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorpd %xmm2, %xmm2
@@ -629,11 +629,11 @@ entry:
double 0x7FEFFFFFFFFFFFFF>,
<3 x double> <double 2.0, double 1.0, double 0.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %add
}
-define <4 x double> @constrained_vector_fadd_v4f64() {
+define <4 x double> @constrained_vector_fadd_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fadd_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
@@ -666,11 +666,11 @@ entry:
<4 x double> <double 1.000000e+00, double 1.000000e-01,
double 2.000000e+00, double 2.000000e-01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %add
}
-define <1 x float> @constrained_vector_fsub_v1f32() {
+define <1 x float> @constrained_vector_fsub_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fsub_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -687,11 +687,11 @@ entry:
<1 x float> <float 0x7FF0000000000000>,
<1 x float> <float 1.000000e+00>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %sub
}
-define <2 x double> @constrained_vector_fsub_v2f64() {
+define <2 x double> @constrained_vector_fsub_v2f64() #0 {
; CHECK-LABEL: constrained_vector_fsub_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -713,11 +713,11 @@ entry:
<2 x double> <double 0xFFEFFFFFFFFFFFFF, double 0xFFEFFFFFFFFFFFFF>,
<2 x double> <double 1.000000e+00, double 1.000000e-01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %sub
}
-define <3 x float> @constrained_vector_fsub_v3f32() {
+define <3 x float> @constrained_vector_fsub_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fsub_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorps %xmm0, %xmm0
@@ -747,11 +747,11 @@ entry:
float 0xFFFFFFFFE0000000>,
<3 x float> <float 2.0, float 1.0, float 0.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %sub
}
-define <3 x double> @constrained_vector_fsub_v3f64() {
+define <3 x double> @constrained_vector_fsub_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fsub_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xorpd %xmm0, %xmm0
@@ -781,11 +781,11 @@ entry:
double 0xFFEFFFFFFFFFFFFF>,
<3 x double> <double 2.0, double 1.0, double 0.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %sub
}
-define <4 x double> @constrained_vector_fsub_v4f64() {
+define <4 x double> @constrained_vector_fsub_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fsub_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
@@ -818,11 +818,11 @@ entry:
<4 x double> <double 1.000000e+00, double 1.000000e-01,
double 2.000000e+00, double 2.000000e-01>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %sub
}
-define <1 x float> @constrained_vector_sqrt_v1f32() {
+define <1 x float> @constrained_vector_sqrt_v1f32() #0 {
; CHECK-LABEL: constrained_vector_sqrt_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -838,11 +838,11 @@ entry:
%sqrt = call <1 x float> @llvm.experimental.constrained.sqrt.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %sqrt
}
-define <2 x double> @constrained_vector_sqrt_v2f64() {
+define <2 x double> @constrained_vector_sqrt_v2f64() #0 {
; CHECK-LABEL: constrained_vector_sqrt_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sqrtpd {{.*}}(%rip), %xmm0
@@ -856,11 +856,11 @@ entry:
%sqrt = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %sqrt
}
-define <3 x float> @constrained_vector_sqrt_v3f32() {
+define <3 x float> @constrained_vector_sqrt_v3f32() #0 {
; CHECK-LABEL: constrained_vector_sqrt_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -888,11 +888,11 @@ entry:
%sqrt = call <3 x float> @llvm.experimental.constrained.sqrt.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %sqrt
}
-define <3 x double> @constrained_vector_sqrt_v3f64() {
+define <3 x double> @constrained_vector_sqrt_v3f64() #0 {
; CHECK-LABEL: constrained_vector_sqrt_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -915,11 +915,11 @@ entry:
%sqrt = call <3 x double> @llvm.experimental.constrained.sqrt.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %sqrt
}
-define <4 x double> @constrained_vector_sqrt_v4f64() {
+define <4 x double> @constrained_vector_sqrt_v4f64() #0 {
; CHECK-LABEL: constrained_vector_sqrt_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sqrtpd {{.*}}(%rip), %xmm0
@@ -935,11 +935,11 @@ define <4 x double> @constrained_vector_sqrt_v4f64() {
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %sqrt
}
-define <1 x float> @constrained_vector_pow_v1f32() {
+define <1 x float> @constrained_vector_pow_v1f32() #0 {
; CHECK-LABEL: constrained_vector_pow_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
@@ -966,11 +966,11 @@ entry:
<1 x float> <float 42.0>,
<1 x float> <float 3.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %pow
}
-define <2 x double> @constrained_vector_pow_v2f64() {
+define <2 x double> @constrained_vector_pow_v2f64() #0 {
; CHECK-LABEL: constrained_vector_pow_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -1009,11 +1009,11 @@ entry:
<2 x double> <double 42.1, double 42.2>,
<2 x double> <double 3.0, double 3.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %pow
}
-define <3 x float> @constrained_vector_pow_v3f32() {
+define <3 x float> @constrained_vector_pow_v3f32() #0 {
; CHECK-LABEL: constrained_vector_pow_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -1065,11 +1065,11 @@ entry:
<3 x float> <float 42.0, float 43.0, float 44.0>,
<3 x float> <float 3.0, float 3.0, float 3.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %pow
}
-define <3 x double> @constrained_vector_pow_v3f64() {
+define <3 x double> @constrained_vector_pow_v3f64() #0 {
; CHECK-LABEL: constrained_vector_pow_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -1123,11 +1123,11 @@ entry:
<3 x double> <double 42.0, double 42.1, double 42.2>,
<3 x double> <double 3.0, double 3.0, double 3.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %pow
}
-define <4 x double> @constrained_vector_pow_v4f64() {
+define <4 x double> @constrained_vector_pow_v4f64() #0 {
; CHECK-LABEL: constrained_vector_pow_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -1191,11 +1191,11 @@ entry:
<4 x double> <double 3.0, double 3.0,
double 3.0, double 3.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %pow
}
-define <1 x float> @constrained_vector_powi_v1f32() {
+define <1 x float> @constrained_vector_powi_v1f32() #0 {
; CHECK-LABEL: constrained_vector_powi_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
@@ -1222,11 +1222,11 @@ entry:
<1 x float> <float 42.0>,
i32 3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %powi
}
-define <2 x double> @constrained_vector_powi_v2f64() {
+define <2 x double> @constrained_vector_powi_v2f64() #0 {
; CHECK-LABEL: constrained_vector_powi_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -1265,11 +1265,11 @@ entry:
<2 x double> <double 42.1, double 42.2>,
i32 3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %powi
}
-define <3 x float> @constrained_vector_powi_v3f32() {
+define <3 x float> @constrained_vector_powi_v3f32() #0 {
; CHECK-LABEL: constrained_vector_powi_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -1321,11 +1321,11 @@ entry:
<3 x float> <float 42.0, float 43.0, float 44.0>,
i32 3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %powi
}
-define <3 x double> @constrained_vector_powi_v3f64() {
+define <3 x double> @constrained_vector_powi_v3f64() #0 {
; CHECK-LABEL: constrained_vector_powi_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -1379,11 +1379,11 @@ entry:
<3 x double> <double 42.0, double 42.1, double 42.2>,
i32 3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %powi
}
-define <4 x double> @constrained_vector_powi_v4f64() {
+define <4 x double> @constrained_vector_powi_v4f64() #0 {
; CHECK-LABEL: constrained_vector_powi_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -1446,11 +1446,11 @@ entry:
double 42.3, double 42.4>,
i32 3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %powi
}
-define <1 x float> @constrained_vector_sin_v1f32() {
+define <1 x float> @constrained_vector_sin_v1f32() #0 {
; CHECK-LABEL: constrained_vector_sin_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
@@ -1474,11 +1474,11 @@ entry:
%sin = call <1 x float> @llvm.experimental.constrained.sin.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %sin
}
-define <2 x double> @constrained_vector_sin_v2f64() {
+define <2 x double> @constrained_vector_sin_v2f64() #0 {
; CHECK-LABEL: constrained_vector_sin_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -1512,11 +1512,11 @@ entry:
%sin = call <2 x double> @llvm.experimental.constrained.sin.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %sin
}
-define <3 x float> @constrained_vector_sin_v3f32() {
+define <3 x float> @constrained_vector_sin_v3f32() #0 {
; CHECK-LABEL: constrained_vector_sin_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -1561,11 +1561,11 @@ entry:
%sin = call <3 x float> @llvm.experimental.constrained.sin.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %sin
}
-define <3 x double> @constrained_vector_sin_v3f64() {
+define <3 x double> @constrained_vector_sin_v3f64() #0 {
; CHECK-LABEL: constrained_vector_sin_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -1612,11 +1612,11 @@ entry:
%sin = call <3 x double> @llvm.experimental.constrained.sin.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %sin
}
-define <4 x double> @constrained_vector_sin_v4f64() {
+define <4 x double> @constrained_vector_sin_v4f64() #0 {
; CHECK-LABEL: constrained_vector_sin_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -1670,11 +1670,11 @@ entry:
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %sin
}
-define <1 x float> @constrained_vector_cos_v1f32() {
+define <1 x float> @constrained_vector_cos_v1f32() #0 {
; CHECK-LABEL: constrained_vector_cos_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
@@ -1698,11 +1698,11 @@ entry:
%cos = call <1 x float> @llvm.experimental.constrained.cos.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %cos
}
-define <2 x double> @constrained_vector_cos_v2f64() {
+define <2 x double> @constrained_vector_cos_v2f64() #0 {
; CHECK-LABEL: constrained_vector_cos_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -1736,11 +1736,11 @@ entry:
%cos = call <2 x double> @llvm.experimental.constrained.cos.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %cos
}
-define <3 x float> @constrained_vector_cos_v3f32() {
+define <3 x float> @constrained_vector_cos_v3f32() #0 {
; CHECK-LABEL: constrained_vector_cos_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -1785,11 +1785,11 @@ entry:
%cos = call <3 x float> @llvm.experimental.constrained.cos.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %cos
}
-define <3 x double> @constrained_vector_cos_v3f64() {
+define <3 x double> @constrained_vector_cos_v3f64() #0 {
; CHECK-LABEL: constrained_vector_cos_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -1836,11 +1836,11 @@ entry:
%cos = call <3 x double> @llvm.experimental.constrained.cos.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %cos
}
-define <4 x double> @constrained_vector_cos_v4f64() {
+define <4 x double> @constrained_vector_cos_v4f64() #0 {
; CHECK-LABEL: constrained_vector_cos_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -1894,11 +1894,11 @@ entry:
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %cos
}
-define <1 x float> @constrained_vector_exp_v1f32() {
+define <1 x float> @constrained_vector_exp_v1f32() #0 {
; CHECK-LABEL: constrained_vector_exp_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
@@ -1922,11 +1922,11 @@ entry:
%exp = call <1 x float> @llvm.experimental.constrained.exp.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %exp
}
-define <2 x double> @constrained_vector_exp_v2f64() {
+define <2 x double> @constrained_vector_exp_v2f64() #0 {
; CHECK-LABEL: constrained_vector_exp_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -1960,11 +1960,11 @@ entry:
%exp = call <2 x double> @llvm.experimental.constrained.exp.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %exp
}
-define <3 x float> @constrained_vector_exp_v3f32() {
+define <3 x float> @constrained_vector_exp_v3f32() #0 {
; CHECK-LABEL: constrained_vector_exp_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -2009,11 +2009,11 @@ entry:
%exp = call <3 x float> @llvm.experimental.constrained.exp.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %exp
}
-define <3 x double> @constrained_vector_exp_v3f64() {
+define <3 x double> @constrained_vector_exp_v3f64() #0 {
; CHECK-LABEL: constrained_vector_exp_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -2060,11 +2060,11 @@ entry:
%exp = call <3 x double> @llvm.experimental.constrained.exp.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %exp
}
-define <4 x double> @constrained_vector_exp_v4f64() {
+define <4 x double> @constrained_vector_exp_v4f64() #0 {
; CHECK-LABEL: constrained_vector_exp_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -2118,11 +2118,11 @@ entry:
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %exp
}
-define <1 x float> @constrained_vector_exp2_v1f32() {
+define <1 x float> @constrained_vector_exp2_v1f32() #0 {
; CHECK-LABEL: constrained_vector_exp2_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
@@ -2146,11 +2146,11 @@ entry:
%exp2 = call <1 x float> @llvm.experimental.constrained.exp2.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %exp2
}
-define <2 x double> @constrained_vector_exp2_v2f64() {
+define <2 x double> @constrained_vector_exp2_v2f64() #0 {
; CHECK-LABEL: constrained_vector_exp2_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -2184,11 +2184,11 @@ entry:
%exp2 = call <2 x double> @llvm.experimental.constrained.exp2.v2f64(
<2 x double> <double 42.1, double 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %exp2
}
-define <3 x float> @constrained_vector_exp2_v3f32() {
+define <3 x float> @constrained_vector_exp2_v3f32() #0 {
; CHECK-LABEL: constrained_vector_exp2_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -2233,11 +2233,11 @@ entry:
%exp2 = call <3 x float> @llvm.experimental.constrained.exp2.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %exp2
}
-define <3 x double> @constrained_vector_exp2_v3f64() {
+define <3 x double> @constrained_vector_exp2_v3f64() #0 {
; CHECK-LABEL: constrained_vector_exp2_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -2284,11 +2284,11 @@ entry:
%exp2 = call <3 x double> @llvm.experimental.constrained.exp2.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %exp2
}
-define <4 x double> @constrained_vector_exp2_v4f64() {
+define <4 x double> @constrained_vector_exp2_v4f64() #0 {
; CHECK-LABEL: constrained_vector_exp2_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -2342,11 +2342,11 @@ entry:
<4 x double> <double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %exp2
}
-define <1 x float> @constrained_vector_log_v1f32() {
+define <1 x float> @constrained_vector_log_v1f32() #0 {
; CHECK-LABEL: constrained_vector_log_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
@@ -2370,11 +2370,11 @@ entry:
%log = call <1 x float> @llvm.experimental.constrained.log.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %log
}
-define <2 x double> @constrained_vector_log_v2f64() {
+define <2 x double> @constrained_vector_log_v2f64() #0 {
; CHECK-LABEL: constrained_vector_log_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -2408,11 +2408,11 @@ entry:
%log = call <2 x double> @llvm.experimental.constrained.log.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %log
}
-define <3 x float> @constrained_vector_log_v3f32() {
+define <3 x float> @constrained_vector_log_v3f32() #0 {
; CHECK-LABEL: constrained_vector_log_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -2457,11 +2457,11 @@ entry:
%log = call <3 x float> @llvm.experimental.constrained.log.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %log
}
-define <3 x double> @constrained_vector_log_v3f64() {
+define <3 x double> @constrained_vector_log_v3f64() #0 {
; CHECK-LABEL: constrained_vector_log_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -2508,11 +2508,11 @@ entry:
%log = call <3 x double> @llvm.experimental.constrained.log.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %log
}
-define <4 x double> @constrained_vector_log_v4f64() {
+define <4 x double> @constrained_vector_log_v4f64() #0 {
; CHECK-LABEL: constrained_vector_log_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -2566,11 +2566,11 @@ entry:
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %log
}
-define <1 x float> @constrained_vector_log10_v1f32() {
+define <1 x float> @constrained_vector_log10_v1f32() #0 {
; CHECK-LABEL: constrained_vector_log10_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
@@ -2594,11 +2594,11 @@ entry:
%log10 = call <1 x float> @llvm.experimental.constrained.log10.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %log10
}
-define <2 x double> @constrained_vector_log10_v2f64() {
+define <2 x double> @constrained_vector_log10_v2f64() #0 {
; CHECK-LABEL: constrained_vector_log10_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -2632,11 +2632,11 @@ entry:
%log10 = call <2 x double> @llvm.experimental.constrained.log10.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %log10
}
-define <3 x float> @constrained_vector_log10_v3f32() {
+define <3 x float> @constrained_vector_log10_v3f32() #0 {
; CHECK-LABEL: constrained_vector_log10_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -2681,11 +2681,11 @@ entry:
%log10 = call <3 x float> @llvm.experimental.constrained.log10.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %log10
}
-define <3 x double> @constrained_vector_log10_v3f64() {
+define <3 x double> @constrained_vector_log10_v3f64() #0 {
; CHECK-LABEL: constrained_vector_log10_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -2732,11 +2732,11 @@ entry:
%log10 = call <3 x double> @llvm.experimental.constrained.log10.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %log10
}
-define <4 x double> @constrained_vector_log10_v4f64() {
+define <4 x double> @constrained_vector_log10_v4f64() #0 {
; CHECK-LABEL: constrained_vector_log10_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -2790,11 +2790,11 @@ entry:
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %log10
}
-define <1 x float> @constrained_vector_log2_v1f32() {
+define <1 x float> @constrained_vector_log2_v1f32() #0 {
; CHECK-LABEL: constrained_vector_log2_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
@@ -2818,11 +2818,11 @@ entry:
%log2 = call <1 x float> @llvm.experimental.constrained.log2.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %log2
}
-define <2 x double> @constrained_vector_log2_v2f64() {
+define <2 x double> @constrained_vector_log2_v2f64() #0 {
; CHECK-LABEL: constrained_vector_log2_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -2856,11 +2856,11 @@ entry:
%log2 = call <2 x double> @llvm.experimental.constrained.log2.v2f64(
<2 x double> <double 42.0, double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %log2
}
-define <3 x float> @constrained_vector_log2_v3f32() {
+define <3 x float> @constrained_vector_log2_v3f32() #0 {
; CHECK-LABEL: constrained_vector_log2_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -2905,11 +2905,11 @@ entry:
%log2 = call <3 x float> @llvm.experimental.constrained.log2.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %log2
}
-define <3 x double> @constrained_vector_log2_v3f64() {
+define <3 x double> @constrained_vector_log2_v3f64() #0 {
; CHECK-LABEL: constrained_vector_log2_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -2956,11 +2956,11 @@ entry:
%log2 = call <3 x double> @llvm.experimental.constrained.log2.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %log2
}
-define <4 x double> @constrained_vector_log2_v4f64() {
+define <4 x double> @constrained_vector_log2_v4f64() #0 {
; CHECK-LABEL: constrained_vector_log2_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -3014,11 +3014,11 @@ entry:
<4 x double> <double 42.0, double 42.1,
double 42.2, double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %log2
}
-define <1 x float> @constrained_vector_rint_v1f32() {
+define <1 x float> @constrained_vector_rint_v1f32() #0 {
; CHECK-LABEL: constrained_vector_rint_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
@@ -3038,11 +3038,11 @@ entry:
%rint = call <1 x float> @llvm.experimental.constrained.rint.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %rint
}
-define <2 x double> @constrained_vector_rint_v2f64() {
+define <2 x double> @constrained_vector_rint_v2f64() #0 {
; CHECK-LABEL: constrained_vector_rint_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -3066,11 +3066,11 @@ entry:
%rint = call <2 x double> @llvm.experimental.constrained.rint.v2f64(
<2 x double> <double 42.1, double 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %rint
}
-define <3 x float> @constrained_vector_rint_v3f32() {
+define <3 x float> @constrained_vector_rint_v3f32() #0 {
; CHECK-LABEL: constrained_vector_rint_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -3107,11 +3107,11 @@ define <3 x float> @constrained_vector_rint_v3f32() {
%rint = call <3 x float> @llvm.experimental.constrained.rint.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %rint
}
-define <3 x double> @constrained_vector_rint_v3f64() {
+define <3 x double> @constrained_vector_rint_v3f64() #0 {
; CHECK-LABEL: constrained_vector_rint_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -3145,11 +3145,11 @@ entry:
%rint = call <3 x double> @llvm.experimental.constrained.rint.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %rint
}
-define <4 x double> @constrained_vector_rint_v4f64() {
+define <4 x double> @constrained_vector_rint_v4f64() #0 {
; CHECK-LABEL: constrained_vector_rint_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -3184,11 +3184,11 @@ entry:
<4 x double> <double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %rint
}
-define <1 x float> @constrained_vector_nearbyint_v1f32() {
+define <1 x float> @constrained_vector_nearbyint_v1f32() #0 {
; CHECK-LABEL: constrained_vector_nearbyint_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
@@ -3208,11 +3208,11 @@ entry:
%nearby = call <1 x float> @llvm.experimental.constrained.nearbyint.v1f32(
<1 x float> <float 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %nearby
}
-define <2 x double> @constrained_vector_nearbyint_v2f64() {
+define <2 x double> @constrained_vector_nearbyint_v2f64() #0 {
; CHECK-LABEL: constrained_vector_nearbyint_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -3236,11 +3236,11 @@ entry:
%nearby = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(
<2 x double> <double 42.1, double 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %nearby
}
-define <3 x float> @constrained_vector_nearbyint_v3f32() {
+define <3 x float> @constrained_vector_nearbyint_v3f32() #0 {
; CHECK-LABEL: constrained_vector_nearbyint_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -3277,11 +3277,11 @@ entry:
%nearby = call <3 x float> @llvm.experimental.constrained.nearbyint.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %nearby
}
-define <3 x double> @constrained_vector_nearby_v3f64() {
+define <3 x double> @constrained_vector_nearby_v3f64() #0 {
; CHECK-LABEL: constrained_vector_nearby_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -3315,11 +3315,11 @@ entry:
%nearby = call <3 x double> @llvm.experimental.constrained.nearbyint.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %nearby
}
-define <4 x double> @constrained_vector_nearbyint_v4f64() {
+define <4 x double> @constrained_vector_nearbyint_v4f64() #0 {
; CHECK-LABEL: constrained_vector_nearbyint_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -3354,11 +3354,11 @@ entry:
<4 x double> <double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %nearby
}
-define <1 x float> @constrained_vector_maxnum_v1f32() {
+define <1 x float> @constrained_vector_maxnum_v1f32() #0 {
; CHECK-LABEL: constrained_vector_maxnum_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
@@ -3384,11 +3384,11 @@ entry:
%max = call <1 x float> @llvm.experimental.constrained.maxnum.v1f32(
<1 x float> <float 42.0>, <1 x float> <float 41.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %max
}
-define <2 x double> @constrained_vector_maxnum_v2f64() {
+define <2 x double> @constrained_vector_maxnum_v2f64() #0 {
; CHECK-LABEL: constrained_vector_maxnum_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -3427,11 +3427,11 @@ entry:
<2 x double> <double 43.0, double 42.0>,
<2 x double> <double 41.0, double 40.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %max
}
-define <3 x float> @constrained_vector_maxnum_v3f32() {
+define <3 x float> @constrained_vector_maxnum_v3f32() #0 {
; CHECK-LABEL: constrained_vector_maxnum_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -3483,11 +3483,11 @@ entry:
<3 x float> <float 43.0, float 44.0, float 45.0>,
<3 x float> <float 41.0, float 42.0, float 43.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %max
}
-define <3 x double> @constrained_vector_max_v3f64() {
+define <3 x double> @constrained_vector_max_v3f64() #0 {
; CHECK-LABEL: constrained_vector_max_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -3541,11 +3541,11 @@ entry:
<3 x double> <double 43.0, double 44.0, double 45.0>,
<3 x double> <double 40.0, double 41.0, double 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %max
}
-define <4 x double> @constrained_vector_maxnum_v4f64() {
+define <4 x double> @constrained_vector_maxnum_v4f64() #0 {
; CHECK-LABEL: constrained_vector_maxnum_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -3609,11 +3609,11 @@ entry:
<4 x double> <double 40.0, double 41.0,
double 42.0, double 43.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %max
}
-define <1 x float> @constrained_vector_minnum_v1f32() {
+define <1 x float> @constrained_vector_minnum_v1f32() #0 {
; CHECK-LABEL: constrained_vector_minnum_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
@@ -3639,11 +3639,11 @@ define <1 x float> @constrained_vector_minnum_v1f32() {
%min = call <1 x float> @llvm.experimental.constrained.minnum.v1f32(
<1 x float> <float 42.0>, <1 x float> <float 41.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %min
}
-define <2 x double> @constrained_vector_minnum_v2f64() {
+define <2 x double> @constrained_vector_minnum_v2f64() #0 {
; CHECK-LABEL: constrained_vector_minnum_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -3682,11 +3682,11 @@ entry:
<2 x double> <double 43.0, double 42.0>,
<2 x double> <double 41.0, double 40.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %min
}
-define <3 x float> @constrained_vector_minnum_v3f32() {
+define <3 x float> @constrained_vector_minnum_v3f32() #0 {
; CHECK-LABEL: constrained_vector_minnum_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -3738,11 +3738,11 @@ entry:
<3 x float> <float 43.0, float 44.0, float 45.0>,
<3 x float> <float 41.0, float 42.0, float 43.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %min
}
-define <3 x double> @constrained_vector_min_v3f64() {
+define <3 x double> @constrained_vector_min_v3f64() #0 {
; CHECK-LABEL: constrained_vector_min_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -3796,11 +3796,11 @@ entry:
<3 x double> <double 43.0, double 44.0, double 45.0>,
<3 x double> <double 40.0, double 41.0, double 42.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %min
}
-define <4 x double> @constrained_vector_minnum_v4f64() {
+define <4 x double> @constrained_vector_minnum_v4f64() #0 {
; CHECK-LABEL: constrained_vector_minnum_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -3864,11 +3864,11 @@ entry:
<4 x double> <double 40.0, double 41.0,
double 42.0, double 43.0>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %min
}
-define <1 x i32> @constrained_vector_fptosi_v1i32_v1f32() {
+define <1 x i32> @constrained_vector_fptosi_v1i32_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v1i32_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
@@ -3881,11 +3881,11 @@ define <1 x i32> @constrained_vector_fptosi_v1i32_v1f32() {
entry:
%result = call <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f32(
<1 x float><float 42.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x i32> %result
}
-define <2 x i32> @constrained_vector_fptosi_v2i32_v2f32() {
+define <2 x i32> @constrained_vector_fptosi_v2i32_v2f32() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v2i32_v2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
@@ -3905,11 +3905,11 @@ define <2 x i32> @constrained_vector_fptosi_v2i32_v2f32() {
entry:
%result = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f32(
<2 x float><float 42.0, float 43.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x i32> %result
}
-define <3 x i32> @constrained_vector_fptosi_v3i32_v3f32() {
+define <3 x i32> @constrained_vector_fptosi_v3i32_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v3i32_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
@@ -3935,11 +3935,11 @@ entry:
%result = call <3 x i32> @llvm.experimental.constrained.fptosi.v3i32.v3f32(
<3 x float><float 42.0, float 43.0,
float 44.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x i32> %result
}
-define <4 x i32> @constrained_vector_fptosi_v4i32_v4f32() {
+define <4 x i32> @constrained_vector_fptosi_v4i32_v4f32() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v4i32_v4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
@@ -3970,11 +3970,11 @@ entry:
%result = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(
<4 x float><float 42.0, float 43.0,
float 44.0, float 45.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x i32> %result
}
-define <1 x i64> @constrained_vector_fptosi_v1i64_v1f32() {
+define <1 x i64> @constrained_vector_fptosi_v1i64_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v1i64_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
@@ -3987,11 +3987,11 @@ define <1 x i64> @constrained_vector_fptosi_v1i64_v1f32() {
entry:
%result = call <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f32(
<1 x float><float 42.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x i64> %result
}
-define <2 x i64> @constrained_vector_fptosi_v2i64_v2f32() {
+define <2 x i64> @constrained_vector_fptosi_v2i64_v2f32() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v2i64_v2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
@@ -4012,11 +4012,11 @@ define <2 x i64> @constrained_vector_fptosi_v2i64_v2f32() {
entry:
%result = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f32(
<2 x float><float 42.0, float 43.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x i64> %result
}
-define <3 x i64> @constrained_vector_fptosi_v3i64_v3f32() {
+define <3 x i64> @constrained_vector_fptosi_v3i64_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v3i64_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
@@ -4039,11 +4039,11 @@ entry:
%result = call <3 x i64> @llvm.experimental.constrained.fptosi.v3i64.v3f32(
<3 x float><float 42.0, float 43.0,
float 44.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x i64> %result
}
-define <4 x i64> @constrained_vector_fptosi_v4i64_v4f32() {
+define <4 x i64> @constrained_vector_fptosi_v4i64_v4f32() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v4i64_v4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
@@ -4076,11 +4076,11 @@ entry:
%result = call <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(
<4 x float><float 42.0, float 43.0,
float 44.0, float 45.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x i64> %result
}
-define <1 x i32> @constrained_vector_fptosi_v1i32_v1f64() {
+define <1 x i32> @constrained_vector_fptosi_v1i32_v1f64() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v1i32_v1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
@@ -4093,12 +4093,12 @@ define <1 x i32> @constrained_vector_fptosi_v1i32_v1f64() {
entry:
%result = call <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f64(
<1 x double><double 42.1>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x i32> %result
}
-define <2 x i32> @constrained_vector_fptosi_v2i32_v2f64() {
+define <2 x i32> @constrained_vector_fptosi_v2i32_v2f64() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v2i32_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
@@ -4118,11 +4118,11 @@ define <2 x i32> @constrained_vector_fptosi_v2i32_v2f64() {
entry:
%result = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(
<2 x double><double 42.1, double 42.2>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x i32> %result
}
-define <3 x i32> @constrained_vector_fptosi_v3i32_v3f64() {
+define <3 x i32> @constrained_vector_fptosi_v3i32_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v3i32_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
@@ -4148,11 +4148,11 @@ entry:
%result = call <3 x i32> @llvm.experimental.constrained.fptosi.v3i32.v3f64(
<3 x double><double 42.1, double 42.2,
double 42.3>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x i32> %result
}
-define <4 x i32> @constrained_vector_fptosi_v4i32_v4f64() {
+define <4 x i32> @constrained_vector_fptosi_v4i32_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v4i32_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
@@ -4183,11 +4183,11 @@ entry:
%result = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f64(
<4 x double><double 42.1, double 42.2,
double 42.3, double 42.4>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x i32> %result
}
-define <1 x i64> @constrained_vector_fptosi_v1i64_v1f64() {
+define <1 x i64> @constrained_vector_fptosi_v1i64_v1f64() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v1i64_v1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
@@ -4200,11 +4200,11 @@ define <1 x i64> @constrained_vector_fptosi_v1i64_v1f64() {
entry:
%result = call <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(
<1 x double><double 42.1>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x i64> %result
}
-define <2 x i64> @constrained_vector_fptosi_v2i64_v2f64() {
+define <2 x i64> @constrained_vector_fptosi_v2i64_v2f64() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v2i64_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
@@ -4225,11 +4225,11 @@ define <2 x i64> @constrained_vector_fptosi_v2i64_v2f64() {
entry:
%result = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(
<2 x double><double 42.1, double 42.2>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x i64> %result
}
-define <3 x i64> @constrained_vector_fptosi_v3i64_v3f64() {
+define <3 x i64> @constrained_vector_fptosi_v3i64_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v3i64_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
@@ -4252,11 +4252,11 @@ entry:
%result = call <3 x i64> @llvm.experimental.constrained.fptosi.v3i64.v3f64(
<3 x double><double 42.1, double 42.2,
double 42.3>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x i64> %result
}
-define <4 x i64> @constrained_vector_fptosi_v4i64_v4f64() {
+define <4 x i64> @constrained_vector_fptosi_v4i64_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v4i64_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
@@ -4289,11 +4289,11 @@ entry:
%result = call <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f64(
<4 x double><double 42.1, double 42.2,
double 42.3, double 42.4>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x i64> %result
}
-define <1 x i32> @constrained_vector_fptoui_v1i32_v1f32() {
+define <1 x i32> @constrained_vector_fptoui_v1i32_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v1i32_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
@@ -4306,11 +4306,11 @@ define <1 x i32> @constrained_vector_fptoui_v1i32_v1f32() {
entry:
%result = call <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f32(
<1 x float><float 42.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x i32> %result
}
-define <2 x i32> @constrained_vector_fptoui_v2i32_v2f32() {
+define <2 x i32> @constrained_vector_fptoui_v2i32_v2f32() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v2i32_v2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
@@ -4330,11 +4330,11 @@ define <2 x i32> @constrained_vector_fptoui_v2i32_v2f32() {
entry:
%result = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f32(
<2 x float><float 42.0, float 43.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x i32> %result
}
-define <3 x i32> @constrained_vector_fptoui_v3i32_v3f32() {
+define <3 x i32> @constrained_vector_fptoui_v3i32_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v3i32_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
@@ -4360,11 +4360,11 @@ entry:
%result = call <3 x i32> @llvm.experimental.constrained.fptoui.v3i32.v3f32(
<3 x float><float 42.0, float 43.0,
float 44.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x i32> %result
}
-define <4 x i32> @constrained_vector_fptoui_v4i32_v4f32() {
+define <4 x i32> @constrained_vector_fptoui_v4i32_v4f32() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v4i32_v4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
@@ -4395,11 +4395,11 @@ entry:
%result = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(
<4 x float><float 42.0, float 43.0,
float 44.0, float 45.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x i32> %result
}
-define <1 x i64> @constrained_vector_fptoui_v1i64_v1f32() {
+define <1 x i64> @constrained_vector_fptoui_v1i64_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v1i64_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
@@ -4412,11 +4412,11 @@ define <1 x i64> @constrained_vector_fptoui_v1i64_v1f32() {
entry:
%result = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f32(
<1 x float><float 42.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x i64> %result
}
-define <2 x i64> @constrained_vector_fptoui_v2i64_v2f32() {
+define <2 x i64> @constrained_vector_fptoui_v2i64_v2f32() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v2i64_v2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
@@ -4437,11 +4437,11 @@ define <2 x i64> @constrained_vector_fptoui_v2i64_v2f32() {
entry:
%result = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f32(
<2 x float><float 42.0, float 43.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x i64> %result
}
-define <3 x i64> @constrained_vector_fptoui_v3i64_v3f32() {
+define <3 x i64> @constrained_vector_fptoui_v3i64_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v3i64_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
@@ -4464,11 +4464,11 @@ entry:
%result = call <3 x i64> @llvm.experimental.constrained.fptoui.v3i64.v3f32(
<3 x float><float 42.0, float 43.0,
float 44.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x i64> %result
}
-define <4 x i64> @constrained_vector_fptoui_v4i64_v4f32() {
+define <4 x i64> @constrained_vector_fptoui_v4i64_v4f32() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v4i64_v4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
@@ -4501,11 +4501,11 @@ entry:
%result = call <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(
<4 x float><float 42.0, float 43.0,
float 44.0, float 45.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x i64> %result
}
-define <1 x i32> @constrained_vector_fptoui_v1i32_v1f64() {
+define <1 x i32> @constrained_vector_fptoui_v1i32_v1f64() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v1i32_v1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
@@ -4518,11 +4518,11 @@ define <1 x i32> @constrained_vector_fptoui_v1i32_v1f64() {
entry:
%result = call <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(
<1 x double><double 42.1>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x i32> %result
}
-define <2 x i32> @constrained_vector_fptoui_v2i32_v2f64() {
+define <2 x i32> @constrained_vector_fptoui_v2i32_v2f64() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v2i32_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
@@ -4542,11 +4542,11 @@ define <2 x i32> @constrained_vector_fptoui_v2i32_v2f64() {
entry:
%result = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(
<2 x double><double 42.1, double 42.2>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x i32> %result
}
-define <3 x i32> @constrained_vector_fptoui_v3i32_v3f64() {
+define <3 x i32> @constrained_vector_fptoui_v3i32_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v3i32_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
@@ -4572,11 +4572,11 @@ entry:
%result = call <3 x i32> @llvm.experimental.constrained.fptoui.v3i32.v3f64(
<3 x double><double 42.1, double 42.2,
double 42.3>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x i32> %result
}
-define <4 x i32> @constrained_vector_fptoui_v4i32_v4f64() {
+define <4 x i32> @constrained_vector_fptoui_v4i32_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v4i32_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
@@ -4607,11 +4607,11 @@ entry:
%result = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f64(
<4 x double><double 42.1, double 42.2,
double 42.3, double 42.4>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x i32> %result
}
-define <1 x i64> @constrained_vector_fptoui_v1i64_v1f64() {
+define <1 x i64> @constrained_vector_fptoui_v1i64_v1f64() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v1i64_v1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
@@ -4624,11 +4624,11 @@ define <1 x i64> @constrained_vector_fptoui_v1i64_v1f64() {
entry:
%result = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(
<1 x double><double 42.1>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x i64> %result
}
-define <2 x i64> @constrained_vector_fptoui_v2i64_v2f64() {
+define <2 x i64> @constrained_vector_fptoui_v2i64_v2f64() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v2i64_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
@@ -4649,11 +4649,11 @@ define <2 x i64> @constrained_vector_fptoui_v2i64_v2f64() {
entry:
%result = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(
<2 x double><double 42.1, double 42.2>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x i64> %result
}
-define <3 x i64> @constrained_vector_fptoui_v3i64_v3f64() {
+define <3 x i64> @constrained_vector_fptoui_v3i64_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v3i64_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
@@ -4676,11 +4676,11 @@ entry:
%result = call <3 x i64> @llvm.experimental.constrained.fptoui.v3i64.v3f64(
<3 x double><double 42.1, double 42.2,
double 42.3>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x i64> %result
}
-define <4 x i64> @constrained_vector_fptoui_v4i64_v4f64() {
+define <4 x i64> @constrained_vector_fptoui_v4i64_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v4i64_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
@@ -4713,12 +4713,12 @@ entry:
%result = call <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f64(
<4 x double><double 42.1, double 42.2,
double 42.3, double 42.4>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x i64> %result
}
-define <1 x float> @constrained_vector_fptrunc_v1f64() {
+define <1 x float> @constrained_vector_fptrunc_v1f64() #0 {
; CHECK-LABEL: constrained_vector_fptrunc_v1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -4734,11 +4734,11 @@ entry:
%result = call <1 x float> @llvm.experimental.constrained.fptrunc.v1f32.v1f64(
<1 x double><double 42.1>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %result
}
-define <2 x float> @constrained_vector_fptrunc_v2f64() {
+define <2 x float> @constrained_vector_fptrunc_v2f64() #0 {
; CHECK-LABEL: constrained_vector_fptrunc_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -4760,11 +4760,11 @@ entry:
%result = call <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(
<2 x double><double 42.1, double 42.2>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x float> %result
}
-define <3 x float> @constrained_vector_fptrunc_v3f64() {
+define <3 x float> @constrained_vector_fptrunc_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fptrunc_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -4793,11 +4793,11 @@ entry:
<3 x double><double 42.1, double 42.2,
double 42.3>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %result
}
-define <4 x float> @constrained_vector_fptrunc_v4f64() {
+define <4 x float> @constrained_vector_fptrunc_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fptrunc_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -4822,11 +4822,11 @@ entry:
<4 x double><double 42.1, double 42.2,
double 42.3, double 42.4>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x float> %result
}
-define <1 x double> @constrained_vector_fpext_v1f32() {
+define <1 x double> @constrained_vector_fpext_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fpext_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -4841,11 +4841,11 @@ define <1 x double> @constrained_vector_fpext_v1f32() {
entry:
%result = call <1 x double> @llvm.experimental.constrained.fpext.v1f64.v1f32(
<1 x float><float 42.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x double> %result
}
-define <2 x double> @constrained_vector_fpext_v2f32() {
+define <2 x double> @constrained_vector_fpext_v2f32() #0 {
; CHECK-LABEL: constrained_vector_fpext_v2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -4866,11 +4866,11 @@ define <2 x double> @constrained_vector_fpext_v2f32() {
entry:
%result = call <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(
<2 x float><float 42.0, float 43.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %result
}
-define <3 x double> @constrained_vector_fpext_v3f32() {
+define <3 x double> @constrained_vector_fpext_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fpext_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -4898,11 +4898,11 @@ entry:
%result = call <3 x double> @llvm.experimental.constrained.fpext.v3f64.v3f32(
<3 x float><float 42.0, float 43.0,
float 44.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %result
}
-define <4 x double> @constrained_vector_fpext_v4f32() {
+define <4 x double> @constrained_vector_fpext_v4f32() #0 {
; CHECK-LABEL: constrained_vector_fpext_v4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -4925,11 +4925,11 @@ entry:
%result = call <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(
<4 x float><float 42.0, float 43.0,
float 44.0, float 45.0>,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <4 x double> %result
}
-define <1 x float> @constrained_vector_ceil_v1f32() {
+define <1 x float> @constrained_vector_ceil_v1f32() #0 {
; CHECK-LABEL: constrained_vector_ceil_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
@@ -4949,11 +4949,11 @@ entry:
%ceil = call <1 x float> @llvm.experimental.constrained.ceil.v1f32(
<1 x float> <float 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %ceil
}
-define <2 x double> @constrained_vector_ceil_v2f64() {
+define <2 x double> @constrained_vector_ceil_v2f64() #0 {
; CHECK-LABEL: constrained_vector_ceil_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -4977,11 +4977,11 @@ entry:
%ceil = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(
<2 x double> <double 1.1, double 1.9>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %ceil
}
-define <3 x float> @constrained_vector_ceil_v3f32() {
+define <3 x float> @constrained_vector_ceil_v3f32() #0 {
; CHECK-LABEL: constrained_vector_ceil_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -5018,11 +5018,11 @@ entry:
%ceil = call <3 x float> @llvm.experimental.constrained.ceil.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %ceil
}
-define <3 x double> @constrained_vector_ceil_v3f64() {
+define <3 x double> @constrained_vector_ceil_v3f64() #0 {
; CHECK-LABEL: constrained_vector_ceil_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -5056,11 +5056,11 @@ entry:
%ceil = call <3 x double> @llvm.experimental.constrained.ceil.v3f64(
<3 x double> <double 1.1, double 1.9, double 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %ceil
}
-define <1 x float> @constrained_vector_floor_v1f32() {
+define <1 x float> @constrained_vector_floor_v1f32() #0 {
; CHECK-LABEL: constrained_vector_floor_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
@@ -5080,12 +5080,12 @@ entry:
%floor = call <1 x float> @llvm.experimental.constrained.floor.v1f32(
<1 x float> <float 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %floor
}
-define <2 x double> @constrained_vector_floor_v2f64() {
+define <2 x double> @constrained_vector_floor_v2f64() #0 {
; CHECK-LABEL: constrained_vector_floor_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -5109,11 +5109,11 @@ entry:
%floor = call <2 x double> @llvm.experimental.constrained.floor.v2f64(
<2 x double> <double 1.1, double 1.9>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %floor
}
-define <3 x float> @constrained_vector_floor_v3f32() {
+define <3 x float> @constrained_vector_floor_v3f32() #0 {
; CHECK-LABEL: constrained_vector_floor_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -5150,11 +5150,11 @@ entry:
%floor = call <3 x float> @llvm.experimental.constrained.floor.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %floor
}
-define <3 x double> @constrained_vector_floor_v3f64() {
+define <3 x double> @constrained_vector_floor_v3f64() #0 {
; CHECK-LABEL: constrained_vector_floor_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -5188,11 +5188,11 @@ entry:
%floor = call <3 x double> @llvm.experimental.constrained.floor.v3f64(
<3 x double> <double 1.1, double 1.9, double 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %floor
}
-define <1 x float> @constrained_vector_round_v1f32() {
+define <1 x float> @constrained_vector_round_v1f32() #0 {
; CHECK-LABEL: constrained_vector_round_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
@@ -5216,11 +5216,11 @@ entry:
%round = call <1 x float> @llvm.experimental.constrained.round.v1f32(
<1 x float> <float 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %round
}
-define <2 x double> @constrained_vector_round_v2f64() {
+define <2 x double> @constrained_vector_round_v2f64() #0 {
; CHECK-LABEL: constrained_vector_round_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -5254,11 +5254,11 @@ entry:
%round = call <2 x double> @llvm.experimental.constrained.round.v2f64(
<2 x double> <double 1.1, double 1.9>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %round
}
-define <3 x float> @constrained_vector_round_v3f32() {
+define <3 x float> @constrained_vector_round_v3f32() #0 {
; CHECK-LABEL: constrained_vector_round_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -5303,12 +5303,12 @@ entry:
%round = call <3 x float> @llvm.experimental.constrained.round.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %round
}
-define <3 x double> @constrained_vector_round_v3f64() {
+define <3 x double> @constrained_vector_round_v3f64() #0 {
; CHECK-LABEL: constrained_vector_round_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -5355,11 +5355,11 @@ entry:
%round = call <3 x double> @llvm.experimental.constrained.round.v3f64(
<3 x double> <double 1.1, double 1.9, double 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %round
}
-define <1 x float> @constrained_vector_trunc_v1f32() {
+define <1 x float> @constrained_vector_trunc_v1f32() #0 {
; CHECK-LABEL: constrained_vector_trunc_v1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
@@ -5379,11 +5379,11 @@ entry:
%trunc = call <1 x float> @llvm.experimental.constrained.trunc.v1f32(
<1 x float> <float 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <1 x float> %trunc
}
-define <2 x double> @constrained_vector_trunc_v2f64() {
+define <2 x double> @constrained_vector_trunc_v2f64() #0 {
; CHECK-LABEL: constrained_vector_trunc_v2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -5407,11 +5407,11 @@ entry:
%trunc = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
<2 x double> <double 1.1, double 1.9>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <2 x double> %trunc
}
-define <3 x float> @constrained_vector_trunc_v3f32() {
+define <3 x float> @constrained_vector_trunc_v3f32() #0 {
; CHECK-LABEL: constrained_vector_trunc_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $40, %rsp
@@ -5448,11 +5448,11 @@ entry:
%trunc = call <3 x float> @llvm.experimental.constrained.trunc.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x float> %trunc
}
-define <3 x double> @constrained_vector_trunc_v3f64() {
+define <3 x double> @constrained_vector_trunc_v3f64() #0 {
; CHECK-LABEL: constrained_vector_trunc_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: subq $24, %rsp
@@ -5486,10 +5486,11 @@ entry:
%trunc = call <3 x double> @llvm.experimental.constrained.trunc.v3f64(
<3 x double> <double 1.1, double 1.9, double 1.5>,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret <3 x double> %trunc
}
+attributes #0 = { strictfp }
; Single width declarations
declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata)
diff --git a/llvm/test/Feature/fp-intrinsics.ll b/llvm/test/Feature/fp-intrinsics.ll
index 40641472d1c..355cbd90e2e 100644
--- a/llvm/test/Feature/fp-intrinsics.ll
+++ b/llvm/test/Feature/fp-intrinsics.ll
@@ -3,13 +3,13 @@
; Test to verify that constants aren't folded when the rounding mode is unknown.
; CHECK-LABEL: @f1
; CHECK: call double @llvm.experimental.constrained.fdiv.f64
-define double @f1() {
+define double @f1() #0 {
entry:
%div = call double @llvm.experimental.constrained.fdiv.f64(
double 1.000000e+00,
double 1.000000e+01,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %div
}
@@ -23,12 +23,12 @@ entry:
;
; CHECK-LABEL: @f2
; CHECK: call double @llvm.experimental.constrained.fsub.f64
-define double @f2(double %a) {
+define double @f2(double %a) #0 {
entry:
%div = call double @llvm.experimental.constrained.fsub.f64(
double %a, double 0.000000e+00,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %div
}
@@ -45,21 +45,21 @@ entry:
; CHECK: call double @llvm.experimental.constrained.fsub.f64
; CHECK: call double @llvm.experimental.constrained.fmul.f64
; CHECK: call double @llvm.experimental.constrained.fsub.f64
-define double @f3(double %a, double %b) {
+define double @f3(double %a, double %b) #0 {
entry:
%sub = call double @llvm.experimental.constrained.fsub.f64(
double -0.000000e+00, double %a,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%mul = call double @llvm.experimental.constrained.fmul.f64(
double %sub, double %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
%ret = call double @llvm.experimental.constrained.fsub.f64(
double -0.000000e+00,
double %mul,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %ret
}
@@ -77,7 +77,7 @@ entry:
; CHECK-LABEL: @f4
; CHECK-NOT: select
; CHECK: br i1 %cmp
-define double @f4(i32 %n, double %a) {
+define double @f4(i32 %n, double %a) #0 {
entry:
%cmp = icmp sgt i32 %n, 0
br i1 %cmp, label %if.then, label %if.end
@@ -86,7 +86,7 @@ if.then:
%add = call double @llvm.experimental.constrained.fadd.f64(
double 1.000000e+00, double %a,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
br label %if.end
if.end:
@@ -97,123 +97,123 @@ if.end:
; Verify that sqrt(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f5
; CHECK: call double @llvm.experimental.constrained.sqrt
-define double @f5() {
+define double @f5() #0 {
entry:
%result = call double @llvm.experimental.constrained.sqrt.f64(double 42.0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that pow(42.1, 3.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f6
; CHECK: call double @llvm.experimental.constrained.pow
-define double @f6() {
+define double @f6() #0 {
entry:
%result = call double @llvm.experimental.constrained.pow.f64(double 42.1,
double 3.0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that powi(42.1, 3) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f7
; CHECK: call double @llvm.experimental.constrained.powi
-define double @f7() {
+define double @f7() #0 {
entry:
%result = call double @llvm.experimental.constrained.powi.f64(double 42.1,
i32 3,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that sin(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f8
; CHECK: call double @llvm.experimental.constrained.sin
-define double @f8() {
+define double @f8() #0 {
entry:
%result = call double @llvm.experimental.constrained.sin.f64(double 42.0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that cos(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f9
; CHECK: call double @llvm.experimental.constrained.cos
-define double @f9() {
+define double @f9() #0 {
entry:
%result = call double @llvm.experimental.constrained.cos.f64(double 42.0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that exp(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f10
; CHECK: call double @llvm.experimental.constrained.exp
-define double @f10() {
+define double @f10() #0 {
entry:
%result = call double @llvm.experimental.constrained.exp.f64(double 42.0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that exp2(42.1) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f11
; CHECK: call double @llvm.experimental.constrained.exp2
-define double @f11() {
+define double @f11() #0 {
entry:
%result = call double @llvm.experimental.constrained.exp2.f64(double 42.1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that log(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f12
; CHECK: call double @llvm.experimental.constrained.log
-define double @f12() {
+define double @f12() #0 {
entry:
%result = call double @llvm.experimental.constrained.log.f64(double 42.0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that log10(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f13
; CHECK: call double @llvm.experimental.constrained.log10
-define double @f13() {
+define double @f13() #0 {
entry:
%result = call double @llvm.experimental.constrained.log10.f64(double 42.0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that log2(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f14
; CHECK: call double @llvm.experimental.constrained.log2
-define double @f14() {
+define double @f14() #0 {
entry:
%result = call double @llvm.experimental.constrained.log2.f64(double 42.0,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
; Verify that rint(42.1) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f15
; CHECK: call double @llvm.experimental.constrained.rint
-define double @f15() {
+define double @f15() #0 {
entry:
%result = call double @llvm.experimental.constrained.rint.f64(double 42.1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
@@ -221,12 +221,12 @@ entry:
; unknown.
; CHECK-LABEL: f16
; CHECK: call double @llvm.experimental.constrained.nearbyint
-define double @f16() {
+define double @f16() #0 {
entry:
%result = call double @llvm.experimental.constrained.nearbyint.f64(
double 42.1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
@@ -234,11 +234,11 @@ entry:
; unknown.
; CHECK-LABEL: f17
; CHECK: call double @llvm.experimental.constrained.fma
-define double @f17() {
+define double @f17() #0 {
entry:
%result = call double @llvm.experimental.constrained.fma.f64(double 42.1, double 42.1, double 42.1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
@@ -246,11 +246,11 @@ entry:
; unknown.
; CHECK-LABEL: f18
; CHECK: call zeroext i32 @llvm.experimental.constrained.fptoui
-define zeroext i32 @f18() {
+define zeroext i32 @f18() #0 {
entry:
%result = call zeroext i32 @llvm.experimental.constrained.fptoui.i32.f64(
double 42.1,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i32 %result
}
@@ -258,10 +258,10 @@ entry:
; unknown.
; CHECK-LABEL: f19
; CHECK: call i32 @llvm.experimental.constrained.fptosi
-define i32 @f19() {
+define i32 @f19() #0 {
entry:
%result = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double 42.1,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret i32 %result
}
@@ -269,12 +269,12 @@ entry:
; unknown.
; CHECK-LABEL: f20
; CHECK: call float @llvm.experimental.constrained.fptrunc
-define float @f20() {
+define float @f20() #0 {
entry:
%result = call float @llvm.experimental.constrained.fptrunc.f32.f64(
double 42.1,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret float %result
}
@@ -282,13 +282,15 @@ entry:
; unknown.
; CHECK-LABEL: f21
; CHECK: call double @llvm.experimental.constrained.fpext
-define double @f21() {
+define double @f21() #0 {
entry:
%result = call double @llvm.experimental.constrained.fpext.f64.f32(float 42.0,
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %result
}
+attributes #0 = { strictfp }
+
@llvm.fp.env = thread_local global i8 zeroinitializer, section "llvm.metadata"
declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)
diff --git a/llvm/test/Transforms/DCE/calls-errno.ll b/llvm/test/Transforms/DCE/calls-errno.ll
index 20ee0d06d3a..376b8d1e588 100644
--- a/llvm/test/Transforms/DCE/calls-errno.ll
+++ b/llvm/test/Transforms/DCE/calls-errno.ll
@@ -76,10 +76,6 @@ entry:
; CHECK-NEXT: %cos3 = call double @cos(double 0.000000e+00)
%cos3 = call double @cos(double 0.000000e+00) nobuiltin
-; cos(1) strictfp sets FP status flags
-; CHECK-NEXT: %cos4 = call double @cos(double 1.000000e+00)
- %cos4 = call double @cos(double 1.000000e+00) strictfp
-
; pow(0, 1) is 0
%pow1 = call double @pow(double 0x7FF0000000000000, double 1.000000e+00)
@@ -97,3 +93,16 @@ entry:
; CHECK-NEXT: ret void
ret void
}
+
+define void @Tstrict() strictfp {
+entry:
+; CHECK-LABEL: @Tstrict(
+; CHECK-NEXT: entry:
+
+; cos(1) strictfp sets FP status flags
+; CHECK-NEXT: %cos4 = call double @cos(double 1.000000e+00)
+ %cos4 = call double @cos(double 1.000000e+00) strictfp
+
+; CHECK-NEXT: ret void
+ ret void
+}
diff --git a/llvm/test/Transforms/InstCombine/constant-fold-libfunc.ll b/llvm/test/Transforms/InstCombine/constant-fold-libfunc.ll
index 5d1aa821ea1..af33d989544 100644
--- a/llvm/test/Transforms/InstCombine/constant-fold-libfunc.ll
+++ b/llvm/test/Transforms/InstCombine/constant-fold-libfunc.ll
@@ -23,7 +23,7 @@ define double @test_acos_nobuiltin() {
; Check that we don't constant fold strictfp results that require rounding.
-define double @test_acos_strictfp() {
+define double @test_acos_strictfp() strictfp {
; CHECK-LABEL: @test_acos_strictfp
%pi = call double @acos(double -1.000000e+00) strictfp
; CHECK: call double @acos(double -1.000000e+00)
diff --git a/llvm/test/Transforms/InstCombine/memcpy-1.ll b/llvm/test/Transforms/InstCombine/memcpy-1.ll
index ef020726dbe..789e5ebd746 100644
--- a/llvm/test/Transforms/InstCombine/memcpy-1.ll
+++ b/llvm/test/Transforms/InstCombine/memcpy-1.ll
@@ -20,7 +20,7 @@ define i8* @test_simplify1(i8* %mem1, i8* %mem2, i32 %size) {
; Verify that the strictfp attr doesn't block this optimization.
-define i8* @test_simplify2(i8* %mem1, i8* %mem2, i32 %size) {
+define i8* @test_simplify2(i8* %mem1, i8* %mem2, i32 %size) strictfp {
; CHECK-LABEL: @test_simplify2(
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[MEM1:%.*]], i8* align 1 [[MEM2:%.*]], i32 [[SIZE:%.*]], i1 false)
; CHECK-NEXT: ret i8* [[MEM1]]
diff --git a/llvm/test/Verifier/fp-intrinsics.ll b/llvm/test/Verifier/fp-intrinsics.ll
index 36e5442bf3c..12a3c9d6582 100644
--- a/llvm/test/Verifier/fp-intrinsics.ll
+++ b/llvm/test/Verifier/fp-intrinsics.ll
@@ -15,64 +15,66 @@ declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadat
; CHECK1: attributes #[[ATTR]] = { inaccessiblememonly nounwind willreturn }
; Note: FP exceptions aren't usually caught through normal unwind mechanisms,
; but we may want to revisit this for asynchronous exception handling.
-define double @f1(double %a, double %b) {
+define double @f1(double %a, double %b) #0 {
entry:
%fadd = call double @llvm.experimental.constrained.fadd.f64(
double %a, double %b,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %fadd
}
-define double @f1u(double %a) {
+define double @f1u(double %a) #0 {
entry:
%fsqrt = call double @llvm.experimental.constrained.sqrt.f64(
double %a,
metadata !"round.dynamic",
- metadata !"fpexcept.strict")
+ metadata !"fpexcept.strict") #0
ret double %fsqrt
}
; Test an illegal value for the rounding mode argument.
; CHECK2: invalid rounding mode argument
-;T2: define double @f2(double %a, double %b) {
+;T2: define double @f2(double %a, double %b) #0 {
;T2: entry:
;T2: %fadd = call double @llvm.experimental.constrained.fadd.f64(
;T2: double %a, double %b,
;T2: metadata !"round.dynomite",
-;T2: metadata !"fpexcept.strict")
+;T2: metadata !"fpexcept.strict") #0
;T2: ret double %fadd
;T2: }
; Test an illegal value for the exception behavior argument.
; CHECK3: invalid exception behavior argument
-;T3: define double @f3(double %a, double %b) {
+;T3: define double @f3(double %a, double %b) #0 {
;T3: entry:
;T3: %fadd = call double @llvm.experimental.constrained.fadd.f64(
;T3: double %a, double %b,
;T3: metadata !"round.dynamic",
-;T3: metadata !"fpexcept.restrict")
+;T3: metadata !"fpexcept.restrict") #0
;T3: ret double %fadd
;T3: }
; Test an illegal value for the rounding mode argument.
; CHECK4: invalid rounding mode argument
-;T4: define double @f4(double %a) {
+;T4: define double @f4(double %a) #0 {
;T4: entry:
;T4: %fadd = call double @llvm.experimental.constrained.sqrt.f64(
;T4: double %a,
;T4: metadata !"round.dynomite",
-;T4: metadata !"fpexcept.strict")
+;T4: metadata !"fpexcept.strict") #0
;T4: ret double %fadd
;T4: }
; Test an illegal value for the exception behavior argument.
; CHECK5: invalid exception behavior argument
-;T5: define double @f5(double %a) {
+;T5: define double @f5(double %a) #0 {
;T5: entry:
;T5: %fadd = call double @llvm.experimental.constrained.sqrt.f64(
;T5: double %a,
;T5: metadata !"round.dynamic",
-;T5: metadata !"fpexcept.restrict")
+;T5: metadata !"fpexcept.restrict") #0
;T5: ret double %fadd
;T5: }
+
+attributes #0 = { strictfp }