; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse -O3 | FileCheck %s --check-prefixes=X87

; Tests that scalar constrained FP add/sub/mul/div (f32 and f64) select the
; expected single instruction on SSE, AVX/AVX-512, and x87 targets, with the
; strict-FP semantics preserved (no reassociation/folding).

declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata)
declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata)
declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)
declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata)
declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata)

; Constrained f64 add -> addsd / vaddsd / faddl.
define x86_regcallcc double @f1(double %a, double %b) #0 {
; SSE-LABEL: f1:
; SSE:       # %bb.0:
; SSE-NEXT:    addsd %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: f1:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
;
; X87-LABEL: f1:
; X87:       # %bb.0:
; X87-NEXT:    fldl {{[0-9]+}}(%esp)
; X87-NEXT:    faddl {{[0-9]+}}(%esp)
; X87-NEXT:    retl
  %ret = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %ret
}

; Constrained f32 add -> addss / vaddss / fadds.
define x86_regcallcc float @f2(float %a, float %b) #0 {
; SSE-LABEL: f2:
; SSE:       # %bb.0:
; SSE-NEXT:    addss %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: f2:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
;
; X87-LABEL: f2:
; X87:       # %bb.0:
; X87-NEXT:    flds {{[0-9]+}}(%esp)
; X87-NEXT:    fadds {{[0-9]+}}(%esp)
; X87-NEXT:    retl
  %ret = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %ret
}

; Constrained f64 sub -> subsd / vsubsd / fsubl.
define x86_regcallcc double @f3(double %a, double %b) #0 {
; SSE-LABEL: f3:
; SSE:       # %bb.0:
; SSE-NEXT:    subsd %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: f3:
; AVX:       # %bb.0:
; AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
;
; X87-LABEL: f3:
; X87:       # %bb.0:
; X87-NEXT:    fldl {{[0-9]+}}(%esp)
; X87-NEXT:    fsubl {{[0-9]+}}(%esp)
; X87-NEXT:    retl
  %ret = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %ret
}

; Constrained f32 sub -> subss / vsubss / fsubs.
define x86_regcallcc float @f4(float %a, float %b) #0 {
; SSE-LABEL: f4:
; SSE:       # %bb.0:
; SSE-NEXT:    subss %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: f4:
; AVX:       # %bb.0:
; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
;
; X87-LABEL: f4:
; X87:       # %bb.0:
; X87-NEXT:    flds {{[0-9]+}}(%esp)
; X87-NEXT:    fsubs {{[0-9]+}}(%esp)
; X87-NEXT:    retl
  %ret = call float @llvm.experimental.constrained.fsub.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %ret
}

; Constrained f64 mul -> mulsd / vmulsd / fmull.
define x86_regcallcc double @f5(double %a, double %b) #0 {
; SSE-LABEL: f5:
; SSE:       # %bb.0:
; SSE-NEXT:    mulsd %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: f5:
; AVX:       # %bb.0:
; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
;
; X87-LABEL: f5:
; X87:       # %bb.0:
; X87-NEXT:    fldl {{[0-9]+}}(%esp)
; X87-NEXT:    fmull {{[0-9]+}}(%esp)
; X87-NEXT:    retl
  %ret = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %ret
}

; Constrained f32 mul -> mulss / vmulss / fmuls.
define x86_regcallcc float @f6(float %a, float %b) #0 {
; SSE-LABEL: f6:
; SSE:       # %bb.0:
; SSE-NEXT:    mulss %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: f6:
; AVX:       # %bb.0:
; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
;
; X87-LABEL: f6:
; X87:       # %bb.0:
; X87-NEXT:    flds {{[0-9]+}}(%esp)
; X87-NEXT:    fmuls {{[0-9]+}}(%esp)
; X87-NEXT:    retl
  %ret = call float @llvm.experimental.constrained.fmul.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %ret
}

; Constrained f64 div -> divsd / vdivsd / fdivl.
define x86_regcallcc double @f7(double %a, double %b) #0 {
; SSE-LABEL: f7:
; SSE:       # %bb.0:
; SSE-NEXT:    divsd %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: f7:
; AVX:       # %bb.0:
; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
;
; X87-LABEL: f7:
; X87:       # %bb.0:
; X87-NEXT:    fldl {{[0-9]+}}(%esp)
; X87-NEXT:    fdivl {{[0-9]+}}(%esp)
; X87-NEXT:    retl
  %ret = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %ret
}

; Constrained f32 div -> divss / vdivss / fdivs.
define x86_regcallcc float @f8(float %a, float %b) #0 {
; SSE-LABEL: f8:
; SSE:       # %bb.0:
; SSE-NEXT:    divss %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: f8:
; AVX:       # %bb.0:
; AVX-NEXT:    vdivss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
;
; X87-LABEL: f8:
; X87:       # %bb.0:
; X87-NEXT:    flds {{[0-9]+}}(%esp)
; X87-NEXT:    fdivs {{[0-9]+}}(%esp)
; X87-NEXT:    retl
  %ret = call float @llvm.experimental.constrained.fdiv.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %ret
}

attributes #0 = { strictfp }