-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp                    | 146
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.h                      |   3
-rw-r--r--  llvm/test/CodeGen/X86/fp-intrinsics.ll                     | 547
-rw-r--r--  llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll  | 342
4 files changed, 866 insertions, 172 deletions
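
The patch adds X86 lowering for the STRICT_FP_TO_SINT and STRICT_FP_TO_UINT nodes that the constrained conversion intrinsics produce. As a minimal sketch of the kind of input this enables (the function name is hypothetical; #0 stands for the tests' strictfp attribute group, as in the tests below):

; Sketch only: a strict fp-to-int conversion in the style of the tests.
define i32 @strict_fptosi_sketch(double %x) #0 {
entry:
  %result = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %x,
                                               metadata !"fpexcept.strict") #0
  ret i32 %result
}
declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
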
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index bcb091eb52e..ef3a02d159d 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -249,19 +249,27 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// Promote i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
// this operation.
- setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
- setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
- setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
+ // FIXME: setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i8, Promote);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
+ setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i16, Custom);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
+ setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
// In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
// are Legal, f80 is custom lowered.
- setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
+ setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
// Handle FP_TO_UINT by promoting the destination to a larger signed
// conversion.
- setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
- setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
- setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
- setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
+ setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
+ // FIXME: setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i8, Promote);
+ setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
+ // FIXME: setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i16, Promote);
+ setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
+ setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
+ setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
+ setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
}
// TODO: when we have SSE, these could be more efficient, by using movd/movq.
@@ -18899,11 +18907,13 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
// result.
SDValue
X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
- bool IsSigned) const {
+ bool IsSigned, SDValue &Chain) const {
+ bool IsStrict = Op->isStrictFPOpcode();
SDLoc DL(Op);
EVT DstTy = Op.getValueType();
- EVT TheVT = Op.getOperand(0).getValueType();
+ SDValue Value = Op.getOperand(IsStrict ? 1 : 0);
+ EVT TheVT = Value.getValueType();
auto PtrVT = getPointerTy(DAG.getDataLayout());
if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
@@ -18917,6 +18927,8 @@ X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
// used for the 32-bit subtarget, but also for f80 on a 64-bit target.
bool UnsignedFixup = !IsSigned && DstTy == MVT::i64;
+ // FIXME: This does not generate an invalid exception if the input does not
+ // fit in i32. PR44019
if (!IsSigned && DstTy != MVT::i64) {
// Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
// The low 32 bits of the fist result will have the correct uint32 result.
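
In other words, an fp-to-uint32 is performed as a 64-bit signed FIST and only the low half of the result is kept. A minimal non-strict IR sketch of the same trick (the function name is hypothetical):

; Sketch only: uint32 conversion via a signed 64-bit conversion.
define i32 @u32_via_s64_sketch(double %x) {
entry:
  %wide = fptosi double %x to i64    ; 64-bit signed FIST
  %res = trunc i64 %wide to i32      ; low 32 bits hold the uint32 value
  ret i32 %res
}
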
@@ -18935,8 +18947,11 @@ X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
int SSFI = MF.getFrameInfo().CreateStackObject(MemSize, MemSize, false);
SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
- SDValue Chain = DAG.getEntryNode();
- SDValue Value = Op.getOperand(0);
+ if (IsStrict)
+ Chain = Op.getOperand(0);
+ else
+ Chain = DAG.getEntryNode();
+
SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.
if (UnsignedFixup) {
@@ -18955,6 +18970,7 @@ X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
// Being a power of 2, Thresh is exactly representable in all FP formats.
// For X87 we'd like to use the smallest FP type for this constant, but
// for DAG type consistency we have to match the FP operand type.
+ // FIXME: This code generates a spurious inexact exception for 1.0.
APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
@@ -18980,7 +18996,14 @@ X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
DAG.getConstant(0, DL, MVT::i64),
DAG.getConstant(APInt::getSignMask(64),
DL, MVT::i64));
- SDValue Sub = DAG.getNode(ISD::FSUB, DL, TheVT, Value, ThreshVal);
+ SDValue Sub;
+ if (IsStrict) {
+ Sub = DAG.getNode(ISD::STRICT_FSUB, DL, { TheVT, MVT::Other},
+ { Chain, Value, ThreshVal });
+ Chain = Sub.getValue(1);
+ } else
+ Sub = DAG.getNode(ISD::FSUB, DL, TheVT, Value, ThreshVal);
+
Cmp = DAG.getSetCC(DL, getSetCCResultType(DAG.getDataLayout(),
*DAG.getContext(), TheVT),
Value, ThreshVal, ISD::SETLT);
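
The unsigned fixup being built here is the usual 2^63 split: inputs below the threshold convert directly, larger inputs are reduced by 2^63 before the signed conversion, and the sign bit is XORed back in afterwards. A non-strict scalar sketch in LLVM IR (illustrative only; the helper itself threads a chain through STRICT_FSUB as shown above):

; Sketch only: scalar equivalent of the unsigned-i64 fixup, without the
; strict chain bookkeeping. 0x43E0000000000000 is 2^63 as a double.
define i64 @u64_fixup_sketch(double %x) {
entry:
  %big = fcmp oge double %x, 0x43E0000000000000    ; %x >= 2^63?
  %sub = fsub double %x, 0x43E0000000000000        ; %x - 2^63
  %src = select i1 %big, double %sub, double %x
  %int = fptosi double %src to i64                 ; signed conversion (FIST)
  %adj = select i1 %big, i64 -9223372036854775808, i64 0
  %res = xor i64 %int, %adj                        ; restore the top bit
  ret i64 %res
}
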
@@ -19014,6 +19037,7 @@ X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
Ops, DstTy, MMO);
SDValue Res = DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MPI);
+ Chain = Res.getValue(1);
// If we need an unsigned fixup, XOR the result with adjust.
if (UnsignedFixup)
@@ -19509,9 +19533,11 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
}
SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
- bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
+ bool IsStrict = Op->isStrictFPOpcode();
+ bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
+ Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
MVT VT = Op.getSimpleValueType();
- SDValue Src = Op.getOperand(0);
+ SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
MVT SrcVT = Src.getSimpleValueType();
SDLoc dl(Op);
@@ -19522,6 +19548,8 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
else
LC = RTLIB::getFPTOUINT(SrcVT, VT);
+ // FIXME: Strict fp!
+ assert(!IsStrict && "Unhandled strict operation!");
MakeLibCallOptions CallOptions;
return makeLibCall(DAG, LC, VT, Src, CallOptions, SDLoc(Op)).first;
}
@@ -19540,6 +19568,8 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
DAG.getUNDEF(MVT::v8f64),
Src, DAG.getIntPtrConstant(0, dl));
}
+ // FIXME: Strict fp!
+ assert(!IsStrict && "Unhandled strict operation!");
SDValue Res = DAG.getNode(Opc, dl, ResVT, Src);
Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
@@ -19548,6 +19578,8 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
if (VT == MVT::v2i64 && SrcVT == MVT::v2f32) {
+ // FIXME: Strict fp!
+ assert(!IsStrict && "Unhandled strict operation!");
return DAG.getNode(IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI, dl, VT,
DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
DAG.getUNDEF(MVT::v2f32)));
@@ -19572,9 +19604,21 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
assert(VT == MVT::i32 && "Unexpected VT!");
// Promote i32 to i64 and use a signed operation on 64-bit targets.
+ // FIXME: This does not generate an invalid exception if the input does not
+ // fit in i32. PR44019
if (Subtarget.is64Bit()) {
- SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i64, Src);
- return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
+ SDValue Res, Chain;
+ if (IsStrict) {
+ Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { MVT::i64, MVT::Other},
+ { Op.getOperand(0), Src });
+ Chain = Res.getValue(1);
+ } else
+ Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i64, Src);
+
+ Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
+ if (IsStrict)
+ return DAG.getMergeValues({ Res, Chain }, dl);
+ return Res;
}
// Use default expansion for SSE1/2 targets without SSE3. With SSE3 we can
@@ -19584,10 +19628,22 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
}
// Promote i16 to i32 if we can use a SSE operation.
+ // FIXME: This does not generate an invalid exception if the input does not
+ // fit in i16. PR44019
if (VT == MVT::i16 && UseSSEReg) {
assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
- SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
- return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
+ SDValue Res, Chain;
+ if (IsStrict) {
+ Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { MVT::i32, MVT::Other},
+ { Op.getOperand(0), Src });
+ Chain = Res.getValue(1);
+ } else
+ Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
+
+ Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
+ if (IsStrict)
+ return DAG.getMergeValues({ Res, Chain }, dl);
+ return Res;
}
// If this is a FP_TO_SINT using SSEReg we're done.
@@ -19595,8 +19651,12 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
return Op;
// Fall back to X87.
- if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned))
+ SDValue Chain;
+ if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned, Chain)) {
+ if (IsStrict)
+ return DAG.getMergeValues({V, Chain}, dl);
return V;
+ }
llvm_unreachable("Expected FP_TO_INTHelper to handle all remaining cases.");
}
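
Both promotions above share one shape: do the strict conversion at a wider signed type, truncate, and merge the chain back in for strict nodes; that widening is also why the FIXMEs note the missing invalid exception for out-of-range inputs. Roughly, for the i16 case (sketch only; the name and the #0 strictfp attribute group mirror the tests):

; Sketch only: i16 result computed via a strict i32 conversion plus truncate.
define i16 @promote_sketch(double %x) #0 {
entry:
  %wide = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %x,
                                               metadata !"fpexcept.strict") #0
  %res = trunc i32 %wide to i16
  ret i16 %res
}
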
@@ -27716,7 +27776,9 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SIGN_EXTEND_VECTOR_INREG:
return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
case ISD::FP_TO_SINT:
- case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
+ case ISD::STRICT_FP_TO_SINT:
+ case ISD::FP_TO_UINT:
+ case ISD::STRICT_FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
case ISD::STRICT_FP_ROUND: return LowerSTRICT_FP_ROUND(Op, DAG);
@@ -28130,10 +28192,14 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
return;
}
case ISD::FP_TO_SINT:
- case ISD::FP_TO_UINT: {
- bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
+ case ISD::STRICT_FP_TO_SINT:
+ case ISD::FP_TO_UINT:
+ case ISD::STRICT_FP_TO_UINT: {
+ bool IsStrict = N->isStrictFPOpcode();
+ bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
+ N->getOpcode() == ISD::STRICT_FP_TO_SINT;
EVT VT = N->getValueType(0);
- SDValue Src = N->getOperand(0);
+ SDValue Src = N->getOperand(IsStrict ? 1 : 0);
EVT SrcVT = Src.getValueType();
if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
@@ -28144,13 +28210,19 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
VT.getVectorNumElements());
- SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
+ SDValue Res;
+ SDValue Chain;
+ if (IsStrict) {
+        Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
+                          { PromoteVT, MVT::Other },
+                          { N->getOperand(0), Src });
+ Chain = Res.getValue(1);
+ } else
+ Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
// Preserve what we know about the size of the original result. Except
// when the result is v2i32 since we can't widen the assert.
if (PromoteVT != MVT::v2i32)
- Res = DAG.getNode(N->getOpcode() == ISD::FP_TO_UINT ? ISD::AssertZext
- : ISD::AssertSext,
+ Res = DAG.getNode(!IsSigned ? ISD::AssertZext : ISD::AssertSext,
dl, PromoteVT, Res,
DAG.getValueType(VT.getVectorElementType()));
@@ -28165,6 +28237,8 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
ConcatOps[0] = Res;
Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
Results.push_back(Res);
+ if (IsStrict)
+ Results.push_back(Chain);
return;
}
@@ -28183,6 +28257,8 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
// legalization to v8i32<-v8f64.
return;
}
+ // FIXME: Strict fp.
+ assert(!IsStrict && "Missing STRICT_FP_TO_SINT support!");
unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
SDValue Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
Results.push_back(Res);
@@ -28210,14 +28286,26 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
DAG.getConstantFP(0.0, dl, VecInVT), Src,
ZeroIdx);
- Res = DAG.getNode(N->getOpcode(), SDLoc(N), VecVT, Res);
+ SDValue Chain;
+ if (IsStrict) {
+ SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
+ Res = DAG.getNode(N->getOpcode(), SDLoc(N), Tys, N->getOperand(0), Res);
+ Chain = Res.getValue(1);
+ } else
+ Res = DAG.getNode(N->getOpcode(), SDLoc(N), VecVT, Res);
Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
Results.push_back(Res);
+ if (IsStrict)
+ Results.push_back(Chain);
return;
}
- if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned))
+ SDValue Chain;
+ if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, Chain)) {
Results.push_back(V);
+ if (IsStrict)
+ Results.push_back(Chain);
+ }
return;
}
case ISD::SINT_TO_FP: {
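
For the scalar-via-vector path in ReplaceNodeResults above, the strict node is rebuilt with an extra MVT::Other result and its chain is pushed as a second result. The underlying trick, expressed as non-strict IR (types and the function name are illustrative):

; Sketch only: convert a scalar by placing it in lane 0 of a zero vector,
; converting the whole vector, and extracting lane 0.
define i64 @scalar_via_vector_sketch(double %x) {
entry:
  %vec = insertelement <2 x double> zeroinitializer, double %x, i32 0
  %cvt = fptosi <2 x double> %vec to <2 x i64>
  %res = extractelement <2 x i64> %cvt, i32 0
  ret i64 %res
}
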
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 184983d30ac..576f2fa627c 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1316,7 +1316,8 @@ namespace llvm {
unsigned getAddressSpace(void) const;
- SDValue FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool isSigned) const;
+ SDValue FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool isSigned,
+ SDValue &Chain) const;
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/test/CodeGen/X86/fp-intrinsics.ll b/llvm/test/CodeGen/X86/fp-intrinsics.ll
index 9841b9fc105..a321be50427 100644
--- a/llvm/test/CodeGen/X86/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/fp-intrinsics.ll
@@ -1,4 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O3 -mtriple=i686-pc-linux -mattr=+cmov < %s | FileCheck %s --check-prefix=COMMON --check-prefix=X87
; RUN: llc -O3 -mtriple=i686-pc-linux -mattr=sse2 < %s | FileCheck %s --check-prefix=COMMON --check-prefix=X86-SSE
; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s --check-prefix=COMMON --check-prefix=SSE
; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx < %s | FileCheck %s --check-prefix=COMMON --check-prefix=AVX --check-prefix=AVX1
@@ -14,6 +15,12 @@
; }
;
define double @f1() #0 {
+; X87-LABEL: f1:
+; X87: # %bb.0: # %entry
+; X87-NEXT: fld1
+; X87-NEXT: fdivs {{\.LCPI.*}}
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f1:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -55,6 +62,12 @@ entry:
; }
;
define double @f2(double %a) #0 {
+; X87-LABEL: f2:
+; X87: # %bb.0: # %entry
+; X87-NEXT: fldz
+; X87-NEXT: fsubrl {{[0-9]+}}(%esp)
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f2:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -98,6 +111,16 @@ entry:
; }
;
define double @f3(double %a, double %b) #0 {
+; X87-LABEL: f3:
+; X87: # %bb.0: # %entry
+; X87-NEXT: fldz
+; X87-NEXT: fchs
+; X87-NEXT: fld %st(0)
+; X87-NEXT: fsubl {{[0-9]+}}(%esp)
+; X87-NEXT: fmull {{[0-9]+}}(%esp)
+; X87-NEXT: fsubrp %st, %st(1)
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f3:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -159,6 +182,17 @@ entry:
;
;
define double @f4(i32 %n, double %a) #0 {
+; X87-LABEL: f4:
+; X87: # %bb.0: # %entry
+; X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X87-NEXT: cmpl $0, {{[0-9]+}}(%esp)
+; X87-NEXT: jle .LBB3_2
+; X87-NEXT: # %bb.1: # %if.then
+; X87-NEXT: fld1
+; X87-NEXT: faddp %st, %st(1)
+; X87-NEXT: .LBB3_2: # %if.end
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f4:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -210,6 +244,12 @@ if.end:
; Verify that sqrt(42.0) isn't simplified when the rounding mode is unknown.
define double @f5() #0 {
+; X87-LABEL: f5:
+; X87: # %bb.0: # %entry
+; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: fsqrt
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f5:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -242,6 +282,19 @@ entry:
; Verify that pow(42.1, 3.0) isn't simplified when the rounding mode is unknown.
define double @f6() #0 {
+; X87-LABEL: f6:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $28, %esp
+; X87-NEXT: .cfi_def_cfa_offset 32
+; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: fstpl {{[0-9]+}}(%esp)
+; X87-NEXT: fldl {{\.LCPI.*}}
+; X87-NEXT: fstpl (%esp)
+; X87-NEXT: calll pow
+; X87-NEXT: addl $28, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f6:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $28, %esp
@@ -286,6 +339,18 @@ entry:
; Verify that powi(42.1, 3) isn't simplified when the rounding mode is unknown.
define double @f7() #0 {
+; X87-LABEL: f7:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 16
+; X87-NEXT: fldl {{\.LCPI.*}}
+; X87-NEXT: fstpl (%esp)
+; X87-NEXT: movl $3, {{[0-9]+}}(%esp)
+; X87-NEXT: calll __powidf2
+; X87-NEXT: addl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f7:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -329,6 +394,17 @@ entry:
; Verify that sin(42.0) isn't simplified when the rounding mode is unknown.
define double @f8() #0 {
+; X87-LABEL: f8:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 16
+; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: fstpl (%esp)
+; X87-NEXT: calll sin
+; X87-NEXT: addl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f8:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -368,6 +444,17 @@ entry:
; Verify that cos(42.0) isn't simplified when the rounding mode is unknown.
define double @f9() #0 {
+; X87-LABEL: f9:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 16
+; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: fstpl (%esp)
+; X87-NEXT: calll cos
+; X87-NEXT: addl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f9:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -407,6 +494,17 @@ entry:
; Verify that exp(42.0) isn't simplified when the rounding mode is unknown.
define double @f10() #0 {
+; X87-LABEL: f10:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 16
+; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: fstpl (%esp)
+; X87-NEXT: calll exp
+; X87-NEXT: addl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f10:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -446,6 +544,17 @@ entry:
; Verify that exp2(42.1) isn't simplified when the rounding mode is unknown.
define double @f11() #0 {
+; X87-LABEL: f11:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 16
+; X87-NEXT: fldl {{\.LCPI.*}}
+; X87-NEXT: fstpl (%esp)
+; X87-NEXT: calll exp2
+; X87-NEXT: addl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f11:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -485,6 +594,17 @@ entry:
; Verify that log(42.0) isn't simplified when the rounding mode is unknown.
define double @f12() #0 {
+; X87-LABEL: f12:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 16
+; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: fstpl (%esp)
+; X87-NEXT: calll log
+; X87-NEXT: addl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f12:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -524,6 +644,17 @@ entry:
; Verify that log10(42.0) isn't simplified when the rounding mode is unknown.
define double @f13() #0 {
+; X87-LABEL: f13:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 16
+; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: fstpl (%esp)
+; X87-NEXT: calll log10
+; X87-NEXT: addl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f13:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -563,6 +694,17 @@ entry:
; Verify that log2(42.0) isn't simplified when the rounding mode is unknown.
define double @f14() #0 {
+; X87-LABEL: f14:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 16
+; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: fstpl (%esp)
+; X87-NEXT: calll log2
+; X87-NEXT: addl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f14:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -602,6 +744,17 @@ entry:
; Verify that rint(42.1) isn't simplified when the rounding mode is unknown.
define double @f15() #0 {
+; X87-LABEL: f15:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 16
+; X87-NEXT: fldl {{\.LCPI.*}}
+; X87-NEXT: fstpl (%esp)
+; X87-NEXT: calll rint
+; X87-NEXT: addl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f15:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -638,6 +791,17 @@ entry:
; Verify that nearbyint(42.1) isn't simplified when the rounding mode is
; unknown.
define double @f16() #0 {
+; X87-LABEL: f16:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 16
+; X87-NEXT: fldl {{\.LCPI.*}}
+; X87-NEXT: fstpl (%esp)
+; X87-NEXT: calll nearbyint
+; X87-NEXT: addl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f16:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -673,6 +837,19 @@ entry:
}
define double @f19() #0 {
+; X87-LABEL: f19:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $28, %esp
+; X87-NEXT: .cfi_def_cfa_offset 32
+; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: fstpl {{[0-9]+}}(%esp)
+; X87-NEXT: movl $1072693248, {{[0-9]+}}(%esp) # imm = 0x3FF00000
+; X87-NEXT: movl $0, (%esp)
+; X87-NEXT: calll fmod
+; X87-NEXT: addl $28, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f19:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $28, %esp
@@ -717,9 +894,72 @@ entry:
}
; Verify that fptosi(%x) isn't simplified when the rounding mode is
-; unknown. The expansion should have only one conversion instruction.
+; unknown.
+; Verify that no gross errors happen.
+; FIXME: The SSE/AVX code does not raise an invalid exception for all values
+; that don't fit in i16.
+define i16 @f20s16(double %x) #0 {
+; X87-LABEL: f20s16:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $8, %esp
+; X87-NEXT: .cfi_def_cfa_offset 12
+; X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X87-NEXT: fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: orl $3072, %eax # imm = 0xC00
+; X87-NEXT: movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT: fldcw {{[0-9]+}}(%esp)
+; X87-NEXT: fistps {{[0-9]+}}(%esp)
+; X87-NEXT: fldcw {{[0-9]+}}(%esp)
+; X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: addl $8, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
+; X86-SSE-LABEL: f20s16:
+; X86-SSE: # %bb.0: # %entry
+; X86-SSE-NEXT: cvttsd2si {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-SSE-NEXT: retl
+;
+; SSE-LABEL: f20s16:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: cvttsd2si %xmm0, %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: f20s16:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttsd2si %xmm0, %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-NEXT: retq
+entry:
+ %result = call i16 @llvm.experimental.constrained.fptosi.i16.f64(double %x,
+ metadata !"fpexcept.strict") #0
+ ret i16 %result
+}
+
+; Verify that fptosi(%x) isn't simplified when the rounding mode is
+; unknown.
; Verify that no gross errors happen.
define i32 @f20s(double %x) #0 {
+; X87-LABEL: f20s:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $8, %esp
+; X87-NEXT: .cfi_def_cfa_offset 12
+; X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X87-NEXT: fnstcw (%esp)
+; X87-NEXT: movzwl (%esp), %eax
+; X87-NEXT: orl $3072, %eax # imm = 0xC00
+; X87-NEXT: movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT: fldcw {{[0-9]+}}(%esp)
+; X87-NEXT: fistpl {{[0-9]+}}(%esp)
+; X87-NEXT: fldcw (%esp)
+; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: addl $8, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f20s:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: cvttsd2si {{[0-9]+}}(%esp), %eax
@@ -741,9 +981,86 @@ entry:
}
; Verify that fptosi(%x) isn't simplified when the rounding mode is
-; unknown. The expansion should have only one conversion instruction.
+; unknown.
+; Verify that no gross errors happen.
+; FIXME: This code generates spurious inexact exceptions.
+define i64 @f20s64(double %x) #0 {
+; X87-LABEL: f20s64:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $20, %esp
+; X87-NEXT: .cfi_def_cfa_offset 24
+; X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X87-NEXT: fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: orl $3072, %eax # imm = 0xC00
+; X87-NEXT: movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT: fldcw {{[0-9]+}}(%esp)
+; X87-NEXT: fistpll {{[0-9]+}}(%esp)
+; X87-NEXT: fldcw {{[0-9]+}}(%esp)
+; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X87-NEXT: addl $20, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
+; X86-SSE-LABEL: f20s64:
+; X86-SSE: # %bb.0: # %entry
+; X86-SSE-NEXT: subl $20, %esp
+; X86-SSE-NEXT: .cfi_def_cfa_offset 24
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: fnstcw {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT: orl $3072, %eax # imm = 0xC00
+; X86-SSE-NEXT: movw %ax, {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SSE-NEXT: addl $20, %esp
+; X86-SSE-NEXT: .cfi_def_cfa_offset 4
+; X86-SSE-NEXT: retl
+;
+; SSE-LABEL: f20s64:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: cvttsd2si %xmm0, %rax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: f20s64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vcvttsd2si %xmm0, %rax
+; AVX-NEXT: retq
+entry:
+ %result = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %x,
+ metadata !"fpexcept.strict") #0
+ ret i64 %result
+}
+
+; Verify that fptoui(%x) isn't simplified when the rounding mode is
+; unknown.
; Verify that no gross errors happen.
+; FIXME: The X87/SSE/AVX1 code does not raise an invalid exception for all
+; values that don't fit in i32. The AVX512 code does.
define i32 @f20u(double %x) #0 {
+; X87-LABEL: f20u:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $20, %esp
+; X87-NEXT: .cfi_def_cfa_offset 24
+; X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X87-NEXT: fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: orl $3072, %eax # imm = 0xC00
+; X87-NEXT: movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT: fldcw {{[0-9]+}}(%esp)
+; X87-NEXT: fistpll {{[0-9]+}}(%esp)
+; X87-NEXT: fldcw {{[0-9]+}}(%esp)
+; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: addl $20, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f20u:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -765,6 +1082,94 @@ define i32 @f20u(double %x) #0 {
;
; SSE-LABEL: f20u:
; SSE: # %bb.0: # %entry
+; SSE-NEXT: cvttsd2si %xmm0, %rax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: f20u:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttsd2si %xmm0, %rax
+; AVX1-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: f20u:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttsd2usi %xmm0, %eax
+; AVX512-NEXT: retq
+entry:
+ %result = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x,
+ metadata !"fpexcept.strict") #0
+ ret i32 %result
+}
+
+; Verify that fptoui(%x) isn't simplified when the rounding mode is
+; unknown.
+; Verify that no gross errors happen.
+; FIXME: This code generates spurious inexact exceptions.
+define i64 @f20u64(double %x) #0 {
+; X87-LABEL: f20u64:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $20, %esp
+; X87-NEXT: .cfi_def_cfa_offset 24
+; X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: fld %st(1)
+; X87-NEXT: fsub %st(1), %st
+; X87-NEXT: xorl %edx, %edx
+; X87-NEXT: fxch %st(1)
+; X87-NEXT: fucompi %st(2), %st
+; X87-NEXT: fcmovnbe %st(1), %st
+; X87-NEXT: fstp %st(1)
+; X87-NEXT: setbe %dl
+; X87-NEXT: fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: orl $3072, %eax # imm = 0xC00
+; X87-NEXT: movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT: fldcw {{[0-9]+}}(%esp)
+; X87-NEXT: fistpll {{[0-9]+}}(%esp)
+; X87-NEXT: fldcw {{[0-9]+}}(%esp)
+; X87-NEXT: shll $31, %edx
+; X87-NEXT: xorl {{[0-9]+}}(%esp), %edx
+; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT: addl $20, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
+; X86-SSE-LABEL: f20u64:
+; X86-SSE: # %bb.0: # %entry
+; X86-SSE-NEXT: subl $20, %esp
+; X86-SSE-NEXT: .cfi_def_cfa_offset 24
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE-NEXT: movapd %xmm0, %xmm2
+; X86-SSE-NEXT: subsd %xmm1, %xmm2
+; X86-SSE-NEXT: movapd %xmm0, %xmm3
+; X86-SSE-NEXT: cmpltsd %xmm1, %xmm3
+; X86-SSE-NEXT: movapd %xmm3, %xmm4
+; X86-SSE-NEXT: andnpd %xmm2, %xmm4
+; X86-SSE-NEXT: andpd %xmm0, %xmm3
+; X86-SSE-NEXT: orpd %xmm4, %xmm3
+; X86-SSE-NEXT: movlpd %xmm3, {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: fnstcw {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT: orl $3072, %eax # imm = 0xC00
+; X86-SSE-NEXT: movw %ax, {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: xorl %edx, %edx
+; X86-SSE-NEXT: ucomisd %xmm0, %xmm1
+; X86-SSE-NEXT: setbe %dl
+; X86-SSE-NEXT: shll $31, %edx
+; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %edx
+; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT: addl $20, %esp
+; X86-SSE-NEXT: .cfi_def_cfa_offset 4
+; X86-SSE-NEXT: retl
+;
+; SSE-LABEL: f20u64:
+; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: movapd %xmm0, %xmm2
; SSE-NEXT: cmpltsd %xmm1, %xmm2
@@ -775,49 +1180,51 @@ define i32 @f20u(double %x) #0 {
; SSE-NEXT: subsd %xmm1, %xmm0
; SSE-NEXT: andnpd %xmm0, %xmm3
; SSE-NEXT: orpd %xmm3, %xmm2
-; SSE-NEXT: cvttsd2si %xmm2, %ecx
+; SSE-NEXT: cvttsd2si %xmm2, %rcx
; SSE-NEXT: setae %al
-; SSE-NEXT: shll $31, %eax
-; SSE-NEXT: xorl %ecx, %eax
+; SSE-NEXT: shlq $63, %rax
+; SSE-NEXT: xorq %rcx, %rax
; SSE-NEXT: retq
;
-; AVX1-LABEL: f20u:
+; AVX1-LABEL: f20u64:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX1-NEXT: vcmpltsd %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vsubsd %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm3, %xmm2
-; AVX1-NEXT: vcvttsd2si %xmm2, %ecx
+; AVX1-NEXT: vcvttsd2si %xmm2, %rcx
; AVX1-NEXT: xorl %eax, %eax
; AVX1-NEXT: vucomisd %xmm1, %xmm0
; AVX1-NEXT: setae %al
-; AVX1-NEXT: shll $31, %eax
-; AVX1-NEXT: xorl %ecx, %eax
+; AVX1-NEXT: shlq $63, %rax
+; AVX1-NEXT: xorq %rcx, %rax
; AVX1-NEXT: retq
;
-; AVX512-LABEL: f20u:
+; AVX512-LABEL: f20u64:
; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX512-NEXT: vcmpltsd %xmm1, %xmm0, %k1
-; AVX512-NEXT: vsubsd %xmm1, %xmm0, %xmm2
-; AVX512-NEXT: vmovsd %xmm0, %xmm2, %xmm2 {%k1}
-; AVX512-NEXT: vcvttsd2si %xmm2, %ecx
-; AVX512-NEXT: xorl %eax, %eax
-; AVX512-NEXT: vucomisd %xmm1, %xmm0
-; AVX512-NEXT: setae %al
-; AVX512-NEXT: shll $31, %eax
-; AVX512-NEXT: xorl %ecx, %eax
+; AVX512-NEXT: vcvttsd2usi %xmm0, %rax
; AVX512-NEXT: retq
entry:
- %result = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x,
+ %result = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %x,
metadata !"fpexcept.strict") #0
- ret i32 %result
+ ret i64 %result
}
; Verify that round(42.1) isn't simplified when the rounding mode is
; unknown.
; Verify that no gross errors happen.
define float @f21() #0 {
+; X87-LABEL: f21:
+; X87: # %bb.0: # %entry
+; X87-NEXT: pushl %eax
+; X87-NEXT: .cfi_def_cfa_offset 8
+; X87-NEXT: fldl {{\.LCPI.*}}
+; X87-NEXT: fstps (%esp)
+; X87-NEXT: flds (%esp)
+; X87-NEXT: popl %eax
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f21:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: pushl %eax
@@ -850,6 +1257,11 @@ entry:
}
define double @f22(float %x) #0 {
+; X87-LABEL: f22:
+; X87: # %bb.0: # %entry
+; X87-NEXT: flds {{[0-9]+}}(%esp)
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f22:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -878,6 +1290,17 @@ entry:
}
define i32 @f23(double %x) #0 {
+; X87-LABEL: f23:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 16
+; X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X87-NEXT: fstpl (%esp)
+; X87-NEXT: calll lrint
+; X87-NEXT: addl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f23:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -914,6 +1337,17 @@ entry:
}
define i32 @f24(float %x) #0 {
+; X87-LABEL: f24:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 16
+; X87-NEXT: flds {{[0-9]+}}(%esp)
+; X87-NEXT: fstps (%esp)
+; X87-NEXT: calll lrintf
+; X87-NEXT: addl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f24:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -950,6 +1384,17 @@ entry:
}
define i64 @f25(double %x) #0 {
+; X87-LABEL: f25:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 16
+; X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X87-NEXT: fstpl (%esp)
+; X87-NEXT: calll llrint
+; X87-NEXT: addl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f25:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -986,6 +1431,17 @@ entry:
}
define i64 @f26(float %x) {
+; X87-LABEL: f26:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 16
+; X87-NEXT: flds {{[0-9]+}}(%esp)
+; X87-NEXT: fstps (%esp)
+; X87-NEXT: calll llrintf
+; X87-NEXT: addl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f26:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -1022,6 +1478,17 @@ entry:
}
define i32 @f27(double %x) #0 {
+; X87-LABEL: f27:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 16
+; X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X87-NEXT: fstpl (%esp)
+; X87-NEXT: calll lround
+; X87-NEXT: addl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f27:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -1057,6 +1524,17 @@ entry:
}
define i32 @f28(float %x) #0 {
+; X87-LABEL: f28:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 16
+; X87-NEXT: flds {{[0-9]+}}(%esp)
+; X87-NEXT: fstps (%esp)
+; X87-NEXT: calll lroundf
+; X87-NEXT: addl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f28:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -1092,6 +1570,17 @@ entry:
}
define i64 @f29(double %x) #0 {
+; X87-LABEL: f29:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 16
+; X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X87-NEXT: fstpl (%esp)
+; X87-NEXT: calll llround
+; X87-NEXT: addl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f29:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -1127,6 +1616,17 @@ entry:
}
define i64 @f30(float %x) #0 {
+; X87-LABEL: f30:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 16
+; X87-NEXT: flds {{[0-9]+}}(%esp)
+; X87-NEXT: fstps (%esp)
+; X87-NEXT: calll llroundf
+; X87-NEXT: addl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
; X86-SSE-LABEL: f30:
; X86-SSE: # %bb.0: # %entry
; X86-SSE-NEXT: subl $12, %esp
@@ -1181,8 +1681,11 @@ declare double @llvm.experimental.constrained.log10.f64(double, metadata, metada
declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
+declare i16 @llvm.experimental.constrained.fptosi.i16.f64(double, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
+declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
+declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)
declare i32 @llvm.experimental.constrained.lrint.i32.f64(double, metadata, metadata)
diff --git a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
index 6850410e074..9f8f066ec9c 100644
--- a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
@@ -4365,13 +4365,20 @@ entry:
define <1 x i32> @constrained_vector_fptoui_v1i32_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v1i32_v1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-NEXT: retq
;
-; AVX-LABEL: constrained_vector_fptoui_v1i32_v1f32:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptoui_v1i32_v1f32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v1i32_v1f32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: retq
entry:
%result = call <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f32(
<1 x float><float 42.0>,
@@ -4382,20 +4389,28 @@ entry:
define <2 x i32> @constrained_vector_fptoui_v2i32_v2f32() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v2i32_v2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm1
-; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: retq
;
-; AVX-LABEL: constrained_vector_fptoui_v2i32_v2f32:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %ecx
-; AVX-NEXT: vmovd %ecx, %xmm0
-; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptoui_v2i32_v2f32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rcx
+; AVX1-NEXT: vmovd %ecx, %xmm0
+; AVX1-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v2i32_v2f32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX512-NEXT: retq
entry:
%result = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f32(
<2 x float><float 42.0, float 43.0>,
@@ -4406,25 +4421,35 @@ entry:
define <3 x i32> @constrained_vector_fptoui_v3i32_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v3i32_v3f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm1
-; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
-; AVX-LABEL: constrained_vector_fptoui_v3i32_v3f32:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %ecx
-; AVX-NEXT: vmovd %ecx, %xmm0
-; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
-; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptoui_v3i32_v3f32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rcx
+; AVX1-NEXT: vmovd %ecx, %xmm0
+; AVX1-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v3i32_v3f32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX512-NEXT: retq
entry:
%result = call <3 x i32> @llvm.experimental.constrained.fptoui.v3i32.v3f32(
<3 x float><float 42.0, float 43.0,
@@ -4436,30 +4461,42 @@ entry:
define <4 x i32> @constrained_vector_fptoui_v4i32_v4f32() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v4i32_v4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm0
-; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm2
-; CHECK-NEXT: cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
-; AVX-LABEL: constrained_vector_fptoui_v4i32_v4f32:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %ecx
-; AVX-NEXT: vmovd %ecx, %xmm0
-; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
-; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %eax
-; AVX-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptoui_v4i32_v4f32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rcx
+; AVX1-NEXT: vmovd %ecx, %xmm0
+; AVX1-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v4i32_v4f32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX512-NEXT: retq
entry:
%result = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(
<4 x float><float 42.0, float 43.0,
@@ -4474,10 +4511,15 @@ define <1 x i64> @constrained_vector_fptoui_v1i64_v1f32() #0 {
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: retq
;
-; AVX-LABEL: constrained_vector_fptoui_v1i64_v1f32:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptoui_v1i64_v1f32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v1i64_v1f32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %rax
+; AVX512-NEXT: retq
entry:
%result = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f32(
<1 x float><float 42.0>,
@@ -4495,14 +4537,23 @@ define <2 x i64> @constrained_vector_fptoui_v2i64_v2f32() #0 {
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
-; AVX-LABEL: constrained_vector_fptoui_v2i64_v2f32:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm0
-; AVX-NEXT: vcvttss2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm1
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptoui_v2i64_v2f32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm0
+; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v2i64_v2f32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm0
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: retq
entry:
%result = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f32(
<2 x float><float 42.0, float 43.0>,
@@ -4532,12 +4583,12 @@ define <3 x i64> @constrained_vector_fptoui_v3i64_v3f32() #0 {
;
; AVX512-LABEL: constrained_vector_fptoui_v3i64_v3f32:
; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %rax
; AVX512-NEXT: vmovq %rax, %xmm0
-; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %rax
; AVX512-NEXT: vmovq %rax, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %rax
; AVX512-NEXT: vmovq %rax, %xmm1
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -4581,14 +4632,14 @@ define <4 x i64> @constrained_vector_fptoui_v4i64_v4f32() #0 {
;
; AVX512-LABEL: constrained_vector_fptoui_v4i64_v4f32:
; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %rax
; AVX512-NEXT: vmovq %rax, %xmm0
-; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %rax
; AVX512-NEXT: vmovq %rax, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %rax
; AVX512-NEXT: vmovq %rax, %xmm1
-; AVX512-NEXT: vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vcvttss2usi {{.*}}(%rip), %rax
; AVX512-NEXT: vmovq %rax, %xmm2
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
@@ -4604,13 +4655,20 @@ entry:
define <1 x i32> @constrained_vector_fptoui_v1i32_v1f64() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v1i32_v1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-NEXT: retq
;
-; AVX-LABEL: constrained_vector_fptoui_v1i32_v1f64:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptoui_v1i32_v1f64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v1i32_v1f64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: retq
entry:
%result = call <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(
<1 x double><double 42.1>,
@@ -4621,20 +4679,28 @@ entry:
define <2 x i32> @constrained_vector_fptoui_v2i32_v2f64() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v2i32_v2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm1
-; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: retq
;
-; AVX-LABEL: constrained_vector_fptoui_v2i32_v2f64:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %ecx
-; AVX-NEXT: vmovd %ecx, %xmm0
-; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptoui_v2i32_v2f64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rcx
+; AVX1-NEXT: vmovd %ecx, %xmm0
+; AVX1-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v2i32_v2f64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX512-NEXT: retq
entry:
%result = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(
<2 x double><double 42.1, double 42.2>,
@@ -4645,25 +4711,35 @@ entry:
define <3 x i32> @constrained_vector_fptoui_v3i32_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v3i32_v3f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm1
-; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
-; AVX-LABEL: constrained_vector_fptoui_v3i32_v3f64:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %ecx
-; AVX-NEXT: vmovd %ecx, %xmm0
-; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
-; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptoui_v3i32_v3f64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rcx
+; AVX1-NEXT: vmovd %ecx, %xmm0
+; AVX1-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v3i32_v3f64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX512-NEXT: retq
entry:
%result = call <3 x i32> @llvm.experimental.constrained.fptoui.v3i32.v3f64(
<3 x double><double 42.1, double 42.2,
@@ -4675,30 +4751,42 @@ entry:
define <4 x i32> @constrained_vector_fptoui_v4i32_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v4i32_v4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm0
-; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm2
-; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
-; AVX-LABEL: constrained_vector_fptoui_v4i32_v4f64:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %ecx
-; AVX-NEXT: vmovd %ecx, %xmm0
-; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
-; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %eax
-; AVX-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptoui_v4i32_v4f64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rcx
+; AVX1-NEXT: vmovd %ecx, %xmm0
+; AVX1-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v4i32_v4f64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX512-NEXT: retq
entry:
%result = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f64(
<4 x double><double 42.1, double 42.2,
@@ -4713,10 +4801,15 @@ define <1 x i64> @constrained_vector_fptoui_v1i64_v1f64() #0 {
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: retq
;
-; AVX-LABEL: constrained_vector_fptoui_v1i64_v1f64:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptoui_v1i64_v1f64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v1i64_v1f64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %rax
+; AVX512-NEXT: retq
entry:
%result = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(
<1 x double><double 42.1>,
@@ -4734,14 +4827,23 @@ define <2 x i64> @constrained_vector_fptoui_v2i64_v2f64() #0 {
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
;
-; AVX-LABEL: constrained_vector_fptoui_v2i64_v2f64:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm0
-; AVX-NEXT: vcvttsd2si {{.*}}(%rip), %rax
-; AVX-NEXT: vmovq %rax, %xmm1
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-NEXT: retq
+; AVX1-LABEL: constrained_vector_fptoui_v2i64_v2f64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm0
+; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v2i64_v2f64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm0
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %rax
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: retq
entry:
%result = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(
<2 x double><double 42.1, double 42.2>,
@@ -4771,12 +4873,12 @@ define <3 x i64> @constrained_vector_fptoui_v3i64_v3f64() #0 {
;
; AVX512-LABEL: constrained_vector_fptoui_v3i64_v3f64:
; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %rax
; AVX512-NEXT: vmovq %rax, %xmm0
-; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %rax
; AVX512-NEXT: vmovq %rax, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %rax
; AVX512-NEXT: vmovq %rax, %xmm1
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512-NEXT: retq
@@ -4820,14 +4922,14 @@ define <4 x i64> @constrained_vector_fptoui_v4i64_v4f64() #0 {
;
; AVX512-LABEL: constrained_vector_fptoui_v4i64_v4f64:
; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %rax
; AVX512-NEXT: vmovq %rax, %xmm0
-; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %rax
; AVX512-NEXT: vmovq %rax, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %rax
; AVX512-NEXT: vmovq %rax, %xmm1
-; AVX512-NEXT: vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT: vcvttsd2usi {{.*}}(%rip), %rax
; AVX512-NEXT: vmovq %rax, %xmm2
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0