author: Eric Christopher <echristo@gmail.com>, 2015-05-12 01:26:05 +0000
committer: Eric Christopher <echristo@gmail.com>, 2015-05-12 01:26:05 +0000
commit: 824f42f209c4b04d9f43e4b74dd601ced6409ff7 (patch)
tree: db1a47b1f504331007c995d19256c06dd4df9a41 /llvm/lib/Target
parent: 8b33567189ea25fbdd454d9c4cc75cb2850a821f (diff)
download: bcm5719-llvm-824f42f209c4b04d9f43e4b74dd601ced6409ff7.tar.gz, bcm5719-llvm-824f42f209c4b04d9f43e4b74dd601ced6409ff7.zip
Migrate existing backends that care about software floating point
to use the information in the module rather than TargetOptions.

LLVM has had, and clang has emitted, the use-soft-float function attribute
for some time now, so have the backends set a subtarget feature from that
attribute for each particular function, now that subtargets are created
per function and keyed on function attributes.

For the one middle-end soft-float check, add an overridable
TargetLowering::useSoftFloat function that simply checks the
TargetSubtargetInfo in all cases.

Also remove the command line option that hard-codes whether or not
soft-float is set, using the attribute instead in all of the
target-specific test cases; for the generic tests, just add the
attribute in the one case that showed up.
llvm-svn: 237079
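To make the shape of the change concrete before the diffstat and per-file hunks, here is a minimal, self-contained C++ sketch of the attribute-to-feature mapping described above. It models the pattern outside of LLVM: a function-level "use-soft-float"="true" string attribute is folded into the subtarget feature string, which also serves as part of the subtarget cache key. The type and function names below are illustrative stand-ins, not LLVM APIs; the real logic lives in each target's getSubtargetImpl, shown in the diff further down.

```c++
#include <iostream>
#include <map>
#include <string>

// Illustrative stand-in for a function's string attributes in IR, e.g. a
// function carrying attributes #0 = { "use-soft-float"="true" }.
using FnAttrs = std::map<std::string, std::string>;

// Mirrors the pattern from the diff: if the function asks for soft float,
// append the +soft-float subtarget feature to the feature string FS, which
// also serves as part of the per-function subtarget cache key.
std::string addSoftFloatFeature(const FnAttrs &Attrs, std::string FS) {
  auto It = Attrs.find("use-soft-float");
  bool SoftFloat = It != Attrs.end() && It->second == "true";
  if (SoftFloat)
    FS += FS.empty() ? "+soft-float" : ",+soft-float";
  return FS;
}

int main() {
  FnAttrs SoftFn = {{"use-soft-float", "true"}};
  std::cout << addSoftFloatFeature(SoftFn, "+thumb-mode") << "\n"; // +thumb-mode,+soft-float
  std::cout << addSoftFloatFeature({}, "") << "\n";                // (empty: feature not added)
  return 0;
}
```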
Diffstat (limited to 'llvm/lib/Target')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | llvm/lib/Target/ARM/ARM.td | 3 |
| -rw-r--r-- | llvm/lib/Target/ARM/ARMISelLowering.cpp | 16 |
| -rw-r--r-- | llvm/lib/Target/ARM/ARMISelLowering.h | 1 |
| -rw-r--r-- | llvm/lib/Target/ARM/ARMSubtarget.cpp | 1 |
| -rw-r--r-- | llvm/lib/Target/ARM/ARMSubtarget.h | 4 |
| -rw-r--r-- | llvm/lib/Target/ARM/ARMTargetMachine.cpp | 16 |
| -rw-r--r-- | llvm/lib/Target/Mips/MipsISelLowering.cpp | 4 |
| -rw-r--r-- | llvm/lib/Target/Mips/MipsISelLowering.h | 1 |
| -rw-r--r-- | llvm/lib/Target/Mips/MipsTargetMachine.cpp | 7 |
| -rw-r--r-- | llvm/lib/Target/TargetMachine.cpp | 1 |
| -rw-r--r-- | llvm/lib/Target/X86/X86.td | 3 |
| -rw-r--r-- | llvm/lib/Target/X86/X86FastISel.cpp | 2 |
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 46 |
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.h | 1 |
| -rw-r--r-- | llvm/lib/Target/X86/X86Subtarget.cpp | 1 |
| -rw-r--r-- | llvm/lib/Target/X86/X86Subtarget.h | 4 |
| -rw-r--r-- | llvm/lib/Target/X86/X86TargetMachine.cpp | 16 |
17 files changed, 80 insertions, 47 deletions
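The other half of the change, the new hook on the lowering classes, follows the same pattern in each backend (ARM, Mips, X86) in the hunks below: an overridable useSoftFloat() that forwards to the subtarget, replacing direct reads of TargetOptions::UseSoftFloat. A simplified, self-contained model of that relationship, with stand-in class names rather than the real TargetLowering/Subtarget types, might look like this:

```c++
#include <iostream>

// Stand-in for a target's subtarget info, which now owns the soft-float flag
// as a subtarget feature rather than a global TargetOptions flag.
struct SubtargetInfo {
  bool UseSoftFloat = false; // set when "+soft-float" is in the feature string
  bool useSoftFloat() const { return UseSoftFloat; }
};

// Stand-in for the TargetLowering base class: callers query the overridable
// hook instead of reading TM.Options.UseSoftFloat directly.
struct LoweringBase {
  virtual ~LoweringBase() = default;
  virtual bool useSoftFloat() const { return false; }
};

// Stand-in for a per-target lowering class (ARM/Mips/X86 in the diff below),
// which overrides the hook to consult its subtarget.
struct TargetLoweringImpl : LoweringBase {
  explicit TargetLoweringImpl(const SubtargetInfo &ST) : Subtarget(ST) {}
  bool useSoftFloat() const override { return Subtarget.useSoftFloat(); }
  const SubtargetInfo &Subtarget;
};

int main() {
  SubtargetInfo ST;
  ST.UseSoftFloat = true;
  TargetLoweringImpl TLI(ST);
  const LoweringBase &TL = TLI;
  std::cout << std::boolalpha << TL.useSoftFloat() << "\n"; // true
  return 0;
}
```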
diff --git a/llvm/lib/Target/ARM/ARM.td b/llvm/lib/Target/ARM/ARM.td
index 9882227905c..c7ea18a17fe 100644
--- a/llvm/lib/Target/ARM/ARM.td
+++ b/llvm/lib/Target/ARM/ARM.td
@@ -23,6 +23,9 @@ include "llvm/Target/Target.td"
 def ModeThumb : SubtargetFeature<"thumb-mode", "InThumbMode", "true",
                                  "Thumb mode">;
 
+def ModeSoftFloat : SubtargetFeature<"soft-float", "UseSoftFloat", "true",
+                                     "Use software floating point features.">;
+
 //===----------------------------------------------------------------------===//
 // ARM Subtarget features.
 //
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index a3743d452e0..fd956d4670e 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -170,7 +170,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
   if (Subtarget->isTargetMachO()) {
     // Uses VFP for Thumb libfuncs if available.
     if (Subtarget->isThumb() && Subtarget->hasVFP2() &&
-        Subtarget->hasARMOps() && !TM.Options.UseSoftFloat) {
+        Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
       // Single-precision floating-point arithmetic.
       setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
       setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
@@ -401,7 +401,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
     addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
   else
     addRegisterClass(MVT::i32, &ARM::GPRRegClass);
-  if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
+  if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
       !Subtarget->isThumb1Only()) {
     addRegisterClass(MVT::f32, &ARM::SPRRegClass);
     addRegisterClass(MVT::f64, &ARM::DPRRegClass);
@@ -820,7 +820,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
   }
 
   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
-  if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
+  if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
       !Subtarget->isThumb1Only()) {
     // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
     // iff target supports vfp2.
@@ -861,7 +861,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
   setOperationAction(ISD::FREM, MVT::f64, Expand);
   setOperationAction(ISD::FREM, MVT::f32, Expand);
-  if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
+  if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
@@ -875,7 +875,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
   }
 
   // Various VFP goodness
-  if (!TM.Options.UseSoftFloat && !Subtarget->isThumb1Only()) {
+  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) {
     // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded.
     if (!Subtarget->hasFPARMv8() || Subtarget->isFPOnlySP()) {
       setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
@@ -932,7 +932,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
 
   setStackPointerRegisterToSaveRestore(ARM::SP);
 
-  if (TM.Options.UseSoftFloat || Subtarget->isThumb1Only() ||
+  if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||
       !Subtarget->hasVFP2())
     setSchedulingPreference(Sched::RegPressure);
   else
@@ -956,6 +956,10 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
   setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
 }
 
+bool ARMTargetLowering::useSoftFloat() const {
+  return Subtarget->useSoftFloat();
+}
+
 // FIXME: It might make sense to define the representative register class as the
 // nearest super-register that has a non-null superset. For example, DPR_VFP2 is
 // a super-register of SPR, and DPR is a superset if DPR_VFP2. Consequently,
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 76b0d8851f5..63e87c5282d 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -231,6 +231,7 @@ namespace llvm {
                       const ARMSubtarget &STI);
 
     unsigned getJumpTableEncoding() const override;
+    bool useSoftFloat() const override;
 
     SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
 
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.cpp b/llvm/lib/Target/ARM/ARMSubtarget.cpp
index 6e0a6df9280..89aab260366 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.cpp
+++ b/llvm/lib/Target/ARM/ARMSubtarget.cpp
@@ -145,6 +145,7 @@ void ARMSubtarget::initializeEnvironment() {
   HasVMLxForwarding = false;
   SlowFPBrcc = false;
   InThumbMode = false;
+  UseSoftFloat = false;
   HasThumb2 = false;
   NoARM = false;
   IsR9Reserved = ReserveR9;
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h
index 0b8b7801f3d..ed1c6a0fc64 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -100,6 +100,9 @@ protected:
   /// InThumbMode - True if compiling for Thumb, false for ARM.
   bool InThumbMode;
 
+  /// UseSoftFloat - True if we're using software floating point features.
+  bool UseSoftFloat;
+
   /// HasThumb2 - True if Thumb2 instructions are supported.
   bool HasThumb2;
 
@@ -393,6 +396,7 @@ public:
   bool isAPCS_ABI() const;
   bool isAAPCS_ABI() const;
 
+  bool useSoftFloat() const { return UseSoftFloat; }
   bool isThumb() const { return InThumbMode; }
   bool isThumb1Only() const { return InThumbMode && !HasThumb2; }
   bool isThumb2() const { return InThumbMode && HasThumb2; }
diff --git a/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
index ae333401915..bd29a052c37 100644
--- a/llvm/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
@@ -207,13 +207,15 @@ ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const {
   // function before we can generate a subtarget. We also need to use
   // it as a key for the subtarget since that can be the only difference
   // between two functions.
-  Attribute SFAttr = F.getFnAttribute("use-soft-float");
-  bool SoftFloat = !SFAttr.hasAttribute(Attribute::None)
-                       ? SFAttr.getValueAsString() == "true"
-                       : Options.UseSoftFloat;
-
-  auto &I = SubtargetMap[CPU + FS + (SoftFloat ? "use-soft-float=true"
-                                               : "use-soft-float=false")];
+  bool SoftFloat =
+      F.hasFnAttribute("use-soft-float") &&
+      F.getFnAttribute("use-soft-float").getValueAsString() == "true";
+  // If the soft float attribute is set on the function turn on the soft float
+  // subtarget feature.
+  if (SoftFloat)
+    FS += FS.empty() ? "+soft-float" : ",+soft-float";
+
+  auto &I = SubtargetMap[CPU + FS];
   if (!I) {
     // This needs to be done before we create a new subtarget since any
     // creation will depend on the TM and the code generation flags on the
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index ad278c28057..6c7f0895b42 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -3598,6 +3598,10 @@ unsigned MipsTargetLowering::getJumpTableEncoding() const {
   return TargetLowering::getJumpTableEncoding();
 }
 
+bool MipsTargetLowering::useSoftFloat() const {
+  return Subtarget.useSoftFloat();
+}
+
 void MipsTargetLowering::copyByValRegs(
     SDValue Chain, SDLoc DL, std::vector<SDValue> &OutChains, SelectionDAG &DAG,
     const ISD::ArgFlagsTy &Flags, SmallVectorImpl<SDValue> &InVals,
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.h b/llvm/lib/Target/Mips/MipsISelLowering.h
index 2cbe20010a9..6ea14b53a57 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.h
+++ b/llvm/lib/Target/Mips/MipsISelLowering.h
@@ -530,6 +530,7 @@ namespace llvm {
     bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
 
     unsigned getJumpTableEncoding() const override;
+    bool useSoftFloat() const override;
 
     /// Emit a sign-extension using sll/sra, seb, or seh appropriately.
     MachineBasicBlock *emitSignExtendToI32InReg(MachineInstr *MI,
diff --git a/llvm/lib/Target/Mips/MipsTargetMachine.cpp b/llvm/lib/Target/Mips/MipsTargetMachine.cpp
index 4d05eb84ce5..b279184ea30 100644
--- a/llvm/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/llvm/lib/Target/Mips/MipsTargetMachine.cpp
@@ -140,10 +140,9 @@ MipsTargetMachine::getSubtargetImpl(const Function &F) const {
   // FIXME: This is related to the code below to reset the target options,
   // we need to know whether or not the soft float flag is set on the
   // function, so we can enable it as a subtarget feature.
-  Attribute SFAttr = F.getFnAttribute("use-soft-float");
-  bool softFloat = !SFAttr.hasAttribute(Attribute::None)
-                       ? SFAttr.getValueAsString() == "true"
-                       : Options.UseSoftFloat;
+  bool softFloat =
+      F.hasFnAttribute("use-soft-float") &&
+      F.getFnAttribute("use-soft-float").getValueAsString() == "true";
 
   if (hasMips16Attr)
     FS += FS.empty() ? "+mips16" : ",+mips16";
diff --git a/llvm/lib/Target/TargetMachine.cpp b/llvm/lib/Target/TargetMachine.cpp
index 69900b012c4..2a1beda49fd 100644
--- a/llvm/lib/Target/TargetMachine.cpp
+++ b/llvm/lib/Target/TargetMachine.cpp
@@ -71,7 +71,6 @@ void TargetMachine::resetTargetOptions(const Function &F) const {
   RESET_OPTION(UnsafeFPMath, "unsafe-fp-math");
   RESET_OPTION(NoInfsFPMath, "no-infs-fp-math");
   RESET_OPTION(NoNaNsFPMath, "no-nans-fp-math");
-  RESET_OPTION(UseSoftFloat, "use-soft-float");
   RESET_OPTION(DisableTailCalls, "disable-tail-calls");
 
   Options.MCOptions.SanitizeAddress = F.hasFnAttribute(Attribute::SanitizeAddress);
diff --git a/llvm/lib/Target/X86/X86.td b/llvm/lib/Target/X86/X86.td
index d13f1551d20..c70e2e95463 100644
--- a/llvm/lib/Target/X86/X86.td
+++ b/llvm/lib/Target/X86/X86.td
@@ -192,6 +192,9 @@ def FeatureUseSqrtEst : SubtargetFeature<"use-sqrt-est", "UseSqrtEst", "true",
                           "Use RSQRT* to optimize square root calculations">;
 def FeatureUseRecipEst : SubtargetFeature<"use-recip-est", "UseReciprocalEst", "true",
                           "Use RCP* to optimize division calculations">;
+def FeatureSoftFloat
+    : SubtargetFeature<"soft-float", "UseSoftFloat", "true",
+                       "Use software floating point features.">;
 
 //===----------------------------------------------------------------------===//
 // X86 processors supported.
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 3bf4fb824fd..9af0aebea23 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -2254,7 +2254,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
   default: return false;
   case Intrinsic::convert_from_fp16:
   case Intrinsic::convert_to_fp16: {
-    if (TM.Options.UseSoftFloat || !Subtarget->hasF16C())
+    if (Subtarget->useSoftFloat() || !Subtarget->hasF16C())
       return false;
 
     const Value *Op = II->getArgOperand(0);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 68d777be4ce..f36e4a5e369 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -183,7 +183,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   if (Subtarget->is64Bit()) {
     setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
     setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
-  } else if (!TM.Options.UseSoftFloat) {
+  } else if (!Subtarget->useSoftFloat()) {
     // We have an algorithm for SSE2->double, and we turn this into a
     // 64-bit FILD followed by conditional FADD for other targets.
     setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
@@ -197,7 +197,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
   setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
 
-  if (!TM.Options.UseSoftFloat) {
+  if (!Subtarget->useSoftFloat()) {
     // SSE has no i16 to fp conversion, only i32
     if (X86ScalarSSEf32) {
       setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
@@ -240,7 +240,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   if (Subtarget->is64Bit()) {
     setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
     setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
-  } else if (!TM.Options.UseSoftFloat) {
+  } else if (!Subtarget->useSoftFloat()) {
     // Since AVX is a superset of SSE3, only check for SSE here.
     if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
       // Expand FP_TO_UINT into a select.
@@ -368,7 +368,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   // Special handling for half-precision floating point conversions.
   // If we don't have F16C support, then lower half float conversions
   // into library calls.
-  if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
+  if (Subtarget->useSoftFloat() || !Subtarget->hasF16C()) {
     setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
     setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
   }
@@ -517,7 +517,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
   setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);
 
-  if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
+  if (!Subtarget->useSoftFloat() && X86ScalarSSEf64) {
     // f32 and f64 use SSE.
     // Set up the FP register classes.
     addRegisterClass(MVT::f32, &X86::FR32RegClass);
@@ -551,7 +551,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     // cases we handle.
     addLegalFPImmediate(APFloat(+0.0)); // xorpd
     addLegalFPImmediate(APFloat(+0.0f)); // xorps
-  } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
+  } else if (!Subtarget->useSoftFloat() && X86ScalarSSEf32) {
     // Use SSE for f32, x87 for f64.
     // Set up the FP register classes.
     addRegisterClass(MVT::f32, &X86::FR32RegClass);
@@ -586,7 +586,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
       setOperationAction(ISD::FCOS , MVT::f64, Expand);
       setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
     }
-  } else if (!TM.Options.UseSoftFloat) {
+  } else if (!Subtarget->useSoftFloat()) {
     // f32 and f64 in x87.
     // Set up the FP register classes.
     addRegisterClass(MVT::f64, &X86::RFP64RegClass);
@@ -620,7 +620,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   setOperationAction(ISD::FMA, MVT::f32, Expand);
 
   // Long double always uses X87.
-  if (!TM.Options.UseSoftFloat) {
+  if (!Subtarget->useSoftFloat()) {
     addRegisterClass(MVT::f80, &X86::RFP80RegClass);
     setOperationAction(ISD::UNDEF, MVT::f80, Expand);
     setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
@@ -760,7 +760,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
 
   // FIXME: In order to prevent SSE instructions being expanded to MMX ones
   // with -msoft-float, disable use of MMX as well.
-  if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
+  if (!Subtarget->useSoftFloat() && Subtarget->hasMMX()) {
     addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
     // No operations on x86mmx supported, everything uses intrinsics.
   }
@@ -778,7 +778,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   }
   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
 
-  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
+  if (!Subtarget->useSoftFloat() && Subtarget->hasSSE1()) {
     addRegisterClass(MVT::v4f32, &X86::VR128RegClass);
 
     setOperationAction(ISD::FADD, MVT::v4f32, Legal);
@@ -797,7 +797,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
   }
 
-  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
+  if (!Subtarget->useSoftFloat() && Subtarget->hasSSE2()) {
     addRegisterClass(MVT::v2f64, &X86::VR128RegClass);
 
     // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
@@ -942,7 +942,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
   }
 
-  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
+  if (!Subtarget->useSoftFloat() && Subtarget->hasSSE41()) {
     for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
       setOperationAction(ISD::FFLOOR, RoundedTy, Legal);
       setOperationAction(ISD::FCEIL, RoundedTy, Legal);
@@ -1024,7 +1024,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     setOperationAction(ISD::SRA, MVT::v4i32, Custom);
   }
 
-  if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
+  if (!Subtarget->useSoftFloat() && Subtarget->hasFp256()) {
     addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
     addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
     addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
@@ -1244,7 +1244,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     }
   }
 
-  if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
+  if (!Subtarget->useSoftFloat() && Subtarget->hasAVX512()) {
     addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
     addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
     addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
@@ -1447,7 +1447,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     }
   }// has AVX-512
 
-  if (!TM.Options.UseSoftFloat && Subtarget->hasBWI()) {
+  if (!Subtarget->useSoftFloat() && Subtarget->hasBWI()) {
     addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
     addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
 
@@ -1484,7 +1484,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     }
   }
 
-  if (!TM.Options.UseSoftFloat && Subtarget->hasVLX()) {
+  if (!Subtarget->useSoftFloat() && Subtarget->hasVLX()) {
     addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
     addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
 
@@ -1789,6 +1789,10 @@ unsigned X86TargetLowering::getJumpTableEncoding() const {
   return TargetLowering::getJumpTableEncoding();
 }
 
+bool X86TargetLowering::useSoftFloat() const {
+  return Subtarget->useSoftFloat();
+}
+
 const MCExpr *
 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                                              const MachineBasicBlock *MBB,
@@ -2294,7 +2298,7 @@ static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
 
   const Function *Fn = MF.getFunction();
   bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
-  bool isSoftFloat = MF.getTarget().Options.UseSoftFloat;
+  bool isSoftFloat = Subtarget->useSoftFloat();
   assert(!(isSoftFloat && NoImplicitFloatOps) &&
          "SSE register cannot be used when SSE is disabled!");
   if (isSoftFloat || NoImplicitFloatOps || !Subtarget->hasSSE1())
@@ -2468,7 +2472,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
   bool IsWinEHParent = WinEHParent && WinEHParent == Fn;
 
   // Figure out if XMM registers are in use.
-  assert(!(MF.getTarget().Options.UseSoftFloat &&
+  assert(!(Subtarget->useSoftFloat() &&
            Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
          "SSE register cannot be used when SSE is disabled!");
 
@@ -14600,7 +14604,7 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
 
   if (ArgMode == 2) {
     // Sanity Check: Make sure using fp_offset makes sense.
-    assert(!DAG.getTarget().Options.UseSoftFloat &&
+    assert(!Subtarget->useSoftFloat() &&
            !(DAG.getMachineFunction().getFunction()->hasFnAttribute(
                Attribute::NoImplicitFloat)) &&
            Subtarget->hasSSE1());
@@ -23203,8 +23207,8 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
 
   const Function *F = DAG.getMachineFunction().getFunction();
   bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
-  bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
-                     && Subtarget->hasSSE2();
+  bool F64IsLegal =
+      !Subtarget->useSoftFloat() && !NoImplicitFloatOps && Subtarget->hasSSE2();
   if ((VT.isVector() ||
       (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
       isa<LoadSDNode>(St->getValue()) &&
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 90b49328f22..1c5f73ab379 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -572,6 +572,7 @@ namespace llvm {
                       const X86Subtarget &STI);
 
     unsigned getJumpTableEncoding() const override;
+    bool useSoftFloat() const override;
 
     MVT getScalarShiftAmountTy(EVT LHSTy) const override { return MVT::i8; }
 
diff --git a/llvm/lib/Target/X86/X86Subtarget.cpp b/llvm/lib/Target/X86/X86Subtarget.cpp
index de30c75ac86..1cdab14e034 100644
--- a/llvm/lib/Target/X86/X86Subtarget.cpp
+++ b/llvm/lib/Target/X86/X86Subtarget.cpp
@@ -278,6 +278,7 @@ void X86Subtarget::initializeEnvironment() {
   stackAlignment = 4;
   // FIXME: this is a known good value for Yonah. How about others?
   MaxInlineSizeThreshold = 128;
+  UseSoftFloat = false;
 }
 
 X86Subtarget &X86Subtarget::initializeSubtargetDependencies(StringRef CPU,
diff --git a/llvm/lib/Target/X86/X86Subtarget.h b/llvm/lib/Target/X86/X86Subtarget.h
index 4c31f789546..455dd7744d7 100644
--- a/llvm/lib/Target/X86/X86Subtarget.h
+++ b/llvm/lib/Target/X86/X86Subtarget.h
@@ -218,6 +218,9 @@ protected:
   /// Processor has AVX-512 Vector Length eXtenstions
   bool HasVLX;
 
+  /// Use software floating point for code generation.
+  bool UseSoftFloat;
+
   /// The minimum alignment known to hold of the stack frame on
   /// entry to the function and which must be maintained by every function.
   unsigned stackAlignment;
@@ -385,6 +388,7 @@ public:
 
   bool isAtom() const { return X86ProcFamily == IntelAtom; }
   bool isSLM() const { return X86ProcFamily == IntelSLM; }
+  bool useSoftFloat() const { return UseSoftFloat; }
 
   const Triple &getTargetTriple() const { return TargetTriple; }
 
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 9484f678808..d5e7eac37c4 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -131,13 +131,15 @@ X86TargetMachine::getSubtargetImpl(const Function &F) const {
   // function before we can generate a subtarget. We also need to use
   // it as a key for the subtarget since that can be the only difference
   // between two functions.
-  Attribute SFAttr = F.getFnAttribute("use-soft-float");
-  bool SoftFloat = !SFAttr.hasAttribute(Attribute::None)
-                       ? SFAttr.getValueAsString() == "true"
-                       : Options.UseSoftFloat;
-
-  auto &I = SubtargetMap[CPU + FS + (SoftFloat ? "use-soft-float=true"
-                                               : "use-soft-float=false")];
+  bool SoftFloat =
+      F.hasFnAttribute("use-soft-float") &&
+      F.getFnAttribute("use-soft-float").getValueAsString() == "true";
+  // If the soft float attribute is set on the function turn on the soft float
+  // subtarget feature.
+  if (SoftFloat)
+    FS += FS.empty() ? "+soft-float" : ",+soft-float";
+
+  auto &I = SubtargetMap[CPU + FS];
   if (!I) {
     // This needs to be done before we create a new subtarget since any
     // creation will depend on the TM and the code generation flags on the