Diffstat (limited to 'clang/lib/CodeGen/TargetInfo.cpp')
-rw-r--r--  clang/lib/CodeGen/TargetInfo.cpp  |  95
1 file changed, 65 insertions(+), 30 deletions(-)
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index 1e262ccbb9c..4566fdbebf8 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -841,7 +841,13 @@ class X86_32ABIInfo : public ABIInfo {
Class classify(QualType Ty) const;
ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
- bool shouldUseInReg(QualType Ty, CCState &State, bool &NeedsPadding) const;
+ /// \brief Updates the number of available free registers; returns
+ /// true if any registers were allocated.
+ bool updateFreeRegs(QualType Ty, CCState &State) const;
+
+ bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
+ bool &NeedsPadding) const;
+ bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;
/// \brief Rewrite the function info so that all memory arguments use
/// inalloca.
@@ -1003,9 +1009,10 @@ bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
ASTContext &Context) const {
uint64_t Size = Context.getTypeSize(Ty);
- // Type must be register sized.
- if (!isRegisterSize(Size))
- return false;
+ // For i386, type must be register sized.
+ // For the MCU ABI, it only needs to be <= 8 bytes.
+ if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
+ return false;
if (Ty->isVectorType()) {
// 64- and 128- bit vectors inside structures are not returned in
@@ -1052,7 +1059,8 @@ ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State
// integer register.
if (State.FreeRegs) {
--State.FreeRegs;
- return getNaturalAlignIndirectInReg(RetTy);
+ if (!IsMCUABI)
+ return getNaturalAlignIndirectInReg(RetTy);
}
return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
}
@@ -1192,7 +1200,8 @@ ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
if (!ByVal) {
if (State.FreeRegs) {
--State.FreeRegs; // Non-byval indirects just use one pointer.
- return getNaturalAlignIndirectInReg(Ty);
+ if (!IsMCUABI)
+ return getNaturalAlignIndirectInReg(Ty);
}
return getNaturalAlignIndirect(Ty, false);
}
@@ -1223,9 +1232,7 @@ X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
return Integer;
}
-bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
- bool &NeedsPadding) const {
- NeedsPadding = false;
+bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
if (!IsSoftFloatABI) {
Class C = classify(Ty);
if (C == Float)
@@ -1253,25 +1260,46 @@ bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
}
State.FreeRegs -= SizeInRegs;
+ return true;
+}
+
+bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
+ bool &InReg,
+ bool &NeedsPadding) const {
+ NeedsPadding = false;
+ InReg = !IsMCUABI;
+
+ if (!updateFreeRegs(Ty, State))
+ return false;
+
+ if (IsMCUABI)
+ return true;
if (State.CC == llvm::CallingConv::X86_FastCall ||
State.CC == llvm::CallingConv::X86_VectorCall) {
- if (Size > 32)
- return false;
-
- if (Ty->isIntegralOrEnumerationType())
- return true;
+ if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
+ NeedsPadding = true;
- if (Ty->isPointerType())
- return true;
+ return false;
+ }
- if (Ty->isReferenceType())
- return true;
+ return true;
+}
- if (State.FreeRegs)
- NeedsPadding = true;
+bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
+ if (!updateFreeRegs(Ty, State))
+ return false;
+ if (IsMCUABI)
return false;
+
+ if (State.CC == llvm::CallingConv::X86_FastCall ||
+ State.CC == llvm::CallingConv::X86_VectorCall) {
+ if (getContext().getTypeSize(Ty) > 32)
+ return false;
+
+ return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
+ Ty->isReferenceType());
}
return true;
@@ -1327,12 +1355,15 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
llvm::LLVMContext &LLVMContext = getVMContext();
llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
- bool NeedsPadding;
- if (shouldUseInReg(Ty, State, NeedsPadding)) {
+ bool NeedsPadding, InReg;
+ if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
- return ABIArgInfo::getDirectInReg(Result);
+ if (InReg)
+ return ABIArgInfo::getDirectInReg(Result);
+ else
+ return ABIArgInfo::getDirect(Result);
}
llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
@@ -1340,8 +1371,11 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
// of those arguments will match the struct. This is important because the
// LLVM backend isn't smart enough to remove byval, which inhibits many
// optimizations.
+ // Don't do this for the MCU if there are still free integer registers
+ // (see X86_64 ABI for full explanation).
if (getContext().getTypeSize(Ty) <= 4*32 &&
- canExpandIndirectArgument(Ty, getContext()))
+ canExpandIndirectArgument(Ty, getContext()) &&
+ (!IsMCUABI || State.FreeRegs == 0))
return ABIArgInfo::getExpandWithPadding(
State.CC == llvm::CallingConv::X86_FastCall ||
State.CC == llvm::CallingConv::X86_VectorCall,
@@ -1371,14 +1405,14 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- bool NeedsPadding;
- bool InReg = shouldUseInReg(Ty, State, NeedsPadding);
+ bool InReg = shouldPrimitiveUseInReg(Ty, State);
if (Ty->isPromotableIntegerType()) {
if (InReg)
return ABIArgInfo::getExtendInReg();
return ABIArgInfo::getExtend();
}
+
if (InReg)
return ABIArgInfo::getDirectInReg();
return ABIArgInfo::getDirect();
@@ -1386,15 +1420,15 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
CCState State(FI.getCallingConvention());
- if (State.CC == llvm::CallingConv::X86_FastCall)
+ if (IsMCUABI)
+ State.FreeRegs = 3;
+ else if (State.CC == llvm::CallingConv::X86_FastCall)
State.FreeRegs = 2;
else if (State.CC == llvm::CallingConv::X86_VectorCall) {
State.FreeRegs = 2;
State.FreeSSERegs = 6;
} else if (FI.getHasRegParm())
State.FreeRegs = FI.getRegParm();
- else if (IsMCUABI)
- State.FreeRegs = 3;
else
State.FreeRegs = DefaultNumRegisterParameters;
@@ -1405,7 +1439,8 @@ void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
// return value was sret and put it in a register ourselves if appropriate.
if (State.FreeRegs) {
--State.FreeRegs; // The sret parameter consumes a register.
- FI.getReturnInfo().setInReg(true);
+ if (!IsMCUABI)
+ FI.getReturnInfo().setInReg(true);
}
}
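
Taken together, the change splits the old shouldUseInReg() into a pure register-accounting step, updateFreeRegs(), plus two callers that decide whether an argument should additionally be marked 'inreg': shouldAggregateUseDirect() for aggregates and shouldPrimitiveUseInReg() for scalars. For the MCU ABI the accounting still runs (computeInfo() now starts it with three free registers), but 'inreg' is never requested, and sret/indirect results are not put in a register either. The standalone sketch below only illustrates that split; the simplified CCState, the purely size-based accounting, and the omission of soft-float handling, FP classification, and the MCU-specific size cap are assumptions for the example, not the clang sources.

// Illustrative sketch, not the clang code: shows how register accounting
// is separated from the decision to request the 'inreg' attribute.
#include <cstdint>

struct CCState {
  unsigned FreeRegs = 0; // 32-bit integer registers still available
};

// Roughly mirrors updateFreeRegs(): consume registers for a value of
// SizeInBits and report whether any were actually allocated.
static bool updateFreeRegs(uint64_t SizeInBits, CCState &State) {
  unsigned SizeInRegs = static_cast<unsigned>((SizeInBits + 31) / 32);
  if (SizeInRegs == 0 || SizeInRegs > State.FreeRegs)
    return false;               // does not fit; leave the count unchanged
  State.FreeRegs -= SizeInRegs; // bookkeeping happens for every ABI
  return true;
}

// Roughly mirrors the aggregate path: the MCU ABI does the same bookkeeping
// but never asks for 'inreg'; FreeRegs only gates how the argument is
// lowered (direct vs. expanded/indirect) on that target.
static bool shouldAggregateUseDirect(uint64_t SizeInBits, CCState &State,
                                     bool IsMCUABI, bool &InReg) {
  InReg = !IsMCUABI;
  return updateFreeRegs(SizeInBits, State);
}

In this simplified model, with FreeRegs = 3 (the MCU setting from computeInfo()), a 4-byte argument followed by an 8-byte aggregate consumes all three registers and both are passed directly without 'inreg'; a further 4-byte argument gets false from updateFreeRegs() and falls through to the normal expand/indirect handling.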