author     James Molloy <james.molloy@arm.com>    2014-05-09 16:21:39 +0000
committer  James Molloy <james.molloy@arm.com>    2014-05-09 16:21:39 +0000
commit     6f244b6f784e55ee6c404f059c5fc6bfe2c288eb (patch)
tree       68848a207815e43d4ea22b1be16bdfc936780226 /clang/lib/CodeGen
parent     dd1aa14a2102ca159176574349cff032e459d441 (diff)
Reapply r208417 (olista01 'ARM: HFAs must be passed in consecutive registers'). Bots are now pacified.
llvm-svn: 208425
Diffstat (limited to 'clang/lib/CodeGen')
-rw-r--r--  clang/lib/CodeGen/CGCall.cpp     | 42
-rw-r--r--  clang/lib/CodeGen/TargetInfo.cpp | 25
2 files changed, 40 insertions(+), 27 deletions(-)
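
Background for the change below: under the ARM AAPCS-VFP calling convention, a Homogeneous Floating-point Aggregate (HFA), that is, a struct whose members are all of the same floating-point base type, must be passed either in consecutive VFP registers or entirely on the stack; it may not be split between registers and stack or spread over non-adjacent registers. A minimal illustrative C++ example of such an aggregate (not part of the patch):

// Four members of a single base type (float): an HFA under AAPCS-VFP.
// A conforming implementation must pass it in consecutive VFP registers
// (s0-s3 when they are free) or entirely on the stack.
struct Quad {
  float a, b, c, d;
};

float sum(Quad q) {
  return q.a + q.b + q.c + q.d;
}
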
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index 972a7c8dbb7..56004f364f1 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -158,6 +158,23 @@ static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
return CC_C;
}
+static bool isAAPCSVFP(const CGFunctionInfo &FI, const TargetInfo &Target) {
+ switch (FI.getEffectiveCallingConvention()) {
+ case llvm::CallingConv::C:
+ switch (Target.getTriple().getEnvironment()) {
+ case llvm::Triple::EABIHF:
+ case llvm::Triple::GNUEABIHF:
+ return true;
+ default:
+ return false;
+ }
+ case llvm::CallingConv::ARM_AAPCS_VFP:
+ return true;
+ default:
+ return false;
+ }
+}
+
/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (Zero value of RD means we don't have any meaningful "this" argument type,
@@ -995,8 +1012,11 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
// If the coerce-to type is a first class aggregate, flatten it. Either
// way is semantically identical, but fast-isel and the optimizer
// generally likes scalar values better than FCAs.
+ // We cannot do this for functions using the AAPCS calling convention,
+ // as structures are treated differently by that calling convention.
llvm::Type *argType = argAI.getCoerceToType();
- if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
+ llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
+ if (st && !isAAPCSVFP(FI, getTarget())) {
for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
argTypes.push_back(st->getElementType(i));
} else {
@@ -1199,14 +1219,15 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
else if (ParamType->isUnsignedIntegerOrEnumerationType())
Attrs.addAttribute(llvm::Attribute::ZExt);
// FALL THROUGH
- case ABIArgInfo::Direct:
+ case ABIArgInfo::Direct: {
if (AI.getInReg())
Attrs.addAttribute(llvm::Attribute::InReg);
// FIXME: handle sseregparm someday...
- if (llvm::StructType *STy =
- dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
+ llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(AI.getCoerceToType());
+ if (!isAAPCSVFP(FI, getTarget()) && STy) {
unsigned Extra = STy->getNumElements()-1; // 1 will be added below.
if (Attrs.hasAttributes())
for (unsigned I = 0; I < Extra; ++I)
@@ -1215,7 +1236,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
Index += Extra;
}
break;
-
+ }
case ABIArgInfo::Indirect:
if (AI.getInReg())
Attrs.addAttribute(llvm::Attribute::InReg);
@@ -1474,8 +1495,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
+ // We cannot do this for functions using the AAPCS calling convention,
+ // as structures are treated differently by that calling convention.
llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
- if (STy && STy->getNumElements() > 1) {
+ if (!isAAPCSVFP(FI, getTarget()) && STy && STy->getNumElements() > 1) {
uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
llvm::Type *DstTy =
cast<llvm::PointerType>(Ptr->getType())->getElementType();
@@ -2735,8 +2758,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
- if (llvm::StructType *STy =
- dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
+ // We cannot do this for functions using the AAPCS calling convention,
+ // as structures are treated differently by that calling convention.
+ llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
+ if (STy && !isAAPCSVFP(CallInfo, getTarget())) {
llvm::Type *SrcTy =
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
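
Each CGCall.cpp hunk above applies the same guard: the existing fast path that flattens a first-class-aggregate coerce-to type into its elements is skipped when the new isAAPCSVFP() predicate fires, so the aggregate reaches the ARM backend intact. The standalone sketch below models that decision with plain enums standing in for the clang/LLVM types; CallConv, Environment, and shouldFlatten are illustrative names introduced here, not identifiers from the patch.

#include <iostream>

// Illustrative stand-ins for llvm::CallingConv and llvm::Triple environment values.
enum class CallConv { C, ARM_AAPCS_VFP, Other };
enum class Environment { EABIHF, GNUEABIHF, Other };

// Mirrors the shape of the new isAAPCSVFP() helper: the default C convention
// counts as AAPCS-VFP on hard-float environments, and an explicit
// ARM_AAPCS_VFP convention always does.
static bool isAAPCSVFP(CallConv CC, Environment Env) {
  switch (CC) {
  case CallConv::C:
    return Env == Environment::EABIHF || Env == Environment::GNUEABIHF;
  case CallConv::ARM_AAPCS_VFP:
    return true;
  default:
    return false;
  }
}

// Models the guard added in the hunks above: flatten the coerce-to struct
// only when the callee does not use AAPCS-VFP.
static bool shouldFlatten(bool coerceTypeIsStruct, CallConv CC, Environment Env) {
  return coerceTypeIsStruct && !isAAPCSVFP(CC, Env);
}

int main() {
  std::cout << shouldFlatten(true, CallConv::C, Environment::GNUEABIHF) << "\n"; // 0: keep the aggregate whole
  std::cout << shouldFlatten(true, CallConv::C, Environment::Other) << "\n";     // 1: flatten as before
}
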
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index d539ffc5177..64f9e7b544c 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -3796,7 +3796,7 @@ public:
private:
ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
- ABIArgInfo classifyArgumentType(QualType RetTy, bool &IsHA, bool isVariadic,
+ ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
bool &IsCPRC) const;
bool isIllegalVectorType(QualType Ty) const;
@@ -3901,22 +3901,10 @@ void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
for (auto &I : FI.arguments()) {
unsigned PreAllocationVFPs = AllocatedVFPs;
unsigned PreAllocationGPRs = AllocatedGPRs;
- bool IsHA = false;
bool IsCPRC = false;
// 6.1.2.3 There is one VFP co-processor register class using registers
// s0-s15 (d0-d7) for passing arguments.
- I.info = classifyArgumentType(I.type, IsHA, FI.isVariadic(), IsCPRC);
- assert((IsCPRC || !IsHA) && "Homogeneous aggregates must be CPRCs");
- // If we do not have enough VFP registers for the HA, any VFP registers
- // that are unallocated are marked as unavailable. To achieve this, we add
- // padding of (NumVFPs - PreAllocationVFP) floats.
- // Note that IsHA will only be set when using the AAPCS-VFP calling convention,
- // and the callee is not variadic.
- if (IsHA && AllocatedVFPs > NumVFPs && PreAllocationVFPs < NumVFPs) {
- llvm::Type *PaddingTy = llvm::ArrayType::get(
- llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocationVFPs);
- I.info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
- }
+ I.info = classifyArgumentType(I.type, FI.isVariadic(), IsCPRC);
// If we have allocated some arguments onto the stack (due to running
// out of VFP registers), we cannot split an argument between GPRs and
@@ -3930,6 +3918,7 @@ void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreAllocationGPRs);
I.info = ABIArgInfo::getDirect(nullptr /* type */, 0 /* offset */,
PaddingTy);
+
}
}
@@ -4113,8 +4102,7 @@ void ARMABIInfo::resetAllocatedRegs(void) const {
VFPRegs[i] = 0;
}
-ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool &IsHA,
- bool isVariadic,
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
bool &IsCPRC) const {
// We update number of allocated VFPs according to
// 6.1.2.1 The following argument types are VFP CPRCs:
@@ -4226,9 +4214,8 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool &IsHA,
Base->isSpecificBuiltinType(BuiltinType::LongDouble));
markAllocatedVFPs(2, Members * 2);
}
- IsHA = true;
IsCPRC = true;
- return ABIArgInfo::getExpand();
+ return ABIArgInfo::getDirect();
}
}
@@ -4242,7 +4229,7 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool &IsHA,
getABIKind() == ARMABIInfo::AAPCS)
ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
- // Update Allocated GPRs
+ // Update Allocated GPRs
markAllocatedGPRs(1, 1);
return ABIArgInfo::getIndirect(TyAlign, /*ByVal=*/true,
/*Realign=*/TyAlign > ABIAlign);
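
On the TargetInfo.cpp side, the HFA path in classifyArgumentType() now returns ABIArgInfo::getDirect() instead of ABIArgInfo::getExpand(), and the separate IsHA bookkeeping (including the float-array padding added when VFP registers ran out) is dropped, since keeping the aggregate whole lets the backend enforce the consecutive-register rule itself. A small illustrative example of an argument affected by this reclassification (not from the patch):

// A two-member double HFA: previously expanded into scalar arguments in the
// generated IR, now passed directly as one aggregate so the ARM backend can
// place it in consecutive VFP registers (for example d0 and d1) or push it
// entirely onto the stack.
struct Pair {
  double lo, hi;
};

double span(Pair p) {
  return p.hi - p.lo;
}
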