author     Chris Lattner <sabre@nondot.org>    2010-07-30 04:02:24 +0000
committer  Chris Lattner <sabre@nondot.org>    2010-07-30 04:02:24 +0000
commit     8a2f3c778e9dc3aa5d49b3c97f7bc247b004ea3a (patch)
tree       6f61cf6f651fab7c5ac4327994f57a60016459d6 /clang/lib
parent     b8cba97cde9dfd0a5fa7ac9893ae96d075fcfe88 (diff)
fix PR5179 and correctly fix PR5831 to not miscompile.
The X86-64 ABI code didn't handle the case when a struct would get classified and turn up as "NoClass INTEGER" for example. This is perfectly possible when the first slot is all padding (e.g. due to empty base classes). In this situation, the first 8-byte doesn't take a register at all, only the second 8-byte does.

This fixes this by enhancing the x86-64 ABI code to allow and handle this case, reverts the broken fix for PR5831, and enhances the target-independent code to be able to handle an argument value in registers being accessed at an offset from the memory value.

This is the last x86-64 calling convention related miscompile that I'm aware of.

llvm-svn: 109848
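A hypothetical C++ illustration of the situation (invented for this description, not taken from the PR test cases): if every byte of the first eightbyte of an aggregate is padding, classification yields "NoClass INTEGER", and only the second eightbyte should occupy a register.

// Hypothetical types, invented for illustration: the first eightbyte of S
// contains nothing but empty classes, so it classifies as NoClass; the
// second eightbyte contains 'x' and classifies as INTEGER.
struct Empty {};

struct S {
  Empty pad[8]; // bytes 0..7: only empty classes -> padding-only eightbyte
  long x;       // bytes 8..15: INTEGER
};

S ret_s();       // with this fix, only the second eightbyte of the returned
void take_s(S);  // value (or argument) is materialized in a register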
Diffstat (limited to 'clang/lib')
-rw-r--r--  clang/lib/CodeGen/ABIInfo.h      | 55
-rw-r--r--  clang/lib/CodeGen/CGCall.cpp     | 56
-rw-r--r--  clang/lib/CodeGen/TargetInfo.cpp | 42
3 files changed, 111 insertions(+), 42 deletions(-)
diff --git a/clang/lib/CodeGen/ABIInfo.h b/clang/lib/CodeGen/ABIInfo.h
index c059cb8baab..91b7742557f 100644
--- a/clang/lib/CodeGen/ABIInfo.h
+++ b/clang/lib/CodeGen/ABIInfo.h
@@ -38,26 +38,30 @@ namespace clang {
class ABIArgInfo {
public:
enum Kind {
- Direct, /// Pass the argument directly using the normal converted LLVM
- /// type, or by coercing to another specified type
- /// (stored in 'CoerceToType').
-
- Extend, /// Valid only for integer argument types. Same as 'direct'
- /// but also emit a zero/sign extension attribute.
-
- Indirect, /// Pass the argument indirectly via a hidden pointer
- /// with the specified alignment (0 indicates default
- /// alignment).
-
- Ignore, /// Ignore the argument (treat as void). Useful for
- /// void and empty structs.
-
- Expand, /// Only valid for aggregate argument types. The
- /// structure should be expanded into consecutive
- /// arguments for its constituent fields. Currently
- /// expand is only allowed on structures whose fields
- /// are all scalar types or are themselves expandable
- /// types.
+ /// Direct - Pass the argument directly using the normal converted LLVM
+ /// type, or by coercing to another specified type stored in
+ /// 'CoerceToType'). If an offset is specified (in UIntData), then the
+ /// argument passed is offset by some number of bytes in the memory
+ /// representation.
+ Direct,
+
+ /// Extend - Valid only for integer argument types. Same as 'direct'
+ /// but also emit a zero/sign extension attribute.
+ Extend,
+
+ /// Indirect - Pass the argument indirectly via a hidden pointer
+ /// with the specified alignment (0 indicates default alignment).
+ Indirect,
+
+ /// Ignore - Ignore the argument (treat as void). Useful for void and
+ /// empty structs.
+ Ignore,
+
+ /// Expand - Only valid for aggregate argument types. The structure should
+ /// be expanded into consecutive arguments for its constituent fields.
+ /// Currently expand is only allowed on structures whose fields
+ /// are all scalar types or are themselves expandable types.
+ Expand,
KindFirst=Direct, KindLast=Expand
};
@@ -75,11 +79,11 @@ namespace clang {
public:
ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
- static ABIArgInfo getDirect(const llvm::Type *T = 0) {
- return ABIArgInfo(Direct, T);
+ static ABIArgInfo getDirect(const llvm::Type *T = 0, unsigned Offset = 0) {
+ return ABIArgInfo(Direct, T, Offset);
}
static ABIArgInfo getExtend(const llvm::Type *T = 0) {
- return ABIArgInfo(Extend, T);
+ return ABIArgInfo(Extend, T, 0);
}
static ABIArgInfo getIgnore() {
return ABIArgInfo(Ignore);
@@ -103,6 +107,10 @@ namespace clang {
}
// Direct/Extend accessors
+ unsigned getDirectOffset() const {
+ assert((isDirect() || isExtend()) && "Not a direct or extend kind");
+ return UIntData;
+ }
const llvm::Type *getCoerceToType() const {
assert(canHaveCoerceToType() && "Invalid kind!");
return TypeData;
@@ -112,6 +120,7 @@ namespace clang {
assert(canHaveCoerceToType() && "Invalid kind!");
TypeData = T;
}
+
// Indirect accessors
unsigned getIndirectAlign() const {
assert(TheKind == Indirect && "Invalid kind!");
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index 94a2ae9ee65..2ba44714c71 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -869,7 +869,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Direct: {
// If we have the trivial case, handle it with no muss and fuss.
if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
- ArgI.getCoerceToType() == ConvertType(Ty)) {
+ ArgI.getCoerceToType() == ConvertType(Ty) &&
+ ArgI.getDirectOffset() == 0) {
assert(AI != Fn->arg_end() && "Argument mismatch!");
llvm::Value *V = AI;
@@ -896,13 +897,21 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
Alloca->setAlignment(AlignmentToUse);
llvm::Value *V = Alloca;
+ llvm::Value *Ptr = V; // Pointer to store into.
+
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = ArgI.getDirectOffset()) {
+ Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
+ Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
+ Ptr = Builder.CreateBitCast(Ptr,
+ llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
+ }
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
if (const llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
- llvm::Value *Ptr = V;
Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
@@ -915,7 +924,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Simple case, just do a coerced store of the argument into the alloca.
assert(AI != Fn->arg_end() && "Argument mismatch!");
AI->setName(Arg->getName() + ".coerce");
- CreateCoercedStore(AI++, V, /*DestIsVolatile=*/false, *this);
+ CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
}
@@ -992,8 +1001,8 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
case ABIArgInfo::Extend:
case ABIArgInfo::Direct:
-
- if (RetAI.getCoerceToType() == ConvertType(RetTy)) {
+ if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
+ RetAI.getDirectOffset() == 0) {
// The internal return value temp always will have pointer-to-return-type
// type, just do a load.
@@ -1019,7 +1028,16 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
}
}
} else {
- RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
+ llvm::Value *V = ReturnValue;
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = RetAI.getDirectOffset()) {
+ V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
+ V = Builder.CreateConstGEP1_32(V, Offs);
+ V = Builder.CreateBitCast(V,
+ llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
+ }
+
+ RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
}
break;
@@ -1142,7 +1160,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
- ArgInfo.getCoerceToType() == ConvertType(info_it->type)) {
+ ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
+ ArgInfo.getDirectOffset() == 0) {
if (RV.isScalar())
Args.push_back(RV.getScalarVal());
else
@@ -1161,6 +1180,15 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
} else
SrcPtr = RV.getAggregateAddr();
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = ArgInfo.getDirectOffset()) {
+ SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
+ SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
+ SrcPtr = Builder.CreateBitCast(SrcPtr,
+ llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
+
+ }
+
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
@@ -1280,7 +1308,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
- if (RetAI.getCoerceToType() == ConvertType(RetTy)) {
+ if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
+ RetAI.getDirectOffset() == 0) {
if (RetTy->isAnyComplexType()) {
llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
@@ -1308,7 +1337,16 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
DestIsVolatile = false;
}
- CreateCoercedStore(CI, DestPtr, DestIsVolatile, *this);
+ // If the value is offset in memory, apply the offset now.
+ llvm::Value *StorePtr = DestPtr;
+ if (unsigned Offs = RetAI.getDirectOffset()) {
+ StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
+ StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
+ StorePtr = Builder.CreateBitCast(StorePtr,
+ llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
+ }
+ CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
+
if (RetTy->isAnyComplexType())
return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
if (CodeGenFunction::hasAggregateLLVMType(RetTy))
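All three code paths above (function prolog, function epilog, and the call site) apply the same three-step adjustment when getDirectOffset() is nonzero: cast the memory pointer to i8*, advance it by the constant offset, and cast it to a pointer to the coerce-to type, so the coerced load or store touches bytes [Offs, Offs+size) instead of the start of the object. A standalone C++ analogue of what that adjustment does at the byte level (names invented; ordinary pointer arithmetic rather than IRBuilder calls):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Store a register-sized "coerced" value into an in-memory object at a byte
// offset: the equivalent of bitcast-to-i8*, constant GEP, bitcast to a
// pointer to the coerce-to type, followed by the coerced store.
static void storeAtByteOffset(void *Dest, unsigned Offs, uint64_t Coerced) {
  unsigned char *P = static_cast<unsigned char *>(Dest) + Offs; // i8* GEP
  std::memcpy(P, &Coerced, sizeof(Coerced));                    // coerced store
}

struct Empty {};
struct Padded {
  Empty Pad[8]; // a padding-only low eightbyte (e.g. empty classes)
  uint64_t Val; // the real data lives at byte offset 8
};

int main() {
  Padded P = {};
  storeAtByteOffset(&P, /*Offs=*/8, /*Coerced=*/42); // fill bytes 8..15
  std::printf("%llu\n", (unsigned long long)P.Val);  // prints 42
  return 0;
}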
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index be2e9540024..5ea77be07b7 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -1015,11 +1015,6 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
if (Lo == Memory || Hi == Memory)
break;
}
-
- // If this record has no fields, no bases, no vtable, but isn't empty,
- // classify as INTEGER.
- if (CXXRD->isEmpty() && Size)
- Current = Integer;
}
// Classify the fields one at a time, merging the results.
@@ -1387,13 +1382,18 @@ classifyReturnType(QualType RetTy) const {
// Check some invariants.
assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
- assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
const llvm::Type *ResType = 0;
switch (Lo) {
case NoClass:
- return ABIArgInfo::getIgnore();
+ if (Hi == NoClass)
+ return ABIArgInfo::getIgnore();
+ // If the low part is just padding, it takes no register, leave ResType
+ // null.
+ assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
+ "Unknown missing lo part");
+ break;
case SSEUp:
case X87Up:
@@ -1461,12 +1461,18 @@ classifyReturnType(QualType RetTy) const {
case Integer: {
const llvm::Type *HiType =
GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HiType at offset 8 in memory.
+ return ABIArgInfo::getDirect(HiType, 8);
+
ResType = llvm::StructType::get(getVMContext(), ResType, HiType, NULL);
break;
}
case SSE: {
const llvm::Type *HiType =
GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HiType at offset 8 in memory.
+ return ABIArgInfo::getDirect(HiType, 8);
+
ResType = llvm::StructType::get(getVMContext(), ResType, HiType,NULL);
break;
}
@@ -1490,6 +1496,9 @@ classifyReturnType(QualType RetTy) const {
if (Lo != X87) {
const llvm::Type *HiType =
GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HiType at offset 8 in memory.
+ return ABIArgInfo::getDirect(HiType, 8);
+
ResType = llvm::StructType::get(getVMContext(), ResType, HiType, NULL);
}
break;
@@ -1506,7 +1515,6 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
// Check some invariants.
// FIXME: Enforce these by construction.
assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
- assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
neededInt = 0;
@@ -1514,8 +1522,14 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
const llvm::Type *ResType = 0;
switch (Lo) {
case NoClass:
- return ABIArgInfo::getIgnore();
-
+ if (Hi == NoClass)
+ return ABIArgInfo::getIgnore();
+ // If the low part is just padding, it takes no register, leave ResType
+ // null.
+ assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
+ "Unknown missing lo part");
+ break;
+
// AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
// on the stack.
case Memory:
@@ -1579,6 +1593,10 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
// Pick an 8-byte type based on the preferred type.
const llvm::Type *HiType =
GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
+
+ if (Lo == NoClass) // Pass HiType at offset 8 in memory.
+ return ABIArgInfo::getDirect(HiType, 8);
+
ResType = llvm::StructType::get(getVMContext(), ResType, HiType, NULL);
break;
}
@@ -1589,6 +1607,10 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
case SSE: {
const llvm::Type *HiType =
GetSSETypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
+
+ if (Lo == NoClass) // Pass HiType at offset 8 in memory.
+ return ABIArgInfo::getDirect(HiType, 8);
+
ResType = llvm::StructType::get(getVMContext(), ResType, HiType, NULL);
++neededSSE;
break;
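Taken together, the classification changes above mean a "NoClass + something" pair is no longer ignored or hacked into INTEGER: if both halves are NoClass the value is still ignored, otherwise the high eightbyte alone is passed or returned direct at byte offset 8, and only it consumes a register. A minimal standalone sketch of that decision (names and the PassInfo summary are invented; this is not the real X86_64ABIInfo logic, which also tracks coerce-to types, X87, and SSEUp):

#include <cstdio>

enum Class { NoClass, Integer, SSE };

struct PassInfo {
  bool Ignored;        // true => nothing is passed at all
  unsigned ByteOffset; // offset of the register value in the memory image
  unsigned IntRegs;    // integer registers consumed
  unsigned SSERegs;    // SSE registers consumed
};

// Summarize how an eightbyte pair is passed, mirroring the new NoClass-lo
// handling: NoClass/NoClass is ignored; NoClass/{Integer,SSE} passes only
// the high eightbyte, direct at offset 8.
static PassInfo classifyPair(Class Lo, Class Hi) {
  PassInfo Info = {false, 0, 0, 0};
  if (Lo == NoClass && Hi == NoClass) {
    Info.Ignored = true; // ABIArgInfo::getIgnore()
    return Info;
  }
  if (Lo == Integer) ++Info.IntRegs;
  if (Lo == SSE)     ++Info.SSERegs;
  if (Hi == Integer) ++Info.IntRegs;
  if (Hi == SSE)     ++Info.SSERegs;
  if (Lo == NoClass)
    Info.ByteOffset = 8; // only the high eightbyte carries data
  return Info;
}

int main() {
  PassInfo P = classifyPair(NoClass, Integer); // the "NoClass INTEGER" case
  std::printf("ignored=%d offset=%u intregs=%u sseregs=%u\n",
              P.Ignored, P.ByteOffset, P.IntRegs, P.SSERegs);
  // ignored=0 offset=8 intregs=1 sseregs=0
  return 0;
}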