Diffstat
 clang/include/clang/AST/CharUnits.h | 20
 clang/include/clang/CodeGen/CGFunctionInfo.h | 24
 clang/lib/CodeGen/ABIInfo.h | 16
 clang/lib/CodeGen/Address.h | 119
 clang/lib/CodeGen/CGAtomic.cpp | 513
 clang/lib/CodeGen/CGBlocks.cpp | 777
 clang/lib/CodeGen/CGBlocks.h | 61
 clang/lib/CodeGen/CGBuilder.h | 255
 clang/lib/CodeGen/CGBuiltin.cpp | 337
 clang/lib/CodeGen/CGCUDANV.cpp | 22
 clang/lib/CodeGen/CGCXX.cpp | 3
 clang/lib/CodeGen/CGCXXABI.cpp | 53
 clang/lib/CodeGen/CGCXXABI.h | 60
 clang/lib/CodeGen/CGCall.cpp | 612
 clang/lib/CodeGen/CGCall.h | 25
 clang/lib/CodeGen/CGClass.cpp | 365
 clang/lib/CodeGen/CGCleanup.cpp | 137
 clang/lib/CodeGen/CGCleanup.h | 12
 clang/lib/CodeGen/CGDecl.cpp | 278
 clang/lib/CodeGen/CGDeclCXX.cpp | 31
 clang/lib/CodeGen/CGException.cpp | 73
 clang/lib/CodeGen/CGExpr.cpp | 1030
 clang/lib/CodeGen/CGExprAgg.cpp | 179
 clang/lib/CodeGen/CGExprCXX.cpp | 289
 clang/lib/CodeGen/CGExprComplex.cpp | 75
 clang/lib/CodeGen/CGExprConstant.cpp | 50
 clang/lib/CodeGen/CGExprScalar.cpp | 119
 clang/lib/CodeGen/CGObjC.cpp | 207
 clang/lib/CodeGen/CGObjCGNU.cpp | 252
 clang/lib/CodeGen/CGObjCMac.cpp | 381
 clang/lib/CodeGen/CGObjCRuntime.cpp | 11
 clang/lib/CodeGen/CGObjCRuntime.h | 29
 clang/lib/CodeGen/CGOpenMPRuntime.cpp | 537
 clang/lib/CodeGen/CGOpenMPRuntime.h | 47
 clang/lib/CodeGen/CGStmt.cpp | 46
 clang/lib/CodeGen/CGStmtOpenMP.cpp | 217
 clang/lib/CodeGen/CGVTables.cpp | 36
 clang/lib/CodeGen/CGValue.h | 175
 clang/lib/CodeGen/CodeGenFunction.cpp | 173
 clang/lib/CodeGen/CodeGenFunction.h | 543
 clang/lib/CodeGen/CodeGenModule.cpp | 94
 clang/lib/CodeGen/CodeGenModule.h | 118
 clang/lib/CodeGen/CodeGenTypeCache.h | 108
 clang/lib/CodeGen/ItaniumCXXABI.cpp | 368
 clang/lib/CodeGen/MicrosoftCXXABI.cpp | 406
 clang/lib/CodeGen/TargetInfo.cpp | 1528
 clang/test/CodeGen/aarch64-varargs.c | 57
 clang/test/CodeGen/arm-abi-vector.c | 122
 clang/test/CodeGen/arm-arguments.c | 12
 clang/test/CodeGen/arm64-abi-vector.c | 68
 clang/test/CodeGen/arm64-arguments.c | 56
 clang/test/CodeGen/arm64-be-hfa-vararg.c | 10
 clang/test/CodeGen/atomic-arm64.c | 12
 clang/test/CodeGen/block-byref-aggr.c | 4
 clang/test/CodeGen/c11atomics-ios.c | 18
 clang/test/CodeGen/c11atomics.c | 16
 clang/test/CodeGen/catch-undef-behavior.c | 2
 clang/test/CodeGen/exprs.c | 7
 clang/test/CodeGen/ext-vector-member-alignment.c | 14
 clang/test/CodeGen/mips-varargs.c | 210
 clang/test/CodeGen/object-size.c | 4
 clang/test/CodeGen/packed-arrays.c | 6
 clang/test/CodeGen/packed-structure.c | 4
 clang/test/CodeGen/ppc-varargs-struct.c | 128
 clang/test/CodeGen/ppc64-align-struct.c | 62
 clang/test/CodeGen/ppc64-complex-parms.c | 60
 clang/test/CodeGen/ppc64-struct-onefloat.c | 24
 clang/test/CodeGen/ppc64-varargs-complex.c | 58
 clang/test/CodeGen/ppc64le-varargs-complex.c | 50
 clang/test/CodeGen/sparcv9-abi.c | 12
 clang/test/CodeGen/tbaa-class.cpp | 60
 clang/test/CodeGen/tbaa.cpp | 76
 clang/test/CodeGen/vectorcall.c | 8
 clang/test/CodeGen/xcore-abi.c | 14
 clang/test/CodeGenCXX/alignment.cpp | 297
 clang/test/CodeGenCXX/arm.cpp | 26
 clang/test/CodeGenCXX/cxx0x-initializer-stdinitializerlist.cpp | 2
 clang/test/CodeGenCXX/cxx11-initializer-array-new.cpp | 10
 clang/test/CodeGenCXX/delete-two-arg.cpp | 4
 clang/test/CodeGenCXX/lambda-expressions.cpp | 2
 clang/test/CodeGenCXX/microsoft-abi-array-cookies.cpp | 8
 clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp | 2
 clang/test/CodeGenCXX/microsoft-abi-structors.cpp | 16
 clang/test/CodeGenCXX/microsoft-abi-try-throw.cpp | 2
 clang/test/CodeGenCXX/microsoft-abi-virtual-inheritance-vtordisps.cpp | 6
 clang/test/CodeGenCXX/microsoft-abi-virtual-inheritance.cpp | 4
 clang/test/CodeGenCXX/static-init-wasm.cpp | 4
 clang/test/CodeGenCXX/static-init.cpp | 6
 clang/test/CodeGenCXX/vararg-non-pod-ms-compat.cpp | 2
 clang/test/CodeGenCXX/wasm-args-returns.cpp | 12
 clang/test/CodeGenObjC/arc-captured-32bit-block-var-layout-2.m | 13
 clang/test/CodeGenObjC/arc-captured-32bit-block-var-layout.m | 32
 clang/test/CodeGenObjC/arc-captured-block-var-inlined-layout.m | 34
 clang/test/CodeGenObjC/arc-captured-block-var-layout.m | 32
 clang/test/CodeGenObjC/arc-literals.m | 14
 clang/test/CodeGenObjC/arc.m | 6
 clang/test/CodeGenObjC/debug-info-block-captured-self.m | 6
 clang/test/CodeGenObjC/ivar-base-as-invariant-load.m | 6
 clang/test/CodeGenObjC/ivar-invariant.m | 8
 clang/test/CodeGenObjC/mrr-captured-block-var-inlined-layout.m | 19
 clang/test/CodeGenObjC/selector-ref-invariance.m | 2
 clang/test/CodeGenObjCXX/arc-new-delete.mm | 27
 clang/test/CodeGenObjCXX/literals.mm | 8
 clang/test/CodeGenObjCXX/property-lvalue-capture.mm | 6
 clang/test/CodeGenObjCXX/property-object-conditional-exp.mm | 4
 clang/test/OpenMP/for_reduction_codegen.cpp | 52
 clang/test/OpenMP/parallel_codegen.cpp | 4
 clang/test/OpenMP/parallel_reduction_codegen.cpp | 52
 clang/test/OpenMP/sections_reduction_codegen.cpp | 28
 clang/test/OpenMP/task_codegen.cpp | 76
 110 files changed, 7234 insertions(+), 5583 deletions(-)
diff --git a/clang/include/clang/AST/CharUnits.h b/clang/include/clang/AST/CharUnits.h
index 72ca9f5cd67..1d22bccd2e8 100644
--- a/clang/include/clang/AST/CharUnits.h
+++ b/clang/include/clang/AST/CharUnits.h
@@ -130,6 +130,14 @@ namespace clang {
return (Quantity & -Quantity) == Quantity;
}
+ /// Test whether this is a multiple of the other value.
+ ///
+ /// Among other things, this promises that
+ /// self.RoundUpToAlignment(N) will just return self.
+ bool isMultipleOf(CharUnits N) const {
+ return (*this % N) == 0;
+ }
+
// Arithmetic operators.
CharUnits operator* (QuantityType N) const {
return CharUnits(Quantity * N);
@@ -172,10 +180,20 @@ namespace clang {
/// Given that this is a non-zero alignment value, what is the
/// alignment at the given offset?
- CharUnits alignmentAtOffset(CharUnits offset) {
+ CharUnits alignmentAtOffset(CharUnits offset) const {
+ assert(Quantity != 0 && "offsetting from unknown alignment?");
return CharUnits(llvm::MinAlign(Quantity, offset.Quantity));
}
+ /// Given that this is the alignment of the first element of an
+ /// array, return the minimum alignment of any element in the array.
+ CharUnits alignmentOfArrayElement(CharUnits elementSize) const {
+ // Since we don't track offsetted alignments, the alignment of
+ // the second element (or any odd element) will be minimally
+ // aligned.
+ return alignmentAtOffset(elementSize);
+ }
+
}; // class CharUnit
} // namespace clang
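The two new CharUnits helpers compose with the existing alignmentAtOffset. A minimal sketch of their arithmetic (illustrative values, not part of the patch), assuming only the fromQuantity factory and the API shown above:

    // Hypothetical 12-byte array element whose first element is 8-aligned.
    CharUnits ElemSize   = CharUnits::fromQuantity(12);
    CharUnits FirstAlign = CharUnits::fromQuantity(8);

    // 12 % 8 != 0, so RoundUpToAlignment(FirstAlign) would change ElemSize.
    bool Exact = ElemSize.isMultipleOf(FirstAlign);                      // false

    // Element 1 sits at offset 12; llvm::MinAlign(8, 12) == 4, so no
    // element of the array is guaranteed more than 4-byte alignment.
    CharUnits ElemAlign = FirstAlign.alignmentOfArrayElement(ElemSize);  // 4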
diff --git a/clang/include/clang/CodeGen/CGFunctionInfo.h b/clang/include/clang/CodeGen/CGFunctionInfo.h
index e32fb145856..a3ce8c2b9ff 100644
--- a/clang/include/clang/CodeGen/CGFunctionInfo.h
+++ b/clang/include/clang/CodeGen/CGFunctionInfo.h
@@ -17,6 +17,7 @@
#define LLVM_CLANG_CODEGEN_CGFUNCTIONINFO_H
#include "clang/AST/CanonicalType.h"
+#include "clang/AST/CharUnits.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/FoldingSet.h"
#include <cassert>
@@ -126,7 +127,7 @@ public:
static ABIArgInfo getIgnore() {
return ABIArgInfo(Ignore);
}
- static ABIArgInfo getIndirect(unsigned Alignment, bool ByVal = true,
+ static ABIArgInfo getIndirect(CharUnits Alignment, bool ByVal = true,
bool Realign = false,
llvm::Type *Padding = nullptr) {
auto AI = ABIArgInfo(Indirect);
@@ -137,7 +138,7 @@ public:
AI.setPaddingType(Padding);
return AI;
}
- static ABIArgInfo getIndirectInReg(unsigned Alignment, bool ByVal = true,
+ static ABIArgInfo getIndirectInReg(CharUnits Alignment, bool ByVal = true,
bool Realign = false) {
auto AI = getIndirect(Alignment, ByVal, Realign);
AI.setInReg(true);
@@ -211,20 +212,20 @@ public:
}
// Indirect accessors
- unsigned getIndirectAlign() const {
+ CharUnits getIndirectAlign() const {
assert(isIndirect() && "Invalid kind!");
- return IndirectAlign;
+ return CharUnits::fromQuantity(IndirectAlign);
}
- void setIndirectAlign(unsigned IA) {
+ void setIndirectAlign(CharUnits IA) {
assert(isIndirect() && "Invalid kind!");
- IndirectAlign = IA;
+ IndirectAlign = IA.getQuantity();
}
bool getIndirectByVal() const {
assert(isIndirect() && "Invalid kind!");
return IndirectByVal;
}
- void setIndirectByVal(unsigned IBV) {
+ void setIndirectByVal(bool IBV) {
assert(isIndirect() && "Invalid kind!");
IndirectByVal = IBV;
}
@@ -370,6 +371,7 @@ class CGFunctionInfo : public llvm::FoldingSetNode {
/// The struct representing all arguments passed in memory. Only used when
/// passing non-trivial types with inalloca. Not part of the profile.
llvm::StructType *ArgStruct;
+ unsigned ArgStructAlign;
unsigned NumArgs;
ArgInfo *getArgsBuffer() {
@@ -463,7 +465,13 @@ public:
/// \brief Get the struct type used to represent all the arguments in memory.
llvm::StructType *getArgStruct() const { return ArgStruct; }
- void setArgStruct(llvm::StructType *Ty) { ArgStruct = Ty; }
+ CharUnits getArgStructAlignment() const {
+ return CharUnits::fromQuantity(ArgStructAlign);
+ }
+ void setArgStruct(llvm::StructType *Ty, CharUnits Align) {
+ ArgStruct = Ty;
+ ArgStructAlign = Align.getQuantity();
+ }
void Profile(llvm::FoldingSetNodeID &ID) {
ID.AddInteger(getASTCallingConvention());
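With alignment now expressed as CharUnits at the interface (the raw byte quantity is still what gets stored), a call site reads as below. A sketch assuming a hypothetical 16-byte indirect argument, not taken from the patch:

    using clang::CharUnits;
    using clang::CodeGen::ABIArgInfo;

    // Indirect argument, passed byval, realigned to 16 bytes.
    ABIArgInfo AI = ABIArgInfo::getIndirect(CharUnits::fromQuantity(16),
                                            /*ByVal=*/true);
    assert(AI.getIndirectAlign() == CharUnits::fromQuantity(16));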
diff --git a/clang/lib/CodeGen/ABIInfo.h b/clang/lib/CodeGen/ABIInfo.h
index cc8652e169d..ece36e8879c 100644
--- a/clang/lib/CodeGen/ABIInfo.h
+++ b/clang/lib/CodeGen/ABIInfo.h
@@ -25,6 +25,8 @@ namespace clang {
class TargetInfo;
namespace CodeGen {
+ class ABIArgInfo;
+ class Address;
class CGCXXABI;
class CGFunctionInfo;
class CodeGenFunction;
@@ -79,8 +81,9 @@ namespace clang {
// the ABI information any lower than CodeGen. Of course, for
// VAArg handling it has to be at this level; there is no way to
// abstract this out.
- virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGen::CodeGenFunction &CGF) const = 0;
+ virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF,
+ CodeGen::Address VAListAddr,
+ QualType Ty) const = 0;
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const;
@@ -92,6 +95,15 @@ namespace clang {
bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
uint64_t &Members) const;
+ /// A convenience method to return an indirect ABIArgInfo with an
+ /// expected alignment equal to the ABI alignment of the given type.
+ CodeGen::ABIArgInfo
+ getNaturalAlignIndirect(QualType Ty, bool ByRef = true,
+ bool Realign = false,
+ llvm::Type *Padding = nullptr) const;
+
+ CodeGen::ABIArgInfo
+ getNaturalAlignIndirectInReg(QualType Ty, bool Realign = false) const;
};
} // end namespace clang
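A sketch of how a target might use the new convenience helper; MyTargetABIInfo and its aggregate predicate are hypothetical stand-ins for a concrete target, not code from this patch:

    // Pass aggregates indirectly at the natural ABI alignment of the type.
    ABIArgInfo MyTargetABIInfo::classifyArgumentType(QualType Ty) const {
      if (isAggregateTypeForABI(Ty))         // hypothetical predicate
        return getNaturalAlignIndirect(Ty);  // ByRef defaults to true
      return ABIArgInfo::getDirect();
    }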
diff --git a/clang/lib/CodeGen/Address.h b/clang/lib/CodeGen/Address.h
new file mode 100644
index 00000000000..b1aa6307e36
--- /dev/null
+++ b/clang/lib/CodeGen/Address.h
@@ -0,0 +1,119 @@
+//===-- Address.h - An aligned address -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class provides a simple wrapper for a pair of a pointer and an
+// alignment.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_ADDRESS_H
+#define LLVM_CLANG_LIB_CODEGEN_ADDRESS_H
+
+#include "llvm/IR/Constants.h"
+#include "clang/AST/CharUnits.h"
+
+namespace clang {
+namespace CodeGen {
+
+/// An aligned address.
+class Address {
+ llvm::Value *Pointer;
+ CharUnits Alignment;
+public:
+ Address(llvm::Value *pointer, CharUnits alignment)
+ : Pointer(pointer), Alignment(alignment) {
+ assert((!alignment.isZero() || pointer == nullptr) &&
+ "creating valid address with invalid alignment");
+ }
+
+ static Address invalid() { return Address(nullptr, CharUnits()); }
+ bool isValid() const { return Pointer != nullptr; }
+
+ llvm::Value *getPointer() const {
+ assert(isValid());
+ return Pointer;
+ }
+
+ /// Return the type of the pointer value.
+ llvm::PointerType *getType() const {
+ return llvm::cast<llvm::PointerType>(getPointer()->getType());
+ }
+
+ /// Return the type of the values stored in this address.
+ ///
+ /// When IR pointer types lose their element type, we should simply
+ /// store it in Address instead for the convenience of writing code.
+ llvm::Type *getElementType() const {
+ return getType()->getElementType();
+ }
+
+ /// Return the address space that this address resides in.
+ unsigned getAddressSpace() const {
+ return getType()->getAddressSpace();
+ }
+
+ /// Return the IR name of the pointer value.
+ llvm::StringRef getName() const {
+ return getPointer()->getName();
+ }
+
+ /// Return the alignment of this pointer.
+ CharUnits getAlignment() const {
+ assert(isValid());
+ return Alignment;
+ }
+};
+
+/// A specialization of Address that requires the address to be an
+/// LLVM Constant.
+class ConstantAddress : public Address {
+public:
+ ConstantAddress(llvm::Constant *pointer, CharUnits alignment)
+ : Address(pointer, alignment) {}
+
+ static ConstantAddress invalid() {
+ return ConstantAddress(nullptr, CharUnits());
+ }
+
+ llvm::Constant *getPointer() const {
+ return llvm::cast<llvm::Constant>(Address::getPointer());
+ }
+
+ ConstantAddress getBitCast(llvm::Type *ty) const {
+ return ConstantAddress(llvm::ConstantExpr::getBitCast(getPointer(), ty),
+ getAlignment());
+ }
+
+ ConstantAddress getElementBitCast(llvm::Type *ty) const {
+ return getBitCast(ty->getPointerTo(getAddressSpace()));
+ }
+
+ static bool isaImpl(Address addr) {
+ return llvm::isa<llvm::Constant>(addr.getPointer());
+ }
+ static ConstantAddress castImpl(Address addr) {
+ return ConstantAddress(llvm::cast<llvm::Constant>(addr.getPointer()),
+ addr.getAlignment());
+ }
+};
+
+}
+}
+
+namespace llvm {
+ // Present a minimal LLVM-like casting interface.
+ template <class U> inline U cast(clang::CodeGen::Address addr) {
+ return U::castImpl(addr);
+ }
+ template <class U> inline bool isa(clang::CodeGen::Address addr) {
+ return U::isaImpl(addr);
+ }
+}
+
+#endif
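Taken together, the class and the casting shim at the bottom support usage like the following sketch, where Ptr and DestTy are hypothetical values from the surrounding CodeGen context:

    using namespace clang::CodeGen;

    // Pair a pointer with the alignment CodeGen has proven for it.
    Address Addr(Ptr, clang::CharUnits::fromQuantity(8));
    assert(Addr.isValid());
    llvm::Type *EltTy = Addr.getElementType();  // pointee type, kept explicit

    // The llvm::isa/llvm::cast overloads make Address feel like an LLVM
    // value handle at call sites:
    if (llvm::isa<ConstantAddress>(Addr)) {
      ConstantAddress CA = llvm::cast<ConstantAddress>(Addr);
      CA = CA.getElementBitCast(DestTy);  // new element type, same alignment
    }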
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index fc4b66bbbbc..4e457fd43f8 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -80,7 +80,7 @@ namespace {
AtomicSizeInBits = C.toBits(
C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
.RoundUpToAlignment(lvalue.getAlignment()));
- auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldAddr());
+ auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
auto OffsetInChars =
(C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
lvalue.getAlignment();
@@ -94,8 +94,9 @@ namespace {
BFI.Offset = Offset;
BFI.StorageSize = AtomicSizeInBits;
BFI.StorageOffset += OffsetInChars;
- LVal = LValue::MakeBitfield(Addr, BFI, lvalue.getType(),
- lvalue.getAlignment());
+ LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
+ BFI, lvalue.getType(),
+ lvalue.getAlignmentSource());
LVal.setTBAAInfo(lvalue.getTBAAInfo());
AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
if (AtomicTy.isNull()) {
@@ -118,10 +119,8 @@ namespace {
ValueTy = lvalue.getType();
ValueSizeInBits = C.getTypeSize(ValueTy);
AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
- lvalue.getType(), lvalue.getExtVectorAddr()
- ->getType()
- ->getPointerElementType()
- ->getVectorNumElements());
+ lvalue.getType(), lvalue.getExtVectorAddress()
+ .getElementType()->getVectorNumElements());
AtomicSizeInBits = C.getTypeSize(AtomicTy);
AtomicAlign = ValueAlign = lvalue.getAlignment();
LVal = lvalue;
@@ -139,15 +138,22 @@ namespace {
TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
bool shouldUseLibcall() const { return UseLibcall; }
const LValue &getAtomicLValue() const { return LVal; }
- llvm::Value *getAtomicAddress() const {
+ llvm::Value *getAtomicPointer() const {
if (LVal.isSimple())
- return LVal.getAddress();
+ return LVal.getPointer();
else if (LVal.isBitField())
- return LVal.getBitFieldAddr();
+ return LVal.getBitFieldPointer();
else if (LVal.isVectorElt())
- return LVal.getVectorAddr();
+ return LVal.getVectorPointer();
assert(LVal.isExtVectorElt());
- return LVal.getExtVectorAddr();
+ return LVal.getExtVectorPointer();
+ }
+ Address getAtomicAddress() const {
+ return Address(getAtomicPointer(), getAtomicAlignment());
+ }
+
+ Address getAtomicAddressAsAtomicIntPointer() const {
+ return emitCastToAtomicIntPointer(getAtomicAddress());
}
/// Is the atomic size larger than the underlying value type?
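The accessor split is deliberate: libcall paths want the raw pointer (to cast to void*), while inline paths want the Address so the builder can attach the alignment. A sketch, with Atomics standing for an AtomicInfo instance:

    llvm::Value *Raw = Atomics.getAtomicPointer();  // e.g. for libcall args
    Address Aligned  = Atomics.getAtomicAddress();  // e.g. for CreateLoad/Store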
@@ -169,11 +175,11 @@ namespace {
/// Cast the given pointer to an integer pointer suitable for
/// atomic operations.
- llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;
+ Address emitCastToAtomicIntPointer(Address addr) const;
/// Turn an atomic-layout object into an r-value.
- RValue convertTempToRValue(llvm::Value *addr, AggValueSlot resultSlot,
- SourceLocation loc, bool AsValue) const;
+ RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
+ SourceLocation loc, bool AsValue) const;
/// \brief Converts a rvalue to integer value.
llvm::Value *convertRValueToInt(RValue RVal) const;
@@ -188,12 +194,12 @@ namespace {
/// Project an l-value down to the value field.
LValue projectValue() const {
assert(LVal.isSimple());
- llvm::Value *addr = getAtomicAddress();
+ Address addr = getAtomicAddress();
if (hasPadding())
- addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0);
+ addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
- return LValue::MakeAddr(addr, getValueType(), LVal.getAlignment(),
- CGF.getContext(), LVal.getTBAAInfo());
+ return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
+ LVal.getAlignmentSource(), LVal.getTBAAInfo());
}
/// \brief Emits atomic load.
@@ -228,7 +234,7 @@ namespace {
bool IsVolatile);
/// Materialize an atomic r-value in atomic-layout memory.
- llvm::Value *materializeRValue(RValue rvalue) const;
+ Address materializeRValue(RValue rvalue) const;
/// \brief Translates LLVM atomic ordering to GNU atomic ordering for
/// libcalls.
@@ -239,7 +245,7 @@ namespace {
bool requiresMemSetZero(llvm::Type *type) const;
/// \brief Creates temp alloca for intermediate operations on atomic value.
- llvm::Value *CreateTempAlloca() const;
+ Address CreateTempAlloca() const;
/// \brief Emits atomic load as a libcall.
void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
@@ -294,16 +300,16 @@ AtomicInfo::translateAtomicOrdering(const llvm::AtomicOrdering AO) {
llvm_unreachable("Unhandled AtomicOrdering");
}
-llvm::Value *AtomicInfo::CreateTempAlloca() const {
- auto *TempAlloca = CGF.CreateMemTemp(
+Address AtomicInfo::CreateTempAlloca() const {
+ Address TempAlloca = CGF.CreateMemTemp(
(LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
: AtomicTy,
+ getAtomicAlignment(),
"atomic-temp");
- TempAlloca->setAlignment(getAtomicAlignment().getQuantity());
// Cast to pointer to value type for bitfields.
if (LVal.isBitField())
return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- TempAlloca, getAtomicAddress()->getType());
+ TempAlloca, getAtomicAddress().getType());
return TempAlloca;
}
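Note the pattern: the alignment is handed to CreateMemTemp up front, so the temporary is born as an Address and no setAlignment call is needed afterwards. A sketch of the consuming side, inside AtomicInfo:

    Address Tmp = CreateTempAlloca();                 // already atomic-aligned
    llvm::LoadInst *L = CGF.Builder.CreateLoad(Tmp);  // alignment comes from Tmp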
@@ -351,7 +357,7 @@ bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
assert(LVal.isSimple());
- llvm::Value *addr = LVal.getAddress();
+ llvm::Value *addr = LVal.getPointer();
if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
return false;
@@ -363,19 +369,17 @@ bool AtomicInfo::emitMemSetZeroIfNecessary() const {
}
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
- llvm::Value *Dest, llvm::Value *Ptr,
- llvm::Value *Val1, llvm::Value *Val2,
- uint64_t Size, unsigned Align,
+ Address Dest, Address Ptr,
+ Address Val1, Address Val2,
+ uint64_t Size,
llvm::AtomicOrdering SuccessOrder,
llvm::AtomicOrdering FailureOrder) {
// Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
- llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
- Expected->setAlignment(Align);
- llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
- Desired->setAlignment(Align);
+ llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
+ llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
- Ptr, Expected, Desired, SuccessOrder, FailureOrder);
+ Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder);
Pair->setVolatile(E->isVolatile());
Pair->setWeak(IsWeak);
@@ -400,26 +404,24 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
CGF.Builder.SetInsertPoint(StoreExpectedBB);
// Update the memory at Expected with Old's value.
- llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
- StoreExpected->setAlignment(Align);
+ CGF.Builder.CreateStore(Old, Val1);
// Finally, branch to the exit point.
CGF.Builder.CreateBr(ContinueBB);
CGF.Builder.SetInsertPoint(ContinueBB);
// Update the memory at Dest with Cmp's value.
CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
- return;
}
/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
- bool IsWeak, llvm::Value *Dest,
- llvm::Value *Ptr, llvm::Value *Val1,
- llvm::Value *Val2,
+ bool IsWeak, Address Dest,
+ Address Ptr, Address Val1,
+ Address Val2,
llvm::Value *FailureOrderVal,
- uint64_t Size, unsigned Align,
+ uint64_t Size,
llvm::AtomicOrdering SuccessOrder) {
llvm::AtomicOrdering FailureOrder;
if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
@@ -440,7 +442,7 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
FailureOrder =
llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
}
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size,
SuccessOrder, FailureOrder);
return;
}
@@ -465,13 +467,13 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
// doesn't fold to a constant for the ordering.
CGF.Builder.SetInsertPoint(MonotonicBB);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
- Size, Align, SuccessOrder, llvm::Monotonic);
+ Size, SuccessOrder, llvm::Monotonic);
CGF.Builder.CreateBr(ContBB);
if (AcquireBB) {
CGF.Builder.SetInsertPoint(AcquireBB);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
- Size, Align, SuccessOrder, llvm::Acquire);
+ Size, SuccessOrder, llvm::Acquire);
CGF.Builder.CreateBr(ContBB);
SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
AcquireBB);
@@ -481,7 +483,7 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
if (SeqCstBB) {
CGF.Builder.SetInsertPoint(SeqCstBB);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
- Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
+ Size, SuccessOrder, llvm::SequentiallyConsistent);
CGF.Builder.CreateBr(ContBB);
SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
SeqCstBB);
@@ -490,11 +492,10 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
CGF.Builder.SetInsertPoint(ContBB);
}
-static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
- llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
+static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
+ Address Ptr, Address Val1, Address Val2,
llvm::Value *IsWeak, llvm::Value *FailureOrder,
- uint64_t Size, unsigned Align,
- llvm::AtomicOrdering Order) {
+ uint64_t Size, llvm::AtomicOrdering Order) {
llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
@@ -504,17 +505,17 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Align, Order);
+ FailureOrder, Size, Order);
return;
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Align, Order);
+ FailureOrder, Size, Order);
return;
case AtomicExpr::AO__atomic_compare_exchange:
case AtomicExpr::AO__atomic_compare_exchange_n: {
if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
- Val1, Val2, FailureOrder, Size, Align, Order);
+ Val1, Val2, FailureOrder, Size, Order);
} else {
// Create all the relevant BB's
llvm::BasicBlock *StrongBB =
@@ -528,12 +529,12 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
CGF.Builder.SetInsertPoint(StrongBB);
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Align, Order);
+ FailureOrder, Size, Order);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(WeakBB);
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Align, Order);
+ FailureOrder, Size, Order);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(ContBB);
@@ -545,22 +546,18 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
case AtomicExpr::AO__atomic_load: {
llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
Load->setAtomic(Order);
- Load->setAlignment(Size);
Load->setVolatile(E->isVolatile());
- llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
- StoreDest->setAlignment(Align);
+ CGF.Builder.CreateStore(Load, Dest);
return;
}
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_store_n: {
- assert(!Dest && "Store does not return a value");
- llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
- LoadVal1->setAlignment(Align);
+ assert(!Dest.isValid() && "Store does not return a value");
+ llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
Store->setAtomic(Order);
- Store->setAlignment(Size);
Store->setVolatile(E->isVolatile());
return;
}
@@ -619,10 +616,9 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
break;
}
- llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
- LoadVal1->setAlignment(Align);
+ llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
llvm::AtomicRMWInst *RMWI =
- CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
+ CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order);
RMWI->setVolatile(E->isVolatile());
// For __atomic_*_fetch operations, perform the operation again to
@@ -632,15 +628,14 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
Result = CGF.Builder.CreateNot(Result);
- llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
- StoreDest->setAlignment(Align);
+ CGF.Builder.CreateStore(Result, Dest);
}
// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
-static llvm::Value *
+static Address
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
- llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
+ Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
/*Init*/ true);
return DeclPtr;
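EmitValToTemp likewise now yields an Address whose alignment is whatever CreateMemTemp chose for the type, so callers can load from it without restating the alignment. A sketch:

    Address Tmp = EmitValToTemp(CGF, E->getVal2());
    llvm::Value *V = CGF.Builder.CreateLoad(Tmp);  // carries Tmp's alignment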
@@ -652,14 +647,15 @@ AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
SourceLocation Loc, CharUnits SizeInChars) {
if (UseOptimizedLibcall) {
// Load value and pass it to the function directly.
- unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
+ CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
ValTy =
CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
SizeInBits)->getPointerTo();
- Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false,
- Align, CGF.getContext().getPointerType(ValTy),
+ Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
+ Val = CGF.EmitLoadOfScalar(Ptr, false,
+ CGF.getContext().getPointerType(ValTy),
Loc);
// Coerce the value into an appropriately sized integer type.
Args.add(RValue::get(Val), ValTy);
@@ -670,27 +666,27 @@ AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
}
}
-RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
+RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, Address Dest) {
QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
QualType MemTy = AtomicTy;
if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
MemTy = AT->getValueType();
- CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
+ CharUnits sizeChars, alignChars;
+ std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
uint64_t Size = sizeChars.getQuantity();
- CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
- unsigned Align = alignChars.getQuantity();
- unsigned MaxInlineWidthInBits =
- getTarget().getMaxAtomicInlineWidth();
- bool UseLibcall = (Size != Align ||
+ unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
+ bool UseLibcall = (sizeChars != alignChars ||
getContext().toBits(sizeChars) > MaxInlineWidthInBits);
- llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
- *Val2 = nullptr;
- llvm::Value *Ptr = EmitScalarExpr(E->getPtr());
+ llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
+
+ Address Val1 = Address::invalid();
+ Address Val2 = Address::invalid();
+ Address Ptr(EmitScalarExpr(E->getPtr()), alignChars);
if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
- assert(!Dest && "Init does not return a value");
- LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
+ assert(!Dest.isValid() && "Init does not return a value");
+ LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
EmitAtomicInit(E->getVal1(), lvalue);
return RValue::get(nullptr);
}
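The libcall decision now compares CharUnits directly. A sketch restating the condition, with Ctx and Target as stand-ins for getContext() and getTarget():

    CharUnits sizeChars, alignChars;
    std::tie(sizeChars, alignChars) = Ctx.getTypeInfoInChars(AtomicTy);
    bool UseLibcall =
        sizeChars != alignChars ||                                  // under-aligned
        Ctx.toBits(sizeChars) > Target.getMaxAtomicInlineWidth();   // too wide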
@@ -706,25 +702,25 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
break;
case AtomicExpr::AO__atomic_load:
- Dest = EmitScalarExpr(E->getVal1());
+ Dest = EmitPointerWithAlignment(E->getVal1());
break;
case AtomicExpr::AO__atomic_store:
- Val1 = EmitScalarExpr(E->getVal1());
+ Val1 = EmitPointerWithAlignment(E->getVal1());
break;
case AtomicExpr::AO__atomic_exchange:
- Val1 = EmitScalarExpr(E->getVal1());
- Dest = EmitScalarExpr(E->getVal2());
+ Val1 = EmitPointerWithAlignment(E->getVal1());
+ Dest = EmitPointerWithAlignment(E->getVal2());
break;
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__atomic_compare_exchange_n:
case AtomicExpr::AO__atomic_compare_exchange:
- Val1 = EmitScalarExpr(E->getVal1());
+ Val1 = EmitPointerWithAlignment(E->getVal1());
if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
- Val2 = EmitScalarExpr(E->getVal2());
+ Val2 = EmitPointerWithAlignment(E->getVal2());
else
Val2 = EmitValToTemp(*this, E->getVal2());
OrderFail = EmitScalarExpr(E->getOrderFail());
@@ -744,8 +740,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
CharUnits PointeeIncAmt =
getContext().getTypeSizeInChars(MemTy->getPointeeType());
Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
- Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
- EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
+ auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
+ Val1 = Temp;
+ EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
break;
}
// Fall through.
@@ -775,7 +772,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
QualType RValTy = E->getType().getUnqualifiedType();
auto GetDest = [&] {
- if (!RValTy->isVoidType() && !Dest) {
+ if (!RValTy->isVoidType() && !Dest.isValid()) {
Dest = CreateMemTemp(RValTy, ".atomicdst");
}
return Dest;
@@ -835,7 +832,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
getContext().getSizeType());
}
// Atomic address is the first or second parameter
- Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(Ptr.getPointer())),
+ getContext().VoidPtrTy);
std::string LibCallName;
QualType LoweredMemTy =
@@ -860,9 +858,10 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
LibCallName = "__atomic_compare_exchange";
RetTy = getContext().BoolTy;
HaveRetTy = true;
- Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
- E->getExprLoc(), sizeChars);
+ Args.add(RValue::get(EmitCastToVoidPtr(Val1.getPointer())),
+ getContext().VoidPtrTy);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
Args.add(RValue::get(Order), getContext().IntTy);
Order = OrderFail;
break;
@@ -873,8 +872,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__atomic_exchange:
LibCallName = "__atomic_exchange";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
break;
// void __atomic_store(size_t size, void *mem, void *val, int order)
// void __atomic_store_N(T *mem, T val, int order)
@@ -884,8 +883,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
LibCallName = "__atomic_store";
RetTy = getContext().VoidTy;
HaveRetTy = true;
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
break;
// void __atomic_load(size_t size, void *mem, void *return, int order)
// T __atomic_load_N(T *mem, int order)
@@ -898,79 +897,79 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
LibCallName = "__atomic_fetch_add";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ LoweredMemTy, E->getExprLoc(), sizeChars);
break;
// T __atomic_fetch_and_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
LibCallName = "__atomic_fetch_and";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
break;
// T __atomic_fetch_or_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
LibCallName = "__atomic_fetch_or";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
break;
// T __atomic_fetch_sub_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
LibCallName = "__atomic_fetch_sub";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ LoweredMemTy, E->getExprLoc(), sizeChars);
break;
// T __atomic_fetch_xor_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_xor:
LibCallName = "__atomic_fetch_xor";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
break;
// T __atomic_fetch_nand_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_fetch_nand:
LibCallName = "__atomic_fetch_nand";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
break;
// T __atomic_add_fetch_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_add_fetch:
LibCallName = "__atomic_add_fetch";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ LoweredMemTy, E->getExprLoc(), sizeChars);
break;
// T __atomic_and_fetch_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_and_fetch:
LibCallName = "__atomic_and_fetch";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
break;
// T __atomic_or_fetch_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_or_fetch:
LibCallName = "__atomic_or_fetch";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
break;
// T __atomic_sub_fetch_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_sub_fetch:
LibCallName = "__atomic_sub_fetch";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ LoweredMemTy, E->getExprLoc(), sizeChars);
break;
// T __atomic_xor_fetch_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_xor_fetch:
LibCallName = "__atomic_xor_fetch";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
break;
// T __atomic_nand_fetch_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_nand_fetch:
LibCallName = "__atomic_nand_fetch";
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc(), sizeChars);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
+ MemTy, E->getExprLoc(), sizeChars);
break;
}
@@ -987,7 +986,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
} else {
// Value is returned through parameter before the order.
RetTy = getContext().VoidTy;
- Args.add(RValue::get(EmitCastToVoidPtr(Dest)), getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
+ getContext().VoidPtrTy);
}
}
// order is always the last parameter
@@ -1005,10 +1005,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
// expected an out-param.
if (UseOptimizedLibcall) {
llvm::Value *ResVal = Res.getScalarVal();
- llvm::StoreInst *StoreDest = Builder.CreateStore(
- ResVal,
+ Builder.CreateStore(ResVal,
Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
- StoreDest->setAlignment(Align);
}
return convertTempToRValue(Dest, RValTy, E->getExprLoc());
}
@@ -1022,12 +1020,12 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
llvm::Type *ITy =
llvm::IntegerType::get(getLLVMContext(), Size * 8);
- llvm::Value *OrigDest = GetDest();
+ Address OrigDest = GetDest();
Ptr = Builder.CreateBitCast(
- Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
- if (Val1) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
- if (Val2) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
- if (Dest && !E->isCmpXChg())
+ Ptr, ITy->getPointerTo(Ptr.getType()->getPointerAddressSpace()));
+ if (Val1.isValid()) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
+ if (Val2.isValid()) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
+ if (Dest.isValid() && !E->isCmpXChg())
Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());
if (isa<llvm::ConstantInt>(Order)) {
@@ -1035,30 +1033,30 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
switch (ord) {
case AtomicExpr::AO_ABI_memory_order_relaxed:
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::Monotonic);
+ Size, llvm::Monotonic);
break;
case AtomicExpr::AO_ABI_memory_order_consume:
case AtomicExpr::AO_ABI_memory_order_acquire:
if (IsStore)
break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::Acquire);
+ Size, llvm::Acquire);
break;
case AtomicExpr::AO_ABI_memory_order_release:
if (IsLoad)
break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::Release);
+ Size, llvm::Release);
break;
case AtomicExpr::AO_ABI_memory_order_acq_rel:
if (IsLoad || IsStore)
break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::AcquireRelease);
+ Size, llvm::AcquireRelease);
break;
case AtomicExpr::AO_ABI_memory_order_seq_cst:
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::SequentiallyConsistent);
+ Size, llvm::SequentiallyConsistent);
break;
default: // invalid order
// We should not ever get here normally, but it's hard to
@@ -1096,12 +1094,12 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
// Emit all the different atomics
Builder.SetInsertPoint(MonotonicBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::Monotonic);
+ Size, llvm::Monotonic);
Builder.CreateBr(ContBB);
if (!IsStore) {
Builder.SetInsertPoint(AcquireBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::Acquire);
+ Size, llvm::Acquire);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
AcquireBB);
@@ -1111,7 +1109,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
if (!IsLoad) {
Builder.SetInsertPoint(ReleaseBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::Release);
+ Size, llvm::Release);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
ReleaseBB);
@@ -1119,14 +1117,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
if (!IsLoad && !IsStore) {
Builder.SetInsertPoint(AcqRelBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::AcquireRelease);
+ Size, llvm::AcquireRelease);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
AcqRelBB);
}
Builder.SetInsertPoint(SeqCstBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
- Size, Align, llvm::SequentiallyConsistent);
+ Size, llvm::SequentiallyConsistent);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
SeqCstBB);
@@ -1138,44 +1136,45 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
}
-llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
+Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
unsigned addrspace =
- cast<llvm::PointerType>(addr->getType())->getAddressSpace();
+ cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
llvm::IntegerType *ty =
llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}
-RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
- AggValueSlot resultSlot,
- SourceLocation loc, bool AsValue) const {
+RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
+ AggValueSlot resultSlot,
+ SourceLocation loc,
+ bool asValue) const {
if (LVal.isSimple()) {
if (EvaluationKind == TEK_Aggregate)
return resultSlot.asRValue();
// Drill into the padding structure if we have one.
if (hasPadding())
- addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0);
+ addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
// Otherwise, just convert the temporary to an r-value using the
// normal conversion routine.
return CGF.convertTempToRValue(addr, getValueType(), loc);
}
- if (!AsValue)
+ if (!asValue)
// Get RValue from temp memory as atomic for non-simple lvalues
- return RValue::get(
- CGF.Builder.CreateAlignedLoad(addr, AtomicAlign.getQuantity()));
+ return RValue::get(CGF.Builder.CreateLoad(addr));
if (LVal.isBitField())
- return CGF.EmitLoadOfBitfieldLValue(LValue::MakeBitfield(
- addr, LVal.getBitFieldInfo(), LVal.getType(), LVal.getAlignment()));
+ return CGF.EmitLoadOfBitfieldLValue(
+ LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
+ LVal.getAlignmentSource()));
if (LVal.isVectorElt())
- return CGF.EmitLoadOfLValue(LValue::MakeVectorElt(addr, LVal.getVectorIdx(),
- LVal.getType(),
- LVal.getAlignment()),
- loc);
+ return CGF.EmitLoadOfLValue(
+ LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
+ LVal.getAlignmentSource()), loc);
assert(LVal.isExtVectorElt());
return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
- addr, LVal.getExtVectorElts(), LVal.getType(), LVal.getAlignment()));
+ addr, LVal.getExtVectorElts(), LVal.getType(),
+ LVal.getAlignmentSource()));
}
RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
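Both conversion paths lean on emitCastToAtomicIntPointer now returning an Address: the bitcast changes only the element type, so the original alignment stays attached. A sketch of the resulting idiom:

    Address IntAddr = getAtomicAddressAsAtomicIntPointer();  // iN* view
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(IntAddr, "atomic-load");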
@@ -1191,7 +1190,7 @@ RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
!AsValue)) {
auto *ValTy = AsValue
? CGF.ConvertTypeForMem(ValueTy)
- : getAtomicAddress()->getType()->getPointerElementType();
+ : getAtomicAddress().getType()->getPointerElementType();
if (ValTy->isIntegerTy()) {
assert(IntVal->getType() == ValTy && "Different integer types.");
return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
@@ -1203,25 +1202,22 @@ RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
// Create a temporary. This needs to be big enough to hold the
// atomic integer.
- llvm::Value *Temp;
+ Address Temp = Address::invalid();
bool TempIsVolatile = false;
- CharUnits TempAlignment;
if (AsValue && getEvaluationKind() == TEK_Aggregate) {
assert(!ResultSlot.isIgnored());
- Temp = ResultSlot.getAddr();
- TempAlignment = getValueAlignment();
+ Temp = ResultSlot.getAddress();
TempIsVolatile = ResultSlot.isVolatile();
} else {
Temp = CreateTempAlloca();
- TempAlignment = getAtomicAlignment();
}
// Slam the integer into the temporary.
- llvm::Value *CastTemp = emitCastToAtomicIntPointer(Temp);
- CGF.Builder.CreateAlignedStore(IntVal, CastTemp, TempAlignment.getQuantity())
+ Address CastTemp = emitCastToAtomicIntPointer(Temp);
+ CGF.Builder.CreateStore(IntVal, CastTemp)
->setVolatile(TempIsVolatile);
- return convertTempToRValue(Temp, ResultSlot, Loc, AsValue);
+ return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
@@ -1229,7 +1225,7 @@ void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
// void __atomic_load(size_t size, void *mem, void *return, int order);
CallArgList Args;
Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
- Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())),
+ Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
CGF.getContext().VoidPtrTy);
Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
CGF.getContext().VoidPtrTy);
@@ -1242,12 +1238,11 @@ void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
bool IsVolatile) {
// Okay, we're doing this natively.
- llvm::Value *Addr = emitCastToAtomicIntPointer(getAtomicAddress());
+ Address Addr = getAtomicAddressAsAtomicIntPointer();
llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
Load->setAtomic(AO);
// Other decoration.
- Load->setAlignment(getAtomicAlignment().getQuantity());
if (IsVolatile)
Load->setVolatile(true);
if (LVal.getTBAAInfo())
@@ -1259,11 +1254,12 @@ llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// performing such an operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
+ if (!CGM.getCodeGenOpts().MSVolatile) return false;
AtomicInfo AI(*this, LV);
bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
// An atomic is inline if we don't need to use a libcall.
bool AtomicIsInline = !AI.shouldUseLibcall();
- return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
+ return IsVolatile && AtomicIsInline;
}
/// An type is a candidate for having its loads and stores be made atomic if
@@ -1295,18 +1291,18 @@ RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
bool IsVolatile) {
// Check whether we should use a library call.
if (shouldUseLibcall()) {
- llvm::Value *TempAddr;
+ Address TempAddr = Address::invalid();
if (LVal.isSimple() && !ResultSlot.isIgnored()) {
assert(getEvaluationKind() == TEK_Aggregate);
- TempAddr = ResultSlot.getAddr();
+ TempAddr = ResultSlot.getAddress();
} else
TempAddr = CreateTempAlloca();
- EmitAtomicLoadLibcall(TempAddr, AO, IsVolatile);
+ EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
// Okay, turn that back into the original value or whole atomic (for
// non-simple lvalues) type.
- return convertTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
+ return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
}
// Okay, we're doing this natively.
@@ -1314,7 +1310,7 @@ RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
// If we're ignoring an aggregate return, don't do anything.
if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
- return RValue::getAggregate(nullptr, false);
+ return RValue::getAggregate(Address::invalid(), false);
// Okay, turn that back into the original value or atomic (for non-simple
// lvalues) type.
@@ -1340,11 +1336,10 @@ void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
// any padding. Just do an aggregate copy of that type.
if (rvalue.isAggregate()) {
CGF.EmitAggregateCopy(getAtomicAddress(),
- rvalue.getAggregateAddr(),
+ rvalue.getAggregateAddress(),
getAtomicType(),
(rvalue.isVolatileQualified()
- || LVal.isVolatileQualified()),
- LVal.getAlignment());
+ || LVal.isVolatileQualified()));
return;
}
@@ -1367,15 +1362,14 @@ void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
-llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
+Address AtomicInfo::materializeRValue(RValue rvalue) const {
// Aggregate r-values are already in memory, and EmitAtomicStore
// requires them to be values of the atomic type.
if (rvalue.isAggregate())
- return rvalue.getAggregateAddr();
+ return rvalue.getAggregateAddress();
// Otherwise, make a temporary and materialize into it.
- LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType(),
- getAtomicAlignment());
+ LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
AtomicInfo Atomics(CGF, TempLV);
Atomics.emitCopyIntoMemory(rvalue);
return TempLV.getAddress();
@@ -1400,20 +1394,20 @@ llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
}
// Otherwise, we need to go through memory.
// Put the r-value in memory.
- llvm::Value *Addr = materializeRValue(RVal);
+ Address Addr = materializeRValue(RVal);
// Cast the temporary to the atomic int type and pull a value out.
Addr = emitCastToAtomicIntPointer(Addr);
- return CGF.Builder.CreateAlignedLoad(Addr,
- getAtomicAlignment().getQuantity());
+ return CGF.Builder.CreateLoad(Addr);
}
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
// Do the atomic store.
- auto *Addr = emitCastToAtomicIntPointer(getAtomicAddress());
- auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
+ Address Addr = getAtomicAddressAsAtomicIntPointer();
+ auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
+ ExpectedVal, DesiredVal,
Success, Failure);
// Other decoration.
Inst->setVolatile(LVal.isVolatileQualified());
@@ -1434,7 +1428,7 @@ AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
// void *desired, int success, int failure);
CallArgList Args;
Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
- Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())),
+ Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
CGF.getContext().VoidPtrTy);
Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
CGF.getContext().VoidPtrTy);
@@ -1462,13 +1456,14 @@ std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
// Check whether we should use a library call.
if (shouldUseLibcall()) {
// Produce a source address.
- auto *ExpectedAddr = materializeRValue(Expected);
- auto *DesiredAddr = materializeRValue(Desired);
- auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr,
+ Address ExpectedAddr = materializeRValue(Expected);
+ Address DesiredAddr = materializeRValue(Desired);
+ auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
+ DesiredAddr.getPointer(),
Success, Failure);
return std::make_pair(
- convertTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
- SourceLocation(), /*AsValue=*/false),
+ convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
+ SourceLocation(), /*AsValue=*/false),
Res);
}
@@ -1487,42 +1482,41 @@ std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
static void
EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
const llvm::function_ref<RValue(RValue)> &UpdateOp,
- llvm::Value *DesiredAddr) {
- llvm::Value *Ptr = nullptr;
- LValue UpdateLVal;
+ Address DesiredAddr) {
RValue UpRVal;
LValue AtomicLVal = Atomics.getAtomicLValue();
LValue DesiredLVal;
if (AtomicLVal.isSimple()) {
UpRVal = OldRVal;
- DesiredLVal =
- LValue::MakeAddr(DesiredAddr, AtomicLVal.getType(),
- AtomicLVal.getAlignment(), CGF.CGM.getContext());
+ DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
} else {
// Build new lvalue for temp address
- Ptr = Atomics.materializeRValue(OldRVal);
+ Address Ptr = Atomics.materializeRValue(OldRVal);
+ LValue UpdateLVal;
if (AtomicLVal.isBitField()) {
UpdateLVal =
LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
- AtomicLVal.getType(), AtomicLVal.getAlignment());
+ AtomicLVal.getType(),
+ AtomicLVal.getAlignmentSource());
DesiredLVal =
LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
- AtomicLVal.getType(), AtomicLVal.getAlignment());
+ AtomicLVal.getType(),
+ AtomicLVal.getAlignmentSource());
} else if (AtomicLVal.isVectorElt()) {
UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
AtomicLVal.getType(),
- AtomicLVal.getAlignment());
+ AtomicLVal.getAlignmentSource());
DesiredLVal = LValue::MakeVectorElt(
DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
- AtomicLVal.getAlignment());
+ AtomicLVal.getAlignmentSource());
} else {
assert(AtomicLVal.isExtVectorElt());
UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
AtomicLVal.getType(),
- AtomicLVal.getAlignment());
+ AtomicLVal.getAlignmentSource());
DesiredLVal = LValue::MakeExtVectorElt(
DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
- AtomicLVal.getAlignment());
+ AtomicLVal.getAlignmentSource());
}
UpdateLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
@@ -1544,26 +1538,26 @@ void AtomicInfo::EmitAtomicUpdateLibcall(
bool IsVolatile) {
auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
- llvm::Value *ExpectedAddr = CreateTempAlloca();
+ Address ExpectedAddr = CreateTempAlloca();
- EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile);
+ EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
auto *ContBB = CGF.createBasicBlock("atomic_cont");
auto *ExitBB = CGF.createBasicBlock("atomic_exit");
CGF.EmitBlock(ContBB);
- auto *DesiredAddr = CreateTempAlloca();
+ Address DesiredAddr = CreateTempAlloca();
if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
- requiresMemSetZero(
- getAtomicAddress()->getType()->getPointerElementType())) {
- auto *OldVal = CGF.Builder.CreateAlignedLoad(
- ExpectedAddr, getAtomicAlignment().getQuantity());
- CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr,
- getAtomicAlignment().getQuantity());
+ requiresMemSetZero(getAtomicAddress().getElementType())) {
+ auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
+ CGF.Builder.CreateStore(OldVal, DesiredAddr);
}
- auto OldRVal = convertTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
- SourceLocation(), /*AsValue=*/false);
+ auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
+ AggValueSlot::ignored(),
+ SourceLocation(), /*AsValue=*/false);
EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
auto *Res =
- EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure);
+ EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
+ DesiredAddr.getPointer(),
+ AO, Failure);
CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
@@ -1583,19 +1577,16 @@ void AtomicInfo::EmitAtomicUpdateOp(
llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
/*NumReservedValues=*/2);
PHI->addIncoming(OldVal, CurBB);
- auto *NewAtomicAddr = CreateTempAlloca();
- auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
+ Address NewAtomicAddr = CreateTempAlloca();
+ Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
- requiresMemSetZero(
- getAtomicAddress()->getType()->getPointerElementType())) {
- CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr,
- getAtomicAlignment().getQuantity());
+ requiresMemSetZero(getAtomicAddress().getElementType())) {
+ CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
}
auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
SourceLocation(), /*AsValue=*/false);
EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
- auto *DesiredVal = CGF.Builder.CreateAlignedLoad(
- NewAtomicIntAddr, getAtomicAlignment().getQuantity());
+ auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
// Try to write new value using cmpxchg operation
auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
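
The control flow built here is an ordinary compare-exchange retry loop: the PHI carries the last observed value, the update is computed into a temporary, and a failed cmpxchg feeds the freshly observed value back into the PHI. A C++-level analogue of that shape over std::atomic (not the emitted IR):

    #include <atomic>

    template <class T, class F>
    T atomicUpdate(std::atomic<T> &obj, F update) {
      T expected = obj.load();
      // On failure, compare_exchange_weak refreshes 'expected' with the
      // currently stored value, like the PHI feeding the next iteration.
      while (!obj.compare_exchange_weak(expected, update(expected)))
        ;
      return expected; // the old value, as OldRVal is above
    }
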
@@ -1604,23 +1595,25 @@ void AtomicInfo::EmitAtomicUpdateOp(
}
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
- RValue UpdateRVal, llvm::Value *DesiredAddr) {
+ RValue UpdateRVal, Address DesiredAddr) {
LValue AtomicLVal = Atomics.getAtomicLValue();
LValue DesiredLVal;
// Build new lvalue for temp address
if (AtomicLVal.isBitField()) {
DesiredLVal =
LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
- AtomicLVal.getType(), AtomicLVal.getAlignment());
+ AtomicLVal.getType(),
+ AtomicLVal.getAlignmentSource());
} else if (AtomicLVal.isVectorElt()) {
DesiredLVal =
LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
- AtomicLVal.getType(), AtomicLVal.getAlignment());
+ AtomicLVal.getType(),
+ AtomicLVal.getAlignmentSource());
} else {
assert(AtomicLVal.isExtVectorElt());
DesiredLVal = LValue::MakeExtVectorElt(
DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
- AtomicLVal.getAlignment());
+ AtomicLVal.getAlignmentSource());
}
DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
// Store new value in the corresponding memory area
@@ -1632,24 +1625,23 @@ void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
RValue UpdateRVal, bool IsVolatile) {
auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
- llvm::Value *ExpectedAddr = CreateTempAlloca();
+ Address ExpectedAddr = CreateTempAlloca();
- EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile);
+ EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
auto *ContBB = CGF.createBasicBlock("atomic_cont");
auto *ExitBB = CGF.createBasicBlock("atomic_exit");
CGF.EmitBlock(ContBB);
- auto *DesiredAddr = CreateTempAlloca();
+ Address DesiredAddr = CreateTempAlloca();
if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
- requiresMemSetZero(
- getAtomicAddress()->getType()->getPointerElementType())) {
- auto *OldVal = CGF.Builder.CreateAlignedLoad(
- ExpectedAddr, getAtomicAlignment().getQuantity());
- CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr,
- getAtomicAlignment().getQuantity());
+ requiresMemSetZero(getAtomicAddress().getElementType())) {
+ auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
+ CGF.Builder.CreateStore(OldVal, DesiredAddr);
}
EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
auto *Res =
- EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure);
+ EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
+ DesiredAddr.getPointer(),
+ AO, Failure);
CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
@@ -1668,17 +1660,14 @@ void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
/*NumReservedValues=*/2);
PHI->addIncoming(OldVal, CurBB);
- auto *NewAtomicAddr = CreateTempAlloca();
- auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
+ Address NewAtomicAddr = CreateTempAlloca();
+ Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
- requiresMemSetZero(
- getAtomicAddress()->getType()->getPointerElementType())) {
- CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr,
- getAtomicAlignment().getQuantity());
+ requiresMemSetZero(getAtomicAddress().getElementType())) {
+ CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
}
EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
- auto *DesiredVal = CGF.Builder.CreateAlignedLoad(
- NewAtomicIntAddr, getAtomicAlignment().getQuantity());
+ auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
// Try to write new value using cmpxchg operation
auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
@@ -1729,8 +1718,8 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
// If this is an aggregate r-value, it should agree in type except
// maybe for address-space qualification.
assert(!rvalue.isAggregate() ||
- rvalue.getAggregateAddr()->getType()->getPointerElementType()
- == dest.getAddress()->getType()->getPointerElementType());
+ rvalue.getAggregateAddress().getElementType()
+ == dest.getAddress().getElementType());
AtomicInfo atomics(*this, dest);
LValue LVal = atomics.getAtomicLValue();
@@ -1745,15 +1734,16 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
// Check whether we should use a library call.
if (atomics.shouldUseLibcall()) {
// Produce a source address.
- llvm::Value *srcAddr = atomics.materializeRValue(rvalue);
+ Address srcAddr = atomics.materializeRValue(rvalue);
// void __atomic_store(size_t size, void *mem, void *val, int order)
CallArgList args;
args.add(RValue::get(atomics.getAtomicSizeValue()),
getContext().getSizeType());
- args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicAddress())),
+ args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
+ getContext().VoidPtrTy);
+ args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
getContext().VoidPtrTy);
- args.add(RValue::get(EmitCastToVoidPtr(srcAddr)), getContext().VoidPtrTy);
args.add(RValue::get(llvm::ConstantInt::get(
IntTy, AtomicInfo::translateAtomicOrdering(AO))),
getContext().IntTy);
@@ -1765,10 +1755,10 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
// Do the atomic store.
- llvm::Value *addr =
+ Address addr =
atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
intValue = Builder.CreateIntCast(
- intValue, addr->getType()->getPointerElementType(), /*isSigned=*/false);
+ intValue, addr.getElementType(), /*isSigned=*/false);
llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
// Initializations don't need to be atomic.
@@ -1776,7 +1766,6 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
store->setAtomic(AO);
// Other decoration.
- store->setAlignment(dest.getAlignment().getQuantity());
if (IsVolatile)
store->setVolatile(true);
if (dest.getTBAAInfo())
@@ -1797,11 +1786,11 @@ std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
// If this is an aggregate r-value, it should agree in type except
// maybe for address-space qualification.
assert(!Expected.isAggregate() ||
- Expected.getAggregateAddr()->getType()->getPointerElementType() ==
- Obj.getAddress()->getType()->getPointerElementType());
+ Expected.getAggregateAddress().getElementType() ==
+ Obj.getAddress().getElementType());
assert(!Desired.isAggregate() ||
- Desired.getAggregateAddr()->getType()->getPointerElementType() ==
- Obj.getAddress()->getType()->getPointerElementType());
+ Desired.getAggregateAddress().getElementType() ==
+ Obj.getAddress().getElementType());
AtomicInfo Atomics(*this, Obj);
return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index adc9de1792a..472835feabd 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -30,7 +30,7 @@ using namespace CodeGen;
CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
: Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
HasCXXObject(false), UsesStret(false), HasCapturedVariableLayout(false),
- StructureType(nullptr), Block(block),
+ LocalAddress(Address::invalid()), StructureType(nullptr), Block(block),
DominatingIP(nullptr) {
// Skip asm prefix, if any. 'name' is usually taken directly from
@@ -40,7 +40,7 @@ CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
}
// Anchor the vtable to this translation unit.
-CodeGenModule::ByrefHelpers::~ByrefHelpers() {}
+BlockByrefHelpers::~BlockByrefHelpers() {}
/// Build the given block as a global block.
static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
@@ -111,7 +111,7 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
std::string typeAtEncoding =
CGM.getContext().getObjCEncodingForBlock(blockInfo.getBlockExpr());
elements.push_back(llvm::ConstantExpr::getBitCast(
- CGM.GetAddrOfConstantCString(typeAtEncoding), i8p));
+ CGM.GetAddrOfConstantCString(typeAtEncoding).getPointer(), i8p));
// GC layout.
if (C.getLangOpts().ObjC1) {
@@ -203,44 +203,34 @@ namespace {
Capture(capture), Type(type) {}
/// Tell the block info that this chunk has the given field index.
- void setIndex(CGBlockInfo &info, unsigned index) {
- if (!Capture)
+ void setIndex(CGBlockInfo &info, unsigned index, CharUnits offset) {
+ if (!Capture) {
info.CXXThisIndex = index;
- else
- info.Captures[Capture->getVariable()]
- = CGBlockInfo::Capture::makeIndex(index);
+ info.CXXThisOffset = offset;
+ } else {
+ info.Captures.insert({Capture->getVariable(),
+ CGBlockInfo::Capture::makeIndex(index, offset)});
+ }
}
};
/// Order by 1) all __strong together 2) next, all byref together 3) next,
/// all __weak together. Preserve descending alignment in all situations.
bool operator<(const BlockLayoutChunk &left, const BlockLayoutChunk &right) {
- CharUnits LeftValue, RightValue;
- bool LeftByref = left.Capture ? left.Capture->isByRef() : false;
- bool RightByref = right.Capture ? right.Capture->isByRef() : false;
-
- if (left.Lifetime == Qualifiers::OCL_Strong &&
- left.Alignment >= right.Alignment)
- LeftValue = CharUnits::fromQuantity(64);
- else if (LeftByref && left.Alignment >= right.Alignment)
- LeftValue = CharUnits::fromQuantity(32);
- else if (left.Lifetime == Qualifiers::OCL_Weak &&
- left.Alignment >= right.Alignment)
- LeftValue = CharUnits::fromQuantity(16);
- else
- LeftValue = left.Alignment;
- if (right.Lifetime == Qualifiers::OCL_Strong &&
- right.Alignment >= left.Alignment)
- RightValue = CharUnits::fromQuantity(64);
- else if (RightByref && right.Alignment >= left.Alignment)
- RightValue = CharUnits::fromQuantity(32);
- else if (right.Lifetime == Qualifiers::OCL_Weak &&
- right.Alignment >= left.Alignment)
- RightValue = CharUnits::fromQuantity(16);
- else
- RightValue = right.Alignment;
-
- return LeftValue > RightValue;
+ if (left.Alignment != right.Alignment)
+ return left.Alignment > right.Alignment;
+
+ auto getPrefOrder = [](const BlockLayoutChunk &chunk) {
+ if (chunk.Capture && chunk.Capture->isByRef())
+ return 1;
+ if (chunk.Lifetime == Qualifiers::OCL_Strong)
+ return 0;
+ if (chunk.Lifetime == Qualifiers::OCL_Weak)
+ return 2;
+ return 3;
+ };
+
+ return getPrefOrder(left) < getPrefOrder(right);
}
}
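
The rewritten comparator replaces the old CharUnits bucketing with an explicit two-key sort: alignment descending first, then a fixed lifetime preference. A standalone sketch of the same ordering (hypothetical types; the real code sorts BlockLayoutChunks with a stable sort so layout stays deterministic):

    #include <algorithm>
    #include <vector>

    struct Chunk { unsigned Align; int PrefOrder; };

    // Primary key descending, secondary key ascending
    // (0 = __strong, 1 = byref, 2 = __weak, 3 = everything else).
    static bool chunkLess(const Chunk &L, const Chunk &R) {
      if (L.Align != R.Align)
        return L.Align > R.Align;
      return L.PrefOrder < R.PrefOrder;
    }

    void layOutChunks(std::vector<Chunk> &chunks) {
      std::stable_sort(chunks.begin(), chunks.end(), chunkLess);
    }
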
@@ -302,31 +292,20 @@ static CharUnits getLowBit(CharUnits v) {
static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
SmallVectorImpl<llvm::Type*> &elementTypes) {
- ASTContext &C = CGM.getContext();
-
- // The header is basically a 'struct { void *; int; int; void *; void *; }'.
- CharUnits ptrSize, ptrAlign, intSize, intAlign;
- std::tie(ptrSize, ptrAlign) = C.getTypeInfoInChars(C.VoidPtrTy);
- std::tie(intSize, intAlign) = C.getTypeInfoInChars(C.IntTy);
-
- // Are there crazy embedded platforms where this isn't true?
- assert(intSize <= ptrSize && "layout assumptions horribly violated");
-
- CharUnits headerSize = ptrSize;
- if (2 * intSize < ptrAlign) headerSize += ptrSize;
- else headerSize += 2 * intSize;
- headerSize += 2 * ptrSize;
+ // The header is basically 'struct { void *; int; int; void *; void *; }'.
+ // Assert that this struct's layout is dense (no padding needed).
+ assert(CGM.getIntSize() <= CGM.getPointerSize());
+ assert(CGM.getIntAlign() <= CGM.getPointerAlign());
+ assert((2 * CGM.getIntSize()).isMultipleOf(CGM.getPointerAlign()));
- info.BlockAlign = ptrAlign;
- info.BlockSize = headerSize;
+ info.BlockAlign = CGM.getPointerAlign();
+ info.BlockSize = 3 * CGM.getPointerSize() + 2 * CGM.getIntSize();
assert(elementTypes.empty());
- llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
- llvm::Type *intTy = CGM.getTypes().ConvertType(C.IntTy);
- elementTypes.push_back(i8p);
- elementTypes.push_back(intTy);
- elementTypes.push_back(intTy);
- elementTypes.push_back(i8p);
+ elementTypes.push_back(CGM.VoidPtrTy);
+ elementTypes.push_back(CGM.IntTy);
+ elementTypes.push_back(CGM.IntTy);
+ elementTypes.push_back(CGM.VoidPtrTy);
elementTypes.push_back(CGM.getBlockDescriptorType());
assert(elementTypes.size() == BlockHeaderSize);
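
On a typical LP64 target the asserted constraints make the header fully dense: the two ints share one pointer-sized slot, giving the 32-byte header that BlockSize now computes directly. A reference sketch of that layout (offsets assume LP64; field order matches the elementTypes pushed above):

    // 8 + 4 + 4 + 8 + 8 = 32 bytes = 3 * ptr + 2 * int, no padding.
    struct BlockHeaderSketch {
      void *isa;        // offset 0
      int   flags;      // offset 8
      int   reserved;   // offset 12
      void *invoke;     // offset 16
      void *descriptor; // offset 24
    };
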
@@ -365,6 +344,8 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
"Can't capture 'this' outside a method");
QualType thisType = cast<CXXMethodDecl>(CGF->CurFuncDecl)->getThisType(C);
+ // Theoretically, this could be in a different address space, so
+ // don't assume standard pointer size/align.
llvm::Type *llvmType = CGM.getTypes().ConvertType(thisType);
std::pair<CharUnits,CharUnits> tinfo
= CGM.getContext().getTypeInfoInChars(thisType);
@@ -384,15 +365,12 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
info.NeedsCopyDispose = true;
// Just use void* instead of a pointer to the byref type.
- QualType byRefPtrTy = C.VoidPtrTy;
+ CharUnits align = CGM.getPointerAlign();
+ maxFieldAlign = std::max(maxFieldAlign, align);
- llvm::Type *llvmType = CGM.getTypes().ConvertType(byRefPtrTy);
- std::pair<CharUnits,CharUnits> tinfo
- = CGM.getContext().getTypeInfoInChars(byRefPtrTy);
- maxFieldAlign = std::max(maxFieldAlign, tinfo.second);
-
- layout.push_back(BlockLayoutChunk(tinfo.second, tinfo.first,
- Qualifiers::OCL_None, &CI, llvmType));
+ layout.push_back(BlockLayoutChunk(align, CGM.getPointerSize(),
+ Qualifiers::OCL_None, &CI,
+ CGM.VoidPtrTy));
continue;
}
@@ -504,18 +482,13 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
for (; li != le; ++li) {
assert(endAlign >= li->Alignment);
- li->setIndex(info, elementTypes.size());
+ li->setIndex(info, elementTypes.size(), blockSize);
elementTypes.push_back(li->Type);
blockSize += li->Size;
endAlign = getLowBit(blockSize);
// ...until we get to the alignment of the maximum field.
if (endAlign >= maxFieldAlign) {
- if (li == first) {
- // No user field was appended. So, a gap was added.
- // Save total gap size for use in block layout bit map.
- info.BlockHeaderForcedGapSize = li->Size;
- }
break;
}
}
@@ -532,6 +505,12 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
CharUnits newBlockSize = blockSize.RoundUpToAlignment(maxFieldAlign);
CharUnits padding = newBlockSize - blockSize;
+ // If we haven't yet added any fields, remember that there was an
+ // initial gap; this needs to go into the block layout bitmap.
+ if (blockSize == info.BlockHeaderForcedGapOffset) {
+ info.BlockHeaderForcedGapSize = padding;
+ }
+
elementTypes.push_back(llvm::ArrayType::get(CGM.Int8Ty,
padding.getQuantity()));
blockSize = newBlockSize;
@@ -556,7 +535,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
endAlign = getLowBit(blockSize);
}
assert(endAlign >= li->Alignment);
- li->setIndex(info, elementTypes.size());
+ li->setIndex(info, elementTypes.size(), blockSize);
elementTypes.push_back(li->Type);
blockSize += li->Size;
endAlign = getLowBit(blockSize);
@@ -586,9 +565,8 @@ static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) {
if (blockInfo.CanBeGlobal) return;
// Make the allocation for the block.
- blockInfo.Address =
- CGF.CreateTempAlloca(blockInfo.StructureType, "block");
- blockInfo.Address->setAlignment(blockInfo.BlockAlign.getQuantity());
+ blockInfo.LocalAddress = CGF.CreateTempAlloca(blockInfo.StructureType,
+ blockInfo.BlockAlign, "block");
// If there are cleanups to emit, enter them (but inactive).
if (!blockInfo.NeedsCopyDispose) return;
@@ -621,12 +599,13 @@ static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) {
}
// GEP down to the address.
- llvm::Value *addr = CGF.Builder.CreateStructGEP(
- blockInfo.StructureType, blockInfo.Address, capture.getIndex());
+ Address addr = CGF.Builder.CreateStructGEP(blockInfo.LocalAddress,
+ capture.getIndex(),
+ capture.getOffset());
// We can use that GEP as the dominating IP.
if (!blockInfo.DominatingIP)
- blockInfo.DominatingIP = cast<llvm::Instruction>(addr);
+ blockInfo.DominatingIP = cast<llvm::Instruction>(addr.getPointer());
CleanupKind cleanupKind = InactiveNormalCleanup;
bool useArrayEHCleanup = CGF.needsEHCleanup(dtorKind);
@@ -721,9 +700,8 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// Build the block descriptor.
llvm::Constant *descriptor = buildBlockDescriptor(CGM, blockInfo);
- llvm::Type *blockTy = blockInfo.StructureType;
- llvm::AllocaInst *blockAddr = blockInfo.Address;
- assert(blockAddr && "block has no address!");
+ Address blockAddr = blockInfo.LocalAddress;
+ assert(blockAddr.isValid() && "block has no address!");
// Compute the initial on-stack block flags.
BlockFlags flags = BLOCK_HAS_SIGNATURE;
@@ -732,27 +710,44 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
if (blockInfo.HasCXXObject) flags |= BLOCK_HAS_CXX_OBJ;
if (blockInfo.UsesStret) flags |= BLOCK_USE_STRET;
- // Initialize the block literal.
- Builder.CreateStore(
- isa, Builder.CreateStructGEP(blockTy, blockAddr, 0, "block.isa"));
- Builder.CreateStore(
- llvm::ConstantInt::get(IntTy, flags.getBitMask()),
- Builder.CreateStructGEP(blockTy, blockAddr, 1, "block.flags"));
- Builder.CreateStore(
- llvm::ConstantInt::get(IntTy, 0),
- Builder.CreateStructGEP(blockTy, blockAddr, 2, "block.reserved"));
- Builder.CreateStore(
- blockFn, Builder.CreateStructGEP(blockTy, blockAddr, 3, "block.invoke"));
- Builder.CreateStore(descriptor, Builder.CreateStructGEP(blockTy, blockAddr, 4,
- "block.descriptor"));
+ auto projectField =
+ [&](unsigned index, CharUnits offset, const Twine &name) -> Address {
+ return Builder.CreateStructGEP(blockAddr, index, offset, name);
+ };
+ auto storeField =
+ [&](llvm::Value *value, unsigned index, CharUnits offset,
+ const Twine &name) {
+ Builder.CreateStore(value, projectField(index, offset, name));
+ };
+
+ // Initialize the block header.
+ {
+ // We assume all the header fields are densely packed.
+ unsigned index = 0;
+ CharUnits offset;
+ auto addHeaderField =
+ [&](llvm::Value *value, CharUnits size, const Twine &name) {
+ storeField(value, index, offset, name);
+ offset += size;
+ index++;
+ };
+
+ addHeaderField(isa, getPointerSize(), "block.isa");
+ addHeaderField(llvm::ConstantInt::get(IntTy, flags.getBitMask()),
+ getIntSize(), "block.flags");
+ addHeaderField(llvm::ConstantInt::get(IntTy, 0),
+ getIntSize(), "block.reserved");
+ addHeaderField(blockFn, getPointerSize(), "block.invoke");
+ addHeaderField(descriptor, getPointerSize(), "block.descriptor");
+ }
// Finally, capture all the values into the block.
const BlockDecl *blockDecl = blockInfo.getBlockDecl();
// First, 'this'.
if (blockDecl->capturesCXXThis()) {
- llvm::Value *addr = Builder.CreateStructGEP(
- blockTy, blockAddr, blockInfo.CXXThisIndex, "block.captured-this.addr");
+ Address addr = projectField(blockInfo.CXXThisIndex, blockInfo.CXXThisOffset,
+ "block.captured-this.addr");
Builder.CreateStore(LoadCXXThis(), addr);
}
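
The two lambdas thread an (index, offset) cursor through the header so every store gets a struct GEP with the right field index and byte offset, and the bookkeeping lives in exactly one place. The idiom in miniature (illustrative LP64 sizes):

    #include <cstdio>

    void emitHeaderSketch() {
      unsigned index = 0;
      unsigned offset = 0;
      auto addHeaderField = [&](const char *name, unsigned size) {
        std::printf("field %u (%s) at byte offset %u\n", index, name, offset);
        offset += size;
        ++index;
      };
      addHeaderField("block.isa", 8);
      addHeaderField("block.flags", 4);
      addHeaderField("block.reserved", 4);
      addHeaderField("block.invoke", 8);
      addHeaderField("block.descriptor", 8);
    }
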
@@ -765,35 +760,37 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
if (capture.isConstant()) continue;
QualType type = variable->getType();
- CharUnits align = getContext().getDeclAlign(variable);
// This will be a [[type]]*, except that a byref entry will just be
// an i8**.
- llvm::Value *blockField = Builder.CreateStructGEP(
- blockTy, blockAddr, capture.getIndex(), "block.captured");
+ Address blockField =
+ projectField(capture.getIndex(), capture.getOffset(), "block.captured");
// Compute the address of the thing we're going to move into the
// block literal.
- llvm::Value *src;
+ Address src = Address::invalid();
if (BlockInfo && CI.isNested()) {
// We need to use the capture from the enclosing block.
const CGBlockInfo::Capture &enclosingCapture =
BlockInfo->getCapture(variable);
// This is a [[type]]*, except that a byref entry will just be an i8**.
- src = Builder.CreateStructGEP(BlockInfo->StructureType, LoadBlockStruct(),
+ src = Builder.CreateStructGEP(LoadBlockStruct(),
enclosingCapture.getIndex(),
+ enclosingCapture.getOffset(),
"block.capture.addr");
} else if (blockDecl->isConversionFromLambda()) {
// The lambda capture in a lambda's conversion-to-block-pointer is
// special; we'll simply emit it directly.
- src = nullptr;
+ src = Address::invalid();
} else {
// Just look it up in the locals map, which will give us back a
// [[type]]*. If that doesn't work, do the more elaborate DRE
// emission.
- src = LocalDeclMap.lookup(variable);
- if (!src) {
+ auto it = LocalDeclMap.find(variable);
+ if (it != LocalDeclMap.end()) {
+ src = it->second;
+ } else {
DeclRefExpr declRef(
const_cast<VarDecl *>(variable),
/*RefersToEnclosingVariableOrCapture*/ CI.isNested(), type,
@@ -808,14 +805,14 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// live a shorter life than the stack byref anyway.
if (CI.isByRef()) {
// Get a void* that points to the byref struct.
+ llvm::Value *byrefPointer;
if (CI.isNested())
- src = Builder.CreateAlignedLoad(src, align.getQuantity(),
- "byref.capture");
+ byrefPointer = Builder.CreateLoad(src, "byref.capture");
else
- src = Builder.CreateBitCast(src, VoidPtrTy);
+ byrefPointer = Builder.CreateBitCast(src.getPointer(), VoidPtrTy);
// Write that void* into the capture field.
- Builder.CreateAlignedStore(src, blockField, align.getQuantity());
+ Builder.CreateStore(byrefPointer, blockField);
// If we have a copy constructor, evaluate that into the block field.
} else if (const Expr *copyExpr = CI.getCopyExpr()) {
@@ -823,7 +820,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// If we have a lambda conversion, emit the expression
// directly into the block instead.
AggValueSlot Slot =
- AggValueSlot::forAddr(blockField, align, Qualifiers(),
+ AggValueSlot::forAddr(blockField, Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
@@ -834,9 +831,8 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// If it's a reference variable, copy the reference into the block field.
} else if (type->isReferenceType()) {
- llvm::Value *ref =
- Builder.CreateAlignedLoad(src, align.getQuantity(), "ref.val");
- Builder.CreateAlignedStore(ref, blockField, align.getQuantity());
+ llvm::Value *ref = Builder.CreateLoad(src, "ref.val");
+ Builder.CreateStore(ref, blockField);
// If this is an ARC __strong block-pointer variable, don't do a
// block copy.
@@ -848,13 +844,11 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
} else if (type.getObjCLifetime() == Qualifiers::OCL_Strong &&
type->isBlockPointerType()) {
// Load the block and do a simple retain.
- LValue srcLV = MakeAddrLValue(src, type, align);
- llvm::Value *value = EmitLoadOfScalar(srcLV, SourceLocation());
+ llvm::Value *value = Builder.CreateLoad(src, "block.captured_block");
value = EmitARCRetainNonBlock(value);
// Do a primitive store to the block field.
- LValue destLV = MakeAddrLValue(blockField, type, align);
- EmitStoreOfScalar(value, destLV, /*init*/ true);
+ Builder.CreateStore(value, blockField);
// Otherwise, fake up a POD copy into the block field.
} else {
@@ -876,7 +870,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// attributed to a reasonable location - otherwise it may be attributed to
// locations of subexpressions in the initialization.
EmitExprAsInit(&l2r, &blockFieldPseudoVar,
- MakeAddrLValue(blockField, type, align),
+ MakeAddrLValue(blockField, type, AlignmentSource::Decl),
/*captured by init*/ false);
}
@@ -891,7 +885,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// Cast to the converted block-pointer type, which happens (somewhat
// unfortunately) to be a pointer to function type.
llvm::Value *result =
- Builder.CreateBitCast(blockAddr,
+ Builder.CreateBitCast(blockAddr.getPointer(),
ConvertType(blockInfo.getBlockExpr()->getType()));
return result;
@@ -966,8 +960,8 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal");
// Get the function pointer from the literal.
- llvm::Value *FuncPtr = Builder.CreateStructGEP(
- CGM.getGenericBlockLiteralType(), BlockLiteral, 3);
+ llvm::Value *FuncPtr =
+ Builder.CreateStructGEP(CGM.getGenericBlockLiteralType(), BlockLiteral, 3);
BlockLiteral = Builder.CreateBitCast(BlockLiteral, VoidPtrTy);
@@ -981,7 +975,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(), E->arguments());
// Load the function.
- llvm::Value *Func = Builder.CreateLoad(FuncPtr);
+ llvm::Value *Func = Builder.CreateAlignedLoad(FuncPtr, getPointerAlign());
const FunctionType *FuncTy = FnType->castAs<FunctionType>();
const CGFunctionInfo &FnInfo =
@@ -997,41 +991,35 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
return EmitCall(FnInfo, Func, ReturnValue, Args);
}
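
In C terms, the call sequence above loads field 3 of the generic block literal and invokes it with the literal itself as the hidden first argument. A sketch against the documented blocks ABI (helper name hypothetical):

    struct Block_literal {
      void *isa;
      int flags;
      int reserved;
      void (*invoke)(void *, ...); // field 3: the FuncPtr loaded above
      void *descriptor;
    };

    static int callIntBlock(void *block, int arg) {
      struct Block_literal *literal = (struct Block_literal *)block;
      int (*fn)(void *, int) = (int (*)(void *, int))literal->invoke;
      return fn(block, arg); // the literal is always the first argument
    }
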
-llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
- bool isByRef) {
+Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
+ bool isByRef) {
assert(BlockInfo && "evaluating block ref without block information?");
const CGBlockInfo::Capture &capture = BlockInfo->getCapture(variable);
// Handle constant captures.
- if (capture.isConstant()) return LocalDeclMap[variable];
+ if (capture.isConstant()) return LocalDeclMap.find(variable)->second;
- llvm::Value *addr =
- Builder.CreateStructGEP(BlockInfo->StructureType, LoadBlockStruct(),
- capture.getIndex(), "block.capture.addr");
+ Address addr =
+ Builder.CreateStructGEP(LoadBlockStruct(), capture.getIndex(),
+ capture.getOffset(), "block.capture.addr");
if (isByRef) {
// addr should be a void** right now. Load, then cast the result
// to byref*.
- addr = Builder.CreateLoad(addr);
- auto *byrefType = BuildByRefType(variable);
- llvm::PointerType *byrefPointerType = llvm::PointerType::get(byrefType, 0);
- addr = Builder.CreateBitCast(addr, byrefPointerType,
- "byref.addr");
-
- // Follow the forwarding pointer.
- addr = Builder.CreateStructGEP(byrefType, addr, 1, "byref.forwarding");
- addr = Builder.CreateLoad(addr, "byref.addr.forwarded");
-
- // Cast back to byref* and GEP over to the actual object.
- addr = Builder.CreateBitCast(addr, byrefPointerType);
- addr = Builder.CreateStructGEP(byrefType, addr,
- getByRefValueLLVMField(variable).second,
- variable->getNameAsString());
+ auto &byrefInfo = getBlockByrefInfo(variable);
+ addr = Address(Builder.CreateLoad(addr), byrefInfo.ByrefAlignment);
+
+ auto byrefPointerType = llvm::PointerType::get(byrefInfo.Type, 0);
+ addr = Builder.CreateBitCast(addr, byrefPointerType, "byref.addr");
+
+ addr = emitBlockByrefAddress(addr, byrefInfo, /*follow*/ true,
+ variable->getName());
}
- if (variable->getType()->isReferenceType())
- addr = Builder.CreateLoad(addr, "ref.tmp");
+ if (auto refType = variable->getType()->getAs<ReferenceType>()) {
+ addr = EmitLoadOfReference(addr, refType);
+ }
return addr;
}
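
The byref path above does three things: load the void** capture slot, chase the __forwarding pointer (which may point at a heap copy after _Block_copy), then GEP to the value field. The same walk in plain C for a '__block int x' (layout per the blocks ABI; a POD int needs no copy/dispose helper fields):

    struct byref_int {
      void *isa;
      struct byref_int *forwarding;
      int flags;
      int size;
      int x; // the variable itself
    };

    static int loadByrefCapture(void **captureSlot) {
      struct byref_int *byref = (struct byref_int *)*captureSlot; // load i8**
      byref = byref->forwarding; // may now be the heap copy
      return byref->x;           // GEP to the value field
    }
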
@@ -1048,7 +1036,7 @@ CodeGenModule::GetAddrOfGlobalBlock(const BlockExpr *blockExpr,
// Using that metadata, generate the actual block function.
llvm::Constant *blockFn;
{
- llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
+ CodeGenFunction::DeclMapTy LocalDeclMap;
blockFn = CodeGenFunction(*this).GenerateBlockFunction(GlobalDecl(),
blockInfo,
LocalDeclMap,
@@ -1102,6 +1090,44 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
return llvm::ConstantExpr::getBitCast(literal, requiredType);
}
+void CodeGenFunction::setBlockContextParameter(const ImplicitParamDecl *D,
+ unsigned argNum,
+ llvm::Value *arg) {
+ assert(BlockInfo && "not emitting prologue of block invocation function?!");
+
+ llvm::Value *localAddr = nullptr;
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ // Allocate a stack slot to let the debug info survive the RA.
+ Address alloc = CreateMemTemp(D->getType(), D->getName() + ".addr");
+ Builder.CreateStore(arg, alloc);
+ localAddr = Builder.CreateLoad(alloc);
+ }
+
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ if (CGM.getCodeGenOpts().getDebugInfo()
+ >= CodeGenOptions::LimitedDebugInfo) {
+ DI->setLocation(D->getLocation());
+ DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, arg, argNum,
+ localAddr, Builder);
+ }
+ }
+
+ SourceLocation StartLoc = BlockInfo->getBlockExpr()->getBody()->getLocStart();
+ ApplyDebugLocation Scope(*this, StartLoc);
+
+ // Instead of messing around with LocalDeclMap, just set the value
+ // directly as BlockPointer.
+ BlockPointer = Builder.CreateBitCast(arg,
+ BlockInfo->StructureType->getPointerTo(),
+ "block");
+}
+
+Address CodeGenFunction::LoadBlockStruct() {
+ assert(BlockInfo && "not in a block invocation function!");
+ assert(BlockPointer && "no block pointer set!");
+ return Address(BlockPointer, BlockInfo->BlockAlign);
+}
+
llvm::Function *
CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
const CGBlockInfo &blockInfo,
@@ -1121,7 +1147,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
for (DeclMapTy::const_iterator i = ldm.begin(), e = ldm.end(); i != e; ++i) {
const auto *var = dyn_cast<VarDecl>(i->first);
if (var && !var->hasLocalStorage())
- LocalDeclMap[var] = i->second;
+ setAddrOfLocalVar(var, i->second);
}
// Begin building the function declaration.
@@ -1162,35 +1188,28 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
blockInfo.getBlockExpr()->getBody()->getLocStart());
// Okay. Undo some of what StartFunction did.
-
- // Pull the 'self' reference out of the local decl map.
- llvm::Value *blockAddr = LocalDeclMap[&selfDecl];
- LocalDeclMap.erase(&selfDecl);
- BlockPointer = Builder.CreateBitCast(blockAddr,
- blockInfo.StructureType->getPointerTo(),
- "block");
+
// At -O0 we generate an explicit alloca for the BlockPointer, so the RA
// won't delete the dbg.declare intrinsics for captured variables.
llvm::Value *BlockPointerDbgLoc = BlockPointer;
if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
// Allocate a stack slot for it, so we can point the debugger to it
- llvm::AllocaInst *Alloca = CreateTempAlloca(BlockPointer->getType(),
- "block.addr");
- unsigned Align = getContext().getDeclAlign(&selfDecl).getQuantity();
- Alloca->setAlignment(Align);
+ Address Alloca = CreateTempAlloca(BlockPointer->getType(),
+ getPointerAlign(),
+ "block.addr");
// Set the DebugLocation to empty, so the store is recognized as a
// frame setup instruction by llvm::DwarfDebug::beginFunction().
auto NL = ApplyDebugLocation::CreateEmpty(*this);
- Builder.CreateAlignedStore(BlockPointer, Alloca, Align);
- BlockPointerDbgLoc = Alloca;
+ Builder.CreateStore(BlockPointer, Alloca);
+ BlockPointerDbgLoc = Alloca.getPointer();
}
// If we have a C++ 'this' reference, go ahead and force it into
// existence now.
if (blockDecl->capturesCXXThis()) {
- llvm::Value *addr =
- Builder.CreateStructGEP(blockInfo.StructureType, BlockPointer,
- blockInfo.CXXThisIndex, "block.captured-this");
+ Address addr =
+ Builder.CreateStructGEP(LoadBlockStruct(), blockInfo.CXXThisIndex,
+ blockInfo.CXXThisOffset, "block.captured-this");
CXXThisValue = Builder.CreateLoad(addr, "this");
}
@@ -1200,15 +1219,13 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
if (!capture.isConstant()) continue;
- unsigned align = getContext().getDeclAlign(variable).getQuantity();
-
- llvm::AllocaInst *alloca =
- CreateMemTemp(variable->getType(), "block.captured-const");
- alloca->setAlignment(align);
+ CharUnits align = getContext().getDeclAlign(variable);
+ Address alloca =
+ CreateMemTemp(variable->getType(), align, "block.captured-const");
- Builder.CreateAlignedStore(capture.getConstant(), alloca, align);
+ Builder.CreateStore(capture.getConstant(), alloca);
- LocalDeclMap[variable] = alloca;
+ setAddrOfLocalVar(variable, alloca);
}
// Save a spot to insert the debug information for all the DeclRefExprs.
@@ -1242,7 +1259,8 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
>= CodeGenOptions::LimitedDebugInfo) {
const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
if (capture.isConstant()) {
- DI->EmitDeclareOfAutoVariable(variable, LocalDeclMap[variable],
+ auto addr = LocalDeclMap.find(variable)->second;
+ DI->EmitDeclareOfAutoVariable(variable, addr.getPointer(),
Builder);
continue;
}
@@ -1335,12 +1353,12 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
auto AL = ApplyDebugLocation::CreateArtificial(*this);
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
- llvm::Value *src = GetAddrOfLocalVar(&srcDecl);
- src = Builder.CreateLoad(src);
+ Address src = GetAddrOfLocalVar(&srcDecl);
+ src = Address(Builder.CreateLoad(src), blockInfo.BlockAlign);
src = Builder.CreateBitCast(src, structPtrTy, "block.source");
- llvm::Value *dst = GetAddrOfLocalVar(&dstDecl);
- dst = Builder.CreateLoad(dst);
+ Address dst = GetAddrOfLocalVar(&dstDecl);
+ dst = Address(Builder.CreateLoad(dst), blockInfo.BlockAlign);
dst = Builder.CreateBitCast(dst, structPtrTy, "block.dest");
const BlockDecl *blockDecl = blockInfo.getBlockDecl();
@@ -1404,10 +1422,8 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
}
unsigned index = capture.getIndex();
- llvm::Value *srcField =
- Builder.CreateStructGEP(blockInfo.StructureType, src, index);
- llvm::Value *dstField =
- Builder.CreateStructGEP(blockInfo.StructureType, dst, index);
+ Address srcField = Builder.CreateStructGEP(src, index, capture.getOffset());
+ Address dstField = Builder.CreateStructGEP(dst, index, capture.getOffset());
// If there's an explicit copy expression, we do that.
if (copyExpr) {
@@ -1434,11 +1450,12 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
// We don't need this anymore, so kill it. It's not quite
// worth the annoyance to avoid creating it in the first place.
- cast<llvm::Instruction>(dstField)->eraseFromParent();
+ cast<llvm::Instruction>(dstField.getPointer())->eraseFromParent();
}
} else {
srcValue = Builder.CreateBitCast(srcValue, VoidPtrTy);
- llvm::Value *dstAddr = Builder.CreateBitCast(dstField, VoidPtrTy);
+ llvm::Value *dstAddr =
+ Builder.CreateBitCast(dstField.getPointer(), VoidPtrTy);
llvm::Value *args[] = {
dstAddr, srcValue, llvm::ConstantInt::get(Int32Ty, flags.getBitMask())
};
@@ -1508,8 +1525,8 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
- llvm::Value *src = GetAddrOfLocalVar(&srcDecl);
- src = Builder.CreateLoad(src);
+ Address src = GetAddrOfLocalVar(&srcDecl);
+ src = Address(Builder.CreateLoad(src), blockInfo.BlockAlign);
src = Builder.CreateBitCast(src, structPtrTy, "block");
const BlockDecl *blockDecl = blockInfo.getBlockDecl();
@@ -1563,9 +1580,8 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
continue;
}
- unsigned index = capture.getIndex();
- llvm::Value *srcField =
- Builder.CreateStructGEP(blockInfo.StructureType, src, index);
+ Address srcField =
+ Builder.CreateStructGEP(src, capture.getIndex(), capture.getOffset());
// If there's an explicit destructor, we do that.
if (dtor) {
@@ -1599,15 +1615,15 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
namespace {
/// Emits the copy/dispose helper functions for a __block object of id type.
-class ObjectByrefHelpers final : public CodeGenModule::ByrefHelpers {
+class ObjectByrefHelpers final : public BlockByrefHelpers {
BlockFieldFlags Flags;
public:
ObjectByrefHelpers(CharUnits alignment, BlockFieldFlags flags)
- : ByrefHelpers(alignment), Flags(flags) {}
+ : BlockByrefHelpers(alignment), Flags(flags) {}
- void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
- llvm::Value *srcField) override {
+ void emitCopy(CodeGenFunction &CGF, Address destField,
+ Address srcField) override {
destField = CGF.Builder.CreateBitCast(destField, CGF.VoidPtrTy);
srcField = CGF.Builder.CreateBitCast(srcField, CGF.VoidPtrPtrTy);
@@ -1618,11 +1634,11 @@ public:
llvm::Value *flagsVal = llvm::ConstantInt::get(CGF.Int32Ty, flags);
llvm::Value *fn = CGF.CGM.getBlockObjectAssign();
- llvm::Value *args[] = { destField, srcValue, flagsVal };
+ llvm::Value *args[] = { destField.getPointer(), srcValue, flagsVal };
CGF.EmitNounwindRuntimeCall(fn, args);
}
- void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override {
+ void emitDispose(CodeGenFunction &CGF, Address field) override {
field = CGF.Builder.CreateBitCast(field, CGF.Int8PtrTy->getPointerTo(0));
llvm::Value *value = CGF.Builder.CreateLoad(field);
@@ -1635,16 +1651,16 @@ public:
};
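
For orientation, here is a concrete instance of what these emitCopy/emitDispose bodies expand to for an ObjC object __block variable, written as the equivalent C (struct and function names are hypothetical; the runtime entry points and flag values are the documented blocks ABI):

    enum { BLOCK_FIELD_IS_OBJECT = 3, BLOCK_BYREF_CALLER = 128 };
    extern "C" void _Block_object_assign(void *dst, const void *src, int flags);
    extern "C" void _Block_object_dispose(const void *obj, int flags);

    struct byref_obj {
      void *isa, *forwarding;
      int flags, size;
      void *copy_helper, *dispose_helper;
      void *obj; // the captured object pointer
    };

    extern "C" void byref_obj_copy(void *d, void *s) {
      struct byref_obj *dst = (struct byref_obj *)d;
      struct byref_obj *src = (struct byref_obj *)s;
      _Block_object_assign(&dst->obj, src->obj,
                           BLOCK_FIELD_IS_OBJECT | BLOCK_BYREF_CALLER);
    }

    extern "C" void byref_obj_dispose(void *s) {
      struct byref_obj *src = (struct byref_obj *)s;
      _Block_object_dispose(src->obj,
                            BLOCK_FIELD_IS_OBJECT | BLOCK_BYREF_CALLER);
    }
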
/// Emits the copy/dispose helpers for an ARC __block __weak variable.
-class ARCWeakByrefHelpers final : public CodeGenModule::ByrefHelpers {
+class ARCWeakByrefHelpers final : public BlockByrefHelpers {
public:
- ARCWeakByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+ ARCWeakByrefHelpers(CharUnits alignment) : BlockByrefHelpers(alignment) {}
- void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
- llvm::Value *srcField) override {
+ void emitCopy(CodeGenFunction &CGF, Address destField,
+ Address srcField) override {
CGF.EmitARCMoveWeak(destField, srcField);
}
- void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override {
+ void emitDispose(CodeGenFunction &CGF, Address field) override {
CGF.EmitARCDestroyWeak(field);
}
@@ -1656,36 +1672,31 @@ public:
/// Emits the copy/dispose helpers for an ARC __block __strong variable
/// that's not of block-pointer type.
-class ARCStrongByrefHelpers final : public CodeGenModule::ByrefHelpers {
+class ARCStrongByrefHelpers final : public BlockByrefHelpers {
public:
- ARCStrongByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+ ARCStrongByrefHelpers(CharUnits alignment) : BlockByrefHelpers(alignment) {}
- void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
- llvm::Value *srcField) override {
+ void emitCopy(CodeGenFunction &CGF, Address destField,
+ Address srcField) override {
// Do a "move" by copying the value and then zeroing out the old
// variable.
- llvm::LoadInst *value = CGF.Builder.CreateLoad(srcField);
- value->setAlignment(Alignment.getQuantity());
+ llvm::Value *value = CGF.Builder.CreateLoad(srcField);
llvm::Value *null =
llvm::ConstantPointerNull::get(cast<llvm::PointerType>(value->getType()));
if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) {
- llvm::StoreInst *store = CGF.Builder.CreateStore(null, destField);
- store->setAlignment(Alignment.getQuantity());
+ CGF.Builder.CreateStore(null, destField);
CGF.EmitARCStoreStrongCall(destField, value, /*ignored*/ true);
CGF.EmitARCStoreStrongCall(srcField, null, /*ignored*/ true);
return;
}
- llvm::StoreInst *store = CGF.Builder.CreateStore(value, destField);
- store->setAlignment(Alignment.getQuantity());
-
- store = CGF.Builder.CreateStore(null, srcField);
- store->setAlignment(Alignment.getQuantity());
+ CGF.Builder.CreateStore(value, destField);
+ CGF.Builder.CreateStore(null, srcField);
}
- void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override {
+ void emitDispose(CodeGenFunction &CGF, Address field) override {
CGF.EmitARCDestroyStrong(field, ARCImpreciseLifetime);
}
@@ -1697,25 +1708,22 @@ public:
/// Emits the copy/dispose helpers for an ARC __block __strong
/// variable that's of block-pointer type.
-class ARCStrongBlockByrefHelpers final : public CodeGenModule::ByrefHelpers {
+class ARCStrongBlockByrefHelpers final : public BlockByrefHelpers {
public:
- ARCStrongBlockByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+ ARCStrongBlockByrefHelpers(CharUnits alignment)
+ : BlockByrefHelpers(alignment) {}
- void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
- llvm::Value *srcField) override {
+ void emitCopy(CodeGenFunction &CGF, Address destField,
+ Address srcField) override {
// Do the copy with objc_retainBlock; that's all that
// _Block_object_assign would do anyway, and we'd have to pass the
// right arguments to make sure it doesn't get no-op'ed.
- llvm::LoadInst *oldValue = CGF.Builder.CreateLoad(srcField);
- oldValue->setAlignment(Alignment.getQuantity());
-
+ llvm::Value *oldValue = CGF.Builder.CreateLoad(srcField);
llvm::Value *copy = CGF.EmitARCRetainBlock(oldValue, /*mandatory*/ true);
-
- llvm::StoreInst *store = CGF.Builder.CreateStore(copy, destField);
- store->setAlignment(Alignment.getQuantity());
+ CGF.Builder.CreateStore(copy, destField);
}
- void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override {
+ void emitDispose(CodeGenFunction &CGF, Address field) override {
CGF.EmitARCDestroyStrong(field, ARCImpreciseLifetime);
}
@@ -1727,23 +1735,23 @@ public:
/// Emits the copy/dispose helpers for a __block variable with a
/// nontrivial copy constructor or destructor.
-class CXXByrefHelpers final : public CodeGenModule::ByrefHelpers {
+class CXXByrefHelpers final : public BlockByrefHelpers {
QualType VarType;
const Expr *CopyExpr;
public:
CXXByrefHelpers(CharUnits alignment, QualType type,
const Expr *copyExpr)
- : ByrefHelpers(alignment), VarType(type), CopyExpr(copyExpr) {}
+ : BlockByrefHelpers(alignment), VarType(type), CopyExpr(copyExpr) {}
bool needsCopy() const override { return CopyExpr != nullptr; }
- void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
- llvm::Value *srcField) override {
+ void emitCopy(CodeGenFunction &CGF, Address destField,
+ Address srcField) override {
if (!CopyExpr) return;
CGF.EmitSynthesizedCXXCopyCtor(destField, srcField, CopyExpr);
}
- void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override {
+ void emitDispose(CodeGenFunction &CGF, Address field) override {
EHScopeStack::stable_iterator cleanupDepth = CGF.EHStack.stable_begin();
CGF.PushDestructorCleanup(VarType, field);
CGF.PopCleanupBlocks(cleanupDepth);
@@ -1756,10 +1764,8 @@ public:
} // end anonymous namespace
static llvm::Constant *
-generateByrefCopyHelper(CodeGenFunction &CGF,
- llvm::StructType &byrefType,
- unsigned valueFieldIndex,
- CodeGenModule::ByrefHelpers &byrefInfo) {
+generateByrefCopyHelper(CodeGenFunction &CGF, const BlockByrefInfo &byrefInfo,
+ BlockByrefHelpers &generator) {
ASTContext &Context = CGF.getContext();
QualType R = Context.VoidTy;
@@ -1776,8 +1782,7 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
const CGFunctionInfo &FI = CGF.CGM.getTypes().arrangeFreeFunctionDeclaration(
R, args, FunctionType::ExtInfo(), /*variadic=*/false);
- CodeGenTypes &Types = CGF.CGM.getTypes();
- llvm::FunctionType *LTy = Types.GetFunctionType(FI);
+ llvm::FunctionType *LTy = CGF.CGM.getTypes().GetFunctionType(FI);
// FIXME: We'd like to put these into a mergable by content, with
// internal linkage.
@@ -1797,24 +1802,26 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
CGF.StartFunction(FD, R, Fn, FI, args);
- if (byrefInfo.needsCopy()) {
- llvm::Type *byrefPtrType = byrefType.getPointerTo(0);
+ if (generator.needsCopy()) {
+ llvm::Type *byrefPtrType = byrefInfo.Type->getPointerTo(0);
// dst->x
- llvm::Value *destField = CGF.GetAddrOfLocalVar(&dst);
- destField = CGF.Builder.CreateLoad(destField);
+ Address destField = CGF.GetAddrOfLocalVar(&dst);
+ destField = Address(CGF.Builder.CreateLoad(destField),
+ byrefInfo.ByrefAlignment);
destField = CGF.Builder.CreateBitCast(destField, byrefPtrType);
- destField = CGF.Builder.CreateStructGEP(&byrefType, destField,
- valueFieldIndex, "x");
+ destField = CGF.emitBlockByrefAddress(destField, byrefInfo, false,
+ "dest-object");
// src->x
- llvm::Value *srcField = CGF.GetAddrOfLocalVar(&src);
- srcField = CGF.Builder.CreateLoad(srcField);
+ Address srcField = CGF.GetAddrOfLocalVar(&src);
+ srcField = Address(CGF.Builder.CreateLoad(srcField),
+ byrefInfo.ByrefAlignment);
srcField = CGF.Builder.CreateBitCast(srcField, byrefPtrType);
- srcField =
- CGF.Builder.CreateStructGEP(&byrefType, srcField, valueFieldIndex, "x");
+ srcField = CGF.emitBlockByrefAddress(srcField, byrefInfo, false,
+ "src-object");
- byrefInfo.emitCopy(CGF, destField, srcField);
+ generator.emitCopy(CGF, destField, srcField);
}
CGF.FinishFunction();
@@ -1824,19 +1831,17 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
/// Build the copy helper for a __block variable.
static llvm::Constant *buildByrefCopyHelper(CodeGenModule &CGM,
- llvm::StructType &byrefType,
- unsigned byrefValueIndex,
- CodeGenModule::ByrefHelpers &info) {
+ const BlockByrefInfo &byrefInfo,
+ BlockByrefHelpers &generator) {
CodeGenFunction CGF(CGM);
- return generateByrefCopyHelper(CGF, byrefType, byrefValueIndex, info);
+ return generateByrefCopyHelper(CGF, byrefInfo, generator);
}
/// Generate code for a __block variable's dispose helper.
static llvm::Constant *
generateByrefDisposeHelper(CodeGenFunction &CGF,
- llvm::StructType &byrefType,
- unsigned byrefValueIndex,
- CodeGenModule::ByrefHelpers &byrefInfo) {
+ const BlockByrefInfo &byrefInfo,
+ BlockByrefHelpers &generator) {
ASTContext &Context = CGF.getContext();
QualType R = Context.VoidTy;
@@ -1848,8 +1853,7 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
const CGFunctionInfo &FI = CGF.CGM.getTypes().arrangeFreeFunctionDeclaration(
R, args, FunctionType::ExtInfo(), /*variadic=*/false);
- CodeGenTypes &Types = CGF.CGM.getTypes();
- llvm::FunctionType *LTy = Types.GetFunctionType(FI);
+ llvm::FunctionType *LTy = CGF.CGM.getTypes().GetFunctionType(FI);
// FIXME: We'd like to put these into a mergable by content, with
// internal linkage.
@@ -1869,13 +1873,14 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
false, false);
CGF.StartFunction(FD, R, Fn, FI, args);
- if (byrefInfo.needsDispose()) {
- llvm::Value *V = CGF.GetAddrOfLocalVar(&src);
- V = CGF.Builder.CreateLoad(V);
- V = CGF.Builder.CreateBitCast(V, byrefType.getPointerTo(0));
- V = CGF.Builder.CreateStructGEP(&byrefType, V, byrefValueIndex, "x");
+ if (generator.needsDispose()) {
+ Address addr = CGF.GetAddrOfLocalVar(&src);
+ addr = Address(CGF.Builder.CreateLoad(addr), byrefInfo.ByrefAlignment);
+ auto byrefPtrType = byrefInfo.Type->getPointerTo(0);
+ addr = CGF.Builder.CreateBitCast(addr, byrefPtrType);
+ addr = CGF.emitBlockByrefAddress(addr, byrefInfo, false, "object");
- byrefInfo.emitDispose(CGF, V);
+ generator.emitDispose(CGF, addr);
}
CGF.FinishFunction();
@@ -1885,37 +1890,29 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
/// Build the dispose helper for a __block variable.
static llvm::Constant *buildByrefDisposeHelper(CodeGenModule &CGM,
- llvm::StructType &byrefType,
- unsigned byrefValueIndex,
- CodeGenModule::ByrefHelpers &info) {
+ const BlockByrefInfo &byrefInfo,
+ BlockByrefHelpers &generator) {
CodeGenFunction CGF(CGM);
- return generateByrefDisposeHelper(CGF, byrefType, byrefValueIndex, info);
+ return generateByrefDisposeHelper(CGF, byrefInfo, generator);
}
/// Lazily build the copy and dispose helpers for a __block variable
/// with the given information.
template <class T>
-static T *buildByrefHelpers(CodeGenModule &CGM, llvm::StructType &byrefTy,
- unsigned byrefValueIndex, T byrefInfo) {
- // Increase the field's alignment to be at least pointer alignment,
- // since the layout of the byref struct will guarantee at least that.
- byrefInfo.Alignment = std::max(byrefInfo.Alignment,
- CharUnits::fromQuantity(CGM.PointerAlignInBytes));
-
+static T *buildByrefHelpers(CodeGenModule &CGM, const BlockByrefInfo &byrefInfo,
+ T &&generator) {
llvm::FoldingSetNodeID id;
- byrefInfo.Profile(id);
+ generator.Profile(id);
void *insertPos;
- CodeGenModule::ByrefHelpers *node
+ BlockByrefHelpers *node
= CGM.ByrefHelpersCache.FindNodeOrInsertPos(id, insertPos);
if (node) return static_cast<T*>(node);
- byrefInfo.CopyHelper =
- buildByrefCopyHelper(CGM, byrefTy, byrefValueIndex, byrefInfo);
- byrefInfo.DisposeHelper =
- buildByrefDisposeHelper(CGM, byrefTy, byrefValueIndex,byrefInfo);
+ generator.CopyHelper = buildByrefCopyHelper(CGM, byrefInfo, generator);
+ generator.DisposeHelper = buildByrefDisposeHelper(CGM, byrefInfo, generator);
- T *copy = new (CGM.getContext()) T(std::move(byrefInfo));
+ T *copy = new (CGM.getContext()) T(std::move(generator));
CGM.ByrefHelpersCache.InsertNode(copy, insertPos);
return copy;
}
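
The cache lookup above is the standard llvm::FoldingSet find-or-insert idiom: profile the candidate into a FoldingSetNodeID, probe, and only build (and publish) a node on a miss. Reduced to a toy node type (the real profile key is whatever each helper class's Profile() adds):

    #include "llvm/ADT/FoldingSet.h"

    struct HelperNode : llvm::FoldingSetNode {
      unsigned Key;
      explicit HelperNode(unsigned K) : Key(K) {}
      void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(Key); }
    };

    HelperNode *getOrCreate(llvm::FoldingSet<HelperNode> &Cache, unsigned Key) {
      llvm::FoldingSetNodeID ID;
      ID.AddInteger(Key);
      void *InsertPos;
      if (HelperNode *N = Cache.FindNodeOrInsertPos(ID, InsertPos))
        return N;                     // hit: reuse the cached helpers
      HelperNode *N = new HelperNode(Key);
      Cache.InsertNode(N, InsertPos); // miss: publish for later lookups
      return N;
    }
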
@@ -1923,21 +1920,25 @@ static T *buildByrefHelpers(CodeGenModule &CGM, llvm::StructType &byrefTy,
/// Build the copy and dispose helpers for the given __block variable
/// emission. Places the helpers in the global cache. Returns null
/// if no helpers are required.
-CodeGenModule::ByrefHelpers *
+BlockByrefHelpers *
CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
const AutoVarEmission &emission) {
const VarDecl &var = *emission.Variable;
QualType type = var.getType();
- unsigned byrefValueIndex = getByRefValueLLVMField(&var).second;
+ auto &byrefInfo = getBlockByrefInfo(&var);
+
+ // The alignment we care about for the purposes of uniquing byref
+ // helpers is the alignment of the actual byref value field.
+ CharUnits valueAlignment =
+ byrefInfo.ByrefAlignment.alignmentAtOffset(byrefInfo.FieldOffset);
if (const CXXRecordDecl *record = type->getAsCXXRecordDecl()) {
const Expr *copyExpr = CGM.getContext().getBlockVarCopyInits(&var);
if (!copyExpr && record->hasTrivialDestructor()) return nullptr;
return ::buildByrefHelpers(
- CGM, byrefType, byrefValueIndex,
- CXXByrefHelpers(emission.Alignment, type, copyExpr));
+ CGM, byrefInfo, CXXByrefHelpers(valueAlignment, type, copyExpr));
}
// Otherwise, if we don't have a retainable type, there's nothing to do.
@@ -1961,23 +1962,22 @@ CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
// Tell the runtime that this is ARC __weak, called by the
// byref routines.
case Qualifiers::OCL_Weak:
- return ::buildByrefHelpers(CGM, byrefType, byrefValueIndex,
- ARCWeakByrefHelpers(emission.Alignment));
+ return ::buildByrefHelpers(CGM, byrefInfo,
+ ARCWeakByrefHelpers(valueAlignment));
// ARC __strong __block variables need to be retained.
case Qualifiers::OCL_Strong:
// Block pointers need to be copied, and there's no direct
// transfer possible.
if (type->isBlockPointerType()) {
- return ::buildByrefHelpers(
- CGM, byrefType, byrefValueIndex,
- ARCStrongBlockByrefHelpers(emission.Alignment));
+ return ::buildByrefHelpers(CGM, byrefInfo,
+ ARCStrongBlockByrefHelpers(valueAlignment));
// Otherwise, we transfer ownership of the retain from the stack
// to the heap.
} else {
- return ::buildByrefHelpers(CGM, byrefType, byrefValueIndex,
- ARCStrongByrefHelpers(emission.Alignment));
+ return ::buildByrefHelpers(CGM, byrefInfo,
+ ARCStrongByrefHelpers(valueAlignment));
}
}
llvm_unreachable("fell out of lifetime switch!");
@@ -1996,28 +1996,33 @@ CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
if (type.isObjCGCWeak())
flags |= BLOCK_FIELD_IS_WEAK;
- return ::buildByrefHelpers(CGM, byrefType, byrefValueIndex,
- ObjectByrefHelpers(emission.Alignment, flags));
+ return ::buildByrefHelpers(CGM, byrefInfo,
+ ObjectByrefHelpers(valueAlignment, flags));
}
-std::pair<llvm::Type *, unsigned>
-CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const {
- assert(ByRefValueInfo.count(VD) && "Did not find value!");
-
- return ByRefValueInfo.find(VD)->second;
+Address CodeGenFunction::emitBlockByrefAddress(Address baseAddr,
+ const VarDecl *var,
+ bool followForward) {
+ auto &info = getBlockByrefInfo(var);
+ return emitBlockByrefAddress(baseAddr, info, followForward, var->getName());
}
-llvm::Value *CodeGenFunction::BuildBlockByrefAddress(llvm::Value *BaseAddr,
- const VarDecl *V) {
- auto P = getByRefValueLLVMField(V);
- llvm::Value *Loc =
- Builder.CreateStructGEP(P.first, BaseAddr, 1, "forwarding");
- Loc = Builder.CreateLoad(Loc);
- Loc = Builder.CreateStructGEP(P.first, Loc, P.second, V->getNameAsString());
- return Loc;
+Address CodeGenFunction::emitBlockByrefAddress(Address baseAddr,
+ const BlockByrefInfo &info,
+ bool followForward,
+ const llvm::Twine &name) {
+ // Chase the forwarding address if requested.
+ if (followForward) {
+ Address forwardingAddr =
+ Builder.CreateStructGEP(baseAddr, 1, getPointerSize(), "forwarding");
+ baseAddr = Address(Builder.CreateLoad(forwardingAddr), info.ByrefAlignment);
+ }
+
+ return Builder.CreateStructGEP(baseAddr, info.FieldIndex,
+ info.FieldOffset, name);
}
-/// BuildByRefType - This routine changes a __block variable declared as T x
+/// getBlockByrefInfo - This routine changes a __block variable declared as T x
/// into:
///
/// struct {
@@ -2032,108 +2037,116 @@ llvm::Value *CodeGenFunction::BuildBlockByrefAddress(llvm::Value *BaseAddr,
/// T x;
/// } x
///
-llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
- std::pair<llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
- if (Info.first)
- return Info.first;
+const BlockByrefInfo &CodeGenFunction::getBlockByrefInfo(const VarDecl *D) {
+ auto it = BlockByrefInfos.find(D);
+ if (it != BlockByrefInfos.end())
+ return it->second;
+
+ llvm::StructType *byrefType =
+ llvm::StructType::create(getLLVMContext(),
+ "struct.__block_byref_" + D->getNameAsString());
QualType Ty = D->getType();
+ CharUnits size;
SmallVector<llvm::Type *, 8> types;
- llvm::StructType *ByRefType =
- llvm::StructType::create(getLLVMContext(),
- "struct.__block_byref_" + D->getNameAsString());
-
// void *__isa;
types.push_back(Int8PtrTy);
+ size += getPointerSize();
// void *__forwarding;
- types.push_back(llvm::PointerType::getUnqual(ByRefType));
+ types.push_back(llvm::PointerType::getUnqual(byrefType));
+ size += getPointerSize();
// int32_t __flags;
types.push_back(Int32Ty);
+ size += CharUnits::fromQuantity(4);
// int32_t __size;
types.push_back(Int32Ty);
+ size += CharUnits::fromQuantity(4);
+
// Note that this must match *exactly* the logic in buildByrefHelpers.
- bool HasCopyAndDispose = getContext().BlockRequiresCopying(Ty, D);
- if (HasCopyAndDispose) {
+ bool hasCopyAndDispose = getContext().BlockRequiresCopying(Ty, D);
+ if (hasCopyAndDispose) {
/// void *__copy_helper;
types.push_back(Int8PtrTy);
+ size += getPointerSize();
/// void *__destroy_helper;
types.push_back(Int8PtrTy);
+ size += getPointerSize();
}
+
bool HasByrefExtendedLayout = false;
Qualifiers::ObjCLifetime Lifetime;
if (getContext().getByrefLifetime(Ty, Lifetime, HasByrefExtendedLayout) &&
- HasByrefExtendedLayout)
+ HasByrefExtendedLayout) {
/// void *__byref_variable_layout;
types.push_back(Int8PtrTy);
+ size += CharUnits::fromQuantity(PointerSizeInBytes);
+ }
- bool Packed = false;
- CharUnits Align = getContext().getDeclAlign(D);
- if (Align >
- getContext().toCharUnitsFromBits(getTarget().getPointerAlign(0))) {
- // We have to insert padding.
-
- // The struct above has 2 32-bit integers.
- unsigned CurrentOffsetInBytes = 4 * 2;
-
- // And either 2, 3, 4 or 5 pointers.
- unsigned noPointers = 2;
- if (HasCopyAndDispose)
- noPointers += 2;
- if (HasByrefExtendedLayout)
- noPointers += 1;
-
- CurrentOffsetInBytes += noPointers * CGM.getDataLayout().getTypeAllocSize(Int8PtrTy);
-
- // Align the offset.
- unsigned AlignedOffsetInBytes =
- llvm::RoundUpToAlignment(CurrentOffsetInBytes, Align.getQuantity());
-
- unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
- if (NumPaddingBytes > 0) {
- llvm::Type *Ty = Int8Ty;
- // FIXME: We need a sema error for alignment larger than the minimum of
- // the maximal stack alignment and the alignment of malloc on the system.
- if (NumPaddingBytes > 1)
- Ty = llvm::ArrayType::get(Ty, NumPaddingBytes);
-
- types.push_back(Ty);
+ // T x;
+ llvm::Type *varTy = ConvertTypeForMem(Ty);
- // We want a packed struct.
- Packed = true;
- }
+ bool packed = false;
+ CharUnits varAlign = getContext().getDeclAlign(D);
+ CharUnits varOffset = size.RoundUpToAlignment(varAlign);
+
+ // We may have to insert padding.
+ if (varOffset != size) {
+ llvm::Type *paddingTy =
+ llvm::ArrayType::get(Int8Ty, (varOffset - size).getQuantity());
+
+ types.push_back(paddingTy);
+ size = varOffset;
+
+ // Conversely, we might have to prevent LLVM from inserting padding.
+ } else if (CGM.getDataLayout().getABITypeAlignment(varTy)
+ > varAlign.getQuantity()) {
+ packed = true;
}
+ types.push_back(varTy);
- // T x;
- types.push_back(ConvertTypeForMem(Ty));
-
- ByRefType->setBody(types, Packed);
-
- Info.first = ByRefType;
-
- Info.second = types.size() - 1;
-
- return Info.first;
+ byrefType->setBody(types, packed);
+
+ BlockByrefInfo info;
+ info.Type = byrefType;
+ info.FieldIndex = types.size() - 1;
+ info.FieldOffset = varOffset;
+ info.ByrefAlignment = std::max(varAlign, getPointerAlign());
+
+ auto pair = BlockByrefInfos.insert({D, info});
+ assert(pair.second && "info was inserted recursively?");
+ return pair.first->second;
}
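
A worked example of the layout this computes: the numbers below are a sketch assuming an LP64 target and a capture whose type both requires copy/dispose helpers and is 32-byte aligned; the struct name and the type T are illustrative.

// Resulting byref struct (illustrative):
struct __block_byref_x {
  void *__isa;                           // offset  0
  struct __block_byref_x *__forwarding;  // offset  8
  int32_t __flags;                       // offset 16
  int32_t __size;                        // offset 20
  void *__copy_helper;                   // offset 24
  void *__destroy_helper;                // offset 32
  char __padding[24];                    // offsets 40..63
  T x;                                   // offset 64 = 40 rounded up to 32
};
// The cached info is then: FieldIndex = 7, FieldOffset = 64 bytes,
// ByrefAlignment = max(32, pointer alignment) = 32.
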
/// Initialize the structural components of a __block variable, i.e.
/// everything but the actual object.
void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
// Find the address of the local.
- llvm::Value *addr = emission.Address;
+ Address addr = emission.Addr;
// That's an alloca of the byref structure type.
llvm::StructType *byrefType = cast<llvm::StructType>(
- cast<llvm::PointerType>(addr->getType())->getElementType());
+ cast<llvm::PointerType>(addr.getPointer()->getType())->getElementType());
+
+ unsigned nextHeaderIndex = 0;
+ CharUnits nextHeaderOffset;
+ auto storeHeaderField = [&](llvm::Value *value, CharUnits fieldSize,
+ const Twine &name) {
+ auto fieldAddr = Builder.CreateStructGEP(addr, nextHeaderIndex,
+ nextHeaderOffset, name);
+ Builder.CreateStore(value, fieldAddr);
+
+ nextHeaderIndex++;
+ nextHeaderOffset += fieldSize;
+ };
// Build the byref helpers if necessary. This is null if we don't need any.
- CodeGenModule::ByrefHelpers *helpers =
- buildByrefHelpers(*byrefType, emission);
+ BlockByrefHelpers *helpers = buildByrefHelpers(*byrefType, emission);
const VarDecl &D = *emission.Variable;
QualType type = D.getType();
@@ -2142,7 +2155,7 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
Qualifiers::ObjCLifetime ByrefLifetime;
bool ByRefHasLifetime =
getContext().getByrefLifetime(type, ByrefLifetime, HasByrefExtendedLayout);
-
+
llvm::Value *V;
// Initialize the 'isa', which is just 0 or 1.
@@ -2150,12 +2163,10 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
if (type.isObjCGCWeak())
isa = 1;
V = Builder.CreateIntToPtr(Builder.getInt32(isa), Int8PtrTy, "isa");
- Builder.CreateStore(V,
- Builder.CreateStructGEP(nullptr, addr, 0, "byref.isa"));
+ storeHeaderField(V, getPointerSize(), "byref.isa");
// Store the address of the variable into its own forwarding pointer.
- Builder.CreateStore(
- addr, Builder.CreateStructGEP(nullptr, addr, 1, "byref.forwarding"));
+ storeHeaderField(addr.getPointer(), getPointerSize(), "byref.forwarding");
// Blocks ABI:
// c) the flags field is set to either 0 if no helper functions are
@@ -2201,31 +2212,23 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
printf("\n");
}
}
-
- Builder.CreateStore(llvm::ConstantInt::get(IntTy, flags.getBitMask()),
- Builder.CreateStructGEP(nullptr, addr, 2, "byref.flags"));
+ storeHeaderField(llvm::ConstantInt::get(IntTy, flags.getBitMask()),
+ getIntSize(), "byref.flags");
CharUnits byrefSize = CGM.GetTargetTypeStoreSize(byrefType);
V = llvm::ConstantInt::get(IntTy, byrefSize.getQuantity());
- Builder.CreateStore(V,
- Builder.CreateStructGEP(nullptr, addr, 3, "byref.size"));
+ storeHeaderField(V, getIntSize(), "byref.size");
if (helpers) {
- llvm::Value *copy_helper = Builder.CreateStructGEP(nullptr, addr, 4);
- Builder.CreateStore(helpers->CopyHelper, copy_helper);
-
- llvm::Value *destroy_helper = Builder.CreateStructGEP(nullptr, addr, 5);
- Builder.CreateStore(helpers->DisposeHelper, destroy_helper);
+ storeHeaderField(helpers->CopyHelper, getPointerSize(),
+ "byref.copyHelper");
+ storeHeaderField(helpers->DisposeHelper, getPointerSize(),
+ "byref.disposeHelper");
}
+
if (ByRefHasLifetime && HasByrefExtendedLayout) {
- llvm::Constant* ByrefLayoutInfo = CGM.getObjCRuntime().BuildByrefLayout(CGM, type);
- llvm::Value *ByrefInfoAddr =
- Builder.CreateStructGEP(nullptr, addr, helpers ? 6 : 4, "byref.layout");
- // cast destination to pointer to source type.
- llvm::Type *DesTy = ByrefLayoutInfo->getType();
- DesTy = DesTy->getPointerTo();
- llvm::Value *BC = Builder.CreatePointerCast(ByrefInfoAddr, DesTy);
- Builder.CreateStore(ByrefLayoutInfo, BC);
+ auto layoutInfo = CGM.getObjCRuntime().BuildByrefLayout(CGM, type);
+ storeHeaderField(layoutInfo, getPointerSize(), "byref.layout");
}
}
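
As a cross-check against the layout above, the storeHeaderField calls walk the header in declaration order; a sketch of the (index, offset) pairs they produce on an LP64 target, assuming helpers are present and no extended layout:

// byref.isa            index 0, offset  0, pointer-sized
// byref.forwarding     index 1, offset  8, pointer-sized
// byref.flags          index 2, offset 16, int-sized
// byref.size           index 3, offset 20, int-sized
// byref.copyHelper     index 4, offset 24, pointer-sized  (helpers only)
// byref.disposeHelper  index 5, offset 32, pointer-sized  (helpers only)
// Each field GEP's alignment is derived from the byref alignment at that
// offset, which is why the offset bookkeeping travels with the index.
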
@@ -2239,6 +2242,7 @@ void CodeGenFunction::BuildBlockRelease(llvm::Value *V, BlockFieldFlags flags) {
}
namespace {
+ /// Release a __block variable.
struct CallBlockRelease final : EHScopeStack::Cleanup {
llvm::Value *Addr;
CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {}
@@ -2259,7 +2263,8 @@ void CodeGenFunction::enterByrefCleanup(const AutoVarEmission &emission) {
if (CGM.getLangOpts().getGC() == LangOptions::GCOnly)
return;
- EHStack.pushCleanup<CallBlockRelease>(NormalAndEHCleanup, emission.Address);
+ EHStack.pushCleanup<CallBlockRelease>(NormalAndEHCleanup,
+ emission.Addr.getPointer());
}
/// Adjust the declaration of something from the blocks API.
diff --git a/clang/lib/CodeGen/CGBlocks.h b/clang/lib/CodeGen/CGBlocks.h
index c4eed0d0e8e..6be47c55b8e 100644
--- a/clang/lib/CodeGen/CGBlocks.h
+++ b/clang/lib/CodeGen/CGBlocks.h
@@ -140,6 +140,43 @@ inline BlockFieldFlags operator|(BlockFieldFlag_t l, BlockFieldFlag_t r) {
return BlockFieldFlags(l) | BlockFieldFlags(r);
}
+/// Information about the layout of a __block variable.
+class BlockByrefInfo {
+public:
+ llvm::StructType *Type;
+ unsigned FieldIndex;
+ CharUnits ByrefAlignment;
+ CharUnits FieldOffset;
+};
+
+/// A pair of helper functions for a __block variable.
+class BlockByrefHelpers : public llvm::FoldingSetNode {
+public:
+ llvm::Constant *CopyHelper;
+ llvm::Constant *DisposeHelper;
+
+ /// The alignment of the field. This is important because
+ /// different offsets to the field within the byref struct need to
+ /// have different helper functions.
+ CharUnits Alignment;
+
+ BlockByrefHelpers(CharUnits alignment) : Alignment(alignment) {}
+ BlockByrefHelpers(const BlockByrefHelpers &) = default;
+ virtual ~BlockByrefHelpers();
+
+ void Profile(llvm::FoldingSetNodeID &id) const {
+ id.AddInteger(Alignment.getQuantity());
+ profileImpl(id);
+ }
+ virtual void profileImpl(llvm::FoldingSetNodeID &id) const = 0;
+
+ virtual bool needsCopy() const { return true; }
+ virtual void emitCopy(CodeGenFunction &CGF, Address dest, Address src) = 0;
+
+ virtual bool needsDispose() const { return true; }
+ virtual void emitDispose(CodeGenFunction &CGF, Address field) = 0;
+};
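+
+The concrete helper classes in CGBlocks.cpp all follow the same shape; here is a minimal hypothetical subclass, shown only to illustrate the interface (the class name and its trivial bodies are invented, not part of the patch):
+
+class TrivialByrefHelpers final : public BlockByrefHelpers {
+public:
+  TrivialByrefHelpers(CharUnits alignment) : BlockByrefHelpers(alignment) {}
+
+  void emitCopy(CodeGenFunction &CGF, Address dest, Address src) override {
+    // Both Addresses arrive with the field alignment already attached.
+    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(src), dest);
+  }
+
+  void emitDispose(CodeGenFunction &CGF, Address field) override {
+    // Nothing to release in this sketch.
+  }
+
+  void profileImpl(llvm::FoldingSetNodeID &id) const override {
+    // No state beyond the alignment profiled by the base class.
+  }
+};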
+
/// CGBlockInfo - Information to generate a block literal.
class CGBlockInfo {
public:
@@ -152,14 +189,19 @@ public:
class Capture {
uintptr_t Data;
EHScopeStack::stable_iterator Cleanup;
+ CharUnits::QuantityType Offset;
public:
bool isIndex() const { return (Data & 1) != 0; }
bool isConstant() const { return !isIndex(); }
- unsigned getIndex() const { assert(isIndex()); return Data >> 1; }
- llvm::Value *getConstant() const {
- assert(isConstant());
- return reinterpret_cast<llvm::Value*>(Data);
+
+ unsigned getIndex() const {
+ assert(isIndex());
+ return Data >> 1;
+ }
+ CharUnits getOffset() const {
+ assert(isIndex());
+ return CharUnits::fromQuantity(Offset);
}
EHScopeStack::stable_iterator getCleanup() const {
assert(isIndex());
@@ -170,9 +212,15 @@ public:
Cleanup = cleanup;
}
- static Capture makeIndex(unsigned index) {
+ llvm::Value *getConstant() const {
+ assert(isConstant());
+ return reinterpret_cast<llvm::Value*>(Data);
+ }
+
+ static Capture makeIndex(unsigned index, CharUnits offset) {
Capture v;
v.Data = (index << 1) | 1;
+ v.Offset = offset.getQuantity();
return v;
}
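
The tagging scheme behind isIndex/isConstant is worth spelling out: an index capture stores (index << 1) | 1, while a constant capture stores the llvm::Value* directly, whose low bit is always clear. A small sketch with hypothetical values:

Capture c = Capture::makeIndex(5, CharUnits::fromQuantity(40));
assert(c.isIndex() && !c.isConstant());
assert(c.getIndex() == 5);
assert(c.getOffset() == CharUnits::fromQuantity(40));
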
@@ -205,12 +253,13 @@ public:
/// The mapping of allocated indexes within the block.
llvm::DenseMap<const VarDecl*, Capture> Captures;
- llvm::AllocaInst *Address;
+ Address LocalAddress;
llvm::StructType *StructureType;
const BlockDecl *Block;
const BlockExpr *BlockExpression;
CharUnits BlockSize;
CharUnits BlockAlign;
+ CharUnits CXXThisOffset;
// Offset of the gap caused by block header having a smaller
// alignment than the alignment of the block descriptor. This
diff --git a/clang/lib/CodeGen/CGBuilder.h b/clang/lib/CodeGen/CGBuilder.h
index fb36b8b5f15..b46f502a711 100644
--- a/clang/lib/CodeGen/CGBuilder.h
+++ b/clang/lib/CodeGen/CGBuilder.h
@@ -11,6 +11,8 @@
#define LLVM_CLANG_LIB_CODEGEN_CGBUILDER_H
#include "llvm/IR/IRBuilder.h"
+#include "Address.h"
+#include "CodeGenTypeCache.h"
namespace clang {
namespace CodeGen {
@@ -42,9 +44,260 @@ private:
#else
#define PreserveNames true
#endif
+
typedef CGBuilderInserter<PreserveNames> CGBuilderInserterTy;
+
typedef llvm::IRBuilder<PreserveNames, llvm::ConstantFolder,
- CGBuilderInserterTy> CGBuilderTy;
+ CGBuilderInserterTy> CGBuilderBaseTy;
+
+class CGBuilderTy : public CGBuilderBaseTy {
+ /// Storing a reference to the type cache here makes it a lot easier
+ /// to build natural-feeling, target-specific IR.
+ const CodeGenTypeCache &TypeCache;
+public:
+ CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::LLVMContext &C)
+ : CGBuilderBaseTy(C), TypeCache(TypeCache) {}
+ CGBuilderTy(const CodeGenTypeCache &TypeCache,
+ llvm::LLVMContext &C, const llvm::ConstantFolder &F,
+ const CGBuilderInserterTy &Inserter)
+ : CGBuilderBaseTy(C, F, Inserter), TypeCache(TypeCache) {}
+ CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::Instruction *I)
+ : CGBuilderBaseTy(I), TypeCache(TypeCache) {}
+ CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::BasicBlock *BB)
+ : CGBuilderBaseTy(BB), TypeCache(TypeCache) {}
+
+ llvm::ConstantInt *getSize(CharUnits N) {
+ return llvm::ConstantInt::get(TypeCache.SizeTy, N.getQuantity());
+ }
+ llvm::ConstantInt *getSize(uint64_t N) {
+ return llvm::ConstantInt::get(TypeCache.SizeTy, N);
+ }
+
+ // Note that we intentionally hide the CreateLoad APIs that don't
+ // take an alignment.
+ llvm::LoadInst *CreateLoad(Address Addr, const llvm::Twine &Name = "") {
+ return CreateAlignedLoad(Addr.getPointer(),
+ Addr.getAlignment().getQuantity(),
+ Name);
+ }
+ llvm::LoadInst *CreateLoad(Address Addr, const char *Name) {
+ // This overload is required to prevent string literals from
+ // ending up in the IsVolatile overload.
+ return CreateAlignedLoad(Addr.getPointer(),
+ Addr.getAlignment().getQuantity(),
+ Name);
+ }
+ llvm::LoadInst *CreateLoad(Address Addr, bool IsVolatile,
+ const llvm::Twine &Name = "") {
+ return CreateAlignedLoad(Addr.getPointer(),
+ Addr.getAlignment().getQuantity(),
+ IsVolatile,
+ Name);
+ }
+
+ using CGBuilderBaseTy::CreateAlignedLoad;
+ llvm::LoadInst *CreateAlignedLoad(llvm::Value *Addr, CharUnits Align,
+ const llvm::Twine &Name = "") {
+ return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
+ }
+ llvm::LoadInst *CreateAlignedLoad(llvm::Value *Addr, CharUnits Align,
+ const char *Name) {
+ return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
+ }
+ llvm::LoadInst *CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr,
+ CharUnits Align,
+ const llvm::Twine &Name = "") {
+ assert(Addr->getType()->getPointerElementType() == Ty);
+ return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
+ }
+ llvm::LoadInst *CreateAlignedLoad(llvm::Value *Addr, CharUnits Align,
+ bool IsVolatile,
+ const llvm::Twine &Name = "") {
+ return CreateAlignedLoad(Addr, Align.getQuantity(), IsVolatile, Name);
+ }
+
+ // Note that we intentionally hide the CreateStore APIs that don't
+ // take an alignment.
+ llvm::StoreInst *CreateStore(llvm::Value *Val, Address Addr,
+ bool IsVolatile = false) {
+ return CreateAlignedStore(Val, Addr.getPointer(),
+ Addr.getAlignment().getQuantity(), IsVolatile);
+ }
+
+ using CGBuilderBaseTy::CreateAlignedStore;
+ llvm::StoreInst *CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr,
+ CharUnits Align, bool IsVolatile = false) {
+ return CreateAlignedStore(Val, Addr, Align.getQuantity(), IsVolatile);
+ }
+
+ // FIXME: these "default-aligned" APIs should be removed,
+ // but I don't feel like fixing all the builtin code right now.
+ llvm::LoadInst *CreateDefaultAlignedLoad(llvm::Value *Addr,
+ const llvm::Twine &Name = "") {
+ return CGBuilderBaseTy::CreateLoad(Addr, false, Name);
+ }
+ llvm::LoadInst *CreateDefaultAlignedLoad(llvm::Value *Addr,
+ const char *Name) {
+ return CGBuilderBaseTy::CreateLoad(Addr, false, Name);
+ }
+ llvm::LoadInst *CreateDefaultAlignedLoad(llvm::Value *Addr, bool IsVolatile,
+ const llvm::Twine &Name = "") {
+ return CGBuilderBaseTy::CreateLoad(Addr, IsVolatile, Name);
+ }
+
+ llvm::StoreInst *CreateDefaultAlignedStore(llvm::Value *Val,
+ llvm::Value *Addr,
+ bool IsVolatile = false) {
+ return CGBuilderBaseTy::CreateStore(Val, Addr, IsVolatile);
+ }
+
+ /// Emit a load from an i1 flag variable.
+ llvm::LoadInst *CreateFlagLoad(llvm::Value *Addr,
+ const llvm::Twine &Name = "") {
+ assert(Addr->getType()->getPointerElementType() == getInt1Ty());
+ return CreateAlignedLoad(getInt1Ty(), Addr, CharUnits::One(), Name);
+ }
+
+ /// Emit a store to an i1 flag variable.
+ llvm::StoreInst *CreateFlagStore(bool Value, llvm::Value *Addr) {
+ assert(Addr->getType()->getPointerElementType() == getInt1Ty());
+ return CreateAlignedStore(getInt1(Value), Addr, CharUnits::One());
+ }
+
+ using CGBuilderBaseTy::CreateBitCast;
+ Address CreateBitCast(Address Addr, llvm::Type *Ty,
+ const llvm::Twine &Name = "") {
+ return Address(CreateBitCast(Addr.getPointer(), Ty, Name),
+ Addr.getAlignment());
+ }
+
+ /// Cast the element type of the given address to a different type,
+ /// preserving information like the alignment and address space.
+ Address CreateElementBitCast(Address Addr, llvm::Type *Ty,
+ const llvm::Twine &Name = "") {
+ auto PtrTy = Ty->getPointerTo(Addr.getAddressSpace());
+ return CreateBitCast(Addr, PtrTy, Name);
+ }
+
+ using CGBuilderBaseTy::CreatePointerBitCastOrAddrSpaceCast;
+ Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty,
+ const llvm::Twine &Name = "") {
+ llvm::Value *Ptr =
+ CreatePointerBitCastOrAddrSpaceCast(Addr.getPointer(), Ty, Name);
+ return Address(Ptr, Addr.getAlignment());
+ }
+
+ using CGBuilderBaseTy::CreateStructGEP;
+ Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset,
+ const llvm::Twine &Name = "") {
+ return Address(CreateStructGEP(Addr.getElementType(),
+ Addr.getPointer(), Index, Name),
+ Addr.getAlignment().alignmentAtOffset(Offset));
+ }
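+
+The interesting part of this overload is the alignment computation. Assuming CharUnits::alignmentAtOffset returns the largest alignment guaranteed at base + offset (the minimum of the base alignment and the largest power of two dividing the offset), some representative values:
+
+// base alignment 16, field offset  8  ->  field alignment  8
+// base alignment 16, field offset 16  ->  field alignment 16
+// base alignment  4, field offset 24  ->  field alignment  4
+// base alignment  8, field offset 20  ->  field alignment  4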
+
+ /// Given
+ /// %addr = [n x T]* ...
+ /// produce
+ /// %name = getelementptr inbounds %addr, i64 0, i64 index
+ /// where i64 is actually the target word size.
+ ///
+ /// This API assumes that drilling into an array like this is always
+ /// an inbounds operation.
+ ///
+ /// \param EltSize - the size of the type T in bytes
+ Address CreateConstArrayGEP(Address Addr, uint64_t Index, CharUnits EltSize,
+ const llvm::Twine &Name = "") {
+ return Address(CreateInBoundsGEP(Addr.getPointer(),
+ {getSize(CharUnits::Zero()),
+ getSize(Index)},
+ Name),
+ Addr.getAlignment().alignmentAtOffset(Index * EltSize));
+ }
+
+ /// Given
+ /// %addr = T* ...
+ /// produce
+ /// %name = getelementptr inbounds %addr, i64 index
+ /// where i64 is actually the target word size.
+ ///
+ /// \param EltSize - the size of the type T in bytes
+ Address CreateConstInBoundsGEP(Address Addr, uint64_t Index,
+ CharUnits EltSize,
+ const llvm::Twine &Name = "") {
+ return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
+ {getSize(Index)}, Name),
+ Addr.getAlignment().alignmentAtOffset(Index * EltSize));
+ }
+
+ /// Given
+ /// %addr = T* ...
+ /// produce
+ /// %name = getelementptr inbounds %addr, i64 index
+ /// where i64 is actually the target word size.
+ ///
+ /// \param EltSize - the size of the type T in bytes
+ Address CreateConstGEP(Address Addr, uint64_t Index, CharUnits EltSize,
+ const llvm::Twine &Name = "") {
+ return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(),
+ {getSize(Index)}, Name),
+ Addr.getAlignment().alignmentAtOffset(Index * EltSize));
+ }
+
+ /// Given a pointer to i8, adjust it by a given constant offset.
+ Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset,
+ const llvm::Twine &Name = "") {
+ assert(Addr.getElementType() == TypeCache.Int8Ty);
+ return Address(CreateInBoundsGEP(Addr.getPointer(), getSize(Offset), Name),
+ Addr.getAlignment().alignmentAtOffset(Offset));
+ }
+ Address CreateConstByteGEP(Address Addr, CharUnits Offset,
+ const llvm::Twine &Name = "") {
+ assert(Addr.getElementType() == TypeCache.Int8Ty);
+ return Address(CreateGEP(Addr.getPointer(), getSize(Offset), Name),
+ Addr.getAlignment().alignmentAtOffset(Offset));
+ }
+
+ llvm::Value *CreateConstInBoundsByteGEP(llvm::Value *Ptr, CharUnits Offset,
+ const llvm::Twine &Name = "") {
+ assert(Ptr->getType()->getPointerElementType() == TypeCache.Int8Ty);
+ return CreateInBoundsGEP(Ptr, getSize(Offset), Name);
+ }
+ llvm::Value *CreateConstByteGEP(llvm::Value *Ptr, CharUnits Offset,
+ const llvm::Twine &Name = "") {
+ assert(Ptr->getType()->getPointerElementType() == TypeCache.Int8Ty);
+ return CreateGEP(Ptr, getSize(Offset), Name);
+ }
+
+ using CGBuilderBaseTy::CreateMemCpy;
+ llvm::CallInst *CreateMemCpy(Address Dest, Address Src, llvm::Value *Size,
+ bool IsVolatile = false) {
+ auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
+ return CreateMemCpy(Dest.getPointer(), Src.getPointer(), Size,
+ Align.getQuantity(), IsVolatile);
+ }
+ llvm::CallInst *CreateMemCpy(Address Dest, Address Src, uint64_t Size,
+ bool IsVolatile = false) {
+ auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
+ return CreateMemCpy(Dest.getPointer(), Src.getPointer(), Size,
+ Align.getQuantity(), IsVolatile);
+ }
+
+ using CGBuilderBaseTy::CreateMemMove;
+ llvm::CallInst *CreateMemMove(Address Dest, Address Src, llvm::Value *Size,
+ bool IsVolatile = false) {
+ auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
+ return CreateMemMove(Dest.getPointer(), Src.getPointer(), Size,
+ Align.getQuantity(), IsVolatile);
+ }
+
+ using CGBuilderBaseTy::CreateMemSet;
+ llvm::CallInst *CreateMemSet(Address Dest, llvm::Value *Value,
+ llvm::Value *Size, bool IsVolatile = false) {
+ return CreateMemSet(Dest.getPointer(), Value, Size,
+ Dest.getAlignment().getQuantity(), IsVolatile);
+ }
+};
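+
+A short usage sketch of the new builder surface (the helper function, its name, and its parameters are hypothetical): once a value is held as an Address, every load, store, and memcpy picks up the recorded alignment instead of an IR-type default.
+
+void copyEightBytes(CGBuilderTy &Builder, Address Dest, Address Src) {
+  // Load and store with the alignments carried by Src and Dest.
+  llvm::Value *V = Builder.CreateLoad(Src, "tmp");
+  Builder.CreateStore(V, Dest);
+
+  // Memcpy whose alignment is min(Dest, Src) alignment, per the
+  // CreateMemCpy overload defined above.
+  Builder.CreateMemCpy(Dest, Src, Builder.getSize(CharUnits::fromQuantity(8)));
+}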
+
#undef PreserveNames
} // end namespace CodeGen
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 8f68d29ad06..c35f25ad103 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -282,7 +282,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_va_end: {
Value *ArgValue = (BuiltinID == Builtin::BI__va_start)
? EmitScalarExpr(E->getArg(0))
- : EmitVAListRef(E->getArg(0));
+ : EmitVAListRef(E->getArg(0)).getPointer();
llvm::Type *DestType = Int8PtrTy;
if (ArgValue->getType() != DestType)
ArgValue = Builder.CreateBitCast(ArgValue, DestType,
@@ -293,8 +293,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
}
case Builtin::BI__builtin_va_copy: {
- Value *DstPtr = EmitVAListRef(E->getArg(0));
- Value *SrcPtr = EmitVAListRef(E->getArg(1));
+ Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
+ Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
llvm::Type *Type = Int8PtrTy;
@@ -743,29 +743,24 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
}
case Builtin::BIbzero:
case Builtin::BI__builtin_bzero: {
- std::pair<llvm::Value*, unsigned> Dest =
- EmitPointerWithAlignment(E->getArg(0));
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
Value *SizeVal = EmitScalarExpr(E->getArg(1));
- EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
+ EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
- Builder.CreateMemSet(Dest.first, Builder.getInt8(0), SizeVal,
- Dest.second, false);
- return RValue::get(Dest.first);
+ Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
+ return RValue::get(Dest.getPointer());
}
case Builtin::BImemcpy:
case Builtin::BI__builtin_memcpy: {
- std::pair<llvm::Value*, unsigned> Dest =
- EmitPointerWithAlignment(E->getArg(0));
- std::pair<llvm::Value*, unsigned> Src =
- EmitPointerWithAlignment(E->getArg(1));
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- unsigned Align = std::min(Dest.second, Src.second);
- EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
+ EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
- EmitNonNullArgCheck(RValue::get(Src.first), E->getArg(1)->getType(),
+ EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
E->getArg(1)->getExprLoc(), FD, 1);
- Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
- return RValue::get(Dest.first);
+ Builder.CreateMemCpy(Dest, Src, SizeVal, false);
+ return RValue::get(Dest.getPointer());
}
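
The deleted std::min is not lost: the CreateMemCpy(Address, Address, ...) overload in CGBuilder.h now computes it. A source-level illustration, with a hypothetical input, of why the minimum matters:

// struct __attribute__((packed)) P { char c; int i; };
// void f(struct P *p, int *q) { __builtin_memcpy(&p->i, q, 4); }
//
// EmitPointerWithAlignment gives &p->i alignment 1 (packed, offset 1) and
// q alignment 4, so the emitted memcpy uses align min(1, 4) = 1.
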
case Builtin::BI__builtin___memcpy_chk: {
@@ -776,23 +771,20 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
break;
if (Size.ugt(DstSize))
break;
- std::pair<llvm::Value*, unsigned> Dest =
- EmitPointerWithAlignment(E->getArg(0));
- std::pair<llvm::Value*, unsigned> Src =
- EmitPointerWithAlignment(E->getArg(1));
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- unsigned Align = std::min(Dest.second, Src.second);
- Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
- return RValue::get(Dest.first);
+ Builder.CreateMemCpy(Dest, Src, SizeVal, false);
+ return RValue::get(Dest.getPointer());
}
case Builtin::BI__builtin_objc_memmove_collectable: {
- Value *Address = EmitScalarExpr(E->getArg(0));
- Value *SrcAddr = EmitScalarExpr(E->getArg(1));
+ Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
+ Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
- Address, SrcAddr, SizeVal);
- return RValue::get(Address);
+ DestAddr, SrcAddr, SizeVal);
+ return RValue::get(DestAddr.getPointer());
}
case Builtin::BI__builtin___memmove_chk: {
@@ -803,42 +795,35 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
break;
if (Size.ugt(DstSize))
break;
- std::pair<llvm::Value*, unsigned> Dest =
- EmitPointerWithAlignment(E->getArg(0));
- std::pair<llvm::Value*, unsigned> Src =
- EmitPointerWithAlignment(E->getArg(1));
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- unsigned Align = std::min(Dest.second, Src.second);
- Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
- return RValue::get(Dest.first);
+ Builder.CreateMemMove(Dest, Src, SizeVal, false);
+ return RValue::get(Dest.getPointer());
}
case Builtin::BImemmove:
case Builtin::BI__builtin_memmove: {
- std::pair<llvm::Value*, unsigned> Dest =
- EmitPointerWithAlignment(E->getArg(0));
- std::pair<llvm::Value*, unsigned> Src =
- EmitPointerWithAlignment(E->getArg(1));
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- unsigned Align = std::min(Dest.second, Src.second);
- EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
+ EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
- EmitNonNullArgCheck(RValue::get(Src.first), E->getArg(1)->getType(),
+ EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
E->getArg(1)->getExprLoc(), FD, 1);
- Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
- return RValue::get(Dest.first);
+ Builder.CreateMemMove(Dest, Src, SizeVal, false);
+ return RValue::get(Dest.getPointer());
}
case Builtin::BImemset:
case Builtin::BI__builtin_memset: {
- std::pair<llvm::Value*, unsigned> Dest =
- EmitPointerWithAlignment(E->getArg(0));
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
+ EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
- Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
- return RValue::get(Dest.first);
+ Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
+ return RValue::get(Dest.getPointer());
}
case Builtin::BI__builtin___memset_chk: {
// fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
@@ -848,13 +833,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
break;
if (Size.ugt(DstSize))
break;
- std::pair<llvm::Value*, unsigned> Dest =
- EmitPointerWithAlignment(E->getArg(0));
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
- return RValue::get(Dest.first);
+ Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
+ return RValue::get(Dest.getPointer());
}
case Builtin::BI__builtin_dwarf_cfa: {
// The offset in bytes from the first argument to the CFA.
@@ -958,7 +942,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
}
case Builtin::BI__builtin_setjmp: {
// Buffer is a void**.
- Value *Buf = EmitScalarExpr(E->getArg(0));
+ Address Buf = EmitPointerWithAlignment(E->getArg(0));
// Store the frame pointer to the setjmp buffer.
Value *FrameAddr =
@@ -969,14 +953,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// Store the stack pointer to the setjmp buffer.
Value *StackAddr =
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
- Value *StackSaveSlot =
- Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
+ Address StackSaveSlot =
+ Builder.CreateConstInBoundsGEP(Buf, 2, getPointerSize());
Builder.CreateStore(StackAddr, StackSaveSlot);
// Call LLVM's EH setjmp, which is lightweight.
Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
- return RValue::get(Builder.CreateCall(F, Buf));
+ return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
}
case Builtin::BI__builtin_longjmp: {
Value *Buf = EmitScalarExpr(E->getArg(0));
@@ -1141,8 +1125,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
StoreSize.getQuantity() * 8);
Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
llvm::StoreInst *Store =
- Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
- Store->setAlignment(StoreSize.getQuantity());
+ Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
+ StoreSize);
Store->setAtomic(llvm::Release);
return RValue::get(nullptr);
}
@@ -1276,15 +1260,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
bool Volatile =
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
+ Address Ptr = EmitPointerWithAlignment(E->getArg(0));
+ unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace();
Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
Value *NewVal = Builder.getInt8(0);
Value *Order = EmitScalarExpr(E->getArg(1));
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
- Store->setAlignment(1);
switch (ord) {
case 0: // memory_order_relaxed
default: // invalid order
@@ -1317,7 +1300,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
for (unsigned i = 0; i < 3; ++i) {
Builder.SetInsertPoint(BBs[i]);
StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
- Store->setAlignment(1);
Store->setOrdering(Orders[i]);
Builder.CreateBr(ContBB);
}
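
A behavioral nuance worth noting: the old code pinned these stores to align 1, while the new code lets them inherit the alignment of the Address computed for the pointer argument. Sketched effect, assuming a plain _Bool operand:

// For '__atomic_clear(&flag, order)' with '_Bool flag', the store
// previously carried 'align 1' unconditionally; with Address it carries
// whatever EmitPointerWithAlignment proved for the argument (still 1 for
// a plain _Bool, but potentially larger for a more aligned pointee).
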
@@ -1499,8 +1481,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Value *X = EmitScalarExpr(E->getArg(0));
llvm::Value *Y = EmitScalarExpr(E->getArg(1));
llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
- std::pair<llvm::Value*, unsigned> CarryOutPtr =
- EmitPointerWithAlignment(E->getArg(3));
+ Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
// Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
llvm::Intrinsic::ID IntrinsicId;
@@ -1531,9 +1512,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Sum1, Carryin, Carry2);
llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
X->getType());
- llvm::StoreInst *CarryOutStore = Builder.CreateStore(CarryOut,
- CarryOutPtr.first);
- CarryOutStore->setAlignment(CarryOutPtr.second);
+ Builder.CreateStore(CarryOut, CarryOutPtr);
return RValue::get(Sum2);
}
case Builtin::BI__builtin_uadd_overflow:
@@ -1560,8 +1539,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// Scalarize our inputs.
llvm::Value *X = EmitScalarExpr(E->getArg(0));
llvm::Value *Y = EmitScalarExpr(E->getArg(1));
- std::pair<llvm::Value *, unsigned> SumOutPtr =
- EmitPointerWithAlignment(E->getArg(2));
+ Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
// Decide which of the overflow intrinsics we are lowering to:
llvm::Intrinsic::ID IntrinsicId;
@@ -1602,13 +1580,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Value *Carry;
llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
- llvm::StoreInst *SumOutStore = Builder.CreateStore(Sum, SumOutPtr.first);
- SumOutStore->setAlignment(SumOutPtr.second);
+ Builder.CreateStore(Sum, SumOutPtr);
return RValue::get(Carry);
}
case Builtin::BI__builtin_addressof:
- return RValue::get(EmitLValue(E->getArg(0)).getAddress());
+ return RValue::get(EmitLValue(E->getArg(0)).getPointer());
case Builtin::BI__builtin_operator_new:
return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(),
E->getArg(0), false);
@@ -1997,61 +1974,6 @@ Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
return Builder.CreateAShr(Vec, Shift, name);
}
-/// GetPointeeAlignment - Given an expression with a pointer type, find the
-/// alignment of the type referenced by the pointer. Skip over implicit
-/// casts.
-std::pair<llvm::Value*, unsigned>
-CodeGenFunction::EmitPointerWithAlignment(const Expr *Addr) {
- assert(Addr->getType()->isPointerType());
- Addr = Addr->IgnoreParens();
- if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Addr)) {
- if ((ICE->getCastKind() == CK_BitCast || ICE->getCastKind() == CK_NoOp) &&
- ICE->getSubExpr()->getType()->isPointerType()) {
- std::pair<llvm::Value*, unsigned> Ptr =
- EmitPointerWithAlignment(ICE->getSubExpr());
- Ptr.first = Builder.CreateBitCast(Ptr.first,
- ConvertType(Addr->getType()));
- return Ptr;
- } else if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
- LValue LV = EmitLValue(ICE->getSubExpr());
- unsigned Align = LV.getAlignment().getQuantity();
- if (!Align) {
- // FIXME: Once LValues are fixed to always set alignment,
- // zap this code.
- QualType PtTy = ICE->getSubExpr()->getType();
- if (!PtTy->isIncompleteType())
- Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
- else
- Align = 1;
- }
- return std::make_pair(LV.getAddress(), Align);
- }
- }
- if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Addr)) {
- if (UO->getOpcode() == UO_AddrOf) {
- LValue LV = EmitLValue(UO->getSubExpr());
- unsigned Align = LV.getAlignment().getQuantity();
- if (!Align) {
- // FIXME: Once LValues are fixed to always set alignment,
- // zap this code.
- QualType PtTy = UO->getSubExpr()->getType();
- if (!PtTy->isIncompleteType())
- Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
- else
- Align = 1;
- }
- return std::make_pair(LV.getAddress(), Align);
- }
- }
-
- unsigned Align = 1;
- QualType PtTy = Addr->getType()->getPointeeType();
- if (!PtTy->isIncompleteType())
- Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
-
- return std::make_pair(EmitScalarExpr(Addr), Align);
-}
-
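
The pair-returning helper is deleted here; the Address-returning replacement used throughout this file lives elsewhere in the patch. The net effect on call sites, sketched with hypothetical ArgExpr and Val:

// Before: unpack the pair and thread the alignment by hand.
std::pair<llvm::Value*, unsigned> P = EmitPointerWithAlignment(ArgExpr);
llvm::StoreInst *S = Builder.CreateStore(Val, P.first);
S->setAlignment(P.second);

// After: the Address carries its alignment into every use.
Address A = EmitPointerWithAlignment(ArgExpr);
Builder.CreateStore(Val, A);
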
enum {
AddRetType = (1 << 0),
Add1ArgType = (1 << 1),
@@ -2762,7 +2684,7 @@ static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF,
Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
const char *NameHint, unsigned Modifier, const CallExpr *E,
- SmallVectorImpl<llvm::Value *> &Ops, llvm::Value *Align) {
+ SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1) {
// Get the last argument, which specifies the vector type.
llvm::APSInt NeonTypeConst;
const Expr *Arg = E->getArg(E->getNumArgs() - 1);
@@ -2779,6 +2701,10 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
if (!Ty)
return nullptr;
+ auto getAlignmentValue32 = [&](Address addr) -> Value* {
+ return Builder.getInt32(addr.getAlignment().getQuantity());
+ };
+
unsigned Int = LLVMIntrinsic;
if ((Modifier & UnsignedAlts) && !Usgn)
Int = AltLLVMIntrinsic;
@@ -2927,7 +2853,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
}
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v:
- Ops.push_back(Align);
+ Ops.push_back(getAlignmentValue32(PtrOp0));
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vld1");
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v:
@@ -2936,18 +2862,18 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vld4_v:
case NEON::BI__builtin_neon_vld4q_v: {
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Ty);
+ Value *Align = getAlignmentValue32(PtrOp1);
Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v: {
Value *V = UndefValue::get(Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- LoadInst *Ld = Builder.CreateLoad(Ops[0]);
- Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
+ PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty);
+ LoadInst *Ld = Builder.CreateLoad(PtrOp0);
llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
return EmitNeonSplat(Ops[0], CI);
@@ -2961,11 +2887,11 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Ty);
for (unsigned I = 2; I < Ops.size() - 1; ++I)
Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
- Ops.push_back(Align);
+ Ops.push_back(getAlignmentValue32(PtrOp1));
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vmovl_v: {
llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
@@ -3078,7 +3004,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vst3q_lane_v:
case NEON::BI__builtin_neon_vst4_lane_v:
case NEON::BI__builtin_neon_vst4q_lane_v:
- Ops.push_back(Align);
+ Ops.push_back(getAlignmentValue32(PtrOp0));
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "");
case NEON::BI__builtin_neon_vsubhn_v: {
llvm::VectorType *SrcTy =
@@ -3113,7 +3039,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
- SV = Builder.CreateStore(SV, Addr);
+ SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
@@ -3141,7 +3067,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
- SV = Builder.CreateStore(SV, Addr);
+ SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
@@ -3161,7 +3087,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
- SV = Builder.CreateStore(SV, Addr);
+ SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
@@ -3496,11 +3422,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
: Intrinsic::arm_strexd);
llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, nullptr);
- Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
+ Address Tmp = CreateMemTemp(E->getArg(0)->getType());
Value *Val = EmitScalarExpr(E->getArg(0));
Builder.CreateStore(Val, Tmp);
- Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
+    Address LdPtr = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
Val = Builder.CreateLoad(LdPtr);
Value *Arg0 = Builder.CreateExtractValue(Val, 0);
@@ -3619,8 +3545,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");
+ auto getAlignmentValue32 = [&](Address addr) -> Value* {
+ return Builder.getInt32(addr.getAlignment().getQuantity());
+ };
+
+ Address PtrOp0 = Address::invalid();
+ Address PtrOp1 = Address::invalid();
SmallVector<Value*, 4> Ops;
- llvm::Value *Align = nullptr;
bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
for (unsigned i = 0, e = NumArgs; i != e; i++) {
@@ -3650,10 +3581,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vst4q_lane_v:
// Get the alignment for the argument in addition to the value;
// we'll use it later.
- std::pair<llvm::Value*, unsigned> Src =
- EmitPointerWithAlignment(E->getArg(0));
- Ops.push_back(Src.first);
- Align = Builder.getInt32(Src.second);
+ PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
+ Ops.push_back(PtrOp0.getPointer());
continue;
}
}
@@ -3676,10 +3605,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vld4_dup_v:
// Get the alignment for the argument in addition to the value;
// we'll use it later.
- std::pair<llvm::Value*, unsigned> Src =
- EmitPointerWithAlignment(E->getArg(1));
- Ops.push_back(Src.first);
- Align = Builder.getInt32(Src.second);
+ PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
+ Ops.push_back(PtrOp1.getPointer());
continue;
}
}
@@ -3790,7 +3717,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
if (Builtin)
return EmitCommonNeonBuiltinExpr(
Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
- Builtin->NameHint, Builtin->TypeModifier, E, Ops, Align);
+ Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1);
unsigned Int;
switch (BuiltinID) {
@@ -3807,6 +3734,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Load the value as a one-element vector.
Ty = llvm::VectorType::get(VTy->getElementType(), 1);
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty);
+ Value *Align = getAlignmentValue32(PtrOp0);
Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
// Combine them.
uint32_t Indices[] = {1 - Lane, Lane};
@@ -3818,8 +3746,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- LoadInst *Ld = Builder.CreateLoad(Ops[0]);
- Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
+ Value *Ld = Builder.CreateLoad(PtrOp0);
return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
}
case NEON::BI__builtin_neon_vld2_dup_v:
@@ -3840,10 +3767,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
default: llvm_unreachable("unknown vld_dup intrinsic?");
}
Function *F = CGM.getIntrinsic(Int, Ty);
+ llvm::Value *Align = getAlignmentValue32(PtrOp1);
Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, "vld_dup");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
switch (BuiltinID) {
case NEON::BI__builtin_neon_vld2_dup_v:
@@ -3866,7 +3794,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
Args.push_back(CI);
- Args.push_back(Align);
+ Args.push_back(getAlignmentValue32(PtrOp1));
Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
// splat lane 0 to all elts in each vector of the result.
@@ -3879,7 +3807,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vqrshrn_n_v:
Int =
@@ -3931,7 +3859,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
- Ops[2] = Align;
+ Ops[2] = getAlignmentValue32(PtrOp0);
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
Ops[1]->getType()), Ops);
}
@@ -3940,9 +3868,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- StoreInst *St = Builder.CreateStore(Ops[1],
- Builder.CreateBitCast(Ops[0], Ty));
- St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
+ auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty));
return St;
}
case NEON::BI__builtin_neon_vtbl1_v:
@@ -4269,14 +4195,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
: Intrinsic::aarch64_stxp);
llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty, nullptr);
- Value *One = llvm::ConstantInt::get(Int32Ty, 1);
- Value *Tmp = Builder.CreateAlloca(ConvertType(E->getArg(0)->getType()),
- One);
- Value *Val = EmitScalarExpr(E->getArg(0));
- Builder.CreateStore(Val, Tmp);
+ Address Tmp = CreateMemTemp(E->getArg(0)->getType());
+ EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
- Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
- Val = Builder.CreateLoad(LdPtr);
+ Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
+ llvm::Value *Val = Builder.CreateLoad(Tmp);
Value *Arg0 = Builder.CreateExtractValue(Val, 0);
Value *Arg1 = Builder.CreateExtractValue(Val, 1);
@@ -4430,12 +4353,12 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vldrq_p128: {
llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
- return Builder.CreateLoad(Ptr);
+ return Builder.CreateDefaultAlignedLoad(Ptr);
}
case NEON::BI__builtin_neon_vstrq_p128: {
llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
- return Builder.CreateStore(EmitScalarExpr(E->getArg(1)), Ptr);
+ return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
}
case NEON::BI__builtin_neon_vcvts_u32_f32:
case NEON::BI__builtin_neon_vcvtd_u64_f64:
@@ -4895,7 +4818,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
if (Builtin)
return EmitCommonNeonBuiltinExpr(
Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
- Builtin->NameHint, Builtin->TypeModifier, E, Ops, nullptr);
+ Builtin->NameHint, Builtin->TypeModifier, E, Ops,
+ /*never use addresses*/ Address::invalid(), Address::invalid());
if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops))
return V;
@@ -5589,7 +5513,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vst1_x2_v:
case NEON::BI__builtin_neon_vst1q_x2_v:
@@ -5620,25 +5544,25 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
- return Builder.CreateLoad(Ops[0]);
+ return Builder.CreateDefaultAlignedLoad(Ops[0]);
case NEON::BI__builtin_neon_vst1_v:
case NEON::BI__builtin_neon_vst1q_v:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
case NEON::BI__builtin_neon_vld1_lane_v:
case NEON::BI__builtin_neon_vld1q_lane_v:
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[0] = Builder.CreateLoad(Ops[0]);
+ Ops[0] = Builder.CreateDefaultAlignedLoad(Ops[0]);
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v: {
Value *V = UndefValue::get(Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[0] = Builder.CreateLoad(Ops[0]);
+ Ops[0] = Builder.CreateDefaultAlignedLoad(Ops[0]);
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
return EmitNeonSplat(Ops[0], CI);
@@ -5648,7 +5572,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- return Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty));
+ return Builder.CreateDefaultAlignedStore(Ops[1],
+ Builder.CreateBitCast(Ops[0], Ty));
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
@@ -5658,7 +5583,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v: {
@@ -5669,7 +5594,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_v:
case NEON::BI__builtin_neon_vld4q_v: {
@@ -5680,7 +5605,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_dup_v:
case NEON::BI__builtin_neon_vld2q_dup_v: {
@@ -5692,7 +5617,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_dup_v:
case NEON::BI__builtin_neon_vld3q_dup_v: {
@@ -5704,7 +5629,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_dup_v:
case NEON::BI__builtin_neon_vld4q_dup_v: {
@@ -5716,7 +5641,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_lane_v:
case NEON::BI__builtin_neon_vld2q_lane_v: {
@@ -5730,7 +5655,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_lane_v:
case NEON::BI__builtin_neon_vld3q_lane_v: {
@@ -5745,7 +5670,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_lane_v:
case NEON::BI__builtin_neon_vld4q_lane_v: {
@@ -5761,7 +5686,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vst2_v:
case NEON::BI__builtin_neon_vst2q_v: {
@@ -5830,7 +5755,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
- SV = Builder.CreateStore(SV, Addr);
+ SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
@@ -5849,7 +5774,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
- SV = Builder.CreateStore(SV, Addr);
+ SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
@@ -5869,7 +5794,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
- SV = Builder.CreateStore(SV, Addr);
+ SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
@@ -6041,7 +5966,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
ConstantInt::get(Int32Ty, 0)
};
Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
- Value *Features = Builder.CreateLoad(CpuFeatures);
+ Value *Features = Builder.CreateAlignedLoad(CpuFeatures,
+ CharUnits::fromQuantity(4));
// Check the value of the bit corresponding to the feature requested.
Value *Bitset = Builder.CreateAnd(
@@ -6049,9 +5975,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateICmpNE(Bitset, llvm::ConstantInt::get(Int32Ty, 0));
}
case X86::BI_mm_prefetch: {
- Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *Address = Ops[0];
Value *RW = ConstantInt::get(Int32Ty, 0);
- Value *Locality = EmitScalarExpr(E->getArg(1));
+ Value *Locality = Ops[1];
Value *Data = ConstantInt::get(Int32Ty, 1);
Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
return Builder.CreateCall(F, {Address, RW, Locality, Data});
@@ -6069,15 +5995,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateExtractElement(Ops[0],
llvm::ConstantInt::get(Ops[1]->getType(), 0));
case X86::BI__builtin_ia32_ldmxcsr: {
- Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
+ Address Tmp = CreateMemTemp(E->getArg(0)->getType());
Builder.CreateStore(Ops[0], Tmp);
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
- Builder.CreateBitCast(Tmp, Int8PtrTy));
+ Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
}
case X86::BI__builtin_ia32_stmxcsr: {
- Value *Tmp = CreateMemTemp(E->getType());
+ Address Tmp = CreateMemTemp(E->getType());
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
- Builder.CreateBitCast(Tmp, Int8PtrTy));
+ Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
return Builder.CreateLoad(Tmp, "stmxcsr");
}
case X86::BI__builtin_ia32_storehps:
@@ -6095,7 +6021,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// cast pointer to i64 & store
Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
- return Builder.CreateStore(Ops[1], Ops[0]);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case X86::BI__builtin_ia32_palignr128:
case X86::BI__builtin_ia32_palignr256: {
@@ -6204,7 +6130,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Value *BC = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()),
"cast");
- StoreInst *SI = Builder.CreateStore(Ops[1], BC);
+ StoreInst *SI = Builder.CreateDefaultAlignedStore(Ops[1], BC);
SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
// If the operand is an integer, we can't assume alignment. Otherwise,
@@ -6256,7 +6182,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
- Builder.CreateStore(Builder.CreateExtractValue(Call, 0), Ops[0]);
+ Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0),
+ Ops[0]);
return Builder.CreateExtractValue(Call, 1);
}
// SSE comparison intrinsics
@@ -6659,8 +6586,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
// Translate from the intrinsic's struct return to the builtin's out
// argument.
- std::pair<llvm::Value *, unsigned> FlagOutPtr
- = EmitPointerWithAlignment(E->getArg(3));
+ Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));
llvm::Value *X = EmitScalarExpr(E->getArg(0));
llvm::Value *Y = EmitScalarExpr(E->getArg(1));
@@ -6675,11 +6601,10 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
llvm::Type *RealFlagType
- = FlagOutPtr.first->getType()->getPointerElementType();
+ = FlagOutPtr.getPointer()->getType()->getPointerElementType();
llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
- llvm::StoreInst *FlagStore = Builder.CreateStore(FlagExt, FlagOutPtr.first);
- FlagStore->setAlignment(FlagOutPtr.second);
+ Builder.CreateStore(FlagExt, FlagOutPtr);
return Result;
}
case AMDGPU::BI__builtin_amdgpu_div_fmas:
@@ -6730,7 +6655,7 @@ static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
SmallVector<Value *, 8> Args(NumArgs);
for (unsigned I = 0; I < NumArgs; ++I)
Args[I] = CGF.EmitScalarExpr(E->getArg(I));
- Value *CCPtr = CGF.EmitScalarExpr(E->getArg(NumArgs));
+ Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
Value *F = CGF.CGM.getIntrinsic(IntrinsicID);
Value *Call = CGF.Builder.CreateCall(F, Args);
Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
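The changes above all apply one substitution: a bare llvm::Value* pointer, plus at best an ad-hoc alignment integer, becomes an Address that carries its alignment as CharUnits, and loads/stores that intentionally use the IR default alignment are now spelled CreateDefaultAligned*. A minimal sketch of the interface Address must expose, inferred from the call sites in this diff (the real definition is the new Address.h header this patch introduces):

    // Sketch only; members inferred from uses like getPointer()/getElementType().
    class Address {
      llvm::Value *Pointer;
      CharUnits Alignment;
    public:
      Address(llvm::Value *Pointer, CharUnits Alignment)
          : Pointer(Pointer), Alignment(Alignment) {}
      static Address invalid() { return Address(nullptr, CharUnits()); }
      bool isValid() const { return Pointer != nullptr; }
      llvm::Value *getPointer() const { return Pointer; }
      llvm::PointerType *getType() const {
        return llvm::cast<llvm::PointerType>(Pointer->getType());
      }
      llvm::Type *getElementType() const { return getType()->getElementType(); }
      unsigned getAddressSpace() const { return getType()->getAddressSpace(); }
      CharUnits getAlignment() const { return Alignment; }
    };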
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index 67d0ab7a82f..045e19b189d 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -57,9 +57,9 @@ private:
unsigned Alignment = 0) {
llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0),
llvm::ConstantInt::get(SizeTy, 0)};
- auto *ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
- return llvm::ConstantExpr::getGetElementPtr(ConstStr->getValueType(),
- ConstStr, Zeros);
+ auto ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr.getElementType(),
+ ConstStr.getPointer(), Zeros);
}
void emitDeviceStubBody(CodeGenFunction &CGF, FunctionArgList &Args);
@@ -121,7 +121,7 @@ void CGNVCUDARuntime::emitDeviceStubBody(CodeGenFunction &CGF,
std::vector<llvm::Type *> ArgTypes;
for (FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
I != E; ++I) {
- llvm::Value *V = CGF.GetAddrOfLocalVar(*I);
+ llvm::Value *V = CGF.GetAddrOfLocalVar(*I).getPointer();
ArgValues.push_back(V);
assert(isa<llvm::PointerType>(V->getType()) && "Arg type not PointerType");
ArgTypes.push_back(cast<llvm::PointerType>(V->getType())->getElementType());
@@ -173,7 +173,7 @@ llvm::Function *CGNVCUDARuntime::makeRegisterKernelsFn() {
llvm::GlobalValue::InternalLinkage, "__cuda_register_kernels", &TheModule);
llvm::BasicBlock *EntryBB =
llvm::BasicBlock::Create(Context, "entry", RegisterKernelsFunc);
- CGBuilderTy Builder(Context);
+ CGBuilderTy Builder(CGM, Context);
Builder.SetInsertPoint(EntryBB);
// void __cudaRegisterFunction(void **, const char *, char *, const char *,
@@ -230,7 +230,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
llvm::GlobalValue::InternalLinkage, "__cuda_module_ctor", &TheModule);
llvm::BasicBlock *CtorEntryBB =
llvm::BasicBlock::Create(Context, "entry", ModuleCtorFunc);
- CGBuilderTy CtorBuilder(Context);
+ CGBuilderTy CtorBuilder(CGM, Context);
CtorBuilder.SetInsertPoint(CtorEntryBB);
@@ -267,7 +267,8 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
llvm::GlobalVariable *GpuBinaryHandle = new llvm::GlobalVariable(
TheModule, VoidPtrPtrTy, false, llvm::GlobalValue::InternalLinkage,
llvm::ConstantPointerNull::get(VoidPtrPtrTy), "__cuda_gpubin_handle");
- CtorBuilder.CreateStore(RegisterFatbinCall, GpuBinaryHandle, false);
+ CtorBuilder.CreateAlignedStore(RegisterFatbinCall, GpuBinaryHandle,
+ CGM.getPointerAlign());
// Call __cuda_register_kernels(GpuBinaryHandle);
CtorBuilder.CreateCall(RegisterKernelsFunc, RegisterFatbinCall);
@@ -300,12 +301,13 @@ llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
llvm::GlobalValue::InternalLinkage, "__cuda_module_dtor", &TheModule);
llvm::BasicBlock *DtorEntryBB =
llvm::BasicBlock::Create(Context, "entry", ModuleDtorFunc);
- CGBuilderTy DtorBuilder(Context);
+ CGBuilderTy DtorBuilder(CGM, Context);
DtorBuilder.SetInsertPoint(DtorEntryBB);
for (llvm::GlobalVariable *GpuBinaryHandle : GpuBinaryHandles) {
- DtorBuilder.CreateCall(UnregisterFatbinFunc,
- DtorBuilder.CreateLoad(GpuBinaryHandle, false));
+ auto HandleValue =
+ DtorBuilder.CreateAlignedLoad(GpuBinaryHandle, CGM.getPointerAlign());
+ DtorBuilder.CreateCall(UnregisterFatbinFunc, HandleValue);
}
DtorBuilder.CreateRetVoid();
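Two related changes recur in this file: CGBuilderTy instances created outside a CodeGenFunction now take the CodeGenModule, which gives the builder access to alignment information, and accesses to the global __cuda_gpubin_handle state their alignment explicitly instead of inheriting the IR default. A standalone sketch of the load side against plain llvm::IRBuilder (helper name hypothetical; PtrAlign corresponds to CGM.getPointerAlign(), typically 8 bytes on a 64-bit target):

    static llvm::Value *loadHandle(llvm::IRBuilder<> &B,
                                   llvm::GlobalVariable *Handle,
                                   clang::CharUnits PtrAlign) {
      // Same effect as CreateAlignedLoad(Handle, CGM.getPointerAlign()).
      llvm::LoadInst *Load = B.CreateLoad(Handle);
      Load->setAlignment(PtrAlign.getQuantity());
      return Load;
    }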
diff --git a/clang/lib/CodeGen/CGCXX.cpp b/clang/lib/CodeGen/CGCXX.cpp
index cb8700ec563..ad303649867 100644
--- a/clang/lib/CodeGen/CGCXX.cpp
+++ b/clang/lib/CodeGen/CGCXX.cpp
@@ -28,6 +28,7 @@
using namespace clang;
using namespace CodeGen;
+
/// Try to emit a base destructor as an alias to its primary
/// base-class destructor.
bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
@@ -273,7 +274,7 @@ static llvm::Value *BuildAppleKextVirtualCall(CodeGenFunction &CGF,
VTableIndex += AddressPoint;
llvm::Value *VFuncPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
- return CGF.Builder.CreateLoad(VFuncPtr);
+ return CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.PointerAlignInBytes);
}
/// BuildAppleKextVirtualCall - This routine is to support gcc's kext ABI making
diff --git a/clang/lib/CodeGen/CGCXXABI.cpp b/clang/lib/CodeGen/CGCXXABI.cpp
index dc16616df9c..1f0c3c0e1b4 100644
--- a/clang/lib/CodeGen/CGCXXABI.cpp
+++ b/clang/lib/CodeGen/CGCXXABI.cpp
@@ -73,10 +73,12 @@ CGCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
}
llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(
- CodeGenFunction &CGF, const Expr *E, llvm::Value *&This,
+ CodeGenFunction &CGF, const Expr *E, Address This,
+ llvm::Value *&ThisPtrForCall,
llvm::Value *MemPtr, const MemberPointerType *MPT) {
ErrorUnsupportedABI(CGF, "calls through member pointers");
+ ThisPtrForCall = This.getPointer();
const FunctionProtoType *FPT =
MPT->getPointeeType()->getAs<FunctionProtoType>();
const CXXRecordDecl *RD =
@@ -88,10 +90,11 @@ llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(
llvm::Value *
CGCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
- llvm::Value *Base, llvm::Value *MemPtr,
+ Address Base, llvm::Value *MemPtr,
const MemberPointerType *MPT) {
ErrorUnsupportedABI(CGF, "loads of member pointers");
- llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())->getPointerTo();
+ llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())
+ ->getPointerTo(Base.getAddressSpace());
return llvm::Constant::getNullValue(Ty);
}
@@ -159,13 +162,24 @@ void CGCXXABI::buildThisParam(CodeGenFunction &CGF, FunctionArgList &params) {
&CGM.getContext().Idents.get("this"),
MD->getThisType(CGM.getContext()));
params.push_back(ThisDecl);
- getThisDecl(CGF) = ThisDecl;
+ CGF.CXXABIThisDecl = ThisDecl;
+
+ // Compute the presumed alignment of 'this', which basically comes
+ // down to whether we know it's a complete object or not.
+ auto &Layout = CGF.getContext().getASTRecordLayout(MD->getParent());
+ if (MD->getParent()->getNumVBases() == 0 || // avoid vcall in common case
+ MD->getParent()->hasAttr<FinalAttr>() ||
+ isThisCompleteObject(CGF.CurGD)) {
+ CGF.CXXABIThisAlignment = Layout.getAlignment();
+ } else {
+ CGF.CXXABIThisAlignment = Layout.getNonVirtualAlignment();
+ }
}
void CGCXXABI::EmitThisParam(CodeGenFunction &CGF) {
/// Initialize the 'this' slot.
assert(getThisDecl(CGF) && "no 'this' variable for function");
- getThisValue(CGF)
+ CGF.CXXABIThisValue
= CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getThisDecl(CGF)),
"this");
}
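The new CXXABIThisAlignment computation matters because 'this' may bind to a base subobject, which enjoys a weaker alignment guarantee than a complete object. A hypothetical illustration of the two values being chosen between:

    struct A { alignas(16) char buf[16]; virtual ~A(); };
    struct B : virtual A { char c; };
    // A complete B is 16-byte aligned (Layout.getAlignment()), but a B
    // base subobject inside a further-derived class is only guaranteed
    // the alignment of B's non-virtual part
    // (Layout.getNonVirtualAlignment()), because the virtual A may be
    // placed elsewhere in the complete object. So when 'this' is not
    // known to be complete, only the weaker alignment may be assumed.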
@@ -186,14 +200,14 @@ CharUnits CGCXXABI::getArrayCookieSizeImpl(QualType elementType) {
return CharUnits::Zero();
}
-llvm::Value *CGCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
- llvm::Value *NewPtr,
- llvm::Value *NumElements,
- const CXXNewExpr *expr,
- QualType ElementType) {
+Address CGCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ Address NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) {
// Should never be called.
ErrorUnsupportedABI(CGF, "array cookie initialization");
- return nullptr;
+ return Address::invalid();
}
bool CGCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr,
@@ -215,31 +229,30 @@ bool CGCXXABI::requiresArrayCookie(const CXXNewExpr *expr) {
return expr->getAllocatedType().isDestructedType();
}
-void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *ptr,
+void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, Address ptr,
const CXXDeleteExpr *expr, QualType eltTy,
llvm::Value *&numElements,
llvm::Value *&allocPtr, CharUnits &cookieSize) {
// Derive a char* in the same address space as the pointer.
- unsigned AS = ptr->getType()->getPointerAddressSpace();
- llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS);
- ptr = CGF.Builder.CreateBitCast(ptr, charPtrTy);
+ ptr = CGF.Builder.CreateElementBitCast(ptr, CGF.Int8Ty);
// If we don't need an array cookie, bail out early.
if (!requiresArrayCookie(expr, eltTy)) {
- allocPtr = ptr;
+ allocPtr = ptr.getPointer();
numElements = nullptr;
cookieSize = CharUnits::Zero();
return;
}
cookieSize = getArrayCookieSizeImpl(eltTy);
- allocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ptr,
- -cookieSize.getQuantity());
- numElements = readArrayCookieImpl(CGF, allocPtr, cookieSize);
+ Address allocAddr =
+ CGF.Builder.CreateConstInBoundsByteGEP(ptr, -cookieSize);
+ allocPtr = allocAddr.getPointer();
+ numElements = readArrayCookieImpl(CGF, allocAddr, cookieSize);
}
llvm::Value *CGCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
- llvm::Value *ptr,
+ Address ptr,
CharUnits cookieSize) {
ErrorUnsupportedABI(CGF, "reading a new[] cookie");
return llvm::ConstantInt::get(CGF.SizeTy, 0);
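The rewritten ReadArrayCookie is now plain byte arithmetic on an Address: the allocation starts cookieSize bytes before the user pointer, and CreateConstInBoundsByteGEP keeps the alignment bookkeeping consistent through the negative offset. The same computation in isolation (a sketch; the helper name is hypothetical):

    static Address allocationStart(CodeGenFunction &CGF, Address userPtr,
                                   CharUnits cookieSize) {
      // View the memory as i8 so the GEP counts bytes, then step back
      // over the cookie. The result's alignment is derived from
      // userPtr's alignment at offset -cookieSize.
      Address p = CGF.Builder.CreateElementBitCast(userPtr, CGF.Int8Ty);
      return CGF.Builder.CreateConstInBoundsByteGEP(p, -cookieSize);
    }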
diff --git a/clang/lib/CodeGen/CGCXXABI.h b/clang/lib/CodeGen/CGCXXABI.h
index 5ef409ecdee..828c9ecfc6a 100644
--- a/clang/lib/CodeGen/CGCXXABI.h
+++ b/clang/lib/CodeGen/CGCXXABI.h
@@ -48,12 +48,15 @@ protected:
: CGM(CGM), MangleCtx(CGM.getContext().createMangleContext()) {}
protected:
- ImplicitParamDecl *&getThisDecl(CodeGenFunction &CGF) {
+ ImplicitParamDecl *getThisDecl(CodeGenFunction &CGF) {
return CGF.CXXABIThisDecl;
}
- llvm::Value *&getThisValue(CodeGenFunction &CGF) {
+ llvm::Value *getThisValue(CodeGenFunction &CGF) {
return CGF.CXXABIThisValue;
}
+ Address getThisAddress(CodeGenFunction &CGF) {
+ return Address(CGF.CXXABIThisValue, CGF.CXXABIThisAlignment);
+ }
/// Issue a diagnostic about unsupported features in the ABI.
void ErrorUnsupportedABI(CodeGenFunction &CGF, StringRef S);
@@ -77,6 +80,12 @@ protected:
virtual bool requiresArrayCookie(const CXXDeleteExpr *E, QualType eltType);
virtual bool requiresArrayCookie(const CXXNewExpr *E);
+ /// Determine whether the rules of the ABI tell us that 'this' is a
+ /// complete object within the given function. Obvious common logic
+ /// like being defined on a final class will have been taken care of
+ /// by the caller.
+ virtual bool isThisCompleteObject(GlobalDecl GD) const = 0;
+
public:
virtual ~CGCXXABI();
@@ -135,13 +144,14 @@ public:
/// pointer. Apply the this-adjustment and set 'This' to the
/// adjusted value.
virtual llvm::Value *EmitLoadOfMemberFunctionPointer(
- CodeGenFunction &CGF, const Expr *E, llvm::Value *&This,
- llvm::Value *MemPtr, const MemberPointerType *MPT);
+ CodeGenFunction &CGF, const Expr *E, Address This,
+ llvm::Value *&ThisPtrForCall, llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
/// Calculate an l-value from an object and a data member pointer.
virtual llvm::Value *
EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
- llvm::Value *Base, llvm::Value *MemPtr,
+ Address Base, llvm::Value *MemPtr,
const MemberPointerType *MPT);
/// Perform a derived-to-base, base-to-derived, or bitcast member
@@ -212,7 +222,7 @@ protected:
public:
virtual void emitVirtualObjectDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *DE,
- llvm::Value *Ptr, QualType ElementType,
+ Address Ptr, QualType ElementType,
const CXXDestructorDecl *Dtor) = 0;
virtual void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) = 0;
virtual void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) = 0;
@@ -235,26 +245,26 @@ public:
QualType SrcRecordTy) = 0;
virtual void EmitBadTypeidCall(CodeGenFunction &CGF) = 0;
virtual llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
- llvm::Value *ThisPtr,
+ Address ThisPtr,
llvm::Type *StdTypeInfoPtrTy) = 0;
virtual bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
QualType SrcRecordTy) = 0;
virtual llvm::Value *
- EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
+ EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy, QualType DestTy,
QualType DestRecordTy, llvm::BasicBlock *CastEnd) = 0;
virtual llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF,
- llvm::Value *Value,
+ Address Value,
QualType SrcRecordTy,
QualType DestTy) = 0;
virtual bool EmitBadCastCall(CodeGenFunction &CGF) = 0;
virtual llvm::Value *GetVirtualBaseClassOffset(CodeGenFunction &CGF,
- llvm::Value *This,
+ Address This,
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) = 0;
@@ -297,10 +307,9 @@ public:
/// Perform ABI-specific "this" argument adjustment required prior to
/// a call of a virtual function.
/// The "VirtualCall" argument is true iff the call itself is virtual.
- virtual llvm::Value *
+ virtual Address
adjustThisArgumentForVirtualFunctionCall(CodeGenFunction &CGF, GlobalDecl GD,
- llvm::Value *This,
- bool VirtualCall) {
+ Address This, bool VirtualCall) {
return This;
}
@@ -340,7 +349,7 @@ public:
virtual void EmitDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *DD, CXXDtorType Type,
bool ForVirtualBase, bool Delegating,
- llvm::Value *This) = 0;
+ Address This) = 0;
/// Emits the VTable definitions required for the given record type.
virtual void emitVTableDefinitions(CodeGenVTables &CGVT,
@@ -368,14 +377,14 @@ public:
/// Build a virtual function pointer in the ABI-specific way.
virtual llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF,
GlobalDecl GD,
- llvm::Value *This,
+ Address This,
llvm::Type *Ty,
SourceLocation Loc) = 0;
/// Emit the ABI-specific virtual destructor call.
virtual llvm::Value *
EmitVirtualDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *Dtor,
- CXXDtorType DtorType, llvm::Value *This,
+ CXXDtorType DtorType, Address This,
const CXXMemberCallExpr *CE) = 0;
virtual void adjustCallArgsForDestructorThunk(CodeGenFunction &CGF,
@@ -391,11 +400,11 @@ public:
GlobalDecl GD, bool ReturnAdjustment) = 0;
virtual llvm::Value *performThisAdjustment(CodeGenFunction &CGF,
- llvm::Value *This,
+ Address This,
const ThisAdjustment &TA) = 0;
virtual llvm::Value *performReturnAdjustment(CodeGenFunction &CGF,
- llvm::Value *Ret,
+ Address Ret,
const ReturnAdjustment &RA) = 0;
virtual void EmitReturnFromThunk(CodeGenFunction &CGF,
@@ -432,11 +441,11 @@ public:
/// always a size_t
/// \param ElementType - the base element allocated type,
/// i.e. the allocated type after stripping all array types
- virtual llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
- llvm::Value *NewPtr,
- llvm::Value *NumElements,
- const CXXNewExpr *expr,
- QualType ElementType);
+ virtual Address InitializeArrayCookie(CodeGenFunction &CGF,
+ Address NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType);
/// Reads the array cookie associated with the given pointer,
/// if it has one.
@@ -451,7 +460,7 @@ public:
/// function
/// \param CookieSize - an out parameter which will be initialized
/// with the size of the cookie, or zero if there is no cookie
- virtual void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ virtual void ReadArrayCookie(CodeGenFunction &CGF, Address Ptr,
const CXXDeleteExpr *expr,
QualType ElementType, llvm::Value *&NumElements,
llvm::Value *&AllocPtr, CharUnits &CookieSize);
@@ -474,8 +483,7 @@ protected:
/// Other parameters are as above.
///
/// \return a size_t
- virtual llvm::Value *readArrayCookieImpl(CodeGenFunction &IGF,
- llvm::Value *ptr,
+ virtual llvm::Value *readArrayCookieImpl(CodeGenFunction &IGF, Address ptr,
CharUnits cookieSize);
public:
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index 2dc68622e77..e8760111f01 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -553,6 +553,7 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
FI->HasRegParm = info.getHasRegParm();
FI->RegParm = info.getRegParm();
FI->ArgStruct = nullptr;
+ FI->ArgStructAlign = 0;
FI->NumArgs = argTypes.size();
FI->getArgsBuffer()[0].type = resultType;
for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
@@ -726,6 +727,21 @@ CodeGenTypes::getExpandedTypes(QualType Ty,
}
}
+static void forConstantArrayExpansion(CodeGenFunction &CGF,
+ ConstantArrayExpansion *CAE,
+ Address BaseAddr,
+ llvm::function_ref<void(Address)> Fn) {
+ CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
+ CharUnits EltAlign =
+ BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
+
+ for (int i = 0, n = CAE->NumElts; i < n; i++) {
+ llvm::Value *EltAddr =
+ CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
+ Fn(Address(EltAddr, EltAlign));
+ }
+}
+
void CodeGenFunction::ExpandTypeFromArgs(
QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
assert(LV.isSimple() &&
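forConstantArrayExpansion is where the per-element alignment logic becomes visible: the base address's alignment does not carry over to every element. Worked example with hypothetical numbers: an array at 16-byte alignment whose elements are 12 bytes wide places elements at byte offsets 0, 12, 24, ..., so the alignment valid for all of them is only gcd(16, 12) = 4:

    CharUnits BaseAlign = CharUnits::fromQuantity(16);
    CharUnits EltSize   = CharUnits::fromQuantity(12);
    CharUnits EltAlign  = BaseAlign.alignmentOfArrayElement(EltSize); // 4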
@@ -733,17 +749,16 @@ void CodeGenFunction::ExpandTypeFromArgs(
auto Exp = getTypeExpansion(Ty, getContext());
if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
- for (int i = 0, n = CAExp->NumElts; i < n; i++) {
- llvm::Value *EltAddr =
- Builder.CreateConstGEP2_32(nullptr, LV.getAddress(), 0, i);
+ forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
+ [&](Address EltAddr) {
LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
- }
+ });
} else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
- llvm::Value *This = LV.getAddress();
+ Address This = LV.getAddress();
for (const CXXBaseSpecifier *BS : RExp->Bases) {
// Perform a single step derived-to-base conversion.
- llvm::Value *Base =
+ Address Base =
GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
/*NullCheckValue=*/false, SourceLocation());
LValue SubLV = MakeAddrLValue(Base, BS->getType());
@@ -756,15 +771,10 @@ void CodeGenFunction::ExpandTypeFromArgs(
LValue SubLV = EmitLValueForField(LV, FD);
ExpandTypeFromArgs(FD->getType(), SubLV, AI);
}
- } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
- llvm::Value *RealAddr =
- Builder.CreateStructGEP(nullptr, LV.getAddress(), 0, "real");
- EmitStoreThroughLValue(RValue::get(*AI++),
- MakeAddrLValue(RealAddr, CExp->EltTy));
- llvm::Value *ImagAddr =
- Builder.CreateStructGEP(nullptr, LV.getAddress(), 1, "imag");
- EmitStoreThroughLValue(RValue::get(*AI++),
- MakeAddrLValue(ImagAddr, CExp->EltTy));
+ } else if (isa<ComplexExpansion>(Exp.get())) {
+ auto realValue = *AI++;
+ auto imagValue = *AI++;
+ EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
} else {
assert(isa<NoExpansion>(Exp.get()));
EmitStoreThroughLValue(RValue::get(*AI++), LV);
@@ -776,18 +786,17 @@ void CodeGenFunction::ExpandTypeToArgs(
SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
auto Exp = getTypeExpansion(Ty, getContext());
if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
- llvm::Value *Addr = RV.getAggregateAddr();
- for (int i = 0, n = CAExp->NumElts; i < n; i++) {
- llvm::Value *EltAddr = Builder.CreateConstGEP2_32(nullptr, Addr, 0, i);
+ forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
+ [&](Address EltAddr) {
RValue EltRV =
convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
- }
+ });
} else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
- llvm::Value *This = RV.getAggregateAddr();
+ Address This = RV.getAggregateAddress();
for (const CXXBaseSpecifier *BS : RExp->Bases) {
// Perform a single step derived-to-base conversion.
- llvm::Value *Base =
+ Address Base =
GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
/*NullCheckValue=*/false, SourceLocation());
RValue BaseRV = RValue::getAggregate(Base);
@@ -822,12 +831,22 @@ void CodeGenFunction::ExpandTypeToArgs(
}
}
+/// Create a temporary allocation for the purposes of coercion.
+static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
+ CharUnits MinAlign) {
+ // Don't use an alignment that's worse than what LLVM would prefer.
+ auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
+ CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
+
+ return CGF.CreateTempAlloca(Ty, Align);
+}
+
/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
-static llvm::Value *
-EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
+static Address
+EnterStructPointerForCoercedAccess(Address SrcPtr,
llvm::StructType *SrcSTy,
uint64_t DstSize, CodeGenFunction &CGF) {
// We can't dive into a zero-element struct.
@@ -846,11 +865,10 @@ EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
return SrcPtr;
// GEP into the first element.
- SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcSTy, SrcPtr, 0, 0, "coerce.dive");
+ SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");
// If the first element is a struct, recurse.
- llvm::Type *SrcTy =
- cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ llvm::Type *SrcTy = SrcPtr.getElementType();
if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
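CreateTempAllocaForCoercion above encodes a small policy: a temporary used to round-trip a value through memory should be at least as aligned as LLVM prefers for the type, even when the original memory was not. A hypothetical instance, coercing to i64 from 4-aligned memory under a typical 64-bit data layout where i64's preferred alignment is 8:

    // Sketch mirroring the helper's computation: max(4, 8) == 8.
    CharUnits MinAlign = CharUnits::fromQuantity(4); // source alignment
    unsigned Pref = CGF.CGM.getDataLayout().getPrefTypeAlignment(CGF.Int64Ty);
    CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(Pref));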
@@ -918,21 +936,19 @@ static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which not
/// present in the src are undefined.
-static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
- llvm::Type *Ty, CharUnits SrcAlign,
+static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
CodeGenFunction &CGF) {
- llvm::Type *SrcTy =
- cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ llvm::Type *SrcTy = Src.getElementType();
// If SrcTy and Ty are the same, just do a load.
if (SrcTy == Ty)
- return CGF.Builder.CreateAlignedLoad(SrcPtr, SrcAlign.getQuantity());
+ return CGF.Builder.CreateLoad(Src);
uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
- SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
- SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
+ SrcTy = Src.getType()->getElementType();
}
uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
@@ -941,8 +957,7 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
// extension or truncation to the desired type.
if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
(isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
- llvm::LoadInst *Load =
- CGF.Builder.CreateAlignedLoad(SrcPtr, SrcAlign.getQuantity());
+ llvm::Value *Load = CGF.Builder.CreateLoad(Src);
return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
}
@@ -954,22 +969,18 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
//
// FIXME: Assert that we aren't truncating non-padding bits when we have access
// to that information.
- llvm::Value *Casted =
- CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
- return CGF.Builder.CreateAlignedLoad(Casted, SrcAlign.getQuantity());
- }
-
- // Otherwise do coercion through memory. This is stupid, but
- // simple.
- llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(Ty);
- Tmp->setAlignment(SrcAlign.getQuantity());
- llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
- llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
- llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
+ Src = CGF.Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(Ty));
+ return CGF.Builder.CreateLoad(Src);
+ }
+
+ // Otherwise do coercion through memory. This is stupid, but simple.
+ Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
+ Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
+ Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
CGF.Builder.CreateMemCpy(Casted, SrcCasted,
llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
- SrcAlign.getQuantity(), false);
- return CGF.Builder.CreateAlignedLoad(Tmp, SrcAlign.getQuantity());
+ false);
+ return CGF.Builder.CreateLoad(Tmp);
}
@@ -977,8 +988,7 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
- llvm::Value *DestPtr, bool DestIsVolatile,
- CharUnits DestAlign) {
+ Address Dest, bool DestIsVolatile) {
// Prefer scalar stores to first-class aggregate stores.
if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(Val->getType())) {
@@ -986,17 +996,13 @@ static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
CGF.CGM.getDataLayout().getStructLayout(STy);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(STy, DestPtr, 0, i);
+ auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
+ Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
- uint64_t EltOffset = Layout->getElementOffset(i);
- CharUnits EltAlign =
- DestAlign.alignmentAtOffset(CharUnits::fromQuantity(EltOffset));
- CGF.Builder.CreateAlignedStore(Elt, EltPtr, EltAlign.getQuantity(),
- DestIsVolatile);
+ CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
}
} else {
- CGF.Builder.CreateAlignedStore(Val, DestPtr, DestAlign.getQuantity(),
- DestIsVolatile);
+ CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
}
}
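BuildAggStore now derives each element's Address from its struct-layout offset, so per-element alignment degrades correctly without the old manual alignmentAtOffset arithmetic at every call site. A sketch with hypothetical types: for a first-class { i32, i32 } stored into an 8-aligned slot, element 1 sits at byte offset 4 and its derived alignment is gcd(8, 4) = 4:

    static void storeSecondElt(CodeGenFunction &CGF, llvm::Value *Val,
                               Address Dest) {
      // Element 1 of { i32, i32 }: offset 4, so alignment drops 8 -> 4.
      Address Elt1 =
          CGF.Builder.CreateStructGEP(Dest, 1, CharUnits::fromQuantity(4));
      CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(Val, 1), Elt1);
    }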
@@ -1007,24 +1013,21 @@ static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
- llvm::Value *DstPtr,
+ Address Dst,
bool DstIsVolatile,
- CharUnits DstAlign,
CodeGenFunction &CGF) {
llvm::Type *SrcTy = Src->getType();
- llvm::Type *DstTy =
- cast<llvm::PointerType>(DstPtr->getType())->getElementType();
+ llvm::Type *DstTy = Dst.getType()->getElementType();
if (SrcTy == DstTy) {
- CGF.Builder.CreateAlignedStore(Src, DstPtr, DstAlign.getQuantity(),
- DstIsVolatile);
+ CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
return;
}
uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
- DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
- DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
+ Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
+ DstTy = Dst.getType()->getElementType();
}
// If the source and destination are integer or pointer types, just do an
@@ -1032,8 +1035,7 @@ static void CreateCoercedStore(llvm::Value *Src,
if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
(isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
- CGF.Builder.CreateAlignedStore(Src, DstPtr, DstAlign.getQuantity(),
- DstIsVolatile);
+ CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
return;
}
@@ -1041,9 +1043,8 @@ static void CreateCoercedStore(llvm::Value *Src,
// If store is legal, just bitcast the src pointer.
if (SrcSize <= DstSize) {
- llvm::Value *Casted =
- CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
- BuildAggStore(CGF, Src, Casted, DstIsVolatile, DstAlign);
+ Dst = CGF.Builder.CreateBitCast(Dst, llvm::PointerType::getUnqual(SrcTy));
+ BuildAggStore(CGF, Src, Dst, DstIsVolatile);
} else {
// Otherwise do coercion through memory. This is stupid, but
// simple.
@@ -1054,16 +1055,25 @@ static void CreateCoercedStore(llvm::Value *Src,
//
// FIXME: Assert that we aren't truncating non-padding bits when we have access
// to that information.
- llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(SrcTy);
- Tmp->setAlignment(DstAlign.getQuantity());
- CGF.Builder.CreateAlignedStore(Src, Tmp, DstAlign.getQuantity());
- llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
- llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
- llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
+ Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
+ CGF.Builder.CreateStore(Src, Tmp);
+ Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
+ Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
CGF.Builder.CreateMemCpy(DstCasted, Casted,
llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
- DstAlign.getQuantity(), false);
+ false);
+ }
+}
+
+static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
+ const ABIArgInfo &info) {
+ if (unsigned offset = info.getDirectOffset()) {
+ addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
+ addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
+ CharUnits::fromQuantity(offset));
+ addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
}
+ return addr;
}
namespace {
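emitAddressAtOffset centralizes the "direct offset" adjustment that the function prolog and epilog previously each performed by hand, and the Address type carries the alignment consequence automatically. Worked example with hypothetical ABI numbers: an 8-aligned slot whose coerced payload starts at direct offset 4 yields a 4-aligned address, since gcd(8, 4) = 4:

    // Sketch of what the helper computes for offset 4.
    static Address atOffset4(CodeGenFunction &CGF, Address slot) {
      Address bytes = CGF.Builder.CreateElementBitCast(slot, CGF.Int8Ty);
      return CGF.Builder.CreateConstInBoundsByteGEP(
          bytes, CharUnits::fromQuantity(4)); // alignment drops 8 -> 4
    }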
@@ -1279,12 +1289,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
}
break;
- case ABIArgInfo::Indirect: {
- assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
- resultType = llvm::Type::getVoidTy(getLLVMContext());
- break;
- }
-
+ case ABIArgInfo::Indirect:
case ABIArgInfo::Ignore:
resultType = llvm::Type::getVoidTy(getLLVMContext());
break;
@@ -1656,7 +1661,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
if (AI.getIndirectByVal())
Attrs.addAttribute(llvm::Attribute::ByVal);
- unsigned Align = AI.getIndirectAlign();
+ CharUnits Align = AI.getIndirectAlign();
// In a byval argument, it is important that the required
// alignment of the type is honored, as LLVM might be creating a
@@ -1668,10 +1673,12 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
// This is different from indirect *not* byval, where the object
// exists already, and the align attribute is purely
// informative.
- if (Align == 0 && AI.getIndirectByVal())
- Align = getContext().getTypeAlignInChars(ParamType).getQuantity();
+ assert(!Align.isZero());
- Attrs.addAlignmentAttr(Align);
+ // For now, only add this when we have a byval argument.
+ // TODO: be less lazy about updating test cases.
+ if (AI.getIndirectByVal())
+ Attrs.addAlignmentAttr(Align.getQuantity());
// byval disables readnone and readonly.
FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
@@ -1797,10 +1804,14 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// If we're using inalloca, all the memory arguments are GEPs off of the last
// parameter, which is a pointer to the complete memory area.
- llvm::Value *ArgStruct = nullptr;
+ Address ArgStruct = Address::invalid();
+ const llvm::StructLayout *ArgStructLayout = nullptr;
if (IRFunctionArgs.hasInallocaArg()) {
- ArgStruct = FnArgs[IRFunctionArgs.getInallocaArgNo()];
- assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
+ ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
+ ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
+ FI.getArgStructAlignment());
+
+ assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
}
// Name the struct return parameter.
@@ -1814,9 +1825,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Track if we received the parameter as a pointer (indirect, byval, or
// inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it
// into a local alloca for us.
- enum ValOrPointer { HaveValue = 0, HavePointer = 1 };
- typedef llvm::PointerIntPair<llvm::Value *, 1> ValueAndIsPtr;
- SmallVector<ValueAndIsPtr, 16> ArgVals;
+ SmallVector<ParamValue, 16> ArgVals;
ArgVals.reserve(Args.size());
// Create a pointer value for every parameter declaration. This usually
@@ -1842,49 +1851,47 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
switch (ArgI.getKind()) {
case ABIArgInfo::InAlloca: {
assert(NumIRArgs == 0);
- llvm::Value *V =
- Builder.CreateStructGEP(FI.getArgStruct(), ArgStruct,
- ArgI.getInAllocaFieldIndex(), Arg->getName());
- ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
+ auto FieldIndex = ArgI.getInAllocaFieldIndex();
+ CharUnits FieldOffset =
+ CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
+ Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
+ Arg->getName());
+ ArgVals.push_back(ParamValue::forIndirect(V));
break;
}
case ABIArgInfo::Indirect: {
assert(NumIRArgs == 1);
- llvm::Value *V = FnArgs[FirstIRArg];
+ Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
if (!hasScalarEvaluationKind(Ty)) {
// Aggregates and complex variables are accessed by reference. All we
- // need to do is realign the value, if requested
+ // need to do is realign the value, if requested.
+ Address V = ParamAddr;
if (ArgI.getIndirectRealign()) {
- llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
+ Address AlignedTemp = CreateMemTemp(Ty, "coerce");
// Copy from the incoming argument pointer to the temporary with the
// appropriate alignment.
//
// FIXME: We should have a common utility for generating an aggregate
// copy.
- llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
CharUnits Size = getContext().getTypeSizeInChars(Ty);
- llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
- llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
- Builder.CreateMemCpy(Dst,
- Src,
- llvm::ConstantInt::get(IntPtrTy,
- Size.getQuantity()),
- ArgI.getIndirectAlign(),
- false);
+ auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
+ Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
+ Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
+ Builder.CreateMemCpy(Dst, Src, SizeVal, false);
V = AlignedTemp;
}
- ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
+ ArgVals.push_back(ParamValue::forIndirect(V));
} else {
// Load scalar value from indirect argument.
- V = EmitLoadOfScalar(V, false, ArgI.getIndirectAlign(), Ty,
- Arg->getLocStart());
+ llvm::Value *V =
+ EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());
if (isPromoted)
V = emitArgumentDemotion(*this, Arg, V);
- ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
+ ArgVals.push_back(ParamValue::forDirect(V));
}
break;
}
@@ -1989,87 +1996,66 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (V->getType() != LTy)
V = Builder.CreateBitCast(V, LTy);
- ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
+ ArgVals.push_back(ParamValue::forDirect(V));
break;
}
- llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
-
- // The alignment we need to use is the max of the requested alignment for
- // the argument plus the alignment required by our access code below.
- unsigned AlignmentToUse =
- CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
- AlignmentToUse = std::max(AlignmentToUse,
- (unsigned)getContext().getDeclAlign(Arg).getQuantity());
-
- Alloca->setAlignment(AlignmentToUse);
- llvm::Value *V = Alloca;
- llvm::Value *Ptr = V; // Pointer to store into.
- CharUnits PtrAlign = CharUnits::fromQuantity(AlignmentToUse);
+ Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
+ Arg->getName());
- // If the value is offset in memory, apply the offset now.
- if (unsigned Offs = ArgI.getDirectOffset()) {
- Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
- Ptr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), Ptr, Offs);
- Ptr = Builder.CreateBitCast(Ptr,
- llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
- PtrAlign = PtrAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
- }
+ // Pointer to store into.
+ Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
// Fast-isel and the optimizer generally like scalar values better than
// FCAs, so we flatten them if this is safe to do for this argument.
llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
STy->getNumElements() > 1) {
+ auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
- llvm::Type *DstTy =
- cast<llvm::PointerType>(Ptr->getType())->getElementType();
+ llvm::Type *DstTy = Ptr.getElementType();
uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
+ Address AddrToStoreInto = Address::invalid();
if (SrcSize <= DstSize) {
- Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
-
- assert(STy->getNumElements() == NumIRArgs);
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- auto AI = FnArgs[FirstIRArg + i];
- AI->setName(Arg->getName() + ".coerce" + Twine(i));
- llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, Ptr, 0, i);
- Builder.CreateStore(AI, EltPtr);
- }
+ AddrToStoreInto =
+ Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
} else {
- llvm::AllocaInst *TempAlloca =
- CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
- TempAlloca->setAlignment(AlignmentToUse);
- llvm::Value *TempV = TempAlloca;
-
- assert(STy->getNumElements() == NumIRArgs);
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- auto AI = FnArgs[FirstIRArg + i];
- AI->setName(Arg->getName() + ".coerce" + Twine(i));
- llvm::Value *EltPtr =
- Builder.CreateConstGEP2_32(ArgI.getCoerceToType(), TempV, 0, i);
- Builder.CreateStore(AI, EltPtr);
- }
+ AddrToStoreInto =
+ CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
+ }
+
+ assert(STy->getNumElements() == NumIRArgs);
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ auto AI = FnArgs[FirstIRArg + i];
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
+ auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
+ Address EltPtr =
+ Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
+ Builder.CreateStore(AI, EltPtr);
+ }
- Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
+ if (SrcSize > DstSize) {
+ Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
+ }
+
} else {
// Simple case, just do a coerced store of the argument into the alloca.
assert(NumIRArgs == 1);
auto AI = FnArgs[FirstIRArg];
AI->setName(Arg->getName() + ".coerce");
- CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, PtrAlign, *this);
+ CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
}
-
// Match to what EmitParmDecl is expecting for this type.
if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
- V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart());
+ llvm::Value *V =
+ EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
if (isPromoted)
V = emitArgumentDemotion(*this, Arg, V);
- ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
+ ArgVals.push_back(ParamValue::forDirect(V));
} else {
- ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
+ ArgVals.push_back(ParamValue::forIndirect(Alloca));
}
break;
}
@@ -2078,11 +2064,9 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// If this structure was expanded into multiple arguments then
// we need to create a temporary and reconstruct it from the
// arguments.
- llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
- CharUnits Align = getContext().getDeclAlign(Arg);
- Alloca->setAlignment(Align.getQuantity());
- LValue LV = MakeAddrLValue(Alloca, Ty, Align);
- ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer));
+ Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
+ LValue LV = MakeAddrLValue(Alloca, Ty);
+ ArgVals.push_back(ParamValue::forIndirect(Alloca));
auto FnArgIter = FnArgs.begin() + FirstIRArg;
ExpandTypeFromArgs(Ty, LV, FnArgIter);
@@ -2098,10 +2082,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
assert(NumIRArgs == 0);
// Initialize the local variable appropriately.
if (!hasScalarEvaluationKind(Ty)) {
- ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer));
+ ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
} else {
llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
- ArgVals.push_back(ValueAndIsPtr(U, HaveValue));
+ ArgVals.push_back(ParamValue::forDirect(U));
}
break;
}
@@ -2109,12 +2093,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
for (int I = Args.size() - 1; I >= 0; --I)
- EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
- I + 1);
+ EmitParmDecl(*Args[I], ArgVals[I], I + 1);
} else {
for (unsigned I = 0, E = Args.size(); I != E; ++I)
- EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
- I + 1);
+ EmitParmDecl(*Args[I], ArgVals[I], I + 1);
}
}
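The ValueAndIsPtr pointer-int pair is replaced by ParamValue, which can carry an alignment in the indirect case. A minimal sketch consistent with the forDirect/forIndirect uses above (the real definition lives in CodeGenFunction.h; the member layout here is inferred):

    class ParamValue {
      llvm::Value *Value;
      CharUnits Alignment; // zero means 'direct'
      ParamValue(llvm::Value *V, CharUnits A) : Value(V), Alignment(A) {}
    public:
      static ParamValue forDirect(llvm::Value *V) {
        return ParamValue(V, CharUnits::Zero());
      }
      static ParamValue forIndirect(Address Addr) {
        return ParamValue(Addr.getPointer(), Addr.getAlignment());
      }
      bool isIndirect() const { return !Alignment.isZero(); }
      Address getIndirectAddress() const { return Address(Value, Alignment); }
    };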
@@ -2240,7 +2222,7 @@ static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
llvm::LoadInst *load =
dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
if (!load || load->isAtomic() || load->isVolatile() ||
- load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
+ load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
return nullptr;
// Okay! Burn it all down. This relies for correctness on the
@@ -2281,7 +2263,7 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
// for something immediately preceding the IP. Sometimes this can
// happen with how we generate implicit-returns; it can also happen
// with noreturn cleanups.
- if (!CGF.ReturnValue->hasOneUse()) {
+ if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
if (IP->empty()) return nullptr;
llvm::Instruction *I = &IP->back();
@@ -2307,13 +2289,14 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(I);
if (!store) return nullptr;
- if (store->getPointerOperand() != CGF.ReturnValue) return nullptr;
+ if (store->getPointerOperand() != CGF.ReturnValue.getPointer())
+ return nullptr;
assert(!store->isAtomic() && !store->isVolatile()); // see below
return store;
}
llvm::StoreInst *store =
- dyn_cast<llvm::StoreInst>(CGF.ReturnValue->user_back());
+ dyn_cast<llvm::StoreInst>(CGF.ReturnValue.getPointer()->user_back());
if (!store) return nullptr;
// These aren't actually possible for non-coerced returns, and we
@@ -2344,7 +2327,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
}
// Functions with no result always return void.
- if (!ReturnValue) {
+ if (!ReturnValue.isValid()) {
Builder.CreateRetVoid();
return;
}
@@ -2365,7 +2348,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
llvm::Value *ArgStruct = EI;
llvm::Value *SRet = Builder.CreateStructGEP(
nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
- RV = Builder.CreateLoad(SRet, "sret");
+ RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
}
break;
@@ -2376,8 +2359,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
switch (getEvaluationKind(RetTy)) {
case TEK_Complex: {
ComplexPairTy RT =
- EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy),
- EndLoc);
+ EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(AI, RetTy),
/*isInit*/ true);
break;
@@ -2415,9 +2397,12 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
SI->eraseFromParent();
// If that was the only use of the return value, nuke it as well now.
- if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
- cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
- ReturnValue = nullptr;
+ auto returnValueInst = ReturnValue.getPointer();
+ if (returnValueInst->use_empty()) {
+ if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
+ alloca->eraseFromParent();
+ ReturnValue = Address::invalid();
+ }
}
// Otherwise, we have to do a simple load.
@@ -2425,18 +2410,10 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
RV = Builder.CreateLoad(ReturnValue);
}
} else {
- llvm::Value *V = ReturnValue;
- CharUnits Align = getContext().getTypeAlignInChars(RetTy);
// If the value is offset in memory, apply the offset now.
- if (unsigned Offs = RetAI.getDirectOffset()) {
- V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
- V = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), V, Offs);
- V = Builder.CreateBitCast(V,
- llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
- Align = Align.alignmentAtOffset(CharUnits::fromQuantity(Offs));
- }
+ Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
- RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), Align, *this);
+ RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
}
// In ARC, end functions that return a retainable type with a call
@@ -2486,14 +2463,20 @@ static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
}
-static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) {
+static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
+ QualType Ty) {
// FIXME: Generate IR in one pass, rather than going back and fixing up these
// placeholders.
llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
llvm::Value *Placeholder =
- llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
- Placeholder = CGF.Builder.CreateLoad(Placeholder);
- return AggValueSlot::forAddr(Placeholder, CharUnits::Zero(),
+ llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
+ Placeholder = CGF.Builder.CreateDefaultAlignedLoad(Placeholder);
+
+ // FIXME: When we generate this IR in one pass, we shouldn't need
+ // this win32-specific alignment hack.
+ CharUnits Align = CharUnits::fromQuantity(4);
+
+ return AggValueSlot::forAddr(Address(Placeholder, Align),
Ty.getQualifiers(),
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
@@ -2506,7 +2489,7 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
// StartFunction converted the ABI-lowered parameter(s) into a
// local alloca. We need to turn that into an r-value suitable
// for EmitCall.
- llvm::Value *local = GetAddrOfLocalVar(param);
+ Address local = GetAddrOfLocalVar(param);
QualType type = param->getType();
@@ -2541,20 +2524,21 @@ static bool isProvablyNonNull(llvm::Value *addr) {
static void emitWriteback(CodeGenFunction &CGF,
const CallArgList::Writeback &writeback) {
const LValue &srcLV = writeback.Source;
- llvm::Value *srcAddr = srcLV.getAddress();
- assert(!isProvablyNull(srcAddr) &&
+ Address srcAddr = srcLV.getAddress();
+ assert(!isProvablyNull(srcAddr.getPointer()) &&
"shouldn't have writeback for provably null argument");
llvm::BasicBlock *contBB = nullptr;
// If the argument wasn't provably non-null, we need to null check
// before doing the store.
- bool provablyNonNull = isProvablyNonNull(srcAddr);
+ bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
if (!provablyNonNull) {
llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
contBB = CGF.createBasicBlock("icr.done");
- llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
+ llvm::Value *isNull =
+ CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
CGF.EmitBlock(writebackBB);
}
@@ -2563,9 +2547,8 @@ static void emitWriteback(CodeGenFunction &CGF,
llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
// Cast it back, in case we're writing an id to a Foo* or something.
- value = CGF.Builder.CreateBitCast(value,
- cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
- "icr.writeback-cast");
+ value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
+ "icr.writeback-cast");
// Perform the writeback.
@@ -2629,7 +2612,9 @@ static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
}
/// Emit an argument that's being passed call-by-writeback. That is,
-/// we are passing the address of
+/// we are passing the address of an __autoreleased temporary; it
+/// might be copy-initialized with the current value of the given
+/// address, but it will definitely be copied out of after the call.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
const ObjCIndirectCopyRestoreExpr *CRE) {
LValue srcLV;
@@ -2641,13 +2626,13 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
// Otherwise, just emit it as a scalar.
} else {
- llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
+ Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
QualType srcAddrType =
CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
- srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
+ srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
}
- llvm::Value *srcAddr = srcLV.getAddress();
+ Address srcAddr = srcLV.getAddress();
// The dest and src types don't necessarily match in LLVM terms
// because of the crazy ObjC compatibility rules.
@@ -2656,15 +2641,16 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
// If the address is a constant null, just pass the appropriate null.
- if (isProvablyNull(srcAddr)) {
+ if (isProvablyNull(srcAddr.getPointer())) {
args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
CRE->getType());
return;
}
// Create the temporary.
- llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
- "icr.temp");
+ Address temp = CGF.CreateTempAlloca(destType->getElementType(),
+ CGF.getPointerAlign(),
+ "icr.temp");
// Loading an l-value can introduce a cleanup if the l-value is __weak,
// and that cleanup will be conditional if we can't prove that the l-value
// isn't null, so we need to register a dominating point so that the cleanups
@@ -2686,15 +2672,16 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
// If the address is *not* known to be non-null, we need to switch.
llvm::Value *finalArgument;
- bool provablyNonNull = isProvablyNonNull(srcAddr);
+ bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
if (provablyNonNull) {
- finalArgument = temp;
+ finalArgument = temp.getPointer();
} else {
- llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
+ llvm::Value *isNull =
+ CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
finalArgument = CGF.Builder.CreateSelect(isNull,
llvm::ConstantPointerNull::get(destType),
- temp, "icr.argument");
+ temp.getPointer(), "icr.argument");
// If we need to copy, then the load has to be conditional, which
// means we need control flow.
@@ -2766,7 +2753,8 @@ void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
// stacksave to an alloca to avoid violating SSA form.
// TODO: This is dead if we never emit the cleanup. We should create the
// alloca and store lazily on the first cleanup emission.
- StackBaseMem = CGF.CreateTempAlloca(CGF.Int8PtrTy, "inalloca.spmem");
+ StackBaseMem = CGF.CreateTempAlloca(CGF.Int8PtrTy, CGF.getPointerAlign(),
+ "inalloca.spmem");
CGF.Builder.CreateStore(StackBase, StackBaseMem);
CGF.pushStackRestore(EHCleanup, StackBaseMem);
StackCleanup = CGF.EHStack.getInnermostEHScope();
@@ -2853,10 +2841,10 @@ void CodeGenFunction::EmitCallArgs(
namespace {
struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
- DestroyUnpassedArg(llvm::Value *Addr, QualType Ty)
+ DestroyUnpassedArg(Address Addr, QualType Ty)
: Addr(Addr), Ty(Ty) {}
- llvm::Value *Addr;
+ Address Addr;
QualType Ty;
void Emit(CodeGenFunction &CGF, Flags flags) override {
@@ -2930,7 +2918,8 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
// Create a no-op GEP between the placeholder and the cleanup so we can
// RAUW it successfully. It also serves as a marker of the first
// instruction where the cleanup is active.
- pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddr(), type);
+ pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
+ type);
// This unreachable is a temporary marker which will be removed later.
llvm::Instruction *IsActive = Builder.CreateUnreachable();
args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
@@ -2947,9 +2936,8 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
} else {
// We can't represent a misaligned lvalue in the CallArgList, so copy
// to an aligned temporary now.
- llvm::Value *tmp = CreateMemTemp(type);
- EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(),
- L.getAlignment());
+ Address tmp = CreateMemTemp(type);
+ EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile());
args.add(RValue::getAggregate(tmp), type);
}
return;
@@ -3124,8 +3112,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If we're using inalloca, insert the allocation after the stack save.
// FIXME: Do this earlier rather than hacking it in here!
- llvm::AllocaInst *ArgMemory = nullptr;
+ Address ArgMemory = Address::invalid();
+ const llvm::StructLayout *ArgMemoryLayout = nullptr;
if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
+ ArgMemoryLayout = CGM.getDataLayout().getStructLayout(ArgStruct);
llvm::Instruction *IP = CallArgs.getStackBase();
llvm::AllocaInst *AI;
if (IP) {
@@ -3134,36 +3124,44 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
} else {
AI = CreateTempAlloca(ArgStruct, "argmem");
}
+ auto Align = CallInfo.getArgStructAlignment();
+ AI->setAlignment(Align.getQuantity());
AI->setUsedWithInAlloca(true);
assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
- ArgMemory = AI;
+ ArgMemory = Address(AI, Align);
}
+ // Helper function to drill into the inalloca allocation.
+ auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
+ auto FieldOffset =
+ CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
+ return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
+ };
+
ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
// If the call returns a temporary with struct return, create a temporary
// alloca to hold the result, unless one is given to us.
- llvm::Value *SRetPtr = nullptr;
+ Address SRetPtr = Address::invalid();
size_t UnusedReturnSize = 0;
if (RetAI.isIndirect() || RetAI.isInAlloca()) {
- SRetPtr = ReturnValue.getValue();
- if (!SRetPtr) {
+ if (!ReturnValue.isNull()) {
+ SRetPtr = ReturnValue.getValue();
+ } else {
SRetPtr = CreateMemTemp(RetTy);
if (HaveInsertPoint() && ReturnValue.isUnused()) {
uint64_t size =
CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
- if (EmitLifetimeStart(size, SRetPtr))
+ if (EmitLifetimeStart(size, SRetPtr.getPointer()))
UnusedReturnSize = size;
}
}
if (IRFunctionArgs.hasSRetArg()) {
- IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr;
+ IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
} else {
- llvm::Value *Addr =
- Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
- RetAI.getInAllocaFieldIndex());
- Builder.CreateStore(SRetPtr, Addr);
+ Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
+ Builder.CreateStore(SRetPtr.getPointer(), Addr);
}
}
@@ -3176,8 +3174,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const ABIArgInfo &ArgInfo = info_it->info;
RValue RV = I->RV;
- CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);
-
// Insert a padding argument to ensure proper alignment.
if (IRFunctionArgs.hasPaddingArg(ArgNo))
IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
@@ -3193,27 +3189,23 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (RV.isAggregate()) {
// Replace the placeholder with the appropriate argument slot GEP.
llvm::Instruction *Placeholder =
- cast<llvm::Instruction>(RV.getAggregateAddr());
+ cast<llvm::Instruction>(RV.getAggregatePointer());
CGBuilderTy::InsertPoint IP = Builder.saveIP();
Builder.SetInsertPoint(Placeholder);
- llvm::Value *Addr =
- Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
- ArgInfo.getInAllocaFieldIndex());
+ Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
Builder.restoreIP(IP);
- deferPlaceholderReplacement(Placeholder, Addr);
+ deferPlaceholderReplacement(Placeholder, Addr.getPointer());
} else {
// Store the RValue into the argument struct.
- llvm::Value *Addr =
- Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
- ArgInfo.getInAllocaFieldIndex());
- unsigned AS = Addr->getType()->getPointerAddressSpace();
+ Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
+ unsigned AS = Addr.getType()->getPointerAddressSpace();
llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
// There are some cases where a trivial bitcast is not avoidable. The
        // definition of a type later in a translation unit may change its type
// from {}* to (%struct.foo*)*.
- if (Addr->getType() != MemType)
+ if (Addr.getType() != MemType)
Addr = Builder.CreateBitCast(Addr, MemType);
- LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign);
+ LValue argLV = MakeAddrLValue(Addr, I->Ty);
EmitInitStoreOfNonAggregate(*this, RV, argLV);
}
break;
@@ -3223,12 +3215,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(NumIRArgs == 1);
if (RV.isScalar() || RV.isComplex()) {
// Make a temporary alloca to pass the argument.
- llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
- if (ArgInfo.getIndirectAlign() > AI->getAlignment())
- AI->setAlignment(ArgInfo.getIndirectAlign());
- IRCallArgs[FirstIRArg] = AI;
+ Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
+ IRCallArgs[FirstIRArg] = Addr.getPointer();
- LValue argLV = MakeAddrLValue(AI, I->Ty, TypeAlign);
+ LValue argLV = MakeAddrLValue(Addr, I->Ty);
EmitInitStoreOfNonAggregate(*this, RV, argLV);
} else {
// We want to avoid creating an unnecessary temporary+copy here;
@@ -3239,27 +3229,27 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// we cannot force it to be sufficiently aligned.
// 3. If the argument is byval, but RV is located in an address space
      //    different from that of the argument (0).
- llvm::Value *Addr = RV.getAggregateAddr();
- unsigned Align = ArgInfo.getIndirectAlign();
+ Address Addr = RV.getAggregateAddress();
+ CharUnits Align = ArgInfo.getIndirectAlign();
const llvm::DataLayout *TD = &CGM.getDataLayout();
- const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
+ const unsigned RVAddrSpace = Addr.getType()->getAddressSpace();
const unsigned ArgAddrSpace =
(FirstIRArg < IRFuncTy->getNumParams()
? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
: 0);
if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
- (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
- llvm::getOrEnforceKnownAlignment(Addr, Align, *TD) < Align) ||
+ (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align &&
+ llvm::getOrEnforceKnownAlignment(Addr.getPointer(),
+ Align.getQuantity(), *TD)
+ < Align.getQuantity()) ||
(ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
// Create an aligned temporary, and copy to it.
- llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
- if (Align > AI->getAlignment())
- AI->setAlignment(Align);
- IRCallArgs[FirstIRArg] = AI;
+ Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
+ IRCallArgs[FirstIRArg] = AI.getPointer();
EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
} else {
// Skip the extra memcpy call.
- IRCallArgs[FirstIRArg] = Addr;
+ IRCallArgs[FirstIRArg] = Addr.getPointer();
}
}
break;
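
Condensed, the three-way test above decides when a fresh aligned temporary is required for an indirect aggregate argument. A standalone sketch of that decision, with plain integers standing in for CharUnits and the address-space queries (and assuming, as the code above does via getOrEnforceKnownAlignment, that alignment promotion has already been attempted):

    #include <cstdint>

    // Copy to an aligned temporary when:
    //   1. a copy is semantically required (non-byval with NeedsCopy),
    //   2. the argument is byval but its storage is still under-aligned
    //      after attempted promotion, or
    //   3. the argument is byval but lives in the wrong address space.
    bool needsAlignedTemporary(bool byval, bool needsCopy,
                               uint64_t knownAlign, uint64_t requiredAlign,
                               unsigned srcAddrSpace, unsigned argAddrSpace) {
      if (!byval)
        return needsCopy;
      return knownAlign < requiredAlign || srcAddrSpace != argAddrSpace;
    }
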
@@ -3279,7 +3269,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (RV.isScalar())
V = RV.getScalarVal();
else
- V = Builder.CreateLoad(RV.getAggregateAddr());
+ V = Builder.CreateLoad(RV.getAggregateAddress());
// We might have to widen integers, but we should never truncate.
if (ArgInfo.getCoerceToType() != V->getType() &&
@@ -3296,35 +3286,24 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
// FIXME: Avoid the conversion through memory if possible.
- llvm::Value *SrcPtr;
- CharUnits SrcAlign;
+ Address Src = Address::invalid();
if (RV.isScalar() || RV.isComplex()) {
- SrcPtr = CreateMemTemp(I->Ty, "coerce");
- SrcAlign = TypeAlign;
- LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
+ Src = CreateMemTemp(I->Ty, "coerce");
+ LValue SrcLV = MakeAddrLValue(Src, I->Ty);
EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
} else {
- SrcPtr = RV.getAggregateAddr();
- // This alignment is guaranteed by EmitCallArg.
- SrcAlign = TypeAlign;
+ Src = RV.getAggregateAddress();
}
// If the value is offset in memory, apply the offset now.
- if (unsigned Offs = ArgInfo.getDirectOffset()) {
- SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
- SrcPtr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), SrcPtr, Offs);
- SrcPtr = Builder.CreateBitCast(SrcPtr,
- llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
- SrcAlign = SrcAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
- }
+ Src = emitAddressAtOffset(*this, Src, ArgInfo);
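
emitAddressAtOffset folds the removed bitcast/GEP/realign sequence into one helper. The arithmetic underneath is CharUnits::alignmentAtOffset, which is llvm::MinAlign: the largest power of two dividing both the base alignment and the byte offset. A self-contained model of that computation:

    #include <cstdint>

    // Best guaranteed alignment of (base + offset) when base is
    // `align`-aligned; `align` must be a power of two.
    constexpr uint64_t alignmentAtOffset(uint64_t align, uint64_t offset) {
      return offset == 0 ? align
                         : ((align | offset) & (0 - (align | offset)));
    }

    static_assert(alignmentAtOffset(16, 4) == 4,
                  "a field at +4 in a 16-aligned block is only 4-aligned");
    static_assert(alignmentAtOffset(8, 0) == 8,
                  "a zero offset preserves the base alignment");
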
// Fast-isel and the optimizer generally like scalar values better than
// FCAs, so we flatten them if this is safe to do for this argument.
llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
- llvm::Type *SrcTy =
- cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ llvm::Type *SrcTy = Src.getType()->getElementType();
uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
@@ -3333,29 +3312,28 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// of the destination type to allow loading all of it. The bits past
// the source value are left undef.
if (SrcSize < DstSize) {
- llvm::AllocaInst *TempAlloca
- = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
- Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
- SrcPtr = TempAlloca;
+ Address TempAlloca
+ = CreateTempAlloca(STy, Src.getAlignment(),
+ Src.getName() + ".coerce");
+ Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
+ Src = TempAlloca;
} else {
- SrcPtr = Builder.CreateBitCast(SrcPtr,
- llvm::PointerType::getUnqual(STy));
+ Src = Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(STy));
}
+ auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
assert(NumIRArgs == STy->getNumElements());
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, SrcPtr, 0, i);
- llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
- // We don't know what we're loading from.
- LI->setAlignment(1);
+ auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
+ Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
+ llvm::Value *LI = Builder.CreateLoad(EltPtr);
IRCallArgs[FirstIRArg + i] = LI;
}
} else {
// In the simple case, just pass the coerced loaded value.
assert(NumIRArgs == 1);
IRCallArgs[FirstIRArg] =
- CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
- SrcAlign, *this);
+ CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
}
break;
@@ -3369,8 +3347,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
}
- if (ArgMemory) {
- llvm::Value *Arg = ArgMemory;
+ if (ArgMemory.isValid()) {
+ llvm::Value *Arg = ArgMemory.getPointer();
if (CallInfo.isVariadic()) {
// When passing non-POD arguments by value to variadic functions, we will
// end up with a variadic prototype and an inalloca call site. In such
@@ -3496,7 +3474,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (CS.doesNotReturn()) {
if (UnusedReturnSize)
EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
- SRetPtr);
+ SRetPtr.getPointer());
Builder.CreateUnreachable();
Builder.ClearInsertionPoint();
@@ -3530,7 +3508,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
if (UnusedReturnSize)
EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
- SRetPtr);
+ SRetPtr.getPointer());
return ret;
}
@@ -3550,15 +3528,14 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
return RValue::getComplex(std::make_pair(Real, Imag));
}
case TEK_Aggregate: {
- llvm::Value *DestPtr = ReturnValue.getValue();
+ Address DestPtr = ReturnValue.getValue();
bool DestIsVolatile = ReturnValue.isVolatile();
- CharUnits DestAlign = getContext().getTypeAlignInChars(RetTy);
- if (!DestPtr) {
+ if (!DestPtr.isValid()) {
DestPtr = CreateMemTemp(RetTy, "agg.tmp");
DestIsVolatile = false;
}
- BuildAggStore(*this, CI, DestPtr, DestIsVolatile, DestAlign);
+ BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
return RValue::getAggregate(DestPtr);
}
case TEK_Scalar: {
@@ -3573,28 +3550,17 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm_unreachable("bad evaluation kind");
}
- llvm::Value *DestPtr = ReturnValue.getValue();
+ Address DestPtr = ReturnValue.getValue();
bool DestIsVolatile = ReturnValue.isVolatile();
- CharUnits DestAlign = getContext().getTypeAlignInChars(RetTy);
- if (!DestPtr) {
+ if (!DestPtr.isValid()) {
DestPtr = CreateMemTemp(RetTy, "coerce");
DestIsVolatile = false;
}
// If the value is offset in memory, apply the offset now.
- llvm::Value *StorePtr = DestPtr;
- CharUnits StoreAlign = DestAlign;
- if (unsigned Offs = RetAI.getDirectOffset()) {
- StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
- StorePtr =
- Builder.CreateConstGEP1_32(Builder.getInt8Ty(), StorePtr, Offs);
- StorePtr = Builder.CreateBitCast(StorePtr,
- llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
- StoreAlign =
- StoreAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
- }
- CreateCoercedStore(CI, StorePtr, DestIsVolatile, StoreAlign, *this);
+ Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
+ CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
return convertTempToRValue(DestPtr, RetTy, SourceLocation());
}
@@ -3624,6 +3590,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
/* VarArg handling */
-llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
- return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
+Address CodeGenFunction::EmitVAArg(Address VAListAddr, QualType Ty) {
+ return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
}
diff --git a/clang/lib/CodeGen/CGCall.h b/clang/lib/CodeGen/CGCall.h
index 7a4708e5ccf..1b75c153661 100644
--- a/clang/lib/CodeGen/CGCall.h
+++ b/clang/lib/CodeGen/CGCall.h
@@ -56,7 +56,7 @@ namespace CodeGen {
class CallArgList :
public SmallVector<CallArg, 16> {
public:
- CallArgList() : StackBase(nullptr), StackBaseMem(nullptr) {}
+ CallArgList() : StackBase(nullptr), StackBaseMem(Address::invalid()) {}
struct Writeback {
/// The original argument. Note that the argument l-value
@@ -64,7 +64,7 @@ namespace CodeGen {
LValue Source;
/// The temporary alloca.
- llvm::Value *Temporary;
+ Address Temporary;
/// A value to "use" after the writeback, or null.
llvm::Value *ToUse;
@@ -88,12 +88,9 @@ namespace CodeGen {
other.Writebacks.begin(), other.Writebacks.end());
}
- void addWriteback(LValue srcLV, llvm::Value *temporary,
+ void addWriteback(LValue srcLV, Address temporary,
llvm::Value *toUse) {
- Writeback writeback;
- writeback.Source = srcLV;
- writeback.Temporary = temporary;
- writeback.ToUse = toUse;
+ Writeback writeback = { srcLV, temporary, toUse };
Writebacks.push_back(writeback);
}
@@ -138,7 +135,7 @@ namespace CodeGen {
llvm::CallInst *StackBase;
     /// The alloca holding the stack base. We need it to maintain SSA form.
- llvm::AllocaInst *StackBaseMem;
+ Address StackBaseMem;
/// The iterator pointing to the stack restore cleanup. We manually run and
/// deactivate this cleanup after the call in the unexceptional case because
@@ -156,6 +153,7 @@ namespace CodeGen {
/// function can be stored, and whether the address is volatile or not.
class ReturnValueSlot {
llvm::PointerIntPair<llvm::Value *, 2, unsigned int> Value;
+ CharUnits Alignment;
// Return value slot flags
enum Flags {
@@ -165,14 +163,15 @@ namespace CodeGen {
public:
ReturnValueSlot() {}
- ReturnValueSlot(llvm::Value *Value, bool IsVolatile, bool IsUnused = false)
- : Value(Value,
- (IsVolatile ? IS_VOLATILE : 0) | (IsUnused ? IS_UNUSED : 0)) {}
+ ReturnValueSlot(Address Addr, bool IsVolatile, bool IsUnused = false)
+ : Value(Addr.isValid() ? Addr.getPointer() : nullptr,
+ (IsVolatile ? IS_VOLATILE : 0) | (IsUnused ? IS_UNUSED : 0)),
+ Alignment(Addr.isValid() ? Addr.getAlignment() : CharUnits::Zero()) {}
- bool isNull() const { return !getValue(); }
+ bool isNull() const { return !getValue().isValid(); }
bool isVolatile() const { return Value.getInt() & IS_VOLATILE; }
- llvm::Value *getValue() const { return Value.getPointer(); }
+ Address getValue() const { return Address(Value.getPointer(), Alignment); }
bool isUnused() const { return Value.getInt() & IS_UNUSED; }
};
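
ReturnValueSlot keeps its two flags in the spare low bits of the slot pointer, which is why the new alignment travels in a separate CharUnits member rather than being packed alongside them. A minimal sketch of that bit-packing trick (illustrative only, not llvm::PointerIntPair itself):

    #include <cassert>
    #include <cstdint>

    class PtrAndTwoFlags {
      uintptr_t Bits = 0;
    public:
      void set(void *P, unsigned Flags) {
        auto Raw = reinterpret_cast<uintptr_t>(P);
        assert((Raw & 3u) == 0 && "pointee must be at least 4-byte aligned");
        assert(Flags < 4u && "only two spare bits available");
        Bits = Raw | Flags;
      }
      void *getPointer() const {
        return reinterpret_cast<void *>(Bits & ~uintptr_t(3));
      }
      unsigned getFlags() const { return unsigned(Bits & 3u); }
    };
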
diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp
index 676cd13c8d1..ebeb098ba1e 100644
--- a/clang/lib/CodeGen/CGClass.cpp
+++ b/clang/lib/CodeGen/CGClass.cpp
@@ -29,6 +29,119 @@
using namespace clang;
using namespace CodeGen;
+/// Return the best known alignment for an unknown pointer to a
+/// particular class.
+CharUnits CodeGenModule::getClassPointerAlignment(const CXXRecordDecl *RD) {
+ if (!RD->isCompleteDefinition())
+ return CharUnits::One(); // Hopefully won't be used anywhere.
+
+ auto &layout = getContext().getASTRecordLayout(RD);
+
+ // If the class is final, then we know that the pointer points to an
+ // object of that type and can use the full alignment.
+ if (RD->hasAttr<FinalAttr>()) {
+ return layout.getAlignment();
+
+ // Otherwise, we have to assume it could be a subclass.
+ } else {
+ return layout.getNonVirtualAlignment();
+ }
+}
+
+/// Return the best known alignment for a pointer to a virtual base,
+/// given the alignment of a pointer to the derived class.
+CharUnits CodeGenModule::getVBaseAlignment(CharUnits actualDerivedAlign,
+ const CXXRecordDecl *derivedClass,
+ const CXXRecordDecl *vbaseClass) {
+ // The basic idea here is that an underaligned derived pointer might
+ // indicate an underaligned base pointer.
+
+ assert(vbaseClass->isCompleteDefinition());
+ auto &baseLayout = getContext().getASTRecordLayout(vbaseClass);
+ CharUnits expectedVBaseAlign = baseLayout.getNonVirtualAlignment();
+
+ return getDynamicOffsetAlignment(actualDerivedAlign, derivedClass,
+ expectedVBaseAlign);
+}
+
+CharUnits
+CodeGenModule::getDynamicOffsetAlignment(CharUnits actualBaseAlign,
+ const CXXRecordDecl *baseDecl,
+ CharUnits expectedTargetAlign) {
+ // If the base is an incomplete type (which is, alas, possible with
+ // member pointers), be pessimistic.
+ if (!baseDecl->isCompleteDefinition())
+ return std::min(actualBaseAlign, expectedTargetAlign);
+
+ auto &baseLayout = getContext().getASTRecordLayout(baseDecl);
+ CharUnits expectedBaseAlign = baseLayout.getNonVirtualAlignment();
+
+ // If the class is properly aligned, assume the target offset is, too.
+ //
+ // This actually isn't necessarily the right thing to do --- if the
+ // class is a complete object, but it's only properly aligned for a
+ // base subobject, then the alignments of things relative to it are
+ // probably off as well. (Note that this requires the alignment of
+ // the target to be greater than the NV alignment of the derived
+ // class.)
+ //
+ // However, our approach to this kind of under-alignment can only
+ // ever be best effort; after all, we're never going to propagate
+ // alignments through variables or parameters. Note, in particular,
+  // that constructing a polymorphic type at an address that's less
+ // than pointer-aligned will generally trap in the constructor,
+ // unless we someday add some sort of attribute to change the
+ // assumed alignment of 'this'. So our goal here is pretty much
+ // just to allow the user to explicitly say that a pointer is
+ // under-aligned and then safely access its fields and v-tables.
+ if (actualBaseAlign >= expectedBaseAlign) {
+ return expectedTargetAlign;
+ }
+
+ // Otherwise, we might be offset by an arbitrary multiple of the
+ // actual alignment. The correct adjustment is to take the min of
+ // the two alignments.
+ return std::min(actualBaseAlign, expectedTargetAlign);
+}
+
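+// A standalone restatement of the rule above, with plain integers in
+// place of CharUnits: trust the expected subobject alignment only when
+// the pointer we actually hold meets the expected alignment of its own
+// class; otherwise we may be offset by any multiple of the weaker
+// alignment, and the min of the two is all we can promise.
+//
+//    #include <algorithm>
+//    #include <cstdint>
+//
+//    uint64_t dynamicOffsetAlignment(uint64_t actualBaseAlign,
+//                                    uint64_t expectedBaseAlign,
+//                                    uint64_t expectedTargetAlign) {
+//      if (actualBaseAlign >= expectedBaseAlign)
+//        return expectedTargetAlign;
+//      return std::min(actualBaseAlign, expectedTargetAlign);
+//    }
+//
+// e.g. a vbase whose non-virtual alignment is 16, reached through a
+// derived pointer known only 4-aligned (expected 8), yields 4.
+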
+Address CodeGenFunction::LoadCXXThisAddress() {
+ assert(CurFuncDecl && "loading 'this' without a func declaration?");
+ assert(isa<CXXMethodDecl>(CurFuncDecl));
+
+ // Lazily compute CXXThisAlignment.
+ if (CXXThisAlignment.isZero()) {
+ // Just use the best known alignment for the parent.
+ // TODO: if we're currently emitting a complete-object ctor/dtor,
+ // we can always use the complete-object alignment.
+ auto RD = cast<CXXMethodDecl>(CurFuncDecl)->getParent();
+ CXXThisAlignment = CGM.getClassPointerAlignment(RD);
+ }
+
+ return Address(LoadCXXThis(), CXXThisAlignment);
+}
+
+/// Emit the address of a field using a member data pointer.
+///
+/// \param E Only used for emergency diagnostics
+Address
+CodeGenFunction::EmitCXXMemberDataPointerAddress(const Expr *E, Address base,
+ llvm::Value *memberPtr,
+ const MemberPointerType *memberPtrType,
+ AlignmentSource *alignSource) {
+ // Ask the ABI to compute the actual address.
+ llvm::Value *ptr =
+ CGM.getCXXABI().EmitMemberDataPointerAddress(*this, E, base,
+ memberPtr, memberPtrType);
+
+ QualType memberType = memberPtrType->getPointeeType();
+ CharUnits memberAlign = getNaturalTypeAlignment(memberType, alignSource);
+ memberAlign =
+ CGM.getDynamicOffsetAlignment(base.getAlignment(),
+ memberPtrType->getClass()->getAsCXXRecordDecl(),
+ memberAlign);
+ return Address(ptr, memberAlign);
+}
+
CharUnits CodeGenModule::computeNonVirtualBaseClassOffset(
const CXXRecordDecl *DerivedClass, CastExpr::path_const_iterator Start,
CastExpr::path_const_iterator End) {
@@ -78,15 +191,13 @@ CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
/// when the type is known to be complete (e.g. in complete destructors).
///
/// The object pointed to by 'This' is assumed to be non-null.
-llvm::Value *
-CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
+Address
+CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(Address This,
const CXXRecordDecl *Derived,
const CXXRecordDecl *Base,
bool BaseIsVirtual) {
// 'this' must be a pointer (in some address space) to Derived.
- assert(This->getType()->isPointerTy() &&
- cast<llvm::PointerType>(This->getType())->getElementType()
- == ConvertType(Derived));
+ assert(This.getElementType() == ConvertType(Derived));
// Compute the offset of the virtual base.
CharUnits Offset;
@@ -98,20 +209,22 @@ CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
// Shift and cast down to the base type.
// TODO: for complete types, this should be possible with a GEP.
- llvm::Value *V = This;
- if (Offset.isPositive()) {
- V = Builder.CreateBitCast(V, Int8PtrTy);
- V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
+ Address V = This;
+ if (!Offset.isZero()) {
+ V = Builder.CreateElementBitCast(V, Int8Ty);
+ V = Builder.CreateConstInBoundsByteGEP(V, Offset);
}
- V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());
+ V = Builder.CreateElementBitCast(V, ConvertType(Base));
return V;
}
-static llvm::Value *
-ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
+static Address
+ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, Address addr,
CharUnits nonVirtualOffset,
- llvm::Value *virtualOffset) {
+ llvm::Value *virtualOffset,
+ const CXXRecordDecl *derivedClass,
+ const CXXRecordDecl *nearestVBase) {
// Assert that we have something to do.
assert(!nonVirtualOffset.isZero() || virtualOffset != nullptr);
@@ -128,13 +241,27 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
}
// Apply the base offset.
+ llvm::Value *ptr = addr.getPointer();
ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
- return ptr;
+
+ // If we have a virtual component, the alignment of the result will
+ // be relative only to the known alignment of that vbase.
+ CharUnits alignment;
+ if (virtualOffset) {
+ assert(nearestVBase && "virtual offset without vbase?");
+ alignment = CGF.CGM.getVBaseAlignment(addr.getAlignment(),
+ derivedClass, nearestVBase);
+ } else {
+ alignment = addr.getAlignment();
+ }
+ alignment = alignment.alignmentAtOffset(nonVirtualOffset);
+
+ return Address(ptr, alignment);
}
-llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
- llvm::Value *Value, const CXXRecordDecl *Derived,
+Address CodeGenFunction::GetAddressOfBaseClass(
+ Address Value, const CXXRecordDecl *Derived,
CastExpr::path_const_iterator PathBegin,
CastExpr::path_const_iterator PathEnd, bool NullCheckValue,
SourceLocation Loc) {
@@ -174,14 +301,14 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
ConvertType((PathEnd[-1])->getType())->getPointerTo();
QualType DerivedTy = getContext().getRecordType(Derived);
- CharUnits DerivedAlign = getContext().getTypeAlignInChars(DerivedTy);
+ CharUnits DerivedAlign = CGM.getClassPointerAlignment(Derived);
// If the static offset is zero and we don't have a virtual step,
// just do a bitcast; null checks are unnecessary.
if (NonVirtualOffset.isZero() && !VBase) {
if (sanitizePerformTypeCheck()) {
- EmitTypeCheck(TCK_Upcast, Loc, Value, DerivedTy, DerivedAlign,
- !NullCheckValue);
+ EmitTypeCheck(TCK_Upcast, Loc, Value.getPointer(),
+ DerivedTy, DerivedAlign, !NullCheckValue);
}
return Builder.CreateBitCast(Value, BasePtrTy);
}
@@ -196,14 +323,14 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
endBB = createBasicBlock("cast.end");
- llvm::Value *isNull = Builder.CreateIsNull(Value);
+ llvm::Value *isNull = Builder.CreateIsNull(Value.getPointer());
Builder.CreateCondBr(isNull, endBB, notNullBB);
EmitBlock(notNullBB);
}
if (sanitizePerformTypeCheck()) {
- EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc, Value,
- DerivedTy, DerivedAlign, true);
+ EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc,
+ Value.getPointer(), DerivedTy, DerivedAlign, true);
}
// Compute the virtual offset.
@@ -214,9 +341,8 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
}
// Apply both offsets.
- Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
- NonVirtualOffset,
- VirtualOffset);
+ Value = ApplyNonVirtualAndVirtualOffset(*this, Value, NonVirtualOffset,
+ VirtualOffset, Derived, VBase);
// Cast to the destination type.
Value = Builder.CreateBitCast(Value, BasePtrTy);
@@ -228,16 +354,16 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
EmitBlock(endBB);
llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
- PHI->addIncoming(Value, notNullBB);
+ PHI->addIncoming(Value.getPointer(), notNullBB);
PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
- Value = PHI;
+ Value = Address(PHI, Value.getAlignment());
}
return Value;
}
-llvm::Value *
-CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
+Address
+CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
const CXXRecordDecl *Derived,
CastExpr::path_const_iterator PathBegin,
CastExpr::path_const_iterator PathEnd,
@@ -253,7 +379,7 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
if (!NonVirtualOffset) {
// No offset, we can just cast back.
- return Builder.CreateBitCast(Value, DerivedPtrTy);
+ return Builder.CreateBitCast(BaseAddr, DerivedPtrTy);
}
llvm::BasicBlock *CastNull = nullptr;
@@ -265,19 +391,20 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
CastNotNull = createBasicBlock("cast.notnull");
CastEnd = createBasicBlock("cast.end");
- llvm::Value *IsNull = Builder.CreateIsNull(Value);
+ llvm::Value *IsNull = Builder.CreateIsNull(BaseAddr.getPointer());
Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
EmitBlock(CastNotNull);
}
// Apply the offset.
- Value = Builder.CreateBitCast(Value, Int8PtrTy);
+ llvm::Value *Value = Builder.CreateBitCast(BaseAddr.getPointer(), Int8PtrTy);
Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
"sub.ptr");
// Just cast.
Value = Builder.CreateBitCast(Value, DerivedPtrTy);
+ // Produce a PHI if we had a null-check.
if (NullCheckValue) {
Builder.CreateBr(CastEnd);
EmitBlock(CastNull);
@@ -286,12 +413,11 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
PHI->addIncoming(Value, CastNotNull);
- PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
- CastNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
Value = PHI;
}
- return Value;
+ return Address(Value, CGM.getClassPointerAlignment(Derived));
}
llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
@@ -356,8 +482,8 @@ namespace {
cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();
const CXXDestructorDecl *D = BaseClass->getDestructor();
- llvm::Value *Addr =
- CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
+ Address Addr =
+ CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThisAddress(),
DerivedClass, BaseClass,
BaseIsVirtual);
CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual,
@@ -396,7 +522,7 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
assert(BaseInit->isBaseInitializer() &&
"Must have base initializer!");
- llvm::Value *ThisPtr = CGF.LoadCXXThis();
+ Address ThisPtr = CGF.LoadCXXThisAddress();
const Type *BaseType = BaseInit->getBaseClass();
CXXRecordDecl *BaseClassDecl =
@@ -416,13 +542,12 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
// We can pretend to be a complete class because it only matters for
// virtual bases, and we only do virtual bases for complete ctors.
- llvm::Value *V =
+ Address V =
CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
BaseClassDecl,
isBaseVirtual);
- CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
AggValueSlot AggSlot =
- AggValueSlot::forAddr(V, Alignment, Qualifiers(),
+ AggValueSlot::forAddr(V, Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
@@ -438,17 +563,17 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
static void EmitAggMemberInitializer(CodeGenFunction &CGF,
LValue LHS,
Expr *Init,
- llvm::Value *ArrayIndexVar,
+ Address ArrayIndexVar,
QualType T,
ArrayRef<VarDecl *> ArrayIndexes,
unsigned Index) {
if (Index == ArrayIndexes.size()) {
LValue LV = LHS;
- if (ArrayIndexVar) {
+ if (ArrayIndexVar.isValid()) {
// If we have an array index variable, load it and use it as an offset.
// Then, increment the value.
- llvm::Value *Dest = LHS.getAddress();
+ llvm::Value *Dest = LHS.getPointer();
llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
@@ -456,9 +581,9 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
CGF.Builder.CreateStore(Next, ArrayIndexVar);
// Update the LValue.
- LV.setAddress(Dest);
- CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
- LV.setAlignment(std::min(Align, LV.getAlignment()));
+ CharUnits EltSize = CGF.getContext().getTypeSizeInChars(T);
+ CharUnits Align = LV.getAlignment().alignmentOfArrayElement(EltSize);
+ LV.setAddress(Address(Dest, Align));
}
switch (CGF.getEvaluationKind(T)) {
@@ -485,14 +610,11 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
assert(Array && "Array initialization without the array type?");
- llvm::Value *IndexVar
- = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
- assert(IndexVar && "Array index variable not loaded");
+ Address IndexVar = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
// Initialize this index variable to zero.
llvm::Value* Zero
- = llvm::Constant::getNullValue(
- CGF.ConvertType(CGF.getContext().getSizeType()));
+ = llvm::Constant::getNullValue(IndexVar.getElementType());
CGF.Builder.CreateStore(Zero, IndexVar);
// Start the loop with a block that tests the condition.
@@ -626,9 +748,8 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
}
-void CodeGenFunction::EmitInitializerForField(
- FieldDecl *Field, LValue LHS, Expr *Init,
- ArrayRef<VarDecl *> ArrayIndexes) {
+void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS,
+ Expr *Init, ArrayRef<VarDecl *> ArrayIndexes) {
QualType FieldType = Field->getType();
switch (getEvaluationKind(FieldType)) {
case TEK_Scalar:
@@ -643,26 +764,23 @@ void CodeGenFunction::EmitInitializerForField(
EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true);
break;
case TEK_Aggregate: {
- llvm::Value *ArrayIndexVar = nullptr;
+ Address ArrayIndexVar = Address::invalid();
if (ArrayIndexes.size()) {
- llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
-
// The LHS is a pointer to the first object we'll be constructing, as
// a flat array.
QualType BaseElementTy = getContext().getBaseElementType(FieldType);
llvm::Type *BasePtr = ConvertType(BaseElementTy);
BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
- BasePtr);
+ Address BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(), BasePtr);
LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);
// Create an array index that will be used to walk over all of the
// objects we're constructing.
- ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
- llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
+ ArrayIndexVar = CreateMemTemp(getContext().getSizeType(), "object.index");
+ llvm::Value *Zero =
+ llvm::Constant::getNullValue(ArrayIndexVar.getElementType());
Builder.CreateStore(Zero, ArrayIndexVar);
-
// Emit the block variables for the array indices, if any.
for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
EmitAutoVarDecl(*ArrayIndexes[I]);
@@ -930,19 +1048,16 @@ namespace {
CharUnits MemcpySize = getMemcpySize(FirstByteOffset);
QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
- llvm::Value *ThisPtr = CGF.LoadCXXThis();
- LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
+ Address ThisPtr = CGF.LoadCXXThisAddress();
+ LValue DestLV = CGF.MakeAddrLValue(ThisPtr, RecordTy);
LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec));
LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);
- CharUnits Offset = CGF.getContext().toCharUnitsFromBits(FirstByteOffset);
- CharUnits Alignment = DestLV.getAlignment().alignmentAtOffset(Offset);
-
- emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr() : Dest.getAddress(),
- Src.isBitField() ? Src.getBitFieldAddr() : Src.getAddress(),
- MemcpySize, Alignment);
+ emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddress() : Dest.getAddress(),
+ Src.isBitField() ? Src.getBitFieldAddress() : Src.getAddress(),
+ MemcpySize);
reset();
}
@@ -956,20 +1071,18 @@ namespace {
private:
- void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr,
- CharUnits Size, CharUnits Alignment) {
- llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
+ void emitMemcpyIR(Address DestPtr, Address SrcPtr, CharUnits Size) {
+ llvm::PointerType *DPT = DestPtr.getType();
llvm::Type *DBP =
llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);
- llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
+ llvm::PointerType *SPT = SrcPtr.getType();
llvm::Type *SBP =
llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);
- CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(),
- Alignment.getQuantity());
+ CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity());
}
void addInitialField(FieldDecl *F) {
@@ -1089,9 +1202,9 @@ namespace {
}
void pushEHDestructors() {
- llvm::Value *ThisPtr = CGF.LoadCXXThis();
+ Address ThisPtr = CGF.LoadCXXThisAddress();
QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
- LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
+ LValue LHS = CGF.MakeAddrLValue(ThisPtr, RecordTy);
for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
CXXCtorInitializer *MemberInit = AggregatedInits[i];
@@ -1274,7 +1387,7 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
InitializeVTablePointers(ClassDecl);
// And finally, initialize class members.
- FieldConstructionScope FCS(*this, CXXThisValue);
+ FieldConstructionScope FCS(*this, LoadCXXThisAddress());
ConstructorMemcpyizer CM(*this, CD, Args);
for (; B != E; B++) {
CXXCtorInitializer *Member = (*B);
@@ -1383,7 +1496,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
if (DtorType == Dtor_Deleting) {
EnterDtorCleanups(Dtor, Dtor_Deleting);
EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
- /*Delegating=*/false, LoadCXXThis());
+ /*Delegating=*/false, LoadCXXThisAddress());
PopCleanupBlock();
return;
}
@@ -1418,7 +1531,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
if (!isTryBody) {
EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
- /*Delegating=*/false, LoadCXXThis());
+ /*Delegating=*/false, LoadCXXThisAddress());
break;
}
// Fallthrough: act like we're in the base variant.
@@ -1524,7 +1637,7 @@ namespace {
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Find the address of the field.
- llvm::Value *thisValue = CGF.LoadCXXThis();
+ Address thisValue = CGF.LoadCXXThisAddress();
QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
LValue LV = CGF.EmitLValueForField(ThisLV, field);
@@ -1738,7 +1851,7 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
/// zero-initialized before it is constructed
void CodeGenFunction::EmitCXXAggrConstructorCall(
const CXXConstructorDecl *ctor, const ConstantArrayType *arrayType,
- llvm::Value *arrayBegin, const CXXConstructExpr *E, bool zeroInitialize) {
+ Address arrayBegin, const CXXConstructExpr *E, bool zeroInitialize) {
QualType elementType;
llvm::Value *numElements =
emitArrayLength(arrayType, elementType, arrayBegin);
@@ -1757,7 +1870,7 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(
/// zero-initialized before it is constructed
void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
llvm::Value *numElements,
- llvm::Value *arrayBegin,
+ Address arrayBase,
const CXXConstructExpr *E,
bool zeroInitialize) {
@@ -1784,6 +1897,7 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
}
// Find the end of the array.
+ llvm::Value *arrayBegin = arrayBase.getPointer();
llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
"arrayctor.end");
@@ -1797,11 +1911,21 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
// Inside the loop body, emit the constructor call on the array element.
+ // The alignment of the base, adjusted by the size of a single element,
+ // provides a conservative estimate of the alignment of every element.
+ // (This assumes we never start tracking offsetted alignments.)
+ //
+ // Note that these are complete objects and so we don't need to
+ // use the non-virtual size or alignment.
QualType type = getContext().getTypeDeclType(ctor->getParent());
+ CharUnits eltAlignment =
+ arrayBase.getAlignment()
+ .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
+ Address curAddr = Address(cur, eltAlignment);
// Zero initialize the storage, if requested.
if (zeroInitialize)
- EmitNullInitialization(cur, type);
+ EmitNullInitialization(curAddr, type);
// C++ [class.temporary]p4:
// There are two contexts in which temporaries are destroyed at a different
@@ -1819,11 +1943,12 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
if (getLangOpts().Exceptions &&
!ctor->getParent()->hasTrivialDestructor()) {
Destroyer *destroyer = destroyCXXObject;
- pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
+ pushRegularPartialArrayCleanup(arrayBegin, cur, type, eltAlignment,
+ *destroyer);
}
EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false,
- /*Delegating=*/false, cur, E);
+ /*Delegating=*/false, curAddr, E);
}
// Go to the next element.
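
The eltAlignment computation above is CharUnits::alignmentOfArrayElement, which is again llvm::MinAlign: element i sits at base + i*size, so the alignment all elements are guaranteed to share is the largest power of two dividing both the base alignment and the element size. A standalone check:

    #include <cstdint>

    constexpr uint64_t minAlign(uint64_t a, uint64_t b) {
      return (a | b) & (0 - (a | b));
    }

    // 12-byte elements in a 16-aligned array sit at offsets 0, 12, 24, ...
    // and so are only guaranteed 4-aligned.
    static_assert(minAlign(16, 12) == 4, "conservative element alignment");
    // Elements whose size is a multiple of the base alignment keep it.
    static_assert(minAlign(16, 32) == 16, "base alignment preserved");
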
@@ -1844,7 +1969,7 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
}
void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
- llvm::Value *addr,
+ Address addr,
QualType type) {
const RecordType *rtype = type->castAs<RecordType>();
const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
@@ -1857,14 +1982,14 @@ void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
CXXCtorType Type,
bool ForVirtualBase,
- bool Delegating, llvm::Value *This,
+ bool Delegating, Address This,
const CXXConstructExpr *E) {
// C++11 [class.mfct.non-static]p2:
// If a non-static member function of a class X is called for an object that
// is not of type X, or of a type derived from X, the behavior is undefined.
// FIXME: Provide a source location here.
- EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(), This,
- getContext().getRecordType(D->getParent()));
+ EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(),
+ This.getPointer(), getContext().getRecordType(D->getParent()));
if (D->isTrivial() && D->isDefaultConstructor()) {
assert(E->getNumArgs() == 0 && "trivial default ctor with args");
@@ -1879,7 +2004,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
const Expr *Arg = E->getArg(0);
QualType SrcTy = Arg->getType();
- llvm::Value *Src = EmitLValue(Arg).getAddress();
+ Address Src = EmitLValue(Arg).getAddress();
QualType DestTy = getContext().getTypeDeclType(D->getParent());
EmitAggregateCopyCtor(This, Src, DestTy, SrcTy);
return;
@@ -1888,7 +2013,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
CallArgList Args;
// Push the this ptr.
- Args.add(RValue::get(This), D->getThisType(getContext()));
+ Args.add(RValue::get(This.getPointer()), D->getThisType(getContext()));
// Add the rest of the user-supplied arguments.
const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
@@ -1907,8 +2032,8 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
void
CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
- llvm::Value *This, llvm::Value *Src,
- const CXXConstructExpr *E) {
+ Address This, Address Src,
+ const CXXConstructExpr *E) {
if (isMemcpyEquivalentSpecialMember(D)) {
assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
assert(D->isCopyOrMoveConstructor() &&
@@ -1927,13 +2052,13 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
CallArgList Args;
// Push the this ptr.
- Args.add(RValue::get(This), D->getThisType(getContext()));
+ Args.add(RValue::get(This.getPointer()), D->getThisType(getContext()));
// Push the src ptr.
QualType QT = *(FPT->param_type_begin());
llvm::Type *t = CGM.getTypes().ConvertType(QT);
Src = Builder.CreateBitCast(Src, t);
- Args.add(RValue::get(Src), QT);
+ Args.add(RValue::get(Src.getPointer()), QT);
// Skip over first argument (Src).
EmitCallArgs(Args, FPT, drop_begin(E->arguments(), 1), E->getConstructor(),
@@ -1988,10 +2113,10 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
namespace {
struct CallDelegatingCtorDtor final : EHScopeStack::Cleanup {
const CXXDestructorDecl *Dtor;
- llvm::Value *Addr;
+ Address Addr;
CXXDtorType Type;
- CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
+ CallDelegatingCtorDtor(const CXXDestructorDecl *D, Address Addr,
CXXDtorType Type)
: Dtor(D), Addr(Addr), Type(Type) {}
@@ -2007,12 +2132,10 @@ CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor
const FunctionArgList &Args) {
assert(Ctor->isDelegatingConstructor());
- llvm::Value *ThisPtr = LoadCXXThis();
+ Address ThisPtr = LoadCXXThisAddress();
- QualType Ty = getContext().getTagDeclType(Ctor->getParent());
- CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
AggValueSlot AggSlot =
- AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
+ AggValueSlot::forAddr(ThisPtr, Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
@@ -2034,7 +2157,7 @@ void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
CXXDtorType Type,
bool ForVirtualBase,
bool Delegating,
- llvm::Value *This) {
+ Address This) {
CGM.getCXXABI().EmitDestructorCall(*this, DD, Type, ForVirtualBase,
Delegating, This);
}
@@ -2042,9 +2165,9 @@ void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
namespace {
struct CallLocalDtor final : EHScopeStack::Cleanup {
const CXXDestructorDecl *Dtor;
- llvm::Value *Addr;
+ Address Addr;
- CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
+ CallLocalDtor(const CXXDestructorDecl *D, Address Addr)
: Dtor(D), Addr(Addr) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
@@ -2056,11 +2179,11 @@ namespace {
}
void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
- llvm::Value *Addr) {
+ Address Addr) {
EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
}
-void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
+void CodeGenFunction::PushDestructorCleanup(QualType T, Address Addr) {
CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
if (!ClassDecl) return;
if (ClassDecl->hasTrivialDestructor()) return;
@@ -2098,10 +2221,9 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
if (NeedsVirtualOffset) {
// We need to use the virtual base offset offset because the virtual base
// might have a different offset in the most derived class.
- VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(*this,
- LoadCXXThis(),
- VTableClass,
- NearestVBase);
+ VirtualOffset =
+ CGM.getCXXABI().GetVirtualBaseClassOffset(*this, LoadCXXThisAddress(),
+ VTableClass, NearestVBase);
NonVirtualOffset = OffsetFromNearestVBase;
} else {
// We can just use the base offset in the complete class.
@@ -2109,12 +2231,14 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
}
// Apply the offsets.
- llvm::Value *VTableField = LoadCXXThis();
+ Address VTableField = LoadCXXThisAddress();
if (!NonVirtualOffset.isZero() || VirtualOffset)
VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
NonVirtualOffset,
- VirtualOffset);
+ VirtualOffset,
+ VTableClass,
+ NearestVBase);
// Finally, store the address point. Use the same LLVM types as the field to
// support optimization.
@@ -2202,9 +2326,9 @@ void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD);
}
-llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
+llvm::Value *CodeGenFunction::GetVTablePtr(Address This,
llvm::Type *Ty) {
- llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
+ Address VTablePtrSrc = Builder.CreateElementBitCast(This, Ty);
llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
return VTable;
@@ -2290,7 +2414,8 @@ void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T,
EmitBlock(CheckBlock);
}
- llvm::Value *VTable = GetVTablePtr(Derived, Int8PtrTy);
+ llvm::Value *VTable =
+ GetVTablePtr(Address(Derived, getPointerAlign()), Int8PtrTy);
EmitVTablePtrCheck(ClassDecl, VTable, TCK, Loc);
if (MayBeNull) {
@@ -2479,8 +2604,8 @@ void CodeGenFunction::EmitLambdaBlockInvokeBody() {
CallArgList CallArgs;
QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
- llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
- CallArgs.add(RValue::get(ThisPtr), ThisType);
+ Address ThisPtr = GetAddrOfBlockDecl(variable, false);
+ CallArgs.add(RValue::get(ThisPtr.getPointer()), ThisType);
// Add the rest of the parameters.
for (auto param : BD->params())
diff --git a/clang/lib/CodeGen/CGCleanup.cpp b/clang/lib/CodeGen/CGCleanup.cpp
index 95d2914df93..b5a64d0a28a 100644
--- a/clang/lib/CodeGen/CGCleanup.cpp
+++ b/clang/lib/CodeGen/CGCleanup.cpp
@@ -27,7 +27,7 @@ bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
if (rv.isScalar())
return DominatingLLVMValue::needsSaving(rv.getScalarVal());
if (rv.isAggregate())
- return DominatingLLVMValue::needsSaving(rv.getAggregateAddr());
+ return DominatingLLVMValue::needsSaving(rv.getAggregatePointer());
return true;
}
@@ -41,9 +41,10 @@ DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
return saved_type(V, ScalarLiteral);
// Everything else needs an alloca.
- llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
+ Address addr =
+ CGF.CreateDefaultAlignTempAlloca(V->getType(), "saved-rvalue");
CGF.Builder.CreateStore(V, addr);
- return saved_type(addr, ScalarAddress);
+ return saved_type(addr.getPointer(), ScalarAddress);
}
if (rv.isComplex()) {
@@ -51,42 +52,56 @@ DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
llvm::Type *ComplexTy =
llvm::StructType::get(V.first->getType(), V.second->getType(),
(void*) nullptr);
- llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
+ Address addr = CGF.CreateDefaultAlignTempAlloca(ComplexTy, "saved-complex");
CGF.Builder.CreateStore(V.first,
- CGF.Builder.CreateStructGEP(ComplexTy, addr, 0));
+ CGF.Builder.CreateStructGEP(addr, 0, CharUnits()));
+ CharUnits offset = CharUnits::fromQuantity(
+ CGF.CGM.getDataLayout().getTypeAllocSize(V.first->getType()));
CGF.Builder.CreateStore(V.second,
- CGF.Builder.CreateStructGEP(ComplexTy, addr, 1));
- return saved_type(addr, ComplexAddress);
+ CGF.Builder.CreateStructGEP(addr, 1, offset));
+ return saved_type(addr.getPointer(), ComplexAddress);
}
assert(rv.isAggregate());
- llvm::Value *V = rv.getAggregateAddr(); // TODO: volatile?
- if (!DominatingLLVMValue::needsSaving(V))
- return saved_type(V, AggregateLiteral);
-
- llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
- CGF.Builder.CreateStore(V, addr);
- return saved_type(addr, AggregateAddress);
+ Address V = rv.getAggregateAddress(); // TODO: volatile?
+ if (!DominatingLLVMValue::needsSaving(V.getPointer()))
+ return saved_type(V.getPointer(), AggregateLiteral,
+ V.getAlignment().getQuantity());
+
+ Address addr =
+ CGF.CreateTempAlloca(V.getType(), CGF.getPointerAlign(), "saved-rvalue");
+ CGF.Builder.CreateStore(V.getPointer(), addr);
+ return saved_type(addr.getPointer(), AggregateAddress,
+ V.getAlignment().getQuantity());
}
/// Given a saved r-value produced by SaveRValue, perform the code
/// necessary to restore it to usability at the current insertion
/// point.
RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
+ auto getSavingAddress = [&](llvm::Value *value) {
+ auto alignment = cast<llvm::AllocaInst>(value)->getAlignment();
+ return Address(value, CharUnits::fromQuantity(alignment));
+ };
switch (K) {
case ScalarLiteral:
return RValue::get(Value);
case ScalarAddress:
- return RValue::get(CGF.Builder.CreateLoad(Value));
+ return RValue::get(CGF.Builder.CreateLoad(getSavingAddress(Value)));
case AggregateLiteral:
- return RValue::getAggregate(Value);
- case AggregateAddress:
- return RValue::getAggregate(CGF.Builder.CreateLoad(Value));
+ return RValue::getAggregate(Address(Value, CharUnits::fromQuantity(Align)));
+ case AggregateAddress: {
+ auto addr = CGF.Builder.CreateLoad(getSavingAddress(Value));
+ return RValue::getAggregate(Address(addr, CharUnits::fromQuantity(Align)));
+ }
case ComplexAddress: {
- llvm::Value *real =
- CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(nullptr, Value, 0));
- llvm::Value *imag =
- CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(nullptr, Value, 1));
+ Address address = getSavingAddress(Value);
+ llvm::Value *real = CGF.Builder.CreateLoad(
+ CGF.Builder.CreateStructGEP(address, 0, CharUnits()));
+ CharUnits offset = CharUnits::fromQuantity(
+ CGF.CGM.getDataLayout().getTypeAllocSize(real->getType()));
+ llvm::Value *imag = CGF.Builder.CreateLoad(
+ CGF.Builder.CreateStructGEP(address, 1, offset));
return RValue::getComplex(real, imag);
}
}
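
The save/restore symmetry above: an aggregate's pointer is spilled into a pointer-aligned slot, while the aggregate's own alignment rides along out-of-band so restore() can rebuild a complete Address. A minimal model of that shape (plain types, not Clang's DominatingValue machinery):

    #include <cstdint>

    struct RestoredAddress {
      void *Pointer;
      uint64_t Alignment; // alignment of the aggregate itself, in bytes
    };

    struct SavedAggregate {
      void **SpillSlot;   // pointer-aligned slot holding the spilled pointer
      uint64_t Alignment; // remembered separately; not recoverable from the slot
      RestoredAddress restore() const { return {*SpillSlot, Alignment}; }
    };
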
@@ -275,8 +290,8 @@ void EHScopeStack::popNullFixups() {
void CodeGenFunction::initFullExprCleanup() {
// Create a variable to decide whether the cleanup needs to be run.
- llvm::AllocaInst *active
- = CreateTempAlloca(Builder.getInt1Ty(), "cleanup.cond");
+ Address active = CreateTempAlloca(Builder.getInt1Ty(), CharUnits::One(),
+ "cleanup.cond");
// Initialize it to false at a site that's guaranteed to be run
// before each evaluation.
@@ -287,7 +302,7 @@ void CodeGenFunction::initFullExprCleanup() {
// Set that as the active flag in the cleanup.
EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
- assert(!cleanup.getActiveFlag() && "cleanup already has active flag?");
+ assert(!cleanup.hasActiveFlag() && "cleanup already has active flag?");
cleanup.setActiveFlag(active);
if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup();
@@ -296,6 +311,19 @@ void CodeGenFunction::initFullExprCleanup() {
void EHScopeStack::Cleanup::anchor() {}
+static void createStoreInstBefore(llvm::Value *value, Address addr,
+ llvm::Instruction *beforeInst) {
+ auto store = new llvm::StoreInst(value, addr.getPointer(), beforeInst);
+ store->setAlignment(addr.getAlignment().getQuantity());
+}
+
+static llvm::LoadInst *createLoadInstBefore(Address addr, const Twine &name,
+ llvm::Instruction *beforeInst) {
+ auto load = new llvm::LoadInst(addr.getPointer(), name, beforeInst);
+ load->setAlignment(addr.getAlignment().getQuantity());
+ return load;
+}
+
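+// These two helpers exist so that no raw `new llvm::StoreInst` or
+// `new llvm::LoadInst` call site can silently drop the known alignment
+// (an unannotated access lets LLVM fall back to a guess). A generic
+// sketch of the wrap-and-annotate pattern, assuming only an
+// Address-like type with getPointer()/getAlignment():
+//
+//    #include <utility>
+//
+//    template <class Inst, class Addr, class... Args>
+//    Inst *createAligned(const Addr &A, Args &&...Rest) {
+//      auto *I = new Inst(A.getPointer(), std::forward<Args>(Rest)...);
+//      I->setAlignment(A.getAlignment());
+//      return I;
+//    }
+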
/// All the branch fixups on the EH stack have propagated out past the
/// outermost normal cleanup; resolve them all by adding cases to the
/// given switch instruction.
@@ -318,9 +346,9 @@ static void ResolveAllBranchFixups(CodeGenFunction &CGF,
// i.e. where there's an unresolved fixup inside a single cleanup
// entry which we're currently popping.
if (Fixup.OptimisticBranchBlock == nullptr) {
- new llvm::StoreInst(CGF.Builder.getInt32(Fixup.DestinationIndex),
- CGF.getNormalCleanupDestSlot(),
- Fixup.InitialBranch);
+ createStoreInstBefore(CGF.Builder.getInt32(Fixup.DestinationIndex),
+ CGF.getNormalCleanupDestSlot(),
+ Fixup.InitialBranch);
Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
}
@@ -346,8 +374,8 @@ static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
assert(Br->isUnconditional());
- llvm::LoadInst *Load =
- new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term);
+ auto Load = createLoadInstBefore(CGF.getNormalCleanupDestSlot(),
+ "cleanup.dest", Term);
llvm::SwitchInst *Switch =
llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
Br->eraseFromParent();
@@ -492,7 +520,7 @@ static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
static void EmitCleanup(CodeGenFunction &CGF,
EHScopeStack::Cleanup *Fn,
EHScopeStack::Cleanup::Flags flags,
- llvm::Value *ActiveFlag) {
+ Address ActiveFlag) {
// Itanium EH cleanups occur within a terminate scope. Microsoft SEH doesn't
// have this behavior, and the Microsoft C++ runtime will call terminate for
// us if the cleanup throws.
@@ -505,7 +533,7 @@ static void EmitCleanup(CodeGenFunction &CGF,
// If there's an active flag, load it and skip the cleanup if it's
// false.
llvm::BasicBlock *ContBB = nullptr;
- if (ActiveFlag) {
+ if (ActiveFlag.isValid()) {
ContBB = CGF.createBasicBlock("cleanup.done");
llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action");
llvm::Value *IsActive
@@ -519,7 +547,7 @@ static void EmitCleanup(CodeGenFunction &CGF,
assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
// Emit the continuation block if there was an active flag.
- if (ActiveFlag)
+ if (ActiveFlag.isValid())
CGF.EmitBlock(ContBB);
// Leave the terminate scope.
@@ -599,10 +627,12 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// Remember activation information.
bool IsActive = Scope.isActive();
- llvm::Value *NormalActiveFlag =
- Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : nullptr;
- llvm::Value *EHActiveFlag =
- Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : nullptr;
+ Address NormalActiveFlag =
+ Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag()
+ : Address::invalid();
+ Address EHActiveFlag =
+ Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag()
+ : Address::invalid();
// Check whether we need an EH cleanup. This is only true if we've
// generated a lazy EH cleanup block.
@@ -769,7 +799,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// Clean up the possibly dead store to the cleanup dest slot.
llvm::Instruction *NormalCleanupDestSlot =
- cast<llvm::Instruction>(getNormalCleanupDestSlot());
+ cast<llvm::Instruction>(getNormalCleanupDestSlot().getPointer());
if (NormalCleanupDestSlot->hasOneUse()) {
NormalCleanupDestSlot->user_back()->eraseFromParent();
NormalCleanupDestSlot->eraseFromParent();
@@ -795,7 +825,8 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
const unsigned SwitchCapacity = 10;
llvm::LoadInst *Load =
- new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
+ createLoadInstBefore(getNormalCleanupDestSlot(), "cleanup.dest",
+ nullptr);
llvm::SwitchInst *Switch =
llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
@@ -841,9 +872,9 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
BranchFixup &Fixup = EHStack.getBranchFixup(I);
if (!Fixup.Destination) continue;
if (!Fixup.OptimisticBranchBlock) {
- new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
- getNormalCleanupDestSlot(),
- Fixup.InitialBranch);
+ createStoreInstBefore(Builder.getInt32(Fixup.DestinationIndex),
+ getNormalCleanupDestSlot(),
+ Fixup.InitialBranch);
Fixup.InitialBranch->setSuccessor(0, NormalEntry);
}
Fixup.OptimisticBranchBlock = NormalExit;
@@ -908,8 +939,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// We only actually emit the cleanup code if the cleanup is either
// active or was used before it was deactivated.
- if (EHActiveFlag || IsActive) {
-
+ if (EHActiveFlag.isValid() || IsActive) {
cleanupFlags.setIsForEHCleanup();
EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
}
@@ -993,7 +1023,7 @@ void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
// Store the index at the start.
llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
- new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);
+ createStoreInstBefore(Index, getNormalCleanupDestSlot(), BI);
// Adjust BI to point to the first cleanup block.
{
@@ -1112,23 +1142,24 @@ static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
// If it hasn't yet been used as either, we're done.
if (!needFlag) return;
- llvm::AllocaInst *var = Scope.getActiveFlag();
- if (!var) {
- var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive");
+ Address var = Scope.getActiveFlag();
+ if (!var.isValid()) {
+ var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), CharUnits::One(),
+ "cleanup.isactive");
Scope.setActiveFlag(var);
assert(dominatingIP && "no existing variable and no dominating IP!");
// Initialize to true or false depending on whether it was
// active up to this point.
- llvm::Value *value = CGF.Builder.getInt1(kind == ForDeactivation);
+ llvm::Constant *value = CGF.Builder.getInt1(kind == ForDeactivation);
// If we're in a conditional block, ignore the dominating IP and
// use the outermost conditional branch.
if (CGF.isInConditionalBranch()) {
CGF.setBeforeOutermostConditional(value, var);
} else {
- new llvm::StoreInst(value, var, dominatingIP);
+ createStoreInstBefore(value, var, dominatingIP);
}
}
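
Consolidating the +/- lines, the activation path now reads roughly as follows; `dominatingIP`, `kind`, and the `createStoreInstBefore` helper come from the surrounding function, and this is a sketch rather than verbatim code. The flag is an i1, which occupies one byte in memory, hence the explicit `CharUnits::One()`:

    // Allocate the i1 flag with an explicit one-byte alignment, then
    // initialize it at the dominating insertion point.
    Address var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(),
                                       CharUnits::One(), "cleanup.isactive");
    Scope.setActiveFlag(var);
    llvm::Constant *value = CGF.Builder.getInt1(kind == ForDeactivation);
    createStoreInstBefore(value, var, dominatingIP);
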
@@ -1170,17 +1201,17 @@ void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
Scope.setActive(false);
}
-llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
+Address CodeGenFunction::getNormalCleanupDestSlot() {
if (!NormalCleanupDest)
NormalCleanupDest =
CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
- return NormalCleanupDest;
+ return Address(NormalCleanupDest, CharUnits::fromQuantity(4));
}
/// Emits all the code to cause the given temporary to be cleaned up.
void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
QualType TempType,
- llvm::Value *Ptr) {
+ Address Ptr) {
pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject,
/*useEHCleanup*/ true);
}
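
Note the hard-coded `CharUnits::fromQuantity(4)` in `getNormalCleanupDestSlot`: the slot is an i32 alloca, so the patch simply reports its natural four-byte alignment rather than threading a computed value through. A caller-side sketch, using the `Address`-taking builder overloads this patch introduces:

    // Load the destination index from the slot; the load inherits the
    // 4-byte alignment carried by the Address.
    Address slot = getNormalCleanupDestSlot();
    assert(slot.getAlignment() == CharUnits::fromQuantity(4));
    llvm::Value *destIndex = Builder.CreateLoad(slot, "cleanup.dest");
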
diff --git a/clang/lib/CodeGen/CGCleanup.h b/clang/lib/CodeGen/CGCleanup.h
index dcbc40db873..d0569287ef0 100644
--- a/clang/lib/CodeGen/CGCleanup.h
+++ b/clang/lib/CodeGen/CGCleanup.h
@@ -15,6 +15,8 @@
#define LLVM_CLANG_LIB_CODEGEN_CGCLEANUP_H
#include "EHScopeStack.h"
+
+#include "Address.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
@@ -305,8 +307,14 @@ public:
bool isLifetimeMarker() const { return CleanupBits.IsLifetimeMarker; }
void setLifetimeMarker() { CleanupBits.IsLifetimeMarker = true; }
- llvm::AllocaInst *getActiveFlag() const { return ActiveFlag; }
- void setActiveFlag(llvm::AllocaInst *Var) { ActiveFlag = Var; }
+ bool hasActiveFlag() const { return ActiveFlag != nullptr; }
+ Address getActiveFlag() const {
+ return Address(ActiveFlag, CharUnits::One());
+ }
+ void setActiveFlag(Address Var) {
+ assert(Var.getAlignment().isOne());
+ ActiveFlag = cast<llvm::AllocaInst>(Var.getPointer());
+ }
void setTestFlagInNormalCleanup() {
CleanupBits.TestFlagInNormalCleanup = true;
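
The scope still stores a raw `llvm::AllocaInst*` internally (keeping the header light) but now mediates access through `Address`; the assert in `setActiveFlag` is what makes the fixed `CharUnits::One()` in `getActiveFlag` safe. A sketch of typical use:

    if (Scope.hasActiveFlag()) {
      Address flag = Scope.getActiveFlag(); // always one-byte aligned
      assert(flag.getAlignment().isOne());
    }
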
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
index fc25c467009..e4fe09c435b 100644
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
+#include "CGBlocks.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenCLRuntime.h"
@@ -340,17 +341,15 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
llvm::GlobalValue::LinkageTypes Linkage) {
- llvm::Value *&DMEntry = LocalDeclMap[&D];
- assert(!DMEntry && "Decl already exists in localdeclmap!");
-
// Check to see if we already have a global variable for this
// declaration. This can happen when double-emitting function
// bodies, e.g. with complete and base constructors.
llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
+ CharUnits alignment = getContext().getDeclAlign(&D);
// Store into LocalDeclMap before generating initializer to handle
// circular references.
- DMEntry = addr;
+ setAddrOfLocalVar(&D, Address(addr, alignment));
// We can't have a VLA here, but we can have a pointer to a VLA,
// even though that doesn't really make any sense.
@@ -367,7 +366,7 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
if (D.getInit())
var = AddInitializerToStaticVarDecl(D, var);
- var->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+ var->setAlignment(alignment.getQuantity());
if (D.hasAttr<AnnotateAttr>())
CGM.AddGlobalAnnotations(&D, var);
@@ -385,7 +384,8 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
// RAUW's the GV uses of this constant will be invalid.
llvm::Constant *castedAddr =
llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
- DMEntry = castedAddr;
+ if (var != castedAddr)
+ LocalDeclMap.find(&D)->second = Address(castedAddr, alignment);
CGM.setStaticLocalDeclAddress(&D, castedAddr);
CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);
@@ -401,13 +401,13 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
namespace {
struct DestroyObject final : EHScopeStack::Cleanup {
- DestroyObject(llvm::Value *addr, QualType type,
+ DestroyObject(Address addr, QualType type,
CodeGenFunction::Destroyer *destroyer,
bool useEHCleanupForArray)
: addr(addr), type(type), destroyer(destroyer),
useEHCleanupForArray(useEHCleanupForArray) {}
- llvm::Value *addr;
+ Address addr;
QualType type;
CodeGenFunction::Destroyer *destroyer;
bool useEHCleanupForArray;
@@ -422,14 +422,14 @@ namespace {
};
struct DestroyNRVOVariable final : EHScopeStack::Cleanup {
- DestroyNRVOVariable(llvm::Value *addr,
+ DestroyNRVOVariable(Address addr,
const CXXDestructorDecl *Dtor,
llvm::Value *NRVOFlag)
: Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(addr) {}
const CXXDestructorDecl *Dtor;
llvm::Value *NRVOFlag;
- llvm::Value *Loc;
+ Address Loc;
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Along the exceptions path we always execute the dtor.
@@ -440,7 +440,8 @@ namespace {
// If we exited via NRVO, we skip the destructor call.
llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
- llvm::Value *DidNRVO = CGF.Builder.CreateLoad(NRVOFlag, "nrvo.val");
+ llvm::Value *DidNRVO =
+ CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
CGF.EmitBlock(RunDtorBB);
}
@@ -455,8 +456,8 @@ namespace {
};
struct CallStackRestore final : EHScopeStack::Cleanup {
- llvm::Value *Stack;
- CallStackRestore(llvm::Value *Stack) : Stack(Stack) {}
+ Address Stack;
+ CallStackRestore(Address Stack) : Stack(Stack) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
llvm::Value *V = CGF.Builder.CreateLoad(Stack);
llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
@@ -493,7 +494,7 @@ namespace {
Var.getType(), VK_LValue, SourceLocation());
// Compute the address of the local variable, in case it's a byref
// or something.
- llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getAddress();
+ llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer();
// In some cases, the type of the function argument will be different from
// the type of the pointer. An example of this is
@@ -517,8 +518,8 @@ namespace {
llvm::Value *Addr;
llvm::Value *Size;
public:
- CallLifetimeEnd(llvm::Value *addr, llvm::Value *size)
- : Addr(addr), Size(size) {}
+ CallLifetimeEnd(Address addr, llvm::Value *size)
+ : Addr(addr.getPointer()), Size(size) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
CGF.EmitLifetimeEnd(Size, Addr);
@@ -529,7 +530,7 @@ namespace {
/// EmitAutoVarWithLifetime - Does the setup required for an automatic
/// variable with lifetime.
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
- llvm::Value *addr,
+ Address addr,
Qualifiers::ObjCLifetime lifetime) {
switch (lifetime) {
case Qualifiers::OCL_None:
@@ -599,7 +600,7 @@ static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
static void drillIntoBlockVariable(CodeGenFunction &CGF,
LValue &lvalue,
const VarDecl *var) {
- lvalue.setAddress(CGF.BuildBlockByrefAddress(lvalue.getAddress(), var));
+ lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(), var));
}
void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
@@ -637,15 +638,12 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
if (capturedByInit) {
// We can use a simple GEP for this because it can't have been
// moved yet.
- tempLV.setAddress(Builder.CreateStructGEP(
- nullptr, tempLV.getAddress(),
- getByRefValueLLVMField(cast<VarDecl>(D)).second));
+ tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(),
+ cast<VarDecl>(D),
+ /*follow*/ false));
}
- llvm::PointerType *ty
- = cast<llvm::PointerType>(tempLV.getAddress()->getType());
- ty = cast<llvm::PointerType>(ty->getElementType());
-
+ auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
llvm::Value *zero = llvm::ConstantPointerNull::get(ty);
// If __weak, we want to use a barrier under certain conditions.
@@ -789,7 +787,7 @@ static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
isa<llvm::ConstantExpr>(Init)) {
- Builder.CreateStore(Init, Loc, isVolatile);
+ Builder.CreateDefaultAlignedStore(Init, Loc, isVolatile);
return;
}
@@ -892,13 +890,12 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
emission.IsByRef = isByRef;
CharUnits alignment = getContext().getDeclAlign(&D);
- emission.Alignment = alignment;
// If the type is variably-modified, emit all the VLA sizes for it.
if (Ty->isVariablyModifiedType())
EmitVariablyModifiedType(Ty);
- llvm::Value *DeclPtr;
+ Address address = Address::invalid();
if (Ty->isConstantSizeType()) {
bool NRVO = getLangOpts().ElideConstructors &&
D.isNRVOVariable();
@@ -924,7 +921,8 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
CGM.isTypeConstant(Ty, true)) {
EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
- emission.Address = nullptr; // signal this condition to later callbacks
+ // Signal this condition to later callbacks.
+ emission.Addr = Address::invalid();
assert(emission.wasEmittedAsGlobal());
return emission;
}
@@ -935,13 +933,12 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// A normal fixed sized variable becomes an alloca in the entry block,
// unless it's an NRVO variable.
- llvm::Type *LTy = ConvertTypeForMem(Ty);
if (NRVO) {
// The named return value optimization: allocate this variable in the
// return slot, so that we can elide the copy when returning this
// variable (C++0x [class.copy]p34).
- DeclPtr = ReturnValue;
+ address = ReturnValue;
if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
@@ -949,34 +946,36 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// to this variable. Set it to zero to indicate that NRVO was not
// applied.
llvm::Value *Zero = Builder.getFalse();
- llvm::Value *NRVOFlag = CreateTempAlloca(Zero->getType(), "nrvo");
+ Address NRVOFlag =
+ CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
EnsureInsertPoint();
Builder.CreateStore(Zero, NRVOFlag);
// Record the NRVO flag for this variable.
- NRVOFlags[&D] = NRVOFlag;
- emission.NRVOFlag = NRVOFlag;
+ NRVOFlags[&D] = NRVOFlag.getPointer();
+ emission.NRVOFlag = NRVOFlag.getPointer();
}
}
} else {
- if (isByRef)
- LTy = BuildByRefType(&D);
-
- llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
- Alloc->setName(D.getName());
+ CharUnits allocaAlignment;
+ llvm::Type *allocaTy;
+ if (isByRef) {
+ auto &byrefInfo = getBlockByrefInfo(&D);
+ allocaTy = byrefInfo.Type;
+ allocaAlignment = byrefInfo.ByrefAlignment;
+ } else {
+ allocaTy = ConvertTypeForMem(Ty);
+ allocaAlignment = alignment;
+ }
- CharUnits allocaAlignment = alignment;
- if (isByRef)
- allocaAlignment = std::max(allocaAlignment,
- getContext().toCharUnitsFromBits(getTarget().getPointerAlign(0)));
- Alloc->setAlignment(allocaAlignment.getQuantity());
- DeclPtr = Alloc;
+ address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName());
// Emit a lifetime intrinsic if meaningful. There's no point
// in doing this if we don't have a valid insertion point (?).
- uint64_t size = CGM.getDataLayout().getTypeAllocSize(LTy);
if (HaveInsertPoint()) {
- emission.SizeForLifetimeMarkers = EmitLifetimeStart(size, Alloc);
+ uint64_t size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
+ emission.SizeForLifetimeMarkers =
+ EmitLifetimeStart(size, address.getPointer());
} else {
assert(!emission.useLifetimeMarkers());
}
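
From a caller's perspective, the emission now carries a single `Address` instead of a pointer plus a separately tracked alignment. A hedged sketch of consuming it, using method names introduced elsewhere in this patch:

    AutoVarEmission emission = EmitAutoVarAlloca(D);
    if (!emission.wasEmittedAsGlobal()) {
      // For __block variables this is the original stack object, not
      // the possibly forwarded copy.
      Address addr = emission.getObjectAddress(*this);
      LValue lv = MakeAddrLValue(addr, D.getType()); // alignment from addr
    }
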
@@ -986,11 +985,11 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
if (!DidCallStackSave) {
// Save the stack.
- llvm::Value *Stack = CreateTempAlloca(Int8PtrTy, "saved_stack");
+ Address Stack =
+ CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack");
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
llvm::Value *V = Builder.CreateCall(F);
-
Builder.CreateStore(V, Stack);
DidCallStackSave = true;
@@ -1010,13 +1009,11 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
llvm::AllocaInst *vla = Builder.CreateAlloca(llvmTy, elementCount, "vla");
vla->setAlignment(alignment.getQuantity());
- DeclPtr = vla;
+ address = Address(vla, alignment);
}
- llvm::Value *&DMEntry = LocalDeclMap[&D];
- assert(!DMEntry && "Decl already exists in localdeclmap!");
- DMEntry = DeclPtr;
- emission.Address = DeclPtr;
+ setAddrOfLocalVar(&D, address);
+ emission.Addr = address;
// Emit debug info for local var declaration.
if (HaveInsertPoint())
@@ -1024,12 +1021,12 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
if (CGM.getCodeGenOpts().getDebugInfo()
>= CodeGenOptions::LimitedDebugInfo) {
DI->setLocation(D.getLocation());
- DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
+ DI->EmitDeclareOfAutoVariable(&D, address.getPointer(), Builder);
}
}
if (D.hasAttr<AnnotateAttr>())
- EmitVarAnnotations(&D, emission.Address);
+ EmitVarAnnotations(&D, address.getPointer());
return emission;
}
@@ -1125,15 +1122,13 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
if (isTrivialInitializer(Init))
return;
- CharUnits alignment = emission.Alignment;
-
// Check whether this is a byref variable that's potentially
// captured and moved by its own initializer. If so, we'll need to
// emit the initializer first, then copy into the variable.
bool capturedByInit = emission.IsByRef && isCapturedBy(D, Init);
- llvm::Value *Loc =
- capturedByInit ? emission.Address : emission.getObjectAddress(*this);
+ Address Loc =
+ capturedByInit ? emission.Addr : emission.getObjectAddress(*this);
llvm::Constant *constant = nullptr;
if (emission.IsConstantAggregate || D.isConstexpr()) {
@@ -1142,14 +1137,14 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
}
if (!constant) {
- LValue lv = MakeAddrLValue(Loc, type, alignment);
+ LValue lv = MakeAddrLValue(Loc, type);
lv.setNonGC(true);
return EmitExprAsInit(Init, &D, lv, capturedByInit);
}
if (!emission.IsConstantAggregate) {
// For simple scalar/complex initialization, store the value directly.
- LValue lv = MakeAddrLValue(Loc, type, alignment);
+ LValue lv = MakeAddrLValue(Loc, type);
lv.setNonGC(true);
return EmitStoreThroughLValue(RValue::get(constant), lv, true);
}
@@ -1163,7 +1158,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
getContext().getTypeSizeInChars(type).getQuantity());
llvm::Type *BP = Int8PtrTy;
- if (Loc->getType() != BP)
+ if (Loc.getType() != BP)
Loc = Builder.CreateBitCast(Loc, BP);
// If the initializer is all or mostly zeros, codegen with memset then do
@@ -1171,11 +1166,12 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
if (shouldUseMemSetPlusStoresToInitialize(constant,
CGM.getDataLayout().getTypeAllocSize(constant->getType()))) {
Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
- alignment.getQuantity(), isVolatile);
+ isVolatile);
// Zero and undef don't require any stores.
if (!constant->isNullValue() && !isa<llvm::UndefValue>(constant)) {
Loc = Builder.CreateBitCast(Loc, constant->getType()->getPointerTo());
- emitStoresForInitAfterMemset(constant, Loc, isVolatile, Builder);
+ emitStoresForInitAfterMemset(constant, Loc.getPointer(),
+ isVolatile, Builder);
}
} else {
// Otherwise, create a temporary global with the initializer then
@@ -1185,15 +1181,14 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
new llvm::GlobalVariable(CGM.getModule(), constant->getType(), true,
llvm::GlobalValue::PrivateLinkage,
constant, Name);
- GV->setAlignment(alignment.getQuantity());
+ GV->setAlignment(Loc.getAlignment().getQuantity());
GV->setUnnamedAddr(true);
- llvm::Value *SrcPtr = GV;
- if (SrcPtr->getType() != BP)
+ Address SrcPtr = Address(GV, Loc.getAlignment());
+ if (SrcPtr.getType() != BP)
SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
- Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, alignment.getQuantity(),
- isVolatile);
+ Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, isVolatile);
}
}
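
With `Address` operands, the memset/memcpy builder overloads no longer take an explicit alignment argument; they derive it from the operands themselves. A sketch, assuming `Loc`, `SrcPtr`, `SizeVal`, and `isVolatile` as in the hunk above:

    // Alignment comes from Loc, and from both operands for the memcpy.
    Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0),
                         SizeVal, isVolatile);
    Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, isVolatile);
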
@@ -1254,7 +1249,7 @@ void CodeGenFunction::emitAutoVarTypeCleanup(
// Note that for __block variables, we want to destroy the
// original stack object, not the possibly forwarded object.
- llvm::Value *addr = emission.getObjectAddress(*this);
+ Address addr = emission.getObjectAddress(*this);
const VarDecl *var = emission.Variable;
QualType type = var->getType();
@@ -1272,8 +1267,8 @@ void CodeGenFunction::emitAutoVarTypeCleanup(
if (emission.NRVOFlag) {
assert(!type->isArrayType());
CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
- EHStack.pushCleanup<DestroyNRVOVariable>(cleanupKind, addr, dtor,
- emission.NRVOFlag);
+ EHStack.pushCleanup<DestroyNRVOVariable>(cleanupKind, addr,
+ dtor, emission.NRVOFlag);
return;
}
break;
@@ -1370,7 +1365,7 @@ CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
/// pushEHDestroy - Push the standard destructor for the given type as
/// an EH-only cleanup.
void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
- llvm::Value *addr, QualType type) {
+ Address addr, QualType type) {
assert(dtorKind && "cannot push destructor for trivial type");
assert(needsEHCleanup(dtorKind));
@@ -1380,7 +1375,7 @@ void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
/// pushDestroy - Push the standard destructor for the given type as
/// at least a normal cleanup.
void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
- llvm::Value *addr, QualType type) {
+ Address addr, QualType type) {
assert(dtorKind && "cannot push destructor for trivial type");
CleanupKind cleanupKind = getCleanupKind(dtorKind);
@@ -1388,19 +1383,19 @@ void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
cleanupKind & EHCleanup);
}
-void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, llvm::Value *addr,
+void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
QualType type, Destroyer *destroyer,
bool useEHCleanupForArray) {
pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
destroyer, useEHCleanupForArray);
}
-void CodeGenFunction::pushStackRestore(CleanupKind Kind, llvm::Value *SPMem) {
+void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
}
void CodeGenFunction::pushLifetimeExtendedDestroy(
- CleanupKind cleanupKind, llvm::Value *addr, QualType type,
+ CleanupKind cleanupKind, Address addr, QualType type,
Destroyer *destroyer, bool useEHCleanupForArray) {
assert(!isInConditionalBranch() &&
"performing lifetime extension from within conditional");
@@ -1430,15 +1425,18 @@ void CodeGenFunction::pushLifetimeExtendedDestroy(
/// \param useEHCleanupForArray - whether an EH cleanup should be
/// used when destroying array elements, in case one of the
/// destructions throws an exception
-void CodeGenFunction::emitDestroy(llvm::Value *addr, QualType type,
+void CodeGenFunction::emitDestroy(Address addr, QualType type,
Destroyer *destroyer,
bool useEHCleanupForArray) {
const ArrayType *arrayType = getContext().getAsArrayType(type);
if (!arrayType)
return destroyer(*this, addr, type);
- llvm::Value *begin = addr;
- llvm::Value *length = emitArrayLength(arrayType, type, begin);
+ llvm::Value *length = emitArrayLength(arrayType, type, addr);
+
+ CharUnits elementAlign =
+ addr.getAlignment()
+ .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
// Normally we have to check whether the array is zero-length.
bool checkZeroLength = true;
@@ -1450,8 +1448,9 @@ void CodeGenFunction::emitDestroy(llvm::Value *addr, QualType type,
checkZeroLength = false;
}
+ llvm::Value *begin = addr.getPointer();
llvm::Value *end = Builder.CreateInBoundsGEP(begin, length);
- emitArrayDestroy(begin, end, type, destroyer,
+ emitArrayDestroy(begin, end, type, elementAlign, destroyer,
checkZeroLength, useEHCleanupForArray);
}
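
A worked instance of `alignmentOfArrayElement`, which this hunk uses to bound the alignment of an arbitrary element: elements sit at multiples of the element size from a base with the array's alignment, so only their common power-of-two factor is guaranteed. Illustrative numbers, not from the patch:

    CharUnits arrayAlign = CharUnits::fromQuantity(16);
    CharUnits eltSize    = CharUnits::fromQuantity(12);
    // Elements lie at base + 0, 12, 24, ... from a 16-byte-aligned base,
    // so only 4-byte alignment is guaranteed for every element.
    CharUnits eltAlign = arrayAlign.alignmentOfArrayElement(eltSize);
    assert(eltAlign == CharUnits::fromQuantity(4));
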
@@ -1467,11 +1466,12 @@ void CodeGenFunction::emitDestroy(llvm::Value *addr, QualType type,
/// element throws
void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
llvm::Value *end,
- QualType type,
+ QualType elementType,
+ CharUnits elementAlign,
Destroyer *destroyer,
bool checkZeroLength,
bool useEHCleanup) {
- assert(!type->isArrayType());
+ assert(!elementType->isArrayType());
// The basic structure here is a do-while loop, because we don't
// need to check for the zero-element case.
@@ -1497,10 +1497,11 @@ void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
"arraydestroy.element");
if (useEHCleanup)
- pushRegularPartialArrayCleanup(begin, element, type, destroyer);
+ pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
+ destroyer);
// Perform the actual destruction there.
- destroyer(*this, element, type);
+ destroyer(*this, Address(element, elementAlign), elementType);
if (useEHCleanup)
PopCleanupBlock();
@@ -1518,7 +1519,7 @@ void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
/// emitArrayDestroy, the element type here may still be an array type.
static void emitPartialArrayDestroy(CodeGenFunction &CGF,
llvm::Value *begin, llvm::Value *end,
- QualType type,
+ QualType type, CharUnits elementAlign,
CodeGenFunction::Destroyer *destroyer) {
// If the element type is itself an array, drill down.
unsigned arrayDepth = 0;
@@ -1540,7 +1541,7 @@ static void emitPartialArrayDestroy(CodeGenFunction &CGF,
// Destroy the array. We don't ever need an EH cleanup because we
// assume that we're in an EH cleanup ourselves, so a throwing
// destructor causes an immediate terminate.
- CGF.emitArrayDestroy(begin, end, type, destroyer,
+ CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
/*checkZeroLength*/ true, /*useEHCleanup*/ false);
}
@@ -1553,16 +1554,18 @@ namespace {
llvm::Value *ArrayEnd;
QualType ElementType;
CodeGenFunction::Destroyer *Destroyer;
+ CharUnits ElementAlign;
public:
RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
- QualType elementType,
+ QualType elementType, CharUnits elementAlign,
CodeGenFunction::Destroyer *destroyer)
: ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
- ElementType(elementType), Destroyer(destroyer) {}
+ ElementType(elementType), Destroyer(destroyer),
+ ElementAlign(elementAlign) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
- ElementType, Destroyer);
+ ElementType, ElementAlign, Destroyer);
}
};
@@ -1571,21 +1574,24 @@ namespace {
/// determined and must be loaded from a local.
class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
llvm::Value *ArrayBegin;
- llvm::Value *ArrayEndPointer;
+ Address ArrayEndPointer;
QualType ElementType;
CodeGenFunction::Destroyer *Destroyer;
+ CharUnits ElementAlign;
public:
IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
- llvm::Value *arrayEndPointer,
+ Address arrayEndPointer,
QualType elementType,
+ CharUnits elementAlign,
CodeGenFunction::Destroyer *destroyer)
: ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
- ElementType(elementType), Destroyer(destroyer) {}
+ ElementType(elementType), Destroyer(destroyer),
+ ElementAlign(elementAlign) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
- ElementType, Destroyer);
+ ElementType, ElementAlign, Destroyer);
}
};
}
@@ -1597,12 +1603,14 @@ namespace {
/// \param elementType - the immediate element type of the array;
/// possibly still an array type
void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
- llvm::Value *arrayEndPointer,
+ Address arrayEndPointer,
QualType elementType,
+ CharUnits elementAlign,
Destroyer *destroyer) {
pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
arrayBegin, arrayEndPointer,
- elementType, destroyer);
+ elementType, elementAlign,
+ destroyer);
}
/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
@@ -1614,10 +1622,12 @@ void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
llvm::Value *arrayEnd,
QualType elementType,
+ CharUnits elementAlign,
Destroyer *destroyer) {
pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
arrayBegin, arrayEnd,
- elementType, destroyer);
+ elementType, elementAlign,
+ destroyer);
}
/// Lazily declare the @llvm.lifetime.start intrinsic.
@@ -1657,56 +1667,38 @@ namespace {
/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
-void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
- bool ArgIsPointer, unsigned ArgNo) {
+void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
+ unsigned ArgNo) {
// FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
"Invalid argument to EmitParmDecl");
- Arg->setName(D.getName());
+ Arg.getAnyValue()->setName(D.getName());
QualType Ty = D.getType();
// Use better IR generation for certain implicit parameters.
- if (isa<ImplicitParamDecl>(D)) {
+ if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
// The only implicit argument a block has is its literal.
+ // We assume this is always passed directly.
if (BlockInfo) {
- LocalDeclMap[&D] = Arg;
- llvm::Value *LocalAddr = nullptr;
- if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
- // Allocate a stack slot to let the debug info survive the RA.
- llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty),
- D.getName() + ".addr");
- Alloc->setAlignment(getContext().getDeclAlign(&D).getQuantity());
- LValue lv = MakeAddrLValue(Alloc, Ty, getContext().getDeclAlign(&D));
- EmitStoreOfScalar(Arg, lv, /* isInitialization */ true);
- LocalAddr = Builder.CreateLoad(Alloc);
- }
-
- if (CGDebugInfo *DI = getDebugInfo()) {
- if (CGM.getCodeGenOpts().getDebugInfo()
- >= CodeGenOptions::LimitedDebugInfo) {
- DI->setLocation(D.getLocation());
- DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, Arg, ArgNo,
- LocalAddr, Builder);
- }
- }
-
+ setBlockContextParameter(IPD, ArgNo, Arg.getDirectValue());
return;
}
}
- llvm::Value *DeclPtr;
+ Address DeclPtr = Address::invalid();
bool DoStore = false;
bool IsScalar = hasScalarEvaluationKind(Ty);
- CharUnits Align = getContext().getDeclAlign(&D);
// If we already have a pointer to the argument, reuse the input pointer.
- if (ArgIsPointer) {
+ if (Arg.isIndirect()) {
+ DeclPtr = Arg.getIndirectAddress();
// If we have a prettier pointer type at this point, bitcast to that.
- unsigned AS = cast<llvm::PointerType>(Arg->getType())->getAddressSpace();
+ unsigned AS = DeclPtr.getType()->getAddressSpace();
llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
- DeclPtr = Arg->getType() == IRTy ? Arg : Builder.CreateBitCast(Arg, IRTy,
- D.getName());
+ if (DeclPtr.getType() != IRTy)
+ DeclPtr = Builder.CreateBitCast(DeclPtr, IRTy, D.getName());
+
// Push a destructor cleanup for this parameter if the ABI requires it.
// Don't push a cleanup in a thunk for a method that will also emit a
// cleanup.
@@ -1718,14 +1710,14 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
}
} else {
// Otherwise, create a temporary to hold the value.
- llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty),
- D.getName() + ".addr");
- Alloc->setAlignment(Align.getQuantity());
- DeclPtr = Alloc;
+ DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
+ D.getName() + ".addr");
DoStore = true;
}
- LValue lv = MakeAddrLValue(DeclPtr, Ty, Align);
+ llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);
+
+ LValue lv = MakeAddrLValue(DeclPtr, Ty);
if (IsScalar) {
Qualifiers qs = Ty.getQualifiers();
if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
@@ -1755,26 +1747,26 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
// objc_storeStrong attempts to release its old value.
llvm::Value *Null = CGM.EmitNullConstant(D.getType());
EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
- EmitARCStoreStrongCall(lv.getAddress(), Arg, true);
+ EmitARCStoreStrongCall(lv.getAddress(), ArgVal, true);
DoStore = false;
}
else
// Don't use objc_retainBlock for block pointers, because we
// don't want to Block_copy something just because we got it
// as a parameter.
- Arg = EmitARCRetainNonBlock(Arg);
+ ArgVal = EmitARCRetainNonBlock(ArgVal);
}
} else {
// Push the cleanup for a consumed parameter.
if (isConsumed) {
ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
? ARCPreciseLifetime : ARCImpreciseLifetime);
- EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), Arg,
+ EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
precise);
}
if (lt == Qualifiers::OCL_Weak) {
- EmitARCInitWeak(DeclPtr, Arg);
+ EmitARCInitWeak(DeclPtr, ArgVal);
DoStore = false; // The weak init is a store, no need to do two.
}
}
@@ -1786,20 +1778,18 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
// Store the initial value into the alloca.
if (DoStore)
- EmitStoreOfScalar(Arg, lv, /* isInitialization */ true);
+ EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);
- llvm::Value *&DMEntry = LocalDeclMap[&D];
- assert(!DMEntry && "Decl already exists in localdeclmap!");
- DMEntry = DeclPtr;
+ setAddrOfLocalVar(&D, DeclPtr);
// Emit debug info for param declaration.
if (CGDebugInfo *DI = getDebugInfo()) {
if (CGM.getCodeGenOpts().getDebugInfo()
>= CodeGenOptions::LimitedDebugInfo) {
- DI->EmitDeclareOfArgVariable(&D, DeclPtr, ArgNo, Builder);
+ DI->EmitDeclareOfArgVariable(&D, DeclPtr.getPointer(), ArgNo, Builder);
}
}
if (D.hasAttr<AnnotateAttr>())
- EmitVarAnnotations(&D, DeclPtr);
+ EmitVarAnnotations(&D, DeclPtr.getPointer());
}
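
The new `ParamValue` argument folds the old `(llvm::Value*, bool ArgIsPointer)` pair into one type. A sketch of the two cases as this function now handles them; all names are the ones used above:

    if (Arg.isIndirect()) {
      // The caller already gave us memory; adopt it as the variable.
      Address DeclPtr = Arg.getIndirectAddress();
      setAddrOfLocalVar(&D, DeclPtr);
    } else {
      // A direct SSA value needs a home with the declared alignment.
      Address DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
                                      D.getName() + ".addr");
      Builder.CreateStore(Arg.getDirectValue(), DeclPtr);
      setAddrOfLocalVar(&D, DeclPtr);
    }
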
diff --git a/clang/lib/CodeGen/CGDeclCXX.cpp b/clang/lib/CodeGen/CGDeclCXX.cpp
index 00d6d5cee74..f10be1bb5d2 100644
--- a/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -24,16 +24,13 @@ using namespace clang;
using namespace CodeGen;
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
- llvm::Constant *DeclPtr) {
+ ConstantAddress DeclPtr) {
assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
assert(!D.getType()->isReferenceType() &&
"Should not call EmitDeclInit on a reference!");
- ASTContext &Context = CGF.getContext();
-
- CharUnits alignment = Context.getDeclAlign(&D);
QualType type = D.getType();
- LValue lv = CGF.MakeAddrLValue(DeclPtr, type, alignment);
+ LValue lv = CGF.MakeAddrLValue(DeclPtr, type);
const Expr *Init = D.getInit();
switch (CGF.getEvaluationKind(type)) {
@@ -64,7 +61,7 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
- llvm::Constant *addr) {
+ ConstantAddress addr) {
CodeGenModule &CGM = CGF.CGM;
// FIXME: __attribute__((cleanup)) ?
@@ -99,7 +96,7 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
function = CGM.getAddrOfCXXStructor(dtor, StructorType::Complete);
argument = llvm::ConstantExpr::getBitCast(
- addr, CGF.getTypes().ConvertType(type)->getPointerTo());
+ addr.getPointer(), CGF.getTypes().ConvertType(type)->getPointerTo());
// Otherwise, the standard logic requires a helper function.
} else {
@@ -162,25 +159,26 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
}
+ ConstantAddress DeclAddr(DeclPtr, getContext().getDeclAlign(&D));
+
if (!T->isReferenceType()) {
if (getLangOpts().OpenMP && D.hasAttr<OMPThreadPrivateDeclAttr>())
(void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
- &D, DeclPtr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
+ &D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
PerformInit, this);
if (PerformInit)
- EmitDeclInit(*this, D, DeclPtr);
+ EmitDeclInit(*this, D, DeclAddr);
if (CGM.isTypeConstant(D.getType(), true))
EmitDeclInvariant(*this, D, DeclPtr);
else
- EmitDeclDestroy(*this, D, DeclPtr);
+ EmitDeclDestroy(*this, D, DeclAddr);
return;
}
assert(PerformInit && "cannot have constant initializer which needs "
"destruction for reference");
- unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
RValue RV = EmitReferenceBindingToExpr(Init);
- EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, Alignment, T);
+ EmitStoreOfScalar(RV.getScalarVal(), DeclAddr, false, T);
}
/// Create a stub function, suitable for being passed to atexit,
@@ -498,7 +496,7 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
ArrayRef<llvm::Function *> Decls,
- llvm::GlobalVariable *Guard) {
+ Address Guard) {
{
auto NL = ApplyDebugLocation::CreateEmpty(*this);
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
@@ -507,7 +505,7 @@ CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
auto AL = ApplyDebugLocation::CreateArtificial(*this);
llvm::BasicBlock *ExitBlock = nullptr;
- if (Guard) {
+ if (Guard.isValid()) {
// If we have a guard variable, check whether we've already performed
// these initializations. This happens for TLS initialization functions.
llvm::Value *GuardVal = Builder.CreateLoad(Guard);
@@ -572,9 +570,10 @@ void CodeGenFunction::GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
}
/// generateDestroyHelper - Generates a helper function which, when
-/// invoked, destroys the given object.
+/// invoked, destroys the given object. The address of the object
+/// should be in global memory.
llvm::Function *CodeGenFunction::generateDestroyHelper(
- llvm::Constant *addr, QualType type, Destroyer *destroyer,
+ Address addr, QualType type, Destroyer *destroyer,
bool useEHCleanupForArray, const VarDecl *VD) {
FunctionArgList args;
ImplicitParamDecl dst(getContext(), nullptr, SourceLocation(), nullptr,
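
`ConstantAddress` is assumed here to be the `Address` variant whose pointer is an `llvm::Constant*`, which is why `EmitDeclDestroy` can feed `addr.getPointer()` straight into a `ConstantExpr` cast. A sketch of the wrapping done above:

    ConstantAddress DeclAddr(DeclPtr, CGF.getContext().getDeclAlign(&D));
    llvm::Constant *argument = llvm::ConstantExpr::getBitCast(
        DeclAddr.getPointer(),
        CGF.getTypes().ConvertType(type)->getPointerTo());
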
diff --git a/clang/lib/CodeGen/CGException.cpp b/clang/lib/CodeGen/CGException.cpp
index 974da245631..8de626e1741 100644
--- a/clang/lib/CodeGen/CGException.cpp
+++ b/clang/lib/CodeGen/CGException.cpp
@@ -340,16 +340,16 @@ namespace {
// differs from EmitAnyExprToMem only in that, if a final copy-ctor
// call is required, an exception within that copy ctor causes
// std::terminate to be invoked.
-void CodeGenFunction::EmitAnyExprToExn(const Expr *e, llvm::Value *addr) {
+void CodeGenFunction::EmitAnyExprToExn(const Expr *e, Address addr) {
// Make sure the exception object is cleaned up if there's an
// exception during initialization.
- pushFullExprCleanup<FreeException>(EHCleanup, addr);
+ pushFullExprCleanup<FreeException>(EHCleanup, addr.getPointer());
EHScopeStack::stable_iterator cleanup = EHStack.stable_begin();
// __cxa_allocate_exception returns a void*; we need to cast this
// to the appropriate type for the object.
llvm::Type *ty = ConvertTypeForMem(e->getType())->getPointerTo();
- llvm::Value *typedAddr = Builder.CreateBitCast(addr, ty);
+ Address typedAddr = Builder.CreateBitCast(addr, ty);
// FIXME: this isn't quite right! If there's a final unelided call
// to a copy constructor, then according to [except.terminate]p1 we
@@ -362,19 +362,20 @@ void CodeGenFunction::EmitAnyExprToExn(const Expr *e, llvm::Value *addr) {
/*IsInit*/ true);
// Deactivate the cleanup block.
- DeactivateCleanupBlock(cleanup, cast<llvm::Instruction>(typedAddr));
+ DeactivateCleanupBlock(cleanup,
+ cast<llvm::Instruction>(typedAddr.getPointer()));
}
-llvm::Value *CodeGenFunction::getExceptionSlot() {
+Address CodeGenFunction::getExceptionSlot() {
if (!ExceptionSlot)
ExceptionSlot = CreateTempAlloca(Int8PtrTy, "exn.slot");
- return ExceptionSlot;
+ return Address(ExceptionSlot, getPointerAlign());
}
-llvm::Value *CodeGenFunction::getEHSelectorSlot() {
+Address CodeGenFunction::getEHSelectorSlot() {
if (!EHSelectorSlot)
EHSelectorSlot = CreateTempAlloca(Int32Ty, "ehselector.slot");
- return EHSelectorSlot;
+ return Address(EHSelectorSlot, CharUnits::fromQuantity(4));
}
llvm::Value *CodeGenFunction::getExceptionFromSlot() {
@@ -626,7 +627,7 @@ CodeGenFunction::getMSVCDispatchBlock(EHScopeStack::stable_iterator SI) {
DispatchBlock = getTerminateHandler();
else
DispatchBlock = createBasicBlock();
- CGBuilderTy Builder(DispatchBlock);
+ CGBuilderTy Builder(*this, DispatchBlock);
switch (EHS.getKind()) {
case EHScope::Catch:
@@ -879,7 +880,7 @@ static llvm::BasicBlock *emitMSVCCatchDispatchBlock(CodeGenFunction &CGF,
// block is the block for the enclosing EH scope.
if (I + 1 == E) {
NextBlock = CGF.createBasicBlock("catchendblock");
- CGBuilderTy(NextBlock).CreateCatchEndPad(
+ CGBuilderTy(CGF, NextBlock).CreateCatchEndPad(
CGF.getEHDispatchBlock(CatchScope.getEnclosingEHScope()));
} else {
NextBlock = CGF.createBasicBlock("catch.dispatch");
@@ -1098,7 +1099,7 @@ namespace {
CGF.createBasicBlock("finally.cleanup.cont");
llvm::Value *ShouldEndCatch =
- CGF.Builder.CreateLoad(ForEHVar, "finally.endcatch");
+ CGF.Builder.CreateFlagLoad(ForEHVar, "finally.endcatch");
CGF.Builder.CreateCondBr(ShouldEndCatch, EndCatchBB, CleanupContBB);
CGF.EmitBlock(EndCatchBB);
CGF.EmitRuntimeCallOrInvoke(EndCatchFn); // catch-all, so might throw
@@ -1141,13 +1142,13 @@ namespace {
llvm::BasicBlock *ContBB = CGF.createBasicBlock("finally.cont");
llvm::Value *ShouldRethrow =
- CGF.Builder.CreateLoad(ForEHVar, "finally.shouldthrow");
+ CGF.Builder.CreateFlagLoad(ForEHVar, "finally.shouldthrow");
CGF.Builder.CreateCondBr(ShouldRethrow, RethrowBB, ContBB);
CGF.EmitBlock(RethrowBB);
if (SavedExnVar) {
CGF.EmitRuntimeCallOrInvoke(RethrowFn,
- CGF.Builder.CreateLoad(SavedExnVar));
+ CGF.Builder.CreateAlignedLoad(SavedExnVar, CGF.getPointerAlign()));
} else {
CGF.EmitRuntimeCallOrInvoke(RethrowFn);
}
@@ -1222,7 +1223,7 @@ void CodeGenFunction::FinallyInfo::enter(CodeGenFunction &CGF,
// Whether the finally block is being executed for EH purposes.
ForEHVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "finally.for-eh");
- CGF.Builder.CreateStore(CGF.Builder.getFalse(), ForEHVar);
+ CGF.Builder.CreateFlagStore(false, ForEHVar);
// Enter a normal cleanup which will perform the @finally block.
CGF.EHStack.pushCleanup<PerformFinally>(NormalCleanup, body,
@@ -1260,11 +1261,11 @@ void CodeGenFunction::FinallyInfo::exit(CodeGenFunction &CGF) {
// If we need to remember the exception pointer to rethrow later, do so.
if (SavedExnVar) {
if (!exn) exn = CGF.getExceptionFromSlot();
- CGF.Builder.CreateStore(exn, SavedExnVar);
+ CGF.Builder.CreateAlignedStore(exn, SavedExnVar, CGF.getPointerAlign());
}
// Tell the cleanups in the finally block that we're doing this for EH.
- CGF.Builder.CreateStore(CGF.Builder.getTrue(), ForEHVar);
+ CGF.Builder.CreateFlagStore(true, ForEHVar);
// Thread a jump through the finally cleanup.
CGF.EmitBranchThroughCleanup(RethrowDest);
@@ -1433,13 +1434,13 @@ struct CaptureFinder : ConstStmtVisitor<CaptureFinder> {
CodeGenFunction &ParentCGF;
const VarDecl *ParentThis;
SmallVector<const VarDecl *, 4> Captures;
- llvm::Value *SEHCodeSlot = nullptr;
+ Address SEHCodeSlot = Address::invalid();
CaptureFinder(CodeGenFunction &ParentCGF, const VarDecl *ParentThis)
: ParentCGF(ParentCGF), ParentThis(ParentThis) {}
// Return true if we need to do any capturing work.
bool foundCaptures() {
- return !Captures.empty() || SEHCodeSlot;
+ return !Captures.empty() || SEHCodeSlot.isValid();
}
void Visit(const Stmt *S) {
@@ -1478,7 +1479,7 @@ struct CaptureFinder : ConstStmtVisitor<CaptureFinder> {
// This is the simple case where we are the outermost finally. All we
// have to do here is make sure we escape this and recover it in the
// outlined handler.
- if (!SEHCodeSlot)
+ if (!SEHCodeSlot.isValid())
SEHCodeSlot = ParentCGF.SEHCodeSlotStack.back();
break;
}
@@ -1486,11 +1487,11 @@ struct CaptureFinder : ConstStmtVisitor<CaptureFinder> {
};
}
-llvm::Value *CodeGenFunction::recoverAddrOfEscapedLocal(
- CodeGenFunction &ParentCGF, llvm::Value *ParentVar, llvm::Value *ParentFP) {
+Address CodeGenFunction::recoverAddrOfEscapedLocal(
+ CodeGenFunction &ParentCGF, Address ParentVar, llvm::Value *ParentFP) {
llvm::CallInst *RecoverCall = nullptr;
- CGBuilderTy Builder(AllocaInsertPt);
- if (auto *ParentAlloca = dyn_cast<llvm::AllocaInst>(ParentVar)) {
+ CGBuilderTy Builder(*this, AllocaInsertPt);
+ if (auto *ParentAlloca = dyn_cast<llvm::AllocaInst>(ParentVar.getPointer())) {
// Mark the variable escaped if nobody else referenced it and compute the
// localescape index.
auto InsertPair = ParentCGF.EscapedLocals.insert(
@@ -1510,7 +1511,7 @@ llvm::Value *CodeGenFunction::recoverAddrOfEscapedLocal(
// Just clone the existing localrecover call, but tweak the FP argument to
// use our FP value. All other arguments are constants.
auto *ParentRecover =
- cast<llvm::IntrinsicInst>(ParentVar->stripPointerCasts());
+ cast<llvm::IntrinsicInst>(ParentVar.getPointer()->stripPointerCasts());
assert(ParentRecover->getIntrinsicID() == llvm::Intrinsic::localrecover &&
"expected alloca or localrecover in parent LocalDeclMap");
RecoverCall = cast<llvm::CallInst>(ParentRecover->clone());
@@ -1520,9 +1521,9 @@ llvm::Value *CodeGenFunction::recoverAddrOfEscapedLocal(
// Bitcast the variable, rename it, and insert it in the local decl map.
llvm::Value *ChildVar =
- Builder.CreateBitCast(RecoverCall, ParentVar->getType());
- ChildVar->setName(ParentVar->getName());
- return ChildVar;
+ Builder.CreateBitCast(RecoverCall, ParentVar.getType());
+ ChildVar->setName(ParentVar.getName());
+ return Address(ChildVar, ParentVar.getAlignment());
}
void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
@@ -1548,7 +1549,7 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
// EH registration is passed in as the EBP physical register. We can
// recover that with llvm.frameaddress(1), and adjust that to recover the
// parent's true frame pointer.
- CGBuilderTy Builder(AllocaInsertPt);
+ CGBuilderTy Builder(CGM, AllocaInsertPt);
EntryEBP = Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::frameaddress), {Builder.getInt32(1)});
llvm::Function *RecoverFPIntrin =
@@ -1583,13 +1584,13 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
auto I = ParentCGF.LocalDeclMap.find(VD);
if (I == ParentCGF.LocalDeclMap.end())
continue;
- llvm::Value *ParentVar = I->second;
- LocalDeclMap[VD] =
- recoverAddrOfEscapedLocal(ParentCGF, ParentVar, ParentFP);
+ Address ParentVar = I->second;
+ setAddrOfLocalVar(VD,
+ recoverAddrOfEscapedLocal(ParentCGF, ParentVar, ParentFP));
}
- if (Finder.SEHCodeSlot) {
+ if (Finder.SEHCodeSlot.isValid()) {
SEHCodeSlotStack.push_back(
recoverAddrOfEscapedLocal(ParentCGF, Finder.SEHCodeSlot, ParentFP));
}
@@ -1727,7 +1728,7 @@ void CodeGenFunction::EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
// load the pointer.
SEHInfo = Builder.CreateConstInBoundsGEP1_32(Int8Ty, EntryEBP, -20);
SEHInfo = Builder.CreateBitCast(SEHInfo, Int8PtrTy->getPointerTo());
- SEHInfo = Builder.CreateLoad(Int8PtrTy, SEHInfo);
+ SEHInfo = Builder.CreateAlignedLoad(Int8PtrTy, SEHInfo, getPointerAlign());
SEHCodeSlotStack.push_back(recoverAddrOfEscapedLocal(
ParentCGF, ParentCGF.SEHCodeSlotStack.back(), ParentFP));
}
@@ -1743,8 +1744,8 @@ void CodeGenFunction::EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
llvm::Type *PtrsTy = llvm::StructType::get(RecordTy, CGM.VoidPtrTy, nullptr);
llvm::Value *Ptrs = Builder.CreateBitCast(SEHInfo, PtrsTy->getPointerTo());
llvm::Value *Rec = Builder.CreateStructGEP(PtrsTy, Ptrs, 0);
- Rec = Builder.CreateLoad(Rec);
- llvm::Value *Code = Builder.CreateLoad(Rec);
+ Rec = Builder.CreateAlignedLoad(Rec, getPointerAlign());
+ llvm::Value *Code = Builder.CreateAlignedLoad(Rec, getIntAlign());
assert(!SEHCodeSlotStack.empty() && "emitting EH code outside of __except");
Builder.CreateStore(Code, SEHCodeSlotStack.back());
}
@@ -1760,7 +1761,7 @@ llvm::Value *CodeGenFunction::EmitSEHExceptionInfo() {
llvm::Value *CodeGenFunction::EmitSEHExceptionCode() {
assert(!SEHCodeSlotStack.empty() && "emitting EH code outside of __except");
- return Builder.CreateLoad(Int32Ty, SEHCodeSlotStack.back());
+ return Builder.CreateLoad(SEHCodeSlotStack.back());
}
llvm::Value *CodeGenFunction::EmitSEHAbnormalTermination() {
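
`CreateFlagLoad` and `CreateFlagStore` (from the CGBuilder changes, not shown in this file) are assumed to be one-byte-aligned i1 helpers for raw flag pointers such as `ForEHVar`. A sketch of the pairing used throughout the finally-block code above:

    CGF.Builder.CreateFlagStore(true, ForEHVar); // mark: running for EH
    llvm::Value *isForEH =
        CGF.Builder.CreateFlagLoad(ForEHVar, "finally.for-eh");
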
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index b38afc9b37a..6635e570c64 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -54,6 +54,15 @@ llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
+Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
+ const Twine &Name) {
+ auto Alloca = CreateTempAlloca(Ty, Name);
+ Alloca->setAlignment(Align.getQuantity());
+ return Address(Alloca, Align);
+}
+
+/// CreateTempAlloca - This creates an alloca and inserts it into the entry
+/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
const Twine &Name) {
if (!Builder.isNamePreserving())
@@ -61,29 +70,38 @@ llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
return new llvm::AllocaInst(Ty, nullptr, Name, AllocaInsertPt);
}
-void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
- llvm::Value *Init) {
- auto *Store = new llvm::StoreInst(Init, Var);
+/// CreateDefaultAlignTempAlloca - This creates an alloca with the
+/// default alignment of the corresponding LLVM type, which is *not*
+/// guaranteed to be related in any way to the expected alignment of
+/// an AST type that might have been lowered to Ty.
+Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
+ const Twine &Name) {
+ CharUnits Align =
+ CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlignment(Ty));
+ return CreateTempAlloca(Ty, Align, Name);
+}
+
+void CodeGenFunction::InitTempAlloca(Address Var, llvm::Value *Init) {
+ assert(isa<llvm::AllocaInst>(Var.getPointer()));
+ auto *Store = new llvm::StoreInst(Init, Var.getPointer());
+ Store->setAlignment(Var.getAlignment().getQuantity());
llvm::BasicBlock *Block = AllocaInsertPt->getParent();
Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
}
-llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
- const Twine &Name) {
- llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
- // FIXME: Should we prefer the preferred type alignment here?
+Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
CharUnits Align = getContext().getTypeAlignInChars(Ty);
- Alloc->setAlignment(Align.getQuantity());
- return Alloc;
+ return CreateTempAlloca(ConvertType(Ty), Align, Name);
}
-llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
- const Twine &Name) {
- llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
+Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name) {
// FIXME: Should we prefer the preferred type alignment here?
- CharUnits Align = getContext().getTypeAlignInChars(Ty);
- Alloc->setAlignment(Align.getQuantity());
- return Alloc;
+ return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name);
+}
+
+Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
+ const Twine &Name) {
+ return CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name);
}
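
Taken together, the temp-allocation helpers after this hunk differ only in how the IR type and alignment are chosen; the result always carries its alignment. A usage sketch (cached types such as `Int8PtrTy` and `Int32Ty` come from CodeGenTypeCache):

    Address a = CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack");
    Address b = CreateIRTemp(Ty);             // ConvertType + type alignment
    Address c = CreateMemTemp(Ty, "mem.tmp"); // ConvertTypeForMem + type alignment
    Address d = CreateDefaultAlignTempAlloca(Int32Ty, "tmp"); // ABI align of IR type
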
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
@@ -148,20 +166,18 @@ RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
- llvm::Value *Location,
+ Address Location,
Qualifiers Quals,
bool IsInit) {
// FIXME: This function should take an LValue as an argument.
switch (getEvaluationKind(E->getType())) {
case TEK_Complex:
- EmitComplexExprIntoLValue(E,
- MakeNaturalAlignAddrLValue(Location, E->getType()),
+ EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
/*isInit*/ false);
return;
case TEK_Aggregate: {
- CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
- EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
+ EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
AggValueSlot::IsDestructed_t(IsInit),
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsAliased_t(!IsInit)));
@@ -180,7 +196,7 @@ void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
static void
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
- const Expr *E, llvm::Value *ReferenceTemporary) {
+ const Expr *E, Address ReferenceTemporary) {
// Objective-C++ ARC:
// If we are binding a reference to a temporary that has ownership, we
// need to perform retain/release operations on the temporary.
@@ -266,14 +282,14 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
llvm::Constant *CleanupArg;
if (E->getType()->isArrayType()) {
CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
- cast<llvm::Constant>(ReferenceTemporary), E->getType(),
+ ReferenceTemporary, E->getType(),
CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
} else {
CleanupFn = CGF.CGM.getAddrOfCXXStructor(ReferenceTemporaryDtor,
StructorType::Complete);
- CleanupArg = cast<llvm::Constant>(ReferenceTemporary);
+ CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer());
}
CGF.CGM.getCXXABI().registerGlobalDtor(
CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
@@ -298,7 +314,7 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
}
}
-static llvm::Value *
+static Address
createReferenceTemporary(CodeGenFunction &CGF,
const MaterializeTemporaryExpr *M, const Expr *Inner) {
switch (M->getStorageDuration()) {
@@ -316,10 +332,10 @@ createReferenceTemporary(CodeGenFunction &CGF,
auto *GV = new llvm::GlobalVariable(
CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp");
- GV->setAlignment(
- CGF.getContext().getTypeAlignInChars(Ty).getQuantity());
+ CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
+ GV->setAlignment(alignment.getQuantity());
// FIXME: Should we put the new global into a COMDAT?
- return GV;
+ return Address(GV, alignment);
}
return CGF.CreateMemTemp(Ty, "ref.tmp");
}
@@ -343,16 +359,19 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
M->getType()->isObjCLifetimeType() &&
M->getType().getObjCLifetime() != Qualifiers::OCL_None &&
M->getType().getObjCLifetime() != Qualifiers::OCL_ExplicitNone) {
- llvm::Value *Object = createReferenceTemporary(*this, M, E);
- if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object)) {
- Object = llvm::ConstantExpr::getBitCast(
- Var, ConvertTypeForMem(E->getType())->getPointerTo());
+ Address Object = createReferenceTemporary(*this, M, E);
+ if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
+ Object = Address(llvm::ConstantExpr::getBitCast(Var,
+ ConvertTypeForMem(E->getType())
+ ->getPointerTo(Object.getAddressSpace())),
+ Object.getAlignment());
// We should not have emitted the initializer for this temporary as a
// constant.
assert(!Var->hasInitializer());
Var->setInitializer(CGM.EmitNullConstant(E->getType()));
}
- LValue RefTempDst = MakeAddrLValue(Object, M->getType());
+ LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
+ AlignmentSource::Decl);
switch (getEvaluationKind(E->getType())) {
default: llvm_unreachable("expected scalar or aggregate expression");
@@ -360,8 +379,7 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
break;
case TEK_Aggregate: {
- CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
- EmitAggExpr(E, AggValueSlot::forAddr(Object, Alignment,
+ EmitAggExpr(E, AggValueSlot::forAddr(Object,
E->getType().getQualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
@@ -389,10 +407,11 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
}
// Create and initialize the reference temporary.
- llvm::Value *Object = createReferenceTemporary(*this, M, E);
- if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object)) {
- Object = llvm::ConstantExpr::getBitCast(
- Var, ConvertTypeForMem(E->getType())->getPointerTo());
+ Address Object = createReferenceTemporary(*this, M, E);
+ if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
+ Object = Address(llvm::ConstantExpr::getBitCast(
+ Var, ConvertTypeForMem(E->getType())->getPointerTo()),
+ Object.getAlignment());
// If the temporary is a global and has a constant initializer or is a
// constant temporary that we promoted to a global, we may have already
// initialized it.
@@ -420,7 +439,8 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
break;
case SubobjectAdjustment::FieldAdjustment: {
- LValue LV = MakeAddrLValue(Object, E->getType());
+ LValue LV = MakeAddrLValue(Object, E->getType(),
+ AlignmentSource::Decl);
LV = EmitLValueForField(LV, Adjustment.Field);
assert(LV.isSimple() &&
"materialized temporary field is not a simple lvalue");
@@ -430,14 +450,14 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
case SubobjectAdjustment::MemberPointerAdjustment: {
llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
- Object = CGM.getCXXABI().EmitMemberDataPointerAddress(
- *this, E, Object, Ptr, Adjustment.Ptr.MPT);
+ Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
+ Adjustment.Ptr.MPT);
break;
}
}
}
- return MakeAddrLValue(Object, M->getType());
+ return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
}
RValue
@@ -445,7 +465,7 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
// Emit the expression as an lvalue.
LValue LV = EmitLValue(E);
assert(LV.isSimple());
- llvm::Value *Value = LV.getAddress();
+ llvm::Value *Value = LV.getPointer();
if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
// C++11 [dcl.ref]p5 (as amended by core issue 453):
@@ -489,7 +509,7 @@ bool CodeGenFunction::sanitizePerformTypeCheck() const {
}
void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
- llvm::Value *Address, QualType Ty,
+ llvm::Value *Ptr, QualType Ty,
CharUnits Alignment, bool SkipNullCheck) {
if (!sanitizePerformTypeCheck())
return;
@@ -497,7 +517,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
// Don't check pointers outside the default address space. The null check
// isn't correct, the object-size check isn't supported by LLVM, and we can't
// communicate the addresses to the runtime handler for the vptr check.
- if (Address->getType()->getPointerAddressSpace())
+ if (Ptr->getType()->getPointerAddressSpace())
return;
SanitizerScope SanScope(this);
@@ -510,8 +530,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
!SkipNullCheck) {
// The glvalue must not be an empty glvalue.
- llvm::Value *IsNonNull = Builder.CreateICmpNE(
- Address, llvm::Constant::getNullValue(Address->getType()));
+ llvm::Value *IsNonNull = Builder.CreateIsNotNull(Ptr);
if (AllowNullPointers) {
// When performing pointer casts, it's OK if the value is null.
@@ -535,7 +554,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
llvm::Value *Min = Builder.getFalse();
- llvm::Value *CastAddr = Builder.CreateBitCast(Address, Int8PtrTy);
+ llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy);
llvm::Value *LargeEnough =
Builder.CreateICmpUGE(Builder.CreateCall(F, {CastAddr, Min}),
llvm::ConstantInt::get(IntPtrTy, Size));
@@ -552,7 +571,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
// The glvalue must be suitably aligned.
if (AlignVal) {
llvm::Value *Align =
- Builder.CreateAnd(Builder.CreatePtrToInt(Address, IntPtrTy),
+ Builder.CreateAnd(Builder.CreatePtrToInt(Ptr, IntPtrTy),
llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
llvm::Value *Aligned =
Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
@@ -567,7 +586,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::ConstantInt::get(SizeTy, AlignVal),
llvm::ConstantInt::get(Int8Ty, TCK)
};
- EmitCheck(Checks, "type_mismatch", StaticData, Address);
+ EmitCheck(Checks, "type_mismatch", StaticData, Ptr);
}
// If possible, check that the vptr indicates that there is a subobject of
@@ -602,7 +621,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
// Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
- llvm::Value *VPtrAddr = Builder.CreateBitCast(Address, VPtrTy);
+ Address VPtrAddr(Builder.CreateBitCast(Ptr, VPtrTy), getPointerAlign());
llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);
@@ -619,7 +638,8 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
CacheSize-1));
llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
llvm::Value *CacheVal =
- Builder.CreateLoad(Builder.CreateInBoundsGEP(Cache, Indices));
+ Builder.CreateAlignedLoad(Builder.CreateInBoundsGEP(Cache, Indices),
+ getPointerAlign());
// If the hash isn't in the cache, call a runtime handler to perform the
// hard work of checking whether the vptr is for an object of the right
@@ -632,7 +652,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
llvm::ConstantInt::get(Int8Ty, TCK)
};
- llvm::Value *DynamicData[] = { Address, Hash };
+ llvm::Value *DynamicData[] = { Ptr, Hash };
EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
"dynamic_type_cache_miss", StaticData, DynamicData);
}
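// Illustrative sketch (not part of this change): the emitted check is
// morally equivalent to this pseudo-C, with the slow path left to the
// ubsan runtime's __ubsan_handle_dynamic_type_cache_miss handler:
//
//   std::uint64_t Hash = hash_16_bytes(TypeHash, *(std::uintptr_t *)Ptr);
//   if (Cache[Hash & (CacheSize - 1)] != Hash)
//     __ubsan_handle_dynamic_type_cache_miss(StaticData, Ptr, Hash);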
@@ -764,6 +784,84 @@ EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
// LValue Expression Emission
//===----------------------------------------------------------------------===//
+/// EmitPointerWithAlignment - Given an expression of pointer type, try to
+/// derive a more accurate bound on the alignment of the pointer.
+Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
+ AlignmentSource *Source) {
+ // We allow this with ObjC object pointers because of fragile ABIs.
+ assert(E->getType()->isPointerType() ||
+ E->getType()->isObjCObjectPointerType());
+ E = E->IgnoreParens();
+
+ // Casts:
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ // Bind VLAs in the cast type.
+ if (E->getType()->isVariablyModifiedType())
+ EmitVariablyModifiedType(E->getType());
+
+ switch (CE->getCastKind()) {
+ // Non-converting casts (but not C's implicit conversion from void*).
+ case CK_BitCast:
+ case CK_NoOp:
+ if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
+ if (PtrTy->getPointeeType()->isVoidType())
+ break;
+
+ AlignmentSource InnerSource;
+ Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), &InnerSource);
+ if (Source) *Source = InnerSource;
+
+ // If this is an explicit bitcast, and the source l-value is
+ // opaque, honor the alignment of the casted-to type.
+ if (isa<ExplicitCastExpr>(CE) &&
+ CE->getCastKind() == CK_BitCast &&
+ InnerSource != AlignmentSource::Decl) {
+ Addr = Address(Addr.getPointer(),
+ getNaturalPointeeTypeAlignment(E->getType(), Source));
+ }
+
+ return Builder.CreateBitCast(Addr, ConvertType(E->getType()));
+ }
+ break;
+
+ // Array-to-pointer decay.
+ case CK_ArrayToPointerDecay:
+ return EmitArrayToPointerDecay(CE->getSubExpr(), Source);
+
+ // Derived-to-base conversions.
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase: {
+ Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), Source);
+ auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
+ return GetAddressOfBaseClass(Addr, Derived,
+ CE->path_begin(), CE->path_end(),
+ ShouldNullCheckClassCastValue(CE),
+ CE->getExprLoc());
+ }
+
+ // TODO: Is there any reason to treat base-to-derived conversions
+ // specially?
+ default:
+ break;
+ }
+ }
+
+ // Unary &.
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ if (UO->getOpcode() == UO_AddrOf) {
+ LValue LV = EmitLValue(UO->getSubExpr());
+ if (Source) *Source = LV.getAlignmentSource();
+ return LV.getAddress();
+ }
+ }
+
+ // TODO: conditional operators, comma.
+
+ // Otherwise, use the alignment of the type.
+ CharUnits Align = getNaturalPointeeTypeAlignment(E->getType(), Source);
+ return Address(EmitScalarExpr(E), Align);
+}
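// Illustrative sketch (not part of this change; the names are invented):
// a standalone toy model of the recursion above -- walk through
// non-converting casts and unary '&' for a better bound, else fall back to
// the type's natural alignment. (Simplified: the real code above also
// re-derives the alignment for explicit bitcasts of opaque sources.)
//
//   struct Expr {
//     enum Kind { BitCastFromNonVoid, AddrOfDecl, Opaque } kind;
//     const Expr *sub;       // operand, for casts
//     unsigned declAlign;    // alignment of the named decl, if any
//     unsigned naturalAlign; // natural alignment of the pointee type
//   };
//   unsigned pointerAlignment(const Expr *E) {
//     switch (E->kind) {
//     case Expr::BitCastFromNonVoid: return pointerAlignment(E->sub);
//     case Expr::AddrOfDecl:         return E->declAlign;
//     case Expr::Opaque:             return E->naturalAlign;
//     }
//     return 1; // unreachable
//   }
//
//   // For '(char *)&v' with 'v' aligned to 16 this still yields 16, which
//   // is what lets later accesses keep the stronger alignment.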
+
RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
if (Ty->isVoidType())
return RValue::get(nullptr);
@@ -780,7 +878,7 @@ RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
// identifiable address. Just because the contents of the value are undefined
// doesn't mean that the address can't be taken and compared.
case TEK_Aggregate: {
- llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
+ Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
return RValue::getAggregate(DestPtr);
}
@@ -800,7 +898,8 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
const char *Name) {
ErrorUnsupported(E, Name);
llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
- return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
+ return MakeAddrLValue(Address(llvm::UndefValue::get(Ty), CharUnits::One()),
+ E->getType());
}
LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
@@ -810,7 +909,7 @@ LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
else
LV = EmitLValue(E);
if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
- EmitTypeCheck(TCK, E->getExprLoc(), LV.getAddress(),
+ EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(),
E->getType(), LV.getAlignment());
return LV;
}
@@ -1059,8 +1158,8 @@ CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
SourceLocation Loc) {
return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
- lvalue.getAlignment().getQuantity(),
- lvalue.getType(), Loc, lvalue.getTBAAInfo(),
+ lvalue.getType(), Loc, lvalue.getAlignmentSource(),
+ lvalue.getTBAAInfo(),
lvalue.getTBAABaseType(), lvalue.getTBAAOffset());
}
@@ -1121,37 +1220,31 @@ llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
return MDHelper.createRange(Min, End);
}
-llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
- unsigned Alignment, QualType Ty,
+llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
+ QualType Ty,
SourceLocation Loc,
+ AlignmentSource AlignSource,
llvm::MDNode *TBAAInfo,
QualType TBAABaseType,
uint64_t TBAAOffset) {
// For better performance, handle vector loads differently.
if (Ty->isVectorType()) {
- llvm::Value *V;
- const llvm::Type *EltTy =
- cast<llvm::PointerType>(Addr->getType())->getElementType();
+ const llvm::Type *EltTy = Addr.getElementType();
const auto *VTy = cast<llvm::VectorType>(EltTy);
- // Handle vectors of size 3, like size 4 for better performance.
+ // Handle vectors of size 3 as if they had size 4, for better performance.
if (VTy->getNumElements() == 3) {
// Bitcast to vec4 type.
llvm::VectorType *vec4Ty = llvm::VectorType::get(VTy->getElementType(),
4);
- llvm::PointerType *ptVec4Ty =
- llvm::PointerType::get(vec4Ty,
- (cast<llvm::PointerType>(
- Addr->getType()))->getAddressSpace());
- llvm::Value *Cast = Builder.CreateBitCast(Addr, ptVec4Ty,
- "castToVec4");
+ Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
// Now load value.
- llvm::Value *LoadVal = Builder.CreateLoad(Cast, Volatile, "loadVec4");
+ llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
// Shuffle vector to get vec3.
- V = Builder.CreateShuffleVector(LoadVal, llvm::UndefValue::get(vec4Ty),
+ V = Builder.CreateShuffleVector(V, llvm::UndefValue::get(vec4Ty),
{0, 1, 2}, "extractVec");
return EmitFromMemory(V, Ty);
}
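// Illustrative sketch (not part of this change): for a 3-element vector
// this path loads four elements and shuffles down to three, morally:
//
//   typedef float float3 __attribute__((ext_vector_type(3)));
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   float3 load3(const float3 *p) {
//     float4 wide = *(const float4 *)p;  // one 16-byte load ("loadVec4")
//     return wide.xyz;                   // shuffle ("extractVec")
//   }
//
// trading an awkward 12-byte access for a power-of-two one; this is safe
// because vec3 values are allocated with vec4 size and alignment.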
@@ -1159,17 +1252,12 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
// Atomic operations have to be done on integral types.
if (Ty->isAtomicType() || typeIsSuitableForInlineAtomic(Ty, Volatile)) {
- LValue lvalue = LValue::MakeAddr(Addr, Ty,
- CharUnits::fromQuantity(Alignment),
- getContext(), TBAAInfo);
+ LValue lvalue =
+ LValue::MakeAddr(Addr, Ty, getContext(), AlignSource, TBAAInfo);
return EmitAtomicLoad(lvalue, Loc).getScalarVal();
}
- llvm::LoadInst *Load = Builder.CreateLoad(Addr);
- if (Volatile)
- Load->setVolatile(true);
- if (Alignment)
- Load->setAlignment(Alignment);
+ llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
if (TBAAInfo) {
llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo,
TBAAOffset);
@@ -1237,9 +1325,10 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
return Value;
}
-void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
- bool Volatile, unsigned Alignment,
- QualType Ty, llvm::MDNode *TBAAInfo,
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
+ bool Volatile, QualType Ty,
+ AlignmentSource AlignSource,
+ llvm::MDNode *TBAAInfo,
bool isInit, QualType TBAABaseType,
uint64_t TBAAOffset) {
@@ -1259,11 +1348,8 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
MaskV, "extractVec");
SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
}
- auto *DstPtr = cast<llvm::PointerType>(Addr->getType());
- if (DstPtr->getElementType() != SrcTy) {
- llvm::Type *MemTy =
- llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
- Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
+ if (Addr.getElementType() != SrcTy) {
+ Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp");
}
}
@@ -1272,16 +1358,13 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
if (Ty->isAtomicType() ||
(!isInit && typeIsSuitableForInlineAtomic(Ty, Volatile))) {
EmitAtomicStore(RValue::get(Value),
- LValue::MakeAddr(Addr, Ty,
- CharUnits::fromQuantity(Alignment),
- getContext(), TBAAInfo),
+ LValue::MakeAddr(Addr, Ty, getContext(),
+ AlignSource, TBAAInfo),
isInit);
return;
}
llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
- if (Alignment)
- Store->setAlignment(Alignment);
if (TBAAInfo) {
llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo,
TBAAOffset);
@@ -1293,7 +1376,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
bool isInit) {
EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
- lvalue.getAlignment().getQuantity(), lvalue.getType(),
+ lvalue.getType(), lvalue.getAlignmentSource(),
lvalue.getTBAAInfo(), isInit, lvalue.getTBAABaseType(),
lvalue.getTBAAOffset());
}
@@ -1304,7 +1387,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
if (LV.isObjCWeak()) {
// load of a __weak object.
- llvm::Value *AddrWeakObj = LV.getAddress();
+ Address AddrWeakObj = LV.getAddress();
return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
AddrWeakObj));
}
@@ -1322,9 +1405,8 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
}
if (LV.isVectorElt()) {
- llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
+ llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
LV.isVolatileQualified());
- Load->setAlignment(LV.getAlignment().getQuantity());
return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
"vecext"));
}
@@ -1344,15 +1426,12 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
const CGBitFieldInfo &Info = LV.getBitFieldInfo();
- CharUnits Align = LV.getAlignment().alignmentAtOffset(Info.StorageOffset);
// Get the output type.
llvm::Type *ResLTy = ConvertType(LV.getType());
- llvm::Value *Ptr = LV.getBitFieldAddr();
- llvm::Value *Val = Builder.CreateAlignedLoad(Ptr, Align.getQuantity(),
- LV.isVolatileQualified(),
- "bf.load");
+ Address Ptr = LV.getBitFieldAddress();
+ llvm::Value *Val =
+ Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
if (Info.IsSigned) {
assert(static_cast<unsigned>(Info.Offset + Info.Size) <= Info.StorageSize);
@@ -1377,10 +1456,8 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
- llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
- LV.isVolatileQualified());
- Load->setAlignment(LV.getAlignment().getQuantity());
- llvm::Value *Vec = Load;
+ llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
+ LV.isVolatileQualified());
const llvm::Constant *Elts = LV.getExtVectorElts();
@@ -1407,24 +1484,24 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
}
/// @brief Generates lvalue for partial ext_vector access.
-llvm::Value *CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
- llvm::Value *VectorAddress = LV.getExtVectorAddr();
+Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
+ Address VectorAddress = LV.getExtVectorAddress();
const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
QualType EQT = ExprVT->getElementType();
llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
- llvm::Type *VectorElementPtrToTy = VectorElementTy->getPointerTo();
- llvm::Value *CastToPointerElement =
- Builder.CreateBitCast(VectorAddress,
- VectorElementPtrToTy, "conv.ptr.element");
+ Address CastToPointerElement =
+ Builder.CreateElementBitCast(VectorAddress, VectorElementTy,
+ "conv.ptr.element");
const llvm::Constant *Elts = LV.getExtVectorElts();
unsigned ix = getAccessedFieldNo(0, Elts);
- llvm::Value *VectorBasePtrPlusIx =
- Builder.CreateInBoundsGEP(CastToPointerElement,
- llvm::ConstantInt::get(SizeTy, ix), "add.ptr");
-
+ Address VectorBasePtrPlusIx =
+ Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
+ getContext().getTypeSizeInChars(EQT),
+ "vector.elt");
+
return VectorBasePtrPlusIx;
}
@@ -1459,15 +1536,12 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
if (!Dst.isSimple()) {
if (Dst.isVectorElt()) {
// Read/modify/write the vector, inserting the new element.
- llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
- Dst.isVolatileQualified());
- Load->setAlignment(Dst.getAlignment().getQuantity());
- llvm::Value *Vec = Load;
+ llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
+ Dst.isVolatileQualified());
Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
Dst.getVectorIdx(), "vecins");
- llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
- Dst.isVolatileQualified());
- Store->setAlignment(Dst.getAlignment().getQuantity());
+ Builder.CreateStore(Vec, Dst.getVectorAddress(),
+ Dst.isVolatileQualified());
return;
}
@@ -1511,7 +1585,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
if (Dst.isObjCWeak() && !Dst.isNonGC()) {
// load of a __weak object.
- llvm::Value *LvalueDst = Dst.getAddress();
+ Address LvalueDst = Dst.getAddress();
llvm::Value *src = Src.getScalarVal();
CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
return;
@@ -1519,16 +1593,17 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
if (Dst.isObjCStrong() && !Dst.isNonGC()) {
// load of a __strong object.
- llvm::Value *LvalueDst = Dst.getAddress();
+ Address LvalueDst = Dst.getAddress();
llvm::Value *src = Src.getScalarVal();
if (Dst.isObjCIvar()) {
assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
- llvm::Type *ResultType = ConvertType(getContext().LongTy);
- llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
- llvm::Value *dst = RHS;
+ llvm::Type *ResultType = IntPtrTy;
+ Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
+ llvm::Value *RHS = dst.getPointer();
RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
llvm::Value *LHS =
- Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
+ Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType,
+ "sub.ptr.lhs.cast");
llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
BytesBetween);
@@ -1548,16 +1623,14 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
llvm::Value **Result) {
const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
- CharUnits Align = Dst.getAlignment().alignmentAtOffset(Info.StorageOffset);
llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
- llvm::Value *Ptr = Dst.getBitFieldAddr();
+ Address Ptr = Dst.getBitFieldAddress();
// Get the source value, truncated to the width of the bit-field.
llvm::Value *SrcVal = Src.getScalarVal();
// Cast the source to the storage type and shift it into place.
- SrcVal = Builder.CreateIntCast(SrcVal,
- Ptr->getType()->getPointerElementType(),
+ SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
/*IsSigned=*/false);
llvm::Value *MaskedVal = SrcVal;
@@ -1565,9 +1638,8 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// and mask together with source before storing.
if (Info.StorageSize != Info.Size) {
assert(Info.StorageSize > Info.Size && "Invalid bitfield size.");
- llvm::Value *Val = Builder.CreateAlignedLoad(Ptr, Align.getQuantity(),
- Dst.isVolatileQualified(),
- "bf.load");
+ llvm::Value *Val =
+ Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
// Mask the source value as needed.
if (!hasBooleanRepresentation(Dst.getType()))
@@ -1593,8 +1665,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
}
// Write the new value back out.
- Builder.CreateAlignedStore(SrcVal, Ptr, Align.getQuantity(),
- Dst.isVolatileQualified());
+ Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
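// Illustrative sketch (not part of this change): the read-modify-write
// above is ordinary bitfield insertion, morally
//
//   Storage = (Storage & ~(Mask << Info.Offset)) |
//             ((Src & Mask) << Info.Offset);
//
// where Mask has the low Info.Size bits set; the change here is only that
// the load/store alignments now come from the Address instead of a
// separately threaded CharUnits value.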
// Return the new value of the bit-field, if requested.
if (Result) {
@@ -1620,10 +1691,8 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
LValue Dst) {
// This access turns into a read/modify/write of the vector. Load the input
// value now.
- llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
- Dst.isVolatileQualified());
- Load->setAlignment(Dst.getAlignment().getQuantity());
- llvm::Value *Vec = Load;
+ llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddress(),
+ Dst.isVolatileQualified());
const llvm::Constant *Elts = Dst.getExtVectorElts();
llvm::Value *SrcVal = Src.getScalarVal();
@@ -1685,9 +1754,8 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
}
- llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
- Dst.isVolatileQualified());
- Store->setAlignment(Dst.getAlignment().getQuantity());
+ Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
+ Dst.isVolatileQualified());
}
/// @brief Store of global named registers are always calls to intrinsics.
@@ -1822,11 +1890,27 @@ EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
}
static LValue EmitThreadPrivateVarDeclLValue(
- CodeGenFunction &CGF, const VarDecl *VD, QualType T, llvm::Value *V,
- llvm::Type *RealVarTy, CharUnits Alignment, SourceLocation Loc) {
- V = CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, V, Loc);
- V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
- return CGF.MakeAddrLValue(V, T, Alignment);
+ CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
+ llvm::Type *RealVarTy, SourceLocation Loc) {
+ Addr = CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
+ Addr = CGF.Builder.CreateElementBitCast(Addr, RealVarTy);
+ return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
+}
+
+Address CodeGenFunction::EmitLoadOfReference(Address Addr,
+ const ReferenceType *RefTy,
+ AlignmentSource *Source) {
+ llvm::Value *Ptr = Builder.CreateLoad(Addr);
+ return Address(Ptr, getNaturalTypeAlignment(RefTy->getPointeeType(),
+ Source, /*forPointee*/ true));
+}
+
+LValue CodeGenFunction::EmitLoadOfReferenceLValue(Address RefAddr,
+ const ReferenceType *RefTy) {
+ AlignmentSource Source;
+ Address Addr = EmitLoadOfReference(RefAddr, RefTy, &Source);
+ return MakeAddrLValue(Addr, RefTy->getPointeeType(), Source);
}
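// Illustrative usage (not part of this change): for 'int &ri' whose
// storage lives at RefAddr, EmitLoadOfReferenceLValue loads the stored
// pointer and tags the result with the natural alignment of 'int' (as
// getNaturalTypeAlignment computes it), so later scalar accesses through
// the returned l-value carry 'align 4' instead of a pessimistic default.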
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
@@ -1842,19 +1926,17 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
+ Address Addr(V, Alignment);
LValue LV;
// Emit reference to the private copy of the variable if it is an OpenMP
// threadprivate variable.
if (CGF.getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>())
- return EmitThreadPrivateVarDeclLValue(CGF, VD, T, V, RealVarTy, Alignment,
+ return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
E->getExprLoc());
- if (VD->getType()->isReferenceType()) {
- llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
- LI->setAlignment(Alignment.getQuantity());
- V = LI;
- LV = CGF.MakeNaturalAlignAddrLValue(V, T);
+ if (auto RefTy = VD->getType()->getAs<ReferenceType>()) {
+ LV = CGF.EmitLoadOfReferenceLValue(Addr, RefTy);
} else {
- LV = CGF.MakeAddrLValue(V, T, Alignment);
+ LV = CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
setObjCGCLValueClass(CGF.getContext(), E, LV);
return LV;
@@ -1876,7 +1958,7 @@ static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
}
}
CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
- return CGF.MakeAddrLValue(V, E->getType(), Alignment);
+ return CGF.MakeAddrLValue(V, E->getType(), Alignment, AlignmentSource::Decl);
}
static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
@@ -1892,9 +1974,7 @@ static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
/// So far, only the name is being passed down, but other options such as
/// register type, allocation type or even optimization options could be
/// passed down via the metadata node.
-static LValue EmitGlobalNamedRegister(const VarDecl *VD,
- CodeGenModule &CGM,
- CharUnits Alignment) {
+static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
SmallString<64> Name("llvm.named.register.");
AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
assert(Asm->getLabel().size() < 64-Name.size() &&
@@ -1908,21 +1988,23 @@ static LValue EmitGlobalNamedRegister(const VarDecl *VD,
llvm::Metadata *Ops[] = {Str};
M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
}
- return LValue::MakeGlobalReg(
- llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0)),
- VD->getType(), Alignment);
+
+ CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
+
+ llvm::Value *Ptr =
+ llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
+ return LValue::MakeGlobalReg(Address(Ptr, Alignment), VD->getType());
}
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
const NamedDecl *ND = E->getDecl();
- CharUnits Alignment = getContext().getDeclAlign(ND);
QualType T = E->getType();
if (const auto *VD = dyn_cast<VarDecl>(ND)) {
// Global Named registers access via intrinsics only
if (VD->getStorageClass() == SC_Register &&
VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
- return EmitGlobalNamedRegister(VD, CGM, Alignment);
+ return EmitGlobalNamedRegister(VD, CGM);
// A DeclRefExpr for a reference initialized by a constant expression can
// appear without being odr-used. Directly emit the constant initializer.
@@ -1934,7 +2016,12 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
CGM.EmitConstantValue(*VD->evaluateValue(), VD->getType(), this);
assert(Val && "failed to emit reference constant expression");
// FIXME: Eventually we will want to emit vector element references.
- return MakeAddrLValue(Val, T, Alignment);
+
+ // Should we be using the alignment of the constant pointer we emitted?
+ CharUnits Alignment = getNaturalTypeAlignment(E->getType(), nullptr,
+ /*pointee*/ true);
+
+ return MakeAddrLValue(Address(Val, Alignment), T, AlignmentSource::Decl);
}
// Check for captured variables.
@@ -1942,21 +2029,20 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
if (auto *FD = LambdaCaptureFields.lookup(VD))
return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
else if (CapturedStmtInfo) {
- if (auto *V = LocalDeclMap.lookup(VD)) {
- if (VD->getType()->isReferenceType()) {
- llvm::LoadInst *LI = Builder.CreateLoad(V);
- LI->setAlignment(Alignment.getQuantity());
- V = LI;
- return MakeNaturalAlignAddrLValue(V, T);
+ auto it = LocalDeclMap.find(VD);
+ if (it != LocalDeclMap.end()) {
+ if (auto RefTy = VD->getType()->getAs<ReferenceType>()) {
+ return EmitLoadOfReferenceLValue(it->second, RefTy);
}
- return MakeAddrLValue(V, T, Alignment);
+ return MakeAddrLValue(it->second, T);
}
return EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
CapturedStmtInfo->getContextValue());
}
+
assert(isa<BlockDecl>(CurCodeDecl));
- return MakeAddrLValue(GetAddrOfBlockDecl(VD, VD->hasAttr<BlocksAttr>()),
- T, Alignment);
+ Address addr = GetAddrOfBlockDecl(VD, VD->hasAttr<BlocksAttr>());
+ return MakeAddrLValue(addr, T, AlignmentSource::Decl);
}
}
@@ -1969,8 +2055,8 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
if (ND->hasAttr<WeakRefAttr>()) {
const auto *VD = cast<ValueDecl>(ND);
- llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
- return MakeAddrLValue(Aliasee, T, Alignment);
+ ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
+ return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
}
if (const auto *VD = dyn_cast<VarDecl>(ND)) {
@@ -1978,39 +2064,52 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
if (VD->hasLinkage() || VD->isStaticDataMember())
return EmitGlobalVarDeclLValue(*this, E, VD);
- bool isBlockVariable = VD->hasAttr<BlocksAttr>();
+ Address addr = Address::invalid();
- llvm::Value *V = LocalDeclMap.lookup(VD);
- if (!V && VD->isStaticLocal())
- V = CGM.getOrCreateStaticVarDecl(
- *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false));
+ // The variable should generally be present in the local decl map.
+ auto iter = LocalDeclMap.find(VD);
+ if (iter != LocalDeclMap.end()) {
+ addr = iter->second;
- // Check if variable is threadprivate.
- if (V && getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>())
- return EmitThreadPrivateVarDeclLValue(
- *this, VD, T, V, getTypes().ConvertTypeForMem(VD->getType()),
- Alignment, E->getExprLoc());
+ // Otherwise, it might be a static local we haven't emitted yet for
+ // some reason; most likely because it's in an outer function.
+ } else if (VD->isStaticLocal()) {
+ addr = Address(CGM.getOrCreateStaticVarDecl(
+ *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false)),
+ getContext().getDeclAlign(VD));
+
+ // No other cases for now.
+ } else {
+ llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
+ }
- assert(V && "DeclRefExpr not entered in LocalDeclMap?");
- if (isBlockVariable)
- V = BuildBlockByrefAddress(V, VD);
+ // Check for OpenMP threadprivate variables.
+ if (getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
+ return EmitThreadPrivateVarDeclLValue(
+ *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
+ E->getExprLoc());
+ }
+
+ // Drill into block byref variables.
+ bool isBlockByref = VD->hasAttr<BlocksAttr>();
+ if (isBlockByref) {
+ addr = emitBlockByrefAddress(addr, VD);
+ }
+ // Drill into reference types.
LValue LV;
- if (VD->getType()->isReferenceType()) {
- llvm::LoadInst *LI = Builder.CreateLoad(V);
- LI->setAlignment(Alignment.getQuantity());
- V = LI;
- LV = MakeNaturalAlignAddrLValue(V, T);
+ if (auto RefTy = VD->getType()->getAs<ReferenceType>()) {
+ LV = EmitLoadOfReferenceLValue(addr, RefTy);
} else {
- LV = MakeAddrLValue(V, T, Alignment);
+ LV = MakeAddrLValue(addr, T, AlignmentSource::Decl);
}
bool isLocalStorage = VD->hasLocalStorage();
bool NonGCable = isLocalStorage &&
!VD->getType()->isReferenceType() &&
- !isBlockVariable;
+ !isBlockByref;
if (NonGCable) {
LV.getQuals().removeObjCGCAttr();
LV.setNonGC(true);
@@ -2042,7 +2141,9 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
QualType T = E->getSubExpr()->getType()->getPointeeType();
assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
- LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
+ AlignmentSource AlignSource;
+ Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &AlignSource);
+ LValue LV = MakeAddrLValue(Addr, T, AlignSource);
LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
// We should not generate __weak write barrier on indirect reference
@@ -2059,22 +2160,22 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
case UO_Imag: {
LValue LV = EmitLValue(E->getSubExpr());
assert(LV.isSimple() && "real/imag on non-ordinary l-value");
- llvm::Value *Addr = LV.getAddress();
// __real is valid on scalars. This is a faster way of testing that.
// __imag can only produce an rvalue on scalars.
if (E->getOpcode() == UO_Real &&
- !cast<llvm::PointerType>(Addr->getType())
- ->getElementType()->isStructTy()) {
+ !LV.getAddress().getElementType()->isStructTy()) {
assert(E->getSubExpr()->getType()->isArithmeticType());
return LV;
}
assert(E->getSubExpr()->getType()->isAnyComplexType());
- unsigned Idx = E->getOpcode() == UO_Imag;
- return MakeAddrLValue(
- Builder.CreateStructGEP(nullptr, LV.getAddress(), Idx, "idx"), ExprTy);
+ Address Component =
+ (E->getOpcode() == UO_Real
+ ? emitAddrOfRealComponent(LV.getAddress(), LV.getType())
+ : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
+ return MakeAddrLValue(Component, ExprTy, LV.getAlignmentSource());
}
case UO_PreInc:
case UO_PreDec: {
@@ -2092,12 +2193,12 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
- E->getType());
+ E->getType(), AlignmentSource::Decl);
}
LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
- E->getType());
+ E->getType(), AlignmentSource::Decl);
}
LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
@@ -2110,11 +2211,11 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
PredefinedExpr::getIdentTypeName(E->getIdentType()), FnName};
std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
if (CurCodeDecl && isa<BlockDecl>(CurCodeDecl)) {
- auto C = CGM.GetAddrOfConstantCString(FnName, GVName.c_str(), 1);
- return MakeAddrLValue(C, E->getType());
+ auto C = CGM.GetAddrOfConstantCString(FnName, GVName.c_str());
+ return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
}
auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
- return MakeAddrLValue(C, E->getType());
+ return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
}
/// Emit a type description suitable for use by a runtime sanitizer library. The
@@ -2188,9 +2289,9 @@ llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
// Pointers are passed directly, everything else is passed by address.
if (!V->getType()->isPointerTy()) {
- llvm::Value *Ptr = CreateTempAlloca(V->getType());
+ Address Ptr = CreateDefaultAlignTempAlloca(V->getType());
Builder.CreateStore(V, Ptr);
- V = Ptr;
+ V = Ptr.getPointer();
}
return Builder.CreatePtrToInt(V, TargetTy);
}
@@ -2211,8 +2312,9 @@ llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
if (PLoc.isValid()) {
auto FilenameGV = CGM.GetAddrOfConstantCString(PLoc.getFilename(), ".src");
- CGM.getSanitizerMetadata()->disableSanitizerForGlobal(FilenameGV);
- Filename = FilenameGV;
+ CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
+ cast<llvm::GlobalVariable>(FilenameGV.getPointer()));
+ Filename = FilenameGV.getPointer();
Line = PLoc.getLine();
Column = PLoc.getColumn();
} else {
@@ -2420,6 +2522,33 @@ llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
return TrapCall;
}
+Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
+ AlignmentSource *AlignSource) {
+ assert(E->getType()->isArrayType() &&
+ "Array to pointer decay must have array source type!");
+
+ // Expressions of array type can't be bitfields or vector elements.
+ LValue LV = EmitLValue(E);
+ Address Addr = LV.getAddress();
+ if (AlignSource) *AlignSource = LV.getAlignmentSource();
+
+ // If the array type was an incomplete type, we need to make sure
+ // the decay ends up being the right type.
+ llvm::Type *NewTy = ConvertType(E->getType());
+ Addr = Builder.CreateElementBitCast(Addr, NewTy);
+
+ // Note that VLA pointers are always decayed, so we don't need to do
+ // anything here.
+ if (!E->getType()->isVariableArrayType()) {
+ assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
+ "Expected pointer to array");
+ Addr = Builder.CreateStructGEP(Addr, 0, CharUnits::Zero(), "arraydecay");
+ }
+
+ QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
+ return Builder.CreateElementBitCast(Addr, ConvertTypeForMem(EltType));
+}
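// Illustrative example (not part of this change): for
//   __attribute__((aligned(16))) short a[8];
// the decayed 'short *' produced here keeps the array's 16-byte alignment,
// since the struct GEP above is at offset zero; only a non-zero subscript
// can lower it, and that is accounted for in getArrayElementAlign below.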
+
/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
/// array to pointer, return the array subexpression.
static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
@@ -2436,6 +2565,69 @@ static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
return SubExpr;
}
+static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
+ llvm::Value *ptr,
+ ArrayRef<llvm::Value*> indices,
+ bool inbounds,
+ const llvm::Twine &name = "arrayidx") {
+ if (inbounds) {
+ return CGF.Builder.CreateInBoundsGEP(ptr, indices, name);
+ } else {
+ return CGF.Builder.CreateGEP(ptr, indices, name);
+ }
+}
+
+static CharUnits getArrayElementAlign(CharUnits arrayAlign,
+ llvm::Value *idx,
+ CharUnits eltSize) {
+ // If we have a constant index, we can use the exact offset of the
+ // element we're accessing.
+ if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
+ CharUnits offset = constantIdx->getZExtValue() * eltSize;
+ return arrayAlign.alignmentAtOffset(offset);
+
+ // Otherwise, use the worst-case alignment for any element.
+ } else {
+ return arrayAlign.alignmentOfArrayElement(eltSize);
+ }
+}
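// Illustrative sketch (not part of this change): both branches bottom out
// in "largest power of two dividing both quantities"; standalone:
//
//   #include <cassert>
//   #include <cstdint>
//   // Lowest set bit of A|B; assumes the alignment A is a power of two.
//   static std::uint64_t minAlign(std::uint64_t A, std::uint64_t B) {
//     return (A | B) & (~(A | B) + 1);
//   }
//   int main() {
//     // 16-byte-aligned array of 4-byte elements: element 3 sits at
//     // offset 12, which is only 4-byte aligned.
//     assert(minAlign(16, 3 * 4) == 4);
//     // Unknown index: the worst case over all elements.
//     assert(minAlign(16, 4) == 4);
//   }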
+
+static QualType getFixedSizeElementType(const ASTContext &ctx,
+ const VariableArrayType *vla) {
+ QualType eltType;
+ do {
+ eltType = vla->getElementType();
+ } while ((vla = ctx.getAsVariableArrayType(eltType)));
+ return eltType;
+}
+
+static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
+ ArrayRef<llvm::Value*> indices,
+ QualType eltType, bool inbounds,
+ const llvm::Twine &name = "arrayidx") {
+ // All the indices except the last must be zero.
+#ifndef NDEBUG
+ for (auto idx : indices.drop_back())
+ assert(isa<llvm::ConstantInt>(idx) &&
+ cast<llvm::ConstantInt>(idx)->isZero());
+#endif
+
+ // Determine the element size of the statically-sized base. This is
+ // the thing that the indices are expressed in terms of.
+ if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
+ eltType = getFixedSizeElementType(CGF.getContext(), vla);
+ }
+
+ // We can use that to compute the best alignment of the element.
+ CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
+ CharUnits eltAlign =
+ getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
+
+ llvm::Value *eltPtr =
+ emitArraySubscriptGEP(CGF, addr.getPointer(), indices, inbounds, name);
+ return Address(eltPtr, eltAlign);
+}
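// Illustrative example (not part of this change): for 'int a[5][n][m]'
// with runtime 'n' and 'm', 'a[i]' has the variably-modified type
// 'int[n][m]', so the caller pre-scales the index by n*m and the GEP is
// expressed in units of the fixed-size element 'int'; the element
// alignment is therefore computed from sizeof(int).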
+
LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
bool Accessed) {
// The index must always be an integer, which is not an aggregate. Emit it.
@@ -2454,32 +2646,34 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
LValue LHS = EmitLValue(E->getBase());
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
return LValue::MakeVectorElt(LHS.getAddress(), Idx,
- E->getBase()->getType(), LHS.getAlignment());
+ E->getBase()->getType(),
+ LHS.getAlignmentSource());
}
+ // All the other cases basically behave like simple offsetting.
+
// Extend or truncate the index type to 32 or 64-bits.
if (Idx->getType() != IntPtrTy)
Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
- // We know that the pointer points to a type of the correct size, unless the
- // size is a VLA or Objective-C interface.
- llvm::Value *Address = nullptr;
- CharUnits ArrayAlignment;
+ // Handle the extvector case we ignored above.
if (isa<ExtVectorElementExpr>(E->getBase())) {
LValue LV = EmitLValue(E->getBase());
- Address = EmitExtVectorElementLValue(LV);
- Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
- const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
- QualType EQT = ExprVT->getElementType();
- return MakeAddrLValue(Address, EQT,
- getContext().getTypeAlignInChars(EQT));
- }
- else if (const VariableArrayType *vla =
+ Address Addr = EmitExtVectorElementLValue(LV);
+
+ QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
+ Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true);
+ return MakeAddrLValue(Addr, EltType, LV.getAlignmentSource());
+ }
+
+ AlignmentSource AlignSource;
+ Address Addr = Address::invalid();
+ if (const VariableArrayType *vla =
getContext().getAsVariableArrayType(E->getType())) {
// The base must be a pointer, which is not an aggregate. Emit
// it. It needs to be emitted first in case it's what captures
// the VLA bounds.
- Address = EmitScalarExpr(E->getBase());
+ Addr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
// The element count here is the total number of non-VLA elements.
llvm::Value *numElements = getVLASize(vla).first;
@@ -2490,24 +2684,40 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// multiply. We suppress this if overflow is not undefined behavior.
if (getLangOpts().isSignedOverflowDefined()) {
Idx = Builder.CreateMul(Idx, numElements);
- Address = Builder.CreateGEP(Address, Idx, "arrayidx");
} else {
Idx = Builder.CreateNSWMul(Idx, numElements);
- Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
}
- } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
- // Indexing over an interface, as in "NSString *P; P[4];"
- llvm::Value *InterfaceSize =
- llvm::ConstantInt::get(Idx->getType(),
- getContext().getTypeSizeInChars(OIT).getQuantity());
- Idx = Builder.CreateMul(Idx, InterfaceSize);
- // The base must be a pointer, which is not an aggregate. Emit it.
- llvm::Value *Base = EmitScalarExpr(E->getBase());
- Address = EmitCastToVoidPtr(Base);
- Address = Builder.CreateGEP(Address, Idx, "arrayidx");
- Address = Builder.CreateBitCast(Address, Base->getType());
+ Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
+ !getLangOpts().isSignedOverflowDefined());
+ } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
+ // Indexing over an interface, as in "NSString *P; P[4];"
+ CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
+ llvm::Value *InterfaceSizeVal =
+ llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
+
+ llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
+
+ // Emit the base pointer.
+ Addr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
+
+ // We don't necessarily build correct LLVM struct types for ObjC
+ // interfaces, so we can't rely on GEP to do this scaling
+ // correctly; we have to cast to i8* instead. FIXME: is this
+ // actually true? A lot of other things in the fragile ABI would break...
+ llvm::Type *OrigBaseTy = Addr.getType();
+ Addr = Builder.CreateElementBitCast(Addr, Int8Ty);
+
+ // Do the GEP.
+ CharUnits EltAlign =
+ getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
+ llvm::Value *EltPtr =
+ emitArraySubscriptGEP(*this, Addr.getPointer(), ScaledIdx, false);
+ Addr = Address(EltPtr, EltAlign);
+
+ // Cast back.
+ Addr = Builder.CreateBitCast(Addr, OrigBaseTy);
} else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
// If this is A[i] where A is an array, the frontend will have decayed the
// base to be a ArrayToPointerDecay implicit cast. While correct, it is
@@ -2522,42 +2732,23 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
else
ArrayLV = EmitLValue(Array);
- llvm::Value *ArrayPtr = ArrayLV.getAddress();
- llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
- llvm::Value *Args[] = { Zero, Idx };
// Propagate the alignment from the array itself to the result.
- ArrayAlignment = ArrayLV.getAlignment();
-
- if (getLangOpts().isSignedOverflowDefined())
- Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
- else
- Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
+ Addr = emitArraySubscriptGEP(*this, ArrayLV.getAddress(),
+ {CGM.getSize(CharUnits::Zero()), Idx},
+ E->getType(),
+ !getLangOpts().isSignedOverflowDefined());
+ AlignSource = ArrayLV.getAlignmentSource();
} else {
- // The base must be a pointer, which is not an aggregate. Emit it.
- llvm::Value *Base = EmitScalarExpr(E->getBase());
- if (getLangOpts().isSignedOverflowDefined())
- Address = Builder.CreateGEP(Base, Idx, "arrayidx");
- else
- Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
+ // The base must be a pointer; emit it with an estimate of its alignment.
+ Addr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
+ Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
+ !getLangOpts().isSignedOverflowDefined());
}
- QualType T = E->getBase()->getType()->getPointeeType();
- assert(!T.isNull() &&
- "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
-
- // Limit the alignment to that of the result type.
- LValue LV;
- if (!ArrayAlignment.isZero()) {
- CharUnits Align = getContext().getTypeAlignInChars(T);
- ArrayAlignment = std::min(Align, ArrayAlignment);
- LV = MakeAddrLValue(Address, T, ArrayAlignment);
- } else {
- LV = MakeNaturalAlignAddrLValue(Address, T);
- }
-
- LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
+ LValue LV = MakeAddrLValue(Addr, E->getType(), AlignSource);
+
+ // TODO: Preserve/extend path TBAA metadata?
if (getLangOpts().ObjC1 &&
getLangOpts().getGC() != LangOptions::NonGC) {
@@ -2665,11 +2856,12 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
}
assert(Idx);
- llvm::Value *Address;
- CharUnits ArrayAlignment;
+ llvm::Value *EltPtr;
+ QualType FixedSizeEltType = ResultExprTy;
if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
// The element count here is the total number of non-VLA elements.
llvm::Value *numElements = getVLASize(VLA).first;
+ FixedSizeEltType = getFixedSizeElementType(getContext(), VLA);
// Effectively, the multiply by the VLA size is part of the GEP.
// GEP indexes are signed, and scaling an index isn't permitted to
@@ -2677,39 +2869,35 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
// multiply. We suppress this if overflow is not undefined behavior.
if (getLangOpts().isSignedOverflowDefined()) {
Idx = Builder.CreateMul(Idx, numElements);
- Address = Builder.CreateGEP(Base.getAddress(), Idx, "arrayidx");
+ EltPtr = Builder.CreateGEP(Base.getPointer(), Idx, "arrayidx");
} else {
Idx = Builder.CreateNSWMul(Idx, numElements);
- Address = Builder.CreateInBoundsGEP(Base.getAddress(), Idx, "arrayidx");
+ EltPtr = Builder.CreateInBoundsGEP(Base.getPointer(), Idx, "arrayidx");
}
} else if (BaseTy->isConstantArrayType()) {
- llvm::Value *ArrayPtr = Base.getAddress();
+ llvm::Value *ArrayPtr = Base.getPointer();
llvm::Value *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
llvm::Value *Args[] = {Zero, Idx};
- // Propagate the alignment from the array itself to the result.
- ArrayAlignment = Base.getAlignment();
-
if (getLangOpts().isSignedOverflowDefined())
- Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
+ EltPtr = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
else
- Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
+ EltPtr = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
} else {
// The base must be a pointer, which is not an aggregate. Emit it.
if (getLangOpts().isSignedOverflowDefined())
- Address = Builder.CreateGEP(Base.getAddress(), Idx, "arrayidx");
+ EltPtr = Builder.CreateGEP(Base.getPointer(), Idx, "arrayidx");
else
- Address = Builder.CreateInBoundsGEP(Base.getAddress(), Idx, "arrayidx");
+ EltPtr = Builder.CreateInBoundsGEP(Base.getPointer(), Idx, "arrayidx");
}
+ CharUnits EltAlign =
+ Base.getAlignment().alignmentOfArrayElement(
+ getContext().getTypeSizeInChars(FixedSizeEltType));
+
- // Limit the alignment to that of the result type.
+ // The element's alignment is the array's, limited by the element size.
- LValue LV;
- if (!ArrayAlignment.isZero()) {
- CharUnits Align = getContext().getTypeAlignInChars(ResultExprTy);
- ArrayAlignment = std::min(Align, ArrayAlignment);
- LV = MakeAddrLValue(Address, ResultExprTy, ArrayAlignment);
- } else
- LV = MakeNaturalAlignAddrLValue(Address, ResultExprTy);
+ LValue LV = MakeAddrLValue(Address(EltPtr, EltAlign), ResultExprTy,
+ Base.getAlignmentSource());
LV.getQuals().setAddressSpace(BaseTy.getAddressSpace());
@@ -2725,9 +2913,10 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
if (E->isArrow()) {
// If it is a pointer to a vector, emit the address and form an lvalue with
// it.
- llvm::Value *Ptr = EmitScalarExpr(E->getBase());
+ AlignmentSource AlignSource;
+ Address Ptr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
- Base = MakeAddrLValue(Ptr, PT->getPointeeType());
+ Base = MakeAddrLValue(Ptr, PT->getPointeeType(), AlignSource);
Base.getQuals().removeObjCGCAttr();
} else if (E->getBase()->isGLValue()) {
// Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
@@ -2741,9 +2930,10 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
llvm::Value *Vec = EmitScalarExpr(E->getBase());
// Store the vector to memory (because LValue wants an address).
- llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
+ Address VecMem = CreateMemTemp(E->getBase()->getType());
Builder.CreateStore(Vec, VecMem);
- Base = MakeAddrLValue(VecMem, E->getBase()->getType());
+ Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
+ AlignmentSource::Decl);
}
QualType type =
@@ -2757,7 +2947,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
llvm::Constant *CV =
llvm::ConstantDataVector::get(getLLVMContext(), Indices);
return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
- Base.getAlignment());
+ Base.getAlignmentSource());
}
assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
@@ -2767,8 +2957,8 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
for (unsigned i = 0, e = Indices.size(); i != e; ++i)
CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
llvm::Constant *CV = llvm::ConstantVector::get(CElts);
- return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type,
- Base.getAlignment());
+ return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
+ Base.getAlignmentSource());
}
LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
@@ -2777,10 +2967,11 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
// If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
LValue BaseLV;
if (E->isArrow()) {
- llvm::Value *Ptr = EmitScalarExpr(BaseExpr);
+ AlignmentSource AlignSource;
+ Address Addr = EmitPointerWithAlignment(BaseExpr, &AlignSource);
QualType PtrTy = BaseExpr->getType()->getPointeeType();
- EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Ptr, PtrTy);
- BaseLV = MakeNaturalAlignAddrLValue(Ptr, PtrTy);
+ EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy);
+ BaseLV = MakeAddrLValue(Addr, PtrTy, AlignSource);
} else
BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
@@ -2811,41 +3002,65 @@ LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
return EmitLValueForField(LambdaLV, Field);
}
+/// Drill down to the storage of a field without walking into
+/// reference types.
+///
+/// The resulting address doesn't necessarily have the right type.
+static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
+ const FieldDecl *field) {
+ const RecordDecl *rec = field->getParent();
+
+ unsigned idx =
+ CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
+
+ CharUnits offset;
+ // Compute the byte offset of the field so that CreateStructGEP can
+ // adjust the result alignment to it. As a special case, if the LLVM
+ // field index is 0, the offset is known to be zero.
+ assert((idx != 0 || CGF.getContext().getASTRecordLayout(rec)
+ .getFieldOffset(field->getFieldIndex()) == 0) &&
+ "LLVM field at index zero had non-zero offset?");
+ if (idx != 0) {
+ auto &recLayout = CGF.getContext().getASTRecordLayout(rec);
+ auto offsetInBits = recLayout.getFieldOffset(field->getFieldIndex());
+ offset = CGF.getContext().toCharUnitsFromBits(offsetInBits);
+ }
+
+ return CGF.Builder.CreateStructGEP(base, idx, offset, field->getName());
+}
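// Illustrative example (not part of this change): for
//   struct S { char c; int i; };
// with a 16-byte-aligned base, field 'i' has LLVM field index 1 and byte
// offset 4, so the CreateStructGEP above carries CharUnits(4) and the
// result is known to be alignmentAtOffset(4) == 4-byte aligned.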
+
LValue CodeGenFunction::EmitLValueForField(LValue base,
const FieldDecl *field) {
+ AlignmentSource fieldAlignSource =
+ getFieldAlignmentSource(base.getAlignmentSource());
+
if (field->isBitField()) {
const CGRecordLayout &RL =
CGM.getTypes().getCGRecordLayout(field->getParent());
const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
- llvm::Value *Addr = base.getAddress();
+ Address Addr = base.getAddress();
unsigned Idx = RL.getLLVMFieldNo(field);
if (Idx != 0)
// For structs, we GEP to the field that the record layout suggests.
- Addr = Builder.CreateStructGEP(nullptr, Addr, Idx, field->getName());
+ Addr = Builder.CreateStructGEP(Addr, Idx, Info.StorageOffset,
+ field->getName());
// Get the access type.
- llvm::Type *PtrTy = llvm::Type::getIntNPtrTy(
- getLLVMContext(), Info.StorageSize,
- CGM.getContext().getTargetAddressSpace(base.getType()));
- if (Addr->getType() != PtrTy)
- Addr = Builder.CreateBitCast(Addr, PtrTy);
+ llvm::Type *FieldIntTy =
+ llvm::Type::getIntNTy(getLLVMContext(), Info.StorageSize);
+ if (Addr.getElementType() != FieldIntTy)
+ Addr = Builder.CreateElementBitCast(Addr, FieldIntTy);
QualType fieldType =
field->getType().withCVRQualifiers(base.getVRQualifiers());
- return LValue::MakeBitfield(Addr, Info, fieldType, base.getAlignment());
+ return LValue::MakeBitfield(Addr, Info, fieldType, fieldAlignSource);
}
const RecordDecl *rec = field->getParent();
QualType type = field->getType();
- CharUnits alignment = getContext().getDeclAlign(field);
-
- // FIXME: It should be impossible to have an LValue without alignment for a
- // complete type.
- if (!base.getAlignment().isZero())
- alignment = std::min(alignment, base.getAlignment());
bool mayAlias = rec->hasAttr<MayAliasAttr>();
- llvm::Value *addr = base.getAddress();
+ Address addr = base.getAddress();
unsigned cvr = base.getVRQualifiers();
bool TBAAPath = CGM.getCodeGenOpts().StructPathTBAA;
if (rec->isUnion()) {
@@ -2855,14 +3070,12 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
TBAAPath = false;
} else {
// For structs, we GEP to the field that the record layout suggests.
- unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
- addr = Builder.CreateStructGEP(nullptr, addr, idx, field->getName());
+ addr = emitAddrOfFieldStorage(*this, addr, field);
// If this is a reference field, load the reference right now.
if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
if (cvr & Qualifiers::Volatile) load->setVolatile(true);
- load->setAlignment(alignment.getQuantity());
// Loading the reference will disable path-aware TBAA.
TBAAPath = false;
@@ -2876,14 +3089,17 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
CGM.DecorateInstruction(load, tbaa);
}
- addr = load;
mayAlias = false;
type = refType->getPointeeType();
- if (type->isIncompleteType())
- alignment = CharUnits();
- else
- alignment = getContext().getTypeAlignInChars(type);
- cvr = 0; // qualifiers don't recursively apply to referencee
+
+ CharUnits alignment =
+ getNaturalTypeAlignment(type, &fieldAlignSource, /*pointee*/ true);
+ addr = Address(load, alignment);
+
+ // Qualifiers on the struct don't apply to the referencee, and
+ // we'll pick up CVR from the actual type later, so reset these
+ // additional qualifiers now.
+ cvr = 0;
}
}
@@ -2891,14 +3107,14 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
// for both unions and structs. A union needs a bitcast, a struct element
// will need a bitcast if the LLVM type laid out doesn't match the desired
// type.
- addr = EmitBitCastOfLValueToProperType(*this, addr,
- CGM.getTypes().ConvertTypeForMem(type),
- field->getName());
+ addr = Builder.CreateElementBitCast(addr,
+ CGM.getTypes().ConvertTypeForMem(type),
+ field->getName());
if (field->hasAttr<AnnotateAttr>())
addr = EmitFieldAnnotations(field, addr);
- LValue LV = MakeAddrLValue(addr, type, alignment);
+ LValue LV = MakeAddrLValue(addr, type, fieldAlignSource);
LV.getQuals().addCVRQualifiers(cvr);
if (TBAAPath) {
const ASTRecordLayout &Layout =
@@ -2932,41 +3148,29 @@ CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
if (!FieldType->isReferenceType())
return EmitLValueForField(Base, Field);
- const CGRecordLayout &RL =
- CGM.getTypes().getCGRecordLayout(Field->getParent());
- unsigned idx = RL.getLLVMFieldNo(Field);
- llvm::Value *V = Builder.CreateStructGEP(nullptr, Base.getAddress(), idx);
- assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
+ Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);
- // Make sure that the address is pointing to the right type. This is critical
- // for both unions and structs. A union needs a bitcast, a struct element
- // will need a bitcast if the LLVM type laid out doesn't match the desired
- // type.
+ // Make sure that the address is pointing to the right type.
llvm::Type *llvmType = ConvertTypeForMem(FieldType);
- V = EmitBitCastOfLValueToProperType(*this, V, llvmType, Field->getName());
-
- CharUnits Alignment = getContext().getDeclAlign(Field);
+ V = Builder.CreateElementBitCast(V, llvmType, Field->getName());
- // FIXME: It should be impossible to have an LValue without alignment for a
- // complete type.
- if (!Base.getAlignment().isZero())
- Alignment = std::min(Alignment, Base.getAlignment());
-
- return MakeAddrLValue(V, FieldType, Alignment);
+ // TODO: access-path TBAA?
+ auto FieldAlignSource = getFieldAlignmentSource(Base.getAlignmentSource());
+ return MakeAddrLValue(V, FieldType, FieldAlignSource);
}
LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
if (E->isFileScope()) {
- llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
- return MakeAddrLValue(GlobalPtr, E->getType());
+ ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
+ return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
}
if (E->getType()->isVariablyModifiedType())
// make sure to emit the VLA size.
EmitVariablyModifiedType(E->getType());
- llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
+ Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
const Expr *InitExpr = E->getInitializer();
- LValue Result = MakeAddrLValue(DeclPtr, E->getType());
+ LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);
EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
/*Init*/ true);
@@ -3057,11 +3261,14 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
EmitBlock(contBlock);
if (lhs && rhs) {
- llvm::PHINode *phi = Builder.CreatePHI(lhs->getAddress()->getType(),
+ llvm::PHINode *phi = Builder.CreatePHI(lhs->getPointer()->getType(),
2, "cond-lvalue");
- phi->addIncoming(lhs->getAddress(), lhsBlock);
- phi->addIncoming(rhs->getAddress(), rhsBlock);
- return MakeAddrLValue(phi, expr->getType());
+ phi->addIncoming(lhs->getPointer(), lhsBlock);
+ phi->addIncoming(rhs->getPointer(), rhsBlock);
+ Address result(phi, std::min(lhs->getAlignment(), rhs->getAlignment()));
+ AlignmentSource alignSource =
+ std::max(lhs->getAlignmentSource(), rhs->getAlignmentSource());
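// Editorial note on the merge above: the joined value may only assume the
// weaker of the two alignment guarantees (std::min on the amounts), but
// must assume the less-reliable provenance (std::max on AlignmentSource,
// whose later enumerators denote less-trusted sources).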
+ return MakeAddrLValue(result, expr->getType(), alignSource);
} else {
assert((lhs || rhs) &&
"both operands of glvalue conditional are throw-expressions?");
@@ -3130,9 +3337,9 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_Dynamic: {
LValue LV = EmitLValue(E->getSubExpr());
- llvm::Value *V = LV.getAddress();
+ Address V = LV.getAddress();
const auto *DCE = cast<CXXDynamicCastExpr>(E);
- return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
+ return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType());
}
case CK_ConstructorConversion:
@@ -3150,14 +3357,14 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
LValue LV = EmitLValue(E->getSubExpr());
- llvm::Value *This = LV.getAddress();
+ Address This = LV.getAddress();
// Perform the derived-to-base conversion
- llvm::Value *Base = GetAddressOfBaseClass(
+ Address Base = GetAddressOfBaseClass(
This, DerivedClassDecl, E->path_begin(), E->path_end(),
/*NullCheckValue=*/false, E->getExprLoc());
- return MakeAddrLValue(Base, E->getType());
+ return MakeAddrLValue(Base, E->getType(), LV.getAlignmentSource());
}
case CK_ToUnion:
return EmitAggExprToLValue(E);
@@ -3168,7 +3375,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
LValue LV = EmitLValue(E->getSubExpr());
// Perform the base-to-derived conversion
- llvm::Value *Derived =
+ Address Derived =
GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
E->path_begin(), E->path_end(),
/*NullCheckValue=*/false);
@@ -3177,34 +3384,35 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
// performed and the object is not of the derived type.
if (sanitizePerformTypeCheck())
EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(),
- Derived, E->getType());
+ Derived.getPointer(), E->getType());
if (SanOpts.has(SanitizerKind::CFIDerivedCast))
- EmitVTablePtrCheckForCast(E->getType(), Derived, /*MayBeNull=*/false,
+ EmitVTablePtrCheckForCast(E->getType(), Derived.getPointer(),
+ /*MayBeNull=*/false,
CFITCK_DerivedCast, E->getLocStart());
- return MakeAddrLValue(Derived, E->getType());
+ return MakeAddrLValue(Derived, E->getType(), LV.getAlignmentSource());
}
case CK_LValueBitCast: {
// This must be a reinterpret_cast (or c-style equivalent).
const auto *CE = cast<ExplicitCastExpr>(E);
LValue LV = EmitLValue(E->getSubExpr());
- llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
- ConvertType(CE->getTypeAsWritten()));
+ Address V = Builder.CreateBitCast(LV.getAddress(),
+ ConvertType(CE->getTypeAsWritten()));
if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
- EmitVTablePtrCheckForCast(E->getType(), V, /*MayBeNull=*/false,
+ EmitVTablePtrCheckForCast(E->getType(), V.getPointer(),
+ /*MayBeNull=*/false,
CFITCK_UnrelatedCast, E->getLocStart());
- return MakeAddrLValue(V, E->getType());
+ return MakeAddrLValue(V, E->getType(), LV.getAlignmentSource());
}
case CK_ObjCObjectLValueCast: {
LValue LV = EmitLValue(E->getSubExpr());
- QualType ToType = getContext().getLValueReferenceType(E->getType());
- llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
- ConvertType(ToType));
- return MakeAddrLValue(V, E->getType());
+ Address V = Builder.CreateElementBitCast(LV.getAddress(),
+ ConvertType(E->getType()));
+ return MakeAddrLValue(V, E->getType(), LV.getAlignmentSource());
}
case CK_ZeroToOCLEvent:
llvm_unreachable("NULL to OpenCL event lvalue cast is not valid");
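Most of the cast cases above replace the old CreateBitCast(V, T->getPointerTo()) pattern with CreateElementBitCast(Addr, T). The helper is, roughly, a pointer bitcast that changes only the pointee type while carrying the known alignment through unchanged (a rough shape, assuming the CGBuilder introduced by this patch):

    // Rough shape of the helper: same pointer value, new element type,
    // alignment preserved.
    Address CreateElementBitCast(Address Addr, llvm::Type *Ty,
                                 const llvm::Twine &Name = "") {
      llvm::Type *PtrTy = Ty->getPointerTo(Addr.getAddressSpace());
      return Address(CreateBitCast(Addr.getPointer(), PtrTy, Name),
                     Addr.getAlignment());
    }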
@@ -3271,12 +3479,12 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
// If the pseudo-expression names a retainable object with weak or
// strong lifetime, the object shall be released.
Expr *BaseExpr = PseudoDtor->getBase();
- llvm::Value *BaseValue = nullptr;
+ Address BaseValue = Address::invalid();
Qualifiers BaseQuals;
// If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
if (PseudoDtor->isArrow()) {
- BaseValue = EmitScalarExpr(BaseExpr);
+ BaseValue = EmitPointerWithAlignment(BaseExpr);
const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
BaseQuals = PTy->getPointeeType().getQualifiers();
} else {
@@ -3371,13 +3579,14 @@ LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
RValue RV = EmitCallExpr(E);
if (!RV.isScalar())
- return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
+ return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
+ AlignmentSource::Decl);
assert(E->getCallReturnType(getContext())->isReferenceType() &&
"Can't have a scalar return unless the return type is a "
"reference type!");
- return MakeAddrLValue(RV.getScalarVal(), E->getType());
+ return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}
LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
@@ -3390,21 +3599,23 @@ LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
&& "binding l-value to type which needs a temporary");
AggValueSlot Slot = CreateAggTemp(E->getType());
EmitCXXConstructExpr(E, Slot);
- return MakeAddrLValue(Slot.getAddr(), E->getType());
+ return MakeAddrLValue(Slot.getAddress(), E->getType(),
+ AlignmentSource::Decl);
}
LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
- return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
+ return MakeNaturalAlignAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}
-llvm::Value *CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
- return Builder.CreateBitCast(CGM.GetAddrOfUuidDescriptor(E),
- ConvertType(E->getType())->getPointerTo());
+Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
+ return Builder.CreateElementBitCast(CGM.GetAddrOfUuidDescriptor(E),
+ ConvertType(E->getType()));
}
LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
- return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType());
+ return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
+ AlignmentSource::Decl);
}
LValue
@@ -3412,34 +3623,37 @@ CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
Slot.setExternallyDestructed();
EmitAggExpr(E->getSubExpr(), Slot);
- EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr());
- return MakeAddrLValue(Slot.getAddr(), E->getType());
+ EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
+ return MakeAddrLValue(Slot.getAddress(), E->getType(),
+ AlignmentSource::Decl);
}
LValue
CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
EmitLambdaExpr(E, Slot);
- return MakeAddrLValue(Slot.getAddr(), E->getType());
+ return MakeAddrLValue(Slot.getAddress(), E->getType(),
+ AlignmentSource::Decl);
}
LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
RValue RV = EmitObjCMessageExpr(E);
if (!RV.isScalar())
- return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
+ return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
+ AlignmentSource::Decl);
assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
"Can't have a scalar return unless the return type is a "
"reference type!");
- return MakeAddrLValue(RV.getScalarVal(), E->getType());
+ return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}
LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
- llvm::Value *V =
- CGM.getObjCRuntime().GetSelector(*this, E->getSelector(), true);
- return MakeAddrLValue(V, E->getType());
+ Address V =
+ CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
+ return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
}
llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
@@ -3467,8 +3681,7 @@ LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
BaseQuals = ObjectTy.getQualifiers();
} else {
LValue BaseLV = EmitLValue(BaseExpr);
- // FIXME: this isn't right for bitfields.
- BaseValue = BaseLV.getAddress();
+ BaseValue = BaseLV.getPointer();
ObjectTy = BaseExpr->getType();
BaseQuals = ObjectTy.getQualifiers();
}
@@ -3483,7 +3696,8 @@ LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
// Can only get l-value for message expression returning aggregate type
RValue RV = EmitAnyExprToTemp(E);
- return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
+ return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
+ AlignmentSource::Decl);
}
RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
@@ -3517,7 +3731,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
Callee, llvm::PointerType::getUnqual(PrefixStructTy));
llvm::Value *CalleeSigPtr =
Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 0);
- llvm::Value *CalleeSig = Builder.CreateLoad(CalleeSigPtr);
+ llvm::Value *CalleeSig =
+ Builder.CreateAlignedLoad(CalleeSigPtr, getIntAlign());
llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
llvm::BasicBlock *Cont = createBasicBlock("cont");
@@ -3527,7 +3742,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
EmitBlock(TypeCheck);
llvm::Value *CalleeRTTIPtr =
Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 1);
- llvm::Value *CalleeRTTI = Builder.CreateLoad(CalleeRTTIPtr);
+ llvm::Value *CalleeRTTI =
+ Builder.CreateAlignedLoad(CalleeRTTIPtr, getPointerAlign());
llvm::Value *CalleeRTTIMatch =
Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst);
llvm::Constant *StaticData[] = {
@@ -3583,29 +3799,32 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
- llvm::Value *BaseV;
- if (E->getOpcode() == BO_PtrMemI)
- BaseV = EmitScalarExpr(E->getLHS());
- else
- BaseV = EmitLValue(E->getLHS()).getAddress();
+ Address BaseAddr = Address::invalid();
+ if (E->getOpcode() == BO_PtrMemI) {
+ BaseAddr = EmitPointerWithAlignment(E->getLHS());
+ } else {
+ BaseAddr = EmitLValue(E->getLHS()).getAddress();
+ }
llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
const MemberPointerType *MPT
= E->getRHS()->getType()->getAs<MemberPointerType>();
- llvm::Value *AddV = CGM.getCXXABI().EmitMemberDataPointerAddress(
- *this, E, BaseV, OffsetV, MPT);
+ AlignmentSource AlignSource;
+ Address MemberAddr =
+ EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT,
+ &AlignSource);
- return MakeAddrLValue(AddV, MPT->getPointeeType());
+ return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), AlignSource);
}
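Because the member offset is only known at run time, the result cannot simply inherit the base's alignment. A safe lower bound, simplified from what EmitCXXMemberDataPointerAddress actually computes (the real code also consults the record layout):

    // base + (a multiple of the member's natural alignment) is aligned
    // to at least the smaller of the two powers of two.
    CharUnits memberAlign =
        getContext().getTypeAlignInChars(MPT->getPointeeType());
    CharUnits safeAlign = std::min(BaseAddr.getAlignment(), memberAlign);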
/// Given the address of a temporary variable, produce an r-value of
/// its type.
-RValue CodeGenFunction::convertTempToRValue(llvm::Value *addr,
+RValue CodeGenFunction::convertTempToRValue(Address addr,
QualType type,
SourceLocation loc) {
- LValue lvalue = MakeNaturalAlignAddrLValue(addr, type);
+ LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
switch (getEvaluationKind(type)) {
case TEK_Complex:
return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
@@ -3661,7 +3880,8 @@ static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
CGF.EmitAggExpr(ov->getSourceExpr(), slot);
- LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
+ LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
+ AlignmentSource::Decl);
opaqueData = OVMA::bind(CGF, ov, LV);
result.RV = slot.asRValue();
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index 3182ac5509f..1ab9e022c84 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -49,7 +49,8 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
if (!shouldUseDestForReturnSlot())
return ReturnValueSlot();
- return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile(), IsResultUnused);
+ return ReturnValueSlot(Dest.getAddress(), Dest.isVolatile(),
+ IsResultUnused);
}
AggValueSlot EnsureSlot(QualType T) {
@@ -77,14 +78,13 @@ public:
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void EmitFinalDestCopy(QualType type, const LValue &src);
- void EmitFinalDestCopy(QualType type, RValue src,
- CharUnits srcAlignment = CharUnits::Zero());
+ void EmitFinalDestCopy(QualType type, RValue src);
void EmitCopy(QualType type, const AggValueSlot &dest,
const AggValueSlot &src);
void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
- void EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
+ void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
QualType elementType, InitListExpr *E);
AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
@@ -199,7 +199,7 @@ public:
// case Expr::ChooseExprClass:
void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
void VisitAtomicExpr(AtomicExpr *E) {
- CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
+ CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddress());
}
};
} // end anonymous namespace.
@@ -259,17 +259,14 @@ void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
}
// Otherwise, copy from there to the destination.
- assert(Dest.getAddr() != src.getAggregateAddr());
- std::pair<CharUnits, CharUnits> typeInfo =
- CGF.getContext().getTypeInfoInChars(E->getType());
- EmitFinalDestCopy(E->getType(), src, typeInfo.second);
+ assert(Dest.getPointer() != src.getAggregatePointer());
+ EmitFinalDestCopy(E->getType(), src);
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
-void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src,
- CharUnits srcAlign) {
+void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
assert(src.isAggregate() && "value must be aggregate value!");
- LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddr(), type, srcAlign);
+ LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
EmitFinalDestCopy(type, srcLV);
}
@@ -298,8 +295,8 @@ void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
- dest.getAddr(),
- src.getAddr(),
+ dest.getAddress(),
+ src.getAddress(),
size);
return;
}
@@ -307,9 +304,8 @@ void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
// If the result of the assignment is used, copy the LHS there also.
// It's volatile if either side is. Use the minimum alignment of
// the two sides.
- CGF.EmitAggregateCopy(dest.getAddr(), src.getAddr(), type,
- dest.isVolatile() || src.isVolatile(),
- std::min(dest.getAlignment(), src.getAlignment()));
+ CGF.EmitAggregateCopy(dest.getAddress(), src.getAddress(), type,
+ dest.isVolatile() || src.isVolatile());
}
/// \brief Emit the initializer for a std::initializer_list initialized with a
@@ -321,7 +317,7 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
ASTContext &Ctx = CGF.getContext();
LValue Array = CGF.EmitLValue(E->getSubExpr());
assert(Array.isSimple() && "initializer_list array not a simple lvalue");
- llvm::Value *ArrayPtr = Array.getAddress();
+ Address ArrayPtr = Array.getAddress();
const ConstantArrayType *ArrayType =
Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
@@ -344,13 +340,12 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
}
AggValueSlot Dest = EnsureSlot(E->getType());
- LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
- Dest.getAlignment());
+ LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
llvm::Value *IdxStart[] = { Zero, Zero };
llvm::Value *ArrayStart =
- Builder.CreateInBoundsGEP(ArrayPtr, IdxStart, "arraystart");
+ Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxStart, "arraystart");
CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
++Field;
@@ -367,7 +362,7 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
// End pointer.
llvm::Value *IdxEnd[] = { Zero, Size };
llvm::Value *ArrayEnd =
- Builder.CreateInBoundsGEP(ArrayPtr, IdxEnd, "arrayend");
+ Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxEnd, "arrayend");
CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
} else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
// Length.
@@ -402,7 +397,7 @@ static bool isTrivialFiller(Expr *E) {
}
/// \brief Emit initialization of an array from an initializer list.
-void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
+void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
QualType elementType, InitListExpr *E) {
uint64_t NumInitElements = E->getNumInits();
@@ -414,13 +409,17 @@ void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
llvm::Value *indices[] = { zero, zero };
llvm::Value *begin =
- Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");
+ Builder.CreateInBoundsGEP(DestPtr.getPointer(), indices, "arrayinit.begin");
+
+ CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
+ CharUnits elementAlign =
+ DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
// Exception safety requires us to destroy all the
// already-constructed members if an initializer throws.
// For that, we'll need an EH cleanup.
QualType::DestructionKind dtorKind = elementType.isDestructedType();
- llvm::AllocaInst *endOfInit = nullptr;
+ Address endOfInit = Address::invalid();
EHScopeStack::stable_iterator cleanup;
llvm::Instruction *cleanupDominator = nullptr;
if (CGF.needsEHCleanup(dtorKind)) {
@@ -428,10 +427,11 @@ void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
// directly, but the control flow can get so varied here that it
// would actually be quite complex. Therefore we go through an
// alloca.
- endOfInit = CGF.CreateTempAlloca(begin->getType(),
+ endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
"arrayinit.endOfInit");
cleanupDominator = Builder.CreateStore(begin, endOfInit);
CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
+ elementAlign,
CGF.getDestroyer(dtorKind));
cleanup = CGF.EHStack.stable_begin();
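alignmentOfArrayElement encodes the fact that element i lives at begin + i*size: across all i, the only guarantee is the largest power of two dividing both the base alignment and the element size. A self-contained check of that arithmetic (semantics assumed to mirror llvm::MinAlign):

    #include <cassert>
    #include <cstdint>

    // Largest power of two dividing both byte counts: lowest set bit of a|b.
    static uint64_t minAlign(uint64_t a, uint64_t b) {
      return (a | b) & (~(a | b) + 1);
    }

    int main() {
      // A 16-byte-aligned array of 12-byte elements guarantees only
      // 4-byte alignment for an element at an arbitrary index.
      assert(minAlign(16, 12) == 4);
      assert(minAlign(16, 8) == 8);
    }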
@@ -458,10 +458,11 @@ void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
// Tell the cleanup that it needs to destroy up to this
// element. TODO: some of these stores can be trivially
// observed to be unnecessary.
- if (endOfInit) Builder.CreateStore(element, endOfInit);
+ if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
}
- LValue elementLV = CGF.MakeAddrLValue(element, elementType);
+ LValue elementLV =
+ CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
EmitInitializationToLValue(E->getInit(i), elementLV);
}
@@ -482,7 +483,7 @@ void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
// Advance to the start of the rest of the array.
if (NumInitElements) {
element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
- if (endOfInit) Builder.CreateStore(element, endOfInit);
+ if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
}
// Compute the end of the array.
@@ -500,7 +501,8 @@ void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
currentElement->addIncoming(element, entryBB);
// Emit the actual filler expression.
- LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
+ LValue elementLV =
+ CGF.MakeAddrLValue(Address(currentElement, elementAlign), elementType);
if (filler)
EmitInitializationToLValue(filler, elementLV);
else
@@ -511,7 +513,7 @@ void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
// Tell the EH cleanup that we finished with the last element.
- if (endOfInit) Builder.CreateStore(nextElement, endOfInit);
+ if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);
// Leave the loop if we're done.
llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
@@ -596,9 +598,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
// GCC union extension
QualType Ty = E->getSubExpr()->getType();
- QualType PtrTy = CGF.getContext().getPointerType(Ty);
- llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
- CGF.ConvertType(PtrTy));
+ Address CastPtr =
+ Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
EmitInitializationToLValue(E->getSubExpr(),
CGF.MakeAddrLValue(CastPtr, Ty));
break;
@@ -649,13 +650,13 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
// Zero-initialize. (Strictly speaking, we only need to initialize
// the padding at the end, but this is simpler.)
if (!Dest.isZeroed())
- CGF.EmitNullInitialization(Dest.getAddr(), atomicType);
+ CGF.EmitNullInitialization(Dest.getAddress(), atomicType);
// Build a GEP to refer to the subobject.
- llvm::Value *valueAddr =
- CGF.Builder.CreateStructGEP(nullptr, valueDest.getAddr(), 0);
+ Address valueAddr =
+ CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0,
+ CharUnits());
valueDest = AggValueSlot::forAddr(valueAddr,
- valueDest.getAlignment(),
valueDest.getQualifiers(),
valueDest.isExternallyDestructed(),
valueDest.requiresGCollection(),
@@ -673,8 +674,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
- llvm::Value *valueAddr =
- Builder.CreateStructGEP(nullptr, atomicSlot.getAddr(), 0);
+ Address valueAddr =
+ Builder.CreateStructGEP(atomicSlot.getAddress(), 0, CharUnits());
RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
return EmitFinalDestCopy(valueType, rvalue);
}
@@ -959,15 +960,15 @@ void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
}
void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
- llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
- llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
+ Address ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
+ Address ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
- if (!ArgPtr) {
+ if (!ArgPtr.isValid()) {
// If EmitVAArg fails, we fall back to the LLVM instruction.
- llvm::Value *Val =
- Builder.CreateVAArg(ArgValue, CGF.ConvertType(VE->getType()));
+ llvm::Value *Val = Builder.CreateVAArg(ArgValue.getPointer(),
+ CGF.ConvertType(VE->getType()));
if (!Dest.isIgnored())
- Builder.CreateStore(Val, Dest.getAddr());
+ Builder.CreateStore(Val, Dest.getAddress());
return;
}
@@ -987,7 +988,7 @@ void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
// Push that destructor we promised.
if (!wasExternallyDestructed)
- CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddr());
+ CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
}
void
@@ -1011,13 +1012,13 @@ void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
QualType T = E->getType();
AggValueSlot Slot = EnsureSlot(T);
- EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}
void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
QualType T = E->getType();
AggValueSlot Slot = EnsureSlot(T);
- EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}
/// isSimpleZero - If emitting this value will obviously just cause a store of
@@ -1135,8 +1136,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
AggValueSlot Dest = EnsureSlot(E->getType());
- LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
- Dest.getAlignment());
+ LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
// Handle initialization of an array.
if (E->getType()->isArrayType()) {
@@ -1146,12 +1146,8 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
QualType elementType =
CGF.getContext().getAsArrayType(E->getType())->getElementType();
- llvm::PointerType *APType =
- cast<llvm::PointerType>(Dest.getAddr()->getType());
- llvm::ArrayType *AType =
- cast<llvm::ArrayType>(APType->getElementType());
-
- EmitArrayInit(Dest.getAddr(), AType, elementType, E);
+ auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
+ EmitArrayInit(Dest.getAddress(), AType, elementType, E);
return;
}
@@ -1175,7 +1171,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
// Prepare a 'this' for CXXDefaultInitExprs.
- CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddr());
+ CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
if (record->isUnion()) {
// Only initialize one field of a union. The field itself is
@@ -1253,9 +1249,10 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
assert(LV.isSimple());
if (CGF.needsEHCleanup(dtorKind)) {
if (!cleanupDominator)
- cleanupDominator = CGF.Builder.CreateLoad(
+ cleanupDominator = CGF.Builder.CreateAlignedLoad(
CGF.Int8Ty,
- llvm::Constant::getNullValue(CGF.Int8PtrTy)); // placeholder
+ llvm::Constant::getNullValue(CGF.Int8PtrTy),
+ CharUnits::One()); // placeholder
CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
CGF.getDestroyer(dtorKind), false);
@@ -1268,7 +1265,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// else, clean it up for -O0 builds and general tidiness.
if (!pushedCleanup && LV.isSimple())
if (llvm::GetElementPtrInst *GEP =
- dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
+ dyn_cast<llvm::GetElementPtrInst>(LV.getPointer()))
if (GEP->use_empty())
GEP->eraseFromParent();
}
@@ -1286,8 +1283,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
AggValueSlot Dest = EnsureSlot(E->getType());
- LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
- Dest.getAlignment());
+ LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
EmitInitializationToLValue(E->getBase(), DestLV);
VisitInitListExpr(E->getUpdater());
}
@@ -1357,7 +1353,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
CodeGenFunction &CGF) {
// If the slot is already known to be zeroed, nothing to do. Don't mess with
// volatile stores.
- if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == nullptr)
+ if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
return;
// C++ objects with a user-declared constructor don't need zeroing.
@@ -1370,26 +1366,22 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
}
// If the type is 16 bytes or smaller, prefer individual stores over memset.
- std::pair<CharUnits, CharUnits> TypeInfo =
- CGF.getContext().getTypeInfoInChars(E->getType());
- if (TypeInfo.first <= CharUnits::fromQuantity(16))
+ CharUnits Size = CGF.getContext().getTypeSizeInChars(E->getType());
+ if (Size <= CharUnits::fromQuantity(16))
return;
// Check to see if over 3/4 of the initializer is known to be zero. If so,
// we prefer to emit memset + individual stores for the rest.
CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
- if (NumNonZeroBytes*4 > TypeInfo.first)
+ if (NumNonZeroBytes*4 > Size)
return;
// Okay, it seems like a good idea to use an initial memset, emit the call.
- llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
- CharUnits Align = TypeInfo.second;
+ llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
- llvm::Value *Loc = Slot.getAddr();
-
- Loc = CGF.Builder.CreateBitCast(Loc, CGF.Int8PtrTy);
- CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
- Align.getQuantity(), false);
+ Address Loc = Slot.getAddress();
+ Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
+ CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
// Tell the AggExprEmitter that the slot is known zero.
Slot.setZeroed();
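The two thresholds above, applied to a concrete (illustrative) initializer:

    // 68-byte object with 4 nonzero bytes: Size > 16 and
    // NumNonZeroBytes*4 <= Size, so the memset path wins.
    struct S { char buf[64]; int tail; };
    S s = { {0}, 42 };   // ~ memset(&s, 0, sizeof(S)); then store the 42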
@@ -1405,7 +1397,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
assert(E && hasAggregateEvaluationKind(E->getType()) &&
"Invalid aggregate expression to emit");
- assert((Slot.getAddr() != nullptr || Slot.isIgnored()) &&
+ assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
"slot has bits but no address");
// Optimize the slot if possible.
@@ -1416,7 +1408,7 @@ void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
- llvm::Value *Temp = CreateMemTemp(E->getType());
+ Address Temp = CreateMemTemp(E->getType());
LValue LV = MakeAddrLValue(Temp, E->getType());
EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
@@ -1424,10 +1416,9 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
return LV;
}
-void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
- llvm::Value *SrcPtr, QualType Ty,
+void CodeGenFunction::EmitAggregateCopy(Address DestPtr,
+ Address SrcPtr, QualType Ty,
bool isVolatile,
- CharUnits alignment,
bool isAssignment) {
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
@@ -1458,17 +1449,16 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
// implementation handles this case safely. If there is a libc that does not
// safely handle this, we can add a target hook.
- // Get data size and alignment info for this aggregate. If this is an
- // assignment don't copy the tail padding. Otherwise copying it is fine.
+ // Get data size info for this aggregate. If this is an assignment,
+ // don't copy the tail padding, because we might be assigning into a
+ // base subobject where the tail padding is claimed. Otherwise,
+ // copying it is fine.
std::pair<CharUnits, CharUnits> TypeInfo;
if (isAssignment)
TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
else
TypeInfo = getContext().getTypeInfoInChars(Ty);
- if (alignment.isZero())
- alignment = TypeInfo.second;
-
llvm::Value *SizeVal = nullptr;
if (TypeInfo.first.isZero()) {
// But note that getTypeInfo returns 0 for a VLA.
@@ -1511,15 +1501,8 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
// we need to use a different call here. We use isVolatile to indicate when
// either the source or the destination is volatile.
- llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
- llvm::Type *DBP =
- llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
- DestPtr = Builder.CreateBitCast(DestPtr, DBP);
-
- llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
- llvm::Type *SBP =
- llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
- SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);
+ DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
+ SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty);
// Don't do any of the memmove_collectable tests if GC isn't set.
if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
@@ -1542,11 +1525,11 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
}
}
+ auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);
+
// Determine the metadata to describe the position of any padding in this
// memcpy, as well as the TBAA tags for the members of the struct, in case
// the optimizer wishes to expand it in to scalar memory operations.
- llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty);
-
- Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, alignment.getQuantity(),
- isVolatile, /*TBAATag=*/nullptr, TBAAStructTag);
+ if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
+ Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
}
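The data-size distinction above matters because of tail-padding reuse; a minimal illustration, assuming Itanium C++ ABI layout:

    // A derived class may pack members into a non-POD base's tail padding.
    struct A { int i; char c; A(); };  // sizeof(A) == 8, data size == 5
    struct B : A { char d; };          // d is laid out at offset 5
    // Assigning to b's A subobject must copy only 5 bytes; an 8-byte
    // memcpy would clobber b.d.  Complete-object copies (isAssignment
    // == false) may still copy the full 8 bytes.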
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index e4371bb7a55..8b060ce2874 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -166,9 +166,9 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
}
}
- llvm::Value *This;
+ Address This = Address::invalid();
if (IsArrow)
- This = EmitScalarExpr(Base);
+ This = EmitPointerWithAlignment(Base);
else
This = EmitLValue(Base).getAddress();
@@ -185,19 +185,18 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
// when it isn't necessary; just produce the proper effect here.
// Special case: skip first argument of CXXOperatorCall (it is "this").
unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
- llvm::Value *RHS =
- EmitLValue(*(CE->arg_begin() + ArgsToSkip)).getAddress();
+ Address RHS = EmitLValue(*(CE->arg_begin() + ArgsToSkip)).getAddress();
EmitAggregateAssign(This, RHS, CE->getType());
- return RValue::get(This);
+ return RValue::get(This.getPointer());
}
if (isa<CXXConstructorDecl>(MD) &&
cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
// Trivial move and copy ctor are the same.
assert(CE->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
- llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
+ Address RHS = EmitLValue(*CE->arg_begin()).getAddress();
EmitAggregateCopy(This, RHS, (*CE->arg_begin())->getType());
- return RValue::get(This);
+ return RValue::get(This.getPointer());
}
llvm_unreachable("unknown trivial member function");
}
@@ -245,7 +244,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
cast<CXXDestructorDecl>(DevirtualizedMethod);
Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
}
- EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This,
+ EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This.getPointer(),
/*ImplicitParam=*/nullptr, QualType(), CE);
}
return RValue::get(nullptr);
@@ -277,7 +276,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
*this, MD, This, UseVirtualCall);
}
- return EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This,
+ return EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This.getPointer(),
/*ImplicitParam=*/nullptr, QualType(), CE);
}
@@ -301,19 +300,20 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
// Emit the 'this' pointer.
- llvm::Value *This;
-
+ Address This = Address::invalid();
if (BO->getOpcode() == BO_PtrMemI)
- This = EmitScalarExpr(BaseExpr);
+ This = EmitPointerWithAlignment(BaseExpr);
else
This = EmitLValue(BaseExpr).getAddress();
- EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This,
+ EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
QualType(MPT->getClass(), 0));
// Ask the ABI to load the callee. Note that This is modified.
+ llvm::Value *ThisPtrForCall = nullptr;
llvm::Value *Callee =
- CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This, MemFnPtr, MPT);
+ CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
+ ThisPtrForCall, MemFnPtr, MPT);
CallArgList Args;
@@ -321,7 +321,7 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
getContext().getPointerType(getContext().getTagDeclType(RD));
// Push the this ptr.
- Args.add(RValue::get(This), ThisType);
+ Args.add(RValue::get(ThisPtrForCall), ThisType);
RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);
@@ -348,18 +348,15 @@ RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
}
static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
- llvm::Value *DestPtr,
+ Address DestPtr,
const CXXRecordDecl *Base) {
if (Base->isEmpty())
return;
- DestPtr = CGF.EmitCastToVoidPtr(DestPtr);
+ DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);
const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
- CharUnits Size = Layout.getNonVirtualSize();
- CharUnits Align = Layout.getNonVirtualAlignment();
-
- llvm::Value *SizeVal = CGF.CGM.getSize(Size);
+ llvm::Value *SizeVal = CGF.CGM.getSize(Layout.getNonVirtualSize());
// If the type contains a pointer to data member we can't memset it to zero.
// Instead, create a null constant and copy it to the destination.
@@ -375,19 +372,22 @@ static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
/*isConstant=*/true,
llvm::GlobalVariable::PrivateLinkage,
NullConstant, Twine());
+
+ CharUnits Align = std::max(Layout.getNonVirtualAlignment(),
+ DestPtr.getAlignment());
NullVariable->setAlignment(Align.getQuantity());
- llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);
+
+ Address SrcPtr = Address(CGF.EmitCastToVoidPtr(NullVariable), Align);
// Get and call the appropriate llvm.memcpy overload.
- CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
+ CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal);
return;
}
// Otherwise, just memset the whole thing to zero. This is legal
// because in LLVM, all default initializers (other than the ones we just
// handled above) are guaranteed to have a bit pattern of all zeros.
- CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
- Align.getQuantity());
+ CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal);
}
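The pointers-to-data-members special case exists because their null value is not an all-zero bit pattern; illustrative, assuming the Itanium ABI:

    // A null int S::* is represented as -1, since offset 0 is a valid
    // member offset (the first field).
    struct S { int x; };
    int S::*pm = nullptr;   // stored bit pattern: (ptrdiff_t)-1, not 0
    // Null-initializing a class with such a member therefore copies from
    // a constant whose member-pointer fields hold -1; memset to zero
    // would instead produce a pointer to the member at offset 0.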
void
@@ -404,11 +404,12 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
switch (E->getConstructionKind()) {
case CXXConstructExpr::CK_Delegating:
case CXXConstructExpr::CK_Complete:
- EmitNullInitialization(Dest.getAddr(), E->getType());
+ EmitNullInitialization(Dest.getAddress(), E->getType());
break;
case CXXConstructExpr::CK_VirtualBase:
case CXXConstructExpr::CK_NonVirtualBase:
- EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
+ EmitNullBaseClassInitialization(*this, Dest.getAddress(),
+ CD->getParent());
break;
}
}
@@ -431,7 +432,7 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
if (const ConstantArrayType *arrayType
= getContext().getAsConstantArrayType(E->getType())) {
- EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(), E);
+ EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E);
} else {
CXXCtorType Type = Ctor_Complete;
bool ForVirtualBase = false;
@@ -457,15 +458,13 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
}
// Call the constructor.
- EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest.getAddr(),
- E);
+ EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating,
+ Dest.getAddress(), E);
}
}
-void
-CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
- llvm::Value *Src,
- const Expr *Exp) {
+void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
+ const Expr *Exp) {
if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
Exp = E->getSubExpr();
assert(isa<CXXConstructExpr>(Exp) &&
@@ -759,22 +758,20 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
}
static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
- QualType AllocType, llvm::Value *NewPtr) {
+ QualType AllocType, Address NewPtr) {
// FIXME: Refactor with EmitExprAsInit.
- CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
switch (CGF.getEvaluationKind(AllocType)) {
case TEK_Scalar:
CGF.EmitScalarInit(Init, nullptr,
- CGF.MakeAddrLValue(NewPtr, AllocType, Alignment), false);
+ CGF.MakeAddrLValue(NewPtr, AllocType), false);
return;
case TEK_Complex:
- CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType,
- Alignment),
+ CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
/*isInit*/ true);
return;
case TEK_Aggregate: {
AggValueSlot Slot
- = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
+ = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
@@ -787,23 +784,27 @@ static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
void CodeGenFunction::EmitNewArrayInitializer(
const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
- llvm::Value *BeginPtr, llvm::Value *NumElements,
+ Address BeginPtr, llvm::Value *NumElements,
llvm::Value *AllocSizeWithoutCookie) {
// If we have a type with trivial initialization and no initializer,
// there's nothing to do.
if (!E->hasInitializer())
return;
- llvm::Value *CurPtr = BeginPtr;
+ Address CurPtr = BeginPtr;
unsigned InitListElements = 0;
const Expr *Init = E->getInitializer();
- llvm::AllocaInst *EndOfInit = nullptr;
+ Address EndOfInit = Address::invalid();
QualType::DestructionKind DtorKind = ElementType.isDestructedType();
EHScopeStack::stable_iterator Cleanup;
llvm::Instruction *CleanupDominator = nullptr;
+ CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
+ CharUnits ElementAlign =
+ BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);
+
// If the initializer is an initializer list, first do the explicit elements.
if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
InitListElements = ILE->getNumInits();
@@ -813,10 +814,8 @@ void CodeGenFunction::EmitNewArrayInitializer(
QualType AllocType = E->getAllocatedType();
if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
AllocType->getAsArrayTypeUnsafe())) {
- unsigned AS = CurPtr->getType()->getPointerAddressSpace();
ElementTy = ConvertTypeForMem(AllocType);
- llvm::Type *AllocPtrTy = ElementTy->getPointerTo(AS);
- CurPtr = Builder.CreateBitCast(CurPtr, AllocPtrTy);
+ CurPtr = Builder.CreateElementBitCast(CurPtr, ElementTy);
InitListElements *= getContext().getConstantArrayElementCount(CAT);
}
@@ -826,27 +825,34 @@ void CodeGenFunction::EmitNewArrayInitializer(
// directly, but the control flow can get so varied here that it
// would actually be quite complex. Therefore we go through an
// alloca.
- EndOfInit = CreateTempAlloca(BeginPtr->getType(), "array.init.end");
- CleanupDominator = Builder.CreateStore(BeginPtr, EndOfInit);
- pushIrregularPartialArrayCleanup(BeginPtr, EndOfInit, ElementType,
+ EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
+ "array.init.end");
+ CleanupDominator = Builder.CreateStore(BeginPtr.getPointer(), EndOfInit);
+ pushIrregularPartialArrayCleanup(BeginPtr.getPointer(), EndOfInit,
+ ElementType, ElementAlign,
getDestroyer(DtorKind));
Cleanup = EHStack.stable_begin();
}
+ CharUnits StartAlign = CurPtr.getAlignment();
for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
// Tell the cleanup that it needs to destroy up to this
// element. TODO: some of these stores can be trivially
// observed to be unnecessary.
- if (EndOfInit)
- Builder.CreateStore(Builder.CreateBitCast(CurPtr, BeginPtr->getType()),
- EndOfInit);
+ if (EndOfInit.isValid()) {
+ auto FinishedPtr =
+ Builder.CreateBitCast(CurPtr.getPointer(), BeginPtr.getType());
+ Builder.CreateStore(FinishedPtr, EndOfInit);
+ }
// FIXME: If the last initializer is an incomplete initializer list for
// an array, and we have an array filler, we can fold together the two
// initialization loops.
StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
ILE->getInit(i)->getType(), CurPtr);
- CurPtr = Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr, 1,
- "array.exp.next");
+ CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
+ Builder.getSize(1),
+ "array.exp.next"),
+ StartAlign.alignmentAtOffset((i + 1) * ElementSize));
}
// The remaining elements are filled with the array filler expression.
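Note the alignmentAtOffset((i + 1) * ElementSize) above: a constant index permits a sharper per-element bound than the uniform worst case. Worked numbers for a hypothetical 16-byte-aligned array of 12-byte elements:

    // StartAlign = 16, ElementSize = 12:
    //   element 1 at offset 12: largest power of 2 dividing 16|12 -> 4
    //   element 2 at offset 24:                                   -> 8
    //   element 3 at offset 36:                                   -> 4
    // The filler loop further down has a run-time index, so it falls
    // back to the uniform ElementAlign of 4.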
@@ -864,7 +870,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
}
// Switch back to initializing one base element at a time.
- CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr->getType());
+ CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType());
}
// Attempt to perform zero-initialization using memset.
@@ -889,9 +895,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
}
// Create the memset.
- CharUnits Alignment = getContext().getTypeAlignInChars(ElementType);
- Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize,
- Alignment.getQuantity(), false);
+ Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
return true;
};
@@ -925,7 +929,8 @@ void CodeGenFunction::EmitNewArrayInitializer(
//
// FIXME: Share this cleanup with the constructor call emission rather than
// having it create a cleanup of its own.
- if (EndOfInit) Builder.CreateStore(CurPtr, EndOfInit);
+ if (EndOfInit.isValid())
+ Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
// Emit a constructor call loop to initialize the remaining elements.
if (InitListElements)
@@ -985,13 +990,13 @@ void CodeGenFunction::EmitNewArrayInitializer(
// Find the end of the array, hoisted out of the loop.
llvm::Value *EndPtr =
- Builder.CreateInBoundsGEP(BeginPtr, NumElements, "array.end");
+ Builder.CreateInBoundsGEP(BeginPtr.getPointer(), NumElements, "array.end");
// If the number of elements isn't constant, we have to now check if there is
// anything left to initialize.
if (!ConstNum) {
- llvm::Value *IsEmpty = Builder.CreateICmpEQ(CurPtr, EndPtr,
- "array.isempty");
+ llvm::Value *IsEmpty =
+ Builder.CreateICmpEQ(CurPtr.getPointer(), EndPtr, "array.isempty");
Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
}
@@ -1000,16 +1005,19 @@ void CodeGenFunction::EmitNewArrayInitializer(
// Set up the current-element phi.
llvm::PHINode *CurPtrPhi =
- Builder.CreatePHI(CurPtr->getType(), 2, "array.cur");
- CurPtrPhi->addIncoming(CurPtr, EntryBB);
- CurPtr = CurPtrPhi;
+ Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
+ CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB);
+
+ CurPtr = Address(CurPtrPhi, ElementAlign);
// Store the new Cleanup position for irregular Cleanups.
- if (EndOfInit) Builder.CreateStore(CurPtr, EndOfInit);
+ if (EndOfInit.isValid())
+ Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
// Enter a partial-destruction Cleanup if necessary.
if (!CleanupDominator && needsEHCleanup(DtorKind)) {
- pushRegularPartialArrayCleanup(BeginPtr, CurPtr, ElementType,
+ pushRegularPartialArrayCleanup(BeginPtr.getPointer(), CurPtr.getPointer(),
+ ElementType, ElementAlign,
getDestroyer(DtorKind));
Cleanup = EHStack.stable_begin();
CleanupDominator = Builder.CreateUnreachable();
@@ -1026,7 +1034,8 @@ void CodeGenFunction::EmitNewArrayInitializer(
// Advance to the next element by adjusting the pointer type as necessary.
llvm::Value *NextPtr =
- Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr, 1, "array.next");
+ Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr.getPointer(), 1,
+ "array.next");
// Check whether we've gotten to the end of the array and, if so,
// exit the loop.
@@ -1039,7 +1048,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
QualType ElementType, llvm::Type *ElementTy,
- llvm::Value *NewPtr, llvm::Value *NumElements,
+ Address NewPtr, llvm::Value *NumElements,
llvm::Value *AllocSizeWithoutCookie) {
ApplyDebugLocation DL(CGF, E);
if (E->isArray())
@@ -1218,7 +1227,7 @@ namespace {
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
const CXXNewExpr *E,
- llvm::Value *NewPtr,
+ Address NewPtr,
llvm::Value *AllocSize,
const CallArgList &NewArgs) {
// If we're not inside a conditional branch, then the cleanup will
@@ -1228,7 +1237,8 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
.pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
E->getNumPlacementArgs(),
E->getOperatorDelete(),
- NewPtr, AllocSize);
+ NewPtr.getPointer(),
+ AllocSize);
for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
Cleanup->setPlacementArg(I, NewArgs[I+1].RV);
@@ -1237,7 +1247,7 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
// Otherwise, we need to save all this stuff.
DominatingValue<RValue>::saved_type SavedNewPtr =
- DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
+ DominatingValue<RValue>::save(CGF, RValue::get(NewPtr.getPointer()));
DominatingValue<RValue>::saved_type SavedAllocSize =
DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
@@ -1260,13 +1270,6 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// 1. Build a call to the allocation function.
FunctionDecl *allocator = E->getOperatorNew();
- const FunctionProtoType *allocatorType =
- allocator->getType()->castAs<FunctionProtoType>();
-
- CallArgList allocatorArgs;
-
- // The allocation size is the first argument.
- QualType sizeType = getContext().getSizeType();
// If there is a brace-initializer, we cannot allocate fewer elements than inits.
unsigned minElements = 0;
@@ -1281,24 +1284,51 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
EmitCXXNewAllocSize(*this, E, minElements, numElements,
allocSizeWithoutCookie);
- allocatorArgs.add(RValue::get(allocSize), sizeType);
-
- // We start at 1 here because the first argument (the allocation size)
- // has already been emitted.
- EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
- /* CalleeDecl */ nullptr,
- /*ParamsToSkip*/ 1);
-
// Emit the allocation call. If the allocator is a global placement
// operator, just "inline" it directly.
- RValue RV;
+ Address allocation = Address::invalid();
+ CallArgList allocatorArgs;
if (allocator->isReservedGlobalPlacementOperator()) {
- assert(allocatorArgs.size() == 2);
- RV = allocatorArgs[1].RV;
- // TODO: kill any unnecessary computations done for the size
- // argument.
+ AlignmentSource alignSource;
+ allocation = EmitPointerWithAlignment(*E->placement_arguments().begin(),
+ &alignSource);
+
+ // The pointer expression will, in many cases, be an opaque void*.
+ // In these cases, discard the computed alignment and use the
+ // formal alignment of the allocated type.
+ if (alignSource != AlignmentSource::Decl) {
+ allocation = Address(allocation.getPointer(),
+ getContext().getTypeAlignInChars(allocType));
+ }
+
} else {
- RV = EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
+ const FunctionProtoType *allocatorType =
+ allocator->getType()->castAs<FunctionProtoType>();
+
+ // The allocation size is the first argument.
+ QualType sizeType = getContext().getSizeType();
+ allocatorArgs.add(RValue::get(allocSize), sizeType);
+
+ // We start at 1 here because the first argument (the allocation size)
+ // has already been emitted.
+ EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
+ /* CalleeDecl */ nullptr,
+ /*ParamsToSkip*/ 1);
+
+ RValue RV =
+ EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
+
+ // For now, only assume that the allocation function returns
+ // something satisfactorily aligned for the element type, plus
+ // the cookie if we have one.
+ CharUnits allocationAlign =
+ getContext().getTypeAlignInChars(allocType);
+ if (allocSize != allocSizeWithoutCookie) {
+ CharUnits cookieAlign = getSizeAlign(); // FIXME?
+ allocationAlign = std::max(allocationAlign, cookieAlign);
+ }
+
+ allocation = Address(RV.getScalarVal(), allocationAlign);
}
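In user terms, the two branches above behave roughly like this (illustrative types and names):

    #include <new>

    struct Thing { double d[4]; };       // alignof(Thing) == 8

    void demo() {
      void *opaque = operator new(sizeof(Thing));
      Thing *a = new (opaque) Thing;     // void*: source is Type, so the
                                         // formal alignof(Thing) is used
      alignas(64) char buf[sizeof(Thing)];
      Thing *b = new (buf) Thing;        // buf is a declared entity: source
                                         // is Decl, 64-byte alignment kept
      (void)a; (void)b;
    }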
// Emit a null check on the allocation result if the allocation
@@ -1311,9 +1341,6 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::BasicBlock *nullCheckBB = nullptr;
llvm::BasicBlock *contBB = nullptr;
- llvm::Value *allocation = RV.getScalarVal();
- unsigned AS = allocation->getType()->getPointerAddressSpace();
-
// The null-check means that the initializer is conditionally
// evaluated.
ConditionalEvaluation conditional(*this);
@@ -1325,7 +1352,8 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
contBB = createBasicBlock("new.cont");
- llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
+ llvm::Value *isNull =
+ Builder.CreateIsNull(allocation.getPointer(), "new.isnull");
Builder.CreateCondBr(isNull, contBB, notNullBB);
EmitBlock(notNullBB);
}
@@ -1351,8 +1379,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
}
llvm::Type *elementTy = ConvertTypeForMem(allocType);
- llvm::Type *elementPtrTy = elementTy->getPointerTo(AS);
- llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);
+ Address result = Builder.CreateElementBitCast(allocation, elementTy);
EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
allocSizeWithoutCookie);
@@ -1361,7 +1388,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// allocating an array of arrays, we'll need to cast back to the
// array pointer type.
llvm::Type *resultType = ConvertTypeForMem(E->getType());
- if (result->getType() != resultType)
+ if (result.getType() != resultType)
result = Builder.CreateBitCast(result, resultType);
}
@@ -1372,21 +1399,22 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
cleanupDominator->eraseFromParent();
}
+ llvm::Value *resultPtr = result.getPointer();
if (nullCheck) {
conditional.end(*this);
llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
EmitBlock(contBB);
- llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
- PHI->addIncoming(result, notNullBB);
- PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
+ llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2);
+ PHI->addIncoming(resultPtr, notNullBB);
+ PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()),
nullCheckBB);
- result = PHI;
+ resultPtr = PHI;
}
- return result;
+ return resultPtr;
}
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
@@ -1449,7 +1477,7 @@ CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *DE,
- llvm::Value *Ptr,
+ Address Ptr,
QualType ElementType) {
// Find the destructor for the type, if applicable. If the
// destructor is virtual, we'll just emit the vcall and return.
@@ -1472,7 +1500,8 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
// to pop it off in a second.
const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
- Ptr, OperatorDelete, ElementType);
+ Ptr.getPointer(),
+ OperatorDelete, ElementType);
if (Dtor)
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
@@ -1487,14 +1516,9 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
case Qualifiers::OCL_Autoreleasing:
break;
- case Qualifiers::OCL_Strong: {
- // Load the pointer value.
- llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
- ElementType.isVolatileQualified());
-
- CGF.EmitARCRelease(PtrValue, ARCPreciseLifetime);
+ case Qualifiers::OCL_Strong:
+ CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
break;
- }
case Qualifiers::OCL_Weak:
CGF.EmitARCDestroyWeak(Ptr);
@@ -1569,7 +1593,7 @@ namespace {
/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *E,
- llvm::Value *deletedPtr,
+ Address deletedPtr,
QualType elementType) {
llvm::Value *numElements = nullptr;
llvm::Value *allocatedPtr = nullptr;
@@ -1590,13 +1614,18 @@ static void EmitArrayDelete(CodeGenFunction &CGF,
if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
assert(numElements && "no element count for a type with a destructor!");
+ CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
+ CharUnits elementAlign =
+ deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);
+
+ llvm::Value *arrayBegin = deletedPtr.getPointer();
llvm::Value *arrayEnd =
- CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");
+ CGF.Builder.CreateInBoundsGEP(arrayBegin, numElements, "delete.end");
// Note that it is legal to allocate a zero-length array, and we
// can never fold the check away because the length should always
// come from a cookie.
- CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
+ CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign,
CGF.getDestroyer(dtorKind),
/*checkZeroLength*/ true,
CGF.needsEHCleanup(dtorKind));
@@ -1608,13 +1637,13 @@ static void EmitArrayDelete(CodeGenFunction &CGF,
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
const Expr *Arg = E->getArgument();
- llvm::Value *Ptr = EmitScalarExpr(Arg);
+ Address Ptr = EmitPointerWithAlignment(Arg);
// Null check the pointer.
llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
- llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");
+ llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull");
Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
EmitBlock(DeleteNotNull);
@@ -1639,11 +1668,11 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
GEP.push_back(Zero);
}
- Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
+ Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getPointer(), GEP, "del.first"),
+ Ptr.getAlignment());
}
- assert(ConvertTypeForMem(DeleteTy) ==
- cast<llvm::PointerType>(Ptr->getType())->getElementType());
+ assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());
if (E->isArrayForm()) {
EmitArrayDelete(*this, E, Ptr, DeleteTy);
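[Note how the `del.first` GEP above rebuilds the `Address` with the original alignment: it only descends through zero indices to the first element, which sits at offset zero, so the incoming alignment stays valid. An illustrative instance with hypothetical values:

    // Sketch: an offset-zero GEP cannot lower alignment.
    llvm::Value *first =
        Builder.CreateInBoundsGEP(Ptr.getPointer(), GEP, "del.first");
    Address First(first, Ptr.getAlignment()); // element 0 is at offset 0
]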
@@ -1689,7 +1718,7 @@ static bool isGLValueFromPointerDeref(const Expr *E) {
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
llvm::Type *StdTypeInfoPtrTy) {
// Get the vtable pointer.
- llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();
+ Address ThisPtr = CGF.EmitLValue(E).getAddress();
// C++ [expr.typeid]p2:
// If the glvalue expression is obtained by applying the unary * operator to
@@ -1706,7 +1735,7 @@ static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
CGF.createBasicBlock("typeid.bad_typeid");
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");
- llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
+ llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer());
CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
CGF.EmitBlock(BadTypeidBlock);
@@ -1757,7 +1786,7 @@ static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
return llvm::UndefValue::get(DestLTy);
}
-llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
+llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
const CXXDynamicCastExpr *DCE) {
QualType DestTy = DCE->getTypeAsWritten();
@@ -1802,18 +1831,19 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
CastNull = createBasicBlock("dynamic_cast.null");
CastNotNull = createBasicBlock("dynamic_cast.notnull");
- llvm::Value *IsNull = Builder.CreateIsNull(Value);
+ llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer());
Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
EmitBlock(CastNotNull);
}
+ llvm::Value *Value;
if (isDynamicCastToVoid) {
- Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, Value, SrcRecordTy,
+ Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy,
DestTy);
} else {
assert(DestRecordTy->isRecordType() &&
"destination type must be a record type!");
- Value = CGM.getCXXABI().EmitDynamicCastCall(*this, Value, SrcRecordTy,
+ Value = CGM.getCXXABI().EmitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
DestTy, DestRecordTy, CastEnd);
}
@@ -1839,8 +1869,7 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
RunCleanupsScope Scope(*this);
- LValue SlotLV =
- MakeAddrLValue(Slot.getAddr(), E->getType(), Slot.getAlignment());
+ LValue SlotLV = MakeAddrLValue(Slot.getAddress(), E->getType());
CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
diff --git a/clang/lib/CodeGen/CGExprComplex.cpp b/clang/lib/CodeGen/CGExprComplex.cpp
index 00f9f726ec2..a2eba2b4cb1 100644
--- a/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/clang/lib/CodeGen/CGExprComplex.cpp
@@ -298,6 +298,19 @@ public:
// Utilities
//===----------------------------------------------------------------------===//
+Address CodeGenFunction::emitAddrOfRealComponent(Address addr,
+ QualType complexType) {
+ CharUnits offset = CharUnits::Zero();
+ return Builder.CreateStructGEP(addr, 0, offset, addr.getName() + ".realp");
+}
+
+Address CodeGenFunction::emitAddrOfImagComponent(Address addr,
+ QualType complexType) {
+ QualType eltType = complexType->castAs<ComplexType>()->getElementType();
+ CharUnits offset = getContext().getTypeSizeInChars(eltType);
+ return Builder.CreateStructGEP(addr, 1, offset, addr.getName() + ".imagp");
+}
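[These two helpers centralize the component addressing that the removed `std::min(AlignR, ComplexAlign)` logic below used to do by hand: the real part shares the complex's alignment (offset zero), and the imaginary part's alignment falls out of `CreateStructGEP` from the element-size offset. For example, assuming a `_Complex double` known only 4-aligned:

    // Sketch: component addresses; 'ptr' and 'ty' are hypothetical.
    Address c(ptr, CharUnits::fromQuantity(4));
    Address re = CGF.emitAddrOfRealComponent(c, ty); // offset 0 -> align 4
    Address im = CGF.emitAddrOfImagComponent(c, ty); // offset 8 -> align 4
]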
+
/// EmitLoadOfLValue - Given an RValue reference for a complex, emit code to
/// load the real and imaginary pieces, returning them as Real/Imag.
ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue,
@@ -306,29 +319,21 @@ ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue,
if (lvalue.getType()->isAtomicType())
return CGF.EmitAtomicLoad(lvalue, loc).getComplexVal();
- llvm::Value *SrcPtr = lvalue.getAddress();
+ Address SrcPtr = lvalue.getAddress();
bool isVolatile = lvalue.isVolatileQualified();
- unsigned AlignR = lvalue.getAlignment().getQuantity();
- ASTContext &C = CGF.getContext();
- QualType ComplexTy = lvalue.getType();
- unsigned ComplexAlign = C.getTypeAlignInChars(ComplexTy).getQuantity();
- unsigned AlignI = std::min(AlignR, ComplexAlign);
- llvm::Value *Real=nullptr, *Imag=nullptr;
+ llvm::Value *Real = nullptr, *Imag = nullptr;
if (!IgnoreReal || isVolatile) {
- llvm::Value *RealP = Builder.CreateStructGEP(nullptr, SrcPtr, 0,
- SrcPtr->getName() + ".realp");
- Real = Builder.CreateAlignedLoad(RealP, AlignR, isVolatile,
- SrcPtr->getName() + ".real");
+ Address RealP = CGF.emitAddrOfRealComponent(SrcPtr, lvalue.getType());
+ Real = Builder.CreateLoad(RealP, isVolatile, SrcPtr.getName() + ".real");
}
if (!IgnoreImag || isVolatile) {
- llvm::Value *ImagP = Builder.CreateStructGEP(nullptr, SrcPtr, 1,
- SrcPtr->getName() + ".imagp");
- Imag = Builder.CreateAlignedLoad(ImagP, AlignI, isVolatile,
- SrcPtr->getName() + ".imag");
+ Address ImagP = CGF.emitAddrOfImagComponent(SrcPtr, lvalue.getType());
+ Imag = Builder.CreateLoad(ImagP, isVolatile, SrcPtr.getName() + ".imag");
}
+
return ComplexPairTy(Real, Imag);
}
@@ -340,19 +345,12 @@ void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue,
(!isInit && CGF.LValueIsSuitableForInlineAtomic(lvalue)))
return CGF.EmitAtomicStore(RValue::getComplex(Val), lvalue, isInit);
- llvm::Value *Ptr = lvalue.getAddress();
- llvm::Value *RealPtr = Builder.CreateStructGEP(nullptr, Ptr, 0, "real");
- llvm::Value *ImagPtr = Builder.CreateStructGEP(nullptr, Ptr, 1, "imag");
- unsigned AlignR = lvalue.getAlignment().getQuantity();
- ASTContext &C = CGF.getContext();
- QualType ComplexTy = lvalue.getType();
- unsigned ComplexAlign = C.getTypeAlignInChars(ComplexTy).getQuantity();
- unsigned AlignI = std::min(AlignR, ComplexAlign);
-
- Builder.CreateAlignedStore(Val.first, RealPtr, AlignR,
- lvalue.isVolatileQualified());
- Builder.CreateAlignedStore(Val.second, ImagPtr, AlignI,
- lvalue.isVolatileQualified());
+ Address Ptr = lvalue.getAddress();
+ Address RealPtr = CGF.emitAddrOfRealComponent(Ptr, lvalue.getType());
+ Address ImagPtr = CGF.emitAddrOfImagComponent(Ptr, lvalue.getType());
+
+ Builder.CreateStore(Val.first, RealPtr, lvalue.isVolatileQualified());
+ Builder.CreateStore(Val.second, ImagPtr, lvalue.isVolatileQualified());
}
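[The stores above no longer spell out an alignment because the `Address`-taking `CreateStore` overload is assumed to forward it, roughly:

    // Assumed shape of the CGBuilder overload (sketch, not the real header):
    llvm::StoreInst *CreateStore(llvm::Value *Val, Address Addr,
                                 bool IsVolatile = false) {
      return CreateAlignedStore(Val, Addr.getPointer(),
                                Addr.getAlignment().getQuantity(), IsVolatile);
    }
]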
@@ -385,8 +383,8 @@ ComplexPairTy ComplexExprEmitter::VisitCallExpr(const CallExpr *E) {
ComplexPairTy ComplexExprEmitter::VisitStmtExpr(const StmtExpr *E) {
CodeGenFunction::StmtExprEvaluation eval(CGF);
- llvm::Value *RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(), true);
- assert(RetAlloca && "Expected complex return value");
+ Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(), true);
+ assert(RetAlloca.isValid() && "Expected complex return value");
return EmitLoadOfLValue(CGF.MakeAddrLValue(RetAlloca, E->getType()),
E->getExprLoc());
}
@@ -436,12 +434,9 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
case CK_LValueBitCast: {
LValue origLV = CGF.EmitLValue(Op);
- llvm::Value *V = origLV.getAddress();
- V = Builder.CreateBitCast(V,
- CGF.ConvertType(CGF.getContext().getPointerType(DestTy)));
- return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy,
- origLV.getAlignment()),
- Op->getExprLoc());
+ Address V = origLV.getAddress();
+ V = Builder.CreateElementBitCast(V, CGF.ConvertType(DestTy));
+ return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy), Op->getExprLoc());
}
case CK_BitCast:
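[`CreateElementBitCast`, used in the hunk above, is the recurring idiom in this patch for an lvalue-preserving cast: it swaps only the pointee type while keeping the address space and alignment. A sketch of what it presumably does:

    // Sketch: change the element type of an Address without touching
    // its alignment or address space.
    Address CreateElementBitCast(Address Addr, llvm::Type *Ty,
                                 const llvm::Twine &Name = "") {
      llvm::Type *PtrTy = Ty->getPointerTo(Addr.getAddressSpace());
      return Address(CreateBitCast(Addr.getPointer(), PtrTy, Name),
                     Addr.getAlignment());
    }
]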
@@ -1016,10 +1011,10 @@ ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
}
ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
- llvm::Value *ArgValue = CGF.EmitVAListRef(E->getSubExpr());
- llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, E->getType());
+ Address ArgValue = CGF.EmitVAListRef(E->getSubExpr());
+ Address ArgPtr = CGF.EmitVAArg(ArgValue, E->getType());
- if (!ArgPtr) {
+ if (!ArgPtr.isValid()) {
CGF.ErrorUnsupported(E, "complex va_arg expression");
llvm::Type *EltTy =
CGF.ConvertType(E->getType()->castAs<ComplexType>()->getElementType());
@@ -1027,7 +1022,7 @@ ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
return ComplexPairTy(U, U);
}
- return EmitLoadOfLValue(CGF.MakeNaturalAlignAddrLValue(ArgPtr, E->getType()),
+ return EmitLoadOfLValue(CGF.MakeAddrLValue(ArgPtr, E->getType()),
E->getExprLoc());
}
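[`EmitVAArg` now signals "unsupported" with `Address::invalid()` instead of a null `llvm::Value*`. The sentinel is presumably just a null pointer wrapped with a zero alignment, along these lines:

    // Sketch of the sentinel (assumed to match Address.h in spirit):
    class Address {
      llvm::Value *Pointer;
      CharUnits Alignment;
    public:
      Address(llvm::Value *P, CharUnits A) : Pointer(P), Alignment(A) {}
      static Address invalid() { return Address(nullptr, CharUnits()); }
      bool isValid() const { return Pointer != nullptr; }
    };
]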
diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp
index a15c151d6f9..1f4b1dcbe02 100644
--- a/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/clang/lib/CodeGen/CGExprConstant.cpp
@@ -977,23 +977,26 @@ public:
}
public:
- llvm::Constant *EmitLValue(APValue::LValueBase LVBase) {
+ ConstantAddress EmitLValue(APValue::LValueBase LVBase) {
if (const ValueDecl *Decl = LVBase.dyn_cast<const ValueDecl*>()) {
if (Decl->hasAttr<WeakRefAttr>())
return CGM.GetWeakRefReference(Decl);
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
- return CGM.GetAddrOfFunction(FD);
+ return ConstantAddress(CGM.GetAddrOfFunction(FD), CharUnits::One());
if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
// We can never refer to a variable with local storage.
if (!VD->hasLocalStorage()) {
+ CharUnits Align = CGM.getContext().getDeclAlign(VD);
if (VD->isFileVarDecl() || VD->hasExternalStorage())
- return CGM.GetAddrOfGlobalVar(VD);
- else if (VD->isLocalVarDecl())
- return CGM.getOrCreateStaticVarDecl(
+ return ConstantAddress(CGM.GetAddrOfGlobalVar(VD), Align);
+ else if (VD->isLocalVarDecl()) {
+ auto Ptr = CGM.getOrCreateStaticVarDecl(
*VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false));
+ return ConstantAddress(Ptr, Align);
+ }
}
}
- return nullptr;
+ return ConstantAddress::invalid();
}
Expr *E = const_cast<Expr*>(LVBase.get<const Expr*>());
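[`ConstantAddress` pairs an `llvm::Constant*` with its known alignment, so constant emission can hand back something richer than a bare pointer. A sketch of the type these hunks assume:

    // Sketch: Address specialized to constant pointers.
    class ConstantAddress : public Address {
    public:
      ConstantAddress(llvm::Constant *P, CharUnits A) : Address(P, A) {}
      static ConstantAddress invalid() {
        return ConstantAddress(nullptr, CharUnits());
      }
      llvm::Constant *getPointer() const {
        return llvm::cast<llvm::Constant>(Address::getPointer());
      }
      ConstantAddress getElementBitCast(llvm::Type *ElemTy) const; // keeps align
    };
]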
@@ -1006,14 +1009,18 @@ public:
llvm::Constant* C = CGM.EmitConstantExpr(CLE->getInitializer(),
CLE->getType(), CGF);
// FIXME: "Leaked" on failure.
- if (C)
- C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
+ if (!C) return ConstantAddress::invalid();
+
+ CharUnits Align = CGM.getContext().getTypeAlignInChars(E->getType());
+
+ auto GV = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
E->getType().isConstant(CGM.getContext()),
llvm::GlobalValue::InternalLinkage,
C, ".compoundliteral", nullptr,
llvm::GlobalVariable::NotThreadLocal,
CGM.getContext().getTargetAddressSpace(E->getType()));
- return C;
+ GV->setAlignment(Align.getQuantity());
+ return ConstantAddress(GV, Align);
}
case Expr::StringLiteralClass:
return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
@@ -1021,15 +1028,15 @@ public:
return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
case Expr::ObjCStringLiteralClass: {
ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
- llvm::Constant *C =
+ ConstantAddress C =
CGM.getObjCRuntime().GenerateConstantString(SL->getString());
- return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
+ return C.getElementBitCast(ConvertType(E->getType()));
}
case Expr::PredefinedExprClass: {
unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
if (CGF) {
LValue Res = CGF->EmitPredefinedLValue(cast<PredefinedExpr>(E));
- return cast<llvm::Constant>(Res.getAddress());
+ return cast<ConstantAddress>(Res.getAddress());
} else if (Type == PredefinedExpr::PrettyFunction) {
return CGM.GetAddrOfConstantCString("top level", ".tmp");
}
@@ -1040,7 +1047,8 @@ public:
assert(CGF && "Invalid address of label expression outside function.");
llvm::Constant *Ptr =
CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
- return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
+ Ptr = llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
+ return ConstantAddress(Ptr, CharUnits::One());
}
case Expr::CallExprClass: {
CallExpr* CE = cast<CallExpr>(E);
@@ -1066,7 +1074,10 @@ public:
else
FunctionName = "global";
- return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
+ // This is not really an l-value.
+ llvm::Constant *Ptr =
+ CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
+ return ConstantAddress(Ptr, CGM.getPointerAlign());
}
case Expr::CXXTypeidExprClass: {
CXXTypeidExpr *Typeid = cast<CXXTypeidExpr>(E);
@@ -1075,7 +1086,8 @@ public:
T = Typeid->getTypeOperand(CGM.getContext());
else
T = Typeid->getExprOperand()->getType();
- return CGM.GetAddrOfRTTIDescriptor(T);
+ return ConstantAddress(CGM.GetAddrOfRTTIDescriptor(T),
+ CGM.getPointerAlign());
}
case Expr::CXXUuidofExprClass: {
return CGM.GetAddrOfUuidDescriptor(cast<CXXUuidofExpr>(E));
@@ -1091,7 +1103,7 @@ public:
}
}
- return nullptr;
+ return ConstantAddress::invalid();
}
};
@@ -1255,7 +1267,7 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
llvm::Constant *Offset =
llvm::ConstantInt::get(Int64Ty, Value.getLValueOffset().getQuantity());
- llvm::Constant *C;
+ llvm::Constant *C = nullptr;
if (APValue::LValueBase LVBase = Value.getLValueBase()) {
// An array can be represented as an lvalue referring to the base.
if (isa<llvm::ArrayType>(DestTy)) {
@@ -1264,7 +1276,7 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
const_cast<Expr*>(LVBase.get<const Expr*>()));
}
- C = ConstExprEmitter(*this, CGF).EmitLValue(LVBase);
+ C = ConstExprEmitter(*this, CGF).EmitLValue(LVBase).getPointer();
// Apply offset if necessary.
if (!Offset->isNullValue()) {
@@ -1438,7 +1450,7 @@ CodeGenModule::EmitConstantValueForMemory(const APValue &Value,
return C;
}
-llvm::Constant *
+ConstantAddress
CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) {
assert(E->isFileScope() && "not a file-scope compound literal expr");
return ConstExprEmitter(*this, nullptr).EmitLValue(E);
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index 4902f79f817..2bc264cfa12 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -363,7 +363,7 @@ public:
if (isa<MemberPointerType>(E->getType())) // never sugared
return CGF.CGM.getMemberPointerConstant(E);
- return EmitLValue(E->getSubExpr()).getAddress();
+ return EmitLValue(E->getSubExpr()).getPointer();
}
Value *VisitUnaryDeref(const UnaryOperator *E) {
if (E->getType()->isVoidType())
@@ -1327,13 +1327,13 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
return V;
}
-static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
+bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
const Expr *E = CE->getSubExpr();
if (CE->getCastKind() == CK_UncheckedDerivedToBase)
return false;
- if (isa<CXXThisExpr>(E)) {
+ if (isa<CXXThisExpr>(E->IgnoreParens())) {
// We always assume that 'this' is never null.
return false;
}
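[The `IgnoreParens()` change is a small semantic fix: a parenthesized `this` should skip the null check just like a bare `this`. Illustrative source input (hypothetical, not a test from the patch):

    // Sketch: both derived-to-base casts can omit the null check now.
    struct Base { int b; };
    struct Derived : Base {
      Base *get()  { return this; }    // 'this' assumed non-null
      Base *get2() { return (this); }  // previously null-checked; now the same
    };
]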
@@ -1368,11 +1368,10 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_LValueBitCast:
case CK_ObjCObjectLValueCast: {
- Value *V = EmitLValue(E).getAddress();
- V = Builder.CreateBitCast(V,
- ConvertType(CGF.getContext().getPointerType(DestTy)));
- return EmitLoadOfLValue(CGF.MakeNaturalAlignAddrLValue(V, DestTy),
- CE->getExprLoc());
+ Address Addr = EmitLValue(E).getAddress();
+ Addr = Builder.CreateElementBitCast(Addr, ConvertType(DestTy));
+ LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
+ return EmitLoadOfLValue(LV, CE->getExprLoc());
}
case CK_CPointerToObjCPointerCast:
@@ -1412,68 +1411,44 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
- llvm::Value *V = Visit(E);
-
- llvm::Value *Derived =
- CGF.GetAddressOfDerivedClass(V, DerivedClassDecl,
+ Address Base = CGF.EmitPointerWithAlignment(E);
+ Address Derived =
+ CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
CE->path_begin(), CE->path_end(),
- ShouldNullCheckClassCastValue(CE));
+ CGF.ShouldNullCheckClassCastValue(CE));
// C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
// performed and the object is not of the derived type.
if (CGF.sanitizePerformTypeCheck())
CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
- Derived, DestTy->getPointeeType());
+ Derived.getPointer(), DestTy->getPointeeType());
if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
- CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
+ CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(),
+ Derived.getPointer(),
/*MayBeNull=*/true,
CodeGenFunction::CFITCK_DerivedCast,
CE->getLocStart());
- return Derived;
+ return Derived.getPointer();
}
case CK_UncheckedDerivedToBase:
case CK_DerivedToBase: {
- const CXXRecordDecl *DerivedClassDecl =
- E->getType()->getPointeeCXXRecordDecl();
- assert(DerivedClassDecl && "DerivedToBase arg isn't a C++ object pointer!");
-
- return CGF.GetAddressOfBaseClass(
- Visit(E), DerivedClassDecl, CE->path_begin(), CE->path_end(),
- ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
+ // The EmitPointerWithAlignment path does this fine; just discard
+ // the alignment.
+ return CGF.EmitPointerWithAlignment(CE).getPointer();
}
+
case CK_Dynamic: {
- Value *V = Visit(const_cast<Expr*>(E));
+ Address V = CGF.EmitPointerWithAlignment(E);
const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
return CGF.EmitDynamicCast(V, DCE);
}
- case CK_ArrayToPointerDecay: {
- assert(E->getType()->isArrayType() &&
- "Array to pointer decay must have array source type!");
-
- Value *V = EmitLValue(E).getAddress(); // Bitfields can't be arrays.
-
- // Note that VLA pointers are always decayed, so we don't need to do
- // anything here.
- if (!E->getType()->isVariableArrayType()) {
- assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
- llvm::Type *NewTy = ConvertType(E->getType());
- V = CGF.Builder.CreatePointerCast(
- V, NewTy->getPointerTo(V->getType()->getPointerAddressSpace()));
-
- assert(isa<llvm::ArrayType>(V->getType()->getPointerElementType()) &&
- "Expected pointer to array");
- V = Builder.CreateStructGEP(NewTy, V, 0, "arraydecay");
- }
-
- // Make sure the array decay ends up being the right type. This matters if
- // the array type was of an incomplete type.
- return CGF.Builder.CreatePointerCast(V, ConvertType(CE->getType()));
- }
+ case CK_ArrayToPointerDecay:
+ return CGF.EmitArrayToPointerDecay(E).getPointer();
case CK_FunctionToPointerDecay:
- return EmitLValue(E).getAddress();
+ return EmitLValue(E).getPointer();
case CK_NullToPointer:
if (MustVisitNullValue(E))
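[The long open-coded `CK_ArrayToPointerDecay` case above collapses into a call to `EmitArrayToPointerDecay`, which lives elsewhere in the patch (CGExpr.cpp). Presumably it does the same work while keeping the alignment, roughly:

    // Sketch of the assumed helper; the real one is in CGExpr.cpp.
    Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E) {
      assert(E->getType()->isArrayType() && "decay of non-array?");
      Address Addr = EmitLValue(E).getAddress(); // the array's own alignment
      // VLA pointers are already decayed; constant arrays still need the
      // [N x T]* -> T* step, which is an offset-zero GEP.
      if (!E->getType()->isVariableArrayType())
        Addr = Builder.CreateStructGEP(Addr, 0, CharUnits::Zero(), "arraydecay");
      return Addr; // element 0 inherits the array's alignment
    }
]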
@@ -1609,9 +1584,9 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
CodeGenFunction::StmtExprEvaluation eval(CGF);
- llvm::Value *RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
- !E->getType()->isVoidType());
- if (!RetAlloca)
+ Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
+ !E->getType()->isVoidType());
+ if (!RetAlloca.isValid())
return nullptr;
return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
E->getExprLoc());
@@ -1667,16 +1642,14 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (isInc && type->isBooleanType()) {
llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
if (isPre) {
- Builder.Insert(new llvm::StoreInst(True,
- LV.getAddress(), LV.isVolatileQualified(),
- LV.getAlignment().getQuantity(),
- llvm::SequentiallyConsistent));
+ Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
+ ->setAtomic(llvm::SequentiallyConsistent);
return Builder.getTrue();
}
// For atomic bool increment, we just store true and return it for
// preincrement, do an atomic swap with true for postincrement
return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
- LV.getAddress(), True, llvm::SequentiallyConsistent);
+ LV.getPointer(), True, llvm::SequentiallyConsistent);
}
// Special case for atomic increment / decrement on integers, emit
// atomicrmw instructions. We skip this if we want to be doing overflow
@@ -1693,7 +1666,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *amt = CGF.EmitToMemory(
llvm::ConstantInt::get(ConvertType(type), 1, true), type);
llvm::Value *old = Builder.CreateAtomicRMW(aop,
- LV.getAddress(), amt, llvm::SequentiallyConsistent);
+ LV.getPointer(), amt, llvm::SequentiallyConsistent);
return isPre ? Builder.CreateBinOp(op, old, amt) : old;
}
value = EmitLoadOfLValue(LV, E->getExprLoc());
@@ -2174,7 +2147,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
E->getExprLoc()),
LHSTy);
- Builder.CreateAtomicRMW(aop, LHSLV.getAddress(), amt,
+ Builder.CreateAtomicRMW(aop, LHSLV.getPointer(), amt,
llvm::SequentiallyConsistent);
return LHSLV;
}
@@ -3384,13 +3357,13 @@ Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
if (Ty->isVariablyModifiedType())
CGF.EmitVariablyModifiedType(Ty);
- llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
- llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
+ Address ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
+ Address ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
llvm::Type *ArgTy = ConvertType(VE->getType());
// If EmitVAArg fails, we fall back to the LLVM instruction.
- if (!ArgPtr)
- return Builder.CreateVAArg(ArgValue, ArgTy);
+ if (!ArgPtr.isValid())
+ return Builder.CreateVAArg(ArgValue.getPointer(), ArgTy);
// FIXME Volatility.
llvm::Value *Val = Builder.CreateLoad(ArgPtr);
@@ -3507,30 +3480,20 @@ EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
}
LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
- llvm::Value *V;
// object->isa or (*object).isa
// Generate code as for: *(Class*)object
- // build Class* type
- llvm::Type *ClassPtrTy = ConvertType(E->getType());
Expr *BaseExpr = E->getBase();
+ Address Addr = Address::invalid();
if (BaseExpr->isRValue()) {
- V = CreateMemTemp(E->getType(), "resval");
- llvm::Value *Src = EmitScalarExpr(BaseExpr);
- Builder.CreateStore(Src, V);
- V = ScalarExprEmitter(*this).EmitLoadOfLValue(
- MakeNaturalAlignAddrLValue(V, E->getType()), E->getExprLoc());
+ Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
} else {
- if (E->isArrow())
- V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr);
- else
- V = EmitLValue(BaseExpr).getAddress();
+ Addr = EmitLValue(BaseExpr).getAddress();
}
- // build Class* type
- ClassPtrTy = ClassPtrTy->getPointerTo();
- V = Builder.CreateBitCast(V, ClassPtrTy);
- return MakeNaturalAlignAddrLValue(V, E->getType());
+ // Cast the address to Class*.
+ Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
+ return MakeAddrLValue(Addr, E->getType());
}
diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp
index 4f0c42a0e83..242b3d5a735 100644
--- a/clang/lib/CodeGen/CGObjC.cpp
+++ b/clang/lib/CodeGen/CGObjC.cpp
@@ -37,9 +37,8 @@ static RValue AdjustObjCObjectType(CodeGenFunction &CGF,
/// Given the address of a variable of pointer type, find the correct
/// null to store into it.
-static llvm::Constant *getNullForVariable(llvm::Value *addr) {
- llvm::Type *type =
- cast<llvm::PointerType>(addr->getType())->getElementType();
+static llvm::Constant *getNullForVariable(Address addr) {
+ llvm::Type *type = addr.getElementType();
return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
}
@@ -47,7 +46,7 @@ static llvm::Constant *getNullForVariable(llvm::Value *addr) {
llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
llvm::Constant *C =
- CGM.getObjCRuntime().GenerateConstantString(E->getString());
+ CGM.getObjCRuntime().GenerateConstantString(E->getString()).getPointer();
// FIXME: This bitcast should just be made an invariant on the Runtime.
return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
}
@@ -84,16 +83,15 @@ CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
if (ValueType->isObjCBoxableRecordType()) {
// Emit CodeGen for first parameter
// and cast value to correct type
- llvm::Value *Temporary = CreateMemTemp(SubExpr->getType());
+ Address Temporary = CreateMemTemp(SubExpr->getType());
EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
- llvm::Value *BitCast = Builder.CreateBitCast(Temporary,
- ConvertType(ArgQT));
- Args.add(RValue::get(BitCast), ArgQT);
+ Address BitCast = Builder.CreateBitCast(Temporary, ConvertType(ArgQT));
+ Args.add(RValue::get(BitCast.getPointer()), ArgQT);
// Create char array to store type encoding
std::string Str;
getContext().getObjCEncodingForType(ValueType, Str);
- llvm::GlobalVariable *GV = CGM.GetAddrOfConstantCString(Str);
+ llvm::Constant *GV = CGM.GetAddrOfConstantCString(Str).getPointer();
// Cast type encoding to correct type
const ParmVarDecl *EncodingDecl = BoxingMethod->parameters()[1];
@@ -131,8 +129,8 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
ArrayType::Normal, /*IndexTypeQuals=*/0);
// Allocate the temporary array(s).
- llvm::AllocaInst *Objects = CreateMemTemp(ElementArrayType, "objects");
- llvm::AllocaInst *Keys = nullptr;
+ Address Objects = CreateMemTemp(ElementArrayType, "objects");
+ Address Keys = Address::invalid();
if (DLE)
Keys = CreateMemTemp(ElementArrayType, "keys");
@@ -148,9 +146,9 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
if (ALE) {
// Emit the element and store it to the appropriate array slot.
const Expr *Rhs = ALE->getElement(i);
- LValue LV = LValue::MakeAddr(
- Builder.CreateStructGEP(Objects->getAllocatedType(), Objects, i),
- ElementType, Context.getTypeAlignInChars(Rhs->getType()), Context);
+ LValue LV = MakeAddrLValue(
+ Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
+ ElementType, AlignmentSource::Decl);
llvm::Value *value = EmitScalarExpr(Rhs);
EmitStoreThroughLValue(RValue::get(value), LV, true);
@@ -160,17 +158,17 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
} else {
// Emit the key and store it to the appropriate array slot.
const Expr *Key = DLE->getKeyValueElement(i).Key;
- LValue KeyLV = LValue::MakeAddr(
- Builder.CreateStructGEP(Keys->getAllocatedType(), Keys, i),
- ElementType, Context.getTypeAlignInChars(Key->getType()), Context);
+ LValue KeyLV = MakeAddrLValue(
+ Builder.CreateConstArrayGEP(Keys, i, getPointerSize()),
+ ElementType, AlignmentSource::Decl);
llvm::Value *keyValue = EmitScalarExpr(Key);
EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true);
// Emit the value and store it to the appropriate array slot.
const Expr *Value = DLE->getKeyValueElement(i).Value;
- LValue ValueLV = LValue::MakeAddr(
- Builder.CreateStructGEP(Objects->getAllocatedType(), Objects, i),
- ElementType, Context.getTypeAlignInChars(Value->getType()), Context);
+ LValue ValueLV = MakeAddrLValue(
+ Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
+ ElementType, AlignmentSource::Decl);
llvm::Value *valueValue = EmitScalarExpr(Value);
EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true);
if (TrackNeededObjects) {
@@ -185,11 +183,11 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
const ParmVarDecl *argDecl = *PI++;
QualType ArgQT = argDecl->getType().getUnqualifiedType();
- Args.add(RValue::get(Objects), ArgQT);
+ Args.add(RValue::get(Objects.getPointer()), ArgQT);
if (DLE) {
argDecl = *PI++;
ArgQT = argDecl->getType().getUnqualifiedType();
- Args.add(RValue::get(Keys), ArgQT);
+ Args.add(RValue::get(Keys.getPointer()), ArgQT);
}
argDecl = *PI;
ArgQT = argDecl->getType().getUnqualifiedType();
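[`CreateConstArrayGEP`, used above for the literal's element slots, takes the element size so it can derive the slot's alignment from its byte offset. An assumed shape:

    // Sketch: address of element Index in a constant array, with the
    // alignment that slot actually has at offset Index * EltSize.
    Address CreateConstArrayGEP(Address Addr, uint64_t Index, CharUnits EltSize,
                                const llvm::Twine &Name = "") {
      llvm::Value *GEP = CreateInBoundsGEP(
          Addr.getPointer(), {getInt64(0), getInt64(Index)}, Name);
      return Address(GEP,
                     Addr.getAlignment().alignmentAtOffset(EltSize * Index));
    }
]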
@@ -404,10 +402,8 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
"delegate init calls should only be marked in ARC");
// Do an unsafe store of null into self.
- llvm::Value *selfAddr =
- LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
- assert(selfAddr && "no self entry for a delegate init call?");
-
+ Address selfAddr =
+ GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
}
@@ -434,14 +430,13 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
// For delegate init calls in ARC, implicitly store the result of
// the call back into self. This takes ownership of the value.
if (isDelegateInit) {
- llvm::Value *selfAddr =
- LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
+ Address selfAddr =
+ GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
llvm::Value *newSelf = result.getScalarVal();
// The delegate return type isn't necessarily a matching type; in
// fact, it's quite likely to be 'id'.
- llvm::Type *selfTy =
- cast<llvm::PointerType>(selfAddr->getType())->getElementType();
+ llvm::Type *selfTy = selfAddr.getElementType();
newSelf = Builder.CreateBitCast(newSelf, selfTy);
Builder.CreateStore(newSelf, selfAddr);
@@ -536,19 +531,19 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
bool isAtomic, bool hasStrong) {
ASTContext &Context = CGF.getContext();
- llvm::Value *src =
- CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(),
- ivar, 0).getAddress();
+ Address src =
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
+ .getAddress();
// objc_copyStruct (ReturnValue, &structIvar,
// sizeof (Type of Ivar), isAtomic, false);
CallArgList args;
- llvm::Value *dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
- args.add(RValue::get(dest), Context.VoidPtrTy);
+ Address dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
+ args.add(RValue::get(dest.getPointer()), Context.VoidPtrTy);
src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
- args.add(RValue::get(src), Context.VoidPtrTy);
+ args.add(RValue::get(src.getPointer()), Context.VoidPtrTy);
CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
@@ -812,8 +807,8 @@ static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
// The 2nd argument is the address of the ivar.
llvm::Value *ivarAddr =
- CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
- CGF.LoadObjCSelf(), ivar, 0).getAddress();
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
+ CGF.LoadObjCSelf(), ivar, 0).getPointer();
ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
@@ -843,7 +838,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
}
else {
ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
- emitCPPObjectAtomicGetterCall(*this, ReturnValue,
+ emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(),
ivar, AtomicHelperFn);
}
return;
@@ -873,10 +868,9 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay
// Perform an atomic load. This does not impose ordering constraints.
- llvm::Value *ivarAddr = LV.getAddress();
+ Address ivarAddr = LV.getAddress();
ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
- load->setAlignment(strategy.getIvarAlignment().getQuantity());
load->setAtomic(llvm::Unordered);
// Store that value into the return address. Doing this with a
@@ -901,7 +895,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
// FIXME: Can't this be simpler? This might even be worse than the
// corresponding gcc code.
llvm::Value *cmd =
- Builder.CreateLoad(LocalDeclMap[getterMethod->getCmdDecl()], "cmd");
+ Builder.CreateLoad(GetAddrOfLocalVar(getterMethod->getCmdDecl()), "cmd");
llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
llvm::Value *ivarOffset =
EmitIvarOffset(classImpl->getClassInterface(), ivar);
@@ -952,8 +946,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
switch (getEvaluationKind(ivarType)) {
case TEK_Complex: {
ComplexPairTy pair = EmitLoadOfComplex(LV, SourceLocation());
- EmitStoreOfComplex(pair,
- MakeNaturalAlignAddrLValue(ReturnValue, ivarType),
+ EmitStoreOfComplex(pair, MakeAddrLValue(ReturnValue, ivarType),
/*init*/ true);
return;
}
@@ -966,7 +959,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
case TEK_Scalar: {
llvm::Value *value;
if (propType->isReferenceType()) {
- value = LV.getAddress();
+ value = LV.getAddress().getPointer();
} else {
// We want to load and autoreleaseReturnValue ARC __weak ivars.
if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
@@ -1006,7 +999,7 @@ static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
// The first argument is the address of the ivar.
llvm::Value *ivarAddr = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
CGF.LoadObjCSelf(), ivar, 0)
- .getAddress();
+ .getPointer();
ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
@@ -1014,7 +1007,7 @@ static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
ParmVarDecl *argVar = *OMD->param_begin();
DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
VK_LValue, SourceLocation());
- llvm::Value *argAddr = CGF.EmitLValue(&argRef).getAddress();
+ llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
@@ -1052,7 +1045,7 @@ static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
// The first argument is the address of the ivar.
llvm::Value *ivarAddr =
CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
- CGF.LoadObjCSelf(), ivar, 0).getAddress();
+ CGF.LoadObjCSelf(), ivar, 0).getPointer();
ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
@@ -1060,7 +1053,7 @@ static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
ParmVarDecl *argVar = *OMD->param_begin();
DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
VK_LValue, SourceLocation());
- llvm::Value *argAddr = CGF.EmitLValue(&argRef).getAddress();
+ llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
@@ -1135,29 +1128,27 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
if (strategy.getIvarSize().isZero())
return;
- llvm::Value *argAddr = LocalDeclMap[*setterMethod->param_begin()];
+ Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
LValue ivarLValue =
EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
- llvm::Value *ivarAddr = ivarLValue.getAddress();
+ Address ivarAddr = ivarLValue.getAddress();
// Currently, all atomic accesses have to be through integer
// types, so there's no point in trying to pick a prettier type.
llvm::Type *bitcastType =
llvm::Type::getIntNTy(getLLVMContext(),
getContext().toBits(strategy.getIvarSize()));
- bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay
// Cast both arguments to the chosen operation type.
- argAddr = Builder.CreateBitCast(argAddr, bitcastType);
- ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
+ argAddr = Builder.CreateElementBitCast(argAddr, bitcastType);
+ ivarAddr = Builder.CreateElementBitCast(ivarAddr, bitcastType);
// This bitcast load is likely to cause some nasty IR.
llvm::Value *load = Builder.CreateLoad(argAddr);
// Perform an atomic store. There are no memory ordering requirements.
llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
- store->setAlignment(strategy.getIvarAlignment().getQuantity());
store->setAtomic(llvm::Unordered);
return;
}
@@ -1189,13 +1180,14 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
// Emit objc_setProperty((id) self, _cmd, offset, arg,
// <is-atomic>, <is-copy>).
llvm::Value *cmd =
- Builder.CreateLoad(LocalDeclMap[setterMethod->getCmdDecl()]);
+ Builder.CreateLoad(GetAddrOfLocalVar(setterMethod->getCmdDecl()));
llvm::Value *self =
Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
llvm::Value *ivarOffset =
EmitIvarOffset(classImpl->getClassInterface(), ivar);
- llvm::Value *arg = LocalDeclMap[*setterMethod->param_begin()];
- arg = Builder.CreateBitCast(Builder.CreateLoad(arg, "arg"), VoidPtrTy);
+ Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
+ llvm::Value *arg = Builder.CreateLoad(argAddr, "arg");
+ arg = Builder.CreateBitCast(arg, VoidPtrTy);
CallArgList args;
args.add(RValue::get(self), getContext().getObjCIdType());
@@ -1328,7 +1320,7 @@ namespace {
/// Like CodeGenFunction::destroyARCStrong, but do it with a call.
static void destroyARCStrongWithStore(CodeGenFunction &CGF,
- llvm::Value *addr,
+ Address addr,
QualType type) {
llvm::Value *null = getNullForVariable(addr);
CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
@@ -1458,7 +1450,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Fast enumeration state.
QualType StateTy = CGM.getObjCFastEnumerationStateType();
- llvm::AllocaInst *StatePtr = CreateMemTemp(StateTy, "state.ptr");
+ Address StatePtr = CreateMemTemp(StateTy, "state.ptr");
EmitNullInitialization(StatePtr, StateTy);
// Number of elements in the items array.
@@ -1477,7 +1469,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
getContext().getConstantArrayType(getContext().getObjCIdType(),
llvm::APInt(32, NumItems),
ArrayType::Normal, 0);
- llvm::Value *ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
+ Address ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
// Emit the collection pointer. In ARC, we do a retain.
llvm::Value *Collection;
@@ -1498,14 +1490,16 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
CallArgList Args;
// The first argument is a temporary of the enumeration-state type.
- Args.add(RValue::get(StatePtr), getContext().getPointerType(StateTy));
+ Args.add(RValue::get(StatePtr.getPointer()),
+ getContext().getPointerType(StateTy));
// The second argument is a temporary array with space for NumItems
// pointers. We'll actually be loading elements from the array
// pointer written into the control state; this buffer is so that
// collections that *aren't* backed by arrays can still queue up
// batches of elements.
- Args.add(RValue::get(ItemsPtr), getContext().getPointerType(ItemsTy));
+ Args.add(RValue::get(ItemsPtr.getPointer()),
+ getContext().getPointerType(ItemsTy));
// The third argument is the capacity of that temporary array.
llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
@@ -1542,13 +1536,14 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Save the initial mutations value. This is the value at an
// address that was written into the state object by
// countByEnumeratingWithState:objects:count:.
- llvm::Value *StateMutationsPtrPtr = Builder.CreateStructGEP(
- StatePtr->getAllocatedType(), StatePtr, 2, "mutationsptr.ptr");
- llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr,
- "mutationsptr");
+ Address StateMutationsPtrPtr = Builder.CreateStructGEP(
+ StatePtr, 2, 2 * getPointerSize(), "mutationsptr.ptr");
+ llvm::Value *StateMutationsPtr
+ = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
llvm::Value *initialMutations =
- Builder.CreateLoad(StateMutationsPtr, "forcoll.initial-mutations");
+ Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
+ "forcoll.initial-mutations");
// Start looping. This is the point we return to whenever we have a
// fresh, non-empty batch of objects.
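[The explicit offsets fed to `CreateStructGEP` here (`getPointerSize()` and `2 * getPointerSize()`) encode an assumption about the fast-enumeration state's layout, namely that `unsigned long` is pointer-sized on the targets involved:

    // Sketch of the assumed layout (LP64 offsets in comments):
    struct FastEnumerationState {
      unsigned long state;          // offset 0
      id *itemsPtr;                 // offset 8  == getPointerSize()
      unsigned long *mutationsPtr;  // offset 16 == 2 * getPointerSize()
      unsigned long extra[5];
    };
]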
@@ -1570,7 +1565,8 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// refreshes.
StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
llvm::Value *currentMutations
- = Builder.CreateLoad(StateMutationsPtr, "statemutations");
+ = Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
+ "statemutations");
llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated");
llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated");
@@ -1623,15 +1619,16 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Fetch the buffer out of the enumeration state.
// TODO: this pointer should actually be invariant between
// refreshes, which would help us do certain loop optimizations.
- llvm::Value *StateItemsPtr = Builder.CreateStructGEP(
- StatePtr->getAllocatedType(), StatePtr, 1, "stateitems.ptr");
+ Address StateItemsPtr = Builder.CreateStructGEP(
+ StatePtr, 1, getPointerSize(), "stateitems.ptr");
llvm::Value *EnumStateItems =
Builder.CreateLoad(StateItemsPtr, "stateitems");
// Fetch the value at the current index from the buffer.
llvm::Value *CurrentItemPtr =
Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr");
- llvm::Value *CurrentItem = Builder.CreateLoad(CurrentItemPtr);
+ llvm::Value *CurrentItem =
+ Builder.CreateAlignedLoad(CurrentItemPtr, getPointerAlign());
// Cast that value to the right type.
CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType,
@@ -1838,7 +1835,7 @@ static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
/// Perform an operation having the following signature:
/// i8* (i8**)
static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
- llvm::Value *addr,
+ Address addr,
llvm::Constant *&fn,
StringRef fnName) {
if (!fn) {
@@ -1848,16 +1845,15 @@ static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
}
// Cast the argument to 'id*'.
- llvm::Type *origType = addr->getType();
+ llvm::Type *origType = addr.getElementType();
addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
// Call the function.
- llvm::Value *result = CGF.EmitNounwindRuntimeCall(fn, addr);
+ llvm::Value *result = CGF.EmitNounwindRuntimeCall(fn, addr.getPointer());
// Cast the result back to a dereference of the original type.
- if (origType != CGF.Int8PtrPtrTy)
- result = CGF.Builder.CreateBitCast(result,
- cast<llvm::PointerType>(origType)->getElementType());
+ if (origType != CGF.Int8PtrTy)
+ result = CGF.Builder.CreateBitCast(result, origType);
return result;
}
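[Note the subtle retyping in this hunk: `origType` used to be the pointer type (an `id*`-like type) and is now the element type, so the early-out compares against `Int8PtrTy` rather than `Int8PtrPtrTy`. Walking through a load of a hypothetical `__weak MyObject *x`:

    // Sketch: emitARCLoadOperation on the address of '__weak MyObject *x'.
    llvm::Type *origType = addr.getElementType();             // MyObject*
    addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy); // viewed as i8**
    llvm::Value *result =
        CGF.EmitNounwindRuntimeCall(fn, addr.getPointer());   // returns i8*
    if (origType != CGF.Int8PtrTy)                            // element compare
      result = CGF.Builder.CreateBitCast(result, origType);   // i8* -> MyObject*
]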
@@ -1865,13 +1861,12 @@ static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
/// Perform an operation having the following signature:
/// i8* (i8**, i8*)
static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
- llvm::Value *addr,
+ Address addr,
llvm::Value *value,
llvm::Constant *&fn,
StringRef fnName,
bool ignored) {
- assert(cast<llvm::PointerType>(addr->getType())->getElementType()
- == value->getType());
+ assert(addr.getElementType() == value->getType());
if (!fn) {
llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrTy };
@@ -1884,7 +1879,7 @@ static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
llvm::Type *origType = value->getType();
llvm::Value *args[] = {
- CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy),
+ CGF.Builder.CreateBitCast(addr.getPointer(), CGF.Int8PtrPtrTy),
CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy)
};
llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args);
@@ -1897,11 +1892,11 @@ static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
/// Perform an operation having the following signature:
/// void (i8**, i8**)
static void emitARCCopyOperation(CodeGenFunction &CGF,
- llvm::Value *dst,
- llvm::Value *src,
+ Address dst,
+ Address src,
llvm::Constant *&fn,
StringRef fnName) {
- assert(dst->getType() == src->getType());
+ assert(dst.getType() == src.getType());
if (!fn) {
llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrPtrTy };
@@ -1912,8 +1907,8 @@ static void emitARCCopyOperation(CodeGenFunction &CGF,
}
llvm::Value *args[] = {
- CGF.Builder.CreateBitCast(dst, CGF.Int8PtrPtrTy),
- CGF.Builder.CreateBitCast(src, CGF.Int8PtrPtrTy)
+ CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy),
+ CGF.Builder.CreateBitCast(src.getPointer(), CGF.Int8PtrPtrTy)
};
CGF.EmitNounwindRuntimeCall(fn, args);
}
@@ -2050,12 +2045,10 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value,
/// At -O1 and above, just load and call objc_release.
///
/// call void \@objc_storeStrong(i8** %addr, i8* null)
-void CodeGenFunction::EmitARCDestroyStrong(llvm::Value *addr,
+void CodeGenFunction::EmitARCDestroyStrong(Address addr,
ARCPreciseLifetime_t precise) {
if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
- llvm::PointerType *addrTy = cast<llvm::PointerType>(addr->getType());
- llvm::Value *null = llvm::ConstantPointerNull::get(
- cast<llvm::PointerType>(addrTy->getElementType()));
+ llvm::Value *null = getNullForVariable(addr);
EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
return;
}
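[For completeness, the -O1+ path that follows this early return (outside the hunk) presumably loads through the `Address`, which now supplies the alignment that earlier code had to set by hand:

    // Sketch of the fall-through path, assuming it mirrors the old logic:
    llvm::Value *value = Builder.CreateLoad(addr); // aligned via Address
    EmitARCRelease(value, precise);
]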
@@ -2066,11 +2059,10 @@ void CodeGenFunction::EmitARCDestroyStrong(llvm::Value *addr,
/// Store into a strong object. Always calls this:
/// call void \@objc_storeStrong(i8** %addr, i8* %value)
-llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
+llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr,
llvm::Value *value,
bool ignored) {
- assert(cast<llvm::PointerType>(addr->getType())->getElementType()
- == value->getType());
+ assert(addr.getElementType() == value->getType());
llvm::Constant *&fn = CGM.getARCEntrypoints().objc_storeStrong;
if (!fn) {
@@ -2081,7 +2073,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
}
llvm::Value *args[] = {
- Builder.CreateBitCast(addr, Int8PtrPtrTy),
+ Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy),
Builder.CreateBitCast(value, Int8PtrTy)
};
EmitNounwindRuntimeCall(fn, args);
@@ -2184,14 +2176,14 @@ CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
/// i8* \@objc_loadWeak(i8** %addr)
/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
-llvm::Value *CodeGenFunction::EmitARCLoadWeak(llvm::Value *addr) {
+llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) {
return emitARCLoadOperation(*this, addr,
CGM.getARCEntrypoints().objc_loadWeak,
"objc_loadWeak");
}
/// i8* \@objc_loadWeakRetained(i8** %addr)
-llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(llvm::Value *addr) {
+llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) {
return emitARCLoadOperation(*this, addr,
CGM.getARCEntrypoints().objc_loadWeakRetained,
"objc_loadWeakRetained");
@@ -2199,7 +2191,7 @@ llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(llvm::Value *addr) {
/// i8* \@objc_storeWeak(i8** %addr, i8* %value)
/// Returns %value.
-llvm::Value *CodeGenFunction::EmitARCStoreWeak(llvm::Value *addr,
+llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr,
llvm::Value *value,
bool ignored) {
return emitARCStoreOperation(*this, addr, value,
@@ -2211,7 +2203,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreWeak(llvm::Value *addr,
/// Returns %value. %addr is known to not have a current weak entry.
/// Essentially equivalent to:
/// *addr = nil; objc_storeWeak(addr, value);
-void CodeGenFunction::EmitARCInitWeak(llvm::Value *addr, llvm::Value *value) {
+void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) {
// If we're initializing to null, just write null to memory; no need
// to get the runtime involved. But don't do this if optimization
// is enabled, because accounting for this would make the optimizer
@@ -2229,7 +2221,7 @@ void CodeGenFunction::EmitARCInitWeak(llvm::Value *addr, llvm::Value *value) {
/// void \@objc_destroyWeak(i8** %addr)
/// Essentially objc_storeWeak(addr, nil).
-void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) {
+void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
llvm::Constant *&fn = CGM.getARCEntrypoints().objc_destroyWeak;
if (!fn) {
llvm::FunctionType *fnType =
@@ -2240,13 +2232,13 @@ void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) {
// Cast the argument to 'id*'.
addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
- EmitNounwindRuntimeCall(fn, addr);
+ EmitNounwindRuntimeCall(fn, addr.getPointer());
}
/// void \@objc_moveWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest. Leaves %src pointing to nothing.
/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
-void CodeGenFunction::EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src) {
+void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) {
emitARCCopyOperation(*this, dst, src,
CGM.getARCEntrypoints().objc_moveWeak,
"objc_moveWeak");
@@ -2255,7 +2247,7 @@ void CodeGenFunction::EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src) {
/// void \@objc_copyWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest. Essentially
/// objc_release(objc_initWeak(dest, objc_readWeakRetained(src)))
-void CodeGenFunction::EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src) {
+void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) {
emitARCCopyOperation(*this, dst, src,
CGM.getARCEntrypoints().objc_copyWeak,
"objc_copyWeak");
@@ -2332,19 +2324,19 @@ void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
}
void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
- llvm::Value *addr,
+ Address addr,
QualType type) {
CGF.EmitARCDestroyStrong(addr, ARCPreciseLifetime);
}
void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
- llvm::Value *addr,
+ Address addr,
QualType type) {
CGF.EmitARCDestroyStrong(addr, ARCImpreciseLifetime);
}
void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
- llvm::Value *addr,
+ Address addr,
QualType type) {
CGF.EmitARCDestroyWeak(addr);
}
@@ -3046,7 +3038,8 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
CharUnits Alignment
= getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
EmitAggExpr(TheCXXConstructExpr,
- AggValueSlot::forAddr(DV.getScalarVal(), Alignment, Qualifiers(),
+ AggValueSlot::forAddr(Address(DV.getScalarVal(), Alignment),
+ Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased));
diff --git a/clang/lib/CodeGen/CGObjCGNU.cpp b/clang/lib/CodeGen/CGObjCGNU.cpp
index b52d623b948..6bbf2ef5b08 100644
--- a/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -166,9 +166,9 @@ protected:
/// where the C code specifies const char*.
llvm::Constant *MakeConstantString(const std::string &Str,
const std::string &Name="") {
- auto *ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
- return llvm::ConstantExpr::getGetElementPtr(ConstStr->getValueType(),
- ConstStr, Zeros);
+ ConstantAddress Array = CGM.GetAddrOfConstantCString(Str, Name.c_str());
+ return llvm::ConstantExpr::getGetElementPtr(Array.getElementType(),
+ Array.getPointer(), Zeros);
}
/// Emits a linkonce_odr string, whose name is the prefix followed by the
/// string value. This allows the linker to combine the strings between
@@ -191,34 +191,41 @@ protected:
/// first argument.
llvm::GlobalVariable *MakeGlobal(llvm::StructType *Ty,
ArrayRef<llvm::Constant *> V,
+ CharUnits Align,
StringRef Name="",
llvm::GlobalValue::LinkageTypes linkage
=llvm::GlobalValue::InternalLinkage) {
llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
- return new llvm::GlobalVariable(TheModule, Ty, false,
- linkage, C, Name);
+ auto GV = new llvm::GlobalVariable(TheModule, Ty, false,
+ linkage, C, Name);
+ GV->setAlignment(Align.getQuantity());
+ return GV;
}
/// Generates a global array. The vector must contain the same number of
/// elements that the array type declares, of the type specified as the array
/// element type.
llvm::GlobalVariable *MakeGlobal(llvm::ArrayType *Ty,
ArrayRef<llvm::Constant *> V,
+ CharUnits Align,
StringRef Name="",
llvm::GlobalValue::LinkageTypes linkage
=llvm::GlobalValue::InternalLinkage) {
llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
- return new llvm::GlobalVariable(TheModule, Ty, false,
- linkage, C, Name);
+ auto GV = new llvm::GlobalVariable(TheModule, Ty, false,
+ linkage, C, Name);
+ GV->setAlignment(Align.getQuantity());
+ return GV;
}
/// Generates a global array, inferring the array type from the specified
/// element type and the size of the initialiser.
llvm::GlobalVariable *MakeGlobalArray(llvm::Type *Ty,
ArrayRef<llvm::Constant *> V,
+ CharUnits Align,
StringRef Name="",
llvm::GlobalValue::LinkageTypes linkage
=llvm::GlobalValue::InternalLinkage) {
llvm::ArrayType *ArrayTy = llvm::ArrayType::get(Ty, V.size());
- return MakeGlobal(ArrayTy, V, Name, linkage);
+ return MakeGlobal(ArrayTy, V, Align, Name, linkage);
}
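[All three `MakeGlobal*` helpers now thread a `CharUnits` through to `setAlignment`, so the GNU runtime's metadata globals stop relying on LLVM's default alignment choice. A hypothetical call site:

    // Sketch (hypothetical names): a pointer-aligned method-list global.
    llvm::GlobalVariable *MethodList =
        MakeGlobal(MethodListTy, Methods, CGM.getPointerAlign(),
                   ".objc_method_list");
    // Loads of its fields may now honestly assume pointer alignment.
]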
/// Returns a property name and encoding string.
llvm::Constant *MakePropertyEncodingString(const ObjCPropertyDecl *PD,
@@ -234,9 +241,7 @@ protected:
NameAndAttributes += TypeStr;
NameAndAttributes += '\0';
NameAndAttributes += PD->getNameAsString();
- auto *ConstStr = CGM.GetAddrOfConstantCString(NameAndAttributes);
- return llvm::ConstantExpr::getGetElementPtr(ConstStr->getValueType(),
- ConstStr, Zeros);
+ return MakeConstantString(NameAndAttributes);
}
return MakeConstantString(PD->getNameAsString());
}
@@ -275,6 +280,10 @@ protected:
if (V->getType() == Ty) return V;
return B.CreateBitCast(V, Ty);
}
+ Address EnforceType(CGBuilderTy &B, Address V, llvm::Type *Ty) {
+ if (V.getType() == Ty) return V;
+ return B.CreateBitCast(V, Ty);
+ }
// Some zeros used for GEPs in lots of places.
llvm::Constant *Zeros[2];
/// Null pointer value. Mainly used as a terminator in various arrays.
@@ -435,7 +444,7 @@ private:
/// Returns a selector with the specified type encoding. An empty string is
/// used to return an untyped selector (with the types field set to NULL).
llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
- const std::string &TypeEncoding, bool lval);
+ const std::string &TypeEncoding);
/// Returns the variable used to store the offset of an instance variable.
llvm::GlobalVariable *ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID,
const ObjCIvarDecl *Ivar);
@@ -458,7 +467,7 @@ protected:
/// mechanism differs between the GCC and GNU runtimes, so this method must
/// be overridden in subclasses.
virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
- llvm::Value *ObjCSuper,
+ Address ObjCSuper,
llvm::Value *cmd,
MessageSendInfo &MSI) = 0;
/// Libobjc2 uses a bitfield representation where small(ish) bitfields are
@@ -477,7 +486,7 @@ public:
CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
unsigned protocolClassVersion);
- llvm::Constant *GenerateConstantString(const StringLiteral *) override;
+ ConstantAddress GenerateConstantString(const StringLiteral *) override;
RValue
GenerateMessageSend(CodeGenFunction &CGF, ReturnValueSlot Return,
@@ -494,8 +503,8 @@ public:
const ObjCMethodDecl *Method) override;
llvm::Value *GetClass(CodeGenFunction &CGF,
const ObjCInterfaceDecl *OID) override;
- llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
- bool lval = false) override;
+ llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel) override;
+ Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) override;
llvm::Value *GetSelector(CodeGenFunction &CGF,
const ObjCMethodDecl *Method) override;
llvm::Constant *GetEHType(QualType T) override;
@@ -527,18 +536,18 @@ public:
const ObjCAtThrowStmt &S,
bool ClearInsertionPoint=true) override;
llvm::Value * EmitObjCWeakRead(CodeGenFunction &CGF,
- llvm::Value *AddrWeakObj) override;
+ Address AddrWeakObj) override;
void EmitObjCWeakAssign(CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) override;
+ llvm::Value *src, Address dst) override;
void EmitObjCGlobalAssign(CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest,
+ llvm::Value *src, Address dest,
bool threadlocal=false) override;
void EmitObjCIvarAssign(CodeGenFunction &CGF, llvm::Value *src,
- llvm::Value *dest, llvm::Value *ivarOffset) override;
+ Address dest, llvm::Value *ivarOffset) override;
void EmitObjCStrongCastAssign(CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest) override;
- void EmitGCMemmoveCollectable(CodeGenFunction &CGF, llvm::Value *DestPtr,
- llvm::Value *SrcPtr,
+ llvm::Value *src, Address dest) override;
+ void EmitGCMemmoveCollectable(CodeGenFunction &CGF, Address DestPtr,
+ Address SrcPtr,
llvm::Value *Size) override;
LValue EmitObjCValueForIvar(CodeGenFunction &CGF, QualType ObjectTy,
llvm::Value *BaseValue, const ObjCIvarDecl *Ivar,
@@ -593,11 +602,11 @@ protected:
imp->setMetadata(msgSendMDKind, node);
return imp.getInstruction();
}
- llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, llvm::Value *ObjCSuper,
+ llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, Address ObjCSuper,
llvm::Value *cmd, MessageSendInfo &MSI) override {
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper,
- PtrToObjCSuperTy), cmd};
+ PtrToObjCSuperTy).getPointer(), cmd};
return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs);
}
public:
@@ -647,7 +656,8 @@ class CGObjCGNUstep : public CGObjCGNU {
llvm::Function *LookupFn = SlotLookupFn;
// Store the receiver on the stack so that we can reload it later
- llvm::Value *ReceiverPtr = CGF.CreateTempAlloca(Receiver->getType());
+ Address ReceiverPtr =
+ CGF.CreateTempAlloca(Receiver->getType(), CGF.getPointerAlign());
Builder.CreateStore(Receiver, ReceiverPtr);
llvm::Value *self;
@@ -662,7 +672,7 @@ class CGObjCGNUstep : public CGObjCGNU {
LookupFn->setDoesNotCapture(1);
llvm::Value *args[] = {
- EnforceType(Builder, ReceiverPtr, PtrToIdTy),
+ EnforceType(Builder, ReceiverPtr.getPointer(), PtrToIdTy),
EnforceType(Builder, cmd, SelectorTy),
EnforceType(Builder, self, IdTy) };
llvm::CallSite slot = CGF.EmitRuntimeCallOrInvoke(LookupFn, args);
@@ -670,25 +680,27 @@ class CGObjCGNUstep : public CGObjCGNU {
slot->setMetadata(msgSendMDKind, node);
// Load the imp from the slot
- llvm::Value *imp = Builder.CreateLoad(
- Builder.CreateStructGEP(nullptr, slot.getInstruction(), 4));
+ llvm::Value *imp = Builder.CreateAlignedLoad(
+ Builder.CreateStructGEP(nullptr, slot.getInstruction(), 4),
+ CGF.getPointerAlign());
// The lookup function may have changed the receiver, so make sure we use
// the new one.
Receiver = Builder.CreateLoad(ReceiverPtr, true);
return imp;
}
- llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, llvm::Value *ObjCSuper,
+ llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, Address ObjCSuper,
llvm::Value *cmd,
MessageSendInfo &MSI) override {
CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *lookupArgs[] = {ObjCSuper, cmd};
+ llvm::Value *lookupArgs[] = {ObjCSuper.getPointer(), cmd};
llvm::CallInst *slot =
CGF.EmitNounwindRuntimeCall(SlotLookupSuperFn, lookupArgs);
slot->setOnlyReadsMemory();
- return Builder.CreateLoad(Builder.CreateStructGEP(nullptr, slot, 4));
+ return Builder.CreateAlignedLoad(Builder.CreateStructGEP(nullptr, slot, 4),
+ CGF.getPointerAlign());
}
public:
CGObjCGNUstep(CodeGenModule &Mod) : CGObjCGNU(Mod, 9, 3) {
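The unadorned Builder.CreateLoad and Builder.CreateStore calls on ReceiverPtr above are not the raw IRBuilder methods: they resolve to new CGBuilderTy overloads that accept an Address and thread its alignment through automatically. Roughly, per the CGBuilder.h additions in this patch:

llvm::LoadInst *CreateLoad(Address Addr, const llvm::Twine &Name = "") {
  // The alignment travels with the Address; no call site recomputes it.
  return CreateAlignedLoad(Addr.getPointer(),
                           Addr.getAlignment().getQuantity(), Name);
}
llvm::StoreInst *CreateStore(llvm::Value *Val, Address Addr,
                             bool IsVolatile = false) {
  return CreateAlignedStore(Val, Addr.getPointer(),
                            Addr.getAlignment().getQuantity(), IsVolatile);
}

That is why the slot-lookup code can keep writing plain CreateLoad(ReceiverPtr, true) for the volatile reload of the receiver: the temporary was created with CGF.getPointerAlign(), and the overload reuses it.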
@@ -807,10 +819,10 @@ protected:
return imp.getInstruction();
}
- llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, llvm::Value *ObjCSuper,
+ llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, Address ObjCSuper,
llvm::Value *cmd, MessageSendInfo &MSI) override {
CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper,
+ llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper.getPointer(),
PtrToObjCSuperTy), cmd};
if (CGM.ReturnTypeUsesSRet(MSI.CallInfo))
@@ -1011,7 +1023,7 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
llvm::Value *CGObjCGNU::GetClassNamed(CodeGenFunction &CGF,
const std::string &Name,
bool isWeak) {
- llvm::GlobalVariable *ClassNameGV = CGM.GetAddrOfConstantCString(Name);
+ llvm::Constant *ClassName = MakeConstantString(Name);
// With the incompatible ABI, this will need to be replaced with a direct
// reference to the class symbol. For the compatible nonfragile ABI we are
// still performing this lookup at run time but emitting the symbol for the
@@ -1021,8 +1033,6 @@ llvm::Value *CGObjCGNU::GetClassNamed(CodeGenFunction &CGF,
// with memoized versions or with static references if it's safe to do so.
if (!isWeak)
EmitClassRef(Name);
- llvm::Value *ClassName =
- CGF.Builder.CreateStructGEP(ClassNameGV->getValueType(), ClassNameGV, 0);
llvm::Constant *ClassLookupFn =
CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, PtrToInt8Ty, true),
@@ -1041,7 +1051,7 @@ llvm::Value *CGObjCGNU::EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) {
}
llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel,
- const std::string &TypeEncoding, bool lval) {
+ const std::string &TypeEncoding) {
SmallVectorImpl<TypedSelector> &Types = SelectorTable[Sel];
llvm::GlobalAlias *SelValue = nullptr;
@@ -1060,24 +1070,29 @@ llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel,
Types.emplace_back(TypeEncoding, SelValue);
}
- if (lval) {
- llvm::Value *tmp = CGF.CreateTempAlloca(SelValue->getType());
- CGF.Builder.CreateStore(SelValue, tmp);
- return tmp;
- }
return SelValue;
}
-llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel,
- bool lval) {
- return GetSelector(CGF, Sel, std::string(), lval);
+Address CGObjCGNU::GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) {
+ llvm::Value *SelValue = GetSelector(CGF, Sel);
+
+ // Store it to a temporary. Does this satisfy the semantics of
+ // GetAddrOfSelector? Hopefully.
+ Address tmp = CGF.CreateTempAlloca(SelValue->getType(),
+ CGF.getPointerAlign());
+ CGF.Builder.CreateStore(SelValue, tmp);
+ return tmp;
+}
+
+llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel) {
+ return GetSelector(CGF, Sel, std::string());
}
llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF,
const ObjCMethodDecl *Method) {
std::string SelTypes;
CGM.getContext().getObjCEncodingForMethodDecl(Method, SelTypes);
- return GetSelector(CGF, Method->getSelector(), SelTypes, false);
+ return GetSelector(CGF, Method->getSelector(), SelTypes);
}
llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
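Call sites migrate mechanically from the old flag-based API: what used to be GetSelector(CGF, Sel, /*lval=*/true) becomes a call to the dedicated GetAddrOfSelector entry point, and the rvalue form drops the flag entirely. A hypothetical caller, where Runtime stands for any CGObjCRuntime:

// Old: one entry point, behavior toggled by a bool.
//   llvm::Value *SelPtr = Runtime.GetSelector(CGF, Sel, /*lval=*/true);
// New: the rvalue and lvalue cases are distinct operations.
llvm::Value *SelVal = Runtime.GetSelector(CGF, Sel);    // the SEL value itself
Address SelSlot = Runtime.GetAddrOfSelector(CGF, Sel);  // a slot with known alignment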
@@ -1160,21 +1175,23 @@ llvm::Constant *CGObjCGNUstep::GetEHType(QualType T) {
fields.push_back(BVtable);
fields.push_back(typeName);
llvm::Constant *TI =
- MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
- nullptr), fields, "__objc_eh_typeinfo_" + className,
+ MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, nullptr),
+ fields, CGM.getPointerAlign(),
+ "__objc_eh_typeinfo_" + className,
llvm::GlobalValue::LinkOnceODRLinkage);
return llvm::ConstantExpr::getBitCast(TI, PtrToInt8Ty);
}
/// Generate an NSConstantString object.
-llvm::Constant *CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
+ConstantAddress CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
std::string Str = SL->getString().str();
+ CharUnits Align = CGM.getPointerAlign();
// Look for an existing one
llvm::StringMap<llvm::Constant*>::iterator old = ObjCStrings.find(Str);
if (old != ObjCStrings.end())
- return old->getValue();
+ return ConstantAddress(old->getValue(), Align);
StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
@@ -1197,11 +1214,11 @@ llvm::Constant *CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size()));
llvm::Constant *ObjCStr = MakeGlobal(
llvm::StructType::get(PtrToIdTy, PtrToInt8Ty, IntTy, nullptr),
- Ivars, ".objc_str");
+ Ivars, Align, ".objc_str");
ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty);
ObjCStrings[Str] = ObjCStr;
ConstantStrings.push_back(ObjCStr);
- return ObjCStr;
+ return ConstantAddress(ObjCStr, Align);
}
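ConstantAddress, the new return type here, is an Address whose pointer is statically known to be an llvm::Constant, which lets constant-string emitters hand back the global together with its alignment. Approximately, per the same Address.h:

class ConstantAddress : public Address {
public:
  ConstantAddress(llvm::Constant *Pointer, CharUnits Alignment)
      : Address(Pointer, Alignment) {}
  llvm::Constant *getPointer() const {
    return llvm::cast<llvm::Constant>(Address::getPointer());
  }
};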
/// Generates a message send where the super is the receiver. This is a message
@@ -1281,16 +1298,20 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
// Get the superclass pointer
ReceiverClass = Builder.CreateStructGEP(CastTy, ReceiverClass, 1);
// Load the superclass pointer
- ReceiverClass = Builder.CreateLoad(ReceiverClass);
+ ReceiverClass =
+ Builder.CreateAlignedLoad(ReceiverClass, CGF.getPointerAlign());
// Construct the structure used to look up the IMP
llvm::StructType *ObjCSuperTy = llvm::StructType::get(
Receiver->getType(), IdTy, nullptr);
- llvm::Value *ObjCSuper = Builder.CreateAlloca(ObjCSuperTy);
+
+ // FIXME: Is this really supposed to be a dynamic alloca?
+ Address ObjCSuper = Address(Builder.CreateAlloca(ObjCSuperTy),
+ CGF.getPointerAlign());
Builder.CreateStore(Receiver,
- Builder.CreateStructGEP(ObjCSuperTy, ObjCSuper, 0));
+ Builder.CreateStructGEP(ObjCSuper, 0, CharUnits::Zero()));
Builder.CreateStore(ReceiverClass,
- Builder.CreateStructGEP(ObjCSuperTy, ObjCSuper, 1));
+ Builder.CreateStructGEP(ObjCSuper, 1, CGF.getPointerSize()));
ObjCSuper = EnforceType(Builder, ObjCSuper, PtrToObjCSuperTy);
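The index-plus-offset CreateStructGEP form used here is another CGBuilder addition: the byte offset of the field is what lets the builder derive the field's alignment from the base alignment, using the CharUnits::alignmentAtOffset helper this patch introduces. A sketch of the assumed overload:

Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset,
                        const llvm::Twine &Name = "") {
  // A field at byte Offset from a base aligned to Addr.getAlignment() gets
  // the largest alignment guaranteed by both.
  return Address(CreateStructGEP(Addr.getElementType(),
                                 Addr.getPointer(), Index, Name),
                 Addr.getAlignment().alignmentAtOffset(Offset));
}

So storing the receiver at offset zero keeps the full pointer alignment, and the class field at CGF.getPointerSize() is still pointer-aligned.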
@@ -1435,16 +1456,14 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
phi->addIncoming(llvm::Constant::getNullValue(v->getType()), startBB);
msgRet = RValue::get(phi);
} else if (msgRet.isAggregate()) {
- llvm::Value *v = msgRet.getAggregateAddr();
- llvm::PHINode *phi = Builder.CreatePHI(v->getType(), 2);
- llvm::PointerType *RetTy = cast<llvm::PointerType>(v->getType());
- llvm::AllocaInst *NullVal =
- CGF.CreateTempAlloca(RetTy->getElementType(), "null");
- CGF.InitTempAlloca(NullVal,
- llvm::Constant::getNullValue(RetTy->getElementType()));
- phi->addIncoming(v, messageBB);
- phi->addIncoming(NullVal, startBB);
- msgRet = RValue::getAggregate(phi);
+ Address v = msgRet.getAggregateAddress();
+ llvm::PHINode *phi = Builder.CreatePHI(v.getType(), 2);
+ llvm::Type *RetTy = v.getElementType();
+ Address NullVal = CGF.CreateTempAlloca(RetTy, v.getAlignment(), "null");
+ CGF.InitTempAlloca(NullVal, llvm::Constant::getNullValue(RetTy));
+ phi->addIncoming(v.getPointer(), messageBB);
+ phi->addIncoming(NullVal.getPointer(), startBB);
+ msgRet = RValue::getAggregate(Address(phi, v.getAlignment()));
} else /* isComplex() */ {
std::pair<llvm::Value*,llvm::Value*> v = msgRet.getComplexVal();
llvm::PHINode *phi = Builder.CreatePHI(v.first->getType(), 2);
@@ -1517,7 +1536,8 @@ GenerateMethodList(StringRef ClassName,
Methods.push_back(MethodArray);
// Create an instance of the structure
- return MakeGlobal(ObjCMethodListTy, Methods, ".objc_method_list");
+ return MakeGlobal(ObjCMethodListTy, Methods, CGM.getPointerAlign(),
+ ".objc_method_list");
}
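The recurring mechanical change through the rest of CGObjCGNU.cpp is that MakeGlobal and MakeGlobalArray now require an explicit CharUnits alignment instead of leaving the emitted global's alignment unset. Their updated shape is presumably along these lines:

llvm::GlobalVariable *MakeGlobal(llvm::StructType *Ty,
                                 ArrayRef<llvm::Constant *> V,
                                 CharUnits Align, StringRef Name = "",
                                 llvm::GlobalValue::LinkageTypes Linkage =
                                     llvm::GlobalValue::InternalLinkage) {
  llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
  auto *GV = new llvm::GlobalVariable(TheModule, Ty, /*isConstant=*/false,
                                      Linkage, C, Name);
  GV->setAlignment(Align.getQuantity());
  return GV;
}

Metadata structures full of pointers pass CGM.getPointerAlign(); the bitfield blob built below passes CharUnits::fromQuantity(4) because it stores only i32s.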
/// Generates an IvarList. Used in construction of an objc_class.
@@ -1557,7 +1577,8 @@ GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames,
nullptr);
// Create an instance of the structure
- return MakeGlobal(ObjCIvarListTy, Elements, ".objc_ivar_list");
+ return MakeGlobal(ObjCIvarListTy, Elements, CGM.getPointerAlign(),
+ ".objc_ivar_list");
}
/// Generate a class structure
@@ -1640,8 +1661,9 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
std::string ClassSym((isMeta ? "_OBJC_METACLASS_": "_OBJC_CLASS_") +
std::string(Name));
llvm::GlobalVariable *ClassRef = TheModule.getNamedGlobal(ClassSym);
- llvm::Constant *Class = MakeGlobal(ClassTy, Elements, ClassSym,
- llvm::GlobalValue::ExternalLinkage);
+ llvm::Constant *Class =
+ MakeGlobal(ClassTy, Elements, CGM.getPointerAlign(), ClassSym,
+ llvm::GlobalValue::ExternalLinkage);
if (ClassRef) {
ClassRef->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(Class,
ClassRef->getType()));
@@ -1676,7 +1698,8 @@ GenerateProtocolMethodList(ArrayRef<llvm::Constant *> MethodNames,
Methods.clear();
Methods.push_back(llvm::ConstantInt::get(IntTy, MethodNames.size()));
Methods.push_back(Array);
- return MakeGlobal(ObjCMethodDescListTy, Methods, ".objc_method_list");
+ return MakeGlobal(ObjCMethodDescListTy, Methods, CGM.getPointerAlign(),
+ ".objc_method_list");
}
// Create the protocol list structure used in classes, categories and so on
@@ -1709,7 +1732,8 @@ llvm::Constant *CGObjCGNU::GenerateProtocolList(ArrayRef<std::string>Protocols){
Elements.push_back(NULLPtr);
Elements.push_back(llvm::ConstantInt::get(LongTy, Protocols.size()));
Elements.push_back(ProtocolArray);
- return MakeGlobal(ProtocolListTy, Elements, ".objc_protocol_list");
+ return MakeGlobal(ProtocolListTy, Elements, CGM.getPointerAlign(),
+ ".objc_protocol_list");
}
llvm::Value *CGObjCGNU::GenerateProtocolRef(CodeGenFunction &CGF,
@@ -1749,7 +1773,8 @@ llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
Elements.push_back(MethodList);
Elements.push_back(MethodList);
Elements.push_back(MethodList);
- return MakeGlobal(ProtocolTy, Elements, ".objc_protocol");
+ return MakeGlobal(ProtocolTy, Elements, CGM.getPointerAlign(),
+ ".objc_protocol");
}

void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
@@ -1910,7 +1935,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
Elements.push_back(OptionalPropertyList);
ExistingProtocols[ProtocolName] =
llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolTy, Elements,
- ".objc_protocol"), IdTy);
+ CGM.getPointerAlign(), ".objc_protocol"), IdTy);
}
void CGObjCGNU::GenerateProtocolHolderCategory() {
// Collect information about instance methods
@@ -1952,10 +1977,12 @@ void CGObjCGNU::GenerateProtocolHolderCategory() {
ExistingProtocols.size()));
ProtocolElements.push_back(ProtocolArray);
Elements.push_back(llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolListTy,
- ProtocolElements, ".objc_protocol_list"), PtrTy));
+ ProtocolElements, CGM.getPointerAlign(),
+ ".objc_protocol_list"), PtrTy));
Categories.push_back(llvm::ConstantExpr::getBitCast(
MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
- PtrTy, PtrTy, PtrTy, nullptr), Elements), PtrTy));
+ PtrTy, PtrTy, PtrTy, nullptr), Elements, CGM.getPointerAlign()),
+ PtrTy));
}
/// Libobjc2 uses a bitfield representation where small(ish) bitfields are
@@ -1995,7 +2022,7 @@ llvm::Constant *CGObjCGNU::MakeBitField(ArrayRef<bool> bits) {
llvm::ConstantInt::get(Int32Ty, values.size()),
array };
llvm::Constant *GS = MakeGlobal(llvm::StructType::get(Int32Ty, arrayTy,
- nullptr), fields);
+ nullptr), fields, CharUnits::fromQuantity(4));
llvm::Constant *ptr = llvm::ConstantExpr::getPtrToInt(GS, IntPtrTy);
return ptr;
}
@@ -2047,7 +2074,8 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
GenerateProtocolList(Protocols), PtrTy));
Categories.push_back(llvm::ConstantExpr::getBitCast(
MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
- PtrTy, PtrTy, PtrTy, nullptr), Elements), PtrTy));
+ PtrTy, PtrTy, PtrTy, nullptr), Elements, CGM.getPointerAlign()),
+ PtrTy));
}
llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OID,
@@ -2225,7 +2253,8 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
llvm::Constant *StrongIvarBitmap = MakeBitField(StrongIvars);
llvm::Constant *WeakIvarBitmap = MakeBitField(WeakIvars);
llvm::GlobalVariable *IvarOffsetArray =
- MakeGlobalArray(PtrToIntTy, IvarOffsetValues, ".ivar.offsets");
+ MakeGlobalArray(PtrToIntTy, IvarOffsetValues, CGM.getPointerAlign(),
+ ".ivar.offsets");
// Collect information about instance methods
@@ -2385,13 +2414,15 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
llvm::StructType::get(PtrToInt8Ty, StaticsArrayTy, nullptr);
llvm::Type *StaticsListPtrTy =
llvm::PointerType::getUnqual(StaticsListTy);
- Statics = MakeGlobal(StaticsListTy, Elements, ".objc_statics");
+ Statics = MakeGlobal(StaticsListTy, Elements, CGM.getPointerAlign(),
+ ".objc_statics");
llvm::ArrayType *StaticsListArrayTy =
llvm::ArrayType::get(StaticsListPtrTy, 2);
Elements.clear();
Elements.push_back(Statics);
Elements.push_back(llvm::Constant::getNullValue(StaticsListPtrTy));
- Statics = MakeGlobal(StaticsListArrayTy, Elements, ".objc_statics_ptr");
+ Statics = MakeGlobal(StaticsListArrayTy, Elements,
+ CGM.getPointerAlign(), ".objc_statics_ptr");
Statics = llvm::ConstantExpr::getBitCast(Statics, PtrTy);
}
// Array of classes, categories, and constant objects
@@ -2442,7 +2473,8 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// Number of static selectors
Elements.push_back(llvm::ConstantInt::get(LongTy, SelectorCount));
llvm::GlobalVariable *SelectorList =
- MakeGlobalArray(SelStructTy, Selectors, ".objc_selector_list");
+ MakeGlobalArray(SelStructTy, Selectors, CGM.getPointerAlign(),
+ ".objc_selector_list");
Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList,
SelStructPtrTy));
@@ -2475,7 +2507,8 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
llvm::Constant *ClassList = llvm::ConstantArray::get(ClassListTy, Classes);
Elements.push_back(ClassList);
// Construct the symbol table
- llvm::Constant *SymTab= MakeGlobal(SymTabTy, Elements);
+ llvm::Constant *SymTab =
+ MakeGlobal(SymTabTy, Elements, CGM.getPointerAlign());
// The symbol table is contained in a module which has some version-checking
// constants
@@ -2516,7 +2549,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
break;
}
- llvm::Value *Module = MakeGlobal(ModuleTy, Elements);
+ llvm::Value *Module = MakeGlobal(ModuleTy, Elements, CGM.getPointerAlign());
// Create the load function calling the runtime entry point with the module
// structure
@@ -2526,7 +2559,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
&TheModule);
llvm::BasicBlock *EntryBB =
llvm::BasicBlock::Create(VMContext, "entry", LoadFunction);
- CGBuilderTy Builder(VMContext);
+ CGBuilderTy Builder(CGM, VMContext);
Builder.SetInsertPoint(EntryBB);
llvm::FunctionType *FT =
@@ -2678,57 +2711,63 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
}
llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGenFunction &CGF,
- llvm::Value *AddrWeakObj) {
+ Address AddrWeakObj) {
CGBuilderTy &B = CGF.Builder;
AddrWeakObj = EnforceType(B, AddrWeakObj, PtrToIdTy);
- return B.CreateCall(WeakReadFn.getType(), WeakReadFn, AddrWeakObj);
+ return B.CreateCall(WeakReadFn.getType(), WeakReadFn,
+ AddrWeakObj.getPointer());
}
void CGObjCGNU::EmitObjCWeakAssign(CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) {
+ llvm::Value *src, Address dst) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
dst = EnforceType(B, dst, PtrToIdTy);
- B.CreateCall(WeakAssignFn.getType(), WeakAssignFn, {src, dst});
+ B.CreateCall(WeakAssignFn.getType(), WeakAssignFn,
+ {src, dst.getPointer()});
}
void CGObjCGNU::EmitObjCGlobalAssign(CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst,
+ llvm::Value *src, Address dst,
bool threadlocal) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
dst = EnforceType(B, dst, PtrToIdTy);
// FIXME: Add thread-local assign API.
assert(!threadlocal && "EmitObjCGlobalAssign - Thread Local API NYI");
- B.CreateCall(GlobalAssignFn.getType(), GlobalAssignFn, {src, dst});
+ B.CreateCall(GlobalAssignFn.getType(), GlobalAssignFn,
+ {src, dst.getPointer()});
}
void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst,
+ llvm::Value *src, Address dst,
llvm::Value *ivarOffset) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
dst = EnforceType(B, dst, IdTy);
- B.CreateCall(IvarAssignFn.getType(), IvarAssignFn, {src, dst, ivarOffset});
+ B.CreateCall(IvarAssignFn.getType(), IvarAssignFn,
+ {src, dst.getPointer(), ivarOffset});
}
void CGObjCGNU::EmitObjCStrongCastAssign(CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) {
+ llvm::Value *src, Address dst) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
dst = EnforceType(B, dst, PtrToIdTy);
- B.CreateCall(StrongCastAssignFn.getType(), StrongCastAssignFn, {src, dst});
+ B.CreateCall(StrongCastAssignFn.getType(), StrongCastAssignFn,
+ {src, dst.getPointer()});
}
void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF,
- llvm::Value *DestPtr,
- llvm::Value *SrcPtr,
+ Address DestPtr,
+ Address SrcPtr,
llvm::Value *Size) {
CGBuilderTy &B = CGF.Builder;
DestPtr = EnforceType(B, DestPtr, PtrTy);
SrcPtr = EnforceType(B, SrcPtr, PtrTy);
- B.CreateCall(MemMoveFn.getType(), MemMoveFn, {DestPtr, SrcPtr, Size});
+ B.CreateCall(MemMoveFn.getType(), MemMoveFn,
+ {DestPtr.getPointer(), SrcPtr.getPointer(), Size});
}
llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
@@ -2811,17 +2850,22 @@ llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar);
if (RuntimeVersion < 10)
return CGF.Builder.CreateZExtOrBitCast(
- CGF.Builder.CreateLoad(CGF.Builder.CreateLoad(
- ObjCIvarOffsetVariable(Interface, Ivar), false, "ivar")),
+ CGF.Builder.CreateDefaultAlignedLoad(CGF.Builder.CreateAlignedLoad(
+ ObjCIvarOffsetVariable(Interface, Ivar),
+ CGF.getPointerAlign(), "ivar")),
PtrDiffTy);
std::string name = "__objc_ivar_offset_value_" +
Interface->getNameAsString() +"." + Ivar->getNameAsString();
+ CharUnits Align = CGM.getIntAlign();
llvm::Value *Offset = TheModule.getGlobalVariable(name);
- if (!Offset)
- Offset = new llvm::GlobalVariable(TheModule, IntTy,
+ if (!Offset) {
+ auto GV = new llvm::GlobalVariable(TheModule, IntTy,
false, llvm::GlobalValue::LinkOnceAnyLinkage,
llvm::Constant::getNullValue(IntTy), name);
- Offset = CGF.Builder.CreateLoad(Offset);
+ GV->setAlignment(Align.getQuantity());
+ Offset = GV;
+ }
+ Offset = CGF.Builder.CreateAlignedLoad(Offset, Align);
if (Offset->getType() != PtrDiffTy)
Offset = CGF.Builder.CreateZExtOrBitCast(Offset, PtrDiffTy);
return Offset;
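The fragile (RuntimeVersion < 10) path above issues two loads: the address of the offset variable is loaded at pointer alignment, but the integer it points at has no alignment IR-gen can prove, so the outer load uses CreateDefaultAlignedLoad, a CGBuilder addition that falls back to the ABI alignment of the loaded type. In outline, with B and OffsetVar standing in for the builder and the global above:

// First load: the pointer to the offset variable, pointer-aligned.
llvm::Value *OffsetPtr =
    B.CreateAlignedLoad(OffsetVar, CGF.getPointerAlign(), "ivar");
// Second load: the offset itself; nothing is provable about its alignment,
// so fall back to the ABI alignment of the loaded type.
llvm::Value *Offset = B.CreateDefaultAlignedLoad(OffsetPtr);

The non-fragile path instead loads a LinkOnceAny int global directly, now created and loaded at CGM.getIntAlign().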
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
index 0aff2fd7e08..d4341b33859 100644
--- a/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -1017,7 +1017,7 @@ protected:
/// \param AddToUsed - Whether the variable should be added to
/// "llvm.used".
llvm::GlobalVariable *CreateMetadataVar(Twine Name, llvm::Constant *Init,
- StringRef Section, unsigned Align,
+ StringRef Section, CharUnits Align,
bool AddToUsed);
CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
@@ -1039,7 +1039,7 @@ public:
CGObjCCommonMac(CodeGen::CodeGenModule &cgm) :
CGObjCRuntime(cgm), VMContext(cgm.getLLVMContext()) { }
- llvm::Constant *GenerateConstantString(const StringLiteral *SL) override;
+ ConstantAddress GenerateConstantString(const StringLiteral *SL) override;
llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD=nullptr) override;
@@ -1172,8 +1172,8 @@ private:
/// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
/// for the given selector.
- llvm::Value *EmitSelector(CodeGenFunction &CGF, Selector Sel,
- bool lval=false);
+ llvm::Value *EmitSelector(CodeGenFunction &CGF, Selector Sel);
+ Address EmitSelectorAddr(CodeGenFunction &CGF, Selector Sel);
public:
CGObjCMac(CodeGen::CodeGenModule &cgm);
@@ -1199,8 +1199,8 @@ public:
llvm::Value *GetClass(CodeGenFunction &CGF,
const ObjCInterfaceDecl *ID) override;
- llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
- bool lval = false) override;
+ llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel) override;
+ Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) override;
/// The NeXT/Apple runtimes do not support typed selectors; just emit an
/// untyped one.
@@ -1236,19 +1236,19 @@ public:
void EmitThrowStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtThrowStmt &S,
bool ClearInsertionPoint=true) override;
llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
- llvm::Value *AddrWeakObj) override;
+ Address AddrWeakObj) override;
void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) override;
+ llvm::Value *src, Address dst) override;
void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest,
+ llvm::Value *src, Address dest,
bool threadlocal = false) override;
void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest,
+ llvm::Value *src, Address dest,
llvm::Value *ivarOffset) override;
void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest) override;
+ llvm::Value *src, Address dest) override;
void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *dest, llvm::Value *src,
+ Address dest, Address src,
llvm::Value *size) override;
LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy,
@@ -1395,8 +1395,8 @@ private:
/// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
/// for the given selector.
- llvm::Value *EmitSelector(CodeGenFunction &CGF, Selector Sel,
- bool lval=false);
+ llvm::Value *EmitSelector(CodeGenFunction &CGF, Selector Sel);
+ Address EmitSelectorAddr(CodeGenFunction &CGF, Selector Sel);
/// GetInterfaceEHType - Get the cached ehtype for the given Objective-C
/// interface. The return value has type EHTypePtrTy.
@@ -1474,9 +1474,10 @@ public:
llvm::Value *GetClass(CodeGenFunction &CGF,
const ObjCInterfaceDecl *ID) override;
- llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
- bool lvalue = false) override
- { return EmitSelector(CGF, Sel, lvalue); }
+ llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel) override
+ { return EmitSelector(CGF, Sel); }
+ Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) override
+ { return EmitSelectorAddr(CGF, Sel); }
/// The NeXT/Apple runtimes do not support typed selectors; just emit an
/// untyped one.
@@ -1531,19 +1532,19 @@ public:
void EmitThrowStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtThrowStmt &S,
bool ClearInsertionPoint=true) override;
llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
- llvm::Value *AddrWeakObj) override;
+ Address AddrWeakObj) override;
void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) override;
+ llvm::Value *src, Address dst) override;
void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest,
+ llvm::Value *src, Address dest,
bool threadlocal = false) override;
void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest,
+ llvm::Value *src, Address dest,
llvm::Value *ivarOffset) override;
void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest) override;
+ llvm::Value *src, Address dest) override;
void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *dest, llvm::Value *src,
+ Address dest, Address src,
llvm::Value *size) override;
LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy,
llvm::Value *BaseValue, const ObjCIvarDecl *Ivar,
@@ -1645,7 +1646,7 @@ struct NullReturnState {
// memory or (2) agg values in registers.
if (result.isAggregate()) {
assert(result.isAggregate() && "null init of non-aggregate result?");
- CGF.EmitNullInitialization(result.getAggregateAddr(), resultType);
+ CGF.EmitNullInitialization(result.getAggregateAddress(), resultType);
if (contBB) CGF.EmitBlock(contBB);
return result;
}
@@ -1711,9 +1712,11 @@ llvm::Value *CGObjCMac::GetClass(CodeGenFunction &CGF,
}
/// GetSelector - Return the pointer to the unique'd string for this selector.
-llvm::Value *CGObjCMac::GetSelector(CodeGenFunction &CGF, Selector Sel,
- bool lval) {
- return EmitSelector(CGF, Sel, lval);
+llvm::Value *CGObjCMac::GetSelector(CodeGenFunction &CGF, Selector Sel) {
+ return EmitSelector(CGF, Sel);
+}
+Address CGObjCMac::GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) {
+ return EmitSelectorAddr(CGF, Sel);
}
llvm::Value *CGObjCMac::GetSelector(CodeGenFunction &CGF, const ObjCMethodDecl
*Method) {
@@ -1756,7 +1759,7 @@ llvm::Constant *CGObjCMac::GetEHType(QualType T) {
};
*/
-llvm::Constant *CGObjCCommonMac::GenerateConstantString(
+ConstantAddress CGObjCCommonMac::GenerateConstantString(
const StringLiteral *SL) {
return (CGM.getLangOpts().NoConstantCFStrings == 0 ?
CGM.GetAddrOfConstantCFString(SL) :
@@ -1783,13 +1786,14 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
const ObjCMethodDecl *Method) {
// Create and init a super structure; this is a (receiver, class)
// pair we will pass to objc_msgSendSuper.
- llvm::Value *ObjCSuper =
- CGF.CreateTempAlloca(ObjCTypes.SuperTy, "objc_super");
+ Address ObjCSuper =
+ CGF.CreateTempAlloca(ObjCTypes.SuperTy, CGF.getPointerAlign(),
+ "objc_super");
llvm::Value *ReceiverAsObject =
CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
CGF.Builder.CreateStore(
ReceiverAsObject,
- CGF.Builder.CreateStructGEP(ObjCTypes.SuperTy, ObjCSuper, 0));
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0, CharUnits::Zero()));
// If this is a class message the metaclass is passed as the target.
llvm::Value *Target;
@@ -1803,12 +1807,13 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
// isa" is the first ivar in a class (which it must be).
Target = EmitClassRef(CGF, Class->getSuperClass());
Target = CGF.Builder.CreateStructGEP(ObjCTypes.ClassTy, Target, 0);
- Target = CGF.Builder.CreateLoad(Target);
+ Target = CGF.Builder.CreateAlignedLoad(Target, CGF.getPointerAlign());
} else {
llvm::Constant *MetaClassPtr = EmitMetaClassRef(Class);
llvm::Value *SuperPtr =
CGF.Builder.CreateStructGEP(ObjCTypes.ClassTy, MetaClassPtr, 1);
- llvm::Value *Super = CGF.Builder.CreateLoad(SuperPtr);
+ llvm::Value *Super =
+ CGF.Builder.CreateAlignedLoad(SuperPtr, CGF.getPointerAlign());
Target = Super;
}
} else if (isCategoryImpl)
@@ -1816,18 +1821,18 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
else {
llvm::Value *ClassPtr = EmitSuperClassRef(Class);
ClassPtr = CGF.Builder.CreateStructGEP(ObjCTypes.ClassTy, ClassPtr, 1);
- Target = CGF.Builder.CreateLoad(ClassPtr);
+ Target = CGF.Builder.CreateAlignedLoad(ClassPtr, CGF.getPointerAlign());
}
// FIXME: We shouldn't need to do this cast, rectify the ASTContext and
// ObjCTypes types.
llvm::Type *ClassTy =
CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
Target = CGF.Builder.CreateBitCast(Target, ClassTy);
- CGF.Builder.CreateStore(
- Target, CGF.Builder.CreateStructGEP(ObjCTypes.SuperTy, ObjCSuper, 1));
+ CGF.Builder.CreateStore(Target,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 1, CGF.getPointerSize()));
return EmitMessageSend(CGF, Return, ResultType,
EmitSelector(CGF, Sel),
- ObjCSuper, ObjCTypes.SuperPtrCTy,
+ ObjCSuper.getPointer(), ObjCTypes.SuperPtrCTy,
true, CallArgs, Method, ObjCTypes);
}
@@ -2361,9 +2366,8 @@ llvm::Constant *CGObjCCommonMac::getBitmapBlockLayout(bool ComputeByrefLayout) {
}
}
- int e = Layout.size()-1;
- while (e >= 0) {
- unsigned char inst = Layout[e--];
+ while (!Layout.empty()) {
+ unsigned char inst = Layout.back();
enum BLOCK_LAYOUT_OPCODE opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
if (opcode == BLOCK_LAYOUT_NON_OBJECT_BYTES || opcode == BLOCK_LAYOUT_NON_OBJECT_WORDS)
Layout.pop_back();
@@ -2376,19 +2380,19 @@ llvm::Constant *CGObjCCommonMac::getBitmapBlockLayout(bool ComputeByrefLayout) {
// Block variable layout instruction has been inlined.
if (CGM.getLangOpts().ObjCGCBitmapPrint) {
if (ComputeByrefLayout)
- printf("\n Inline instruction for BYREF variable layout: ");
+ printf("\n Inline BYREF variable layout: ");
else
- printf("\n Inline instruction for block variable layout: ");
- printf("0x0%" PRIx64 "\n", Result);
- }
- if (WordSizeInBytes == 8) {
- const llvm::APInt Instruction(64, Result);
- return llvm::Constant::getIntegerValue(CGM.Int64Ty, Instruction);
- }
- else {
- const llvm::APInt Instruction(32, Result);
- return llvm::Constant::getIntegerValue(CGM.Int32Ty, Instruction);
+ printf("\n Inline block variable layout: ");
+ printf("0x0%" PRIx64 "", Result);
+ if (auto numStrong = (Result & 0xF00) >> 8)
+ printf(", BL_STRONG:%d", (int) numStrong);
+ if (auto numByref = (Result & 0x0F0) >> 4)
+ printf(", BL_BYREF:%d", (int) numByref);
+ if (auto numWeak = (Result & 0x00F) >> 0)
+ printf(", BL_WEAK:%d", (int) numWeak);
+ printf(", BL_OPERATOR:0\n");
}
+ return llvm::ConstantInt::get(CGM.IntPtrTy, Result);
}
unsigned char inst = (BLOCK_LAYOUT_OPERATOR << 4) | 0;
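The new debug output decodes the inline layout word, which packs three counts into its low nibbles: strong pointers in bits 8-11, __block (byref) pointers in bits 4-7, and weak pointers in bits 0-3, terminated by a zero BLOCK_LAYOUT_OPERATOR. The masks above extract exactly those fields:

// For example, Result == 0x321 encodes 3 strong, 2 byref, 1 weak.
unsigned numStrong = (Result & 0xF00) >> 8; // BL_STRONG count
unsigned numByref  = (Result & 0x0F0) >> 4; // BL_BYREF count
unsigned numWeak   = (Result & 0x00F);      // BL_WEAK count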
@@ -2399,9 +2403,9 @@ llvm::Constant *CGObjCCommonMac::getBitmapBlockLayout(bool ComputeByrefLayout) {
if (CGM.getLangOpts().ObjCGCBitmapPrint) {
if (ComputeByrefLayout)
- printf("\n BYREF variable layout: ");
+ printf("\n Byref variable layout: ");
else
- printf("\n block variable layout: ");
+ printf("\n Block variable layout: ");
for (unsigned i = 0, e = BitMap.size(); i != e; i++) {
unsigned char inst = BitMap[i];
enum BLOCK_LAYOUT_OPCODE opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4);
@@ -2443,7 +2447,7 @@ llvm::Constant *CGObjCCommonMac::getBitmapBlockLayout(bool ComputeByrefLayout) {
llvm::GlobalVariable *Entry = CreateMetadataVar(
"OBJC_CLASS_NAME_",
llvm::ConstantDataArray::getString(VMContext, BitMap, false),
- "__TEXT,__objc_classname,cstring_literals", 1, true);
+ "__TEXT,__objc_classname,cstring_literals", CharUnits::One(), true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -2699,7 +2703,7 @@ CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
// No special section, but goes in llvm.used
return CreateMetadataVar("\01l_OBJC_PROTOCOLEXT_" + PD->getName(), Init,
- StringRef(), 0, true);
+ StringRef(), CGM.getPointerAlign(), true);
}
/*
@@ -2738,7 +2742,7 @@ CGObjCMac::EmitProtocolList(Twine Name,
llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
llvm::GlobalVariable *GV =
CreateMetadataVar(Name, Init, "__OBJC,__cat_cls_meth,regular,no_dead_strip",
- 4, false);
+ CGM.getPointerAlign(), false);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListPtrTy);
}
@@ -2815,7 +2819,7 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
CreateMetadataVar(Name, Init,
(ObjCABI == 2) ? "__DATA, __objc_const" :
"__OBJC,__property,regular,no_dead_strip",
- (ObjCABI == 2) ? 8 : 4,
+ CGM.getPointerAlign(),
true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.PropertyListPtrTy);
}
@@ -2834,7 +2838,7 @@ CGObjCCommonMac::EmitProtocolMethodTypes(Twine Name,
llvm::GlobalVariable *GV = CreateMetadataVar(
Name, Init, (ObjCABI == 2) ? "__DATA, __objc_const" : StringRef(),
- (ObjCABI == 2) ? 8 : 4, true);
+ CGM.getPointerAlign(), true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.Int8PtrPtrTy);
}
@@ -2872,7 +2876,8 @@ CGObjCMac::EmitMethodDescList(Twine Name, const char *Section,
Values[1] = llvm::ConstantArray::get(AT, Methods);
llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
- llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init, Section, CGM.getPointerAlign(), true);
return llvm::ConstantExpr::getBitCast(GV,
ObjCTypes.MethodDescriptionListPtrTy);
}
@@ -2944,7 +2949,8 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
llvm::GlobalVariable *GV =
CreateMetadataVar("OBJC_CATEGORY_" + ExtName.str(), Init,
- "__OBJC,__category,regular,no_dead_strip", 4, true);
+ "__OBJC,__category,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
DefinedCategories.push_back(GV);
DefinedCategoryNames.insert(ExtName.str());
// method definition entries must be clear for next implementation.
@@ -3084,10 +3090,10 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
"Forward metaclass reference has incorrect type.");
GV->setInitializer(Init);
GV->setSection(Section);
- GV->setAlignment(4);
+ GV->setAlignment(CGM.getPointerAlign().getQuantity());
CGM.addCompilerUsedGlobal(GV);
} else
- GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ GV = CreateMetadataVar(Name, Init, Section, CGM.getPointerAlign(), true);
DefinedClasses.push_back(GV);
ImplementedClasses.push_back(Interface);
// method definition entries must be clear for next implementation.
@@ -3222,7 +3228,8 @@ CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) {
llvm::Constant *Init =
llvm::ConstantStruct::get(ObjCTypes.ClassExtensionTy, Values);
return CreateMetadataVar("OBJC_CLASSEXT_" + ID->getName(), Init,
- "__OBJC,__class_ext,regular,no_dead_strip", 4, true);
+ "__OBJC,__class_ext,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
}
/*
@@ -3280,11 +3287,12 @@ llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
if (ForClass)
GV =
CreateMetadataVar("OBJC_CLASS_VARIABLES_" + ID->getName(), Init,
- "__OBJC,__class_vars,regular,no_dead_strip", 4, true);
+ "__OBJC,__class_vars,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
else
GV = CreateMetadataVar("OBJC_INSTANCE_VARIABLES_" + ID->getName(), Init,
- "__OBJC,__instance_vars,regular,no_dead_strip", 4,
- true);
+ "__OBJC,__instance_vars,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListPtrTy);
}
@@ -3334,7 +3342,8 @@ llvm::Constant *CGObjCMac::EmitMethodList(Twine Name,
Values[2] = llvm::ConstantArray::get(AT, Methods);
llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
- llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init, Section, CGM.getPointerAlign(), true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListPtrTy);
}
@@ -3359,7 +3368,7 @@ llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
llvm::GlobalVariable *CGObjCCommonMac::CreateMetadataVar(Twine Name,
llvm::Constant *Init,
StringRef Section,
- unsigned Align,
+ CharUnits Align,
bool AddToUsed) {
llvm::Type *Ty = Init->getType();
llvm::GlobalVariable *GV =
@@ -3367,8 +3376,7 @@ llvm::GlobalVariable *CGObjCCommonMac::CreateMetadataVar(Twine Name,
llvm::GlobalValue::PrivateLinkage, Init, Name);
if (!Section.empty())
GV->setSection(Section);
- if (Align)
- GV->setAlignment(Align);
+ GV->setAlignment(Align.getQuantity());
if (AddToUsed)
CGM.addCompilerUsedGlobal(GV);
return GV;
@@ -3423,14 +3431,14 @@ void CGObjCMac::EmitSynchronizedStmt(CodeGenFunction &CGF,
namespace {
struct PerformFragileFinally final : EHScopeStack::Cleanup {
const Stmt &S;
- llvm::Value *SyncArgSlot;
- llvm::Value *CallTryExitVar;
- llvm::Value *ExceptionData;
+ Address SyncArgSlot;
+ Address CallTryExitVar;
+ Address ExceptionData;
ObjCTypesHelper &ObjCTypes;
PerformFragileFinally(const Stmt *S,
- llvm::Value *SyncArgSlot,
- llvm::Value *CallTryExitVar,
- llvm::Value *ExceptionData,
+ Address SyncArgSlot,
+ Address CallTryExitVar,
+ Address ExceptionData,
ObjCTypesHelper *ObjCTypes)
: S(*S), SyncArgSlot(SyncArgSlot), CallTryExitVar(CallTryExitVar),
ExceptionData(ExceptionData), ObjCTypes(*ObjCTypes) {}
@@ -3447,7 +3455,7 @@ namespace {
CGF.EmitBlock(FinallyCallExit);
CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryExitFn(),
- ExceptionData);
+ ExceptionData.getPointer());
CGF.EmitBlock(FinallyNoCallExit);
@@ -3568,7 +3576,7 @@ void FragileHazards::emitReadHazard(CGBuilderTy &Builder) {
void FragileHazards::emitHazardsInNewBlocks() {
if (Locals.empty()) return;
- CGBuilderTy Builder(CGF.getLLVMContext());
+ CGBuilderTy Builder(CGF, CGF.getLLVMContext());
// Iterate through all blocks, skipping those prior to the try.
for (llvm::Function::iterator
@@ -3607,6 +3615,10 @@ static void addIfPresent(llvm::DenseSet<llvm::Value*> &S, llvm::Value *V) {
if (V) S.insert(V);
}
+static void addIfPresent(llvm::DenseSet<llvm::Value*> &S, Address V) {
+ if (V.isValid()) S.insert(V.getPointer());
+}
+
void FragileHazards::collectLocals() {
// Compute a set of allocas to ignore.
llvm::DenseSet<llvm::Value*> AllocasToIgnore;
@@ -3760,21 +3772,23 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// @synchronized. We can't avoid a temp here because we need the
// value to be preserved. If the backend ever does liveness
// correctly after setjmp, this will be unnecessary.
- llvm::Value *SyncArgSlot = nullptr;
+ Address SyncArgSlot = Address::invalid();
if (!isTry) {
llvm::Value *SyncArg =
CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
CGF.EmitNounwindRuntimeCall(ObjCTypes.getSyncEnterFn(), SyncArg);
- SyncArgSlot = CGF.CreateTempAlloca(SyncArg->getType(), "sync.arg");
+ SyncArgSlot = CGF.CreateTempAlloca(SyncArg->getType(),
+ CGF.getPointerAlign(), "sync.arg");
CGF.Builder.CreateStore(SyncArg, SyncArgSlot);
}
// Allocate memory for the setjmp buffer. This needs to be kept
// live throughout the try and catch blocks.
- llvm::Value *ExceptionData = CGF.CreateTempAlloca(ObjCTypes.ExceptionDataTy,
- "exceptiondata.ptr");
+ Address ExceptionData = CGF.CreateTempAlloca(ObjCTypes.ExceptionDataTy,
+ CGF.getPointerAlign(),
+ "exceptiondata.ptr");
// Create the fragile hazards. Note that this will not capture any
// of the allocas required for exception processing, but will
@@ -3790,12 +3804,13 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// The setjmp-safety rule here is that we should always store to this
// variable in a place that dominates the branch through the cleanup
// without passing through any setjmps.
- llvm::Value *CallTryExitVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(),
- "_call_try_exit");
+ Address CallTryExitVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(),
+ CharUnits::One(),
+ "_call_try_exit");
// A slot containing the exception to rethrow. Only needed when we
// have both a @catch and a @finally.
- llvm::Value *PropagatingExnVar = nullptr;
+ Address PropagatingExnVar = Address::invalid();
// Push a normal cleanup to leave the try scope.
CGF.EHStack.pushCleanup<PerformFragileFinally>(NormalAndEHCleanup, &S,
@@ -3808,13 +3823,14 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// - Call objc_exception_try_enter to push ExceptionData on top of
// the EH stack.
CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryEnterFn(),
- ExceptionData);
+ ExceptionData.getPointer());
// - Call setjmp on the exception data buffer.
llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0);
llvm::Value *GEPIndexes[] = { Zero, Zero, Zero };
llvm::Value *SetJmpBuffer = CGF.Builder.CreateGEP(
- ObjCTypes.ExceptionDataTy, ExceptionData, GEPIndexes, "setjmp_buffer");
+ ObjCTypes.ExceptionDataTy, ExceptionData.getPointer(), GEPIndexes,
+ "setjmp_buffer");
llvm::CallInst *SetJmpResult = CGF.EmitNounwindRuntimeCall(
ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp_result");
SetJmpResult->setCanReturnTwice();
@@ -3854,7 +3870,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// nothing can cross this so the value is already in SSA form.
llvm::CallInst *Caught =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(),
- ExceptionData, "caught");
+ ExceptionData.getPointer(), "caught");
// Push the exception to rethrow onto the EH value stack for the
// benefit of any @throws in the handlers.
@@ -3870,13 +3886,14 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// Save the currently-propagating exception before
// objc_exception_try_enter clears the exception slot.
PropagatingExnVar = CGF.CreateTempAlloca(Caught->getType(),
+ CGF.getPointerAlign(),
"propagating_exception");
CGF.Builder.CreateStore(Caught, PropagatingExnVar);
// Enter a new exception try block (in case a @catch block
// throws an exception).
CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryEnterFn(),
- ExceptionData);
+ ExceptionData.getPointer());
llvm::CallInst *SetJmpResult =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getSetJmpFn(),
@@ -4008,10 +4025,10 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// Extract the new exception and save it to the
// propagating-exception slot.
- assert(PropagatingExnVar);
+ assert(PropagatingExnVar.isValid());
llvm::CallInst *NewCaught =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(),
- ExceptionData, "caught");
+ ExceptionData.getPointer(), "caught");
CGF.Builder.CreateStore(NewCaught, PropagatingExnVar);
// Don't pop the catch handler; the throw already did.
@@ -4036,14 +4053,14 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
if (CGF.HaveInsertPoint()) {
// If we have a propagating-exception variable, check it.
llvm::Value *PropagatingExn;
- if (PropagatingExnVar) {
+ if (PropagatingExnVar.isValid()) {
PropagatingExn = CGF.Builder.CreateLoad(PropagatingExnVar);
// Otherwise, just look in the buffer for the exception to throw.
} else {
llvm::CallInst *Caught =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(),
- ExceptionData);
+ ExceptionData.getPointer());
PropagatingExn = Caught;
}
@@ -4083,14 +4100,13 @@ void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
/// object: objc_read_weak (id *src)
///
llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
- llvm::Value *AddrWeakObj) {
- llvm::Type* DestTy =
- cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
+ Address AddrWeakObj) {
+ llvm::Type* DestTy = AddrWeakObj.getElementType();
AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj,
ObjCTypes.PtrObjectPtrTy);
llvm::Value *read_weak =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcReadWeakFn(),
- AddrWeakObj, "weakread");
+ AddrWeakObj.getPointer(), "weakread");
read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
return read_weak;
}
@@ -4099,7 +4115,7 @@ llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
/// objc_assign_weak (id src, id *dst)
///
void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) {
+ llvm::Value *src, Address dst) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
@@ -4110,7 +4126,7 @@ void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst };
+ llvm::Value *args[] = { src, dst.getPointer() };
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignWeakFn(),
args, "weakassign");
return;
@@ -4120,7 +4136,7 @@ void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
/// objc_assign_global (id src, id *dst)
///
void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst,
+ llvm::Value *src, Address dst,
bool threadlocal) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
@@ -4132,7 +4148,7 @@ void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst };
+ llvm::Value *args[] = { src, dst.getPointer() };
if (!threadlocal)
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignGlobalFn(),
args, "globalassign");
@@ -4146,7 +4162,7 @@ void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
/// objc_assign_ivar (id src, id *dst, ptrdiff_t ivaroffset)
///
void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst,
+ llvm::Value *src, Address dst,
llvm::Value *ivarOffset) {
assert(ivarOffset && "EmitObjCIvarAssign - ivarOffset is NULL");
llvm::Type * SrcTy = src->getType();
@@ -4159,7 +4175,7 @@ void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst, ivarOffset };
+ llvm::Value *args[] = { src, dst.getPointer(), ivarOffset };
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignIvarFn(), args);
return;
}
@@ -4168,7 +4184,7 @@ void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
/// objc_assign_strongCast (id src, id *dst)
///
void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) {
+ llvm::Value *src, Address dst) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
@@ -4179,19 +4195,19 @@ void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst };
+ llvm::Value *args[] = { src, dst.getPointer() };
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignStrongCastFn(),
- args, "weakassign");
+ args, "strongassign");
return;
}
void CGObjCMac::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *DestPtr,
- llvm::Value *SrcPtr,
+ Address DestPtr,
+ Address SrcPtr,
llvm::Value *size) {
SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
- llvm::Value *args[] = { DestPtr, SrcPtr, size };
+ llvm::Value *args[] = { DestPtr.getPointer(), SrcPtr.getPointer(), size };
CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args);
}
@@ -4312,7 +4328,8 @@ void CGObjCMac::EmitModuleInfo() {
};
CreateMetadataVar("OBJC_MODULES",
llvm::ConstantStruct::get(ObjCTypes.ModuleTy, Values),
- "__OBJC,__module_info,regular,no_dead_strip", 4, true);
+ "__OBJC,__module_info,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
}
llvm::Constant *CGObjCMac::EmitModuleSymbols() {
@@ -4356,7 +4373,8 @@ llvm::Constant *CGObjCMac::EmitModuleSymbols() {
llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
llvm::GlobalVariable *GV = CreateMetadataVar(
- "OBJC_SYMBOLS", Init, "__OBJC,__symbols,regular,no_dead_strip", 4, true);
+ "OBJC_SYMBOLS", Init, "__OBJC,__symbols,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.SymtabPtrTy);
}
@@ -4372,10 +4390,11 @@ llvm::Value *CGObjCMac::EmitClassRefFromId(CodeGenFunction &CGF,
ObjCTypes.ClassPtrTy);
Entry = CreateMetadataVar(
"OBJC_CLASS_REFERENCES_", Casted,
- "__OBJC,__cls_refs,literal_pointers,no_dead_strip", 4, true);
+ "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
+ CGM.getPointerAlign(), true);
}
- return CGF.Builder.CreateLoad(Entry);
+ return CGF.Builder.CreateAlignedLoad(Entry, CGF.getPointerAlign());
}
llvm::Value *CGObjCMac::EmitClassRef(CodeGenFunction &CGF,
@@ -4388,23 +4407,25 @@ llvm::Value *CGObjCMac::EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) {
return EmitClassRefFromId(CGF, II);
}
-llvm::Value *CGObjCMac::EmitSelector(CodeGenFunction &CGF, Selector Sel,
- bool lvalue) {
- llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+llvm::Value *CGObjCMac::EmitSelector(CodeGenFunction &CGF, Selector Sel) {
+ return CGF.Builder.CreateLoad(EmitSelectorAddr(CGF, Sel));
+}
+
+Address CGObjCMac::EmitSelectorAddr(CodeGenFunction &CGF, Selector Sel) {
+ CharUnits Align = CGF.getPointerAlign();
+ llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
if (!Entry) {
llvm::Constant *Casted =
llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
ObjCTypes.SelectorPtrTy);
Entry = CreateMetadataVar(
"OBJC_SELECTOR_REFERENCES_", Casted,
- "__OBJC,__message_refs,literal_pointers,no_dead_strip", 4, true);
+ "__OBJC,__message_refs,literal_pointers,no_dead_strip", Align, true);
Entry->setExternallyInitialized(true);
}
- if (lvalue)
- return Entry;
- return CGF.Builder.CreateLoad(Entry);
+ return Address(Entry, Align);
}
llvm::Constant *CGObjCCommonMac::GetClassName(StringRef RuntimeName) {
@@ -4415,7 +4436,7 @@ llvm::Constant *CGObjCCommonMac::GetClassName(StringRef RuntimeName) {
llvm::ConstantDataArray::getString(VMContext, RuntimeName),
((ObjCABI == 2) ? "__TEXT,__objc_classname,cstring_literals"
: "__TEXT,__cstring,cstring_literals"),
- 1, true);
+ CharUnits::One(), true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4735,7 +4756,7 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string &BitMap) {
llvm::ConstantDataArray::getString(VMContext, BitMap, false),
((ObjCABI == 2) ? "__TEXT,__objc_classname,cstring_literals"
: "__TEXT,__cstring,cstring_literals"),
- 1, true);
+ CharUnits::One(), true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4825,7 +4846,7 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) {
llvm::ConstantDataArray::getString(VMContext, Sel.getAsString()),
((ObjCABI == 2) ? "__TEXT,__objc_methname,cstring_literals"
: "__TEXT,__cstring,cstring_literals"),
- 1, true);
+ CharUnits::One(), true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4847,7 +4868,7 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
llvm::ConstantDataArray::getString(VMContext, TypeStr),
((ObjCABI == 2) ? "__TEXT,__objc_methtype,cstring_literals"
: "__TEXT,__cstring,cstring_literals"),
- 1, true);
+ CharUnits::One(), true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4866,7 +4887,7 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D,
llvm::ConstantDataArray::getString(VMContext, TypeStr),
((ObjCABI == 2) ? "__TEXT,__objc_methtype,cstring_literals"
: "__TEXT,__cstring,cstring_literals"),
- 1, true);
+ CharUnits::One(), true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4879,7 +4900,7 @@ llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) {
Entry = CreateMetadataVar(
"OBJC_PROP_NAME_ATTR_",
llvm::ConstantDataArray::getString(VMContext, Ident->getName()),
- "__TEXT,__cstring,cstring_literals", 1, true);
+ "__TEXT,__cstring,cstring_literals", CharUnits::One(), true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -5887,9 +5908,11 @@ llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CodeGenFunction &CGF,
std::string ProtocolName("\01l_OBJC_PROTOCOL_REFERENCE_$_");
ProtocolName += PD->getObjCRuntimeNameAsString();
+ CharUnits Align = CGF.getPointerAlign();
+
llvm::GlobalVariable *PTGV = CGM.getModule().getGlobalVariable(ProtocolName);
if (PTGV)
- return CGF.Builder.CreateLoad(PTGV);
+ return CGF.Builder.CreateAlignedLoad(PTGV, Align);
PTGV = new llvm::GlobalVariable(
CGM.getModule(),
Init->getType(), false,
@@ -5898,8 +5921,9 @@ llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CodeGenFunction &CGF,
ProtocolName);
PTGV->setSection("__DATA, __objc_protorefs, coalesced, no_dead_strip");
PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ PTGV->setAlignment(Align.getQuantity());
CGM.addCompilerUsedGlobal(PTGV);
- return CGF.Builder.CreateLoad(PTGV);
+ return CGF.Builder.CreateAlignedLoad(PTGV, Align);
}
/// GenerateCategory - Build metadata for a category implementation.
@@ -6428,7 +6452,8 @@ llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset(
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar) {
llvm::Value *IvarOffsetValue = ObjCIvarOffsetVariable(Interface, Ivar);
- IvarOffsetValue = CGF.Builder.CreateLoad(IvarOffsetValue, "ivar");
+ IvarOffsetValue = CGF.Builder.CreateAlignedLoad(IvarOffsetValue,
+ CGF.getSizeAlign(), "ivar");
if (IsIvarOffsetKnownIdempotent(CGF, Ivar))
cast<llvm::LoadInst>(IvarOffsetValue)
->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
@@ -6559,16 +6584,17 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
}
}
- llvm::Value *mref =
- CGF.Builder.CreateBitCast(messageRef, ObjCTypes.MessageRefPtrTy);
+ Address mref =
+ Address(CGF.Builder.CreateBitCast(messageRef, ObjCTypes.MessageRefPtrTy),
+ CGF.getPointerAlign());
// Update the message ref argument.
- args[1].RV = RValue::get(mref);
+ args[1].RV = RValue::get(mref.getPointer());
// Load the function to call from the message ref table.
- llvm::Value *callee =
- CGF.Builder.CreateStructGEP(ObjCTypes.MessageRefTy, mref, 0);
- callee = CGF.Builder.CreateLoad(callee, "msgSend_fn");
+ Address calleeAddr =
+ CGF.Builder.CreateStructGEP(mref, 0, CharUnits::Zero());
+ llvm::Value *callee = CGF.Builder.CreateLoad(calleeAddr, "msgSend_fn");
callee = CGF.Builder.CreateBitCast(callee, MSI.MessengerType);
@@ -6617,6 +6643,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitClassRefFromId(CodeGenFunction &CGF,
IdentifierInfo *II,
bool Weak,
const ObjCInterfaceDecl *ID) {
+ CharUnits Align = CGF.getPointerAlign();
llvm::GlobalVariable *&Entry = ClassReferences[II];
if (!Entry) {
@@ -6627,13 +6654,11 @@ llvm::Value *CGObjCNonFragileABIMac::EmitClassRefFromId(CodeGenFunction &CGF,
Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
false, llvm::GlobalValue::PrivateLinkage,
ClassGV, "OBJC_CLASSLIST_REFERENCES_$_");
- Entry->setAlignment(
- CGM.getDataLayout().getABITypeAlignment(
- ObjCTypes.ClassnfABIPtrTy));
+ Entry->setAlignment(Align.getQuantity());
Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip");
CGM.addCompilerUsedGlobal(Entry);
}
- return CGF.Builder.CreateLoad(Entry);
+ return CGF.Builder.CreateAlignedLoad(Entry, Align);
}
llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CodeGenFunction &CGF,
@@ -6650,6 +6675,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitNSAutoreleasePoolClassRef(
llvm::Value *
CGObjCNonFragileABIMac::EmitSuperClassRef(CodeGenFunction &CGF,
const ObjCInterfaceDecl *ID) {
+ CharUnits Align = CGF.getPointerAlign();
llvm::GlobalVariable *&Entry = SuperClassReferences[ID->getIdentifier()];
if (!Entry) {
@@ -6660,13 +6686,11 @@ CGObjCNonFragileABIMac::EmitSuperClassRef(CodeGenFunction &CGF,
Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
false, llvm::GlobalValue::PrivateLinkage,
ClassGV, "OBJC_CLASSLIST_SUP_REFS_$_");
- Entry->setAlignment(
- CGM.getDataLayout().getABITypeAlignment(
- ObjCTypes.ClassnfABIPtrTy));
+ Entry->setAlignment(Align.getQuantity());
Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
CGM.addCompilerUsedGlobal(Entry);
}
- return CGF.Builder.CreateLoad(Entry);
+ return CGF.Builder.CreateAlignedLoad(Entry, Align);
}
/// EmitMetaClassRef - Return a Value * of the address of _class_t
@@ -6675,6 +6699,7 @@ CGObjCNonFragileABIMac::EmitSuperClassRef(CodeGenFunction &CGF,
llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CodeGenFunction &CGF,
const ObjCInterfaceDecl *ID,
bool Weak) {
+ CharUnits Align = CGF.getPointerAlign();
llvm::GlobalVariable * &Entry = MetaClassReferences[ID->getIdentifier()];
if (!Entry) {
llvm::SmallString<64> MetaClassName(getMetaclassSymbolPrefix());
@@ -6685,14 +6710,13 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CodeGenFunction &CGF,
Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
false, llvm::GlobalValue::PrivateLinkage,
MetaClassGV, "OBJC_CLASSLIST_SUP_REFS_$_");
- Entry->setAlignment(
- CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassnfABIPtrTy));
+ Entry->setAlignment(Align.getQuantity());
Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
CGM.addCompilerUsedGlobal(Entry);
}
- return CGF.Builder.CreateLoad(Entry);
+ return CGF.Builder.CreateAlignedLoad(Entry, Align);
}
/// GetClass - Return a reference to the class for the given interface
@@ -6727,14 +6751,15 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
// ...
// Create and init a super structure; this is a (receiver, class)
// pair we will pass to objc_msgSendSuper.
- llvm::Value *ObjCSuper =
- CGF.CreateTempAlloca(ObjCTypes.SuperTy, "objc_super");
+ Address ObjCSuper =
+ CGF.CreateTempAlloca(ObjCTypes.SuperTy, CGF.getPointerAlign(),
+ "objc_super");
llvm::Value *ReceiverAsObject =
CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
CGF.Builder.CreateStore(
ReceiverAsObject,
- CGF.Builder.CreateStructGEP(ObjCTypes.SuperTy, ObjCSuper, 0));
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0, CharUnits::Zero()));
// If this is a class message the metaclass is passed as the target.
llvm::Value *Target;
@@ -6749,22 +6774,33 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
Target = CGF.Builder.CreateBitCast(Target, ClassTy);
CGF.Builder.CreateStore(
- Target, CGF.Builder.CreateStructGEP(ObjCTypes.SuperTy, ObjCSuper, 1));
+ Target, CGF.Builder.CreateStructGEP(ObjCSuper, 1, CGF.getPointerSize()));
return (isVTableDispatchedSelector(Sel))
? EmitVTableMessageSend(CGF, Return, ResultType, Sel,
- ObjCSuper, ObjCTypes.SuperPtrCTy,
+ ObjCSuper.getPointer(), ObjCTypes.SuperPtrCTy,
true, CallArgs, Method)
: EmitMessageSend(CGF, Return, ResultType,
EmitSelector(CGF, Sel),
- ObjCSuper, ObjCTypes.SuperPtrCTy,
+ ObjCSuper.getPointer(), ObjCTypes.SuperPtrCTy,
true, CallArgs, Method, ObjCTypes);
}
llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CodeGenFunction &CGF,
- Selector Sel, bool lval) {
+ Selector Sel) {
+ Address Addr = EmitSelectorAddr(CGF, Sel);
+
+ llvm::LoadInst* LI = CGF.Builder.CreateLoad(Addr);
+ LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
+ llvm::MDNode::get(VMContext, None));
+ return LI;
+}
+
+Address CGObjCNonFragileABIMac::EmitSelectorAddr(CodeGenFunction &CGF,
+ Selector Sel) {
llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+ CharUnits Align = CGF.getPointerAlign();
if (!Entry) {
llvm::Constant *Casted =
llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
@@ -6774,23 +6810,19 @@ llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CodeGenFunction &CGF,
Casted, "OBJC_SELECTOR_REFERENCES_");
Entry->setExternallyInitialized(true);
Entry->setSection("__DATA, __objc_selrefs, literal_pointers, no_dead_strip");
+ Entry->setAlignment(Align.getQuantity());
CGM.addCompilerUsedGlobal(Entry);
}
- if (lval)
- return Entry;
- llvm::LoadInst* LI = CGF.Builder.CreateLoad(Entry);
-
- LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
- llvm::MDNode::get(VMContext, None));
- return LI;
+ return Address(Entry, Align);
}
+
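The old EmitSelector(CGF, Sel, lval) overloaded one entry point with a bool that changed the meaning of its return value. The split above makes the address form (EmitSelectorAddr) the primitive and derives the value form by loading through it, tagging the load invariant.load since a selector slot never changes once the loader has fixed it up. The shape of the refactor, with stand-in (non-clang) names:

    // Sketch of the flag-to-two-functions split; names are illustrative.
    #include <string>
    #include <unordered_map>

    class SelectorTable {
      std::unordered_map<std::string, const char *> Refs;
    public:
      // EmitSelectorAddr analogue: create the slot on first use.
      const char **getAddr(const std::string &Name) {
        auto It = Refs.emplace(Name, nullptr).first;
        It->second = It->first.c_str();   // slot fixed up once
        return &It->second;
      }
      // EmitSelector analogue: a load through the slot.
      const char *get(const std::string &Name) { return *getAddr(Name); }
    };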
/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
/// objc_assign_ivar (id src, id *dst, ptrdiff_t)
///
void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src,
- llvm::Value *dst,
+ Address dst,
llvm::Value *ivarOffset) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
@@ -6802,7 +6834,7 @@ void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst, ivarOffset };
+ llvm::Value *args[] = { src, dst.getPointer(), ivarOffset };
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignIvarFn(), args);
}
@@ -6811,7 +6843,7 @@ void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
///
void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) {
+ llvm::Value *src, Address dst) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
@@ -6822,19 +6854,19 @@ void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst };
+ llvm::Value *args[] = { src, dst.getPointer() };
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignStrongCastFn(),
args, "weakassign");
}
void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable(
CodeGen::CodeGenFunction &CGF,
- llvm::Value *DestPtr,
- llvm::Value *SrcPtr,
+ Address DestPtr,
+ Address SrcPtr,
llvm::Value *Size) {
SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
- llvm::Value *args[] = { DestPtr, SrcPtr, Size };
+ llvm::Value *args[] = { DestPtr.getPointer(), SrcPtr.getPointer(), Size };
CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args);
}
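The EmitObjC*Assign/Read/Memmove overrides above all funnel into the ObjC garbage collector's write barriers; the only change is that the destination slot now arrives as an Address and is unwrapped with getPointer() at the call. For reference, the barrier entry points as the patch's own comments name them, written out as declarations (believed to match Apple's objc-auto.h; treat as a reference sketch rather than a verified interface):

    #include <stddef.h>

    typedef struct objc_object *id;

    extern "C" id objc_assign_ivar(id value, id dest, ptrdiff_t offset);
    extern "C" id objc_assign_weak(id value, id *location);
    extern "C" id objc_assign_global(id value, id *location);
    extern "C" id objc_assign_strongCast(id value, id *location);
    extern "C" id objc_read_weak(id *location);
    extern "C" void *objc_memmove_collectable(void *dst, const void *src,
                                              size_t size);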
@@ -6843,13 +6875,12 @@ void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable(
///
llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
CodeGen::CodeGenFunction &CGF,
- llvm::Value *AddrWeakObj) {
- llvm::Type* DestTy =
- cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
+ Address AddrWeakObj) {
+ llvm::Type *DestTy = AddrWeakObj.getElementType();
AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj, ObjCTypes.PtrObjectPtrTy);
llvm::Value *read_weak =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcReadWeakFn(),
- AddrWeakObj, "weakread");
+ AddrWeakObj.getPointer(), "weakread");
read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
return read_weak;
}
@@ -6858,7 +6889,7 @@ llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
/// objc_assign_weak (id src, id *dst)
///
void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) {
+ llvm::Value *src, Address dst) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
@@ -6869,7 +6900,7 @@ void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst };
+ llvm::Value *args[] = { src, dst.getPointer() };
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignWeakFn(),
args, "weakassign");
}
@@ -6878,7 +6909,7 @@ void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
/// objc_assign_global (id src, id *dst)
///
void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst,
+ llvm::Value *src, Address dst,
bool threadlocal) {
llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
@@ -6890,7 +6921,7 @@ void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst };
+ llvm::Value *args[] = { src, dst.getPointer() };
if (!threadlocal)
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignGlobalFn(),
args, "globalassign");
diff --git a/clang/lib/CodeGen/CGObjCRuntime.cpp b/clang/lib/CodeGen/CGObjCRuntime.cpp
index b6f79cfc993..b2ca5c8d0e0 100644
--- a/clang/lib/CodeGen/CGObjCRuntime.cpp
+++ b/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -136,12 +136,13 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
CGF.CGM.getContext().toBits(StorageSize),
CharUnits::fromQuantity(0)));
- V = CGF.Builder.CreateBitCast(V,
- llvm::Type::getIntNPtrTy(CGF.getLLVMContext(),
+ Address Addr(V, Alignment);
+ Addr = CGF.Builder.CreateElementBitCast(Addr,
+ llvm::Type::getIntNTy(CGF.getLLVMContext(),
Info->StorageSize));
- return LValue::MakeBitfield(V, *Info,
+ return LValue::MakeBitfield(Addr, *Info,
IvarTy.withCVRQualifiers(CVRQualifiers),
- Alignment);
+ AlignmentSource::Decl);
}
namespace {
@@ -256,7 +257,7 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
CGF.EmitAutoVarDecl(*CatchParam);
- llvm::Value *CatchParamAddr = CGF.GetAddrOfLocalVar(CatchParam);
+ Address CatchParamAddr = CGF.GetAddrOfLocalVar(CatchParam);
switch (CatchParam->getType().getQualifiers().getObjCLifetime()) {
case Qualifiers::OCL_Strong:
diff --git a/clang/lib/CodeGen/CGObjCRuntime.h b/clang/lib/CodeGen/CGObjCRuntime.h
index 47525464986..1632713c32a 100644
--- a/clang/lib/CodeGen/CGObjCRuntime.h
+++ b/clang/lib/CodeGen/CGObjCRuntime.h
@@ -116,11 +116,16 @@ public:
/// this compilation unit with the runtime library.
virtual llvm::Function *ModuleInitFunction() = 0;
- /// Get a selector for the specified name and type values. The
- /// return value should have the LLVM type for pointer-to
+ /// Get a selector for the specified name and type values.
+ /// The result should have the LLVM type for ASTContext::getObjCSelType().
+ virtual llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel) = 0;
+
+ /// Get the address of a selector for the specified name and type values.
+ /// This is a rarely-used language extension, but sadly it exists.
+ ///
+ /// The result should have the LLVM type for a pointer to
/// ASTContext::getObjCSelType().
- virtual llvm::Value *GetSelector(CodeGenFunction &CGF,
- Selector Sel, bool lval=false) = 0;
+ virtual Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) = 0;
/// Get a typed selector.
virtual llvm::Value *GetSelector(CodeGenFunction &CGF,
@@ -133,7 +138,7 @@ public:
virtual llvm::Constant *GetEHType(QualType T) = 0;
/// Generate a constant string object.
- virtual llvm::Constant *GenerateConstantString(const StringLiteral *) = 0;
+ virtual ConstantAddress GenerateConstantString(const StringLiteral *) = 0;
/// Generate a category. A category contains a list of methods (and
/// accompanying metadata) and a list of protocols.
@@ -238,17 +243,17 @@ public:
const ObjCAtThrowStmt &S,
bool ClearInsertionPoint=true) = 0;
virtual llvm::Value *EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
- llvm::Value *AddrWeakObj) = 0;
+ Address AddrWeakObj) = 0;
virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest) = 0;
+ llvm::Value *src, Address dest) = 0;
virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest,
+ llvm::Value *src, Address dest,
bool threadlocal=false) = 0;
virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest,
+ llvm::Value *src, Address dest,
llvm::Value *ivarOffset) = 0;
virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest) = 0;
+ llvm::Value *src, Address dest) = 0;
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
QualType ObjectTy,
@@ -259,8 +264,8 @@ public:
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar) = 0;
virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *DestPtr,
- llvm::Value *SrcPtr,
+ Address DestPtr,
+ Address SrcPtr,
llvm::Value *Size) = 0;
virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
const CodeGen::CGBlockInfo &blockInfo) = 0;
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 6ffcb715c95..a0b3ee5ab01 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -233,9 +233,8 @@ public:
LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
return CGF.MakeNaturalAlignAddrLValue(
- CGF.Builder.CreateAlignedLoad(
- CGF.GetAddrOfLocalVar(getThreadIDVariable()),
- CGF.PointerAlignInBytes),
+ CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(getThreadIDVariable())),
getThreadIDVariable()
->getType()
->castAs<PointerType>()
@@ -258,7 +257,7 @@ void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
CodeGenFunction &CGF) {
- return CGF.MakeNaturalAlignAddrLValue(
+ return CGF.MakeAddrLValue(
CGF.GetAddrOfLocalVar(getThreadIDVariable()),
getThreadIDVariable()->getType());
}
@@ -280,6 +279,25 @@ void CGOpenMPRuntime::clear() {
InternalVars.clear();
}
+// Layout information for ident_t.
+static CharUnits getIdentAlign(CodeGenModule &CGM) {
+ return CGM.getPointerAlign();
+}
+static CharUnits getIdentSize(CodeGenModule &CGM) {
+ assert((4 * CGM.getPointerSize()).isMultipleOf(CGM.getPointerAlign()));
+ return CharUnits::fromQuantity(16) + CGM.getPointerSize();
+}
+static CharUnits getOffsetOfIdentField(CGOpenMPRuntime::IdentFieldIndex Field) {
+ // All the fields except the last are i32, so this works beautifully.
+ return unsigned(Field) * CharUnits::fromQuantity(4);
+}
+static Address createIdentFieldGEP(CodeGenFunction &CGF, Address Addr,
+ CGOpenMPRuntime::IdentFieldIndex Field,
+ const llvm::Twine &Name = "") {
+ auto Offset = getOffsetOfIdentField(Field);
+ return CGF.Builder.CreateStructGEP(Addr, Field, Offset, Name);
+}
+
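The three helpers above hard-code ident_t's layout: four i32 fields followed by char *psource, so field i of the i32 prefix lives at byte 4*i and the whole record is 16 bytes plus one pointer, which is exactly what getIdentSize returns. A compilable restatement (field names mirror kmp.h's ident_t and are illustrative; the asserts hold on common 32- and 64-bit targets):

    #include <cstddef>
    #include <cstdint>

    struct ident_t {
      std::int32_t reserved_1;
      std::int32_t flags;
      std::int32_t reserved_2;
      std::int32_t reserved_3;
      const char *psource;
    };

    static_assert(offsetof(ident_t, flags) == 4, "i32 field i sits at byte 4*i");
    static_assert(offsetof(ident_t, psource) == 16, "psource after four i32s");
    static_assert(sizeof(ident_t) == 16 + sizeof(void *), "matches getIdentSize");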
llvm::Value *CGOpenMPRuntime::emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
@@ -305,8 +323,8 @@ llvm::Value *CGOpenMPRuntime::emitTaskOutlinedFunction(
return CGF.GenerateCapturedStmtFunction(*CS);
}
-llvm::Value *
-CGOpenMPRuntime::getOrCreateDefaultLocation(OpenMPLocationFlags Flags) {
+Address CGOpenMPRuntime::getOrCreateDefaultLocation(OpenMPLocationFlags Flags) {
+ CharUnits Align = getIdentAlign(CGM);
llvm::Value *Entry = OpenMPDefaultLocMap.lookup(Flags);
if (!Entry) {
if (!DefaultOpenMPPSource) {
@@ -315,7 +333,7 @@ CGOpenMPRuntime::getOrCreateDefaultLocation(OpenMPLocationFlags Flags) {
// Taken from
// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp_str.c
DefaultOpenMPPSource =
- CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;");
+ CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;").getPointer();
DefaultOpenMPPSource =
llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
}
@@ -323,6 +341,7 @@ CGOpenMPRuntime::getOrCreateDefaultLocation(OpenMPLocationFlags Flags) {
CGM.getModule(), IdentTy, /*isConstant*/ true,
llvm::GlobalValue::PrivateLinkage, /*Initializer*/ nullptr);
DefaultOpenMPLocation->setUnnamedAddr(true);
+ DefaultOpenMPLocation->setAlignment(Align.getQuantity());
llvm::Constant *Zero = llvm::ConstantInt::get(CGM.Int32Ty, 0, true);
llvm::Constant *Values[] = {Zero,
@@ -330,10 +349,9 @@ CGOpenMPRuntime::getOrCreateDefaultLocation(OpenMPLocationFlags Flags) {
Zero, Zero, DefaultOpenMPPSource};
llvm::Constant *Init = llvm::ConstantStruct::get(IdentTy, Values);
DefaultOpenMPLocation->setInitializer(Init);
- OpenMPDefaultLocMap[Flags] = DefaultOpenMPLocation;
- return DefaultOpenMPLocation;
+ OpenMPDefaultLocMap[Flags] = Entry = DefaultOpenMPLocation;
}
- return Entry;
+ return Address(Entry, Align);
}
llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
@@ -342,34 +360,33 @@ llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
// If no debug info is generated - return global default location.
if (CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::NoDebugInfo ||
Loc.isInvalid())
- return getOrCreateDefaultLocation(Flags);
+ return getOrCreateDefaultLocation(Flags).getPointer();
assert(CGF.CurFn && "No function in current CodeGenFunction.");
- llvm::Value *LocValue = nullptr;
+ Address LocValue = Address::invalid();
auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
if (I != OpenMPLocThreadIDMap.end())
- LocValue = I->second.DebugLoc;
+ LocValue = Address(I->second.DebugLoc, getIdentAlign(CGF.CGM));
+
// OpenMPLocThreadIDMap may have null DebugLoc and non-null ThreadID, if
// GetOpenMPThreadID was called before this routine.
- if (LocValue == nullptr) {
+ if (!LocValue.isValid()) {
// Generate "ident_t .kmpc_loc.addr;"
- llvm::AllocaInst *AI = CGF.CreateTempAlloca(IdentTy, ".kmpc_loc.addr");
- AI->setAlignment(CGM.getDataLayout().getPrefTypeAlignment(IdentTy));
+ Address AI = CGF.CreateTempAlloca(IdentTy, getIdentAlign(CGF.CGM),
+ ".kmpc_loc.addr");
auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
- Elem.second.DebugLoc = AI;
+ Elem.second.DebugLoc = AI.getPointer();
LocValue = AI;
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
- llvm::ConstantExpr::getSizeOf(IdentTy),
- CGM.PointerAlignInBytes);
+ CGM.getSize(getIdentSize(CGF.CGM)));
}
// char **psource = &.kmpc_loc_<flags>.addr.psource;
- auto *PSource = CGF.Builder.CreateConstInBoundsGEP2_32(IdentTy, LocValue, 0,
- IdentField_PSource);
+ Address PSource = createIdentFieldGEP(CGF, LocValue, IdentField_PSource);
auto OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
if (OMPDebugLoc == nullptr) {
@@ -389,7 +406,9 @@ llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
// *psource = ";<File>;<Function>;<Line>;<Column>;;";
CGF.Builder.CreateStore(OMPDebugLoc, PSource);
- return LocValue;
+ // Our callers always pass this to a runtime function, so for
+ // convenience, go ahead and return a naked pointer.
+ return LocValue.getPointer();
}
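Putting the pieces together: emitUpdateLocation materializes one ident_t alloca per function, memcpy-initializes it from the flag-specific default location, and then rewrites only the psource field per call site; it returns a raw pointer because, as the new comment says, every caller feeds it straight to a runtime call. The equivalent C++, assuming the OMP_IDENT_KMPC flag value is 0x02 as in kmp.h (an assumption, not stated in this diff):

    #include <cstring>

    struct ident_t {
      int reserved_1, flags, reserved_2, reserved_3;
      const char *psource;
    };

    static const ident_t DefaultLoc = {0, /*OMP_IDENT_KMPC, assumed*/ 2, 0, 0,
                                       ";unknown;unknown;0;0;;"};

    const ident_t *emitUpdateLocationSketch(ident_t &kmpc_loc_addr,
                                            bool firstUse,
                                            const char *psource) {
      if (firstUse)                      // once per function, at the allocas
        std::memcpy(&kmpc_loc_addr, &DefaultLoc, sizeof(ident_t));
      kmpc_loc_addr.psource = psource;   // per call site: *psource = "...";
      return &kmpc_loc_addr;             // consumers are all runtime calls
    }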
llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
@@ -939,25 +958,27 @@ CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
Twine(CGM.getMangledName(VD)) + ".cache.");
}
-llvm::Value *CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
- const VarDecl *VD,
- llvm::Value *VDAddr,
- SourceLocation Loc) {
+Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
+ const VarDecl *VD,
+ Address VDAddr,
+ SourceLocation Loc) {
if (CGM.getLangOpts().OpenMPUseTLS &&
CGM.getContext().getTargetInfo().isTLSSupported())
return VDAddr;
- auto VarTy = VDAddr->getType()->getPointerElementType();
+ auto VarTy = VDAddr.getElementType();
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
- CGF.Builder.CreatePointerCast(VDAddr, CGM.Int8PtrTy),
+ CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
+ CGM.Int8PtrTy),
CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
getOrCreateThreadPrivateCache(VD)};
- return CGF.EmitRuntimeCall(
- createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args);
+ return Address(CGF.EmitRuntimeCall(
+ createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
+ VDAddr.getAlignment());
}
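getAddrOfThreadPrivate now returns an Address built from the runtime call's void* result plus the original variable's alignment, on the assumption that the per-thread copy is at least as aligned as the prototype. The runtime hook it wraps, for reference (declaration as in the LLVM OpenMP runtime; shown here only to make the shape of the call concrete):

    #include <stddef.h>

    typedef int kmp_int32;
    struct ident_t;

    extern "C" void *__kmpc_threadprivate_cached(ident_t *loc, kmp_int32 gtid,
                                                 void *data, size_t size,
                                                 void ***cache);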
void CGOpenMPRuntime::emitThreadPrivateVarInit(
- CodeGenFunction &CGF, llvm::Value *VDAddr, llvm::Value *Ctor,
+ CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
// Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
// library.
@@ -967,14 +988,15 @@ void CGOpenMPRuntime::emitThreadPrivateVarInit(
// Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
// to register constructor/destructor for variable.
llvm::Value *Args[] = {OMPLoc,
- CGF.Builder.CreatePointerCast(VDAddr, CGM.VoidPtrTy),
+ CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
+ CGM.VoidPtrTy),
Ctor, CopyCtor, Dtor};
CGF.EmitRuntimeCall(
createRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args);
}
llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
- const VarDecl *VD, llvm::Value *VDAddr, SourceLocation Loc,
+ const VarDecl *VD, Address VDAddr, SourceLocation Loc,
bool PerformInit, CodeGenFunction *CGF) {
if (CGM.getLangOpts().OpenMPUseTLS &&
CGM.getContext().getTargetInfo().isTLSSupported())
@@ -1005,17 +1027,15 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
Args, SourceLocation());
auto ArgVal = CtorCGF.EmitLoadOfScalar(
- CtorCGF.GetAddrOfLocalVar(&Dst),
- /*Volatile=*/false, CGM.PointerAlignInBytes,
+ CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
CGM.getContext().VoidPtrTy, Dst.getLocation());
- auto Arg = CtorCGF.Builder.CreatePointerCast(
- ArgVal,
- CtorCGF.ConvertTypeForMem(CGM.getContext().getPointerType(ASTTy)));
+ Address Arg = Address(ArgVal, VDAddr.getAlignment());
+ Arg = CtorCGF.Builder.CreateElementBitCast(Arg,
+ CtorCGF.ConvertTypeForMem(ASTTy));
CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
/*IsInitializer=*/true);
ArgVal = CtorCGF.EmitLoadOfScalar(
- CtorCGF.GetAddrOfLocalVar(&Dst),
- /*Volatile=*/false, CGM.PointerAlignInBytes,
+ CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
CGM.getContext().VoidPtrTy, Dst.getLocation());
CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
CtorCGF.FinishFunction();
@@ -1040,9 +1060,8 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
SourceLocation());
auto ArgVal = DtorCGF.EmitLoadOfScalar(
DtorCGF.GetAddrOfLocalVar(&Dst),
- /*Volatile=*/false, CGM.PointerAlignInBytes,
- CGM.getContext().VoidPtrTy, Dst.getLocation());
- DtorCGF.emitDestroy(ArgVal, ASTTy,
+ /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
+ DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
DtorCGF.getDestroyer(ASTTy.isDestructedType()),
DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
DtorCGF.FinishFunction();
@@ -1149,7 +1168,7 @@ static void emitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond,
void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *OutlinedFn,
- llvm::Value *CapturedStruct,
+ Address CapturedStruct,
const Expr *IfCond) {
auto *RTLoc = emitUpdateLocation(CGF, Loc);
auto &&ThenGen =
@@ -1162,7 +1181,7 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
1), // Number of arguments after 'microtask' argument
// (there is only one additional argument - 'context')
CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy()),
- CGF.EmitCastToVoidPtr(CapturedStruct)};
+ CGF.EmitCastToVoidPtr(CapturedStruct.getPointer())};
auto RTLFn = createRuntimeFunction(OMPRTL__kmpc_fork_call);
CGF.EmitRuntimeCall(RTLFn, Args);
};
@@ -1177,11 +1196,15 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
// OutlinedFn(&GTid, &zero, CapturedStruct);
auto ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
- auto Int32Ty = CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32,
- /*Signed*/ true);
- auto ZeroAddr = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".zero.addr");
+ Address ZeroAddr =
+ CGF.CreateTempAlloca(CGF.Int32Ty, CharUnits::fromQuantity(4),
+ /*Name*/ ".zero.addr");
CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
- llvm::Value *OutlinedFnArgs[] = {ThreadIDAddr, ZeroAddr, CapturedStruct};
+ llvm::Value *OutlinedFnArgs[] = {
+ ThreadIDAddr.getPointer(),
+ ZeroAddr.getPointer(),
+ CapturedStruct.getPointer()
+ };
CGF.EmitCallOrInvoke(OutlinedFn, OutlinedFnArgs);
// __kmpc_end_serialized_parallel(&Loc, GTid);
@@ -1203,8 +1226,8 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
// regular serial code region, get thread ID by calling kmp_int32
// kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary and
// return the address of that temp.
-llvm::Value *CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
- SourceLocation Loc) {
+Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
+ SourceLocation Loc) {
if (auto OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
if (OMPRegionInfo->getThreadIDVariable())
@@ -1215,7 +1238,7 @@ llvm::Value *CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
auto ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
CGF.EmitStoreOfScalar(ThreadID,
- CGF.MakeNaturalAlignAddrLValue(ThreadIDTemp, Int32Ty));
+ CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));
return ThreadIDTemp;
}
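This is the same MakeNaturalAlignAddrLValue-to-MakeAddrLValue rewrite that recurs through the file: with only a raw pointer, the alignment had to be recomputed from the type (its "natural" alignment), while an Address already carries the exact figure. A stand-in model of the two constructors (not clang API):

    #include <cstdint>

    struct CharUnits { std::int64_t Quantity; };
    struct Address   { void *Pointer; CharUnits Alignment; };
    struct LValue    { void *Pointer; CharUnits Alignment; };

    template <typename T>
    LValue makeNaturalAlignAddrLValue(void *P) {   // old path: derive from T
      return LValue{P, CharUnits{static_cast<std::int64_t>(alignof(T))}};
    }

    LValue makeAddrLValue(Address A) {             // new path: reuse tracking
      return LValue{A.Pointer, A.Alignment};
    }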
@@ -1353,6 +1376,22 @@ void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
}
}
+/// Given an array of pointers to variables, project the address of a
+/// given variable.
+static Address emitAddrOfVarFromArray(CodeGenFunction &CGF,
+ Address Array, unsigned Index,
+ const VarDecl *Var) {
+ // Pull out the pointer to the variable.
+ Address PtrAddr =
+ CGF.Builder.CreateConstArrayGEP(Array, Index, CGF.getPointerSize());
+ llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
+
+ Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
+ Addr = CGF.Builder.CreateElementBitCast(Addr,
+ CGF.ConvertTypeForMem(Var->getType()));
+ return Addr;
+}
+
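The new helper centralizes a pattern the copyprivate and reduction functions both need: the runtime hands them a void*[n] whose I-th entry is the address of the I-th variable, so each use loads the slot, re-types the pointer, and (in the real helper) attaches the variable's declared alignment. The equivalent C++, minus the alignment bookkeeping:

    // What emitAddrOfVarFromArray emits, as equivalent C++ (sketch only).
    template <typename VarType>
    VarType *addrOfVarFromArray(void **Array, unsigned Index) {
      void *Ptr = Array[Index];            // CreateConstArrayGEP + CreateLoad
      return static_cast<VarType *>(Ptr);  // CreateElementBitCast
    }

    // Usage: double X = 0; void *Args[] = {&X};
    // *addrOfVarFromArray<double>(Args, 0) = 1.0;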
static llvm::Value *emitCopyprivateCopyFunction(
CodeGenModule &CGM, llvm::Type *ArgsType,
ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
@@ -1377,35 +1416,26 @@ static llvm::Value *emitCopyprivateCopyFunction(
CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);
// Dest = (void*[n])(LHSArg);
// Src = (void*[n])(RHSArg);
- auto *LHS = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateAlignedLoad(CGF.GetAddrOfLocalVar(&LHSArg),
- CGF.PointerAlignInBytes),
- ArgsType);
- auto *RHS = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateAlignedLoad(CGF.GetAddrOfLocalVar(&RHSArg),
- CGF.PointerAlignInBytes),
- ArgsType);
+ Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
+ ArgsType), CGF.getPointerAlign());
+ Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
+ ArgsType), CGF.getPointerAlign());
// *(Type0*)Dst[0] = *(Type0*)Src[0];
// *(Type1*)Dst[1] = *(Type1*)Src[1];
// ...
// *(Typen*)Dst[n] = *(Typen*)Src[n];
for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
- auto *DestAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateAlignedLoad(
- CGF.Builder.CreateStructGEP(nullptr, LHS, I),
- CGM.PointerAlignInBytes),
- CGF.ConvertTypeForMem(C.getPointerType(SrcExprs[I]->getType())));
- auto *SrcAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateAlignedLoad(
- CGF.Builder.CreateStructGEP(nullptr, RHS, I),
- CGM.PointerAlignInBytes),
- CGF.ConvertTypeForMem(C.getPointerType(SrcExprs[I]->getType())));
+ auto DestVar = cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
+ Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);
+
+ auto SrcVar = cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
+ Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);
+
auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
QualType Type = VD->getType();
- CGF.EmitOMPCopy(CGF, Type, DestAddr, SrcAddr,
- cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl()),
- cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl()),
- AssignmentOps[I]);
+ CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
}
CGF.FinishFunction();
return Fn;
@@ -1431,13 +1461,12 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
// call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
// <copy_func>, did_it);
- llvm::AllocaInst *DidIt = nullptr;
+ Address DidIt = Address::invalid();
if (!CopyprivateVars.empty()) {
// int32 did_it = 0;
auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
- CGF.Builder.CreateAlignedStore(CGF.Builder.getInt32(0), DidIt,
- DidIt->getAlignment());
+ CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
}
// Prepare arguments and build a call to __kmpc_single
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
@@ -1452,29 +1481,28 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_single),
llvm::makeArrayRef(Args));
SingleOpGen(CGF);
- if (DidIt) {
+ if (DidIt.isValid()) {
// did_it = 1;
- CGF.Builder.CreateAlignedStore(CGF.Builder.getInt32(1), DidIt,
- DidIt->getAlignment());
+ CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
}
});
// call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
// <copy_func>, did_it);
- if (DidIt) {
+ if (DidIt.isValid()) {
llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
auto CopyprivateArrayTy =
C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
/*IndexTypeQuals=*/0);
// Create a list of all private variables for copyprivate.
- auto *CopyprivateList =
+ Address CopyprivateList =
CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
- auto *Elem = CGF.Builder.CreateStructGEP(
- CopyprivateList->getAllocatedType(), CopyprivateList, I);
- CGF.Builder.CreateAlignedStore(
+ Address Elem = CGF.Builder.CreateConstArrayGEP(
+ CopyprivateList, I, CGF.getPointerSize());
+ CGF.Builder.CreateStore(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLValue(CopyprivateVars[I]).getAddress(), CGF.VoidPtrTy),
- Elem, CGM.PointerAlignInBytes);
+ CGF.EmitLValue(CopyprivateVars[I]).getPointer(), CGF.VoidPtrTy),
+ Elem);
}
// Build function that copies private values from single region to all other
// threads in the corresponding parallel region.
@@ -1483,15 +1511,15 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
CopyprivateVars, SrcExprs, DstExprs, AssignmentOps);
auto *BufSize = llvm::ConstantInt::get(
CGM.SizeTy, C.getTypeSizeInChars(CopyprivateArrayTy).getQuantity());
- auto *CL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
- CGF.VoidPtrTy);
- auto *DidItVal =
- CGF.Builder.CreateAlignedLoad(DidIt, CGF.PointerAlignInBytes);
+ Address CL =
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
+ CGF.VoidPtrTy);
+ auto *DidItVal = CGF.Builder.CreateLoad(DidIt);
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), // ident_t *<loc>
getThreadID(CGF, Loc), // i32 <gtid>
BufSize, // size_t <buf_size>
- CL, // void *<copyprivate list>
+ CL.getPointer(), // void *<copyprivate list>
CpyFn, // void (*) (void *, void *) <copy_func>
DidItVal // i32 did_it
};
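End to end, the emitted sequence is: zero-initialize did_it, let the winning thread run the region and set did_it = 1, collect the copyprivate variables' addresses into a void*[n], and let __kmpc_copyprivate broadcast through the generated copy function. A stubbed, compilable sketch of that control flow (the kmpc* functions stand in for the real __kmpc_* calls named above):

    #include <cstddef>

    static bool kmpcSingle() { return true; }   // one thread wins
    static void kmpcEndSingle() {}
    static void kmpcCopyprivate(std::size_t /*BufSize*/, void **List,
                                void (*Copy)(void *, void *), int DidIt) {
      if (DidIt)
        Copy(List, List);  // real runtime: broadcast winner's list to others
    }
    static void copyFunc(void *Dst, void *Src) { (void)Dst; (void)Src; }

    void singleWithCopyprivate(int &Priv) {
      int DidIt = 0;                      // .omp.copyprivate.did_it
      void *CprList[] = {&Priv};          // .omp.copyprivate.cpr_list
      if (kmpcSingle()) {
        Priv = 42;                        // single region body
        DidIt = 1;
        kmpcEndSingle();
      }
      kmpcCopyprivate(sizeof(CprList), CprList, copyFunc, DidIt);
    }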
@@ -1625,61 +1653,77 @@ bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
return Schedule != OMP_sch_static;
}
-void CGOpenMPRuntime::emitForInit(CodeGenFunction &CGF, SourceLocation Loc,
- OpenMPScheduleClauseKind ScheduleKind,
- unsigned IVSize, bool IVSigned, bool Ordered,
- llvm::Value *IL, llvm::Value *LB,
- llvm::Value *UB, llvm::Value *ST,
- llvm::Value *Chunk) {
+void CGOpenMPRuntime::emitForDispatchInit(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ OpenMPScheduleClauseKind ScheduleKind,
+ unsigned IVSize, bool IVSigned,
+ bool Ordered, llvm::Value *UB,
+ llvm::Value *Chunk) {
OpenMPSchedType Schedule =
getRuntimeSchedule(ScheduleKind, Chunk != nullptr, Ordered);
- if (Ordered ||
- (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
- Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked)) {
- // Call __kmpc_dispatch_init(
- // ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
- // kmp_int[32|64] lower, kmp_int[32|64] upper,
- // kmp_int[32|64] stride, kmp_int[32|64] chunk);
+ assert(Ordered ||
+ (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
+ Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked));
+ // Call __kmpc_dispatch_init(
+ // ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
+ // kmp_int[32|64] lower, kmp_int[32|64] upper,
+ // kmp_int[32|64] stride, kmp_int[32|64] chunk);
+
+ // If the Chunk was not specified in the clause, use the default value 1.
+ if (Chunk == nullptr)
+ Chunk = CGF.Builder.getIntN(IVSize, 1);
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
+ getThreadID(CGF, Loc),
+ CGF.Builder.getInt32(Schedule), // Schedule type
+ CGF.Builder.getIntN(IVSize, 0), // Lower
+ UB, // Upper
+ CGF.Builder.getIntN(IVSize, 1), // Stride
+ Chunk // Chunk
+ };
+ CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
+}
+void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ OpenMPScheduleClauseKind ScheduleKind,
+ unsigned IVSize, bool IVSigned,
+ bool Ordered, Address IL, Address LB,
+ Address UB, Address ST,
+ llvm::Value *Chunk) {
+ OpenMPSchedType Schedule =
+ getRuntimeSchedule(ScheduleKind, Chunk != nullptr, Ordered);
+ assert(!Ordered);
+ assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
+ Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked);
+
+ // Call __kmpc_for_static_init(
+ // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
+ // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
+ // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
+ // kmp_int[32|64] incr, kmp_int[32|64] chunk);
+ if (Chunk == nullptr) {
+ assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static) &&
+ "expected static non-chunked schedule");
// If the Chunk was not specified in the clause - use default value 1.
- if (Chunk == nullptr)
Chunk = CGF.Builder.getIntN(IVSize, 1);
- llvm::Value *Args[] = { emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
- getThreadID(CGF, Loc),
- CGF.Builder.getInt32(Schedule), // Schedule type
- CGF.Builder.getIntN(IVSize, 0), // Lower
- UB, // Upper
- CGF.Builder.getIntN(IVSize, 1), // Stride
- Chunk // Chunk
- };
- CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
} else {
- // Call __kmpc_for_static_init(
- // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
- // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
- // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
- // kmp_int[32|64] incr, kmp_int[32|64] chunk);
- if (Chunk == nullptr) {
- assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static) &&
- "expected static non-chunked schedule");
- // If the Chunk was not specified in the clause - use default value 1.
- Chunk = CGF.Builder.getIntN(IVSize, 1);
- } else
- assert((Schedule == OMP_sch_static_chunked ||
- Schedule == OMP_ord_static_chunked) &&
- "expected static chunked schedule");
- llvm::Value *Args[] = { emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
- getThreadID(CGF, Loc),
- CGF.Builder.getInt32(Schedule), // Schedule type
- IL, // &isLastIter
- LB, // &LB
- UB, // &UB
- ST, // &Stride
- CGF.Builder.getIntN(IVSize, 1), // Incr
- Chunk // Chunk
- };
- CGF.EmitRuntimeCall(createForStaticInitFunction(IVSize, IVSigned), Args);
+ assert((Schedule == OMP_sch_static_chunked ||
+ Schedule == OMP_ord_static_chunked) &&
+ "expected static chunked schedule");
}
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
+ getThreadID(CGF, Loc),
+ CGF.Builder.getInt32(Schedule), // Schedule type
+ IL.getPointer(), // &isLastIter
+ LB.getPointer(), // &LB
+ UB.getPointer(), // &UB
+ ST.getPointer(), // &Stride
+ CGF.Builder.getIntN(IVSize, 1), // Incr
+ Chunk // Chunk
+ };
+ CGF.EmitRuntimeCall(createForStaticInitFunction(IVSize, IVSigned), Args);
}
void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
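The old emitForInit chose between the dispatch and static protocols at emission time; the split gives each protocol its own entry point, with the impossible combinations turned into asserts. The runtime functions they target, written out for the 32-bit IV width following the comments above (64-bit variants are analogous; the schedule parameter is shown as kmp_int32 because that is what the emitted call passes, so treat these as reference declarations, not the authoritative kmp.h prototypes):

    typedef int kmp_int32;
    struct ident_t;

    extern "C" void __kmpc_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
                                           kmp_int32 schedule, kmp_int32 lb,
                                           kmp_int32 ub, kmp_int32 st,
                                           kmp_int32 chunk);

    extern "C" void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                                             kmp_int32 schedtype,
                                             kmp_int32 *plastiter,
                                             kmp_int32 *plower,
                                             kmp_int32 *pupper,
                                             kmp_int32 *pstride,
                                             kmp_int32 incr, kmp_int32 chunk);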
@@ -1703,19 +1747,19 @@ void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
SourceLocation Loc, unsigned IVSize,
- bool IVSigned, llvm::Value *IL,
- llvm::Value *LB, llvm::Value *UB,
- llvm::Value *ST) {
+ bool IVSigned, Address IL,
+ Address LB, Address UB,
+ Address ST) {
// Call __kmpc_dispatch_next(
// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
// kmp_int[32|64] *p_stride);
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC), getThreadID(CGF, Loc),
- IL, // &isLastIter
- LB, // &Lower
- UB, // &Upper
- ST // &Stride
+ IL.getPointer(), // &isLastIter
+ LB.getPointer(), // &Lower
+ UB.getPointer(), // &Upper
+ ST.getPointer() // &Stride
};
llvm::Value *Call =
CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
@@ -1921,10 +1965,9 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
// TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
// tt->task_data.shareds);
auto *GtidParam = CGF.EmitLoadOfScalar(
- CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false,
- C.getTypeAlignInChars(KmpInt32Ty).getQuantity(), KmpInt32Ty, Loc);
- auto *TaskTypeArgAddr = CGF.Builder.CreateAlignedLoad(
- CGF.GetAddrOfLocalVar(&TaskTypeArg), CGM.PointerAlignInBytes);
+ CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
+ auto *TaskTypeArgAddr = CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(&TaskTypeArg));
LValue TDBase =
CGF.MakeNaturalAlignAddrLValue(TaskTypeArgAddr, KmpTaskTWithPrivatesQTy);
auto *KmpTaskTWithPrivatesQTyRD =
@@ -1947,7 +1990,7 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
auto PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- PrivatesLVal.getAddress(), CGF.VoidPtrTy);
+ PrivatesLVal.getPointer(), CGF.VoidPtrTy);
} else {
PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
}
@@ -1957,7 +2000,7 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
CGF.EmitCallOrInvoke(TaskFunction, CallArgs);
CGF.EmitStoreThroughLValue(
RValue::get(CGF.Builder.getInt32(/*C=*/0)),
- CGF.MakeNaturalAlignAddrLValue(CGF.ReturnValue, KmpInt32Ty));
+ CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
CGF.FinishFunction();
return TaskEntry;
}
@@ -1988,8 +2031,8 @@ static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
Args);
- auto *TaskTypeArgAddr = CGF.Builder.CreateAlignedLoad(
- CGF.GetAddrOfLocalVar(&TaskTypeArg), CGM.PointerAlignInBytes);
+ auto *TaskTypeArgAddr = CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(&TaskTypeArg));
LValue Base =
CGF.MakeNaturalAlignAddrLValue(TaskTypeArgAddr, KmpTaskTWithPrivatesQTy);
auto *KmpTaskTWithPrivatesQTyRD =
@@ -2069,8 +2112,8 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
TaskPrivatesMapFnInfo, Args);
// *privi = &.privates.privi;
- auto *TaskPrivatesArgAddr = CGF.Builder.CreateAlignedLoad(
- CGF.GetAddrOfLocalVar(&TaskPrivatesArg), CGM.PointerAlignInBytes);
+ auto *TaskPrivatesArgAddr = CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(&TaskPrivatesArg));
LValue Base =
CGF.MakeNaturalAlignAddrLValue(TaskPrivatesArgAddr, PrivatesQTy);
auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
@@ -2078,11 +2121,10 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
for (auto *Field : PrivatesQTyRD->fields()) {
auto FieldLVal = CGF.EmitLValueForField(Base, Field);
auto *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
- auto RefLVal = CGF.MakeNaturalAlignAddrLValue(CGF.GetAddrOfLocalVar(VD),
- VD->getType());
+ auto RefLVal = CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
auto RefLoadRVal = CGF.EmitLoadOfLValue(RefLVal, Loc);
CGF.EmitStoreOfScalar(
- FieldLVal.getAddress(),
+ FieldLVal.getPointer(),
CGF.MakeNaturalAlignAddrLValue(RefLoadRVal.getScalarVal(),
RefLVal.getType()->getPointeeType()));
++Counter;
@@ -2120,7 +2162,7 @@ static int array_pod_sort_comparator(const PrivateDataTy *P1,
void CGOpenMPRuntime::emitTaskCall(
CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D,
bool Tied, llvm::PointerIntPair<llvm::Value *, 1, bool> Final,
- llvm::Value *TaskFunction, QualType SharedsTy, llvm::Value *Shareds,
+ llvm::Value *TaskFunction, QualType SharedsTy, Address Shareds,
const Expr *IfCond, ArrayRef<const Expr *> PrivateVars,
ArrayRef<const Expr *> PrivateCopies,
ArrayRef<const Expr *> FirstprivateVars,
@@ -2227,12 +2269,12 @@ void CGOpenMPRuntime::emitTaskCall(
CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
// Fill the data in the resulting kmp_task_t record.
// Copy shareds if there are any.
- llvm::Value *KmpTaskSharedsPtr = nullptr;
+ Address KmpTaskSharedsPtr = Address::invalid();
if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
- KmpTaskSharedsPtr = CGF.EmitLoadOfScalar(
+ KmpTaskSharedsPtr = Address(CGF.EmitLoadOfScalar(
CGF.EmitLValueForField(
TDBase, *std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds)),
- Loc);
+ Loc), CGF.getNaturalTypeAlignment(SharedsTy));
CGF.EmitAggregateCopy(KmpTaskSharedsPtr, Shareds, SharedsTy);
}
// Emit initial values for private copies (if any).
@@ -2243,7 +2285,7 @@ void CGOpenMPRuntime::emitTaskCall(
FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
LValue SharedsBase;
if (!FirstprivateVars.empty()) {
- SharedsBase = CGF.MakeNaturalAlignAddrLValue(
+ SharedsBase = CGF.MakeAddrLValue(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
SharedsTy);
@@ -2274,10 +2316,10 @@ void CGOpenMPRuntime::emitTaskCall(
CGF.EmitOMPAggregateAssign(
PrivateLValue.getAddress(), SharedRefLValue.getAddress(),
Type, [&CGF, Elem, Init, &CapturesInfo](
- llvm::Value *DestElement, llvm::Value *SrcElement) {
+ Address DestElement, Address SrcElement) {
// Clean up any temporaries needed by the initialization.
CodeGenFunction::OMPPrivateScope InitScope(CGF);
- InitScope.addPrivate(Elem, [SrcElement]() -> llvm::Value *{
+ InitScope.addPrivate(Elem, [SrcElement]() -> Address {
return SrcElement;
});
(void)InitScope.Privatize();
@@ -2291,7 +2333,7 @@ void CGOpenMPRuntime::emitTaskCall(
}
} else {
CodeGenFunction::OMPPrivateScope InitScope(CGF);
- InitScope.addPrivate(Elem, [SharedRefLValue]() -> llvm::Value *{
+ InitScope.addPrivate(Elem, [SharedRefLValue]() -> Address {
return SharedRefLValue.getAddress();
});
(void)InitScope.Privatize();
@@ -2321,9 +2363,9 @@ void CGOpenMPRuntime::emitTaskCall(
Destructor);
// Process list of dependences.
- llvm::Value *DependInfo = nullptr;
- unsigned DependencesNumber = Dependences.size();
- if (!Dependences.empty()) {
+ Address DependenciesArray = Address::invalid();
+ unsigned NumDependencies = Dependences.size();
+ if (NumDependencies) {
// Dependence kind for RTL.
enum RTLDependenceKindTy { DepIn = 1, DepOut = 2, DepInOut = 3 };
enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
@@ -2342,37 +2384,39 @@ void CGOpenMPRuntime::emitTaskCall(
} else {
KmpDependInfoRD = cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
}
+ CharUnits DependencySize = C.getTypeSizeInChars(KmpDependInfoTy);
// Define type kmp_depend_info[<Dependences.size()>];
QualType KmpDependInfoArrayTy = C.getConstantArrayType(
- KmpDependInfoTy, llvm::APInt(/*numBits=*/64, Dependences.size()),
+ KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies),
ArrayType::Normal, /*IndexTypeQuals=*/0);
// kmp_depend_info[<Dependences.size()>] deps;
- DependInfo = CGF.CreateMemTemp(KmpDependInfoArrayTy);
- for (unsigned i = 0; i < DependencesNumber; ++i) {
- auto *E = Dependences[i].second;
- LValue Addr = CGF.EmitLValue(E);
+ DependenciesArray = CGF.CreateMemTemp(KmpDependInfoArrayTy);
+ for (unsigned i = 0; i < NumDependencies; ++i) {
+ const Expr *E = Dependences[i].second;
+ auto Addr = CGF.EmitLValue(E);
llvm::Value *Size;
QualType Ty = E->getType();
- auto *DestAddr = Addr.getAddress();
if (auto *ASE = dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
LValue UpAddrLVal =
CGF.EmitOMPArraySectionExpr(ASE, /*LowerBound=*/false);
llvm::Value *UpAddr =
- CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getAddress(), /*Idx0=*/1);
+ CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getPointer(), /*Idx0=*/1);
llvm::Value *LowIntPtr =
- CGF.Builder.CreatePtrToInt(DestAddr, CGM.SizeTy);
+ CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGM.SizeTy);
llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGM.SizeTy);
Size = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
- } else
+ } else {
Size = getTypeSize(CGF, Ty);
- auto Base = CGF.MakeNaturalAlignAddrLValue(
- CGF.Builder.CreateStructGEP(/*Ty=*/nullptr, DependInfo, i),
+ }
+ auto Base = CGF.MakeAddrLValue(
+ CGF.Builder.CreateConstArrayGEP(DependenciesArray, i, DependencySize),
KmpDependInfoTy);
// deps[i].base_addr = &<Dependences[i].second>;
auto BaseAddrLVal = CGF.EmitLValueForField(
Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
- CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(DestAddr, CGF.IntPtrTy),
- BaseAddrLVal);
+ CGF.EmitStoreOfScalar(
+ CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGF.IntPtrTy),
+ BaseAddrLVal);
// deps[i].len = sizeof(<Dependences[i].second>);
auto LenLVal = CGF.EmitLValueForField(
Base, *std::next(KmpDependInfoRD->field_begin(), Len));
@@ -2397,8 +2441,8 @@ void CGOpenMPRuntime::emitTaskCall(
CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
FlagsLVal);
}
- DependInfo = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateStructGEP(/*Ty=*/nullptr, DependInfo, 0),
+ DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.Builder.CreateStructGEP(DependenciesArray, 0, CharUnits::Zero()),
CGF.VoidPtrTy);
}
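Each iteration of the dependence loop above fills one kmp_depend_info record through field LValues. Restated as the struct it mirrors (field set and kind values taken from the enums at the top of this hunk; the bitfield layout follows kmp.h and is an assumption here):

    #include <cstddef>
    #include <cstdint>

    struct kmp_depend_info {
      std::intptr_t base_addr;  // deps[i].base_addr = (intptr)&<expr>
      std::size_t len;          // deps[i].len = sizeof(<expr>) or section size
      struct {
        bool in : 1;            // set for DepIn (1) and DepInOut (3)
        bool out : 1;           // set for DepOut (2) and DepInOut (3)
      } flags;
    };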
@@ -2412,40 +2456,48 @@ void CGOpenMPRuntime::emitTaskCall(
// list is not empty
auto *ThreadID = getThreadID(CGF, Loc);
auto *UpLoc = emitUpdateLocation(CGF, Loc);
- llvm::Value *TaskArgs[] = {UpLoc, ThreadID, NewTask};
- llvm::Value *DepTaskArgs[] = {
- UpLoc,
- ThreadID,
- NewTask,
- DependInfo ? CGF.Builder.getInt32(DependencesNumber) : nullptr,
- DependInfo,
- DependInfo ? CGF.Builder.getInt32(0) : nullptr,
- DependInfo ? llvm::ConstantPointerNull::get(CGF.VoidPtrTy) : nullptr};
- auto &&ThenCodeGen = [this, DependInfo, &TaskArgs,
- &DepTaskArgs](CodeGenFunction &CGF) {
- // TODO: add check for untied tasks.
- CGF.EmitRuntimeCall(
- createRuntimeFunction(DependInfo ? OMPRTL__kmpc_omp_task_with_deps
- : OMPRTL__kmpc_omp_task),
- DependInfo ? makeArrayRef(DepTaskArgs) : makeArrayRef(TaskArgs));
+ llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
+ llvm::Value *DepTaskArgs[7];
+ if (NumDependencies) {
+ DepTaskArgs[0] = UpLoc;
+ DepTaskArgs[1] = ThreadID;
+ DepTaskArgs[2] = NewTask;
+ DepTaskArgs[3] = CGF.Builder.getInt32(NumDependencies);
+ DepTaskArgs[4] = DependenciesArray.getPointer();
+ DepTaskArgs[5] = CGF.Builder.getInt32(0);
+ DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ }
+ auto &&ThenCodeGen = [this, NumDependencies,
+ &TaskArgs, &DepTaskArgs](CodeGenFunction &CGF) {
+ // TODO: add check for untied tasks.
+ if (NumDependencies) {
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task_with_deps),
+ DepTaskArgs);
+ } else {
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task),
+ TaskArgs);
+ }
};
typedef CallEndCleanup<std::extent<decltype(TaskArgs)>::value>
IfCallEndCleanup;
- llvm::Value *DepWaitTaskArgs[] = {
- UpLoc,
- ThreadID,
- DependInfo ? CGF.Builder.getInt32(DependencesNumber) : nullptr,
- DependInfo,
- DependInfo ? CGF.Builder.getInt32(0) : nullptr,
- DependInfo ? llvm::ConstantPointerNull::get(CGF.VoidPtrTy) : nullptr};
+
+ llvm::Value *DepWaitTaskArgs[6];
+ if (NumDependencies) {
+ DepWaitTaskArgs[0] = UpLoc;
+ DepWaitTaskArgs[1] = ThreadID;
+ DepWaitTaskArgs[2] = CGF.Builder.getInt32(NumDependencies);
+ DepWaitTaskArgs[3] = DependenciesArray.getPointer();
+ DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
+ DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ }
auto &&ElseCodeGen = [this, &TaskArgs, ThreadID, NewTaskNewTaskTTy, TaskEntry,
- DependInfo, &DepWaitTaskArgs](CodeGenFunction &CGF) {
+ NumDependencies, &DepWaitTaskArgs](CodeGenFunction &CGF) {
CodeGenFunction::RunCleanupsScope LocalScope(CGF);
// Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
// kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
// ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
// is specified.
- if (DependInfo)
+ if (NumDependencies)
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_wait_deps),
DepWaitTaskArgs);
// Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
@@ -2463,6 +2515,7 @@ void CGOpenMPRuntime::emitTaskCall(
llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy};
CGF.EmitCallOrInvoke(TaskEntry, OutlinedFnArgs);
};
+
if (IfCond) {
emitOMPIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen);
} else {
@@ -2498,38 +2551,26 @@ static llvm::Value *emitReductionFunction(CodeGenModule &CGM,
// Dst = (void*[n])(LHSArg);
// Src = (void*[n])(RHSArg);
- auto *LHS = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateAlignedLoad(CGF.GetAddrOfLocalVar(&LHSArg),
- CGF.PointerAlignInBytes),
- ArgsType);
- auto *RHS = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateAlignedLoad(CGF.GetAddrOfLocalVar(&RHSArg),
- CGF.PointerAlignInBytes),
- ArgsType);
+ Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
+ ArgsType), CGF.getPointerAlign());
+ Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
+ ArgsType), CGF.getPointerAlign());
// ...
// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
// ...
CodeGenFunction::OMPPrivateScope Scope(CGF);
for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I) {
- Scope.addPrivate(
- cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl()),
- [&]() -> llvm::Value *{
- return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateAlignedLoad(
- CGF.Builder.CreateStructGEP(/*Ty=*/nullptr, RHS, I),
- CGM.PointerAlignInBytes),
- CGF.ConvertTypeForMem(C.getPointerType(RHSExprs[I]->getType())));
- });
- Scope.addPrivate(
- cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl()),
- [&]() -> llvm::Value *{
- return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateAlignedLoad(
- CGF.Builder.CreateStructGEP(/*Ty=*/nullptr, LHS, I),
- CGM.PointerAlignInBytes),
- CGF.ConvertTypeForMem(C.getPointerType(LHSExprs[I]->getType())));
- });
+ auto RHSVar = cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
+ Scope.addPrivate(RHSVar, [&]() -> Address {
+ return emitAddrOfVarFromArray(CGF, RHS, I, RHSVar);
+ });
+ auto LHSVar = cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
+ Scope.addPrivate(LHSVar, [&]() -> Address {
+ return emitAddrOfVarFromArray(CGF, LHS, I, LHSVar);
+ });
}
Scope.Privatize();
for (auto *E : ReductionOps) {
@@ -2596,14 +2637,15 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
QualType ReductionArrayTy =
C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
/*IndexTypeQuals=*/0);
- auto *ReductionList =
+ Address ReductionList =
CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I) {
- auto *Elem = CGF.Builder.CreateStructGEP(/*Ty=*/nullptr, ReductionList, I);
- CGF.Builder.CreateAlignedStore(
+ Address Elem =
+ CGF.Builder.CreateConstArrayGEP(ReductionList, I, CGF.getPointerSize());
+ CGF.Builder.CreateStore(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLValue(RHSExprs[I]).getAddress(), CGF.VoidPtrTy),
- Elem, CGM.PointerAlignInBytes);
+ CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
+ Elem);
}
// 2. Emit reduce_func().
@@ -2622,8 +2664,9 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
auto *ThreadId = getThreadID(CGF, Loc);
auto *ReductionArrayTySize = llvm::ConstantInt::get(
CGM.SizeTy, C.getTypeSizeInChars(ReductionArrayTy).getQuantity());
- auto *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(ReductionList,
- CGF.VoidPtrTy);
+ auto *RL =
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(ReductionList.getPointer(),
+ CGF.VoidPtrTy);
llvm::Value *Args[] = {
IdentTLoc, // ident_t *<loc>
ThreadId, // i32 <gtid>
@@ -2736,11 +2779,11 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
[&CGF, UpExpr, VD](RValue XRValue) {
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
PrivateScope.addPrivate(
- VD, [&CGF, VD, XRValue]() -> llvm::Value *{
- auto *LHSTemp = CGF.CreateMemTemp(VD->getType());
+ VD, [&CGF, VD, XRValue]() -> Address {
+ Address LHSTemp = CGF.CreateMemTemp(VD->getType());
CGF.EmitStoreThroughLValue(
XRValue,
- CGF.MakeNaturalAlignAddrLValue(LHSTemp, VD->getType()));
+ CGF.MakeAddrLValue(LHSTemp, VD->getType()));
return LHSTemp;
});
(void)PrivateScope.Privatize();
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.h b/clang/lib/CodeGen/CGOpenMPRuntime.h
index 44bc8a139b1..ed26e9505b5 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.h
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.h
@@ -39,7 +39,7 @@ class OMPExecutableDirective;
class VarDecl;
namespace CodeGen {
-
+class Address;
class CodeGenFunction;
class CodeGenModule;
@@ -184,7 +184,9 @@ private:
/// \brief Map of flags and corresponding default locations.
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDefaultLocMapTy;
OpenMPDefaultLocMapTy OpenMPDefaultLocMap;
- llvm::Value *getOrCreateDefaultLocation(OpenMPLocationFlags Flags);
+ Address getOrCreateDefaultLocation(OpenMPLocationFlags Flags);
+
+public:
/// \brief Describes ident structure that describes a source location.
/// All descriptions are taken from
/// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
@@ -225,6 +227,7 @@ private:
/// and a pair of line numbers that delimit the construct.
IdentField_PSource
};
+private:
llvm::StructType *IdentTy;
/// \brief Map for SourceLocation and OpenMP runtime library debug locations.
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy;
@@ -321,8 +324,7 @@ private:
/// \brief Emits address of the word in a memory where current thread id is
/// stored.
- virtual llvm::Value *emitThreadIDAddress(CodeGenFunction &CGF,
- SourceLocation Loc);
+ virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);
/// \brief Gets thread id value for the current thread.
///
@@ -346,7 +348,7 @@ private:
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
- void emitThreadPrivateVarInit(CodeGenFunction &CGF, llvm::Value *VDAddr,
+ void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
llvm::Value *Ctor, llvm::Value *CopyCtor,
llvm::Value *Dtor, SourceLocation Loc);
@@ -403,8 +405,7 @@ public:
///
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *OutlinedFn,
- llvm::Value *CapturedStruct,
- const Expr *IfCond);
+ Address CapturedStruct, const Expr *IfCond);
/// \brief Emits a critical region.
/// \param CriticalName Name of the critical region.
@@ -497,11 +498,17 @@ public:
/// \param Chunk Value of the chunk for the static_chunked scheduled loop.
/// For the default (nullptr) value, the chunk 1 will be used.
///
- virtual void emitForInit(CodeGenFunction &CGF, SourceLocation Loc,
- OpenMPScheduleClauseKind SchedKind, unsigned IVSize,
- bool IVSigned, bool Ordered, llvm::Value *IL,
- llvm::Value *LB, llvm::Value *UB, llvm::Value *ST,
- llvm::Value *Chunk = nullptr);
+ virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPScheduleClauseKind SchedKind,
+ unsigned IVSize, bool IVSigned,
+ bool Ordered, llvm::Value *UB,
+ llvm::Value *Chunk = nullptr);
+ virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPScheduleClauseKind SchedKind,
+ unsigned IVSize, bool IVSigned, bool Ordered,
+ Address IL, Address LB,
+ Address UB, Address ST,
+ llvm::Value *Chunk = nullptr);
/// \brief Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
@@ -539,8 +546,8 @@ public:
/// returned.
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned,
- llvm::Value *IL, llvm::Value *LB,
- llvm::Value *UB, llvm::Value *ST);
+ Address IL, Address LB,
+ Address UB, Address ST);
/// \brief Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
@@ -562,10 +569,10 @@ public:
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
- virtual llvm::Value *getAddrOfThreadPrivate(CodeGenFunction &CGF,
- const VarDecl *VD,
- llvm::Value *VDAddr,
- SourceLocation Loc);
+ virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
+ const VarDecl *VD,
+ Address VDAddr,
+ SourceLocation Loc);
/// \brief Emit code to initialize a threadprivate variable. It emits
/// a call to the runtime library that adds the initial value to the newly created
@@ -576,7 +583,7 @@ public:
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
virtual llvm::Function *
- emitThreadPrivateVarDefinition(const VarDecl *VD, llvm::Value *VDAddr,
+ emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr);
@@ -632,7 +639,7 @@ public:
virtual void emitTaskCall(
CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D,
bool Tied, llvm::PointerIntPair<llvm::Value *, 1, bool> Final,
- llvm::Value *TaskFunction, QualType SharedsTy, llvm::Value *Shareds,
+ llvm::Value *TaskFunction, QualType SharedsTy, Address Shareds,
const Expr *IfCond, ArrayRef<const Expr *> PrivateVars,
ArrayRef<const Expr *> PrivateCopies,
ArrayRef<const Expr *> FirstprivateVars,
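
The signatures above replace raw llvm::Value* addresses with Address throughout this header. Since clang/lib/CodeGen/Address.h itself is not shown in this section, here is a minimal sketch of the abstraction these declarations assume — a simplified reconstruction for orientation, not the real header:

    #include "clang/AST/CharUnits.h"
    #include "llvm/IR/Value.h"
    #include <cassert>

    // A pointer bundled with the alignment known to hold for it.
    class Address {
      llvm::Value *Pointer;
      clang::CharUnits Alignment;
    public:
      Address(llvm::Value *P, clang::CharUnits A) : Pointer(P), Alignment(A) {}
      static Address invalid() { return Address(nullptr, clang::CharUnits()); }
      bool isValid() const { return Pointer != nullptr; }
      llvm::Value *getPointer() const { assert(isValid()); return Pointer; }
      clang::CharUnits getAlignment() const { assert(isValid()); return Alignment; }
      llvm::Type *getType() const { return getPointer()->getType(); }
      llvm::Type *getElementType() const {
        return getType()->getPointerElementType();
      }
    };

The hunks below use exactly this surface: Address::invalid() for the not-yet-set state, isValid() where a null check used to be, and getPointer()/getAlignment() where code still needs the raw IR value.
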
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index b9a176fe652..4e95e2f3128 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -275,8 +275,8 @@ bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
-llvm::Value* CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
- AggValueSlot AggSlot) {
+Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
+ AggValueSlot AggSlot) {
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
"LLVM IR generation of compound statement ('{}')");
@@ -286,7 +286,7 @@ llvm::Value* CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLa
return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}
-llvm::Value*
+Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
bool GetLast,
AggValueSlot AggSlot) {
@@ -295,7 +295,7 @@ CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
E = S.body_end()-GetLast; I != E; ++I)
EmitStmt(*I);
- llvm::Value *RetAlloca = nullptr;
+ Address RetAlloca = Address::invalid();
if (GetLast) {
// We have to special case labels here. They are statements, but when put
// at the end of a statement expression, they yield the value of their
@@ -909,10 +909,9 @@ void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
if (RV.isScalar()) {
Builder.CreateStore(RV.getScalarVal(), ReturnValue);
} else if (RV.isAggregate()) {
- EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
+ EmitAggregateCopy(ReturnValue, RV.getAggregateAddress(), Ty);
} else {
- EmitStoreOfComplex(RV.getComplexVal(),
- MakeNaturalAlignAddrLValue(ReturnValue, Ty),
+ EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
/*init*/ true);
}
EmitBranchThroughCleanup(ReturnBlock);
@@ -953,8 +952,8 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
// If there is an NRVO flag for this variable, set it to 1 to indicate
// that the cleanup code should not destroy the variable.
if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
- Builder.CreateStore(Builder.getTrue(), NRVOFlag);
- } else if (!ReturnValue || (RV && RV->getType()->isVoidType())) {
+ Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
+ } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
// Make sure not to return anything, but evaluate the expression
// for side effects.
if (RV)
@@ -972,20 +971,17 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
break;
case TEK_Complex:
- EmitComplexExprIntoLValue(RV,
- MakeNaturalAlignAddrLValue(ReturnValue, RV->getType()),
+ EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
/*isInit*/ true);
break;
- case TEK_Aggregate: {
- CharUnits Alignment = getContext().getTypeAlignInChars(RV->getType());
- EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Alignment,
+ case TEK_Aggregate:
+ EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue,
Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased));
break;
}
- }
}
++NumReturnExprs;
@@ -1640,12 +1636,12 @@ CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
Ty));
} else {
- Arg = InputValue.getAddress();
+ Arg = InputValue.getPointer();
ConstraintStr += '*';
}
}
} else {
- Arg = InputValue.getAddress();
+ Arg = InputValue.getPointer();
ConstraintStr += '*';
}
@@ -1816,8 +1812,8 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
<< OutExpr->getType() << OutputConstraint;
}
} else {
- ArgTypes.push_back(Dest.getAddress()->getType());
- Args.push_back(Dest.getAddress());
+ ArgTypes.push_back(Dest.getAddress().getType());
+ Args.push_back(Dest.getPointer());
Constraints += "=*";
Constraints += OutputConstraint;
ReadOnly = ReadNone = false;
@@ -2049,8 +2045,8 @@ LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
QualType RecordTy = getContext().getRecordType(RD);
// Initialize the captured struct.
- LValue SlotLV = MakeNaturalAlignAddrLValue(
- CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
+ LValue SlotLV =
+ MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
RecordDecl::field_iterator CurField = RD->field_begin();
for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
@@ -2081,13 +2077,12 @@ CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
delete CGF.CapturedStmtInfo;
// Emit call to the helper function.
- EmitCallOrInvoke(F, CapStruct.getAddress());
+ EmitCallOrInvoke(F, CapStruct.getPointer());
return F;
}
-llvm::Value *
-CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
+Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
LValue CapStruct = InitCapturedStruct(S);
return CapStruct.getAddress();
}
@@ -2126,8 +2121,7 @@ CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
CD->getLocation(),
CD->getBody()->getLocStart());
// Set the context parameter in CapturedStmtInfo.
- llvm::Value *DeclPtr = LocalDeclMap[CD->getContextParam()];
- assert(DeclPtr && "missing context parameter for CapturedStmt");
+ Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
// Initialize variable-length arrays.
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index 0e87c3844ba..3ac3dfaf453 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -24,17 +24,19 @@ using namespace CodeGen;
// OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
- llvm::Value *DestAddr, llvm::Value *SrcAddr, QualType OriginalType,
- const llvm::function_ref<void(llvm::Value *, llvm::Value *)> &CopyGen) {
+ Address DestAddr, Address SrcAddr, QualType OriginalType,
+ const llvm::function_ref<void(Address, Address)> &CopyGen) {
// Perform element-by-element initialization.
QualType ElementTy;
- auto SrcBegin = SrcAddr;
- auto DestBegin = DestAddr;
+
+  // Drill down to the base element type of both arrays.
auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
- auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestBegin);
+ auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
+ SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
+
+ auto SrcBegin = SrcAddr.getPointer();
+ auto DestBegin = DestAddr.getPointer();
// Cast from pointer to array type to pointer to single element.
- SrcBegin = Builder.CreatePointerBitCastOrAddrSpaceCast(SrcBegin,
- DestBegin->getType());
auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
// The basic structure here is a while-do loop.
auto BodyBB = createBasicBlock("omp.arraycpy.body");
@@ -46,69 +48,77 @@ void CodeGenFunction::EmitOMPAggregateAssign(
// Enter the loop body, making that address the current address.
auto EntryBB = Builder.GetInsertBlock();
EmitBlock(BodyBB);
- auto SrcElementCurrent =
- Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
- SrcElementCurrent->addIncoming(SrcBegin, EntryBB);
- auto DestElementCurrent = Builder.CreatePHI(DestBegin->getType(), 2,
- "omp.arraycpy.destElementPast");
- DestElementCurrent->addIncoming(DestBegin, EntryBB);
+
+ CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);
+
+ llvm::PHINode *SrcElementPHI =
+ Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
+ SrcElementPHI->addIncoming(SrcBegin, EntryBB);
+ Address SrcElementCurrent =
+ Address(SrcElementPHI,
+ SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
+
+ llvm::PHINode *DestElementPHI =
+ Builder.CreatePHI(DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
+ DestElementPHI->addIncoming(DestBegin, EntryBB);
+ Address DestElementCurrent =
+ Address(DestElementPHI,
+ DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
// Emit copy.
CopyGen(DestElementCurrent, SrcElementCurrent);
// Shift the address forward by one element.
auto DestElementNext = Builder.CreateConstGEP1_32(
- DestElementCurrent, /*Idx0=*/1, "omp.arraycpy.dest.element");
+ DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
auto SrcElementNext = Builder.CreateConstGEP1_32(
- SrcElementCurrent, /*Idx0=*/1, "omp.arraycpy.src.element");
+ SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
// Check whether we've reached the end.
auto Done =
Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
Builder.CreateCondBr(Done, DoneBB, BodyBB);
- DestElementCurrent->addIncoming(DestElementNext, Builder.GetInsertBlock());
- SrcElementCurrent->addIncoming(SrcElementNext, Builder.GetInsertBlock());
+ DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
+ SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());
// Done.
EmitBlock(DoneBB, /*IsFinished=*/true);
}
-void CodeGenFunction::EmitOMPCopy(CodeGenFunction &CGF,
- QualType OriginalType, llvm::Value *DestAddr,
- llvm::Value *SrcAddr, const VarDecl *DestVD,
+void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
+ Address SrcAddr, const VarDecl *DestVD,
const VarDecl *SrcVD, const Expr *Copy) {
if (OriginalType->isArrayType()) {
auto *BO = dyn_cast<BinaryOperator>(Copy);
if (BO && BO->getOpcode() == BO_Assign) {
// Perform simple memcpy for simple copying.
- CGF.EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
+ EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
} else {
// For arrays with complex element types perform element by element
// copying.
- CGF.EmitOMPAggregateAssign(
+ EmitOMPAggregateAssign(
DestAddr, SrcAddr, OriginalType,
- [&CGF, Copy, SrcVD, DestVD](llvm::Value *DestElement,
- llvm::Value *SrcElement) {
+ [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
// Working with the single array element, so have to remap
// destination and source variables to corresponding array
// elements.
- CodeGenFunction::OMPPrivateScope Remap(CGF);
- Remap.addPrivate(DestVD, [DestElement]() -> llvm::Value *{
+ CodeGenFunction::OMPPrivateScope Remap(*this);
+ Remap.addPrivate(DestVD, [DestElement]() -> Address {
return DestElement;
});
Remap.addPrivate(
- SrcVD, [SrcElement]() -> llvm::Value *{ return SrcElement; });
+ SrcVD, [SrcElement]() -> Address { return SrcElement; });
(void)Remap.Privatize();
- CGF.EmitIgnoredExpr(Copy);
+ EmitIgnoredExpr(Copy);
});
}
} else {
// Remap pseudo source variable to private copy.
- CodeGenFunction::OMPPrivateScope Remap(CGF);
- Remap.addPrivate(SrcVD, [SrcAddr]() -> llvm::Value *{ return SrcAddr; });
- Remap.addPrivate(DestVD, [DestAddr]() -> llvm::Value *{ return DestAddr; });
+ CodeGenFunction::OMPPrivateScope Remap(*this);
+ Remap.addPrivate(SrcVD, [SrcAddr]() -> Address { return SrcAddr; });
+ Remap.addPrivate(DestVD, [DestAddr]() -> Address { return DestAddr; });
(void)Remap.Privatize();
// Emit copying of the whole variable.
- CGF.EmitIgnoredExpr(Copy);
+ EmitIgnoredExpr(Copy);
}
}
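
The two Address() constructions in the copy loop above attach a conservative per-element alignment via CharUnits::alignmentOfArrayElement. A rough standalone model of that computation — an assumption about its behavior, not the actual CharUnits code:

    #include <cstdint>

    // An element at offset k*eltSize from a base aligned to baseAlign is
    // only guaranteed the largest power of two dividing both values.
    uint64_t alignmentOfArrayElement(uint64_t baseAlign, uint64_t eltSize) {
      uint64_t combined = baseAlign | eltSize;
      return combined & ~(combined - 1); // lowest set bit
    }

    // e.g. a 16-byte-aligned array of 12-byte elements only guarantees
    // 4-byte alignment past the first element:
    // alignmentOfArrayElement(16, 12) == 4.
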
@@ -130,13 +140,13 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
/*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
OrigVD) != nullptr,
(*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
- auto *OriginalAddr = EmitLValue(&DRE).getAddress();
+ Address OriginalAddr = EmitLValue(&DRE).getAddress();
QualType Type = OrigVD->getType();
if (Type->isArrayType()) {
// Emit VarDecl with copy init for arrays.
// Get the address of the original variable captured in current
// captured region.
- IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
+ IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
auto Emission = EmitAutoVarAlloca(*VD);
auto *Init = VD->getInit();
if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
@@ -146,12 +156,12 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
} else {
EmitOMPAggregateAssign(
Emission.getAllocatedAddress(), OriginalAddr, Type,
- [this, VDInit, Init](llvm::Value *DestElement,
- llvm::Value *SrcElement) {
+ [this, VDInit, Init](Address DestElement,
+ Address SrcElement) {
// Clean up any temporaries needed by the initialization.
RunCleanupsScope InitScope(*this);
// Emit initialization for single element.
- LocalDeclMap[VDInit] = SrcElement;
+ setAddrOfLocalVar(VDInit, SrcElement);
EmitAnyExprToMem(Init, DestElement,
Init->getType().getQualifiers(),
/*IsInitializer*/ false);
@@ -162,12 +172,12 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
return Emission.getAllocatedAddress();
});
} else {
- IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
+ IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
// Emit private VarDecl with copy init.
// Remap temp VDInit variable to the address of the original
// variable
// (for proper handling of captured global variables).
- LocalDeclMap[VDInit] = OriginalAddr;
+ setAddrOfLocalVar(VDInit, OriginalAddr);
EmitDecl(*VD);
LocalDeclMap.erase(VDInit);
return GetAddrOfLocalVar(VD);
@@ -195,7 +205,7 @@ void CodeGenFunction::EmitOMPPrivateClause(
if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
bool IsRegistered =
- PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
+ PrivateScope.addPrivate(OrigVD, [&]() -> Address {
// Emit private VarDecl with copy init.
EmitDecl(*VD);
return GetAddrOfLocalVar(VD);
@@ -228,7 +238,7 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
// Get the address of the master variable. If we are emitting code with
// TLS support, the address is passed from the master as a field in the
// captured declaration.
- llvm::Value *MasterAddr;
+ Address MasterAddr = Address::invalid();
if (getLangOpts().OpenMPUseTLS &&
getContext().getTargetInfo().isTLSSupported()) {
assert(CapturedStmtInfo->lookup(VD) &&
@@ -237,11 +247,13 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
VK_LValue, (*IRef)->getExprLoc());
MasterAddr = EmitLValue(&DRE).getAddress();
} else {
- MasterAddr = VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
- : CGM.GetAddrOfGlobal(VD);
+ MasterAddr =
+ Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
+ : CGM.GetAddrOfGlobal(VD),
+ getContext().getDeclAlign(VD));
}
// Get the address of the threadprivate variable.
- auto *PrivateAddr = EmitLValue(*IRef).getAddress();
+ Address PrivateAddr = EmitLValue(*IRef).getAddress();
if (CopiedVars.size() == 1) {
// First, check whether the current thread is the master thread. If it is,
// no need to copy data.
@@ -249,15 +261,14 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
CopyEnd = createBasicBlock("copyin.not.master.end");
Builder.CreateCondBr(
Builder.CreateICmpNE(
- Builder.CreatePtrToInt(MasterAddr, CGM.IntPtrTy),
- Builder.CreatePtrToInt(PrivateAddr, CGM.IntPtrTy)),
+ Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
+ Builder.CreatePtrToInt(PrivateAddr.getPointer(), CGM.IntPtrTy)),
CopyBegin, CopyEnd);
EmitBlock(CopyBegin);
}
auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
- EmitOMPCopy(*this, Type, PrivateAddr, MasterAddr, DestVD, SrcVD,
- AssignOp);
+ EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
}
++IRef;
++ISrcRef;
@@ -286,7 +297,7 @@ bool CodeGenFunction::EmitOMPLastprivateClauseInit(
auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
- PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> llvm::Value *{
+ PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> Address {
DeclRefExpr DRE(
const_cast<VarDecl *>(OrigVD),
/*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
@@ -300,7 +311,7 @@ bool CodeGenFunction::EmitOMPLastprivateClauseInit(
if (IInit) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
bool IsRegistered =
- PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
+ PrivateScope.addPrivate(OrigVD, [&]() -> Address {
// Emit private VarDecl with copy init.
EmitDecl(*VD);
return GetAddrOfLocalVar(VD);
@@ -380,17 +391,14 @@ void CodeGenFunction::EmitOMPLastprivateClauseFinal(
auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
// Get the address of the original variable.
- auto *OriginalAddr = GetAddrOfLocalVar(DestVD);
+ Address OriginalAddr = GetAddrOfLocalVar(DestVD);
// Get the address of the private variable.
- auto *PrivateAddr = GetAddrOfLocalVar(PrivateVD);
- if (PrivateVD->getType()->isReferenceType())
+ Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
+ if (auto RefTy = PrivateVD->getType()->getAs<ReferenceType>())
PrivateAddr =
- EmitLoadOfLValue(MakeNaturalAlignAddrLValue(
- PrivateAddr, PrivateVD->getType()),
- (*IRef)->getExprLoc())
- .getScalarVal();
- EmitOMPCopy(*this, Type, OriginalAddr, PrivateAddr, DestVD, SrcVD,
- AssignOp);
+ Address(Builder.CreateLoad(PrivateAddr),
+ getNaturalTypeAlignment(RefTy->getPointeeType()));
+ EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
}
++IRef;
++ISrcRef;
@@ -415,7 +423,7 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
// Store the address of the original variable associated with the LHS
// implicit variable.
- PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef]() -> llvm::Value *{
+ PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef]() -> Address {
DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
CapturedStmtInfo->lookup(OrigVD) != nullptr,
IRef->getType(), VK_LValue, IRef->getExprLoc());
@@ -423,7 +431,7 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
});
// Emit reduction copy.
bool IsRegistered =
- PrivateScope.addPrivate(OrigVD, [this, PrivateVD]() -> llvm::Value *{
+ PrivateScope.addPrivate(OrigVD, [this, PrivateVD]() -> Address {
// Emit private VarDecl with reduction init.
EmitDecl(*PrivateVD);
return GetAddrOfLocalVar(PrivateVD);
@@ -607,8 +615,7 @@ void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
VD->getInit()->getExprLoc());
AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
EmitExprAsInit(&DRE, VD,
- MakeAddrLValue(Emission.getAllocatedAddress(),
- VD->getType(), Emission.Alignment),
+ MakeAddrLValue(Emission.getAllocatedAddress(), VD->getType()),
/*capturedByInit=*/false);
EmitAutoVarCleanups(Emission);
}
@@ -633,10 +640,10 @@ static void emitLinearClauseFinal(CodeGenFunction &CGF,
DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
(*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
- auto *OrigAddr = CGF.EmitLValue(&DRE).getAddress();
+ Address OrigAddr = CGF.EmitLValue(&DRE).getAddress();
CodeGenFunction::OMPPrivateScope VarScope(CGF);
VarScope.addPrivate(OrigVD,
- [OrigAddr]() -> llvm::Value *{ return OrigAddr; });
+ [OrigAddr]() -> Address { return OrigAddr; });
(void)VarScope.Privatize();
CGF.EmitIgnoredExpr(F);
++IC;
@@ -683,15 +690,15 @@ static void emitPrivateLoopCounters(CodeGenFunction &CGF,
for (auto *E : Counters) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
- llvm::Value *Addr;
- (void)LoopScope.addPrivate(PrivateVD, [&]() -> llvm::Value * {
+ Address Addr = Address::invalid();
+ (void)LoopScope.addPrivate(PrivateVD, [&]() -> Address {
// Emit var without initialization.
auto VarEmission = CGF.EmitAutoVarAlloca(*PrivateVD);
CGF.EmitAutoVarCleanups(VarEmission);
Addr = VarEmission.getAllocatedAddress();
return Addr;
});
- (void)LoopScope.addPrivate(VD, [&]() -> llvm::Value * { return Addr; });
+ (void)LoopScope.addPrivate(VD, [&]() -> Address { return Addr; });
++I;
}
}
@@ -722,7 +729,7 @@ emitPrivateLinearVars(CodeGenFunction &CGF, const OMPExecutableDirective &D,
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
auto *PrivateVD =
cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
- bool IsRegistered = PrivateScope.addPrivate(VD, [&]() -> llvm::Value * {
+ bool IsRegistered = PrivateScope.addPrivate(VD, [&]() -> Address {
// Emit private VarDecl with copy init.
CGF.EmitVarDecl(*PrivateVD);
return CGF.GetAddrOfLocalVar(PrivateVD);
@@ -769,14 +776,14 @@ void CodeGenFunction::EmitOMPSimdFinal(const OMPLoopDirective &D) {
auto IC = D.counters().begin();
for (auto F : D.finals()) {
auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
- if (LocalDeclMap.lookup(OrigVD) || CapturedStmtInfo->lookup(OrigVD)) {
+ if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD)) {
DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
CapturedStmtInfo->lookup(OrigVD) != nullptr,
(*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
- auto *OrigAddr = EmitLValue(&DRE).getAddress();
+ Address OrigAddr = EmitLValue(&DRE).getAddress();
OMPPrivateScope VarScope(*this);
VarScope.addPrivate(OrigVD,
- [OrigAddr]() -> llvm::Value *{ return OrigAddr; });
+ [OrigAddr]() -> Address { return OrigAddr; });
(void)VarScope.Privatize();
EmitIgnoredExpr(F);
}
@@ -865,9 +872,9 @@ void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
void CodeGenFunction::EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
const OMPLoopDirective &S,
OMPPrivateScope &LoopScope,
- bool Ordered, llvm::Value *LB,
- llvm::Value *UB, llvm::Value *ST,
- llvm::Value *IL, llvm::Value *Chunk) {
+ bool Ordered, Address LB,
+ Address UB, Address ST,
+ Address IL, llvm::Value *Chunk) {
auto &RT = CGM.getOpenMPRuntime();
// Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
@@ -931,11 +938,14 @@ void CodeGenFunction::EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
- RT.emitForInit(
- *this, S.getLocStart(), ScheduleKind, IVSize, IVSigned, Ordered, IL, LB,
- (DynamicOrOrdered ? EmitAnyExpr(S.getLastIteration()).getScalarVal()
- : UB),
- ST, Chunk);
+ if (DynamicOrOrdered) {
+ llvm::Value *UBVal = EmitScalarExpr(S.getLastIteration());
+ RT.emitForDispatchInit(*this, S.getLocStart(), ScheduleKind,
+ IVSize, IVSigned, Ordered, UBVal, Chunk);
+ } else {
+ RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind,
+ IVSize, IVSigned, Ordered, IL, LB, UB, ST, Chunk);
+ }
auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");
@@ -1044,8 +1054,8 @@ emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S,
CGF.EmitVarDecl(*ImpVar);
CGF.EmitStoreThroughLValue(
CGF.EmitAnyExpr(Ch),
- CGF.MakeNaturalAlignAddrLValue(CGF.GetAddrOfLocalVar(ImpVar),
- ImpVar->getType()));
+ CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(ImpVar),
+ ImpVar->getType()));
} else {
Ch = ImpRef;
}
@@ -1148,9 +1158,10 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
// chunks that are approximately equal in size, and at most one chunk is
// distributed to each thread. Note that the size of the chunks is
// unspecified in this case.
- RT.emitForInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned,
- Ordered, IL.getAddress(), LB.getAddress(),
- UB.getAddress(), ST.getAddress());
+ RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind,
+ IVSize, IVSigned, Ordered,
+ IL.getAddress(), LB.getAddress(),
+ UB.getAddress(), ST.getAddress());
auto LoopExit = getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
// UB = min(UB, GlobalUB);
EmitIgnoredExpr(S.getEnsureUpperBound());
@@ -1223,7 +1234,7 @@ void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
const Twine &Name,
llvm::Value *Init = nullptr) {
- auto LVal = CGF.MakeNaturalAlignAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
+ auto LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
if (Init)
CGF.EmitScalarInit(Init, LVal);
return LVal;
@@ -1302,7 +1313,7 @@ CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
(void)LoopScope.Privatize();
// Emit static non-chunked loop.
- CGF.CGM.getOpenMPRuntime().emitForInit(
+ CGF.CGM.getOpenMPRuntime().emitForStaticInit(
CGF, S.getLocStart(), OMPC_SCHEDULE_static, /*IVSize=*/32,
/*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
LB.getAddress(), UB.getAddress(), ST.getAddress());
@@ -1556,35 +1567,33 @@ void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
OMPPrivateScope Scope(CGF);
if (!PrivateVars.empty() || !FirstprivateVars.empty()) {
- auto *CopyFn = CGF.Builder.CreateAlignedLoad(
- CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)),
- CGF.PointerAlignInBytes);
- auto *PrivatesPtr = CGF.Builder.CreateAlignedLoad(
- CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)),
- CGF.PointerAlignInBytes);
+ auto *CopyFn = CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)));
+ auto *PrivatesPtr = CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)));
// Map privates.
- llvm::SmallVector<std::pair<const VarDecl *, llvm::Value *>, 16>
+ llvm::SmallVector<std::pair<const VarDecl *, Address>, 16>
PrivatePtrs;
llvm::SmallVector<llvm::Value *, 16> CallArgs;
CallArgs.push_back(PrivatesPtr);
for (auto *E : PrivateVars) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- auto *PrivatePtr =
+ Address PrivatePtr =
CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
- CallArgs.push_back(PrivatePtr);
+ CallArgs.push_back(PrivatePtr.getPointer());
}
for (auto *E : FirstprivateVars) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- auto *PrivatePtr =
+ Address PrivatePtr =
CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
- CallArgs.push_back(PrivatePtr);
+ CallArgs.push_back(PrivatePtr.getPointer());
}
CGF.EmitRuntimeCall(CopyFn, CallArgs);
for (auto &&Pair : PrivatePtrs) {
- auto *Replacement =
- CGF.Builder.CreateAlignedLoad(Pair.second, CGF.PointerAlignInBytes);
+ Address Replacement(CGF.Builder.CreateLoad(Pair.second),
+ CGF.getContext().getDeclAlign(Pair.first));
Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
}
}
@@ -1787,8 +1796,8 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
!Update.getScalarVal()->getType()->isIntegerTy() ||
!X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
(Update.getScalarVal()->getType() !=
- X.getAddress()->getType()->getPointerElementType())) ||
- !X.getAddress()->getType()->getPointerElementType()->isIntegerTy() ||
+ X.getAddress().getElementType())) ||
+ !X.getAddress().getElementType()->isIntegerTy() ||
!Context.getTargetInfo().hasBuiltinAtomic(
Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
return std::make_pair(false, RValue::get(nullptr));
@@ -1859,10 +1868,10 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
auto *UpdateVal = Update.getScalarVal();
if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
UpdateVal = CGF.Builder.CreateIntCast(
- IC, X.getAddress()->getType()->getPointerElementType(),
+ IC, X.getAddress().getElementType(),
X.getType()->hasSignedIntegerRepresentation());
}
- auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(), UpdateVal, AO);
+ auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(), UpdateVal, AO);
return std::make_pair(true, RValue::get(Res));
}
diff --git a/clang/lib/CodeGen/CGVTables.cpp b/clang/lib/CodeGen/CGVTables.cpp
index fcb5c366494..48e06a3879d 100644
--- a/clang/lib/CodeGen/CGVTables.cpp
+++ b/clang/lib/CodeGen/CGVTables.cpp
@@ -102,8 +102,11 @@ static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
CGF.EmitBlock(AdjustNotNull);
}
- ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(CGF, ReturnValue,
- Thunk.Return);
+ auto ClassDecl = ResultType->getPointeeType()->getAsCXXRecordDecl();
+ auto ClassAlign = CGF.CGM.getClassPointerAlignment(ClassDecl);
+ ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(CGF,
+ Address(ReturnValue, ClassAlign),
+ Thunk.Return);
if (NullCheckValue) {
CGF.Builder.CreateBr(AdjustEnd);
@@ -171,11 +174,11 @@ CodeGenFunction::GenerateVarArgsThunk(llvm::Function *Fn,
// Find the first store of "this", which will be to the alloca associated
// with "this".
- llvm::Value *ThisPtr = &*AI;
+ Address ThisPtr(&*AI, CGM.getClassPointerAlignment(MD->getParent()));
llvm::BasicBlock *EntryBB = Fn->begin();
llvm::Instruction *ThisStore =
std::find_if(EntryBB->begin(), EntryBB->end(), [&](llvm::Instruction &I) {
- return isa<llvm::StoreInst>(I) && I.getOperand(0) == ThisPtr;
+ return isa<llvm::StoreInst>(I) && I.getOperand(0) == ThisPtr.getPointer();
});
assert(ThisStore && "Store of this should be in entry block?");
// Adjust "this", if necessary.
@@ -235,6 +238,17 @@ void CodeGenFunction::StartThunk(llvm::Function *Fn, GlobalDecl GD,
// Since we didn't pass a GlobalDecl to StartFunction, do this ourselves.
CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
CXXThisValue = CXXABIThisValue;
+ CurCodeDecl = MD;
+ CurFuncDecl = MD;
+}
+
+void CodeGenFunction::FinishThunk() {
+ // Clear these to restore the invariants expected by
+ // StartFunction/FinishFunction.
+ CurCodeDecl = nullptr;
+ CurFuncDecl = nullptr;
+
+ FinishFunction();
}
void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Value *Callee,
@@ -244,9 +258,10 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Value *Callee,
const CXXMethodDecl *MD = cast<CXXMethodDecl>(CurGD.getDecl());
// Adjust the 'this' pointer if necessary
- llvm::Value *AdjustedThisPtr = Thunk ? CGM.getCXXABI().performThisAdjustment(
- *this, LoadCXXThis(), Thunk->This)
- : LoadCXXThis();
+ llvm::Value *AdjustedThisPtr =
+ Thunk ? CGM.getCXXABI().performThisAdjustment(
+ *this, LoadCXXThisAddress(), Thunk->This)
+ : LoadCXXThis();
if (CurFnInfo->usesInAlloca()) {
// We don't handle return adjusting thunks, because they require us to call
@@ -321,7 +336,7 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Value *Callee,
// Disable the final ARC autorelease.
AutoreleaseResult = false;
- FinishFunction();
+ FinishThunk();
}
void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
@@ -346,9 +361,8 @@ void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
Args[ThisArgNo] = AdjustedThisPtr;
} else {
assert(ThisAI.isInAlloca() && "this is passed directly or inalloca");
- llvm::Value *ThisAddr = GetAddrOfLocalVar(CXXABIThisDecl);
- llvm::Type *ThisType =
- cast<llvm::PointerType>(ThisAddr->getType())->getElementType();
+ Address ThisAddr = GetAddrOfLocalVar(CXXABIThisDecl);
+ llvm::Type *ThisType = ThisAddr.getElementType();
if (ThisType != AdjustedThisPtr->getType())
AdjustedThisPtr = Builder.CreateBitCast(AdjustedThisPtr, ThisType);
Builder.CreateStore(AdjustedThisPtr, ThisAddr);
diff --git a/clang/lib/CodeGen/CGValue.h b/clang/lib/CodeGen/CGValue.h
index 92055917dba..195571ba070 100644
--- a/clang/lib/CodeGen/CGValue.h
+++ b/clang/lib/CodeGen/CGValue.h
@@ -16,10 +16,10 @@
#define LLVM_CLANG_LIB_CODEGEN_CGVALUE_H
#include "clang/AST/ASTContext.h"
-#include "clang/AST/CharUnits.h"
#include "clang/AST/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Type.h"
+#include "Address.h"
namespace llvm {
class Constant;
@@ -38,6 +38,10 @@ namespace CodeGen {
class RValue {
enum Flavor { Scalar, Complex, Aggregate };
+  // The shift applied to an aggregate's alignment so that it can be
+  // stored in a pointer-sized field.
+ enum { AggAlignShift = 4 };
+
// Stores first value and flavor.
llvm::PointerIntPair<llvm::Value *, 2, Flavor> V1;
// Stores second value and volatility.
@@ -63,11 +67,21 @@ public:
}
/// getAggregateAddress() - Return the address of the aggregate.
- llvm::Value *getAggregateAddr() const {
+ Address getAggregateAddress() const {
+ assert(isAggregate() && "Not an aggregate!");
+ auto align = reinterpret_cast<uintptr_t>(V2.getPointer()) >> AggAlignShift;
+ return Address(V1.getPointer(), CharUnits::fromQuantity(align));
+ }
+ llvm::Value *getAggregatePointer() const {
assert(isAggregate() && "Not an aggregate!");
return V1.getPointer();
}
+ static RValue getIgnored() {
+ // FIXME: should we make this a more explicit state?
+ return get(nullptr);
+ }
+
static RValue get(llvm::Value *V) {
RValue ER;
ER.V1.setPointer(V);
@@ -89,11 +103,14 @@ public:
// FIXME: Aggregate rvalues need to retain information about whether they are
// volatile or not. Remove default to find all places that probably get this
// wrong.
- static RValue getAggregate(llvm::Value *V, bool Volatile = false) {
+ static RValue getAggregate(Address addr, bool isVolatile = false) {
RValue ER;
- ER.V1.setPointer(V);
+ ER.V1.setPointer(addr.getPointer());
ER.V1.setInt(Aggregate);
- ER.V2.setInt(Volatile);
+
+ auto align = static_cast<uintptr_t>(addr.getAlignment().getQuantity());
+ ER.V2.setPointer(reinterpret_cast<llvm::Value*>(align << AggAlignShift));
+ ER.V2.setInt(isVolatile);
return ER;
}
};
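
The AggAlignShift trick above smuggles the aggregate's alignment through V2, a PointerIntPair whose pointer slot normally holds an llvm::Value*. An illustrative round trip, with void* standing in for llvm::Value* so it compiles standalone:

    #include <cstdint>

    constexpr unsigned AggAlignShift = 4;

    // Shifting left by 4 keeps the low bits zero, which the PointerIntPair
    // needs free for its volatility flag.
    void *encodeAlign(uint64_t alignInBytes) {
      return reinterpret_cast<void *>(static_cast<uintptr_t>(alignInBytes)
                                      << AggAlignShift);
    }

    uint64_t decodeAlign(void *packed) {
      return reinterpret_cast<uintptr_t>(packed) >> AggAlignShift;
    }

    // decodeAlign(encodeAlign(16)) == 16
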
@@ -103,6 +120,32 @@ enum ARCPreciseLifetime_t {
ARCImpreciseLifetime, ARCPreciseLifetime
};
+/// The source of the alignment of an l-value; an expression of
+/// confidence in the alignment actually matching the estimate.
+enum class AlignmentSource {
+ /// The l-value was an access to a declared entity or something
+ /// equivalently strong, like the address of an array allocated by a
+ /// language runtime.
+ Decl,
+
+ /// The l-value was considered opaque, so the alignment was
+ /// determined from a type, but that type was an explicitly-aligned
+ /// typedef.
+ AttributedType,
+
+ /// The l-value was considered opaque, so the alignment was
+ /// determined from a type.
+ Type
+};
+
+/// Given that the base address has the given alignment source, what's
+/// our confidence in the alignment of the field?
+static inline AlignmentSource getFieldAlignmentSource(AlignmentSource Source) {
+ // For now, we don't distinguish fields of opaque pointers from
+ // top-level declarations, but maybe we should.
+ return AlignmentSource::Decl;
+}
+
/// LValue - This represents an lvalue reference. Because C/C++ allow
/// bitfields, this is not a simple LLVM pointer, it may be a pointer plus a
/// bitrange.
@@ -157,6 +200,8 @@ class LValue {
// to make the default bitfield pattern all-zeroes.
bool ImpreciseLifetime : 1;
+ unsigned AlignSource : 2;
+
Expr *BaseIvarExp;
/// Used by struct-path-aware TBAA.
@@ -169,13 +214,16 @@ class LValue {
private:
void Initialize(QualType Type, Qualifiers Quals,
- CharUnits Alignment,
+ CharUnits Alignment, AlignmentSource AlignSource,
llvm::MDNode *TBAAInfo = nullptr) {
+ assert((!Alignment.isZero() || Type->isIncompleteType()) &&
+ "initializing l-value with zero alignment!");
this->Type = Type;
this->Quals = Quals;
this->Alignment = Alignment.getQuantity();
assert(this->Alignment == Alignment.getQuantity() &&
"Alignment exceeds allowed max!");
+ this->AlignSource = unsigned(AlignSource);
// Initialize Objective-C flags.
this->Ivar = this->ObjIsArray = this->NonGC = this->GlobalObjCRef = false;
@@ -261,29 +309,50 @@ public:
CharUnits getAlignment() const { return CharUnits::fromQuantity(Alignment); }
void setAlignment(CharUnits A) { Alignment = A.getQuantity(); }
+ AlignmentSource getAlignmentSource() const {
+ return AlignmentSource(AlignSource);
+ }
+ void setAlignmentSource(AlignmentSource Source) {
+ AlignSource = unsigned(Source);
+ }
+
// simple lvalue
- llvm::Value *getAddress() const { assert(isSimple()); return V; }
- void setAddress(llvm::Value *address) {
+ llvm::Value *getPointer() const {
assert(isSimple());
- V = address;
+ return V;
+ }
+ Address getAddress() const { return Address(getPointer(), getAlignment()); }
+ void setAddress(Address address) {
+ assert(isSimple());
+ V = address.getPointer();
+ Alignment = address.getAlignment().getQuantity();
}
// vector elt lvalue
- llvm::Value *getVectorAddr() const { assert(isVectorElt()); return V; }
+ Address getVectorAddress() const {
+ return Address(getVectorPointer(), getAlignment());
+ }
+ llvm::Value *getVectorPointer() const { assert(isVectorElt()); return V; }
llvm::Value *getVectorIdx() const { assert(isVectorElt()); return VectorIdx; }
// extended vector elements.
- llvm::Value *getExtVectorAddr() const { assert(isExtVectorElt()); return V; }
+ Address getExtVectorAddress() const {
+ return Address(getExtVectorPointer(), getAlignment());
+ }
+ llvm::Value *getExtVectorPointer() const {
+ assert(isExtVectorElt());
+ return V;
+ }
llvm::Constant *getExtVectorElts() const {
assert(isExtVectorElt());
return VectorElts;
}
// bitfield lvalue
- llvm::Value *getBitFieldAddr() const {
- assert(isBitField());
- return V;
+ Address getBitFieldAddress() const {
+ return Address(getBitFieldPointer(), getAlignment());
}
+ llvm::Value *getBitFieldPointer() const { assert(isBitField()); return V; }
const CGBitFieldInfo &getBitFieldInfo() const {
assert(isBitField());
return *BitFieldInfo;
@@ -292,37 +361,40 @@ public:
// global register lvalue
llvm::Value *getGlobalReg() const { assert(isGlobalReg()); return V; }
- static LValue MakeAddr(llvm::Value *address, QualType type,
- CharUnits alignment, ASTContext &Context,
+ static LValue MakeAddr(Address address, QualType type,
+ ASTContext &Context,
+ AlignmentSource alignSource,
llvm::MDNode *TBAAInfo = nullptr) {
Qualifiers qs = type.getQualifiers();
qs.setObjCGCAttr(Context.getObjCGCAttrKind(type));
LValue R;
R.LVType = Simple;
- assert(address->getType()->isPointerTy());
- R.V = address;
- R.Initialize(type, qs, alignment, TBAAInfo);
+ assert(address.getPointer()->getType()->isPointerTy());
+ R.V = address.getPointer();
+ R.Initialize(type, qs, address.getAlignment(), alignSource, TBAAInfo);
return R;
}
- static LValue MakeVectorElt(llvm::Value *Vec, llvm::Value *Idx,
- QualType type, CharUnits Alignment) {
+ static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx,
+ QualType type, AlignmentSource alignSource) {
LValue R;
R.LVType = VectorElt;
- R.V = Vec;
+ R.V = vecAddress.getPointer();
R.VectorIdx = Idx;
- R.Initialize(type, type.getQualifiers(), Alignment);
+ R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(),
+ alignSource);
return R;
}
- static LValue MakeExtVectorElt(llvm::Value *Vec, llvm::Constant *Elts,
- QualType type, CharUnits Alignment) {
+ static LValue MakeExtVectorElt(Address vecAddress, llvm::Constant *Elts,
+ QualType type, AlignmentSource alignSource) {
LValue R;
R.LVType = ExtVectorElt;
- R.V = Vec;
+ R.V = vecAddress.getPointer();
R.VectorElts = Elts;
- R.Initialize(type, type.getQualifiers(), Alignment);
+ R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(),
+ alignSource);
return R;
}
@@ -332,29 +404,28 @@ public:
/// bit-field refers to.
/// \param Info - The information describing how to perform the bit-field
/// access.
- static LValue MakeBitfield(llvm::Value *Addr,
+ static LValue MakeBitfield(Address Addr,
const CGBitFieldInfo &Info,
- QualType type, CharUnits Alignment) {
+ QualType type,
+ AlignmentSource alignSource) {
LValue R;
R.LVType = BitField;
- R.V = Addr;
+ R.V = Addr.getPointer();
R.BitFieldInfo = &Info;
- R.Initialize(type, type.getQualifiers(), Alignment);
+ R.Initialize(type, type.getQualifiers(), Addr.getAlignment(), alignSource);
return R;
}
- static LValue MakeGlobalReg(llvm::Value *Reg,
- QualType type,
- CharUnits Alignment) {
+ static LValue MakeGlobalReg(Address Reg, QualType type) {
LValue R;
R.LVType = GlobalReg;
- R.V = Reg;
- R.Initialize(type, type.getQualifiers(), Alignment);
+ R.V = Reg.getPointer();
+ R.Initialize(type, type.getQualifiers(), Reg.getAlignment(),
+ AlignmentSource::Decl);
return R;
}
RValue asAggregateRValue() const {
- // FIMXE: Alignment
return RValue::getAggregate(getAddress(), isVolatileQualified());
}
};
@@ -407,7 +478,7 @@ public:
/// ignored - Returns an aggregate value slot indicating that the
/// aggregate value is being ignored.
static AggValueSlot ignored() {
- return forAddr(nullptr, CharUnits(), Qualifiers(), IsNotDestructed,
+ return forAddr(Address::invalid(), Qualifiers(), IsNotDestructed,
DoesNotNeedGCBarriers, IsNotAliased);
}
@@ -421,15 +492,20 @@ public:
/// for calling destructors on this object
/// \param needsGC - true if the slot is potentially located
/// somewhere that ObjC GC calls should be emitted for
- static AggValueSlot forAddr(llvm::Value *addr, CharUnits align,
+ static AggValueSlot forAddr(Address addr,
Qualifiers quals,
IsDestructed_t isDestructed,
NeedsGCBarriers_t needsGC,
IsAliased_t isAliased,
IsZeroed_t isZeroed = IsNotZeroed) {
AggValueSlot AV;
- AV.Addr = addr;
- AV.Alignment = align.getQuantity();
+ if (addr.isValid()) {
+ AV.Addr = addr.getPointer();
+ AV.Alignment = addr.getAlignment().getQuantity();
+ } else {
+ AV.Addr = nullptr;
+ AV.Alignment = 0;
+ }
AV.Quals = quals;
AV.DestructedFlag = isDestructed;
AV.ObjCGCFlag = needsGC;
@@ -443,7 +519,7 @@ public:
NeedsGCBarriers_t needsGC,
IsAliased_t isAliased,
IsZeroed_t isZeroed = IsNotZeroed) {
- return forAddr(LV.getAddress(), LV.getAlignment(),
+ return forAddr(LV.getAddress(),
LV.getQuals(), isDestructed, needsGC, isAliased, isZeroed);
}
@@ -471,11 +547,15 @@ public:
NeedsGCBarriers_t requiresGCollection() const {
return NeedsGCBarriers_t(ObjCGCFlag);
}
-
- llvm::Value *getAddr() const {
+
+ llvm::Value *getPointer() const {
return Addr;
}
+ Address getAddress() const {
+ return Address(Addr, getAlignment());
+ }
+
bool isIgnored() const {
return Addr == nullptr;
}
@@ -488,9 +568,12 @@ public:
return IsAliased_t(AliasedFlag);
}
- // FIXME: Alignment?
RValue asRValue() const {
- return RValue::getAggregate(getAddr(), isVolatile());
+ if (isIgnored()) {
+ return RValue::getIgnored();
+ } else {
+ return RValue::getAggregate(getAddress(), isVolatile());
+ }
}
void setZeroed(bool V = true) { ZeroedFlag = V; }
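
A hedged usage sketch of the updated AggValueSlot factory (illustrative, not taken from the patch): callers now hand over a single Address, and the slot records pointer and alignment together. CreateMemTemp returning an Address is consistent with its uses elsewhere in this diff.

    void buildTempSlot(CodeGenFunction &CGF, QualType Ty) {
      // CreateMemTemp now yields an Address carrying the alloca's alignment.
      Address Slot = CGF.CreateMemTemp(Ty, "agg.tmp");
      AggValueSlot AVS = AggValueSlot::forAddr(
          Slot, Qualifiers(), AggValueSlot::IsNotDestructed,
          AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased);
      (void)AVS;
    }
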
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
index ea387d3f080..39bd2bc28e1 100644
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
+#include "CGBlocks.h"
#include "CGCleanup.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
@@ -37,12 +38,14 @@ using namespace CodeGen;
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
: CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
- Builder(cgm.getModule().getContext(), llvm::ConstantFolder(),
+ Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
CGBuilderInserterTy(this)),
- CurFn(nullptr), CapturedStmtInfo(nullptr),
+ CurFn(nullptr), ReturnValue(Address::invalid()),
+ CapturedStmtInfo(nullptr),
SanOpts(CGM.getLangOpts().Sanitize), IsSanitizerScope(false),
CurFuncIsThunk(false), AutoreleaseResult(false), SawAsmBlock(false),
- IsOutlinedSEHHelper(false), BlockInfo(nullptr), BlockPointer(nullptr),
+ IsOutlinedSEHHelper(false),
+ BlockInfo(nullptr), BlockPointer(nullptr),
LambdaThisCaptureField(nullptr), NormalCleanupDest(nullptr),
NextCleanupDestIndex(1), FirstBlockInfo(nullptr), EHResumeBlock(nullptr),
ExceptionSlot(nullptr), EHSelectorSlot(nullptr),
@@ -52,7 +55,7 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
CaseRangeBlock(nullptr), UnreachableBlock(nullptr), NumReturnExprs(0),
NumSimpleReturnExprs(0), CXXABIThisDecl(nullptr),
CXXABIThisValue(nullptr), CXXThisValue(nullptr),
- CXXDefaultInitExprThis(nullptr), CXXStructorImplicitParamDecl(nullptr),
+ CXXStructorImplicitParamDecl(nullptr),
CXXStructorImplicitParamValue(nullptr), OutermostConditional(nullptr),
CurLexicalScope(nullptr), TerminateLandingPad(nullptr),
TerminateHandler(nullptr), TrapBB(nullptr) {
@@ -92,18 +95,69 @@ CodeGenFunction::~CodeGenFunction() {
}
}
-LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
+CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
+ AlignmentSource *Source) {
+ return getNaturalTypeAlignment(T->getPointeeType(), Source,
+ /*forPointee*/ true);
+}
+
+CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
+ AlignmentSource *Source,
+ bool forPointeeType) {
+ // Honor alignment typedef attributes even on incomplete types.
+ // We also honor them straight for C++ class types, even as pointees;
+ // there's an expressivity gap here.
+ if (auto TT = T->getAs<TypedefType>()) {
+ if (auto Align = TT->getDecl()->getMaxAlignment()) {
+ if (Source) *Source = AlignmentSource::AttributedType;
+ return getContext().toCharUnitsFromBits(Align);
+ }
+ }
+
+ if (Source) *Source = AlignmentSource::Type;
+
CharUnits Alignment;
- if (CGM.getCXXABI().isTypeInfoCalculable(T)) {
- Alignment = getContext().getTypeAlignInChars(T);
- unsigned MaxAlign = getContext().getLangOpts().MaxTypeAlign;
- if (MaxAlign && Alignment.getQuantity() > MaxAlign &&
- !getContext().isAlignmentRequired(T))
- Alignment = CharUnits::fromQuantity(MaxAlign);
+ if (!CGM.getCXXABI().isTypeInfoCalculable(T)) {
+ Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
+ } else {
+ // For C++ class pointees, we don't know whether we're pointing at a
+ // base or a complete object, so we generally need to use the
+ // non-virtual alignment.
+ const CXXRecordDecl *RD;
+ if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
+ Alignment = CGM.getClassPointerAlignment(RD);
+ } else {
+ Alignment = getContext().getTypeAlignInChars(T);
+ }
+
+ // Cap to the global maximum type alignment unless the alignment
+ // was somehow explicit on the type.
+ if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
+ if (Alignment.getQuantity() > MaxAlign &&
+ !getContext().isAlignmentRequired(T))
+ Alignment = CharUnits::fromQuantity(MaxAlign);
+ }
}
- return LValue::MakeAddr(V, T, Alignment, getContext(), CGM.getTBAAInfo(T));
+ return Alignment;
+}
+
+LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
+ AlignmentSource AlignSource;
+ CharUnits Alignment = getNaturalTypeAlignment(T, &AlignSource);
+ return LValue::MakeAddr(Address(V, Alignment), T, getContext(), AlignSource,
+ CGM.getTBAAInfo(T));
+}
+
+/// Given a value of type T* that may not point to a complete object,
+/// construct an l-value with the natural pointee alignment of T.
+LValue
+CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
+ AlignmentSource AlignSource;
+ CharUnits Align = getNaturalTypeAlignment(T, &AlignSource, /*pointee*/ true);
+ return MakeAddrLValue(Address(V, Align), T, AlignSource);
}
+
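
A source-level illustration of the precedence getNaturalTypeAlignment implements above (hypothetical example, not from the patch): an explicitly-aligned typedef wins even through a pointer; otherwise the natural alignment applies, subject to the MaxTypeAlign cap:

    typedef double AlignedDouble __attribute__((aligned(16)));

    void example(AlignedDouble *p, double *q) {
      // *p is emitted with alignment 16 (AlignmentSource::AttributedType):
      //    the typedef attribute is honored even for the pointee.
      // *q is emitted with alignment 8 on typical targets
      //    (AlignmentSource::Type), capped by MaxTypeAlign when the
      //    alignment is not required by the type itself.
      (void)*p;
      (void)*q;
    }
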
llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
return CGM.getTypes().ConvertTypeForMem(T);
}
@@ -296,7 +350,7 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
EscapeArgs[Pair.second] = Pair.first;
llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
&CGM.getModule(), llvm::Intrinsic::localescape);
- CGBuilderTy(AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
+ CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
}
// Remove the AllocaInsertPt instruction, which is just a convenience for us.
@@ -697,7 +751,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
if (RetTy->isVoidType()) {
// Void type; nothing to return.
- ReturnValue = nullptr;
+ ReturnValue = Address::invalid();
// Count the implicit return.
if (!endsWithReturn(D))
@@ -709,7 +763,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
auto AI = CurFn->arg_begin();
if (CurFnInfo->getReturnInfo().isSRetAfterThis())
++AI;
- ReturnValue = AI;
+ ReturnValue = Address(AI, CurFnInfo->getReturnInfo().getIndirectAlign());
} else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
!hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
// Load the sret pointer from the argument struct and return into that.
@@ -717,7 +771,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
llvm::Function::arg_iterator EI = CurFn->arg_end();
--EI;
llvm::Value *Addr = Builder.CreateStructGEP(nullptr, EI, Idx);
- ReturnValue = Builder.CreateLoad(Addr, "agg.result");
+ Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
+ ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
} else {
ReturnValue = CreateIRTemp(RetTy, "retval");
@@ -1249,20 +1304,18 @@ void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
- llvm::Value *dest, llvm::Value *src,
+ Address dest, Address src,
llvm::Value *sizeInChars) {
- std::pair<CharUnits,CharUnits> baseSizeAndAlign
- = CGF.getContext().getTypeInfoInChars(baseType);
-
CGBuilderTy &Builder = CGF.Builder;
+ CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
llvm::Value *baseSizeInChars
- = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());
+ = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
- llvm::Type *i8p = Builder.getInt8PtrTy();
-
- llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
- llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");
+ Address begin =
+ Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
+ llvm::Value *end =
+ Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");
llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
@@ -1272,17 +1325,19 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
// count must be nonzero.
CGF.EmitBlock(loopBB);
- llvm::PHINode *cur = Builder.CreatePHI(i8p, 2, "vla.cur");
- cur->addIncoming(begin, originBB);
+ llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
+ cur->addIncoming(begin.getPointer(), originBB);
+
+ CharUnits curAlign =
+ dest.getAlignment().alignmentOfArrayElement(baseSize);
// memcpy the individual element bit-pattern.
- Builder.CreateMemCpy(cur, src, baseSizeInChars,
- baseSizeAndAlign.second.getQuantity(),
+ Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
/*volatile*/ false);
// Go to the next element.
- llvm::Value *next = Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(),
- cur, 1, "vla.next");
+ llvm::Value *next =
+ Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
// Leave if that's the end of the VLA.
llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
@@ -1293,7 +1348,7 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
}
void
-CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
+CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
// Ignore empty classes in C++.
if (getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
@@ -1303,23 +1358,17 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
}
// Cast the dest ptr to the appropriate i8 pointer type.
- unsigned DestAS =
- cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
- llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
- if (DestPtr->getType() != BP)
- DestPtr = Builder.CreateBitCast(DestPtr, BP);
+ if (DestPtr.getElementType() != Int8Ty)
+ DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
// Get size info for this aggregate.
- std::pair<CharUnits, CharUnits> TypeInfo =
- getContext().getTypeInfoInChars(Ty);
- CharUnits Size = TypeInfo.first;
- CharUnits Align = TypeInfo.second;
+ CharUnits size = getContext().getTypeSizeInChars(Ty);
llvm::Value *SizeVal;
const VariableArrayType *vla;
// Don't bother emitting a zero-byte memset.
- if (Size.isZero()) {
+ if (size.isZero()) {
// But note that getTypeInfo returns 0 for a VLA.
if (const VariableArrayType *vlaType =
dyn_cast_or_null<VariableArrayType>(
@@ -1337,7 +1386,7 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
return;
}
} else {
- SizeVal = CGM.getSize(Size);
+ SizeVal = CGM.getSize(size);
vla = nullptr;
}
@@ -1356,21 +1405,22 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
/*isConstant=*/true,
llvm::GlobalVariable::PrivateLinkage,
NullConstant, Twine());
- llvm::Value *SrcPtr =
- Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());
+ CharUnits NullAlign = DestPtr.getAlignment();
+ NullVariable->setAlignment(NullAlign.getQuantity());
+ Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
+ NullAlign);
if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
// Get and call the appropriate llvm.memcpy overload.
- Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity(), false);
+ Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
return;
}
// Otherwise, just memset the whole thing to zero. This is legal
// because in LLVM, all default initializers (other than the ones we just
// handled above) are guaranteed to have a bit pattern of all zeros.
- Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal,
- Align.getQuantity(), false);
+ Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
}
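
The CreateMemCpy/CreateMemSet calls above drop their explicit alignment arguments. The assumed shape of the Address-taking CGBuilder overloads they rely on — a sketch of member functions in CGBuilder.h, which this section doesn't show — is that each Address supplies its own alignment, with memcpy conservatively taking the smaller of the two:

    llvm::CallInst *CreateMemCpy(Address Dest, Address Src, llvm::Value *Size,
                                 bool IsVolatile = false) {
      // Use the weaker of the two known alignments.
      clang::CharUnits Align = std::min(Dest.getAlignment(), Src.getAlignment());
      return CreateMemCpy(Dest.getPointer(), Src.getPointer(), Size,
                          Align.getQuantity(), IsVolatile);
    }

    llvm::CallInst *CreateMemSet(Address Dest, llvm::Value *Value,
                                 llvm::Value *Size, bool IsVolatile = false) {
      return CreateMemSet(Dest.getPointer(), Value, Size,
                          Dest.getAlignment().getQuantity(), IsVolatile);
    }
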
llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
@@ -1389,7 +1439,7 @@ llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
// If we already made the indirect branch for indirect goto, return its block.
if (IndirectBranch) return IndirectBranch->getParent();
- CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));
+ CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
// Create the PHI node that indirect gotos will add entries to.
llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
@@ -1404,7 +1454,7 @@ llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
/// element type and a properly-typed first element pointer.
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
QualType &baseType,
- llvm::Value *&addr) {
+ Address &addr) {
const ArrayType *arrayType = origArrayType;
// If it's a VLA, we have to load the stored size. Note that
@@ -1443,8 +1493,7 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
QualType eltType;
llvm::ArrayType *llvmArrayType =
- dyn_cast<llvm::ArrayType>(
- cast<llvm::PointerType>(addr->getType())->getElementType());
+ dyn_cast<llvm::ArrayType>(addr.getElementType());
while (llvmArrayType) {
assert(isa<ConstantArrayType>(arrayType));
assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
@@ -1472,12 +1521,13 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
arrayType = getContext().getAsArrayType(eltType);
}
- unsigned AddressSpace = addr->getType()->getPointerAddressSpace();
- llvm::Type *BaseType = ConvertType(eltType)->getPointerTo(AddressSpace);
- addr = Builder.CreateBitCast(addr, BaseType, "array.begin");
+ llvm::Type *baseType = ConvertType(eltType);
+ addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
} else {
// Create the actual GEP.
- addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");
+ addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
+ gepIndices, "array.begin"),
+ addr.getAlignment());
}
baseType = eltType;
@@ -1662,9 +1712,9 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
} while (type->isVariablyModifiedType());
}
-llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
+Address CodeGenFunction::EmitVAListRef(const Expr* E) {
if (getContext().getBuiltinVaListType()->isArrayType())
- return EmitScalarExpr(E);
+ return EmitPointerWithAlignment(E);
return EmitLValue(E).getAddress();
}
@@ -1726,9 +1776,10 @@ void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
I->getAnnotation(), D->getLocation());
}
-llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
- llvm::Value *V) {
+Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
+ Address Addr) {
assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
+ llvm::Value *V = Addr.getPointer();
llvm::Type *VTy = V->getType();
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
CGM.Int8PtrTy);
@@ -1743,7 +1794,7 @@ llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
V = Builder.CreateBitCast(V, VTy);
}
- return V;
+ return Address(V, Addr.getAlignment());
}
CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index 22f6f6c5a56..86154e923eb 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -81,6 +81,8 @@ class CGFunctionInfo;
class CGRecordLayout;
class CGBlockInfo;
class CGCXXABI;
+class BlockByrefHelpers;
+class BlockByrefInfo;
class BlockFlags;
class BlockFieldFlags;
@@ -160,9 +162,9 @@ public:
/// ReturnBlock - Unified return block.
JumpDest ReturnBlock;
- /// ReturnValue - The temporary alloca to hold the return value. This is null
- /// iff the function has no return value.
- llvm::Value *ReturnValue;
+ /// ReturnValue - The temporary alloca to hold the return
+ /// value. This is invalid iff the function has no return value.
+ Address ReturnValue;
/// AllocaInsertPoint - This is an instruction in the entry block before which
/// we prefer to insert allocas.
@@ -328,7 +330,7 @@ public:
/// A stack of exception code slots. Entering an __except block pushes a slot
/// on the stack and leaving pops one. The __exception_code() intrinsic loads
/// a value from the top of the stack.
- SmallVector<llvm::Value *, 1> SEHCodeSlotStack;
+ SmallVector<Address, 1> SEHCodeSlotStack;
/// Value returned by __exception_info intrinsic.
llvm::Value *SEHInfo = nullptr;
@@ -420,13 +422,12 @@ public:
/// complete-object destructor of an object of the given type at the
/// given address. Does nothing if T is not a C++ class type with a
/// non-trivial destructor.
- void PushDestructorCleanup(QualType T, llvm::Value *Addr);
+ void PushDestructorCleanup(QualType T, Address Addr);
/// PushDestructorCleanup - Push a cleanup to call the
/// complete-object variant of the given destructor on the object at
/// the given address.
- void PushDestructorCleanup(const CXXDestructorDecl *Dtor,
- llvm::Value *Addr);
+ void PushDestructorCleanup(const CXXDestructorDecl *Dtor, Address Addr);
/// PopCleanupBlock - Will pop the cleanup entry on the stack and
/// process all branch fixups.
@@ -556,13 +557,14 @@ public:
void rescopeLabels();
};
+ typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;
+
/// \brief The scope used to remap some variables as private in the OpenMP
/// loop body (or other captured region emitted without outlining), and to
/// restore old vars back on exit.
class OMPPrivateScope : public RunCleanupsScope {
- typedef llvm::DenseMap<const VarDecl *, llvm::Value *> VarDeclMapTy;
- VarDeclMapTy SavedLocals;
- VarDeclMapTy SavedPrivates;
+ DeclMapTy SavedLocals;
+ DeclMapTy SavedPrivates;
private:
OMPPrivateScope(const OMPPrivateScope &) = delete;
@@ -579,21 +581,30 @@ public:
/// been privatized already.
bool
addPrivate(const VarDecl *LocalVD,
- const std::function<llvm::Value *()> &PrivateGen) {
+ llvm::function_ref<Address()> PrivateGen) {
assert(PerformCleanup && "adding private to dead scope");
- if (SavedLocals.count(LocalVD) > 0) return false;
- SavedLocals[LocalVD] = CGF.LocalDeclMap.lookup(LocalVD);
- CGF.LocalDeclMap.erase(LocalVD);
- auto *V = PrivateGen();
+
+ // Only save it once.
+ if (SavedLocals.count(LocalVD)) return false;
+
+ // Copy the existing local entry to SavedLocals.
+ auto it = CGF.LocalDeclMap.find(LocalVD);
+ if (it != CGF.LocalDeclMap.end()) {
+ SavedLocals.insert({LocalVD, it->second});
+ } else {
+ SavedLocals.insert({LocalVD, Address::invalid()});
+ }
+
+ // Generate the private entry.
+ Address Addr = PrivateGen();
QualType VarTy = LocalVD->getType();
if (VarTy->isReferenceType()) {
- auto *TempAlloca = CGF.CreateMemTemp(VarTy);
- LValue RefLVal = CGF.MakeNaturalAlignAddrLValue(TempAlloca, VarTy);
- CGF.EmitStoreOfScalar(V, RefLVal);
- V = TempAlloca;
+ Address Temp = CGF.CreateMemTemp(VarTy);
+ CGF.Builder.CreateStore(Addr.getPointer(), Temp);
+ Addr = Temp;
}
- SavedPrivates[LocalVD] = V;
- CGF.LocalDeclMap[LocalVD] = SavedLocals[LocalVD];
+ SavedPrivates.insert({LocalVD, Addr});
+
return true;
}
@@ -606,19 +617,14 @@ public:
/// private copies.
/// \return true if at least one variable was privatized, false otherwise.
bool Privatize() {
- for (auto VDPair : SavedPrivates) {
- CGF.LocalDeclMap[VDPair.first] = VDPair.second;
- }
+ copyInto(SavedPrivates, CGF.LocalDeclMap);
SavedPrivates.clear();
return !SavedLocals.empty();
}
void ForceCleanup() {
RunCleanupsScope::ForceCleanup();
- // Remap vars back to the original values.
- for (auto I : SavedLocals) {
- CGF.LocalDeclMap[I.first] = I.second;
- }
+ copyInto(SavedLocals, CGF.LocalDeclMap);
SavedLocals.clear();
}
@@ -627,6 +633,25 @@ public:
if (PerformCleanup)
ForceCleanup();
}
+
+ private:
+ /// Copy all the entries in the source map over the corresponding
+ /// entries in the destination, which must exist.
+ static void copyInto(const DeclMapTy &src, DeclMapTy &dest) {
+ for (auto &pair : src) {
+ if (!pair.second.isValid()) {
+ dest.erase(pair.first);
+ continue;
+ }
+
+ auto it = dest.find(pair.first);
+ if (it != dest.end()) {
+ it->second = pair.second;
+ } else {
+ dest.insert(pair);
+ }
+ }
+ }
};
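
A hedged usage sketch of the new Address-returning callback; the helper name and the "omp.private" label are illustrative, not from the patch:

```cpp
// Illustrative only: privatize VD inside an OpenMP region, then remap.
void privatizeVar(CodeGenFunction &CGF,
                  CodeGenFunction::OMPPrivateScope &Scope,
                  const VarDecl *VD) {
  Scope.addPrivate(VD, [&]() -> Address {
    // The callback lazily produces the private copy's address;
    // addPrivate records it in SavedPrivates.
    return CGF.CreateMemTemp(VD->getType(), "omp.private");
  });
  // Point LocalDeclMap at the private copies until the scope ends.
  Scope.Privatize();
}
```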
/// \brief Takes the old cleanup stack size and emits the cleanup blocks
@@ -707,10 +732,11 @@ public:
/// one branch or the other of a conditional expression.
bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
- void setBeforeOutermostConditional(llvm::Value *value, llvm::Value *addr) {
+ void setBeforeOutermostConditional(llvm::Value *value, Address addr) {
assert(isInConditionalBranch());
llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
- new llvm::StoreInst(value, addr, &block->back());
+ auto store = new llvm::StoreInst(value, addr.getPointer(), &block->back());
+ store->setAlignment(addr.getAlignment().getQuantity());
}
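
The explicit setAlignment call matters: a raw llvm::StoreInst otherwise defaults to the ABI alignment of the value type, which can overstate what this address actually guarantees. When insertion before an existing instruction is not needed, the same effect is available through standard IRBuilder API (a sketch):

```cpp
// Sketch: equivalent aligned store via the builder, usable when appending
// to the current block rather than inserting before &block->back().
CGF.Builder.CreateAlignedStore(value, addr.getPointer(),
                               addr.getAlignment().getQuantity());
```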
/// An RAII object to record that we're evaluating a statement
@@ -869,15 +895,6 @@ public:
}
};
- /// getByrefValueFieldNumber - Given a declaration, returns the LLVM field
- /// number that holds the value.
- std::pair<llvm::Type *, unsigned>
- getByRefValueLLVMField(const ValueDecl *VD) const;
-
- /// BuildBlockByrefAddress - Computes address location of the
- /// variable which is declared as __block.
- llvm::Value *BuildBlockByrefAddress(llvm::Value *BaseAddr,
- const VarDecl *V);
private:
CGDebugInfo *DebugInfo;
bool DisableDebugInfo;
@@ -894,7 +911,6 @@ private:
/// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
/// decls.
- typedef llvm::DenseMap<const Decl*, llvm::Value*> DeclMapTy;
DeclMapTy LocalDeclMap;
/// Track escaped local variables with auto storage. Used during SEH
@@ -994,7 +1010,7 @@ public:
/// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
class FieldConstructionScope {
public:
- FieldConstructionScope(CodeGenFunction &CGF, llvm::Value *This)
+ FieldConstructionScope(CodeGenFunction &CGF, Address This)
: CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
CGF.CXXDefaultInitExprThis = This;
}
@@ -1004,7 +1020,7 @@ public:
private:
CodeGenFunction &CGF;
- llvm::Value *OldCXXDefaultInitExprThis;
+ Address OldCXXDefaultInitExprThis;
};
/// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
@@ -1012,16 +1028,20 @@ public:
class CXXDefaultInitExprScope {
public:
CXXDefaultInitExprScope(CodeGenFunction &CGF)
- : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue) {
- CGF.CXXThisValue = CGF.CXXDefaultInitExprThis;
+ : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
+ OldCXXThisAlignment(CGF.CXXThisAlignment) {
+ CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getPointer();
+ CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
}
~CXXDefaultInitExprScope() {
CGF.CXXThisValue = OldCXXThisValue;
+ CGF.CXXThisAlignment = OldCXXThisAlignment;
}
public:
CodeGenFunction &CGF;
llvm::Value *OldCXXThisValue;
+ CharUnits OldCXXThisAlignment;
};
private:
@@ -1030,10 +1050,12 @@ private:
ImplicitParamDecl *CXXABIThisDecl;
llvm::Value *CXXABIThisValue;
llvm::Value *CXXThisValue;
+ CharUnits CXXABIThisAlignment;
+ CharUnits CXXThisAlignment;
/// The value of 'this' to use when evaluating CXXDefaultInitExprs within
/// this expression.
- llvm::Value *CXXDefaultInitExprThis;
+ Address CXXDefaultInitExprThis = Address::invalid();
/// CXXStructorImplicitParamDecl - When generating code for a constructor or
/// destructor, this will hold the implicit argument (e.g. VTT).
@@ -1052,10 +1074,9 @@ private:
/// handling code.
SourceLocation CurEHLocation;
- /// ByrefValueInfoMap - For each __block variable, contains a pair of the LLVM
- /// type as well as the field number that contains the actual data.
- llvm::DenseMap<const ValueDecl *, std::pair<llvm::Type *,
- unsigned> > ByRefValueInfo;
+ /// BlockByrefInfos - For each __block variable, contains
+ /// information about the layout of the variable.
+ llvm::DenseMap<const ValueDecl *, BlockByrefInfo> BlockByrefInfos;
llvm::BasicBlock *TerminateLandingPad;
llvm::BasicBlock *TerminateHandler;
@@ -1096,15 +1117,15 @@ public:
/// Returns a pointer to the function's exception object and selector slot,
/// which is assigned in every landing pad.
- llvm::Value *getExceptionSlot();
- llvm::Value *getEHSelectorSlot();
+ Address getExceptionSlot();
+ Address getEHSelectorSlot();
/// Returns the contents of the function's exception object and selector
/// slots.
llvm::Value *getExceptionFromSlot();
llvm::Value *getSelectorFromSlot();
- llvm::Value *getNormalCleanupDestSlot();
+ Address getNormalCleanupDestSlot();
llvm::BasicBlock *getUnreachableBlock() {
if (!UnreachableBlock) {
@@ -1131,38 +1152,41 @@ public:
// Cleanups
//===--------------------------------------------------------------------===//
- typedef void Destroyer(CodeGenFunction &CGF, llvm::Value *addr, QualType ty);
+ typedef void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty);
void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
- llvm::Value *arrayEndPointer,
+ Address arrayEndPointer,
QualType elementType,
+ CharUnits elementAlignment,
Destroyer *destroyer);
void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
llvm::Value *arrayEnd,
QualType elementType,
+ CharUnits elementAlignment,
Destroyer *destroyer);
void pushDestroy(QualType::DestructionKind dtorKind,
- llvm::Value *addr, QualType type);
+ Address addr, QualType type);
void pushEHDestroy(QualType::DestructionKind dtorKind,
- llvm::Value *addr, QualType type);
- void pushDestroy(CleanupKind kind, llvm::Value *addr, QualType type,
+ Address addr, QualType type);
+ void pushDestroy(CleanupKind kind, Address addr, QualType type,
Destroyer *destroyer, bool useEHCleanupForArray);
- void pushLifetimeExtendedDestroy(CleanupKind kind, llvm::Value *addr,
+ void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr,
QualType type, Destroyer *destroyer,
bool useEHCleanupForArray);
void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
llvm::Value *CompletePtr,
QualType ElementType);
- void pushStackRestore(CleanupKind kind, llvm::Value *SPMem);
- void emitDestroy(llvm::Value *addr, QualType type, Destroyer *destroyer,
+ void pushStackRestore(CleanupKind kind, Address SPMem);
+ void emitDestroy(Address addr, QualType type, Destroyer *destroyer,
bool useEHCleanupForArray);
- llvm::Function *generateDestroyHelper(llvm::Constant *addr, QualType type,
+ llvm::Function *generateDestroyHelper(Address addr, QualType type,
Destroyer *destroyer,
bool useEHCleanupForArray,
const VarDecl *VD);
void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
- QualType type, Destroyer *destroyer,
+ QualType elementType, CharUnits elementAlign,
+ Destroyer *destroyer,
bool checkZeroLength, bool useEHCleanup);
Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
@@ -1248,15 +1272,25 @@ public:
void emitByrefStructureInit(const AutoVarEmission &emission);
void enterByrefCleanup(const AutoVarEmission &emission);
- llvm::Value *LoadBlockStruct() {
- assert(BlockPointer && "no block pointer set!");
- return BlockPointer;
- }
+ void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
+ llvm::Value *ptr);
+
+ Address LoadBlockStruct();
void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
void AllocateBlockDecl(const DeclRefExpr *E);
- llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
- llvm::Type *BuildByRefType(const VarDecl *var);
+ Address GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
+
+ /// emitBlockByrefAddress - Computes the location of the
+ /// data in a variable which is declared as __block.
+ Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V,
+ bool followForward = true);
+ Address emitBlockByrefAddress(Address baseAddr,
+ const BlockByrefInfo &info,
+ bool followForward,
+ const llvm::Twine &name);
+
+ const BlockByrefInfo &getBlockByrefInfo(const VarDecl *var);
void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
const CGFunctionInfo &FnInfo);
@@ -1300,6 +1334,8 @@ public:
void EmitCallAndReturnForThunk(llvm::Value *Callee, const ThunkInfo *Thunk);
+ void FinishThunk();
+
/// Emit a musttail call for a thunk with a potentially adjusted this pointer.
void EmitMustTailThunk(const CXXMethodDecl *MD, llvm::Value *AdjustedThisPtr,
llvm::Value *Callee);
@@ -1338,7 +1374,7 @@ public:
/// GetVTablePtr - Return the Value of the vtable pointer member pointed
/// to by This.
- llvm::Value *GetVTablePtr(llvm::Value *This, llvm::Type *Ty);
+ llvm::Value *GetVTablePtr(Address This, llvm::Type *Ty);
enum CFITypeCheckKind {
CFITCK_VCall,
@@ -1505,39 +1541,79 @@ public:
// Helpers
//===--------------------------------------------------------------------===//
- LValue MakeAddrLValue(llvm::Value *V, QualType T,
- CharUnits Alignment = CharUnits()) {
- return LValue::MakeAddr(V, T, Alignment, getContext(),
+ LValue MakeAddrLValue(Address Addr, QualType T,
+ AlignmentSource AlignSource = AlignmentSource::Type) {
+ return LValue::MakeAddr(Addr, T, getContext(), AlignSource,
CGM.getTBAAInfo(T));
}
+ LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
+ AlignmentSource AlignSource = AlignmentSource::Type) {
+ return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
+ AlignSource, CGM.getTBAAInfo(T));
+ }
+
+ LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T);
+ CharUnits getNaturalTypeAlignment(QualType T,
+ AlignmentSource *Source = nullptr,
+ bool forPointeeType = false);
+ CharUnits getNaturalPointeeTypeAlignment(QualType T,
+ AlignmentSource *Source = nullptr);
+
+ Address EmitLoadOfReference(Address Ref, const ReferenceType *RefTy,
+ AlignmentSource *Source = nullptr);
+ LValue EmitLoadOfReferenceLValue(Address Ref, const ReferenceType *RefTy);
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The caller is responsible for setting an appropriate alignment on
/// the alloca.
llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty,
const Twine &Name = "tmp");
+ Address CreateTempAlloca(llvm::Type *Ty, CharUnits align,
+ const Twine &Name = "tmp");
- /// InitTempAlloca - Provide an initial value for the given alloca.
- void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);
+ /// CreateDefaultAlignTempAlloca - This creates an alloca with the
+ /// default ABI alignment of the given LLVM type.
+ ///
+ /// IMPORTANT NOTE: This is *not* generally the right alignment for
+ /// any given AST type that happens to have been lowered to the
+ /// given IR type. This should only ever be used for function-local,
+ /// IR-driven manipulations like saving and restoring a value. Do
+ /// not hand this address off to arbitrary IRGen routines, and especially
+ /// do not pass it as an argument to a function that might expect a
+ /// properly ABI-aligned value.
+ Address CreateDefaultAlignTempAlloca(llvm::Type *Ty,
+ const Twine &Name = "tmp");
+
+ /// InitTempAlloca - Provide an initial value for the given alloca which
+ /// will be observable at all locations in the function.
+ ///
+ /// The address should be something that was returned from one of
+ /// the CreateTempAlloca or CreateMemTemp routines, and the
+ /// initializer must be valid in the entry block (i.e. it must
+ /// either be a constant or an argument value).
+ void InitTempAlloca(Address Alloca, llvm::Value *Value);
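
A small usage sketch combining the overloads above; it assumes the CodeGenTypeCache members (Int32Ty) are visible on CodeGenFunction, and the helper name is hypothetical:

```cpp
// Sketch: a 4-byte-aligned scratch slot initialized once in the entry block.
Address makeScratchSlot(CodeGenFunction &CGF) {
  Address Slot = CGF.CreateTempAlloca(CGF.Int32Ty,
                                      CharUnits::fromQuantity(4), "scratch");
  // The initializer is a constant, so it is legal in the entry block.
  CGF.InitTempAlloca(Slot, llvm::ConstantInt::get(CGF.Int32Ty, 0));
  return Slot;
}
```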
/// CreateIRTemp - Create a temporary IR object of the given type, with
/// appropriate alignment. This routine should only be used when a temporary
/// value needs to be stored into an alloca (for example, to avoid explicit
/// PHI construction), but the type is the IR type, not the type appropriate
/// for storing in memory.
- llvm::AllocaInst *CreateIRTemp(QualType T, const Twine &Name = "tmp");
+ ///
+ /// That is, this is exactly equivalent to CreateMemTemp, but calling
+ /// ConvertType instead of ConvertTypeForMem.
+ Address CreateIRTemp(QualType T, const Twine &Name = "tmp");
/// CreateMemTemp - Create a temporary memory object of the given type, with
/// appropriate alignment.
- llvm::AllocaInst *CreateMemTemp(QualType T, const Twine &Name = "tmp");
+ Address CreateMemTemp(QualType T, const Twine &Name = "tmp");
+ Address CreateMemTemp(QualType T, CharUnits Align, const Twine &Name = "tmp");
/// CreateAggTemp - Create a temporary memory object for the given
/// aggregate type.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
- CharUnits Alignment = getContext().getTypeAlignInChars(T);
- return AggValueSlot::forAddr(CreateMemTemp(T, Name), Alignment,
+ return AggValueSlot::forAddr(CreateMemTemp(T, Name),
T.getQualifiers(),
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
@@ -1570,7 +1646,7 @@ public:
// EmitVAListRef - Emit a "reference" to a va_list; this is either the address
// or the value of the expression, depending on how va_list is defined.
- llvm::Value *EmitVAListRef(const Expr *E);
+ Address EmitVAListRef(const Expr *E);
/// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
@@ -1578,10 +1654,10 @@ public:
/// EmitAnyExprToMem - Emits the code necessary to evaluate an
/// arbitrary expression into the given memory location.
- void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
+ void EmitAnyExprToMem(const Expr *E, Address Location,
Qualifiers Quals, bool IsInitializer);
- void EmitAnyExprToExn(const Expr *E, llvm::Value *Addr);
+ void EmitAnyExprToExn(const Expr *E, Address Addr);
/// EmitExprAsInit - Emits the code necessary to initialize a
/// location in memory with the given initializer.
@@ -1601,19 +1677,15 @@ public:
///
/// The difference from EmitAggregateCopy is that tail padding is not copied.
/// This is required for correctness when assigning non-POD structures in C++.
- void EmitAggregateAssign(llvm::Value *DestPtr, llvm::Value *SrcPtr,
+ void EmitAggregateAssign(Address DestPtr, Address SrcPtr,
QualType EltTy) {
bool IsVolatile = hasVolatileMember(EltTy);
- EmitAggregateCopy(DestPtr, SrcPtr, EltTy, IsVolatile, CharUnits::Zero(),
- true);
+ EmitAggregateCopy(DestPtr, SrcPtr, EltTy, IsVolatile, true);
}
- void EmitAggregateCopyCtor(llvm::Value *DestPtr, llvm::Value *SrcPtr,
- QualType DestTy, QualType SrcTy) {
- CharUnits DestTypeAlign = getContext().getTypeAlignInChars(DestTy);
- CharUnits SrcTypeAlign = getContext().getTypeAlignInChars(SrcTy);
+ void EmitAggregateCopyCtor(Address DestPtr, Address SrcPtr,
+ QualType DestTy, QualType SrcTy) {
EmitAggregateCopy(DestPtr, SrcPtr, SrcTy, /*IsVolatile=*/false,
- std::min(DestTypeAlign, SrcTypeAlign),
/*IsAssignment=*/false);
}
@@ -1623,9 +1695,8 @@ public:
/// volatile.
/// \param isAssignment - If false, allow padding to be copied. This often
/// yields more efficient code.
- void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
+ void EmitAggregateCopy(Address DestPtr, Address SrcPtr,
QualType EltTy, bool isVolatile=false,
- CharUnits Alignment = CharUnits::Zero(),
bool isAssignment = false);
/// StartBlock - Start new block named N. If insert block is a dummy block
@@ -1633,10 +1704,11 @@ public:
void StartBlock(const char *N);
/// GetAddrOfLocalVar - Return the address of a local variable.
- llvm::Value *GetAddrOfLocalVar(const VarDecl *VD) {
- llvm::Value *Res = LocalDeclMap[VD];
- assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
- return Res;
+ Address GetAddrOfLocalVar(const VarDecl *VD) {
+ auto it = LocalDeclMap.find(VD);
+ assert(it != LocalDeclMap.end() &&
+ "Invalid argument to GetAddrOfLocalVar(), no decl!");
+ return it->second;
}
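
The switch from `LocalDeclMap[VD]` to `find` is forced by the type change: Address has no default-constructed state, so DenseMap's `operator[]` and `lookup` (both of which would default-construct a value) no longer apply, and a missing entry must be represented explicitly. A hedged sketch of a tolerant lookup in the same style:

```cpp
// Sketch only: non-asserting variant using the explicit invalid state.
Address lookupLocal(const CodeGenFunction::DeclMapTy &Map,
                    const VarDecl *VD) {
  auto It = Map.find(VD);
  return It != Map.end() ? It->second : Address::invalid();
}
```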
/// getOpaqueLValueMapping - Given an opaque value expression (which
@@ -1671,19 +1743,19 @@ public:
/// EmitNullInitialization - Generate code to set a value of the given type to
/// null. If the type contains data member pointers, they will be initialized
/// to -1 in accordance with the Itanium C++ ABI.
- void EmitNullInitialization(llvm::Value *DestPtr, QualType Ty);
+ void EmitNullInitialization(Address DestPtr, QualType Ty);
// EmitVAArg - Generate code to get an argument from the passed in pointer
// and update it accordingly. The return value is a pointer to the argument.
// FIXME: We should be able to get rid of this method and use the va_arg
// instruction in LLVM instead once it works well enough.
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
+ Address EmitVAArg(Address VAListAddr, QualType Ty);
/// emitArrayLength - Compute the length of an array, even if it's a
/// VLA, and drill down to the base element type.
llvm::Value *emitArrayLength(const ArrayType *arrayType,
QualType &baseType,
- llvm::Value *&addr);
+ Address &addr);
/// EmitVLASize - Capture all the sizes for the VLA expressions in
/// the given variably-modified type and store them in the VLASizeMap.
@@ -1704,6 +1776,7 @@ public:
assert(CXXThisValue && "no 'this' value for this function");
return CXXThisValue;
}
+ Address LoadCXXThisAddress();
/// LoadCXXVTT - Load the VTT parameter to base constructors/destructors have
/// virtual bases.
@@ -1724,25 +1797,27 @@ public:
/// GetAddressOfBaseOfCompleteClass - Convert the given pointer to a
/// complete class to the given direct base.
- llvm::Value *
- GetAddressOfDirectBaseInCompleteClass(llvm::Value *Value,
+ Address
+ GetAddressOfDirectBaseInCompleteClass(Address Value,
const CXXRecordDecl *Derived,
const CXXRecordDecl *Base,
bool BaseIsVirtual);
+ static bool ShouldNullCheckClassCastValue(const CastExpr *Cast);
+
/// GetAddressOfBaseClass - This function will add the necessary delta to the
/// load of 'this' and returns address of the base class.
- llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
- const CXXRecordDecl *Derived,
- CastExpr::path_const_iterator PathBegin,
- CastExpr::path_const_iterator PathEnd,
- bool NullCheckValue, SourceLocation Loc);
-
- llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
- const CXXRecordDecl *Derived,
- CastExpr::path_const_iterator PathBegin,
- CastExpr::path_const_iterator PathEnd,
- bool NullCheckValue);
+ Address GetAddressOfBaseClass(Address Value,
+ const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
+ bool NullCheckValue, SourceLocation Loc);
+
+ Address GetAddressOfDerivedClass(Address Value,
+ const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
+ bool NullCheckValue);
/// GetVTTParameter - Return the VTT parameter that should be passed to a
/// base constructor/destructor with virtual bases.
@@ -1763,21 +1838,21 @@ public:
const FunctionArgList &Args);
void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
bool ForVirtualBase, bool Delegating,
- llvm::Value *This, const CXXConstructExpr *E);
+ Address This, const CXXConstructExpr *E);
void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
- llvm::Value *This, llvm::Value *Src,
- const CXXConstructExpr *E);
+ Address This, Address Src,
+ const CXXConstructExpr *E);
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
const ConstantArrayType *ArrayTy,
- llvm::Value *ArrayPtr,
+ Address ArrayPtr,
const CXXConstructExpr *E,
bool ZeroInitialization = false);
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
llvm::Value *NumElements,
- llvm::Value *ArrayPtr,
+ Address ArrayPtr,
const CXXConstructExpr *E,
bool ZeroInitialization = false);
@@ -1785,15 +1860,15 @@ public:
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
bool ForVirtualBase, bool Delegating,
- llvm::Value *This);
+ Address This);
void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
- llvm::Type *ElementTy, llvm::Value *NewPtr,
+ llvm::Type *ElementTy, Address NewPtr,
llvm::Value *NumElements,
llvm::Value *AllocSizeWithoutCookie);
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
- llvm::Value *Ptr);
+ Address Ptr);
llvm::Value *EmitLifetimeStart(uint64_t Size, llvm::Value *Addr);
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
@@ -1807,9 +1882,9 @@ public:
RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
const Expr *Arg, bool IsDelete);
- llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
- llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
- llvm::Value* EmitCXXUuidofExpr(const CXXUuidofExpr *E);
+ llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
+ llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
+ Address EmitCXXUuidofExpr(const CXXUuidofExpr *E);
/// \brief Situations in which we might emit a check for the suitability of a
/// pointer or glvalue.
@@ -1906,12 +1981,9 @@ public:
const VarDecl *Variable;
- /// The alignment of the variable.
- CharUnits Alignment;
-
- /// The address of the alloca. Null if the variable was emitted
+ /// The address of the alloca. Invalid if the variable was emitted
/// as a global constant.
- llvm::Value *Address;
+ Address Addr;
llvm::Value *NRVOFlag;
@@ -1926,14 +1998,14 @@ public:
llvm::Value *SizeForLifetimeMarkers;
struct Invalid {};
- AutoVarEmission(Invalid) : Variable(nullptr) {}
+ AutoVarEmission(Invalid) : Variable(nullptr), Addr(Address::invalid()) {}
AutoVarEmission(const VarDecl &variable)
- : Variable(&variable), Address(nullptr), NRVOFlag(nullptr),
+ : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
IsByRef(false), IsConstantAggregate(false),
SizeForLifetimeMarkers(nullptr) {}
- bool wasEmittedAsGlobal() const { return Address == nullptr; }
+ bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
public:
static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
@@ -1948,19 +2020,17 @@ public:
/// Returns the raw, allocated address, which is not necessarily
/// the address of the object itself.
- llvm::Value *getAllocatedAddress() const {
- return Address;
+ Address getAllocatedAddress() const {
+ return Addr;
}
/// Returns the address of the object within this declaration.
/// Note that this does not chase the forwarding pointer for
/// __block decls.
- llvm::Value *getObjectAddress(CodeGenFunction &CGF) const {
- if (!IsByRef) return Address;
+ Address getObjectAddress(CodeGenFunction &CGF) const {
+ if (!IsByRef) return Addr;
- auto F = CGF.getByRefValueLLVMField(Variable);
- return CGF.Builder.CreateStructGEP(F.first, Address, F.second,
- Variable->getNameAsString());
+ return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false);
}
};
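
A sketch distinguishing the two accessors for a __block variable; the helper is illustrative, not part of the patch:

```cpp
// For __block variables the allocation is the byref struct, while the
// object lives inside it; getObjectAddress drills in without following
// the forwarding pointer, which is what initialization wants.
void storeInitialValue(CodeGenFunction &CGF,
                       const CodeGenFunction::AutoVarEmission &E,
                       llvm::Value *Init) {
  Address Obj = E.getObjectAddress(CGF); // != getAllocatedAddress() if byref
  CGF.Builder.CreateStore(Init, Obj);
}
```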
AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
@@ -1972,9 +2042,35 @@ public:
void EmitStaticVarDecl(const VarDecl &D,
llvm::GlobalValue::LinkageTypes Linkage);
+ class ParamValue {
+ llvm::Value *Value;
+ unsigned Alignment;
+ ParamValue(llvm::Value *V, unsigned A) : Value(V), Alignment(A) {}
+ public:
+ static ParamValue forDirect(llvm::Value *value) {
+ return ParamValue(value, 0);
+ }
+ static ParamValue forIndirect(Address addr) {
+ assert(!addr.getAlignment().isZero());
+ return ParamValue(addr.getPointer(), addr.getAlignment().getQuantity());
+ }
+
+ bool isIndirect() const { return Alignment != 0; }
+ llvm::Value *getAnyValue() const { return Value; }
+
+ llvm::Value *getDirectValue() const {
+ assert(!isIndirect());
+ return Value;
+ }
+
+ Address getIndirectAddress() const {
+ assert(isIndirect());
+ return Address(Value, CharUnits::fromQuantity(Alignment));
+ }
+ };
+
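ParamValue packs the direct/indirect distinction into a pointer plus an alignment word, using alignment 0 as the "direct" sentinel (hence the assert in forIndirect). A hedged caller sketch:

```cpp
// Illustrative only: wrap an argument before handing it to EmitParmDecl.
void emitOneParam(CodeGenFunction &CGF, const VarDecl &D, unsigned ArgNo,
                  llvm::Value *DirectV, Address IndirectA) {
  CodeGenFunction::ParamValue PV =
      DirectV ? CodeGenFunction::ParamValue::forDirect(DirectV)
              : CodeGenFunction::ParamValue::forIndirect(IndirectA);
  CGF.EmitParmDecl(D, PV, ArgNo);
}
```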
/// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
- void EmitParmDecl(const VarDecl &D, llvm::Value *Arg, bool ArgIsPointer,
- unsigned ArgNo);
+ void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo);
/// protectFromPeepholes - Protect a value that we're intending to
/// store to the side, but which will probably be used later, from
@@ -2011,11 +2107,11 @@ public:
/// \return True if the statement was handled.
bool EmitSimpleStmt(const Stmt *S);
- llvm::Value *EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
- AggValueSlot AVS = AggValueSlot::ignored());
- llvm::Value *EmitCompoundStmtWithoutScope(const CompoundStmt &S,
- bool GetLast = false,
- AggValueSlot AVS =
+ Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
+ AggValueSlot AVS = AggValueSlot::ignored());
+ Address EmitCompoundStmtWithoutScope(const CompoundStmt &S,
+ bool GetLast = false,
+ AggValueSlot AVS =
AggValueSlot::ignored());
/// EmitLabel - Emit the block for the given label. It is legal to call this
@@ -2085,9 +2181,9 @@ public:
/// either be an alloca or a call to llvm.localrecover if there are nested
/// outlined functions. ParentFP is the frame pointer of the outermost parent
/// frame.
- llvm::Value *recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
- llvm::Value *ParentVar,
- llvm::Value *ParentFP);
+ Address recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
+ Address ParentVar,
+ llvm::Value *ParentFP);
void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
ArrayRef<const Attr *> Attrs = None);
@@ -2097,7 +2193,7 @@ public:
void GenerateCapturedStmtFunctionProlog(const CapturedStmt &S);
llvm::Function *GenerateCapturedStmtFunctionEpilog(const CapturedStmt &S);
llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
- llvm::Value *GenerateCapturedStmtArgument(const CapturedStmt &S);
+ Address GenerateCapturedStmtArgument(const CapturedStmt &S);
/// \brief Perform element by element copying of arrays with type \a
/// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
/// generated by \a CopyGen.
@@ -2108,8 +2204,8 @@ public:
/// \param CopyGen Copying procedure that copies the value of a single array
/// element to another single array element.
void EmitOMPAggregateAssign(
- llvm::Value *DestAddr, llvm::Value *SrcAddr, QualType OriginalType,
- const llvm::function_ref<void(llvm::Value *, llvm::Value *)> &CopyGen);
+ Address DestAddr, Address SrcAddr, QualType OriginalType,
+ const llvm::function_ref<void(Address, Address)> &CopyGen);
/// \brief Emit proper copying of data from one variable to another.
///
/// \param OriginalType Original type of the copied variables.
@@ -2121,8 +2217,8 @@ public:
/// the base array element).
/// \param Copy Actual copying expression for copying data from \a SrcVD to \a
/// DestVD.
- void EmitOMPCopy(CodeGenFunction &CGF, QualType OriginalType,
- llvm::Value *DestAddr, llvm::Value *SrcAddr,
+ void EmitOMPCopy(QualType OriginalType,
+ Address DestAddr, Address SrcAddr,
const VarDecl *DestVD, const VarDecl *SrcVD,
const Expr *Copy);
/// \brief Emit atomic update code for constructs: \a X = \a X \a BO \a E or
@@ -2258,8 +2354,8 @@ private:
void EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
const OMPLoopDirective &S,
OMPPrivateScope &LoopScope, bool Ordered,
- llvm::Value *LB, llvm::Value *UB, llvm::Value *ST,
- llvm::Value *IL, llvm::Value *Chunk);
+ Address LB, Address UB, Address ST,
+ Address IL, llvm::Value *Chunk);
/// \brief Emit code for sections directive.
OpenMPDirectiveKind EmitSections(const OMPExecutableDirective &S);
@@ -2306,7 +2402,7 @@ public:
/// that the address will be used to access the object.
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
- RValue convertTempToRValue(llvm::Value *addr, QualType type,
+ RValue convertTempToRValue(Address addr, QualType type,
SourceLocation Loc);
void EmitAtomicInit(Expr *E, LValue lvalue);
@@ -2347,9 +2443,10 @@ public:
/// EmitLoadOfScalar - Load a scalar value from an address, taking
/// care to appropriately convert from the memory representation to
/// the LLVM value representation.
- llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
- unsigned Alignment, QualType Ty,
+ llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
SourceLocation Loc,
+ AlignmentSource AlignSource =
+ AlignmentSource::Type,
llvm::MDNode *TBAAInfo = nullptr,
QualType TBAABaseTy = QualType(),
uint64_t TBAAOffset = 0);
@@ -2363,8 +2460,9 @@ public:
/// EmitStoreOfScalar - Store a scalar value to an address, taking
/// care to appropriately convert from the memory representation to
/// the LLVM value representation.
- void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
- bool Volatile, unsigned Alignment, QualType Ty,
+ void EmitStoreOfScalar(llvm::Value *Value, Address Addr,
+ bool Volatile, QualType Ty,
+ AlignmentSource AlignSource = AlignmentSource::Type,
llvm::MDNode *TBAAInfo = nullptr, bool isInit = false,
QualType TBAABaseTy = QualType(),
uint64_t TBAAOffset = 0);
@@ -2433,10 +2531,13 @@ public:
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
- llvm::Value *EmitExtVectorElementLValue(LValue V);
+ Address EmitExtVectorElementLValue(LValue V);
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc);
+ Address EmitArrayToPointerDecay(const Expr *Array,
+ AlignmentSource *AlignSource = nullptr);
+
class ConstantEmission {
llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
ConstantEmission(llvm::Constant *C, bool isReference)
@@ -2576,6 +2677,10 @@ public:
NestedNameSpecifier *Qualifier,
bool IsArrow, const Expr *Base);
// Compute the object pointer.
+ Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base,
+ llvm::Value *memberPtr,
+ const MemberPointerType *memberPtrType,
+ AlignmentSource *AlignSource = nullptr);
RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
ReturnValueSlot ReturnValue);
@@ -2610,7 +2715,7 @@ public:
unsigned Modifier,
const CallExpr *E,
SmallVectorImpl<llvm::Value *> &Ops,
- llvm::Value *Align = nullptr);
+ Address PtrOp0, Address PtrOp1);
llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
unsigned Modifier, llvm::Type *ArgTy,
const CallExpr *E);
@@ -2656,24 +2761,23 @@ public:
}
// ARC primitives.
- void EmitARCInitWeak(llvm::Value *value, llvm::Value *addr);
- void EmitARCDestroyWeak(llvm::Value *addr);
- llvm::Value *EmitARCLoadWeak(llvm::Value *addr);
- llvm::Value *EmitARCLoadWeakRetained(llvm::Value *addr);
- llvm::Value *EmitARCStoreWeak(llvm::Value *value, llvm::Value *addr,
- bool ignored);
- void EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src);
- void EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src);
+ void EmitARCInitWeak(Address addr, llvm::Value *value);
+ void EmitARCDestroyWeak(Address addr);
+ llvm::Value *EmitARCLoadWeak(Address addr);
+ llvm::Value *EmitARCLoadWeakRetained(Address addr);
+ llvm::Value *EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored);
+ void EmitARCCopyWeak(Address dst, Address src);
+ void EmitARCMoveWeak(Address dst, Address src);
llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
bool resultIgnored);
- llvm::Value *EmitARCStoreStrongCall(llvm::Value *addr, llvm::Value *value,
+ llvm::Value *EmitARCStoreStrongCall(Address addr, llvm::Value *value,
bool resultIgnored);
llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
- void EmitARCDestroyStrong(llvm::Value *addr, ARCPreciseLifetime_t precise);
+ void EmitARCDestroyStrong(Address addr, ARCPreciseLifetime_t precise);
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
llvm::Value *EmitARCAutorelease(llvm::Value *value);
llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
@@ -2765,6 +2869,9 @@ public:
/// EmitLoadOfComplex - Load a complex number from the specified l-value.
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc);
+ Address emitAddrOfRealComponent(Address complex, QualType complexType);
+ Address emitAddrOfImagComponent(Address complex, QualType complexType);
+
/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
@@ -2799,7 +2906,7 @@ public:
/// variables.
void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
ArrayRef<llvm::Function *> CXXThreadLocals,
- llvm::GlobalVariable *Guard = nullptr);
+ Address Guard = Address::invalid());
/// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
/// variables.
@@ -2814,8 +2921,7 @@ public:
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
- void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src,
- const Expr *Exp);
+ void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
void enterFullExpression(const ExprWithCleanups *E) {
if (E->getNumObjects() == 0) return;
@@ -2827,7 +2933,7 @@ public:
void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);
- RValue EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest = nullptr);
+ RValue EmitAtomicExpr(AtomicExpr *E, Address Dest = Address::invalid());
//===--------------------------------------------------------------------===//
// Annotations Emission
@@ -2844,7 +2950,7 @@ public:
/// Emit field annotations for the given field & value. Returns the
/// annotation result.
- llvm::Value *EmitFieldAnnotations(const FieldDecl *D, llvm::Value *V);
+ Address EmitFieldAnnotations(const FieldDecl *D, Address V);
//===--------------------------------------------------------------------===//
// Internal Helpers
@@ -2932,6 +3038,12 @@ private:
llvm::SmallVector<std::pair<llvm::Instruction *, llvm::Value *>, 4>
DeferredReplacements;
+ /// Set the address of a local variable.
+ void setAddrOfLocalVar(const VarDecl *VD, Address Addr) {
+ assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!");
+ LocalDeclMap.insert({VD, Addr});
+ }
+
/// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
/// from function arguments into \arg Dst. See ABIArgInfo::Expand.
///
@@ -3028,6 +3140,30 @@ public:
const FunctionDecl *CalleeDecl = nullptr,
unsigned ParamsToSkip = 0);
+ /// EmitPointerWithAlignment - Given an expression with a pointer
+ /// type, emit the value and compute our best estimate of the
+ /// alignment of the pointee.
+ ///
+ /// Note that this function will conservatively fall back on the type
+ /// when it doesn't recognize the expression, so the alignment it
+ /// reports may be less than the pointee's true dynamic alignment.
+ ///
+ /// \param Source - If non-null, this will be initialized with
+ /// information about the source of the alignment. Note that this
+ /// function will conservatively fall back on the type when it
+ /// doesn't recognize the expression, which means that sometimes the
+ /// reported alignment source is a worst-case guess. One
+ /// reasonable way to use this information is when there's a
+ /// language guarantee that the pointer must be aligned to some
+ /// stricter value, and we're simply trying to ensure that
+ /// sufficiently obvious uses of under-aligned objects don't get
+ /// miscompiled; for example, a placement new into the address of
+ /// a local variable. In such a case, it's quite reasonable to
+ /// just ignore the returned alignment when it isn't from an
+ /// explicit source.
+ Address EmitPointerWithAlignment(const Expr *Addr,
+ AlignmentSource *Source = nullptr);
+
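A hedged example of the usage pattern the comment above recommends; AlignmentSource::Type is the conservative fallback seen elsewhere in this patch, and the helper and its GuaranteedAlign parameter are hypothetical:

```cpp
// Sketch: trust the computed alignment only when it came from something
// more explicit than the bare type.
Address emitAlignedPointer(CodeGenFunction &CGF, const Expr *E,
                           CharUnits GuaranteedAlign) {
  AlignmentSource Source;
  Address Addr = CGF.EmitPointerWithAlignment(E, &Source);
  if (Source == AlignmentSource::Type)
    // Worst-case guess: prefer the language-level guarantee instead.
    Addr = Address(Addr.getPointer(),
                   std::max(Addr.getAlignment(), GuaranteedAlign));
  return Addr;
}
```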
private:
QualType getVarArgType(const Expr *Arg);
@@ -3037,16 +3173,11 @@ private:
void EmitDeclMetadata();
- CodeGenModule::ByrefHelpers *
- buildByrefHelpers(llvm::StructType &byrefType,
- const AutoVarEmission &emission);
+ BlockByrefHelpers *buildByrefHelpers(llvm::StructType &byrefType,
+ const AutoVarEmission &emission);
void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
- /// GetPointeeAlignment - Given an expression with a pointer type, emit the
- /// value and compute our best estimate of the alignment of the pointee.
- std::pair<llvm::Value*, unsigned> EmitPointerWithAlignment(const Expr *Addr);
-
llvm::Value *GetValueForARMHint(unsigned BuiltinID);
};
@@ -3069,17 +3200,23 @@ struct DominatingLLVMValue {
static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
if (!needsSaving(value)) return saved_type(value, false);
- // Otherwise we need an alloca.
- llvm::Value *alloca =
- CGF.CreateTempAlloca(value->getType(), "cond-cleanup.save");
+ // Otherwise, we need an alloca.
+ auto align = CharUnits::fromQuantity(
+ CGF.CGM.getDataLayout().getPrefTypeAlignment(value->getType()));
+ Address alloca =
+ CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
CGF.Builder.CreateStore(value, alloca);
- return saved_type(alloca, true);
+ return saved_type(alloca.getPointer(), true);
}
static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
+ // If the value says it wasn't saved, trust that it's still dominating.
if (!value.getInt()) return value.getPointer();
- return CGF.Builder.CreateLoad(value.getPointer());
+
+ // Otherwise, it should be an alloca instruction, as set up in save().
+ auto alloca = cast<llvm::AllocaInst>(value.getPointer());
+ return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlignment());
}
};
@@ -3092,6 +3229,28 @@ template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
}
};
+/// A specialization of DominatingValue for Address.
+template <> struct DominatingValue<Address> {
+ typedef Address type;
+
+ struct saved_type {
+ DominatingLLVMValue::saved_type SavedValue;
+ CharUnits Alignment;
+ };
+
+ static bool needsSaving(type value) {
+ return DominatingLLVMValue::needsSaving(value.getPointer());
+ }
+ static saved_type save(CodeGenFunction &CGF, type value) {
+ return { DominatingLLVMValue::save(CGF, value.getPointer()),
+ value.getAlignment() };
+ }
+ static type restore(CodeGenFunction &CGF, saved_type value) {
+ return Address(DominatingLLVMValue::restore(CGF, value.SavedValue),
+ value.Alignment);
+ }
+};
+
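A usage sketch of the specialization above, round-tripping an Address so it stays valid when a cleanup is emitted along a different path (the wrapper name is illustrative):

```cpp
// Sketch: save/restore an Address through the conditional-cleanup machinery.
Address saveAndRestore(CodeGenFunction &CGF, Address Addr) {
  typedef DominatingValue<Address> DV;
  if (!DV::needsSaving(Addr))
    return Addr;                                 // already dominates
  DV::saved_type Saved = DV::save(CGF, Addr);    // spills pointer to an alloca
  return DV::restore(CGF, Saved);                // reloads, same alignment
}
```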
/// A specialization of DominatingValue for RValue.
template <> struct DominatingValue<RValue> {
typedef RValue type;
@@ -3100,15 +3259,17 @@ template <> struct DominatingValue<RValue> {
AggregateAddress, ComplexAddress };
llvm::Value *Value;
- Kind K;
- saved_type(llvm::Value *v, Kind k) : Value(v), K(k) {}
+ unsigned K : 3;
+ unsigned Align : 29;
+ saved_type(llvm::Value *v, Kind k, unsigned a = 0)
+ : Value(v), K(k), Align(a) {}
public:
static bool needsSaving(RValue value);
static saved_type save(CodeGenFunction &CGF, RValue value);
RValue restore(CodeGenFunction &CGF);
- // implementations in CGExprCXX.cpp
+ // implementations in CGCleanup.cpp
};
static bool needsSaving(type value) {
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 1177557aca0..e76ee12512e 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenModule.h"
+#include "CGBlocks.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCall.h"
@@ -106,7 +107,9 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
PointerAlignInBytes =
- C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
+ C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
+ IntAlignInBytes =
+ C.toCharUnitsFromBits(C.getTargetInfo().getIntAlign()).getQuantity();
IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
IntPtrTy = llvm::IntegerType::get(LLVMContext, PointerWidthInBits);
Int8PtrTy = Int8Ty->getPointerTo(0);
@@ -1303,7 +1306,7 @@ bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
return true;
}
-llvm::Constant *CodeGenModule::GetAddrOfUuidDescriptor(
+ConstantAddress CodeGenModule::GetAddrOfUuidDescriptor(
const CXXUuidofExpr* E) {
// Sema has verified that IIDSource has a __declspec(uuid()), and that it's
// well-formed.
@@ -1311,9 +1314,12 @@ llvm::Constant *CodeGenModule::GetAddrOfUuidDescriptor(
std::string Name = "_GUID_" + Uuid.lower();
std::replace(Name.begin(), Name.end(), '-', '_');
+ // The descriptor's widest field is 32 bits, so 4-byte alignment suffices.
+ CharUnits Alignment = CharUnits::fromQuantity(4);
+
// Look for an existing global.
if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
- return GV;
+ return ConstantAddress(GV, Alignment);
llvm::Constant *Init = EmitUuidofInitializer(Uuid);
assert(Init && "failed to initialize as constant");
@@ -1323,20 +1329,22 @@ llvm::Constant *CodeGenModule::GetAddrOfUuidDescriptor(
/*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
if (supportsCOMDAT())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
- return GV;
+ return ConstantAddress(GV, Alignment);
}
-llvm::Constant *CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
+ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
const AliasAttr *AA = VD->getAttr<AliasAttr>();
assert(AA && "No alias?");
+ CharUnits Alignment = getContext().getDeclAlign(VD);
llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());
// See if there is already something with the target's name in the module.
llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
if (Entry) {
unsigned AS = getContext().getTargetAddressSpace(VD->getType());
- return llvm::ConstantExpr::getBitCast(Entry, DeclTy->getPointerTo(AS));
+ auto Ptr = llvm::ConstantExpr::getBitCast(Entry, DeclTy->getPointerTo(AS));
+ return ConstantAddress(Ptr, Alignment);
}
llvm::Constant *Aliasee;
@@ -1353,7 +1361,7 @@ llvm::Constant *CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
F->setLinkage(llvm::Function::ExternalWeakLinkage);
WeakRefReferences.insert(F);
- return Aliasee;
+ return ConstantAddress(Aliasee, Alignment);
}
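
Several CodeGenModule entry points now return ConstantAddress rather than a bare llvm::Constant*. A minimal sketch of the concept, assuming it mirrors Address (the real class is defined in Address.h, not here):

```cpp
// Sketch only: an Address whose pointer is statically known to be constant.
class ConstantAddress : public Address {
public:
  ConstantAddress(llvm::Constant *Pointer, CharUnits Alignment)
      : Address(Pointer, Alignment) {}
  llvm::Constant *getPointer() const {
    return llvm::cast<llvm::Constant>(Address::getPointer());
  }
};
```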
void CodeGenModule::EmitGlobal(GlobalDecl GD) {
@@ -2732,7 +2740,7 @@ GetConstantStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
return *Map.insert(std::make_pair(String, nullptr)).first;
}
-llvm::Constant *
+ConstantAddress
CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
unsigned StringLength = 0;
bool isUTF16 = false;
@@ -2742,7 +2750,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
StringLength);
if (auto *C = Entry.second)
- return C;
+ return ConstantAddress(C, CharUnits::fromQuantity(C->getAlignment()));
llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
llvm::Constant *Zeros[] = { Zero, Zero };
@@ -2819,25 +2827,28 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
Ty = getTypes().ConvertType(getContext().LongTy);
Fields[3] = llvm::ConstantInt::get(Ty, StringLength);
+ CharUnits Alignment = getPointerAlign();
+
// The struct.
C = llvm::ConstantStruct::get(STy, Fields);
GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
llvm::GlobalVariable::PrivateLinkage, C,
"_unnamed_cfstring_");
GV->setSection("__DATA,__cfstring");
+ GV->setAlignment(Alignment.getQuantity());
Entry.second = GV;
- return GV;
+ return ConstantAddress(GV, Alignment);
}
-llvm::GlobalVariable *
+ConstantAddress
CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
unsigned StringLength = 0;
llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
GetConstantStringEntry(CFConstantStringMap, Literal, StringLength);
if (auto *C = Entry.second)
- return C;
+ return ConstantAddress(C, CharUnits::fromQuantity(C->getAlignment()));
llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
llvm::Constant *Zeros[] = { Zero, Zero };
@@ -2930,10 +2941,12 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
Fields[2] = llvm::ConstantInt::get(Ty, StringLength);
// The struct.
+ CharUnits Alignment = getPointerAlign();
C = llvm::ConstantStruct::get(NSConstantStringType, Fields);
GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
llvm::GlobalVariable::PrivateLinkage, C,
"_unnamed_nsstring_");
+ GV->setAlignment(Alignment.getQuantity());
const char *NSStringSection = "__OBJC,__cstring_object,regular,no_dead_strip";
const char *NSStringNonFragileABISection =
"__DATA,__objc_stringobj,regular,no_dead_strip";
@@ -2943,7 +2956,7 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
: NSStringSection);
Entry.second = GV;
- return GV;
+ return ConstantAddress(GV, Alignment);
}
QualType CodeGenModule::getObjCFastEnumerationStateType() {
@@ -3022,7 +3035,7 @@ CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
static llvm::GlobalVariable *
GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
CodeGenModule &CGM, StringRef GlobalName,
- unsigned Alignment) {
+ CharUnits Alignment) {
// OpenCL v1.2 s6.5.3: a string literal is in the constant address space.
unsigned AddrSpace = 0;
if (CGM.getLangOpts().OpenCL)
@@ -3033,7 +3046,7 @@ GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
auto *GV = new llvm::GlobalVariable(
M, C->getType(), !CGM.getLangOpts().WritableStrings, LT, C, GlobalName,
nullptr, llvm::GlobalVariable::NotThreadLocal, AddrSpace);
- GV->setAlignment(Alignment);
+ GV->setAlignment(Alignment.getQuantity());
GV->setUnnamedAddr(true);
if (GV->isWeakForLinker()) {
assert(CGM.supportsCOMDAT() && "Only COFF uses weak string literals");
@@ -3045,20 +3058,19 @@ GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
/// GetAddrOfConstantStringFromLiteral - Return a pointer to a
/// constant array for the given string literal.
-llvm::GlobalVariable *
+ConstantAddress
CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
StringRef Name) {
- auto Alignment =
- getContext().getAlignOfGlobalVarInChars(S->getType()).getQuantity();
+ CharUnits Alignment = getContext().getAlignOfGlobalVarInChars(S->getType());
llvm::Constant *C = GetConstantArrayFromStringLiteral(S);
llvm::GlobalVariable **Entry = nullptr;
if (!LangOpts.WritableStrings) {
Entry = &ConstantStringMap[C];
if (auto GV = *Entry) {
- if (Alignment > GV->getAlignment())
- GV->setAlignment(Alignment);
- return GV;
+ if (Alignment.getQuantity() > GV->getAlignment())
+ GV->setAlignment(Alignment.getQuantity());
+ return ConstantAddress(GV, Alignment);
}
}
@@ -3088,12 +3100,12 @@ CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
SanitizerMD->reportGlobalToASan(GV, S->getStrTokenLoc(0), "<string literal>",
QualType());
- return GV;
+ return ConstantAddress(GV, Alignment);
}
/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
/// array for the given ObjCEncodeExpr node.
-llvm::GlobalVariable *
+ConstantAddress
CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
std::string Str;
getContext().getObjCEncodingForType(E->getEncodedType(), Str);
@@ -3104,14 +3116,11 @@ CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
/// GetAddrOfConstantCString - Returns a pointer to a character array containing
/// the literal and a terminating '\0' character.
/// The result has a pointer-to-array type.
-llvm::GlobalVariable *CodeGenModule::GetAddrOfConstantCString(
- const std::string &Str, const char *GlobalName, unsigned Alignment) {
+ConstantAddress CodeGenModule::GetAddrOfConstantCString(
+ const std::string &Str, const char *GlobalName) {
StringRef StrWithNull(Str.c_str(), Str.size() + 1);
- if (Alignment == 0) {
- Alignment = getContext()
- .getAlignOfGlobalVarInChars(getContext().CharTy)
- .getQuantity();
- }
+ CharUnits Alignment =
+ getContext().getAlignOfGlobalVarInChars(getContext().CharTy);
llvm::Constant *C =
llvm::ConstantDataArray::getString(getLLVMContext(), StrWithNull, false);
@@ -3121,9 +3130,9 @@ llvm::GlobalVariable *CodeGenModule::GetAddrOfConstantCString(
if (!LangOpts.WritableStrings) {
Entry = &ConstantStringMap[C];
if (auto GV = *Entry) {
- if (Alignment > GV->getAlignment())
- GV->setAlignment(Alignment);
- return GV;
+ if (Alignment.getQuantity() > GV->getAlignment())
+ GV->setAlignment(Alignment.getQuantity());
+ return ConstantAddress(GV, Alignment);
}
}
@@ -3135,10 +3144,10 @@ llvm::GlobalVariable *CodeGenModule::GetAddrOfConstantCString(
GlobalName, Alignment);
if (Entry)
*Entry = GV;
- return GV;
+ return ConstantAddress(GV, Alignment);
}
-llvm::Constant *CodeGenModule::GetAddrOfGlobalTemporary(
+ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
const MaterializeTemporaryExpr *E, const Expr *Init) {
assert((E->getStorageDuration() == SD_Static ||
E->getStorageDuration() == SD_Thread) && "not a global temporary");
@@ -3150,8 +3159,10 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalTemporary(
if (Init == E->GetTemporaryExpr())
MaterializedType = E->getType();
+ CharUnits Align = getContext().getTypeAlignInChars(MaterializedType);
+
if (llvm::Constant *Slot = MaterializedGlobalTemporaryMap[E])
- return Slot;
+ return ConstantAddress(Slot, Align);
// FIXME: If an externally-visible declaration extends multiple temporaries,
// we need to give each temporary the same name in every translation unit (and
@@ -3215,14 +3226,13 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalTemporary(
/*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
AddrSpace);
setGlobalVisibility(GV, VD);
- GV->setAlignment(
- getContext().getTypeAlignInChars(MaterializedType).getQuantity());
+ GV->setAlignment(Align.getQuantity());
if (supportsCOMDAT() && GV->isWeakForLinker())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
if (VD->getTLSKind())
setTLSMode(GV, *VD);
MaterializedGlobalTemporaryMap[E] = GV;
- return GV;
+ return ConstantAddress(GV, Align);
}
/// EmitObjCPropertyImplementations - Emit information for synthesized
@@ -3676,7 +3686,7 @@ void CodeGenFunction::EmitDeclMetadata() {
for (auto &I : LocalDeclMap) {
const Decl *D = I.first;
- llvm::Value *Addr = I.second;
+ llvm::Value *Addr = I.second.getPointer();
if (auto *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) {
llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D);
Alloca->setMetadata(
@@ -3785,8 +3795,10 @@ void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
VD->getAnyInitializer() &&
!VD->getAnyInitializer()->isConstantInitializer(getContext(),
/*ForRef=*/false);
+
+ Address Addr(GetAddrOfGlobalVar(VD), getContext().getDeclAlign(VD));
if (auto InitFunction = getOpenMPRuntime().emitThreadPrivateVarDefinition(
- VD, GetAddrOfGlobalVar(VD), RefExpr->getLocStart(), PerformInit))
+ VD, Addr, RefExpr->getLocStart(), PerformInit))
CXXGlobalInits.push_back(InitFunction);
}
}
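
The hunk above shows the pattern this patch applies everywhere: call sites stop passing a raw llvm::Value* and instead pass an Address that carries the pointer together with its known alignment, so emitThreadPrivateVarDefinition no longer has to re-derive the alignment from the VarDecl. As a minimal standalone sketch of that pairing (not the real clang/lib/CodeGen/Address.h, which also exposes the pointee element type), with Value standing in for llvm::Value:

    #include <cassert>
    #include <cstdint>

    struct Value {};  // stand-in for llvm::Value, for illustration only

    // A pointer together with the alignment we are entitled to assume for it.
    class Address {
      Value *Pointer;
      uint64_t AlignInBytes;  // the real class uses clang::CharUnits
    public:
      Address(Value *P, uint64_t Align) : Pointer(P), AlignInBytes(Align) {
        assert(Align != 0 && "Address must carry a known, nonzero alignment");
      }
      Value *getPointer() const { return Pointer; }
      uint64_t getAlignment() const { return AlignInBytes; }
    };
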
diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h
index c35722a771d..fcc499392e5 100644
--- a/clang/lib/CodeGen/CodeGenModule.h
+++ b/clang/lib/CodeGen/CodeGenModule.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_LIB_CODEGEN_CODEGENMODULE_H
#include "CGVTables.h"
+#include "CodeGenTypeCache.h"
#include "CodeGenTypes.h"
#include "SanitizerMetadata.h"
#include "clang/AST/Attr.h"
@@ -30,7 +31,6 @@
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueHandle.h"
@@ -79,6 +79,7 @@ class CoverageSourceInfo;
namespace CodeGen {
+class BlockByrefHelpers;
class CallArgList;
class CodeGenFunction;
class CodeGenTBAA;
@@ -108,54 +109,6 @@ struct OrderGlobalInits {
}
};
-struct CodeGenTypeCache {
- /// void
- llvm::Type *VoidTy;
-
- /// i8, i16, i32, and i64
- llvm::IntegerType *Int8Ty, *Int16Ty, *Int32Ty, *Int64Ty;
- /// float, double
- llvm::Type *FloatTy, *DoubleTy;
-
- /// int
- llvm::IntegerType *IntTy;
-
- /// intptr_t, size_t, and ptrdiff_t, which we assume are the same size.
- union {
- llvm::IntegerType *IntPtrTy;
- llvm::IntegerType *SizeTy;
- llvm::IntegerType *PtrDiffTy;
- };
-
- /// void* in address space 0
- union {
- llvm::PointerType *VoidPtrTy;
- llvm::PointerType *Int8PtrTy;
- };
-
- /// void** in address space 0
- union {
- llvm::PointerType *VoidPtrPtrTy;
- llvm::PointerType *Int8PtrPtrTy;
- };
-
- /// The width of a pointer into the generic address space.
- unsigned char PointerWidthInBits;
-
- /// The size and alignment of a pointer into the generic address
- /// space.
- union {
- unsigned char PointerAlignInBytes;
- unsigned char PointerSizeInBytes;
- unsigned char SizeSizeInBytes; // sizeof(size_t)
- };
-
- llvm::CallingConv::ID RuntimeCC;
- llvm::CallingConv::ID getRuntimeCC() const { return RuntimeCC; }
- llvm::CallingConv::ID BuiltinCC;
- llvm::CallingConv::ID getBuiltinCC() const { return BuiltinCC; }
-};
-
struct RREntrypoints {
RREntrypoints() { memset(this, 0, sizeof(*this)); }
/// void objc_autoreleasePoolPop(void*);
@@ -735,13 +688,28 @@ public:
QualType CatchHandlerType);
/// Get the address of a uuid descriptor.
- llvm::Constant *GetAddrOfUuidDescriptor(const CXXUuidofExpr* E);
+ ConstantAddress GetAddrOfUuidDescriptor(const CXXUuidofExpr* E);
/// Get the address of the thunk for the given global decl.
llvm::Constant *GetAddrOfThunk(GlobalDecl GD, const ThunkInfo &Thunk);
/// Get a reference to the target of VD.
- llvm::Constant *GetWeakRefReference(const ValueDecl *VD);
+ ConstantAddress GetWeakRefReference(const ValueDecl *VD);
+
+ /// Returns the assumed alignment of an opaque pointer to the given class.
+ CharUnits getClassPointerAlignment(const CXXRecordDecl *CD);
+
+ /// Returns the assumed alignment of a virtual base of a class.
+ CharUnits getVBaseAlignment(CharUnits DerivedAlign,
+ const CXXRecordDecl *Derived,
+ const CXXRecordDecl *VBase);
+
+ /// Given a class pointer with an actual known alignment, and the
+ /// expected alignment of an object at a dynamic offset w.r.t. that
+ /// pointer, return the alignment to assume at the offset.
+ CharUnits getDynamicOffsetAlignment(CharUnits ActualAlign,
+ const CXXRecordDecl *Class,
+ CharUnits ExpectedTargetAlign);
CharUnits
computeNonVirtualBaseClassOffset(const CXXRecordDecl *DerivedClass,
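
The three helpers declared above encode the alignment reasoning used throughout the patch. A simplified model of the last one, getDynamicOffsetAlignment (an assumption for illustration; the real implementation also consults the class's own layout when the type is complete):

    #include <algorithm>

    // At an unknown (dynamic) offset from a pointer, an object can only be
    // assumed as aligned as both the pointer itself and the object's
    // expected alignment allow.
    unsigned dynamicOffsetAlignment(unsigned ActualAlign,
                                    unsigned ExpectedTargetAlign) {
      return std::min(ActualAlign, ExpectedTargetAlign);
    }
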
@@ -755,36 +723,7 @@ public:
CastExpr::path_const_iterator PathBegin,
CastExpr::path_const_iterator PathEnd);
- /// A pair of helper functions for a __block variable.
- class ByrefHelpers : public llvm::FoldingSetNode {
- public:
- llvm::Constant *CopyHelper;
- llvm::Constant *DisposeHelper;
-
- /// The alignment of the field. This is important because
- /// different offsets to the field within the byref struct need to
- /// have different helper functions.
- CharUnits Alignment;
-
- ByrefHelpers(CharUnits alignment) : Alignment(alignment) {}
- ByrefHelpers(const ByrefHelpers &) = default;
- virtual ~ByrefHelpers();
-
- void Profile(llvm::FoldingSetNodeID &id) const {
- id.AddInteger(Alignment.getQuantity());
- profileImpl(id);
- }
- virtual void profileImpl(llvm::FoldingSetNodeID &id) const = 0;
-
- virtual bool needsCopy() const { return true; }
- virtual void emitCopy(CodeGenFunction &CGF,
- llvm::Value *dest, llvm::Value *src) = 0;
-
- virtual bool needsDispose() const { return true; }
- virtual void emitDispose(CodeGenFunction &CGF, llvm::Value *field) = 0;
- };
-
- llvm::FoldingSet<ByrefHelpers> ByrefHelpersCache;
+ llvm::FoldingSet<BlockByrefHelpers> ByrefHelpersCache;
/// Fetches the global unique block count.
int getUniqueBlockCount() { return ++Block.GlobalUniqueCount; }
@@ -799,23 +738,23 @@ public:
llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *);
/// Return a pointer to a constant CFString object for the given string.
- llvm::Constant *GetAddrOfConstantCFString(const StringLiteral *Literal);
+ ConstantAddress GetAddrOfConstantCFString(const StringLiteral *Literal);
/// Return a pointer to a constant NSString object for the given string. Or a
/// user defined String object as defined via
/// -fconstant-string-class=class_name option.
- llvm::GlobalVariable *GetAddrOfConstantString(const StringLiteral *Literal);
+ ConstantAddress GetAddrOfConstantString(const StringLiteral *Literal);
/// Return a constant array for the given string.
llvm::Constant *GetConstantArrayFromStringLiteral(const StringLiteral *E);
/// Return a pointer to a constant array for the given string literal.
- llvm::GlobalVariable *
+ ConstantAddress
GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
StringRef Name = ".str");
/// Return a pointer to a constant array for the given ObjCEncodeExpr node.
- llvm::GlobalVariable *
+ ConstantAddress
GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *);
/// Returns a pointer to a character array containing the literal and a
@@ -823,18 +762,17 @@ public:
///
/// \param GlobalName If provided, the name to use for the global (if one is
/// created).
- llvm::GlobalVariable *
+ ConstantAddress
GetAddrOfConstantCString(const std::string &Str,
- const char *GlobalName = nullptr,
- unsigned Alignment = 0);
+ const char *GlobalName = nullptr);
/// Returns a pointer to a constant global variable for the given file-scope
/// compound literal expression.
- llvm::Constant *GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr*E);
+ ConstantAddress GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr*E);
/// \brief Returns a pointer to a global variable representing a temporary
/// with static or thread storage duration.
- llvm::Constant *GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E,
+ ConstantAddress GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E,
const Expr *Inner);
/// \brief Retrieve the record type that describes the state of an
diff --git a/clang/lib/CodeGen/CodeGenTypeCache.h b/clang/lib/CodeGen/CodeGenTypeCache.h
new file mode 100644
index 00000000000..c32b66d129d
--- /dev/null
+++ b/clang/lib/CodeGen/CodeGenTypeCache.h
@@ -0,0 +1,108 @@
+//===--- CodeGenTypeCache.h - Commonly used LLVM types and info -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This structure provides a set of common types useful during IR emission.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENTYPECACHE_H
+#define LLVM_CLANG_LIB_CODEGEN_CODEGENTYPECACHE_H
+
+#include "clang/AST/CharUnits.h"
+#include "llvm/IR/CallingConv.h"
+
+namespace llvm {
+ class Type;
+ class IntegerType;
+ class PointerType;
+}
+
+namespace clang {
+namespace CodeGen {
+
+/// This structure provides a set of types that are commonly used
+/// during IR emission. It's initialized once in CodeGenModule's
+/// constructor and then copied around into new CodeGenFunctions.
+struct CodeGenTypeCache {
+ /// void
+ llvm::Type *VoidTy;
+
+ /// i8, i16, i32, and i64
+ llvm::IntegerType *Int8Ty, *Int16Ty, *Int32Ty, *Int64Ty;
+ /// float, double
+ llvm::Type *FloatTy, *DoubleTy;
+
+ /// int
+ llvm::IntegerType *IntTy;
+
+ /// intptr_t, size_t, and ptrdiff_t, which we assume are the same size.
+ union {
+ llvm::IntegerType *IntPtrTy;
+ llvm::IntegerType *SizeTy;
+ llvm::IntegerType *PtrDiffTy;
+ };
+
+ /// void* in address space 0
+ union {
+ llvm::PointerType *VoidPtrTy;
+ llvm::PointerType *Int8PtrTy;
+ };
+
+ /// void** in address space 0
+ union {
+ llvm::PointerType *VoidPtrPtrTy;
+ llvm::PointerType *Int8PtrPtrTy;
+ };
+
+ /// The size and alignment of the builtin C type 'int'. This comes
+ /// up enough in various ABI lowering tasks to be worth pre-computing.
+ union {
+ unsigned char IntSizeInBytes;
+ unsigned char IntAlignInBytes;
+ };
+ CharUnits getIntSize() const {
+ return CharUnits::fromQuantity(IntSizeInBytes);
+ }
+ CharUnits getIntAlign() const {
+ return CharUnits::fromQuantity(IntAlignInBytes);
+ }
+
+ /// The width of a pointer into the generic address space.
+ unsigned char PointerWidthInBits;
+
+ /// The size and alignment of a pointer into the generic address space.
+ union {
+ unsigned char PointerAlignInBytes;
+ unsigned char PointerSizeInBytes;
+ unsigned char SizeSizeInBytes; // sizeof(size_t)
+ unsigned char SizeAlignInBytes;
+ };
+ CharUnits getSizeSize() const {
+ return CharUnits::fromQuantity(SizeSizeInBytes);
+ }
+ CharUnits getSizeAlign() const {
+ return CharUnits::fromQuantity(SizeAlignInBytes);
+ }
+ CharUnits getPointerSize() const {
+ return CharUnits::fromQuantity(PointerSizeInBytes);
+ }
+ CharUnits getPointerAlign() const {
+ return CharUnits::fromQuantity(PointerAlignInBytes);
+ }
+
+ llvm::CallingConv::ID RuntimeCC;
+ llvm::CallingConv::ID getRuntimeCC() const { return RuntimeCC; }
+ llvm::CallingConv::ID BuiltinCC;
+ llvm::CallingConv::ID getBuiltinCC() const { return BuiltinCC; }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
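
The CharUnits getters in the new header exist so that alignment-carrying APIs can be fed straight from the cache. A hypothetical helper (the name is made up; the CreateTempAlloca overload taking a CharUnits alignment appears in the ItaniumCXXABI changes below):

    // Allocate a size_t-sized temporary at size_t's natural alignment,
    // using the cached quantities instead of re-querying the data layout.
    Address createSizeTTemp(CodeGenFunction &CGF) {
      return CGF.CreateTempAlloca(CGF.SizeTy, CGF.getSizeAlign(), "size.tmp");
    }
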
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index b28ffbf3b77..0c6a6d751c7 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -69,6 +69,45 @@ public:
return RAA_Default;
}
+ bool isThisCompleteObject(GlobalDecl GD) const override {
+ // The Itanium ABI has separate complete-object vs. base-object
+ // variants of both constructors and destructors.
+ if (isa<CXXDestructorDecl>(GD.getDecl())) {
+ switch (GD.getDtorType()) {
+ case Dtor_Complete:
+ case Dtor_Deleting:
+ return true;
+
+ case Dtor_Base:
+ return false;
+
+ case Dtor_Comdat:
+ llvm_unreachable("emitting dtor comdat as function?");
+ }
+ llvm_unreachable("bad dtor kind");
+ }
+ if (isa<CXXConstructorDecl>(GD.getDecl())) {
+ switch (GD.getCtorType()) {
+ case Ctor_Complete:
+ return true;
+
+ case Ctor_Base:
+ return false;
+
+ case Ctor_CopyingClosure:
+ case Ctor_DefaultClosure:
+ llvm_unreachable("closure ctors in Itanium ABI?");
+
+ case Ctor_Comdat:
+ llvm_unreachable("emitting ctor comdat as function?");
+ }
+ llvm_unreachable("bad dtor kind");
+ }
+
+ // No other kinds.
+ return false;
+ }
+
bool isZeroInitializable(const MemberPointerType *MPT) override;
llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
@@ -76,13 +115,14 @@ public:
llvm::Value *
EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
const Expr *E,
- llvm::Value *&This,
+ Address This,
+ llvm::Value *&ThisPtrForCall,
llvm::Value *MemFnPtr,
const MemberPointerType *MPT) override;
llvm::Value *
EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
- llvm::Value *Base,
+ Address Base,
llvm::Value *MemPtr,
const MemberPointerType *MPT) override;
@@ -111,9 +151,22 @@ public:
const MemberPointerType *MPT) override;
void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
- llvm::Value *Ptr, QualType ElementType,
+ Address Ptr, QualType ElementType,
const CXXDestructorDecl *Dtor) override;
+ /// Itanium says that an _Unwind_Exception has to be "double-word"
+ /// aligned (and thus the end of it is also so-aligned), meaning 16
+ /// bytes. Of course, that was written for the actual Itanium,
+ /// which is a 64-bit platform. Classically, the ABI doesn't really
+ /// specify the alignment on other platforms, but in practice
+ /// libUnwind declares the struct with __attribute__((aligned)), so
+ /// we assume that alignment here. (It's generally 16 bytes, but
+ /// some targets override it.)
+ CharUnits getAlignmentOfExnObject() {
+ auto align = CGM.getContext().getTargetDefaultAlignForAttributeAligned();
+ return CGM.getContext().toCharUnitsFromBits(align);
+ }
+
void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
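
For concreteness, on a typical target where __attribute__((aligned)) defaults to 128 bits (an assumption; some targets differ, as the comment says):

    // getAlignmentOfExnObject(), worked through:
    //   getTargetDefaultAlignForAttributeAligned() == 128   // bits
    //   toCharUnitsFromBits(128) == CharUnits::fromQuantity(16)
    // so EmitAnyExprToExn treats the exception slot as 16-byte aligned.
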
@@ -135,25 +188,25 @@ public:
bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
void EmitBadTypeidCall(CodeGenFunction &CGF) override;
llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
- llvm::Value *ThisPtr,
+ Address ThisPtr,
llvm::Type *StdTypeInfoPtrTy) override;
bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
QualType SrcRecordTy) override;
- llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
+ llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy, QualType DestTy,
QualType DestRecordTy,
llvm::BasicBlock *CastEnd) override;
- llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, llvm::Value *Value,
+ llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy,
QualType DestTy) override;
bool EmitBadCastCall(CodeGenFunction &CGF) override;
llvm::Value *
- GetVirtualBaseClassOffset(CodeGenFunction &CGF, llvm::Value *This,
+ GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) override;
@@ -185,7 +238,7 @@ public:
void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
- bool Delegating, llvm::Value *This) override;
+ bool Delegating, Address This) override;
void emitVTableDefinitions(CodeGenVTables &CGVT,
const CXXRecordDecl *RD) override;
@@ -203,14 +256,13 @@ public:
CharUnits VPtrOffset) override;
llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
- llvm::Value *This,
- llvm::Type *Ty,
+ Address This, llvm::Type *Ty,
SourceLocation Loc) override;
llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *Dtor,
CXXDtorType DtorType,
- llvm::Value *This,
+ Address This,
const CXXMemberCallExpr *CE) override;
void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
@@ -225,10 +277,10 @@ public:
Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
}
- llvm::Value *performThisAdjustment(CodeGenFunction &CGF, llvm::Value *This,
+ llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
const ThisAdjustment &TA) override;
- llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret,
+ llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
const ReturnAdjustment &RA) override;
size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
@@ -242,13 +294,13 @@ public:
{ return "__cxa_deleted_virtual"; }
CharUnits getArrayCookieSizeImpl(QualType elementType) override;
- llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
- llvm::Value *NewPtr,
- llvm::Value *NumElements,
- const CXXNewExpr *expr,
- QualType ElementType) override;
+ Address InitializeArrayCookie(CodeGenFunction &CGF,
+ Address NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) override;
llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
- llvm::Value *allocPtr,
+ Address allocPtr,
CharUnits cookieSize) override;
void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
@@ -338,12 +390,12 @@ public:
QualType ResTy) override;
CharUnits getArrayCookieSizeImpl(QualType elementType) override;
- llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
- llvm::Value *NewPtr,
- llvm::Value *NumElements,
- const CXXNewExpr *expr,
- QualType ElementType) override;
- llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, llvm::Value *allocPtr,
+ Address InitializeArrayCookie(CodeGenFunction &CGF,
+ Address NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) override;
+ llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
CharUnits cookieSize) override;
};
@@ -439,7 +491,8 @@ ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
/// If the member is non-virtual, memptr.ptr is the address of
/// the function to call.
llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
- CodeGenFunction &CGF, const Expr *E, llvm::Value *&This,
+ CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
+ llvm::Value *&ThisPtrForCall,
llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
CGBuilderTy &Builder = CGF.Builder;
@@ -468,9 +521,11 @@ llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// Apply the adjustment and cast back to the original struct type
// for consistency.
+ llvm::Value *This = ThisAddr.getPointer();
llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
+ ThisPtrForCall = This;
// Load the function pointer.
llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
@@ -492,7 +547,11 @@ llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// Cast the adjusted this to a pointer to vtable pointer and load.
llvm::Type *VTableTy = Builder.getInt8PtrTy();
- llvm::Value *VTable = CGF.GetVTablePtr(This, VTableTy);
+ CharUnits VTablePtrAlign =
+ CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
+ CGF.getPointerAlign());
+ llvm::Value *VTable =
+ CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy);
// Apply the offset.
llvm::Value *VTableOffset = FnAsInt;
@@ -502,7 +561,9 @@ llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// Load the virtual function to call.
VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo());
- llvm::Value *VirtualFn = Builder.CreateLoad(VTable, "memptr.virtualfn");
+ llvm::Value *VirtualFn =
+ Builder.CreateAlignedLoad(VTable, CGF.getPointerAlign(),
+ "memptr.virtualfn");
CGF.EmitBranch(FnEnd);
// In the non-virtual path, the function pointer is actually a
@@ -522,24 +583,23 @@ llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
/// Compute an l-value by applying the given pointer-to-member to a
/// base object.
llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
- CodeGenFunction &CGF, const Expr *E, llvm::Value *Base, llvm::Value *MemPtr,
+ CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
const MemberPointerType *MPT) {
assert(MemPtr->getType() == CGM.PtrDiffTy);
CGBuilderTy &Builder = CGF.Builder;
- unsigned AS = Base->getType()->getPointerAddressSpace();
-
// Cast to char*.
- Base = Builder.CreateBitCast(Base, Builder.getInt8Ty()->getPointerTo(AS));
+ Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
// Apply the offset, which we assume is non-null.
- llvm::Value *Addr = Builder.CreateInBoundsGEP(Base, MemPtr, "memptr.offset");
+ llvm::Value *Addr =
+ Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset");
// Cast the address to the appropriate pointer type, adopting the
// address space of the base pointer.
- llvm::Type *PType
- = CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
+ llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
+ ->getPointerTo(Base.getAddressSpace());
return Builder.CreateBitCast(Addr, PType);
}
@@ -893,7 +953,8 @@ bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
// FIXME: Use canCopyArgument() when it is fixed to handle lazily declared
// special members.
if (RD->hasNonTrivialDestructor() || RD->hasNonTrivialCopyConstructor()) {
- FI.getReturnInfo() = ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
+ FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
return true;
}
return false;
@@ -909,7 +970,7 @@ bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
/// at entry -2 in the vtable.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *DE,
- llvm::Value *Ptr,
+ Address Ptr,
QualType ElementType,
const CXXDestructorDecl *Dtor) {
bool UseGlobalDelete = DE->isGlobalDelete();
@@ -923,11 +984,12 @@ void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
// Track back to entry -2 and pull out the offset there.
llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
VTable, -2, "complete-offset.ptr");
- llvm::LoadInst *Offset = CGF.Builder.CreateLoad(OffsetPtr);
- Offset->setAlignment(CGF.PointerAlignInBytes);
+ llvm::Value *Offset =
+ CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
// Apply the offset.
- llvm::Value *CompletePtr = CGF.Builder.CreateBitCast(Ptr, CGF.Int8PtrTy);
+ llvm::Value *CompletePtr =
+ CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);
// If we're supposed to call the global delete, make sure we do so
@@ -989,7 +1051,8 @@ void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
- CGF.EmitAnyExprToExn(E->getSubExpr(), ExceptionPtr);
+ CharUnits ExnAlign = getAlignmentOfExnObject();
+ CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));
// Now throw the exception.
llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
@@ -1113,14 +1176,14 @@ void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
QualType SrcRecordTy,
- llvm::Value *ThisPtr,
+ Address ThisPtr,
llvm::Type *StdTypeInfoPtrTy) {
llvm::Value *Value =
CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo());
// Load the type info.
Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
- return CGF.Builder.CreateLoad(Value);
+ return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
}
bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
@@ -1129,7 +1192,7 @@ bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
}
llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
- CodeGenFunction &CGF, llvm::Value *Value, QualType SrcRecordTy,
+ CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
llvm::Type *PtrDiffLTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
@@ -1148,6 +1211,7 @@ llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
// Emit the call to __dynamic_cast.
+ llvm::Value *Value = ThisAddr.getPointer();
Value = CGF.EmitCastToVoidPtr(Value);
llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
@@ -1171,7 +1235,7 @@ llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
}
llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
- llvm::Value *Value,
+ Address ThisAddr,
QualType SrcRecordTy,
QualType DestTy) {
llvm::Type *PtrDiffLTy =
@@ -1179,14 +1243,17 @@ llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
llvm::Type *DestLTy = CGF.ConvertType(DestTy);
// Get the vtable pointer.
- llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());
+ llvm::Value *VTable = CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo());
// Get the offset-to-top from the vtable.
llvm::Value *OffsetToTop =
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
- OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");
+ OffsetToTop =
+ CGF.Builder.CreateAlignedLoad(OffsetToTop, CGF.getPointerAlign(),
+ "offset.to.top");
// Finally, add the offset to the pointer.
+ llvm::Value *Value = ThisAddr.getPointer();
Value = CGF.EmitCastToVoidPtr(Value);
Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
@@ -1202,7 +1269,7 @@ bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
- llvm::Value *This,
+ Address This,
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) {
llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy);
@@ -1217,7 +1284,8 @@ ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
CGM.PtrDiffTy->getPointerTo());
llvm::Value *VBaseOffset =
- CGF.Builder.CreateLoad(VBaseOffsetPtr, "vbase.offset");
+ CGF.Builder.CreateAlignedLoad(VBaseOffsetPtr, CGF.getPointerAlign(),
+ "vbase.offset");
return VBaseOffset;
}
@@ -1328,7 +1396,7 @@ unsigned ItaniumCXXABI::addImplicitConstructorArgs(
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
- bool Delegating, llvm::Value *This) {
+ bool Delegating, Address This) {
GlobalDecl GD(DD, Type);
llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
@@ -1340,8 +1408,8 @@ void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
if (!Callee)
Callee = CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type));
- CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(), This, VTT,
- VTTTy, nullptr);
+ CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(),
+ This.getPointer(), VTT, VTTTy, nullptr);
}
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
@@ -1409,7 +1477,7 @@ llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
// And load the address point from the VTT.
- VTableAddressPoint = CGF.Builder.CreateLoad(VTT);
+ VTableAddressPoint = CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
} else {
llvm::Constant *VTable =
CGM.getCXXABI().getAddrOfVTable(VTableClass, CharUnits());
@@ -1473,7 +1541,7 @@ llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
llvm::Value *ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
GlobalDecl GD,
- llvm::Value *This,
+ Address This,
llvm::Type *Ty,
SourceLocation Loc) {
GD = GD.getCanonicalDecl();
@@ -1487,12 +1555,12 @@ llvm::Value *ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
llvm::Value *VFuncPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
- return CGF.Builder.CreateLoad(VFuncPtr);
+ return CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
}
llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
- llvm::Value *This, const CXXMemberCallExpr *CE) {
+ Address This, const CXXMemberCallExpr *CE) {
assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
@@ -1503,8 +1571,9 @@ llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
getVirtualFunctionPointer(CGF, GlobalDecl(Dtor, DtorType), This, Ty,
CE ? CE->getLocStart() : SourceLocation());
- CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(), This,
- /*ImplicitParam=*/nullptr, QualType(), CE);
+ CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(),
+ This.getPointer(), /*ImplicitParam=*/nullptr,
+ QualType(), CE);
return nullptr;
}
@@ -1528,29 +1597,28 @@ bool ItaniumCXXABI::canEmitAvailableExternallyVTable(
return !hasAnyUsedVirtualInlineFunction(RD);
}
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
- llvm::Value *Ptr,
+ Address InitialPtr,
int64_t NonVirtualAdjustment,
int64_t VirtualAdjustment,
bool IsReturnAdjustment) {
if (!NonVirtualAdjustment && !VirtualAdjustment)
- return Ptr;
+ return InitialPtr.getPointer();
- llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
- llvm::Value *V = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);
+ Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
+ // In a base-to-derived cast, the non-virtual adjustment is applied first.
if (NonVirtualAdjustment && !IsReturnAdjustment) {
- // Perform the non-virtual adjustment for a base-to-derived cast.
- V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
+ V = CGF.Builder.CreateConstInBoundsByteGEP(V,
+ CharUnits::fromQuantity(NonVirtualAdjustment));
}
+ // Perform the virtual adjustment if we have one.
+ llvm::Value *ResultPtr;
if (VirtualAdjustment) {
llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
- // Perform the virtual adjustment.
- llvm::Value *VTablePtrPtr =
- CGF.Builder.CreateBitCast(V, Int8PtrTy->getPointerTo());
-
+ Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
llvm::Value *OffsetPtr =
@@ -1559,23 +1627,28 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
// Load the adjustment offset from the vtable.
- llvm::Value *Offset = CGF.Builder.CreateLoad(OffsetPtr);
+ llvm::Value *Offset =
+ CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
// Adjust our pointer.
- V = CGF.Builder.CreateInBoundsGEP(V, Offset);
+ ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
+ } else {
+ ResultPtr = V.getPointer();
}
+ // In a derived-to-base conversion, the non-virtual adjustment is
+ // applied second.
if (NonVirtualAdjustment && IsReturnAdjustment) {
- // Perform the non-virtual adjustment for a derived-to-base cast.
- V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
+ ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
+ NonVirtualAdjustment);
}
// Cast back to the original type.
- return CGF.Builder.CreateBitCast(V, Ptr->getType());
+ return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
}
llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
- llvm::Value *This,
+ Address This,
const ThisAdjustment &TA) {
return performTypeAdjustment(CGF, This, TA.NonVirtual,
TA.Virtual.Itanium.VCallOffsetOffset,
@@ -1583,7 +1656,7 @@ llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
}
llvm::Value *
-ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret,
+ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
const ReturnAdjustment &RA) {
return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
RA.Virtual.Itanium.VBaseOffsetOffset,
@@ -1596,8 +1669,7 @@ void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
// Destructor thunks in the ARM ABI have indeterminate results.
- llvm::Type *T =
- cast<llvm::PointerType>(CGF.ReturnValue->getType())->getElementType();
+ llvm::Type *T = CGF.ReturnValue.getElementType();
RValue Undef = RValue::get(llvm::UndefValue::get(T));
return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
}
@@ -1611,18 +1683,17 @@ CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
CGM.getContext().getTypeAlignInChars(elementType));
}
-llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
- llvm::Value *NewPtr,
- llvm::Value *NumElements,
- const CXXNewExpr *expr,
- QualType ElementType) {
+Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ Address NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) {
assert(requiresArrayCookie(expr));
- unsigned AS = NewPtr->getType()->getPointerAddressSpace();
+ unsigned AS = NewPtr.getAddressSpace();
ASTContext &Ctx = getContext();
- QualType SizeTy = Ctx.getSizeType();
- CharUnits SizeSize = Ctx.getTypeSizeInChars(SizeTy);
+ CharUnits SizeSize = CGF.getSizeSize();
// The size of the cookie.
CharUnits CookieSize =
@@ -1630,49 +1701,45 @@ llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
assert(CookieSize == getArrayCookieSizeImpl(ElementType));
// Compute an offset to the cookie.
- llvm::Value *CookiePtr = NewPtr;
+ Address CookiePtr = NewPtr;
CharUnits CookieOffset = CookieSize - SizeSize;
if (!CookieOffset.isZero())
- CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_64(CookiePtr,
- CookieOffset.getQuantity());
+ CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
// Write the number of elements into the appropriate slot.
- llvm::Type *NumElementsTy = CGF.ConvertType(SizeTy)->getPointerTo(AS);
- llvm::Value *NumElementsPtr =
- CGF.Builder.CreateBitCast(CookiePtr, NumElementsTy);
+ Address NumElementsPtr =
+ CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
+
+ // Handle the array cookie specially in ASan.
if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
expr->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
// The store to the CookiePtr does not need to be instrumented.
CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGM.VoidTy, NumElementsTy, false);
+ llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
llvm::Constant *F =
CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
- CGF.Builder.CreateCall(F, NumElementsPtr);
+ CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
}
// Finally, compute a pointer to the actual data buffer by skipping
// over the cookie completely.
- return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
- CookieSize.getQuantity());
+ return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
}
llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
- llvm::Value *allocPtr,
+ Address allocPtr,
CharUnits cookieSize) {
// The number of elements is right-justified in the cookie.
- llvm::Value *numElementsPtr = allocPtr;
- CharUnits numElementsOffset =
- cookieSize - CharUnits::fromQuantity(CGF.SizeSizeInBytes);
+ Address numElementsPtr = allocPtr;
+ CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
if (!numElementsOffset.isZero())
numElementsPtr =
- CGF.Builder.CreateConstInBoundsGEP1_64(numElementsPtr,
- numElementsOffset.getQuantity());
+ CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
- unsigned AS = allocPtr->getType()->getPointerAddressSpace();
- numElementsPtr =
- CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
+ unsigned AS = allocPtr.getAddressSpace();
+ numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
return CGF.Builder.CreateLoad(numElementsPtr);
// In asan mode emit a function call instead of a regular load and let the
@@ -1684,7 +1751,7 @@ llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
llvm::Constant *F =
CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
- return CGF.Builder.CreateCall(F, numElementsPtr);
+ return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
}
CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
@@ -1700,47 +1767,41 @@ CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
CGM.getContext().getTypeAlignInChars(elementType));
}
-llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
- llvm::Value *newPtr,
- llvm::Value *numElements,
- const CXXNewExpr *expr,
- QualType elementType) {
+Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ Address newPtr,
+ llvm::Value *numElements,
+ const CXXNewExpr *expr,
+ QualType elementType) {
assert(requiresArrayCookie(expr));
- // NewPtr is a char*, but we generalize to arbitrary addrspaces.
- unsigned AS = newPtr->getType()->getPointerAddressSpace();
-
// The cookie is always at the start of the buffer.
- llvm::Value *cookie = newPtr;
+ Address cookie = newPtr;
// The first element is the element size.
- cookie = CGF.Builder.CreateBitCast(cookie, CGF.SizeTy->getPointerTo(AS));
+ cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
getContext().getTypeSizeInChars(elementType).getQuantity());
CGF.Builder.CreateStore(elementSize, cookie);
// The second element is the element count.
- cookie = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.SizeTy, cookie, 1);
+ cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1, CGF.getSizeSize());
CGF.Builder.CreateStore(numElements, cookie);
// Finally, compute a pointer to the actual data buffer by skipping
// over the cookie completely.
CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
- return CGF.Builder.CreateConstInBoundsGEP1_64(newPtr,
- cookieSize.getQuantity());
+ return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
}
llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
- llvm::Value *allocPtr,
+ Address allocPtr,
CharUnits cookieSize) {
// The number of elements is at offset sizeof(size_t) relative to
// the allocated pointer.
- llvm::Value *numElementsPtr
- = CGF.Builder.CreateConstInBoundsGEP1_64(allocPtr, CGF.SizeSizeInBytes);
+ Address numElementsPtr
+ = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
- unsigned AS = allocPtr->getType()->getPointerAddressSpace();
- numElementsPtr =
- CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
+ numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
return CGF.Builder.CreateLoad(numElementsPtr);
}
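
The two readArrayCookieImpl implementations above decode different cookie layouts for new T[n]. Sketched with size_t-sized slots (padding appears only when T is over-aligned):

    // Generic Itanium: the element count is right-justified, stored
    // immediately before the array data.
    //
    //   | ...padding... |   n   | T[0] T[1] ...
    //                           ^ pointer returned by new[]
    //
    // ARM variant: two fixed slots, element size then element count.
    //
    //   | sizeof(T) |   n   | ...padding... | T[0] T[1] ...
    //                                       ^ pointer returned by new[]
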
@@ -1810,12 +1871,21 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
llvm::IntegerType *guardTy;
+ CharUnits guardAlignment;
if (useInt8GuardVariable) {
guardTy = CGF.Int8Ty;
+ guardAlignment = CharUnits::One();
} else {
// Guard variables are 64 bits in the generic ABI and the width of size_t
// on ARM (i.e. 32 bits on AArch32, 64 bits on AArch64).
- guardTy = (UseARMGuardVarABI ? CGF.SizeTy : CGF.Int64Ty);
+ if (UseARMGuardVarABI) {
+ guardTy = CGF.SizeTy;
+ guardAlignment = CGF.getSizeAlign();
+ } else {
+ guardTy = CGF.Int64Ty;
+ guardAlignment = CharUnits::fromQuantity(
+ CGM.getDataLayout().getABITypeAlignment(guardTy));
+ }
}
llvm::PointerType *guardPtrTy = guardTy->getPointerTo();
@@ -1839,6 +1909,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
guard->setVisibility(var->getVisibility());
// If the variable is thread-local, so is its guard variable.
guard->setThreadLocalMode(var->getThreadLocalMode());
+ guard->setAlignment(guardAlignment.getQuantity());
// The ABI says: "It is suggested that it be emitted in the same COMDAT
// group as the associated data object." In practice, this doesn't work for
@@ -1855,6 +1926,8 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
CGM.setStaticLocalDeclGuardAddress(&D, guard);
}
+ Address guardAddr = Address(guard, guardAlignment);
+
// Test whether the variable has completed initialization.
//
// Itanium C++ ABI 3.3.2:
@@ -1874,8 +1947,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// Load the first byte of the guard variable.
llvm::LoadInst *LI =
- Builder.CreateLoad(Builder.CreateBitCast(guard, CGM.Int8PtrTy));
- LI->setAlignment(1);
+ Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
// Itanium ABI:
// An implementation supporting thread-safety on multiprocessor
@@ -1945,9 +2017,10 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
CGF.PopCleanupBlock();
// Call __cxa_guard_release. This cannot throw.
- CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy), guard);
+ CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
+ guardAddr.getPointer());
} else {
- Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guard);
+ Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);
}
CGF.EmitBlock(EndBlock);
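
In rough source-level terms, the guarded-initialization sequence emitted above is the classic double-checked pattern below. This is a sketch: it elides the acquire ordering the emitted load uses on thread-safe targets and the guard-abort cleanup pushed for exceptions, and Widget/lazyWidget are invented for the example; the real guard is the ABI's 64-bit (or, on ARM, size_t-wide) word:

    #include <new>

    extern "C" int  __cxa_guard_acquire(unsigned long long *);
    extern "C" void __cxa_guard_release(unsigned long long *);

    struct Widget { Widget(); };

    Widget &lazyWidget() {
      static unsigned long long guard;  // now emitted with explicit alignment
      alignas(Widget) static unsigned char storage[sizeof(Widget)];
      // Fast path: test the first byte of the guard.
      if (*reinterpret_cast<unsigned char *>(&guard) == 0) {
        if (__cxa_guard_acquire(&guard)) {  // nonzero: we must initialize
          new (storage) Widget();           // run the constructor once
          __cxa_guard_release(&guard);      // mark initialized, wake waiters
        }
      }
      return *reinterpret_cast<Widget *>(storage);
    }
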
@@ -2090,8 +2163,13 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
llvm::GlobalVariable::InternalLinkage,
llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
Guard->setThreadLocal(true);
+
+ CharUnits GuardAlign = CharUnits::One();
+ Guard->setAlignment(GuardAlign.getQuantity());
+
CodeGenFunction(CGM)
- .GenerateCXXGlobalInitFunc(InitFunc, CXXThreadLocalInits, Guard);
+ .GenerateCXXGlobalInitFunc(InitFunc, CXXThreadLocalInits,
+ Address(Guard, GuardAlign));
}
for (auto &I : CXXThreadLocals) {
const VarDecl *VD = I.first;
@@ -2137,7 +2215,7 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Var);
llvm::LLVMContext &Context = CGM.getModule().getContext();
llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
- CGBuilderTy Builder(Entry);
+ CGBuilderTy Builder(CGM, Entry);
if (InitIsInitFunc) {
if (Init)
Builder.CreateCall(Init);
@@ -2159,9 +2237,8 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
// the referenced object.
llvm::Value *Val = Var;
if (VD->getType()->isReferenceType()) {
- llvm::LoadInst *LI = Builder.CreateLoad(Val);
- LI->setAlignment(CGM.getContext().getDeclAlign(VD).getQuantity());
- Val = LI;
+ CharUnits Align = CGM.getContext().getDeclAlign(VD);
+ Val = Builder.CreateAlignedLoad(Val, Align);
}
if (Val->getType() != Wrapper->getReturnType())
Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
@@ -3418,7 +3495,7 @@ static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
/// parameter during catch initialization.
static void InitCatchParam(CodeGenFunction &CGF,
const VarDecl &CatchParam,
- llvm::Value *ParamAddr,
+ Address ParamAddr,
SourceLocation Loc) {
// Load the exception from where the landing pad saved it.
llvm::Value *Exn = CGF.getExceptionFromSlot();
@@ -3472,12 +3549,13 @@ static void InitCatchParam(CodeGenFunction &CGF,
cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
// Create the temporary and write the adjusted pointer into it.
- llvm::Value *ExnPtrTmp = CGF.CreateTempAlloca(PtrTy, "exn.byref.tmp");
+ Address ExnPtrTmp =
+ CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
CGF.Builder.CreateStore(Casted, ExnPtrTmp);
// Bind the reference to the temporary.
- AdjustedExn = ExnPtrTmp;
+ AdjustedExn = ExnPtrTmp.getPointer();
}
}
@@ -3522,8 +3600,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
- LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType,
- CGF.getContext().getDeclAlign(&CatchParam));
+ LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
switch (TEK) {
case TEK_Complex:
CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
@@ -3541,6 +3618,8 @@ static void InitCatchParam(CodeGenFunction &CGF,
}
assert(isa<RecordType>(CatchType) && "unexpected catch type!");
+ auto catchRD = CatchType->getAsCXXRecordDecl();
+ CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
@@ -3549,7 +3628,8 @@ static void InitCatchParam(CodeGenFunction &CGF,
const Expr *copyExpr = CatchParam.getInit();
if (!copyExpr) {
llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
- llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy);
+ Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
+ caughtExnAlignment);
CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType);
return;
}
@@ -3560,7 +3640,8 @@ static void InitCatchParam(CodeGenFunction &CGF,
CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
// Cast that to the appropriate type.
- llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy);
+ Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
+ caughtExnAlignment);
// The copy expression is defined in terms of an OpaqueValueExpr.
// Find it and map it to the adjusted expression.
@@ -3572,9 +3653,8 @@ static void InitCatchParam(CodeGenFunction &CGF,
CGF.EHStack.pushTerminate();
// Perform the copy construction.
- CharUnits Alignment = CGF.getContext().getDeclAlign(&CatchParam);
CGF.EmitAggExpr(copyExpr,
- AggValueSlot::forAddr(ParamAddr, Alignment, Qualifiers(),
+ AggValueSlot::forAddr(ParamAddr, Qualifiers(),
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased));
@@ -3658,7 +3738,7 @@ static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) {
// Set up the function.
llvm::BasicBlock *entry =
llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
- CGBuilderTy builder(entry);
+ CGBuilderTy builder(CGM, entry);
// Pull the exception pointer out of the parameter list.
llvm::Value *exn = &*fn->arg_begin();
diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 6a12cedd1bc..5cefc72ed0c 100644
--- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -56,6 +56,27 @@ public:
bool isSRetParameterAfterThis() const override { return true; }
+ bool isThisCompleteObject(GlobalDecl GD) const override {
+ // The Microsoft ABI doesn't use separate complete-object vs.
+ // base-object variants of constructors, but it does for destructors.
+ if (isa<CXXDestructorDecl>(GD.getDecl())) {
+ switch (GD.getDtorType()) {
+ case Dtor_Complete:
+ case Dtor_Deleting:
+ return true;
+
+ case Dtor_Base:
+ return false;
+
+ case Dtor_Comdat: llvm_unreachable("emitting dtor comdat as function?");
+ }
+ llvm_unreachable("bad dtor kind");
+ }
+
+ // No other kinds.
+ return false;
+ }
+
size_t getSrcArgforCopyCtor(const CXXConstructorDecl *CD,
FunctionArgList &Args) const override {
assert(Args.size() >= 2 &&
@@ -72,7 +93,7 @@ public:
StringRef GetDeletedVirtualCallName() override { return "_purecall"; }
void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
- llvm::Value *Ptr, QualType ElementType,
+ Address Ptr, QualType ElementType,
const CXXDestructorDecl *Dtor) override;
void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
@@ -90,18 +111,18 @@ public:
bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
void EmitBadTypeidCall(CodeGenFunction &CGF) override;
llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
- llvm::Value *ThisPtr,
+ Address ThisPtr,
llvm::Type *StdTypeInfoPtrTy) override;
bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
QualType SrcRecordTy) override;
- llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
+ llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy, QualType DestTy,
QualType DestRecordTy,
llvm::BasicBlock *CastEnd) override;
- llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, llvm::Value *Value,
+ llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy,
QualType DestTy) override;
@@ -112,7 +133,7 @@ public:
}
llvm::Value *
- GetVirtualBaseClassOffset(CodeGenFunction &CGF, llvm::Value *This,
+ GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) override;
@@ -186,9 +207,9 @@ public:
return MD->getParent();
}
- llvm::Value *
+ Address
adjustThisArgumentForVirtualFunctionCall(CodeGenFunction &CGF, GlobalDecl GD,
- llvm::Value *This,
+ Address This,
bool VirtualCall) override;
void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
@@ -207,7 +228,7 @@ public:
void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
- bool Delegating, llvm::Value *This) override;
+ bool Delegating, Address This) override;
void emitVTableBitSetEntries(VPtrInfo *Info, const CXXRecordDecl *RD,
llvm::GlobalVariable *VTable);
@@ -228,13 +249,13 @@ public:
CharUnits VPtrOffset) override;
llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
- llvm::Value *This, llvm::Type *Ty,
+ Address This, llvm::Type *Ty,
SourceLocation Loc) override;
llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *Dtor,
CXXDtorType DtorType,
- llvm::Value *This,
+ Address This,
const CXXMemberCallExpr *CE) override;
void adjustCallArgsForDestructorThunk(CodeGenFunction &CGF, GlobalDecl GD,
@@ -313,10 +334,10 @@ public:
Thunk->setLinkage(llvm::GlobalValue::LinkOnceODRLinkage);
}
- llvm::Value *performThisAdjustment(CodeGenFunction &CGF, llvm::Value *This,
+ llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
const ThisAdjustment &TA) override;
- llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret,
+ llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
const ReturnAdjustment &RA) override;
void EmitThreadLocalInitFuncs(
@@ -363,13 +384,13 @@ public:
QualType elementType) override;
bool requiresArrayCookie(const CXXNewExpr *expr) override;
CharUnits getArrayCookieSizeImpl(QualType type) override;
- llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
- llvm::Value *NewPtr,
- llvm::Value *NumElements,
- const CXXNewExpr *expr,
- QualType ElementType) override;
+ Address InitializeArrayCookie(CodeGenFunction &CGF,
+ Address NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) override;
llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
- llvm::Value *allocPtr,
+ Address allocPtr,
CharUnits cookieSize) override;
friend struct MSRTTIBuilder;
@@ -514,13 +535,13 @@ private:
/// the vbptr to the virtual base. Optionally returns the address of the
/// vbptr itself.
llvm::Value *GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
- llvm::Value *Base,
+ Address Base,
llvm::Value *VBPtrOffset,
llvm::Value *VBTableOffset,
llvm::Value **VBPtr = nullptr);
llvm::Value *GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
- llvm::Value *Base,
+ Address Base,
int32_t VBPtrOffset,
int32_t VBTableOffset,
llvm::Value **VBPtr = nullptr) {
@@ -530,14 +551,14 @@ private:
return GetVBaseOffsetFromVBPtr(CGF, Base, VBPOffset, VBTOffset, VBPtr);
}
- std::pair<llvm::Value *, llvm::Value *>
- performBaseAdjustment(CodeGenFunction &CGF, llvm::Value *Value,
+ std::pair<Address, llvm::Value *>
+ performBaseAdjustment(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy);
/// \brief Performs a full virtual base adjustment. Used to dereference
/// pointers to members of virtual bases.
llvm::Value *AdjustVirtualBase(CodeGenFunction &CGF, const Expr *E,
- const CXXRecordDecl *RD, llvm::Value *Base,
+ const CXXRecordDecl *RD, Address Base,
llvm::Value *VirtualBaseAdjustmentOffset,
llvm::Value *VBPtrOffset /* optional */);
@@ -603,7 +624,7 @@ public:
llvm::Value *
EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
- llvm::Value *Base, llvm::Value *MemPtr,
+ Address Base, llvm::Value *MemPtr,
const MemberPointerType *MPT) override;
llvm::Value *EmitNonNullMemberPointerConversion(
@@ -626,7 +647,8 @@ public:
llvm::Value *
EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, const Expr *E,
- llvm::Value *&This, llvm::Value *MemPtr,
+ Address This, llvm::Value *&ThisPtrForCall,
+ llvm::Value *MemPtr,
const MemberPointerType *MPT) override;
void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
@@ -826,7 +848,7 @@ MicrosoftCXXABI::getRecordArgABI(const CXXRecordDecl *RD) const {
void MicrosoftCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *DE,
- llvm::Value *Ptr,
+ Address Ptr,
QualType ElementType,
const CXXDestructorDecl *Dtor) {
// FIXME: Provide a source location here even though there's no
@@ -899,39 +921,52 @@ void MicrosoftCXXABI::emitBeginCatch(CodeGenFunction &CGF,
CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
if (!NewEH) {
- llvm::Value *ParamAddr =
- CGF.Builder.CreateBitCast(var.getObjectAddress(CGF), CGF.Int8PtrTy);
- llvm::Value *Args[2] = {Exn, ParamAddr};
+ Address ParamAddr =
+ CGF.Builder.CreateElementBitCast(var.getObjectAddress(CGF), CGF.Int8Ty);
+ llvm::Value *Args[2] = {Exn, ParamAddr.getPointer()};
CGF.EmitNounwindRuntimeCall(BeginCatch, Args);
} else {
- CPI->setArgOperand(1, var.getObjectAddress(CGF));
+ CPI->setArgOperand(1, var.getObjectAddress(CGF).getPointer());
}
CGF.EHStack.pushCleanup<CallEndCatchMSVC>(NormalCleanup, CPI);
CGF.EmitAutoVarCleanups(var);
}
-std::pair<llvm::Value *, llvm::Value *>
-MicrosoftCXXABI::performBaseAdjustment(CodeGenFunction &CGF, llvm::Value *Value,
+/// We need to perform a generic polymorphic operation (like a typeid
+/// or a cast), which requires an object with a vfptr. Adjust the
+/// address to point to an object with a vfptr.
+std::pair<Address, llvm::Value *>
+MicrosoftCXXABI::performBaseAdjustment(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy) {
Value = CGF.Builder.CreateBitCast(Value, CGF.Int8PtrTy);
const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
const ASTContext &Context = getContext();
+ // If the class itself has a vfptr, great. This check implicitly
+ // covers non-virtual base subobjects: a class with its own virtual
+ // functions would be a candidate to be a primary base.
if (Context.getASTRecordLayout(SrcDecl).hasExtendableVFPtr())
return std::make_pair(Value, llvm::ConstantInt::get(CGF.Int32Ty, 0));
- // Perform a base adjustment.
- const CXXBaseSpecifier *PolymorphicBase = std::find_if(
- SrcDecl->vbases_begin(), SrcDecl->vbases_end(),
- [&](const CXXBaseSpecifier &Base) {
- const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
- return Context.getASTRecordLayout(BaseDecl).hasExtendableVFPtr();
- });
- llvm::Value *Offset = GetVirtualBaseClassOffset(
- CGF, Value, SrcDecl, PolymorphicBase->getType()->getAsCXXRecordDecl());
- Value = CGF.Builder.CreateInBoundsGEP(Value, Offset);
+ // Okay, one of the vbases must have a vfptr, or else this isn't
+ // actually a polymorphic class.
+ const CXXRecordDecl *PolymorphicBase = nullptr;
+ for (auto &Base : SrcDecl->vbases()) {
+ const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
+ if (Context.getASTRecordLayout(BaseDecl).hasExtendableVFPtr()) {
+ PolymorphicBase = BaseDecl;
+ break;
+ }
+ }
+ assert(PolymorphicBase && "polymorphic class has no apparent vfptr?");
+
+ llvm::Value *Offset =
+ GetVirtualBaseClassOffset(CGF, Value, SrcDecl, PolymorphicBase);
+ llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(Value.getPointer(), Offset);
Offset = CGF.Builder.CreateTrunc(Offset, CGF.Int32Ty);
- return std::make_pair(Value, Offset);
+ CharUnits VBaseAlign =
+ CGF.CGM.getVBaseAlignment(Value.getAlignment(), SrcDecl, PolymorphicBase);
+ return std::make_pair(Address(Ptr, VBaseAlign), Offset);
}
bool MicrosoftCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
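
A concrete shape that exercises this path (the classes are invented for illustration): S has no vfptr of its own, only a vbptr, while its virtual base B carries the vfptr, so a typeid or dynamic_cast on an S* must first hop to the B subobject via the vbtable offset, which is exactly the pair performBaseAdjustment returns.

    struct B { virtual ~B(); };       // B carries the vfptr
    struct S : virtual B { int x; };  // S itself has only a vbptr
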
@@ -960,12 +995,12 @@ void MicrosoftCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
llvm::Value *MicrosoftCXXABI::EmitTypeid(CodeGenFunction &CGF,
QualType SrcRecordTy,
- llvm::Value *ThisPtr,
+ Address ThisPtr,
llvm::Type *StdTypeInfoPtrTy) {
llvm::Value *Offset;
std::tie(ThisPtr, Offset) = performBaseAdjustment(CGF, ThisPtr, SrcRecordTy);
- return CGF.Builder.CreateBitCast(
- emitRTtypeidCall(CGF, ThisPtr).getInstruction(), StdTypeInfoPtrTy);
+ auto Typeid = emitRTtypeidCall(CGF, ThisPtr.getPointer()).getInstruction();
+ return CGF.Builder.CreateBitCast(Typeid, StdTypeInfoPtrTy);
}
bool MicrosoftCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
@@ -976,7 +1011,7 @@ bool MicrosoftCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
}
llvm::Value *MicrosoftCXXABI::EmitDynamicCastCall(
- CodeGenFunction &CGF, llvm::Value *Value, QualType SrcRecordTy,
+ CodeGenFunction &CGF, Address This, QualType SrcRecordTy,
QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
llvm::Type *DestLTy = CGF.ConvertType(DestTy);
@@ -986,7 +1021,8 @@ llvm::Value *MicrosoftCXXABI::EmitDynamicCastCall(
CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
llvm::Value *Offset;
- std::tie(Value, Offset) = performBaseAdjustment(CGF, Value, SrcRecordTy);
+ std::tie(This, Offset) = performBaseAdjustment(CGF, This, SrcRecordTy);
+ llvm::Value *ThisPtr = This.getPointer();
// PVOID __RTDynamicCast(
// PVOID inptr,
@@ -1000,14 +1036,14 @@ llvm::Value *MicrosoftCXXABI::EmitDynamicCastCall(
llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false),
"__RTDynamicCast");
llvm::Value *Args[] = {
- Value, Offset, SrcRTTI, DestRTTI,
+ ThisPtr, Offset, SrcRTTI, DestRTTI,
llvm::ConstantInt::get(CGF.Int32Ty, DestTy->isReferenceType())};
- Value = CGF.EmitRuntimeCallOrInvoke(Function, Args).getInstruction();
- return CGF.Builder.CreateBitCast(Value, DestLTy);
+ ThisPtr = CGF.EmitRuntimeCallOrInvoke(Function, Args).getInstruction();
+ return CGF.Builder.CreateBitCast(ThisPtr, DestLTy);
}
llvm::Value *
-MicrosoftCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF, llvm::Value *Value,
+MicrosoftCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy,
QualType DestTy) {
llvm::Value *Offset;
@@ -1019,7 +1055,7 @@ MicrosoftCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF, llvm::Value *Value,
llvm::Constant *Function = CGF.CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false),
"__RTCastToVoid");
- llvm::Value *Args[] = {Value};
+ llvm::Value *Args[] = {Value.getPointer()};
return CGF.EmitRuntimeCall(Function, Args);
}
@@ -1028,7 +1064,7 @@ bool MicrosoftCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
}
llvm::Value *MicrosoftCXXABI::GetVirtualBaseClassOffset(
- CodeGenFunction &CGF, llvm::Value *This, const CXXRecordDecl *ClassDecl,
+ CodeGenFunction &CGF, Address This, const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) {
const ASTContext &Context = getContext();
int64_t VBPtrChars =
@@ -1066,15 +1102,16 @@ bool MicrosoftCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
if (!RD)
return false;
+ CharUnits Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
if (FI.isInstanceMethod()) {
// If it's an instance method, aggregates are always returned indirectly via
// the second parameter.
- FI.getReturnInfo() = ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
FI.getReturnInfo().setSRetAfterThis(FI.isInstanceMethod());
return true;
} else if (!RD->isPOD()) {
// If it's a free function, non-POD types are returned indirectly.
- FI.getReturnInfo() = ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
return true;
}
@@ -1126,8 +1163,7 @@ void MicrosoftCXXABI::initializeHiddenVirtualInheritanceMembers(
const VBOffsets &VBaseMap = Layout.getVBaseOffsetsMap();
CGBuilderTy &Builder = CGF.Builder;
- unsigned AS =
- cast<llvm::PointerType>(getThisValue(CGF)->getType())->getAddressSpace();
+ unsigned AS = getThisAddress(CGF).getAddressSpace();
llvm::Value *Int8This = nullptr; // Initialize lazily.
for (VBOffsets::const_iterator I = VBaseMap.begin(), E = VBaseMap.end();
@@ -1136,7 +1172,7 @@ void MicrosoftCXXABI::initializeHiddenVirtualInheritanceMembers(
continue;
llvm::Value *VBaseOffset =
- GetVirtualBaseClassOffset(CGF, getThisValue(CGF), RD, I->first);
+ GetVirtualBaseClassOffset(CGF, getThisAddress(CGF), RD, I->first);
// FIXME: it doesn't look right that we SExt in GetVirtualBaseClassOffset()
// just to Trunc back immediately.
VBaseOffset = Builder.CreateTruncOrBitCast(VBaseOffset, CGF.Int32Ty);
@@ -1157,7 +1193,8 @@ void MicrosoftCXXABI::initializeHiddenVirtualInheritanceMembers(
VtorDispPtr = Builder.CreateBitCast(
VtorDispPtr, CGF.Int32Ty->getPointerTo(AS), "vtordisp.ptr");
- Builder.CreateStore(VtorDispValue, VtorDispPtr);
+ Builder.CreateAlignedStore(VtorDispValue, VtorDispPtr,
+ CharUnits::fromQuantity(4));
}
}
@@ -1188,8 +1225,8 @@ void MicrosoftCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF,
const CXXRecordDecl *RD) {
- llvm::Value *ThisInt8Ptr =
- CGF.Builder.CreateBitCast(getThisValue(CGF), CGM.Int8PtrTy, "this.int8");
+ Address This = getThisAddress(CGF);
+ This = CGF.Builder.CreateElementBitCast(This, CGM.Int8Ty, "this.int8");
const ASTContext &Context = getContext();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
@@ -1203,11 +1240,10 @@ void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF,
Offs += SubobjectLayout.getVBPtrOffset();
if (VBT->getVBaseWithVPtr())
Offs += Layout.getVBaseClassOffset(VBT->getVBaseWithVPtr());
- llvm::Value *VBPtr =
- CGF.Builder.CreateConstInBoundsGEP1_64(ThisInt8Ptr, Offs.getQuantity());
+ Address VBPtr = CGF.Builder.CreateConstInBoundsByteGEP(This, Offs);
llvm::Value *GVPtr =
CGF.Builder.CreateConstInBoundsGEP2_32(GV->getValueType(), GV, 0, 0);
- VBPtr = CGF.Builder.CreateBitCast(VBPtr, GVPtr->getType()->getPointerTo(0),
+ VBPtr = CGF.Builder.CreateElementBitCast(VBPtr, GVPtr->getType(),
"vbptr." + VBT->ReusingBase->getName());
CGF.Builder.CreateStore(GVPtr, VBPtr);
}
@@ -1281,8 +1317,9 @@ MicrosoftCXXABI::getVirtualFunctionPrologueThisAdjustment(GlobalDecl GD) {
return Adjustment;
}
-llvm::Value *MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
- CodeGenFunction &CGF, GlobalDecl GD, llvm::Value *This, bool VirtualCall) {
+Address MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
+ CodeGenFunction &CGF, GlobalDecl GD, Address This,
+ bool VirtualCall) {
if (!VirtualCall) {
// If the call of a virtual function is not virtual, we just have to
// compensate for the adjustment the virtual function does in its prologue.
@@ -1290,11 +1327,9 @@ llvm::Value *MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
if (Adjustment.isZero())
return This;
- unsigned AS = cast<llvm::PointerType>(This->getType())->getAddressSpace();
- llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS);
- This = CGF.Builder.CreateBitCast(This, charPtrTy);
+ This = CGF.Builder.CreateElementBitCast(This, CGF.Int8Ty);
assert(Adjustment.isPositive());
- return CGF.Builder.CreateConstGEP1_32(This, Adjustment.getQuantity());
+ return CGF.Builder.CreateConstByteGEP(This, Adjustment);
}
GD = GD.getCanonicalDecl();
@@ -1314,8 +1349,6 @@ llvm::Value *MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
MicrosoftVTableContext::MethodVFTableLocation ML =
CGM.getMicrosoftVTableContext().getMethodVFTableLocation(LookupGD);
- unsigned AS = cast<llvm::PointerType>(This->getType())->getAddressSpace();
- llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS);
CharUnits StaticOffset = ML.VFPtrOffset;
// Base destructors expect 'this' to point to the beginning of the base
@@ -1324,27 +1357,34 @@ llvm::Value *MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
StaticOffset = CharUnits::Zero();
+ Address Result = This;
if (ML.VBase) {
- This = CGF.Builder.CreateBitCast(This, charPtrTy);
+ Result = CGF.Builder.CreateElementBitCast(Result, CGF.Int8Ty);
+
+ const CXXRecordDecl *Derived = MD->getParent();
+ const CXXRecordDecl *VBase = ML.VBase;
llvm::Value *VBaseOffset =
- GetVirtualBaseClassOffset(CGF, This, MD->getParent(), ML.VBase);
- This = CGF.Builder.CreateInBoundsGEP(This, VBaseOffset);
+ GetVirtualBaseClassOffset(CGF, Result, Derived, VBase);
+ llvm::Value *VBasePtr =
+ CGF.Builder.CreateInBoundsGEP(Result.getPointer(), VBaseOffset);
+ CharUnits VBaseAlign =
+ CGF.CGM.getVBaseAlignment(Result.getAlignment(), Derived, VBase);
+ Result = Address(VBasePtr, VBaseAlign);
}
if (!StaticOffset.isZero()) {
assert(StaticOffset.isPositive());
- This = CGF.Builder.CreateBitCast(This, charPtrTy);
+ Result = CGF.Builder.CreateElementBitCast(Result, CGF.Int8Ty);
if (ML.VBase) {
// Non-virtual adjustment might result in a pointer outside the allocated
// object, e.g. if the final overrider class is laid out after the virtual
// base that declares a method in the most derived class.
// FIXME: Update the code that emits this adjustment in thunks prologues.
- This = CGF.Builder.CreateConstGEP1_32(This, StaticOffset.getQuantity());
+ Result = CGF.Builder.CreateConstByteGEP(Result, StaticOffset);
} else {
- This = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, This,
- StaticOffset.getQuantity());
+ Result = CGF.Builder.CreateConstInBoundsByteGEP(Result, StaticOffset);
}
}
- return This;
+ return Result;
}
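// Source-level sketch of the two-step adjustment performed above
// (hypothetical hierarchy):
//
//   struct A { virtual void f(); };
//   struct B { virtual void g(); };
//   struct C : A, virtual B { void g() override; };
//
// A virtual call to g() on a C* first hops to the virtual B subobject
// (the ML.VBase step) and may then add a static offset when the vfptr
// used for the call is laid out past the start of that base.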
void MicrosoftCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
@@ -1465,7 +1505,7 @@ unsigned MicrosoftCXXABI::addImplicitConstructorArgs(
void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
- bool Delegating, llvm::Value *This) {
+ bool Delegating, Address This) {
llvm::Value *Callee = CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type));
if (DD->isVirtual()) {
@@ -1475,7 +1515,7 @@ void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
This, false);
}
- CGF.EmitCXXStructorCall(DD, Callee, ReturnValueSlot(), This,
+ CGF.EmitCXXStructorCall(DD, Callee, ReturnValueSlot(), This.getPointer(),
/*ImplicitParam=*/nullptr,
/*ImplicitParamTy=*/QualType(), nullptr,
getFromDtorType(Type));
@@ -1772,14 +1812,14 @@ getClassAtVTableLocation(ASTContext &Ctx, GlobalDecl GD,
llvm::Value *MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
GlobalDecl GD,
- llvm::Value *This,
+ Address This,
llvm::Type *Ty,
SourceLocation Loc) {
GD = GD.getCanonicalDecl();
CGBuilderTy &Builder = CGF.Builder;
Ty = Ty->getPointerTo()->getPointerTo();
- llvm::Value *VPtr =
+ Address VPtr =
adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
llvm::Value *VTable = CGF.GetVTablePtr(VPtr, Ty);
@@ -1791,12 +1831,12 @@ llvm::Value *MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
llvm::Value *VFuncPtr =
Builder.CreateConstInBoundsGEP1_64(VTable, ML.Index, "vfn");
- return Builder.CreateLoad(VFuncPtr);
+ return Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
}
llvm::Value *MicrosoftCXXABI::EmitVirtualDestructorCall(
CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
- llvm::Value *This, const CXXMemberCallExpr *CE) {
+ Address This, const CXXMemberCallExpr *CE) {
assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
@@ -1815,7 +1855,8 @@ llvm::Value *MicrosoftCXXABI::EmitVirtualDestructorCall(
DtorType == Dtor_Deleting);
This = adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
- RValue RV = CGF.EmitCXXStructorCall(Dtor, Callee, ReturnValueSlot(), This,
+ RValue RV = CGF.EmitCXXStructorCall(Dtor, Callee, ReturnValueSlot(),
+ This.getPointer(),
ImplicitParam, Context.IntTy, CE,
StructorType::Deleting);
return RV.getScalarVal();
@@ -1907,10 +1948,11 @@ llvm::Function *MicrosoftCXXABI::EmitVirtualMemPtrThunk(
// Load the vfptr and then callee from the vftable. The callee should have
// adjusted 'this' so that the vfptr is at offset zero.
llvm::Value *VTable = CGF.GetVTablePtr(
- getThisValue(CGF), ThunkTy->getPointerTo()->getPointerTo());
+ getThisAddress(CGF), ThunkTy->getPointerTo()->getPointerTo());
llvm::Value *VFuncPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, ML.Index, "vfn");
- llvm::Value *Callee = CGF.Builder.CreateLoad(VFuncPtr);
+ llvm::Value *Callee =
+ CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
CGF.EmitMustTailThunk(MD, getThisValue(CGF), Callee);
@@ -2002,22 +2044,30 @@ void MicrosoftCXXABI::emitVBTableDefinition(const VPtrInfo &VBT,
}
llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
- llvm::Value *This,
+ Address This,
const ThisAdjustment &TA) {
if (TA.isEmpty())
- return This;
+ return This.getPointer();
- llvm::Value *V = CGF.Builder.CreateBitCast(This, CGF.Int8PtrTy);
+ This = CGF.Builder.CreateElementBitCast(This, CGF.Int8Ty);
- if (!TA.Virtual.isEmpty()) {
+ llvm::Value *V;
+ if (TA.Virtual.isEmpty()) {
+ V = This.getPointer();
+ } else {
assert(TA.Virtual.Microsoft.VtordispOffset < 0);
// Adjust the this argument based on the vtordisp value.
- llvm::Value *VtorDispPtr =
- CGF.Builder.CreateConstGEP1_32(V, TA.Virtual.Microsoft.VtordispOffset);
- VtorDispPtr =
- CGF.Builder.CreateBitCast(VtorDispPtr, CGF.Int32Ty->getPointerTo());
+ Address VtorDispPtr =
+ CGF.Builder.CreateConstInBoundsByteGEP(This,
+ CharUnits::fromQuantity(TA.Virtual.Microsoft.VtordispOffset));
+ VtorDispPtr = CGF.Builder.CreateElementBitCast(VtorDispPtr, CGF.Int32Ty);
llvm::Value *VtorDisp = CGF.Builder.CreateLoad(VtorDispPtr, "vtordisp");
- V = CGF.Builder.CreateGEP(V, CGF.Builder.CreateNeg(VtorDisp));
+ V = CGF.Builder.CreateGEP(This.getPointer(),
+ CGF.Builder.CreateNeg(VtorDisp));
+
+ // Unfortunately, having applied the vtordisp means that we no
+ // longer really have a known alignment for the vbptr step.
+ // We'll assume the vbptr is pointer-aligned.
if (TA.Virtual.Microsoft.VBPtrOffset) {
// If the final overrider is defined in a virtual base other than the one
@@ -2027,7 +2077,8 @@ llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
assert(TA.Virtual.Microsoft.VBOffsetOffset >= 0);
llvm::Value *VBPtr;
llvm::Value *VBaseOffset =
- GetVBaseOffsetFromVBPtr(CGF, V, -TA.Virtual.Microsoft.VBPtrOffset,
+ GetVBaseOffsetFromVBPtr(CGF, Address(V, CGF.getPointerAlign()),
+ -TA.Virtual.Microsoft.VBPtrOffset,
TA.Virtual.Microsoft.VBOffsetOffset, &VBPtr);
V = CGF.Builder.CreateInBoundsGEP(VBPtr, VBaseOffset);
}
@@ -2045,20 +2096,21 @@ llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
}
llvm::Value *
-MicrosoftCXXABI::performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret,
+MicrosoftCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
const ReturnAdjustment &RA) {
if (RA.isEmpty())
- return Ret;
+ return Ret.getPointer();
- llvm::Value *V = CGF.Builder.CreateBitCast(Ret, CGF.Int8PtrTy);
+ auto OrigTy = Ret.getType();
+ Ret = CGF.Builder.CreateElementBitCast(Ret, CGF.Int8Ty);
+ llvm::Value *V = Ret.getPointer();
if (RA.Virtual.Microsoft.VBIndex) {
assert(RA.Virtual.Microsoft.VBIndex > 0);
- const ASTContext &Context = getContext();
- int32_t IntSize = Context.getTypeSizeInChars(Context.IntTy).getQuantity();
+ int32_t IntSize = CGF.getIntSize().getQuantity();
llvm::Value *VBPtr;
llvm::Value *VBaseOffset =
- GetVBaseOffsetFromVBPtr(CGF, V, RA.Virtual.Microsoft.VBPtrOffset,
+ GetVBaseOffsetFromVBPtr(CGF, Ret, RA.Virtual.Microsoft.VBPtrOffset,
IntSize * RA.Virtual.Microsoft.VBIndex, &VBPtr);
V = CGF.Builder.CreateInBoundsGEP(VBPtr, VBaseOffset);
}
@@ -2067,7 +2119,7 @@ MicrosoftCXXABI::performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret,
V = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, V, RA.NonVirtual);
// Cast back to the original type.
- return CGF.Builder.CreateBitCast(V, Ret->getType());
+ return CGF.Builder.CreateBitCast(V, OrigTy);
}
bool MicrosoftCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr,
@@ -2092,37 +2144,34 @@ CharUnits MicrosoftCXXABI::getArrayCookieSizeImpl(QualType type) {
}
llvm::Value *MicrosoftCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
- llvm::Value *allocPtr,
+ Address allocPtr,
CharUnits cookieSize) {
- unsigned AS = allocPtr->getType()->getPointerAddressSpace();
- llvm::Value *numElementsPtr =
- CGF.Builder.CreateBitCast(allocPtr, CGF.SizeTy->getPointerTo(AS));
+ Address numElementsPtr =
+ CGF.Builder.CreateElementBitCast(allocPtr, CGF.SizeTy);
return CGF.Builder.CreateLoad(numElementsPtr);
}
-llvm::Value* MicrosoftCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
- llvm::Value *newPtr,
- llvm::Value *numElements,
- const CXXNewExpr *expr,
- QualType elementType) {
+Address MicrosoftCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ Address newPtr,
+ llvm::Value *numElements,
+ const CXXNewExpr *expr,
+ QualType elementType) {
assert(requiresArrayCookie(expr));
// The size of the cookie.
CharUnits cookieSize = getArrayCookieSizeImpl(elementType);
// Compute an offset to the cookie.
- llvm::Value *cookiePtr = newPtr;
+ Address cookiePtr = newPtr;
// Write the number of elements into the appropriate slot.
- unsigned AS = newPtr->getType()->getPointerAddressSpace();
- llvm::Value *numElementsPtr
- = CGF.Builder.CreateBitCast(cookiePtr, CGF.SizeTy->getPointerTo(AS));
+ Address numElementsPtr
+ = CGF.Builder.CreateElementBitCast(cookiePtr, CGF.SizeTy);
CGF.Builder.CreateStore(numElements, numElementsPtr);
// Finally, compute a pointer to the actual data buffer by skipping
// over the cookie completely.
- return CGF.Builder.CreateConstInBoundsGEP1_64(newPtr,
- cookieSize.getQuantity());
+ return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
}
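// Resulting allocation layout for `new T[n]` when T requires a cookie
// (sketch; assumes a 64-bit size_t):
//
//   [ size_t n ][ T[0] ][ T[1] ] ...
//   ^ newPtr     ^ pointer returned by the new-expression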
static void emitGlobalDtorWithTLRegDtor(CodeGenFunction &CGF, const VarDecl &VD,
@@ -2205,17 +2254,18 @@ LValue MicrosoftCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
return LValue();
}
-static llvm::GlobalVariable *getInitThreadEpochPtr(CodeGenModule &CGM) {
+static ConstantAddress getInitThreadEpochPtr(CodeGenModule &CGM) {
StringRef VarName("_Init_thread_epoch");
+ CharUnits Align = CGM.getIntAlign();
if (auto *GV = CGM.getModule().getNamedGlobal(VarName))
- return GV;
+ return ConstantAddress(GV, Align);
auto *GV = new llvm::GlobalVariable(
CGM.getModule(), CGM.IntTy,
/*Constant=*/false, llvm::GlobalVariable::ExternalLinkage,
/*Initializer=*/nullptr, VarName,
/*InsertBefore=*/nullptr, llvm::GlobalVariable::GeneralDynamicTLSModel);
- GV->setAlignment(CGM.getTarget().getIntAlign() / 8);
- return GV;
+ GV->setAlignment(Align.getQuantity());
+ return ConstantAddress(GV, Align);
}
static llvm::Constant *getInitThreadHeaderFn(CodeGenModule &CGM) {
@@ -2253,9 +2303,9 @@ static llvm::Constant *getInitThreadAbortFn(CodeGenModule &CGM) {
namespace {
struct ResetGuardBit final : EHScopeStack::Cleanup {
- llvm::GlobalVariable *Guard;
+ Address Guard;
unsigned GuardNum;
- ResetGuardBit(llvm::GlobalVariable *Guard, unsigned GuardNum)
+ ResetGuardBit(Address Guard, unsigned GuardNum)
: Guard(Guard), GuardNum(GuardNum) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
@@ -2270,8 +2320,8 @@ struct ResetGuardBit final : EHScopeStack::Cleanup {
};
struct CallInitThreadAbort final : EHScopeStack::Cleanup {
- llvm::GlobalVariable *Guard;
- CallInitThreadAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
+ llvm::Value *Guard;
+ CallInitThreadAbort(Address Guard) : Guard(Guard.getPointer()) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Calling _Init_thread_abort will reset the guard's state.
@@ -2304,6 +2354,7 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
CGBuilderTy &Builder = CGF.Builder;
llvm::IntegerType *GuardTy = CGF.Int32Ty;
llvm::ConstantInt *Zero = llvm::ConstantInt::get(GuardTy, 0);
+ CharUnits GuardAlign = CharUnits::fromQuantity(4);
// Get the guard variable for this function if we have one already.
GuardInfo *GI = nullptr;
@@ -2353,6 +2404,7 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
GV->getLinkage(), Zero, GuardName.str());
GuardVar->setVisibility(GV->getVisibility());
GuardVar->setDLLStorageClass(GV->getDLLStorageClass());
+ GuardVar->setAlignment(GuardAlign.getQuantity());
if (GuardVar->isWeakForLinker())
GuardVar->setComdat(
CGM.getModule().getOrInsertComdat(GuardVar->getName()));
@@ -2362,6 +2414,8 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
GI->Guard = GuardVar;
}
+ ConstantAddress GuardAddr(GuardVar, GuardAlign);
+
assert(GuardVar->getLinkage() == GV->getLinkage() &&
"static local from the same function had different linkage");
@@ -2374,7 +2428,7 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
// Test our bit from the guard variable.
llvm::ConstantInt *Bit = llvm::ConstantInt::get(GuardTy, 1U << GuardNum);
- llvm::LoadInst *LI = Builder.CreateLoad(GuardVar);
+ llvm::LoadInst *LI = Builder.CreateLoad(GuardAddr);
llvm::Value *IsInitialized =
Builder.CreateICmpNE(Builder.CreateAnd(LI, Bit), Zero);
llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
@@ -2384,8 +2438,8 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
// Set our bit in the guard variable and emit the initializer and add a global
// destructor if appropriate.
CGF.EmitBlock(InitBlock);
- Builder.CreateStore(Builder.CreateOr(LI, Bit), GuardVar);
- CGF.EHStack.pushCleanup<ResetGuardBit>(EHCleanup, GuardVar, GuardNum);
+ Builder.CreateStore(Builder.CreateOr(LI, Bit), GuardAddr);
+ CGF.EHStack.pushCleanup<ResetGuardBit>(EHCleanup, GuardAddr, GuardNum);
CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit);
CGF.PopCleanupBlock();
Builder.CreateBr(EndBlock);
@@ -2405,11 +2459,8 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
// The algorithm is almost identical to what can be found in the appendix
// found in N2325.
- unsigned IntAlign = CGM.getTarget().getIntAlign() / 8;
-
// This BasicBLock determines whether or not we have any work to do.
- llvm::LoadInst *FirstGuardLoad =
- Builder.CreateAlignedLoad(GuardVar, IntAlign);
+ llvm::LoadInst *FirstGuardLoad = Builder.CreateLoad(GuardAddr);
FirstGuardLoad->setOrdering(llvm::AtomicOrdering::Unordered);
llvm::LoadInst *InitThreadEpoch =
Builder.CreateLoad(getInitThreadEpochPtr(CGM));
@@ -2422,9 +2473,9 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
// This BasicBlock attempts to determine whether or not this thread is
// responsible for doing the initialization.
CGF.EmitBlock(AttemptInitBlock);
- CGF.EmitNounwindRuntimeCall(getInitThreadHeaderFn(CGM), GuardVar);
- llvm::LoadInst *SecondGuardLoad =
- Builder.CreateAlignedLoad(GuardVar, IntAlign);
+ CGF.EmitNounwindRuntimeCall(getInitThreadHeaderFn(CGM),
+ GuardAddr.getPointer());
+ llvm::LoadInst *SecondGuardLoad = Builder.CreateLoad(GuardAddr);
SecondGuardLoad->setOrdering(llvm::AtomicOrdering::Unordered);
llvm::Value *ShouldDoInit =
Builder.CreateICmpEQ(SecondGuardLoad, getAllOnesInt());
@@ -2433,10 +2484,11 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
// Ok, we ended up getting selected as the initializing thread.
CGF.EmitBlock(InitBlock);
- CGF.EHStack.pushCleanup<CallInitThreadAbort>(EHCleanup, GuardVar);
+ CGF.EHStack.pushCleanup<CallInitThreadAbort>(EHCleanup, GuardAddr);
CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit);
CGF.PopCleanupBlock();
- CGF.EmitNounwindRuntimeCall(getInitThreadFooterFn(CGM), GuardVar);
+ CGF.EmitNounwindRuntimeCall(getInitThreadFooterFn(CGM),
+ GuardAddr.getPointer());
Builder.CreateBr(EndBlock);
CGF.EmitBlock(EndBlock);
@@ -2791,19 +2843,28 @@ bool MicrosoftCXXABI::MemberPointerConstantIsNull(const MemberPointerType *MPT,
llvm::Value *
MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
- llvm::Value *This,
+ Address This,
llvm::Value *VBPtrOffset,
llvm::Value *VBTableOffset,
llvm::Value **VBPtrOut) {
CGBuilderTy &Builder = CGF.Builder;
// Load the vbtable pointer from the vbptr in the instance.
- This = Builder.CreateBitCast(This, CGM.Int8PtrTy);
+ This = Builder.CreateElementBitCast(This, CGM.Int8Ty);
llvm::Value *VBPtr =
- Builder.CreateInBoundsGEP(This, VBPtrOffset, "vbptr");
+ Builder.CreateInBoundsGEP(This.getPointer(), VBPtrOffset, "vbptr");
if (VBPtrOut) *VBPtrOut = VBPtr;
VBPtr = Builder.CreateBitCast(VBPtr,
- CGM.Int32Ty->getPointerTo(0)->getPointerTo(0));
- llvm::Value *VBTable = Builder.CreateLoad(VBPtr, "vbtable");
+ CGM.Int32Ty->getPointerTo(0)->getPointerTo(This.getAddressSpace()));
+
+ CharUnits VBPtrAlign;
+ if (auto CI = dyn_cast<llvm::ConstantInt>(VBPtrOffset)) {
+ VBPtrAlign = This.getAlignment().alignmentAtOffset(
+ CharUnits::fromQuantity(CI->getSExtValue()));
+ } else {
+ VBPtrAlign = CGF.getPointerAlign();
+ }
+
+  llvm::Value *VBTable =
+      Builder.CreateAlignedLoad(VBPtr, VBPtrAlign, "vbtable");
// Translate from byte offset to table index. It improves analyzability.
llvm::Value *VBTableIndex = Builder.CreateAShr(
@@ -2813,16 +2874,17 @@ MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
// Load an i32 offset from the vb-table.
llvm::Value *VBaseOffs = Builder.CreateInBoundsGEP(VBTable, VBTableIndex);
VBaseOffs = Builder.CreateBitCast(VBaseOffs, CGM.Int32Ty->getPointerTo(0));
- return Builder.CreateLoad(VBaseOffs, "vbase_offs");
+ return Builder.CreateAlignedLoad(VBaseOffs, CharUnits::fromQuantity(4),
+ "vbase_offs");
}
// Returns an adjusted base cast to i8*, since we do more address arithmetic on
// it.
llvm::Value *MicrosoftCXXABI::AdjustVirtualBase(
CodeGenFunction &CGF, const Expr *E, const CXXRecordDecl *RD,
- llvm::Value *Base, llvm::Value *VBTableOffset, llvm::Value *VBPtrOffset) {
+ Address Base, llvm::Value *VBTableOffset, llvm::Value *VBPtrOffset) {
CGBuilderTy &Builder = CGF.Builder;
- Base = Builder.CreateBitCast(Base, CGM.Int8PtrTy);
+ Base = Builder.CreateElementBitCast(Base, CGM.Int8Ty);
llvm::BasicBlock *OriginalBB = nullptr;
llvm::BasicBlock *SkipAdjustBB = nullptr;
llvm::BasicBlock *VBaseAdjustBB = nullptr;
@@ -2867,7 +2929,7 @@ llvm::Value *MicrosoftCXXABI::AdjustVirtualBase(
Builder.CreateBr(SkipAdjustBB);
CGF.EmitBlock(SkipAdjustBB);
llvm::PHINode *Phi = Builder.CreatePHI(CGM.Int8PtrTy, 2, "memptr.base");
- Phi->addIncoming(Base, OriginalBB);
+ Phi->addIncoming(Base.getPointer(), OriginalBB);
Phi->addIncoming(AdjustedBase, VBaseAdjustBB);
return Phi;
}
@@ -2875,10 +2937,10 @@ llvm::Value *MicrosoftCXXABI::AdjustVirtualBase(
}
llvm::Value *MicrosoftCXXABI::EmitMemberDataPointerAddress(
- CodeGenFunction &CGF, const Expr *E, llvm::Value *Base, llvm::Value *MemPtr,
+ CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
const MemberPointerType *MPT) {
assert(MPT->isMemberDataPointer());
- unsigned AS = Base->getType()->getPointerAddressSpace();
+ unsigned AS = Base.getAddressSpace();
llvm::Type *PType =
CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
CGBuilderTy &Builder = CGF.Builder;
@@ -2900,17 +2962,19 @@ llvm::Value *MicrosoftCXXABI::EmitMemberDataPointerAddress(
VirtualBaseAdjustmentOffset = Builder.CreateExtractValue(MemPtr, I++);
}
+ llvm::Value *Addr;
if (VirtualBaseAdjustmentOffset) {
- Base = AdjustVirtualBase(CGF, E, RD, Base, VirtualBaseAdjustmentOffset,
+ Addr = AdjustVirtualBase(CGF, E, RD, Base, VirtualBaseAdjustmentOffset,
VBPtrOffset);
+ } else {
+ Addr = Base.getPointer();
}
// Cast to char*.
- Base = Builder.CreateBitCast(Base, Builder.getInt8Ty()->getPointerTo(AS));
+ Addr = Builder.CreateBitCast(Addr, CGF.Int8Ty->getPointerTo(AS));
// Apply the offset, which we assume is non-null.
- llvm::Value *Addr =
- Builder.CreateInBoundsGEP(Base, FieldOffset, "memptr.offset");
+ Addr = Builder.CreateInBoundsGEP(Addr, FieldOffset, "memptr.offset");
// Cast the address to the appropriate pointer type, adopting the address
// space of the base pointer.
@@ -3073,7 +3137,8 @@ llvm::Value *MicrosoftCXXABI::EmitNonNullMemberPointerConversion(
} else {
llvm::Value *Idxs[] = {getZeroInt(), VBIndex};
VirtualBaseAdjustmentOffset =
- Builder.CreateLoad(Builder.CreateInBoundsGEP(VDispMap, Idxs));
+ Builder.CreateAlignedLoad(Builder.CreateInBoundsGEP(VDispMap, Idxs),
+ CharUnits::fromQuantity(4));
}
DstVBIndexEqZero =
@@ -3154,7 +3219,7 @@ llvm::Constant *MicrosoftCXXABI::EmitMemberPointerConversion(
if (CK == CK_ReinterpretMemberPointer)
return Src;
- CGBuilderTy Builder(CGM.getLLVMContext());
+ CGBuilderTy Builder(CGM, CGM.getLLVMContext());
auto *Dst = cast<llvm::Constant>(EmitNonNullMemberPointerConversion(
SrcTy, DstTy, CK, PathBegin, PathEnd, Src, Builder));
@@ -3162,8 +3227,9 @@ llvm::Constant *MicrosoftCXXABI::EmitMemberPointerConversion(
}
llvm::Value *MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer(
- CodeGenFunction &CGF, const Expr *E, llvm::Value *&This,
- llvm::Value *MemPtr, const MemberPointerType *MPT) {
+ CodeGenFunction &CGF, const Expr *E, Address This,
+ llvm::Value *&ThisPtrForCall, llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
assert(MPT->isMemberFunctionPointer());
const FunctionProtoType *FPT =
MPT->getPointeeType()->castAs<FunctionProtoType>();
@@ -3194,15 +3260,18 @@ llvm::Value *MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer(
}
if (VirtualBaseAdjustmentOffset) {
- This = AdjustVirtualBase(CGF, E, RD, This, VirtualBaseAdjustmentOffset,
- VBPtrOffset);
+ ThisPtrForCall = AdjustVirtualBase(CGF, E, RD, This,
+ VirtualBaseAdjustmentOffset, VBPtrOffset);
+ } else {
+ ThisPtrForCall = This.getPointer();
}
if (NonVirtualBaseAdjustment) {
// Apply the adjustment and cast back to the original struct type.
- llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
+ llvm::Value *Ptr = Builder.CreateBitCast(ThisPtrForCall, CGF.Int8PtrTy);
Ptr = Builder.CreateInBoundsGEP(Ptr, NonVirtualBaseAdjustment);
- This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
+ ThisPtrForCall = Builder.CreateBitCast(Ptr, ThisPtrForCall->getType(),
+ "this.adjusted");
}
return Builder.CreateBitCast(FunctionPointer, FTy->getPointerTo());
@@ -4122,7 +4191,7 @@ void MicrosoftCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
QualType ThrowType = SubExpr->getType();
// The exception object lives on the stack and its address is passed to the
// runtime function.
- llvm::AllocaInst *AI = CGF.CreateMemTemp(ThrowType);
+ Address AI = CGF.CreateMemTemp(ThrowType);
CGF.EmitAnyExprToMem(SubExpr, AI, ThrowType.getQualifiers(),
/*IsInit=*/true);
@@ -4131,6 +4200,9 @@ void MicrosoftCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
llvm::GlobalVariable *TI = getThrowInfo(ThrowType);
// Call into the runtime to throw the exception.
- llvm::Value *Args[] = {CGF.Builder.CreateBitCast(AI, CGM.Int8PtrTy), TI};
+ llvm::Value *Args[] = {
+ CGF.Builder.CreateBitCast(AI.getPointer(), CGM.Int8PtrTy),
+ TI
+ };
CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(), Args);
}
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index 70b02dbebdc..4e2e6f5f496 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -39,7 +39,7 @@ static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
llvm::Value *Cell =
Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
- Builder.CreateStore(Value, Cell);
+ Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
}
}
@@ -48,6 +48,19 @@ static bool isAggregateTypeForABI(QualType T) {
T->isMemberFunctionPointerType();
}
+ABIArgInfo
+ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign,
+ llvm::Type *Padding) const {
+ return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
+ ByRef, Realign, Padding);
+}
+
+ABIArgInfo
+ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
+ return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
+ /*ByRef*/ false, Realign);
+}
+
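+// What the "natural alignment" helpers above buy us (illustrative): for a
+// type with alignof(T) == 16 on the target, getNaturalAlignIndirect(Ty)
+// records CharUnits::fromQuantity(16) directly in the ABIArgInfo, instead
+// of the old convention of passing 0 and recomputing the alignment at each
+// use site.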
ABIInfo::~ABIInfo() {}
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
@@ -133,7 +146,7 @@ void ABIArgInfo::dump() const {
OS << "InAlloca Offset=" << getInAllocaFieldIndex();
break;
case Indirect:
- OS << "Indirect Align=" << getIndirectAlign()
+ OS << "Indirect Align=" << getIndirectAlign().getQuantity()
<< " ByVal=" << getIndirectByVal()
<< " Realign=" << getIndirectRealign();
break;
@@ -144,6 +157,125 @@ void ABIArgInfo::dump() const {
OS << ")\n";
}
+/// Emit va_arg for a platform using the common void* representation,
+/// where arguments are simply emitted in an array of slots on the stack.
+///
+/// This version implements the core direct-value passing rules.
+///
+/// \param SlotSize - The size and alignment of a stack slot.
+///   Each argument will occupy a whole number of these slots, and all
+///   the slots will be aligned to this value.
+/// \param AllowHigherAlign - The slot alignment is not a cap;
+/// an argument type with an alignment greater than the slot size
+/// will be emitted on a higher-alignment address, potentially
+/// leaving one or more empty slots behind as padding. If this
+/// is false, the returned address might be less-aligned than
+/// DirectAlign.
+static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
+ Address VAListAddr,
+ llvm::Type *DirectTy,
+ CharUnits DirectSize,
+ CharUnits DirectAlign,
+ CharUnits SlotSize,
+ bool AllowHigherAlign) {
+ // Cast the element type to i8* if necessary. Some platforms define
+ // va_list as a struct containing an i8* instead of just an i8*.
+ if (VAListAddr.getElementType() != CGF.Int8PtrTy)
+ VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);
+
+ llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");
+
+  // If the CC gives arguments an alignment greater than the slot size,
+  // round the pointer up to that alignment when allowed and needed.
+ Address Addr = Address::invalid();
+ if (AllowHigherAlign && DirectAlign > SlotSize) {
+ llvm::Value *PtrAsInt = Ptr;
+ PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
+ PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
+ llvm::ConstantInt::get(CGF.IntPtrTy, DirectAlign.getQuantity() - 1));
+ PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
+ llvm::ConstantInt::get(CGF.IntPtrTy, -DirectAlign.getQuantity()));
+ Addr = Address(CGF.Builder.CreateIntToPtr(PtrAsInt, Ptr->getType(),
+ "argp.cur.aligned"),
+ DirectAlign);
+ } else {
+ Addr = Address(Ptr, SlotSize);
+ }
+
+ // Advance the pointer past the argument, then store that back.
+ CharUnits FullDirectSize = DirectSize.RoundUpToAlignment(SlotSize);
+ llvm::Value *NextPtr =
+ CGF.Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), FullDirectSize,
+ "argp.next");
+ CGF.Builder.CreateStore(NextPtr, VAListAddr);
+
+ // If the argument is smaller than a slot, and this is a big-endian
+ // target, the argument will be right-adjusted in its slot.
+ if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian()) {
+ Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
+ }
+
+ Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
+ return Addr;
+}
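+
+// The "aligned up" path above is the classic power-of-two round-up applied
+// to the pointer's integer value; a host-side sketch of the same arithmetic
+// (assumes Align is a power of two, as CharUnits alignments always are):
+//
+//   uint64_t alignUp(uint64_t Ptr, uint64_t Align) {
+//     return (Ptr + Align - 1) & -Align;
+//   }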
+
+/// Emit va_arg for a platform using the common void* representation,
+/// where arguments are simply emitted in an array of slots on the stack.
+///
+/// \param IsIndirect - Values of this type are passed indirectly.
+/// \param ValueInfo - The size and alignment of this type, generally
+/// computed with getContext().getTypeInfoInChars(ValueTy).
+/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
+///   Each argument will occupy a whole number of these slots, and all
+///   the slots will be aligned to this value.
+/// \param AllowHigherAlign - The slot alignment is not a cap;
+/// an argument type with an alignment greater than the slot size
+/// will be emitted on a higher-alignment address, potentially
+/// leaving one or more empty slots behind as padding.
+static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType ValueTy, bool IsIndirect,
+ std::pair<CharUnits, CharUnits> ValueInfo,
+ CharUnits SlotSizeAndAlign,
+ bool AllowHigherAlign) {
+ // The size and alignment of the value that was passed directly.
+ CharUnits DirectSize, DirectAlign;
+ if (IsIndirect) {
+ DirectSize = CGF.getPointerSize();
+ DirectAlign = CGF.getPointerAlign();
+ } else {
+ DirectSize = ValueInfo.first;
+ DirectAlign = ValueInfo.second;
+ }
+
+ // Cast the address we've calculated to the right type.
+ llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
+ if (IsIndirect)
+ DirectTy = DirectTy->getPointerTo(0);
+
+ Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
+ DirectSize, DirectAlign,
+ SlotSizeAndAlign,
+ AllowHigherAlign);
+
+ if (IsIndirect) {
+ Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);
+ }
+
+  return Addr;
+}
+
+static Address emitMergePHI(CodeGenFunction &CGF,
+ Address Addr1, llvm::BasicBlock *Block1,
+ Address Addr2, llvm::BasicBlock *Block2,
+ const llvm::Twine &Name = "") {
+ assert(Addr1.getType() == Addr2.getType());
+ llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
+ PHI->addIncoming(Addr1.getPointer(), Block1);
+ PHI->addIncoming(Addr2.getPointer(), Block2);
+ CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
+ return Address(PHI, Align);
+}
+
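+// Usage sketch for emitMergePHI (hypothetical block and address names):
+// merging the in-register and in-memory va_arg paths, where the merged
+// address can only be assumed to have the weaker of the two alignments:
+//
+//   Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
+//                                  MemAddr, InMemBlock, "vaarg.addr");
+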
TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
// If someone can figure out a general rule for this, that would be great.
@@ -394,8 +526,8 @@ public:
I.info = classifyArgumentType(I.type);
}
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
};
class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -404,9 +536,9 @@ public:
: TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};
-llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- return nullptr;
+Address DefaultABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return Address::invalid();
}
ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
@@ -416,9 +548,9 @@ ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
// Records with non-trivial destructors/copy-constructors should not be
// passed by value.
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(Ty);
}
// Treat an enum type as its underlying type.
@@ -434,7 +566,7 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getIgnore();
if (isAggregateTypeForABI(RetTy))
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
@@ -482,10 +614,8 @@ ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
if (isAggregateTypeForABI(Ty)) {
// Records with non-trivial destructors/copy-constructors should not be
// passed by value.
- unsigned TypeAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
- return ABIArgInfo::getIndirect(TypeAlign,
- RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
// Ignore empty structs/unions.
if (isEmptyRecord(getContext(), Ty, true))
return ABIArgInfo::getIgnore();
@@ -494,7 +624,7 @@ ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
// though watch out for things like bitfields.
if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
- return ABIArgInfo::getIndirect(TypeAlign);
+ return getNaturalAlignIndirect(Ty);
}
// Otherwise just do the default thing.
@@ -536,8 +666,8 @@ class PNaClABIInfo : public ABIInfo {
ABIArgInfo classifyArgumentType(QualType RetTy) const;
void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF,
+ Address VAListAddr, QualType Ty) const override;
};
class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -554,17 +684,17 @@ void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
I.info = classifyArgumentType(I.type);
}
-llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- return nullptr;
+Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return Address::invalid();
}
/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
if (isAggregateTypeForABI(Ty)) {
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty);
} else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
// Treat an enum type as its underlying type.
Ty = EnumTy->getDecl()->getIntegerType();
@@ -583,7 +713,7 @@ ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
// In the PNaCl ABI we always return records/structures on the stack.
if (isAggregateTypeForABI(RetTy))
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
@@ -687,7 +817,7 @@ class X86_32ABIInfo : public ABIInfo {
/// such that the argument will be passed in memory.
ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
- ABIArgInfo getIndirectReturnResult(CCState &State) const;
+ ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;
/// \brief Return the alignment to use for the given type on the stack.
unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
@@ -702,14 +832,14 @@ class X86_32ABIInfo : public ABIInfo {
void rewriteWithInAlloca(CGFunctionInfo &FI) const;
void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
- unsigned &StackOffset, ABIArgInfo &Info,
+ CharUnits &StackOffset, ABIArgInfo &Info,
QualType Type) const;
public:
void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
unsigned r)
@@ -893,14 +1023,14 @@ bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
return true;
}
-ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(CCState &State) const {
+ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
+                                                  CCState &State) const {
// If the return value is indirect, then the hidden argument is consuming one
// integer register.
if (State.FreeRegs) {
--State.FreeRegs;
- return ABIArgInfo::getIndirectInReg(/*Align=*/0, /*ByVal=*/false);
+ return getNaturalAlignIndirectInReg(RetTy);
}
- return ABIArgInfo::getIndirect(/*Align=*/0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
}
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
@@ -935,7 +1065,7 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
Size));
- return getIndirectReturnResult(State);
+ return getIndirectReturnResult(RetTy, State);
}
return ABIArgInfo::getDirect();
@@ -945,12 +1075,12 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
if (const RecordType *RT = RetTy->getAs<RecordType>()) {
// Structures with flexible arrays are always indirect.
if (RT->getDecl()->hasFlexibleArrayMember())
- return getIndirectReturnResult(State);
+ return getIndirectReturnResult(RetTy, State);
}
// If specified, structs and unions are always indirect.
if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
- return getIndirectReturnResult(State);
+ return getIndirectReturnResult(RetTy, State);
// Small structures which are register sized are generally returned
// in a register.
@@ -972,7 +1102,7 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
}
- return getIndirectReturnResult(State);
+ return getIndirectReturnResult(RetTy, State);
}
// Treat an enum type as its underlying type.
@@ -1038,21 +1168,22 @@ ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
if (!ByVal) {
if (State.FreeRegs) {
--State.FreeRegs; // Non-byval indirects just use one pointer.
- return ABIArgInfo::getIndirectInReg(0, false);
+ return getNaturalAlignIndirectInReg(Ty);
}
- return ABIArgInfo::getIndirect(0, false);
+ return getNaturalAlignIndirect(Ty, false);
}
// Compute the byval alignment.
unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
if (StackAlign == 0)
- return ABIArgInfo::getIndirect(4, /*ByVal=*/true);
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);
// If the stack alignment is less than the type alignment, realign the
// argument.
bool Realign = TypeAlign > StackAlign;
- return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, Realign);
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
+ /*ByVal=*/true, Realign);
}
X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
@@ -1259,22 +1390,23 @@ void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
- unsigned &StackOffset,
- ABIArgInfo &Info, QualType Type) const {
- assert(StackOffset % 4U == 0 && "unaligned inalloca struct");
+ CharUnits &StackOffset, ABIArgInfo &Info,
+ QualType Type) const {
+ // Arguments are always 4-byte-aligned.
+ CharUnits FieldAlign = CharUnits::fromQuantity(4);
+
+ assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct");
Info = ABIArgInfo::getInAlloca(FrameFields.size());
FrameFields.push_back(CGT.ConvertTypeForMem(Type));
- StackOffset += getContext().getTypeSizeInChars(Type).getQuantity();
-
- // Insert padding bytes to respect alignment. For x86_32, each argument is 4
- // byte aligned.
- if (StackOffset % 4U) {
- unsigned OldOffset = StackOffset;
- StackOffset = llvm::RoundUpToAlignment(StackOffset, 4U);
- unsigned NumBytes = StackOffset - OldOffset;
- assert(NumBytes);
+ StackOffset += getContext().getTypeSizeInChars(Type);
+
+ // Insert padding bytes to respect alignment.
+ CharUnits FieldEnd = StackOffset;
+ StackOffset = FieldEnd.RoundUpToAlignment(FieldAlign);
+ if (StackOffset != FieldEnd) {
+ CharUnits NumBytes = StackOffset - FieldEnd;
llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
- Ty = llvm::ArrayType::get(Ty, NumBytes);
+ Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
FrameFields.push_back(Ty);
}
}
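// Worked example of the padding logic above: a 6-byte argument placed at
// offset 0 ends at offset 6; rounding up to the 4-byte field alignment
// moves StackOffset to 8, so a [2 x i8] padding field is appended and the
// next argument begins at offset 8.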
@@ -1305,7 +1437,10 @@ void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
// Build a packed struct type for all of the arguments in memory.
SmallVector<llvm::Type *, 6> FrameFields;
- unsigned StackOffset = 0;
+ // The stack alignment is always 4.
+ CharUnits StackAlign = CharUnits::fromQuantity(4);
+
+ CharUnits StackOffset;
CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
// Put 'this' into the struct before 'sret', if necessary.
@@ -1337,47 +1472,25 @@ void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
}
FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
- /*isPacked=*/true));
+ /*isPacked=*/true),
+ StackAlign);
}
-llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- llvm::Type *BPP = CGF.Int8PtrPtrTy;
-
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
- "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
-
- // Compute if the address needs to be aligned
- unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
- Align = getTypeStackAlignInBytes(Ty, Align);
- Align = std::max(Align, 4U);
- if (Align > 4) {
- // addr = (addr + align - 1) & -align;
- llvm::Value *Offset =
- llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
- Addr = CGF.Builder.CreateGEP(Addr, Offset);
- llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
- CGF.Int32Ty);
- llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
- Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
- Addr->getType(),
- "ap.cur.aligned");
- }
+Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
+ Address VAListAddr, QualType Ty) const {
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+ auto TypeInfo = getContext().getTypeInfoInChars(Ty);
- uint64_t Offset =
- llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
- llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
- "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+ // x86-32 changes the alignment of certain arguments on the stack.
+ //
+ // Just messing with TypeInfo like this works because we never pass
+ // anything indirectly.
+ TypeInfo.second = CharUnits::fromQuantity(
+ getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));
- return AddrTyped;
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
+ TypeInfo, CharUnits::fromQuantity(4),
+ /*AllowHigherAlign*/ true);
}
bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
@@ -1449,8 +1562,9 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
} else {
// 9 is %eflags, which doesn't get a size on Darwin for some
// reason.
- Builder.CreateStore(
- Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9));
+ Builder.CreateAlignedStore(
+ Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
+ CharUnits::One());
// 11-16 are st(0..5). Not sure why we stop at 5.
// These have size 12, which is sizeof(long double) on
@@ -1619,8 +1733,8 @@ public:
void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
bool has64BitPointers() const {
return Has64BitPointers;
@@ -1638,8 +1752,8 @@ public:
void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
bool isHomogeneousAggregateBaseType(QualType Ty) const override {
// FIXME: Assumes vectorcall is in use.
@@ -2257,7 +2371,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(Ty);
}
bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
@@ -2291,7 +2405,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
}
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
// Compute the byval alignment. We specify the alignment of the byval in all
// cases so that the mid-level optimizer knows the alignment of the byval.
@@ -2328,7 +2442,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
Size));
}
- return ABIArgInfo::getIndirect(Align);
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
}
/// The ABI specifies that a value should be passed in a full vector XMM/YMM
@@ -2912,11 +3026,10 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
}
}
-static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
- QualType Ty,
- CodeGenFunction &CGF) {
- llvm::Value *overflow_arg_area_p = CGF.Builder.CreateStructGEP(
- nullptr, VAListAddr, 2, "overflow_arg_area_p");
+static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
+ Address VAListAddr, QualType Ty) {
+ Address overflow_arg_area_p = CGF.Builder.CreateStructGEP(
+ VAListAddr, 2, CharUnits::fromQuantity(8), "overflow_arg_area_p");
llvm::Value *overflow_arg_area =
CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
@@ -2924,7 +3037,7 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
// byte boundary if alignment needed by type exceeds 8 byte boundary.
// It isn't stated explicitly in the standard, but in practice we use
// alignment greater than 16 where necessary.
- uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
+ uint64_t Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
if (Align > 8) {
// overflow_arg_area = (overflow_arg_area + align - 1) & -align;
llvm::Value *Offset =
@@ -2958,11 +3071,11 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
// AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
- return Res;
+ return Address(Res, CharUnits::fromQuantity(Align));
}
-llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
+Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
// Assume that va_list type is correct; should be pointer to LLVM type:
// struct {
// i32 gp_offset;
@@ -2972,14 +3085,14 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// };
unsigned neededInt, neededSSE;
- Ty = CGF.getContext().getCanonicalType(Ty);
+ Ty = getContext().getCanonicalType(Ty);
ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
/*isNamedArg*/false);
// AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
// in the registers. If not go to step 7.
if (!neededInt && !neededSSE)
- return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+ return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
// AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
// general purpose registers needed to pass type and num_fp to hold
@@ -2993,11 +3106,12 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// register save space).
llvm::Value *InRegs = nullptr;
- llvm::Value *gp_offset_p = nullptr, *gp_offset = nullptr;
- llvm::Value *fp_offset_p = nullptr, *fp_offset = nullptr;
+ Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
+ llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
if (neededInt) {
gp_offset_p =
- CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 0, "gp_offset_p");
+ CGF.Builder.CreateStructGEP(VAListAddr, 0, CharUnits::Zero(),
+ "gp_offset_p");
gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
@@ -3005,7 +3119,8 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
if (neededSSE) {
fp_offset_p =
- CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 1, "fp_offset_p");
+ CGF.Builder.CreateStructGEP(VAListAddr, 1, CharUnits::fromQuantity(4),
+ "fp_offset_p");
fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
llvm::Value *FitsInFP =
llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
@@ -3033,14 +3148,17 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// simple assembling of a structure from scattered addresses has many more
// loads than necessary. Can we clean this up?
llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
- llvm::Value *RegAddr = CGF.Builder.CreateLoad(
- CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3), "reg_save_area");
+ llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
+ CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(16)),
+ "reg_save_area");
+
+ Address RegAddr = Address::invalid();
if (neededInt && neededSSE) {
// FIXME: Cleanup.
assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
- llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
- Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
+ Address Tmp = CGF.CreateMemTemp(Ty);
+ Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
llvm::Type *TyLo = ST->getElementType(0);
llvm::Type *TyHi = ST->getElementType(1);
@@ -3048,57 +3166,77 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
"Unexpected ABI info for mixed regs");
llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
- llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
- llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset);
+ llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset);
llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
- llvm::Value *V =
- CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 0));
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 1));
- RegAddr = CGF.Builder.CreateBitCast(Tmp,
- llvm::PointerType::getUnqual(LTy));
+ // Copy the first element.
+ llvm::Value *V =
+ CGF.Builder.CreateDefaultAlignedLoad(
+ CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
+ CGF.Builder.CreateStore(V,
+ CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
+
+ // Copy the second element.
+ V = CGF.Builder.CreateDefaultAlignedLoad(
+ CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
+ CharUnits Offset = CharUnits::fromQuantity(
+ getDataLayout().getStructLayout(ST)->getElementOffset(1));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1, Offset));
+
+ RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
} else if (neededInt) {
- RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
- RegAddr = CGF.Builder.CreateBitCast(RegAddr,
- llvm::PointerType::getUnqual(LTy));
+ RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),
+ CharUnits::fromQuantity(8));
+ RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
// Copy to a temporary if necessary to ensure the appropriate alignment.
std::pair<CharUnits, CharUnits> SizeAlign =
- CGF.getContext().getTypeInfoInChars(Ty);
+ getContext().getTypeInfoInChars(Ty);
uint64_t TySize = SizeAlign.first.getQuantity();
- unsigned TyAlign = SizeAlign.second.getQuantity();
- if (TyAlign > 8) {
- llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
- CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false);
+ CharUnits TyAlign = SizeAlign.second;
+
+ // Copy into a temporary if the type is more aligned than the
+ // register save area.
+ if (TyAlign.getQuantity() > 8) {
+ Address Tmp = CGF.CreateMemTemp(Ty);
+ CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
RegAddr = Tmp;
}
+
} else if (neededSSE == 1) {
- RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
- RegAddr = CGF.Builder.CreateBitCast(RegAddr,
- llvm::PointerType::getUnqual(LTy));
+ RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
+ CharUnits::fromQuantity(16));
+ RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
} else {
assert(neededSSE == 2 && "Invalid number of needed registers!");
// SSE registers are spaced 16 bytes apart in the register save
// area, we need to collect the two eightbytes together.
- llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
- llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
+ // The ABI isn't explicit about this, but it seems reasonable
+ // to assume that the slots are 16-byte aligned, since the stack is
+ // naturally 16-byte aligned and the prologue is expected to store
+ // all the SSE registers to the RSA.
+ Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
+ CharUnits::fromQuantity(16));
+ Address RegAddrHi =
+ CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
+ CharUnits::fromQuantity(16));
llvm::Type *DoubleTy = CGF.DoubleTy;
- llvm::Type *DblPtrTy =
- llvm::PointerType::getUnqual(DoubleTy);
llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr);
- llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty);
- Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
- DblPtrTy));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 0));
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
- DblPtrTy));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 1));
- RegAddr = CGF.Builder.CreateBitCast(Tmp,
- llvm::PointerType::getUnqual(LTy));
+ llvm::Value *V;
+ Address Tmp = CGF.CreateMemTemp(Ty);
+ Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
+ V = CGF.Builder.CreateLoad(
+ CGF.Builder.CreateElementBitCast(RegAddrLo, DoubleTy));
+ CGF.Builder.CreateStore(V,
+ CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
+ V = CGF.Builder.CreateLoad(
+ CGF.Builder.CreateElementBitCast(RegAddrHi, DoubleTy));
+ CGF.Builder.CreateStore(V,
+ CGF.Builder.CreateStructGEP(Tmp, 1, CharUnits::fromQuantity(8)));
+
+ RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
}
// AMD64-ABI 3.5.7p5: Step 5. Set:
@@ -3119,15 +3257,13 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// Emit code to load the value if it was passed in memory.
CGF.EmitBlock(InMemBlock);
- llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+ Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
// Return the appropriate result.
CGF.EmitBlock(ContBlock);
- llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
- "vaarg.addr");
- ResAddr->addIncoming(RegAddr, InRegBlock);
- ResAddr->addIncoming(MemAddr, InMemBlock);
+ Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
+ "vaarg.addr");
return ResAddr;
}
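
For reference, the constants threaded through this function (struct GEP offsets 0/4/16, the 48 in fits_in_gp, the 176 in FitsInFP) all fall out of the SysV AMD64 va_list layout; a sketch, with an illustrative type name:

  // Byte offsets match the CharUnits passed to CreateStructGEP above.
  typedef struct {
    unsigned int gp_offset;   // byte 0:  0..48, six GPRs x 8 bytes
    unsigned int fp_offset;   // byte 4:  48..176, eight SSE regs x 16 bytes
    void *overflow_arg_area;  // byte 8:  next stack-passed argument
    void *reg_save_area;      // byte 16: spilled rdi..r9, then xmm0..xmm7
  } X8664VaList;              // real type is the compiler's __va_list_tag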
@@ -3148,11 +3284,11 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
if (RT) {
if (!IsReturnType) {
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
}
if (RT->getDecl()->hasFlexibleArrayMember())
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
// FIXME: mingw-w64-gcc emits 128-bit struct as i128
if (Width == 128 && getTarget().getTriple().isWindowsGNUEnvironment())
@@ -3171,7 +3307,8 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
return ABIArgInfo::getDirect();
return ABIArgInfo::getExpand();
}
- return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align),
+ /*ByVal=*/false);
}
@@ -3187,7 +3324,7 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
// MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
// not 1, 2, 4, or 8 bytes, must be passed by reference."
if (Width > 64 || !llvm::isPowerOf2_64(Width))
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
// Otherwise, coerce it to a small integer.
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
@@ -3217,26 +3354,12 @@ void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
I.info = classify(I.type, FreeSSERegs, false);
}
-llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- llvm::Type *BPP = CGF.Int8PtrPtrTy;
-
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
- "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
-
- uint64_t Offset =
- llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
- llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
- "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
-
- return AddrTyped;
+Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+ CGF.getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(8),
+ /*allowHigherAlign*/ false);
}
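
emitVoidPtrVAArg is one of the shared helpers this patch introduces; a hedged sketch of the lowering it stands in for here, written as plain C (the name nextVarArg and the internals are assumptions for illustration, not the helper's actual code):

  #include <stddef.h>
  #include <stdint.h>

  // One va_arg step over a char* cursor; the MS x64 call site above
  // passes allowHigherAlign = false, so alignment never exceeds the
  // 8-byte slot.
  static void *nextVarArg(char **ap, size_t size, size_t align,
                          size_t slotSize, int allowHigherAlign) {
    if (!allowHigherAlign && align > slotSize)
      align = slotSize;
    uintptr_t addr = ((uintptr_t)*ap + align - 1) & ~(uintptr_t)(align - 1);
    *ap = (char *)addr + ((size + slotSize - 1) / slotSize) * slotSize;
    return (void *)addr;
  }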
// PowerPC-32
@@ -3246,8 +3369,8 @@ class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
public:
PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
};
class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
@@ -3266,64 +3389,50 @@ public:
}
-llvm::Value *PPC32_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
- QualType Ty,
- CodeGenFunction &CGF) const {
+Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
+ QualType Ty) const {
if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
// TODO: Implement this. For now ignore.
(void)CTy;
- return nullptr;
+ return Address::invalid();
}
+ // struct __va_list_tag {
+ // unsigned char gpr;
+ // unsigned char fpr;
+ // unsigned short reserved;
+ // void *overflow_arg_area;
+ // void *reg_save_area;
+ // };
+
bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
bool isInt =
Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
- llvm::Type *CharPtr = CGF.Int8PtrTy;
- llvm::Type *CharPtrPtr = CGF.Int8PtrPtrTy;
+
+ // All aggregates are passed indirectly? That doesn't seem consistent
+ // with the argument-lowering code.
+ bool isIndirect = Ty->isAggregateType();
CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *GPRPtr = Builder.CreateBitCast(VAListAddr, CharPtr, "gprptr");
- llvm::Value *GPRPtrAsInt = Builder.CreatePtrToInt(GPRPtr, CGF.Int32Ty);
- llvm::Value *FPRPtrAsInt =
- Builder.CreateAdd(GPRPtrAsInt, Builder.getInt32(1));
- llvm::Value *FPRPtr = Builder.CreateIntToPtr(FPRPtrAsInt, CharPtr);
- llvm::Value *OverflowAreaPtrAsInt =
- Builder.CreateAdd(FPRPtrAsInt, Builder.getInt32(3));
- llvm::Value *OverflowAreaPtr =
- Builder.CreateIntToPtr(OverflowAreaPtrAsInt, CharPtrPtr);
- llvm::Value *RegsaveAreaPtrAsInt =
- Builder.CreateAdd(OverflowAreaPtrAsInt, Builder.getInt32(4));
- llvm::Value *RegsaveAreaPtr =
- Builder.CreateIntToPtr(RegsaveAreaPtrAsInt, CharPtrPtr);
- llvm::Value *GPR = Builder.CreateLoad(GPRPtr, false, "gpr");
- // Align GPR when TY is i64.
- if (isI64) {
- llvm::Value *GPRAnd = Builder.CreateAnd(GPR, Builder.getInt8(1));
- llvm::Value *CC64 = Builder.CreateICmpEQ(GPRAnd, Builder.getInt8(1));
- llvm::Value *GPRPlusOne = Builder.CreateAdd(GPR, Builder.getInt8(1));
- GPR = Builder.CreateSelect(CC64, GPRPlusOne, GPR);
- }
- llvm::Value *FPR = Builder.CreateLoad(FPRPtr, false, "fpr");
- llvm::Value *OverflowArea =
- Builder.CreateLoad(OverflowAreaPtr, false, "overflow_area");
- llvm::Value *OverflowAreaAsInt =
- Builder.CreatePtrToInt(OverflowArea, CGF.Int32Ty);
- llvm::Value *RegsaveArea =
- Builder.CreateLoad(RegsaveAreaPtr, false, "regsave_area");
- llvm::Value *RegsaveAreaAsInt =
- Builder.CreatePtrToInt(RegsaveArea, CGF.Int32Ty);
- llvm::Value *CC =
- Builder.CreateICmpULT(isInt ? GPR : FPR, Builder.getInt8(8), "cond");
+ // The calling convention either uses 1-2 GPRs or 1 FPR.
+ Address NumRegsAddr = Address::invalid();
+ if (isInt) {
+ NumRegsAddr = Builder.CreateStructGEP(VAList, 0, CharUnits::Zero(), "gpr");
+ } else {
+ NumRegsAddr = Builder.CreateStructGEP(VAList, 1, CharUnits::One(), "fpr");
+ }
- llvm::Value *RegConstant =
- Builder.CreateMul(isInt ? GPR : FPR, Builder.getInt8(isInt ? 4 : 8));
+ llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
- llvm::Value *OurReg = Builder.CreateAdd(
- RegsaveAreaAsInt, Builder.CreateSExt(RegConstant, CGF.Int32Ty));
+ // "Align" the register count when TY is i64.
+ if (isI64) {
+ NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
+ NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
+ }
- if (Ty->isFloatingType())
- OurReg = Builder.CreateAdd(OurReg, Builder.getInt32(32));
+ llvm::Value *CC =
+ Builder.CreateICmpULT(NumRegs, Builder.getInt8(8), "cond");
llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
@@ -3331,39 +3440,84 @@ llvm::Value *PPC32_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
- CGF.EmitBlock(UsingRegs);
+ llvm::Type *DirectTy = CGF.ConvertType(Ty);
+ if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
- llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *Result1 = Builder.CreateIntToPtr(OurReg, PTy);
- // Increase the GPR/FPR indexes.
- if (isInt) {
- GPR = Builder.CreateAdd(GPR, Builder.getInt8(isI64 ? 2 : 1));
- Builder.CreateStore(GPR, GPRPtr);
- } else {
- FPR = Builder.CreateAdd(FPR, Builder.getInt8(1));
- Builder.CreateStore(FPR, FPRPtr);
+ // Case 1: consume registers.
+ Address RegAddr = Address::invalid();
+ {
+ CGF.EmitBlock(UsingRegs);
+
+ Address RegSaveAreaPtr =
+ Builder.CreateStructGEP(VAList, 4, CharUnits::fromQuantity(8));
+ RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
+ CharUnits::fromQuantity(8));
+ assert(RegAddr.getElementType() == CGF.Int8Ty);
+
+ // Floating-point registers start after the general-purpose registers.
+ if (!isInt) {
+ RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
+ CharUnits::fromQuantity(32));
+ }
+
+    // Get the address of the saved value by scaling the number of
+    // registers we've used by the size of each register.
+ CharUnits RegSize = CharUnits::fromQuantity(isInt ? 4 : 8);
+ llvm::Value *RegOffset =
+ Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
+ RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
+ RegAddr.getPointer(), RegOffset),
+ RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
+ RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
+
+ // Increase the used-register count.
+ NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(isI64 ? 2 : 1));
+ Builder.CreateStore(NumRegs, NumRegsAddr);
+
+ CGF.EmitBranch(Cont);
}
- CGF.EmitBranch(Cont);
- CGF.EmitBlock(UsingOverflow);
+ // Case 2: consume space in the overflow area.
+ Address MemAddr = Address::invalid();
+ {
+ CGF.EmitBlock(UsingOverflow);
- // Increase the overflow area.
- llvm::Value *Result2 = Builder.CreateIntToPtr(OverflowAreaAsInt, PTy);
- OverflowAreaAsInt =
- Builder.CreateAdd(OverflowAreaAsInt, Builder.getInt32(isInt ? 4 : 8));
- Builder.CreateStore(Builder.CreateIntToPtr(OverflowAreaAsInt, CharPtr),
- OverflowAreaPtr);
- CGF.EmitBranch(Cont);
+ // Everything in the overflow area is rounded up to a size of at least 4.
+ CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
+
+ CharUnits Size;
+ if (!isIndirect) {
+ auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
+ Size = TypeInfo.first.RoundUpToAlignment(OverflowAreaAlign);
+ } else {
+ Size = CGF.getPointerSize();
+ }
+
+ Address OverflowAreaAddr =
+ Builder.CreateStructGEP(VAList, 3, CharUnits::fromQuantity(4));
+ Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr),
+ OverflowAreaAlign);
+
+ // The current address is the address of the varargs element.
+ // FIXME: do we not need to round up to alignment?
+ MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
+
+ // Increase the overflow area.
+ OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
+ Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
+ CGF.EmitBranch(Cont);
+ }
CGF.EmitBlock(Cont);
- llvm::PHINode *Result = CGF.Builder.CreatePHI(PTy, 2, "vaarg.addr");
- Result->addIncoming(Result1, UsingRegs);
- Result->addIncoming(Result2, UsingOverflow);
+ // Merge the cases with a phi.
+ Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
+ "vaarg.addr");
- if (Ty->isAggregateType()) {
- llvm::Value *AGGPtr = Builder.CreateBitCast(Result, CharPtrPtr, "aggrptr");
- return Builder.CreateLoad(AGGPtr, false, "aggr");
+ // Load the pointer if the argument was passed indirectly.
+ if (isIndirect) {
+ Result = Address(Builder.CreateLoad(Result, "aggr"),
+ getContext().getTypeAlignInChars(Ty));
}
return Result;
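
A worked example of the register path, as a minimal sketch (slot math only; per the GEPs above, the save area lays out r3..r10 in 4-byte slots at byte 0 and f1..f8 in 8-byte slots at byte 32):

  // va_arg of an i64 when the va_list records gpr == 3:
  static unsigned ppc32GprSlotOffset(unsigned gpr /* == 3 */) {
    unsigned numRegs = (gpr + 1) & ~1u;  // rounded to 4: i64 wants an even pair
    return numRegs * 4;                  // byte 16; slot 0 is r3, so the value
  }                                      // is in r7:r8 and gpr is bumped to 6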
@@ -3459,7 +3613,7 @@ public:
: DefaultABIInfo(CGT), Kind(Kind), HasQPX(HasQPX) {}
bool isPromotableTypeForABI(QualType Ty) const;
- bool isAlignedParamType(QualType Ty, bool &Align32) const;
+ CharUnits getParamTypeAlignment(QualType Ty) const;
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType Ty) const;
@@ -3496,8 +3650,8 @@ public:
}
}
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
};
class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
@@ -3557,12 +3711,9 @@ PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
return false;
}
-/// isAlignedParamType - Determine whether a type requires 16-byte
-/// alignment in the parameter area.
-bool
-PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty, bool &Align32) const {
- Align32 = false;
-
+/// getParamTypeAlignment - Determine the alignment a type requires in
+/// the parameter save area. Always returns at least 8.
+CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
// Complex types are passed just like their elements.
if (const ComplexType *CTy = Ty->getAs<ComplexType>())
Ty = CTy->getElementType();
@@ -3571,11 +3722,11 @@ PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty, bool &Align32) const {
// passed via reference, smaller types are not aligned).
if (IsQPXVectorTy(Ty)) {
if (getContext().getTypeSize(Ty) > 128)
- Align32 = true;
+ return CharUnits::fromQuantity(32);
- return true;
+ return CharUnits::fromQuantity(16);
} else if (Ty->isVectorType()) {
- return getContext().getTypeSize(Ty) == 128;
+ return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
}
// For single-element float/vector structs, we consider the whole type
@@ -3600,22 +3751,22 @@ PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty, bool &Align32) const {
// With special case aggregates, only vector base types need alignment.
if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
if (getContext().getTypeSize(AlignAsType) > 128)
- Align32 = true;
+ return CharUnits::fromQuantity(32);
- return true;
+ return CharUnits::fromQuantity(16);
} else if (AlignAsType) {
- return AlignAsType->isVectorType();
+ return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
}
// Otherwise, we only need alignment for any aggregate type that
// has an alignment requirement of >= 16 bytes.
if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
- Align32 = true;
- return true;
+ return CharUnits::fromQuantity(32);
+ return CharUnits::fromQuantity(16);
}
- return false;
+ return CharUnits::fromQuantity(8);
}
/// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
@@ -3748,7 +3899,7 @@ PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
uint64_t Size = getContext().getTypeSize(Ty);
if (Size > 128)
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
else if (Size < 128) {
llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
return ABIArgInfo::getDirect(CoerceTy);
@@ -3757,12 +3908,10 @@ PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
if (isAggregateTypeForABI(Ty)) {
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
- bool Align32;
- uint64_t ABIAlign = isAlignedParamType(Ty, Align32) ?
- (Align32 ? 32 : 16) : 8;
- uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
+ uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
+ uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
// ELFv2 homogeneous aggregates are passed as array types.
const Type *Base = nullptr;
@@ -3800,7 +3949,8 @@ PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
}
// All other aggregates are passed ByVal.
- return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true,
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
+ /*ByVal=*/true,
/*Realign=*/TyAlign > ABIAlign);
}
@@ -3821,7 +3971,7 @@ PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
uint64_t Size = getContext().getTypeSize(RetTy);
if (Size > 128)
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
else if (Size < 128) {
llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
return ABIArgInfo::getDirect(CoerceTy);
@@ -3856,7 +4006,7 @@ PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
}
// All other aggregates are returned indirectly.
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
}
return (isPromotableTypeForABI(RetTy) ?
@@ -3864,47 +4014,12 @@ PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
}
// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
-llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
- QualType Ty,
- CodeGenFunction &CGF) const {
- llvm::Type *BP = CGF.Int8PtrTy;
- llvm::Type *BPP = CGF.Int8PtrPtrTy;
-
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
-
- // Handle types that require 16-byte alignment in the parameter save area.
- bool Align32;
- if (isAlignedParamType(Ty, Align32)) {
- llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
- AddrAsInt = Builder.CreateAdd(AddrAsInt,
- Builder.getInt64(Align32 ? 31 : 15));
- AddrAsInt = Builder.CreateAnd(AddrAsInt,
- Builder.getInt64(Align32 ? -32 : -16));
- Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
- }
-
- // Update the va_list pointer. The pointer should be bumped by the
- // size of the object. We can trust getTypeSize() except for a complex
- // type whose base type is smaller than a doubleword. For these, the
- // size of the object is 16 bytes; see below for further explanation.
- unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8;
- QualType BaseTy;
- unsigned CplxBaseSize = 0;
-
- if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
- BaseTy = CTy->getElementType();
- CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8;
- if (CplxBaseSize < 8)
- SizeInBytes = 16;
- }
+Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ auto TypeInfo = getContext().getTypeInfoInChars(Ty);
+ TypeInfo.second = getParamTypeAlignment(Ty);
- unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
- llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
- "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+ CharUnits SlotSize = CharUnits::fromQuantity(8);
// If we have a complex type and the base type is smaller than 8 bytes,
// the ABI calls for the real and imaginary parts to be right-adjusted
@@ -3912,44 +4027,40 @@ llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
// pointer to a structure with the two parts packed tightly. So generate
// loads of the real and imaginary parts relative to the va_list pointer,
// and store them to a temporary structure.
- if (CplxBaseSize && CplxBaseSize < 8) {
- llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
- llvm::Value *ImagAddr = RealAddr;
- if (CGF.CGM.getDataLayout().isBigEndian()) {
- RealAddr =
- Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize));
- ImagAddr =
- Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize));
- } else {
- ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(8));
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
+ CharUnits EltSize = TypeInfo.first / 2;
+ if (EltSize < SlotSize) {
+ Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
+ SlotSize * 2, SlotSize,
+ SlotSize, /*AllowHigher*/ true);
+
+ Address RealAddr = Addr;
+ Address ImagAddr = RealAddr;
+ if (CGF.CGM.getDataLayout().isBigEndian()) {
+ RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
+ SlotSize - EltSize);
+ ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
+ 2 * SlotSize - EltSize);
+ } else {
+ ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
+ }
+
+ llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
+ RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
+ ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
+ llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
+ llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
+
+ Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
+ CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
+ /*init*/ true);
+ return Temp;
}
- llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy));
- RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy);
- ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy);
- llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal");
- llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag");
- llvm::AllocaInst *Ptr =
- CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty), "vacplx");
- llvm::Value *RealPtr =
- Builder.CreateStructGEP(Ptr->getAllocatedType(), Ptr, 0, ".real");
- llvm::Value *ImagPtr =
- Builder.CreateStructGEP(Ptr->getAllocatedType(), Ptr, 1, ".imag");
- Builder.CreateStore(Real, RealPtr, false);
- Builder.CreateStore(Imag, ImagPtr, false);
- return Ptr;
- }
-
- // If the argument is smaller than 8 bytes, it is right-adjusted in
- // its doubleword slot. Adjust the pointer to pick it up from the
- // correct offset.
- if (SizeInBytes < 8 && CGF.CGM.getDataLayout().isBigEndian()) {
- llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
- AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
- Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
- }
-
- llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- return Builder.CreateBitCast(Addr, PTy);
+ }
+
+ // Otherwise, just use the general rule.
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
+ TypeInfo, SlotSize, /*AllowHigher*/ true);
}
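
Concretely, for _Complex float (EltSize 4, SlotSize 8) the two doublewords claimed above hold the parts like this:

  // byte offsets within the 16-byte va_arg region:
  //   big-endian:    [0..3 pad ][4..7 real][8..11 pad ][12..15 imag]
  //   little-endian: [0..3 real][4..7 pad ][8..11 imag][12..15 pad ]
  // i.e. RealAddr += 8 - 4 and ImagAddr += 16 - 4 on big-endian, and
  // ImagAddr = RealAddr + 8 on little-endian, exactly as coded above.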
static bool
@@ -4047,14 +4158,14 @@ private:
it.info = classifyArgumentType(it.type);
}
- llvm::Value *EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
+ Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
- llvm::Value *EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
+ Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override {
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override {
return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
: EmitAAPCSVAArg(VAListAddr, Ty, CGF);
}
@@ -4097,7 +4208,7 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
return ABIArgInfo::getDirect(ResType);
}
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
if (!isAggregateTypeForABI(Ty)) {
@@ -4113,8 +4224,8 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
// Structures with either a non-trivial destructor or a non-trivial
// copy constructor are always indirect.
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
- return ABIArgInfo::getIndirect(0, /*ByVal=*/RAA ==
- CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
+ CGCXXABI::RAA_DirectInMemory);
}
// Empty records are always ignored on Darwin, but actually passed in C++ mode
@@ -4149,7 +4260,7 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
}
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
@@ -4158,7 +4269,7 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
// Large vector types should be returned via memory.
if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
@@ -4194,7 +4305,7 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
}
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
}
/// isIllegalVectorType - check whether the vector type is legal for AArch64.
@@ -4232,7 +4343,7 @@ bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
return Members <= 4;
}
-llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
+Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
QualType Ty,
CodeGenFunction &CGF) const {
ABIArgInfo AI = classifyArgumentType(Ty);
@@ -4266,24 +4377,32 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
- auto &Ctx = CGF.getContext();
- llvm::Value *reg_offs_p = nullptr, *reg_offs = nullptr;
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
+ CharUnits TyAlign = TyInfo.second;
+
+ Address reg_offs_p = Address::invalid();
+ llvm::Value *reg_offs = nullptr;
int reg_top_index;
- int RegSize = IsIndirect ? 8 : getContext().getTypeSize(Ty) / 8;
+ CharUnits reg_top_offset;
+ int RegSize = IsIndirect ? 8 : TyInfo.first.getQuantity();
if (!IsFPR) {
// 3 is the field number of __gr_offs
reg_offs_p =
- CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3, "gr_offs_p");
+ CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
+ "gr_offs_p");
reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
reg_top_index = 1; // field number for __gr_top
+ reg_top_offset = CharUnits::fromQuantity(8);
RegSize = llvm::RoundUpToAlignment(RegSize, 8);
} else {
// 4 is the field number of __vr_offs.
reg_offs_p =
- CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 4, "vr_offs_p");
+ CGF.Builder.CreateStructGEP(VAListAddr, 4, CharUnits::fromQuantity(28),
+ "vr_offs_p");
reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
reg_top_index = 2; // field number for __vr_top
+ reg_top_offset = CharUnits::fromQuantity(16);
RegSize = 16 * NumRegs;
}
@@ -4308,8 +4427,8 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
  // Integer arguments may need correct register alignment (for example a
// "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
// align __gr_offs to calculate the potential address.
- if (!IsFPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
- int Align = Ctx.getTypeAlign(Ty) / 8;
+ if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
+ int Align = TyAlign.getQuantity();
reg_offs = CGF.Builder.CreateAdd(
reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
@@ -4320,6 +4439,9 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
}
// Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
+ // The fact that this is done unconditionally reflects the fact that
+ // allocating an argument to the stack also uses up all the remaining
+ // registers of the appropriate kind.
llvm::Value *NewOffset = nullptr;
NewOffset = CGF.Builder.CreateAdd(
reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
@@ -4341,13 +4463,14 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
// registers. First start the appropriate block:
CGF.EmitBlock(InRegBlock);
- llvm::Value *reg_top_p = nullptr, *reg_top = nullptr;
- reg_top_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, reg_top_index,
- "reg_top_p");
+ llvm::Value *reg_top = nullptr;
+ Address reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index,
+ reg_top_offset, "reg_top_p");
reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
- llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
- llvm::Value *RegAddr = nullptr;
- llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
+ Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
+ CharUnits::fromQuantity(IsFPR ? 16 : 8));
+ Address RegAddr = Address::invalid();
+ llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
if (IsIndirect) {
// If it's been passed indirectly (actually a struct), whatever we find from
@@ -4364,43 +4487,45 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
// qN+1, ...). We reload and store into a temporary local variable
// contiguously.
assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
+ auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
- llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(HFATy);
+ Address Tmp = CGF.CreateTempAlloca(HFATy,
+ std::max(TyAlign, BaseTyInfo.second));
+
+ // On big-endian platforms, the value will be right-aligned in its slot.
int Offset = 0;
+ if (CGF.CGM.getDataLayout().isBigEndian() &&
+ BaseTyInfo.first.getQuantity() < 16)
+ Offset = 16 - BaseTyInfo.first.getQuantity();
- if (CGF.CGM.getDataLayout().isBigEndian() && Ctx.getTypeSize(Base) < 128)
- Offset = 16 - Ctx.getTypeSize(Base) / 8;
for (unsigned i = 0; i < NumMembers; ++i) {
- llvm::Value *BaseOffset =
- llvm::ConstantInt::get(CGF.Int32Ty, 16 * i + Offset);
- llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
- LoadAddr = CGF.Builder.CreateBitCast(
- LoadAddr, llvm::PointerType::getUnqual(BaseTy));
- llvm::Value *StoreAddr =
- CGF.Builder.CreateStructGEP(Tmp->getAllocatedType(), Tmp, i);
+ CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
+ Address LoadAddr =
+ CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
+ LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);
+
+ Address StoreAddr =
+ CGF.Builder.CreateConstArrayGEP(Tmp, i, BaseTyInfo.first);
llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
CGF.Builder.CreateStore(Elem, StoreAddr);
}
- RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
+ RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
} else {
- // Otherwise the object is contiguous in memory
- unsigned BeAlign = reg_top_index == 2 ? 16 : 8;
- if (CGF.CGM.getDataLayout().isBigEndian() &&
- (IsHFA || !isAggregateTypeForABI(Ty)) &&
- Ctx.getTypeSize(Ty) < (BeAlign * 8)) {
- int Offset = BeAlign - Ctx.getTypeSize(Ty) / 8;
- BaseAddr = CGF.Builder.CreatePtrToInt(BaseAddr, CGF.Int64Ty);
-
- BaseAddr = CGF.Builder.CreateAdd(
- BaseAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
+ // Otherwise the object is contiguous in memory.
- BaseAddr = CGF.Builder.CreateIntToPtr(BaseAddr, CGF.Int8PtrTy);
+ // It might be right-aligned in its slot.
+ CharUnits SlotSize = BaseAddr.getAlignment();
+ if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
+ (IsHFA || !isAggregateTypeForABI(Ty)) &&
+ TyInfo.first < SlotSize) {
+ CharUnits Offset = SlotSize - TyInfo.first;
+ BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
}
- RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
+ RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
}
CGF.EmitBranch(ContBlock);
@@ -4410,55 +4535,51 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
//=======================================
CGF.EmitBlock(OnStackBlock);
- llvm::Value *stack_p = nullptr, *OnStackAddr = nullptr;
- stack_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 0, "stack_p");
- OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");
+ Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0,
+ CharUnits::Zero(), "stack_p");
+ llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
- // Again, stack arguments may need realigmnent. In this case both integer and
+ // Again, stack arguments may need realignment. In this case both integer and
// floating-point ones might be affected.
- if (!IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
- int Align = Ctx.getTypeAlign(Ty) / 8;
+ if (!IsIndirect && TyAlign.getQuantity() > 8) {
+ int Align = TyAlign.getQuantity();
- OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
+ OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
- OnStackAddr = CGF.Builder.CreateAdd(
- OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
+ OnStackPtr = CGF.Builder.CreateAdd(
+ OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
"align_stack");
- OnStackAddr = CGF.Builder.CreateAnd(
- OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
+ OnStackPtr = CGF.Builder.CreateAnd(
+ OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
"align_stack");
- OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
+ OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
}
+ Address OnStackAddr(OnStackPtr,
+ std::max(CharUnits::fromQuantity(8), TyAlign));
- uint64_t StackSize;
+ // All stack slots are multiples of 8 bytes.
+ CharUnits StackSlotSize = CharUnits::fromQuantity(8);
+ CharUnits StackSize;
if (IsIndirect)
- StackSize = 8;
+ StackSize = StackSlotSize;
else
- StackSize = Ctx.getTypeSize(Ty) / 8;
+ StackSize = TyInfo.first.RoundUpToAlignment(StackSlotSize);
- // All stack slots are 8 bytes
- StackSize = llvm::RoundUpToAlignment(StackSize, 8);
-
- llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
+ llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
llvm::Value *NewStack =
- CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, "new_stack");
+ CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");
// Write the new value of __stack for the next call to va_arg
CGF.Builder.CreateStore(NewStack, stack_p);
if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
- Ctx.getTypeSize(Ty) < 64) {
- int Offset = 8 - Ctx.getTypeSize(Ty) / 8;
- OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
-
- OnStackAddr = CGF.Builder.CreateAdd(
- OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
-
- OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
+ TyInfo.first < StackSlotSize) {
+ CharUnits Offset = StackSlotSize - TyInfo.first;
+ OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
}
- OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);
+ OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);
CGF.EmitBranch(ContBlock);
@@ -4467,75 +4588,48 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
//=======================================
CGF.EmitBlock(ContBlock);
- llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
- ResAddr->addIncoming(RegAddr, InRegBlock);
- ResAddr->addIncoming(OnStackAddr, OnStackBlock);
+ Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
+ OnStackAddr, OnStackBlock, "vaargs.addr");
if (IsIndirect)
- return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");
+ return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
+ TyInfo.second);
return ResAddr;
}
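
The hard-coded field/offset pairs above (3/24 for __gr_offs, 4/28 for __vr_offs, 1/8 and 2/16 for the top pointers, 0/0 for __stack) are the AAPCS64 va_list layout:

  typedef struct {
    void *__stack;    // byte 0:  next stacked argument
    void *__gr_top;   // byte 8:  one past the end of the GPR save area
    void *__vr_top;   // byte 16: one past the end of the FP/SIMD save area
    int   __gr_offs;  // byte 24: negative offset from __gr_top
    int   __vr_offs;  // byte 28: negative offset from __vr_top
  } AAPCS64VaList;    // illustrative name
  // The *_offs fields stay negative while register slots remain, so adding
  // reg_offs to the loaded reg_top lands inside the save area.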
-llvm::Value *AArch64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr,
- QualType Ty,
- CodeGenFunction &CGF) const {
- // We do not support va_arg for aggregates or illegal vector types.
- // Lower VAArg here for these cases and use the LLVM va_arg instruction for
- // other cases.
+Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // The backend's lowering doesn't support va_arg for aggregates or
+ // illegal vector types. Lower VAArg here for these cases and use
+ // the LLVM va_arg instruction for everything else.
if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
- return nullptr;
-
- uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
- uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
-
- const Type *Base = nullptr;
- uint64_t Members = 0;
- bool isHA = isHomogeneousAggregate(Ty, Base, Members);
-
- bool isIndirect = false;
- // Arguments bigger than 16 bytes which aren't homogeneous aggregates should
- // be passed indirectly.
- if (Size > 16 && !isHA) {
- isIndirect = true;
- Size = 8;
- Align = 8;
- }
+ return Address::invalid();
- llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
-
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ CharUnits SlotSize = CharUnits::fromQuantity(8);
+ // Empty records are ignored for parameter passing purposes.
if (isEmptyRecord(getContext(), Ty, true)) {
- // These are ignored for parameter passing purposes.
- llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- return Builder.CreateBitCast(Addr, PTy);
+ Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
+ Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
+ return Addr;
}
- const uint64_t MinABIAlign = 8;
- if (Align > MinABIAlign) {
- llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
- Addr = Builder.CreateGEP(Addr, Offset);
- llvm::Value *AsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
- llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~(Align - 1));
- llvm::Value *Aligned = Builder.CreateAnd(AsInt, Mask);
- Addr = Builder.CreateIntToPtr(Aligned, BP, "ap.align");
- }
-
- uint64_t Offset = llvm::RoundUpToAlignment(Size, MinABIAlign);
- llvm::Value *NextAddr = Builder.CreateGEP(
- Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+ // The size of the actual thing passed, which might end up just
+ // being a pointer for indirect types.
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
- if (isIndirect)
- Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
- llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+ // Arguments bigger than 16 bytes which aren't homogeneous
+ // aggregates should be passed indirectly.
+ bool IsIndirect = false;
+ if (TyInfo.first.getQuantity() > 16) {
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
+ }
- return AddrTyped;
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
+ TyInfo, SlotSize, /*AllowHigherAlign*/ true);
}
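
Darwin can use the generic helper because its arm64 ABI drops the register save area entirely; a hedged sketch of the effective model:

  typedef char *DarwinArm64VaList;  // just a cursor into the stacked args
  // Each argument occupies round_up(size, 8) bytes at an 8-byte-aligned
  // (or naturally aligned, if higher) address; anything over 16 bytes
  // that is not a homogeneous aggregate is passed as a pointer instead.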
//===----------------------------------------------------------------------===//
@@ -4596,8 +4690,8 @@ private:
void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
llvm::CallingConv::ID getLLVMDefaultCC() const;
llvm::CallingConv::ID getABIDefaultCC() const;
@@ -4788,7 +4882,7 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
llvm::Type::getInt32Ty(getVMContext()), 4);
return ABIArgInfo::getDirect(ResType);
}
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
// __fp16 gets passed as if it were an int or float, but with the top 16 bits
@@ -4812,7 +4906,7 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
}
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
}
// Ignore empty records.
@@ -4842,8 +4936,9 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
- return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true,
- /*Realign=*/TyAlign > ABIAlign);
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
+ /*ByVal=*/true,
+ /*Realign=*/TyAlign > ABIAlign);
}
// Otherwise, pass by coercing to a structure of the appropriate size.
@@ -4956,7 +5051,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
// Large vector types should be returned via memory.
if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
}
// __fp16 gets returned as if it were an int or float, but with the top 16
@@ -5003,7 +5098,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
}
// Otherwise return in memory.
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
}
// Otherwise this is an AAPCS variant.
@@ -5038,7 +5133,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
}
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
}
/// isIllegalVector - check whether Ty is an illegal vector type.
@@ -5077,80 +5172,40 @@ bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
return Members <= 4;
}
-llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- llvm::Type *BP = CGF.Int8PtrTy;
- llvm::Type *BPP = CGF.Int8PtrPtrTy;
-
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ CharUnits SlotSize = CharUnits::fromQuantity(4);
+ // Empty records are ignored for parameter passing purposes.
if (isEmptyRecord(getContext(), Ty, true)) {
- // These are ignored for parameter passing purposes.
- llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- return Builder.CreateBitCast(Addr, PTy);
+ Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
+ Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
+ return Addr;
}
- uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
- uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
- bool IsIndirect = false;
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
+ CharUnits TyAlignForABI = TyInfo.second;
- // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
- // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
- if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
- getABIKind() == ARMABIInfo::AAPCS)
- TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
- else
- TyAlign = 4;
  // Use indirect if the size of the illegal vector is bigger than 16 bytes.
- if (isIllegalVectorType(Ty) && Size > 16) {
+ bool IsIndirect = false;
+ if (TyInfo.first > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
IsIndirect = true;
- Size = 4;
- TyAlign = 4;
- }
- // Handle address alignment for ABI alignment > 4 bytes.
- if (TyAlign > 4) {
- assert((TyAlign & (TyAlign - 1)) == 0 &&
- "Alignment is not power of 2!");
- llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
- AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
- AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
- Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
+ // Otherwise, bound the type's ABI alignment.
+ // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
+ // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
+ // Our callers should be prepared to handle an under-aligned address.
+ } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
+ getABIKind() == ARMABIInfo::AAPCS) {
+ TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
+ TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
+ } else {
+ TyAlignForABI = CharUnits::fromQuantity(4);
}
+ TyInfo.second = TyAlignForABI;
- uint64_t Offset =
- llvm::RoundUpToAlignment(Size, 4);
- llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
- "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
-
- if (IsIndirect)
- Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
- else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
- // We can't directly cast ap.cur to pointer to a vector type, since ap.cur
- // may not be correctly aligned for the vector type. We create an aligned
- // temporary space and copy the content over from ap.cur to the temporary
- // space. This is necessary if the natural alignment of the type is greater
- // than the ABI alignment.
- llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
- CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
- llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
- "var.align");
- llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
- llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
- Builder.CreateMemCpy(Dst, Src,
- llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
- TyAlign, false);
- Addr = AlignedTemp; //The content is in aligned location.
- }
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
-
- return AddrTyped;
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
+ SlotSize, /*AllowHigherAlign*/ true);
}
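
The clamp above, written out as plain arithmetic (a sketch, not the builder calls):

  static unsigned aapcsVaArgAlign(unsigned naturalAlign) {
    unsigned a = naturalAlign < 4 ? 4 : naturalAlign;  // at least 4
    return a > 8 ? 8 : a;                              // at most 8
  }
  // aapcsVaArgAlign(16) == 8: a 16-byte-aligned type is read from an
  // 8-aligned slot, hence the "under-aligned address" caveat above.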
//===----------------------------------------------------------------------===//
@@ -5167,8 +5222,8 @@ public:
ABIArgInfo classifyArgumentType(QualType Ty) const;
void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CFG) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
};
class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -5207,7 +5262,7 @@ ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
  // Return aggregate types as indirect by value
if (isAggregateTypeForABI(Ty))
- return ABIArgInfo::getIndirect(0, /* byval */ true);
+ return getNaturalAlignIndirect(Ty, /* byval */ true);
return (Ty->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
@@ -5226,8 +5281,8 @@ void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
FI.setEffectiveCallingConvention(getRuntimeCC());
}
-llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CFG) const {
+Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
llvm_unreachable("NVPTX does not support varargs");
}
@@ -5328,8 +5383,8 @@ public:
I.info = classifyArgumentType(I.type);
}
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
};
class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -5430,8 +5485,8 @@ QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
return Ty;
}
-llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
+Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
// Assume that va_list type is correct; should be pointer to LLVM type:
// struct {
// i64 __gpr;
@@ -5443,59 +5498,69 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// Every non-vector argument occupies 8 bytes and is passed by preference
// in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
// always passed on the stack.
- Ty = CGF.getContext().getCanonicalType(Ty);
+ Ty = getContext().getCanonicalType(Ty);
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
- llvm::Type *APTy = llvm::PointerType::getUnqual(ArgTy);
+ llvm::Type *DirectTy = ArgTy;
ABIArgInfo AI = classifyArgumentType(Ty);
bool IsIndirect = AI.isIndirect();
bool InFPRs = false;
bool IsVector = false;
- unsigned UnpaddedBitSize;
+ CharUnits UnpaddedSize;
+ CharUnits DirectAlign;
if (IsIndirect) {
- APTy = llvm::PointerType::getUnqual(APTy);
- UnpaddedBitSize = 64;
+ DirectTy = llvm::PointerType::getUnqual(DirectTy);
+ UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
} else {
if (AI.getCoerceToType())
ArgTy = AI.getCoerceToType();
InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
IsVector = ArgTy->isVectorTy();
- UnpaddedBitSize = getContext().getTypeSize(Ty);
+ UnpaddedSize = TyInfo.first;
+ DirectAlign = TyInfo.second;
}
- unsigned PaddedBitSize = (IsVector && UnpaddedBitSize > 64) ? 128 : 64;
- assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");
+ CharUnits PaddedSize = CharUnits::fromQuantity(8);
+ if (IsVector && UnpaddedSize > PaddedSize)
+ PaddedSize = CharUnits::fromQuantity(16);
+ assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");
- unsigned PaddedSize = PaddedBitSize / 8;
- unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;
+ CharUnits Padding = (PaddedSize - UnpaddedSize);
llvm::Type *IndexTy = CGF.Int64Ty;
- llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
+ llvm::Value *PaddedSizeV =
+ llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());
if (IsVector) {
// Work out the address of a vector argument on the stack.
// Vector arguments are always passed in the high bits of a
// single (8 byte) or double (16 byte) stack slot.
- llvm::Value *OverflowArgAreaPtr =
- CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 2,
+ Address OverflowArgAreaPtr =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, CharUnits::fromQuantity(16),
"overflow_arg_area_ptr");
- llvm::Value *OverflowArgArea =
- CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
- llvm::Value *MemAddr =
- CGF.Builder.CreateBitCast(OverflowArgArea, APTy, "mem_addr");
+ Address OverflowArgArea =
+ Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
+ TyInfo.second);
+ Address MemAddr =
+ CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
// Update overflow_arg_area_ptr pointer
llvm::Value *NewOverflowArgArea =
- CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
+ CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
+ "overflow_arg_area");
CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
return MemAddr;
}
- unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
+ assert(PaddedSize.getQuantity() == 8);
+
+ unsigned MaxRegs, RegCountField, RegSaveIndex;
+ CharUnits RegPadding;
if (InFPRs) {
MaxRegs = 4; // Maximum of 4 FPR arguments
RegCountField = 1; // __fpr
RegSaveIndex = 16; // save offset for f0
- RegPadding = 0; // floats are passed in the high bits of an FPR
+ RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
} else {
MaxRegs = 5; // Maximum of 5 GPR arguments
RegCountField = 0; // __gpr
@@ -5503,8 +5568,9 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
RegPadding = Padding; // values are passed in the low bits of a GPR
}
- llvm::Value *RegCountPtr = CGF.Builder.CreateStructGEP(
- nullptr, VAListAddr, RegCountField, "reg_count_ptr");
+ Address RegCountPtr = CGF.Builder.CreateStructGEP(
+ VAListAddr, RegCountField, RegCountField * CharUnits::fromQuantity(8),
+ "reg_count_ptr");
llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
@@ -5522,17 +5588,20 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
llvm::Value *ScaledRegCount =
CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
llvm::Value *RegBase =
- llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding);
+ llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
+ + RegPadding.getQuantity());
llvm::Value *RegOffset =
CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
- llvm::Value *RegSaveAreaPtr =
- CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3, "reg_save_area_ptr");
+ Address RegSaveAreaPtr =
+ CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
+ "reg_save_area_ptr");
llvm::Value *RegSaveArea =
CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
- llvm::Value *RawRegAddr =
- CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr");
- llvm::Value *RegAddr =
- CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr");
+ Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
+ "raw_reg_addr"),
+ PaddedSize);
+ Address RegAddr =
+ CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");
// Update the register count
llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
@@ -5545,30 +5614,31 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CGF.EmitBlock(InMemBlock);
// Work out the address of a stack argument.
- llvm::Value *OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(
- nullptr, VAListAddr, 2, "overflow_arg_area_ptr");
- llvm::Value *OverflowArgArea =
- CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
- llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding);
- llvm::Value *RawMemAddr =
- CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr");
- llvm::Value *MemAddr =
- CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr");
+ Address OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(
+ VAListAddr, 2, CharUnits::fromQuantity(16), "overflow_arg_area_ptr");
+ Address OverflowArgArea =
+ Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
+ PaddedSize);
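+  // SystemZ is big-endian, so values narrower than their 8-byte stack slot
+  // are right-justified; step over the leading padding bytes.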
+ Address RawMemAddr =
+ CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
+ Address MemAddr =
+ CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");
   // Update the overflow_arg_area pointer.
llvm::Value *NewOverflowArgArea =
- CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
+ CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
+ "overflow_arg_area");
CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
CGF.EmitBranch(ContBlock);
// Return the appropriate result.
CGF.EmitBlock(ContBlock);
- llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr");
- ResAddr->addIncoming(RegAddr, InRegBlock);
- ResAddr->addIncoming(MemAddr, InMemBlock);
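+  // emitMergePHI joins the two pointers with a PHI node and conservatively
+  // takes the minimum of the two alignments.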
+ Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
+ MemAddr, InMemBlock, "va_arg.addr");
if (IsIndirect)
- return CGF.Builder.CreateLoad(ResAddr, "indirect_arg");
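+    // For indirect arguments the slot holds a pointer; load it and adopt
+    // the value type's natural alignment.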
+ ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
+ TyInfo.second);
return ResAddr;
}
@@ -5579,7 +5649,7 @@ ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
if (isVectorArgumentType(RetTy))
return ABIArgInfo::getDirect();
if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
return (isPromotableIntegerType(RetTy) ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
@@ -5587,7 +5657,7 @@ ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
// Handle the generic C++ ABI.
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
// Integers and enums are extended to full register width.
if (isPromotableIntegerType(Ty))
@@ -5604,7 +5674,7 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
// Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
// Handle small structures.
if (const RecordType *RT = Ty->getAs<RecordType>()) {
@@ -5612,7 +5682,7 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
// fail the size test above.
const RecordDecl *RD = RT->getDecl();
if (RD->hasFlexibleArrayMember())
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
// The structure is passed as an unextended integer, a float, or a double.
llvm::Type *PassTy;
@@ -5629,7 +5699,7 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
// Non-structure compounds are passed indirectly.
if (isCompoundType(Ty))
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
return ABIArgInfo::getDirect(nullptr);
}
@@ -5694,8 +5764,8 @@ public:
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
bool shouldSignExtUnsignedType(QualType Ty) const override;
};
@@ -5834,7 +5904,7 @@ MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
Offset = OrigOffset + MinABIStackAlignInBytes;
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
}
// If we have reached here, aggregates are passed directly by coercing to
@@ -5928,7 +5998,7 @@ ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
}
}
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
}
// Treat an enum type as its underlying type.
@@ -5951,52 +6021,55 @@ void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
I.info = classifyArgumentType(I.type, Offset);
}
-llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- llvm::Type *BP = CGF.Int8PtrTy;
- llvm::Type *BPP = CGF.Int8PtrPtrTy;
+Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType OrigTy) const {
+ QualType Ty = OrigTy;
// Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
// Pointers are also promoted in the same way but this only matters for N32.
unsigned SlotSizeInBits = IsO32 ? 32 : 64;
unsigned PtrWidth = getTarget().getPointerWidth(0);
+ bool DidPromote = false;
if ((Ty->isIntegerType() &&
- CGF.getContext().getIntWidth(Ty) < SlotSizeInBits) ||
+ getContext().getIntWidth(Ty) < SlotSizeInBits) ||
(Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
- Ty = CGF.getContext().getIntTypeForBitwidth(SlotSizeInBits,
- Ty->isSignedIntegerType());
+ DidPromote = true;
+ Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
+ Ty->isSignedIntegerType());
}
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
- int64_t TypeAlign =
- std::min(getContext().getTypeAlign(Ty) / 8, StackAlignInBytes);
- llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped;
- llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
-
- if (TypeAlign > MinABIStackAlignInBytes) {
- llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
- llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
- llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
- llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
- llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
- AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
- }
- else
- AddrTyped = Builder.CreateBitCast(Addr, PTy);
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
- llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
- TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
- unsigned ArgSizeInBits = CGF.getContext().getTypeSize(Ty);
- uint64_t Offset = llvm::RoundUpToAlignment(ArgSizeInBits / 8, TypeAlign);
- llvm::Value *NextAddr =
- Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
- "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+ // The alignment of things in the argument area is never larger than
+ // StackAlignInBytes.
+ TyInfo.second =
+ std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes));
- return AddrTyped;
+ // MinABIStackAlignInBytes is the size of argument slots on the stack.
+ CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
+
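+  // emitVoidPtrVAArg rounds the current pointer up to the type's alignment
+  // (when AllowHigherAlign permits), casts it to the argument type, and
+  // advances the pointer by the size rounded up to a whole slot.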
+ Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+ TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
+
+ // If there was a promotion, "unpromote" into a temporary.
+ // TODO: can we just use a pointer into a subset of the original slot?
+ if (DidPromote) {
+ Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
+ llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
+
+ // Truncate down to the right width.
+ llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
+ : CGF.IntPtrTy);
+ llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
+ if (OrigTy->isPointerType())
+ V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
+
+ CGF.Builder.CreateStore(V, Temp);
+ Addr = Temp;
+ }
+
+ return Addr;
}
bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
@@ -6118,8 +6191,8 @@ private:
void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
};
class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -6156,11 +6229,11 @@ ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
return ABIArgInfo::getIgnore();
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
uint64_t Size = getContext().getTypeSize(Ty);
if (Size > 64)
- return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
// Pass in the smallest viable integer type.
else if (Size > 32)
return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
@@ -6178,7 +6251,7 @@ ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
// Large vector types should be returned via memory.
if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
- return ABIArgInfo::getIndirect(0);
+ return getNaturalAlignIndirect(RetTy);
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
@@ -6206,30 +6279,16 @@ ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
}
- return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
}
-llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- // FIXME: Need to handle alignment
- llvm::Type *BPP = CGF.Int8PtrPtrTy;
-
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
- "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
-
- uint64_t Offset =
- llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
- llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
- "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
-
- return AddrTyped;
+Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+  // FIXME: Someone needs to audit that this handles alignment correctly.
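+  // Hexagon argument slots are 4 bytes wide; emitVoidPtrVAArg rounds each
+  // argument's size up to a multiple of that.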
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+ getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(4),
+ /*AllowHigherAlign*/ true);
}
//===----------------------------------------------------------------------===//
@@ -6306,8 +6365,8 @@ public:
private:
ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
void computeInfo(CGFunctionInfo &FI) const override;
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
// Coercion type builder for structs passed in registers. The coercion type
// serves two purposes:
@@ -6427,7 +6486,7 @@ SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
// Anything too big to fit in registers is passed with an explicit indirect
// pointer / sret pointer.
if (Size > SizeLimit)
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
@@ -6444,7 +6503,7 @@ SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
// If a C++ object has either a non-trivial copy constructor or a non-trivial
// destructor, it is passed with an explicit indirect pointer / sret pointer.
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
// This is a small aggregate type that should be passed in registers.
// Build a coercion type from the LLVM struct type.
@@ -6465,55 +6524,59 @@ SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
return ABIArgInfo::getDirect(CoerceTy);
}
-llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
+Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
ABIArgInfo AI = classifyType(Ty, 16 * 8);
llvm::Type *ArgTy = CGT.ConvertType(Ty);
if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
AI.setCoerceToType(ArgTy);
- llvm::Type *BPP = CGF.Int8PtrPtrTy;
+ CharUnits SlotSize = CharUnits::fromQuantity(8);
+
CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
- llvm::Value *ArgAddr;
- unsigned Stride;
+ auto TypeInfo = getContext().getTypeInfoInChars(Ty);
+
+ Address ArgAddr = Address::invalid();
+ CharUnits Stride;
switch (AI.getKind()) {
case ABIArgInfo::Expand:
case ABIArgInfo::InAlloca:
llvm_unreachable("Unsupported ABI kind for va_arg");
- case ABIArgInfo::Extend:
- Stride = 8;
- ArgAddr = Builder
- .CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy),
- "extend");
+ case ABIArgInfo::Extend: {
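+    // SPARCv9 is big-endian, so a value narrower than the 8-byte slot sits
+    // in the slot's trailing bytes; skip the leading padding.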
+ Stride = SlotSize;
+ CharUnits Offset = SlotSize - TypeInfo.first;
+ ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
break;
+ }
- case ABIArgInfo::Direct:
- Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
+ case ABIArgInfo::Direct: {
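+    // Coerced values may span several slots; the stride is the coercion
+    // type's allocation size rounded up to whole 8-byte slots.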
+ auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
+ Stride = CharUnits::fromQuantity(AllocSize).RoundUpToAlignment(SlotSize);
ArgAddr = Addr;
break;
+ }
case ABIArgInfo::Indirect:
- Stride = 8;
- ArgAddr = Builder.CreateBitCast(Addr,
- llvm::PointerType::getUnqual(ArgPtrTy),
- "indirect");
- ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg");
+ Stride = SlotSize;
+ ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
+ ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
+ TypeInfo.second);
break;
case ABIArgInfo::Ignore:
- return llvm::UndefValue::get(ArgPtrTy);
+ return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second);
}
// Update VAList.
- Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next");
- Builder.CreateStore(Addr, VAListAddrAsBPP);
+ llvm::Value *NextPtr =
+ Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), Stride, "ap.next");
+ Builder.CreateStore(NextPtr, VAListAddr);
- return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr");
+ return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
}
void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
@@ -6673,8 +6736,8 @@ public:
class XCoreABIInfo : public DefaultABIInfo {
public:
XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
- llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
};
class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -6688,52 +6751,53 @@ public:
} // End anonymous namespace.
-llvm::Value *XCoreABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
+Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
CGBuilderTy &Builder = CGF.Builder;
// Get the VAList.
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr,
- CGF.Int8PtrPtrTy);
- llvm::Value *AP = Builder.CreateLoad(VAListAddrAsBPP);
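+  // The XCore va_list is a bare pointer, and arguments are laid out in
+  // 4-byte stack slots.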
+ CharUnits SlotSize = CharUnits::fromQuantity(4);
+ Address AP(Builder.CreateLoad(VAListAddr), SlotSize);
// Handle the argument.
ABIArgInfo AI = classifyArgumentType(Ty);
+ CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
llvm::Type *ArgTy = CGT.ConvertType(Ty);
if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
AI.setCoerceToType(ArgTy);
llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
- llvm::Value *Val;
- uint64_t ArgSize = 0;
+
+ Address Val = Address::invalid();
+ CharUnits ArgSize = CharUnits::Zero();
switch (AI.getKind()) {
case ABIArgInfo::Expand:
case ABIArgInfo::InAlloca:
llvm_unreachable("Unsupported ABI kind for va_arg");
case ABIArgInfo::Ignore:
- Val = llvm::UndefValue::get(ArgPtrTy);
- ArgSize = 0;
+ Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
+ ArgSize = CharUnits::Zero();
break;
case ABIArgInfo::Extend:
case ABIArgInfo::Direct:
- Val = Builder.CreatePointerCast(AP, ArgPtrTy);
- ArgSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
- if (ArgSize < 4)
- ArgSize = 4;
+ Val = Builder.CreateBitCast(AP, ArgPtrTy);
+ ArgSize = CharUnits::fromQuantity(
+ getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
+ ArgSize = ArgSize.RoundUpToAlignment(SlotSize);
break;
case ABIArgInfo::Indirect:
- llvm::Value *ArgAddr;
- ArgAddr = Builder.CreateBitCast(AP, llvm::PointerType::getUnqual(ArgPtrTy));
- ArgAddr = Builder.CreateLoad(ArgAddr);
- Val = Builder.CreatePointerCast(ArgAddr, ArgPtrTy);
- ArgSize = 4;
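+    // Indirect arguments occupy a single 4-byte slot holding a pointer to
+    // the real argument.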
+ Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
+ Val = Address(Builder.CreateLoad(Val), TypeAlign);
+ ArgSize = SlotSize;
break;
}
// Increment the VAList.
- if (ArgSize) {
- llvm::Value *APN = Builder.CreateConstGEP1_32(AP, ArgSize);
- Builder.CreateStore(APN, VAListAddrAsBPP);
+ if (!ArgSize.isZero()) {
+ llvm::Value *APN =
+ Builder.CreateConstInBoundsByteGEP(AP.getPointer(), ArgSize);
+ Builder.CreateStore(APN, VAListAddr);
}
+
return Val;
}
diff --git a/clang/test/CodeGen/aarch64-varargs.c b/clang/test/CodeGen/aarch64-varargs.c
index 434337173cf..08f39600c8c 100644
--- a/clang/test/CodeGen/aarch64-varargs.c
+++ b/clang/test/CodeGen/aarch64-varargs.c
@@ -23,21 +23,19 @@ int simple_int(void) {
// CHECK: [[VAARG_IN_REG]]
// CHECK: [[REG_TOP:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 1)
-// CHECK: [[REG_ADDR:%[a-z_0-9]+]] = getelementptr i8, i8* [[REG_TOP]], i32 [[GR_OFFS]]
-// CHECK-BE: [[REG_ADDR_VAL:%[0-9]+]] = ptrtoint i8* [[REG_ADDR]] to i64
-// CHECK-BE: [[REG_ADDR_VAL_ALIGNED:%[a-z_0-9]*]] = add i64 [[REG_ADDR_VAL]], 4
-// CHECK-BE: [[REG_ADDR:%[0-9]+]] = inttoptr i64 [[REG_ADDR_VAL_ALIGNED]] to i8*
-// CHECK: [[FROMREG_ADDR:%[a-z_0-9]+]] = bitcast i8* [[REG_ADDR]] to i32*
+// CHECK: [[REG_ADDR:%[a-z_0-9]+]] = getelementptr inbounds i8, i8* [[REG_TOP]], i32 [[GR_OFFS]]
+// CHECK-BE: [[REG_ADDR_ALIGNED:%[0-9]+]] = getelementptr inbounds i8, i8* [[REG_ADDR]], i64 4
+// CHECK-BE: [[FROMREG_ADDR:%[a-z_0-9]+]] = bitcast i8* [[REG_ADDR_ALIGNED]] to i32*
+// CHECK-LE: [[FROMREG_ADDR:%[a-z_0-9]+]] = bitcast i8* [[REG_ADDR]] to i32*
// CHECK: br label %[[VAARG_END:[a-z._0-9]+]]
// CHECK: [[VAARG_ON_STACK]]
// CHECK: [[STACK:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 0)
-// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr i8, i8* [[STACK]], i32 8
+// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr inbounds i8, i8* [[STACK]], i64 8
// CHECK: store i8* [[NEW_STACK]], i8** getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 0)
-// CHECK-BE: [[STACK_VAL:%[0-9]+]] = ptrtoint i8* [[STACK]] to i64
-// CHECK-BE: [[STACK_VAL_ALIGNED:%[a-z_0-9]*]] = add i64 [[STACK_VAL]], 4
-// CHECK-BE: [[STACK:%[0-9]+]] = inttoptr i64 [[STACK_VAL_ALIGNED]] to i8*
-// CHECK: [[FROMSTACK_ADDR:%[a-z_0-9]+]] = bitcast i8* [[STACK]] to i32*
+// CHECK-BE: [[STACK_ALIGNED:%[a-z_0-9]*]] = getelementptr inbounds i8, i8* [[STACK]], i64 4
+// CHECK-BE: [[FROMSTACK_ADDR:%[a-z_0-9]+]] = bitcast i8* [[STACK_ALIGNED]] to i32*
+// CHECK-LE: [[FROMSTACK_ADDR:%[a-z_0-9]+]] = bitcast i8* [[STACK]] to i32*
// CHECK: br label %[[VAARG_END]]
// CHECK: [[VAARG_END]]
@@ -63,7 +61,7 @@ __int128 aligned_int(void) {
// CHECK: [[VAARG_IN_REG]]
// CHECK: [[REG_TOP:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 1)
-// CHECK: [[REG_ADDR:%[a-z_0-9]+]] = getelementptr i8, i8* [[REG_TOP]], i32 [[ALIGNED_REGOFFS]]
+// CHECK: [[REG_ADDR:%[a-z_0-9]+]] = getelementptr inbounds i8, i8* [[REG_TOP]], i32 [[ALIGNED_REGOFFS]]
// CHECK: [[FROMREG_ADDR:%[a-z_0-9]+]] = bitcast i8* [[REG_ADDR]] to i128*
// CHECK: br label %[[VAARG_END:[a-z._0-9]+]]
@@ -73,7 +71,7 @@ __int128 aligned_int(void) {
// CHECK: [[ALIGN_STACK:%[a-z_0-9]+]] = add i64 [[STACKINT]], 15
// CHECK: [[ALIGNED_STACK_INT:%[a-z_0-9]+]] = and i64 [[ALIGN_STACK]], -16
// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9]+]] = inttoptr i64 [[ALIGNED_STACK_INT]] to i8*
-// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr i8, i8* [[ALIGNED_STACK_PTR]], i32 16
+// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr inbounds i8, i8* [[ALIGNED_STACK_PTR]], i64 16
// CHECK: store i8* [[NEW_STACK]], i8** getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 0)
// CHECK: [[FROMSTACK_ADDR:%[a-z_0-9]+]] = bitcast i8* [[ALIGNED_STACK_PTR]] to i128*
// CHECK: br label %[[VAARG_END]]
@@ -104,14 +102,14 @@ struct bigstruct simple_indirect(void) {
// CHECK: [[VAARG_IN_REG]]
// CHECK: [[REG_TOP:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 1)
-// CHECK: [[REG_ADDR:%[a-z_0-9]+]] = getelementptr i8, i8* [[REG_TOP]], i32 [[GR_OFFS]]
+// CHECK: [[REG_ADDR:%[a-z_0-9]+]] = getelementptr inbounds i8, i8* [[REG_TOP]], i32 [[GR_OFFS]]
// CHECK: [[FROMREG_ADDR:%[a-z_0-9]+]] = bitcast i8* [[REG_ADDR]] to %struct.bigstruct**
// CHECK: br label %[[VAARG_END:[a-z._0-9]+]]
// CHECK: [[VAARG_ON_STACK]]
// CHECK: [[STACK:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 0)
// CHECK-NOT: and i64
-// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr i8, i8* [[STACK]], i32 8
+// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr inbounds i8, i8* [[STACK]], i64 8
// CHECK: store i8* [[NEW_STACK]], i8** getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 0)
// CHECK: [[FROMSTACK_ADDR:%[a-z_0-9]+]] = bitcast i8* [[STACK]] to %struct.bigstruct**
// CHECK: br label %[[VAARG_END]]
@@ -141,13 +139,13 @@ struct aligned_bigstruct simple_aligned_indirect(void) {
// CHECK: [[VAARG_IN_REG]]
// CHECK: [[REG_TOP:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 1)
-// CHECK: [[REG_ADDR:%[a-z_0-9]+]] = getelementptr i8, i8* [[REG_TOP]], i32 [[GR_OFFS]]
+// CHECK: [[REG_ADDR:%[a-z_0-9]+]] = getelementptr inbounds i8, i8* [[REG_TOP]], i32 [[GR_OFFS]]
// CHECK: [[FROMREG_ADDR:%[a-z_0-9]+]] = bitcast i8* [[REG_ADDR]] to %struct.aligned_bigstruct**
// CHECK: br label %[[VAARG_END:[a-z._0-9]+]]
// CHECK: [[VAARG_ON_STACK]]
// CHECK: [[STACK:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 0)
-// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr i8, i8* [[STACK]], i32 8
+// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr inbounds i8, i8* [[STACK]], i64 8
// CHECK: store i8* [[NEW_STACK]], i8** getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 0)
// CHECK: [[FROMSTACK_ADDR:%[a-z_0-9]+]] = bitcast i8* [[STACK]] to %struct.aligned_bigstruct**
// CHECK: br label %[[VAARG_END]]
@@ -172,16 +170,15 @@ double simple_double(void) {
// CHECK: [[VAARG_IN_REG]]
// CHECK: [[REG_TOP:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 2)
-// CHECK: [[REG_ADDR:%[a-z_0-9]+]] = getelementptr i8, i8* [[REG_TOP]], i32 [[VR_OFFS]]
-// CHECK-BE: [[REG_ADDR_VAL:%[0-9]+]] = ptrtoint i8* [[REG_ADDR]] to i64
-// CHECK-BE: [[REG_ADDR_VAL_ALIGNED:%[a-z_0-9]*]] = add i64 [[REG_ADDR_VAL]], 8
-// CHECK-BE: [[REG_ADDR:%[0-9]+]] = inttoptr i64 [[REG_ADDR_VAL_ALIGNED]] to i8*
-// CHECK: [[FROMREG_ADDR:%[a-z_0-9]+]] = bitcast i8* [[REG_ADDR]] to double*
+// CHECK: [[REG_ADDR:%[a-z_0-9]+]] = getelementptr inbounds i8, i8* [[REG_TOP]], i32 [[VR_OFFS]]
+// CHECK-BE: [[REG_ADDR_ALIGNED:%[a-z_0-9]*]] = getelementptr inbounds i8, i8* [[REG_ADDR]], i64 8
+// CHECK-BE: [[FROMREG_ADDR:%[a-z_0-9]+]] = bitcast i8* [[REG_ADDR_ALIGNED]] to double*
+// CHECK-LE: [[FROMREG_ADDR:%[a-z_0-9]+]] = bitcast i8* [[REG_ADDR]] to double*
// CHECK: br label %[[VAARG_END:[a-z._0-9]+]]
// CHECK: [[VAARG_ON_STACK]]
// CHECK: [[STACK:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 0)
-// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr i8, i8* [[STACK]], i32 8
+// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr inbounds i8, i8* [[STACK]], i64 8
// CHECK: store i8* [[NEW_STACK]], i8** getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 0)
// CHECK: [[FROMSTACK_ADDR:%[a-z_0-9]+]] = bitcast i8* [[STACK]] to double*
// CHECK: br label %[[VAARG_END]]
@@ -211,17 +208,17 @@ struct hfa simple_hfa(void) {
// CHECK: [[VAARG_IN_REG]]
// CHECK: [[REG_TOP:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 2)
-// CHECK: [[FIRST_REG:%[a-z_0-9]+]] = getelementptr i8, i8* [[REG_TOP]], i32 [[VR_OFFS]]
-// CHECK-LE: [[EL_ADDR:%[a-z_0-9]+]] = getelementptr i8, i8* [[FIRST_REG]], i32 0
-// CHECK-BE: [[EL_ADDR:%[a-z_0-9]+]] = getelementptr i8, i8* [[FIRST_REG]], i32 12
+// CHECK: [[FIRST_REG:%[a-z_0-9]+]] = getelementptr inbounds i8, i8* [[REG_TOP]], i32 [[VR_OFFS]]
+// CHECK-LE: [[EL_ADDR:%[a-z_0-9]+]] = getelementptr inbounds i8, i8* [[FIRST_REG]], i64 0
+// CHECK-BE: [[EL_ADDR:%[a-z_0-9]+]] = getelementptr inbounds i8, i8* [[FIRST_REG]], i64 12
// CHECK: [[EL_TYPED:%[a-z_0-9]+]] = bitcast i8* [[EL_ADDR]] to float*
-// CHECK: [[EL_TMPADDR:%[a-z_0-9]+]] = getelementptr inbounds [2 x float], [2 x float]* %[[TMP_HFA:[a-z_.0-9]+]], i32 0, i32 0
+// CHECK: [[EL_TMPADDR:%[a-z_0-9]+]] = getelementptr inbounds [2 x float], [2 x float]* %[[TMP_HFA:[a-z_.0-9]+]], i64 0, i64 0
// CHECK: [[EL:%[a-z_0-9]+]] = load float, float* [[EL_TYPED]]
// CHECK: store float [[EL]], float* [[EL_TMPADDR]]
-// CHECK-LE: [[EL_ADDR:%[a-z_0-9]+]] = getelementptr i8, i8* [[FIRST_REG]], i32 16
-// CHECK-BE: [[EL_ADDR:%[a-z_0-9]+]] = getelementptr i8, i8* [[FIRST_REG]], i32 28
+// CHECK-LE: [[EL_ADDR:%[a-z_0-9]+]] = getelementptr inbounds i8, i8* [[FIRST_REG]], i64 16
+// CHECK-BE: [[EL_ADDR:%[a-z_0-9]+]] = getelementptr inbounds i8, i8* [[FIRST_REG]], i64 28
// CHECK: [[EL_TYPED:%[a-z_0-9]+]] = bitcast i8* [[EL_ADDR]] to float*
-// CHECK: [[EL_TMPADDR:%[a-z_0-9]+]] = getelementptr inbounds [2 x float], [2 x float]* %[[TMP_HFA]], i32 0, i32 1
+// CHECK: [[EL_TMPADDR:%[a-z_0-9]+]] = getelementptr inbounds [2 x float], [2 x float]* %[[TMP_HFA]], i64 0, i64 1
// CHECK: [[EL:%[a-z_0-9]+]] = load float, float* [[EL_TYPED]]
// CHECK: store float [[EL]], float* [[EL_TMPADDR]]
// CHECK: [[FROMREG_ADDR:%[a-z_0-9]+]] = bitcast [2 x float]* %[[TMP_HFA]] to %struct.hfa*
@@ -229,7 +226,7 @@ struct hfa simple_hfa(void) {
// CHECK: [[VAARG_ON_STACK]]
// CHECK: [[STACK:%[a-z_0-9]+]] = load i8*, i8** getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 0)
-// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr i8, i8* [[STACK]], i32 8
+// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr inbounds i8, i8* [[STACK]], i64 8
// CHECK: store i8* [[NEW_STACK]], i8** getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 0)
// CHECK: [[FROMSTACK_ADDR:%[a-z_0-9]+]] = bitcast i8* [[STACK]] to %struct.hfa*
// CHECK: br label %[[VAARG_END]]
diff --git a/clang/test/CodeGen/arm-abi-vector.c b/clang/test/CodeGen/arm-abi-vector.c
index 88bf593ed29..9920332e48f 100644
--- a/clang/test/CodeGen/arm-abi-vector.c
+++ b/clang/test/CodeGen/arm-abi-vector.c
@@ -14,18 +14,20 @@ typedef __attribute__(( ext_vector_type(5) )) short __short5;
// Passing legal vector types as varargs.
double varargs_vec_2i(int fixed, ...) {
// CHECK: varargs_vec_2i
-// CHECK: alloca <2 x i32>, align 8
-// CHECK: [[ALIGN:%.*]] = and i32 [[VAR:%.*]], -8
+// CHECK: [[VAR:%.*]] = alloca <2 x i32>, align 8
+// CHECK: [[ALIGN:%.*]] = and i32 {{%.*}}, -8
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8*
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_ALIGN]], i32 8
-// CHECK: bitcast i8* [[AP_ALIGN]] to <2 x i32>*
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i32 8
+// CHECK: [[AP_CAST:%.*]] = bitcast i8* [[AP_ALIGN]] to <2 x i32>*
+// CHECK: [[VEC:%.*]] = load <2 x i32>, <2 x i32>* [[AP_CAST]], align 8
+// CHECK: store <2 x i32> [[VEC]], <2 x i32>* [[VAR]], align 8
// APCS-GNU: varargs_vec_2i
-// APCS-GNU: alloca <2 x i32>, align 8
-// APCS-GNU: [[VAR_ALIGN:%.*]] = alloca <2 x i32>
-// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8, i8* {{%.*}}, i32 8
-// APCS-GNU: bitcast <2 x i32>* [[VAR_ALIGN]] to i8*
-// APCS-GNU: call void @llvm.memcpy
-// APCS-GNU: load <2 x i32>, <2 x i32>* [[VAR_ALIGN]]
+// APCS-GNU: [[VAR:%.*]] = alloca <2 x i32>, align 8
+// APCS-GNU: [[AP:%.*]] = load i8*,
+// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP]], i32 8
+// APCS-GNU: [[AP_CAST:%.*]] = bitcast i8* [[AP]] to <2 x i32>*
+// APCS-GNU: [[VEC:%.*]] = load <2 x i32>, <2 x i32>* [[AP_CAST]], align 4
+// APCS-GNU: store <2 x i32> [[VEC]], <2 x i32>* [[VAR]], align 8
va_list ap;
double sum = fixed;
va_start(ap, fixed);
@@ -46,11 +48,11 @@ double test_2i(__int2 *in) {
double varargs_vec_3c(int fixed, ...) {
// CHECK: varargs_vec_3c
// CHECK: alloca <3 x i8>, align 4
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP:%.*]], i32 4
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP:%.*]], i32 4
// CHECK: bitcast i8* [[AP]] to <3 x i8>*
// APCS-GNU: varargs_vec_3c
// APCS-GNU: alloca <3 x i8>, align 4
-// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP:%.*]], i32 4
+// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP:%.*]], i32 4
// APCS-GNU: bitcast i8* [[AP]] to <3 x i8>*
va_list ap;
double sum = fixed;
@@ -71,18 +73,20 @@ double test_3c(__char3 *in) {
double varargs_vec_5c(int fixed, ...) {
// CHECK: varargs_vec_5c
-// CHECK: alloca <5 x i8>, align 8
+// CHECK: [[VAR:%.*]] = alloca <5 x i8>, align 8
// CHECK: [[ALIGN:%.*]] = and i32 {{%.*}}, -8
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8*
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_ALIGN]], i32 8
-// CHECK: bitcast i8* [[AP_ALIGN]] to <5 x i8>*
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i32 8
+// CHECK: [[AP_CAST:%.*]] = bitcast i8* [[AP_ALIGN]] to <5 x i8>*
+// CHECK: [[VEC:%.*]] = load <5 x i8>, <5 x i8>* [[AP_CAST]], align 8
+// CHECK: store <5 x i8> [[VEC]], <5 x i8>* [[VAR]], align 8
// APCS-GNU: varargs_vec_5c
-// APCS-GNU: alloca <5 x i8>, align 8
-// APCS-GNU: [[VAR_ALIGN:%.*]] = alloca <5 x i8>
-// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8, i8* {{%.*}}, i32 8
-// APCS-GNU: bitcast <5 x i8>* [[VAR_ALIGN]] to i8*
-// APCS-GNU: call void @llvm.memcpy
-// APCS-GNU: load <5 x i8>, <5 x i8>* [[VAR_ALIGN]]
+// APCS-GNU: [[VAR:%.*]] = alloca <5 x i8>, align 8
+// APCS-GNU: [[AP:%.*]] = load i8*,
+// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP]], i32 8
+// APCS-GNU: [[AP_CAST:%.*]] = bitcast i8* [[AP]] to <5 x i8>*
+// APCS-GNU: [[VEC:%.*]] = load <5 x i8>, <5 x i8>* [[AP_CAST]], align 4
+// APCS-GNU: store <5 x i8> [[VEC]], <5 x i8>* [[VAR]], align 8
va_list ap;
double sum = fixed;
va_start(ap, fixed);
@@ -102,21 +106,20 @@ double test_5c(__char5 *in) {
double varargs_vec_9c(int fixed, ...) {
// CHECK: varargs_vec_9c
-// CHECK: alloca <9 x i8>, align 16
-// CHECK: [[VAR_ALIGN:%.*]] = alloca <9 x i8>
+// CHECK: [[VAR:%.*]] = alloca <9 x i8>, align 16
// CHECK: [[ALIGN:%.*]] = and i32 {{%.*}}, -8
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8*
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_ALIGN]], i32 16
-// CHECK: bitcast <9 x i8>* [[VAR_ALIGN]] to i8*
-// CHECK: call void @llvm.memcpy
-// CHECK: load <9 x i8>, <9 x i8>* [[VAR_ALIGN]]
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i32 16
+// CHECK: [[AP_CAST:%.*]] = bitcast i8* [[AP_ALIGN]] to <9 x i8>*
+// CHECK: [[T0:%.*]] = load <9 x i8>, <9 x i8>* [[AP_CAST]], align 8
+// CHECK: store <9 x i8> [[T0]], <9 x i8>* [[VAR]], align 16
// APCS-GNU: varargs_vec_9c
-// APCS-GNU: alloca <9 x i8>, align 16
-// APCS-GNU: [[VAR_ALIGN:%.*]] = alloca <9 x i8>
-// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8, i8* {{%.*}}, i32 16
-// APCS-GNU: bitcast <9 x i8>* [[VAR_ALIGN]] to i8*
-// APCS-GNU: call void @llvm.memcpy
-// APCS-GNU: load <9 x i8>, <9 x i8>* [[VAR_ALIGN]]
+// APCS-GNU: [[VAR:%.*]] = alloca <9 x i8>, align 16
+// APCS-GNU: [[AP:%.*]] = load i8*,
+// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP]], i32 16
+// APCS-GNU: [[AP_CAST:%.*]] = bitcast i8* [[AP]] to <9 x i8>*
+// APCS-GNU: [[VEC:%.*]] = load <9 x i8>, <9 x i8>* [[AP_CAST]], align 4
+// APCS-GNU: store <9 x i8> [[VEC]], <9 x i8>* [[VAR]], align 16
va_list ap;
double sum = fixed;
va_start(ap, fixed);
@@ -136,15 +139,13 @@ double test_9c(__char9 *in) {
double varargs_vec_19c(int fixed, ...) {
// CHECK: varargs_vec_19c
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP:%.*]], i32 4
-// CHECK: [[VAR:%.*]] = bitcast i8* [[AP]] to i8**
-// CHECK: [[VAR2:%.*]] = load i8*, i8** [[VAR]]
-// CHECK: bitcast i8* [[VAR2]] to <19 x i8>*
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP:%.*]], i32 4
+// CHECK: [[VAR:%.*]] = bitcast i8* [[AP]] to <19 x i8>**
+// CHECK: [[VAR2:%.*]] = load <19 x i8>*, <19 x i8>** [[VAR]]
// APCS-GNU: varargs_vec_19c
-// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP:%.*]], i32 4
-// APCS-GNU: [[VAR:%.*]] = bitcast i8* [[AP]] to i8**
-// APCS-GNU: [[VAR2:%.*]] = load i8*, i8** [[VAR]]
-// APCS-GNU: bitcast i8* [[VAR2]] to <19 x i8>*
+// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP:%.*]], i32 4
+// APCS-GNU: [[VAR:%.*]] = bitcast i8* [[AP]] to <19 x i8>**
+// APCS-GNU: [[VAR2:%.*]] = load <19 x i8>*, <19 x i8>** [[VAR]]
va_list ap;
double sum = fixed;
va_start(ap, fixed);
@@ -167,15 +168,14 @@ double varargs_vec_3s(int fixed, ...) {
// CHECK: alloca <3 x i16>, align 8
// CHECK: [[ALIGN:%.*]] = and i32 {{%.*}}, -8
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8*
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_ALIGN]], i32 8
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i32 8
// CHECK: bitcast i8* [[AP_ALIGN]] to <3 x i16>*
// APCS-GNU: varargs_vec_3s
-// APCS-GNU: alloca <3 x i16>, align 8
-// APCS-GNU: [[VAR_ALIGN:%.*]] = alloca <3 x i16>
-// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8, i8* {{%.*}}, i32 8
-// APCS-GNU: bitcast <3 x i16>* [[VAR_ALIGN]] to i8*
-// APCS-GNU: call void @llvm.memcpy
-// APCS-GNU: load <3 x i16>, <3 x i16>* [[VAR_ALIGN]]
+// APCS-GNU: [[VAR:%.*]] = alloca <3 x i16>, align 8
+// APCS-GNU: [[AP:%.*]] = load i8*,
+// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP]], i32 8
+// APCS-GNU: [[AP_CAST:%.*]] = bitcast i8* [[AP]] to <3 x i16>*
+// APCS-GNU: [[VEC:%.*]] = load <3 x i16>, <3 x i16>* [[AP_CAST]], align 4
va_list ap;
double sum = fixed;
va_start(ap, fixed);
@@ -195,21 +195,19 @@ double test_3s(__short3 *in) {
double varargs_vec_5s(int fixed, ...) {
// CHECK: varargs_vec_5s
-// CHECK: alloca <5 x i16>, align 16
-// CHECK: [[VAR_ALIGN:%.*]] = alloca <5 x i16>
+// CHECK: [[VAR_ALIGN:%.*]] = alloca <5 x i16>, align 16
// CHECK: [[ALIGN:%.*]] = and i32 {{%.*}}, -8
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8*
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_ALIGN]], i32 16
-// CHECK: bitcast <5 x i16>* [[VAR_ALIGN]] to i8*
-// CHECK: call void @llvm.memcpy
-// CHECK: load <5 x i16>, <5 x i16>* [[VAR_ALIGN]]
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i32 16
+// CHECK: [[AP_CAST:%.*]] = bitcast i8* [[AP_ALIGN]] to <5 x i16>*
+// CHECK: [[VEC:%.*]] = load <5 x i16>, <5 x i16>* [[AP_CAST]], align 8
+// CHECK: store <5 x i16> [[VEC]], <5 x i16>* [[VAR_ALIGN]], align 16
// APCS-GNU: varargs_vec_5s
-// APCS-GNU: alloca <5 x i16>, align 16
-// APCS-GNU: [[VAR_ALIGN:%.*]] = alloca <5 x i16>
-// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8, i8* {{%.*}}, i32 16
-// APCS-GNU: bitcast <5 x i16>* [[VAR_ALIGN]] to i8*
-// APCS-GNU: call void @llvm.memcpy
-// APCS-GNU: load <5 x i16>, <5 x i16>* [[VAR_ALIGN]]
+// APCS-GNU: [[VAR:%.*]] = alloca <5 x i16>, align 16
+// APCS-GNU: [[AP:%.*]] = load i8*,
+// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP]], i32 16
+// APCS-GNU: [[AP_CAST:%.*]] = bitcast i8* [[AP]] to <5 x i16>*
+// APCS-GNU: [[VEC:%.*]] = load <5 x i16>, <5 x i16>* [[AP_CAST]], align 4
va_list ap;
double sum = fixed;
va_start(ap, fixed);
@@ -238,11 +236,11 @@ double varargs_struct(int fixed, ...) {
// CHECK: varargs_struct
// CHECK: [[ALIGN:%.*]] = and i32 {{%.*}}, -8
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8*
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_ALIGN]], i32 16
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i32 16
// CHECK: bitcast i8* [[AP_ALIGN]] to %struct.StructWithVec*
// APCS-GNU: varargs_struct
// APCS-GNU: [[VAR_ALIGN:%.*]] = alloca %struct.StructWithVec
-// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8, i8* {{%.*}}, i32 16
+// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* {{%.*}}, i32 16
// APCS-GNU: bitcast %struct.StructWithVec* [[VAR_ALIGN]] to i8*
// APCS-GNU: call void @llvm.memcpy
va_list ap;
diff --git a/clang/test/CodeGen/arm-arguments.c b/clang/test/CodeGen/arm-arguments.c
index b671626939c..ec3e1734b0c 100644
--- a/clang/test/CodeGen/arm-arguments.c
+++ b/clang/test/CodeGen/arm-arguments.c
@@ -159,13 +159,13 @@ struct s30 f30() {}
struct s31 { char x; };
void f31(struct s31 s) { }
// AAPCS: @f31([1 x i32] %s.coerce)
-// AAPCS: %s = alloca %struct.s31, align 4
-// AAPCS: alloca [1 x i32]
-// AAPCS: store [1 x i32] %s.coerce, [1 x i32]*
+// AAPCS: %s = alloca %struct.s31, align 1
+// AAPCS: [[TEMP:%.*]] = alloca [1 x i32], align 4
+// AAPCS: store [1 x i32] %s.coerce, [1 x i32]* [[TEMP]], align 4
// APCS-GNU: @f31([1 x i32] %s.coerce)
-// APCS-GNU: %s = alloca %struct.s31, align 4
-// APCS-GNU: alloca [1 x i32]
-// APCS-GNU: store [1 x i32] %s.coerce, [1 x i32]*
+// APCS-GNU: %s = alloca %struct.s31, align 1
+// APCS-GNU: [[TEMP:%.*]] = alloca [1 x i32], align 4
+// APCS-GNU: store [1 x i32] %s.coerce, [1 x i32]* [[TEMP]], align 4
// PR13562
struct s32 { double x; };
diff --git a/clang/test/CodeGen/arm64-abi-vector.c b/clang/test/CodeGen/arm64-abi-vector.c
index ebf7f511265..29aeadb66da 100644
--- a/clang/test/CodeGen/arm64-abi-vector.c
+++ b/clang/test/CodeGen/arm64-abi-vector.c
@@ -16,7 +16,7 @@ typedef __attribute__(( ext_vector_type(3) )) double __double3;
double varargs_vec_3c(int fixed, ...) {
// CHECK: varargs_vec_3c
// CHECK: alloca <3 x i8>, align 4
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_CUR:%.*]], i32 8
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: bitcast i8* [[AP_CUR]] to <3 x i8>*
va_list ap;
double sum = fixed;
@@ -36,7 +36,7 @@ double test_3c(__char3 *in) {
double varargs_vec_4c(int fixed, ...) {
// CHECK: varargs_vec_4c
// CHECK: alloca <4 x i8>, align 4
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_CUR:%.*]], i32 8
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: bitcast i8* [[AP_CUR]] to <4 x i8>*
va_list ap;
double sum = fixed;
@@ -56,7 +56,7 @@ double test_4c(__char4 *in) {
double varargs_vec_5c(int fixed, ...) {
// CHECK: varargs_vec_5c
// CHECK: alloca <5 x i8>, align 8
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_CUR:%.*]], i32 8
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: bitcast i8* [[AP_CUR]] to <5 x i8>*
va_list ap;
double sum = fixed;
@@ -78,7 +78,7 @@ double varargs_vec_9c(int fixed, ...) {
// CHECK: alloca <9 x i8>, align 16
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_ALIGN]], i32 16
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i64 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <9 x i8>*
va_list ap;
double sum = fixed;
@@ -97,10 +97,9 @@ double test_9c(__char9 *in) {
double varargs_vec_19c(int fixed, ...) {
// CHECK: varargs_vec_19c
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_CUR:%.*]], i32 8
-// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8**
-// CHECK: [[VAR2:%.*]] = load i8*, i8** [[VAR]]
-// CHECK: bitcast i8* [[VAR2]] to <19 x i8>*
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
+// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to <19 x i8>**
+// CHECK: [[VAR2:%.*]] = load <19 x i8>*, <19 x i8>** [[VAR]]
va_list ap;
double sum = fixed;
va_start(ap, fixed);
@@ -119,7 +118,7 @@ double test_19c(__char19 *in) {
double varargs_vec_3s(int fixed, ...) {
// CHECK: varargs_vec_3s
// CHECK: alloca <3 x i16>, align 8
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_CUR:%.*]], i32 8
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: bitcast i8* [[AP_CUR]] to <3 x i16>*
va_list ap;
double sum = fixed;
@@ -141,7 +140,7 @@ double varargs_vec_5s(int fixed, ...) {
// CHECK: alloca <5 x i16>, align 16
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_ALIGN]], i32 16
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i64 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <5 x i16>*
va_list ap;
double sum = fixed;
@@ -163,7 +162,7 @@ double varargs_vec_3i(int fixed, ...) {
// CHECK: alloca <3 x i32>, align 16
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_ALIGN]], i32 16
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i64 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <3 x i32>*
va_list ap;
double sum = fixed;
@@ -183,10 +182,9 @@ double test_3i(__int3 *in) {
double varargs_vec_5i(int fixed, ...) {
// CHECK: varargs_vec_5i
// CHECK: alloca <5 x i32>, align 16
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_CUR:%.*]], i32 8
-// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8**
-// CHECK: [[VAR2:%.*]] = load i8*, i8** [[VAR]]
-// CHECK: bitcast i8* [[VAR2]] to <5 x i32>*
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
+// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to <5 x i32>**
+// CHECK: [[VAR2:%.*]] = load <5 x i32>*, <5 x i32>** [[VAR]]
va_list ap;
double sum = fixed;
va_start(ap, fixed);
@@ -205,10 +203,9 @@ double test_5i(__int5 *in) {
double varargs_vec_3d(int fixed, ...) {
// CHECK: varargs_vec_3d
// CHECK: alloca <3 x double>, align 16
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_CUR:%.*]], i32 8
-// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8**
-// CHECK: [[VAR2:%.*]] = load i8*, i8** [[VAR]]
-// CHECK: bitcast i8* [[VAR2]] to <3 x double>*
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
+// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to <3 x double>**
+// CHECK: [[VAR2:%.*]] = load <3 x double>*, <3 x double>** [[VAR]]
va_list ap;
double sum = fixed;
va_start(ap, fixed);
@@ -230,52 +227,49 @@ double varargs_vec(int fixed, ...) {
double sum = fixed;
va_start(ap, fixed);
__char3 c3 = va_arg(ap, __char3);
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_CUR:%.*]], i32 8
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: bitcast i8* [[AP_CUR]] to <3 x i8>*
sum = sum + c3.x + c3.y;
__char5 c5 = va_arg(ap, __char5);
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_CUR:%.*]], i32 8
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: bitcast i8* [[AP_CUR]] to <5 x i8>*
sum = sum + c5.x + c5.y;
__char9 c9 = va_arg(ap, __char9);
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_ALIGN]], i32 16
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i64 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <9 x i8>*
sum = sum + c9.x + c9.y;
__char19 c19 = va_arg(ap, __char19);
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_CUR:%.*]], i32 8
-// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8**
-// CHECK: [[VAR2:%.*]] = load i8*, i8** [[VAR]]
-// CHECK: bitcast i8* [[VAR2]] to <19 x i8>*
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
+// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to <19 x i8>**
+// CHECK: [[VAR2:%.*]] = load <19 x i8>*, <19 x i8>** [[VAR]]
sum = sum + c19.x + c19.y;
__short3 s3 = va_arg(ap, __short3);
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_CUR:%.*]], i32 8
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
// CHECK: bitcast i8* [[AP_CUR]] to <3 x i16>*
sum = sum + s3.x + s3.y;
__short5 s5 = va_arg(ap, __short5);
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_ALIGN]], i32 16
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i64 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <5 x i16>*
sum = sum + s5.x + s5.y;
__int3 i3 = va_arg(ap, __int3);
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_ALIGN]], i32 16
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_ALIGN]], i64 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <3 x i32>*
sum = sum + i3.x + i3.y;
__int5 i5 = va_arg(ap, __int5);
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_CUR:%.*]], i32 8
-// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8**
-// CHECK: [[VAR2:%.*]] = load i8*, i8** [[VAR]]
-// CHECK: bitcast i8* [[VAR2]] to <5 x i32>*
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
+// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to <5 x i32>**
+// CHECK: [[VAR2:%.*]] = load <5 x i32>*, <5 x i32>** [[VAR]]
sum = sum + i5.x + i5.y;
__double3 d3 = va_arg(ap, __double3);
-// CHECK: [[AP_NEXT:%.*]] = getelementptr i8, i8* [[AP_CUR:%.*]], i32 8
-// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8**
-// CHECK: [[VAR2:%.*]] = load i8*, i8** [[VAR]]
-// CHECK: bitcast i8* [[VAR2]] to <3 x double>*
+// CHECK: [[AP_NEXT:%.*]] = getelementptr inbounds i8, i8* [[AP_CUR:%.*]], i64 8
+// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to <3 x double>**
+// CHECK: [[VAR2:%.*]] = load <3 x double>*, <3 x double>** [[VAR]]
sum = sum + d3.x + d3.y;
va_end(ap);
return sum;
diff --git a/clang/test/CodeGen/arm64-arguments.c b/clang/test/CodeGen/arm64-arguments.c
index 8b551c4af47..93a1a198955 100644
--- a/clang/test/CodeGen/arm64-arguments.c
+++ b/clang/test/CodeGen/arm64-arguments.c
@@ -117,7 +117,7 @@ struct s30 f30() {}
struct s31 { char x; };
void f31(struct s31 s) { }
// CHECK: define void @f31(i64 %s.coerce)
-// CHECK: %s = alloca %struct.s31, align 8
+// CHECK: %s = alloca %struct.s31, align 1
// CHECK: trunc i64 %s.coerce to i8
// CHECK: store i8 %{{.*}},
@@ -273,10 +273,10 @@ typedef struct s38 s38_no_align;
__attribute__ ((noinline))
int f38(int i, s38_no_align s1, s38_no_align s2) {
// CHECK: define i32 @f38(i32 %i, i64 %s1.coerce, i64 %s2.coerce)
-// CHECK: %s1 = alloca %struct.s38, align 8
-// CHECK: %s2 = alloca %struct.s38, align 8
-// CHECK: store i64 %s1.coerce, i64* %{{.*}}, align 8
-// CHECK: store i64 %s2.coerce, i64* %{{.*}}, align 8
+// CHECK: %s1 = alloca %struct.s38, align 4
+// CHECK: %s2 = alloca %struct.s38, align 4
+// CHECK: store i64 %s1.coerce, i64* %{{.*}}, align 4
+// CHECK: store i64 %s2.coerce, i64* %{{.*}}, align 4
// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s1, i32 0, i32 1
@@ -297,10 +297,10 @@ __attribute__ ((noinline))
int f38_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8,
int i9, s38_no_align s1, s38_no_align s2) {
// CHECK: define i32 @f38_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i64 %s1.coerce, i64 %s2.coerce)
-// CHECK: %s1 = alloca %struct.s38, align 8
-// CHECK: %s2 = alloca %struct.s38, align 8
-// CHECK: store i64 %s1.coerce, i64* %{{.*}}, align 8
-// CHECK: store i64 %s2.coerce, i64* %{{.*}}, align 8
+// CHECK: %s1 = alloca %struct.s38, align 4
+// CHECK: %s2 = alloca %struct.s38, align 4
+// CHECK: store i64 %s1.coerce, i64* %{{.*}}, align 4
+// CHECK: store i64 %s2.coerce, i64* %{{.*}}, align 4
// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s38, %struct.s38* %s1, i32 0, i32 1
@@ -381,10 +381,10 @@ typedef struct s40 s40_no_align;
__attribute__ ((noinline))
int f40(int i, s40_no_align s1, s40_no_align s2) {
// CHECK: define i32 @f40(i32 %i, [2 x i64] %s1.coerce, [2 x i64] %s2.coerce)
-// CHECK: %s1 = alloca %struct.s40, align 8
-// CHECK: %s2 = alloca %struct.s40, align 8
-// CHECK: store [2 x i64] %s1.coerce, [2 x i64]* %{{.*}}, align 8
-// CHECK: store [2 x i64] %s2.coerce, [2 x i64]* %{{.*}}, align 8
+// CHECK: %s1 = alloca %struct.s40, align 4
+// CHECK: %s2 = alloca %struct.s40, align 4
+// CHECK: store [2 x i64] %s1.coerce, [2 x i64]* %{{.*}}, align 4
+// CHECK: store [2 x i64] %s2.coerce, [2 x i64]* %{{.*}}, align 4
// CHECK: getelementptr inbounds %struct.s40, %struct.s40* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s40, %struct.s40* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s40, %struct.s40* %s1, i32 0, i32 1
@@ -405,10 +405,10 @@ __attribute__ ((noinline))
int f40_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8,
int i9, s40_no_align s1, s40_no_align s2) {
// CHECK: define i32 @f40_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, [2 x i64] %s1.coerce, [2 x i64] %s2.coerce)
-// CHECK: %s1 = alloca %struct.s40, align 8
-// CHECK: %s2 = alloca %struct.s40, align 8
-// CHECK: store [2 x i64] %s1.coerce, [2 x i64]* %{{.*}}, align 8
-// CHECK: store [2 x i64] %s2.coerce, [2 x i64]* %{{.*}}, align 8
+// CHECK: %s1 = alloca %struct.s40, align 4
+// CHECK: %s2 = alloca %struct.s40, align 4
+// CHECK: store [2 x i64] %s1.coerce, [2 x i64]* %{{.*}}, align 4
+// CHECK: store [2 x i64] %s2.coerce, [2 x i64]* %{{.*}}, align 4
// CHECK: getelementptr inbounds %struct.s40, %struct.s40* %s1, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s40, %struct.s40* %s2, i32 0, i32 0
// CHECK: getelementptr inbounds %struct.s40, %struct.s40* %s1, i32 0, i32 1
@@ -629,7 +629,7 @@ float test_hfa(int n, ...) {
// CHECK: [[CURLIST:%.*]] = load i8*, i8** [[THELIST]]
// HFA is not indirect, so occupies its full 16 bytes on the stack.
-// CHECK: [[NEXTLIST:%.*]] = getelementptr i8, i8* [[CURLIST]], i32 16
+// CHECK: [[NEXTLIST:%.*]] = getelementptr inbounds i8, i8* [[CURLIST]], i64 16
// CHECK: store i8* [[NEXTLIST]], i8** [[THELIST]]
// CHECK: bitcast i8* [[CURLIST]] to %struct.HFA*
@@ -656,12 +656,11 @@ float test_toobig_hfa(int n, ...) {
// TooBigHFA is not actually an HFA, so gets passed indirectly. Only 8 bytes
// of stack consumed.
-// CHECK: [[NEXTLIST:%.*]] = getelementptr i8, i8* [[CURLIST]], i32 8
+// CHECK: [[NEXTLIST:%.*]] = getelementptr inbounds i8, i8* [[CURLIST]], i64 8
// CHECK: store i8* [[NEXTLIST]], i8** [[THELIST]]
-// CHECK: [[HFAPTRPTR:%.*]] = bitcast i8* [[CURLIST]] to i8**
-// CHECK: [[HFAPTR:%.*]] = load i8*, i8** [[HFAPTRPTR]]
-// CHECK: bitcast i8* [[HFAPTR]] to %struct.TooBigHFA*
+// CHECK: [[HFAPTRPTR:%.*]] = bitcast i8* [[CURLIST]] to %struct.TooBigHFA**
+// CHECK: [[HFAPTR:%.*]] = load %struct.TooBigHFA*, %struct.TooBigHFA** [[HFAPTRPTR]]
__builtin_va_list thelist;
__builtin_va_start(thelist, n);
struct TooBigHFA h = __builtin_va_arg(thelist, struct TooBigHFA);
@@ -679,12 +678,12 @@ int32x4_t test_hva(int n, ...) {
 // HVA is not indirect, so occupies its full 16 bytes on the stack, but it
// must be properly aligned.
-// CHECK: [[ALIGN0:%.*]] = getelementptr i8, i8* [[CURLIST]], i32 15
-// CHECK: [[ALIGN1:%.*]] = ptrtoint i8* [[ALIGN0]] to i64
+// CHECK: [[ALIGN0:%.*]] = ptrtoint i8* [[CURLIST]] to i64
+// CHECK: [[ALIGN1:%.*]] = add i64 [[ALIGN0]], 15
// CHECK: [[ALIGN2:%.*]] = and i64 [[ALIGN1]], -16
// CHECK: [[ALIGNED_LIST:%.*]] = inttoptr i64 [[ALIGN2]] to i8*
-// CHECK: [[NEXTLIST:%.*]] = getelementptr i8, i8* [[ALIGNED_LIST]], i32 32
+// CHECK: [[NEXTLIST:%.*]] = getelementptr inbounds i8, i8* [[ALIGNED_LIST]], i64 32
// CHECK: store i8* [[NEXTLIST]], i8** [[THELIST]]
// CHECK: bitcast i8* [[ALIGNED_LIST]] to %struct.HVA*
@@ -705,12 +704,11 @@ int32x4_t test_toobig_hva(int n, ...) {
// TooBigHVA is not actually an HVA, so gets passed indirectly. Only 8 bytes
// of stack consumed.
-// CHECK: [[NEXTLIST:%.*]] = getelementptr i8, i8* [[CURLIST]], i32 8
+// CHECK: [[NEXTLIST:%.*]] = getelementptr inbounds i8, i8* [[CURLIST]], i64 8
// CHECK: store i8* [[NEXTLIST]], i8** [[THELIST]]
-// CHECK: [[HVAPTRPTR:%.*]] = bitcast i8* [[CURLIST]] to i8**
-// CHECK: [[HVAPTR:%.*]] = load i8*, i8** [[HVAPTRPTR]]
-// CHECK: bitcast i8* [[HVAPTR]] to %struct.TooBigHVA*
+// CHECK: [[HVAPTRPTR:%.*]] = bitcast i8* [[CURLIST]] to %struct.TooBigHVA**
+// CHECK: [[HVAPTR:%.*]] = load %struct.TooBigHVA*, %struct.TooBigHVA** [[HVAPTRPTR]]
__builtin_va_list thelist;
__builtin_va_start(thelist, n);
struct TooBigHVA h = __builtin_va_arg(thelist, struct TooBigHVA);
diff --git a/clang/test/CodeGen/arm64-be-hfa-vararg.c b/clang/test/CodeGen/arm64-be-hfa-vararg.c
index 537aab52b3b..c22572459ba 100644
--- a/clang/test/CodeGen/arm64-be-hfa-vararg.c
+++ b/clang/test/CodeGen/arm64-be-hfa-vararg.c
@@ -4,7 +4,15 @@
// A single member HFA must be aligned just like a non-HFA register argument.
double callee(int a, ...) {
-// CHECK: = add i64 %{{.*}}, 8
+// CHECK: [[REGPP:%.*]] = getelementptr inbounds %struct.__va_list, %struct.__va_list* [[VA:%.*]], i32 0, i32 2
+// CHECK: [[REGP:%.*]] = load i8*, i8** [[REGPP]], align 8
+// CHECK: [[OFFSET0:%.*]] = getelementptr inbounds i8, i8* [[REGP]], i32 {{.*}}
+// CHECK: [[OFFSET1:%.*]] = getelementptr inbounds i8, i8* [[OFFSET0]], i64 8
+
+// CHECK: [[MEMPP:%.*]] = getelementptr inbounds %struct.__va_list, %struct.__va_list* [[VA:%.*]], i32 0, i32 0
+// CHECK: [[MEMP:%.*]] = load i8*, i8** [[MEMPP]], align 8
+// CHECK: [[NEXTP:%.*]] = getelementptr inbounds i8, i8* [[MEMP]], i64 8
+// CHECK: store i8* [[NEXTP]], i8** [[MEMPP]], align 8
va_list vl;
va_start(vl, a);
double result = va_arg(vl, struct { double a; }).a;
diff --git a/clang/test/CodeGen/atomic-arm64.c b/clang/test/CodeGen/atomic-arm64.c
index 98f27aba4f7..e871536866c 100644
--- a/clang/test/CodeGen/atomic-arm64.c
+++ b/clang/test/CodeGen/atomic-arm64.c
@@ -21,7 +21,7 @@ extern _Atomic(void*) a_pointer;
extern _Atomic(pointer_pair_t) a_pointer_pair;
extern _Atomic(pointer_quad_t) a_pointer_quad;
-// CHECK: define void @test0()
+// CHECK-LABEL:define void @test0()
// CHECK: [[TEMP:%.*]] = alloca i8, align 1
// CHECK-NEXT: store i8 1, i8* [[TEMP]]
// CHECK-NEXT: [[T0:%.*]] = load i8, i8* [[TEMP]], align 1
@@ -30,7 +30,7 @@ void test0() {
__c11_atomic_store(&a_bool, 1, memory_order_seq_cst);
}
-// CHECK: define void @test1()
+// CHECK-LABEL:define void @test1()
// CHECK: [[TEMP:%.*]] = alloca float, align 4
// CHECK-NEXT: store float 3.000000e+00, float* [[TEMP]]
// CHECK-NEXT: [[T0:%.*]] = bitcast float* [[TEMP]] to i32*
@@ -40,7 +40,7 @@ void test1() {
__c11_atomic_store(&a_float, 3, memory_order_seq_cst);
}
-// CHECK: define void @test2()
+// CHECK-LABEL: define void @test2()
// CHECK: [[TEMP:%.*]] = alloca i8*, align 8
// CHECK-NEXT: store i8* @a_bool, i8** [[TEMP]]
// CHECK-NEXT: [[T0:%.*]] = bitcast i8** [[TEMP]] to i64*
@@ -50,18 +50,18 @@ void test2() {
__c11_atomic_store(&a_pointer, &a_bool, memory_order_seq_cst);
}
-// CHECK: define void @test3(
+// CHECK-LABEL: define void @test3(
// CHECK: [[PAIR:%.*]] = alloca [[PAIR_T:%.*]], align 8
// CHECK-NEXT: [[TEMP:%.*]] = alloca [[PAIR_T]], align 8
// CHECK: llvm.memcpy
// CHECK-NEXT: [[T0:%.*]] = bitcast [[PAIR_T]]* [[TEMP]] to i128*
-// CHECK-NEXT: [[T1:%.*]] = load i128, i128* [[T0]], align 16
+// CHECK-NEXT: [[T1:%.*]] = load i128, i128* [[T0]], align 8
// CHECK-NEXT: store atomic i128 [[T1]], i128* bitcast ([[PAIR_T]]* @a_pointer_pair to i128*) seq_cst, align 16
void test3(pointer_pair_t pair) {
__c11_atomic_store(&a_pointer_pair, pair, memory_order_seq_cst);
}
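
The load drops to align 8 while the atomic store keeps align 16 because they address different objects: the temporary is an ordinary alloca with the struct's natural alignment, whereas the global _Atomic object is padded and aligned out to 16 bytes. An illustration under assumed types (the real pointer_pair_t is defined earlier in the test):

    /* 'pair_like' has natural align 8 and size 16; the _Atomic global is
       16-byte aligned, so only the store side gets the stronger alignment. */
    typedef struct { void *a, *b; } pair_like;
    extern _Atomic(pair_like) g_pair;

    void store_pair(pair_like p) {
      /* p is spilled to an 8-aligned temp, loaded as i128 (align 8),
         then stored atomically to the 16-aligned global (align 16). */
      __c11_atomic_store(&g_pair, p, __ATOMIC_SEQ_CST);
    }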
-// CHECK: define void @test4([[QUAD_T:%.*]]*
+// CHECK-LABEL: define void @test4(
// CHECK: [[TEMP:%.*]] = alloca [[QUAD_T:%.*]], align 8
// CHECK-NEXT: [[T0:%.*]] = bitcast [[QUAD_T]]* [[TEMP]] to i8*
// CHECK-NEXT: [[T1:%.*]] = bitcast [[QUAD_T]]* {{%.*}} to i8*
diff --git a/clang/test/CodeGen/block-byref-aggr.c b/clang/test/CodeGen/block-byref-aggr.c
index 910f6da3ccc..7d146a2d477 100644
--- a/clang/test/CodeGen/block-byref-aggr.c
+++ b/clang/test/CodeGen/block-byref-aggr.c
@@ -16,7 +16,7 @@ void test0() {
// CHECK: [[A:%.*]] = alloca [[BYREF:%.*]], align 8
// CHECK-NEXT: [[TEMP:%.*]] = alloca [[AGG]], align 4
// CHECK: [[RESULT:%.*]] = call i32 @makeAgg()
-// CHECK-NEXT: [[T0:%.*]] = getelementptr [[AGG]], [[AGG]]* [[TEMP]], i32 0, i32 0
+// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[TEMP]], i32 0, i32 0
// CHECK-NEXT: store i32 [[RESULT]], i32* [[T0]]
// Check that we properly assign into the forwarding pointer.
// CHECK-NEXT: [[A_FORWARDING:%.*]] = getelementptr inbounds [[BYREF]], [[BYREF]]* [[A]], i32 0, i32 1
@@ -42,7 +42,7 @@ void test1() {
// CHECK-NEXT: [[B:%.*]] = alloca [[B_BYREF:%.*]], align 8
// CHECK-NEXT: [[TEMP:%.*]] = alloca [[AGG]], align 4
// CHECK: [[RESULT:%.*]] = call i32 @makeAgg()
-// CHECK-NEXT: [[T0:%.*]] = getelementptr [[AGG]], [[AGG]]* [[TEMP]], i32 0, i32 0
+// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[TEMP]], i32 0, i32 0
// CHECK-NEXT: store i32 [[RESULT]], i32* [[T0]]
// Check that we properly assign into the forwarding pointer, first for b:
// CHECK-NEXT: [[B_FORWARDING:%.*]] = getelementptr inbounds [[B_BYREF]], [[B_BYREF]]* [[B]], i32 0, i32 1
diff --git a/clang/test/CodeGen/c11atomics-ios.c b/clang/test/CodeGen/c11atomics-ios.c
index a869982b17b..138db696bf9 100644
--- a/clang/test/CodeGen/c11atomics-ios.c
+++ b/clang/test/CodeGen/c11atomics-ios.c
@@ -103,21 +103,21 @@ void testStruct(_Atomic(S) *fp) {
// CHECK-NEXT: [[P:%.*]] = load [[S]]*, [[S]]** [[FP]]
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[P]], i32 0, i32 0
-// CHECK-NEXT: store i16 1, i16* [[T0]], align 2
+// CHECK-NEXT: store i16 1, i16* [[T0]], align 8
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[P]], i32 0, i32 1
// CHECK-NEXT: store i16 2, i16* [[T0]], align 2
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[P]], i32 0, i32 2
-// CHECK-NEXT: store i16 3, i16* [[T0]], align 2
+// CHECK-NEXT: store i16 3, i16* [[T0]], align 4
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[P]], i32 0, i32 3
// CHECK-NEXT: store i16 4, i16* [[T0]], align 2
__c11_atomic_init(fp, (S){1,2,3,4});
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[X]], i32 0, i32 0
-// CHECK-NEXT: store i16 1, i16* [[T0]], align 2
+// CHECK-NEXT: store i16 1, i16* [[T0]], align 8
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[X]], i32 0, i32 1
// CHECK-NEXT: store i16 2, i16* [[T0]], align 2
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[X]], i32 0, i32 2
-// CHECK-NEXT: store i16 3, i16* [[T0]], align 2
+// CHECK-NEXT: store i16 3, i16* [[T0]], align 4
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[X]], i32 0, i32 3
// CHECK-NEXT: store i16 4, i16* [[T0]], align 2
_Atomic(S) x = (S){1,2,3,4};
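
The new store alignments (8, 2, 4, 2 for the four i16 fields) come from propagating the alignment of the whole atomic object down to each field: at offset k from a pointer with alignment A, the known alignment is the largest power of two dividing k, clamped by A. A small sketch of that rule (my naming, not Clang's API):

    /* Alignment known at (base + offset), where base has alignment
       'base_align'.  offset == 0 keeps the full base alignment. */
    static unsigned align_at_offset(unsigned base_align, unsigned offset) {
      if (offset == 0)
        return base_align;
      unsigned low_bit = offset & (0u - offset); /* lowest set bit */
      return low_bit < base_align ? low_bit : base_align;
    }
    /* 8-byte-aligned _Atomic(S): offsets 0,2,4,6 give align 8,2,4,2. */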
@@ -157,29 +157,29 @@ void testPromotedStruct(_Atomic(PS) *fp) {
// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 8, i32 8, i1 false)
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[P]], i32 0, i32 0
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 0
-// CHECK-NEXT: store i16 1, i16* [[T1]], align 2
+// CHECK-NEXT: store i16 1, i16* [[T1]], align 8
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 1
// CHECK-NEXT: store i16 2, i16* [[T1]], align 2
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 2
-// CHECK-NEXT: store i16 3, i16* [[T1]], align 2
+// CHECK-NEXT: store i16 3, i16* [[T1]], align 4
__c11_atomic_init(fp, (PS){1,2,3});
// CHECK-NEXT: [[T0:%.*]] = bitcast [[APS]]* [[X]] to i8*
// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* [[T0]], i8 0, i32 8, i32 8, i1 false)
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[X]], i32 0, i32 0
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 0
-// CHECK-NEXT: store i16 1, i16* [[T1]], align 2
+// CHECK-NEXT: store i16 1, i16* [[T1]], align 8
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 1
// CHECK-NEXT: store i16 2, i16* [[T1]], align 2
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 2
-// CHECK-NEXT: store i16 3, i16* [[T1]], align 2
+// CHECK-NEXT: store i16 3, i16* [[T1]], align 4
_Atomic(PS) x = (PS){1,2,3};
// CHECK-NEXT: [[T0:%.*]] = load [[APS]]*, [[APS]]** [[FP]]
// CHECK-NEXT: [[T1:%.*]] = bitcast [[APS]]* [[T0]] to i64*
// CHECK-NEXT: [[T2:%.*]] = load atomic i64, i64* [[T1]] seq_cst, align 8
// CHECK-NEXT: [[T3:%.*]] = bitcast [[APS]]* [[TMP0]] to i64*
-// CHECK-NEXT: store i64 [[T2]], i64* [[T3]], align 2
+// CHECK-NEXT: store i64 [[T2]], i64* [[T3]], align 8
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[TMP0]], i32 0, i32 0
// CHECK-NEXT: [[T1:%.*]] = bitcast [[PS]]* [[F]] to i8*
// CHECK-NEXT: [[T2:%.*]] = bitcast [[PS]]* [[T0]] to i8*
diff --git a/clang/test/CodeGen/c11atomics.c b/clang/test/CodeGen/c11atomics.c
index d1e4478d7ec..c6eaca6f2a7 100644
--- a/clang/test/CodeGen/c11atomics.c
+++ b/clang/test/CodeGen/c11atomics.c
@@ -254,21 +254,21 @@ void testStruct(_Atomic(S) *fp) {
// CHECK-NEXT: [[P:%.*]] = load [[S]]*, [[S]]** [[FP]]
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[P]], i32 0, i32 0
-// CHECK-NEXT: store i16 1, i16* [[T0]], align 2
+// CHECK-NEXT: store i16 1, i16* [[T0]], align 8
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[P]], i32 0, i32 1
// CHECK-NEXT: store i16 2, i16* [[T0]], align 2
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[P]], i32 0, i32 2
-// CHECK-NEXT: store i16 3, i16* [[T0]], align 2
+// CHECK-NEXT: store i16 3, i16* [[T0]], align 4
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[P]], i32 0, i32 3
// CHECK-NEXT: store i16 4, i16* [[T0]], align 2
__c11_atomic_init(fp, (S){1,2,3,4});
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[X]], i32 0, i32 0
-// CHECK-NEXT: store i16 1, i16* [[T0]], align 2
+// CHECK-NEXT: store i16 1, i16* [[T0]], align 8
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[X]], i32 0, i32 1
// CHECK-NEXT: store i16 2, i16* [[T0]], align 2
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[X]], i32 0, i32 2
-// CHECK-NEXT: store i16 3, i16* [[T0]], align 2
+// CHECK-NEXT: store i16 3, i16* [[T0]], align 4
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[S]], [[S]]* [[X]], i32 0, i32 3
// CHECK-NEXT: store i16 4, i16* [[T0]], align 2
_Atomic(S) x = (S){1,2,3,4};
@@ -310,22 +310,22 @@ void testPromotedStruct(_Atomic(PS) *fp) {
// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 8, i32 8, i1 false)
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[P]], i32 0, i32 0
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 0
-// CHECK-NEXT: store i16 1, i16* [[T1]], align 2
+// CHECK-NEXT: store i16 1, i16* [[T1]], align 8
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 1
// CHECK-NEXT: store i16 2, i16* [[T1]], align 2
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 2
-// CHECK-NEXT: store i16 3, i16* [[T1]], align 2
+// CHECK-NEXT: store i16 3, i16* [[T1]], align 4
__c11_atomic_init(fp, (PS){1,2,3});
// CHECK-NEXT: [[T0:%.*]] = bitcast [[APS]]* [[X]] to i8*
// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* [[T0]], i8 0, i32 8, i32 8, i1 false)
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[X]], i32 0, i32 0
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 0
-// CHECK-NEXT: store i16 1, i16* [[T1]], align 2
+// CHECK-NEXT: store i16 1, i16* [[T1]], align 8
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 1
// CHECK-NEXT: store i16 2, i16* [[T1]], align 2
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 2
-// CHECK-NEXT: store i16 3, i16* [[T1]], align 2
+// CHECK-NEXT: store i16 3, i16* [[T1]], align 4
_Atomic(PS) x = (PS){1,2,3};
// CHECK-NEXT: [[T0:%.*]] = load [[APS]]*, [[APS]]** [[FP]]
diff --git a/clang/test/CodeGen/catch-undef-behavior.c b/clang/test/CodeGen/catch-undef-behavior.c
index 013a4a4ccef..739cf89f5be 100644
--- a/clang/test/CodeGen/catch-undef-behavior.c
+++ b/clang/test/CodeGen/catch-undef-behavior.c
@@ -269,7 +269,7 @@ int long_double_int_overflow(long double ld) {
// CHECK-COMMON: %[[INBOUNDS:.*]] = and i1 %[[GE]], %[[LE]]
// CHECK-COMMON-NEXT: br i1 %[[INBOUNDS]]
- // CHECK-UBSAN: store x86_fp80 %[[F]], x86_fp80* %[[ALLOCA:.*]], !nosanitize
+ // CHECK-UBSAN: store x86_fp80 %[[F]], x86_fp80* %[[ALLOCA:.*]], align 16, !nosanitize
// CHECK-UBSAN: %[[ARG:.*]] = ptrtoint x86_fp80* %[[ALLOCA]] to i64
// CHECK-UBSAN: call void @__ubsan_handle_float_cast_overflow(i8* bitcast ({{.*}} @[[LINE_1300]] to i8*), i64 %[[ARG]]
diff --git a/clang/test/CodeGen/exprs.c b/clang/test/CodeGen/exprs.c
index 59afa802b18..f46b5748f23 100644
--- a/clang/test/CodeGen/exprs.c
+++ b/clang/test/CodeGen/exprs.c
@@ -127,9 +127,10 @@ int f11(long X) {
return A[X];
// CHECK: [[Xaddr:%[^ ]+]] = alloca i64, align 8
-// CHECK: load {{.*}}, {{.*}}* [[Xaddr]]
-// CHECK-NEXT: getelementptr inbounds [100 x i32], [100 x i32]* %A, i32 0,
-// CHECK-NEXT: load i32, i32*
+// CHECK: [[A:%.*]] = alloca [100 x i32], align
+// CHECK: [[X:%.*]] = load {{.*}}, {{.*}}* [[Xaddr]]
+// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[A]], i64 0, i64 [[X]]
+// CHECK-NEXT: load i32, i32* [[T0]], align 4
}
int f12() {
diff --git a/clang/test/CodeGen/ext-vector-member-alignment.c b/clang/test/CodeGen/ext-vector-member-alignment.c
index 5f044b8a28d..686051e1936 100644
--- a/clang/test/CodeGen/ext-vector-member-alignment.c
+++ b/clang/test/CodeGen/ext-vector-member-alignment.c
@@ -14,14 +14,12 @@ void func(struct struct1* p, float *a, float *b, float c) {
*a = p->position.y;
*b = p->position[0];
p->position[2] = c;
- // FIXME: We should be able to come up with a more aggressive alignment
- // estimate.
// CHECK: @func
- // CHECK: load <4 x float>, <4 x float>* {{%.*}}, align 1
- // CHECK: store <4 x float> {{%.*}}, <4 x float>* {{%.*}}, align 1
- // CHECK: load <4 x float>, <4 x float>* {{%.*}}, align 1
- // CHECK: load <4 x float>, <4 x float>* {{%.*}}, align 1
- // CHECK: load <4 x float>, <4 x float>* {{%.*}}, align 1
- // CHECK: store <4 x float> {{%.*}}, <4 x float>* {{%.*}}, align 1
+ // CHECK: load <4 x float>, <4 x float>* {{%.*}}, align 4
+ // CHECK: store <4 x float> {{%.*}}, <4 x float>* {{%.*}}, align 4
+ // CHECK: load <4 x float>, <4 x float>* {{%.*}}, align 4
+ // CHECK: load <4 x float>, <4 x float>* {{%.*}}, align 4
+ // CHECK: load <4 x float>, <4 x float>* {{%.*}}, align 4
+ // CHECK: store <4 x float> {{%.*}}, <4 x float>* {{%.*}}, align 4
// CHECK: ret void
}
diff --git a/clang/test/CodeGen/mips-varargs.c b/clang/test/CodeGen/mips-varargs.c
index 891769c711d..6608017062a 100644
--- a/clang/test/CodeGen/mips-varargs.c
+++ b/clang/test/CodeGen/mips-varargs.c
@@ -1,9 +1,9 @@
-// RUN: %clang_cc1 -triple mips-unknown-linux -o - -O1 -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
-// RUN: %clang_cc1 -triple mipsel-unknown-linux -o - -O1 -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
-// RUN: %clang_cc1 -triple mips64-unknown-linux -o - -O1 -emit-llvm -target-abi n32 %s | FileCheck %s -check-prefix=ALL -check-prefix=N32 -check-prefix=NEW
-// RUN: %clang_cc1 -triple mips64-unknown-linux -o - -O1 -emit-llvm -target-abi n32 %s | FileCheck %s -check-prefix=ALL -check-prefix=N32 -check-prefix=NEW
-// RUN: %clang_cc1 -triple mips64-unknown-linux -o - -O1 -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NEW
-// RUN: %clang_cc1 -triple mips64el-unknown-linux -o - -O1 -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NEW
+// RUN: %clang_cc1 -triple mips-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
+// RUN: %clang_cc1 -triple mipsel-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
+// RUN: %clang_cc1 -triple mips64-unknown-linux -o - -emit-llvm -target-abi n32 %s | FileCheck %s -check-prefix=ALL -check-prefix=N32 -check-prefix=NEW
+// RUN: %clang_cc1 -triple mips64el-unknown-linux -o - -emit-llvm -target-abi n32 %s | FileCheck %s -check-prefix=ALL -check-prefix=N32 -check-prefix=NEW
+// RUN: %clang_cc1 -triple mips64-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NEW
+// RUN: %clang_cc1 -triple mips64el-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NEW
#include <stdarg.h>
@@ -24,75 +24,31 @@ int test_i32(char *fmt, ...) {
// O32: %va = alloca i8*, align [[PTRALIGN:4]]
// N32: %va = alloca i8*, align [[PTRALIGN:4]]
// N64: %va = alloca i8*, align [[PTRALIGN:8]]
+// ALL: [[V:%.*]] = alloca i32, align 4
+// NEW: [[PROMOTION_TEMP:%.*]] = alloca i32, align 4
//
-// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
-// ALL: call void @llvm.va_start(i8* [[VA1]])
-//
-// O32: [[TMP0:%.+]] = bitcast i8** %va to i32**
-// O32: [[AP_CUR:%.+]] = load i32*, i32** [[TMP0]], align [[PTRALIGN]]
-// NEW: [[TMP0:%.+]] = bitcast i8** %va to i64**
-// NEW: [[AP_CUR:%.+]] = load i64*, i64** [[TMP0]], align [[PTRALIGN]]
+// ALL: [[VA:%.+]] = bitcast i8** %va to i8*
+// ALL: call void @llvm.va_start(i8* [[VA]])
+// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]]
+// O32: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T:i32]] [[CHUNKSIZE:4]]
+// NEW: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T:i32|i64]] [[CHUNKSIZE:8]]
//
-// O32: [[AP_NEXT:%.+]] = getelementptr i32, i32* [[AP_CUR]], i32 1
-// NEW: [[AP_NEXT:%.+]] = getelementptr i64, i64* [[AP_CUR]], {{i32|i64}} 1
+// ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
//
-// O32: store i32* [[AP_NEXT]], i32** [[TMP0]], align [[PTRALIGN]]
-// NEW: store i64* [[AP_NEXT]], i64** [[TMP0]], align [[PTRALIGN]]
+// O32: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to [[CHUNK_T:i32]]*
+// O32: [[ARG:%.+]] = load i32, i32* [[AP_CAST]], align [[CHUNKALIGN:4]]
//
-// O32: [[ARG1:%.+]] = load i32, i32* [[AP_CUR]], align 4
-// NEW: [[TMP2:%.+]] = load i64, i64* [[AP_CUR]], align 8
-// NEW: [[ARG1:%.+]] = trunc i64 [[TMP2]] to i32
+// N32: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to [[CHUNK_T:i64]]*
+// N32: [[TMP:%.+]] = load i64, i64* [[AP_CAST]], align [[CHUNKALIGN:8]]
+// N64: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to [[CHUNK_T:i64]]*
+// N64: [[TMP:%.+]] = load i64, i64* [[AP_CAST]], align [[CHUNKALIGN:8]]
+// NEW: [[TMP2:%.+]] = trunc i64 [[TMP]] to i32
+// NEW: store i32 [[TMP2]], i32* [[PROMOTION_TEMP]], align 4
+// NEW: [[ARG:%.+]] = load i32, i32* [[PROMOTION_TEMP]], align 4
+// ALL: store i32 [[ARG]], i32* [[V]], align 4
//
-// ALL: call void @llvm.va_end(i8* [[VA1]])
-// ALL: ret i32 [[ARG1]]
-// ALL: }
-
-int test_i32_2args(char *fmt, ...) {
- va_list va;
-
- va_start(va, fmt);
- int v1 = va_arg(va, int);
- int v2 = va_arg(va, int);
- va_end(va);
-
- return v1 + v2;
-}
-
-// ALL-LABEL: define i32 @test_i32_2args(i8*{{.*}} %fmt, ...)
-//
-// ALL: %va = alloca i8*, align [[PTRALIGN]]
// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
-// ALL: call void @llvm.va_start(i8* [[VA1]])
-//
-// O32: [[TMP0:%.+]] = bitcast i8** %va to i32**
-// O32: [[AP_CUR:%.+]] = load i32*, i32** [[TMP0]], align [[PTRALIGN]]
-// NEW: [[TMP0:%.+]] = bitcast i8** %va to i64**
-// NEW: [[AP_CUR:%.+]] = load i64*, i64** [[TMP0]], align [[PTRALIGN]]
-//
-// O32: [[AP_NEXT1:%.+]] = getelementptr i32, i32* [[AP_CUR]], i32 1
-// NEW: [[AP_NEXT1:%.+]] = getelementptr i64, i64* [[AP_CUR]], [[INTPTR_T:i32|i64]] 1
-//
-// O32: store i32* [[AP_NEXT1]], i32** [[TMP0]], align [[PTRALIGN]]
-// FIXME: N32 optimised this store out. Why only for this ABI?
-// N64: store i64* [[AP_NEXT1]], i64** [[TMP0]], align [[PTRALIGN]]
-//
-// O32: [[ARG1:%.+]] = load i32, i32* [[AP_CUR]], align 4
-// NEW: [[TMP3:%.+]] = load i64, i64* [[AP_CUR]], align 8
-// NEW: [[ARG1:%.+]] = trunc i64 [[TMP3]] to i32
-//
-// O32: [[AP_NEXT2:%.+]] = getelementptr i32, i32* [[AP_CUR]], i32 2
-// NEW: [[AP_NEXT2:%.+]] = getelementptr i64, i64* [[AP_CUR]], [[INTPTR_T]] 2
-//
-// O32: store i32* [[AP_NEXT2]], i32** [[TMP0]], align [[PTRALIGN]]
-// NEW: store i64* [[AP_NEXT2]], i64** [[TMP0]], align [[PTRALIGN]]
-//
-// O32: [[ARG2:%.+]] = load i32, i32* [[AP_NEXT1]], align 4
-// NEW: [[TMP4:%.+]] = load i64, i64* [[AP_NEXT1]], align 8
-// NEW: [[ARG2:%.+]] = trunc i64 [[TMP4]] to i32
-//
// ALL: call void @llvm.va_end(i8* [[VA1]])
-// ALL: [[ADD:%.+]] = add nsw i32 [[ARG2]], [[ARG1]]
-// ALL: ret i32 [[ADD]]
// ALL: }
long long test_i64(char *fmt, ...) {
@@ -108,32 +64,25 @@ long long test_i64(char *fmt, ...) {
// ALL-LABEL: define i64 @test_i64(i8*{{.*}} %fmt, ...)
//
// ALL: %va = alloca i8*, align [[PTRALIGN]]
-// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
-// ALL: call void @llvm.va_start(i8* [[VA1]])
-//
-// O32: [[TMP0:%.+]] = bitcast i8** %va to i32*
-// O32: [[AP_CUR:%.+]] = load [[INTPTR_T:i32]], i32* [[TMP0]], align [[PTRALIGN]]
-// NEW: [[TMP0:%.+]] = bitcast i8** %va to i64**
-// NEW: [[AP_CUR:%.+]] = load i64*, i64** [[TMP0]], align [[PTRALIGN]]
+// ALL: [[VA:%.+]] = bitcast i8** %va to i8*
+// ALL: call void @llvm.va_start(i8* [[VA]])
+// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]]
//
// i64 is 8-byte aligned; while this is within O32's stack alignment, there's no
// guarantee that the offset is still 8-byte aligned after earlier reads.
-// O32: [[PTR1:%.+]] = add i32 [[AP_CUR]], 7
-// O32: [[PTR2:%.+]] = and i32 [[PTR1]], -8
-// O32: [[PTR3:%.+]] = inttoptr [[INTPTR_T]] [[PTR2]] to i64*
-// O32: [[PTR4:%.+]] = inttoptr [[INTPTR_T]] [[PTR2]] to i8*
-//
-// O32: [[AP_NEXT:%.+]] = getelementptr i8, i8* [[PTR4]], [[INTPTR_T]] 8
-// NEW: [[AP_NEXT:%.+]] = getelementptr i64, i64* [[AP_CUR]], [[INTPTR_T:i32|i64]] 1
+// O32: [[TMP1:%.+]] = ptrtoint i8* [[AP_CUR]] to i32
+// O32: [[TMP2:%.+]] = add i32 [[TMP1]], 7
+// O32: [[TMP3:%.+]] = and i32 [[TMP2]], -8
+// O32: [[AP_CUR:%.+]] = inttoptr i32 [[TMP3]] to i8*
//
-// O32: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
-// NEW: store i64* [[AP_NEXT]], i64** [[TMP0]], align [[PTRALIGN]]
+// ALL: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T]] 8
+// ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
//
-// O32: [[ARG1:%.+]] = load i64, i64* [[PTR3]], align 8
-// NEW: [[ARG1:%.+]] = load i64, i64* [[AP_CUR]], align 8
+// ALL: [[AP_CAST:%.*]] = bitcast i8* [[AP_CUR]] to i64*
+// ALL: [[ARG:%.+]] = load i64, i64* [[AP_CAST]], align 8
//
+// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_end(i8* [[VA1]])
-// ALL: ret i64 [[ARG1]]
// ALL: }
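
A worked instance of that realignment, with an assumed address: after an earlier 4-byte read the cursor can sit at, say, 0x7fff000c, which is 4- but not 8-byte aligned, so the rounding is done dynamically:

    /* O32 realignment of the cursor to an 8-byte boundary. */
    static unsigned realign8(unsigned ap) {
      return (ap + 7) & ~7u;  /* realign8(0x7fff000c) == 0x7fff0010 */
    }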
char *test_ptr(char *fmt, ...) {
@@ -148,41 +97,30 @@ char *test_ptr(char *fmt, ...) {
// ALL-LABEL: define i8* @test_ptr(i8*{{.*}} %fmt, ...)
//
-// O32: %va = alloca i8*, align [[PTRALIGN:4]]
-// N32: %va = alloca i8*, align [[PTRALIGN:4]]
-// N64: %va = alloca i8*, align [[PTRALIGN:8]]
-//
-// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
-// ALL: call void @llvm.va_start(i8* [[VA1]])
+// ALL: %va = alloca i8*, align [[PTRALIGN]]
+// ALL: [[V:%.*]] = alloca i8*, align [[PTRALIGN]]
+// N32: [[AP_CAST:%.+]] = alloca i8*, align 4
+// ALL: [[VA:%.+]] = bitcast i8** %va to i8*
+// ALL: call void @llvm.va_start(i8* [[VA]])
+// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]]
+// ALL: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T]] [[CHUNKSIZE]]
+// ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
//
-// O32: [[TMP0:%.+]] = bitcast i8** %va to i8***
-// O32: [[AP_CUR:%.+]] = load i8**, i8*** [[TMP0]], align [[PTRALIGN]]
-// N32 differs because the vararg is not a N32 pointer. It's been promoted to 64-bit.
-// N32: [[TMP0:%.+]] = bitcast i8** %va to i64**
-// N32: [[AP_CUR:%.+]] = load i64*, i64** [[TMP0]], align [[PTRALIGN]]
-// N64: [[TMP0:%.+]] = bitcast i8** %va to i8***
-// N64: [[AP_CUR:%.+]] = load i8**, i8*** [[TMP0]], align [[PTRALIGN]]
-//
-// O32: [[AP_NEXT:%.+]] = getelementptr i8*, i8** [[AP_CUR]], i32 1
-// N32 differs because the vararg is not a N32 pointer. It's been promoted to 64-bit.
-// N32: [[AP_NEXT:%.+]] = getelementptr i64, i64* [[AP_CUR]], {{i32|i64}} 1
-// N64: [[AP_NEXT:%.+]] = getelementptr i8*, i8** [[AP_CUR]], {{i32|i64}} 1
-//
-// O32: store i8** [[AP_NEXT]], i8*** [[TMP0]], align [[PTRALIGN]]
-// N32 differs because the vararg is not a N32 pointer. It's been promoted to 64-bit.
-// N32: store i64* [[AP_NEXT]], i64** [[TMP0]], align [[PTRALIGN]]
-// N64: store i8** [[AP_NEXT]], i8*** [[TMP0]], align [[PTRALIGN]]
-//
-// O32: [[ARG1:%.+]] = load i8*, i8** [[AP_CUR]], align 4
-// N32 differs because the vararg is not a N32 pointer. It's been promoted to
-// 64-bit so we must truncate the excess and bitcast to a N32 pointer.
-// N32: [[TMP2:%.+]] = load i64, i64* [[AP_CUR]], align 8
+// When the chunk size matches the pointer size, this is easy.
+// O32: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to i8**
+// N64: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to i8**
+// Otherwise we need a promotion temporary.
+// N32: [[TMP1:%.+]] = bitcast i8* [[AP_CUR]] to i64*
+// N32: [[TMP2:%.+]] = load i64, i64* [[TMP1]], align 8
// N32: [[TMP3:%.+]] = trunc i64 [[TMP2]] to i32
-// N32: [[ARG1:%.+]] = inttoptr i32 [[TMP3]] to i8*
-// N64: [[ARG1:%.+]] = load i8*, i8** [[AP_CUR]], align 8
+// N32: [[PTR:%.+]] = inttoptr i32 [[TMP3]] to i8*
+// N32: store i8* [[PTR]], i8** [[AP_CAST]], align 4
+//
+// ALL: [[ARG:%.+]] = load i8*, i8** [[AP_CAST]], align [[PTRALIGN]]
+// ALL: store i8* [[ARG]], i8** [[V]], align [[PTRALIGN]]
//
+// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_end(i8* [[VA1]])
-// ALL: ret i8* [[ARG1]]
// ALL: }
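
The N32 detour above exists because varargs travel in 64-bit chunks while N32 pointers are 32-bit: the chunk is loaded whole, truncated, and parked in a pointer-typed temporary so the final load is at pointer type on every ABI. A hedged C sketch of the promotion (names assumed):

    #include <stdint.h>

    /* N32: recover a 32-bit pointer from an 8-byte vararg chunk. */
    static char *read_promoted_ptr(char **ap) {
      uint64_t chunk = *(uint64_t *)*ap;          /* load i64          */
      *ap += 8;                                   /* advance one chunk */
      return (char *)(uintptr_t)(uint32_t)chunk;  /* trunc; inttoptr   */
    }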
int test_v4i32(char *fmt, ...) {
@@ -198,31 +136,33 @@ int test_v4i32(char *fmt, ...) {
// ALL-LABEL: define i32 @test_v4i32(i8*{{.*}} %fmt, ...)
//
// ALL: %va = alloca i8*, align [[PTRALIGN]]
+// ALL: [[V:%.+]] = alloca <4 x i32>, align 16
// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_start(i8* [[VA1]])
-//
-// O32: [[TMP0:%.+]] = bitcast i8** %va to i32*
-// N32: [[TMP0:%.+]] = bitcast i8** %va to i32*
-// N64: [[TMP0:%.+]] = bitcast i8** %va to i64*
-//
-// O32: [[PTR0:%.+]] = load [[INTPTR_T:i32]], i32* [[TMP0]], align [[PTRALIGN]]
-// N32: [[PTR0:%.+]] = load [[INTPTR_T:i32]], i32* [[TMP0]], align [[PTRALIGN]]
-// N64: [[PTR0:%.+]] = load [[INTPTR_T:i64]], i64* [[TMP0]], align [[PTRALIGN]]
+// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]]
//
// Vectors are 16-byte aligned; however, the O32 ABI has a maximum alignment of
// 8 bytes since the base of the stack is only 8-byte aligned.
-// O32: [[PTR1:%.+]] = add i32 [[PTR0]], 7
-// O32: [[PTR2:%.+]] = and i32 [[PTR1]], -8
+// O32: [[TMP1:%.+]] = ptrtoint i8* [[AP_CUR]] to i32
+// O32: [[TMP2:%.+]] = add i32 [[TMP1]], 7
+// O32: [[TMP3:%.+]] = and i32 [[TMP2]], -8
+// O32: [[AP_CUR:%.+]] = inttoptr i32 [[TMP3]] to i8*
//
-// NEW: [[PTR1:%.+]] = add [[INTPTR_T]] [[PTR0]], 15
-// NEW: [[PTR2:%.+]] = and [[INTPTR_T]] [[PTR1]], -16
+// NEW: [[TMP1:%.+]] = ptrtoint i8* [[AP_CUR]] to [[INTPTR_T]]
+// NEW: [[TMP2:%.+]] = add [[INTPTR_T]] [[TMP1]], 15
+// NEW: [[TMP3:%.+]] = and [[INTPTR_T]] [[TMP2]], -16
+// NEW: [[AP_CUR:%.+]] = inttoptr [[INTPTR_T]] [[TMP3]] to i8*
//
-// ALL: [[PTR3:%.+]] = inttoptr [[INTPTR_T]] [[PTR2]] to <4 x i32>*
-// ALL: [[PTR4:%.+]] = inttoptr [[INTPTR_T]] [[PTR2]] to i8*
-// ALL: [[AP_NEXT:%.+]] = getelementptr i8, i8* [[PTR4]], [[INTPTR_T]] 16
+// ALL: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T]] 16
// ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
-// ALL: [[PTR5:%.+]] = load <4 x i32>, <4 x i32>* [[PTR3]], align 16
+//
+// ALL: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to <4 x i32>*
+// O32: [[ARG:%.+]] = load <4 x i32>, <4 x i32>* [[AP_CAST]], align 8
+// N64: [[ARG:%.+]] = load <4 x i32>, <4 x i32>* [[AP_CAST]], align 16
+// ALL: store <4 x i32> [[ARG]], <4 x i32>* [[V]], align 16
+//
+// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_end(i8* [[VA1]])
-// ALL: [[VECEXT:%.+]] = extractelement <4 x i32> [[PTR5]], i32 0
+// ALL: [[VECEXT:%.+]] = extractelement <4 x i32> {{.*}}, i32 0
// ALL: ret i32 [[VECEXT]]
// ALL: }
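
The O32/N64 split in the load alignment is a clamp: the vector's preferred 16-byte alignment cannot exceed what the ABI guarantees for the stack, which is 8 bytes on O32. A sketch of the clamp (my naming, not Clang's code):

    /* Alignment usable for a va_arg load of a given type. */
    static unsigned vaarg_align(unsigned type_align, unsigned abi_stack_align) {
      return type_align < abi_stack_align ? type_align : abi_stack_align;
    }
    /* O32: vaarg_align(16, 8) == 8;  N64: vaarg_align(16, 16) == 16. */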
diff --git a/clang/test/CodeGen/object-size.c b/clang/test/CodeGen/object-size.c
index 367318d6cc2..e69764087e9 100644
--- a/clang/test/CodeGen/object-size.c
+++ b/clang/test/CodeGen/object-size.c
@@ -15,7 +15,7 @@ int gi, gj;
// CHECK-LABEL: define void @test1
void test1() {
- // CHECK: = call i8* @__strcpy_chk(i8* getelementptr inbounds ([63 x i8], [63 x i8]* @gbuf, i32 0, i64 4), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i64 59)
+ // CHECK: = call i8* @__strcpy_chk(i8* getelementptr inbounds ([63 x i8], [63 x i8]* @gbuf, i64 0, i64 4), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i64 59)
strcpy(&gbuf[4], "Hi there");
}
@@ -33,7 +33,7 @@ void test3() {
// CHECK-LABEL: define void @test4
void test4() {
- // CHECK: = call i8* @__strcpy_chk(i8* getelementptr inbounds ([63 x i8], [63 x i8]* @gbuf, i32 0, i64 -1), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i64 0)
+ // CHECK: = call i8* @__strcpy_chk(i8* getelementptr inbounds ([63 x i8], [63 x i8]* @gbuf, i64 0, i64 -1), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i64 0)
strcpy((char*)(void*)&gbuf[-1], "Hi there");
}
diff --git a/clang/test/CodeGen/packed-arrays.c b/clang/test/CodeGen/packed-arrays.c
index 993d88e2772..bb742c6f311 100644
--- a/clang/test/CodeGen/packed-arrays.c
+++ b/clang/test/CodeGen/packed-arrays.c
@@ -64,10 +64,12 @@ int f0_b(struct s0 *a) {
return *(a->x + 1);
}
+// Note that 'y' still causes struct s1 to be four-byte aligned.
+
// Note that we are incompatible with GCC on this example.
//
// CHECK-LABEL: define i32 @f1_a
-// CHECK: load i32, i32* %{{.*}}, align 1
+// CHECK: load i32, i32* %{{.*}}, align 4
// CHECK: }
// CHECK-LABEL: define i32 @f1_b
// CHECK: load i32, i32* %{{.*}}, align 4
@@ -79,7 +81,7 @@ int f0_b(struct s0 *a) {
// CHECK: load i32, i32* %{{.*}}, align 4
// CHECK: }
// CHECK-LABEL: define i32 @f1_d
-// CHECK: load i32, i32* %{{.*}}, align 1
+// CHECK: load i32, i32* %{{.*}}, align 4
// CHECK: }
int f1_a(struct s1 *a) {
return a->x[1];
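
The four-byte loads follow because the unpacked member keeps the whole struct four-byte aligned, so elements of the packed array still land on four-byte boundaries. A hypothetical layout in the spirit of the test (the real s1 is defined earlier in the file and may differ):

    /* Packed array member next to an ordinary int member. */
    struct s1_like {
      int x[4] __attribute__((packed)); /* packed, but offsets stay 0,4,8,12 */
      int y;                            /* forces 4-byte struct alignment    */
    };
    /* &a->x[1] is offset 4 from a 4-aligned base: load i32, align 4. */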
diff --git a/clang/test/CodeGen/packed-structure.c b/clang/test/CodeGen/packed-structure.c
index 8de31d6a81d..7d1183dc5ca 100644
--- a/clang/test/CodeGen/packed-structure.c
+++ b/clang/test/CodeGen/packed-structure.c
@@ -25,7 +25,7 @@ int s0_load_x(struct s0 *a) { return a->x; }
// with align 1 (in 2363.1 at least).
//
// CHECK-FUNCTIONS-LABEL: define i32 @s0_load_y
-// CHECK-FUNCTIONS: [[s0_load_y:%.*]] = load i32, i32* {{.*}}, align 1
+// CHECK-FUNCTIONS: [[s0_load_y:%.*]] = load i32, i32* {{.*}}, align 4
// CHECK-FUNCTIONS: ret i32 [[s0_load_y]]
int s0_load_y(struct s0 *a) { return a->y; }
// CHECK-FUNCTIONS-LABEL: define void @s0_copy
@@ -95,6 +95,6 @@ int s3_1 = __alignof(((struct s3*) 0)->anInt);
// CHECK-FUNCTIONS-LABEL: define i32 @test3(
int test3(struct s3 *ptr) {
// CHECK-FUNCTIONS: [[PTR:%.*]] = getelementptr inbounds {{%.*}}, {{%.*}}* {{%.*}}, i32 0, i32 1
- // CHECK-FUNCTIONS-NEXT: load i32, i32* [[PTR]], align 1
+ // CHECK-FUNCTIONS-NEXT: load i32, i32* [[PTR]], align 2
return ptr->anInt;
}
diff --git a/clang/test/CodeGen/ppc-varargs-struct.c b/clang/test/CodeGen/ppc-varargs-struct.c
index 1c983c0e434..a6ba13799db 100644
--- a/clang/test/CodeGen/ppc-varargs-struct.c
+++ b/clang/test/CodeGen/ppc-varargs-struct.c
@@ -19,89 +19,69 @@ void testva (int n, ...)
// CHECK: bitcast %struct.x* %t to i8*
// CHECK: bitcast %struct.x* %{{[0-9]+}} to i8*
// CHECK: call void @llvm.memcpy
-// CHECK-PPC: [[ARRAYDECAY:%[a-z0-9]+]] = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
-// CHECK-PPC-NEXT: [[GPRPTR:%[a-z0-9]+]] = bitcast %struct.__va_list_tag* [[ARRAYDECAY]] to i8*
-// CHECK-PPC-NEXT: [[ZERO:%[0-9]+]] = ptrtoint i8* [[GPRPTR]] to i32
-// CHECK-PPC-NEXT: [[ONE:%[0-9]+]] = add i32 [[ZERO]], 1
-// CHECK-PPC-NEXT: [[TWO:%[0-9]+]] = inttoptr i32 [[ONE]] to i8*
-// CHECK-PPC-NEXT: [[THREE:%[0-9]+]] = add i32 [[ONE]], 3
-// CHECK-PPC-NEXT: [[FOUR:%[0-9]+]] = inttoptr i32 [[THREE]] to i8**
-// CHECK-PPC-NEXT: [[FIVE:%[0-9]+]] = add i32 [[THREE]], 4
-// CHECK-PPC-NEXT: [[SIX:%[0-9]+]] = inttoptr i32 [[FIVE]] to i8**
-// CHECK-PPC-NEXT: [[GPR:%[a-z0-9]+]] = load i8, i8* [[GPRPTR]]
-// CHECK-PPC-NEXT: [[FPR:%[a-z0-9]+]] = load i8, i8* [[TWO]]
-// CHECK-PPC-NEXT: [[OVERFLOW_AREA:%[a-z_0-9]+]] = load i8*, i8** [[FOUR]]
-// CHECK-PPC-NEXT: [[SEVEN:%[0-9]+]] = ptrtoint i8* [[OVERFLOW_AREA]] to i32
-// CHECK-PPC-NEXT: [[REGSAVE_AREA:%[a-z_0-9]+]] = load i8*, i8** [[SIX]]
-// CHECK-PPC-NEXT: [[EIGHT:%[0-9]+]] = ptrtoint i8* [[REGSAVE_AREA]] to i32
-// CHECK-PPC-NEXT: [[COND:%[a-z0-9]+]] = icmp ult i8 [[GPR]], 8
-// CHECK-PPC-NEXT: [[NINE:%[0-9]+]] = mul i8 [[GPR]], 4
-// CHECK-PPC-NEXT: [[TEN:%[0-9]+]] = sext i8 [[NINE]] to i32
-// CHECK-PPC-NEXT: [[ELEVEN:%[0-9]+]] = add i32 [[EIGHT]], [[TEN]]
-// CHECK-PPC-NEXT: br i1 [[COND]], label [[USING_REGS:%[a-z_0-9]+]], label [[USING_OVERFLOW:%[a-z_0-9]+]]
+
+// CHECK-PPC: [[ARRAYDECAY:%.+]] = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
+// CHECK-PPC-NEXT: [[GPRPTR:%.+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* [[ARRAYDECAY]], i32 0, i32 0
+// CHECK-PPC-NEXT: [[GPR:%.+]] = load i8, i8* [[GPRPTR]], align 4
+// CHECK-PPC-NEXT: [[COND:%.+]] = icmp ult i8 [[GPR]], 8
+// CHECK-PPC-NEXT: br i1 [[COND]], label %[[USING_REGS:[a-z_0-9]+]], label %[[USING_OVERFLOW:[a-z_0-9]+]]
//
-// CHECK-PPC1:[[USING_REGS]]
-// CHECK-PPC: [[TWELVE:%[0-9]+]] = inttoptr i32 [[ELEVEN]] to %struct.x*
-// CHECK-PPC-NEXT: [[THIRTEEN:%[0-9]+]] = add i8 [[GPR]], 1
-// CHECK-PPC-NEXT: store i8 [[THIRTEEN]], i8* [[GPRPTR]]
-// CHECK-PPC-NEXT: br label [[CONT:%[a-z0-9]+]]
+// CHECK-PPC:[[USING_REGS]]
+// CHECK-PPC-NEXT: [[REGSAVE_AREA_P:%.+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* [[ARRAYDECAY]], i32 0, i32 4
+// CHECK-PPC-NEXT: [[REGSAVE_AREA:%.+]] = load i8*, i8** [[REGSAVE_AREA_P]], align 4
+// CHECK-PPC-NEXT: [[OFFSET:%.+]] = mul i8 [[GPR]], 4
+// CHECK-PPC-NEXT: [[RAW_REGADDR:%.+]] = getelementptr inbounds i8, i8* [[REGSAVE_AREA]], i8 [[OFFSET]]
+// CHECK-PPC-NEXT: [[REGADDR:%.+]] = bitcast i8* [[RAW_REGADDR]] to %struct.x**
+// CHECK-PPC-NEXT: [[USED_GPR:%[0-9]+]] = add i8 [[GPR]], 1
+// CHECK-PPC-NEXT: store i8 [[USED_GPR]], i8* [[GPRPTR]], align 4
+// CHECK-PPC-NEXT: br label %[[CONT:[a-z0-9]+]]
//
-// CHECK-PPC1:[[USING_OVERFLOW]]
-// CHECK-PPC: [[FOURTEEN:%[0-9]+]] = inttoptr i32 [[SEVEN]] to %struct.x*
-// CHECK-PPC-NEXT: [[FIFTEEN:%[0-9]+]] = add i32 [[SEVEN]], 4
-// CHECK-PPC-NEXT: [[SIXTEEN:%[0-9]+]] = inttoptr i32 [[FIFTEEN]] to i8*
-// CHECK-PPC-NEXT: store i8* [[SIXTEEN]], i8** [[FOUR]]
-// CHECK-PPC-NEXT: br label [[CONT]]
+// CHECK-PPC:[[USING_OVERFLOW]]
+// CHECK-PPC-NEXT: [[OVERFLOW_AREA_P:%[0-9]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* [[ARRAYDECAY]], i32 0, i32 3
+// CHECK-PPC-NEXT: [[OVERFLOW_AREA:%.+]] = load i8*, i8** [[OVERFLOW_AREA_P]], align 4
+// CHECK-PPC-NEXT: [[MEMADDR:%.+]] = bitcast i8* [[OVERFLOW_AREA]] to %struct.x**
+// CHECK-PPC-NEXT: [[NEW_OVERFLOW_AREA:%[0-9]+]] = getelementptr inbounds i8, i8* [[OVERFLOW_AREA]], i32 4
+// CHECK-PPC-NEXT: store i8* [[NEW_OVERFLOW_AREA]], i8** [[OVERFLOW_AREA_P]]
+// CHECK-PPC-NEXT: br label %[[CONT]]
//
-// CHECK-PPC1:[[CONT]]
-// CHECK-PPC: [[VAARG_ADDR:%[a-z.0-9]+]] = phi %struct.x* [ [[TWELVE]], [[USING_REGS]] ], [ [[FOURTEEN]], [[USING_OVERFLOW]] ]
-// CHECK-PPC-NEXT: [[AGGRPTR:%[a-z0-9]+]] = bitcast %struct.x* [[VAARG_ADDR]] to i8**
-// CHECK-PPC-NEXT: [[AGGR:%[a-z0-9]+]] = load i8*, i8** [[AGGRPTR]]
-// CHECK-PPC-NEXT: [[SEVENTEEN:%[0-9]+]] = bitcast %struct.x* %t to i8*
-// CHECK-PPC-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[SEVENTEEN]], i8* [[AGGR]], i32 16, i32 8, i1 false)
+// CHECK-PPC:[[CONT]]
+// CHECK-PPC-NEXT: [[VAARG_ADDR:%[a-z.0-9]+]] = phi %struct.x** [ [[REGADDR]], %[[USING_REGS]] ], [ [[MEMADDR]], %[[USING_OVERFLOW]] ]
+// CHECK-PPC-NEXT: [[AGGR:%[a-z0-9]+]] = load %struct.x*, %struct.x** [[VAARG_ADDR]]
+// CHECK-PPC-NEXT: [[DEST:%[0-9]+]] = bitcast %struct.x* %t to i8*
+// CHECK-PPC-NEXT: [[SRC:%.+]] = bitcast %struct.x* [[AGGR]] to i8*
+// CHECK-PPC-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[DEST]], i8* [[SRC]], i32 16, i32 8, i1 false)
int v = va_arg (ap, int);
-// CHECK: ptrtoint i8* %{{[a-z.0-9]*}} to i64
-// CHECK: add i64 %{{[0-9]+}}, 4
-// CHECK: inttoptr i64 %{{[0-9]+}} to i8*
+
+// CHECK: getelementptr inbounds i8, i8* %{{[a-z.0-9]*}}, i64 4
// CHECK: bitcast i8* %{{[0-9]+}} to i32*
-// CHECK-PPC: [[ARRAYDECAY1:%[a-z0-9]+]] = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
-// CHECK-PPC-NEXT: [[GPRPTR1:%[a-z0-9]+]] = bitcast %struct.__va_list_tag* [[ARRAYDECAY1]] to i8*
-// CHECK-PPC-NEXT: [[EIGHTEEN:%[0-9]+]] = ptrtoint i8* [[GPRPTR1]] to i32
-// CHECK-PPC-NEXT: [[NINETEEN:%[0-9]+]] = add i32 [[EIGHTEEN]], 1
-// CHECK-PPC-NEXT: [[TWENTY:%[0-9]+]] = inttoptr i32 [[NINETEEN]] to i8*
-// CHECK-PPC-NEXT: [[TWENTYONE:%[0-9]+]] = add i32 [[NINETEEN]], 3
-// CHECK-PPC-NEXT: [[TWENTYTWO:%[0-9]+]] = inttoptr i32 [[TWENTYONE]] to i8**
-// CHECK-PPC-NEXT: [[TWENTYTHREE:%[0-9]+]] = add i32 [[TWENTYONE]], 4
-// CHECK-PPC-NEXT: [[TWENTYFOUR:%[0-9]+]] = inttoptr i32 [[TWENTYTHREE]] to i8**
-// CHECK-PPC-NEXT: [[GPR1:%[a-z0-9]+]] = load i8, i8* [[GPRPTR1]]
-// CHECK-PPC-NEXT: [[FPR1:%[a-z0-9]+]] = load i8, i8* [[TWENTY]]
-// CHECK-PPC-NEXT: [[OVERFLOW_AREA1:%[a-z_0-9]+]] = load i8*, i8** [[TWENTYTWO]]
-// CHECK-PPC-NEXT: [[TWENTYFIVE:%[0-9]+]] = ptrtoint i8* [[OVERFLOW_AREA1]] to i32
-// CHECK-PPC-NEXT: [[REGSAVE_AREA1:%[a-z_0-9]+]] = load i8*, i8** [[TWENTYFOUR]]
-// CHECK-PPC-NEXT: [[TWENTYSIX:%[0-9]+]] = ptrtoint i8* [[REGSAVE_AREA1]] to i32
-// CHECK-PPC-NEXT: [[COND1:%[a-z0-9]+]] = icmp ult i8 [[GPR1]], 8
-// CHECK-PPC-NEXT: [[TWENTYSEVEN:%[0-9]+]] = mul i8 [[GPR1]], 4
-// CHECK-PPC-NEXT: [[TWENTYEIGHT:%[0-9]+]] = sext i8 [[TWENTYSEVEN]] to i32
-// CHECK-PPC-NEXT: [[TWENTYNINE:%[0-9]+]] = add i32 [[TWENTYSIX]], [[TWENTYEIGHT]]
-// CHECK-PPC-NEXT: br i1 [[COND1]], label [[USING_REGS1:%[.a-z_0-9]+]], label [[USING_OVERFLOW1:%[.a-z_0-9]+]]
+// CHECK-PPC: [[ARRAYDECAY:%[a-z0-9]+]] = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
+// CHECK-PPC-NEXT: [[GPRPTR:%.+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* [[ARRAYDECAY]], i32 0, i32 0
+// CHECK-PPC-NEXT: [[GPR:%.+]] = load i8, i8* [[GPRPTR]], align 4
+// CHECK-PPC-NEXT: [[COND:%.+]] = icmp ult i8 [[GPR]], 8
+// CHECK-PPC-NEXT: br i1 [[COND]], label %[[USING_REGS:.+]], label %[[USING_OVERFLOW:.+]]{{$}}
//
-// CHECK-PPC1:[[USING_REGS1]]:
-// CHECK-PPC: [[THIRTY:%[0-9]+]] = inttoptr i32 [[TWENTYNINE]] to i32*
-// CHECK-PPC-NEXT: [[THIRTYONE:%[0-9]+]] = add i8 [[GPR1]], 1
-// CHECK-PPC-NEXT: store i8 [[THIRTYONE]], i8* [[GPRPTR1]]
-// CHECK-PPC-NEXT: br label [[CONT1:%[a-z0-9]+]]
+// CHECK-PPC:[[USING_REGS]]
+// CHECK-PPC-NEXT: [[REGSAVE_AREA_P:%.+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* [[ARRAYDECAY]], i32 0, i32 4
+// CHECK-PPC-NEXT: [[REGSAVE_AREA:%.+]] = load i8*, i8** [[REGSAVE_AREA_P]], align 4
+// CHECK-PPC-NEXT: [[OFFSET:%.+]] = mul i8 [[GPR]], 4
+// CHECK-PPC-NEXT: [[RAW_REGADDR:%.+]] = getelementptr inbounds i8, i8* [[REGSAVE_AREA]], i8 [[OFFSET]]
+// CHECK-PPC-NEXT: [[REGADDR:%.+]] = bitcast i8* [[RAW_REGADDR]] to i32*
+// CHECK-PPC-NEXT: [[USED_GPR:%[0-9]+]] = add i8 [[GPR]], 1
+// CHECK-PPC-NEXT: store i8 [[USED_GPR]], i8* [[GPRPTR]], align 4
+// CHECK-PPC-NEXT: br label %[[CONT:[a-z0-9]+]]
//
-// CHECK-PPC1:[[USING_OVERFLOW1]]:
-// CHECK-PPC: [[THIRTYTWO:%[0-9]+]] = inttoptr i32 [[TWENTYFIVE]] to i32*
-// CHECK-PPC-NEXT: [[THIRTYTHREE:%[0-9]+]] = add i32 [[TWENTYFIVE]], 4
-// CHECK-PPC-NEXT: [[THIRTYFOUR:%[0-9]+]] = inttoptr i32 [[THIRTYTHREE]] to i8*
-// CHECK-PPC-NEXT: store i8* [[THIRTYFOUR]], i8** [[TWENTYTWO]]
-// CHECK-PPC-NEXT: br label [[CONT1]]
+// CHECK-PPC:[[USING_OVERFLOW]]
+// CHECK-PPC-NEXT: [[OVERFLOW_AREA_P:%[0-9]+]] = getelementptr inbounds %struct.__va_list_tag, %struct.__va_list_tag* [[ARRAYDECAY]], i32 0, i32 3
+// CHECK-PPC-NEXT: [[OVERFLOW_AREA:%.+]] = load i8*, i8** [[OVERFLOW_AREA_P]], align 4
+// CHECK-PPC-NEXT: [[MEMADDR:%.+]] = bitcast i8* [[OVERFLOW_AREA]] to i32*
+// CHECK-PPC-NEXT: [[NEW_OVERFLOW_AREA:%[0-9]+]] = getelementptr inbounds i8, i8* [[OVERFLOW_AREA]], i32 4
+// CHECK-PPC-NEXT: store i8* [[NEW_OVERFLOW_AREA]], i8** [[OVERFLOW_AREA_P]]
+// CHECK-PPC-NEXT: br label %[[CONT]]
//
-// CHECK-PPC1:[[CONT1]]:
-// CHECK-PPC: [[VAARG_ADDR1:%[a-z.0-9]+]] = phi i32* [ [[THIRTY]], [[USING_REGS1]] ], [ [[THIRTYTWO]], [[USING_OVERFLOW1]] ]
-// CHECK-PPC-NEXT: [[THIRTYFIVE:%[0-9]+]] = load i32, i32* [[VAARG_ADDR1]]
+// CHECK-PPC:[[CONT]]
+// CHECK-PPC-NEXT: [[VAARG_ADDR:%[a-z.0-9]+]] = phi i32* [ [[REGADDR]], %[[USING_REGS]] ], [ [[MEMADDR]], %[[USING_OVERFLOW]] ]
+// CHECK-PPC-NEXT: [[THIRTYFIVE:%[0-9]+]] = load i32, i32* [[VAARG_ADDR]]
// CHECK-PPC-NEXT: store i32 [[THIRTYFIVE]], i32* %v, align 4
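
Taken together, the checks trace the classic two-path PPC32 va_arg. A hedged C sketch of the int case (field meanings follow the checks; everything else is assumed):

    /* PPC32 va_arg for a 4-byte int: take a GPR save slot if any of the
       eight GPRs remain, otherwise fall back to the overflow area. */
    static int *ppc32_vaarg_int(unsigned char *gpr, char **regsave,
                                char **overflow) {
      if (*gpr < 8) {                        /* icmp ult i8 [[GPR]], 8 */
        char *addr = *regsave + *gpr * 4;    /* regsave + gpr * 4      */
        ++*gpr;                              /* consume one GPR        */
        return (int *)addr;                  /* using_regs             */
      }
      int *addr = (int *)*overflow;          /* using_overflow         */
      *overflow += 4;                        /* bump the overflow area */
      return addr;
    }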
#ifdef __powerpc64__
diff --git a/clang/test/CodeGen/ppc64-align-struct.c b/clang/test/CodeGen/ppc64-align-struct.c
index 8c4437a38d0..6a04d0cd84f 100644
--- a/clang/test/CodeGen/ppc64-align-struct.c
+++ b/clang/test/CodeGen/ppc64-align-struct.c
@@ -41,18 +41,22 @@ void test6 (int x, struct test6 y)
}
// This case requires run-time realignment of the incoming struct.
-// CHECK: define void @test7(i32 signext %x, %struct.test7* byval align 16)
+// CHECK-LABEL: define void @test7(i32 signext %x, %struct.test7* byval align 16)
// CHECK: %y = alloca %struct.test7, align 32
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
void test7 (int x, struct test7 y)
{
}
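
The alloca-plus-memcpy pair the checks look for is that run-time realignment: the byval slot arrives with only the ABI's 16-byte guarantee, so the callee makes a copy at the type's full alignment before using it. A hedged source-level sketch (the real test7 layout is defined earlier in the file):

    #include <string.h>

    struct test7_like { int v[8]; } __attribute__((aligned(32)));

    /* Callee-side copy: %y = alloca align 32, then llvm.memcpy from the
       16-aligned byval slot. */
    void use_test7_like(struct test7_like *byval_slot) {
      struct test7_like y;
      memcpy(&y, byval_slot, sizeof y);
      (void)y;
    }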
-// CHECK: define void @test1va(%struct.test1* noalias sret %agg.result, i32 signext %x, ...)
+// CHECK-LABEL: define void @test1va(%struct.test1* noalias sret %agg.result, i32 signext %x, ...)
+// CHECK: %y = alloca %struct.test1, align 4
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
-// CHECK: %[[NEXT:[^ ]+]] = getelementptr i8, i8* %[[CUR]], i64 8
+// CHECK: %[[NEXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[CUR]], i64 8
// CHECK: store i8* %[[NEXT]], i8** %ap
-// CHECK: bitcast i8* %[[CUR]] to %struct.test1*
+// CHECK: [[T0:%.*]] = bitcast i8* %[[CUR]] to %struct.test1*
+// CHECK: [[DEST:%.*]] = bitcast %struct.test1* %y to i8*
+// CHECK: [[SRC:%.*]] = bitcast %struct.test1* [[T0]] to i8*
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DEST]], i8* [[SRC]], i64 8, i32 4, i1 false)
struct test1 test1va (int x, ...)
{
struct test1 y;
@@ -63,15 +67,19 @@ struct test1 test1va (int x, ...)
return y;
}
-// CHECK: define void @test2va(%struct.test2* noalias sret %agg.result, i32 signext %x, ...)
+// CHECK-LABEL: define void @test2va(%struct.test2* noalias sret %agg.result, i32 signext %x, ...)
+// CHECK: %y = alloca %struct.test2, align 16
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
// CHECK: %[[TMP0:[^ ]+]] = ptrtoint i8* %[[CUR]] to i64
// CHECK: %[[TMP1:[^ ]+]] = add i64 %[[TMP0]], 15
// CHECK: %[[TMP2:[^ ]+]] = and i64 %[[TMP1]], -16
// CHECK: %[[ALIGN:[^ ]+]] = inttoptr i64 %[[TMP2]] to i8*
-// CHECK: %[[NEXT:[^ ]+]] = getelementptr i8, i8* %[[ALIGN]], i64 16
+// CHECK: %[[NEXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[ALIGN]], i64 16
// CHECK: store i8* %[[NEXT]], i8** %ap
-// CHECK: bitcast i8* %[[ALIGN]] to %struct.test2*
+// CHECK: [[T0:%.*]] = bitcast i8* %[[ALIGN]] to %struct.test2*
+// CHECK: [[DEST:%.*]] = bitcast %struct.test2* %y to i8*
+// CHECK: [[SRC:%.*]] = bitcast %struct.test2* [[T0]] to i8*
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DEST]], i8* [[SRC]], i64 16, i32 16, i1 false)
struct test2 test2va (int x, ...)
{
struct test2 y;
@@ -82,15 +90,19 @@ struct test2 test2va (int x, ...)
return y;
}
-// CHECK: define void @test3va(%struct.test3* noalias sret %agg.result, i32 signext %x, ...)
+// CHECK-LABEL: define void @test3va(%struct.test3* noalias sret %agg.result, i32 signext %x, ...)
+// CHECK: %y = alloca %struct.test3, align 32
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
// CHECK: %[[TMP0:[^ ]+]] = ptrtoint i8* %[[CUR]] to i64
// CHECK: %[[TMP1:[^ ]+]] = add i64 %[[TMP0]], 15
// CHECK: %[[TMP2:[^ ]+]] = and i64 %[[TMP1]], -16
// CHECK: %[[ALIGN:[^ ]+]] = inttoptr i64 %[[TMP2]] to i8*
-// CHECK: %[[NEXT:[^ ]+]] = getelementptr i8, i8* %[[ALIGN]], i64 32
+// CHECK: %[[NEXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[ALIGN]], i64 32
// CHECK: store i8* %[[NEXT]], i8** %ap
-// CHECK: bitcast i8* %[[ALIGN]] to %struct.test3*
+// CHECK: [[T0:%.*]] = bitcast i8* %[[ALIGN]] to %struct.test3*
+// CHECK: [[DEST:%.*]] = bitcast %struct.test3* %y to i8*
+// CHECK: [[SRC:%.*]] = bitcast %struct.test3* [[T0]] to i8*
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DEST]], i8* [[SRC]], i64 32, i32 16, i1 false)
struct test3 test3va (int x, ...)
{
struct test3 y;
@@ -101,11 +113,15 @@ struct test3 test3va (int x, ...)
return y;
}
-// CHECK: define void @test4va(%struct.test4* noalias sret %agg.result, i32 signext %x, ...)
+// CHECK-LABEL: define void @test4va(%struct.test4* noalias sret %agg.result, i32 signext %x, ...)
+// CHECK: %y = alloca %struct.test4, align 4
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
-// CHECK: %[[NEXT:[^ ]+]] = getelementptr i8, i8* %[[CUR]], i64 16
+// CHECK: %[[NEXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[CUR]], i64 16
// CHECK: store i8* %[[NEXT]], i8** %ap
-// CHECK: bitcast i8* %[[CUR]] to %struct.test4*
+// CHECK: [[T0:%.*]] = bitcast i8* %[[CUR]] to %struct.test4*
+// CHECK: [[DEST:%.*]] = bitcast %struct.test4* %y to i8*
+// CHECK: [[SRC:%.*]] = bitcast %struct.test4* [[T0]] to i8*
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DEST]], i8* [[SRC]], i64 12, i32 4, i1 false)
struct test4 test4va (int x, ...)
{
struct test4 y;
@@ -116,11 +132,15 @@ struct test4 test4va (int x, ...)
return y;
}
-// CHECK: define void @testva_longdouble(%struct.test_longdouble* noalias sret %agg.result, i32 signext %x, ...)
+// CHECK-LABEL: define void @testva_longdouble(%struct.test_longdouble* noalias sret %agg.result, i32 signext %x, ...)
+// CHECK: %y = alloca %struct.test_longdouble, align 16
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
-// CHECK: %[[NEXT:[^ ]+]] = getelementptr i8, i8* %[[CUR]], i64 16
+// CHECK: %[[NEXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[CUR]], i64 16
// CHECK: store i8* %[[NEXT]], i8** %ap
-// CHECK: bitcast i8* %[[CUR]] to %struct.test_longdouble*
+// CHECK: [[T0:%.*]] = bitcast i8* %[[CUR]] to %struct.test_longdouble*
+// CHECK: [[DEST:%.*]] = bitcast %struct.test_longdouble* %y to i8*
+// CHECK: [[SRC:%.*]] = bitcast %struct.test_longdouble* [[T0]] to i8*
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DEST]], i8* [[SRC]], i64 16, i32 8, i1 false)
struct test_longdouble { long double x; };
struct test_longdouble testva_longdouble (int x, ...)
{
@@ -132,15 +152,19 @@ struct test_longdouble testva_longdouble (int x, ...)
return y;
}
-// CHECK: define void @testva_vector(%struct.test_vector* noalias sret %agg.result, i32 signext %x, ...)
+// CHECK-LABEL: define void @testva_vector(%struct.test_vector* noalias sret %agg.result, i32 signext %x, ...)
+// CHECK: %y = alloca %struct.test_vector, align 16
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
// CHECK: %[[TMP0:[^ ]+]] = ptrtoint i8* %[[CUR]] to i64
// CHECK: %[[TMP1:[^ ]+]] = add i64 %[[TMP0]], 15
// CHECK: %[[TMP2:[^ ]+]] = and i64 %[[TMP1]], -16
// CHECK: %[[ALIGN:[^ ]+]] = inttoptr i64 %[[TMP2]] to i8*
-// CHECK: %[[NEXT:[^ ]+]] = getelementptr i8, i8* %[[ALIGN]], i64 16
+// CHECK: %[[NEXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[ALIGN]], i64 16
// CHECK: store i8* %[[NEXT]], i8** %ap
-// CHECK: bitcast i8* %[[ALIGN]] to %struct.test_vector*
+// CHECK: [[T0:%.*]] = bitcast i8* %[[ALIGN]] to %struct.test_vector*
+// CHECK: [[DEST:%.*]] = bitcast %struct.test_vector* %y to i8*
+// CHECK: [[SRC:%.*]] = bitcast %struct.test_vector* [[T0]] to i8*
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[DEST]], i8* [[SRC]], i64 16, i32 16, i1 false)
struct test_vector { vector int x; };
struct test_vector testva_vector (int x, ...)
{
diff --git a/clang/test/CodeGen/ppc64-complex-parms.c b/clang/test/CodeGen/ppc64-complex-parms.c
index f5583a0742d..3f2a0c21420 100644
--- a/clang/test/CodeGen/ppc64-complex-parms.c
+++ b/clang/test/CodeGen/ppc64-complex-parms.c
@@ -62,10 +62,10 @@ void bar_float(void) {
// CHECK: %[[VAR3:[A-Za-z0-9.]+]] = getelementptr inbounds { float, float }, { float, float }* %[[VAR1]], i32 0, i32 1
// CHECK: store float 2.000000e+00, float* %[[VAR2]]
// CHECK: store float -2.500000e+00, float* %[[VAR3]]
-// CHECK: %[[VAR4:[A-Za-z0-9.]+]] = getelementptr { float, float }, { float, float }* %[[VAR1]], i32 0, i32 0
-// CHECK: %[[VAR5:[A-Za-z0-9.]+]] = load float, float* %[[VAR4]], align 1
-// CHECK: %[[VAR6:[A-Za-z0-9.]+]] = getelementptr { float, float }, { float, float }* %[[VAR1]], i32 0, i32 1
-// CHECK: %[[VAR7:[A-Za-z0-9.]+]] = load float, float* %[[VAR6]], align 1
+// CHECK: %[[VAR4:[A-Za-z0-9.]+]] = getelementptr inbounds { float, float }, { float, float }* %[[VAR1]], i32 0, i32 0
+// CHECK: %[[VAR5:[A-Za-z0-9.]+]] = load float, float* %[[VAR4]], align 4
+// CHECK: %[[VAR6:[A-Za-z0-9.]+]] = getelementptr inbounds { float, float }, { float, float }* %[[VAR1]], i32 0, i32 1
+// CHECK: %[[VAR7:[A-Za-z0-9.]+]] = load float, float* %[[VAR6]], align 4
// CHECK: %{{[A-Za-z0-9.]+}} = call float @foo_float(float %[[VAR5]], float %[[VAR7]])
void bar_double(void) {
@@ -78,10 +78,10 @@ void bar_double(void) {
// CHECK: %[[VAR13:[A-Za-z0-9.]+]] = getelementptr inbounds { double, double }, { double, double }* %[[VAR11]], i32 0, i32 1
// CHECK: store double 2.000000e+00, double* %[[VAR12]]
// CHECK: store double -2.500000e+00, double* %[[VAR13]]
-// CHECK: %[[VAR14:[A-Za-z0-9.]+]] = getelementptr { double, double }, { double, double }* %[[VAR11]], i32 0, i32 0
-// CHECK: %[[VAR15:[A-Za-z0-9.]+]] = load double, double* %[[VAR14]], align 1
-// CHECK: %[[VAR16:[A-Za-z0-9.]+]] = getelementptr { double, double }, { double, double }* %[[VAR11]], i32 0, i32 1
-// CHECK: %[[VAR17:[A-Za-z0-9.]+]] = load double, double* %[[VAR16]], align 1
+// CHECK: %[[VAR14:[A-Za-z0-9.]+]] = getelementptr inbounds { double, double }, { double, double }* %[[VAR11]], i32 0, i32 0
+// CHECK: %[[VAR15:[A-Za-z0-9.]+]] = load double, double* %[[VAR14]], align 8
+// CHECK: %[[VAR16:[A-Za-z0-9.]+]] = getelementptr inbounds { double, double }, { double, double }* %[[VAR11]], i32 0, i32 1
+// CHECK: %[[VAR17:[A-Za-z0-9.]+]] = load double, double* %[[VAR16]], align 8
// CHECK: %{{[A-Za-z0-9.]+}} = call double @foo_double(double %[[VAR15]], double %[[VAR17]])
void bar_long_double(void) {
@@ -94,10 +94,10 @@ void bar_long_double(void) {
// CHECK: %[[VAR23:[A-Za-z0-9.]+]] = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %[[VAR21]], i32 0, i32 1
// CHECK: store ppc_fp128 0xM40000000000000000000000000000000, ppc_fp128* %[[VAR22]]
// CHECK: store ppc_fp128 0xMC0040000000000000000000000000000, ppc_fp128* %[[VAR23]]
-// CHECK: %[[VAR24:[A-Za-z0-9.]+]] = getelementptr { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %[[VAR21]], i32 0, i32 0
-// CHECK: %[[VAR25:[A-Za-z0-9.]+]] = load ppc_fp128, ppc_fp128* %[[VAR24]], align 1
-// CHECK: %[[VAR26:[A-Za-z0-9.]+]] = getelementptr { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %[[VAR21]], i32 0, i32 1
-// CHECK: %[[VAR27:[A-Za-z0-9.]+]] = load ppc_fp128, ppc_fp128* %[[VAR26]], align 1
+// CHECK: %[[VAR24:[A-Za-z0-9.]+]] = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %[[VAR21]], i32 0, i32 0
+// CHECK: %[[VAR25:[A-Za-z0-9.]+]] = load ppc_fp128, ppc_fp128* %[[VAR24]], align 16
+// CHECK: %[[VAR26:[A-Za-z0-9.]+]] = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %[[VAR21]], i32 0, i32 1
+// CHECK: %[[VAR27:[A-Za-z0-9.]+]] = load ppc_fp128, ppc_fp128* %[[VAR26]], align 16
// CHECK: %{{[A-Za-z0-9.]+}} = call ppc_fp128 @foo_long_double(ppc_fp128 %[[VAR25]], ppc_fp128 %[[VAR27]])
void bar_int(void) {
@@ -110,10 +110,10 @@ void bar_int(void) {
// CHECK: %[[VAR33:[A-Za-z0-9.]+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* %[[VAR31]], i32 0, i32 1
// CHECK: store i32 2, i32* %[[VAR32]]
// CHECK: store i32 -3, i32* %[[VAR33]]
-// CHECK: %[[VAR34:[A-Za-z0-9.]+]] = getelementptr { i32, i32 }, { i32, i32 }* %[[VAR31]], i32 0, i32 0
-// CHECK: %[[VAR35:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR34]], align 1
-// CHECK: %[[VAR36:[A-Za-z0-9.]+]] = getelementptr { i32, i32 }, { i32, i32 }* %[[VAR31]], i32 0, i32 1
-// CHECK: %[[VAR37:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR36]], align 1
+// CHECK: %[[VAR34:[A-Za-z0-9.]+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* %[[VAR31]], i32 0, i32 0
+// CHECK: %[[VAR35:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR34]], align 4
+// CHECK: %[[VAR36:[A-Za-z0-9.]+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* %[[VAR31]], i32 0, i32 1
+// CHECK: %[[VAR37:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR36]], align 4
// CHECK: %{{[A-Za-z0-9.]+}} = call signext i32 @foo_int(i32 %[[VAR35]], i32 %[[VAR37]])
void bar_short(void) {
@@ -126,10 +126,10 @@ void bar_short(void) {
// CHECK: %[[VAR43:[A-Za-z0-9.]+]] = getelementptr inbounds { i16, i16 }, { i16, i16 }* %[[VAR41]], i32 0, i32 1
// CHECK: store i16 2, i16* %[[VAR42]]
// CHECK: store i16 -3, i16* %[[VAR43]]
-// CHECK: %[[VAR44:[A-Za-z0-9.]+]] = getelementptr { i16, i16 }, { i16, i16 }* %[[VAR41]], i32 0, i32 0
-// CHECK: %[[VAR45:[A-Za-z0-9.]+]] = load i16, i16* %[[VAR44]], align 1
-// CHECK: %[[VAR46:[A-Za-z0-9.]+]] = getelementptr { i16, i16 }, { i16, i16 }* %[[VAR41]], i32 0, i32 1
-// CHECK: %[[VAR47:[A-Za-z0-9.]+]] = load i16, i16* %[[VAR46]], align 1
+// CHECK: %[[VAR44:[A-Za-z0-9.]+]] = getelementptr inbounds { i16, i16 }, { i16, i16 }* %[[VAR41]], i32 0, i32 0
+// CHECK: %[[VAR45:[A-Za-z0-9.]+]] = load i16, i16* %[[VAR44]], align 2
+// CHECK: %[[VAR46:[A-Za-z0-9.]+]] = getelementptr inbounds { i16, i16 }, { i16, i16 }* %[[VAR41]], i32 0, i32 1
+// CHECK: %[[VAR47:[A-Za-z0-9.]+]] = load i16, i16* %[[VAR46]], align 2
// CHECK: %{{[A-Za-z0-9.]+}} = call signext i16 @foo_short(i16 %[[VAR45]], i16 %[[VAR47]])
void bar_char(void) {
@@ -142,9 +142,9 @@ void bar_char(void) {
// CHECK: %[[VAR53:[A-Za-z0-9.]+]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %[[VAR51]], i32 0, i32 1
// CHECK: store i8 2, i8* %[[VAR52]]
// CHECK: store i8 -3, i8* %[[VAR53]]
-// CHECK: %[[VAR54:[A-Za-z0-9.]+]] = getelementptr { i8, i8 }, { i8, i8 }* %[[VAR51]], i32 0, i32 0
+// CHECK: %[[VAR54:[A-Za-z0-9.]+]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %[[VAR51]], i32 0, i32 0
// CHECK: %[[VAR55:[A-Za-z0-9.]+]] = load i8, i8* %[[VAR54]], align 1
-// CHECK: %[[VAR56:[A-Za-z0-9.]+]] = getelementptr { i8, i8 }, { i8, i8 }* %[[VAR51]], i32 0, i32 1
+// CHECK: %[[VAR56:[A-Za-z0-9.]+]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %[[VAR51]], i32 0, i32 1
// CHECK: %[[VAR57:[A-Za-z0-9.]+]] = load i8, i8* %[[VAR56]], align 1
// CHECK: %{{[A-Za-z0-9.]+}} = call signext i8 @foo_char(i8 %[[VAR55]], i8 %[[VAR57]])
@@ -158,10 +158,10 @@ void bar_long(void) {
// CHECK: %[[VAR63:[A-Za-z0-9.]+]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %[[VAR61]], i32 0, i32 1
// CHECK: store i64 2, i64* %[[VAR62]]
// CHECK: store i64 -3, i64* %[[VAR63]]
-// CHECK: %[[VAR64:[A-Za-z0-9.]+]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAR61]], i32 0, i32 0
-// CHECK: %[[VAR65:[A-Za-z0-9.]+]] = load i64, i64* %[[VAR64]], align 1
-// CHECK: %[[VAR66:[A-Za-z0-9.]+]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAR61]], i32 0, i32 1
-// CHECK: %[[VAR67:[A-Za-z0-9.]+]] = load i64, i64* %[[VAR66]], align 1
+// CHECK: %[[VAR64:[A-Za-z0-9.]+]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %[[VAR61]], i32 0, i32 0
+// CHECK: %[[VAR65:[A-Za-z0-9.]+]] = load i64, i64* %[[VAR64]], align 8
+// CHECK: %[[VAR66:[A-Za-z0-9.]+]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %[[VAR61]], i32 0, i32 1
+// CHECK: %[[VAR67:[A-Za-z0-9.]+]] = load i64, i64* %[[VAR66]], align 8
// CHECK: %{{[A-Za-z0-9.]+}} = call i64 @foo_long(i64 %[[VAR65]], i64 %[[VAR67]])
void bar_long_long(void) {
@@ -174,10 +174,10 @@ void bar_long_long(void) {
// CHECK: %[[VAR73:[A-Za-z0-9.]+]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %[[VAR71]], i32 0, i32 1
// CHECK: store i64 2, i64* %[[VAR72]]
// CHECK: store i64 -3, i64* %[[VAR73]]
-// CHECK: %[[VAR74:[A-Za-z0-9.]+]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAR71]], i32 0, i32 0
-// CHECK: %[[VAR75:[A-Za-z0-9.]+]] = load i64, i64* %[[VAR74]], align 1
-// CHECK: %[[VAR76:[A-Za-z0-9.]+]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAR71]], i32 0, i32 1
-// CHECK: %[[VAR77:[A-Za-z0-9.]+]] = load i64, i64* %[[VAR76]], align 1
+// CHECK: %[[VAR74:[A-Za-z0-9.]+]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %[[VAR71]], i32 0, i32 0
+// CHECK: %[[VAR75:[A-Za-z0-9.]+]] = load i64, i64* %[[VAR74]], align 8
+// CHECK: %[[VAR76:[A-Za-z0-9.]+]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %[[VAR71]], i32 0, i32 1
+// CHECK: %[[VAR77:[A-Za-z0-9.]+]] = load i64, i64* %[[VAR76]], align 8
// CHECK: %{{[A-Za-z0-9.]+}} = call i64 @foo_long_long(i64 %[[VAR75]], i64 %[[VAR77]])
// CHECK: attributes [[NUW]] = { nounwind{{.*}} }
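
The through-line of these updates: a _Complex value is lowered as a two-element { T, T } pair, so each component access can carry T's natural alignment (4 for float, 8 for double and i64, 16 for ppc_fp128) instead of the old pessimistic align 1. For instance, component access compiles to a naturally aligned load:

    /* Reading the real part of a _Complex float: load float, align 4. */
    float real_part(_Complex float *p) {
      return __real__ *p;  /* GNU extension, supported by Clang */
    }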
diff --git a/clang/test/CodeGen/ppc64-struct-onefloat.c b/clang/test/CodeGen/ppc64-struct-onefloat.c
index 534e5116f9b..efc6fe9d8d6 100644
--- a/clang/test/CodeGen/ppc64-struct-onefloat.c
+++ b/clang/test/CodeGen/ppc64-struct-onefloat.c
@@ -13,15 +13,15 @@ void bar(Sf a, Sd b, SSf d, SSd e) {}
// CHECK: %b = alloca %struct.s2, align 8
// CHECK: %d = alloca %struct.s4, align 4
// CHECK: %e = alloca %struct.s5, align 8
-// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s1, %struct.s1* %a, i32 0, i32 0
+// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr inbounds %struct.s1, %struct.s1* %a, i32 0, i32 0
// CHECK: store float %a.coerce, float* %{{[a-zA-Z0-9.]+}}, align 4
-// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s2, %struct.s2* %b, i32 0, i32 0
+// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr inbounds %struct.s2, %struct.s2* %b, i32 0, i32 0
// CHECK: store double %b.coerce, double* %{{[a-zA-Z0-9.]+}}, align 8
-// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s4, %struct.s4* %d, i32 0, i32 0
-// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s1, %struct.s1* %{{[a-zA-Z0-9.]+}}, i32 0, i32 0
+// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr inbounds %struct.s4, %struct.s4* %d, i32 0, i32 0
+// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr inbounds %struct.s1, %struct.s1* %{{[a-zA-Z0-9.]+}}, i32 0, i32 0
// CHECK: store float %d.coerce, float* %{{[a-zA-Z0-9.]+}}, align 4
-// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s5, %struct.s5* %e, i32 0, i32 0
-// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s2, %struct.s2* %{{[a-zA-Z0-9.]+}}, i32 0, i32 0
+// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr inbounds %struct.s5, %struct.s5* %e, i32 0, i32 0
+// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr inbounds %struct.s2, %struct.s2* %{{[a-zA-Z0-9.]+}}, i32 0, i32 0
// CHECK: store double %e.coerce, double* %{{[a-zA-Z0-9.]+}}, align 8
// CHECK: ret void
@@ -35,15 +35,15 @@ void foo(void)
}
// CHECK-LABEL: define void @foo
-// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s1, %struct.s1* %p1, i32 0, i32 0
+// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr inbounds %struct.s1, %struct.s1* %p1, i32 0, i32 0
// CHECK: %{{[0-9]+}} = load float, float* %{{[a-zA-Z0-9.]+}}, align 4
-// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s2, %struct.s2* %p2, i32 0, i32 0
+// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr inbounds %struct.s2, %struct.s2* %p2, i32 0, i32 0
// CHECK: %{{[0-9]+}} = load double, double* %{{[a-zA-Z0-9.]+}}, align 8
-// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s4, %struct.s4* %p4, i32 0, i32 0
-// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s1, %struct.s1* %{{[a-zA-Z0-9.]+}}, i32 0, i32 0
+// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr inbounds %struct.s4, %struct.s4* %p4, i32 0, i32 0
+// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr inbounds %struct.s1, %struct.s1* %{{[a-zA-Z0-9.]+}}, i32 0, i32 0
// CHECK: %{{[0-9]+}} = load float, float* %{{[a-zA-Z0-9.]+}}, align 4
-// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s5, %struct.s5* %p5, i32 0, i32 0
-// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s2, %struct.s2* %{{[a-zA-Z0-9.]+}}, i32 0, i32 0
+// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr inbounds %struct.s5, %struct.s5* %p5, i32 0, i32 0
+// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr inbounds %struct.s2, %struct.s2* %{{[a-zA-Z0-9.]+}}, i32 0, i32 0
// CHECK: %{{[0-9]+}} = load double, double* %{{[a-zA-Z0-9.]+}}, align 8
// CHECK: call void @bar(float inreg %{{[0-9]+}}, double inreg %{{[0-9]+}}, float inreg %{{[0-9]+}}, double inreg %{{[0-9]+}})
// CHECK: ret void
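[Editor's note, ppc64-struct-onefloat.c] Every member walk into these single-scalar PPC64 structs now uses `getelementptr inbounds`; nested structs (s4, s5) take two GEPs to reach the scalar. A sketch of the types and calls under test; the exact layouts are assumptions consistent with the struct names in the IR:

    typedef struct s1 { float f; } Sf;    // assumed: one float member
    typedef struct s2 { double d; } Sd;   // assumed: one double member
    typedef struct s4 { Sf fs; } SSf;     // nested: two inbounds GEPs to reach f
    typedef struct s5 { Sd ds; } SSd;

    // From the test: each struct arrives coerced to its single FP member (inreg).
    void bar(Sf a, Sd b, SSf d, SSd e) {}

    void foo(void) {
      Sf p1 = {1.0f}; Sd p2 = {2.0};
      SSf p4 = {{3.0f}}; SSd p5 = {{4.0}};
      bar(p1, p2, p4, p5);  // call void @bar(float inreg ..., double inreg ..., ...)
    }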
diff --git a/clang/test/CodeGen/ppc64-varargs-complex.c b/clang/test/CodeGen/ppc64-varargs-complex.c
index f7906293457..58206801ac2 100644
--- a/clang/test/CodeGen/ppc64-varargs-complex.c
+++ b/clang/test/CodeGen/ppc64-varargs-complex.c
@@ -9,15 +9,14 @@ void testva (int n, ...)
_Complex int i = va_arg(ap, _Complex int);
// CHECK: %[[VAR40:[A-Za-z0-9.]+]] = load i8*, i8** %[[VAR100:[A-Za-z0-9.]+]]
- // CHECK-NEXT: %[[VAR41:[A-Za-z0-9.]+]] = getelementptr i8, i8* %[[VAR40]], i64 16
+ // CHECK-NEXT: %[[VAR41:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR40]], i64 16
// CHECK-NEXT: store i8* %[[VAR41]], i8** %[[VAR100]]
- // CHECK-NEXT: %[[VAR1:[A-Za-z0-9.]+]] = ptrtoint i8* %[[VAR40]] to i64
- // CHECK-NEXT: %[[VAR2:[A-Za-z0-9.]+]] = add i64 %[[VAR1]], 4
- // CHECK-NEXT: %[[VAR3:[A-Za-z0-9.]+]] = add i64 %[[VAR1]], 12
- // CHECK-NEXT: %[[VAR4:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR2]] to i32*
- // CHECK-NEXT: %[[VAR5:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR3]] to i32*
- // CHECK-NEXT: %[[VAR6:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR4]]
- // CHECK-NEXT: %[[VAR7:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR5]]
+ // CHECK-NEXT: %[[VAR1:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR40]], i64 4
+ // CHECK-NEXT: %[[VAR2:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR40]], i64 12
+ // CHECK-NEXT: %[[VAR4:[A-Za-z0-9.]+]] = bitcast i8* %[[VAR1]] to i32*
+ // CHECK-NEXT: %[[VAR5:[A-Za-z0-9.]+]] = bitcast i8* %[[VAR2]] to i32*
+ // CHECK-NEXT: %[[VAR6:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR4]], align 4
+ // CHECK-NEXT: %[[VAR7:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR5]], align 4
// CHECK-NEXT: %[[VAR8:[A-Za-z0-9.]+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* %[[VAR0:[A-Za-z0-9.]+]], i32 0, i32 0
// CHECK-NEXT: %[[VAR9:[A-Za-z0-9.]+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* %[[VAR0]], i32 0, i32 1
// CHECK-NEXT: store i32 %[[VAR6]], i32* %[[VAR8]]
@@ -25,15 +24,14 @@ void testva (int n, ...)
_Complex short s = va_arg(ap, _Complex short);
// CHECK: %[[VAR50:[A-Za-z0-9.]+]] = load i8*, i8** %[[VAR100:[A-Za-z0-9.]+]]
- // CHECK-NEXT: %[[VAR51:[A-Za-z0-9.]+]] = getelementptr i8, i8* %[[VAR50]], i64 16
+ // CHECK-NEXT: %[[VAR51:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR50]], i64 16
// CHECK-NEXT: store i8* %[[VAR51]], i8** %[[VAR100]]
- // CHECK: %[[VAR11:[A-Za-z0-9.]+]] = ptrtoint i8* %{{[A-Za-z0-9.]+}} to i64
- // CHECK-NEXT: %[[VAR12:[A-Za-z0-9.]+]] = add i64 %[[VAR11]], 6
- // CHECK-NEXT: %[[VAR13:[A-Za-z0-9.]+]] = add i64 %[[VAR11]], 14
- // CHECK-NEXT: %[[VAR14:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR12]] to i16*
- // CHECK-NEXT: %[[VAR15:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR13]] to i16*
- // CHECK-NEXT: %[[VAR16:[A-Za-z0-9.]+]] = load i16, i16* %[[VAR14]]
- // CHECK-NEXT: %[[VAR17:[A-Za-z0-9.]+]] = load i16, i16* %[[VAR15]]
+ // CHECK-NEXT: %[[VAR12:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR50]], i64 6
+ // CHECK-NEXT: %[[VAR13:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR50]], i64 14
+ // CHECK-NEXT: %[[VAR14:[A-Za-z0-9.]+]] = bitcast i8* %[[VAR12]] to i16*
+ // CHECK-NEXT: %[[VAR15:[A-Za-z0-9.]+]] = bitcast i8* %[[VAR13]] to i16*
+ // CHECK-NEXT: %[[VAR16:[A-Za-z0-9.]+]] = load i16, i16* %[[VAR14]], align 2
+ // CHECK-NEXT: %[[VAR17:[A-Za-z0-9.]+]] = load i16, i16* %[[VAR15]], align 2
// CHECK-NEXT: %[[VAR18:[A-Za-z0-9.]+]] = getelementptr inbounds { i16, i16 }, { i16, i16 }* %[[VAR10:[A-Za-z0-9.]+]], i32 0, i32 0
// CHECK-NEXT: %[[VAR19:[A-Za-z0-9.]+]] = getelementptr inbounds { i16, i16 }, { i16, i16 }* %[[VAR10]], i32 0, i32 1
// CHECK-NEXT: store i16 %[[VAR16]], i16* %[[VAR18]]
@@ -41,15 +39,12 @@ void testva (int n, ...)
_Complex char c = va_arg(ap, _Complex char);
// CHECK: %[[VAR60:[A-Za-z0-9.]+]] = load i8*, i8** %[[VAR100:[A-Za-z0-9.]+]]
- // CHECK-NEXT: %[[VAR61:[A-Za-z0-9.]+]] = getelementptr i8, i8* %[[VAR60]], i64 16
+ // CHECK-NEXT: %[[VAR61:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR60]], i64 16
// CHECK-NEXT: store i8* %[[VAR61]], i8** %[[VAR100]]
- // CHECK: %[[VAR21:[A-Za-z0-9.]+]] = ptrtoint i8* %{{[A-Za-z0-9.]+}} to i64
- // CHECK-NEXT: %[[VAR22:[A-Za-z0-9.]+]] = add i64 %[[VAR21]], 7
- // CHECK-NEXT: %[[VAR23:[A-Za-z0-9.]+]] = add i64 %[[VAR21]], 15
- // CHECK-NEXT: %[[VAR24:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR22]] to i8*
- // CHECK-NEXT: %[[VAR25:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR23]] to i8*
- // CHECK-NEXT: %[[VAR26:[A-Za-z0-9.]+]] = load i8, i8* %[[VAR24]]
- // CHECK-NEXT: %[[VAR27:[A-Za-z0-9.]+]] = load i8, i8* %[[VAR25]]
+ // CHECK-NEXT: %[[VAR24:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR60]], i64 7
+ // CHECK-NEXT: %[[VAR25:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR60]], i64 15
+ // CHECK-NEXT: %[[VAR26:[A-Za-z0-9.]+]] = load i8, i8* %[[VAR24]], align 1
+ // CHECK-NEXT: %[[VAR27:[A-Za-z0-9.]+]] = load i8, i8* %[[VAR25]], align 1
// CHECK-NEXT: %[[VAR28:[A-Za-z0-9.]+]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %[[VAR20:[A-Za-z0-9.]+]], i32 0, i32 0
// CHECK-NEXT: %[[VAR29:[A-Za-z0-9.]+]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %[[VAR20]], i32 0, i32 1
// CHECK-NEXT: store i8 %[[VAR26]], i8* %[[VAR28]]
@@ -57,15 +52,14 @@ void testva (int n, ...)
_Complex float f = va_arg(ap, _Complex float);
// CHECK: %[[VAR70:[A-Za-z0-9.]+]] = load i8*, i8** %[[VAR100:[A-Za-z0-9.]+]]
- // CHECK-NEXT: %[[VAR71:[A-Za-z0-9.]+]] = getelementptr i8, i8* %[[VAR70]], i64 16
+ // CHECK-NEXT: %[[VAR71:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR70]], i64 16
// CHECK-NEXT: store i8* %[[VAR71]], i8** %[[VAR100]]
- // CHECK: %[[VAR31:[A-Za-z0-9.]+]] = ptrtoint i8* %{{[A-Za-z0-9.]+}} to i64
- // CHECK-NEXT: %[[VAR32:[A-Za-z0-9.]+]] = add i64 %[[VAR31]], 4
- // CHECK-NEXT: %[[VAR33:[A-Za-z0-9.]+]] = add i64 %[[VAR31]], 12
- // CHECK-NEXT: %[[VAR34:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR32]] to float*
- // CHECK-NEXT: %[[VAR35:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR33]] to float*
- // CHECK-NEXT: %[[VAR36:[A-Za-z0-9.]+]] = load float, float* %[[VAR34]]
- // CHECK-NEXT: %[[VAR37:[A-Za-z0-9.]+]] = load float, float* %[[VAR35]]
+ // CHECK-NEXT: %[[VAR32:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR70]], i64 4
+ // CHECK-NEXT: %[[VAR33:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR70]], i64 12
+ // CHECK-NEXT: %[[VAR34:[A-Za-z0-9.]+]] = bitcast i8* %[[VAR32]] to float*
+ // CHECK-NEXT: %[[VAR35:[A-Za-z0-9.]+]] = bitcast i8* %[[VAR33]] to float*
+ // CHECK-NEXT: %[[VAR36:[A-Za-z0-9.]+]] = load float, float* %[[VAR34]], align 4
+ // CHECK-NEXT: %[[VAR37:[A-Za-z0-9.]+]] = load float, float* %[[VAR35]], align 4
// CHECK-NEXT: %[[VAR38:[A-Za-z0-9.]+]] = getelementptr inbounds { float, float }, { float, float }* %[[VAR30:[A-Za-z0-9.]+]], i32 0, i32 0
// CHECK-NEXT: %[[VAR39:[A-Za-z0-9.]+]] = getelementptr inbounds { float, float }, { float, float }* %[[VAR30]], i32 0, i32 1
// CHECK-NEXT: store float %[[VAR36]], float* %[[VAR38]]
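[Editor's note, ppc64-varargs-complex.c] On big-endian ppc64 each `_Complex` part sits at the low-order end of its 8-byte argument slot, which is why the offsets above are 4/12 for int, 6/14 for short, 7/15 for char, and 4/12 for float. The hunks replace the old ptrtoint/add/inttoptr round trips with inbounds GEPs plus bitcasts, and the part loads gain explicit natural alignment. The driver the CHECK lines annotate, reconstructed from the diff's own context lines:

    #include <stdarg.h>

    void testva(int n, ...) {
      va_list ap;
      va_start(ap, n);
      _Complex int   i = va_arg(ap, _Complex int);   // parts at slot offsets 4, 12
      _Complex short s = va_arg(ap, _Complex short); // parts at slot offsets 6, 14
      _Complex char  c = va_arg(ap, _Complex char);  // parts at slot offsets 7, 15
      _Complex float f = va_arg(ap, _Complex float); // parts at slot offsets 4, 12
      va_end(ap);
    }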
diff --git a/clang/test/CodeGen/ppc64le-varargs-complex.c b/clang/test/CodeGen/ppc64le-varargs-complex.c
index 68dfa0b69f3..399371b6e6d 100644
--- a/clang/test/CodeGen/ppc64le-varargs-complex.c
+++ b/clang/test/CodeGen/ppc64le-varargs-complex.c
@@ -9,14 +9,13 @@ void testva (int n, ...)
_Complex int i = va_arg(ap, _Complex int);
// CHECK: %[[VAR40:[A-Za-z0-9.]+]] = load i8*, i8** %[[VAR100:[A-Za-z0-9.]+]]
- // CHECK-NEXT: %[[VAR41:[A-Za-z0-9.]+]] = getelementptr i8, i8* %[[VAR40]], i64 16
+ // CHECK-NEXT: %[[VAR41:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR40]], i64 16
// CHECK-NEXT: store i8* %[[VAR41]], i8** %[[VAR100]]
- // CHECK-NEXT: %[[VAR1:[A-Za-z0-9.]+]] = ptrtoint i8* %[[VAR40]] to i64
- // CHECK-NEXT: %[[VAR3:[A-Za-z0-9.]+]] = add i64 %[[VAR1]], 8
- // CHECK-NEXT: %[[VAR4:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR1]] to i32*
- // CHECK-NEXT: %[[VAR5:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR3]] to i32*
- // CHECK-NEXT: %[[VAR6:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR4]]
- // CHECK-NEXT: %[[VAR7:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR5]]
+ // CHECK-NEXT: %[[VAR3:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR40]], i64 8
+ // CHECK-NEXT: %[[VAR4:[A-Za-z0-9.]+]] = bitcast i8* %[[VAR40]] to i32*
+ // CHECK-NEXT: %[[VAR5:[A-Za-z0-9.]+]] = bitcast i8* %[[VAR3]] to i32*
+ // CHECK-NEXT: %[[VAR6:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR4]], align 8
+ // CHECK-NEXT: %[[VAR7:[A-Za-z0-9.]+]] = load i32, i32* %[[VAR5]], align 8
// CHECK-NEXT: %[[VAR8:[A-Za-z0-9.]+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* %[[VAR0:[A-Za-z0-9.]+]], i32 0, i32 0
// CHECK-NEXT: %[[VAR9:[A-Za-z0-9.]+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* %[[VAR0]], i32 0, i32 1
// CHECK-NEXT: store i32 %[[VAR6]], i32* %[[VAR8]]
@@ -24,14 +23,13 @@ void testva (int n, ...)
_Complex short s = va_arg(ap, _Complex short);
// CHECK: %[[VAR50:[A-Za-z0-9.]+]] = load i8*, i8** %[[VAR100:[A-Za-z0-9.]+]]
- // CHECK-NEXT: %[[VAR51:[A-Za-z0-9.]+]] = getelementptr i8, i8* %[[VAR50]], i64 16
+ // CHECK-NEXT: %[[VAR51:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR50]], i64 16
// CHECK-NEXT: store i8* %[[VAR51]], i8** %[[VAR100]]
- // CHECK: %[[VAR11:[A-Za-z0-9.]+]] = ptrtoint i8* %{{[A-Za-z0-9.]+}} to i64
- // CHECK-NEXT: %[[VAR13:[A-Za-z0-9.]+]] = add i64 %[[VAR11]], 8
- // CHECK-NEXT: %[[VAR14:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR11]] to i16*
- // CHECK-NEXT: %[[VAR15:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR13]] to i16*
- // CHECK-NEXT: %[[VAR16:[A-Za-z0-9.]+]] = load i16, i16* %[[VAR14]]
- // CHECK-NEXT: %[[VAR17:[A-Za-z0-9.]+]] = load i16, i16* %[[VAR15]]
+ // CHECK-NEXT: %[[VAR13:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR50]], i64 8
+ // CHECK-NEXT: %[[VAR14:[A-Za-z0-9.]+]] = bitcast i8* %[[VAR50]] to i16*
+ // CHECK-NEXT: %[[VAR15:[A-Za-z0-9.]+]] = bitcast i8* %[[VAR13]] to i16*
+ // CHECK-NEXT: %[[VAR16:[A-Za-z0-9.]+]] = load i16, i16* %[[VAR14]], align 8
+ // CHECK-NEXT: %[[VAR17:[A-Za-z0-9.]+]] = load i16, i16* %[[VAR15]], align 8
// CHECK-NEXT: %[[VAR18:[A-Za-z0-9.]+]] = getelementptr inbounds { i16, i16 }, { i16, i16 }* %[[VAR10:[A-Za-z0-9.]+]], i32 0, i32 0
// CHECK-NEXT: %[[VAR19:[A-Za-z0-9.]+]] = getelementptr inbounds { i16, i16 }, { i16, i16 }* %[[VAR10]], i32 0, i32 1
// CHECK-NEXT: store i16 %[[VAR16]], i16* %[[VAR18]]
@@ -39,14 +37,11 @@ void testva (int n, ...)
_Complex char c = va_arg(ap, _Complex char);
// CHECK: %[[VAR60:[A-Za-z0-9.]+]] = load i8*, i8** %[[VAR100:[A-Za-z0-9.]+]]
- // CHECK-NEXT: %[[VAR61:[A-Za-z0-9.]+]] = getelementptr i8, i8* %[[VAR60]], i64 16
+ // CHECK-NEXT: %[[VAR61:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR60]], i64 16
// CHECK-NEXT: store i8* %[[VAR61]], i8** %[[VAR100]]
- // CHECK: %[[VAR21:[A-Za-z0-9.]+]] = ptrtoint i8* %{{[A-Za-z0-9.]+}} to i64
- // CHECK-NEXT: %[[VAR23:[A-Za-z0-9.]+]] = add i64 %[[VAR21]], 8
- // CHECK-NEXT: %[[VAR24:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR21]] to i8*
- // CHECK-NEXT: %[[VAR25:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR23]] to i8*
- // CHECK-NEXT: %[[VAR26:[A-Za-z0-9.]+]] = load i8, i8* %[[VAR24]]
- // CHECK-NEXT: %[[VAR27:[A-Za-z0-9.]+]] = load i8, i8* %[[VAR25]]
+ // CHECK-NEXT: %[[VAR25:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR60]], i64 8
+ // CHECK-NEXT: %[[VAR26:[A-Za-z0-9.]+]] = load i8, i8* %[[VAR60]], align 8
+ // CHECK-NEXT: %[[VAR27:[A-Za-z0-9.]+]] = load i8, i8* %[[VAR25]], align 8
// CHECK-NEXT: %[[VAR28:[A-Za-z0-9.]+]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %[[VAR20:[A-Za-z0-9.]+]], i32 0, i32 0
// CHECK-NEXT: %[[VAR29:[A-Za-z0-9.]+]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %[[VAR20]], i32 0, i32 1
// CHECK-NEXT: store i8 %[[VAR26]], i8* %[[VAR28]]
@@ -54,14 +49,13 @@ void testva (int n, ...)
_Complex float f = va_arg(ap, _Complex float);
// CHECK: %[[VAR70:[A-Za-z0-9.]+]] = load i8*, i8** %[[VAR100:[A-Za-z0-9.]+]]
- // CHECK-NEXT: %[[VAR71:[A-Za-z0-9.]+]] = getelementptr i8, i8* %[[VAR70]], i64 16
+ // CHECK-NEXT: %[[VAR71:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR70]], i64 16
// CHECK-NEXT: store i8* %[[VAR71]], i8** %[[VAR100]]
- // CHECK: %[[VAR31:[A-Za-z0-9.]+]] = ptrtoint i8* %{{[A-Za-z0-9.]+}} to i64
- // CHECK-NEXT: %[[VAR33:[A-Za-z0-9.]+]] = add i64 %[[VAR31]], 8
- // CHECK-NEXT: %[[VAR34:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR31]] to float*
- // CHECK-NEXT: %[[VAR35:[A-Za-z0-9.]+]] = inttoptr i64 %[[VAR33]] to float*
- // CHECK-NEXT: %[[VAR36:[A-Za-z0-9.]+]] = load float, float* %[[VAR34]]
- // CHECK-NEXT: %[[VAR37:[A-Za-z0-9.]+]] = load float, float* %[[VAR35]]
+ // CHECK-NEXT: %[[VAR33:[A-Za-z0-9.]+]] = getelementptr inbounds i8, i8* %[[VAR70]], i64 8
+ // CHECK-NEXT: %[[VAR34:[A-Za-z0-9.]+]] = bitcast i8* %[[VAR70]] to float*
+ // CHECK-NEXT: %[[VAR35:[A-Za-z0-9.]+]] = bitcast i8* %[[VAR33]] to float*
+ // CHECK-NEXT: %[[VAR36:[A-Za-z0-9.]+]] = load float, float* %[[VAR34]], align 8
+ // CHECK-NEXT: %[[VAR37:[A-Za-z0-9.]+]] = load float, float* %[[VAR35]], align 8
// CHECK-NEXT: %[[VAR38:[A-Za-z0-9.]+]] = getelementptr inbounds { float, float }, { float, float }* %[[VAR30:[A-Za-z0-9.]+]], i32 0, i32 0
// CHECK-NEXT: %[[VAR39:[A-Za-z0-9.]+]] = getelementptr inbounds { float, float }, { float, float }* %[[VAR30]], i32 0, i32 1
// CHECK-NEXT: store float %[[VAR36]], float* %[[VAR38]]
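[Editor's note, ppc64le-varargs-complex.c] The little-endian variant reads each part from the start of its doubleword slot (offsets 0 and 8), so the first load now comes straight off a bitcast of the va pointer itself. Conceptually the rewrite is the following; this is illustrative C++, not the test source:

    // Old lowering: round-trip the slot pointer through an integer.
    //   unsigned long a = (unsigned long)p;
    //   int *re = (int *)a;  int *im = (int *)(a + 8);
    // New lowering: stay in pointer arithmetic, which preserves pointer
    // provenance and lets the offset GEP be marked inbounds.
    void parts(char *p, int **re, int **im) {
      *re = (int *)p;        // bitcast of the slot pointer (load gets align 8)
      *im = (int *)(p + 8);  // inbounds GEP by 8, then bitcast
    }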
diff --git a/clang/test/CodeGen/sparcv9-abi.c b/clang/test/CodeGen/sparcv9-abi.c
index bf447198cdf..5984fa558c8 100644
--- a/clang/test/CodeGen/sparcv9-abi.c
+++ b/clang/test/CodeGen/sparcv9-abi.c
@@ -132,9 +132,9 @@ int f_variable(char *f, ...) {
while ((c = *f++)) switch (c) {
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
-// CHECK-DAG: %[[NXT:[^ ]+]] = getelementptr i8, i8* %[[CUR]], i32 8
+// CHECK-DAG: %[[NXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[CUR]], i64 8
// CHECK-DAG: store i8* %[[NXT]], i8** %ap
-// CHECK-DAG: %[[EXT:[^ ]+]] = getelementptr i8, i8* %[[CUR]], i32 4
+// CHECK-DAG: %[[EXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[CUR]], i64 4
// CHECK-DAG: %[[ADR:[^ ]+]] = bitcast i8* %[[EXT]] to i32*
// CHECK-DAG: load i32, i32* %[[ADR]]
// CHECK: br
@@ -143,7 +143,7 @@ int f_variable(char *f, ...) {
break;
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
-// CHECK-DAG: %[[NXT:[^ ]+]] = getelementptr i8, i8* %[[CUR]], i32 8
+// CHECK-DAG: %[[NXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[CUR]], i64 8
// CHECK-DAG: store i8* %[[NXT]], i8** %ap
// CHECK-DAG: %[[ADR:[^ ]+]] = bitcast i8* %[[CUR]] to i64*
// CHECK-DAG: load i64, i64* %[[ADR]]
@@ -153,7 +153,7 @@ int f_variable(char *f, ...) {
break;
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
-// CHECK-DAG: %[[NXT:[^ ]+]] = getelementptr i8, i8* %[[CUR]], i32 8
+// CHECK-DAG: %[[NXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[CUR]], i64 8
// CHECK-DAG: store i8* %[[NXT]], i8** %ap
// CHECK-DAG: %[[ADR:[^ ]+]] = bitcast i8* %[[CUR]] to %struct.tiny*
// CHECK: br
@@ -162,7 +162,7 @@ int f_variable(char *f, ...) {
break;
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
-// CHECK-DAG: %[[NXT:[^ ]+]] = getelementptr i8, i8* %[[CUR]], i32 16
+// CHECK-DAG: %[[NXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[CUR]], i64 16
// CHECK-DAG: store i8* %[[NXT]], i8** %ap
// CHECK-DAG: %[[ADR:[^ ]+]] = bitcast i8* %[[CUR]] to %struct.small*
// CHECK: br
@@ -171,7 +171,7 @@ int f_variable(char *f, ...) {
break;
// CHECK: %[[CUR:[^ ]+]] = load i8*, i8** %ap
-// CHECK-DAG: %[[NXT:[^ ]+]] = getelementptr i8, i8* %[[CUR]], i32 8
+// CHECK-DAG: %[[NXT:[^ ]+]] = getelementptr inbounds i8, i8* %[[CUR]], i64 8
// CHECK-DAG: store i8* %[[NXT]], i8** %ap
// CHECK-DAG: %[[IND:[^ ]+]] = bitcast i8* %[[CUR]] to %struct.medium**
// CHECK-DAG: %[[ADR:[^ ]+]] = load %struct.medium*, %struct.medium** %[[IND]]
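[Editor's note, sparcv9-abi.c] The SPARCv9 va_arg pointer bumps now use inbounds GEPs with i64 offsets, matching the 64-bit size type, instead of i32. A sketch of the dispatch loop the CHECK/CHECK-DAG lines trace; only the loop head and the slot sizes come from the diff, so the case labels, struct shapes, and accumulation are assumptions:

    #include <stdarg.h>

    struct tiny   { char a; };    // assumed: read in place from its 8-byte slot
    struct small  { int a[4]; };  // assumed: 16 bytes, hence the NXT bump of 16
    struct medium { long a[4]; }; // assumed: too large, slot holds a pointer to it

    int f_variable(char *f, ...) {
      int s = 0;
      char c;
      va_list ap;
      va_start(ap, f);
      while ((c = *f++)) switch (c) {
      case 'i': s += va_arg(ap, int); break;        // value at slot offset 4
      case 'l': s += (int)va_arg(ap, long); break;  // full 8-byte slot
      case 't': s += va_arg(ap, struct tiny).a; break;
      case 's': s += va_arg(ap, struct small).a[0]; break;
      case 'm': s += (int)va_arg(ap, struct medium).a[0]; break; // via pointer
      }
      va_end(ap);
      return s;
    }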
diff --git a/clang/test/CodeGen/tbaa-class.cpp b/clang/test/CodeGen/tbaa-class.cpp
index a8005d60572..f611ae5abb8 100644
--- a/clang/test/CodeGen/tbaa-class.cpp
+++ b/clang/test/CodeGen/tbaa-class.cpp
@@ -51,10 +51,10 @@ public:
};
uint32_t g(uint32_t *s, StructA *A, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z1g
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32:!.*]]
// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// PATH: define i32 @{{.*}}(
+// PATH-LABEL: define i32 @_Z1g
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32:!.*]]
// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_A_f32:!.*]]
*s = 1;
@@ -63,22 +63,22 @@ uint32_t g(uint32_t *s, StructA *A, uint64_t count) {
}
uint32_t g2(uint32_t *s, StructA *A, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z2g2
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// CHECK: store i16 4, i16* %{{.*}}, align 2, !tbaa [[TAG_i16:!.*]]
-// PATH: define i32 @{{.*}}(
+// CHECK: store i16 4, i16* %{{.*}}, align 4, !tbaa [[TAG_i16:!.*]]
+// PATH-LABEL: define i32 @_Z2g2
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// PATH: store i16 4, i16* %{{.*}}, align 2, !tbaa [[TAG_A_f16:!.*]]
+// PATH: store i16 4, i16* %{{.*}}, align 4, !tbaa [[TAG_A_f16:!.*]]
*s = 1;
A->f16 = 4;
return *s;
}
uint32_t g3(StructA *A, StructB *B, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z2g3
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// PATH: define i32 @{{.*}}(
+// PATH-LABEL: define i32 @_Z2g3
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_A_f32]]
// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_B_a_f32:!.*]]
A->f32 = 1;
@@ -87,22 +87,22 @@ uint32_t g3(StructA *A, StructB *B, uint64_t count) {
}
uint32_t g4(StructA *A, StructB *B, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z2g4
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// CHECK: store i16 4, i16* %{{.*}}, align 2, !tbaa [[TAG_i16]]
-// PATH: define i32 @{{.*}}(
+// CHECK: store i16 4, i16* %{{.*}}, align 4, !tbaa [[TAG_i16]]
+// PATH-LABEL: define i32 @_Z2g4
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_A_f32]]
-// PATH: store i16 4, i16* %{{.*}}, align 2, !tbaa [[TAG_B_a_f16:!.*]]
+// PATH: store i16 4, i16* %{{.*}}, align 4, !tbaa [[TAG_B_a_f16:!.*]]
A->f32 = 1;
B->a.f16 = 4;
return A->f32;
}
uint32_t g5(StructA *A, StructB *B, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z2g5
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// PATH: define i32 @{{.*}}(
+// PATH-LABEL: define i32 @_Z2g5
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_A_f32]]
// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_B_f32:!.*]]
A->f32 = 1;
@@ -111,10 +111,10 @@ uint32_t g5(StructA *A, StructB *B, uint64_t count) {
}
uint32_t g6(StructA *A, StructB *B, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z2g6
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// PATH: define i32 @{{.*}}(
+// PATH-LABEL: define i32 @_Z2g6
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_A_f32]]
// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_B_a_f32_2:!.*]]
A->f32 = 1;
@@ -123,10 +123,10 @@ uint32_t g6(StructA *A, StructB *B, uint64_t count) {
}
uint32_t g7(StructA *A, StructS *S, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z2g7
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// PATH: define i32 @{{.*}}(
+// PATH-LABEL: define i32 @_Z2g7
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_A_f32]]
// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_S_f32:!.*]]
A->f32 = 1;
@@ -135,22 +135,22 @@ uint32_t g7(StructA *A, StructS *S, uint64_t count) {
}
uint32_t g8(StructA *A, StructS *S, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z2g8
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// CHECK: store i16 4, i16* %{{.*}}, align 2, !tbaa [[TAG_i16]]
-// PATH: define i32 @{{.*}}(
+// CHECK: store i16 4, i16* %{{.*}}, align 4, !tbaa [[TAG_i16]]
+// PATH-LABEL: define i32 @_Z2g8
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_A_f32]]
-// PATH: store i16 4, i16* %{{.*}}, align 2, !tbaa [[TAG_S_f16:!.*]]
+// PATH: store i16 4, i16* %{{.*}}, align 4, !tbaa [[TAG_S_f16:!.*]]
A->f32 = 1;
S->f16 = 4;
return A->f32;
}
uint32_t g9(StructS *S, StructS2 *S2, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z2g9
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// PATH: define i32 @{{.*}}(
+// PATH-LABEL: define i32 @_Z2g9
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_S_f32]]
// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_S_f32:!.*]]
S->f32 = 1;
@@ -159,10 +159,10 @@ uint32_t g9(StructS *S, StructS2 *S2, uint64_t count) {
}
uint32_t g10(StructS *S, StructS2 *S2, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z3g10
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// PATH: define i32 @{{.*}}(
+// PATH-LABEL: define i32 @_Z3g10
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_S_f32]]
// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_S2_f32_2:!.*]]
S->f32 = 1;
@@ -171,10 +171,10 @@ uint32_t g10(StructS *S, StructS2 *S2, uint64_t count) {
}
uint32_t g11(StructC *C, StructD *D, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z3g11
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// PATH: define i32 @{{.*}}(
+// PATH-LABEL: define i32 @_Z3g11
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_C_b_a_f32:!.*]]
// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_D_b_a_f32:!.*]]
C->b.a.f32 = 1;
@@ -183,11 +183,11 @@ uint32_t g11(StructC *C, StructD *D, uint64_t count) {
}
uint32_t g12(StructC *C, StructD *D, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z3g12
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
// TODO: differentiate the two accesses.
-// PATH: define i32 @{{.*}}(
+// PATH-LABEL: define i32 @_Z3g12
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_B_a_f32]]
// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_B_a_f32]]
StructB *b1 = &(C->b);
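[Editor's note, tbaa-class.cpp] Two orthogonal updates run through this file. First, the bare `define i32 @{{.*}}(` matches become `CHECK-LABEL` directives with mangled names, so FileCheck pins each group of stores to the right function instead of letting matches drift across function boundaries. Second, the i16 stores are now expected with `align 4`: once alignment is tracked through the access path, a field whose offset is a multiple of 4 inside a 4-byte-aligned object is known to be better aligned than its natural `align 2`. A minimal illustration; the field layout is an assumption, the real StructA has more members:

    #include <stdint.h>

    struct StructA {
      uint16_t f16;  // offset 0; the struct is 4-byte aligned because of f32
      uint32_t f32;
    };

    void set(StructA *A) {
      A->f16 = 4;  // store i16 ... align 4: offset 0 in an align-4 object
    }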
diff --git a/clang/test/CodeGen/tbaa.cpp b/clang/test/CodeGen/tbaa.cpp
index 2bff5d0ba07..c43ca58bc3f 100644
--- a/clang/test/CodeGen/tbaa.cpp
+++ b/clang/test/CodeGen/tbaa.cpp
@@ -45,10 +45,10 @@ typedef struct
} StructS2;
uint32_t g(uint32_t *s, StructA *A, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z1g
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32:!.*]]
// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// PATH: define i32 @{{.*}}(
+// PATH-LABEL: define i32 @_Z1g
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32:!.*]]
// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_A_f32:!.*]]
*s = 1;
@@ -57,22 +57,22 @@ uint32_t g(uint32_t *s, StructA *A, uint64_t count) {
}
uint32_t g2(uint32_t *s, StructA *A, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z2g2
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// CHECK: store i16 4, i16* %{{.*}}, align 2, !tbaa [[TAG_i16:!.*]]
-// PATH: define i32 @{{.*}}(
+// CHECK: store i16 4, i16* %{{.*}}, align 4, !tbaa [[TAG_i16:!.*]]
+// PATH-LABEL: define i32 @_Z2g2
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// PATH: store i16 4, i16* %{{.*}}, align 2, !tbaa [[TAG_A_f16:!.*]]
+// PATH: store i16 4, i16* %{{.*}}, align 4, !tbaa [[TAG_A_f16:!.*]]
*s = 1;
A->f16 = 4;
return *s;
}
uint32_t g3(StructA *A, StructB *B, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z2g3
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// PATH: define i32 @{{.*}}(
+// PATH-LABEL: define i32 @_Z2g3
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_A_f32]]
// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_B_a_f32:!.*]]
A->f32 = 1;
@@ -81,22 +81,22 @@ uint32_t g3(StructA *A, StructB *B, uint64_t count) {
}
uint32_t g4(StructA *A, StructB *B, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z2g4
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// CHECK: store i16 4, i16* %{{.*}}, align 2, !tbaa [[TAG_i16]]
-// PATH: define i32 @{{.*}}(
+// CHECK: store i16 4, i16* %{{.*}}, align 4, !tbaa [[TAG_i16]]
+// PATH-LABEL: define i32 @_Z2g4
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_A_f32]]
-// PATH: store i16 4, i16* %{{.*}}, align 2, !tbaa [[TAG_B_a_f16:!.*]]
+// PATH: store i16 4, i16* %{{.*}}, align 4, !tbaa [[TAG_B_a_f16:!.*]]
A->f32 = 1;
B->a.f16 = 4;
return A->f32;
}
uint32_t g5(StructA *A, StructB *B, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z2g5
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// PATH: define i32 @{{.*}}(
+// PATH-LABEL: define i32 @_Z2g5
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_A_f32]]
// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_B_f32:!.*]]
A->f32 = 1;
@@ -105,10 +105,10 @@ uint32_t g5(StructA *A, StructB *B, uint64_t count) {
}
uint32_t g6(StructA *A, StructB *B, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z2g6
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// PATH: define i32 @{{.*}}(
+// PATH-LABEL: define i32 @_Z2g6
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_A_f32]]
// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_B_a_f32_2:!.*]]
A->f32 = 1;
@@ -117,10 +117,10 @@ uint32_t g6(StructA *A, StructB *B, uint64_t count) {
}
uint32_t g7(StructA *A, StructS *S, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z2g7
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// PATH: define i32 @{{.*}}(
+// PATH-LABEL: define i32 @_Z2g7
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_A_f32]]
// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_S_f32:!.*]]
A->f32 = 1;
@@ -129,22 +129,22 @@ uint32_t g7(StructA *A, StructS *S, uint64_t count) {
}
uint32_t g8(StructA *A, StructS *S, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z2g8
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// CHECK: store i16 4, i16* %{{.*}}, align 2, !tbaa [[TAG_i16]]
-// PATH: define i32 @{{.*}}(
+// CHECK: store i16 4, i16* %{{.*}}, align 4, !tbaa [[TAG_i16]]
+// PATH-LABEL: define i32 @_Z2g8
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_A_f32]]
-// PATH: store i16 4, i16* %{{.*}}, align 2, !tbaa [[TAG_S_f16:!.*]]
+// PATH: store i16 4, i16* %{{.*}}, align 4, !tbaa [[TAG_S_f16:!.*]]
A->f32 = 1;
S->f16 = 4;
return A->f32;
}
uint32_t g9(StructS *S, StructS2 *S2, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z2g9
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// PATH: define i32 @{{.*}}(
+// PATH-LABEL: define i32 @_Z2g9
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_S_f32]]
// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_S2_f32:!.*]]
S->f32 = 1;
@@ -153,22 +153,22 @@ uint32_t g9(StructS *S, StructS2 *S2, uint64_t count) {
}
uint32_t g10(StructS *S, StructS2 *S2, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z3g10
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// CHECK: store i16 4, i16* %{{.*}}, align 2, !tbaa [[TAG_i16]]
-// PATH: define i32 @{{.*}}(
+// CHECK: store i16 4, i16* %{{.*}}, align 4, !tbaa [[TAG_i16]]
+// PATH-LABEL: define i32 @_Z3g10
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_S_f32]]
-// PATH: store i16 4, i16* %{{.*}}, align 2, !tbaa [[TAG_S2_f16:!.*]]
+// PATH: store i16 4, i16* %{{.*}}, align 4, !tbaa [[TAG_S2_f16:!.*]]
S->f32 = 1;
S2->f16 = 4;
return S->f32;
}
uint32_t g11(StructC *C, StructD *D, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z3g11
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// PATH: define i32 @{{.*}}(
+// PATH-LABEL: define i32 @_Z3g11
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_C_b_a_f32:!.*]]
// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_D_b_a_f32:!.*]]
C->b.a.f32 = 1;
@@ -177,11 +177,11 @@ uint32_t g11(StructC *C, StructD *D, uint64_t count) {
}
uint32_t g12(StructC *C, StructD *D, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z3g12
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
// TODO: differentiate the two accesses.
-// PATH: define i32 @{{.*}}(
+// PATH-LABEL: define i32 @_Z3g12
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_B_a_f32]]
// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_B_a_f32]]
StructB *b1 = &(C->b);
@@ -202,9 +202,9 @@ struct five {
} ATTR;
char g13(struct five *a, struct five *b) {
return a->b;
-// CHECK: define signext i8 @{{.*}}(
+// CHECK-LABEL: define signext i8 @_Z3g13
// CHECK: load i8, i8* %{{.*}}, align 1, !tbaa [[TAG_char:!.*]]
-// PATH: define signext i8 @{{.*}}(
+// PATH-LABEL: define signext i8 @_Z3g13
// PATH: load i8, i8* %{{.*}}, align 1, !tbaa [[TAG_five_b:!.*]]
}
@@ -215,9 +215,9 @@ struct six {
char c;
};
char g14(struct six *a, struct six *b) {
-// CHECK: define signext i8 @{{.*}}(
+// CHECK-LABEL: define signext i8 @_Z3g14
// CHECK: load i8, i8* %{{.*}}, align 1, !tbaa [[TAG_char]]
-// PATH: define signext i8 @{{.*}}(
+// PATH-LABEL: define signext i8 @_Z3g14
// PATH: load i8, i8* %{{.*}}, align 1, !tbaa [[TAG_six_b:!.*]]
return a->b;
}
@@ -225,10 +225,10 @@ char g14(struct six *a, struct six *b) {
// Types that differ only by name may alias.
typedef StructS StructS3;
uint32_t g15(StructS *S, StructS3 *S3, uint64_t count) {
-// CHECK: define i32 @{{.*}}(
+// CHECK-LABEL: define i32 @_Z3g15
// CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
// CHECK: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_i32]]
-// PATH: define i32 @{{.*}}(
+// PATH-LABEL: define i32 @_Z3g15
// PATH: store i32 1, i32* %{{.*}}, align 4, !tbaa [[TAG_S_f32]]
// PATH: store i32 4, i32* %{{.*}}, align 4, !tbaa [[TAG_S_f32]]
S->f32 = 1;
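[Editor's note, tbaa.cpp] The same two changes as in tbaa-class.cpp above: CHECK-LABEL anchoring and `align 4` on the i16 stores. The g15 hunk also documents the TBAA side, per the test's own comment that types differing only by name may alias. In miniature, with an assumed field layout:

    #include <stdint.h>

    typedef struct { uint16_t f16; uint32_t f32; } StructS;
    typedef StructS StructS3;  // same type, new name: identical !tbaa tag, so
                               // accesses through the two names may alias

    uint32_t g15(StructS *S, StructS3 *S3) {
      S->f32 = 1;
      S3->f32 = 4;     // S3 may equal S, so the compiler cannot
      return S->f32;   // fold this load to the constant 1
    }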
diff --git a/clang/test/CodeGen/vectorcall.c b/clang/test/CodeGen/vectorcall.c
index 17927c7a3de..9ee35b1a02b 100644
--- a/clang/test/CodeGen/vectorcall.c
+++ b/clang/test/CodeGen/vectorcall.c
@@ -32,13 +32,13 @@ void __vectorcall hfa1(int a, struct HFA4 b, int c) {}
// registers.
void __vectorcall hfa2(struct HFA4 a, struct HFA4 b, double c) {}
// CHECK: define x86_vectorcallcc void @"\01hfa2@@72"(double %a.0, double %a.1, double %a.2, double %a.3, %struct.HFA4* inreg %b, double %c)
-// X64: define x86_vectorcallcc void @"\01hfa2@@72"(double %a.0, double %a.1, double %a.2, double %a.3, %struct.HFA4* align 8 %b, double %c)
+// X64: define x86_vectorcallcc void @"\01hfa2@@72"(double %a.0, double %a.1, double %a.2, double %a.3, %struct.HFA4* %b, double %c)
// Ensure that we pass builtin types directly while counting them against the
// SSE register usage.
void __vectorcall hfa3(double a, double b, double c, double d, double e, struct HFA2 f) {}
// CHECK: define x86_vectorcallcc void @"\01hfa3@@56"(double %a, double %b, double %c, double %d, double %e, %struct.HFA2* inreg %f)
-// X64: define x86_vectorcallcc void @"\01hfa3@@56"(double %a, double %b, double %c, double %d, double %e, %struct.HFA2* align 8 %f)
+// X64: define x86_vectorcallcc void @"\01hfa3@@56"(double %a, double %b, double %c, double %d, double %e, %struct.HFA2* %f)
// Aggregates with more than four elements are not HFAs and are passed byval.
// Because they are not classified as homogeneous, they don't get special
@@ -63,11 +63,11 @@ void __vectorcall hva1(int a, struct HVA4 b, int c) {}
void __vectorcall hva2(struct HVA4 a, struct HVA4 b, v4f32 c) {}
// CHECK: define x86_vectorcallcc void @"\01hva2@@144"(<4 x float> %a.0, <4 x float> %a.1, <4 x float> %a.2, <4 x float> %a.3, %struct.HVA4* inreg %b, <4 x float> %c)
-// X64: define x86_vectorcallcc void @"\01hva2@@144"(<4 x float> %a.0, <4 x float> %a.1, <4 x float> %a.2, <4 x float> %a.3, %struct.HVA4* align 16 %b, <4 x float> %c)
+// X64: define x86_vectorcallcc void @"\01hva2@@144"(<4 x float> %a.0, <4 x float> %a.1, <4 x float> %a.2, <4 x float> %a.3, %struct.HVA4* %b, <4 x float> %c)
void __vectorcall hva3(v4f32 a, v4f32 b, v4f32 c, v4f32 d, v4f32 e, struct HVA2 f) {}
// CHECK: define x86_vectorcallcc void @"\01hva3@@112"(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d, <4 x float> %e, %struct.HVA2* inreg %f)
-// X64: define x86_vectorcallcc void @"\01hva3@@112"(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d, <4 x float> %e, %struct.HVA2* align 16 %f)
+// X64: define x86_vectorcallcc void @"\01hva3@@112"(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d, <4 x float> %e, %struct.HVA2* %f)
typedef float __attribute__((ext_vector_type(3))) v3f32;
struct OddSizeHVA { v3f32 x, y; };
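[Editor's note, vectorcall.c] On x64, once a homogeneous float/vector aggregate runs out of SSE register slots it is passed indirectly; the X64 expectations now drop the explicit `align` attribute on that pointer, leaving alignment to the ABI. A sketch of the hfa2 case; the HFA4 layout is an assumption, though it is consistent with the 72-byte `@@72` suffix (two 32-byte HFAs plus one double):

    struct HFA4 { double w, x, y, z; };  // assumed: 4 doubles => homogeneous FP aggregate

    // From the test: 'a' consumes the four SSE slots, so 'b' no longer fits and
    // is passed indirectly (%struct.HFA4* inreg on x86, a bare pointer on x64),
    // while 'c' still goes in a register.
    void __vectorcall hfa2(struct HFA4 a, struct HFA4 b, double c) {}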
diff --git a/clang/test/CodeGen/xcore-abi.c b/clang/test/CodeGen/xcore-abi.c
index 23fb4414694..2bac78d92ed 100644
--- a/clang/test/CodeGen/xcore-abi.c
+++ b/clang/test/CodeGen/xcore-abi.c
@@ -33,7 +33,7 @@ void testva (int n, ...) {
f(v1);
// CHECK: [[I:%[a-z0-9]+]] = load i8*, i8** [[AP]]
// CHECK: [[P:%[a-z0-9]+]] = bitcast i8* [[I]] to i8**
- // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8, i8* [[I]], i32 4
+ // CHECK: [[IN:%[a-z0-9]+]] = getelementptr inbounds i8, i8* [[I]], i32 4
// CHECK: store i8* [[IN]], i8** [[AP]]
// CHECK: [[V1:%[a-z0-9]+]] = load i8*, i8** [[P]]
// CHECK: store i8* [[V1]], i8** [[V:%[a-z0-9]+]], align 4
@@ -43,7 +43,7 @@ void testva (int n, ...) {
char v2 = va_arg (ap, char); // expected-warning{{second argument to 'va_arg' is of promotable type 'char'}}
f(&v2);
// CHECK: [[I:%[a-z0-9]+]] = load i8*, i8** [[AP]]
- // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8, i8* [[I]], i32 4
+ // CHECK: [[IN:%[a-z0-9]+]] = getelementptr inbounds i8, i8* [[I]], i32 4
// CHECK: store i8* [[IN]], i8** [[AP]]
// CHECK: [[V1:%[a-z0-9]+]] = load i8, i8* [[I]]
// CHECK: store i8 [[V1]], i8* [[V:%[a-z0-9]+]], align 1
@@ -53,7 +53,7 @@ void testva (int n, ...) {
f(&v3);
// CHECK: [[I:%[a-z0-9]+]] = load i8*, i8** [[AP]]
// CHECK: [[P:%[a-z0-9]+]] = bitcast i8* [[I]] to i32*
- // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8, i8* [[I]], i32 4
+ // CHECK: [[IN:%[a-z0-9]+]] = getelementptr inbounds i8, i8* [[I]], i32 4
// CHECK: store i8* [[IN]], i8** [[AP]]
// CHECK: [[V1:%[a-z0-9]+]] = load i32, i32* [[P]]
// CHECK: store i32 [[V1]], i32* [[V:%[a-z0-9]+]], align 4
@@ -64,7 +64,7 @@ void testva (int n, ...) {
f(&v4);
// CHECK: [[I:%[a-z0-9]+]] = load i8*, i8** [[AP]]
// CHECK: [[P:%[a-z0-9]+]] = bitcast i8* [[I]] to i64*
- // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8, i8* [[I]], i32 8
+ // CHECK: [[IN:%[a-z0-9]+]] = getelementptr inbounds i8, i8* [[I]], i32 8
// CHECK: store i8* [[IN]], i8** [[AP]]
// CHECK: [[V1:%[a-z0-9]+]] = load i64, i64* [[P]]
// CHECK: store i64 [[V1]], i64* [[V:%[a-z0-9]+]], align 4
@@ -76,7 +76,7 @@ void testva (int n, ...) {
// CHECK: [[I:%[a-z0-9]+]] = load i8*, i8** [[AP]]
// CHECK: [[I2:%[a-z0-9]+]] = bitcast i8* [[I]] to %struct.x**
// CHECK: [[P:%[a-z0-9]+]] = load %struct.x*, %struct.x** [[I2]]
- // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8, i8* [[I]], i32 4
+ // CHECK: [[IN:%[a-z0-9]+]] = getelementptr inbounds i8, i8* [[I]], i32 4
// CHECK: store i8* [[IN]], i8** [[AP]]
// CHECK: [[V1:%[a-z0-9]+]] = bitcast %struct.x* [[V:%[a-z0-9]+]] to i8*
// CHECK: [[P1:%[a-z0-9]+]] = bitcast %struct.x* [[P]] to i8*
@@ -89,7 +89,7 @@ void testva (int n, ...) {
// CHECK: [[I:%[a-z0-9]+]] = load i8*, i8** [[AP]]
// CHECK: [[I2:%[a-z0-9]+]] = bitcast i8* [[I]] to [4 x i32]**
// CHECK: [[P:%[a-z0-9]+]] = load [4 x i32]*, [4 x i32]** [[I2]]
- // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8, i8* [[I]], i32 4
+ // CHECK: [[IN:%[a-z0-9]+]] = getelementptr inbounds i8, i8* [[I]], i32 4
// CHECK: store i8* [[IN]], i8** [[AP]]
// CHECK: [[V1:%[a-z0-9]+]] = bitcast [4 x i32]* [[V0:%[a-z0-9]+]] to i8*
// CHECK: [[P1:%[a-z0-9]+]] = bitcast [4 x i32]* [[P]] to i8*
@@ -104,7 +104,7 @@ void testva (int n, ...) {
f(&v7);
// CHECK: [[I:%[a-z0-9]+]] = load i8*, i8** [[AP]]
// CHECK: [[P:%[a-z0-9]+]] = bitcast i8* [[I]] to double*
- // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8, i8* [[I]], i32 8
+ // CHECK: [[IN:%[a-z0-9]+]] = getelementptr inbounds i8, i8* [[I]], i32 8
// CHECK: store i8* [[IN]], i8** [[AP]]
// CHECK: [[V1:%[a-z0-9]+]] = load double, double* [[P]]
// CHECK: store double [[V1]], double* [[V:%[a-z0-9]+]], align 4
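[Editor's note, xcore-abi.c] XCore is a 32-bit target, so the va pointer bumps stay i32; the only change is `getelementptr` becoming `getelementptr inbounds`. Every va_arg in the test follows one pattern: load the current pointer, advance it by the 4- or 8-byte slot, store the bumped pointer back, and read the value (or, for aggregates, a pointer to it) through the old value. A reconstructed driver from the context lines; the helper f is assumed to be an external sink, and the aggregate cases v5/v6 are omitted:

    #include <stdarg.h>

    void f(void *p);  // assumed sink so the loads are observable

    void testva(int n, ...) {
      va_list ap;
      va_start(ap, n);
      char *v1 = va_arg(ap, char *);          f(v1);   // 4-byte slot
      char  v2 = va_arg(ap, char);            f(&v2);  // promotable; still a 4-byte slot
      int   v3 = va_arg(ap, int);             f(&v3);  // 4-byte slot
      long long v4 = va_arg(ap, long long);   f(&v4);  // 8-byte slot
      double    v7 = va_arg(ap, double);      f(&v7);  // 8-byte slot
      va_end(ap);
    }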
diff --git a/clang/test/CodeGenCXX/alignment.cpp b/clang/test/CodeGenCXX/alignment.cpp
new file mode 100644
index 00000000000..62648c73176
--- /dev/null
+++ b/clang/test/CodeGenCXX/alignment.cpp
@@ -0,0 +1,297 @@
+// RUN: %clang_cc1 %s -emit-llvm -o - -triple=x86_64-apple-darwin10 | FileCheck %s
+
+extern int int_source();
+extern void int_sink(int x);
+
+namespace test0 {
+ struct A {
+ int aField;
+ int bField;
+ };
+
+ struct B {
+ int onebit : 2;
+ int twobit : 6;
+ int intField;
+ };
+
+ struct __attribute__((packed, aligned(2))) C : A, B {
+ };
+
+ // These accesses should have alignment 4 because they're at offset 0
+ // in a reference with an assumed alignment of 4.
+ // CHECK-LABEL: @_ZN5test01aERNS_1BE
+ void a(B &b) {
+ // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
+ // CHECK: [[B_P:%.*]] = load [[B:%.*]]*, [[B]]**
+ // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
+ // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
+ // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
+ // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
+ // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
+ // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
+ // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 4
+ b.onebit = int_source();
+
+ // CHECK: [[B_P:%.*]] = load [[B]]*, [[B]]**
+ // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
+ // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 4
+ // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
+ // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
+ // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
+ // CHECK: call void @_Z8int_sinki(i32 [[T2]])
+ int_sink(b.onebit);
+ }
+
+ // These accesses should have alignment 2 because they're at offset 8
+ // in a reference/pointer with an assumed alignment of 2.
+ // CHECK-LABEL: @_ZN5test01bERNS_1CE
+ void b(C &c) {
+ // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
+ // CHECK: [[C_P:%.*]] = load [[C:%.*]]*, [[C]]**
+ // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
+ // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
+ // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
+ // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
+ // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
+ // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
+ // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
+ // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
+ // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
+ // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 2
+ c.onebit = int_source();
+
+ // CHECK: [[C_P:%.*]] = load [[C]]*, [[C]]**
+ // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
+ // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
+ // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
+ // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
+ // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
+ // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
+ // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
+ // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
+ // CHECK: call void @_Z8int_sinki(i32 [[T2]])
+ int_sink(c.onebit);
+ }
+
+ // CHECK-LABEL: @_ZN5test01cEPNS_1CE
+ void c(C *c) {
+ // CHECK: [[CALL:%.*]] = call i32 @_Z10int_sourcev()
+ // CHECK: [[C_P:%.*]] = load [[C]]*, [[C]]**
+ // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
+ // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
+ // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
+ // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
+ // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
+ // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
+ // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
+ // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
+ // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
+ // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 2
+ c->onebit = int_source();
+
+ // CHECK: [[C_P:%.*]] = load [[C:%.*]]*, [[C]]**
+ // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
+ // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
+ // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
+ // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
+ // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
+ // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
+ // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
+ // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
+ // CHECK: call void @_Z8int_sinki(i32 [[T2]])
+ int_sink(c->onebit);
+ }
+
+ // These accesses should have alignment 2 because they're at offset 8
+ // in an alignment-2 variable.
+ // CHECK-LABEL: @_ZN5test01dEv
+ void d() {
+ // CHECK: [[C_P:%.*]] = alloca [[C:%.*]], align 2
+ C c;
+
+ // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
+ // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
+ // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
+ // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
+ // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
+ // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
+ // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
+ // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
+ // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
+ // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 2
+ c.onebit = int_source();
+
+ // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
+ // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
+ // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
+ // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
+ // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 2
+ // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
+ // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
+ // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
+ // CHECK: call void @_Z8int_sinki(i32 [[T2]])
+ int_sink(c.onebit);
+ }
+
+ // These accesses should have alignment 8 because they're at offset 8
+ // in an alignment-16 variable.
+ // CHECK-LABEL: @_ZN5test01eEv
+ void e() {
+ // CHECK: [[C_P:%.*]] = alloca [[C:%.*]], align 16
+ __attribute__((aligned(16))) C c;
+
+ // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
+ // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
+ // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B]]*
+ // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
+ // CHECK: [[TRUNC:%.*]] = trunc i32 [[CALL]] to i8
+ // CHECK: [[OLD_VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 8
+ // CHECK: [[T0:%.*]] = and i8 [[TRUNC]], 3
+ // CHECK: [[T1:%.*]] = and i8 [[OLD_VALUE]], -4
+ // CHECK: [[T2:%.*]] = or i8 [[T1]], [[T0]]
+ // CHECK: store i8 [[T2]], i8* [[FIELD_P]], align 8
+ c.onebit = int_source();
+
+ // CHECK: [[T0:%.*]] = bitcast [[C]]* [[C_P]] to i8*
+ // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 8
+ // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
+ // CHECK: [[FIELD_P:%.*]] = bitcast [[B]]* [[B_P]] to i8*
+ // CHECK: [[VALUE:%.*]] = load i8, i8* [[FIELD_P]], align 8
+ // CHECK: [[T0:%.*]] = shl i8 [[VALUE]], 6
+ // CHECK: [[T1:%.*]] = ashr i8 [[T0]], 6
+ // CHECK: [[T2:%.*]] = sext i8 [[T1]] to i32
+ // CHECK: call void @_Z8int_sinki(i32 [[T2]])
+ int_sink(c.onebit);
+ }
+}
+
+namespace test1 {
+ struct Array {
+ int elts[4];
+ };
+
+ struct A {
+ __attribute__((aligned(16))) Array aArray;
+ };
+
+ struct B : virtual A {
+ void *bPointer; // puts bArray at offset 16
+ Array bArray;
+ };
+
+ struct C : virtual A { // must be viable as primary base
+ // Non-empty, nv-size not a multiple of 16.
+ void *cPointer1;
+ void *cPointer2;
+ };
+
+ // Proof of concept that the non-virtual components of B do not have
+ // to be 16-byte-aligned.
+ struct D : C, B {};
+
+ // For the following tests, we want to assign into a variable whose
+ // alignment is high enough that it will absolutely not be the
+ // constraint on the memcpy alignment.
+ typedef __attribute__((aligned(64))) Array AlignedArray;
+
+ // CHECK-LABEL: @_ZN5test11aERNS_1AE
+ void a(A &a) {
+ // CHECK: [[RESULT:%.*]] = alloca [[ARRAY:%.*]], align 64
+ // CHECK: [[A_P:%.*]] = load [[A:%.*]]*, [[A]]**
+ // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
+ // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
+ // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 16, i1 false)
+ AlignedArray result = a.aArray;
+ }
+
+ // CHECK-LABEL: @_ZN5test11bERNS_1BE
+ void b(B &b) {
+ // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
+ // CHECK: [[B_P:%.*]] = load [[B:%.*]]*, [[B]]**
+ // CHECK: [[VPTR_P:%.*]] = bitcast [[B]]* [[B_P]] to i8**
+ // CHECK: [[VPTR:%.*]] = load i8*, i8** [[VPTR_P]], align 8
+ // CHECK: [[T0:%.*]] = getelementptr i8, i8* [[VPTR]], i64 -24
+ // CHECK: [[OFFSET_P:%.*]] = bitcast i8* [[T0]] to i64*
+ // CHECK: [[OFFSET:%.*]] = load i64, i64* [[OFFSET_P]], align 8
+ // CHECK: [[T0:%.*]] = bitcast [[B]]* [[B_P]] to i8*
+ // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[OFFSET]]
+ // CHECK: [[A_P:%.*]] = bitcast i8* [[T1]] to [[A]]*
+ // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
+ // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
+ // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 16, i1 false)
+ AlignedArray result = b.aArray;
+ }
+
+ // CHECK-LABEL: @_ZN5test11cERNS_1BE
+ void c(B &b) {
+ // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
+ // CHECK: [[B_P:%.*]] = load [[B]]*, [[B]]**
+ // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
+ // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
+ // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 8, i1 false)
+ AlignedArray result = b.bArray;
+ }
+
+ // CHECK-LABEL: @_ZN5test11dEPNS_1BE
+ void d(B *b) {
+ // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
+ // CHECK: [[B_P:%.*]] = load [[B]]*, [[B]]**
+ // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
+ // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
+ // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 8, i1 false)
+ AlignedArray result = b->bArray;
+ }
+
+ // CHECK-LABEL: @_ZN5test11eEv
+ void e() {
+ // CHECK: [[B_P:%.*]] = alloca [[B]], align 16
+ // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
+ // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
+ // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
+ // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 16, i1 false)
+ B b;
+ AlignedArray result = b.bArray;
+ }
+
+ // CHECK-LABEL: @_ZN5test11fEv
+ void f() {
+ // TODO: we should devirtualize this derived-to-base conversion.
+ // CHECK: [[D_P:%.*]] = alloca [[D:%.*]], align 16
+ // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
+ // CHECK: [[VPTR_P:%.*]] = bitcast [[D]]* [[D_P]] to i8**
+ // CHECK: [[VPTR:%.*]] = load i8*, i8** [[VPTR_P]], align 16
+ // CHECK: [[T0:%.*]] = getelementptr i8, i8* [[VPTR]], i64 -24
+ // CHECK: [[OFFSET_P:%.*]] = bitcast i8* [[T0]] to i64*
+ // CHECK: [[OFFSET:%.*]] = load i64, i64* [[OFFSET_P]], align 8
+ // CHECK: [[T0:%.*]] = bitcast [[D]]* [[D_P]] to i8*
+ // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[OFFSET]]
+ // CHECK: [[A_P:%.*]] = bitcast i8* [[T1]] to [[A]]*
+ // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
+ // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
+ // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 16, i1 false)
+ D d;
+ AlignedArray result = d.aArray;
+ }
+
+ // CHECK-LABEL: @_ZN5test11gEv
+ void g() {
+ // CHECK: [[D_P:%.*]] = alloca [[D]], align 16
+ // CHECK: [[RESULT:%.*]] = alloca [[ARRAY]], align 64
+ // CHECK: [[T0:%.*]] = bitcast [[D]]* [[D_P]] to i8*
+ // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 24
+ // CHECK: [[B_P:%.*]] = bitcast i8* [[T1]] to [[B:%.*]]*
+ // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
+ // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
+ // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 16, i32 8, i1 false)
+ D d;
+ AlignedArray result = d.bArray;
+ }
+}
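[Editor's note, alignment.cpp] This new file is the acceptance test for the patch. test0 checks that a bit-field access inherits its alignment from however the object was reached: 4 through a B reference, 2 through a C reference, pointer, or default local, 8 at offset 8 of an aligned(16) local. test1 checks that the memcpy out of aArray/bArray is capped by what the access path guarantees; notably, reaching bArray through a B reference only yields align 8, because a B might be a non-virtually-placed base of something like D where its non-virtual part is not 16-aligned, while a complete local B yields the full 16. The rule in miniature, under an assumed packed layout that mirrors the test's C:

    // Assumed rule being exercised: the usable alignment of a member access is
    // gcd(alignment known for the base path, the member's offset).
    struct __attribute__((packed, aligned(2))) P { char pad[8]; int i; };
    P gp __attribute__((aligned(16)));

    void through_pointer(P *p) { p->i = 1; }  // store ... align 2: base only known align 2
    void through_global()      { gp.i = 1; }  // offset 8 in an align-16 object: align 8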
diff --git a/clang/test/CodeGenCXX/arm.cpp b/clang/test/CodeGenCXX/arm.cpp
index 0fadfe97b49..11ae6b24c26 100644
--- a/clang/test/CodeGenCXX/arm.cpp
+++ b/clang/test/CodeGenCXX/arm.cpp
@@ -152,8 +152,8 @@ namespace test3 {
void e(A *x) {
// CHECK-LABEL: define void @_ZN5test31eEPNS_1AE(
// CHECK: icmp eq {{.*}}, null
- // CHECK: getelementptr {{.*}}, i64 -8
- // CHECK: getelementptr {{.*}}, i64 4
+ // CHECK: getelementptr {{.*}}, i32 -8
+ // CHECK: getelementptr {{.*}}, i32 4
// CHECK: bitcast {{.*}} to i32*
// CHECK: load
// CHECK: invoke {{.*}} @_ZN5test31AD1Ev
@@ -164,8 +164,8 @@ namespace test3 {
void f(A (*x)[20]) {
// CHECK-LABEL: define void @_ZN5test31fEPA20_NS_1AE(
// CHECK: icmp eq {{.*}}, null
- // CHECK: getelementptr {{.*}}, i64 -8
- // CHECK: getelementptr {{.*}}, i64 4
+ // CHECK: getelementptr {{.*}}, i32 -8
+ // CHECK: getelementptr {{.*}}, i32 4
// CHECK: bitcast {{.*}} to i32*
// CHECK: load
// CHECK: invoke {{.*}} @_ZN5test31AD1Ev
@@ -223,8 +223,8 @@ namespace test4 {
void e(A *x) {
// CHECK-LABEL: define void @_ZN5test41eEPNS_1AE(
- // CHECK: [[ALLOC:%.*]] = getelementptr inbounds {{.*}}, i64 -8
- // CHECK: getelementptr inbounds {{.*}}, i64 4
+ // CHECK: [[ALLOC:%.*]] = getelementptr inbounds {{.*}}, i32 -8
+ // CHECK: getelementptr inbounds {{.*}}, i32 4
// CHECK: bitcast
// CHECK: [[T0:%.*]] = load i32, i32*
// CHECK: [[T1:%.*]] = mul i32 4, [[T0]]
@@ -235,8 +235,8 @@ namespace test4 {
void f(A (*x)[20]) {
// CHECK-LABEL: define void @_ZN5test41fEPA20_NS_1AE(
- // CHECK: [[ALLOC:%.*]] = getelementptr inbounds {{.*}}, i64 -8
- // CHECK: getelementptr inbounds {{.*}}, i64 4
+ // CHECK: [[ALLOC:%.*]] = getelementptr inbounds {{.*}}, i32 -8
+ // CHECK: getelementptr inbounds {{.*}}, i32 4
// CHECK: bitcast
// CHECK: [[T0:%.*]] = load i32, i32*
// CHECK: [[T1:%.*]] = mul i32 4, [[T0]]
@@ -293,7 +293,7 @@ namespace test7 {
// CHECK-LABEL: define void @_ZN5test74testEv() {{.*}} personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
void test() {
- // CHECK: [[T0:%.*]] = load atomic i8, i8* bitcast (i32* @_ZGVZN5test74testEvE1x to i8*) acquire, align 1
+ // CHECK: [[T0:%.*]] = load atomic i8, i8* bitcast (i32* @_ZGVZN5test74testEvE1x to i8*) acquire, align 4
// CHECK-NEXT: [[T1:%.*]] = and i8 [[T0]], 1
// CHECK-NEXT: [[T2:%.*]] = icmp eq i8 [[T1]], 0
// CHECK-NEXT: br i1 [[T2]]
@@ -328,7 +328,7 @@ namespace test8 {
// CHECK-LABEL: define void @_ZN5test84testEv() {{.*}} personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
void test() {
- // CHECK: [[T0:%.*]] = load atomic i8, i8* bitcast (i32* @_ZGVZN5test84testEvE1x to i8*) acquire, align 1
+ // CHECK: [[T0:%.*]] = load atomic i8, i8* bitcast (i32* @_ZGVZN5test84testEvE1x to i8*) acquire, align 4
// CHECK-NEXT: [[T1:%.*]] = and i8 [[T0]], 1
// CHECK-NEXT: [[T2:%.*]] = icmp eq i8 [[T1]], 0
// CHECK-NEXT: br i1 [[T2]]
@@ -388,7 +388,7 @@ namespace test9 {
// CHECK-NEXT: store i32 16, i32* [[T0]]
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i32, i32* [[T0]], i32 1
// CHECK-NEXT: store i32 [[N]], i32* [[T1]]
-// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds i8, i8* [[ALLOC]], i64 16
+// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds i8, i8* [[ALLOC]], i32 16
// CHECK-NEXT: bitcast i8* [[T0]] to [[TEST9]]*
// Array allocation follows.
@@ -400,8 +400,8 @@ namespace test9 {
// CHECK-NEXT: [[T0:%.*]] = icmp eq [[TEST9]]* [[BEGIN]], null
// CHECK-NEXT: br i1 [[T0]],
// CHECK: [[T0:%.*]] = bitcast [[TEST9]]* [[BEGIN]] to i8*
-// CHECK-NEXT: [[ALLOC:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 -16
-// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds i8, i8* [[ALLOC]], i64 4
+// CHECK-NEXT: [[ALLOC:%.*]] = getelementptr inbounds i8, i8* [[T0]], i32 -16
+// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds i8, i8* [[ALLOC]], i32 4
// CHECK-NEXT: [[T1:%.*]] = bitcast i8* [[T0]] to i32*
// CHECK-NEXT: [[N:%.*]] = load i32, i32* [[T1]]
// CHECK-NEXT: [[END:%.*]] = getelementptr inbounds [[TEST9]], [[TEST9]]* [[BEGIN]], i32 [[N]]
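[Editor's note, arm.cpp] ARM is ILP32, so the array-cookie arithmetic now indexes with the 32-bit size type (i32 offsets) rather than i64, and the static-guard loads pick up the guard variable's real alignment (align 4, it is an i32) instead of align 1. The cookie layout the e/f/test9 hunks step through, per the ARM C++ ABI: element size at offset 0, element count at offset 4, elements following (at +16 in test9, which requires extra alignment). A sketch of the delete side; the class body is an assumption, any class with a user-provided destructor needs the cookie:

    struct A { ~A() {} };  // assumed: non-trivial destructor forces an array cookie

    void e(A *x) {
      // From the test: null-check, then GEP i32 -8 back to the cookie and
      // GEP i32 4 to read the element count before destroying in reverse.
      delete[] x;
    }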
diff --git a/clang/test/CodeGenCXX/cxx0x-initializer-stdinitializerlist.cpp b/clang/test/CodeGenCXX/cxx0x-initializer-stdinitializerlist.cpp
index 6d5d3971bd7..311edaabb5d 100644
--- a/clang/test/CodeGenCXX/cxx0x-initializer-stdinitializerlist.cpp
+++ b/clang/test/CodeGenCXX/cxx0x-initializer-stdinitializerlist.cpp
@@ -502,7 +502,7 @@ namespace B19773010 {
}
void f2() {
// CHECK-LABEL: @_ZN9B197730102f2Ev
- // CHECK: store %"struct.B19773010::pair"* getelementptr inbounds ([1 x %"struct.B19773010::pair"], [1 x %"struct.B19773010::pair"]* bitcast ([1 x { i8*, i32 }]* @_ZGRZN9B197730102f2EvE1p_ to [1 x %"struct.B19773010::pair"]*), i64 0, i64 0), %"struct.B19773010::pair"** getelementptr inbounds ([2 x %"class.std::initializer_list.10"], [2 x %"class.std::initializer_list.10"]* @_ZZN9B197730102f2EvE1p, i64 0, i64 1, i32 0), align 8
+ // CHECK: store %"struct.B19773010::pair"* getelementptr inbounds ([1 x %"struct.B19773010::pair"], [1 x %"struct.B19773010::pair"]* bitcast ([1 x { i8*, i32 }]* @_ZGRZN9B197730102f2EvE1p_ to [1 x %"struct.B19773010::pair"]*), i64 0, i64 0), %"struct.B19773010::pair"** getelementptr inbounds ([2 x %"class.std::initializer_list.10"], [2 x %"class.std::initializer_list.10"]* @_ZZN9B197730102f2EvE1p, i64 0, i64 1, i32 0), align 16
static std::initializer_list<pair<const char *, E>> a, p[2] =
{a, {{"", ENUM_CONSTANT}}};
}
diff --git a/clang/test/CodeGenCXX/cxx11-initializer-array-new.cpp b/clang/test/CodeGenCXX/cxx11-initializer-array-new.cpp
index 2beb44ecf3b..c662190ff38 100644
--- a/clang/test/CodeGenCXX/cxx11-initializer-array-new.cpp
+++ b/clang/test/CodeGenCXX/cxx11-initializer-array-new.cpp
@@ -28,7 +28,7 @@ void *p = new S[2][3]{ { 1, 2, 3 }, { 4, 5, 6 } };
//
// { 4, 5, 6 }
//
-// CHECK: %[[S_1:.*]] = getelementptr inbounds [3 x %[[S]]], [3 x %[[S]]]* %[[S_0]], i32 1
+// CHECK: %[[S_1:.*]] = getelementptr inbounds [3 x %[[S]]], [3 x %[[S]]]* %[[S_0]], i64 1
//
// CHECK: %[[S_1_0:.*]] = getelementptr inbounds [3 x %[[S]]], [3 x %[[S]]]* %[[S_1]], i64 0, i64 0
// CHECK: call void @_ZN1SC1Ei(%[[S]]* %[[S_1_0]], i32 4)
@@ -72,7 +72,7 @@ void *q = new S[n][3]{ { 1, 2, 3 }, { 4, 5, 6 } };
//
// { 4, 5, 6 }
//
-// CHECK: %[[S_1:.*]] = getelementptr inbounds [3 x %[[S]]], [3 x %[[S]]]* %[[S_0]], i32 1
+// CHECK: %[[S_1:.*]] = getelementptr inbounds [3 x %[[S]]], [3 x %[[S]]]* %[[S_0]], i64 1
//
// CHECK: %[[S_1_0:.*]] = getelementptr inbounds [3 x %[[S]]], [3 x %[[S]]]* %[[S_1]], i64 0, i64 0
// CHECK: call void @_ZN1SC1Ei(%[[S]]* %[[S_1_0]], i32 4)
@@ -83,7 +83,7 @@ void *q = new S[n][3]{ { 1, 2, 3 }, { 4, 5, 6 } };
//
// And the rest.
//
-// CHECK: %[[S_2:.*]] = getelementptr inbounds [3 x %[[S]]], [3 x %[[S]]]* %[[S_1]], i32 1
+// CHECK: %[[S_2:.*]] = getelementptr inbounds [3 x %[[S]]], [3 x %[[S]]]* %[[S_1]], i64 1
// CHECK: %[[S_2_AS_S:.*]] = bitcast [3 x %[[S]]]* %[[S_2]] to %[[S]]*
//
// CHECK: %[[REST:.*]] = sub i64 %[[ELTS]], 6
@@ -135,7 +135,7 @@ void *r = new T[n][3]{ { 1, 2, 3 }, { 4, 5, 6 } };
//
// { 4, 5, 6 }
//
-// CHECK: %[[T_1:.*]] = getelementptr inbounds [3 x %[[T]]], [3 x %[[T]]]* %[[T_0]], i32 1
+// CHECK: %[[T_1:.*]] = getelementptr inbounds [3 x %[[T]]], [3 x %[[T]]]* %[[T_0]], i64 1
//
// CHECK: %[[T_1_0:.*]] = getelementptr inbounds [3 x %[[T]]], [3 x %[[T]]]* %[[T_1]], i64 0, i64 0
// CHECK: %[[T_1_0_0:.*]] = getelementptr inbounds %[[T]], %[[T]]* %[[T_1_0]], i32 0, i32 0
@@ -149,7 +149,7 @@ void *r = new T[n][3]{ { 1, 2, 3 }, { 4, 5, 6 } };
//
// And the rest gets memset to 0.
//
-// CHECK: %[[T_2:.*]] = getelementptr inbounds [3 x %[[T]]], [3 x %[[T]]]* %[[T_1]], i32 1
+// CHECK: %[[T_2:.*]] = getelementptr inbounds [3 x %[[T]]], [3 x %[[T]]]* %[[T_1]], i64 1
// CHECK: %[[T_2_AS_T:.*]] = bitcast [3 x %[[T]]]* %[[T_2]] to %[[T]]*
//
// CHECK: %[[SIZE:.*]] = sub i64 %{{.*}}, 24
diff --git a/clang/test/CodeGenCXX/delete-two-arg.cpp b/clang/test/CodeGenCXX/delete-two-arg.cpp
index e5a4cfa3ee6..85275b3eb17 100644
--- a/clang/test/CodeGenCXX/delete-two-arg.cpp
+++ b/clang/test/CodeGenCXX/delete-two-arg.cpp
@@ -30,7 +30,7 @@ namespace test2 {
// CHECK: [[NEW:%.*]] = call noalias i8* @_Znaj(i32 44)
// CHECK-NEXT: [[T0:%.*]] = bitcast i8* [[NEW]] to i32*
// CHECK-NEXT: store i32 10, i32* [[T0]]
- // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i8, i8* [[NEW]], i64 4
+ // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i8, i8* [[NEW]], i32 4
// CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to [[A]]*
// CHECK-NEXT: ret [[A]]* [[T2]]
return ::new A[10];
@@ -44,7 +44,7 @@ namespace test2 {
// CHECK-NEXT: [[T1:%.*]] = icmp eq [[A]]* [[T0]], null
// CHECK-NEXT: br i1 [[T1]],
// CHECK: [[T2:%.*]] = bitcast [[A]]* [[T0]] to i8*
- // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i8, i8* [[T2]], i64 -4
+ // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i8, i8* [[T2]], i32 -4
// CHECK-NEXT: [[T4:%.*]] = bitcast i8* [[T3]] to i32*
// CHECK-NEXT: [[T5:%.*]] = load i32, i32* [[T4]]
// CHECK-NEXT: call void @_ZdaPv(i8* [[T3]])
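
The 44 handed to _Znaj is derivable from the hunk itself: ten 4-byte elements plus a 4-byte cookie that stores the count, which delete[] recovers by stepping back over the cookie (the -4 GEP). A worked check of those numbers (sizeof(A) == 4 is assumed from the i32 stores):

    #include <cstdio>

    int main() {
      const unsigned sizeof_A = 4, n = 10, cookie = 4; // cookie holds the count
      std::printf("_Znaj(%u)\n", cookie + n * sizeof_A); // _Znaj(44)
      std::printf("payload at +%u; count read back at payload - %u\n",
                  cookie, cookie);
    }
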
diff --git a/clang/test/CodeGenCXX/lambda-expressions.cpp b/clang/test/CodeGenCXX/lambda-expressions.cpp
index 28a8841b600..2ea0561f9e9 100644
--- a/clang/test/CodeGenCXX/lambda-expressions.cpp
+++ b/clang/test/CodeGenCXX/lambda-expressions.cpp
@@ -81,7 +81,7 @@ int g() {
};
// PR14773
-// CHECK: [[ARRVAL:%[0-9a-zA-Z]*]] = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @_ZZ14staticarrayrefvE5array, i32 0, i64 0), align 4
+// CHECK: [[ARRVAL:%[0-9a-zA-Z]*]] = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @_ZZ14staticarrayrefvE5array, i64 0, i64 0), align 4
// CHECK-NEXT: store i32 [[ARRVAL]]
void staticarrayref(){
static int array[] = {};
diff --git a/clang/test/CodeGenCXX/microsoft-abi-array-cookies.cpp b/clang/test/CodeGenCXX/microsoft-abi-array-cookies.cpp
index 62ead4fb69d..75c0621347a 100644
--- a/clang/test/CodeGenCXX/microsoft-abi-array-cookies.cpp
+++ b/clang/test/CodeGenCXX/microsoft-abi-array-cookies.cpp
@@ -28,12 +28,12 @@ void check_array_cookies_simple() {
// 46 = 42 + size of cookie (4)
// CHECK: [[COOKIE:%.*]] = bitcast i8* [[ALLOCATED]] to i32*
// CHECK: store i32 42, i32* [[COOKIE]]
-// CHECK: [[ARRAY:%.*]] = getelementptr inbounds i8, i8* [[ALLOCATED]], i64 4
+// CHECK: [[ARRAY:%.*]] = getelementptr inbounds i8, i8* [[ALLOCATED]], i32 4
// CHECK: bitcast i8* [[ARRAY]] to [[CLASS:%.*]]*
delete [] array;
// CHECK: [[ARRAY_AS_CHAR:%.*]] = bitcast [[CLASS]]* {{%.*}} to i8*
-// CHECK: getelementptr inbounds i8, i8* [[ARRAY_AS_CHAR]], i64 -4
+// CHECK: getelementptr inbounds i8, i8* [[ARRAY_AS_CHAR]], i32 -4
}
struct __attribute__((aligned(8))) ClassWithAlignment {
@@ -50,12 +50,12 @@ void check_array_cookies_aligned() {
// 344 = 42*8 + size of cookie (8, due to alignment)
// CHECK: [[COOKIE:%.*]] = bitcast i8* [[ALLOCATED]] to i32*
// CHECK: store i32 42, i32* [[COOKIE]]
-// CHECK: [[ARRAY:%.*]] = getelementptr inbounds i8, i8* [[ALLOCATED]], i64 8
+// CHECK: [[ARRAY:%.*]] = getelementptr inbounds i8, i8* [[ALLOCATED]], i32 8
// CHECK: bitcast i8* [[ARRAY]] to [[CLASS:%.*]]*
delete [] array;
// CHECK: [[ARRAY_AS_CHAR:%.*]] = bitcast [[CLASS]]*
-// CHECK: getelementptr inbounds i8, i8* [[ARRAY_AS_CHAR]], i64 -8
+// CHECK: getelementptr inbounds i8, i8* [[ARRAY_AS_CHAR]], i32 -8
}
namespace PR23990 {
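
Both arithmetic comments above ("46 = 42 + size of cookie (4)" and "344 = 42*8 + size of cookie (8, due to alignment)") follow one rule: the MSVC array cookie is a 4-byte count padded up to the element alignment. A sketch of that computation, assuming the padding rule is exactly what the comments state:

    #include <cstdio>

    // MSVC-ABI array-cookie sizing as the test comments describe it.
    unsigned cookie_size(unsigned elem_align) {
      return elem_align > 4 ? elem_align : 4; // 4-byte count, padded to alignment
    }

    int main() {
      std::printf("%u\n", 42 * 1 + cookie_size(1)); // 46: 42 one-byte elements
      std::printf("%u\n", 42 * 8 + cookie_size(8)); // 344: aligned(8) elements
    }
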
diff --git a/clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp b/clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp
index 68f1430ace5..686b2b0994c 100644
--- a/clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp
+++ b/clang/test/CodeGenCXX/microsoft-abi-eh-cleanups.cpp
@@ -162,7 +162,7 @@ C::C() { foo(); }
// WIN32-NOT: load
// WIN32: bitcast %"struct.crash_on_partial_destroy::C"* %{{.*}} to i8*
// WIN32-NOT: load
-// WIN32: getelementptr inbounds i8, i8* %{{.*}}, i64 4
+// WIN32: getelementptr inbounds i8, i8* %{{.*}}, i32 4
// WIN32-NOT: load
// WIN32: bitcast i8* %{{.*}} to %"struct.crash_on_partial_destroy::A"*
// WIN32: call x86_thiscallcc void @"\01??1A@crash_on_partial_destroy@@UAE@XZ"
diff --git a/clang/test/CodeGenCXX/microsoft-abi-structors.cpp b/clang/test/CodeGenCXX/microsoft-abi-structors.cpp
index 88400e7c0bd..0e44efa8d0a 100644
--- a/clang/test/CodeGenCXX/microsoft-abi-structors.cpp
+++ b/clang/test/CodeGenCXX/microsoft-abi-structors.cpp
@@ -161,7 +161,7 @@ C::~C() {
// CHECK: load %"struct.dtor_in_second_nvbase::C"*, %"struct.dtor_in_second_nvbase::C"** %{{.*}}
// Now we this-adjust before calling ~B.
// CHECK: bitcast %"struct.dtor_in_second_nvbase::C"* %{{.*}} to i8*
-// CHECK: getelementptr inbounds i8, i8* %{{.*}}, i64 4
+// CHECK: getelementptr inbounds i8, i8* %{{.*}}, i32 4
// CHECK: bitcast i8* %{{.*}} to %"struct.dtor_in_second_nvbase::B"*
// CHECK: call x86_thiscallcc void @"\01??1B@dtor_in_second_nvbase@@UAE@XZ"
// CHECK: (%"struct.dtor_in_second_nvbase::B"* %{{.*}})
@@ -246,11 +246,11 @@ C::C() {
//
// CHECK: [[INIT_VBASES]]
// CHECK-NEXT: %[[this_i8:.*]] = bitcast %"struct.constructors::C"* %{{.*}} to i8*
- // CHECK-NEXT: %[[vbptr_off:.*]] = getelementptr inbounds i8, i8* %[[this_i8]], i64 0
+ // CHECK-NEXT: %[[vbptr_off:.*]] = getelementptr inbounds i8, i8* %[[this_i8]], i32 0
// CHECK-NEXT: %[[vbptr:.*]] = bitcast i8* %[[vbptr_off]] to i32**
// CHECK-NEXT: store i32* getelementptr inbounds ([2 x i32], [2 x i32]* @"\01??_8C@constructors@@7B@", i32 0, i32 0), i32** %[[vbptr]]
// CHECK-NEXT: bitcast %"struct.constructors::C"* %{{.*}} to i8*
- // CHECK-NEXT: getelementptr inbounds i8, i8* %{{.*}}, i64 4
+ // CHECK-NEXT: getelementptr inbounds i8, i8* %{{.*}}, i32 4
// CHECK-NEXT: bitcast i8* %{{.*}} to %"struct.constructors::A"*
// CHECK-NEXT: call x86_thiscallcc %"struct.constructors::A"* @"\01??0A@constructors@@QAE@XZ"(%"struct.constructors::A"* %{{.*}})
// CHECK-NEXT: br label %[[SKIP_VBASES]]
@@ -281,11 +281,11 @@ D::D() {
//
// CHECK: [[INIT_VBASES]]
// CHECK-NEXT: %[[this_i8:.*]] = bitcast %"struct.constructors::D"* %{{.*}} to i8*
- // CHECK-NEXT: %[[vbptr_off:.*]] = getelementptr inbounds i8, i8* %[[this_i8]], i64 0
+ // CHECK-NEXT: %[[vbptr_off:.*]] = getelementptr inbounds i8, i8* %[[this_i8]], i32 0
// CHECK-NEXT: %[[vbptr:.*]] = bitcast i8* %[[vbptr_off]] to i32**
// CHECK-NEXT: store i32* getelementptr inbounds ([2 x i32], [2 x i32]* @"\01??_8D@constructors@@7B@", i32 0, i32 0), i32** %[[vbptr]]
// CHECK-NEXT: bitcast %"struct.constructors::D"* %{{.*}} to i8*
- // CHECK-NEXT: getelementptr inbounds i8, i8* %{{.*}}, i64 4
+ // CHECK-NEXT: getelementptr inbounds i8, i8* %{{.*}}, i32 4
// CHECK-NEXT: bitcast i8* %{{.*}} to %"struct.constructors::A"*
// CHECK-NEXT: call x86_thiscallcc %"struct.constructors::A"* @"\01??0A@constructors@@QAE@XZ"(%"struct.constructors::A"* %{{.*}})
// CHECK-NEXT: br label %[[SKIP_VBASES]]
@@ -308,14 +308,14 @@ E::E() {
//
// CHECK: [[INIT_VBASES]]
// CHECK-NEXT: %[[this_i8:.*]] = bitcast %"struct.constructors::E"* %{{.*}} to i8*
- // CHECK-NEXT: %[[offs:.*]] = getelementptr inbounds i8, i8* %[[this_i8]], i64 0
+ // CHECK-NEXT: %[[offs:.*]] = getelementptr inbounds i8, i8* %[[this_i8]], i32 0
// CHECK-NEXT: %[[vbptr_E:.*]] = bitcast i8* %[[offs]] to i32**
// CHECK-NEXT: store i32* getelementptr inbounds ([3 x i32], [3 x i32]* @"\01??_8E@constructors@@7B01@@", i32 0, i32 0), i32** %[[vbptr_E]]
- // CHECK-NEXT: %[[offs:.*]] = getelementptr inbounds i8, i8* %[[this_i8]], i64 4
+ // CHECK-NEXT: %[[offs:.*]] = getelementptr inbounds i8, i8* %[[this_i8]], i32 4
// CHECK-NEXT: %[[vbptr_C:.*]] = bitcast i8* %[[offs]] to i32**
// CHECK-NEXT: store i32* getelementptr inbounds ([2 x i32], [2 x i32]* @"\01??_8E@constructors@@7BC@1@@", i32 0, i32 0), i32** %[[vbptr_C]]
// CHECK-NEXT: bitcast %"struct.constructors::E"* %{{.*}} to i8*
- // CHECK-NEXT: getelementptr inbounds i8, i8* %{{.*}}, i64 4
+ // CHECK-NEXT: getelementptr inbounds i8, i8* %{{.*}}, i32 4
// CHECK-NEXT: bitcast i8* %{{.*}} to %"struct.constructors::A"*
// CHECK-NEXT: call x86_thiscallcc %"struct.constructors::A"* @"\01??0A@constructors@@QAE@XZ"(%"struct.constructors::A"* %{{.*}})
// CHECK: call x86_thiscallcc %"struct.constructors::C"* @"\01??0C@constructors@@QAE@XZ"(%"struct.constructors::C"* %{{.*}}, i32 0)
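
Every [INIT_VBASES] block in this file follows the same two steps the IR above shows: plant the vbtable pointer(s) at their fixed offsets inside the most-derived object, then construct the virtual base at its own offset. A rough C-style rendering; the extern names merely stand in for the @"\01??_8..." vbtable and the A constructor:

    extern "C" int vbtable_for_C[2];         // stands in for @"\01??_8C@constructors@@7B@"
    extern "C" void construct_A(void *self); // stands in for ??0A@constructors@@QAE@XZ

    // Sketch of the [INIT_VBASES] path for constructors::C, per the IR above.
    void init_vbases(char *this_i8) {
      *(int **)(this_i8 + 0) = &vbtable_for_C[0]; // vbptr at offset 0
      construct_A(this_i8 + 4);                   // virtual base A at offset 4
    }
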
diff --git a/clang/test/CodeGenCXX/microsoft-abi-try-throw.cpp b/clang/test/CodeGenCXX/microsoft-abi-try-throw.cpp
index fed39761714..93d6a689ce6 100644
--- a/clang/test/CodeGenCXX/microsoft-abi-try-throw.cpp
+++ b/clang/test/CodeGenCXX/microsoft-abi-try-throw.cpp
@@ -26,7 +26,7 @@ int main() {
}
#endif
#ifdef THROW
- // THROW: store i32 42, i32* %[[mem_for_throw:.*]]
+ // THROW: store i32 42, i32* %[[mem_for_throw:.*]], align 4
// THROW: %[[cast:.*]] = bitcast i32* %[[mem_for_throw]] to i8*
// THROW: call void @_CxxThrowException(i8* %[[cast]], %eh.ThrowInfo* @_TI1H)
throw int(42);
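
The THROW pattern is the MSVC lowering of a throw-expression: materialize the operand in an aligned local slot, then call the runtime with its address and the type's throw info. A sketch with a stand-in for the compiler-emitted @_TI1H descriptor (the real entry point is __stdcall; the calling convention is elided here):

    struct ThrowInfo;                    // opaque; @_TI1H describes 'int'
    extern "C" void _CxxThrowException(void *object, ThrowInfo *info);
    extern ThrowInfo throw_info_for_int; // hypothetical stand-in for @_TI1H

    void throw_42() {
      int mem_for_throw = 42;            // the 'store i32 42, ..., align 4'
      _CxxThrowException(&mem_for_throw, &throw_info_for_int);
    }
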
diff --git a/clang/test/CodeGenCXX/microsoft-abi-virtual-inheritance-vtordisps.cpp b/clang/test/CodeGenCXX/microsoft-abi-virtual-inheritance-vtordisps.cpp
index 204da8db153..bb73f8773cc 100644
--- a/clang/test/CodeGenCXX/microsoft-abi-virtual-inheritance-vtordisps.cpp
+++ b/clang/test/CodeGenCXX/microsoft-abi-virtual-inheritance-vtordisps.cpp
@@ -26,7 +26,7 @@ D::D() {} // Forces vftable emission.
// CHECK-LABEL: define linkonce_odr x86_thiscallcc void @"\01?f@D@@$4PPPPPPPM@A@AEXXZ"
// CHECK: %[[ECX:.*]] = load %struct.D*, %struct.D** %{{.*}}
// CHECK: %[[ECX_i8:.*]] = bitcast %struct.D* %[[ECX]] to i8*
-// CHECK: %[[VTORDISP_PTR_i8:.*]] = getelementptr i8, i8* %[[ECX_i8]], i32 -4
+// CHECK: %[[VTORDISP_PTR_i8:.*]] = getelementptr inbounds i8, i8* %[[ECX_i8]], i32 -4
// CHECK: %[[VTORDISP_PTR:.*]] = bitcast i8* %[[VTORDISP_PTR_i8]] to i32*
// CHECK: %[[VTORDISP:.*]] = load i32, i32* %[[VTORDISP_PTR]]
// CHECK: %[[VTORDISP_NEG:.*]] = sub i32 0, %[[VTORDISP]]
@@ -37,7 +37,7 @@ D::D() {} // Forces vftable emission.
// CHECK-LABEL: define linkonce_odr x86_thiscallcc void @"\01?f@D@@$4PPPPPPPI@3AEXXZ"
// CHECK: %[[ECX:.*]] = load %struct.D*, %struct.D** %{{.*}}
// CHECK: %[[ECX_i8:.*]] = bitcast %struct.D* %[[ECX]] to i8*
-// CHECK: %[[VTORDISP_PTR_i8:.*]] = getelementptr i8, i8* %[[ECX_i8]], i32 -8
+// CHECK: %[[VTORDISP_PTR_i8:.*]] = getelementptr inbounds i8, i8* %[[ECX_i8]], i32 -8
// CHECK: %[[VTORDISP_PTR:.*]] = bitcast i8* %[[VTORDISP_PTR_i8]] to i32*
// CHECK: %[[VTORDISP:.*]] = load i32, i32* %[[VTORDISP_PTR]]
// CHECK: %[[VTORDISP_NEG:.*]] = sub i32 0, %[[VTORDISP]]
@@ -66,7 +66,7 @@ G::G() {} // Forces vftable emission.
// CHECK-LABEL: define linkonce_odr x86_thiscallcc void @"\01?f@E@@$R4BA@M@PPPPPPPM@7AEXXZ"(i8*)
// CHECK: %[[ECX:.*]] = load %struct.E*, %struct.E** %{{.*}}
// CHECK: %[[ECX_i8:.*]] = bitcast %struct.E* %[[ECX]] to i8*
-// CHECK: %[[VTORDISP_PTR_i8:.*]] = getelementptr i8, i8* %[[ECX_i8]], i32 -4
+// CHECK: %[[VTORDISP_PTR_i8:.*]] = getelementptr inbounds i8, i8* %[[ECX_i8]], i32 -4
// CHECK: %[[VTORDISP_PTR:.*]] = bitcast i8* %[[VTORDISP_PTR_i8]] to i32*
// CHECK: %[[VTORDISP:.*]] = load i32, i32* %[[VTORDISP_PTR]]
// CHECK: %[[VTORDISP_NEG:.*]] = sub i32 0, %[[VTORDISP]]
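
All three thunks adjust this the same way: load the vtordisp stored just below the subobject (at -4 or -8 from the incoming pointer), negate it, and displace this by that amount before running the body. In C terms, tracking the IR above:

    // Sketch of a vtordisp thunk's this-adjustment (offset is -4 or -8 above).
    char *adjust_this(char *ecx_i8, int vtordisp_offset) {
      int vtordisp = *(int *)(ecx_i8 + vtordisp_offset);
      return ecx_i8 + (0 - vtordisp); // step back by the stored displacement
    }
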
diff --git a/clang/test/CodeGenCXX/microsoft-abi-virtual-inheritance.cpp b/clang/test/CodeGenCXX/microsoft-abi-virtual-inheritance.cpp
index b868d1f0b51..9a0b011783e 100644
--- a/clang/test/CodeGenCXX/microsoft-abi-virtual-inheritance.cpp
+++ b/clang/test/CodeGenCXX/microsoft-abi-virtual-inheritance.cpp
@@ -91,7 +91,7 @@ B::~B() {
// CHECK2: %[[B:.*]] = bitcast i8* %[[B_i8]] to %struct.B*
// CHECK2: call x86_thiscallcc void @"\01??1B@@UAE@XZ"(%struct.B* %[[B]])
// CHECK2: %[[THIS_i8:.*]] = bitcast %struct.B* %[[THIS]] to i8*
- // CHECK2: %[[VBASE_i8:.*]] = getelementptr inbounds i8, i8* %[[THIS_i8]], i64 8
+ // CHECK2: %[[VBASE_i8:.*]] = getelementptr inbounds i8, i8* %[[THIS_i8]], i32 8
// CHECK2: %[[VBASE:.*]] = bitcast i8* %[[VBASE_i8]] to %struct.VBase*
// CHECK2: call x86_thiscallcc void @"\01??1VBase@@UAE@XZ"(%struct.VBase* %[[VBASE]])
// CHECK2: ret
@@ -290,7 +290,7 @@ D::~D() {
// CHECK: store %"struct.diamond::D"* %[[THIS]], %"struct.diamond::D"** %[[THIS_VAL:.*]], align 4
// CHECK: %[[THIS:.*]] = load %"struct.diamond::D"*, %"struct.diamond::D"** %[[THIS_VAL]]
// CHECK: %[[D_i8:.*]] = bitcast %"struct.diamond::D"* %[[THIS]] to i8*
- // CHECK: %[[C_i8:.*]] = getelementptr inbounds i8, i8* %[[D_i8]], i64 4
+ // CHECK: %[[C_i8:.*]] = getelementptr inbounds i8, i8* %[[D_i8]], i32 4
// CHECK: %[[C:.*]] = bitcast i8* %[[C_i8]] to %"struct.diamond::C"*
// CHECK: %[[C_i8:.*]] = bitcast %"struct.diamond::C"* %[[C]] to i8*
// CHECK: %[[ARG_i8:.*]] = getelementptr i8, i8* %{{.*}}, i32 16
diff --git a/clang/test/CodeGenCXX/static-init-wasm.cpp b/clang/test/CodeGenCXX/static-init-wasm.cpp
index e842509af48..2d187b5c05f 100644
--- a/clang/test/CodeGenCXX/static-init-wasm.cpp
+++ b/clang/test/CodeGenCXX/static-init-wasm.cpp
@@ -17,7 +17,7 @@ void g() {
static int a = f();
}
// WEBASSEMBLY32-LABEL: @_Z1gv()
-// WEBASSEMBLY32: %[[R0:.+]] = load atomic i8, i8* bitcast (i32* @_ZGVZ1gvE1a to i8*) acquire, align 1
+// WEBASSEMBLY32: %[[R0:.+]] = load atomic i8, i8* bitcast (i32* @_ZGVZ1gvE1a to i8*) acquire, align 4
// WEBASSEMBLY32-NEXT: %[[R1:.+]] = and i8 %[[R0]], 1
// WEBASSEMBLY32-NEXT: %[[R2:.+]] = icmp eq i8 %[[R1]], 0
// WEBASSEMBLY32-NEXT: br i1 %[[R2]], label %[[CHECK:.+]], label %[[END:.+]]
@@ -27,7 +27,7 @@ void g() {
// WEBASSEMBLY32: call void @__cxa_guard_release
//
// WEBASSEMBLY64-LABEL: @_Z1gv()
-// WEBASSEMBLY64: %[[R0:.+]] = load atomic i8, i8* bitcast (i64* @_ZGVZ1gvE1a to i8*) acquire, align 1
+// WEBASSEMBLY64: %[[R0:.+]] = load atomic i8, i8* bitcast (i64* @_ZGVZ1gvE1a to i8*) acquire, align 8
// WEBASSEMBLY64-NEXT: %[[R1:.+]] = and i8 %[[R0]], 1
// WEBASSEMBLY64-NEXT: %[[R2:.+]] = icmp eq i8 %[[R1]], 0
// WEBASSEMBLY64-NEXT: br i1 %[[R2]], label %[[CHECK:.+]], label %[[END:.+]]
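
The WEBASSEMBLY32/64 runs check the usual Itanium guard protocol; only the guard's type (i32 vs i64) and hence the load alignment differ. The emitted logic is a double-checked acquire of bit 0 of the guard's first byte, sketched here with the standard __cxa_guard_* entry points (the guard type is simplified to a plain integer):

    extern "C" int  __cxa_guard_acquire(unsigned long long *guard);
    extern "C" void __cxa_guard_release(unsigned long long *guard);
    int f();

    static unsigned long long guard; // @_ZGVZ1gvE1a (i32 on wasm32, i64 on wasm64)
    static int storage;              // @_ZZ1gvE1a

    // Roughly what 'static int a = f();' expands to inside g().
    int &lazy_a() {
      unsigned char done =
          __atomic_load_n((unsigned char *)&guard, __ATOMIC_ACQUIRE);
      if ((done & 1) == 0 && __cxa_guard_acquire(&guard)) {
        storage = f();
        __cxa_guard_release(&guard);
      }
      return storage;
    }
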
diff --git a/clang/test/CodeGenCXX/static-init.cpp b/clang/test/CodeGenCXX/static-init.cpp
index 255251e707b..541f6416efd 100644
--- a/clang/test/CodeGenCXX/static-init.cpp
+++ b/clang/test/CodeGenCXX/static-init.cpp
@@ -6,8 +6,8 @@
// CHECK: @_ZZN5test31BC1EvE1u = internal global { i8, [3 x i8] } { i8 97, [3 x i8] undef }, align 4
-// CHECK: @_ZZ2h2vE1i = linkonce_odr global i32 0, comdat, align
-// CHECK: @_ZGVZ2h2vE1i = linkonce_odr global i64 0, comdat{{$}}
+// CHECK: @_ZZ2h2vE1i = linkonce_odr global i32 0, comdat, align 4
+// CHECK: @_ZGVZ2h2vE1i = linkonce_odr global i64 0, comdat, align 8{{$}}
// CHECK: @_ZZN5test1L6getvarEiE3var = internal constant [4 x i32] [i32 1, i32 0, i32 2, i32 4], align 16
// CHECK: @_ZZN5test414useStaticLocalEvE3obj = linkonce_odr global %"struct.test4::HasVTable" zeroinitializer, comdat, align 8
@@ -17,7 +17,7 @@ struct A {
};
void f() {
- // CHECK: load atomic i8, i8* bitcast (i64* @_ZGVZ1fvE1a to i8*) acquire, align 1
+ // CHECK: load atomic i8, i8* bitcast (i64* @_ZGVZ1fvE1a to i8*) acquire, align 8
// CHECK: call i32 @__cxa_guard_acquire
// CHECK: call void @_ZN1AC1Ev
// CHECK: call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.A*)* @_ZN1AD1Ev to void (i8*)*), i8* getelementptr inbounds (%struct.A, %struct.A* @_ZZ1fvE1a, i32 0, i32 0), i8* @__dso_handle)
diff --git a/clang/test/CodeGenCXX/vararg-non-pod-ms-compat.cpp b/clang/test/CodeGenCXX/vararg-non-pod-ms-compat.cpp
index 530a4284b4e..64b1c525a61 100644
--- a/clang/test/CodeGenCXX/vararg-non-pod-ms-compat.cpp
+++ b/clang/test/CodeGenCXX/vararg-non-pod-ms-compat.cpp
@@ -18,7 +18,7 @@ void test(X x) {
// X64: alloca %struct.X
// X64: %[[agg:[^ ]*]] = alloca %struct.X
- // X64: %[[valptr:[^ ]*]] = getelementptr %struct.X, %struct.X* %[[agg]], i32 0, i32 0
+ // X64: %[[valptr:[^ ]*]] = getelementptr inbounds %struct.X, %struct.X* %[[agg]], i32 0, i32 0
// X64: %[[val:[^ ]*]] = load i32, i32* %[[valptr]]
// X64: call void (...) @"\01?vararg@@YAXZZ"(i32 %[[val]])
diff --git a/clang/test/CodeGenCXX/wasm-args-returns.cpp b/clang/test/CodeGenCXX/wasm-args-returns.cpp
index 4ef085f6baa..2b80430014d 100644
--- a/clang/test/CodeGenCXX/wasm-args-returns.cpp
+++ b/clang/test/CodeGenCXX/wasm-args-returns.cpp
@@ -42,17 +42,17 @@ struct copy_ctor {
copy_ctor(copy_ctor const&);
};
test(copy_ctor);
-// CHECK: define void @_Z7forward9copy_ctor(%struct.copy_ctor* noalias sret %{{.*}}, %struct.copy_ctor* align 8 %{{.*}})
+// CHECK: define void @_Z7forward9copy_ctor(%struct.copy_ctor* noalias sret %{{.*}}, %struct.copy_ctor* %{{.*}})
//
// CHECK: declare %struct.copy_ctor* @_ZN9copy_ctorC1ERKS_(%struct.copy_ctor* returned, %struct.copy_ctor* dereferenceable(8))
//
// CHECK: define void @_Z14test_copy_ctorv()
// CHECK: %[[tmp:.*]] = alloca %struct.copy_ctor, align 8
// CHECK: call void @_Z13def_copy_ctorv(%struct.copy_ctor* nonnull sret %[[tmp]])
-// CHECK: call void @_Z3use9copy_ctor(%struct.copy_ctor* nonnull align 8 %[[tmp]])
+// CHECK: call void @_Z3use9copy_ctor(%struct.copy_ctor* nonnull %[[tmp]])
// CHECK: ret void
//
-// CHECK: declare void @_Z3use9copy_ctor(%struct.copy_ctor* align 8)
+// CHECK: declare void @_Z3use9copy_ctor(%struct.copy_ctor*)
// CHECK: declare void @_Z13def_copy_ctorv(%struct.copy_ctor* sret)
struct __attribute__((aligned(16))) aligned_copy_ctor {
@@ -60,17 +60,17 @@ struct __attribute__((aligned(16))) aligned_copy_ctor {
aligned_copy_ctor(aligned_copy_ctor const&);
};
test(aligned_copy_ctor);
-// CHECK: define void @_Z7forward17aligned_copy_ctor(%struct.aligned_copy_ctor* noalias sret %{{.*}}, %struct.aligned_copy_ctor* align 16 %{{.*}})
+// CHECK: define void @_Z7forward17aligned_copy_ctor(%struct.aligned_copy_ctor* noalias sret %{{.*}}, %struct.aligned_copy_ctor* %{{.*}})
//
// CHECK: declare %struct.aligned_copy_ctor* @_ZN17aligned_copy_ctorC1ERKS_(%struct.aligned_copy_ctor* returned, %struct.aligned_copy_ctor* dereferenceable(16))
//
// CHECK: define void @_Z22test_aligned_copy_ctorv()
// CHECK: %[[tmp:.*]] = alloca %struct.aligned_copy_ctor, align 16
// CHECK: call void @_Z21def_aligned_copy_ctorv(%struct.aligned_copy_ctor* nonnull sret %[[tmp]])
-// CHECK: call void @_Z3use17aligned_copy_ctor(%struct.aligned_copy_ctor* nonnull align 16 %[[tmp]])
+// CHECK: call void @_Z3use17aligned_copy_ctor(%struct.aligned_copy_ctor* nonnull %[[tmp]])
// CHECK: ret void
//
-// CHECK: declare void @_Z3use17aligned_copy_ctor(%struct.aligned_copy_ctor* align 16)
+// CHECK: declare void @_Z3use17aligned_copy_ctor(%struct.aligned_copy_ctor*)
// CHECK: declare void @_Z21def_aligned_copy_ctorv(%struct.aligned_copy_ctor* sret)
struct empty {};
diff --git a/clang/test/CodeGenObjC/arc-captured-32bit-block-var-layout-2.m b/clang/test/CodeGenObjC/arc-captured-32bit-block-var-layout-2.m
index 6ab02a916e8..10feda938d0 100644
--- a/clang/test/CodeGenObjC/arc-captured-32bit-block-var-layout-2.m
+++ b/clang/test/CodeGenObjC/arc-captured-32bit-block-var-layout-2.m
@@ -11,31 +11,30 @@ int main() {
NSString *strong;
unsigned long long eightByte = 0x8001800181818181ull;
// Test1
-// CHECK: block variable layout: BL_NON_OBJECT_WORD:3, BL_STRONG:1, BL_OPERATOR:0
+ // CHECK: Inline block variable layout: 0x0100, BL_STRONG:1, BL_OPERATOR:0
void (^block1)() = ^{ printf("%#llx", eightByte); NSLog(@"%@", strong); };
// Test2
int i = 1;
-// CHECK: block variable layout: BL_NON_OBJECT_WORD:3, BL_STRONG:1, BL_OPERATOR:0
+ // CHECK: Inline block variable layout: 0x0100, BL_STRONG:1, BL_OPERATOR:0
void (^block2)() = ^{ printf("%#llx, %d", eightByte, i); NSLog(@"%@", strong); };
// Test3
char ch = 'a';
-// CHECK: block variable layout: BL_NON_OBJECT_WORD:3, BL_STRONG:1, BL_OPERATOR:0
+ // CHECK: Inline block variable layout: 0x0100, BL_STRONG:1, BL_OPERATOR:0
void (^block3)() = ^{ printf("%c %#llx", ch, eightByte); NSLog(@"%@", strong); };
// Test4
unsigned long fourByte = 0x8001ul;
-// block variable layout: BL_NON_OBJECT_WORD:1, BL_STRONG:1, BL_OPERATOR:0
-// CHECK: Inline instruction for block variable layout: 0x0100
+ // CHECK: Inline block variable layout: 0x0100, BL_STRONG:1, BL_OPERATOR:0
void (^block4)() = ^{ printf("%c %#lx", ch, fourByte); NSLog(@"%@", strong); };
// Test5
-// CHECK: block variable layout: BL_NON_OBJECT_WORD:3, BL_STRONG:1, BL_OPERATOR:0
+ // CHECK: Inline block variable layout: 0x0100, BL_STRONG:1, BL_OPERATOR:0
void (^block5)() = ^{ NSLog(@"%@", strong); printf("%c %#llx", ch, eightByte); };
// Test6
-// CHECK: block variable layout: BL_OPERATOR:0
+ // CHECK: Block variable layout: BL_OPERATOR:0
void (^block6)() = ^{ printf("%#llx", eightByte); };
}
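
The rewritten CHECK lines make the inline descriptor self-describing: its low three nibbles are the strong, byref, and weak capture counts, so 0x0100 is one strong capture and nothing else. A decoder that reproduces the printed form (nibble order inferred from these expected strings; zero counts are omitted, as in the test output):

    #include <cstdio>

    void decode(unsigned layout) {
      unsigned strong = (layout >> 8) & 0xF;
      unsigned byref  = (layout >> 4) & 0xF;
      unsigned weak   = layout & 0xF;
      std::printf("0x%04x ->", layout);
      if (strong) std::printf(" BL_STRONG:%u,", strong);
      if (byref)  std::printf(" BL_BYREF:%u,", byref);
      if (weak)   std::printf(" BL_WEAK:%u,", weak);
      std::puts(" BL_OPERATOR:0");
    }

    int main() {
      decode(0x0100); // Test1-5 above: a single strong capture
      decode(0x0331); // 3 strong, 3 byref, 1 weak
    }
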
diff --git a/clang/test/CodeGenObjC/arc-captured-32bit-block-var-layout.m b/clang/test/CodeGenObjC/arc-captured-32bit-block-var-layout.m
index 45a894c3815..d1b57834270 100644
--- a/clang/test/CodeGenObjC/arc-captured-32bit-block-var-layout.m
+++ b/clang/test/CodeGenObjC/arc-captured-32bit-block-var-layout.m
@@ -33,7 +33,7 @@ void f() {
// and a descriptor pointer).
// Test 1
-// CHECK: Inline instruction for block variable layout: 0x0320
+// CHECK: Inline block variable layout: 0x0320, BL_STRONG:3, BL_BYREF:2, BL_OPERATOR:0
void (^b)() = ^{
byref_int = sh + ch+ch1+ch2 ;
x(bar);
@@ -44,7 +44,7 @@ void f() {
b();
// Test 2
-// CHECK: Inline instruction for block variable layout: 0x0331
+// CHECK: Inline block variable layout: 0x0331, BL_STRONG:3, BL_BYREF:3, BL_WEAK:1, BL_OPERATOR:0
void (^c)() = ^{
byref_int = sh + ch+ch1+ch2 ;
x(bar);
@@ -65,7 +65,7 @@ void g() {
unsigned int i;
NSString *y;
NSString *z;
-// CHECK: Inline instruction for block variable layout: 0x0401
+// CHECK: Inline block variable layout: 0x0401, BL_STRONG:4, BL_WEAK:1, BL_OPERATOR:0
void (^c)() = ^{
int j = i + bletch;
x(foo);
@@ -110,7 +110,7 @@ void h() {
block variable layout: BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_NON_OBJECT_WORD:1,
BL_UNRETAINED:1, BL_NON_OBJECT_WORD:3, BL_BYREF:1, BL_OPERATOR:0
*/
-// CHECK: block variable layout: BL_BYREF:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_OPERATOR:0
+// CHECK: Block variable layout: BL_BYREF:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_OPERATOR:0
void (^c)() = ^{
x(s2.ui.o1);
x(u2.o1);
@@ -125,7 +125,7 @@ void arr1() {
__unsafe_unretained id unsafe_unretained_var[4];
} imported_s;
-// CHECK: block variable layout: BL_UNRETAINED:4, BL_OPERATOR:0
+// CHECK: Block variable layout: BL_UNRETAINED:4, BL_OPERATOR:0
void (^c)() = ^{
x(imported_s.unsafe_unretained_var[2]);
};
@@ -140,7 +140,7 @@ void arr2() {
__unsafe_unretained id unsafe_unretained_var[4];
} imported_s;
-// CHECK: block variable layout: BL_NON_OBJECT_WORD:1, BL_UNRETAINED:4, BL_OPERATOR:0
+// CHECK: Block variable layout: BL_NON_OBJECT_WORD:1, BL_UNRETAINED:4, BL_OPERATOR:0
void (^c)() = ^{
x(imported_s.unsafe_unretained_var[2]);
};
@@ -155,7 +155,7 @@ void arr3() {
__unsafe_unretained id unsafe_unretained_var[0];
} imported_s;
-// CHECK: block variable layout: BL_OPERATOR:0
+// CHECK: Block variable layout: BL_OPERATOR:0
void (^c)() = ^{
int i = imported_s.a;
};
@@ -181,7 +181,7 @@ void arr4() {
} f4[2][2];
} captured_s;
-// CHECK: block variable layout: BL_UNRETAINED:3, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_OPERATOR:0
+// CHECK: Block variable layout: BL_UNRETAINED:3, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_OPERATOR:0
void (^c)() = ^{
id i = captured_s.f0.s_f1;
};
@@ -199,7 +199,7 @@ void bf1() {
int flag4: 24;
} s;
-// CHECK: block variable layout: BL_OPERATOR:0
+// CHECK: Block variable layout: BL_OPERATOR:0
int (^c)() = ^{
return s.flag;
};
@@ -212,7 +212,7 @@ void bf2() {
int flag : 1;
} s;
-// CHECK: block variable layout: BL_OPERATOR:0
+// CHECK: Block variable layout: BL_OPERATOR:0
int (^c)() = ^{
return s.flag;
};
@@ -243,7 +243,7 @@ void bf3() {
unsigned int _filler : 32;
} _flags;
-// CHECK: block variable layout: BL_OPERATOR:0
+// CHECK: Block variable layout: BL_OPERATOR:0
unsigned char (^c)() = ^{
return _flags._draggedNodesAreDeletable;
};
@@ -278,7 +278,7 @@ void bf4() {
unsigned int _filler : 32;
} _flags;
-// CHECK: block variable layout: BL_OPERATOR:0
+// CHECK: Block variable layout: BL_OPERATOR:0
unsigned char (^c)() = ^{
return _flags._draggedNodesAreDeletable;
};
@@ -296,7 +296,7 @@ void bf5() {
unsigned char flag1 : 1;
} _flags;
-// CHECK: block variable layout: BL_OPERATOR:0
+// CHECK: Block variable layout: BL_OPERATOR:0
unsigned char (^c)() = ^{
return _flags.flag;
};
@@ -313,7 +313,7 @@ void bf6() {
unsigned char flag1 : 1;
} _flags;
-// CHECK: block variable layout: BL_OPERATOR:0
+// CHECK: Block variable layout: BL_OPERATOR:0
unsigned char (^c)() = ^{
return _flags.flag;
};
@@ -329,7 +329,7 @@ void Test7() {
__weak id wid9, wid10, wid11, wid12;
__weak id wid13, wid14, wid15, wid16;
const id bar = (id) opaque_id();
-// CHECK: block variable layout: BL_STRONG:1, BL_WEAK:16, BL_OPERATOR:0
+// CHECK: Block variable layout: BL_STRONG:1, BL_WEAK:16, BL_OPERATOR:0
void (^b)() = ^{
x(bar);
x(wid1);
@@ -364,7 +364,7 @@ __weak id wid;
__weak id w9, w10, w11, w12;
__weak id w13, w14, w15, w16;
const id bar = (id) opaque_id();
-// CHECK: block variable layout: BL_STRONG:1, BL_WEAK:16, BL_WEAK:16, BL_WEAK:1, BL_OPERATOR:0
+// CHECK: Block variable layout: BL_STRONG:1, BL_WEAK:16, BL_WEAK:16, BL_WEAK:1, BL_OPERATOR:0
void (^b)() = ^{
x(bar);
x(wid1);
diff --git a/clang/test/CodeGenObjC/arc-captured-block-var-inlined-layout.m b/clang/test/CodeGenObjC/arc-captured-block-var-inlined-layout.m
index 89e2b570b03..07b194da943 100644
--- a/clang/test/CodeGenObjC/arc-captured-block-var-inlined-layout.m
+++ b/clang/test/CodeGenObjC/arc-captured-block-var-inlined-layout.m
@@ -1,7 +1,7 @@
// RUN: %clang_cc1 -fblocks -fobjc-arc -fobjc-runtime-has-weak -triple x86_64-apple-darwin -print-ivar-layout -emit-llvm -o /dev/null %s > %t-64.layout
-// RUN: FileCheck --input-file=%t-64.layout %s
+// RUN: FileCheck -check-prefix=CHECK -check-prefix=CHECK-64 --input-file=%t-64.layout %s
// RUN: %clang_cc1 -fblocks -fobjc-arc -fobjc-runtime-has-weak -triple i386-apple-darwin -print-ivar-layout -emit-llvm -o /dev/null %s > %t-32.layout
-// RUN: FileCheck -check-prefix=CHECK-i386 --input-file=%t-32.layout %s
+// RUN: FileCheck -check-prefix=CHECK -check-prefix=CHECK-32 --input-file=%t-32.layout %s
// rdar://12184410
void x(id y) {}
@@ -17,22 +17,19 @@ void f() {
__block id byref_bab = (id)0;
__block id bl_var1;
-// CHECK: Inline instruction for block variable layout: 0x0100
-// CHECK-i386: Inline instruction for block variable layout: 0x0100
+// CHECK: Inline block variable layout: 0x0100, BL_STRONG:1, BL_OPERATOR:0
void (^b)() = ^{
x(bar);
};
-// CHECK: Inline instruction for block variable layout: 0x0210
-// CHECK-i386: Inline instruction for block variable layout: 0x0210
+// CHECK: Inline block variable layout: 0x0210, BL_STRONG:2, BL_BYREF:1, BL_OPERATOR:0
void (^c)() = ^{
x(bar);
x(baz);
byref_int = 1;
};
-// CHECK: Inline instruction for block variable layout: 0x0230
-// CHECK-i386: Inline instruction for block variable layout: 0x0230
+// CHECK: Inline block variable layout: 0x0230, BL_STRONG:2, BL_BYREF:3, BL_OPERATOR:0
void (^d)() = ^{
x(bar);
x(baz);
@@ -41,8 +38,7 @@ void f() {
byref_bab = 0;
};
-// CHECK: Inline instruction for block variable layout: 0x0231
-// CHECK-i386: Inline instruction for block variable layout: 0x0231
+// CHECK: Inline block variable layout: 0x0231, BL_STRONG:2, BL_BYREF:3, BL_WEAK:1, BL_OPERATOR:0
__weak id wid;
id (^e)() = ^{
x(bar);
@@ -53,8 +49,7 @@ void f() {
return wid;
};
-// CHECK: Inline instruction for block variable layout: 0x0235
-// CHECK-i386: Inline instruction for block variable layout: 0x0235
+// CHECK: Inline block variable layout: 0x0235, BL_STRONG:2, BL_BYREF:3, BL_WEAK:5, BL_OPERATOR:0
__weak id wid1, wid2, wid3, wid4;
id (^f)() = ^{
x(bar);
@@ -69,8 +64,7 @@ void f() {
return wid;
};
-// CHECK: Inline instruction for block variable layout: 0x035
-// CHECK-i386: Inline instruction for block variable layout: 0x035
+// CHECK: Inline block variable layout: 0x035, BL_BYREF:3, BL_WEAK:5, BL_OPERATOR:0
id (^g)() = ^{
byref_int = 1;
bl_var1 = 0;
@@ -82,21 +76,18 @@ void f() {
return wid;
};
-// CHECK: Inline instruction for block variable layout: 0x01
-// CHECK-i386: Inline instruction for block variable layout: 0x01
+// CHECK: Inline block variable layout: 0x01, BL_WEAK:1, BL_OPERATOR:0
id (^h)() = ^{
return wid;
};
-// CHECK: Inline instruction for block variable layout: 0x020
-// CHECK-i386: Inline instruction for block variable layout: 0x020
+// CHECK: Inline block variable layout: 0x020, BL_BYREF:2, BL_OPERATOR:0
void (^ii)() = ^{
byref_int = 1;
byref_bab = 0;
};
-// CHECK: Inline instruction for block variable layout: 0x0102
-// CHECK-i386: Inline instruction for block variable layout: 0x0102
+// CHECK: Inline block variable layout: 0x0102, BL_STRONG:1, BL_WEAK:2, BL_OPERATOR:0
void (^jj)() = ^{
x(bar);
x(wid1);
@@ -114,8 +105,7 @@ int main() {
__weak NSString *w1 = 0;
-// CHECK: Inline instruction for block variable layout: 0x0201
-// CHECK-i386: Inline instruction for block variable layout: 0x0201
+// CHECK: Inline block variable layout: 0x0201, BL_STRONG:2, BL_WEAK:1, BL_OPERATOR:0
dispatch_block_t block2 = ^{
NSLog(@"%@, %@, %@", s1, w1, s2);
};
diff --git a/clang/test/CodeGenObjC/arc-captured-block-var-layout.m b/clang/test/CodeGenObjC/arc-captured-block-var-layout.m
index 42e6060dcc2..f8518d1c44b 100644
--- a/clang/test/CodeGenObjC/arc-captured-block-var-layout.m
+++ b/clang/test/CodeGenObjC/arc-captured-block-var-layout.m
@@ -34,7 +34,7 @@ void f() {
// Test 1
// Inline instruction for block variable layout: 0x0320 (3 strong 2 byref)
-// CHECK-LP64: Inline instruction for block variable layout: 0x0320
+// CHECK-LP64: Inline block variable layout: 0x0320, BL_STRONG:3, BL_BYREF:2, BL_OPERATOR:0
void (^b)() = ^{
byref_int = sh + ch+ch1+ch2 ;
x(bar);
@@ -46,7 +46,7 @@ void f() {
// Test 2
// Inline instruction for block variable layout: 0x0331 (3 strong 3 byref 1 weak)
-// CHECK-LP64: Inline instruction for block variable layout: 0x0331
+// CHECK-LP64: Inline block variable layout: 0x0331, BL_STRONG:3, BL_BYREF:3, BL_WEAK:1, BL_OPERATOR:0
void (^c)() = ^{
byref_int = sh + ch+ch1+ch2 ;
x(bar);
@@ -68,7 +68,7 @@ void g() {
NSString *y;
NSString *z;
// Inline instruction for block variable layout: 0x0401 (4 strong 0 byref 1 weak)
-// CHECK-LP64: Inline instruction for block variable layout: 0x0401
+// CHECK-LP64: Inline block variable layout: 0x0401, BL_STRONG:4, BL_WEAK:1, BL_OPERATOR:0
void (^c)() = ^{
int j = i + bletch;
x(foo);
@@ -109,7 +109,7 @@ void h() {
union U u2;
__block id block_id;
-// CHECK-LP64: block variable layout: BL_BYREF:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_OPERATOR:0
+// CHECK-LP64: Block variable layout: BL_BYREF:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_OPERATOR:0
void (^c)() = ^{
x(s2.ui.o1);
x(u2.o1);
@@ -124,7 +124,7 @@ void arr1() {
__unsafe_unretained id unsafe_unretained_var[4];
} imported_s;
-// CHECK-LP64: block variable layout: BL_UNRETAINED:4, BL_OPERATOR:0
+// CHECK-LP64: Block variable layout: BL_UNRETAINED:4, BL_OPERATOR:0
void (^c)() = ^{
x(imported_s.unsafe_unretained_var[2]);
};
@@ -139,7 +139,7 @@ void arr2() {
__unsafe_unretained id unsafe_unretained_var[4];
} imported_s;
-// CHECK-LP64: block variable layout: BL_NON_OBJECT_WORD:1, BL_UNRETAINED:4, BL_OPERATOR:0
+// CHECK-LP64: Block variable layout: BL_NON_OBJECT_WORD:1, BL_UNRETAINED:4, BL_OPERATOR:0
void (^c)() = ^{
x(imported_s.unsafe_unretained_var[2]);
};
@@ -154,7 +154,7 @@ void arr3() {
__unsafe_unretained id unsafe_unretained_var[0];
} imported_s;
-// CHECK-LP64: block variable layout: BL_OPERATOR:0
+// CHECK-LP64: Block variable layout: BL_OPERATOR:0
void (^c)() = ^{
int i = imported_s.a;
};
@@ -180,7 +180,7 @@ void arr4() {
} f4[2][2];
} captured_s;
-// CHECK-LP64: block variable layout: BL_UNRETAINED:3, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_OPERATOR:0
+// CHECK-LP64: Block variable layout: BL_UNRETAINED:3, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_NON_OBJECT_WORD:1, BL_UNRETAINED:1, BL_OPERATOR:0
void (^c)() = ^{
id i = captured_s.f0.s_f1;
};
@@ -198,7 +198,7 @@ void bf1() {
int flag4: 24;
} s;
-// CHECK-LP64: block variable layout: BL_OPERATOR:0
+// CHECK-LP64: Block variable layout: BL_OPERATOR:0
int (^c)() = ^{
return s.flag;
};
@@ -211,7 +211,7 @@ void bf2() {
int flag : 1;
} s;
-// CHECK-LP64: block variable layout: BL_OPERATOR:0
+// CHECK-LP64: Block variable layout: BL_OPERATOR:0
int (^c)() = ^{
return s.flag;
};
@@ -242,7 +242,7 @@ void bf3() {
unsigned int _filler : 32;
} _flags;
-// CHECK-LP64: block variable layout: BL_OPERATOR:0
+// CHECK-LP64: Block variable layout: BL_OPERATOR:0
unsigned char (^c)() = ^{
return _flags._draggedNodesAreDeletable;
};
@@ -277,7 +277,7 @@ void bf4() {
unsigned int _filler : 32;
} _flags;
-// CHECK-LP64: block variable layout: BL_OPERATOR:0
+// CHECK-LP64: Block variable layout: BL_OPERATOR:0
unsigned char (^c)() = ^{
return _flags._draggedNodesAreDeletable;
};
@@ -295,7 +295,7 @@ void bf5() {
unsigned char flag1 : 1;
} _flags;
-// CHECK-LP64: block variable layout: BL_OPERATOR:0
+// CHECK-LP64: Block variable layout: BL_OPERATOR:0
unsigned char (^c)() = ^{
return _flags.flag;
};
@@ -312,7 +312,7 @@ void bf6() {
unsigned char flag1 : 1;
} _flags;
-// CHECK-LP64: block variable layout: BL_OPERATOR:0
+// CHECK-LP64: Block variable layout: BL_OPERATOR:0
unsigned char (^c)() = ^{
return _flags.flag;
};
@@ -328,7 +328,7 @@ void Test7() {
__weak id wid9, wid10, wid11, wid12;
__weak id wid13, wid14, wid15, wid16;
const id bar = (id) opaque_id();
-// CHECK-LP64: block variable layout: BL_STRONG:1, BL_WEAK:16, BL_OPERATOR:0
+// CHECK-LP64: Block variable layout: BL_STRONG:1, BL_WEAK:16, BL_OPERATOR:0
void (^b)() = ^{
x(bar);
x(wid1);
@@ -363,7 +363,7 @@ __weak id wid;
__weak id w9, w10, w11, w12;
__weak id w13, w14, w15, w16;
const id bar = (id) opaque_id();
-// CHECK-LP64: block variable layout: BL_STRONG:1, BL_WEAK:16, BL_WEAK:16, BL_WEAK:1, BL_OPERATOR:0
+// CHECK-LP64: Block variable layout: BL_STRONG:1, BL_WEAK:16, BL_WEAK:16, BL_WEAK:1, BL_OPERATOR:0
void (^b)() = ^{
x(bar);
x(wid1);
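
The long out-of-line layouts follow a run-length encoding in which one entry covers at most 16 captures, which is why the 33 weak captures in the last hunk print as BL_WEAK:16, BL_WEAK:16, BL_WEAK:1. A sketch of that split (the 16-entry cap is inferred from the expected output, not stated elsewhere in the patch):

    #include <cstdio>

    // Split a run of n captures into entries of at most 16, as the
    // expected strings above do.
    void emit(const char *op, unsigned n) {
      while (n > 0) {
        unsigned chunk = n > 16 ? 16 : n;
        std::printf("%s:%u, ", op, chunk);
        n -= chunk;
      }
    }

    int main() {
      emit("BL_STRONG", 1);
      emit("BL_WEAK", 33); // BL_WEAK:16, BL_WEAK:16, BL_WEAK:1,
      std::puts("BL_OPERATOR:0");
    }
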
diff --git a/clang/test/CodeGenObjC/arc-literals.m b/clang/test/CodeGenObjC/arc-literals.m
index d107a28506f..ab6c82b743f 100644
--- a/clang/test/CodeGenObjC/arc-literals.m
+++ b/clang/test/CodeGenObjC/arc-literals.m
@@ -47,10 +47,10 @@ void test_array(id a, id b) {
// CHECK: call i8* @objc_retain(i8*
// Constructing the array
- // CHECK: [[T0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS:%[A-Za-z0-9]+]], i32 0, i32 0
+ // CHECK: [[T0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS:%[A-Za-z0-9]+]], i64 0, i64 0
// CHECK-NEXT: [[V0:%.*]] = load i8*, i8** [[A]],
// CHECK-NEXT: store i8* [[V0]], i8** [[T0]]
- // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS]], i32 0, i32 1
+ // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS]], i64 0, i64 1
// CHECK-NEXT: [[V1:%.*]] = load i8*, i8** [[B]],
// CHECK-NEXT: store i8* [[V1]], i8** [[T0]]
@@ -83,16 +83,16 @@ void test_dictionary(id k1, id o1, id k2, id o2) {
// CHECK: call i8* @objc_retain(i8*
// Constructing the arrays
- // CHECK: [[T0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[KEYS:%[A-Za-z0-9]+]], i32 0, i32 0
+ // CHECK: [[T0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[KEYS:%[A-Za-z0-9]+]], i64 0, i64 0
// CHECK-NEXT: [[V0:%.*]] = load i8*, i8** [[K1]],
// CHECK-NEXT: store i8* [[V0]], i8** [[T0]]
- // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS:%[A-Za-z0-9]+]], i32 0, i32 0
+ // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS:%[A-Za-z0-9]+]], i64 0, i64 0
// CHECK-NEXT: [[V1:%.*]] = load i8*, i8** [[O1]],
// CHECK-NEXT: store i8* [[V1]], i8** [[T0]]
- // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[KEYS]], i32 0, i32 1
+ // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[KEYS]], i64 0, i64 1
// CHECK-NEXT: [[V2:%.*]] = load i8*, i8** [[K2]],
// CHECK-NEXT: store i8* [[V2]], i8** [[T0]]
- // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS]], i32 0, i32 1
+ // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS]], i64 0, i64 1
// CHECK-NEXT: [[V3:%.*]] = load i8*, i8** [[O2]],
// CHECK-NEXT: store i8* [[V3]], i8** [[T0]]
@@ -128,7 +128,7 @@ void test_property(B *b) {
// Retain parameter
// CHECK: call i8* @objc_retain
- // CHECK: [[T0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[OBJECTS:%.*]], i32 0, i32 0
+ // CHECK: [[T0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[OBJECTS:%.*]], i64 0, i64 0
// Invoke 'prop'
// CHECK: [[SEL:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES
diff --git a/clang/test/CodeGenObjC/arc.m b/clang/test/CodeGenObjC/arc.m
index 3aafefd43cd..f756df1460f 100644
--- a/clang/test/CodeGenObjC/arc.m
+++ b/clang/test/CodeGenObjC/arc.m
@@ -515,7 +515,7 @@ void test19() {
// CHECK-NEXT: [[CALL:%.*]] = call i8* @test19_helper()
// CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[CALL]]) [[NUW]]
- // CHECK-NEXT: [[SLOT:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[X]], i32 0, i64 2
+ // CHECK-NEXT: [[SLOT:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[X]], i64 0, i64 2
// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[SLOT]]
// CHECK-NEXT: store i8* [[T1]], i8** [[SLOT]]
// CHECK-NEXT: call void @objc_release(i8* [[T0]]) [[NUW]]
@@ -556,7 +556,7 @@ void test20(unsigned n) {
// Zero-initialize.
// CHECK-NEXT: [[T0:%.*]] = bitcast i8** [[VLA]] to i8*
// CHECK-NEXT: [[T1:%.*]] = mul nuw i64 [[DIM]], 8
- // CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 [[T1]], i32 8, i1 false)
+ // CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 [[T1]], i32 16, i1 false)
// Destroy.
// CHECK-NEXT: [[END:%.*]] = getelementptr inbounds i8*, i8** [[VLA]], i64 [[DIM]]
@@ -599,7 +599,7 @@ void test21(unsigned n) {
// CHECK-NEXT: [[T0:%.*]] = bitcast [3 x i8*]* [[VLA]] to i8*
// CHECK-NEXT: [[T1:%.*]] = mul nuw i64 2, [[DIM]]
// CHECK-NEXT: [[T2:%.*]] = mul nuw i64 [[T1]], 24
- // CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 [[T2]], i32 8, i1 false)
+ // CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* [[T0]], i8 0, i64 [[T2]], i32 16, i1 false)
// Destroy.
// CHECK-NEXT: [[T0:%.*]] = mul nuw i64 2, [[DIM]]
diff --git a/clang/test/CodeGenObjC/debug-info-block-captured-self.m b/clang/test/CodeGenObjC/debug-info-block-captured-self.m
index b2ad97b65ff..06dd9ea60eb 100644
--- a/clang/test/CodeGenObjC/debug-info-block-captured-self.m
+++ b/clang/test/CodeGenObjC/debug-info-block-captured-self.m
@@ -53,13 +53,13 @@ typedef enum {
// CHECK: define internal void {{.*}}_block_invoke{{.*}}
// CHECK: %[[MEM1:.*]] = alloca i8*, align 8
// CHECK-NEXT: %[[MEM2:.*]] = alloca i8*, align 8
+// CHECK-NEXT: [[DBGADDR:%.*]] = alloca [[BLOCK_T:<{.*}>]]*, align 8
// CHECK: store i8* [[BLOCK_DESC:%.*]], i8** %[[MEM1]], align 8
// CHECK: %[[TMP0:.*]] = load i8*, i8** %[[MEM1]]
// CHECK: call void @llvm.dbg.value(metadata i8* %[[TMP0]], i64 0, metadata ![[BDMD:[0-9]+]], metadata !{{.*}})
// CHECK: call void @llvm.dbg.declare(metadata i8* [[BLOCK_DESC]], metadata ![[BDMD:[0-9]+]], metadata !{{.*}})
-// CHECK: %[[TMP1:.*]] = bitcast
-// CHECK-NEXT: store
-// CHECK: call void @llvm.dbg.declare(metadata <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>** {{[^,]*}}, metadata ![[SELF:.*]], metadata !{{.*}})
+// CHECK: store [[BLOCK_T]]* {{%.*}}, [[BLOCK_T]]** [[DBGADDR]], align 8
+// CHECK: call void @llvm.dbg.declare(metadata [[BLOCK_T]]** [[DBGADDR]], metadata ![[SELF:.*]], metadata !{{.*}})
// make sure we are still in the same function
// CHECK: define {{.*}}__copy_helper_block_
// Metadata
diff --git a/clang/test/CodeGenObjC/ivar-base-as-invariant-load.m b/clang/test/CodeGenObjC/ivar-base-as-invariant-load.m
index 4a17eb16f6b..a3201e04011 100644
--- a/clang/test/CodeGenObjC/ivar-base-as-invariant-load.m
+++ b/clang/test/CodeGenObjC/ivar-base-as-invariant-load.m
@@ -23,7 +23,7 @@
@end
-// CHECK: [[T1:%.*]] = load i64, i64* @"OBJC_IVAR_$_A._flags", !invariant.load ![[MD_NUM:[0-9]+]]
-// CHECK: [[T2:%.*]] = load i64, i64* @"OBJC_IVAR_$_A._flags", !invariant.load ![[MD_NUM]]
-// CHECK: [[T3:%.*]] = load i64, i64* @"OBJC_IVAR_$_A._flags", !invariant.load ![[MD_NUM]]
+// CHECK: [[T1:%.*]] = load i64, i64* @"OBJC_IVAR_$_A._flags", align 8, !invariant.load ![[MD_NUM:[0-9]+]]
+// CHECK: [[T2:%.*]] = load i64, i64* @"OBJC_IVAR_$_A._flags", align 8, !invariant.load ![[MD_NUM]]
+// CHECK: [[T3:%.*]] = load i64, i64* @"OBJC_IVAR_$_A._flags", align 8, !invariant.load ![[MD_NUM]]
//
diff --git a/clang/test/CodeGenObjC/ivar-invariant.m b/clang/test/CodeGenObjC/ivar-invariant.m
index b9c5bec9306..eb1ba9a0d73 100644
--- a/clang/test/CodeGenObjC/ivar-invariant.m
+++ b/clang/test/CodeGenObjC/ivar-invariant.m
@@ -29,7 +29,7 @@
@end
// CHECK: define internal i8* @"\01-[Derived init]"
-// CHECK: [[IVAR:%.*]] = load i64, i64* @"OBJC_IVAR_$_Derived.member", !invariant.load
+// CHECK: [[IVAR:%.*]] = load i64, i64* @"OBJC_IVAR_$_Derived.member", align 8, !invariant.load
void * variant_load_1(int i) {
void *ptr;
@@ -41,7 +41,7 @@ void * variant_load_1(int i) {
}
// CHECK-LABEL: define i8* @variant_load_1(i32 %i)
-// CHECK: [[IVAR:%.*]] = load i64, i64* @"OBJC_IVAR_$_Derived.member"{{$}}
+// CHECK: [[IVAR:%.*]] = load i64, i64* @"OBJC_IVAR_$_Derived.member", align 8{{$}}
@interface Container : Derived @end
@implementation Container
@@ -51,8 +51,8 @@ void * variant_load_1(int i) {
}
@end
-// CHECK: define internal i8* @"\01-[Container invariant_load_1]"
-// CHECK: [[IVAR:%.*]] = load i64, i64* @"OBJC_IVAR_$_Derived.member", !invariant.load
+// CHECK-LABEL: define internal i8* @"\01-[Container invariant_load_1]"
+// CHECK: [[IVAR:%.*]] = load i64, i64* @"OBJC_IVAR_$_Derived.member", align 8, !invariant.load
@interface ForBlock
{
diff --git a/clang/test/CodeGenObjC/mrr-captured-block-var-inlined-layout.m b/clang/test/CodeGenObjC/mrr-captured-block-var-inlined-layout.m
index 6ea656443a8..c87140a87d8 100644
--- a/clang/test/CodeGenObjC/mrr-captured-block-var-inlined-layout.m
+++ b/clang/test/CodeGenObjC/mrr-captured-block-var-inlined-layout.m
@@ -1,7 +1,7 @@
// RUN: %clang_cc1 -fblocks -fobjc-runtime-has-weak -triple x86_64-apple-darwin -print-ivar-layout -emit-llvm -o /dev/null %s > %t-64.layout
-// RUN: FileCheck --input-file=%t-64.layout %s
+// RUN: FileCheck -check-prefix=CHECK -check-prefix=CHECK-64 --input-file=%t-64.layout %s
// RUN: %clang_cc1 -fblocks -fobjc-runtime-has-weak -triple i386-apple-darwin -print-ivar-layout -emit-llvm -o /dev/null %s > %t-32.layout
-// RUN: FileCheck -check-prefix=CHECK-i386 --input-file=%t-32.layout %s
+// RUN: FileCheck -check-prefix=CHECK -check-prefix=CHECK-32 --input-file=%t-32.layout %s
// rdar://12184410
@@ -20,15 +20,13 @@ void f() {
__block id bl_var1;
// block variable layout: BL_STRONG:1, BL_OPERATOR:0
-// CHECK: Inline instruction for block variable layout: 0x0100
-// CHECK-i386: Inline instruction for block variable layout: 0x0100
+// CHECK: Inline block variable layout: 0x0100, BL_STRONG:1, BL_OPERATOR:0
void (^b)() = ^{
x(bar);
};
// block variable layout: BL_STRONG:2, BL_BYREF:1, BL_OPERATOR:0
-// CHECK: Inline instruction for block variable layout: 0x0210
-// CHECK-i386: Inline instruction for block variable layout: 0x0210
+// CHECK: Inline block variable layout: 0x0210, BL_STRONG:2, BL_BYREF:1, BL_OPERATOR:0
void (^c)() = ^{
x(bar);
x(baz);
@@ -36,8 +34,7 @@ void f() {
};
// block variable layout: BL_STRONG:2, BL_BYREF:3, BL_OPERATOR:0
-// CHECK: Inline instruction for block variable layout: 0x0230
-// CHECK-i386: Inline instruction for block variable layout: 0x0230
+// CHECK: Inline block variable layout: 0x0230, BL_STRONG:2, BL_BYREF:3, BL_OPERATOR:0
void (^d)() = ^{
x(bar);
x(baz);
@@ -47,8 +44,7 @@ void f() {
};
// block variable layout: BL_STRONG:2, BL_BYREF:3, BL_OPERATOR:0
-// CHECK: Inline instruction for block variable layout: 0x0230
-// CHECK-i386: Inline instruction for block variable layout: 0x0230
+// CHECK: Inline block variable layout: 0x0230, BL_STRONG:2, BL_BYREF:3, BL_OPERATOR:0
id (^e)() = ^{
x(bar);
x(baz);
@@ -58,8 +54,7 @@ void f() {
return wid;
};
-// CHECK: Inline instruction for block variable layout: 0x020
-// CHECK-i386: Inline instruction for block variable layout: 0x020
+// CHECK: Inline block variable layout: 0x020, BL_BYREF:2, BL_OPERATOR:0
void (^ii)() = ^{
byref_int = 1;
byref_bab = 0;
diff --git a/clang/test/CodeGenObjC/selector-ref-invariance.m b/clang/test/CodeGenObjC/selector-ref-invariance.m
index 5758a1cd7ff..18fb828d29d 100644
--- a/clang/test/CodeGenObjC/selector-ref-invariance.m
+++ b/clang/test/CodeGenObjC/selector-ref-invariance.m
@@ -3,7 +3,7 @@
// rdar://6027699
void test(id x) {
-// CHECK: load i8*, i8** @OBJC_SELECTOR_REFERENCES_, !invariant.load
+// CHECK: load i8*, i8** @OBJC_SELECTOR_REFERENCES_, align 8, !invariant.load
// CHECK: @objc_msgSend
[x foo];
}
diff --git a/clang/test/CodeGenObjCXX/arc-new-delete.mm b/clang/test/CodeGenObjCXX/arc-new-delete.mm
index 9a61f183c69..f853ea4366a 100644
--- a/clang/test/CodeGenObjCXX/arc-new-delete.mm
+++ b/clang/test/CodeGenObjCXX/arc-new-delete.mm
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -fobjc-arc -fobjc-runtime-has-weak -fblocks -triple x86_64-apple-darwin10.0.0 -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -fobjc-arc -fobjc-runtime-has-weak -fblocks -triple x86_64-apple-darwin10.0.0 -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=UNOPT
+// RUN: %clang_cc1 -fobjc-arc -fobjc-runtime-has-weak -fblocks -triple x86_64-apple-darwin10.0.0 -emit-llvm -o - %s -O -disable-llvm-optzns | FileCheck %s -check-prefix=CHECK -check-prefix=OPT
typedef __strong id strong_id;
typedef __weak id weak_id;
@@ -6,8 +7,10 @@ typedef __weak id weak_id;
// CHECK-LABEL: define void @_Z8test_newP11objc_object
void test_new(id invalue) {
// CHECK: [[INVALUEADDR:%.*]] = alloca i8*
- // CHECK-NEXT: store i8* null, i8** [[INVALUEADDR]]
- // CHECK-NEXT: call void @objc_storeStrong(i8** [[INVALUEADDR]], i8* [[INVALUE:%.*]])
+ // UNOPT-NEXT: store i8* null, i8** [[INVALUEADDR]]
+ // UNOPT-NEXT: call void @objc_storeStrong(i8** [[INVALUEADDR]], i8* [[INVALUE:%.*]])
+ // OPT-NEXT: [[T0:%.*]] = call i8* @objc_retain(i8* [[INVALUE:%.*]])
+ // OPT-NEXT: store i8* [[T0]], i8** [[INVALUEADDR]]
// CHECK: call noalias i8* @_Znwm
// CHECK-NEXT: {{bitcast i8\*.*to i8\*\*}}
@@ -15,7 +18,8 @@ void test_new(id invalue) {
new strong_id;
// CHECK: call noalias i8* @_Znwm
// CHECK-NEXT: {{bitcast i8\*.*to i8\*\*}}
- // CHECK-NEXT: store i8* null, i8**
+ // UNOPT-NEXT: store i8* null, i8**
+ // OPT-NEXT: call i8* @objc_initWeak(i8** {{.*}}, i8* null)
new weak_id;
// CHECK: call noalias i8* @_Znwm
@@ -24,7 +28,8 @@ void test_new(id invalue) {
new __strong id;
// CHECK: call noalias i8* @_Znwm
// CHECK-NEXT: {{bitcast i8\*.*to i8\*\*}}
- // CHECK-NEXT: store i8* null, i8**
+ // UNOPT-NEXT: store i8* null, i8**
+ // OPT-NEXT: call i8* @objc_initWeak(i8** {{.*}}, i8* null)
new __weak id;
// CHECK: call noalias i8* @_Znwm
@@ -36,7 +41,8 @@ void test_new(id invalue) {
// CHECK: call i8* @objc_initWeak
new __weak id(invalue);
- // CHECK: call void @objc_storeStrong
+ // UNOPT: call void @objc_storeStrong
+ // OPT: call void @objc_release
// CHECK: ret void
}
@@ -57,8 +63,9 @@ void test_array_new() {
// CHECK-LABEL: define void @_Z11test_deletePU8__strongP11objc_objectPU6__weakS0_
void test_delete(__strong id *sptr, __weak id *wptr) {
// CHECK: br i1
- // CHECK: load i8*, i8**
- // CHECK-NEXT: call void @objc_release
+ // UNOPT: call void @objc_storeStrong(i8** {{.*}}, i8* null)
+ // OPT: load i8*, i8**
+ // OPT-NEXT: call void @objc_release
// CHECK: call void @_ZdlPv
delete sptr;
@@ -77,7 +84,9 @@ void test_array_delete(__strong id *sptr, __weak id *wptr) {
// CHECK-NEXT: icmp eq i8** [[BEGIN]], [[END]]
// CHECK: [[PAST:%.*]] = phi i8** [ [[END]], {{%.*}} ], [ [[CUR:%.*]],
// CHECK-NEXT: [[CUR]] = getelementptr inbounds i8*, i8** [[PAST]], i64 -1
- // CHECK-NEXT: call void @objc_storeStrong(i8** [[CUR]], i8* null)
+ // UNOPT-NEXT: call void @objc_storeStrong(i8** [[CUR]], i8* null)
+ // OPT-NEXT: [[T0:%.*]] = load i8*, i8** [[CUR]]
+ // OPT-NEXT: objc_release(i8* [[T0]])
// CHECK-NEXT: icmp eq i8** [[CUR]], [[BEGIN]]
// CHECK: call void @_ZdaPv
delete [] sptr;
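
The new UNOPT/OPT prefixes rely on a documented equivalence: objc_storeStrong(&slot, v) retains v, stores it, then releases the old value, so when the stored value is null the whole operation reduces to releasing what was in the slot, exactly the load-then-objc_release sequence the OPT lines check. Open-coded as a sketch (objc_retain and objc_release are the real runtime entry points; both tolerate null):

    extern "C" void *objc_retain(void *obj);
    extern "C" void objc_release(void *obj);

    // What objc_storeStrong(&slot, value) does; with value == null this
    // degenerates to 'objc_release(old)', matching the OPT pattern above.
    void store_strong(void **slot, void *value) {
      void *old = *slot;
      *slot = objc_retain(value);
      objc_release(old);
    }
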
diff --git a/clang/test/CodeGenObjCXX/literals.mm b/clang/test/CodeGenObjCXX/literals.mm
index 7089de23b93..4d1b5019412 100644
--- a/clang/test/CodeGenObjCXX/literals.mm
+++ b/clang/test/CodeGenObjCXX/literals.mm
@@ -22,14 +22,14 @@ void test_array() {
// Initializing first element
// CHECK: [[PTR1:%.*]] = bitcast i8** [[ARR]] to i8*
// CHECK-NEXT: call void @llvm.lifetime.start(i64 8, i8* [[PTR1]])
- // CHECK: [[ELEMENT0:%[a-zA-Z0-9.]+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS]], i32 0, i32 0
+ // CHECK: [[ELEMENT0:%[a-zA-Z0-9.]+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS]], i64 0, i64 0
// CHECK-NEXT: call void @_ZN1XC1Ev
// CHECK-NEXT: [[OBJECT0:%[a-zA-Z0-9.]+]] = invoke i8* @_ZNK1XcvP11objc_objectEv
// CHECK: [[RET0:%[a-zA-Z0-9.]+]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[OBJECT0]])
// CHECK: store i8* [[RET0]], i8** [[ELEMENT0]]
// Initializing the second element
- // CHECK: [[ELEMENT1:%[a-zA-Z0-9.]+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS]], i32 0, i32 1
+ // CHECK: [[ELEMENT1:%[a-zA-Z0-9.]+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS]], i64 0, i64 1
// CHECK-NEXT: invoke void @_ZN1YC1Ev
// CHECK: [[OBJECT1:%[a-zA-Z0-9.]+]] = invoke i8* @_ZNK1YcvP11objc_objectEv
// CHECK: [[RET1:%[a-zA-Z0-9.]+]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[OBJECT1]])
@@ -74,14 +74,14 @@ void test_array_instantiation() {
// Initializing first element
// CHECK: [[PTR1:%.*]] = bitcast i8** [[ARR]] to i8*
// CHECK-NEXT: call void @llvm.lifetime.start(i64 8, i8* [[PTR1]])
- // CHECK: [[ELEMENT0:%[a-zA-Z0-9.]+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS]], i32 0, i32 0
+ // CHECK: [[ELEMENT0:%[a-zA-Z0-9.]+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS]], i64 0, i64 0
// CHECK-NEXT: call void @_ZN1XC1Ev
// CHECK-NEXT: [[OBJECT0:%[a-zA-Z0-9.]+]] = invoke i8* @_ZNK1XcvP11objc_objectEv
// CHECK: [[RET0:%[a-zA-Z0-9.]+]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[OBJECT0]])
// CHECK: store i8* [[RET0]], i8** [[ELEMENT0]]
// Initializing the second element
- // CHECK: [[ELEMENT1:%[a-zA-Z0-9.]+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS]], i32 0, i32 1
+ // CHECK: [[ELEMENT1:%[a-zA-Z0-9.]+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[OBJECTS]], i64 0, i64 1
// CHECK-NEXT: invoke void @_ZN1YC1Ev
// CHECK: [[OBJECT1:%[a-zA-Z0-9.]+]] = invoke i8* @_ZNK1YcvP11objc_objectEv
// CHECK: [[RET1:%[a-zA-Z0-9.]+]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[OBJECT1]])
diff --git a/clang/test/CodeGenObjCXX/property-lvalue-capture.mm b/clang/test/CodeGenObjCXX/property-lvalue-capture.mm
index b800c39fb3c..c5a753c8cf2 100644
--- a/clang/test/CodeGenObjCXX/property-lvalue-capture.mm
+++ b/clang/test/CodeGenObjCXX/property-lvalue-capture.mm
@@ -24,10 +24,10 @@ typedef Quad2<double> Quad2d;
}
@end
-// CHECK: [[TWO:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_, !invariant.load ![[MD_NUM:[0-9]+]]
+// CHECK: [[TWO:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_, align 8, !invariant.load ![[MD_NUM:[0-9]+]]
// CHECK: [[THREE:%.*]] = bitcast [[ONET:%.*]]* [[ONE:%.*]] to i8*
// CHECK: [[CALL:%.*]] = call nonnull %struct.Quad2* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %struct.Quad2* (i8*, i8*)*)(i8* [[THREE]], i8* [[TWO]])
-// CHECK: [[FOUR:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_.2, !invariant.load ![[MD_NUM]]
+// CHECK: [[FOUR:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_.2, align 8, !invariant.load ![[MD_NUM]]
// CHECK: [[FIVE:%.*]] = bitcast [[ONET]]* [[ZERO:%.*]] to i8*
// CHECK: call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, %struct.Quad2*)*)(i8* [[FIVE]], i8* [[FOUR]], %struct.Quad2* nonnull [[CALL]])
@@ -47,7 +47,7 @@ void test(C *c, const A &a) {
}
// CHECK: [[ONE1:%.*]] = load %struct.A*, %struct.A** [[AADDR:%.*]], align 8
-// CHECK: [[TWO1:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_.5, !invariant.load ![[MD_NUM]]
+// CHECK: [[TWO1:%.*]] = load i8*, i8** @OBJC_SELECTOR_REFERENCES_.5, align 8, !invariant.load ![[MD_NUM]]
// CHECK: [[THREE1:%.*]] = bitcast [[TWOT:%.*]]* [[ZERO1:%.*]] to i8*
// CHECK: call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, %struct.A*)*)(i8* [[THREE1]], i8* [[TWO1]], %struct.A* dereferenceable({{[0-9]+}}) [[ONE1]])
// CHECK: store %struct.A* [[ONE1]], %struct.A** [[RESULT:%.*]], align 8
diff --git a/clang/test/CodeGenObjCXX/property-object-conditional-exp.mm b/clang/test/CodeGenObjCXX/property-object-conditional-exp.mm
index e3fc2d70962..cdee635a6fb 100644
--- a/clang/test/CodeGenObjCXX/property-object-conditional-exp.mm
+++ b/clang/test/CodeGenObjCXX/property-object-conditional-exp.mm
@@ -24,10 +24,10 @@ extern "C" bool CGRectIsEmpty(CGRect);
// CHECK: [[SRC:%.*]] = call { i8*, i32 } bitcast (i8* (i8*, i8*, ...)* @objc_msgSend
// CHECK-NEXT: bitcast
-// CHECK-NEXT:getelementptr { i8*, i32 }, { i8*, i32 }* [[SRC:%.*]]
+// CHECK-NEXT:getelementptr inbounds { i8*, i32 }, { i8*, i32 }* [[SRC:%.*]]
// CHECK-NEXT:extractvalue
// CHECK-NEXT:store
-// CHECK-NEXT:getelementptr { i8*, i32 }, { i8*, i32 }* [[SRC:%.*]]
+// CHECK-NEXT:getelementptr inbounds { i8*, i32 }, { i8*, i32 }* [[SRC:%.*]]
// CHECK-NEXT:extractvalue
// CHECK-NEXT:store
dataRect = CGRectIsEmpty(virtualBounds) ? self.bounds : virtualBounds;
diff --git a/clang/test/OpenMP/for_reduction_codegen.cpp b/clang/test/OpenMP/for_reduction_codegen.cpp
index 16a2213655e..3d326bda0ad 100644
--- a/clang/test/OpenMP/for_reduction_codegen.cpp
+++ b/clang/test/OpenMP/for_reduction_codegen.cpp
@@ -81,7 +81,7 @@ int main() {
// LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
// LAMBDA: call void @__kmpc_for_static_fini(
- // LAMBDA: [[G_PRIV_REF:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[RED_LIST]], i32 0, i32 0
+ // LAMBDA: [[G_PRIV_REF:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[RED_LIST]], i64 0, i64 0
// LAMBDA: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
// LAMBDA: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
// LAMBDA: call i32 @__kmpc_reduce(
@@ -143,7 +143,7 @@ int main() {
// BLOCKS: call void {{%.+}}(i8
// BLOCKS: call void @__kmpc_for_static_fini(
- // BLOCKS: [[G_PRIV_REF:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[RED_LIST]], i32 0, i32 0
+ // BLOCKS: [[G_PRIV_REF:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[RED_LIST]], i64 0, i64 0
// BLOCKS: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
// BLOCKS: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
// BLOCKS: call i32 @__kmpc_reduce(
@@ -243,16 +243,16 @@ int main() {
// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
-// CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 0
+// CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0
// CHECK: [[BITCAST:%.+]] = bitcast float* [[T_VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR_PRIV_REF]],
-// CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 1
+// CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1
// CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR_PRIV_REF]],
-// CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 2
+// CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2
// CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR1_PRIV_REF]],
-// CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 3
+// CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3
// CHECK: [[BITCAST:%.+]] = bitcast float* [[T_VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR1_PRIV_REF]],
@@ -392,38 +392,38 @@ int main() {
// }
// CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*)
// t_var_lhs = (float*)lhs[0];
-// CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i32 0, i32 0
+// CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR_RHS_REF]],
// CHECK: [[T_VAR_RHS:%.+]] = bitcast i8* [[T_VAR_RHS_VOID]] to float*
// t_var_rhs = (float*)rhs[0];
-// CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i32 0, i32 0
+// CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR_LHS_REF]],
// CHECK: [[T_VAR_LHS:%.+]] = bitcast i8* [[T_VAR_LHS_VOID]] to float*
// var_lhs = (S<float>*)lhs[1];
-// CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 1
+// CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 1
// CHECK: [[VAR_RHS_VOID:%.+]] = load i8*, i8** [[VAR_RHS_REF]],
// CHECK: [[VAR_RHS:%.+]] = bitcast i8* [[VAR_RHS_VOID]] to [[S_FLOAT_TY]]*
// var_rhs = (S<float>*)rhs[1];
-// CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 1
+// CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1
// CHECK: [[VAR_LHS_VOID:%.+]] = load i8*, i8** [[VAR_LHS_REF]],
// CHECK: [[VAR_LHS:%.+]] = bitcast i8* [[VAR_LHS_VOID]] to [[S_FLOAT_TY]]*
// var1_lhs = (S<float>*)lhs[2];
-// CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 2
+// CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2
// CHECK: [[VAR1_RHS_VOID:%.+]] = load i8*, i8** [[VAR1_RHS_REF]],
// CHECK: [[VAR1_RHS:%.+]] = bitcast i8* [[VAR1_RHS_VOID]] to [[S_FLOAT_TY]]*
// var1_rhs = (S<float>*)rhs[2];
-// CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 2
+// CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2
// CHECK: [[VAR1_LHS_VOID:%.+]] = load i8*, i8** [[VAR1_LHS_REF]],
// CHECK: [[VAR1_LHS:%.+]] = bitcast i8* [[VAR1_LHS_VOID]] to [[S_FLOAT_TY]]*
// t_var1_lhs = (float*)lhs[3];
-// CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 3
+// CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 3
// CHECK: [[T_VAR1_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_RHS_REF]],
// CHECK: [[T_VAR1_RHS:%.+]] = bitcast i8* [[T_VAR1_RHS_VOID]] to float*
// t_var1_rhs = (float*)rhs[3];
-// CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 3
+// CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3
// CHECK: [[T_VAR1_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_LHS_REF]],
// CHECK: [[T_VAR1_LHS:%.+]] = bitcast i8* [[T_VAR1_LHS_VOID]] to float*
@@ -516,16 +516,16 @@ int main() {
// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
-// CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 0
+// CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0
// CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR_PRIV_REF]],
-// CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 1
+// CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1
// CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR_PRIV_REF]],
-// CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 2
+// CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2
// CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR1_PRIV_REF]],
-// CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 3
+// CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3
// CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR1_PRIV_REF]],
@@ -634,38 +634,38 @@ int main() {
// }
// CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*)
// t_var_lhs = (i{{[0-9]+}}*)lhs[0];
-// CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i32 0, i32 0
+// CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR_RHS_REF]],
// CHECK: [[T_VAR_RHS:%.+]] = bitcast i8* [[T_VAR_RHS_VOID]] to i{{[0-9]+}}*
// t_var_rhs = (i{{[0-9]+}}*)rhs[0];
-// CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i32 0, i32 0
+// CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR_LHS_REF]],
// CHECK: [[T_VAR_LHS:%.+]] = bitcast i8* [[T_VAR_LHS_VOID]] to i{{[0-9]+}}*
// var_lhs = (S<i{{[0-9]+}}>*)lhs[1];
-// CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 1
+// CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 1
// CHECK: [[VAR_RHS_VOID:%.+]] = load i8*, i8** [[VAR_RHS_REF]],
// CHECK: [[VAR_RHS:%.+]] = bitcast i8* [[VAR_RHS_VOID]] to [[S_INT_TY]]*
// var_rhs = (S<i{{[0-9]+}}>*)rhs[1];
-// CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 1
+// CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1
// CHECK: [[VAR_LHS_VOID:%.+]] = load i8*, i8** [[VAR_LHS_REF]],
// CHECK: [[VAR_LHS:%.+]] = bitcast i8* [[VAR_LHS_VOID]] to [[S_INT_TY]]*
// var1_lhs = (S<i{{[0-9]+}}>*)lhs[2];
-// CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 2
+// CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2
// CHECK: [[VAR1_RHS_VOID:%.+]] = load i8*, i8** [[VAR1_RHS_REF]],
// CHECK: [[VAR1_RHS:%.+]] = bitcast i8* [[VAR1_RHS_VOID]] to [[S_INT_TY]]*
// var1_rhs = (S<i{{[0-9]+}}>*)rhs[2];
-// CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 2
+// CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2
// CHECK: [[VAR1_LHS_VOID:%.+]] = load i8*, i8** [[VAR1_LHS_REF]],
// CHECK: [[VAR1_LHS:%.+]] = bitcast i8* [[VAR1_LHS_VOID]] to [[S_INT_TY]]*
// t_var1_lhs = (i{{[0-9]+}}*)lhs[3];
-// CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 3
+// CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 3
// CHECK: [[T_VAR1_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_RHS_REF]],
// CHECK: [[T_VAR1_RHS:%.+]] = bitcast i8* [[T_VAR1_RHS_VOID]] to i{{[0-9]+}}*
// t_var1_rhs = (i{{[0-9]+}}*)rhs[3];
-// CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 3
+// CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3
// CHECK: [[T_VAR1_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_LHS_REF]],
// CHECK: [[T_VAR1_LHS:%.+]] = bitcast i8* [[T_VAR1_LHS_VOID]] to i{{[0-9]+}}*
diff --git a/clang/test/OpenMP/parallel_codegen.cpp b/clang/test/OpenMP/parallel_codegen.cpp
index 6486e44a9da..37d16793574 100644
--- a/clang/test/OpenMP/parallel_codegen.cpp
+++ b/clang/test/OpenMP/parallel_codegen.cpp
@@ -49,7 +49,7 @@ int main (int argc, char **argv) {
// CHECK-DEBUG-DAG: [[LOC_2_ADDR:%.+]] = alloca %ident_t
// CHECK-DEBUG: [[KMPC_LOC_VOIDPTR:%.+]] = bitcast %ident_t* [[LOC_2_ADDR]] to i8*
// CHECK-DEBUG-NEXT: [[KMPC_DEFAULT_LOC_VOIDPTR:%.+]] = bitcast %ident_t* [[DEF_LOC_2]] to i8*
-// CHECK-DEBUG-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[KMPC_LOC_VOIDPTR]], i8* [[KMPC_DEFAULT_LOC_VOIDPTR]], i64 ptrtoint (%ident_t* getelementptr (%ident_t, %ident_t* null, i32 1) to i64), i32 8, i1 false)
+// CHECK-DEBUG-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[KMPC_LOC_VOIDPTR]], i8* [[KMPC_DEFAULT_LOC_VOIDPTR]], i64 24, i32 8, i1 false)
// CHECK-DEBUG: [[ARGC_REF:%.+]] = getelementptr inbounds %struct.anon, %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK-DEBUG-NEXT: store i32* {{%[a-z0-9.]+}}, i32** [[ARGC_REF]]
// CHECK-DEBUG-NEXT: [[KMPC_LOC_PSOURCE_REF:%.+]] = getelementptr inbounds %ident_t, %ident_t* [[LOC_2_ADDR]], i32 0, i32 4
@@ -108,7 +108,7 @@ int main (int argc, char **argv) {
// CHECK-DEBUG-DAG: [[LOC_2_ADDR:%.+]] = alloca %ident_t
// CHECK-DEBUG: [[KMPC_LOC_VOIDPTR:%.+]] = bitcast %ident_t* [[LOC_2_ADDR]] to i8*
// CHECK-DEBUG-NEXT: [[KMPC_DEFAULT_LOC_VOIDPTR:%.+]] = bitcast %ident_t* [[DEF_LOC_2]] to i8*
-// CHECK-DEBUG-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[KMPC_LOC_VOIDPTR]], i8* [[KMPC_DEFAULT_LOC_VOIDPTR]], i64 ptrtoint (%ident_t* getelementptr (%ident_t, %ident_t* null, i32 1) to i64), i32 8, i1 false)
+// CHECK-DEBUG-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[KMPC_LOC_VOIDPTR]], i8* [[KMPC_DEFAULT_LOC_VOIDPTR]], i64 24, i32 8, i1 false)
// CHECK-DEBUG: [[ARGC_REF:%.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK-DEBUG-NEXT: store i8*** {{%[a-z0-9.]+}}, i8**** [[ARGC_REF]]
// CHECK-DEBUG-NEXT: [[KMPC_LOC_PSOURCE_REF:%.+]] = getelementptr inbounds %ident_t, %ident_t* [[LOC_2_ADDR]], i32 0, i32 4
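Both hunks above replace the "sizeof via GEP from null" constant expression with its folded value: stepping a null %ident_t* forward by one element and converting the result to an integer yields exactly the struct size, 24 bytes assuming the usual five-field ident_t layout on a 64-bit target. The two initializers below are equivalent (a sketch; the type layout is an assumption):

  %ident_t = type { i32, i32, i32, i32, i8* }

  ; four i32 fields (16 bytes) plus one 8-byte pointer = 24 bytes
  @size.old = global i64 ptrtoint (%ident_t* getelementptr (%ident_t, %ident_t* null, i32 1) to i64)
  @size.new = global i64 24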
diff --git a/clang/test/OpenMP/parallel_reduction_codegen.cpp b/clang/test/OpenMP/parallel_reduction_codegen.cpp
index 04d19ebea8c..d04676010c7 100644
--- a/clang/test/OpenMP/parallel_reduction_codegen.cpp
+++ b/clang/test/OpenMP/parallel_reduction_codegen.cpp
@@ -74,7 +74,7 @@ int main() {
// LAMBDA: store i{{[0-9]+}}* [[G_PRIVATE_ADDR]], i{{[0-9]+}}** [[G_PRIVATE_ADDR_REF]]
// LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
- // LAMBDA: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i32 0, i32 0
+ // LAMBDA: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i64 0, i64 0
// LAMBDA: [[BITCAST:%.+]] = bitcast i32* [[G_PRIVATE_ADDR]] to i8*
// LAMBDA: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
// LAMBDA: call i32 @__kmpc_reduce_nowait(
@@ -136,7 +136,7 @@ int main() {
// BLOCKS-NOT: [[G]]{{[[^:word:]]}}
// BLOCKS: call void {{%.+}}(i8
- // BLOCKS: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i32 0, i32 0
+ // BLOCKS: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i64 0, i64 0
// BLOCKS: [[BITCAST:%.+]] = bitcast i32* [[G_PRIVATE_ADDR]] to i8*
// BLOCKS: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
// BLOCKS: call i32 @__kmpc_reduce_nowait(
@@ -226,16 +226,16 @@ int main() {
// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
-// CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 0
+// CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0
// CHECK: [[BITCAST:%.+]] = bitcast float* [[T_VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR_PRIV_REF]],
-// CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 1
+// CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1
// CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR_PRIV_REF]],
-// CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 2
+// CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2
// CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR1_PRIV_REF]],
-// CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 3
+// CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3
// CHECK: [[BITCAST:%.+]] = bitcast float* [[T_VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR1_PRIV_REF]],
@@ -373,38 +373,38 @@ int main() {
// }
// CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*)
// t_var_lhs = (float*)lhs[0];
-// CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i32 0, i32 0
+// CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR_RHS_REF]],
// CHECK: [[T_VAR_RHS:%.+]] = bitcast i8* [[T_VAR_RHS_VOID]] to float*
// t_var_rhs = (float*)rhs[0];
-// CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i32 0, i32 0
+// CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR_LHS_REF]],
// CHECK: [[T_VAR_LHS:%.+]] = bitcast i8* [[T_VAR_LHS_VOID]] to float*
// var_lhs = (S<float>*)lhs[1];
-// CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 1
+// CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 1
// CHECK: [[VAR_RHS_VOID:%.+]] = load i8*, i8** [[VAR_RHS_REF]],
// CHECK: [[VAR_RHS:%.+]] = bitcast i8* [[VAR_RHS_VOID]] to [[S_FLOAT_TY]]*
// var_rhs = (S<float>*)rhs[1];
-// CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 1
+// CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1
// CHECK: [[VAR_LHS_VOID:%.+]] = load i8*, i8** [[VAR_LHS_REF]],
// CHECK: [[VAR_LHS:%.+]] = bitcast i8* [[VAR_LHS_VOID]] to [[S_FLOAT_TY]]*
// var1_lhs = (S<float>*)lhs[2];
-// CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 2
+// CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2
// CHECK: [[VAR1_RHS_VOID:%.+]] = load i8*, i8** [[VAR1_RHS_REF]],
// CHECK: [[VAR1_RHS:%.+]] = bitcast i8* [[VAR1_RHS_VOID]] to [[S_FLOAT_TY]]*
// var1_rhs = (S<float>*)rhs[2];
-// CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 2
+// CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2
// CHECK: [[VAR1_LHS_VOID:%.+]] = load i8*, i8** [[VAR1_LHS_REF]],
// CHECK: [[VAR1_LHS:%.+]] = bitcast i8* [[VAR1_LHS_VOID]] to [[S_FLOAT_TY]]*
// t_var1_lhs = (float*)lhs[3];
-// CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 3
+// CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 3
// CHECK: [[T_VAR1_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_RHS_REF]],
// CHECK: [[T_VAR1_RHS:%.+]] = bitcast i8* [[T_VAR1_RHS_VOID]] to float*
// t_var1_rhs = (float*)rhs[3];
-// CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 3
+// CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3
// CHECK: [[T_VAR1_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_LHS_REF]],
// CHECK: [[T_VAR1_LHS:%.+]] = bitcast i8* [[T_VAR1_LHS_VOID]] to float*
@@ -488,16 +488,16 @@ int main() {
// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
-// CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 0
+// CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0
// CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR_PRIV_REF]],
-// CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 1
+// CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1
// CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR_PRIV_REF]],
-// CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 2
+// CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2
// CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR1_PRIV_REF]],
-// CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 3
+// CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3
// CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR1_PRIV_REF]],
@@ -608,38 +608,38 @@ int main() {
// }
// CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*)
// t_var_lhs = (i{{[0-9]+}}*)lhs[0];
-// CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i32 0, i32 0
+// CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR_RHS_REF]],
// CHECK: [[T_VAR_RHS:%.+]] = bitcast i8* [[T_VAR_RHS_VOID]] to i{{[0-9]+}}*
// t_var_rhs = (i{{[0-9]+}}*)rhs[0];
-// CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i32 0, i32 0
+// CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR_LHS_REF]],
// CHECK: [[T_VAR_LHS:%.+]] = bitcast i8* [[T_VAR_LHS_VOID]] to i{{[0-9]+}}*
// var_lhs = (S<i{{[0-9]+}}>*)lhs[1];
-// CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 1
+// CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 1
// CHECK: [[VAR_RHS_VOID:%.+]] = load i8*, i8** [[VAR_RHS_REF]],
// CHECK: [[VAR_RHS:%.+]] = bitcast i8* [[VAR_RHS_VOID]] to [[S_INT_TY]]*
// var_rhs = (S<i{{[0-9]+}}>*)rhs[1];
-// CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 1
+// CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1
// CHECK: [[VAR_LHS_VOID:%.+]] = load i8*, i8** [[VAR_LHS_REF]],
// CHECK: [[VAR_LHS:%.+]] = bitcast i8* [[VAR_LHS_VOID]] to [[S_INT_TY]]*
// var1_lhs = (S<i{{[0-9]+}}>*)lhs[2];
-// CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 2
+// CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2
// CHECK: [[VAR1_RHS_VOID:%.+]] = load i8*, i8** [[VAR1_RHS_REF]],
// CHECK: [[VAR1_RHS:%.+]] = bitcast i8* [[VAR1_RHS_VOID]] to [[S_INT_TY]]*
// var1_rhs = (S<i{{[0-9]+}}>*)rhs[2];
-// CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 2
+// CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2
// CHECK: [[VAR1_LHS_VOID:%.+]] = load i8*, i8** [[VAR1_LHS_REF]],
// CHECK: [[VAR1_LHS:%.+]] = bitcast i8* [[VAR1_LHS_VOID]] to [[S_INT_TY]]*
// t_var1_lhs = (i{{[0-9]+}}*)lhs[3];
-// CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 3
+// CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 3
// CHECK: [[T_VAR1_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_RHS_REF]],
// CHECK: [[T_VAR1_RHS:%.+]] = bitcast i8* [[T_VAR1_RHS_VOID]] to i{{[0-9]+}}*
// t_var1_rhs = (i{{[0-9]+}}*)rhs[3];
-// CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 3
+// CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3
// CHECK: [[T_VAR1_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_LHS_REF]],
// CHECK: [[T_VAR1_LHS:%.+]] = bitcast i8* [[T_VAR1_LHS_VOID]] to i{{[0-9]+}}*
diff --git a/clang/test/OpenMP/sections_reduction_codegen.cpp b/clang/test/OpenMP/sections_reduction_codegen.cpp
index 4d404dbcc6d..3c2525790ba 100644
--- a/clang/test/OpenMP/sections_reduction_codegen.cpp
+++ b/clang/test/OpenMP/sections_reduction_codegen.cpp
@@ -74,7 +74,7 @@ int main() {
// LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
// LAMBDA: call void @__kmpc_for_static_fini(
- // LAMBDA: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i32 0, i32 0
+ // LAMBDA: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i64 0, i64 0
// LAMBDA: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
// LAMBDA: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
// LAMBDA: call i32 @__kmpc_reduce(
@@ -135,7 +135,7 @@ int main() {
// BLOCKS: call void {{%.+}}(i8
// BLOCKS: call void @__kmpc_for_static_fini(
- // BLOCKS: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i32 0, i32 0
+ // BLOCKS: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i64 0, i64 0
// BLOCKS: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
// BLOCKS: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
// BLOCKS: call i32 @__kmpc_reduce(
@@ -277,16 +277,16 @@ int main() {
// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
-// CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 0
+// CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0
// CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR_PRIV_REF]],
-// CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 1
+// CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1
// CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR_PRIV_REF]],
-// CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 2
+// CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2
// CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR1_PRIV_REF]],
-// CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 3
+// CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3
// CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR1_PRIV_REF]],
@@ -395,38 +395,38 @@ int main() {
// }
// CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*)
// t_var_lhs = (i{{[0-9]+}}*)lhs[0];
-// CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i32 0, i32 0
+// CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR_RHS_REF]],
// CHECK: [[T_VAR_RHS:%.+]] = bitcast i8* [[T_VAR_RHS_VOID]] to i{{[0-9]+}}*
// t_var_rhs = (i{{[0-9]+}}*)rhs[0];
-// CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i32 0, i32 0
+// CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR_LHS_REF]],
// CHECK: [[T_VAR_LHS:%.+]] = bitcast i8* [[T_VAR_LHS_VOID]] to i{{[0-9]+}}*
// var_lhs = (S<i{{[0-9]+}}>*)lhs[1];
-// CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 1
+// CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 1
// CHECK: [[VAR_RHS_VOID:%.+]] = load i8*, i8** [[VAR_RHS_REF]],
// CHECK: [[VAR_RHS:%.+]] = bitcast i8* [[VAR_RHS_VOID]] to [[S_INT_TY]]*
// var_rhs = (S<i{{[0-9]+}}>*)rhs[1];
-// CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 1
+// CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1
// CHECK: [[VAR_LHS_VOID:%.+]] = load i8*, i8** [[VAR_LHS_REF]],
// CHECK: [[VAR_LHS:%.+]] = bitcast i8* [[VAR_LHS_VOID]] to [[S_INT_TY]]*
// var1_lhs = (S<i{{[0-9]+}}>*)lhs[2];
-// CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 2
+// CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2
// CHECK: [[VAR1_RHS_VOID:%.+]] = load i8*, i8** [[VAR1_RHS_REF]],
// CHECK: [[VAR1_RHS:%.+]] = bitcast i8* [[VAR1_RHS_VOID]] to [[S_INT_TY]]*
// var1_rhs = (S<i{{[0-9]+}}>*)rhs[2];
-// CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 2
+// CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2
// CHECK: [[VAR1_LHS_VOID:%.+]] = load i8*, i8** [[VAR1_LHS_REF]],
// CHECK: [[VAR1_LHS:%.+]] = bitcast i8* [[VAR1_LHS_VOID]] to [[S_INT_TY]]*
// t_var1_lhs = (i{{[0-9]+}}*)lhs[3];
-// CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 3
+// CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 3
// CHECK: [[T_VAR1_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_RHS_REF]],
// CHECK: [[T_VAR1_RHS:%.+]] = bitcast i8* [[T_VAR1_RHS_VOID]] to i{{[0-9]+}}*
// t_var1_rhs = (i{{[0-9]+}}*)rhs[3];
-// CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 3
+// CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3
// CHECK: [[T_VAR1_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_LHS_REF]],
// CHECK: [[T_VAR1_LHS:%.+]] = bitcast i8* [[T_VAR1_LHS_VOID]] to i{{[0-9]+}}*
diff --git a/clang/test/OpenMP/task_codegen.cpp b/clang/test/OpenMP/task_codegen.cpp
index 16a97d2ed77..433578d681f 100644
--- a/clang/test/OpenMP/task_codegen.cpp
+++ b/clang/test/OpenMP/task_codegen.cpp
@@ -53,29 +53,29 @@ int main() {
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[SHAREDS_REF]], i8* [[BITCAST]], i64 8, i32 8, i1 false)
// CHECK: [[DESTRUCTORS_REF_PTR:%.+]] = getelementptr inbounds [[KMP_TASK_T]], [[KMP_TASK_T]]* [[TASK_PTR]], i32 0, i32 3
// CHECK: store i32 (i32, i8*)* null, i32 (i32, i8*)** [[DESTRUCTORS_REF_PTR]]
-// CHECK: getelementptr inbounds [4 x [[KMP_DEPEND_INFO]]], [4 x [[KMP_DEPEND_INFO]]]* %{{[^,]+}}, i32 0, i32 0
-// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 0
-// CHECK: store i64 ptrtoint (i32* @{{.+}} to i64), i64*
-// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 1
-// CHECK: store i64 4, i64*
-// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 2
-// CHECK: store i8 1, i8*
-// CHECK: getelementptr inbounds [4 x [[KMP_DEPEND_INFO]]], [4 x [[KMP_DEPEND_INFO]]]* %{{[^,]+}}, i32 0, i32 1
-// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 0
+// CHECK: [[DEP:%.*]] = getelementptr inbounds [4 x [[KMP_DEPEND_INFO]]], [4 x [[KMP_DEPEND_INFO]]]* [[DEPENDENCIES:%.*]], i64 0, i64 0
+// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 0
+// CHECK: store i64 ptrtoint (i32* @{{.+}} to i64), i64* [[T0]]
+// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 1
+// CHECK: store i64 4, i64* [[T0]]
+// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 2
+// CHECK: store i8 1, i8* [[T0]]
+// CHECK: [[DEP:%.*]] = getelementptr inbounds [4 x [[KMP_DEPEND_INFO]]], [4 x [[KMP_DEPEND_INFO]]]* [[DEPENDENCIES]], i64 0, i64 1
+// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 0
// CHECK: ptrtoint i8* [[B]] to i64
-// CHECK: store i64 %{{[^,]+}}, i64*
-// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 1
-// CHECK: store i64 1, i64*
-// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 2
-// CHECK: store i8 1, i8*
-// CHECK: getelementptr inbounds [4 x [[KMP_DEPEND_INFO]]], [4 x [[KMP_DEPEND_INFO]]]* %{{[^,]+}}, i32 0, i32 2
-// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 0
+// CHECK: store i64 %{{[^,]+}}, i64* [[T0]]
+// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 1
+// CHECK: store i64 1, i64* [[T0]]
+// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 2
+// CHECK: store i8 1, i8* [[T0]]
+// CHECK: [[DEP:%.*]] = getelementptr inbounds [4 x [[KMP_DEPEND_INFO]]], [4 x [[KMP_DEPEND_INFO]]]* [[DEPENDENCIES]], i64 0, i64 2
+// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 0
// CHECK: ptrtoint [2 x [[STRUCT_S]]]* [[S]] to i64
-// CHECK: store i64 %{{[^,]+}}, i64*
-// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 1
-// CHECK: store i64 8, i64*
-// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 2
-// CHECK: store i8 1, i8*
+// CHECK: store i64 %{{[^,]+}}, i64* [[T0]]
+// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 1
+// CHECK: store i64 8, i64* [[T0]]
+// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 2
+// CHECK: store i8 1, i8* [[T0]]
// CHECK: [[IDX1:%.+]] = mul nsw i64 0, [[A_VAL:%.+]]
// CHECK: [[START:%.+]] = getelementptr inbounds i32, i32* %{{.+}}, i64 [[IDX1]]
// CHECK: [[IDX1:%.+]] = mul nsw i64 9, [[A_VAL]]
@@ -84,16 +84,16 @@ int main() {
// CHECK: [[START_INT:%.+]] = ptrtoint i32* [[START]] to i64
// CHECK: [[END_INT:%.+]] = ptrtoint i32* [[END1]] to i64
// CHECK: [[SIZEOF:%.+]] = sub nuw i64 [[END_INT]], [[START_INT]]
-// CHECK: getelementptr inbounds [4 x [[KMP_DEPEND_INFO]]], [4 x [[KMP_DEPEND_INFO]]]* %{{[^,]+}}, i32 0, i32 3
-// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 0
-// CHECK: ptrtoint i32* [[START]] to i64
-// CHECK: store i64 %{{[^,]+}}, i64*
-// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 1
-// CHECK: store i64 [[SIZEOF]], i64*
-// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 2
-// CHECK: store i8 1, i8*
-// CHECK: getelementptr inbounds [4 x [[KMP_DEPEND_INFO]]], [4 x [[KMP_DEPEND_INFO]]]* %{{[^,]+}}, i32 0, i32 0
-// CHECK: bitcast [[KMP_DEPEND_INFO]]* %{{.+}} to i8*
+// CHECK: [[DEP:%.*]] = getelementptr inbounds [4 x [[KMP_DEPEND_INFO]]], [4 x [[KMP_DEPEND_INFO]]]* %{{[^,]+}}, i64 0, i64 3
+// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 0
+// CHECK: [[T1:%.*]] = ptrtoint i32* [[START]] to i64
+// CHECK: store i64 [[T1]], i64* [[T0]]
+// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 1
+// CHECK: store i64 [[SIZEOF]], i64* [[T0]]
+// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 2
+// CHECK: store i8 1, i8* [[T0]]
+// CHECK: [[DEPS:%.*]] = getelementptr inbounds [4 x [[KMP_DEPEND_INFO]]], [4 x [[KMP_DEPEND_INFO]]]* [[DEPENDENCIES]], i32 0, i32 0
+// CHECK: bitcast [[KMP_DEPEND_INFO]]* [[DEPS]] to i8*
// CHECK: call i32 @__kmpc_omp_task_with_deps([[IDENT_T]]* @{{.+}}, i32 [[GTID]], i8* [[ORIG_TASK_PTR]], i32 4, i8* %{{[^,]+}}, i32 0, i8* null)
#pragma omp task shared(a, s) depend(in : a, b, s, arr[:])
{
@@ -111,8 +111,8 @@ int main() {
// CHECK: [[ORIG_TASK_PTR:%.+]] = call i8* @__kmpc_omp_task_alloc([[IDENT_T]]* @{{.+}}, i32 [[GTID]], i32 0, i64 32, i64 1,
// CHECK: [[DESTRUCTORS_REF_PTR:%.+]] = getelementptr inbounds [[KMP_TASK_T]]{{.*}}* {{%.+}}, i32 0, i32 3
// CHECK: store i32 (i32, i8*)* null, i32 (i32, i8*)** [[DESTRUCTORS_REF_PTR]]
-// CHECK: getelementptr inbounds [2 x [[STRUCT_S]]], [2 x [[STRUCT_S]]]* [[S]], i32 0, i64 0
-// CHECK: getelementptr inbounds [2 x [[KMP_DEPEND_INFO]]], [2 x [[KMP_DEPEND_INFO]]]* %{{[^,]+}}, i32 0, i32 0
+// CHECK: getelementptr inbounds [2 x [[STRUCT_S]]], [2 x [[STRUCT_S]]]* [[S]], i64 0, i64 0
+// CHECK: getelementptr inbounds [2 x [[KMP_DEPEND_INFO]]], [2 x [[KMP_DEPEND_INFO]]]* %{{[^,]+}}, i64 0, i64 0
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 0
// CHECK: ptrtoint [[STRUCT_S]]* %{{.+}} to i64
// CHECK: store i64 %{{[^,]+}}, i64*
@@ -134,7 +134,7 @@ int main() {
// CHECK: [[START_INT:%.+]] = ptrtoint i32* [[START1]] to i64
// CHECK: [[END_INT:%.+]] = ptrtoint i32* [[END2]] to i64
// CHECK: [[SIZEOF:%.+]] = sub nuw i64 [[END_INT]], [[START_INT]]
-// CHECK: getelementptr inbounds [2 x [[KMP_DEPEND_INFO]]], [2 x [[KMP_DEPEND_INFO]]]* %{{[^,]+}}, i32 0, i32 1
+// CHECK: getelementptr inbounds [2 x [[KMP_DEPEND_INFO]]], [2 x [[KMP_DEPEND_INFO]]]* %{{[^,]+}}, i64 0, i64 1
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 0
// CHECK: ptrtoint i32* [[START1]] to i64
// CHECK: store i64 %{{[^,]+}}, i64*
@@ -152,15 +152,15 @@ int main() {
// CHECK: [[ORIG_TASK_PTR:%.+]] = call i8* @__kmpc_omp_task_alloc([[IDENT_T]]* @{{.+}}, i32 [[GTID]], i32 3, i64 32, i64 1,
// CHECK: [[DESTRUCTORS_REF_PTR:%.+]] = getelementptr inbounds [[KMP_TASK_T]]{{.*}}* {{%.+}}, i32 0, i32 3
// CHECK: store i32 (i32, i8*)* null, i32 (i32, i8*)** [[DESTRUCTORS_REF_PTR]]
-// CHECK: getelementptr inbounds [3 x [[KMP_DEPEND_INFO]]], [3 x [[KMP_DEPEND_INFO]]]* %{{[^,]+}}, i32 0, i32 0
+// CHECK: getelementptr inbounds [3 x [[KMP_DEPEND_INFO]]], [3 x [[KMP_DEPEND_INFO]]]* %{{[^,]+}}, i64 0, i64 0
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 0
// CHECK: store i64 ptrtoint (i32* @{{.+}} to i64), i64*
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 1
// CHECK: store i64 4, i64*
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 2
// CHECK: store i8 3, i8*
-// CHECK: getelementptr inbounds [2 x [[STRUCT_S]]], [2 x [[STRUCT_S]]]* [[S]], i32 0, i64 1
-// CHECK: getelementptr inbounds [3 x [[KMP_DEPEND_INFO]]], [3 x [[KMP_DEPEND_INFO]]]* %{{[^,]+}}, i32 0, i32 1
+// CHECK: getelementptr inbounds [2 x [[STRUCT_S]]], [2 x [[STRUCT_S]]]* [[S]], i64 0, i64 1
+// CHECK: getelementptr inbounds [3 x [[KMP_DEPEND_INFO]]], [3 x [[KMP_DEPEND_INFO]]]* %{{[^,]+}}, i64 0, i64 1
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 0
// CHECK: ptrtoint [[STRUCT_S]]* %{{.+}} to i64
// CHECK: store i64 %{{[^,]+}}, i64*
@@ -184,7 +184,7 @@ int main() {
// CHECK: [[START_INT:%.+]] = ptrtoint i32* [[START1]] to i64
// CHECK: [[END_INT:%.+]] = ptrtoint i32* [[END2]] to i64
// CHECK: [[SIZEOF:%.+]] = sub nuw i64 [[END_INT]], [[START_INT]]
-// CHECK: getelementptr inbounds [3 x [[KMP_DEPEND_INFO]]], [3 x [[KMP_DEPEND_INFO]]]* %{{[^,]+}}, i32 0, i32 2
+// CHECK: getelementptr inbounds [3 x [[KMP_DEPEND_INFO]]], [3 x [[KMP_DEPEND_INFO]]]* %{{[^,]+}}, i64 0, i64 2
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 0
// CHECK: ptrtoint i32* [[START1]] to i64
// CHECK: store i64 %{{[^,]+}}, i64*
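Beyond the i32-to-i64 index change, the task_codegen.cpp hunks also tighten the checks themselves: anonymous %{{[^,]+}} placeholders become named FileCheck captures, so each store is verified against the exact GEP that produced its destination. The idiom, sketched on one dependence entry (patterns illustrative):

  // CHECK: [[DEP:%.*]] = getelementptr inbounds [4 x [[KMP_DEPEND_INFO]]], [4 x [[KMP_DEPEND_INFO]]]* [[DEPENDENCIES]], i64 0, i64 0
  // CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 1
  // CHECK: store i64 4, i64* [[T0]]

Reusing [[T0]] on a later line requires the same SSA value to reappear; writing [[T0:%.*]] again starts a fresh capture, which is why each field GEP re-binds the name before its store.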