author    John McCall <rjmccall@apple.com>  2013-03-07 21:37:17 +0000
committer John McCall <rjmccall@apple.com>  2013-03-07 21:37:17 +0000
commit    a8ec7eb9cfe2adeb1b5d03a94f6cee70972e47b7 (patch)
tree      751e0d6a8bad1e2e901a95c64693639b1157eb34  /clang/lib/CodeGen/CGAtomic.cpp
parent    fc207f2d70baeadfe191283bf035c56876f6c9e8 (diff)
Promote atomic type sizes up to a power of two, capped by MaxAtomicPromoteWidth.

Fix a ton of terrible bugs with _Atomic types and (non-intrinsic-mediated) loads
and stores thereto.

llvm-svn: 176658
Diffstat (limited to 'clang/lib/CodeGen/CGAtomic.cpp')
-rw-r--r--  clang/lib/CodeGen/CGAtomic.cpp | 505
1 file changed, 481 insertions(+), 24 deletions(-)
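
The central change is the size promotion named in the commit message: an _Atomic object's storage is rounded up to the next power of two unless that would exceed the target's MaxAtomicPromoteWidth (the rounding itself happens where the atomic type's size is computed, outside this file). The sketch below is only an illustration of that rule, with an invented helper name, not Clang's actual code:

#include <cassert>
#include <cstdint>

// Round the value's size up to the next power of two, but leave the type
// alone if that would exceed the target's MaxAtomicPromoteWidth.
// (Illustrative sketch only.)
static uint64_t promoteAtomicWidth(uint64_t valueBits, uint64_t maxPromoteBits) {
  uint64_t width = 8;                    // at least one byte
  while (width < valueBits) width *= 2;  // next power of two
  return width <= maxPromoteBits ? width : valueBits;
}

int main() {
  assert(promoteAtomicWidth(24, 64) == 32);  // 3-byte payload -> 4-byte atomic
  assert(promoteAtomicWidth(48, 64) == 64);  // 6-byte payload -> 8-byte atomic
  assert(promoteAtomicWidth(96, 64) == 96);  // over the cap: left unpromoted
  return 0;
}
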
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index f17e48d2f2a..817d5c4cc68 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -17,10 +17,169 @@
#include "clang/AST/ASTContext.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Operator.h"
using namespace clang;
using namespace CodeGen;
+// The ABI values for various atomic memory orderings.
+enum AtomicOrderingKind {
+ AO_ABI_memory_order_relaxed = 0,
+ AO_ABI_memory_order_consume = 1,
+ AO_ABI_memory_order_acquire = 2,
+ AO_ABI_memory_order_release = 3,
+ AO_ABI_memory_order_acq_rel = 4,
+ AO_ABI_memory_order_seq_cst = 5
+};
+
+namespace {
+ class AtomicInfo {
+ CodeGenFunction &CGF;
+ QualType AtomicTy;
+ QualType ValueTy;
+ uint64_t AtomicSizeInBits;
+ uint64_t ValueSizeInBits;
+ CharUnits AtomicAlign;
+ CharUnits ValueAlign;
+ CharUnits LValueAlign;
+ TypeEvaluationKind EvaluationKind;
+ bool UseLibcall;
+ public:
+ AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
+ assert(lvalue.isSimple());
+
+ AtomicTy = lvalue.getType();
+ ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
+ EvaluationKind = CGF.getEvaluationKind(ValueTy);
+
+ ASTContext &C = CGF.getContext();
+
+ uint64_t valueAlignInBits;
+ llvm::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);
+
+ uint64_t atomicAlignInBits;
+ llvm::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);
+
+ assert(ValueSizeInBits <= AtomicSizeInBits);
+ assert(valueAlignInBits <= atomicAlignInBits);
+
+ AtomicAlign = C.toCharUnitsFromBits(atomicAlignInBits);
+ ValueAlign = C.toCharUnitsFromBits(valueAlignInBits);
+ if (lvalue.getAlignment().isZero())
+ lvalue.setAlignment(AtomicAlign);
+
+ UseLibcall =
+ (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
+ AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
+ }
+
+ QualType getAtomicType() const { return AtomicTy; }
+ QualType getValueType() const { return ValueTy; }
+ CharUnits getAtomicAlignment() const { return AtomicAlign; }
+ CharUnits getValueAlignment() const { return ValueAlign; }
+ uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
+ uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
+ TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
+ bool shouldUseLibcall() const { return UseLibcall; }
+
+ /// Is the atomic size larger than the underlying value type?
+ ///
+ /// Note that the absence of padding does not mean that atomic
+ /// objects are completely interchangeable with non-atomic
+ /// objects: we might have promoted the alignment of a type
+ /// without making it bigger.
+ bool hasPadding() const {
+ return (ValueSizeInBits != AtomicSizeInBits);
+ }
+
+ void emitMemSetZeroIfNecessary(LValue dest) const;
+
+ llvm::Value *getAtomicSizeValue() const {
+ CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
+ return CGF.CGM.getSize(size);
+ }
+
+ /// Cast the given pointer to an integer pointer suitable for
+ /// atomic operations.
+ llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;
+
+ /// Turn an atomic-layout object into an r-value.
+ RValue convertTempToRValue(llvm::Value *addr,
+ AggValueSlot resultSlot) const;
+
+ /// Copy an atomic r-value into atomic-layout memory.
+ void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;
+
+ /// Project an l-value down to the value field.
+ LValue projectValue(LValue lvalue) const {
+ llvm::Value *addr = lvalue.getAddress();
+ if (hasPadding())
+ addr = CGF.Builder.CreateStructGEP(addr, 0);
+
+ return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
+ CGF.getContext(), lvalue.getTBAAInfo());
+ }
+
+ /// Materialize an atomic r-value in atomic-layout memory.
+ llvm::Value *materializeRValue(RValue rvalue) const;
+
+ private:
+ bool requiresMemSetZero(llvm::Type *type) const;
+ };
+}
+
+static RValue emitAtomicLibcall(CodeGenFunction &CGF,
+ StringRef fnName,
+ QualType resultType,
+ CallArgList &args) {
+ const CGFunctionInfo &fnInfo =
+ CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
+ FunctionType::ExtInfo(), RequiredArgs::All);
+ llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
+ llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
+ return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
+}
+
+/// Does a store of the given IR type modify the full expected width?
+static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
+ uint64_t expectedSize) {
+ return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
+}
+
+/// Does the atomic type require memsetting to zero before initialization?
+///
+/// The IR type is provided as a way of making certain queries faster.
+bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
+ // If the atomic type has size padding, we definitely need a memset.
+ if (hasPadding()) return true;
+
+ // Otherwise, do some simple heuristics to try to avoid it:
+ switch (getEvaluationKind()) {
+ // For scalars and complexes, check whether the store size of the
+ // type uses the full size.
+ case TEK_Scalar:
+ return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
+ case TEK_Complex:
+ return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
+ AtomicSizeInBits / 2);
+
+ // Just be pessimistic about aggregates.
+ case TEK_Aggregate:
+ return true;
+ }
+ llvm_unreachable("bad evaluation kind");
+}
+
+void AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
+ llvm::Value *addr = dest.getAddress();
+ if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
+ return;
+
+ CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
+ AtomicSizeInBits / 8,
+ dest.getAlignment().getQuantity());
+}
+
static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
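
The AtomicInfo helper added in the hunk above captures the two facts the rest of the patch keys off of: whether the promoted object is wider than the value it wraps (hasPadding) and whether the operation has to go through the __atomic_* runtime instead of an inline instruction (shouldUseLibcall). A minimal standalone sketch of those two predicates, with invented field names and illustrative numbers:

#include <cstdint>

// Stand-in for the sizes AtomicInfo computes via ASTContext::getTypeInfo.
struct AtomicLayout {
  uint64_t valueSizeInBits;       // size of the underlying value type
  uint64_t atomicSizeInBits;      // size after power-of-two promotion
  uint64_t alignmentInBits;       // alignment of the l-value being accessed
  uint64_t maxAtomicInlineWidth;  // target's widest inline atomic

  // The promoted object carries padding bits beyond the value itself.
  bool hasPadding() const { return valueSizeInBits != atomicSizeInBits; }

  // Same rule as the AtomicInfo constructor: too big for the known alignment,
  // or too big for the target's inline atomics, means a library call.
  bool shouldUseLibcall() const {
    return atomicSizeInBits > alignmentInBits ||
           atomicSizeInBits > maxAtomicInlineWidth;
  }
};

// Example: a 3-byte value promoted to a 4-byte, 4-byte-aligned atomic on a
// target with 64-bit inline atomics has padding but still stays inline:
//   AtomicLayout{24, 32, 32, 64}.hasPadding()       -> true
//   AtomicLayout{24, 32, 32, 64}.shouldUseLibcall() -> false
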
@@ -177,24 +336,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
assert(!Dest && "Init does not return a value");
- LValue LV = MakeAddrLValue(Ptr, AtomicTy, alignChars);
- switch (getEvaluationKind(E->getVal1()->getType())) {
- case TEK_Scalar:
- EmitScalarInit(EmitScalarExpr(E->getVal1()), LV);
- return RValue::get(0);
- case TEK_Complex:
- EmitComplexExprIntoLValue(E->getVal1(), LV, /*isInit*/ true);
- return RValue::get(0);
- case TEK_Aggregate: {
- AggValueSlot Slot = AggValueSlot::forLValue(LV,
- AggValueSlot::IsNotDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
- EmitAggExpr(E->getVal1(), Slot);
- return RValue::get(0);
- }
- }
- llvm_unreachable("bad evaluation kind");
+ LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
+ EmitAtomicInit(E->getVal1(), lvalue);
+ return RValue::get(0);
}
Order = EmitScalarExpr(E->getOrder());
@@ -385,30 +529,30 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
switch (ord) {
- case 0: // memory_order_relaxed
+ case AO_ABI_memory_order_relaxed:
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Monotonic);
break;
- case 1: // memory_order_consume
- case 2: // memory_order_acquire
+ case AO_ABI_memory_order_consume:
+ case AO_ABI_memory_order_acquire:
if (IsStore)
break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Acquire);
break;
- case 3: // memory_order_release
+ case AO_ABI_memory_order_release:
if (IsLoad)
break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Release);
break;
- case 4: // memory_order_acq_rel
+ case AO_ABI_memory_order_acq_rel:
if (IsLoad || IsStore)
break; // Avoid crashing on code with undefined behavior
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::AcquireRelease);
break;
- case 5: // memory_order_seq_cst
+ case AO_ABI_memory_order_seq_cst:
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::SequentiallyConsistent);
break;
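
For reference, the named constants substituted in this hunk map the C11 memory_order ABI values onto LLVM orderings as follows; consume is conservatively strengthened to acquire, and orderings that are invalid for a pure load or store are skipped rather than emitted:

  AO_ABI_memory_order_relaxed (0) -> llvm::Monotonic
  AO_ABI_memory_order_consume (1) -> llvm::Acquire
  AO_ABI_memory_order_acquire (2) -> llvm::Acquire
  AO_ABI_memory_order_release (3) -> llvm::Release
  AO_ABI_memory_order_acq_rel (4) -> llvm::AcquireRelease
  AO_ABI_memory_order_seq_cst (5) -> llvm::SequentiallyConsistent
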
@@ -483,3 +627,316 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
return RValue::get(0);
return convertTempToRValue(OrigDest, E->getType());
}
+
+llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
+ unsigned addrspace =
+ cast<llvm::PointerType>(addr->getType())->getAddressSpace();
+ llvm::IntegerType *ty =
+ llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
+ return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
+}
+
+RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
+ AggValueSlot resultSlot) const {
+ if (EvaluationKind == TEK_Aggregate) {
+ // Nothing to do if the result is ignored.
+ if (resultSlot.isIgnored()) return resultSlot.asRValue();
+
+ assert(resultSlot.getAddr() == addr || hasPadding());
+
+ // In these cases, we should have emitted directly into the result slot.
+ if (!hasPadding() || resultSlot.isValueOfAtomic())
+ return resultSlot.asRValue();
+
+ // Otherwise, fall into the common path.
+ }
+
+ // Drill into the padding structure if we have one.
+ if (hasPadding())
+ addr = CGF.Builder.CreateStructGEP(addr, 0);
+
+ // If we're emitting to an aggregate, copy into the result slot.
+ if (EvaluationKind == TEK_Aggregate) {
+ CGF.EmitAggregateCopy(resultSlot.getAddr(), addr, getValueType(),
+ resultSlot.isVolatile());
+ return resultSlot.asRValue();
+ }
+
+ // Otherwise, just convert the temporary to an r-value using the
+ // normal conversion routine.
+ return CGF.convertTempToRValue(addr, getValueType());
+}
+
+/// Emit a load from an l-value of atomic type. Note that the r-value
+/// we produce is an r-value of the atomic *value* type.
+RValue CodeGenFunction::EmitAtomicLoad(LValue src, AggValueSlot resultSlot) {
+ AtomicInfo atomics(*this, src);
+
+ // Check whether we should use a library call.
+ if (atomics.shouldUseLibcall()) {
+ llvm::Value *tempAddr;
+ if (resultSlot.isValueOfAtomic()) {
+ assert(atomics.getEvaluationKind() == TEK_Aggregate);
+ tempAddr = resultSlot.getPaddedAtomicAddr();
+ } else if (!resultSlot.isIgnored() && !atomics.hasPadding()) {
+ assert(atomics.getEvaluationKind() == TEK_Aggregate);
+ tempAddr = resultSlot.getAddr();
+ } else {
+ tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
+ }
+
+ // void __atomic_load(size_t size, void *mem, void *return, int order);
+ CallArgList args;
+ args.add(RValue::get(atomics.getAtomicSizeValue()),
+ getContext().getSizeType());
+ args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
+ getContext().VoidPtrTy);
+ args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
+ getContext().VoidPtrTy);
+ args.add(RValue::get(llvm::ConstantInt::get(IntTy,
+ AO_ABI_memory_order_seq_cst)),
+ getContext().IntTy);
+ emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);
+
+ // Produce the r-value.
+ return atomics.convertTempToRValue(tempAddr, resultSlot);
+ }
+
+ // Okay, we're doing this natively.
+ llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
+ llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
+ load->setAtomic(llvm::SequentiallyConsistent);
+
+ // Other decoration.
+ load->setAlignment(src.getAlignment().getQuantity());
+ if (src.isVolatileQualified())
+ load->setVolatile(true);
+ if (src.getTBAAInfo())
+ CGM.DecorateInstruction(load, src.getTBAAInfo());
+
+ // Okay, turn that back into the original value type.
+ QualType valueType = atomics.getValueType();
+ llvm::Value *result = load;
+
+ // If we're ignoring an aggregate return, don't do anything.
+ if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
+ return RValue::getAggregate(0, false);
+
+ // The easiest way to do this is to go through memory, but we
+ // try not to in some easy cases.
+ if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
+ llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
+ if (isa<llvm::IntegerType>(resultTy)) {
+ assert(result->getType() == resultTy);
+ result = EmitFromMemory(result, valueType);
+ } else if (isa<llvm::PointerType>(resultTy)) {
+ result = Builder.CreateIntToPtr(result, resultTy);
+ } else {
+ result = Builder.CreateBitCast(result, resultTy);
+ }
+ return RValue::get(result);
+ }
+
+ // Create a temporary. This needs to be big enough to hold the
+ // atomic integer.
+ llvm::Value *temp;
+ bool tempIsVolatile = false;
+ CharUnits tempAlignment;
+ if (atomics.getEvaluationKind() == TEK_Aggregate &&
+ (!atomics.hasPadding() || resultSlot.isValueOfAtomic())) {
+ assert(!resultSlot.isIgnored());
+ if (resultSlot.isValueOfAtomic()) {
+ temp = resultSlot.getPaddedAtomicAddr();
+ tempAlignment = atomics.getAtomicAlignment();
+ } else {
+ temp = resultSlot.getAddr();
+ tempAlignment = atomics.getValueAlignment();
+ }
+ tempIsVolatile = resultSlot.isVolatile();
+ } else {
+ temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
+ tempAlignment = atomics.getAtomicAlignment();
+ }
+
+ // Slam the integer into the temporary.
+ llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
+ Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
+ ->setVolatile(tempIsVolatile);
+
+ return atomics.convertTempToRValue(temp, resultSlot);
+}
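
Both libcall paths in this patch target the generic libatomic entry points quoted in the code comments; the calls emitted by EmitAtomicLoad above and EmitAtomicStore below have the shape:

  void __atomic_load (size_t size, void *mem, void *return, int order);
  void __atomic_store(size_t size, void *mem, void *val,    int order);

with the object's padded size first, the atomic object second, the temporary (destination of a load, source of a store) third, and the ABI ordering value last; these paths always pass AO_ABI_memory_order_seq_cst (5).
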
+
+
+
+/// Copy an r-value into memory as part of storing to an atomic type.
+/// This needs to create a bit-pattern suitable for atomic operations.
+void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
+ // If we have an r-value, the rvalue should be of the atomic type,
+ // which means that the caller is responsible for having zeroed
+ // any padding. Just do an aggregate copy of that type.
+ if (rvalue.isAggregate()) {
+ CGF.EmitAggregateCopy(dest.getAddress(),
+ rvalue.getAggregateAddr(),
+ getAtomicType(),
+ (rvalue.isVolatileQualified()
+ || dest.isVolatileQualified()),
+ dest.getAlignment());
+ return;
+ }
+
+ // Okay, otherwise we're copying stuff.
+
+ // Zero out the buffer if necessary.
+ emitMemSetZeroIfNecessary(dest);
+
+ // Drill past the padding if present.
+ dest = projectValue(dest);
+
+ // Okay, store the rvalue in.
+ if (rvalue.isScalar()) {
+ CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
+ } else {
+ CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
+ }
+}
+
+
+/// Materialize an r-value into memory for the purposes of storing it
+/// to an atomic type.
+llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
+ // Aggregate r-values are already in memory, and EmitAtomicStore
+ // requires them to be values of the atomic type.
+ if (rvalue.isAggregate())
+ return rvalue.getAggregateAddr();
+
+ // Otherwise, make a temporary and materialize into it.
+ llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
+ LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
+ emitCopyIntoMemory(rvalue, tempLV);
+ return temp;
+}
+
+/// Emit a store to an l-value of atomic type.
+///
+/// Note that the r-value is expected to be an r-value *of the atomic
+/// type*; this means that for aggregate r-values, it should include
+/// storage for any padding that was necessary.
+void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
+ bool isInit) {
+ // If this is an aggregate r-value, it should agree in type except
+ // maybe for address-space qualification.
+ assert(!rvalue.isAggregate() ||
+ rvalue.getAggregateAddr()->getType()->getPointerElementType()
+ == dest.getAddress()->getType()->getPointerElementType());
+
+ AtomicInfo atomics(*this, dest);
+
+ // If this is an initialization, just put the value there normally.
+ if (isInit) {
+ atomics.emitCopyIntoMemory(rvalue, dest);
+ return;
+ }
+
+ // Check whether we should use a library call.
+ if (atomics.shouldUseLibcall()) {
+ // Produce a source address.
+ llvm::Value *srcAddr = atomics.materializeRValue(rvalue);
+
+ // void __atomic_store(size_t size, void *mem, void *val, int order)
+ CallArgList args;
+ args.add(RValue::get(atomics.getAtomicSizeValue()),
+ getContext().getSizeType());
+ args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
+ getContext().VoidPtrTy);
+ args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
+ getContext().VoidPtrTy);
+ args.add(RValue::get(llvm::ConstantInt::get(IntTy,
+ AO_ABI_memory_order_seq_cst)),
+ getContext().IntTy);
+ emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
+ return;
+ }
+
+ // Okay, we're doing this natively.
+ llvm::Value *intValue;
+
+ // If we've got a scalar value of the right size, try to avoid going
+ // through memory.
+ if (rvalue.isScalar() && !atomics.hasPadding()) {
+ llvm::Value *value = rvalue.getScalarVal();
+ if (isa<llvm::IntegerType>(value->getType())) {
+ intValue = value;
+ } else {
+ llvm::IntegerType *inputIntTy =
+ llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
+ if (isa<llvm::PointerType>(value->getType())) {
+ intValue = Builder.CreatePtrToInt(value, inputIntTy);
+ } else {
+ intValue = Builder.CreateBitCast(value, inputIntTy);
+ }
+ }
+
+ // Otherwise, we need to go through memory.
+ } else {
+ // Put the r-value in memory.
+ llvm::Value *addr = atomics.materializeRValue(rvalue);
+
+ // Cast the temporary to the atomic int type and pull a value out.
+ addr = atomics.emitCastToAtomicIntPointer(addr);
+ intValue = Builder.CreateAlignedLoad(addr,
+ atomics.getAtomicAlignment().getQuantity());
+ }
+
+ // Do the atomic store.
+ llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
+ llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
+
+ // Initializations don't need to be atomic.
+ if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);
+
+ // Other decoration.
+ store->setAlignment(dest.getAlignment().getQuantity());
+ if (dest.isVolatileQualified())
+ store->setVolatile(true);
+ if (dest.getTBAAInfo())
+ CGM.DecorateInstruction(store, dest.getTBAAInfo());
+}
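
On the native store path above, a scalar with no padding is coerced straight to the padded integer type so the value never has to be spilled to memory. A rough picture of the coercions involved (the exact IR depends on the target):

  float / double   -> bitcast to i32 / i64, then an atomic seq_cst store
  pointer values   -> ptrtoint to the pointer-width integer, then stored
  complex, aggregate, or padded values
                   -> materialized in a temporary and reloaded through the
                      cast-to-integer pointer before the atomic store
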
+
+void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
+ AtomicInfo atomics(*this, dest);
+
+ switch (atomics.getEvaluationKind()) {
+ case TEK_Scalar: {
+ llvm::Value *value = EmitScalarExpr(init);
+ atomics.emitCopyIntoMemory(RValue::get(value), dest);
+ return;
+ }
+
+ case TEK_Complex: {
+ ComplexPairTy value = EmitComplexExpr(init);
+ atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
+ return;
+ }
+
+ case TEK_Aggregate: {
+ // Memset the buffer first if there's any possibility of
+ // uninitialized internal bits.
+ atomics.emitMemSetZeroIfNecessary(dest);
+
+ // HACK: whether the initializer actually has an atomic type
+ // doesn't really seem reliable right now.
+ if (!init->getType()->isAtomicType()) {
+ dest = atomics.projectValue(dest);
+ }
+
+ // Evaluate the expression directly into the destination.
+ AggValueSlot slot = AggValueSlot::forLValue(dest,
+ AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ EmitAggExpr(init, slot);
+ return;
+ }
+ }
+ llvm_unreachable("bad evaluation kind");
+}