| field | value | date |
|---|---|---|
| author | Alexey Bataev <a.bataev@hotmail.com> | 2015-01-22 06:17:56 +0000 |
| committer | Alexey Bataev <a.bataev@hotmail.com> | 2015-01-22 06:17:56 +0000 |
| commit | b57056f483ebb19dae90baf865dd7e2fe99c2995 | |
| tree | 4ce11b4f15bf844f0055aed69fe590082319e0c2 /clang/lib/CodeGen/CGAtomic.cpp | |
| parent | 02e1ec6966c96f483dbd1616ccb5f1953567d00a | |
[OPENMP] CodeGen for "omp atomic read [seq_cst]" directive.
"omp atomic read [seq_cst]" accepts expressions "v=x;". In this patch we perform
an atomic load of "x" (using builtin atomic loading instructions or a call to
"atomic_load()" for simple lvalues and "kmpc_atomic_start();load
<x>;kmpc_atomic_end();" for other lvalues), convert the result of loading to
type of "v" (using EmitScalarConversion() for simple types and
EmitComplexToScalarConversion() for conversions from complex to scalar) and then
store the result in "v".)
Differential Revision: http://reviews.llvm.org/D6431
llvm-svn: 226788
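For context, a minimal example of the construct this patch handles (an illustrative snippet, not taken from the patch's test cases; compile with -fopenmp):

```cpp
// "v = x;" under "omp atomic read": the read of x must be atomic; the optional
// seq_cst clause additionally requests sequentially consistent ordering.
double x = 1.0;

double read_x() {
  double v;
#pragma omp atomic read seq_cst
  v = x;
  return v;
}
```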
Diffstat (limited to 'clang/lib/CodeGen/CGAtomic.cpp')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | clang/lib/CodeGen/CGAtomic.cpp | 223 |

1 file changed, 151 insertions(+), 72 deletions(-)
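As a rough sketch of the two emission strategies the commit message describes, before reading the diff below (hand-written C++ for illustration, not the IR Clang actually emits; the __kmpc_atomic_start/__kmpc_atomic_end declarations are assumed to match the libomp entry points named in the message):

```cpp
// Assumed runtime guard functions, per the commit message.
extern "C" void __kmpc_atomic_start(void);
extern "C" void __kmpc_atomic_end(void);

double x;                          // simple lvalue
struct S { int field : 3; } s;     // bit-field: not a simple lvalue

double read_simple() {
  // Simple lvalue: a single seq_cst atomic load (builtin instruction or
  // __atomic_load libcall), then conversion to the type of "v".
  double tmp;
  __atomic_load(&x, &tmp, __ATOMIC_SEQ_CST);
  return tmp;                      // v = tmp;
}

int read_bitfield() {
  // Other lvalues: a plain load bracketed by the runtime guard.
  __kmpc_atomic_start();
  int tmp = s.field;
  __kmpc_atomic_end();
  return tmp;                      // v = tmp;
}
```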
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index daac174c8e0..927195083d4 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -13,6 +13,7 @@
 
 #include "CodeGenFunction.h"
 #include "CGCall.h"
+#include "CGRecordLayout.h"
 #include "CodeGenModule.h"
 #include "clang/AST/ASTContext.h"
 #include "clang/CodeGen/CGFunctionInfo.h"
@@ -36,34 +37,70 @@ namespace {
     CharUnits LValueAlign;
     TypeEvaluationKind EvaluationKind;
     bool UseLibcall;
+    LValue LVal;
+    CGBitFieldInfo BFI;
   public:
-    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
-      assert(lvalue.isSimple());
-
-      AtomicTy = lvalue.getType();
-      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
-      EvaluationKind = CGF.getEvaluationKind(ValueTy);
-
+    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
+        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
+          EvaluationKind(TEK_Scalar), UseLibcall(true) {
+      assert(!lvalue.isGlobalReg());
       ASTContext &C = CGF.getContext();
-
-      uint64_t ValueAlignInBits;
-      uint64_t AtomicAlignInBits;
-      TypeInfo ValueTI = C.getTypeInfo(ValueTy);
-      ValueSizeInBits = ValueTI.Width;
-      ValueAlignInBits = ValueTI.Align;
-
-      TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
-      AtomicSizeInBits = AtomicTI.Width;
-      AtomicAlignInBits = AtomicTI.Align;
-
-      assert(ValueSizeInBits <= AtomicSizeInBits);
-      assert(ValueAlignInBits <= AtomicAlignInBits);
-
-      AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
-      ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
-      if (lvalue.getAlignment().isZero())
-        lvalue.setAlignment(AtomicAlign);
-
+      if (lvalue.isSimple()) {
+        AtomicTy = lvalue.getType();
+        if (auto *ATy = AtomicTy->getAs<AtomicType>())
+          ValueTy = ATy->getValueType();
+        else
+          ValueTy = AtomicTy;
+        EvaluationKind = CGF.getEvaluationKind(ValueTy);
+
+        uint64_t ValueAlignInBits;
+        uint64_t AtomicAlignInBits;
+        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
+        ValueSizeInBits = ValueTI.Width;
+        ValueAlignInBits = ValueTI.Align;
+
+        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
+        AtomicSizeInBits = AtomicTI.Width;
+        AtomicAlignInBits = AtomicTI.Align;
+
+        assert(ValueSizeInBits <= AtomicSizeInBits);
+        assert(ValueAlignInBits <= AtomicAlignInBits);
+
+        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
+        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
+        if (lvalue.getAlignment().isZero())
+          lvalue.setAlignment(AtomicAlign);
+
+        LVal = lvalue;
+      } else if (lvalue.isBitField()) {
+        auto &OrigBFI = lvalue.getBitFieldInfo();
+        auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
+        AtomicSizeInBits = C.toBits(
+            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
+                .RoundUpToAlignment(lvalue.getAlignment()));
+        auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldAddr());
+        auto OffsetInChars =
+            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
+            lvalue.getAlignment();
+        VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
+            VoidPtrAddr, OffsetInChars.getQuantity());
+        auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+            VoidPtrAddr,
+            CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
+            "atomic_bitfield_base");
+        BFI = OrigBFI;
+        BFI.Offset = Offset;
+        BFI.StorageSize = AtomicSizeInBits;
+        LVal = LValue::MakeBitfield(Addr, BFI, lvalue.getType(),
+                                    lvalue.getAlignment());
+      } else if (lvalue.isVectorElt()) {
+        AtomicSizeInBits = C.getTypeSize(lvalue.getType());
+        LVal = lvalue;
+      } else {
+        assert(lvalue.isExtVectorElt());
+        AtomicSizeInBits = C.getTypeSize(lvalue.getType());
+        LVal = lvalue;
+      }
       UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
           AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
     }
@@ -76,6 +113,7 @@ namespace {
     uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
     TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
     bool shouldUseLibcall() const { return UseLibcall; }
+    const LValue &getAtomicLValue() const { return LVal; }
 
     /// Is the atomic size larger than the underlying value type?
     ///
@@ -87,7 +125,7 @@ namespace {
       return (ValueSizeInBits != AtomicSizeInBits);
     }
 
-    bool emitMemSetZeroIfNecessary(LValue dest) const;
+    bool emitMemSetZeroIfNecessary() const;
 
     llvm::Value *getAtomicSizeValue() const {
       CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
@@ -110,16 +148,17 @@ namespace {
                              SourceLocation Loc) const;
 
     /// Copy an atomic r-value into atomic-layout memory.
-    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;
+    void emitCopyIntoMemory(RValue rvalue) const;
 
     /// Project an l-value down to the value field.
-    LValue projectValue(LValue lvalue) const {
-      llvm::Value *addr = lvalue.getAddress();
+    LValue projectValue() const {
+      assert(LVal.isSimple());
+      llvm::Value *addr = LVal.getAddress();
       if (hasPadding())
         addr = CGF.Builder.CreateStructGEP(addr, 0);
 
-      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
-                              CGF.getContext(), lvalue.getTBAAInfo());
+      return LValue::MakeAddr(addr, getValueType(), LVal.getAlignment(),
+                              CGF.getContext(), LVal.getTBAAInfo());
     }
 
     /// Materialize an atomic r-value in atomic-layout memory.
@@ -172,14 +211,15 @@ bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
   llvm_unreachable("bad evaluation kind");
 }
 
-bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
-  llvm::Value *addr = dest.getAddress();
+bool AtomicInfo::emitMemSetZeroIfNecessary() const {
+  assert(LVal.isSimple());
+  llvm::Value *addr = LVal.getAddress();
   if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
     return false;
 
   CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                            AtomicSizeInBits / 8,
-                           dest.getAlignment().getQuantity());
+                           LVal.getAlignment().getQuantity());
   return true;
 }
 
@@ -902,21 +942,34 @@ llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
 RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                        AggValueSlot resultSlot,
                                        SourceLocation loc) const {
-  if (EvaluationKind == TEK_Aggregate)
-    return resultSlot.asRValue();
-
-  // Drill into the padding structure if we have one.
-  if (hasPadding())
-    addr = CGF.Builder.CreateStructGEP(addr, 0);
-
-  // Otherwise, just convert the temporary to an r-value using the
-  // normal conversion routine.
-  return CGF.convertTempToRValue(addr, getValueType(), loc);
+  if (LVal.isSimple()) {
+    if (EvaluationKind == TEK_Aggregate)
+      return resultSlot.asRValue();
+
+    // Drill into the padding structure if we have one.
+    if (hasPadding())
+      addr = CGF.Builder.CreateStructGEP(addr, 0);
+
+    // Otherwise, just convert the temporary to an r-value using the
+    // normal conversion routine.
+    return CGF.convertTempToRValue(addr, getValueType(), loc);
+  } else if (LVal.isBitField())
+    return CGF.EmitLoadOfBitfieldLValue(LValue::MakeBitfield(
+        addr, LVal.getBitFieldInfo(), LVal.getType(), LVal.getAlignment()));
+  else if (LVal.isVectorElt())
+    return CGF.EmitLoadOfLValue(LValue::MakeVectorElt(addr, LVal.getVectorIdx(),
+                                                      LVal.getType(),
+                                                      LVal.getAlignment()),
+                                loc);
+  assert(LVal.isExtVectorElt());
+  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
+      addr, LVal.getExtVectorElts(), LVal.getType(), LVal.getAlignment()));
 }
 
 RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal,
                                      AggValueSlot ResultSlot,
                                      SourceLocation Loc) const {
+  assert(LVal.isSimple());
   // Try not to in some easy cases.
   assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
   if (getEvaluationKind() == TEK_Scalar && !hasPadding()) {
@@ -958,25 +1011,43 @@ RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal,
 RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                        AggValueSlot resultSlot) {
   AtomicInfo atomics(*this, src);
+  LValue LVal = atomics.getAtomicLValue();
+  llvm::Value *SrcAddr = nullptr;
+  llvm::AllocaInst *NonSimpleTempAlloca = nullptr;
+  if (LVal.isSimple())
+    SrcAddr = LVal.getAddress();
+  else {
+    if (LVal.isBitField())
+      SrcAddr = LVal.getBitFieldAddr();
+    else if (LVal.isVectorElt())
+      SrcAddr = LVal.getVectorAddr();
+    else {
+      assert(LVal.isExtVectorElt());
+      SrcAddr = LVal.getExtVectorAddr();
+    }
+    NonSimpleTempAlloca = CreateTempAlloca(
+        SrcAddr->getType()->getPointerElementType(), "atomic-load-temp");
+    NonSimpleTempAlloca->setAlignment(getContext().toBits(src.getAlignment()));
+  }
 
   // Check whether we should use a library call.
   if (atomics.shouldUseLibcall()) {
     llvm::Value *tempAddr;
-    if (!resultSlot.isIgnored()) {
-      assert(atomics.getEvaluationKind() == TEK_Aggregate);
-      tempAddr = resultSlot.getAddr();
-    } else {
-      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
-    }
+    if (LVal.isSimple()) {
+      if (!resultSlot.isIgnored()) {
+        assert(atomics.getEvaluationKind() == TEK_Aggregate);
+        tempAddr = resultSlot.getAddr();
+      } else
+        tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
+    } else
+      tempAddr = NonSimpleTempAlloca;
 
     // void __atomic_load(size_t size, void *mem, void *return, int order);
     CallArgList args;
     args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
-    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
-             getContext().VoidPtrTy);
-    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
-             getContext().VoidPtrTy);
+    args.add(RValue::get(EmitCastToVoidPtr(SrcAddr)), getContext().VoidPtrTy);
+    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)), getContext().VoidPtrTy);
     args.add(RValue::get(llvm::ConstantInt::get(
                  IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
@@ -987,7 +1058,7 @@ RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
   }
 
   // Okay, we're doing this natively.
-  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
+  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(SrcAddr);
   llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
   load->setAtomic(llvm::SequentiallyConsistent);
 
@@ -1003,40 +1074,46 @@ RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
     return RValue::getAggregate(nullptr, false);
 
   // Okay, turn that back into the original value type.
-  return atomics.convertIntToValue(load, resultSlot, loc);
+  if (src.isSimple())
+    return atomics.convertIntToValue(load, resultSlot, loc);
+
+  auto *IntAddr = atomics.emitCastToAtomicIntPointer(NonSimpleTempAlloca);
+  Builder.CreateAlignedStore(load, IntAddr, src.getAlignment().getQuantity());
+  return atomics.convertTempToRValue(NonSimpleTempAlloca, resultSlot, loc);
 }
 
 /// Copy an r-value into memory as part of storing to an atomic type.
 /// This needs to create a bit-pattern suitable for atomic operations.
-void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
+void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
+  assert(LVal.isSimple());
   // If we have an r-value, the rvalue should be of the atomic type,
   // which means that the caller is responsible for having zeroed
   // any padding.  Just do an aggregate copy of that type.
   if (rvalue.isAggregate()) {
-    CGF.EmitAggregateCopy(dest.getAddress(),
+    CGF.EmitAggregateCopy(LVal.getAddress(),
                           rvalue.getAggregateAddr(),
                           getAtomicType(),
                           (rvalue.isVolatileQualified()
-                           || dest.isVolatileQualified()),
-                          dest.getAlignment());
+                           || LVal.isVolatileQualified()),
+                          LVal.getAlignment());
     return;
   }
 
  // Okay, otherwise we're copying stuff.
 
   // Zero out the buffer if necessary.
-  emitMemSetZeroIfNecessary(dest);
+  emitMemSetZeroIfNecessary();
 
   // Drill past the padding if present.
-  dest = projectValue(dest);
+  LValue TempLVal = projectValue();
 
   // Okay, store the rvalue in.
   if (rvalue.isScalar()) {
-    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
+    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
   } else {
-    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
+    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
  }
 }
 
@@ -1051,8 +1128,10 @@ llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
 
   // Otherwise, make a temporary and materialize into it.
   llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
-  LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
-  emitCopyIntoMemory(rvalue, tempLV);
+  LValue tempLV =
+      CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
+  AtomicInfo Atomics(CGF, tempLV);
+  Atomics.emitCopyIntoMemory(rvalue);
   return temp;
 }
 
@@ -1098,7 +1177,7 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
 
   // If this is an initialization, just put the value there normally.
   if (isInit) {
-    atomics.emitCopyIntoMemory(rvalue, dest);
+    atomics.emitCopyIntoMemory(rvalue);
     return;
  }
 
@@ -1214,13 +1293,13 @@ void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
   switch (atomics.getEvaluationKind()) {
   case TEK_Scalar: {
     llvm::Value *value = EmitScalarExpr(init);
-    atomics.emitCopyIntoMemory(RValue::get(value), dest);
+    atomics.emitCopyIntoMemory(RValue::get(value));
     return;
   }
 
   case TEK_Complex: {
     ComplexPairTy value = EmitComplexExpr(init);
-    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
+    atomics.emitCopyIntoMemory(RValue::getComplex(value));
     return;
   }
 
@@ -1229,8 +1308,8 @@ void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
     // of atomic type.
     bool Zeroed = false;
     if (!init->getType()->isAtomicType()) {
-      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
-      dest = atomics.projectValue(dest);
+      Zeroed = atomics.emitMemSetZeroIfNecessary();
+      dest = atomics.projectValue();
    }
 
     // Evaluate the expression directly into the destination.

