author     Eli Friedman <eli.friedman@gmail.com>   2011-07-28 21:48:00 +0000
committer  Eli Friedman <eli.friedman@gmail.com>   2011-07-28 21:48:00 +0000
commit     c9a551ebed9fe4e5320a8c3d216d98907a8f20cc (patch)
tree       2b152b065a6755da89f2d21da0bba7ff0fa15259 /llvm/lib/VMCore
parent     07f0f02bc5c12dfdef34f32ae7f2ddf888d3df2b (diff)
Add LangRef text and basic memory representation/reading/writing for the
'cmpxchg' and 'atomicrmw' instructions, which allow representing all of the
current atomic read-modify-write intrinsics.
The allowed operands for these instructions are heavily restricted at the
moment; we can probably loosen the restrictions a bit, but supporting general
first-class types (where it makes sense) might get a bit complicated,
given how SelectionDAG works.
As an initial cut, these operations do not support specifying an alignment,
but one could be added if we think it's useful. Specifying an
alignment lower than the natural alignment would be essentially
impossible to support on anything other than x86, but specifying a greater
alignment would be possible. I can't think of any useful optimizations that
would use that information, but maybe someone else has ideas.
Optimizer/codegen support coming soon.
llvm-svn: 136404
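
For illustration only (not part of this commit): a minimal sketch of creating the new atomicrmw instruction directly through the AtomicRMWInst constructor added in Instructions.cpp below. The SequentiallyConsistent and CrossThread enumerators and the 2011-era llvm/Constants.h, llvm/Instructions.h, and llvm/Type.h header paths are assumptions about the surrounding tree, not something this patch shows.

#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/Type.h"
using namespace llvm;

// Builds roughly "%old = atomicrmw add i32* %Ptr, i32 1 seq_cst" in front of
// InsertBefore.  The result type of the instruction is the type of the value
// operand, as set up by the AtomicRMWInst constructor in this patch.
static AtomicRMWInst *emitAtomicIncrement(Value *Ptr, Instruction *InsertBefore) {
  LLVMContext &Ctx = InsertBefore->getContext();
  Value *One = ConstantInt::get(Type::getInt32Ty(Ctx), 1);
  // Operation, pointer, operand, ordering, synchronization scope.
  return new AtomicRMWInst(AtomicRMWInst::Add, Ptr, One,
                           SequentiallyConsistent, CrossThread, InsertBefore);
}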
Diffstat (limited to 'llvm/lib/VMCore')
-rw-r--r--   llvm/lib/VMCore/AsmWriter.cpp    |  25
-rw-r--r--   llvm/lib/VMCore/Instruction.cpp  |   2
-rw-r--r--   llvm/lib/VMCore/Instructions.cpp | 111
-rw-r--r--   llvm/lib/VMCore/Verifier.cpp     |  36
4 files changed, 174 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/VMCore/AsmWriter.cpp b/llvm/lib/VMCore/AsmWriter.cpp
index e6cd418c321..e3e2484def3 100644
--- a/llvm/lib/VMCore/AsmWriter.cpp
+++ b/llvm/lib/VMCore/AsmWriter.cpp
@@ -658,6 +658,23 @@ static const char *getPredicateText(unsigned predicate) {
   return pred;
 }
 
+static void writeAtomicRMWOperation(raw_ostream &Out,
+                                    AtomicRMWInst::BinOp Op) {
+  switch (Op) {
+  default: Out << " <unknown operation " << Op << ">"; break;
+  case AtomicRMWInst::Xchg: Out << " xchg"; break;
+  case AtomicRMWInst::Add:  Out << " add"; break;
+  case AtomicRMWInst::Sub:  Out << " sub"; break;
+  case AtomicRMWInst::And:  Out << " and"; break;
+  case AtomicRMWInst::Nand: Out << " nand"; break;
+  case AtomicRMWInst::Or:   Out << " or"; break;
+  case AtomicRMWInst::Xor:  Out << " xor"; break;
+  case AtomicRMWInst::Max:  Out << " max"; break;
+  case AtomicRMWInst::Min:  Out << " min"; break;
+  case AtomicRMWInst::UMax: Out << " umax"; break;
+  case AtomicRMWInst::UMin: Out << " umin"; break;
+  }
+}
 
 static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
   if (const OverflowingBinaryOperator *OBO =
@@ -1670,6 +1687,10 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
   if (const CmpInst *CI = dyn_cast<CmpInst>(&I))
     Out << ' ' << getPredicateText(CI->getPredicate());
 
+  // Print out the atomicrmw operation
+  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I))
+    writeAtomicRMWOperation(Out, RMWI->getOperation());
+
   // Print out the type of the operands...
   const Value *Operand = I.getNumOperands() ? I.getOperand(0) : 0;
@@ -1936,6 +1957,10 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
     Out << ", align " << cast<LoadInst>(I).getAlignment();
   } else if (isa<StoreInst>(I) && cast<StoreInst>(I).getAlignment()) {
     Out << ", align " << cast<StoreInst>(I).getAlignment();
+  } else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
+    writeAtomic(CXI->getOrdering(), CXI->getSynchScope());
+  } else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
+    writeAtomic(RMWI->getOrdering(), RMWI->getSynchScope());
   } else if (const FenceInst *FI = dyn_cast<FenceInst>(&I)) {
     writeAtomic(FI->getOrdering(), FI->getSynchScope());
   }
diff --git a/llvm/lib/VMCore/Instruction.cpp b/llvm/lib/VMCore/Instruction.cpp
index 09d16e7d448..ad433ef22a9 100644
--- a/llvm/lib/VMCore/Instruction.cpp
+++ b/llvm/lib/VMCore/Instruction.cpp
@@ -128,6 +128,8 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
   case Alloca:        return "alloca";
   case Load:          return "load";
   case Store:         return "store";
+  case AtomicCmpXchg: return "cmpxchg";
+  case AtomicRMW:     return "atomicrmw";
   case Fence:         return "fence";
   case GetElementPtr: return "getelementptr";
diff --git a/llvm/lib/VMCore/Instructions.cpp b/llvm/lib/VMCore/Instructions.cpp
index 9fdff0773a6..abee7b741a5 100644
--- a/llvm/lib/VMCore/Instructions.cpp
+++ b/llvm/lib/VMCore/Instructions.cpp
@@ -1106,6 +1106,101 @@ void StoreInst::setAlignment(unsigned Align) {
 }
 
 //===----------------------------------------------------------------------===//
+// AtomicCmpXchgInst Implementation
+//===----------------------------------------------------------------------===//
+
+void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
+                             AtomicOrdering Ordering,
+                             SynchronizationScope SynchScope) {
+  Op<0>() = Ptr;
+  Op<1>() = Cmp;
+  Op<2>() = NewVal;
+  setOrdering(Ordering);
+  setSynchScope(SynchScope);
+
+  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
+         "All operands must be non-null!");
+  assert(getOperand(0)->getType()->isPointerTy() &&
+         "Ptr must have pointer type!");
+  assert(getOperand(1)->getType() ==
+         cast<PointerType>(getOperand(0)->getType())->getElementType()
+         && "Ptr must be a pointer to Cmp type!");
+  assert(getOperand(2)->getType() ==
+         cast<PointerType>(getOperand(0)->getType())->getElementType()
+         && "Ptr must be a pointer to NewVal type!");
+  assert(Ordering != NotAtomic &&
+         "AtomicCmpXchg instructions must be atomic!");
+}
+
+AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+                                     AtomicOrdering Ordering,
+                                     SynchronizationScope SynchScope,
+                                     Instruction *InsertBefore)
+  : Instruction(Cmp->getType(), AtomicCmpXchg,
+                OperandTraits<AtomicCmpXchgInst>::op_begin(this),
+                OperandTraits<AtomicCmpXchgInst>::operands(this),
+                InsertBefore) {
+  Init(Ptr, Cmp, NewVal, Ordering, SynchScope);
+}
+
+AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+                                     AtomicOrdering Ordering,
+                                     SynchronizationScope SynchScope,
+                                     BasicBlock *InsertAtEnd)
+  : Instruction(Cmp->getType(), AtomicCmpXchg,
+                OperandTraits<AtomicCmpXchgInst>::op_begin(this),
+                OperandTraits<AtomicCmpXchgInst>::operands(this),
+                InsertAtEnd) {
+  Init(Ptr, Cmp, NewVal, Ordering, SynchScope);
+}
+
+//===----------------------------------------------------------------------===//
+// AtomicRMWInst Implementation
+//===----------------------------------------------------------------------===//
+
+void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
+                         AtomicOrdering Ordering,
+                         SynchronizationScope SynchScope) {
+  Op<0>() = Ptr;
+  Op<1>() = Val;
+  setOperation(Operation);
+  setOrdering(Ordering);
+  setSynchScope(SynchScope);
+
+  assert(getOperand(0) && getOperand(1) &&
+         "All operands must be non-null!");
+  assert(getOperand(0)->getType()->isPointerTy() &&
+         "Ptr must have pointer type!");
+  assert(getOperand(1)->getType() ==
+         cast<PointerType>(getOperand(0)->getType())->getElementType()
+         && "Ptr must be a pointer to Val type!");
+  assert(Ordering != NotAtomic &&
+         "AtomicRMW instructions must be atomic!");
+}
+
+AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
+                             AtomicOrdering Ordering,
+                             SynchronizationScope SynchScope,
+                             Instruction *InsertBefore)
+  : Instruction(Val->getType(), AtomicRMW,
+                OperandTraits<AtomicRMWInst>::op_begin(this),
+                OperandTraits<AtomicRMWInst>::operands(this),
+                InsertBefore) {
+  Init(Operation, Ptr, Val, Ordering, SynchScope);
+}
+
+AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
+                             AtomicOrdering Ordering,
+                             SynchronizationScope SynchScope,
+                             BasicBlock *InsertAtEnd)
+  : Instruction(Val->getType(), AtomicRMW,
+                OperandTraits<AtomicRMWInst>::op_begin(this),
+                OperandTraits<AtomicRMWInst>::operands(this),
+                InsertAtEnd) {
+  Init(Operation, Ptr, Val, Ordering, SynchScope);
+}
+
+//===----------------------------------------------------------------------===//
 // FenceInst Implementation
 //===----------------------------------------------------------------------===//
 
@@ -3148,6 +3243,22 @@ StoreInst *StoreInst::clone_impl() const {
                        isVolatile(), getAlignment());
 }
 
+AtomicCmpXchgInst *AtomicCmpXchgInst::clone_impl() const {
+  AtomicCmpXchgInst *Result =
+    new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
+                          getOrdering(), getSynchScope());
+  Result->setVolatile(isVolatile());
+  return Result;
+}
+
+AtomicRMWInst *AtomicRMWInst::clone_impl() const {
+  AtomicRMWInst *Result =
+    new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
+                      getOrdering(), getSynchScope());
+  Result->setVolatile(isVolatile());
+  return Result;
+}
+
 FenceInst *FenceInst::clone_impl() const {
   return new FenceInst(getContext(), getOrdering(), getSynchScope());
 }
diff --git a/llvm/lib/VMCore/Verifier.cpp b/llvm/lib/VMCore/Verifier.cpp
index 9ec2edf3fca..905e9a26233 100644
--- a/llvm/lib/VMCore/Verifier.cpp
+++ b/llvm/lib/VMCore/Verifier.cpp
@@ -288,6 +288,8 @@ namespace {
     void visitUserOp1(Instruction &I);
     void visitUserOp2(Instruction &I) { visitUserOp1(I); }
     void visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI);
+    void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
+    void visitAtomicRMWInst(AtomicRMWInst &RMWI);
     void visitFenceInst(FenceInst &FI);
     void visitAllocaInst(AllocaInst &AI);
     void visitExtractValueInst(ExtractValueInst &EVI);
@@ -1327,6 +1329,40 @@ void Verifier::visitAllocaInst(AllocaInst &AI) {
   visitInstruction(AI);
 }
 
+void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
+  Assert1(CXI.getOrdering() != NotAtomic,
+          "cmpxchg instructions must be atomic.", &CXI);
+  Assert1(CXI.getOrdering() != Unordered,
+          "cmpxchg instructions cannot be unordered.", &CXI);
+  PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
+  Assert1(PTy, "First cmpxchg operand must be a pointer.", &CXI);
+  Type *ElTy = PTy->getElementType();
+  Assert2(ElTy == CXI.getOperand(1)->getType(),
+          "Expected value type does not match pointer operand type!",
+          &CXI, ElTy);
+  Assert2(ElTy == CXI.getOperand(2)->getType(),
+          "Stored value type does not match pointer operand type!",
+          &CXI, ElTy);
+  visitInstruction(CXI);
+}
+
+void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
+  Assert1(RMWI.getOrdering() != NotAtomic,
+          "atomicrmw instructions must be atomic.", &RMWI);
+  Assert1(RMWI.getOrdering() != Unordered,
+          "atomicrmw instructions cannot be unordered.", &RMWI);
+  PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
+  Assert1(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
+  Type *ElTy = PTy->getElementType();
+  Assert2(ElTy == RMWI.getOperand(1)->getType(),
+          "Argument value type does not match pointer operand type!",
+          &RMWI, ElTy);
+  Assert1(AtomicRMWInst::FIRST_BINOP <= RMWI.getOperation() &&
+          RMWI.getOperation() <= AtomicRMWInst::LAST_BINOP,
+          "Invalid binary operation!", &RMWI);
+  visitInstruction(RMWI);
+}
+
 void Verifier::visitFenceInst(FenceInst &FI) {
   const AtomicOrdering Ordering = FI.getOrdering();
   Assert1(Ordering == Acquire || Ordering == Release ||
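
Also for illustration (again not part of this commit): a minimal sketch of a cmpxchg built with the new AtomicCmpXchgInst constructor, shaped to pass the checks added in Verifier::visitAtomicCmpXchgInst above: the first operand is a pointer, Cmp and NewVal match its pointee type, and the ordering is neither NotAtomic nor Unordered. As before, the SequentiallyConsistent and CrossThread enumerators and the 2011-era header path are assumed from the surrounding tree.

#include "llvm/Instructions.h"
using namespace llvm;

// Builds roughly "%old = cmpxchg i32* %Ptr, i32 %Expected, i32 %New seq_cst".
// Expected and New must have the pointee type of Ptr; otherwise the asserts
// in AtomicCmpXchgInst::Init and the verifier checks above will reject it.
static AtomicCmpXchgInst *emitCmpXchg(Value *Ptr, Value *Expected, Value *New,
                                      Instruction *InsertBefore) {
  return new AtomicCmpXchgInst(Ptr, Expected, New, SequentiallyConsistent,
                               CrossThread, InsertBefore);
}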