author     Eli Friedman <eli.friedman@gmail.com>   2011-08-09 23:02:53 +0000
committer  Eli Friedman <eli.friedman@gmail.com>   2011-08-09 23:02:53 +0000
commit     59b66883eacbc62a09c09f08bcbfdce7af46cf31 (patch)
tree       94bf465b6c6ec54c89d295d0422be1d6cc3613c6 /llvm/lib
parent     e95fcf7860d7de015a4fd2b41eb785340c32f875 (diff)
Representation of 'atomic load' and 'atomic store' in IR.
llvm-svn: 137170
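
As a rough sketch of the textual form the LLParser changes below accept (value
names are illustrative; the 'atomic' prefix, optional 'singlethread' scope, and
mandatory alignment follow the ParseLoad/ParseStore grammar comments in this
patch; later LLVM releases moved to the 'load atomic ...' spelling):

    %v = atomic load i32* %p seq_cst, align 4
    atomic store i32 %v, i32* %q release, align 4
    %w = atomic volatile load i32* %p singlethread monotonic, align 4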
Diffstat (limited to 'llvm/lib')
-rw-r--r--   llvm/lib/AsmParser/LLParser.cpp              43
-rw-r--r--   llvm/lib/AsmParser/LLParser.h                 6
-rw-r--r--   llvm/lib/Bitcode/Reader/BitcodeReader.cpp    49
-rw-r--r--   llvm/lib/Bitcode/Writer/BitcodeWriter.cpp    25
-rw-r--r--   llvm/lib/Transforms/Scalar/LowerAtomic.cpp   17
-rw-r--r--   llvm/lib/VMCore/AsmWriter.cpp                30
-rw-r--r--   llvm/lib/VMCore/Instructions.cpp            100
-rw-r--r--   llvm/lib/VMCore/Verifier.cpp                 18
8 files changed, 248 insertions, 40 deletions
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index c7650368075..3027ff51b85 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -2949,16 +2949,23 @@ int LLParser::ParseInstruction(Instruction *&Inst, BasicBlock *BB,
case lltok::kw_tail: return ParseCall(Inst, PFS, true);
// Memory.
case lltok::kw_alloca: return ParseAlloc(Inst, PFS);
- case lltok::kw_load: return ParseLoad(Inst, PFS, false);
- case lltok::kw_store: return ParseStore(Inst, PFS, false);
+ case lltok::kw_load: return ParseLoad(Inst, PFS, false, false);
+ case lltok::kw_store: return ParseStore(Inst, PFS, false, false);
case lltok::kw_cmpxchg: return ParseCmpXchg(Inst, PFS, false);
case lltok::kw_atomicrmw: return ParseAtomicRMW(Inst, PFS, false);
case lltok::kw_fence: return ParseFence(Inst, PFS);
+ case lltok::kw_atomic: {
+ bool isVolatile = EatIfPresent(lltok::kw_volatile);
+ if (EatIfPresent(lltok::kw_load))
+ return ParseLoad(Inst, PFS, true, isVolatile);
+ else if (EatIfPresent(lltok::kw_store))
+ return ParseStore(Inst, PFS, true, isVolatile);
+ }
case lltok::kw_volatile:
if (EatIfPresent(lltok::kw_load))
- return ParseLoad(Inst, PFS, true);
+ return ParseLoad(Inst, PFS, false, true);
else if (EatIfPresent(lltok::kw_store))
- return ParseStore(Inst, PFS, true);
+ return ParseStore(Inst, PFS, false, true);
else if (EatIfPresent(lltok::kw_cmpxchg))
return ParseCmpXchg(Inst, PFS, true);
else if (EatIfPresent(lltok::kw_atomicrmw))
@@ -3635,34 +3642,48 @@ int LLParser::ParseAlloc(Instruction *&Inst, PerFunctionState &PFS) {
}
/// ParseLoad
-/// ::= 'volatile'? 'load' TypeAndValue (',' OptionalInfo)?
+/// ::= 'volatile'? 'load' TypeAndValue (',' 'align' i32)?
+/// ::= 'atomic' 'volatile'? 'load' TypeAndValue
+///       'singlethread'? AtomicOrdering (',' 'align' i32)?
int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS,
- bool isVolatile) {
+ bool isAtomic, bool isVolatile) {
Value *Val; LocTy Loc;
unsigned Alignment = 0;
bool AteExtraComma = false;
+ AtomicOrdering Ordering = NotAtomic;
+ SynchronizationScope Scope = CrossThread;
if (ParseTypeAndValue(Val, Loc, PFS) ||
+ ParseScopeAndOrdering(isAtomic, Scope, Ordering) ||
ParseOptionalCommaAlign(Alignment, AteExtraComma))
return true;
if (!Val->getType()->isPointerTy() ||
!cast<PointerType>(Val->getType())->getElementType()->isFirstClassType())
return Error(Loc, "load operand must be a pointer to a first class type");
+ if (isAtomic && !Alignment)
+ return Error(Loc, "atomic load must have explicit non-zero alignment");
+ if (Ordering == Release || Ordering == AcquireRelease)
+ return Error(Loc, "atomic load cannot use Release ordering");
- Inst = new LoadInst(Val, "", isVolatile, Alignment);
+ Inst = new LoadInst(Val, "", isVolatile, Alignment, Ordering, Scope);
return AteExtraComma ? InstExtraComma : InstNormal;
}
/// ParseStore
/// ::= 'volatile'? 'store' TypeAndValue ',' TypeAndValue (',' 'align' i32)?
+/// ::= 'atomic' 'volatile'? 'store' TypeAndValue ',' TypeAndValue
+/// 'singlethread'? AtomicOrdering (',' 'align' i32)?
int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
- bool isVolatile) {
+ bool isAtomic, bool isVolatile) {
Value *Val, *Ptr; LocTy Loc, PtrLoc;
unsigned Alignment = 0;
bool AteExtraComma = false;
+ AtomicOrdering Ordering = NotAtomic;
+ SynchronizationScope Scope = CrossThread;
if (ParseTypeAndValue(Val, Loc, PFS) ||
ParseToken(lltok::comma, "expected ',' after store operand") ||
ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
+ ParseScopeAndOrdering(isAtomic, Scope, Ordering) ||
ParseOptionalCommaAlign(Alignment, AteExtraComma))
return true;
@@ -3672,8 +3693,12 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
return Error(Loc, "store operand must be a first class value");
if (cast<PointerType>(Ptr->getType())->getElementType() != Val->getType())
return Error(Loc, "stored value and pointer type do not match");
+ if (isAtomic && !Alignment)
+ return Error(Loc, "atomic store must have explicit non-zero alignment");
+ if (Ordering == Acquire || Ordering == AcquireRelease)
+ return Error(Loc, "atomic store cannot use Acquire ordering");
- Inst = new StoreInst(Val, Ptr, isVolatile, Alignment);
+ Inst = new StoreInst(Val, Ptr, isVolatile, Alignment, Ordering, Scope);
return AteExtraComma ? InstExtraComma : InstNormal;
}
diff --git a/llvm/lib/AsmParser/LLParser.h b/llvm/lib/AsmParser/LLParser.h
index df058edd760..815f0620265 100644
--- a/llvm/lib/AsmParser/LLParser.h
+++ b/llvm/lib/AsmParser/LLParser.h
@@ -362,8 +362,10 @@ namespace llvm {
int ParsePHI(Instruction *&I, PerFunctionState &PFS);
bool ParseCall(Instruction *&I, PerFunctionState &PFS, bool isTail);
int ParseAlloc(Instruction *&I, PerFunctionState &PFS);
- int ParseLoad(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
- int ParseStore(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
+ int ParseLoad(Instruction *&I, PerFunctionState &PFS,
+ bool isAtomic, bool isVolatile);
+ int ParseStore(Instruction *&I, PerFunctionState &PFS,
+ bool isAtomic, bool isVolatile);
int ParseCmpXchg(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
int ParseAtomicRMW(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
int ParseFence(Instruction *&I, PerFunctionState &PFS);
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index a5d51565c31..93654a1dc56 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -2567,6 +2567,28 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
InstructionList.push_back(I);
break;
}
+ case bitc::FUNC_CODE_INST_LOADATOMIC: {
+ // LOADATOMIC: [opty, op, align, vol, ordering, synchscope]
+ unsigned OpNum = 0;
+ Value *Op;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Op) ||
+ OpNum+4 != Record.size())
+ return Error("Invalid LOADATOMIC record");
+
+
+ AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+2]);
+ if (Ordering == NotAtomic || Ordering == Release ||
+ Ordering == AcquireRelease)
+ return Error("Invalid LOADATOMIC record");
+ if (Ordering != NotAtomic && Record[OpNum] == 0)
+ return Error("Invalid LOADATOMIC record");
+ SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+3]);
+
+ I = new LoadInst(Op, "", Record[OpNum+1], (1 << Record[OpNum]) >> 1,
+ Ordering, SynchScope);
+ InstructionList.push_back(I);
+ break;
+ }
case bitc::FUNC_CODE_INST_STORE: { // STORE2:[ptrty, ptr, val, align, vol]
unsigned OpNum = 0;
Value *Val, *Ptr;
@@ -2580,6 +2602,29 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
InstructionList.push_back(I);
break;
}
+ case bitc::FUNC_CODE_INST_STOREATOMIC: {
+ // STOREATOMIC: [ptrty, ptr, val, align, vol, ordering, synchscope]
+ unsigned OpNum = 0;
+ Value *Val, *Ptr;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
+ getValue(Record, OpNum,
+ cast<PointerType>(Ptr->getType())->getElementType(), Val) ||
+ OpNum+4 != Record.size())
+ return Error("Invalid STOREATOMIC record");
+
+ AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+2]);
+ if (Ordering == NotAtomic || Ordering == Release ||
+ Ordering == AcquireRelease)
+ return Error("Invalid STOREATOMIC record");
+ SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+3]);
+ if (Ordering != NotAtomic && Record[OpNum] == 0)
+ return Error("Invalid STOREATOMIC record");
+
+ I = new StoreInst(Val, Ptr, Record[OpNum+1], (1 << Record[OpNum]) >> 1,
+ Ordering, SynchScope);
+ InstructionList.push_back(I);
+ break;
+ }
case bitc::FUNC_CODE_INST_CMPXCHG: {
// CMPXCHG:[ptrty, ptr, cmp, new, vol, ordering, synchscope]
unsigned OpNum = 0;
@@ -2592,7 +2637,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
OpNum+3 != Record.size())
return Error("Invalid CMPXCHG record");
AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+1]);
- if (Ordering == NotAtomic)
+ if (Ordering == NotAtomic || Ordering == Unordered)
return Error("Invalid CMPXCHG record");
SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+2]);
I = new AtomicCmpXchgInst(Ptr, Cmp, New, Ordering, SynchScope);
@@ -2614,7 +2659,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
Operation > AtomicRMWInst::LAST_BINOP)
return Error("Invalid ATOMICRMW record");
AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+2]);
- if (Ordering == NotAtomic)
+ if (Ordering == NotAtomic || Ordering == Unordered)
return Error("Invalid ATOMICRMW record");
SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+3]);
I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SynchScope);
diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index 9954400e5e4..4aae121da5e 100644
--- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -1175,19 +1175,34 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
break;
case Instruction::Load:
- Code = bitc::FUNC_CODE_INST_LOAD;
- if (!PushValueAndType(I.getOperand(0), InstID, Vals, VE)) // ptr
- AbbrevToUse = FUNCTION_INST_LOAD_ABBREV;
-
+ if (cast<LoadInst>(I).isAtomic()) {
+ Code = bitc::FUNC_CODE_INST_LOADATOMIC;
+ PushValueAndType(I.getOperand(0), InstID, Vals, VE);
+ } else {
+ Code = bitc::FUNC_CODE_INST_LOAD;
+ if (!PushValueAndType(I.getOperand(0), InstID, Vals, VE)) // ptr
+ AbbrevToUse = FUNCTION_INST_LOAD_ABBREV;
+ }
Vals.push_back(Log2_32(cast<LoadInst>(I).getAlignment())+1);
Vals.push_back(cast<LoadInst>(I).isVolatile());
+ if (cast<LoadInst>(I).isAtomic()) {
+ Vals.push_back(GetEncodedOrdering(cast<LoadInst>(I).getOrdering()));
+ Vals.push_back(GetEncodedSynchScope(cast<LoadInst>(I).getSynchScope()));
+ }
break;
case Instruction::Store:
- Code = bitc::FUNC_CODE_INST_STORE;
+ if (cast<StoreInst>(I).isAtomic())
+ Code = bitc::FUNC_CODE_INST_STOREATOMIC;
+ else
+ Code = bitc::FUNC_CODE_INST_STORE;
PushValueAndType(I.getOperand(1), InstID, Vals, VE); // ptrty + ptr
Vals.push_back(VE.getValueID(I.getOperand(0))); // val.
Vals.push_back(Log2_32(cast<StoreInst>(I).getAlignment())+1);
Vals.push_back(cast<StoreInst>(I).isVolatile());
+ if (cast<StoreInst>(I).isAtomic()) {
+ Vals.push_back(GetEncodedOrdering(cast<StoreInst>(I).getOrdering()));
+ Vals.push_back(GetEncodedSynchScope(cast<StoreInst>(I).getSynchScope()));
+ }
break;
case Instruction::AtomicCmpXchg:
Code = bitc::FUNC_CODE_INST_CMPXCHG;
diff --git a/llvm/lib/Transforms/Scalar/LowerAtomic.cpp b/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
index 5f5ed03bd82..449a795489a 100644
--- a/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
@@ -190,6 +190,16 @@ static bool LowerFenceInst(FenceInst *FI) {
return true;
}
+static bool LowerLoadInst(LoadInst *LI) {
+ LI->setAtomic(NotAtomic);
+ return true;
+}
+
+static bool LowerStoreInst(StoreInst *SI) {
+ SI->setAtomic(NotAtomic);
+ return true;
+}
+
namespace {
struct LowerAtomic : public BasicBlockPass {
static char ID;
@@ -208,6 +218,13 @@ namespace {
Changed |= LowerAtomicCmpXchgInst(CXI);
else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(Inst))
Changed |= LowerAtomicRMWInst(RMWI);
+ else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
+ if (LI->isAtomic())
+ LowerLoadInst(LI);
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+ if (SI->isAtomic())
+ LowerStoreInst(SI);
+ }
}
return Changed;
}
diff --git a/llvm/lib/VMCore/AsmWriter.cpp b/llvm/lib/VMCore/AsmWriter.cpp
index 442e8b8f7f9..005f51aae30 100644
--- a/llvm/lib/VMCore/AsmWriter.cpp
+++ b/llvm/lib/VMCore/AsmWriter.cpp
@@ -1659,14 +1659,18 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
Out << '%' << SlotNum << " = ";
}
+ // If this is an atomic load or store, print out the atomic marker.
+ if ((isa<LoadInst>(I) && cast<LoadInst>(I).isAtomic()) ||
+ (isa<StoreInst>(I) && cast<StoreInst>(I).isAtomic()))
+ Out << "atomic ";
+
// If this is a volatile load or store, print out the volatile marker.
if ((isa<LoadInst>(I) && cast<LoadInst>(I).isVolatile()) ||
- (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile())) {
- Out << "volatile ";
- } else if (isa<CallInst>(I) && cast<CallInst>(I).isTailCall()) {
- // If this is a call, check if it's a tail call.
+ (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile()))
+ Out << "volatile ";
+
+ if (isa<CallInst>(I) && cast<CallInst>(I).isTailCall())
Out << "tail ";
- }
// Print out the opcode...
Out << I.getOpcodeName();
@@ -1913,11 +1917,17 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
}
}
- // Print post operand alignment for load/store.
- if (isa<LoadInst>(I) && cast<LoadInst>(I).getAlignment()) {
- Out << ", align " << cast<LoadInst>(I).getAlignment();
- } else if (isa<StoreInst>(I) && cast<StoreInst>(I).getAlignment()) {
- Out << ", align " << cast<StoreInst>(I).getAlignment();
+ // Print atomic ordering/alignment for memory operations
+ if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
+ if (LI->isAtomic())
+ writeAtomic(LI->getOrdering(), LI->getSynchScope());
+ if (LI->getAlignment())
+ Out << ", align " << LI->getAlignment();
+ } else if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
+ if (SI->isAtomic())
+ writeAtomic(SI->getOrdering(), SI->getSynchScope());
+ if (SI->getAlignment())
+ Out << ", align " << SI->getAlignment();
} else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
writeAtomic(CXI->getOrdering(), CXI->getSynchScope());
} else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
diff --git a/llvm/lib/VMCore/Instructions.cpp b/llvm/lib/VMCore/Instructions.cpp
index c36102d706d..612b7630728 100644
--- a/llvm/lib/VMCore/Instructions.cpp
+++ b/llvm/lib/VMCore/Instructions.cpp
@@ -822,6 +822,8 @@ bool AllocaInst::isStaticAlloca() const {
void LoadInst::AssertOK() {
assert(getOperand(0)->getType()->isPointerTy() &&
"Ptr must have pointer type.");
+ assert(!(isAtomic() && getAlignment() == 0) &&
+ "Alignment required for atomic load");
}
LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)
@@ -829,6 +831,7 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)
Load, Ptr, InsertBef) {
setVolatile(false);
setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
setName(Name);
}
@@ -838,6 +841,7 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, BasicBlock *InsertAE)
Load, Ptr, InsertAE) {
setVolatile(false);
setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
setName(Name);
}
@@ -848,6 +852,18 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
Load, Ptr, InsertBef) {
setVolatile(isVolatile);
setAlignment(0);
+ setAtomic(NotAtomic);
+ AssertOK();
+ setName(Name);
+}
+
+LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
+ BasicBlock *InsertAE)
+ : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
+ Load, Ptr, InsertAE) {
+ setVolatile(isVolatile);
+ setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
setName(Name);
}
@@ -858,6 +874,7 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
Load, Ptr, InsertBef) {
setVolatile(isVolatile);
setAlignment(Align);
+ setAtomic(NotAtomic);
AssertOK();
setName(Name);
}
@@ -868,27 +885,43 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
Load, Ptr, InsertAE) {
setVolatile(isVolatile);
setAlignment(Align);
+ setAtomic(NotAtomic);
AssertOK();
setName(Name);
}
-LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
+LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
+ unsigned Align, AtomicOrdering Order,
+ SynchronizationScope SynchScope,
+ Instruction *InsertBef)
+ : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
+ Load, Ptr, InsertBef) {
+ setVolatile(isVolatile);
+ setAlignment(Align);
+ setAtomic(Order, SynchScope);
+ AssertOK();
+ setName(Name);
+}
+
+LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
+ unsigned Align, AtomicOrdering Order,
+ SynchronizationScope SynchScope,
BasicBlock *InsertAE)
: UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
Load, Ptr, InsertAE) {
setVolatile(isVolatile);
- setAlignment(0);
+ setAlignment(Align);
+ setAtomic(Order, SynchScope);
AssertOK();
setName(Name);
}
-
-
LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef)
: UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
Load, Ptr, InsertBef) {
setVolatile(false);
setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
if (Name && Name[0]) setName(Name);
}
@@ -898,6 +931,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, BasicBlock *InsertAE)
Load, Ptr, InsertAE) {
setVolatile(false);
setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
if (Name && Name[0]) setName(Name);
}
@@ -908,6 +942,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
Load, Ptr, InsertBef) {
setVolatile(isVolatile);
setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
if (Name && Name[0]) setName(Name);
}
@@ -918,6 +953,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
Load, Ptr, InsertAE) {
setVolatile(isVolatile);
setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
if (Name && Name[0]) setName(Name);
}
@@ -926,7 +962,7 @@ void LoadInst::setAlignment(unsigned Align) {
assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
assert(Align <= MaximumAlignment &&
"Alignment is greater than MaximumAlignment!");
- setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
((Log2_32(Align)+1)<<1));
assert(getAlignment() == Align && "Alignment representation error!");
}
@@ -942,6 +978,8 @@ void StoreInst::AssertOK() {
assert(getOperand(0)->getType() ==
cast<PointerType>(getOperand(1)->getType())->getElementType()
&& "Ptr must be a pointer to Val type!");
+ assert(!(isAtomic() && getAlignment() == 0) &&
+ "Alignment required for atomic load");
}
@@ -954,6 +992,7 @@ StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
Op<1>() = addr;
setVolatile(false);
setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
}
@@ -966,6 +1005,7 @@ StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
Op<1>() = addr;
setVolatile(false);
setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
}
@@ -979,6 +1019,7 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
}
@@ -992,6 +1033,37 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(Align);
+ setAtomic(NotAtomic);
+ AssertOK();
+}
+
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
+ unsigned Align, AtomicOrdering Order,
+ SynchronizationScope SynchScope,
+ Instruction *InsertBefore)
+ : Instruction(Type::getVoidTy(val->getContext()), Store,
+ OperandTraits<StoreInst>::op_begin(this),
+ OperandTraits<StoreInst>::operands(this),
+ InsertBefore) {
+ Op<0>() = val;
+ Op<1>() = addr;
+ setVolatile(isVolatile);
+ setAlignment(Align);
+ setAtomic(Order, SynchScope);
+ AssertOK();
+}
+
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Type::getVoidTy(val->getContext()), Store,
+ OperandTraits<StoreInst>::op_begin(this),
+ OperandTraits<StoreInst>::operands(this),
+ InsertAtEnd) {
+ Op<0>() = val;
+ Op<1>() = addr;
+ setVolatile(isVolatile);
+ setAlignment(0);
+ setAtomic(NotAtomic);
AssertOK();
}
@@ -1005,10 +1077,13 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(Align);
+ setAtomic(NotAtomic);
AssertOK();
}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
+ unsigned Align, AtomicOrdering Order,
+ SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd)
: Instruction(Type::getVoidTy(val->getContext()), Store,
OperandTraits<StoreInst>::op_begin(this),
@@ -1017,7 +1092,8 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Op<0>() = val;
Op<1>() = addr;
setVolatile(isVolatile);
- setAlignment(0);
+ setAlignment(Align);
+ setAtomic(Order, SynchScope);
AssertOK();
}
@@ -1025,7 +1101,7 @@ void StoreInst::setAlignment(unsigned Align) {
assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
assert(Align <= MaximumAlignment &&
"Alignment is greater than MaximumAlignment!");
- setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
((Log2_32(Align)+1) << 1));
assert(getAlignment() == Align && "Alignment representation error!");
}
@@ -3158,14 +3234,14 @@ AllocaInst *AllocaInst::clone_impl() const {
}
LoadInst *LoadInst::clone_impl() const {
- return new LoadInst(getOperand(0),
- Twine(), isVolatile(),
- getAlignment());
+ return new LoadInst(getOperand(0), Twine(), isVolatile(),
+ getAlignment(), getOrdering(), getSynchScope());
}
StoreInst *StoreInst::clone_impl() const {
- return new StoreInst(getOperand(0), getOperand(1),
- isVolatile(), getAlignment());
+ return new StoreInst(getOperand(0), getOperand(1),isVolatile(),
+ getAlignment(), getOrdering(), getSynchScope());
+
}
AtomicCmpXchgInst *AtomicCmpXchgInst::clone_impl() const {
diff --git a/llvm/lib/VMCore/Verifier.cpp b/llvm/lib/VMCore/Verifier.cpp
index ffb92a7d051..415c85e142d 100644
--- a/llvm/lib/VMCore/Verifier.cpp
+++ b/llvm/lib/VMCore/Verifier.cpp
@@ -1297,6 +1297,15 @@ void Verifier::visitLoadInst(LoadInst &LI) {
Type *ElTy = PTy->getElementType();
Assert2(ElTy == LI.getType(),
"Load result type does not match pointer operand type!", &LI, ElTy);
+ if (LI.isAtomic()) {
+ Assert1(LI.getOrdering() != Release && LI.getOrdering() != AcquireRelease,
+ "Load cannot have Release ordering", &LI);
+ Assert1(LI.getAlignment() != 0,
+ "Atomic load must specify explicit alignment", &LI);
+ } else {
+ Assert1(LI.getSynchScope() == CrossThread,
+ "Non-atomic load cannot have SynchronizationScope specified", &LI);
+ }
visitInstruction(LI);
}
@@ -1307,6 +1316,15 @@ void Verifier::visitStoreInst(StoreInst &SI) {
Assert2(ElTy == SI.getOperand(0)->getType(),
"Stored value type does not match pointer operand type!",
&SI, ElTy);
+ if (SI.isAtomic()) {
+ Assert1(SI.getOrdering() != Acquire && SI.getOrdering() != AcquireRelease,
+ "Store cannot have Acquire ordering", &SI);
+ Assert1(SI.getAlignment() != 0,
+ "Atomic store must specify explicit alignment", &SI);
+ } else {
+ Assert1(SI.getSynchScope() == CrossThread,
+ "Non-atomic store cannot have SynchronizationScope specified", &SI);
+ }
visitInstruction(SI);
}
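
A quick sketch, in the same textual form, of what the new verifier rules above
permit (value names are illustrative; alignment is mandatory on atomic accesses):

    %a = atomic load i32* %p acquire, align 4       ; loads: unordered, monotonic,
                                                    ;        acquire, seq_cst
    atomic store i32 %a, i32* %q release, align 4   ; stores: unordered, monotonic,
                                                    ;         release, seq_cst
    ; rejected: a 'release'/'acq_rel' load, an 'acquire'/'acq_rel' store, or any
    ; atomic load/store without an explicit alignment.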