Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp | 18
-rw-r--r--  llvm/test/Transforms/InstCombine/atomic.ll                     | 61
2 files changed, 71 insertions(+), 8 deletions(-)
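
This change extends InstCombine's load/store type canonicalization to unordered atomic operations, and makes the rewritten instructions keep their volatile and atomic properties. A minimal sketch of the net effect (mirroring test18 below; the %p.cast name is illustrative, InstCombine picks its own):

    ; before instcombine
    %x = load atomic float, float* %p unordered, align 4
    store atomic float %x, float* %p unordered, align 4

    ; after: the access type is canonicalized to i32 and unordered survives
    %p.cast = bitcast float* %p to i32*
    %x1 = load atomic i32, i32* %p.cast unordered, align 4
    store atomic i32 %x1, i32* %p.cast unordered, align 4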
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 3b0b6b767e2..6a5d5a62e1b 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -326,7 +326,8 @@ static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewT
LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
- LI.getAlignment(), LI.getName() + Suffix);
+ LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
+ NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
MDBuilder MDB(NewLoad->getContext());
for (const auto &MDPair : MD) {
unsigned ID = MDPair.first;
@@ -398,7 +399,8 @@ static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value
StoreInst *NewStore = IC.Builder->CreateAlignedStore(
V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
- SI.getAlignment());
+ SI.getAlignment(), SI.isVolatile());
+ NewStore->setAtomic(SI.getOrdering(), SI.getSynchScope());
for (const auto &MDPair : MD) {
unsigned ID = MDPair.first;
MDNode *N = MDPair.second;
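
With these two hunks the rebuilt load/store carries over all four memory properties: alignment, volatility, ordering, and synchronization scope. A hypothetical example (not taken from the test file) of a load passing through combineLoadToNewType:

    ; a singlethread unordered load of float ...
    %v = load atomic float, float* %p singlethread unordered, align 4
    ; ... is rebuilt at the new type with scope and ordering intact
    %v1 = load atomic i32, i32* %p.cast singlethread unordered, align 4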
@@ -456,9 +458,9 @@ static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
- // FIXME: We could probably with some care handle both volatile and atomic
- // loads here but it isn't clear that this is important.
- if (!LI.isSimple())
+ // FIXME: We could probably with some care handle both volatile and ordered
+ // atomic loads here but it isn't clear that this is important.
+ if (!LI.isUnordered())
return nullptr;
if (LI.use_empty())
@@ -989,9 +991,9 @@ static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
- // FIXME: We could probably with some care handle both volatile and atomic
- // stores here but it isn't clear that this is important.
- if (!SI.isSimple())
+ // FIXME: We could probably with some care handle both volatile and ordered
+ // atomic stores here but it isn't clear that this is important.
+ if (!SI.isUnordered())
return false;
Value *V = SI.getValueOperand();
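
The guard change from isSimple() to isUnordered() is what widens the transform: isSimple() rejects every atomic access, while isUnordered() also admits atomics whose ordering is unordered; volatile accesses stay excluded under both predicates. In terms of the loads each guard lets through (a sketch; %p is illustrative):

    %a = load i32, i32* %p, align 4                   ; simple: combined before and after
    %b = load atomic i32, i32* %p unordered, align 4  ; newly combined by this patch
    %c = load volatile i32, i32* %p, align 4          ; still rejected: volatile
    %d = load atomic i32, i32* %p seq_cst, align 4    ; still rejected: ordered atomic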
diff --git a/llvm/test/Transforms/InstCombine/atomic.ll b/llvm/test/Transforms/InstCombine/atomic.ll
index 7b7f0a576b5..15c1659de96 100644
--- a/llvm/test/Transforms/InstCombine/atomic.ll
+++ b/llvm/test/Transforms/InstCombine/atomic.ll
@@ -206,3 +206,64 @@ block2:
merge:
ret i32 0
}
+
+declare void @clobber()
+
+define i32 @test18(float* %p) {
+; CHECK-LABEL: define i32 @test18(
+; CHECK: load atomic i32, i32* [[A:%.*]] unordered, align 4
+; CHECK: store atomic i32 [[B:%.*]], i32* [[C:%.*]] unordered, align 4
+ %x = load atomic float, float* %p unordered, align 4
+ call void @clobber() ;; keep the load around
+ store atomic float %x, float* %p unordered, align 4
+ ret i32 0
+}
+
+; TODO: probably also legal in this case
+define i32 @test19(float* %p) {
+; CHECK-LABEL: define i32 @test19(
+; CHECK: load atomic float, float* %p seq_cst, align 4
+; CHECK: store atomic float %x, float* %p seq_cst, align 4
+ %x = load atomic float, float* %p seq_cst, align 4
+ call void @clobber() ;; keep the load around
+ store atomic float %x, float* %p seq_cst, align 4
+ ret i32 0
+}
+
+define i32 @test20(i32** %p, i8* %v) {
+; CHECK-LABEL: define i32 @test20(
+; CHECK: store atomic i8* %v, i8** [[D:%.*]] unordered, align 4
+ %cast = bitcast i8* %v to i32*
+ store atomic i32* %cast, i32** %p unordered, align 4
+ ret i32 0
+}
+
+define i32 @test21(i32** %p, i8* %v) {
+; CHECK-LABEL: define i32 @test21(
+; CHECK: store atomic i32* %cast, i32** %p monotonic, align 4
+ %cast = bitcast i8* %v to i32*
+ store atomic i32* %cast, i32** %p monotonic, align 4
+ ret i32 0
+}
+
+define void @pr27490a(i8** %p1, i8** %p2) {
+; CHECK-LABEL: define void @pr27490a(
+; CHECK: %1 = bitcast i8** %p1 to i64*
+; CHECK: %l1 = load i64, i64* %1, align 8
+; CHECK: %2 = bitcast i8** %p2 to i64*
+; CHECK: store volatile i64 %l1, i64* %2, align 8
+ %l = load i8*, i8** %p1
+ store volatile i8* %l, i8** %p2
+ ret void
+}
+
+define void @pr27490b(i8** %p1, i8** %p2) {
+; CHECK-LABEL: define void @pr27490b(
+; CHECK: %1 = bitcast i8** %p1 to i64*
+; CHECK: %l1 = load i64, i64* %1, align 8
+; CHECK: %2 = bitcast i8** %p2 to i64*
+; CHECK: store atomic i64 %l1, i64* %2 seq_cst, align 8
+ %l = load i8*, i8** %p1
+ store atomic i8* %l, i8** %p2 seq_cst, align 8
+ ret void
+}
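
The pr27490 tests pin down the bug being fixed: combineLoadToOperationType may rewrite a load together with the store that consumes it, and before this patch combineStoreToNewValue built the replacement store without the isVolatile flag or the setAtomic call, silently dropping those markers. A sketch of the pre-patch miscompile for pr27490a, reconstructed from the old code path rather than recorded output:

    ; input
    %l = load i8*, i8** %p1
    store volatile i8* %l, i8** %p2
    ; pre-patch result: the rewritten i64 store has lost 'volatile'
    %1 = bitcast i8** %p1 to i64*
    %l1 = load i64, i64* %1, align 8
    %2 = bitcast i8** %p2 to i64*
    store i64 %l1, i64* %2, align 8

The same path turned the seq_cst store in pr27490b into a plain non-atomic store; the new CHECK lines verify both markers now survive the type change.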