Diffstat (limited to 'llvm/test/Analysis')
-rw-r--r--  llvm/test/Analysis/MemorySSA/assume.ll                   19
-rw-r--r--  llvm/test/Analysis/MemorySSA/atomic-clobber.ll          119
-rw-r--r--  llvm/test/Analysis/MemorySSA/basicaa-memcpy.ll           16
-rw-r--r--  llvm/test/Analysis/MemorySSA/constant-memory.ll          41
-rw-r--r--  llvm/test/Analysis/MemorySSA/cyclicphi.ll               123
-rw-r--r--  llvm/test/Analysis/MemorySSA/forward-unreachable.ll      23
-rw-r--r--  llvm/test/Analysis/MemorySSA/function-clobber.ll         54
-rw-r--r--  llvm/test/Analysis/MemorySSA/function-mem-attrs.ll       59
-rw-r--r--  llvm/test/Analysis/MemorySSA/invariant-groups.ll        285
-rw-r--r--  llvm/test/Analysis/MemorySSA/lifetime-simple.ll          30
-rw-r--r--  llvm/test/Analysis/MemorySSA/load-invariant.ll           38
-rw-r--r--  llvm/test/Analysis/MemorySSA/many-dom-backedge.ll        77
-rw-r--r--  llvm/test/Analysis/MemorySSA/many-doms.ll                67
-rw-r--r--  llvm/test/Analysis/MemorySSA/multi-edges.ll              32
-rw-r--r--  llvm/test/Analysis/MemorySSA/multiple-backedges-hal.ll   73
-rw-r--r--  llvm/test/Analysis/MemorySSA/multiple-locations.ll       25
-rw-r--r--  llvm/test/Analysis/MemorySSA/no-disconnected.ll          43
-rw-r--r--  llvm/test/Analysis/MemorySSA/optimize-use.ll             37
-rw-r--r--  llvm/test/Analysis/MemorySSA/phi-translation.ll         181
-rw-r--r--  llvm/test/Analysis/MemorySSA/pr28880.ll                  51
-rw-r--r--  llvm/test/Analysis/MemorySSA/ptr-const-mem.ll            23
-rw-r--r--  llvm/test/Analysis/MemorySSA/volatile-clobber.ll         94
22 files changed, 1510 insertions, 0 deletions
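
The annotations the tests below check for (N = MemoryDef(...), MemoryUse(...), N = MemoryPhi(...)) are what the MemorySSA printer emits next to each memory-touching instruction and block. As a rough sketch of how the same information is reached programmatically, under the assumption that a FunctionAnalysisManager with MemorySSAAnalysis registered is already available (the function name here is illustrative, not part of this patch):

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Walk a function and print the MemorySSA access for each block and
// instruction, mirroring what print<memoryssa> interleaves with the IR.
void dumpAccesses(Function &F, FunctionAnalysisManager &FAM) {
  MemorySSA &MSSA = FAM.getResult<MemorySSAAnalysis>(F).getMSSA();
  for (BasicBlock &BB : F) {
    if (MemoryPhi *Phi = MSSA.getMemoryAccess(&BB)) {
      Phi->print(errs());                 // e.g. "3 = MemoryPhi({a,1},{b,2})"
      errs() << "\n";
    }
    for (Instruction &I : BB)
      if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(&I)) {
        MA->print(errs());                // e.g. "1 = MemoryDef(liveOnEntry)"
        errs() << "\n";
      }
  }
}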
diff --git a/llvm/test/Analysis/MemorySSA/assume.ll b/llvm/test/Analysis/MemorySSA/assume.ll
new file mode 100644
index 00000000000..d771c78eb1c
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/assume.ll
@@ -0,0 +1,19 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
+;
+; Ensures that assumes are treated as not reading or writing memory.
+
+declare void @llvm.assume(i1)
+
+define i32 @foo(i32* %a, i32* %b, i1 %c) {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 4
+ store i32 4, i32* %a, align 4
+; CHECK-NOT: MemoryDef
+; CHECK: call void @llvm.assume
+ call void @llvm.assume(i1 %c)
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: %1 = load i32
+ %1 = load i32, i32* %a, align 4
+ ret i32 %1
+}
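
Since the assume is modeled as touching no memory, MemorySSA gives it no access at all, which is what the CHECK-NOT above verifies. A small hedged sketch of how that shows up in the API, assuming MSSA was obtained as in the sketch after the diffstat:

#include "llvm/Analysis/MemorySSA.h"
using namespace llvm;

// Instructions that neither read nor write memory get no MemoryAccess, so a
// lookup for the assume call is expected to return null.
bool ignoredByMemorySSA(MemorySSA &MSSA, Instruction *AssumeCall) {
  return MSSA.getMemoryAccess(AssumeCall) == nullptr;
}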
diff --git a/llvm/test/Analysis/MemorySSA/atomic-clobber.ll b/llvm/test/Analysis/MemorySSA/atomic-clobber.ll
new file mode 100644
index 00000000000..acd819a8935
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/atomic-clobber.ll
@@ -0,0 +1,119 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
+;
+; Ensures that atomic loads count as MemoryDefs
+
+; CHECK-LABEL: define i32 @foo
+define i32 @foo(i32* %a, i32* %b) {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 4
+ store i32 4, i32* %a, align 4
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: %1 = load atomic i32
+ %1 = load atomic i32, i32* %b acquire, align 4
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %2 = load i32
+ %2 = load i32, i32* %a, align 4
+ %3 = add i32 %1, %2
+ ret i32 %3
+}
+
+; CHECK-LABEL: define void @bar
+define void @bar(i32* %a) {
+; CHECK: MemoryUse(liveOnEntry)
+; CHECK-NEXT: load atomic i32, i32* %a unordered, align 4
+ load atomic i32, i32* %a unordered, align 4
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: load atomic i32, i32* %a monotonic, align 4
+ load atomic i32, i32* %a monotonic, align 4
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: load atomic i32, i32* %a acquire, align 4
+ load atomic i32, i32* %a acquire, align 4
+; CHECK: 3 = MemoryDef(2)
+; CHECK-NEXT: load atomic i32, i32* %a seq_cst, align 4
+ load atomic i32, i32* %a seq_cst, align 4
+ ret void
+}
+
+; CHECK-LABEL: define void @baz
+define void @baz(i32* %a) {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: %1 = load atomic i32
+ %1 = load atomic i32, i32* %a acquire, align 4
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: %2 = load atomic i32, i32* %a unordered, align 4
+ %2 = load atomic i32, i32* %a unordered, align 4
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: %3 = load atomic i32, i32* %a monotonic, align 4
+ %3 = load atomic i32, i32* %a monotonic, align 4
+ ret void
+}
+
+; CHECK-LABEL: define void @fences
+define void @fences(i32* %a) {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: fence acquire
+ fence acquire
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: %1 = load i32, i32* %a
+ %1 = load i32, i32* %a
+
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: fence release
+ fence release
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %2 = load i32, i32* %a
+ %2 = load i32, i32* %a
+
+; CHECK: 3 = MemoryDef(2)
+; CHECK-NEXT: fence acq_rel
+ fence acq_rel
+; CHECK: MemoryUse(3)
+; CHECK-NEXT: %3 = load i32, i32* %a
+ %3 = load i32, i32* %a
+
+; CHECK: 4 = MemoryDef(3)
+; CHECK-NEXT: fence seq_cst
+ fence seq_cst
+; CHECK: MemoryUse(4)
+; CHECK-NEXT: %4 = load i32, i32* %a
+ %4 = load i32, i32* %a
+ ret void
+}
+
+; CHECK-LABEL: define void @seq_cst_clobber
+define void @seq_cst_clobber(i32* noalias %a, i32* noalias %b) {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: %1 = load atomic i32, i32* %a monotonic, align 4
+ load atomic i32, i32* %a monotonic, align 4
+
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: %2 = load atomic i32, i32* %a seq_cst, align 4
+ load atomic i32, i32* %a seq_cst, align 4
+
+; CHECK: 3 = MemoryDef(2)
+; CHECK-NEXT: load atomic i32, i32* %a monotonic, align 4
+ load atomic i32, i32* %a monotonic, align 4
+
+ ret void
+}
+
+; Ensure that AA hands us MRI_Mod on unreorderable atomic ops.
+;
+; This test is a bit implementation-specific. In particular, it depends on
+; the fact that we pass cmpxchg-load queries to AA, rather than trying to
+; reason about them on our own.
+;
+; If AA gets more aggressive, we can find another way.
+;
+; CHECK-LABEL: define void @check_aa_is_sane
+define void @check_aa_is_sane(i32* noalias %a, i32* noalias %b) {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: cmpxchg i32* %a, i32 0, i32 1 acquire acquire
+ cmpxchg i32* %a, i32 0, i32 1 acquire acquire
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: load i32, i32* %b, align 4
+ load i32, i32* %b, align 4
+
+ ret void
+}
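
The check_aa_is_sane comment relies on what AA answers for a cmpxchg against a location based on a noalias pointer. A sketch of that query, assuming an AAResults instance is at hand (variable names are illustrative):

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Even though %a and %b are noalias, AA is expected to report that the
// acquire cmpxchg may modify the load's location: its ordering makes it
// unreorderable, which is why the load above is MemoryUse(1) rather than
// MemoryUse(liveOnEntry).
ModRefInfo cmpxchgVsLoad(AAResults &AA, AtomicCmpXchgInst *CX, LoadInst *LI) {
  return AA.getModRefInfo(CX, MemoryLocation::get(LI));
}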
diff --git a/llvm/test/Analysis/MemorySSA/basicaa-memcpy.ll b/llvm/test/Analysis/MemorySSA/basicaa-memcpy.ll
new file mode 100644
index 00000000000..bfd7c899b59
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/basicaa-memcpy.ll
@@ -0,0 +1,16 @@
+; RUN: opt -disable-output -basicaa -print-memoryssa %s 2>&1 | FileCheck %s
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+
+define void @source_clobber(i8* %a, i8* %b) {
+; CHECK-LABEL: @source_clobber(
+; CHECK-NEXT: ; 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 128, i32 1, i1 false)
+; CHECK-NEXT: ; MemoryUse(liveOnEntry)
+; CHECK-NEXT: [[X:%.*]] = load i8, i8* %b
+; CHECK-NEXT: ret void
+;
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 128, i32 1, i1 false)
+ %x = load i8, i8* %b
+ ret void
+}
diff --git a/llvm/test/Analysis/MemorySSA/constant-memory.ll b/llvm/test/Analysis/MemorySSA/constant-memory.ll
new file mode 100644
index 00000000000..bc970e72fc4
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/constant-memory.ll
@@ -0,0 +1,41 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+;
+; Things that BasicAA can prove point to constant memory should be
+; liveOnEntry as well.
+
+declare void @clobberAllTheThings()
+
+@str = private unnamed_addr constant [2 x i8] c"hi"
+
+define i8 @foo() {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: call void @clobberAllTheThings()
+ call void @clobberAllTheThings()
+ %1 = getelementptr [2 x i8], [2 x i8]* @str, i64 0, i64 0
+; CHECK: MemoryUse(liveOnEntry)
+; CHECK-NEXT: %2 = load i8
+ %2 = load i8, i8* %1, align 1
+ %3 = getelementptr [2 x i8], [2 x i8]* @str, i64 0, i64 1
+; CHECK: MemoryUse(liveOnEntry)
+; CHECK-NEXT: %4 = load i8
+ %4 = load i8, i8* %3, align 1
+ %5 = add i8 %2, %4
+ ret i8 %5
+}
+
+define i8 @select(i1 %b) {
+ %1 = alloca i8, align 1
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i8 0
+ store i8 0, i8* %1, align 1
+
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: call void @clobberAllTheThings()
+ call void @clobberAllTheThings()
+ %2 = getelementptr [2 x i8], [2 x i8]* @str, i64 0, i64 0
+ %3 = select i1 %b, i8* %2, i8* %1
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %4 = load i8
+ %4 = load i8, i8* %3, align 1
+ ret i8 %4
+}
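
Both liveOnEntry uses in @foo fall out of a single AA property: the loaded location is provably constant. A minimal sketch of the underlying query, again assuming an AAResults instance:

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// A load from memory AA can prove constant can never observe a new value,
// so MemorySSA may treat it as liveOnEntry regardless of intervening defs.
bool loadsConstantMemory(AAResults &AA, LoadInst *LI) {
  return AA.pointsToConstantMemory(MemoryLocation::get(LI));
}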
diff --git a/llvm/test/Analysis/MemorySSA/cyclicphi.ll b/llvm/test/Analysis/MemorySSA/cyclicphi.ll
new file mode 100644
index 00000000000..6e91db959e4
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/cyclicphi.ll
@@ -0,0 +1,123 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
+
+%struct.hoge = type { i32, %struct.widget }
+%struct.widget = type { i64 }
+
+define hidden void @quux(%struct.hoge *%f) align 2 {
+ %tmp = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1, i32 0
+ %tmp24 = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1
+ %tmp25 = bitcast %struct.widget* %tmp24 to i64**
+ br label %bb26
+
+bb26: ; preds = %bb77, %0
+; CHECK: 2 = MemoryPhi({%0,liveOnEntry},{bb77,3})
+; CHECK-NEXT: br i1 undef, label %bb68, label %bb77
+ br i1 undef, label %bb68, label %bb77
+
+bb68: ; preds = %bb26
+; CHECK: MemoryUse(liveOnEntry)
+; CHECK-NEXT: %tmp69 = load i64, i64* null, align 8
+ %tmp69 = load i64, i64* null, align 8
+; CHECK: 1 = MemoryDef(2)
+; CHECK-NEXT: store i64 %tmp69, i64* %tmp, align 8
+ store i64 %tmp69, i64* %tmp, align 8
+ br label %bb77
+
+bb77: ; preds = %bb68, %bb26
+; CHECK: 3 = MemoryPhi({bb26,2},{bb68,1})
+; CHECK: MemoryUse(3)
+; CHECK-NEXT: %tmp78 = load i64*, i64** %tmp25, align 8
+ %tmp78 = load i64*, i64** %tmp25, align 8
+ %tmp79 = getelementptr inbounds i64, i64* %tmp78, i64 undef
+ br label %bb26
+}
+
+; CHECK-LABEL: define void @quux_skip
+define void @quux_skip(%struct.hoge* noalias %f, i64* noalias %g) align 2 {
+ %tmp = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1, i32 0
+ %tmp24 = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1
+ %tmp25 = bitcast %struct.widget* %tmp24 to i64**
+ br label %bb26
+
+bb26: ; preds = %bb77, %0
+; CHECK: 2 = MemoryPhi({%0,liveOnEntry},{bb77,3})
+; CHECK-NEXT: br i1 undef, label %bb68, label %bb77
+ br i1 undef, label %bb68, label %bb77
+
+bb68: ; preds = %bb26
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %tmp69 = load i64, i64* %g, align 8
+ %tmp69 = load i64, i64* %g, align 8
+; CHECK: 1 = MemoryDef(2)
+; CHECK-NEXT: store i64 %tmp69, i64* %g, align 8
+ store i64 %tmp69, i64* %g, align 8
+ br label %bb77
+
+bb77: ; preds = %bb68, %bb26
+; CHECK: 3 = MemoryPhi({bb26,2},{bb68,1})
+; CHECK: MemoryUse(liveOnEntry)
+; CHECK-NEXT: %tmp78 = load i64*, i64** %tmp25, align 8
+ %tmp78 = load i64*, i64** %tmp25, align 8
+ br label %bb26
+}
+
+; CHECK-LABEL: define void @quux_dominated
+define void @quux_dominated(%struct.hoge* noalias %f, i64* noalias %g) align 2 {
+ %tmp = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1, i32 0
+ %tmp24 = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1
+ %tmp25 = bitcast %struct.widget* %tmp24 to i64**
+ br label %bb26
+
+bb26: ; preds = %bb77, %0
+; CHECK: 3 = MemoryPhi({%0,liveOnEntry},{bb77,2})
+; CHECK: MemoryUse(3)
+; CHECK-NEXT: load i64*, i64** %tmp25, align 8
+ load i64*, i64** %tmp25, align 8
+ br i1 undef, label %bb68, label %bb77
+
+bb68: ; preds = %bb26
+; CHECK: MemoryUse(3)
+; CHECK-NEXT: %tmp69 = load i64, i64* %g, align 8
+ %tmp69 = load i64, i64* %g, align 8
+; CHECK: 1 = MemoryDef(3)
+; CHECK-NEXT: store i64 %tmp69, i64* %g, align 8
+ store i64 %tmp69, i64* %g, align 8
+ br label %bb77
+
+bb77: ; preds = %bb68, %bb26
+; CHECK: 4 = MemoryPhi({bb26,3},{bb68,1})
+; CHECK: 2 = MemoryDef(4)
+; CHECK-NEXT: store i64* null, i64** %tmp25, align 8
+ store i64* null, i64** %tmp25, align 8
+ br label %bb26
+}
+
+; CHECK-LABEL: define void @quux_nodominate
+define void @quux_nodominate(%struct.hoge* noalias %f, i64* noalias %g) align 2 {
+ %tmp = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1, i32 0
+ %tmp24 = getelementptr inbounds %struct.hoge, %struct.hoge* %f, i64 0, i32 1
+ %tmp25 = bitcast %struct.widget* %tmp24 to i64**
+ br label %bb26
+
+bb26: ; preds = %bb77, %0
+; CHECK: 2 = MemoryPhi({%0,liveOnEntry},{bb77,3})
+; CHECK: MemoryUse(liveOnEntry)
+; CHECK-NEXT: load i64*, i64** %tmp25, align 8
+ load i64*, i64** %tmp25, align 8
+ br i1 undef, label %bb68, label %bb77
+
+bb68: ; preds = %bb26
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %tmp69 = load i64, i64* %g, align 8
+ %tmp69 = load i64, i64* %g, align 8
+; CHECK: 1 = MemoryDef(2)
+; CHECK-NEXT: store i64 %tmp69, i64* %g, align 8
+ store i64 %tmp69, i64* %g, align 8
+ br label %bb77
+
+bb77: ; preds = %bb68, %bb26
+; CHECK: 3 = MemoryPhi({bb26,2},{bb68,1})
+; CHECK-NEXT: br label %bb26
+ br label %bb26
+}
diff --git a/llvm/test/Analysis/MemorySSA/forward-unreachable.ll b/llvm/test/Analysis/MemorySSA/forward-unreachable.ll
new file mode 100644
index 00000000000..2bbf399daae
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/forward-unreachable.ll
@@ -0,0 +1,23 @@
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+
+define void @test() {
+entry:
+ br i1 undef, label %split1, label %split2
+
+split1:
+ store i16 undef, i16* undef, align 2
+ br label %merge
+split2:
+ br label %merge
+forwardunreachable:
+ br label %merge
+merge:
+; The forwardunreachable block still needs an entry in the phi node, because
+; it is reverse-reachable: the CFG still lists it as a predecessor of the
+; merge block.
+; CHECK: 3 = MemoryPhi({split1,1},{split2,liveOnEntry},{forwardunreachable,liveOnEntry})
+ store i16 undef, i16* undef, align 2
+ ret void
+}
+
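
The property the comment in @test relies on is purely structural: predecessor lists do not care about forward reachability. A small sketch of the check, using only the CFG helpers:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
using namespace llvm;

// A block that is unreachable from the entry block can still appear in a
// successor's predecessor list, so a MemoryPhi in that successor must carry
// an incoming value for the corresponding edge.
bool hasEdgeFrom(BasicBlock *Merge, BasicBlock *ForwardUnreachable) {
  for (BasicBlock *Pred : predecessors(Merge))
    if (Pred == ForwardUnreachable)
      return true;
  return false;
}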
diff --git a/llvm/test/Analysis/MemorySSA/function-clobber.ll b/llvm/test/Analysis/MemorySSA/function-clobber.ll
new file mode 100644
index 00000000000..a01893a5b95
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/function-clobber.ll
@@ -0,0 +1,54 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
+;
+; Ensuring that external functions without attributes are MemoryDefs
+
+@g = external global i32
+declare void @modifyG()
+
+define i32 @foo() {
+; CHECK: MemoryUse(liveOnEntry)
+; CHECK-NEXT: %1 = load i32
+ %1 = load i32, i32* @g
+
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 4
+ store i32 4, i32* @g, align 4
+
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: call void @modifyG()
+ call void @modifyG()
+
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %2 = load i32
+ %2 = load i32, i32* @g
+ %3 = add i32 %2, %1
+ ret i32 %3
+}
+
+declare void @readEverything() readonly
+declare void @clobberEverything()
+
+; CHECK-LABEL: define void @bar
+define void @bar() {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: call void @clobberEverything()
+ call void @clobberEverything()
+ br i1 undef, label %if.end, label %if.then
+
+if.then:
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: call void @readEverything()
+ call void @readEverything()
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: call void @clobberEverything()
+ call void @clobberEverything()
+ br label %if.end
+
+if.end:
+; CHECK: 3 = MemoryPhi({%0,1},{if.then,2})
+; CHECK: MemoryUse(3)
+; CHECK-NEXT: call void @readEverything()
+ call void @readEverything()
+ ret void
+}
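
Which kind of access a call becomes above is decided by its memory attributes. A hedged sketch of the classification, written against the current CallBase API (older revisions of this code used CallSite; the enum here is illustrative):

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

enum class AccessKind { None, Use, Def };

// Calls that touch no memory need no access; calls that only read can be
// MemoryUses; anything else (like @modifyG or @clobberEverything above)
// must conservatively be a MemoryDef.
AccessKind classifyCall(const CallBase &CB) {
  if (CB.doesNotAccessMemory())
    return AccessKind::None;
  if (CB.onlyReadsMemory())
    return AccessKind::Use;
  return AccessKind::Def;
}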
diff --git a/llvm/test/Analysis/MemorySSA/function-mem-attrs.ll b/llvm/test/Analysis/MemorySSA/function-mem-attrs.ll
new file mode 100644
index 00000000000..11383771a41
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/function-mem-attrs.ll
@@ -0,0 +1,59 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
+;
+; Test that various function attributes give us sane results.
+
+@g = external global i32
+
+declare void @readonlyFunction() readonly
+declare void @noattrsFunction()
+
+define void @readonlyAttr() {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 0
+ store i32 0, i32* @g, align 4
+
+ %1 = alloca i32, align 4
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: store i32 0
+ store i32 0, i32* %1, align 4
+
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: call void @readonlyFunction()
+ call void @readonlyFunction()
+
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: call void @noattrsFunction() #
+; Assume that #N is readonly
+ call void @noattrsFunction() readonly
+
+ ; Sanity check that noattrsFunction is otherwise a MemoryDef
+; CHECK: 3 = MemoryDef(2)
+; CHECK-NEXT: call void @noattrsFunction()
+ call void @noattrsFunction()
+ ret void
+}
+
+declare void @argMemOnly(i32*) argmemonly
+
+define void @inaccessableOnlyAttr() {
+ %1 = alloca i32, align 4
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 0
+ store i32 0, i32* %1, align 4
+
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: store i32 0
+ store i32 0, i32* @g, align 4
+
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: call void @argMemOnly(i32* %1) #
+; Assume that #N is readonly
+ call void @argMemOnly(i32* %1) readonly
+
+; CHECK: 3 = MemoryDef(2)
+; CHECK-NEXT: call void @argMemOnly(i32* %1)
+ call void @argMemOnly(i32* %1)
+
+ ret void
+}
diff --git a/llvm/test/Analysis/MemorySSA/invariant-groups.ll b/llvm/test/Analysis/MemorySSA/invariant-groups.ll
new file mode 100644
index 00000000000..6e94ae178db
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/invariant-groups.ll
@@ -0,0 +1,285 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+;
+; Currently, MemorySSA doesn't support invariant groups. So, we should ignore
+; invariant.group.barrier intrinsics entirely. We'll need to pay attention to
+; them when/if we decide to support invariant groups.
+
+@g = external global i32
+
+define i32 @foo(i32* %a) {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 0
+ store i32 0, i32* %a, align 4, !invariant.group !0
+
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: store i32 1
+ store i32 1, i32* @g, align 4
+
+ %1 = bitcast i32* %a to i8*
+ %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
+ %a32 = bitcast i8* %a8 to i32*
+
+; This has to be MemoryUse(2), because we can't skip the barrier based on
+; invariant.group.
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %2 = load i32
+ %2 = load i32, i32* %a32, align 4, !invariant.group !0
+ ret i32 %2
+}
+
+define i32 @skipBarrier(i32* %a) {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 0
+ store i32 0, i32* %a, align 4, !invariant.group !0
+
+ %1 = bitcast i32* %a to i8*
+ %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
+ %a32 = bitcast i8* %a8 to i32*
+
+; We can skip the barrier only if the "skip" is not based on !invariant.group.
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: %2 = load i32
+ %2 = load i32, i32* %a32, align 4, !invariant.group !0
+ ret i32 %2
+}
+
+define i32 @skipBarrier2(i32* %a) {
+
+; CHECK: MemoryUse(liveOnEntry)
+; CHECK-NEXT: %v = load i32
+ %v = load i32, i32* %a, align 4, !invariant.group !0
+
+ %1 = bitcast i32* %a to i8*
+ %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
+ %a32 = bitcast i8* %a8 to i32*
+
+; We can skip the barrier only if the "skip" is not based on !invariant.group.
+; CHECK: MemoryUse(liveOnEntry)
+; CHECK-NEXT: %v2 = load i32
+ %v2 = load i32, i32* %a32, align 4, !invariant.group !0
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 1
+ store i32 1, i32* @g, align 4
+
+; FIXME: based on invariant.group it should be MemoryUse(liveOnEntry)
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: %v3 = load i32
+ %v3 = load i32, i32* %a32, align 4, !invariant.group !0
+ %add = add nsw i32 %v2, %v3
+ %add2 = add nsw i32 %add, %v
+ ret i32 %add2
+}
+
+define i32 @handleInvariantGroups(i32* %a) {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 0
+ store i32 0, i32* %a, align 4, !invariant.group !0
+
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: store i32 1
+ store i32 1, i32* @g, align 4
+ %1 = bitcast i32* %a to i8*
+ %a8 = call i8* @llvm.invariant.group.barrier(i8* %1)
+ %a32 = bitcast i8* %a8 to i32*
+
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %2 = load i32
+ %2 = load i32, i32* %a32, align 4, !invariant.group !0
+
+; CHECK: 3 = MemoryDef(2)
+; CHECK-NEXT: store i32 2
+ store i32 2, i32* @g, align 4
+
+; FIXME: This can be changed to MemoryUse(2)
+; CHECK: MemoryUse(3)
+; CHECK-NEXT: %3 = load i32
+ %3 = load i32, i32* %a32, align 4, !invariant.group !0
+ %add = add nsw i32 %2, %3
+ ret i32 %add
+}
+
+define i32 @loop(i1 %a) {
+entry:
+ %0 = alloca i32, align 4
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 4
+ store i32 4, i32* %0, !invariant.group !0
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: call void @clobber
+ call void @clobber(i32* %0)
+ br i1 %a, label %Loop.Body, label %Loop.End
+
+Loop.Body:
+; FIXME: MemoryUse(1)
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %1 = load i32
+ %1 = load i32, i32* %0, !invariant.group !0
+ br i1 %a, label %Loop.End, label %Loop.Body
+
+Loop.End:
+; FIXME: MemoryUse(1)
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %2 = load
+ %2 = load i32, i32* %0, align 4, !invariant.group !0
+ br i1 %a, label %Ret, label %Loop.Body
+
+Ret:
+ ret i32 %2
+}
+
+define i8 @loop2(i8* %p) {
+entry:
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i8
+ store i8 4, i8* %p, !invariant.group !0
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: call void @clobber
+ call void @clobber8(i8* %p)
+ %after = call i8* @llvm.invariant.group.barrier(i8* %p)
+ br i1 undef, label %Loop.Body, label %Loop.End
+
+Loop.Body:
+; 4 = MemoryPhi({entry,2},{Loop.Body,3},{Loop.End,5})
+; CHECK: MemoryUse(4)
+; CHECK-NEXT: %0 = load i8
+ %0 = load i8, i8* %after, !invariant.group !0
+
+; FIXME: MemoryUse(1)
+; CHECK: MemoryUse(4)
+; CHECK-NEXT: %1 = load i8
+ %1 = load i8, i8* %p, !invariant.group !0
+
+; CHECK: 3 = MemoryDef(4)
+ store i8 4, i8* %after, !invariant.group !0
+
+ br i1 undef, label %Loop.End, label %Loop.Body
+
+Loop.End:
+; 5 = MemoryPhi({entry,2},{Loop.Body,3})
+; CHECK: MemoryUse(5)
+; CHECK-NEXT: %2 = load
+ %2 = load i8, i8* %after, align 4, !invariant.group !0
+
+; FIXME: MemoryUse(1)
+; CHECK: MemoryUse(5)
+; CHECK-NEXT: %3 = load
+ %3 = load i8, i8* %p, align 4, !invariant.group !0
+ br i1 undef, label %Ret, label %Loop.Body
+
+Ret:
+ ret i8 %3
+}
+
+
+define i8 @loop3(i8* %p) {
+entry:
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i8
+ store i8 4, i8* %p, !invariant.group !0
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: call void @clobber
+ call void @clobber8(i8* %p)
+ %after = call i8* @llvm.invariant.group.barrier(i8* %p)
+ br i1 undef, label %Loop.Body, label %Loop.End
+
+Loop.Body:
+; CHECK: 6 = MemoryPhi({entry,2},{Loop.Body,3},{Loop.next,4},{Loop.End,5})
+; CHECK: MemoryUse(6)
+; CHECK-NEXT: %0 = load i8
+ %0 = load i8, i8* %after, !invariant.group !0
+
+; CHECK: 3 = MemoryDef(6)
+; CHECK-NEXT: call void @clobber8
+ call void @clobber8(i8* %after)
+
+; FIXME: MemoryUse(6)
+; CHECK: MemoryUse(3)
+; CHECK-NEXT: %1 = load i8
+ %1 = load i8, i8* %after, !invariant.group !0
+
+ br i1 undef, label %Loop.next, label %Loop.Body
+Loop.next:
+; CHECK: 4 = MemoryDef(3)
+; CHECK-NEXT: call void @clobber8
+ call void @clobber8(i8* %after)
+
+; FIXME: MemoryUse(6)
+; CHECK: MemoryUse(4)
+; CHECK-NEXT: %2 = load i8
+ %2 = load i8, i8* %after, !invariant.group !0
+
+ br i1 undef, label %Loop.End, label %Loop.Body
+
+Loop.End:
+; CHECK: 7 = MemoryPhi({entry,2},{Loop.next,4})
+; CHECK: MemoryUse(7)
+; CHECK-NEXT: %3 = load
+ %3 = load i8, i8* %after, align 4, !invariant.group !0
+
+; CHECK: 5 = MemoryDef(7)
+; CHECK-NEXT: call void @clobber8
+ call void @clobber8(i8* %after)
+
+; FIXME: MemoryUse(7)
+; CHECK: MemoryUse(5)
+; CHECK-NEXT: %4 = load
+ %4 = load i8, i8* %after, align 4, !invariant.group !0
+ br i1 undef, label %Ret, label %Loop.Body
+
+Ret:
+ ret i8 %3
+}
+
+define i8 @loop4(i8* %p) {
+entry:
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i8
+ store i8 4, i8* %p, !invariant.group !0
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: call void @clobber
+ call void @clobber8(i8* %p)
+ %after = call i8* @llvm.invariant.group.barrier(i8* %p)
+ br i1 undef, label %Loop.Pre, label %Loop.End
+
+Loop.Pre:
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %0 = load i8
+ %0 = load i8, i8* %after, !invariant.group !0
+ br label %Loop.Body
+Loop.Body:
+; CHECK: 4 = MemoryPhi({Loop.Pre,2},{Loop.Body,3},{Loop.End,5})
+; CHECK-NEXT: MemoryUse(4)
+; CHECK-NEXT: %1 = load i8
+ %1 = load i8, i8* %after, !invariant.group !0
+
+; FIXME: MemoryUse(2)
+; CHECK: MemoryUse(4)
+; CHECK-NEXT: %2 = load i8
+ %2 = load i8, i8* %p, !invariant.group !0
+
+; CHECK: 3 = MemoryDef(4)
+ store i8 4, i8* %after, !invariant.group !0
+ br i1 undef, label %Loop.End, label %Loop.Body
+
+Loop.End:
+; CHECK: 5 = MemoryPhi({entry,2},{Loop.Body,3})
+; CHECK-NEXT: MemoryUse(5)
+; CHECK-NEXT: %3 = load
+ %3 = load i8, i8* %after, align 4, !invariant.group !0
+
+; FIXME: MemoryUse(2)
+; CHECK: MemoryUse(5)
+; CHECK-NEXT: %4 = load
+ %4 = load i8, i8* %p, align 4, !invariant.group !0
+ br i1 undef, label %Ret, label %Loop.Body
+
+Ret:
+ ret i8 %3
+}
+
+declare i8* @llvm.invariant.group.barrier(i8*)
+declare void @clobber(i32*)
+declare void @clobber8(i8*)
+
+
+!0 = !{!"group1"}
diff --git a/llvm/test/Analysis/MemorySSA/lifetime-simple.ll b/llvm/test/Analysis/MemorySSA/lifetime-simple.ll
new file mode 100644
index 00000000000..f1db15cc577
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/lifetime-simple.ll
@@ -0,0 +1,30 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
+; This test checks two things:
+; First, the lifetime markers should not clobber the use of the unrelated pointer %Q.
+; Second, the load of %P after the lifetime.end marker is MemoryUse(liveOnEntry),
+; given the placement of the markers vs the loads.
+
+define i8 @test(i8* %P, i8* %Q) {
+entry:
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 32, i8* %P)
+ call void @llvm.lifetime.start.p0i8(i64 32, i8* %P)
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: %0 = load i8, i8* %P
+ %0 = load i8, i8* %P
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: store i8 1, i8* %P
+ store i8 1, i8* %P
+; CHECK: 3 = MemoryDef(2)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 32, i8* %P)
+ call void @llvm.lifetime.end.p0i8(i64 32, i8* %P)
+; CHECK: MemoryUse(liveOnEntry)
+; CHECK-NEXT: %1 = load i8, i8* %P
+ %1 = load i8, i8* %P
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %2 = load i8, i8* %Q
+ %2 = load i8, i8* %Q
+ ret i8 %1
+}
+declare void @llvm.lifetime.start.p0i8(i64 %S, i8* nocapture %P) readonly
+declare void @llvm.lifetime.end.p0i8(i64 %S, i8* nocapture %P)
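
The liveOnEntry annotation on the load of %P after lifetime.end can also be observed through the clobber walker rather than the printer; a minimal sketch, with MSSA assumed to come from either RUN pipeline:

#include "llvm/Analysis/MemorySSA.h"
using namespace llvm;

// For the load of %P that follows lifetime.end, the walker's clobber should
// be the special liveOnEntry def: nothing inside the function still defines
// that location for this read.
bool clobberIsLiveOnEntry(MemorySSA &MSSA, Instruction *Load) {
  MemoryAccess *MA = MSSA.getMemoryAccess(Load);
  MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(MA);
  return MSSA.isLiveOnEntryDef(Clobber);
}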
diff --git a/llvm/test/Analysis/MemorySSA/load-invariant.ll b/llvm/test/Analysis/MemorySSA/load-invariant.ll
new file mode 100644
index 00000000000..defb74641ea
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/load-invariant.ll
@@ -0,0 +1,38 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>' -verify-memoryssa -disable-output < %s 2>&1 | FileCheck %s
+;
+; Invariant loads should be considered live on entry, because, once the
+; location is known to be dereferenceable, the value can never change.
+
+@g = external global i32
+
+declare void @clobberAllTheThings()
+
+; CHECK-LABEL: define i32 @foo
+define i32 @foo() {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: call void @clobberAllTheThings()
+ call void @clobberAllTheThings()
+; CHECK: MemoryUse(liveOnEntry)
+; CHECK-NEXT: %1 = load i32
+ %1 = load i32, i32* @g, align 4, !invariant.load !0
+ ret i32 %1
+}
+
+; CHECK-LABEL: define i32 @bar
+define i32 @bar(i32* %a) {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: call void @clobberAllTheThings()
+ call void @clobberAllTheThings()
+
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: %1 = load atomic i32
+ %1 = load atomic i32, i32* %a acquire, align 4, !invariant.load !0
+
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %2 = load i32
+ %2 = load i32, i32* %a, align 4
+ ret i32 %2
+}
+
+!0 = !{}
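
The only thing separating @foo's load from an ordinary one is its metadata; a short sketch of the test an analysis would make (MD_invariant_load is the kind ID LLVM uses for !invariant.load):

#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

// A load tagged !invariant.load promises that, once the location is known
// dereferenceable, its value never changes; a simple (non-atomic) one can
// therefore be liveOnEntry even after a clobbering call.
bool isInvariantLoad(const LoadInst *LI) {
  return LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
}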
diff --git a/llvm/test/Analysis/MemorySSA/many-dom-backedge.ll b/llvm/test/Analysis/MemorySSA/many-dom-backedge.ll
new file mode 100644
index 00000000000..c2216a47bb2
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/many-dom-backedge.ll
@@ -0,0 +1,77 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
+;
+; many-doms.ll, with an added back edge into the switch.
+; Because people love their gotos.
+
+declare i1 @getBool() readnone
+
+define i32 @foo(i32* %p) {
+entry:
+ br label %loopbegin
+
+loopbegin:
+; CHECK: 8 = MemoryPhi({entry,liveOnEntry},{sw.epilog,6})
+; CHECK-NEXT: %n =
+ %n = phi i32 [ 0, %entry ], [ %1, %sw.epilog ]
+ %m = alloca i32, align 4
+ switch i32 %n, label %sw.default [
+ i32 0, label %sw.bb
+ i32 1, label %sw.bb1
+ i32 2, label %sw.bb2
+ i32 3, label %sw.bb3
+ ]
+
+sw.bb:
+; CHECK: 1 = MemoryDef(8)
+; CHECK-NEXT: store i32 1
+ store i32 1, i32* %m, align 4
+ br label %sw.epilog
+
+sw.bb1:
+; CHECK: 2 = MemoryDef(8)
+; CHECK-NEXT: store i32 2
+ store i32 2, i32* %m, align 4
+ br label %sw.epilog
+
+sw.bb2:
+; CHECK: 3 = MemoryDef(8)
+; CHECK-NEXT: store i32 3
+ store i32 3, i32* %m, align 4
+ br label %sw.epilog
+
+sw.bb3:
+; CHECK: 9 = MemoryPhi({loopbegin,8},{sw.almostexit,6})
+; CHECK: 4 = MemoryDef(9)
+; CHECK-NEXT: store i32 4
+ store i32 4, i32* %m, align 4
+ br label %sw.epilog
+
+sw.default:
+; CHECK: 5 = MemoryDef(8)
+; CHECK-NEXT: store i32 5
+ store i32 5, i32* %m, align 4
+ br label %sw.epilog
+
+sw.epilog:
+; CHECK: 10 = MemoryPhi({sw.default,5},{sw.bb3,4},{sw.bb,1},{sw.bb1,2},{sw.bb2,3})
+; CHECK-NEXT: MemoryUse(10)
+; CHECK-NEXT: %0 =
+ %0 = load i32, i32* %m, align 4
+; CHECK: 6 = MemoryDef(10)
+; CHECK-NEXT: %1 =
+ %1 = load volatile i32, i32* %p, align 4
+ %2 = icmp eq i32 %0, %1
+ br i1 %2, label %sw.almostexit, label %loopbegin
+
+sw.almostexit:
+ %3 = icmp eq i32 0, %1
+ br i1 %3, label %exit, label %sw.bb3
+
+exit:
+; CHECK: 7 = MemoryDef(6)
+; CHECK-NEXT: %4 = load volatile i32
+ %4 = load volatile i32, i32* %p, align 4
+ %5 = add i32 %4, %1
+ ret i32 %5
+}
diff --git a/llvm/test/Analysis/MemorySSA/many-doms.ll b/llvm/test/Analysis/MemorySSA/many-doms.ll
new file mode 100644
index 00000000000..1f57cbf1c4d
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/many-doms.ll
@@ -0,0 +1,67 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
+;
+; Testing many dominators, specifically from a switch statement in C.
+
+declare i1 @getBool() readnone
+
+define i32 @foo(i32* %p) {
+entry:
+ br label %loopbegin
+
+loopbegin:
+; CHECK: 7 = MemoryPhi({entry,liveOnEntry},{sw.epilog,6})
+; CHECK-NEXT: %n =
+ %n = phi i32 [ 0, %entry ], [ %1, %sw.epilog ]
+ %m = alloca i32, align 4
+ switch i32 %n, label %sw.default [
+ i32 0, label %sw.bb
+ i32 1, label %sw.bb1
+ i32 2, label %sw.bb2
+ i32 3, label %sw.bb3
+ ]
+
+sw.bb:
+; CHECK: 1 = MemoryDef(7)
+; CHECK-NEXT: store i32 1
+ store i32 1, i32* %m, align 4
+ br label %sw.epilog
+
+sw.bb1:
+; CHECK: 2 = MemoryDef(7)
+; CHECK-NEXT: store i32 2
+ store i32 2, i32* %m, align 4
+ br label %sw.epilog
+
+sw.bb2:
+; CHECK: 3 = MemoryDef(7)
+; CHECK-NEXT: store i32 3
+ store i32 3, i32* %m, align 4
+ br label %sw.epilog
+
+sw.bb3:
+; CHECK: 4 = MemoryDef(7)
+; CHECK-NEXT: store i32 4
+ store i32 4, i32* %m, align 4
+ br label %sw.epilog
+
+sw.default:
+; CHECK: 5 = MemoryDef(7)
+; CHECK-NEXT: store i32 5
+ store i32 5, i32* %m, align 4
+ br label %sw.epilog
+
+sw.epilog:
+; CHECK: 8 = MemoryPhi({sw.default,5},{sw.bb,1},{sw.bb1,2},{sw.bb2,3},{sw.bb3,4})
+; CHECK-NEXT: MemoryUse(8)
+; CHECK-NEXT: %0 =
+ %0 = load i32, i32* %m, align 4
+; CHECK: 6 = MemoryDef(8)
+; CHECK-NEXT: %1 =
+ %1 = load volatile i32, i32* %p, align 4
+ %2 = icmp eq i32 %0, %1
+ br i1 %2, label %exit, label %loopbegin
+
+exit:
+ ret i32 %1
+}
diff --git a/llvm/test/Analysis/MemorySSA/multi-edges.ll b/llvm/test/Analysis/MemorySSA/multi-edges.ll
new file mode 100644
index 00000000000..5d47728d6f5
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/multi-edges.ll
@@ -0,0 +1,32 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
+;
+; Makes sure we have a sane model when both successors of some block are the
+; same block.
+
+define i32 @foo(i1 %a) {
+entry:
+ %0 = alloca i32, align 4
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 4
+ store i32 4, i32* %0
+ br i1 %a, label %Loop.Body, label %Loop.End
+
+Loop.Body:
+; CHECK: 3 = MemoryPhi({entry,1},{Loop.End,4})
+; CHECK-NEXT: 2 = MemoryDef(3)
+; CHECK-NEXT: store i32 5
+ store i32 5, i32* %0, align 4
+ br i1 %a, label %Loop.End, label %Loop.End ; WhyDoWeEvenHaveThatLever.gif
+
+Loop.End:
+; CHECK: 4 = MemoryPhi({entry,1},{Loop.Body,2},{Loop.Body,2})
+; CHECK-NEXT: MemoryUse(4)
+; CHECK-NEXT: %1 = load
+ %1 = load i32, i32* %0, align 4
+ %2 = icmp eq i32 5, %1
+ br i1 %2, label %Ret, label %Loop.Body
+
+Ret:
+ ret i32 %1
+}
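
Note that the phi in Loop.End lists {Loop.Body,2} twice, once per CFG edge. MemoryPhis mirror PHINodes in this respect; a sketch of walking the operands with the standard accessors:

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// A MemoryPhi carries one incoming access per predecessor edge, so a block
// that branches to the same successor twice contributes two entries.
void printIncoming(MemoryPhi *Phi) {
  for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
    errs() << Phi->getIncomingBlock(I)->getName() << " -> "
           << *Phi->getIncomingValue(I) << "\n";
}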
diff --git a/llvm/test/Analysis/MemorySSA/multiple-backedges-hal.ll b/llvm/test/Analysis/MemorySSA/multiple-backedges-hal.ll
new file mode 100644
index 00000000000..005a37c9add
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/multiple-backedges-hal.ll
@@ -0,0 +1,73 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
+
+; hfinkel's case
+; [entry]
+; |
+; .....
+; (clobbering access - b)
+; |
+; .... ________________________________
+; \ / |
+; (x) |
+; ...... |
+; | |
+; | ______________________ |
+; \ / | |
+; (starting access) | |
+; ... | |
+; (clobbering access - a) | |
+; ... | |
+; | | | |
+; | |_______________________| |
+; | |
+; |_________________________________|
+;
+; More specifically, one access with multiple clobbering accesses: one
+; strictly dominates the access, and the other reaches it over a backedge.
+
+; These are readnone, so we don't have a 1:1 mapping of MemorySSA edges to Instructions.
+declare void @doThingWithoutReading() readnone
+declare i8 @getValue() readnone
+declare i1 @getBool() readnone
+
+define hidden void @testcase(i8* %Arg) {
+Entry:
+ call void @doThingWithoutReading()
+ %Val.Entry = call i8 @getValue()
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i8 %Val.Entry
+ store i8 %Val.Entry, i8* %Arg
+ call void @doThingWithoutReading()
+ br label %OuterLoop
+
+OuterLoop:
+; CHECK: 4 = MemoryPhi({Entry,1},{InnerLoop.Tail,3})
+; CHECK-NEXT: %Val.Outer =
+ %Val.Outer = call i8 @getValue()
+; CHECK: 2 = MemoryDef(4)
+; CHECK-NEXT: store i8 %Val.Outer
+ store i8 %Val.Outer, i8* %Arg
+ call void @doThingWithoutReading()
+ br label %InnerLoop
+
+InnerLoop:
+; CHECK: 5 = MemoryPhi({OuterLoop,2},{InnerLoop,3})
+; CHECK-NEXT: ; MemoryUse(5)
+; CHECK-NEXT: %StartingAccess = load
+ %StartingAccess = load i8, i8* %Arg, align 4
+ %Val.Inner = call i8 @getValue()
+; CHECK: 3 = MemoryDef(5)
+; CHECK-NEXT: store i8 %Val.Inner
+ store i8 %Val.Inner, i8* %Arg
+ call void @doThingWithoutReading()
+ %KeepGoing = call i1 @getBool()
+ br i1 %KeepGoing, label %InnerLoop.Tail, label %InnerLoop
+
+InnerLoop.Tail:
+ %KeepGoing.Tail = call i1 @getBool()
+ br i1 %KeepGoing.Tail, label %End, label %OuterLoop
+
+End:
+ ret void
+}
diff --git a/llvm/test/Analysis/MemorySSA/multiple-locations.ll b/llvm/test/Analysis/MemorySSA/multiple-locations.ll
new file mode 100644
index 00000000000..9a3e87e4ab6
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/multiple-locations.ll
@@ -0,0 +1,25 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
+;
+; Checks that basicAA is doing some amount of disambiguation for us
+
+define i32 @foo(i1 %cond) {
+ %a = alloca i32, align 4
+ %b = alloca i32, align 4
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 0
+ store i32 0, i32* %a, align 4
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: store i32 1
+ store i32 1, i32* %b, align 4
+
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: %1 = load i32
+ %1 = load i32, i32* %a, align 4
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: %2 = load i32
+ %2 = load i32, i32* %b, align 4
+
+ %3 = add i32 %1, %2
+ ret i32 %3
+}
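
The disambiguation referred to above is BasicAA proving the two allocas NoAlias, which lets each load be tied to the store over its own alloca instead of the latest MemoryDef. A hedged sketch of the query:

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Distinct allocas never overlap, so BasicAA should answer NoAlias and the
// use optimizer can skip the store to %b when placing the load of %a.
bool provablyDistinct(AAResults &AA, LoadInst *A, LoadInst *B) {
  return AA.alias(MemoryLocation::get(A), MemoryLocation::get(B)) ==
         AliasResult::NoAlias;
}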
diff --git a/llvm/test/Analysis/MemorySSA/no-disconnected.ll b/llvm/test/Analysis/MemorySSA/no-disconnected.ll
new file mode 100644
index 00000000000..d1dcb15893a
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/no-disconnected.ll
@@ -0,0 +1,43 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
+;
+; This test ensures we don't end up with multiple reaching defs for a single
+; use/phi edge. If we were to optimize defs, we would end up with
+; 2 = MemoryDef(liveOnEntry) and 4 = MemoryDef(liveOnEntry), which would mean
+; that both 1,2 and 3,4 reach the phi node. Because the phi node can only have
+; one entry per edge, it would choose 2 and 4, disconnecting 1 and 3
+; completely from the SSA graph, even though they are not dead.
+
+define void @sink_store(i32 %index, i32* %foo, i32* %bar) {
+entry:
+ %cmp = trunc i32 %index to i1
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 %index, i32* %foo, align 4
+ store i32 %index, i32* %foo, align 4
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: store i32 %index, i32* %bar, align 4
+ store i32 %index, i32* %bar, align 4
+ br label %if.end
+
+if.else: ; preds = %entry
+; CHECK: 3 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 %index, i32* %foo, align 4
+ store i32 %index, i32* %foo, align 4
+; CHECK: 4 = MemoryDef(3)
+; CHECK-NEXT: store i32 %index, i32* %bar, align 4
+ store i32 %index, i32* %bar, align 4
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+; CHECK: 5 = MemoryPhi({if.then,2},{if.else,4})
+; CHECK: MemoryUse(5)
+; CHECK-NEXT: %c = load i32, i32* %foo
+ %c = load i32, i32* %foo
+; CHECK: MemoryUse(5)
+; CHECK-NEXT: %d = load i32, i32* %bar
+ %d = load i32, i32* %bar
+ ret void
+}
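
The comment above describes why def-def links stay conservative. The precise answer is still reachable, just through the walker instead of the stored operand; a sketch of the two levels (names illustrative):

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// getDefiningAccess() is the structural, conservative link (4 -> 3 in
// if.else above); the walker may return an earlier, more precise clobber
// without rewriting that link and disconnecting the intermediate defs.
void showBothAnswers(MemorySSA &MSSA, Instruction *Store) {
  MemoryDef *Def = cast<MemoryDef>(MSSA.getMemoryAccess(Store));
  errs() << "structural: " << *Def->getDefiningAccess() << "\n";
  errs() << "walker:     "
         << *MSSA.getWalker()->getClobberingMemoryAccess(Def) << "\n";
}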
diff --git a/llvm/test/Analysis/MemorySSA/optimize-use.ll b/llvm/test/Analysis/MemorySSA/optimize-use.ll
new file mode 100644
index 00000000000..8a8f2dd5095
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/optimize-use.ll
@@ -0,0 +1,37 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
+
+; Function Attrs: ssp uwtable
+define i32 @main() {
+entry:
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: %call = call noalias i8* @_Znwm(i64 4)
+ %call = call noalias i8* @_Znwm(i64 4)
+ %0 = bitcast i8* %call to i32*
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: %call1 = call noalias i8* @_Znwm(i64 4)
+ %call1 = call noalias i8* @_Znwm(i64 4)
+ %1 = bitcast i8* %call1 to i32*
+; CHECK: 3 = MemoryDef(2)
+; CHECK-NEXT: store i32 5, i32* %0, align 4
+ store i32 5, i32* %0, align 4
+; CHECK: 4 = MemoryDef(3)
+; CHECK-NEXT: store i32 7, i32* %1, align 4
+ store i32 7, i32* %1, align 4
+; CHECK: MemoryUse(3)
+; CHECK-NEXT: %2 = load i32, i32* %0, align 4
+ %2 = load i32, i32* %0, align 4
+; CHECK: MemoryUse(4)
+; CHECK-NEXT: %3 = load i32, i32* %1, align 4
+ %3 = load i32, i32* %1, align 4
+; CHECK: MemoryUse(3)
+; CHECK-NEXT: %4 = load i32, i32* %0, align 4
+ %4 = load i32, i32* %0, align 4
+; CHECK: MemoryUse(4)
+; CHECK-NEXT: %5 = load i32, i32* %1, align 4
+ %5 = load i32, i32* %1, align 4
+ %add = add nsw i32 %3, %5
+ ret i32 %add
+}
+
+declare noalias i8* @_Znwm(i64)
diff --git a/llvm/test/Analysis/MemorySSA/phi-translation.ll b/llvm/test/Analysis/MemorySSA/phi-translation.ll
new file mode 100644
index 00000000000..c91faf2ac20
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/phi-translation.ll
@@ -0,0 +1,181 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
+
+; %ptr can't alias %local, so we should be able to optimize the use of %local to
+; point to the store to %local.
+; CHECK-LABEL: define void @check
+define void @check(i8* %ptr, i1 %bool) {
+entry:
+ %local = alloca i8, align 1
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i8 0, i8* %local, align 1
+ store i8 0, i8* %local, align 1
+ br i1 %bool, label %if.then, label %if.end
+
+if.then:
+ %p2 = getelementptr inbounds i8, i8* %ptr, i32 1
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: store i8 0, i8* %p2, align 1
+ store i8 0, i8* %p2, align 1
+ br label %if.end
+
+if.end:
+; CHECK: 3 = MemoryPhi({entry,1},{if.then,2})
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: load i8, i8* %local, align 1
+ load i8, i8* %local, align 1
+ ret void
+}
+
+; CHECK-LABEL: define void @check2
+define void @check2(i1 %val1, i1 %val2, i1 %val3) {
+entry:
+ %local = alloca i8, align 1
+ %local2 = alloca i8, align 1
+
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i8 0, i8* %local
+ store i8 0, i8* %local
+ br i1 %val1, label %if.then, label %phi.3
+
+if.then:
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: store i8 2, i8* %local2
+ store i8 2, i8* %local2
+ br i1 %val2, label %phi.2, label %phi.3
+
+phi.3:
+; CHECK: 5 = MemoryPhi({entry,1},{if.then,2})
+; CHECK: 3 = MemoryDef(5)
+; CHECK-NEXT: store i8 3, i8* %local2
+ store i8 3, i8* %local2
+ br i1 %val3, label %phi.2, label %phi.1
+
+phi.2:
+; CHECK: 6 = MemoryPhi({if.then,2},{phi.3,3})
+; CHECK: 4 = MemoryDef(6)
+; CHECK-NEXT: store i8 4, i8* %local2
+ store i8 4, i8* %local2
+ br label %phi.1
+
+phi.1:
+; Order matters here; phi.2 needs to come before phi.3, because that's the order
+; they're visited in.
+; CHECK: 7 = MemoryPhi({phi.2,4},{phi.3,3})
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: load i8, i8* %local
+ load i8, i8* %local
+ ret void
+}
+
+; CHECK-LABEL: define void @cross_phi
+define void @cross_phi(i8* noalias %p1, i8* noalias %p2) {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i8 0, i8* %p1
+ store i8 0, i8* %p1
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: load i8, i8* %p1
+ load i8, i8* %p1
+ br i1 undef, label %a, label %b
+
+a:
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: store i8 0, i8* %p2
+ store i8 0, i8* %p2
+ br i1 undef, label %c, label %d
+
+b:
+; CHECK: 3 = MemoryDef(1)
+; CHECK-NEXT: store i8 1, i8* %p2
+ store i8 1, i8* %p2
+ br i1 undef, label %c, label %d
+
+c:
+; CHECK: 6 = MemoryPhi({a,2},{b,3})
+; CHECK: 4 = MemoryDef(6)
+; CHECK-NEXT: store i8 2, i8* %p2
+ store i8 2, i8* %p2
+ br label %e
+
+d:
+; CHECK: 7 = MemoryPhi({a,2},{b,3})
+; CHECK: 5 = MemoryDef(7)
+; CHECK-NEXT: store i8 3, i8* %p2
+ store i8 3, i8* %p2
+ br label %e
+
+e:
+; 8 = MemoryPhi({c,4},{d,5})
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: load i8, i8* %p1
+ load i8, i8* %p1
+ ret void
+}
+
+; CHECK-LABEL: define void @looped
+define void @looped(i8* noalias %p1, i8* noalias %p2) {
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i8 0, i8* %p1
+ store i8 0, i8* %p1
+ br label %loop.1
+
+loop.1:
+; CHECK: 5 = MemoryPhi({%0,1},{loop.3,4})
+; CHECK: 2 = MemoryDef(5)
+; CHECK-NEXT: store i8 0, i8* %p2
+ store i8 0, i8* %p2
+ br i1 undef, label %loop.2, label %loop.3
+
+loop.2:
+; CHECK: 6 = MemoryPhi({loop.1,2},{loop.3,4})
+; CHECK: 3 = MemoryDef(6)
+; CHECK-NEXT: store i8 1, i8* %p2
+ store i8 1, i8* %p2
+ br label %loop.3
+
+loop.3:
+; CHECK: 7 = MemoryPhi({loop.1,2},{loop.2,3})
+; CHECK: 4 = MemoryDef(7)
+; CHECK-NEXT: store i8 2, i8* %p2
+ store i8 2, i8* %p2
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: load i8, i8* %p1
+ load i8, i8* %p1
+ br i1 undef, label %loop.2, label %loop.1
+}
+
+; CHECK-LABEL: define void @looped_visitedonlyonce
+define void @looped_visitedonlyonce(i8* noalias %p1, i8* noalias %p2) {
+ br label %while.cond
+
+while.cond:
+; CHECK: 4 = MemoryPhi({%0,liveOnEntry},{if.end,3})
+; CHECK-NEXT: br i1 undef, label %if.then, label %if.end
+ br i1 undef, label %if.then, label %if.end
+
+if.then:
+; CHECK: 1 = MemoryDef(4)
+; CHECK-NEXT: store i8 0, i8* %p1
+ store i8 0, i8* %p1
+ br i1 undef, label %if.end, label %if.then2
+
+if.then2:
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: store i8 1, i8* %p2
+ store i8 1, i8* %p2
+ br label %if.end
+
+if.end:
+; CHECK: 5 = MemoryPhi({while.cond,4},{if.then,1},{if.then2,2})
+; CHECK: MemoryUse(5)
+; CHECK-NEXT: load i8, i8* %p1
+ load i8, i8* %p1
+; CHECK: 3 = MemoryDef(5)
+; CHECK-NEXT: store i8 2, i8* %p2
+ store i8 2, i8* %p2
+; CHECK: MemoryUse(5)
+; CHECK-NEXT: load i8, i8* %p1
+ load i8, i8* %p1
+ br label %while.cond
+}
+
diff --git a/llvm/test/Analysis/MemorySSA/pr28880.ll b/llvm/test/Analysis/MemorySSA/pr28880.ll
new file mode 100644
index 00000000000..ae64c0c5d73
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/pr28880.ll
@@ -0,0 +1,51 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
+
+; This testcase is reduced from SingleSource/Benchmarks/Misc/fbench.c.
+; It tests that the MemorySSA use optimizer comes up with the right answers
+; when dealing with multiple MemoryLocations over different blocks. See
+; PR28880 for more details.
+@global = external hidden unnamed_addr global double, align 8
+@global.1 = external hidden unnamed_addr global double, align 8
+
+; Function Attrs: nounwind ssp uwtable
+define hidden fastcc void @hoge() unnamed_addr #0 {
+bb:
+ br i1 undef, label %bb1, label %bb2
+
+bb1: ; preds = %bb
+; These accesses should not conflict.
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store double undef, double* @global, align 8
+ store double undef, double* @global, align 8
+; CHECK: MemoryUse(liveOnEntry)
+; MemoryUse(liveOnEntry)
+; CHECK-NEXT: %tmp = load double, double* @global.1, align 8
+ %tmp = load double, double* @global.1, align 8
+ unreachable
+
+bb2: ; preds = %bb
+ br label %bb3
+
+bb3: ; preds = %bb2
+ br i1 undef, label %bb4, label %bb6
+
+bb4: ; preds = %bb3
+; These accesses should conflict.
+; CHECK: 2 = MemoryDef(liveOnEntry)
+; 2 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store double 0.000000e+00, double* @global.1, align 8
+ store double 0.000000e+00, double* @global.1, align 8
+; CHECK: MemoryUse(2)
+; MemoryUse(2)
+; CHECK-NEXT: %tmp5 = load double, double* @global.1, align 8
+ %tmp5 = load double, double* @global.1, align 8
+ unreachable
+
+bb6: ; preds = %bb3
+ unreachable
+}
+
+attributes #0 = { nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
diff --git a/llvm/test/Analysis/MemorySSA/ptr-const-mem.ll b/llvm/test/Analysis/MemorySSA/ptr-const-mem.ll
new file mode 100644
index 00000000000..a326d8d717a
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/ptr-const-mem.ll
@@ -0,0 +1,23 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze -memssa-check-limit=0 < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>' -verify-memoryssa -disable-output -memssa-check-limit=0 < %s 2>&1 | FileCheck %s
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+target triple = "amdgcn"
+
+@g4 = external unnamed_addr constant i8, align 1
+
+define signext i8 @cmp_constant(i8* %q, i8 %v) local_unnamed_addr {
+entry:
+
+ store i8 %v, i8* %q, align 1
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i8 %v, i8* %q, align 1
+
+ %0 = load i8, i8* @g4, align 1
+; Make sure that this load is liveOnEntry just based on the fact that @g4 is
+; constant memory.
+; CHECK: MemoryUse(liveOnEntry)
+; CHECK-NEXT: load i8, i8* @g4, align 1
+
+ ret i8 %0
+}
+
diff --git a/llvm/test/Analysis/MemorySSA/volatile-clobber.ll b/llvm/test/Analysis/MemorySSA/volatile-clobber.ll
new file mode 100644
index 00000000000..d6f960f3e38
--- /dev/null
+++ b/llvm/test/Analysis/MemorySSA/volatile-clobber.ll
@@ -0,0 +1,94 @@
+; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -disable-output < %s 2>&1 | FileCheck %s
+;
+; Ensures that volatile stores/loads count as MemoryDefs
+
+; CHECK-LABEL: define i32 @foo
+define i32 @foo() {
+ %1 = alloca i32, align 4
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store volatile i32 4
+ store volatile i32 4, i32* %1, align 4
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: store volatile i32 8
+ store volatile i32 8, i32* %1, align 4
+; CHECK: 3 = MemoryDef(2)
+; CHECK-NEXT: %2 = load volatile i32
+ %2 = load volatile i32, i32* %1, align 4
+; CHECK: 4 = MemoryDef(3)
+; CHECK-NEXT: %3 = load volatile i32
+ %3 = load volatile i32, i32* %1, align 4
+ %4 = add i32 %3, %2
+ ret i32 %4
+}
+
+; Ensuring that we don't automatically hoist nonvolatile loads around volatile
+; loads
+; CHECK-LABEL: define void @volatile_only
+define void @volatile_only(i32* %arg1, i32* %arg2) {
+ ; Trivially NoAlias/MustAlias
+ %a = alloca i32
+ %b = alloca i32
+
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: load volatile i32, i32* %a
+ load volatile i32, i32* %a
+; CHECK: MemoryUse(liveOnEntry)
+; CHECK-NEXT: load i32, i32* %b
+ load i32, i32* %b
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: load i32, i32* %a
+ load i32, i32* %a
+
+ ; MayAlias
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: load volatile i32, i32* %arg1
+ load volatile i32, i32* %arg1
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: load i32, i32* %arg2
+ load i32, i32* %arg2
+
+ ret void
+}
+
+; Ensuring that volatile atomic operations work properly.
+; CHECK-LABEL: define void @volatile_atomics
+define void @volatile_atomics(i32* %arg1, i32* %arg2) {
+ %a = alloca i32
+ %b = alloca i32
+
+ ; Trivially NoAlias/MustAlias
+
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: load atomic volatile i32, i32* %a acquire, align 4
+ load atomic volatile i32, i32* %a acquire, align 4
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: load i32, i32* %b
+ load i32, i32* %b
+
+; CHECK: 2 = MemoryDef(1)
+; CHECK-NEXT: load atomic volatile i32, i32* %a monotonic, align 4
+ load atomic volatile i32, i32* %a monotonic, align 4
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: load i32, i32* %b
+ load i32, i32* %b
+; CHECK: MemoryUse(1)
+; CHECK-NEXT: load atomic i32, i32* %b unordered, align 4
+ load atomic i32, i32* %b unordered, align 4
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: load atomic i32, i32* %a unordered, align 4
+ load atomic i32, i32* %a unordered, align 4
+; CHECK: MemoryUse(2)
+; CHECK-NEXT: load i32, i32* %a
+ load i32, i32* %a
+
+ ; MayAlias
+; CHECK: 3 = MemoryDef(2)
+; CHECK-NEXT: load atomic volatile i32, i32* %arg1 monotonic, align 4
+ load atomic volatile i32, i32* %arg1 monotonic, align 4
+; CHECK: MemoryUse(3)
+; CHECK-NEXT: load i32, i32* %arg2
+ load i32, i32* %arg2
+
+ ret void
+}
OpenPOWER on IntegriCloud