author     Piotr Padlewski <piotr.padlewski@gmail.com>   2018-05-03 11:03:01 +0000
committer  Piotr Padlewski <piotr.padlewski@gmail.com>   2018-05-03 11:03:01 +0000
commit     5dde809404f73b30bb41b79f7060c0e14cfe0426 (patch)
tree       fecca61a8b4aadbc1d23e57be1793ac2d72c1d68 /llvm/test
parent     90b0a53499c98ec3b4903f1f4f0c7b404c236ab3 (diff)
download   bcm5719-llvm-5dde809404f73b30bb41b79f7060c0e14cfe0426.tar.gz
           bcm5719-llvm-5dde809404f73b30bb41b79f7060c0e14cfe0426.zip
Rename invariant.group.barrier to launder.invariant.group
Summary:
This is one of the initial commits of the "RFC: Devirtualization v2" proposal:
https://docs.google.com/document/d/16GVtCpzK8sIHNc2qZz6RN8amICNBtvjWUod2SujZVEo/edit?usp=sharing

Reviewers: rsmith, amharc, kuhar, sanjoy

Subscribers: arsenm, nhaehnle, javed.absar, hiraditya, llvm-commits

Differential Revision: https://reviews.llvm.org/D45111

llvm-svn: 331448
Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/Analysis/MemorySSA/invariant-groups.ll                48
-rw-r--r--  llvm/test/Bitcode/upgrade-invariant-group-barrier.ll            22
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll        6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll     4
-rw-r--r--  llvm/test/CodeGen/Generic/intrinsics.ll                          4
-rw-r--r--  llvm/test/Other/Inputs/invariant.group.barrier.ll               15
-rw-r--r--  llvm/test/Other/launder.invariant.group.ll (renamed from llvm/test/Other/invariant.group.barrier.ll)  43
-rw-r--r--  llvm/test/Transforms/CodeGenPrepare/invariant.group.ll           6
-rw-r--r--  llvm/test/Transforms/GVN/invariant.group.ll                      8
-rw-r--r--  llvm/test/Transforms/GlobalOpt/invariant.group.barrier.ll        8
-rw-r--r--  llvm/test/Transforms/NewGVN/invariant.group.ll                   9
11 files changed, 95 insertions, 78 deletions
diff --git a/llvm/test/Analysis/MemorySSA/invariant-groups.ll b/llvm/test/Analysis/MemorySSA/invariant-groups.ll
index 062b57445ec..a2535d58d20 100644
--- a/llvm/test/Analysis/MemorySSA/invariant-groups.ll
+++ b/llvm/test/Analysis/MemorySSA/invariant-groups.ll
@@ -1,7 +1,7 @@
; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
;
; Currently, MemorySSA doesn't support invariant groups. So, we should ignore
-; invariant.group.barrier intrinsics entirely. We'll need to pay attention to
+; launder.invariant.group intrinsics entirely. We'll need to pay attention to
; them when/if we decide to support invariant groups.
@g = external global i32
@@ -17,8 +17,8 @@ define i32 @foo(i32* %a) {
%1 = bitcast i32* %a to i8*
; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
- %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
+; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
+ %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
%a32 = bitcast i8* %a8 to i32*
; This has to be MemoryUse(2), because we can't skip the barrier based on
@@ -36,8 +36,8 @@ define i32 @skipBarrier(i32* %a) {
%1 = bitcast i32* %a to i8*
; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
- %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
+; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
+ %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
%a32 = bitcast i8* %a8 to i32*
; We can skip the barrier only if the "skip" is not based on !invariant.group.
@@ -55,8 +55,8 @@ define i32 @skipBarrier2(i32* %a) {
%1 = bitcast i32* %a to i8*
; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
- %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
+; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
+ %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
%a32 = bitcast i8* %a8 to i32*
; We can skip the barrier only if the "skip" is not based on !invariant.group.
@@ -86,8 +86,8 @@ define i32 @handleInvariantGroups(i32* %a) {
store i32 1, i32* @g, align 4
%1 = bitcast i32* %a to i8*
; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
- %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
+; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
+ %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
%a32 = bitcast i8* %a8 to i32*
; CHECK: MemoryUse(2)
@@ -145,8 +145,8 @@ entry:
call void @clobber8(i8* %p)
; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
- %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
+; CHECK-NEXT: %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
+ %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
br i1 undef, label %Loop.Body, label %Loop.End
Loop.Body:
@@ -192,8 +192,8 @@ entry:
call void @clobber8(i8* %p)
; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
- %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
+; CHECK-NEXT: %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
+ %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
br i1 undef, label %Loop.Body, label %Loop.End
Loop.Body:
@@ -253,8 +253,8 @@ entry:
; CHECK-NEXT: call void @clobber
call void @clobber8(i8* %p)
; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
- %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
+; CHECK-NEXT: %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
+ %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
br i1 undef, label %Loop.Pre, label %Loop.End
Loop.Pre:
@@ -302,12 +302,12 @@ entry:
; CHECK-NEXT: store i8 42, i8* %ptr, !invariant.group !0
store i8 42, i8* %ptr, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: call i8* @llvm.invariant.group.barrier
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK-NEXT: call i8* @llvm.launder.invariant.group
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; FIXME: This one could be CSEd.
; CHECK: 3 = MemoryDef(2)
-; CHECK: call i8* @llvm.invariant.group.barrier
- %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK: call i8* @llvm.launder.invariant.group
+ %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK: 4 = MemoryDef(3)
; CHECK-NEXT: call void @clobber8(i8* %ptr)
call void @clobber8(i8* %ptr)
@@ -331,13 +331,13 @@ define i8 @unoptimizable2() {
; CHECK-NEXT: store i8 42, i8* %ptr, !invariant.group !0
store i8 42, i8* %ptr, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: call i8* @llvm.invariant.group.barrier
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK-NEXT: call i8* @llvm.launder.invariant.group
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK: 3 = MemoryDef(2)
store i8 43, i8* %ptr
; CHECK: 4 = MemoryDef(3)
-; CHECK-NEXT: call i8* @llvm.invariant.group.barrier
- %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK-NEXT: call i8* @llvm.launder.invariant.group
+ %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK: 5 = MemoryDef(4)
; CHECK-NEXT: call void @clobber8(i8* %ptr)
call void @clobber8(i8* %ptr)
@@ -354,7 +354,7 @@ define i8 @unoptimizable2() {
}
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
declare void @clobber(i32*)
declare void @clobber8(i8*)
declare void @use(i8* readonly)
diff --git a/llvm/test/Bitcode/upgrade-invariant-group-barrier.ll b/llvm/test/Bitcode/upgrade-invariant-group-barrier.ll
new file mode 100644
index 00000000000..d18748929ae
--- /dev/null
+++ b/llvm/test/Bitcode/upgrade-invariant-group-barrier.ll
@@ -0,0 +1,22 @@
+; RUN: opt -S < %s | FileCheck %s
+
+; The intrinsic first took only i8*, then it was made polymorphic, then
+; it was renamed to launder.invariant.group
+define void @test(i8* %p1, i16* %p16) {
+; CHECK-LABEL: @test
+; CHECK: %p2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %p1)
+; CHECK: %p3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %p1)
+; CHECK: %p4 = call i16* @llvm.launder.invariant.group.p0i16(i16* %p16)
+ %p2 = call i8* @llvm.invariant.group.barrier(i8* %p1)
+ %p3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p1)
+ %p4 = call i16* @llvm.invariant.group.barrier.p0i16(i16* %p16)
+ ret void
+}
+
+; CHECK: Function Attrs: inaccessiblememonly nounwind speculatable
+; CHECK: declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+; CHECK: Function Attrs: inaccessiblememonly nounwind speculatable
+; CHECK: declare i16* @llvm.launder.invariant.group.p0i16(i16*)
+declare i8* @llvm.invariant.group.barrier(i8*)
+declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i16* @llvm.invariant.group.barrier.p0i16(i16*)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll b/llvm/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll
index 179dd518d3f..34b2a5626cc 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll
@@ -1,10 +1,10 @@
; RUN: llc -O0 -mtriple=arm64 < %s
-declare i8* @llvm.invariant.group.barrier(i8*)
+declare i8* @llvm.launder.invariant.group(i8*)
define i8* @barrier(i8* %p) {
-; CHECK: bl llvm.invariant.group.barrier
- %q = call i8* @llvm.invariant.group.barrier(i8* %p)
+; CHECK: bl llvm.launder.invariant.group
+ %q = call i8* @llvm.launder.invariant.group(i8* %p)
ret i8* %q
}
diff --git a/llvm/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll b/llvm/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll
index 768d520ddd5..3a8b45fd912 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll
@@ -3,7 +3,7 @@ target datalayout = "A5"
declare {}* @llvm.invariant.start.p5i8(i64, i8 addrspace(5)* nocapture) #0
declare void @llvm.invariant.end.p5i8({}*, i64, i8 addrspace(5)* nocapture) #0
-declare i8 addrspace(5)* @llvm.invariant.group.barrier.p5i8(i8 addrspace(5)*) #1
+declare i8 addrspace(5)* @llvm.launder.invariant.group.p5i8(i8 addrspace(5)*) #1
; GCN-LABEL: {{^}}use_invariant_promotable_lds:
; GCN: buffer_load_dword
@@ -17,7 +17,7 @@ bb:
store i32 %tmp3, i32 addrspace(5)* %tmp
%tmp4 = call {}* @llvm.invariant.start.p5i8(i64 4, i8 addrspace(5)* %tmp1) #0
call void @llvm.invariant.end.p5i8({}* %tmp4, i64 4, i8 addrspace(5)* %tmp1) #0
- %tmp5 = call i8 addrspace(5)* @llvm.invariant.group.barrier.p5i8(i8 addrspace(5)* %tmp1) #1
+ %tmp5 = call i8 addrspace(5)* @llvm.launder.invariant.group.p5i8(i8 addrspace(5)* %tmp1) #1
ret void
}
diff --git a/llvm/test/CodeGen/Generic/intrinsics.ll b/llvm/test/CodeGen/Generic/intrinsics.ll
index 6a51d2d371b..3964968dc9d 100644
--- a/llvm/test/CodeGen/Generic/intrinsics.ll
+++ b/llvm/test/CodeGen/Generic/intrinsics.ll
@@ -39,10 +39,10 @@ define double @test_cos(float %F) {
ret double %I
}
-declare i8* @llvm.invariant.group.barrier(i8*)
+declare i8* @llvm.launder.invariant.group(i8*)
define i8* @barrier(i8* %p) {
- %q = call i8* @llvm.invariant.group.barrier(i8* %p)
+ %q = call i8* @llvm.launder.invariant.group(i8* %p)
ret i8* %q
}
diff --git a/llvm/test/Other/Inputs/invariant.group.barrier.ll b/llvm/test/Other/Inputs/invariant.group.barrier.ll
deleted file mode 100644
index 565b0989ecb..00000000000
--- a/llvm/test/Other/Inputs/invariant.group.barrier.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: opt -S -gvn < %s | FileCheck %s
-; RUN: opt -S -newgvn < %s | FileCheck %s
-; RUN: opt -S -O3 < %s | FileCheck %s
-
-; This test check if optimizer is not proving equality based on mustalias
-; CHECK-LABEL: define void @dontProveEquality(i8* %a)
-define void @dontProveEquality(i8* %a) {
- %b = call i8* @llvm.invariant.group.barrier(i8* %a)
- %r = i1 icmp eq i8* %b, i8* %a
-;CHECK: call void @use(%r)
- call void @use(%r)
-}
-
-declare void @use(i1)
-declare i8* @llvm.invariant.group.barrier(i8 *)
diff --git a/llvm/test/Other/invariant.group.barrier.ll b/llvm/test/Other/launder.invariant.group.ll
index 5ba4fcc7924..ebb2819ec28 100644
--- a/llvm/test/Other/invariant.group.barrier.ll
+++ b/llvm/test/Other/launder.invariant.group.ll
@@ -4,7 +4,7 @@
; RUN: opt -S -O3 < %s | FileCheck %s
; These tests check whether passes with CSE functionality can do CSE on
-; invariant.group.barrier, that is prohibited if there is a memory clobber
+; launder.invariant.group, which is prohibited if there is a memory clobber
; between the barrier calls.
; CHECK-LABEL: define i8 @optimizable()
@@ -12,11 +12,11 @@ define i8 @optimizable() {
entry:
%ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0
-; CHECK: call i8* @llvm.invariant.group.barrier.p0i8
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK: call i8* @llvm.launder.invariant.group.p0i8
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; FIXME: This one could be CSEd
-; CHECK: call i8* @llvm.invariant.group.barrier
- %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK: call i8* @llvm.launder.invariant.group
+ %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK: call void @clobber(i8* {{.*}}%ptr)
call void @clobber(i8* %ptr)
@@ -35,11 +35,11 @@ define i8 @unoptimizable() {
entry:
%ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0
-; CHECK: call i8* @llvm.invariant.group.barrier.p0i8
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK: call i8* @llvm.launder.invariant.group.p0i8
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
call void @clobber(i8* %ptr)
-; CHECK: call i8* @llvm.invariant.group.barrier.p0i8
- %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK: call i8* @llvm.launder.invariant.group.p0i8
+ %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK: call void @clobber(i8* {{.*}}%ptr)
call void @clobber(i8* %ptr)
; CHECK: call void @use(i8* {{.*}}%ptr2)
@@ -56,11 +56,11 @@ entry:
define i8 @unoptimizable2() {
%ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0
-; CHECK: call i8* @llvm.invariant.group.barrier
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK: call i8* @llvm.launder.invariant.group
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
store i8 43, i8* %ptr
-; CHECK: call i8* @llvm.invariant.group.barrier
- %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK: call i8* @llvm.launder.invariant.group
+ %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK: call void @clobber(i8* {{.*}}%ptr)
call void @clobber(i8* %ptr)
; CHECK: call void @use(i8* {{.*}}%ptr2)
@@ -72,12 +72,23 @@ define i8 @unoptimizable2() {
ret i8 %v
}
+; This test checks that the optimizer does not prove equality based on mustalias
+; CHECK-LABEL: define void @dontProveEquality(i8* %a)
+define void @dontProveEquality(i8* %a) {
+ %b = call i8* @llvm.launder.invariant.group.p0i8(i8* %a)
+ %r = icmp eq i8* %b, %a
+;CHECK: call void @useBool(i1 %r)
+ call void @useBool(i1 %r)
+ ret void
+}
+
declare void @use(i8* readonly)
+declare void @useBool(i1)
declare void @clobber(i8*)
-; CHECK: Function Attrs: inaccessiblememonly nounwind{{$}}
-; CHECK-NEXT: declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+; CHECK: Function Attrs: inaccessiblememonly nounwind speculatable{{$}}
+; CHECK-NEXT: declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
!0 = !{}
diff --git a/llvm/test/Transforms/CodeGenPrepare/invariant.group.ll b/llvm/test/Transforms/CodeGenPrepare/invariant.group.ll
index 042e58b4fc8..3ad1ca79625 100644
--- a/llvm/test/Transforms/CodeGenPrepare/invariant.group.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/invariant.group.ll
@@ -6,10 +6,10 @@
define void @foo() {
enter:
; CHECK-NOT: !invariant.group
- ; CHECK-NOT: @llvm.invariant.group.barrier.p0i8(
+ ; CHECK-NOT: @llvm.launder.invariant.group.p0i8(
; CHECK: %val = load i8, i8* @tmp, !tbaa
%val = load i8, i8* @tmp, !invariant.group !0, !tbaa !{!1, !1, i64 0}
- %ptr = call i8* @llvm.invariant.group.barrier.p0i8(i8* @tmp)
+ %ptr = call i8* @llvm.launder.invariant.group.p0i8(i8* @tmp)
; CHECK: store i8 42, i8* @tmp
store i8 42, i8* %ptr, !invariant.group !0
@@ -18,7 +18,7 @@ enter:
}
; CHECK-LABEL: }
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
!0 = !{!"something"}
!1 = !{!"x", !0}
diff --git a/llvm/test/Transforms/GVN/invariant.group.ll b/llvm/test/Transforms/GVN/invariant.group.ll
index 1bc1f497264..8135087fae6 100644
--- a/llvm/test/Transforms/GVN/invariant.group.ll
+++ b/llvm/test/Transforms/GVN/invariant.group.ll
@@ -25,7 +25,7 @@ define i8 @optimizable1() {
entry:
%ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
%a = load i8, i8* %ptr, !invariant.group !0
call void @foo(i8* %ptr2); call to use %ptr2
@@ -242,7 +242,7 @@ define i8 @optimizable4() {
entry:
%ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK-NOT: load
%a = load i8, i8* %ptr2, !invariant.group !0
@@ -314,7 +314,7 @@ entry:
; CHECK: store i8 %unknownValue, i8* %ptr, !invariant.group !0
store i8 %unknownValue, i8* %ptr, !invariant.group !0
- %newPtr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+ %newPtr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK-NOT: load
%d = load i8, i8* %newPtr2, !invariant.group !0
; CHECK: ret i8 %unknownValue
@@ -441,7 +441,7 @@ declare void @_ZN1A3fooEv(%struct.A*)
declare void @_ZN1AC1Ev(%struct.A*)
declare void @fooBit(i1*, i1)
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
; Function Attrs: nounwind
declare void @llvm.assume(i1 %cmp.vtables) #0
diff --git a/llvm/test/Transforms/GlobalOpt/invariant.group.barrier.ll b/llvm/test/Transforms/GlobalOpt/invariant.group.barrier.ll
index 80cd411afdc..744ab91ac78 100644
--- a/llvm/test/Transforms/GlobalOpt/invariant.group.barrier.ll
+++ b/llvm/test/Transforms/GlobalOpt/invariant.group.barrier.ll
@@ -33,7 +33,7 @@ enter:
store i32 %val, i32* %valptr
%0 = bitcast i32* %valptr to i8*
- %barr = call i8* @llvm.invariant.group.barrier(i8* %0)
+ %barr = call i8* @llvm.launder.invariant.group(i8* %0)
%1 = bitcast i8* %barr to i32*
%val2 = load i32, i32* %1
@@ -41,7 +41,7 @@ enter:
ret void
}
-; We can't step through invariant.group.barrier here, because that would change
+; We can't step through launder.invariant.group here, because that would change
; this load in @usage_of_globals()
; val = load i32, i32* %ptrVal, !invariant.group !0
; into
@@ -54,7 +54,7 @@ enter:
store i32 13, i32* @tmp3, !invariant.group !0
%0 = bitcast i32* @tmp3 to i8*
- %barr = call i8* @llvm.invariant.group.barrier(i8* %0)
+ %barr = call i8* @llvm.launder.invariant.group(i8* %0)
%1 = bitcast i8* %barr to i32*
store i32* %1, i32** @ptrToTmp3
@@ -74,6 +74,6 @@ entry:
declare void @changeTmp3ValAndCallBarrierInside()
-declare i8* @llvm.invariant.group.barrier(i8*)
+declare i8* @llvm.launder.invariant.group(i8*)
!0 = !{!"something"}
diff --git a/llvm/test/Transforms/NewGVN/invariant.group.ll b/llvm/test/Transforms/NewGVN/invariant.group.ll
index 9839fc49f44..d5890f1d228 100644
--- a/llvm/test/Transforms/NewGVN/invariant.group.ll
+++ b/llvm/test/Transforms/NewGVN/invariant.group.ll
@@ -26,7 +26,7 @@ define i8 @optimizable1() {
entry:
%ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
%a = load i8, i8* %ptr, !invariant.group !0
call void @foo(i8* %ptr2); call to use %ptr2
@@ -243,8 +243,7 @@ define i8 @optimizable4() {
entry:
%ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
-; CHECK-NOT: load
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
%a = load i8, i8* %ptr2, !invariant.group !0
; CHECK: ret i8 42
@@ -315,7 +314,7 @@ entry:
; CHECK: store i8 %unknownValue, i8* %ptr, !invariant.group !0
store i8 %unknownValue, i8* %ptr, !invariant.group !0
- %newPtr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+ %newPtr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK-NOT: load
%d = load i8, i8* %newPtr2, !invariant.group !0
; CHECK: ret i8 %unknownValue
@@ -442,7 +441,7 @@ declare void @_ZN1A3fooEv(%struct.A*)
declare void @_ZN1AC1Ev(%struct.A*)
declare void @fooBit(i1*, i1)
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
; Function Attrs: nounwind
declare void @llvm.assume(i1 %cmp.vtables) #0