| field | value | |
|---|---|---|
| author | Eric Christopher <echristo@gmail.com> | 2019-04-17 02:12:23 +0000 |
| committer | Eric Christopher <echristo@gmail.com> | 2019-04-17 02:12:23 +0000 |
| commit | a86343512845c9c1fdbac865fea88aa5fce7142a (patch) | |
| tree | 666fc6353de19ad8b00e56b67edd33f24104e4a7 /llvm/test/Transforms/PhaseOrdering | |
| parent | 7f8ca6e3679b3af951cb7a4b1377edfaa3244b93 (diff) | |
| download | bcm5719-llvm-a86343512845c9c1fdbac865fea88aa5fce7142a.tar.gz, bcm5719-llvm-a86343512845c9c1fdbac865fea88aa5fce7142a.zip | |
Temporarily Revert "Add basic loop fusion pass."
As it's causing some bot failures (and per request from kbarton).
This reverts commit r358543/ab70da07286e618016e78247e4a24fcb84077fda.
llvm-svn: 358546
Diffstat (limited to 'llvm/test/Transforms/PhaseOrdering')
| mode | file | lines deleted |
|---|---|---|
| -rw-r--r-- | llvm/test/Transforms/PhaseOrdering/2010-03-22-empty-baseclass.ll | 162 |
| -rw-r--r-- | llvm/test/Transforms/PhaseOrdering/PR6627.ll | 93 |
| -rw-r--r-- | llvm/test/Transforms/PhaseOrdering/basic.ll | 51 |
| -rw-r--r-- | llvm/test/Transforms/PhaseOrdering/bitfield-bittests.ll | 130 |
| -rw-r--r-- | llvm/test/Transforms/PhaseOrdering/gdce.ll | 106 |
| -rw-r--r-- | llvm/test/Transforms/PhaseOrdering/globalaa-retained.ll | 66 |
| -rw-r--r-- | llvm/test/Transforms/PhaseOrdering/rotate.ll | 38 |
| -rw-r--r-- | llvm/test/Transforms/PhaseOrdering/scev-custom-dl.ll | 67 |
| -rw-r--r-- | llvm/test/Transforms/PhaseOrdering/scev.ll | 64 |
| -rw-r--r-- | llvm/test/Transforms/PhaseOrdering/simplifycfg-options.ll | 104 |
10 files changed, 0 insertions, 881 deletions
diff --git a/llvm/test/Transforms/PhaseOrdering/2010-03-22-empty-baseclass.ll b/llvm/test/Transforms/PhaseOrdering/2010-03-22-empty-baseclass.ll deleted file mode 100644 index 13404a8b6a7..00000000000 --- a/llvm/test/Transforms/PhaseOrdering/2010-03-22-empty-baseclass.ll +++ /dev/null @@ -1,162 +0,0 @@ -; RUN: opt -O2 -S < %s | FileCheck %s - -target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" -target triple = "x86_64-apple-darwin11.1" - -%"struct.boost::compressed_pair<empty_t,int>" = type { %"struct.boost::details::compressed_pair_imp<empty_t,int,1>" } -%"struct.boost::details::compressed_pair_imp<empty_t,int,1>" = type { i32 } -%struct.empty_base_t = type <{ i8 }> -%struct.empty_t = type <{ i8 }> - -@.str = private constant [25 x i8] c"x.second() was clobbered\00", align 1 ; <[25 x i8]*> [#uses=1] - -define i32 @main(i32 %argc, i8** %argv) ssp { -entry: - %argc_addr = alloca i32, align 4 ; <i32*> [#uses=1] - %argv_addr = alloca i8**, align 8 ; <i8***> [#uses=1] - %retval = alloca i32 ; <i32*> [#uses=2] - %0 = alloca i32 ; <i32*> [#uses=2] - %retval.1 = alloca i8 ; <i8*> [#uses=2] - %1 = alloca %struct.empty_base_t ; <%struct.empty_base_t*> [#uses=1] - %2 = alloca %struct.empty_base_t* ; <%struct.empty_base_t**> [#uses=1] - %x = alloca %"struct.boost::compressed_pair<empty_t,int>" ; <%"struct.boost::compressed_pair<empty_t,int>"*> [#uses=3] - %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0] - store i32 %argc, i32* %argc_addr - store i8** %argv, i8*** %argv_addr - %3 = call i32* @_ZN5boost15compressed_pairI7empty_tiE6secondEv(%"struct.boost::compressed_pair<empty_t,int>"* %x) ssp ; <i32*> [#uses=1] - store i32 -3, i32* %3, align 4 - %4 = call %struct.empty_base_t* @_ZN5boost15compressed_pairI7empty_tiE5firstEv(%"struct.boost::compressed_pair<empty_t,int>"* %x) ssp ; <%struct.empty_base_t*> [#uses=1] - store %struct.empty_base_t* %4, %struct.empty_base_t** %2, align 8 - call void @_ZN7empty_tC1Ev(%struct.empty_base_t* %1) nounwind - %5 = call i32* @_ZN5boost15compressed_pairI7empty_tiE6secondEv(%"struct.boost::compressed_pair<empty_t,int>"* %x) ssp ; <i32*> [#uses=1] - %6 = load i32, i32* %5, align 4 ; <i32> [#uses=1] - %7 = icmp ne i32 %6, -3 ; <i1> [#uses=1] - %8 = zext i1 %7 to i8 ; <i8> [#uses=1] - store i8 %8, i8* %retval.1, align 1 - %9 = load i8, i8* %retval.1, align 1 ; <i8> [#uses=1] - %toBool = icmp ne i8 %9, 0 ; <i1> [#uses=1] - br i1 %toBool, label %bb, label %bb1 - -bb: ; preds = %entry - %10 = call i32 @puts(i8* getelementptr inbounds ([25 x i8], [25 x i8]* @.str, i64 0, i64 0)) ; <i32> [#uses=0] - call void @abort() noreturn - unreachable - -bb1: ; preds = %entry - store i32 0, i32* %0, align 4 - %11 = load i32, i32* %0, align 4 ; <i32> [#uses=1] - store i32 %11, i32* %retval, align 4 - br label %return - -; CHECK-NOT: x.second() was clobbered -; CHECK: ret i32 -return: ; preds = %bb1 - %retval2 = load i32, i32* %retval ; <i32> [#uses=1] - ret i32 %retval2 -} - -define linkonce_odr void @_ZN12empty_base_tC2Ev(%struct.empty_base_t* %this) nounwind ssp align 2 { -entry: - %this_addr = alloca %struct.empty_base_t*, align 8 ; <%struct.empty_base_t**> [#uses=1] - %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0] - store %struct.empty_base_t* %this, %struct.empty_base_t** %this_addr - br label %return - -return: ; preds = %entry - ret void -} - -define linkonce_odr void @_ZN7empty_tC1Ev(%struct.empty_base_t* %this) nounwind ssp align 2 { -entry: - 
%this_addr = alloca %struct.empty_base_t*, align 8 ; <%struct.empty_base_t**> [#uses=2] - %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0] - store %struct.empty_base_t* %this, %struct.empty_base_t** %this_addr - %0 = load %struct.empty_base_t*, %struct.empty_base_t** %this_addr, align 8 ; <%struct.empty_base_t*> [#uses=1] - call void @_ZN12empty_base_tC2Ev(%struct.empty_base_t* %0) nounwind - br label %return - -return: ; preds = %entry - ret void -} - -define linkonce_odr i32* @_ZN5boost7details19compressed_pair_impI7empty_tiLi1EE6secondEv(%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %this) nounwind ssp align 2 { -entry: - %this_addr = alloca %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*, align 8 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"**> [#uses=2] - %retval = alloca i32* ; <i32**> [#uses=2] - %0 = alloca i32* ; <i32**> [#uses=2] - %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0] - store %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %this, %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"** %this_addr - %1 = load %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*, %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"** %this_addr, align 8 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1] - %2 = getelementptr inbounds %"struct.boost::details::compressed_pair_imp<empty_t,int,1>", %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %1, i32 0, i32 0 ; <i32*> [#uses=1] - store i32* %2, i32** %0, align 8 - %3 = load i32*, i32** %0, align 8 ; <i32*> [#uses=1] - store i32* %3, i32** %retval, align 8 - br label %return - -return: ; preds = %entry - %retval1 = load i32*, i32** %retval ; <i32*> [#uses=1] - ret i32* %retval1 -} - -define linkonce_odr i32* @_ZN5boost15compressed_pairI7empty_tiE6secondEv(%"struct.boost::compressed_pair<empty_t,int>"* %this) ssp align 2 { -entry: - %this_addr = alloca %"struct.boost::compressed_pair<empty_t,int>"*, align 8 ; <%"struct.boost::compressed_pair<empty_t,int>"**> [#uses=2] - %retval = alloca i32* ; <i32**> [#uses=2] - %0 = alloca i32* ; <i32**> [#uses=2] - %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0] - store %"struct.boost::compressed_pair<empty_t,int>"* %this, %"struct.boost::compressed_pair<empty_t,int>"** %this_addr - %1 = load %"struct.boost::compressed_pair<empty_t,int>"*, %"struct.boost::compressed_pair<empty_t,int>"** %this_addr, align 8 ; <%"struct.boost::compressed_pair<empty_t,int>"*> [#uses=1] - %2 = getelementptr inbounds %"struct.boost::compressed_pair<empty_t,int>", %"struct.boost::compressed_pair<empty_t,int>"* %1, i32 0, i32 0 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1] - %3 = call i32* @_ZN5boost7details19compressed_pair_impI7empty_tiLi1EE6secondEv(%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %2) nounwind ; <i32*> [#uses=1] - store i32* %3, i32** %0, align 8 - %4 = load i32*, i32** %0, align 8 ; <i32*> [#uses=1] - store i32* %4, i32** %retval, align 8 - br label %return - -return: ; preds = %entry - %retval1 = load i32*, i32** %retval ; <i32*> [#uses=1] - ret i32* %retval1 -} - -define linkonce_odr %struct.empty_base_t* @_ZN5boost7details19compressed_pair_impI7empty_tiLi1EE5firstEv(%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %this) nounwind ssp align 2 { -entry: - %this_addr = alloca %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*, align 8 ; 
<%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"**> [#uses=2] - %retval = alloca %struct.empty_base_t* ; <%struct.empty_base_t**> [#uses=2] - %0 = alloca %struct.empty_base_t* ; <%struct.empty_base_t**> [#uses=2] - %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0] - store %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %this, %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"** %this_addr - %1 = load %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*, %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"** %this_addr, align 8 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1] - %2 = bitcast %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %1 to %struct.empty_base_t* ; <%struct.empty_base_t*> [#uses=1] - store %struct.empty_base_t* %2, %struct.empty_base_t** %0, align 8 - %3 = load %struct.empty_base_t*, %struct.empty_base_t** %0, align 8 ; <%struct.empty_base_t*> [#uses=1] - store %struct.empty_base_t* %3, %struct.empty_base_t** %retval, align 8 - br label %return - -return: ; preds = %entry - %retval1 = load %struct.empty_base_t*, %struct.empty_base_t** %retval ; <%struct.empty_base_t*> [#uses=1] - ret %struct.empty_base_t* %retval1 -} - -define linkonce_odr %struct.empty_base_t* @_ZN5boost15compressed_pairI7empty_tiE5firstEv(%"struct.boost::compressed_pair<empty_t,int>"* %this) ssp align 2 { -entry: - %this_addr = alloca %"struct.boost::compressed_pair<empty_t,int>"*, align 8 ; <%"struct.boost::compressed_pair<empty_t,int>"**> [#uses=2] - %retval = alloca %struct.empty_base_t* ; <%struct.empty_base_t**> [#uses=2] - %0 = alloca %struct.empty_base_t* ; <%struct.empty_base_t**> [#uses=2] - %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0] - store %"struct.boost::compressed_pair<empty_t,int>"* %this, %"struct.boost::compressed_pair<empty_t,int>"** %this_addr - %1 = load %"struct.boost::compressed_pair<empty_t,int>"*, %"struct.boost::compressed_pair<empty_t,int>"** %this_addr, align 8 ; <%"struct.boost::compressed_pair<empty_t,int>"*> [#uses=1] - %2 = getelementptr inbounds %"struct.boost::compressed_pair<empty_t,int>", %"struct.boost::compressed_pair<empty_t,int>"* %1, i32 0, i32 0 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1] - %3 = call %struct.empty_base_t* @_ZN5boost7details19compressed_pair_impI7empty_tiLi1EE5firstEv(%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %2) nounwind ; <%struct.empty_base_t*> [#uses=1] - store %struct.empty_base_t* %3, %struct.empty_base_t** %0, align 8 - %4 = load %struct.empty_base_t*, %struct.empty_base_t** %0, align 8 ; <%struct.empty_base_t*> [#uses=1] - store %struct.empty_base_t* %4, %struct.empty_base_t** %retval, align 8 - br label %return - -return: ; preds = %entry - %retval1 = load %struct.empty_base_t*, %struct.empty_base_t** %retval ; <%struct.empty_base_t*> [#uses=1] - ret %struct.empty_base_t* %retval1 -} - -declare i32 @puts(i8*) - -declare void @abort() noreturn diff --git a/llvm/test/Transforms/PhaseOrdering/PR6627.ll b/llvm/test/Transforms/PhaseOrdering/PR6627.ll deleted file mode 100644 index 2774d208352..00000000000 --- a/llvm/test/Transforms/PhaseOrdering/PR6627.ll +++ /dev/null @@ -1,93 +0,0 @@ -; RUN: opt -O3 -S < %s | FileCheck %s -; XFAIL: * - -declare i32 @doo(...) - -; PR6627 - This whole nasty sequence should be flattened down to a single -; 32-bit comparison. 
-define void @test2(i8* %arrayidx) nounwind ssp { -entry: - %xx = bitcast i8* %arrayidx to i32* - %x1 = load i32, i32* %xx, align 4 - %tmp = trunc i32 %x1 to i8 - %conv = zext i8 %tmp to i32 - %cmp = icmp eq i32 %conv, 127 - br i1 %cmp, label %land.lhs.true, label %if.end - -land.lhs.true: ; preds = %entry - %arrayidx4 = getelementptr inbounds i8, i8* %arrayidx, i64 1 - %tmp5 = load i8, i8* %arrayidx4, align 1 - %conv6 = zext i8 %tmp5 to i32 - %cmp7 = icmp eq i32 %conv6, 69 - br i1 %cmp7, label %land.lhs.true9, label %if.end - -land.lhs.true9: ; preds = %land.lhs.true - %arrayidx12 = getelementptr inbounds i8, i8* %arrayidx, i64 2 - %tmp13 = load i8, i8* %arrayidx12, align 1 - %conv14 = zext i8 %tmp13 to i32 - %cmp15 = icmp eq i32 %conv14, 76 - br i1 %cmp15, label %land.lhs.true17, label %if.end - -land.lhs.true17: ; preds = %land.lhs.true9 - %arrayidx20 = getelementptr inbounds i8, i8* %arrayidx, i64 3 - %tmp21 = load i8, i8* %arrayidx20, align 1 - %conv22 = zext i8 %tmp21 to i32 - %cmp23 = icmp eq i32 %conv22, 70 - br i1 %cmp23, label %if.then, label %if.end - -if.then: ; preds = %land.lhs.true17 - %call25 = call i32 (...) @doo() - br label %if.end - -if.end: - ret void - -; CHECK-LABEL: @test2( -; CHECK: %x1 = load i32, i32* %xx, align 4 -; CHECK-NEXT: icmp eq i32 %x1, 1179403647 -; CHECK-NEXT: br i1 {{.*}}, label %if.then, label %if.end -} - -; PR6627 - This should all be flattened down to one compare. This is the same -; as test2, except that the initial load is done as an i8 instead of i32, thus -; requiring widening. -define void @test2a(i8* %arrayidx) nounwind ssp { -entry: - %x1 = load i8, i8* %arrayidx, align 4 - %conv = zext i8 %x1 to i32 - %cmp = icmp eq i32 %conv, 127 - br i1 %cmp, label %land.lhs.true, label %if.end - -land.lhs.true: ; preds = %entry - %arrayidx4 = getelementptr inbounds i8, i8* %arrayidx, i64 1 - %tmp5 = load i8, i8* %arrayidx4, align 1 - %conv6 = zext i8 %tmp5 to i32 - %cmp7 = icmp eq i32 %conv6, 69 - br i1 %cmp7, label %land.lhs.true9, label %if.end - -land.lhs.true9: ; preds = %land.lhs.true - %arrayidx12 = getelementptr inbounds i8, i8* %arrayidx, i64 2 - %tmp13 = load i8, i8* %arrayidx12, align 1 - %conv14 = zext i8 %tmp13 to i32 - %cmp15 = icmp eq i32 %conv14, 76 - br i1 %cmp15, label %land.lhs.true17, label %if.end - -land.lhs.true17: ; preds = %land.lhs.true9 - %arrayidx20 = getelementptr inbounds i8, i8* %arrayidx, i64 3 - %tmp21 = load i8, i8* %arrayidx20, align 1 - %conv22 = zext i8 %tmp21 to i32 - %cmp23 = icmp eq i32 %conv22, 70 - br i1 %cmp23, label %if.then, label %if.end - -if.then: ; preds = %land.lhs.true17 - %call25 = call i32 (...) 
@doo() - br label %if.end - -if.end: - ret void - -; CHECK-LABEL: @test2a( -; CHECK: %x1 = load i32, i32* {{.*}}, align 4 -; CHECK-NEXT: icmp eq i32 %x1, 1179403647 -; CHECK-NEXT: br i1 {{.*}}, label %if.then, label %if.end -} diff --git a/llvm/test/Transforms/PhaseOrdering/basic.ll b/llvm/test/Transforms/PhaseOrdering/basic.ll deleted file mode 100644 index ef57e55e15e..00000000000 --- a/llvm/test/Transforms/PhaseOrdering/basic.ll +++ /dev/null @@ -1,51 +0,0 @@ -; RUN: opt -O3 -S < %s | FileCheck %s - -target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" -target triple = "x86_64-apple-macosx10.6.7" - -declare i8* @malloc(i64) -declare void @free(i8*) - - -; PR2338 -define void @test1() nounwind ssp { - %retval = alloca i32, align 4 - %i = alloca i8*, align 8 - %call = call i8* @malloc(i64 1) - store i8* %call, i8** %i, align 8 - %tmp = load i8*, i8** %i, align 8 - store i8 1, i8* %tmp - %tmp1 = load i8*, i8** %i, align 8 - call void @free(i8* %tmp1) - ret void - -; CHECK-LABEL: @test1( -; CHECK-NEXT: ret void -} - -; This function exposes a phase ordering problem when InstCombine is -; turning %add into a bitmask, making it difficult to spot a 0 return value. -; -; It it also important that %add is expressed as a multiple of %div so scalar -; evolution can recognize it. -define i32 @test2(i32 %a, i32* %p) nounwind uwtable ssp { -entry: - %div = udiv i32 %a, 4 - %arrayidx = getelementptr inbounds i32, i32* %p, i64 0 - store i32 %div, i32* %arrayidx, align 4 - %add = add i32 %div, %div - %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 1 - store i32 %add, i32* %arrayidx1, align 4 - %arrayidx2 = getelementptr inbounds i32, i32* %p, i64 1 - %0 = load i32, i32* %arrayidx2, align 4 - %arrayidx3 = getelementptr inbounds i32, i32* %p, i64 0 - %1 = load i32, i32* %arrayidx3, align 4 - %mul = mul i32 2, %1 - %sub = sub i32 %0, %mul - ret i32 %sub - -; CHECK-LABEL: @test2( -; CHECK: %div = lshr i32 %a, 2 -; CHECK: %add = shl nuw nsw i32 %div, 1 -; CHECK: ret i32 0 -} diff --git a/llvm/test/Transforms/PhaseOrdering/bitfield-bittests.ll b/llvm/test/Transforms/PhaseOrdering/bitfield-bittests.ll deleted file mode 100644 index 2843a7e7612..00000000000 --- a/llvm/test/Transforms/PhaseOrdering/bitfield-bittests.ll +++ /dev/null @@ -1,130 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -O3 -S < %s | FileCheck %s -; RUN: opt -passes='default<O3>' -S < %s | FileCheck %s - -; These are tests that check for set/clear bits in a bitfield based on PR37098: -; https://bugs.llvm.org/show_bug.cgi?id=37098 -; -; The initial IR from clang has been transformed by SROA, but no other passes -; have run yet. In all cases, we should reduce these to a mask and compare -; instead of shift/cast/logic ops. -; -; Currently, this happens mostly through a combination of instcombine and -; aggressive-instcombine. If pass ordering changes, we may have to adjust -; the pattern matching in 1 or both of those passes. - -; Legal i32 is required to allow casting transforms that eliminate the zexts. 
-target datalayout = "n32" - -define i32 @allclear(i32 %a) { -; CHECK-LABEL: @allclear( -; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[A:%.*]], 15 -; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0 -; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP2]] to i32 -; CHECK-NEXT: ret i32 [[TMP3]] -; - %a.sroa.0.0.trunc = trunc i32 %a to i8 - %a.sroa.5.0.shift = lshr i32 %a, 8 - %bf.clear = and i8 %a.sroa.0.0.trunc, 1 - %bf.cast = zext i8 %bf.clear to i32 - %bf.lshr = lshr i8 %a.sroa.0.0.trunc, 1 - %bf.clear2 = and i8 %bf.lshr, 1 - %bf.cast3 = zext i8 %bf.clear2 to i32 - %or = or i32 %bf.cast, %bf.cast3 - %bf.lshr5 = lshr i8 %a.sroa.0.0.trunc, 2 - %bf.clear6 = and i8 %bf.lshr5, 1 - %bf.cast7 = zext i8 %bf.clear6 to i32 - %or8 = or i32 %or, %bf.cast7 - %bf.lshr10 = lshr i8 %a.sroa.0.0.trunc, 3 - %bf.clear11 = and i8 %bf.lshr10, 1 - %bf.cast12 = zext i8 %bf.clear11 to i32 - %or13 = or i32 %or8, %bf.cast12 - %cmp = icmp eq i32 %or13, 0 - %conv = zext i1 %cmp to i32 - ret i32 %conv -} - -define i32 @anyset(i32 %a) { -; CHECK-LABEL: @anyset( -; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[A:%.*]], 15 -; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0 -; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP2]] to i32 -; CHECK-NEXT: ret i32 [[TMP3]] -; - %a.sroa.0.0.trunc = trunc i32 %a to i8 - %a.sroa.5.0.shift = lshr i32 %a, 8 - %bf.clear = and i8 %a.sroa.0.0.trunc, 1 - %bf.cast = zext i8 %bf.clear to i32 - %bf.lshr = lshr i8 %a.sroa.0.0.trunc, 1 - %bf.clear2 = and i8 %bf.lshr, 1 - %bf.cast3 = zext i8 %bf.clear2 to i32 - %or = or i32 %bf.cast, %bf.cast3 - %bf.lshr5 = lshr i8 %a.sroa.0.0.trunc, 2 - %bf.clear6 = and i8 %bf.lshr5, 1 - %bf.cast7 = zext i8 %bf.clear6 to i32 - %or8 = or i32 %or, %bf.cast7 - %bf.lshr10 = lshr i8 %a.sroa.0.0.trunc, 3 - %bf.clear11 = and i8 %bf.lshr10, 1 - %bf.cast12 = zext i8 %bf.clear11 to i32 - %or13 = or i32 %or8, %bf.cast12 - %cmp = icmp ne i32 %or13, 0 - %conv = zext i1 %cmp to i32 - ret i32 %conv -} - -define i32 @allset(i32 %a) { -; CHECK-LABEL: @allset( -; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[A:%.*]], 15 -; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 15 -; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP2]] to i32 -; CHECK-NEXT: ret i32 [[TMP3]] -; - %a.sroa.0.0.trunc = trunc i32 %a to i8 - %a.sroa.5.0.shift = lshr i32 %a, 8 - %bf.clear = and i8 %a.sroa.0.0.trunc, 1 - %bf.cast = zext i8 %bf.clear to i32 - %bf.lshr = lshr i8 %a.sroa.0.0.trunc, 1 - %bf.clear2 = and i8 %bf.lshr, 1 - %bf.cast3 = zext i8 %bf.clear2 to i32 - %and = and i32 %bf.cast, %bf.cast3 - %bf.lshr5 = lshr i8 %a.sroa.0.0.trunc, 2 - %bf.clear6 = and i8 %bf.lshr5, 1 - %bf.cast7 = zext i8 %bf.clear6 to i32 - %and8 = and i32 %and, %bf.cast7 - %bf.lshr10 = lshr i8 %a.sroa.0.0.trunc, 3 - %bf.clear11 = and i8 %bf.lshr10, 1 - %bf.cast12 = zext i8 %bf.clear11 to i32 - %and13 = and i32 %and8, %bf.cast12 - %cmp = icmp ne i32 %and13, 0 - %conv = zext i1 %cmp to i32 - ret i32 %conv -} - -define i32 @anyclear(i32 %a) { -; CHECK-LABEL: @anyclear( -; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[A:%.*]], 15 -; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 15 -; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP2]] to i32 -; CHECK-NEXT: ret i32 [[TMP3]] -; - %a.sroa.0.0.trunc = trunc i32 %a to i8 - %a.sroa.5.0.shift = lshr i32 %a, 8 - %bf.clear = and i8 %a.sroa.0.0.trunc, 1 - %bf.cast = zext i8 %bf.clear to i32 - %bf.lshr = lshr i8 %a.sroa.0.0.trunc, 1 - %bf.clear2 = and i8 %bf.lshr, 1 - %bf.cast3 = zext i8 %bf.clear2 to i32 - %and = and i32 %bf.cast, %bf.cast3 - %bf.lshr5 = lshr i8 %a.sroa.0.0.trunc, 2 - %bf.clear6 = and i8 %bf.lshr5, 1 - %bf.cast7 = zext i8 
%bf.clear6 to i32 - %and8 = and i32 %and, %bf.cast7 - %bf.lshr10 = lshr i8 %a.sroa.0.0.trunc, 3 - %bf.clear11 = and i8 %bf.lshr10, 1 - %bf.cast12 = zext i8 %bf.clear11 to i32 - %and13 = and i32 %and8, %bf.cast12 - %cmp = icmp eq i32 %and13, 0 - %conv = zext i1 %cmp to i32 - ret i32 %conv -} - diff --git a/llvm/test/Transforms/PhaseOrdering/gdce.ll b/llvm/test/Transforms/PhaseOrdering/gdce.ll deleted file mode 100644 index fa62f92500b..00000000000 --- a/llvm/test/Transforms/PhaseOrdering/gdce.ll +++ /dev/null @@ -1,106 +0,0 @@ -; RUN: opt -O2 -S < %s | FileCheck %s - -; Run global DCE to eliminate unused ctor and dtor. -; rdar://9142819 - -; CHECK: main -; CHECK-NOT: _ZN4BaseC1Ev -; CHECK-NOT: _ZN4BaseD1Ev -; CHECK-NOT: _ZN4BaseD2Ev -; CHECK-NOT: _ZN4BaseC2Ev -; CHECK-NOT: _ZN4BaseD0Ev - -%class.Base = type { i32 (...)** } - -@_ZTV4Base = linkonce_odr unnamed_addr constant [4 x i8*] [i8* null, i8* bitcast ({ i8*, i8* }* @_ZTI4Base to i8*), i8* bitcast (void (%class.Base*)* @_ZN4BaseD1Ev to i8*), i8* bitcast (void (%class.Base*)* @_ZN4BaseD0Ev to i8*)] -@_ZTVN10__cxxabiv117__class_type_infoE = external global i8* -@_ZTS4Base = linkonce_odr constant [6 x i8] c"4Base\00" -@_ZTI4Base = linkonce_odr unnamed_addr constant { i8*, i8* } { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @_ZTVN10__cxxabiv117__class_type_infoE, i64 2) to i8*), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @_ZTS4Base, i32 0, i32 0) } - -define i32 @main() uwtable ssp { -entry: - %retval = alloca i32, align 4 - %b = alloca %class.Base, align 8 - %cleanup.dest.slot = alloca i32 - store i32 0, i32* %retval - call void @_ZN4BaseC1Ev(%class.Base* %b) - store i32 0, i32* %retval - store i32 1, i32* %cleanup.dest.slot - call void @_ZN4BaseD1Ev(%class.Base* %b) - %0 = load i32, i32* %retval - ret i32 %0 -} - -define linkonce_odr void @_ZN4BaseC1Ev(%class.Base* %this) unnamed_addr uwtable ssp align 2 { -entry: - %this.addr = alloca %class.Base*, align 8 - store %class.Base* %this, %class.Base** %this.addr, align 8 - %this1 = load %class.Base*, %class.Base** %this.addr - call void @_ZN4BaseC2Ev(%class.Base* %this1) - ret void -} - -define linkonce_odr void @_ZN4BaseD1Ev(%class.Base* %this) unnamed_addr uwtable ssp align 2 { -entry: - %this.addr = alloca %class.Base*, align 8 - store %class.Base* %this, %class.Base** %this.addr, align 8 - %this1 = load %class.Base*, %class.Base** %this.addr - call void @_ZN4BaseD2Ev(%class.Base* %this1) - ret void -} - -define linkonce_odr void @_ZN4BaseD2Ev(%class.Base* %this) unnamed_addr nounwind uwtable ssp align 2 { -entry: - %this.addr = alloca %class.Base*, align 8 - store %class.Base* %this, %class.Base** %this.addr, align 8 - %this1 = load %class.Base*, %class.Base** %this.addr - ret void -} - -define linkonce_odr void @_ZN4BaseC2Ev(%class.Base* %this) unnamed_addr nounwind uwtable ssp align 2 { -entry: - %this.addr = alloca %class.Base*, align 8 - store %class.Base* %this, %class.Base** %this.addr, align 8 - %this1 = load %class.Base*, %class.Base** %this.addr - %0 = bitcast %class.Base* %this1 to i8*** - store i8** getelementptr inbounds ([4 x i8*], [4 x i8*]* @_ZTV4Base, i64 0, i64 2), i8*** %0 - ret void -} - -define linkonce_odr void @_ZN4BaseD0Ev(%class.Base* %this) unnamed_addr uwtable ssp align 2 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { -entry: - %this.addr = alloca %class.Base*, align 8 - %exn.slot = alloca i8* - %ehselector.slot = alloca i32 - store %class.Base* %this, %class.Base** %this.addr, align 8 - %this1 = load %class.Base*, %class.Base** 
%this.addr - invoke void @_ZN4BaseD1Ev(%class.Base* %this1) - to label %invoke.cont unwind label %lpad - -invoke.cont: ; preds = %entry - %0 = bitcast %class.Base* %this1 to i8* - call void @_ZdlPv(i8* %0) nounwind - ret void - -lpad: ; preds = %entry - %1 = landingpad { i8*, i32 } - cleanup - %2 = extractvalue { i8*, i32 } %1, 0 - store i8* %2, i8** %exn.slot - %3 = extractvalue { i8*, i32 } %1, 1 - store i32 %3, i32* %ehselector.slot - %4 = bitcast %class.Base* %this1 to i8* - call void @_ZdlPv(i8* %4) nounwind - br label %eh.resume - -eh.resume: ; preds = %lpad - %exn = load i8*, i8** %exn.slot - %sel = load i32, i32* %ehselector.slot - %lpad.val = insertvalue { i8*, i32 } undef, i8* %exn, 0 - %lpad.val2 = insertvalue { i8*, i32 } %lpad.val, i32 %sel, 1 - resume { i8*, i32 } %lpad.val2 -} - -declare i32 @__gxx_personality_v0(...) - -declare void @_ZdlPv(i8*) nounwind diff --git a/llvm/test/Transforms/PhaseOrdering/globalaa-retained.ll b/llvm/test/Transforms/PhaseOrdering/globalaa-retained.ll deleted file mode 100644 index 47b8e4d7a9e..00000000000 --- a/llvm/test/Transforms/PhaseOrdering/globalaa-retained.ll +++ /dev/null @@ -1,66 +0,0 @@ -; RUN: opt -O3 -S < %s | FileCheck %s -target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128" -target triple = "aarch64" - -@v = internal unnamed_addr global i32 0, align 4 -@p = common global i32* null, align 8 - - -; This test checks that a number of loads and stores are eliminated, -; that can only be eliminated based on GlobalsAA information. As such, -; it tests that GlobalsAA information is retained until the passes -; that perform this optimization, and it protects against accidentally -; dropping the GlobalsAA information earlier in the pipeline, which -; has happened a few times. - -; GlobalsAA invalidation might happen later in the FunctionPassManager -; pipeline than the optimization eliminating unnecessary loads/stores. -; Since GlobalsAA is a module-level analysis, any FunctionPass -; invalidating the GlobalsAA information will affect FunctionPass -; pipelines that execute later. For example, assume a FunctionPass1 | -; FunctionPass2 pipeline and 2 functions to be processed: f1 and f2. -; Assume furthermore that FunctionPass1 uses GlobalsAA info to do an -; optimization, and FunctionPass2 invalidates GlobalsAA. Assume the -; function passes run in the following order: FunctionPass1(f1), -; FunctionPass2(f1), FunctionPass1(f2), FunctionPass2(f2). Then -; FunctionPass1 will not be able to optimize f2, since GlobalsAA will -; have been invalidated in FuntionPass2(f1). - -; To try and also test this scenario, there is an empty function -; before and after the function we're checking so that one of them -; will be processed by the whole set of FunctionPasses before @f. That -; will ensure that if the invalidation happens, it happens before the -; actual optimizations on @f start. -define void @bar() { -entry: - ret void -} - -; Function Attrs: norecurse nounwind -define void @f(i32 %n) { -entry: - %0 = load i32, i32* @v, align 4 - %inc = add nsw i32 %0, 1 - store i32 %inc, i32* @v, align 4 - %1 = load i32*, i32** @p, align 8 - store i32 %n, i32* %1, align 4 - %2 = load i32, i32* @v, align 4 - %inc1 = add nsw i32 %2, 1 - store i32 %inc1, i32* @v, align 4 - ret void -} - -; check variable v is loaded/stored only once after optimization, -; which should be prove that globalsAA survives until the optimization -; that can use it to optimize away the duplicate load/stores on -; variable v. 
-; CHECK: load i32, i32* @v, align 4 -; CHECK: store i32 {{.*}}, i32* @v, align 4 -; CHECK-NOT: load i32, i32* @v, align 4 -; CHECK-NOT: store i32 {{.*}}, i32* @v, align 4 - -; Same as @bar above, in case the functions are processed in reverse order. -define void @bar2() { -entry: - ret void -} diff --git a/llvm/test/Transforms/PhaseOrdering/rotate.ll b/llvm/test/Transforms/PhaseOrdering/rotate.ll deleted file mode 100644 index e10a46cb830..00000000000 --- a/llvm/test/Transforms/PhaseOrdering/rotate.ll +++ /dev/null @@ -1,38 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -O3 -S < %s | FileCheck %s --check-prefixes=ANY,OLDPM -; RUN: opt -passes='default<O3>' -S < %s | FileCheck %s --check-prefixes=ANY,NEWPM - -; This should become a single funnel shift through a combination -; of aggressive-instcombine, simplifycfg, and instcombine. -; https://bugs.llvm.org/show_bug.cgi?id=34924 -; These are equivalent, but the value name with the new-pm shows a bug - -; this code should not have been converted to a speculative select with -; an intermediate transform. - -define i32 @rotl(i32 %a, i32 %b) { -; OLDPM-LABEL: @rotl( -; OLDPM-NEXT: entry: -; OLDPM-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.fshl.i32(i32 [[A:%.*]], i32 [[A]], i32 [[B:%.*]]) -; OLDPM-NEXT: ret i32 [[TMP0]] -; -; NEWPM-LABEL: @rotl( -; NEWPM-NEXT: entry: -; NEWPM-NEXT: [[SPEC_SELECT:%.*]] = tail call i32 @llvm.fshl.i32(i32 [[A:%.*]], i32 [[A]], i32 [[B:%.*]]) -; NEWPM-NEXT: ret i32 [[SPEC_SELECT]] -; -entry: - %cmp = icmp eq i32 %b, 0 - br i1 %cmp, label %end, label %rotbb - -rotbb: - %sub = sub i32 32, %b - %shr = lshr i32 %a, %sub - %shl = shl i32 %a, %b - %or = or i32 %shr, %shl - br label %end - -end: - %cond = phi i32 [ %or, %rotbb ], [ %a, %entry ] - ret i32 %cond -} - diff --git a/llvm/test/Transforms/PhaseOrdering/scev-custom-dl.ll b/llvm/test/Transforms/PhaseOrdering/scev-custom-dl.ll deleted file mode 100644 index ae822dd2e81..00000000000 --- a/llvm/test/Transforms/PhaseOrdering/scev-custom-dl.ll +++ /dev/null @@ -1,67 +0,0 @@ -; RUN: opt -O3 -S -analyze -scalar-evolution < %s | FileCheck %s - -target datalayout = "e-m:m-p:40:64:64:32-i32:32-i16:16-i8:8-n32" - -; -; This file contains phase ordering tests for scalar evolution. -; Test that the standard passes don't obfuscate the IR so scalar evolution can't -; recognize expressions. - -; CHECK: test1 -; The loop body contains two increments by %div. -; Make sure that 2*%div is recognizable, and not expressed as a bit mask of %d. 
-; CHECK: --> {%p,+,(8 * (%d /u 4))} -define void @test1(i32 %d, i32* %p) nounwind uwtable ssp { -entry: - %div = udiv i32 %d, 4 - br label %for.cond - -for.cond: ; preds = %for.inc, %entry - %p.addr.0 = phi i32* [ %p, %entry ], [ %add.ptr1, %for.inc ] - %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ] - %cmp = icmp ne i32 %i.0, 64 - br i1 %cmp, label %for.body, label %for.end - -for.body: ; preds = %for.cond - store i32 0, i32* %p.addr.0, align 4 - %add.ptr = getelementptr inbounds i32, i32* %p.addr.0, i32 %div - store i32 1, i32* %add.ptr, align 4 - %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 %div - br label %for.inc - -for.inc: ; preds = %for.body - %inc = add i32 %i.0, 1 - br label %for.cond - -for.end: ; preds = %for.cond - ret void -} - -; CHECK: test1a -; Same thing as test1, but it is even more tempting to fold 2 * (%d /u 2) -; CHECK: --> {%p,+,(8 * (%d /u 2))} -define void @test1a(i32 %d, i32* %p) nounwind uwtable ssp { -entry: - %div = udiv i32 %d, 2 - br label %for.cond - -for.cond: ; preds = %for.inc, %entry - %p.addr.0 = phi i32* [ %p, %entry ], [ %add.ptr1, %for.inc ] - %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ] - %cmp = icmp ne i32 %i.0, 64 - br i1 %cmp, label %for.body, label %for.end - -for.body: ; preds = %for.cond - store i32 0, i32* %p.addr.0, align 4 - %add.ptr = getelementptr inbounds i32, i32* %p.addr.0, i32 %div - store i32 1, i32* %add.ptr, align 4 - %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 %div - br label %for.inc - -for.inc: ; preds = %for.body - %inc = add i32 %i.0, 1 - br label %for.cond - -for.end: ; preds = %for.cond - ret void -} diff --git a/llvm/test/Transforms/PhaseOrdering/scev.ll b/llvm/test/Transforms/PhaseOrdering/scev.ll deleted file mode 100644 index c616ca2d768..00000000000 --- a/llvm/test/Transforms/PhaseOrdering/scev.ll +++ /dev/null @@ -1,64 +0,0 @@ -; RUN: opt -O3 -S -analyze -scalar-evolution < %s | FileCheck %s -; -; This file contains phase ordering tests for scalar evolution. -; Test that the standard passes don't obfuscate the IR so scalar evolution can't -; recognize expressions. - -; CHECK: test1 -; The loop body contains two increments by %div. -; Make sure that 2*%div is recognizable, and not expressed as a bit mask of %d. 
-; CHECK: --> {%p,+,(8 * (%d /u 4))} -define void @test1(i64 %d, i32* %p) nounwind uwtable ssp { -entry: - %div = udiv i64 %d, 4 - br label %for.cond - -for.cond: ; preds = %for.inc, %entry - %p.addr.0 = phi i32* [ %p, %entry ], [ %add.ptr1, %for.inc ] - %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ] - %cmp = icmp ne i32 %i.0, 64 - br i1 %cmp, label %for.body, label %for.end - -for.body: ; preds = %for.cond - store i32 0, i32* %p.addr.0, align 4 - %add.ptr = getelementptr inbounds i32, i32* %p.addr.0, i64 %div - store i32 1, i32* %add.ptr, align 4 - %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i64 %div - br label %for.inc - -for.inc: ; preds = %for.body - %inc = add i32 %i.0, 1 - br label %for.cond - -for.end: ; preds = %for.cond - ret void -} - -; CHECK: test1a -; Same thing as test1, but it is even more tempting to fold 2 * (%d /u 2) -; CHECK: --> {%p,+,(8 * (%d /u 2))} -define void @test1a(i64 %d, i32* %p) nounwind uwtable ssp { -entry: - %div = udiv i64 %d, 2 - br label %for.cond - -for.cond: ; preds = %for.inc, %entry - %p.addr.0 = phi i32* [ %p, %entry ], [ %add.ptr1, %for.inc ] - %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ] - %cmp = icmp ne i32 %i.0, 64 - br i1 %cmp, label %for.body, label %for.end - -for.body: ; preds = %for.cond - store i32 0, i32* %p.addr.0, align 4 - %add.ptr = getelementptr inbounds i32, i32* %p.addr.0, i64 %div - store i32 1, i32* %add.ptr, align 4 - %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i64 %div - br label %for.inc - -for.inc: ; preds = %for.body - %inc = add i32 %i.0, 1 - br label %for.cond - -for.end: ; preds = %for.cond - ret void -} diff --git a/llvm/test/Transforms/PhaseOrdering/simplifycfg-options.ll b/llvm/test/Transforms/PhaseOrdering/simplifycfg-options.ll deleted file mode 100644 index 69346234635..00000000000 --- a/llvm/test/Transforms/PhaseOrdering/simplifycfg-options.ll +++ /dev/null @@ -1,104 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -O1 -S < %s | FileCheck %s --check-prefix=ALL --check-prefix=OLDPM -; RUN: opt -passes='default<O1>' -S < %s | FileCheck %s --check-prefix=ALL --check-prefix=NEWPM - -; Don't simplify unconditional branches from empty blocks in simplifyCFG -; until late in the pipeline because it can destroy canonical loop structure. 
- -define i1 @PR33605(i32 %a, i32 %b, i32* %c) { -; ALL-LABEL: @PR33605( -; ALL-NEXT: for.body: -; ALL-NEXT: [[OR:%.*]] = or i32 [[B:%.*]], [[A:%.*]] -; ALL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 1 -; ALL-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -; ALL-NEXT: [[CMP:%.*]] = icmp eq i32 [[OR]], [[TMP0]] -; ALL-NEXT: br i1 [[CMP]], label [[IF_END:%.*]], label [[IF_THEN:%.*]] -; ALL: if.then: -; ALL-NEXT: store i32 [[OR]], i32* [[ARRAYIDX]], align 4 -; ALL-NEXT: tail call void @foo() -; ALL-NEXT: br label [[IF_END]] -; ALL: if.end: -; ALL-NEXT: [[CHANGED_1_OFF0:%.*]] = phi i1 [ true, [[IF_THEN]] ], [ false, [[FOR_BODY:%.*]] ] -; ALL-NEXT: [[TMP1:%.*]] = load i32, i32* [[C]], align 4 -; ALL-NEXT: [[CMP_1:%.*]] = icmp eq i32 [[OR]], [[TMP1]] -; ALL-NEXT: br i1 [[CMP_1]], label [[IF_END_1:%.*]], label [[IF_THEN_1:%.*]] -; ALL: if.then.1: -; ALL-NEXT: store i32 [[OR]], i32* [[C]], align 4 -; ALL-NEXT: tail call void @foo() -; ALL-NEXT: br label [[IF_END_1]] -; ALL: if.end.1: -; ALL-NEXT: [[CHANGED_1_OFF0_1:%.*]] = phi i1 [ true, [[IF_THEN_1]] ], [ [[CHANGED_1_OFF0]], [[IF_END]] ] -; ALL-NEXT: ret i1 [[CHANGED_1_OFF0_1]] -; -entry: - br label %for.cond - -for.cond: - %i.0 = phi i32 [ 2, %entry ], [ %dec, %if.end ] - %changed.0.off0 = phi i1 [ false, %entry ], [ %changed.1.off0, %if.end ] - %dec = add nsw i32 %i.0, -1 - %tobool = icmp eq i32 %i.0, 0 - br i1 %tobool, label %for.cond.cleanup, label %for.body - -for.cond.cleanup: - %changed.0.off0.lcssa = phi i1 [ %changed.0.off0, %for.cond ] - ret i1 %changed.0.off0.lcssa - -for.body: - %or = or i32 %a, %b - %idxprom = sext i32 %dec to i64 - %arrayidx = getelementptr inbounds i32, i32* %c, i64 %idxprom - %0 = load i32, i32* %arrayidx, align 4 - %cmp = icmp eq i32 %or, %0 - br i1 %cmp, label %if.end, label %if.then - -if.then: - store i32 %or, i32* %arrayidx, align 4 - call void @foo() - br label %if.end - -if.end: - %changed.1.off0 = phi i1 [ true, %if.then ], [ %changed.0.off0, %for.body ] - br label %for.cond -} - -declare void @foo() - -; PR34603 - https://bugs.llvm.org/show_bug.cgi?id=34603 -; We should have a select of doubles, not a select of double pointers. -; SimplifyCFG should not flatten this before early-cse has a chance to eliminate redundant ops. - -define double @max_of_loads(double* %x, double* %y, i64 %i) { -; ALL-LABEL: @max_of_loads( -; ALL-NEXT: entry: -; ALL-NEXT: [[XI_PTR:%.*]] = getelementptr double, double* [[X:%.*]], i64 [[I:%.*]] -; ALL-NEXT: [[YI_PTR:%.*]] = getelementptr double, double* [[Y:%.*]], i64 [[I]] -; ALL-NEXT: [[XI:%.*]] = load double, double* [[XI_PTR]], align 8 -; ALL-NEXT: [[YI:%.*]] = load double, double* [[YI_PTR]], align 8 -; ALL-NEXT: [[CMP:%.*]] = fcmp ogt double [[XI]], [[YI]] -; ALL-NEXT: [[XI_YI:%.*]] = select i1 [[CMP]], double [[XI]], double [[YI]] -; ALL-NEXT: ret double [[XI_YI]] -; -entry: - %xi_ptr = getelementptr double, double* %x, i64 %i - %yi_ptr = getelementptr double, double* %y, i64 %i - %xi = load double, double* %xi_ptr - %yi = load double, double* %yi_ptr - %cmp = fcmp ogt double %xi, %yi - br i1 %cmp, label %if, label %else - -if: - %xi_ptr_again = getelementptr double, double* %x, i64 %i - %xi_again = load double, double* %xi_ptr_again - br label %end - -else: - %yi_ptr_again = getelementptr double, double* %y, i64 %i - %yi_again = load double, double* %yi_ptr_again - br label %end - -end: - %max = phi double [ %xi_again, %if ], [ %yi_again, %else ] - ret double %max -} - |