summary refs log tree commit diff stats
path: root/llvm/test/Transforms
diff options
context:
space:
mode:
authorSanjay Patel <spatel@rotateright.com>2018-12-17 21:14:51 +0000
committerSanjay Patel <spatel@rotateright.com>2018-12-17 21:14:51 +0000
commit200885e654fc1663a9bdcd3fc444c56e1a0e64ed (patch)
treec6f7186738ad32dbaf3a146fd86f30a29a53a729 /llvm/test/Transforms
parenta9bcf5b334680c3baf4cc8ff50ab8904b6e11d32 (diff)
downloadbcm5719-llvm-200885e654fc1663a9bdcd3fc444c56e1a0e64ed.tar.gz
bcm5719-llvm-200885e654fc1663a9bdcd3fc444c56e1a0e64ed.zip
[AggressiveInstCombine] convert rotate with guard branch into funnel shift (PR34924)
Now, that we have funnel shift intrinsics, it should be safe to convert this form of rotate to it. In the worst case (a target that doesn't have rotate instructions), we will expand this into a branch-less sequence of ALU ops (neg/and/and/lshr/shl/or) in the backend, so it's still very likely to be a perf improvement over the original code. The motivating source code pattern for this is shown in: https://bugs.llvm.org/show_bug.cgi?id=34924 Background: I looked at several different options before deciding where to try this - instcombine, simplifycfg, CGP - because it doesn't fit cleanly anywhere AFAIK. The backend (CGP, SDAG, GlobalIsel?) is too late for what we're trying to accomplish. We want to have the IR converted before we reach things like vectorization because the reduced code can make a loop much simpler to transform. Technically, this could be included in instcombine, but it's a large pattern match that includes control-flow, so it just felt wrong to stuff into there (although I have a draft of that patch). Similarly, this could be part of simplifycfg, but all of this pattern matching is a stretch. So we're left with our relatively new dumping ground for homeless transforms: aggressive-instcombine. This only runs at -O3, but that seems like a reasonable limitation given that source code has many options to avoid this pattern (including the recently added clang intrinsics for rotates). I'm including a PhaseOrdering test because we require the teamwork of 3 passes (aggressive-instcombine, instcombine, simplifycfg) to get this into the minimal IR form that we want. That test shows a bug with the new pass manager that's independent of this change (but it will be masked if we canonicalize harder to funnel shift intrinsics in instcombine). Differential Revision: https://reviews.llvm.org/D55604 llvm-svn: 349396
Diffstat (limited to 'llvm/test/Transforms')
-rw-r--r--llvm/test/Transforms/AggressiveInstCombine/rotate.ll318
-rw-r--r--llvm/test/Transforms/PhaseOrdering/rotate.ll13
2 files changed, 285 insertions, 46 deletions
diff --git a/llvm/test/Transforms/AggressiveInstCombine/rotate.ll b/llvm/test/Transforms/AggressiveInstCombine/rotate.ll
index 8ed38258691..20499087e35 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/rotate.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/rotate.ll
@@ -9,14 +9,10 @@ define i32 @rotl(i32 %a, i32 %b) {
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
; CHECK: rotbb:
-; CHECK-NEXT: [[SUB:%.*]] = sub i32 32, [[B]]
-; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[A:%.*]], [[SUB]]
-; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A]], [[B]]
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[OR]], [[ROTBB]] ], [ [[A]], [[ENTRY:%.*]] ]
-; CHECK-NEXT: ret i32 [[COND]]
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.fshl.i32(i32 [[A:%.*]], i32 [[A]], i32 [[B]])
+; CHECK-NEXT: ret i32 [[TMP0]]
;
entry:
%cmp = icmp eq i32 %b, 0
@@ -40,14 +36,10 @@ define i32 @rotl_commute_phi(i32 %a, i32 %b) {
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
; CHECK: rotbb:
-; CHECK-NEXT: [[SUB:%.*]] = sub i32 32, [[B]]
-; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[A:%.*]], [[SUB]]
-; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A]], [[B]]
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[A]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ]
-; CHECK-NEXT: ret i32 [[COND]]
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.fshl.i32(i32 [[A:%.*]], i32 [[A]], i32 [[B]])
+; CHECK-NEXT: ret i32 [[TMP0]]
;
entry:
%cmp = icmp eq i32 %b, 0
@@ -71,14 +63,10 @@ define i32 @rotl_commute_or(i32 %a, i32 %b) {
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
; CHECK: rotbb:
-; CHECK-NEXT: [[SUB:%.*]] = sub i32 32, [[B]]
-; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[A:%.*]], [[SUB]]
-; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A]], [[B]]
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[A]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ]
-; CHECK-NEXT: ret i32 [[COND]]
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.fshl.i32(i32 [[A:%.*]], i32 [[A]], i32 [[B]])
+; CHECK-NEXT: ret i32 [[TMP0]]
;
entry:
%cmp = icmp eq i32 %b, 0
@@ -104,15 +92,11 @@ define i32 @rotl_insert_valid_location(i32 %a, i32 %b) {
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
; CHECK: rotbb:
-; CHECK-NEXT: [[SUB:%.*]] = sub i32 32, [[B]]
-; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[A:%.*]], [[SUB]]
-; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A]], [[B]]
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[OR]], [[ROTBB]] ], [ [[A]], [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[OTHER:%.*]] = phi i32 [ 1, [[ROTBB]] ], [ 2, [[ENTRY]] ]
-; CHECK-NEXT: [[RES:%.*]] = or i32 [[COND]], [[OTHER]]
+; CHECK-NEXT: [[OTHER:%.*]] = phi i32 [ 1, [[ROTBB]] ], [ 2, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.fshl.i32(i32 [[A:%.*]], i32 [[A]], i32 [[B]])
+; CHECK-NEXT: [[RES:%.*]] = or i32 [[TMP0]], [[OTHER]]
; CHECK-NEXT: ret i32 [[RES]]
;
entry:
@@ -139,14 +123,10 @@ define i32 @rotr(i32 %a, i32 %b) {
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
; CHECK: rotbb:
-; CHECK-NEXT: [[SUB:%.*]] = sub i32 32, [[B]]
-; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
-; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[A]], [[B]]
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[OR]], [[ROTBB]] ], [ [[A]], [[ENTRY:%.*]] ]
-; CHECK-NEXT: ret i32 [[COND]]
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.fshr.i32(i32 [[A:%.*]], i32 [[A]], i32 [[B]])
+; CHECK-NEXT: ret i32 [[TMP0]]
;
entry:
%cmp = icmp eq i32 %b, 0
@@ -170,10 +150,205 @@ define i32 @rotr_commute_phi(i32 %a, i32 %b) {
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
; CHECK: rotbb:
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.fshr.i32(i32 [[A:%.*]], i32 [[A]], i32 [[B]])
+; CHECK-NEXT: ret i32 [[TMP0]]
+;
+entry:
+ %cmp = icmp eq i32 %b, 0
+ br i1 %cmp, label %end, label %rotbb
+
+rotbb:
+ %sub = sub i32 32, %b
+ %shl = shl i32 %a, %sub
+ %shr = lshr i32 %a, %b
+ %or = or i32 %shr, %shl
+ br label %end
+
+end:
+ %cond = phi i32 [ %a, %entry ], [ %or, %rotbb ]
+ ret i32 %cond
+}
+
+define i32 @rotr_commute_or(i32 %a, i32 %b) {
+; CHECK-LABEL: @rotr_commute_or(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
+; CHECK: rotbb:
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.fshr.i32(i32 [[A:%.*]], i32 [[A]], i32 [[B]])
+; CHECK-NEXT: ret i32 [[TMP0]]
+;
+entry:
+ %cmp = icmp eq i32 %b, 0
+ br i1 %cmp, label %end, label %rotbb
+
+rotbb:
+ %sub = sub i32 32, %b
+ %shl = shl i32 %a, %sub
+ %shr = lshr i32 %a, %b
+ %or = or i32 %shl, %shr
+ br label %end
+
+end:
+ %cond = phi i32 [ %a, %entry ], [ %or, %rotbb ]
+ ret i32 %cond
+}
+
+; Negative test - non-power-of-2 might require urem expansion in the backend.
+
+define i12 @could_be_rotr_weird_type(i12 %a, i12 %b) {
+; CHECK-LABEL: @could_be_rotr_weird_type(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i12 [[B:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
+; CHECK: rotbb:
+; CHECK-NEXT: [[SUB:%.*]] = sub i12 12, [[B]]
+; CHECK-NEXT: [[SHL:%.*]] = shl i12 [[A:%.*]], [[SUB]]
+; CHECK-NEXT: [[SHR:%.*]] = lshr i12 [[A]], [[B]]
+; CHECK-NEXT: [[OR:%.*]] = or i12 [[SHL]], [[SHR]]
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: [[COND:%.*]] = phi i12 [ [[A]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ]
+; CHECK-NEXT: ret i12 [[COND]]
+;
+entry:
+ %cmp = icmp eq i12 %b, 0
+ br i1 %cmp, label %end, label %rotbb
+
+rotbb:
+ %sub = sub i12 12, %b
+ %shl = shl i12 %a, %sub
+ %shr = lshr i12 %a, %b
+ %or = or i12 %shl, %shr
+ br label %end
+
+end:
+ %cond = phi i12 [ %a, %entry ], [ %or, %rotbb ]
+ ret i12 %cond
+}
+
+; Negative test - wrong phi ops.
+
+define i32 @not_rotr_1(i32 %a, i32 %b) {
+; CHECK-LABEL: @not_rotr_1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
+; CHECK: rotbb:
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 32, [[B]]
+; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[A]], [[B]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[B]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ]
+; CHECK-NEXT: ret i32 [[COND]]
+;
+entry:
+ %cmp = icmp eq i32 %b, 0
+ br i1 %cmp, label %end, label %rotbb
+
+rotbb:
+ %sub = sub i32 32, %b
+ %shl = shl i32 %a, %sub
+ %shr = lshr i32 %a, %b
+ %or = or i32 %shl, %shr
+ br label %end
+
+end:
+ %cond = phi i32 [ %b, %entry ], [ %or, %rotbb ]
+ ret i32 %cond
+}
+
+; Negative test - too many phi ops.
+
+define i32 @not_rotr_2(i32 %a, i32 %b, i32 %c) {
+; CHECK-LABEL: @not_rotr_2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
+; CHECK: rotbb:
; CHECK-NEXT: [[SUB:%.*]] = sub i32 32, [[B]]
; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[A]], [[B]]
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
+; CHECK-NEXT: [[CMP42:%.*]] = icmp ugt i32 [[OR]], 42
+; CHECK-NEXT: br i1 [[CMP42]], label [[END]], label [[BOGUS:%.*]]
+; CHECK: bogus:
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[A]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ], [ [[C:%.*]], [[BOGUS]] ]
+; CHECK-NEXT: ret i32 [[COND]]
+;
+entry:
+ %cmp = icmp eq i32 %b, 0
+ br i1 %cmp, label %end, label %rotbb
+
+rotbb:
+ %sub = sub i32 32, %b
+ %shl = shl i32 %a, %sub
+ %shr = lshr i32 %a, %b
+ %or = or i32 %shl, %shr
+ %cmp42 = icmp ugt i32 %or, 42
+ br i1 %cmp42, label %end, label %bogus
+
+bogus:
+ br label %end
+
+end:
+ %cond = phi i32 [ %a, %entry ], [ %or, %rotbb ], [ %c, %bogus ]
+ ret i32 %cond
+}
+
+; Negative test - wrong cmp (but this should match?).
+
+define i32 @not_rotr_3(i32 %a, i32 %b) {
+; CHECK-LABEL: @not_rotr_3(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[B:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
+; CHECK: rotbb:
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 32, [[B]]
+; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[A]], [[B]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[A]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ]
+; CHECK-NEXT: ret i32 [[COND]]
+;
+entry:
+ %cmp = icmp sle i32 %b, 0
+ br i1 %cmp, label %end, label %rotbb
+
+rotbb:
+ %sub = sub i32 32, %b
+ %shl = shl i32 %a, %sub
+ %shr = lshr i32 %a, %b
+ %or = or i32 %shl, %shr
+ br label %end
+
+end:
+ %cond = phi i32 [ %a, %entry ], [ %or, %rotbb ]
+ ret i32 %cond
+}
+
+; Negative test - wrong shift.
+
+define i32 @not_rotr_4(i32 %a, i32 %b) {
+; CHECK-LABEL: @not_rotr_4(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
+; CHECK: rotbb:
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 32, [[B]]
+; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
+; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[A]], [[B]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
; CHECK-NEXT: br label [[END]]
; CHECK: end:
; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[A]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ]
@@ -186,8 +361,41 @@ entry:
rotbb:
%sub = sub i32 32, %b
%shl = shl i32 %a, %sub
+ %shr = ashr i32 %a, %b
+ %or = or i32 %shl, %shr
+ br label %end
+
+end:
+ %cond = phi i32 [ %a, %entry ], [ %or, %rotbb ]
+ ret i32 %cond
+}
+
+; Negative test - wrong shift.
+
+define i32 @not_rotr_5(i32 %a, i32 %b) {
+; CHECK-LABEL: @not_rotr_5(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
+; CHECK: rotbb:
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 32, [[B]]
+; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[B]], [[SUB]]
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[A:%.*]], [[B]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[A]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ]
+; CHECK-NEXT: ret i32 [[COND]]
+;
+entry:
+ %cmp = icmp eq i32 %b, 0
+ br i1 %cmp, label %end, label %rotbb
+
+rotbb:
+ %sub = sub i32 32, %b
+ %shl = shl i32 %b, %sub
%shr = lshr i32 %a, %b
- %or = or i32 %shr, %shl
+ %or = or i32 %shl, %shr
br label %end
end:
@@ -195,8 +403,46 @@ end:
ret i32 %cond
}
-define i32 @rotr_commute_or(i32 %a, i32 %b) {
-; CHECK-LABEL: @rotr_commute_or(
+; Negative test - wrong sub.
+
+define i32 @not_rotr_6(i32 %a, i32 %b) {
+; CHECK-LABEL: @not_rotr_6(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
+; CHECK: rotbb:
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 8, [[B]]
+; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[A]], [[B]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[A]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ]
+; CHECK-NEXT: ret i32 [[COND]]
+;
+entry:
+ %cmp = icmp eq i32 %b, 0
+ br i1 %cmp, label %end, label %rotbb
+
+rotbb:
+ %sub = sub i32 8, %b
+ %shl = shl i32 %a, %sub
+ %shr = lshr i32 %a, %b
+ %or = or i32 %shl, %shr
+ br label %end
+
+end:
+ %cond = phi i32 [ %a, %entry ], [ %or, %rotbb ]
+ ret i32 %cond
+}
+
+; Negative test - extra use. Technically, we could transform this
+; because it doesn't increase the instruction count, but we're
+; being cautious not to cause a potential perf pessimization for
+; targets that do not have a rotate instruction.
+
+define i32 @could_be_rotr(i32 %a, i32 %b, i32* %p) {
+; CHECK-LABEL: @could_be_rotr(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
@@ -205,6 +451,7 @@ define i32 @rotr_commute_or(i32 %a, i32 %b) {
; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[A]], [[B]]
; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
+; CHECK-NEXT: store i32 [[OR]], i32* [[P:%.*]]
; CHECK-NEXT: br label [[END]]
; CHECK: end:
; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[A]], [[ENTRY:%.*]] ], [ [[OR]], [[ROTBB]] ]
@@ -219,6 +466,7 @@ rotbb:
%shl = shl i32 %a, %sub
%shr = lshr i32 %a, %b
%or = or i32 %shl, %shr
+ store i32 %or, i32* %p
br label %end
end:
diff --git a/llvm/test/Transforms/PhaseOrdering/rotate.ll b/llvm/test/Transforms/PhaseOrdering/rotate.ll
index 1d33de70c29..190807668ee 100644
--- a/llvm/test/Transforms/PhaseOrdering/rotate.ll
+++ b/llvm/test/Transforms/PhaseOrdering/rotate.ll
@@ -9,17 +9,8 @@
define i32 @rotl(i32 %a, i32 %b) {
; OLDPM-LABEL: @rotl(
; OLDPM-NEXT: entry:
-; OLDPM-NEXT: [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
-; OLDPM-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[ROTBB:%.*]]
-; OLDPM: rotbb:
-; OLDPM-NEXT: [[SUB:%.*]] = sub i32 32, [[B]]
-; OLDPM-NEXT: [[SHR:%.*]] = lshr i32 [[A:%.*]], [[SUB]]
-; OLDPM-NEXT: [[SHL:%.*]] = shl i32 [[A]], [[B]]
-; OLDPM-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
-; OLDPM-NEXT: br label [[END]]
-; OLDPM: end:
-; OLDPM-NEXT: [[COND:%.*]] = phi i32 [ [[OR]], [[ROTBB]] ], [ [[A]], [[ENTRY:%.*]] ]
-; OLDPM-NEXT: ret i32 [[COND]]
+; OLDPM-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.fshl.i32(i32 [[A:%.*]], i32 [[A]], i32 [[B:%.*]])
+; OLDPM-NEXT: ret i32 [[TMP0]]
;
; NEWPM-LABEL: @rotl(
; NEWPM-NEXT: entry:
OpenPOWER on IntegriCloud