author     Sanjay Patel <spatel@rotateright.com>  2020-01-07 10:41:17 -0500
committer  Sanjay Patel <spatel@rotateright.com>  2020-01-07 10:44:23 -0500
commit     f8962571f70ad955d2da40b470a7cd246f19db23 (patch)
tree       2befb297e6ada06f92aa449d27d7dec9e99a02f3
parent     e699c03c9be4839f03dcc3a7fa86a44594e80dcf (diff)
[InstCombine] try to pull 'not' of select into compare operands
not (select ?, (cmp TPred, ?, ?), (cmp FPred, ?, ?)) -->
  select ?, (cmp TPred', ?, ?), (cmp FPred', ?, ?)

If both sides of the select are cmps, we can remove an instruction. The case
where only one side is a cmp is deferred to a possible follow-on patch.

We have a more general 'isFreeToInvert' analysis, but I'm not seeing a way to
use that more widely without inducing infinite looping (opposing transforms).
Here, we flip the compare predicates directly, so there is no danger of
creating extra intermediate 'not' ops.

Alive proofs:
https://rise4fun.com/Alive/jKa

  Name: both select values are compares - invert predicates
  %tcmp = icmp sle i32 %x, %y
  %fcmp = icmp ugt i32 %z, %w
  %sel = select i1 %cond, i1 %tcmp, i1 %fcmp
  %not = xor i1 %sel, true
  =>
  %tcmp_not = icmp sgt i32 %x, %y
  %fcmp_not = icmp ule i32 %z, %w
  %not = select i1 %cond, i1 %tcmp_not, i1 %fcmp_not

  Name: false val is compare - invert/not
  %fcmp = icmp ugt i32 %z, %w
  %sel = select i1 %cond, i1 %tcmp, i1 %fcmp
  %not = xor i1 %sel, true
  =>
  %tcmp_not = xor i1 %tcmp, -1
  %fcmp_not = icmp ule i32 %z, %w
  %not = select i1 %cond, i1 %tcmp_not, i1 %fcmp_not

Differential Revision: https://reviews.llvm.org/D72007
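For readers who don't think in IR, the same identity can be illustrated in plain C++. This is an illustrative sketch, not part of the patch: it sticks to integer compares so that ordinary C++ operators match the icmp predicates exactly (the floating-point case needs the ordered/unordered flip shown in the Alive proofs above), and the function names are made up for this example.

  #include <cassert>

  // not (select cond, (x <= y), (z > w))  -- the pattern before the fold
  static bool beforeFold(bool cond, int x, int y, int z, int w) {
    return !(cond ? (x <= y) : (z > w));
  }

  // select cond, (x > y), (z <= w)  -- predicates inverted, 'not' removed
  static bool afterFold(bool cond, int x, int y, int z, int w) {
    return cond ? (x > y) : (z <= w);
  }

  int main() {
    // Exhaustively check a small range; the two forms agree everywhere.
    for (int c = 0; c <= 1; ++c)
      for (int x = -2; x <= 2; ++x)
        for (int y = -2; y <= 2; ++y)
          for (int z = -2; z <= 2; ++z)
            for (int w = -2; w <= 2; ++w)
              assert(beforeFold(c, x, y, z, w) == afterFold(c, x, y, z, w));
    return 0;
  }

The patch applies this identity directly to the compare predicates via getInversePredicate(), as shown in the diff below.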
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp17
-rw-r--r--llvm/test/Transforms/InstCombine/not.ll19
2 files changed, 32 insertions, 4 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 4a30b60ca93..cc0a9127f8b 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -3279,6 +3279,23 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
NotLHS, NotRHS);
}
}
+
+ // Pull 'not' into operands of select if both operands are one-use compares.
+ // Inverting the predicates eliminates the 'not' operation.
+ // Example:
+ // not (select ?, (cmp TPred, ?, ?), (cmp FPred, ?, ?)) -->
+ // select ?, (cmp InvTPred, ?, ?), (cmp InvFPred, ?, ?)
+ // TODO: Canonicalize by hoisting 'not' into an arm of the select if only
+ // 1 select operand is a cmp?
+ if (auto *Sel = dyn_cast<SelectInst>(Op0)) {
+ auto *CmpT = dyn_cast<CmpInst>(Sel->getTrueValue());
+ auto *CmpF = dyn_cast<CmpInst>(Sel->getFalseValue());
+ if (CmpT && CmpF && CmpT->hasOneUse() && CmpF->hasOneUse()) {
+ CmpT->setPredicate(CmpT->getInversePredicate());
+ CmpF->setPredicate(CmpF->getInversePredicate());
+ return replaceInstUsesWith(I, Sel);
+ }
+ }
}
if (Instruction *NewXor = sinkNotIntoXor(I, Builder))
diff --git a/llvm/test/Transforms/InstCombine/not.ll b/llvm/test/Transforms/InstCombine/not.ll
index 01be119a393..6c7d4863047 100644
--- a/llvm/test/Transforms/InstCombine/not.ll
+++ b/llvm/test/Transforms/InstCombine/not.ll
@@ -253,11 +253,10 @@ define <2 x i32> @not_add_vec(<2 x i32> %x) {
define i1 @not_select_cmp_cmp(i32 %x, i32 %y, float %z, float %w, i1 %cond) {
; CHECK-LABEL: @not_select_cmp_cmp(
-; CHECK-NEXT: [[CMPT:%.*]] = icmp sle i32 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[CMPF:%.*]] = fcmp ugt float [[Z:%.*]], [[W:%.*]]
+; CHECK-NEXT: [[CMPT:%.*]] = icmp sgt i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMPF:%.*]] = fcmp ole float [[Z:%.*]], [[W:%.*]]
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[COND:%.*]], i1 [[CMPT]], i1 [[CMPF]]
-; CHECK-NEXT: [[NOT:%.*]] = xor i1 [[SEL]], true
-; CHECK-NEXT: ret i1 [[NOT]]
+; CHECK-NEXT: ret i1 [[SEL]]
;
%cmpt = icmp sle i32 %x, %y
%cmpf = fcmp ugt float %z, %w
@@ -268,6 +267,8 @@ define i1 @not_select_cmp_cmp(i32 %x, i32 %y, float %z, float %w, i1 %cond) {
declare void @use1(i1)
+; TODO: Missed canonicalization - hoist 'not'?
+
define i1 @not_select_cmp_cmp_extra_use1(i32 %x, i32 %y, float %z, float %w, i1 %cond) {
; CHECK-LABEL: @not_select_cmp_cmp_extra_use1(
; CHECK-NEXT: [[CMPT:%.*]] = icmp sle i32 [[X:%.*]], [[Y:%.*]]
@@ -285,6 +286,8 @@ define i1 @not_select_cmp_cmp_extra_use1(i32 %x, i32 %y, float %z, float %w, i1
ret i1 %not
}
+; TODO: Missed canonicalization - hoist 'not'?
+
define i1 @not_select_cmp_cmp_extra_use2(i32 %x, i32 %y, float %z, float %w, i1 %cond) {
; CHECK-LABEL: @not_select_cmp_cmp_extra_use2(
; CHECK-NEXT: [[CMPT:%.*]] = icmp sle i32 [[X:%.*]], [[Y:%.*]]
@@ -302,6 +305,8 @@ define i1 @not_select_cmp_cmp_extra_use2(i32 %x, i32 %y, float %z, float %w, i1
ret i1 %not
}
+; Negative test - extra uses would require more instructions.
+
define i1 @not_select_cmp_cmp_extra_use3(i32 %x, i32 %y, float %z, float %w, i1 %cond) {
; CHECK-LABEL: @not_select_cmp_cmp_extra_use3(
; CHECK-NEXT: [[CMPT:%.*]] = icmp sle i32 [[X:%.*]], [[Y:%.*]]
@@ -321,6 +326,8 @@ define i1 @not_select_cmp_cmp_extra_use3(i32 %x, i32 %y, float %z, float %w, i1
ret i1 %not
}
+; Negative test - extra uses would require more instructions.
+
define i1 @not_select_cmp_cmp_extra_use4(i32 %x, i32 %y, float %z, float %w, i1 %cond) {
; CHECK-LABEL: @not_select_cmp_cmp_extra_use4(
; CHECK-NEXT: [[CMPT:%.*]] = icmp sle i32 [[X:%.*]], [[Y:%.*]]
@@ -338,6 +345,8 @@ define i1 @not_select_cmp_cmp_extra_use4(i32 %x, i32 %y, float %z, float %w, i1
ret i1 %not
}
+; TODO: Missed canonicalization - hoist 'not'?
+
define i1 @not_select_cmpt(double %x, double %y, i1 %z, i1 %cond) {
; CHECK-LABEL: @not_select_cmpt(
; CHECK-NEXT: [[CMPT:%.*]] = fcmp oeq double [[X:%.*]], [[Y:%.*]]
@@ -351,6 +360,8 @@ define i1 @not_select_cmpt(double %x, double %y, i1 %z, i1 %cond) {
ret i1 %not
}
+; TODO: Missed canonicalization - hoist 'not'?
+
define i1 @not_select_cmpf(i1 %x, i32 %z, i32 %w, i1 %cond) {
; CHECK-LABEL: @not_select_cmpf(
; CHECK-NEXT: [[CMPF:%.*]] = icmp ugt i32 [[Z:%.*]], [[W:%.*]]