-rw-r--r--  llvm/lib/Analysis/ScalarEvolution.cpp                                 | 26
-rw-r--r--  llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll  |  4
-rw-r--r--  llvm/test/Analysis/ScalarEvolution/and-xor.ll                         | 14
3 files changed, 42 insertions, 2 deletions
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 072996d08f4..d49135a1b49 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -1778,6 +1778,32 @@ ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
         Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
       return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
     }
+
+    // zext(2^K * (trunc X to iN)) to iM ->
+    //   2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
+    //
+    // Proof:
+    //
+    //     zext(2^K * (trunc X to iN)) to iM
+    //   = zext((trunc X to iN) << K) to iM
+    //   = zext((trunc X to i{N-K}) << K)<nuw> to iM
+    //     (because shl removes the top K bits)
+    //   = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
+    //   = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
+    //
+    if (SA->getNumOperands() == 2)
+      if (auto *MulLHS = dyn_cast<SCEVConstant>(SA->getOperand(0)))
+        if (MulLHS->getAPInt().isPowerOf2())
+          if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SA->getOperand(1))) {
+            int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
+                               MulLHS->getAPInt().logBase2();
+            Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
+            return getMulExpr(
+                getZeroExtendExpr(MulLHS, Ty),
+                getZeroExtendExpr(
+                    getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
+                SCEV::FlagNUW, Depth + 1);
+          }
   }

   // The cast wasn't folded; create an explicit cast node.
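
The identity justified in the comment above can be checked exhaustively for small widths without involving SCEV at all. The following standalone C++ sketch is not part of the patch; plain unsigned arithmetic stands in for the iN/iM types, with N fixed at 8 and uint32_t playing the role of iM. It verifies that zext(2^K * (trunc X to iN)) equals 2^K * (zext(trunc X to i{N-K})) for every relevant X and every K with 0 < K < N:

#include <cassert>
#include <cstdint>
#include <cstdio>

// "trunc V to iBits": keep only the low Bits bits.
static uint32_t truncTo(uint32_t V, unsigned Bits) {
  return V & ((1u << Bits) - 1);
}

int main() {
  const unsigned N = 8;                   // width of the truncated operand (iN)
  for (unsigned K = 1; K < N; ++K)        // the power-of-two factor 2^K
    for (uint32_t X = 0; X < (1u << 16); ++X) {
      // LHS: zext(2^K * (trunc X to iN)) -- the multiply wraps modulo 2^N
      // because it is performed in iN, then the result is zero-extended.
      uint32_t LHS = truncTo((1u << K) * truncTo(X, N), N);
      // RHS: 2^K * (zext(trunc X to i{N-K})) -- no masking is needed, since
      // the product is at most 2^N - 2^K and cannot wrap (hence the <nuw>).
      uint32_t RHS = (1u << K) * truncTo(X, N - K);
      assert(LHS == RHS && "zext/mul fold is not value-preserving");
    }
  std::puts("identity verified for N = 8, all K in [1, 7], all 16-bit X");
  return 0;
}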
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll b/llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll
index 1329761a8e5..763a5fd09b1 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll
@@ -122,7 +122,7 @@ for.end: ; preds = %for.body
; LAA: Memory dependences are safe{{$}}
; LAA: SCEV assumptions:
; LAA-NEXT: {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> Added Flags: <nusw>
-; LAA-NEXT: {((2 * (zext i32 (2 * (trunc i64 %N to i32)) to i64))<nuw> + %a),+,-4}<%for.body> Added Flags: <nusw>
+; LAA-NEXT: {((4 * (zext i31 (trunc i64 %N to i31) to i64)) + %a),+,-4}<%for.body> Added Flags: <nusw>
; The expression for %mul_ext as analyzed by SCEV is
; (zext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64)
@@ -131,7 +131,7 @@ for.end: ; preds = %for.body
; LAA: [PSE] %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext:
; LAA-NEXT: ((2 * (zext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64))<nuw> + %a)
-; LAA-NEXT: --> {((2 * (zext i32 (2 * (trunc i64 %N to i32)) to i64))<nuw> + %a),+,-4}<%for.body>
+; LAA-NEXT: --> {((4 * (zext i31 (trunc i64 %N to i31) to i64)) + %a),+,-4}<%for.body>
; LV-LABEL: f2
; LV-LABEL: for.body.lver.check
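
The updated CHECK lines are exactly the new fold applied with K = 1 and N = 32: zext i32 (2 * (trunc i64 %N to i32)) to i64 becomes (2 * (zext i31 (trunc i64 %N to i31) to i64))<nuw>, and multiplying by the outer 2 folds the two constants into the 4 that now appears in front of the zext.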
diff --git a/llvm/test/Analysis/ScalarEvolution/and-xor.ll b/llvm/test/Analysis/ScalarEvolution/and-xor.ll
index ad636da4d4d..8217e1a1d91 100644
--- a/llvm/test/Analysis/ScalarEvolution/and-xor.ll
+++ b/llvm/test/Analysis/ScalarEvolution/and-xor.ll
@@ -25,3 +25,17 @@ define i64 @test2(i64 %x) {
%z = xor i64 %t, 8
ret i64 %z
}
+
+; Check that we transform the naive lowering of the sequence below,
+; (4 * (zext i5 (2 * (trunc i32 %x to i5)) to i32)),
+; to
+; (8 * (zext i4 (trunc i32 %x to i4) to i32))
+;
+; CHECK-LABEL: @test3
+define i32 @test3(i32 %x) {
+ %a = mul i32 %x, 8
+; CHECK: %b
+; CHECK-NEXT: --> (8 * (zext i4 (trunc i32 %x to i4) to i32))
+ %b = and i32 %a, 124
+ ret i32 %b
+}
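
In this test the fold fires with K = 1 and N = 5: the inner (zext i5 (2 * (trunc i32 %x to i5)) to i32) becomes (2 * (zext i4 (trunc i32 %x to i4) to i32)), and 4 * 2 folds into the 8 expected by the CHECK line. As an independent sanity check (a standalone sketch, not part of the patch), the IR pattern (%x * 8) & 124 computes the same value as 8 * (%x & 15), which is the scalar reading of (8 * (zext i4 (trunc i32 %x to i4) to i32)):

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  // The mask 124 (0b1111100) keeps bits 2..6 of %x * 8.  The product is
  // %x shifted left by 3, so bit 2 is always zero and bits 3..6 hold
  // bits 0..3 of %x; the result is therefore (%x & 15) << 3 == 8 * (%x & 15).
  for (uint64_t I = 0; I <= UINT32_MAX; I += 4099) { // sparse sweep of i32
    uint32_t X = static_cast<uint32_t>(I);
    assert(((X * 8u) & 124u) == 8u * (X & 15u));
  }
  std::puts("(%x * 8) & 124 matches 8 * (%x & 15) on all sampled values");
  return 0;
}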