author     Davide Italiano <davide@freebsd.org>    2017-05-21 20:30:27 +0000
committer  Davide Italiano <davide@freebsd.org>    2017-05-21 20:30:27 +0000
commit     21a49dcdf19e9df75015d2618400e9fde2e579fc (patch)
tree       d06ec7d98363ba7e5b965cd08d5b2c256db49368 /llvm/lib/Transforms
parent     a3f75469311c68b3d0d4b79df6cac5941c6a9202 (diff)
download   bcm5719-llvm-21a49dcdf19e9df75015d2618400e9fde2e579fc.tar.gz
           bcm5719-llvm-21a49dcdf19e9df75015d2618400e9fde2e579fc.zip
[InstCombine] Take into account the size in sext->lshr->trunc patterns.
Otherwise we end up miscompiling, transforming:

define i8 @tinky() {
  %sext = sext i1 1 to i16
  %hibit = lshr i16 %sext, 15
  %tr = trunc i16 %hibit to i8
  ret i8 %tr
}

into:

  %sext = sext i1 1 to i8
  ret i8 %sext

and the first gets folded to ret i8 1, while the second gets folded to
ret i8 -1. Eventually we should get rid of this transform entirely, but
for now, this at least fixes a known correctness bug.

Differential Revision: https://reviews.llvm.org/D33338

llvm-svn: 303513
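As a sanity check, the arithmetic behind the two foldings can be reproduced
outside of LLVM. The following is a minimal plain-C++ sketch (not part of the
patch; the variable names are ours) mirroring the @tinky IR above:

#include <cassert>
#include <cstdint>

int main() {
  // %sext = sext i1 1 to i16: i1 1 is -1 when sign-extended, filling all bits.
  int16_t Sext = -1;                      // 0xFFFF
  // %hibit = lshr i16 %sext, 15: logical shift pulls zeros in from the left.
  uint16_t HiBit = uint16_t(Sext) >> 15;  // 0x0001
  // %tr = trunc i16 %hibit to i8
  int8_t Tr = int8_t(HiBit);              // 1
  assert(Tr == 1);
  // The broken fold emitted `sext i1 1 to i8` instead, i.e. 0xFF == -1.
  int8_t Folded = -1;
  assert(Tr != Folded);                   // hence the miscompile
}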
Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp | 19
1 file changed, 13 insertions, 6 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 001a4bcf16f..f4bf5221f6a 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -585,6 +585,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
     return CastInst::CreateIntegerCast(Shift, DestTy, false);
   }
 
+  // FIXME: We should canonicalize to zext/trunc and remove this transform.
   // Transform trunc(lshr (sext A), Cst) to ashr A, Cst to eliminate type
   // conversion.
   // It works because bits coming from sign extension have the same value as
@@ -595,18 +596,24 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
     Value *SExt = cast<Instruction>(Src)->getOperand(0);
     const unsigned SExtSize = SExt->getType()->getPrimitiveSizeInBits();
     const unsigned ASize = A->getType()->getPrimitiveSizeInBits();
+    const unsigned CISize = CI.getType()->getPrimitiveSizeInBits();
+    const unsigned MaxAmt = SExtSize - std::max(CISize, ASize);
     unsigned ShiftAmt = Cst->getZExtValue();
+
     // This optimization can be only performed when zero bits generated by
     // the original lshr aren't pulled into the value after truncation, so we
     // can only shift by values no larger than the number of extension bits.
     // FIXME: Instead of bailing when the shift is too large, use and to clear
     // the extra bits.
-    if (SExt->hasOneUse() && ShiftAmt <= SExtSize - ASize) {
-      // If shifting by the size of the original value in bits or more, it is
-      // being filled with the sign bit, so shift by ASize-1 to avoid ub.
-      Value *Shift = Builder->CreateAShr(A, std::min(ShiftAmt, ASize-1));
-      Shift->takeName(Src);
-      return CastInst::CreateIntegerCast(Shift, CI.getType(), true);
+    if (ShiftAmt <= MaxAmt) {
+      if (CISize == ASize)
+        return BinaryOperator::CreateAShr(A, ConstantInt::get(CI.getType(),
+                                          std::min(ShiftAmt, ASize - 1)));
+      if (SExt->hasOneUse()) {
+        Value *Shift = Builder->CreateAShr(A, std::min(ShiftAmt, ASize-1));
+        Shift->takeName(Src);
+        return CastInst::CreateIntegerCast(Shift, CI.getType(), true);
+      }
     }
   }
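To see why the new guard rejects the reproducer, here is the same computation
worked by hand in a stand-alone C++ sketch (the widths are taken from the
@tinky example; the variables only mimic the patch and are not LLVM API calls):

#include <algorithm>
#include <cassert>

int main() {
  // Widths for trunc(lshr(sext i1 -> i16, 15)) -> i8:
  const unsigned SExtSize = 16; // width of the sext result
  const unsigned ASize    = 1;  // width of the original value A (i1)
  const unsigned CISize   = 8;  // width of the truncate's result type (i8)
  const unsigned ShiftAmt = 15; // lshr shift amount

  // Old guard: only accounted for the extension bits relative to A.
  bool OldGuard = ShiftAmt <= SExtSize - ASize;               // 15 <= 15: fires
  // New guard: the shift may also not pull zeros into the truncated width.
  const unsigned MaxAmt = SExtSize - std::max(CISize, ASize); // 16 - 8 = 8
  bool NewGuard = ShiftAmt <= MaxAmt;                         // 15 <= 8: bails

  assert(OldGuard && !NewGuard); // the miscompiling case is now rejected
}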