author | Sanjay Patel <spatel@rotateright.com> | 2016-04-11 16:11:07 +0000 |
---|---|---|
committer | Sanjay Patel <spatel@rotateright.com> | 2016-04-11 16:11:07 +0000 |
commit | bd8b779d16341d517635172ae354a9ba599db18b (patch) | |
tree | 411d206dda4a1d420fdf65bd9001739c72dd900d /llvm/lib | |
parent | b01a4d48ac4bdea983f43f5e0149aef9e689deef (diff) | |
download | bcm5719-llvm-bd8b779d16341d517635172ae354a9ba599db18b.tar.gz bcm5719-llvm-bd8b779d16341d517635172ae354a9ba599db18b.zip |
[InstCombine] rename variables in shifted-shift helper function (NFCI)
This is step 3 of refactoring to solve PR26760:
https://llvm.org/bugs/show_bug.cgi?id=26760
llvm-svn: 265954
Diffstat (limited to 'llvm/lib')
-rw-r--r-- | llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp | 37 |
1 file changed, 20 insertions, 17 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 688afb9d2e9..0115678b409 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -59,31 +59,34 @@ Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
 /// that have constant shift amounts.
 /// FIXME: This can be extended to handle either a shl or lshr instruction, but
 /// it is currently only valid for a shl.
-static bool canEvaluateShiftedShift(unsigned NumBits, bool IsLeftShift,
-                                    Instruction *I, InstCombiner &IC,
+static bool canEvaluateShiftedShift(unsigned FirstShiftAmt,
+                                    bool IsFirstShiftLeft,
+                                    Instruction *SecondShift, InstCombiner &IC,
                                     Instruction *CxtI) {
-  // We can often fold the shift into shifts-by-a-constant.
-  ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
-  if (!CI)
+  // We need constant shifts.
+  auto *SecondShiftConst = dyn_cast<ConstantInt>(SecondShift->getOperand(1));
+  if (!SecondShiftConst)
     return false;
 
-  // We can always fold shl(c1)+shl(c2) -> shl(c1+c2).
-  if (IsLeftShift)
+  unsigned SecondShiftAmt = SecondShiftConst->getZExtValue();
+
+  // We can always fold shl(c1) + shl(c2) -> shl(c1+c2).
+  if (IsFirstShiftLeft)
     return true;
 
-  // We can always turn shl(c)+shr(c) -> and(c2).
-  if (CI->getValue() == NumBits)
+  // We can always fold shr(c) + shl(c) -> and(c2).
+  if (SecondShiftAmt == FirstShiftAmt)
     return true;
 
-  unsigned TypeWidth = I->getType()->getScalarSizeInBits();
+  unsigned TypeWidth = SecondShift->getType()->getScalarSizeInBits();
 
-  // We can turn shl(c1)+shr(c2) -> shl(c3)+and(c4), but it isn't
-  // profitable unless we know the and'd out bits are already zero.
-  if (CI->getZExtValue() > NumBits) {
-    unsigned LowBits = TypeWidth - CI->getZExtValue();
-    if (IC.MaskedValueIsZero(
-            I->getOperand(0),
-            APInt::getLowBitsSet(TypeWidth, NumBits) << LowBits, 0, CxtI))
+  // If the 2nd shift is bigger than the 1st, we can fold:
+  //   shr(c1) + shl(c2) -> shl(c3) + and(c4)
+  // but it isn't profitable unless we know the and'd out bits are already zero.
+  if (SecondShiftAmt > FirstShiftAmt) {
+    unsigned MaskShift = TypeWidth - SecondShiftAmt;
+    APInt Mask = APInt::getLowBitsSet(TypeWidth, FirstShiftAmt) << MaskShift;
+    if (IC.MaskedValueIsZero(SecondShift->getOperand(0), Mask, 0, CxtI))
       return true;
   }
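For intuition, here is a small standalone sketch (not part of the patch) of the shift identities this helper reasons about, written with plain 32-bit unsigned arithmetic rather than LLVM's APInt/ValueTracking machinery; the variable names and the sample value are invented for illustration.

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical sample value whose low 9 bits are already zero.
  uint32_t X = 0x12345600;

  // shl(c1) + shl(c2) -> shl(c1+c2): always valid while the total shift
  // stays within the bit width.
  assert(((X << 3) << 5) == (X << (3 + 5)));

  // shr(c) + shl(c) -> and(mask): a logical right shift followed by a left
  // shift of the same amount just clears the low c bits.
  assert(((X >> 4) << 4) == (X & ~((1u << 4) - 1)));

  // shr(c1) + shl(c2) with c2 > c1 -> shl(c2-c1) + and(mask): the identity
  // always holds, but the fold only pays off when the bits the mask would
  // clear are known to be zero already, so the trailing 'and' disappears.
  uint32_t c1 = 4, c2 = 9;
  assert(((X >> c1) << c2) == ((X << (c2 - c1)) & ~((1u << c2) - 1)));
  assert(((X >> c1) << c2) == (X << (c2 - c1))); // holds because X's low bits are zero
  return 0;
}
```

The last identity is why the patched code only accepts the second-shift-bigger case when MaskedValueIsZero proves the masked-off bits of the operand are already zero: only then is the extra 'and' a no-op and the fold profitable.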