path: root/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
author    Sanjay Patel <spatel@rotateright.com>    2019-01-01 21:51:39 +0000
committer Sanjay Patel <spatel@rotateright.com>    2019-01-01 21:51:39 +0000
commit    654e6aabb9f25d0d0fbad194ae6e26dd96c9e9db (patch)
tree      24050f1ba2ef6fb0ab5a7667646fac810bfb6e2c /llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
parent    00b390a00044ba15f6e1b7c3200d86fc576ccdf7 (diff)
[InstCombine] canonicalize raw IR rotate patterns to funnel shift
The final piece of IR-level analysis to allow this was committed with:
rL350188

Using the intrinsics should improve transforms based on cost models like vectorization and inlining.

The backend should be prepared too, so we can now canonicalize more sequences of shift/logic to the intrinsics and know that the end result should be equal or better to the original code even if the target does not have an actual rotate instruction.

llvm-svn: 350199
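As a sketch of what this canonicalization does, here is the kind of IR the fold now targets; the function and value names are invented for illustration and are not taken from the commit's tests:

    ; rotl32(a, b) spelled as compare + select + shifts: the select
    ; guards b == 0 because the (32 - b) shift amount would be out of
    ; range there.
    define i32 @rotl32(i32 %a, i32 %b) {
      %neg = sub i32 32, %b
      %shr = lshr i32 %a, %neg
      %shl = shl i32 %a, %b
      %or  = or i32 %shr, %shl
      %cmp = icmp eq i32 %b, 0
      %sel = select i1 %cmp, i32 %a, i32 %or
      ret i32 %sel
    }

After this patch, instcombine should collapse the whole sequence to a single funnel-shift call, which is already well-defined for a shift amount of zero:

    define i32 @rotl32(i32 %a, i32 %b) {
      %sel = call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
      ret i32 %sel
    }
    declare i32 @llvm.fshl.i32(i32, i32, i32)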
Diffstat (limited to 'llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp')
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp  |  21
1 file changed, 8 insertions(+), 13 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 19858ae149a..b1e0ffa6fa7 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -1547,9 +1547,9 @@ static Instruction *factorizeMinMaxTree(SelectPatternFlavor SPF, Value *LHS,
 }
 
 /// Try to reduce a rotate pattern that includes a compare and select into a
-/// sequence of ALU ops only. Example:
+/// funnel shift intrinsic. Example:
 /// rotl32(a, b) --> (b == 0 ? a : ((a >> (32 - b)) | (a << b)))
-///              --> (a >> (-b & 31)) | (a << (b & 31))
+///              --> call llvm.fshl.i32(a, a, b)
 static Instruction *foldSelectRotate(SelectInst &Sel,
                                      InstCombiner::BuilderTy &Builder) {
   // The false value of the select must be a rotate of the true value.
@@ -1593,17 +1593,12 @@ static Instruction *foldSelectRotate(SelectInst &Sel,
     return nullptr;
 
   // This is a rotate that avoids shift-by-bitwidth UB in a suboptimal way.
-  // Convert to safely bitmasked shifts.
-  // TODO: When we can canonicalize to funnel shift intrinsics without risk of
-  // performance regressions, replace this sequence with that call.
-  Value *NegShAmt = Builder.CreateNeg(ShAmt);
-  Value *MaskedShAmt = Builder.CreateAnd(ShAmt, Width - 1);
-  Value *MaskedNegShAmt = Builder.CreateAnd(NegShAmt, Width - 1);
-  Value *NewSA0 = ShAmt == SA0 ? MaskedShAmt : MaskedNegShAmt;
-  Value *NewSA1 = ShAmt == SA1 ? MaskedShAmt : MaskedNegShAmt;
-  Value *NewSh0 = Builder.CreateBinOp(ShiftOpcode0, TVal, NewSA0);
-  Value *NewSh1 = Builder.CreateBinOp(ShiftOpcode1, TVal, NewSA1);
-  return BinaryOperator::CreateOr(NewSh0, NewSh1);
+  // Convert to funnel shift intrinsic.
+  bool IsFshl = (ShAmt == SA0 && ShiftOpcode0 == BinaryOperator::Shl) ||
+                (ShAmt == SA1 && ShiftOpcode1 == BinaryOperator::Shl);
+  Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
+  Function *F = Intrinsic::getDeclaration(Sel.getModule(), IID, Sel.getType());
+  return IntrinsicInst::Create(F, { TVal, TVal, ShAmt });
 }
 
 Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
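The IsFshl flag in the new code decides between the two intrinsics by checking which shift receives the original (unnegated) shift amount: if it feeds the shl, the pattern is a left rotate (llvm.fshl); otherwise it is a right rotate (llvm.fshr). A hypothetical right-rotate counterpart to the example above, again with invented names:

    ; rotr32(a, b) == (b == 0 ? a : ((a << (32 - b)) | (a >> b)))
    define i32 @rotr32(i32 %a, i32 %b) {
      %neg = sub i32 32, %b
      %shl = shl i32 %a, %neg
      %shr = lshr i32 %a, %b
      %or  = or i32 %shl, %shr
      %cmp = icmp eq i32 %b, 0
      %sel = select i1 %cmp, i32 %a, i32 %or
      ret i32 %sel
    }

    ; Here %b feeds the lshr, not the shl, so IsFshl is false and the
    ; fold should emit:
    ;   %sel = call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b)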