Diffstat (limited to 'llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp')
| -rw-r--r-- | llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp | 66 |
1 file changed, 34 insertions, 32 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 1646c0fd39a..e6f7e623067 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1208,7 +1208,6 @@ static Value *simplifyMaskedLoad(const IntrinsicInst &II,
 }
 
 // TODO, Obvious Missing Transforms:
-// * SimplifyDemandedVectorElts
 // * Single constant active lane -> store
 // * Narrow width by halfs excluding zero/undef lanes
 Instruction *InstCombiner::simplifyMaskedStore(IntrinsicInst &II) {
@@ -1244,6 +1243,8 @@ Instruction *InstCombiner::simplifyMaskedStore(IntrinsicInst &II) {
 // * Dereferenceable address & few lanes -> scalarize speculative load/selects
 // * Adjacent vector addresses -> masked.load
 // * Narrow width by halfs excluding zero/undef lanes
+// * Vector splat address w/known mask -> scalar load
+// * Vector incrementing address -> vector masked load
 static Instruction *simplifyMaskedGather(IntrinsicInst &II, InstCombiner &IC) {
   // If the mask is all zeros, return the "passthru" argument of the gather.
   auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
@@ -1253,6 +1254,38 @@ static Instruction *simplifyMaskedGather(IntrinsicInst &II, InstCombiner &IC) {
   return nullptr;
 }
 
+// TODO, Obvious Missing Transforms:
+// * Single constant active lane -> store
+// * Adjacent vector addresses -> masked.store
+// * Narrow store width by halfs excluding zero/undef lanes
+// * Vector splat address w/known mask -> scalar store
+// * Vector incrementing address -> vector masked store
+Instruction *InstCombiner::simplifyMaskedScatter(IntrinsicInst &II) {
+  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
+  if (!ConstMask)
+    return nullptr;
+
+  // If the mask is all zeros, a scatter does nothing.
+  if (ConstMask->isNullValue())
+    return eraseInstFromFunction(II);
+
+  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
+  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
+  APInt UndefElts(DemandedElts.getBitWidth(), 0);
+  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0),
+                                            DemandedElts, UndefElts)) {
+    II.setOperand(0, V);
+    return &II;
+  }
+  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(1),
+                                            DemandedElts, UndefElts)) {
+    II.setOperand(1, V);
+    return &II;
+  }
+
+  return nullptr;
+}
+
 /// This function transforms launder.invariant.group and strip.invariant.group
 /// like:
 /// launder(launder(%x)) -> launder(%x) (the result is not the argument)
@@ -1287,37 +1320,6 @@ static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
   return cast<Instruction>(Result);
 }
 
-// TODO, Obvious Missing Transforms:
-// * SimplifyDemandedVectorElts
-// * Single constant active lane -> store
-// * Adjacent vector addresses -> masked.store
-// * Narrow store width by halfs excluding zero/undef lanes
-Instruction *InstCombiner::simplifyMaskedScatter(IntrinsicInst &II) {
-  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
-  if (!ConstMask)
-    return nullptr;
-
-  // If the mask is all zeros, a scatter does nothing.
-  if (ConstMask->isNullValue())
-    return eraseInstFromFunction(II);
-
-  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
-  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
-  APInt UndefElts(DemandedElts.getBitWidth(), 0);
-  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0),
-                                            DemandedElts, UndefElts)) {
-    II.setOperand(0, V);
-    return &II;
-  }
-  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(1),
-                                            DemandedElts, UndefElts)) {
-    II.setOperand(1, V);
-    return &II;
-  }
-
-  return nullptr;
-}
-
 static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombiner &IC) {
   assert((II.getIntrinsicID() == Intrinsic::cttz ||
           II.getIntrinsicID() == Intrinsic::ctlz) &&
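
For context on the relocated simplifyMaskedScatter above: the transform first maps the scatter's constant mask to a demanded-elements bitmask, then hands the value operand (operand 0) and the pointer-vector operand (operand 1) to SimplifyDemandedVectorElts, since lanes whose mask bit is known false are never stored and so need not be preserved. Below is a minimal sketch of that mask-to-demanded-elements step; it is an illustration written for this note, not the in-tree possiblyDemandedEltsInMask helper, and it assumes a fixed-width vector mask.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/Constants.h"
using namespace llvm;

// Sketch: a lane counts as demanded unless its mask element is provably the
// constant false. Non-constant or undef mask lanes stay conservatively
// demanded.
static APInt sketchDemandedEltsInMask(Constant *Mask) {
  unsigned NumElts = Mask->getType()->getVectorNumElements();
  APInt DemandedElts = APInt::getAllOnesValue(NumElts);
  for (unsigned I = 0; I != NumElts; ++I)
    if (Constant *Elt = Mask->getAggregateElement(I))
      if (Elt->isNullValue())
        DemandedElts.clearBit(I);
  return DemandedElts;
}

For example, a constant mask of <i1 true, i1 false, i1 true, i1 false> yields DemandedElts = 0b0101, so SimplifyDemandedVectorElts is free to rewrite lanes 1 and 3 of both scatter operands (e.g. fold away shuffles or insertelements that only feed those dead lanes).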

