summaryrefslogtreecommitdiffstats
path: root/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
diff options
context:
space:
mode:
authorCraig Topper <craig.topper@intel.com>2018-09-27 22:31:42 +0000
committerCraig Topper <craig.topper@intel.com>2018-09-27 22:31:42 +0000
commit8b4f0e1b8c454e3427ce1ffe5f86df15ff2a6d30 (patch)
treef620cededa3cc2a1619e48296af0c25986658488 /llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
parent10ec021621f54d0d2c68de2d7ad230b43723d743 (diff)
downloadbcm5719-llvm-8b4f0e1b8c454e3427ce1ffe5f86df15ff2a6d30.tar.gz
bcm5719-llvm-8b4f0e1b8c454e3427ce1ffe5f86df15ff2a6d30.zip
[ScalarizeMaskedMemIntrin] Ensure the mask is a vector of ConstantInts before generating the expansion without control flow.
It's possible the mask itself, or one of its elements, is a ConstantExpr, and we shouldn't optimize in that case. llvm-svn: 343278
Diffstat (limited to 'llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp')
-rw-r--r--llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp23
1 file changed, 19 insertions, 4 deletions
diff --git a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
index 04c9662073b..65787bfd862 100644
--- a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
+++ b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
@@ -77,6 +77,21 @@ FunctionPass *llvm::createScalarizeMaskedMemIntrinPass() {
return new ScalarizeMaskedMemIntrin();
}
+// Returns true only if Mask is a Constant whose every vector element is a
+// ConstantInt. This is stricter than a plain isa<Constant> check: a mask
+// that is (or contains) a ConstantExpr or undef lane fails here, so callers
+// keep the conservative control-flow expansion instead of the straight-line
+// one that assumes each lane's value is known.
+static bool isConstantIntVector(Value *Mask) {
+  Constant *C = dyn_cast<Constant>(Mask);
+  if (!C)
+    return false;
+
+  unsigned NumElts = Mask->getType()->getVectorNumElements();
+  for (unsigned i = 0; i != NumElts; ++i) {
+    // getAggregateElement may return null (e.g. for constant kinds it cannot
+    // decompose); treat that the same as a non-ConstantInt lane and bail.
+    Constant *CElt = C->getAggregateElement(i);
+    if (!CElt || !isa<ConstantInt>(CElt))
+      return false;
+  }
+
+  return true;
+}
+
// Translate a masked load intrinsic like
// <16 x i32 > @llvm.masked.load( <16 x i32>* %addr, i32 align,
// <16 x i1> %mask, <16 x i32> %passthru)
@@ -148,7 +163,7 @@ static void scalarizeMaskedLoad(CallInst *CI) {
// The result vector
Value *VResult = Src0;
- if (isa<Constant>(Mask)) {
+ if (isConstantIntVector(Mask)) {
for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
continue;
@@ -268,7 +283,7 @@ static void scalarizeMaskedStore(CallInst *CI) {
Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
unsigned VectorWidth = VecType->getNumElements();
- if (isa<Constant>(Mask)) {
+ if (isConstantIntVector(Mask)) {
for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
continue;
@@ -369,7 +384,7 @@ static void scalarizeMaskedGather(CallInst *CI) {
unsigned VectorWidth = VecType->getNumElements();
// Shorten the way if the mask is a vector of constants.
- if (isa<Constant>(Mask)) {
+ if (isConstantIntVector(Mask)) {
for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
continue;
@@ -479,7 +494,7 @@ static void scalarizeMaskedScatter(CallInst *CI) {
unsigned VectorWidth = Src->getType()->getVectorNumElements();
// Shorten the way if the mask is a vector of constants.
- if (isa<Constant>(Mask)) {
+ if (isConstantIntVector(Mask)) {
for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
if (cast<ConstantVector>(Mask)->getAggregateElement(Idx)->isNullValue())
continue;
OpenPOWER on IntegriCloud