author     Craig Topper <craig.topper@intel.com>  2018-09-27 22:31:40 +0000
committer  Craig Topper <craig.topper@intel.com>  2018-09-27 22:31:40 +0000
commit     10ec021621f54d0d2c68de2d7ad230b43723d743 (patch)
tree       d892595a84de31ee66c452ed3a0ba9d67375d4a0 /llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
parent     6d229be5ec89ba4a8f53571a15f4836c38b07e65 (diff)
[ScalarizeMaskedMemIntrin] Use cast instead of dyn_cast checked by an assert. Consistently make use of the element type variable we already have. NFCI

cast will take care of asserting internally.

llvm-svn: 343277
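As a side note (not part of the patch), here is a minimal sketch of the two patterns the commit message contrasts; the helper function names are hypothetical. LLVM's cast<> already asserts on a type mismatch in asserts-enabled builds, so pairing dyn_cast<> with an explicit assert is redundant:

// Hypothetical helpers illustrating the before/after patterns.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"
#include <cassert>

using namespace llvm;

// Before: dyn_cast<> returns null on a type mismatch, so the caller asserts.
static VectorType *loadVecTypeBefore(CallInst *CI) {
  VectorType *VecType = dyn_cast<VectorType>(CI->getType());
  assert(VecType && "Unexpected return type of masked load intrinsic");
  return VecType;
}

// After: cast<> performs the same type check itself when asserts are enabled.
static VectorType *loadVecTypeAfter(CallInst *CI) {
  return cast<VectorType>(CI->getType());
}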
Diffstat (limited to 'llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp')
-rw-r--r--  llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp | 16 ++++++----------
1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
index 264b7cd800c..04c9662073b 100644
--- a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
+++ b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
@@ -116,10 +116,9 @@ static void scalarizeMaskedLoad(CallInst *CI) {
   Value *Src0 = CI->getArgOperand(3);
 
   unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
-  VectorType *VecType = dyn_cast<VectorType>(CI->getType());
-  assert(VecType && "Unexpected return type of masked load intrinsic");
+  VectorType *VecType = cast<VectorType>(CI->getType());
 
-  Type *EltTy = CI->getType()->getVectorElementType();
+  Type *EltTy = VecType->getElementType();
 
   IRBuilder<> Builder(CI->getContext());
   Instruction *InsertPt = CI;
@@ -139,7 +138,7 @@ static void scalarizeMaskedLoad(CallInst *CI) {
   }
 
   // Adjust alignment for the scalar instruction.
-  AlignVal = std::min(AlignVal, VecType->getScalarSizeInBits() / 8);
+  AlignVal = std::min(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
   // Bitcast %addr fron i8* to EltTy*
   Type *NewPtrType =
       EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
@@ -244,8 +243,7 @@ static void scalarizeMaskedStore(CallInst *CI) {
   Value *Mask = CI->getArgOperand(3);
 
   unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
-  VectorType *VecType = dyn_cast<VectorType>(Src->getType());
-  assert(VecType && "Unexpected data type in masked store intrinsic");
+  VectorType *VecType = cast<VectorType>(Src->getType());
 
   Type *EltTy = VecType->getElementType();
 
@@ -263,7 +261,7 @@ static void scalarizeMaskedStore(CallInst *CI) {
   }
 
   // Adjust alignment for the scalar instruction.
-  AlignVal = std::max(AlignVal, VecType->getScalarSizeInBits() / 8);
+  AlignVal = std::max(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
   // Bitcast %addr fron i8* to EltTy*
   Type *NewPtrType =
       EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
@@ -354,9 +352,7 @@ static void scalarizeMaskedGather(CallInst *CI) {
   Value *Mask = CI->getArgOperand(2);
   Value *Src0 = CI->getArgOperand(3);
 
-  VectorType *VecType = dyn_cast<VectorType>(CI->getType());
-
-  assert(VecType && "Unexpected return type of masked load intrinsic");
+  VectorType *VecType = cast<VectorType>(CI->getType());
 
   IRBuilder<> Builder(CI->getContext());
   Instruction *InsertPt = CI;
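
A minimal standalone sketch (not from the patch) of why the AlignVal change above is NFCI: for a vector type, getScalarSizeInBits() reports the element type's size in bits, which is exactly what getPrimitiveSizeInBits() returns when called on the element type itself. The concrete element type, vector width, and main() harness below are illustrative assumptions only:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Type *EltTy = Type::getInt32Ty(Ctx);
  VectorType *VecTy = VectorType::get(EltTy, /*NumElements=*/8);

  // Old expression: VecTy->getScalarSizeInBits() / 8   -> 4 bytes for i32.
  // New expression: EltTy->getPrimitiveSizeInBits() / 8 -> also 4 bytes.
  assert(VecTy->getScalarSizeInBits() / 8 ==
         EltTy->getPrimitiveSizeInBits() / 8);
  return 0;
}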