-rw-r--r--  llvm/include/llvm/Constants.h                         13
-rw-r--r--  llvm/include/llvm/DerivedTypes.h                       7
-rw-r--r--  llvm/include/llvm/Type.h                              20
-rw-r--r--  llvm/lib/Analysis/ConstantRange.cpp                    1
-rw-r--r--  llvm/lib/Analysis/ScalarEvolution.cpp                  2
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp          1
-rw-r--r--  llvm/lib/ExecutionEngine/ExecutionEngine.cpp           8
-rw-r--r--  llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp            1
-rw-r--r--  llvm/lib/Transforms/IPO/IndMemRemoval.cpp              1
-rw-r--r--  llvm/lib/Transforms/Scalar/CorrelatedExprs.cpp         1
-rw-r--r--  llvm/lib/Transforms/Scalar/InstructionCombining.cpp   76
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopUnswitch.cpp            3
-rw-r--r--  llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp    2
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyCFG.cpp              1
-rw-r--r--  llvm/lib/VMCore/ConstantFolding.cpp                    7
-rw-r--r--  llvm/lib/VMCore/Constants.cpp                          4
-rw-r--r--  llvm/lib/VMCore/Type.cpp                              11
-rw-r--r--  llvm/lib/VMCore/ValueTypes.cpp                         1
18 files changed, 84 insertions(+), 76 deletions(-)
diff --git a/llvm/include/llvm/Constants.h b/llvm/include/llvm/Constants.h
index 96dc101f378..5a17f398870 100644
--- a/llvm/include/llvm/Constants.h
+++ b/llvm/include/llvm/Constants.h
@@ -60,7 +60,7 @@ public:
/// sign extended as appropriate for the type of this constant.
/// @brief Return the sign extended value.
inline int64_t getSExtValue() const {
- unsigned Size = getType()->getPrimitiveSizeInBits();
+ unsigned Size = Value::getType()->getPrimitiveSizeInBits();
return (int64_t(Val) << (64-Size)) >> (64-Size);
}
/// A helper method that can be used to determine if the constant contained
@@ -92,6 +92,13 @@ public:
/// @brief Get a ConstantInt for a specific value.
static ConstantInt *get(const Type *Ty, int64_t V);
+ /// getType - Specialize the getType() method to always return an IntegerType,
+ /// which reduces the amount of casting needed in parts of the compiler.
+ ///
+ inline const IntegerType *getType() const {
+ return reinterpret_cast<const IntegerType*>(Value::getType());
+ }
+
/// This static method returns true if the type Ty is big enough to
/// represent the value V. This can be used to avoid having the get method
/// assert when V is larger than Ty can represent. Note that there are two
@@ -130,7 +137,7 @@ public:
int64_t V = getSExtValue();
if (V < 0) return false; // Be careful about wrap-around on 'long's
++V;
- return !isValueValidForType(getType(), V) || V < 0;
+ return !isValueValidForType(Value::getType(), V) || V < 0;
}
return isAllOnesValue();
}
@@ -145,7 +152,7 @@ public:
int64_t V = getSExtValue();
if (V > 0) return false; // Be careful about wrap-around on 'long's
--V;
- return !isValueValidForType(getType(), V) || V > 0;
+ return !isValueValidForType(Value::getType(), V) || V > 0;
}
return getZExtValue() == 0;
}
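
Note: a minimal usage sketch of the specialized getType() added above, assuming the headers of this vintage; the helper name is ours, not part of the patch:

    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    using namespace llvm;

    // Returns the bit width of a ConstantInt's type with no casting, relying
    // on the specialized getType() that yields an IntegerType directly.
    static unsigned widthOf(const ConstantInt *CI) {
      // Before this change: cast<IntegerType>(CI->getType())->getBitWidth()
      return CI->getType()->getBitWidth();
    }
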
diff --git a/llvm/include/llvm/DerivedTypes.h b/llvm/include/llvm/DerivedTypes.h
index c93c514da6a..c1e3a98a04d 100644
--- a/llvm/include/llvm/DerivedTypes.h
+++ b/llvm/include/llvm/DerivedTypes.h
@@ -101,6 +101,13 @@ public:
/// @brief Get the number of bits in this IntegerType
unsigned getBitWidth() const { return getSubclassData(); }
+ /// getBitMask - Return a bitmask with ones set for all of the bits
+ /// that can be set by an unsigned version of this type. This is 0xFF for
+ /// sbyte/ubyte, 0xFFFF for shorts, etc.
+ uint64_t getBitMask() const {
+ return ~uint64_t(0UL) >> (64-getPrimitiveSizeInBits());
+ }
+
/// This method determines if the width of this IntegerType is a power-of-2
/// in terms of 8 bit bytes.
/// @returns true if this is a power-of-2 byte width.
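
For reference, a stand-alone illustration of the value getBitMask() produces for a given bit width; the function name below is ours, not LLVM's:

    #include <cassert>
    #include <cstdint>

    // All ones in the low BitWidth bits, computed without a shift-by-64 case.
    static uint64_t bitMaskForWidth(unsigned BitWidth) {
      assert(BitWidth >= 1 && BitWidth <= 64 && "width out of range");
      return ~uint64_t(0) >> (64 - BitWidth);
    }

    // bitMaskForWidth(8)  == 0xFF, bitMaskForWidth(16) == 0xFFFF,
    // bitMaskForWidth(64) == ~0ULL -- no special case needed at 64 bits.
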
diff --git a/llvm/include/llvm/Type.h b/llvm/include/llvm/Type.h
index c54b584e3d4..189c5ebd195 100644
--- a/llvm/include/llvm/Type.h
+++ b/llvm/include/llvm/Type.h
@@ -24,6 +24,7 @@ namespace llvm {
class DerivedType;
class PointerType;
+class IntegerType;
class TypeMapBase;
/// This file contains the declaration of the Type class. For more "Type" type
@@ -217,14 +218,6 @@ public:
///
unsigned getPrimitiveSizeInBits() const;
- /// getIntegerTypeMask - Return a bitmask with ones set for all of the bits
- /// that can be set by an unsigned version of this type. This is 0xFF for
- /// sbyte/ubyte, 0xFFFF for shorts, etc.
- uint64_t getIntegerTypeMask() const {
- assert(isInteger() && "This only works for integer types!");
- return ~uint64_t(0UL) >> (64-getPrimitiveSizeInBits());
- }
-
/// getForwardedType - Return the type that this type has been resolved to if
/// it has been resolved to anything. This is used to implement the
/// union-find algorithm for type resolution, and shouldn't be used by general
@@ -237,14 +230,7 @@ public:
/// getVAArgsPromotedType - Return the type an argument of this type
/// will be promoted to if passed through a variable argument
/// function.
- const Type *getVAArgsPromotedType() const {
- if (ID == IntegerTyID && getSubclassData() < 32)
- return Type::Int32Ty;
- else if (ID == FloatTyID)
- return Type::DoubleTy;
- else
- return this;
- }
+ const Type *getVAArgsPromotedType() const;
//===--------------------------------------------------------------------===//
// Type Iteration support
@@ -279,7 +265,7 @@ public:
// These are the builtin types that are always available...
//
static const Type *VoidTy, *LabelTy, *FloatTy, *DoubleTy;
- static const Type *Int1Ty, *Int8Ty, *Int16Ty, *Int32Ty, *Int64Ty;
+ static const IntegerType *Int1Ty, *Int8Ty, *Int16Ty, *Int32Ty, *Int64Ty;
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Type *T) { return true; }
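
A small sketch (assuming this patch is applied) of what the retyped Int*Ty globals allow: integer-specific queries work without a cast, though callers must see the complete IntegerType, which is why several files below gain an include of DerivedTypes.h:

    #include "llvm/Type.h"
    #include "llvm/DerivedTypes.h"
    using namespace llvm;

    // Int32Ty is now a const IntegerType*, so IntegerType methods apply directly.
    static uint64_t int32Mask() {
      // Previously: Type::Int32Ty->getIntegerTypeMask()
      return Type::Int32Ty->getBitMask();
    }
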
diff --git a/llvm/lib/Analysis/ConstantRange.cpp b/llvm/lib/Analysis/ConstantRange.cpp
index 3b74f403b32..022c34fe409 100644
--- a/llvm/lib/Analysis/ConstantRange.cpp
+++ b/llvm/lib/Analysis/ConstantRange.cpp
@@ -26,6 +26,7 @@
#include "llvm/Instruction.h"
#include "llvm/Instructions.h"
#include "llvm/Type.h"
+#include "llvm/DerivedTypes.h"
#include "llvm/Support/Streams.h"
#include <ostream>
using namespace llvm;
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 1df54c56ca4..1ee1cf2784f 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -1333,7 +1333,7 @@ static uint64_t GetConstantFactor(SCEVHandle S) {
if (SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
return GetConstantFactor(T->getOperand()) &
- T->getType()->getIntegerTypeMask();
+ cast<IntegerType>(T->getType())->getBitMask();
if (SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S))
return GetConstantFactor(E->getOperand());
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index fee85a13c68..d83dcc4d50b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -21,6 +21,7 @@
#include "llvm/Target/TargetOptions.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
diff --git a/llvm/lib/ExecutionEngine/ExecutionEngine.cpp b/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
index 771ce0b1aa3..93207cc8bd9 100644
--- a/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -456,9 +456,7 @@ void ExecutionEngine::StoreValueToMemory(GenericValue Val, GenericValue *Ptr,
switch (Ty->getTypeID()) {
case Type::IntegerTyID: {
unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
- uint64_t BitMask = (1ull << BitWidth) - 1;
- if (BitWidth >= 64)
- BitMask = (uint64_t)-1;
+ uint64_t BitMask = cast<IntegerType>(Ty)->getBitMask();
GenericValue TmpVal = Val;
if (BitWidth <= 8)
Ptr->Untyped[0] = Val.Int8Val & BitMask;
@@ -514,9 +512,7 @@ Store4BytesLittleEndian:
switch (Ty->getTypeID()) {
case Type::IntegerTyID: {
unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
- uint64_t BitMask = (1ull << BitWidth) - 1;
- if (BitWidth >= 64)
- BitMask = (uint64_t)-1;
+ uint64_t BitMask = cast<IntegerType>(Ty)->getBitMask();
GenericValue TmpVal = Val;
if (BitWidth <= 8)
Ptr->Untyped[0] = Val.Int8Val & BitMask;
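
To make the motivation concrete, a hedged before/after sketch of the mask computation replaced in both hunks above (the surrounding StoreValueToMemory code is unchanged):

    #include <cstdint>

    // Shape of the code being removed: the shift form needs a special case
    // because shifting a 64-bit value by 64 is undefined in C++.
    static uint64_t oldMask(unsigned BitWidth) {
      if (BitWidth >= 64)
        return (uint64_t)-1;
      return (1ull << BitWidth) - 1;
    }

    // The replacement matches IntegerType::getBitMask() and handles
    // widths 1..64 uniformly.
    static uint64_t newMask(unsigned BitWidth) {
      return ~uint64_t(0) >> (64 - BitWidth);
    }
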
diff --git a/llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp b/llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp
index 9881ac3405f..ac77a5f200e 100644
--- a/llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp
@@ -23,6 +23,7 @@
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
#include "llvm/GlobalValue.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/Debug.h"
diff --git a/llvm/lib/Transforms/IPO/IndMemRemoval.cpp b/llvm/lib/Transforms/IPO/IndMemRemoval.cpp
index c68a4d3b249..fa414f25db3 100644
--- a/llvm/lib/Transforms/IPO/IndMemRemoval.cpp
+++ b/llvm/lib/Transforms/IPO/IndMemRemoval.cpp
@@ -21,6 +21,7 @@
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/Type.h"
+#include "llvm/DerivedTypes.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
diff --git a/llvm/lib/Transforms/Scalar/CorrelatedExprs.cpp b/llvm/lib/Transforms/Scalar/CorrelatedExprs.cpp
index e58bcf454a1..1027a49da2e 100644
--- a/llvm/lib/Transforms/Scalar/CorrelatedExprs.cpp
+++ b/llvm/lib/Transforms/Scalar/CorrelatedExprs.cpp
@@ -33,6 +33,7 @@
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Type.h"
+#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Transforms/Utils/Local.h"
diff --git a/llvm/lib/Transforms/Scalar/InstructionCombining.cpp b/llvm/lib/Transforms/Scalar/InstructionCombining.cpp
index 324ec8aac04..fd6d23e28f1 100644
--- a/llvm/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -558,7 +558,7 @@ static void ComputeMaskedBits(Value *V, uint64_t Mask, uint64_t &KnownZero,
Instruction *I = dyn_cast<Instruction>(V);
if (!I) return;
- Mask &= V->getType()->getIntegerTypeMask();
+ Mask &= cast<IntegerType>(V->getType())->getBitMask();
switch (I->getOpcode()) {
case Instruction::And:
@@ -632,11 +632,11 @@ static void ComputeMaskedBits(Value *V, uint64_t Mask, uint64_t &KnownZero,
}
case Instruction::ZExt: {
// Compute the bits in the result that are not present in the input.
- const Type *SrcTy = I->getOperand(0)->getType();
- uint64_t NotIn = ~SrcTy->getIntegerTypeMask();
- uint64_t NewBits = I->getType()->getIntegerTypeMask() & NotIn;
+ const IntegerType *SrcTy = cast<IntegerType>(I->getOperand(0)->getType());
+ uint64_t NotIn = ~SrcTy->getBitMask();
+ uint64_t NewBits = cast<IntegerType>(I->getType())->getBitMask() & NotIn;
- Mask &= SrcTy->getIntegerTypeMask();
+ Mask &= SrcTy->getBitMask();
ComputeMaskedBits(I->getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
// The top bits are known to be zero.
@@ -645,11 +645,11 @@ static void ComputeMaskedBits(Value *V, uint64_t Mask, uint64_t &KnownZero,
}
case Instruction::SExt: {
// Compute the bits in the result that are not present in the input.
- const Type *SrcTy = I->getOperand(0)->getType();
- uint64_t NotIn = ~SrcTy->getIntegerTypeMask();
- uint64_t NewBits = I->getType()->getIntegerTypeMask() & NotIn;
+ const IntegerType *SrcTy = cast<IntegerType>(I->getOperand(0)->getType());
+ uint64_t NotIn = ~SrcTy->getBitMask();
+ uint64_t NewBits = cast<IntegerType>(I->getType())->getBitMask() & NotIn;
- Mask &= SrcTy->getIntegerTypeMask();
+ Mask &= SrcTy->getBitMask();
ComputeMaskedBits(I->getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
@@ -766,7 +766,7 @@ static void ComputeSignedMinMaxValuesFromKnownBits(const Type *Ty,
uint64_t KnownZero,
uint64_t KnownOne,
int64_t &Min, int64_t &Max) {
- uint64_t TypeBits = Ty->getIntegerTypeMask();
+ uint64_t TypeBits = cast<IntegerType>(Ty)->getBitMask();
uint64_t UnknownBits = ~(KnownZero|KnownOne) & TypeBits;
uint64_t SignBit = 1ULL << (Ty->getPrimitiveSizeInBits()-1);
@@ -796,7 +796,7 @@ static void ComputeUnsignedMinMaxValuesFromKnownBits(const Type *Ty,
uint64_t KnownOne,
uint64_t &Min,
uint64_t &Max) {
- uint64_t TypeBits = Ty->getIntegerTypeMask();
+ uint64_t TypeBits = cast<IntegerType>(Ty)->getBitMask();
uint64_t UnknownBits = ~(KnownZero|KnownOne) & TypeBits;
// The minimum value is when the unknown bits are all zeros.
@@ -831,7 +831,7 @@ bool InstCombiner::SimplifyDemandedBits(Value *V, uint64_t DemandedMask,
}
// If this is the root being simplified, allow it to have multiple uses,
// just set the DemandedMask to all bits.
- DemandedMask = V->getType()->getIntegerTypeMask();
+ DemandedMask = cast<IntegerType>(V->getType())->getBitMask();
} else if (DemandedMask == 0) { // Not demanding any bits from V.
if (V != UndefValue::get(V->getType()))
return UpdateValueUsesWith(V, UndefValue::get(V->getType()));
@@ -843,7 +843,7 @@ bool InstCombiner::SimplifyDemandedBits(Value *V, uint64_t DemandedMask,
Instruction *I = dyn_cast<Instruction>(V);
if (!I) return false; // Only analyze instructions.
- DemandedMask &= V->getType()->getIntegerTypeMask();
+ DemandedMask &= cast<IntegerType>(V->getType())->getBitMask();
uint64_t KnownZero2 = 0, KnownOne2 = 0;
switch (I->getOpcode()) {
@@ -1011,11 +1011,11 @@ bool InstCombiner::SimplifyDemandedBits(Value *V, uint64_t DemandedMask,
break;
case Instruction::ZExt: {
// Compute the bits in the result that are not present in the input.
- const Type *SrcTy = I->getOperand(0)->getType();
- uint64_t NotIn = ~SrcTy->getIntegerTypeMask();
- uint64_t NewBits = I->getType()->getIntegerTypeMask() & NotIn;
+ const IntegerType *SrcTy = cast<IntegerType>(I->getOperand(0)->getType());
+ uint64_t NotIn = ~SrcTy->getBitMask();
+ uint64_t NewBits = cast<IntegerType>(I->getType())->getBitMask() & NotIn;
- DemandedMask &= SrcTy->getIntegerTypeMask();
+ DemandedMask &= SrcTy->getBitMask();
if (SimplifyDemandedBits(I->getOperand(0), DemandedMask,
KnownZero, KnownOne, Depth+1))
return true;
@@ -1026,13 +1026,13 @@ bool InstCombiner::SimplifyDemandedBits(Value *V, uint64_t DemandedMask,
}
case Instruction::SExt: {
// Compute the bits in the result that are not present in the input.
- const Type *SrcTy = I->getOperand(0)->getType();
- uint64_t NotIn = ~SrcTy->getIntegerTypeMask();
- uint64_t NewBits = I->getType()->getIntegerTypeMask() & NotIn;
+ const IntegerType *SrcTy = cast<IntegerType>(I->getOperand(0)->getType());
+ uint64_t NotIn = ~SrcTy->getBitMask();
+ uint64_t NewBits = cast<IntegerType>(I->getType())->getBitMask() & NotIn;
// Get the sign bit for the source type
uint64_t InSignBit = 1ULL << (SrcTy->getPrimitiveSizeInBits()-1);
- int64_t InputDemandedBits = DemandedMask & SrcTy->getIntegerTypeMask();
+ int64_t InputDemandedBits = DemandedMask & SrcTy->getBitMask();
// If any of the sign extended bits are demanded, we know that the sign
// bit is demanded.
@@ -1174,7 +1174,7 @@ bool InstCombiner::SimplifyDemandedBits(Value *V, uint64_t DemandedMask,
// Compute the new bits that are at the top now.
uint64_t HighBits = (1ULL << ShiftAmt)-1;
HighBits <<= I->getType()->getPrimitiveSizeInBits() - ShiftAmt;
- uint64_t TypeMask = I->getType()->getIntegerTypeMask();
+ uint64_t TypeMask = cast<IntegerType>(I->getType())->getBitMask();
// Unsigned shift right.
if (SimplifyDemandedBits(I->getOperand(0),
(DemandedMask << ShiftAmt) & TypeMask,
@@ -1207,7 +1207,7 @@ bool InstCombiner::SimplifyDemandedBits(Value *V, uint64_t DemandedMask,
// Compute the new bits that are at the top now.
uint64_t HighBits = (1ULL << ShiftAmt)-1;
HighBits <<= I->getType()->getPrimitiveSizeInBits() - ShiftAmt;
- uint64_t TypeMask = I->getType()->getIntegerTypeMask();
+ uint64_t TypeMask = cast<IntegerType>(I->getType())->getBitMask();
// Signed shift right.
if (SimplifyDemandedBits(I->getOperand(0),
(DemandedMask << ShiftAmt) & TypeMask,
@@ -1745,7 +1745,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// (X & 254)+1 -> (X&254)|1
uint64_t KnownZero, KnownOne;
if (!isa<PackedType>(I.getType()) &&
- SimplifyDemandedBits(&I, I.getType()->getIntegerTypeMask(),
+ SimplifyDemandedBits(&I, cast<IntegerType>(I.getType())->getBitMask(),
KnownZero, KnownOne))
return &I;
}
@@ -1780,7 +1780,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// This is a sign extend if the top bits are known zero.
uint64_t Mask = ~0ULL;
Mask <<= 64-(TySizeBits-Size);
- Mask &= XorLHS->getType()->getIntegerTypeMask();
+ Mask &= cast<IntegerType>(XorLHS->getType())->getBitMask();
if (!MaskedValueIsZero(XorLHS, Mask))
Size = 0; // Not a sign ext, but can't be any others either.
goto FoundSExt;
@@ -1876,7 +1876,7 @@ FoundSExt:
// Form a mask of all bits from the lowest bit added through the top.
uint64_t AddRHSHighBits = ~((AddRHSV & -AddRHSV)-1);
- AddRHSHighBits &= C2->getType()->getIntegerTypeMask();
+ AddRHSHighBits &= C2->getType()->getBitMask();
// See if the and mask includes all of these bits.
uint64_t AddRHSHighBitsAnd = AddRHSHighBits & C2->getZExtValue();
@@ -2621,7 +2621,7 @@ static bool isMaxValueMinusOne(const ConstantInt *C, bool isSigned) {
Val >>= 64-TypeBits; // Shift out unwanted 1 bits...
return C->getSExtValue() == Val-1;
}
- return C->getZExtValue() == C->getType()->getIntegerTypeMask()-1;
+ return C->getZExtValue() == C->getType()->getBitMask()-1;
}
// isMinValuePlusOne - return true if this is Min+1
@@ -2838,7 +2838,7 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op,
uint64_t AndRHSV = cast<ConstantInt>(AndRHS)->getZExtValue();
// Clear bits that are not part of the constant.
- AndRHSV &= AndRHS->getType()->getIntegerTypeMask();
+ AndRHSV &= AndRHS->getType()->getBitMask();
// If there is only one bit set...
if (isOneBitSet(cast<ConstantInt>(AndRHS))) {
@@ -3024,7 +3024,7 @@ Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
// is all N is, ignore it.
unsigned MB, ME;
if (isRunOfOnes(Mask, MB, ME)) { // begin/end bit of run, inclusive
- uint64_t Mask = RHS->getType()->getIntegerTypeMask();
+ uint64_t Mask = cast<IntegerType>(RHS->getType())->getBitMask();
Mask >>= 64-MB+1;
if (MaskedValueIsZero(RHS, Mask))
break;
@@ -3063,7 +3063,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
// purpose is to compute bits we don't care about.
uint64_t KnownZero, KnownOne;
if (!isa<PackedType>(I.getType())) {
- if (SimplifyDemandedBits(&I, I.getType()->getIntegerTypeMask(),
+ if (SimplifyDemandedBits(&I, cast<IntegerType>(I.getType())->getBitMask(),
KnownZero, KnownOne))
return &I;
} else {
@@ -3075,7 +3075,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
uint64_t AndRHSMask = AndRHS->getZExtValue();
- uint64_t TypeMask = Op0->getType()->getIntegerTypeMask();
+ uint64_t TypeMask = cast<IntegerType>(Op0->getType())->getBitMask();
uint64_t NotAndRHS = AndRHSMask^TypeMask;
// Optimize a variety of ((val OP C1) & C2) combinations...
@@ -3540,7 +3540,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// purpose is to compute bits we don't care about.
uint64_t KnownZero, KnownOne;
if (!isa<PackedType>(I.getType()) &&
- SimplifyDemandedBits(&I, I.getType()->getIntegerTypeMask(),
+ SimplifyDemandedBits(&I, cast<IntegerType>(I.getType())->getBitMask(),
KnownZero, KnownOne))
return &I;
@@ -3868,7 +3868,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
// purpose is to compute bits we don't care about.
uint64_t KnownZero, KnownOne;
if (!isa<PackedType>(I.getType()) &&
- SimplifyDemandedBits(&I, I.getType()->getIntegerTypeMask(),
+ SimplifyDemandedBits(&I, cast<IntegerType>(I.getType())->getBitMask(),
KnownZero, KnownOne))
return &I;
@@ -4498,7 +4498,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// See if we can fold the comparison based on bits known to be zero or one
// in the input.
uint64_t KnownZero, KnownOne;
- if (SimplifyDemandedBits(Op0, Ty->getIntegerTypeMask(),
+ if (SimplifyDemandedBits(Op0, cast<IntegerType>(Ty)->getBitMask(),
KnownZero, KnownOne, 0))
return &I;
@@ -5422,7 +5422,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
// See if we can simplify any instructions used by the instruction whose sole
// purpose is to compute bits we don't care about.
uint64_t KnownZero, KnownOne;
- if (SimplifyDemandedBits(&I, I.getType()->getIntegerTypeMask(),
+ if (SimplifyDemandedBits(&I, cast<IntegerType>(I.getType())->getBitMask(),
KnownZero, KnownOne))
return &I;
@@ -6024,7 +6024,7 @@ Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) {
// See if we can simplify any instructions used by the LHS whose sole
// purpose is to compute bits we don't care about.
uint64_t KnownZero = 0, KnownOne = 0;
- if (SimplifyDemandedBits(&CI, DestTy->getIntegerTypeMask(),
+ if (SimplifyDemandedBits(&CI, cast<IntegerType>(DestTy)->getBitMask(),
KnownZero, KnownOne))
return &CI;
@@ -6197,7 +6197,7 @@ Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) {
if (Op1CV == 0 || isPowerOf2_64(Op1CV)) {
// If Op1C some other power of two, convert:
uint64_t KnownZero, KnownOne;
- uint64_t TypeMask = Op1->getType()->getIntegerTypeMask();
+ uint64_t TypeMask = Op1C->getType()->getBitMask();
ComputeMaskedBits(Op0, TypeMask, KnownZero, KnownOne);
// This only works for EQ and NE
@@ -6319,7 +6319,7 @@ Instruction *InstCombiner::visitZExt(CastInst &CI) {
// If we're actually extending zero bits and the trunc is a no-op
if (MidSize < DstSize && SrcSize == DstSize) {
// Replace both of the casts with an And of the type mask.
- uint64_t AndValue = CSrc->getType()->getIntegerTypeMask();
+ uint64_t AndValue = cast<IntegerType>(CSrc->getType())->getBitMask();
Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
Instruction *And =
BinaryOperator::createAnd(CSrc->getOperand(0), AndConst);
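
The many rewrites in this file follow one pattern; a minimal sketch of the new idiom versus the removed one (helper name is ours):

    #include "llvm/DerivedTypes.h"
    #include "llvm/Support/Casting.h"
    using namespace llvm;

    // Callers that only have a Type* now cast once at the point of use,
    // since the mask query lives on IntegerType.
    static uint64_t typeMaskOf(const Type *Ty) {
      // Old: Ty->getIntegerTypeMask();
      return cast<IntegerType>(Ty)->getBitMask();
    }
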
diff --git a/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
index 64ff68b6f73..8adeea25d78 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -29,6 +29,7 @@
#define DEBUG_TYPE "loop-unswitch"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Analysis/LoopInfo.h"
@@ -486,7 +487,7 @@ static void EmitPreheaderBranchOnCondition(Value *LIC, Constant *Val,
// Insert a conditional branch on LIC to the two preheaders. The original
// code is the true version and the new code is the false version.
Value *BranchVal = LIC;
- if (Val->getType() != Type::Int1Ty || !isa<ConstantInt>(Val))
+ if (!isa<ConstantInt>(Val) || Val->getType() != Type::Int1Ty)
BranchVal = new ICmpInst(ICmpInst::ICMP_EQ, LIC, Val, "tmp", InsertPt);
else if (Val != ConstantInt::getTrue())
// We want to enter the new loop when the condition is true.
diff --git a/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index e241c01e3bc..60a127a9219 100644
--- a/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -748,7 +748,7 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
if (TotalBits != SrcSize) {
assert(TotalBits > SrcSize);
uint64_t Mask = ~(((1ULL << SrcSize)-1) << Offset);
- Mask = Mask & SV->getType()->getIntegerTypeMask();
+ Mask = Mask & cast<IntegerType>(SV->getType())->getBitMask();
Old = BinaryOperator::createAnd(Old,
ConstantInt::get(Old->getType(), Mask),
Old->getName()+".mask", SI);
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 25bc16866f3..221a799f883 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -16,6 +16,7 @@
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/Type.h"
+#include "llvm/DerivedTypes.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
diff --git a/llvm/lib/VMCore/ConstantFolding.cpp b/llvm/lib/VMCore/ConstantFolding.cpp
index 549ac20c3db..521f6b84a25 100644
--- a/llvm/lib/VMCore/ConstantFolding.cpp
+++ b/llvm/lib/VMCore/ConstantFolding.cpp
@@ -295,13 +295,8 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, const Constant *V,
// Handle ConstantFP input.
if (const ConstantFP *FP = dyn_cast<ConstantFP>(V)) {
// FP -> Integral.
- if (DestTy->isInteger()) {
- if (DestTy == Type::Int32Ty)
- return ConstantInt::get(DestTy, FloatToBits(FP->getValue()));
- assert(DestTy == Type::Int64Ty &&
- "Incorrect integer type for bitcast!");
+ if (DestTy->isInteger())
return ConstantInt::get(DestTy, DoubleToBits(FP->getValue()));
- }
}
return 0;
default:
diff --git a/llvm/lib/VMCore/Constants.cpp b/llvm/lib/VMCore/Constants.cpp
index 85598830887..1a650cc5e1c 100644
--- a/llvm/lib/VMCore/Constants.cpp
+++ b/llvm/lib/VMCore/Constants.cpp
@@ -568,7 +568,7 @@ bool ConstantInt::isValueValidForType(const Type *Ty, int64_t Val) {
unsigned NumBits = cast<IntegerType>(Ty)->getBitWidth(); // assert okay
assert(NumBits <= 64 && "Not implemented: integers > 64-bits");
if (Ty == Type::Int1Ty)
- return Val == 0 || Val == 1;
+ return Val == 0 || Val == 1 || Val == -1;
if (NumBits == 64)
return true; // always true, has to fit in largest type
int64_t Min = -(1ll << (NumBits-1));
@@ -849,7 +849,7 @@ ConstantInt *ConstantInt::get(const Type *Ty, int64_t V) {
return getTrue();
else
return getFalse();
- return IntConstants->getOrCreate(Ty, V & Ty->getIntegerTypeMask());
+ return IntConstants->getOrCreate(Ty, V & cast<IntegerType>(Ty)->getBitMask());
}
//---- ConstantFP::get() implementation...
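
A hedged sketch of the masking ConstantInt::get now performs; the example value is ours, not from the patch:

    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    using namespace llvm;

    // ConstantInt::get truncates the incoming value to the type's width by
    // masking with getBitMask(); building an i8 constant from -1 stores 0xFF.
    static ConstantInt *allOnesInt8() {
      return ConstantInt::get(Type::Int8Ty, -1);
    }
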
diff --git a/llvm/lib/VMCore/Type.cpp b/llvm/lib/VMCore/Type.cpp
index 00c6d48dceb..0e9e25cf8b9 100644
--- a/llvm/lib/VMCore/Type.cpp
+++ b/llvm/lib/VMCore/Type.cpp
@@ -81,6 +81,15 @@ const Type *Type::getPrimitiveType(TypeID IDNumber) {
}
}
+const Type *Type::getVAArgsPromotedType() const {
+ if (ID == IntegerTyID && getSubclassData() < 32)
+ return Type::Int32Ty;
+ else if (ID == FloatTyID)
+ return Type::DoubleTy;
+ else
+ return this;
+}
+
/// isFPOrFPVector - Return true if this is a FP type or a vector of FP types.
///
bool Type::isFPOrFPVector() const {
@@ -352,7 +361,7 @@ const Type *StructType::getTypeAtIndex(const Value *V) const {
}; \
} \
static ManagedStatic<TY##Type> The##TY##Ty; \
- const Type *Type::TY##Ty = &*The##TY##Ty
+ const IntegerType *Type::TY##Ty = &*The##TY##Ty
DeclarePrimType(Void, "void");
DeclarePrimType(Float, "float");
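
Moving getVAArgsPromotedType() out of line is likely needed because its body now returns Type::Int32Ty, a const IntegerType*, as a const Type*, and that derived-to-base conversion requires the complete IntegerType definition that Type.h only forward-declares. A caller sketch (helper name is ours):

    #include "llvm/Type.h"
    using namespace llvm;

    // Promotion applied to an argument passed through '...': integers narrower
    // than 32 bits go to i32, float goes to double, anything else is unchanged.
    static const Type *promoteForVarArgs(const Type *ArgTy) {
      return ArgTy->getVAArgsPromotedType();
    }
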
diff --git a/llvm/lib/VMCore/ValueTypes.cpp b/llvm/lib/VMCore/ValueTypes.cpp
index 006d7ca1031..8732d641983 100644
--- a/llvm/lib/VMCore/ValueTypes.cpp
+++ b/llvm/lib/VMCore/ValueTypes.cpp
@@ -13,6 +13,7 @@
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Type.h"
+#include "llvm/DerivedTypes.h"
using namespace llvm;
/// MVT::getValueTypeString - This function returns value type as a string,