Diffstat (limited to 'llvm')
-rw-r--r--  llvm/include/llvm/ADT/APInt.h                                   | 13
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp                   |  2
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp                   |  2
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp                  |  6
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp                |  4
-rw-r--r--  llvm/lib/ExecutionEngine/Interpreter/Execution.cpp              |  2
-rw-r--r--  llvm/lib/Support/APInt.cpp                                      |  7
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp                 | 20
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp                         | 11
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp            |  2
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp |  2
11 files changed, 41 insertions, 30 deletions
diff --git a/llvm/include/llvm/ADT/APInt.h b/llvm/include/llvm/ADT/APInt.h
index d0104c3f0fa..24919446c32 100644
--- a/llvm/include/llvm/ADT/APInt.h
+++ b/llvm/include/llvm/ADT/APInt.h
@@ -874,6 +874,13 @@ public:
return *this;
}
+ /// \brief Left-shift assignment function.
+ ///
+ /// Shifts *this left by shiftAmt and assigns the result to *this.
+ ///
+ /// \returns *this after shifting left by ShiftAmt
+ APInt &operator<<=(const APInt &ShiftAmt);
+
/// @}
/// \name Binary Operators
/// @{
@@ -981,7 +988,11 @@ public:
/// \brief Left-shift function.
///
/// Left-shift this APInt by shiftAmt.
- APInt shl(const APInt &shiftAmt) const;
+ APInt shl(const APInt &ShiftAmt) const {
+ APInt R(*this);
+ R <<= ShiftAmt;
+ return R;
+ }
/// \brief Rotate left by rotateAmt.
APInt rotl(const APInt &rotateAmt) const;
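[Note: the new operator<<= now carries the real work, and shl becomes a thin copying wrapper around it. A minimal usage sketch, assuming nothing beyond the declarations above; the variable names are illustrative:

    #include "llvm/ADT/APInt.h"

    llvm::APInt X(32, 1);        // 32-bit APInt holding 1
    llvm::APInt Amt(32, 4);      // shift amount, itself an APInt
    llvm::APInt Y = X.shl(Amt);  // copying form: Y == 16, X still 1
    X <<= Amt;                   // in-place form: X == 16, no temporary
]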
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 0c289714c8f..91ce16bb049 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5364,7 +5364,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
APInt Mask = APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - c1);
SDValue Shift;
if (c2 > c1) {
- Mask = Mask.shl(c2 - c1);
+ Mask <<= c2 - c1;
SDLoc DL(N);
Shift = DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
DAG.getConstant(c2 - c1, DL, N1.getValueType()));
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index fdebb8bd00d..7c20ecbd569 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -2589,7 +2589,7 @@ SDValue SelectionDAGLegalize::ExpandBITREVERSE(SDValue Op, const SDLoc &dl) {
DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(I - J, dl, SHVT));
APInt Shift(Sz, 1);
- Shift = Shift.shl(J);
+ Shift <<= J;
Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Shift, dl, VT));
Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp, Tmp2);
}
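[Note: in this expansion each source bit I is shifted to its mirrored position J, isolated with the one-bit mask built by `Shift <<= J`, and OR-ed into the accumulator. A scalar sketch of that structure, assuming 32-bit width and the usual two-branch loop around the lines shown (illustrative, not the DAG code):

    #include <cstdint>

    // Scalar mirror of the bit-by-bit BITREVERSE expansion.
    uint32_t bitreverse32(uint32_t Op) {
      uint32_t Tmp = 0;
      for (unsigned I = 0, J = 31; I < 32; ++I, --J) {
        // Move source bit I to destination position J.
        uint32_t Tmp2 = (I < J) ? (Op << (J - I)) : (Op >> (I - J));
        uint32_t Mask = 1u << J;  // the patch's `Shift <<= J`
        Tmp |= Tmp2 & Mask;
      }
      return Tmp;
    }
]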
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 439f67f1e15..7df762fee30 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2324,8 +2324,8 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
Depth + 1);
- KnownZero = KnownZero << *ShAmt;
- KnownOne = KnownOne << *ShAmt;
+ KnownZero <<= *ShAmt;
+ KnownOne <<= *ShAmt;
// Low bits are known zero.
KnownZero.setLowBits(ShAmt->getZExtValue());
}
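[Note: shifting KnownZero and KnownOne left in lock-step and then setting the low bits of KnownZero is the known-bits transfer function for a left shift: whatever was known about each bit moves up with it, and the vacated positions are definitely zero. A standalone sketch with plain masks (illustrative):

    #include <cstdint>

    struct KnownBits32 { uint32_t Zero, One; };  // illustrative helper type

    // Known-bits transfer function for `x << ShAmt` (assumes ShAmt < 32,
    // as getValidShiftAmountConstant guarantees in the hunk above).
    KnownBits32 shlKnownBits(KnownBits32 K, unsigned ShAmt) {
      K.Zero <<= ShAmt;             // known-zero bits move up with the value
      K.One  <<= ShAmt;             // known-one bits likewise
      K.Zero |= (1u << ShAmt) - 1;  // vacated low bits are known zero
      return K;
    }
]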
@@ -4161,7 +4161,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
unsigned FromBits = EVT.getScalarSizeInBits();
Val <<= Val.getBitWidth() - FromBits;
- Val = Val.ashr(Val.getBitWidth() - FromBits);
+ Val.ashrInPlace(Val.getBitWidth() - FromBits);
return getConstant(Val, DL, ConstantVT);
};
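[Note: the lambda uses the classic sign-extend-in-register idiom, and the patch switches its second half to the in-place ashrInPlace: shift the narrow value up so its sign bit becomes the MSB, then arithmetic-shift back down. A standalone sketch; the function name is illustrative:

    #include "llvm/ADT/APInt.h"

    // Sign-extend the low FromBits bits of Val across its full width.
    llvm::APInt signExtendInReg(llvm::APInt Val, unsigned FromBits) {
      unsigned Shift = Val.getBitWidth() - FromBits;
      Val <<= Shift;           // sign bit of the narrow value -> MSB
      Val.ashrInPlace(Shift);  // arithmetic shift replicates it down
      return Val;
    }
    // signExtendInReg(APInt(8, 0xB), 4) == 0xFB, since bit 3 of 0xB is set.
]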
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 136dec873cb..e9d71215f38 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -1721,7 +1721,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
bestWidth = width;
break;
}
- newMask = newMask << width;
+ newMask <<= width;
}
}
}
@@ -2988,7 +2988,7 @@ static SDValue BuildExactSDIV(const TargetLowering &TLI, SDValue Op1, APInt d,
Flags.setExact(true);
Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt, &Flags);
Created.push_back(Op1.getNode());
- d = d.ashr(ShAmt);
+ d.ashrInPlace(ShAmt);
}
// Calculate the multiplicative inverse, using Newton's method.
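[Note: after the shift makes the divisor odd, the routine multiplies by its multiplicative inverse modulo 2^bits; the Newton iteration named in the comment doubles the number of correct low bits per step. A sketch with plain 32-bit arithmetic (illustrative, not the DAG code):

    #include <cassert>
    #include <cstdint>

    // Inverse of an odd d modulo 2^32 via Newton's method.
    uint32_t inverseMod2_32(uint32_t d) {
      assert((d & 1) && "only odd values are invertible mod 2^32");
      uint32_t x = d;              // d*d == 1 (mod 8): 3 correct bits
      for (int i = 0; i < 5; ++i)  // 3 -> 6 -> 12 -> 24 -> 48 bits
        x *= 2 - d * x;            // Newton step, wraps mod 2^32
      return x;                    // d * x == 1 (mod 2^32)
    }
    // e.g. inverseMod2_32(3) == 0xAAAAAAABu, and 3u * 0xAAAAAAABu == 1u.
]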
diff --git a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
index 10b4e98b607..96844439e72 100644
--- a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -1565,7 +1565,7 @@ GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
Tmp = Tmp.zext(SrcBitSize);
Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
Tmp = Tmp.zext(DstBitSize);
- Tmp = Tmp.shl(ShiftAmt);
+ Tmp <<= ShiftAmt;
ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
Elt.IntVal |= Tmp;
}
diff --git a/llvm/lib/Support/APInt.cpp b/llvm/lib/Support/APInt.cpp
index 1227d7528c8..7103007d8ca 100644
--- a/llvm/lib/Support/APInt.cpp
+++ b/llvm/lib/Support/APInt.cpp
@@ -844,7 +844,7 @@ APInt llvm::APIntOps::RoundDoubleToAPInt(double Double, unsigned width) {
// Otherwise, we have to shift the mantissa bits up to the right location
APInt Tmp(width, mantissa);
- Tmp = Tmp.shl((unsigned)exp - 52);
+ Tmp <<= (unsigned)exp - 52;
return isNeg ? -Tmp : Tmp;
}
@@ -1072,9 +1072,10 @@ void APInt::lshrSlowCase(unsigned ShiftAmt) {
/// Left-shift this APInt by shiftAmt.
/// @brief Left-shift function.
-APInt APInt::shl(const APInt &shiftAmt) const {
+APInt &APInt::operator<<=(const APInt &shiftAmt) {
// It's undefined behavior in C to shift by BitWidth or greater.
- return shl((unsigned)shiftAmt.getLimitedValue(BitWidth));
+ *this <<= (unsigned)shiftAmt.getLimitedValue(BitWidth);
+ return *this;
}
void APInt::shlSlowCase(unsigned ShiftAmt) {
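[Note: getLimitedValue(BitWidth) clamps an arbitrarily wide or large shift amount to at most BitWidth, and APInt defines a shift by exactly BitWidth as producing zero, so the C-level undefined behavior named in the comment is avoided by construction. Illustrative:

    #include "llvm/ADT/APInt.h"

    llvm::APInt V(16, 0x00FF);
    llvm::APInt Huge(128, 1000);  // amount far beyond the bit width
    V <<= Huge;                   // clamped to 16: V becomes 0, no UB
]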
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 7141e77fcd2..309714570b5 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -1852,17 +1852,17 @@ static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
OpUsefulBits = 1;
if (MSB >= Imm) {
- OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
+ OpUsefulBits <<= MSB - Imm + 1;
--OpUsefulBits;
// The interesting part will be in the lower part of the result
getUsefulBits(Op, OpUsefulBits, Depth + 1);
// The interesting part was starting at Imm in the argument
- OpUsefulBits = OpUsefulBits.shl(Imm);
+ OpUsefulBits <<= Imm;
} else {
- OpUsefulBits = OpUsefulBits.shl(MSB + 1);
+ OpUsefulBits <<= MSB + 1;
--OpUsefulBits;
// The interesting part will be shifted in the result
- OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
+ OpUsefulBits <<= OpUsefulBits.getBitWidth() - Imm;
getUsefulBits(Op, OpUsefulBits, Depth + 1);
// The interesting part was at zero in the argument
OpUsefulBits.lshrInPlace(OpUsefulBits.getBitWidth() - Imm);
@@ -1892,7 +1892,7 @@ static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
// Shift Left
uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
- Mask = Mask.shl(ShiftAmt);
+ Mask <<= ShiftAmt;
getUsefulBits(Op, Mask, Depth + 1);
Mask.lshrInPlace(ShiftAmt);
} else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
@@ -1902,7 +1902,7 @@ static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
Mask.lshrInPlace(ShiftAmt);
getUsefulBits(Op, Mask, Depth + 1);
- Mask = Mask.shl(ShiftAmt);
+ Mask <<= ShiftAmt;
} else
return;
@@ -1930,13 +1930,13 @@ static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
uint64_t Width = MSB - Imm + 1;
uint64_t LSB = Imm;
- OpUsefulBits = OpUsefulBits.shl(Width);
+ OpUsefulBits <<= Width;
--OpUsefulBits;
if (Op.getOperand(1) == Orig) {
// Copy the low bits from the result to bits starting from LSB.
Mask = ResultUsefulBits & OpUsefulBits;
- Mask = Mask.shl(LSB);
+ Mask <<= LSB;
}
if (Op.getOperand(0) == Orig)
@@ -1947,9 +1947,9 @@ static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
uint64_t Width = MSB + 1;
uint64_t LSB = UsefulBits.getBitWidth() - Imm;
- OpUsefulBits = OpUsefulBits.shl(Width);
+ OpUsefulBits <<= Width;
--OpUsefulBits;
- OpUsefulBits = OpUsefulBits.shl(LSB);
+ OpUsefulBits <<= LSB;
if (Op.getOperand(1) == Orig) {
// Copy the bits from the result to the zero bits.
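[Note: every `X <<= W; --X;` pair above is the standard mask-building idiom: starting from 1, the shift produces 2^W and the decrement turns it into W consecutive low set bits, which later shifts slide into position. Illustrative:

    #include "llvm/ADT/APInt.h"

    llvm::APInt Mask(32, 1);
    Mask <<= 5;  // 0b100000 == 2^5
    --Mask;      // 0b011111: five low bits set
    Mask <<= 8;  // same five bits, now covering positions 8..12
]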
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index ada46643a5f..1554ee83afd 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -26719,8 +26719,8 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
DAG.computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth + 1);
unsigned ShAmt = ShiftImm->getZExtValue();
if (Opc == X86ISD::VSHLI) {
- KnownZero = KnownZero << ShAmt;
- KnownOne = KnownOne << ShAmt;
+ KnownZero <<= ShAmt;
+ KnownOne <<= ShAmt;
// Low bits are known zero.
KnownZero.setLowBits(ShAmt);
} else {
@@ -31056,8 +31056,7 @@ static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
N0.getOperand(1).getOpcode() == ISD::Constant) {
SDValue N00 = N0.getOperand(0);
APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
- const APInt &ShAmt = N1C->getAPIntValue();
- Mask = Mask.shl(ShAmt);
+ Mask <<= N1C->getAPIntValue();
bool MaskOK = false;
// We can handle cases concerning bit-widening nodes containing setcc_c if
// we carefully interrogate the mask to make sure we are semantics
@@ -31267,9 +31266,9 @@ static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
unsigned ShiftImm = ShiftVal.getZExtValue();
for (APInt &Elt : EltBits) {
if (X86ISD::VSHLI == Opcode)
- Elt = Elt.shl(ShiftImm);
+ Elt <<= ShiftImm;
else if (X86ISD::VSRAI == Opcode)
- Elt = Elt.ashr(ShiftImm);
+ Elt.ashrInPlace(ShiftImm);
else
Elt.lshrInPlace(ShiftImm);
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 597f1d95806..e9286b1bf17 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -379,7 +379,7 @@ static Value *simplifyX86immShift(const IntrinsicInst &II,
for (unsigned i = 0; i != NumSubElts; ++i) {
unsigned SubEltIdx = (NumSubElts - 1) - i;
auto SubElt = cast<ConstantInt>(CDV->getElementAsConstant(SubEltIdx));
- Count = Count.shl(BitWidth);
+ Count <<= BitWidth;
Count |= SubElt->getValue().zextOrTrunc(64);
}
}
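[Note: the loop walks sub-elements from the most significant down, making room with a shift and OR-ing each one into the low bits — the usual way to concatenate fixed-width fields into one word. A sketch with plain integers, assuming four 16-bit sub-elements (illustrative):

    #include <cstdint>

    // Pack four 16-bit fields into one 64-bit count, high field first.
    uint64_t pack16x4(const uint16_t Elts[4]) {
      uint64_t Count = 0;
      for (int i = 3; i >= 0; --i) {  // highest sub-element first
        Count <<= 16;                 // make room for the next field
        Count |= Elts[i];             // splice it into the low bits
      }
      return Count;
    }
]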
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 8d0ed853277..8877799b6ae 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -1537,7 +1537,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
APInt LaneElts = OpUndefElts.lshr(InnerVWidthPerLane * Lane);
LaneElts = LaneElts.getLoBits(InnerVWidthPerLane);
- LaneElts = LaneElts.shl(InnerVWidthPerLane * (2 * Lane + OpNum));
+ LaneElts <<= InnerVWidthPerLane * (2 * Lane + OpNum);
UndefElts |= LaneElts;
}
}