author | Craig Topper <craig.topper@gmail.com> | 2017-04-28 03:36:24 +0000 |
---|---|---|
committer | Craig Topper <craig.topper@gmail.com> | 2017-04-28 03:36:24 +0000 |
commit | 24e71017aa3a062e6c2b055038ab9ed0ad2d35ae (patch) | |
tree | 461d640d885493861683e8350040434e9a912a74 /llvm/lib/Target | |
parent | f74d946624f30d60fd3f90a9545a455308931c32 (diff) | |
download | bcm5719-llvm-24e71017aa3a062e6c2b055038ab9ed0ad2d35ae.tar.gz bcm5719-llvm-24e71017aa3a062e6c2b055038ab9ed0ad2d35ae.zip |
[APInt] Use inplace shift methods where possible. NFCI
llvm-svn: 301612
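For context on what "inplace shift methods" means here: APInt's `shl`/`lshr`/`ashr` return a freshly constructed value that is then assigned back, while `operator<<=`, `lshrInPlace`, and `ashrInPlace` modify the existing value directly. The sketch below is not part of the commit; it is a minimal illustration of the before/after pattern, assuming an LLVM build that provides `llvm/ADT/APInt.h`.

```cpp
// Illustrative only -- not code from this commit.
#include "llvm/ADT/APInt.h"
#include "llvm/Support/raw_ostream.h"

using llvm::APInt;

int main() {
  APInt Mask(128, 0xff);

  // Old pattern: shl() builds a new 128-bit value, which is then copied
  // back into Mask by the assignment.
  Mask = Mask.shl(4);

  // New pattern: the in-place forms shift Mask's own storage directly,
  // producing the same bits without the temporary.
  Mask.lshrInPlace(4);  // equivalent to Mask = Mask.lshr(4)
  Mask <<= 4;           // equivalent to Mask = Mask.shl(4)
  Mask.ashrInPlace(4);  // equivalent to Mask = Mask.ashr(4)

  llvm::outs() << Mask << "\n";  // prints 255; the shifts above cancel out
  return 0;
}
```

For APInts wider than 64 bits the copying forms allocate a temporary for the result, so the in-place forms are marginally cheaper; since both compute the same bits, the change is tagged NFCI (no functional change intended).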
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r-- | llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp | 20
-rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 11
2 files changed, 15 insertions, 16 deletions
```diff
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 7141e77fcd2..309714570b5 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -1852,17 +1852,17 @@ static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
   OpUsefulBits = 1;
 
   if (MSB >= Imm) {
-    OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
+    OpUsefulBits <<= MSB - Imm + 1;
     --OpUsefulBits;
     // The interesting part will be in the lower part of the result
     getUsefulBits(Op, OpUsefulBits, Depth + 1);
     // The interesting part was starting at Imm in the argument
-    OpUsefulBits = OpUsefulBits.shl(Imm);
+    OpUsefulBits <<= Imm;
   } else {
-    OpUsefulBits = OpUsefulBits.shl(MSB + 1);
+    OpUsefulBits <<= MSB + 1;
     --OpUsefulBits;
     // The interesting part will be shifted in the result
-    OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
+    OpUsefulBits <<= OpUsefulBits.getBitWidth() - Imm;
     getUsefulBits(Op, OpUsefulBits, Depth + 1);
     // The interesting part was at zero in the argument
     OpUsefulBits.lshrInPlace(OpUsefulBits.getBitWidth() - Imm);
@@ -1892,7 +1892,7 @@ static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
   if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
     // Shift Left
     uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
-    Mask = Mask.shl(ShiftAmt);
+    Mask <<= ShiftAmt;
     getUsefulBits(Op, Mask, Depth + 1);
     Mask.lshrInPlace(ShiftAmt);
   } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
@@ -1902,7 +1902,7 @@ static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
     uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
     Mask.lshrInPlace(ShiftAmt);
     getUsefulBits(Op, Mask, Depth + 1);
-    Mask = Mask.shl(ShiftAmt);
+    Mask <<= ShiftAmt;
   } else
     return;
 
@@ -1930,13 +1930,13 @@ static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
     uint64_t Width = MSB - Imm + 1;
     uint64_t LSB = Imm;
 
-    OpUsefulBits = OpUsefulBits.shl(Width);
+    OpUsefulBits <<= Width;
     --OpUsefulBits;
 
     if (Op.getOperand(1) == Orig) {
       // Copy the low bits from the result to bits starting from LSB.
       Mask = ResultUsefulBits & OpUsefulBits;
-      Mask = Mask.shl(LSB);
+      Mask <<= LSB;
     }
 
     if (Op.getOperand(0) == Orig)
@@ -1947,9 +1947,9 @@ static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
     uint64_t Width = MSB + 1;
     uint64_t LSB = UsefulBits.getBitWidth() - Imm;
 
-    OpUsefulBits = OpUsefulBits.shl(Width);
+    OpUsefulBits <<= Width;
     --OpUsefulBits;
-    OpUsefulBits = OpUsefulBits.shl(LSB);
+    OpUsefulBits <<= LSB;
 
     if (Op.getOperand(1) == Orig) {
       // Copy the bits from the result to the zero bits.
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index ada46643a5f..1554ee83afd 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -26719,8 +26719,8 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
       DAG.computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth + 1);
       unsigned ShAmt = ShiftImm->getZExtValue();
       if (Opc == X86ISD::VSHLI) {
-        KnownZero = KnownZero << ShAmt;
-        KnownOne = KnownOne << ShAmt;
+        KnownZero <<= ShAmt;
+        KnownOne <<= ShAmt;
         // Low bits are known zero.
         KnownZero.setLowBits(ShAmt);
       } else {
@@ -31056,8 +31056,7 @@ static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
       N0.getOperand(1).getOpcode() == ISD::Constant) {
     SDValue N00 = N0.getOperand(0);
     APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
-    const APInt &ShAmt = N1C->getAPIntValue();
-    Mask = Mask.shl(ShAmt);
+    Mask <<= N1C->getAPIntValue();
     bool MaskOK = false;
     // We can handle cases concerning bit-widening nodes containing setcc_c if
     // we carefully interrogate the mask to make sure we are semantics
@@ -31267,9 +31266,9 @@ static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
     unsigned ShiftImm = ShiftVal.getZExtValue();
     for (APInt &Elt : EltBits) {
       if (X86ISD::VSHLI == Opcode)
-        Elt = Elt.shl(ShiftImm);
+        Elt <<= ShiftImm;
       else if (X86ISD::VSRAI == Opcode)
-        Elt = Elt.ashr(ShiftImm);
+        Elt.ashrInPlace(ShiftImm);
       else
         Elt.lshrInPlace(ShiftImm);
     }
```