author     Craig Topper <craig.topper@gmail.com>  2012-02-22 05:59:10 +0000
committer  Craig Topper <craig.topper@gmail.com>  2012-02-22 05:59:10 +0000
commit     760b134ffa8bb88ccc96fc2230ab7e5fa501e921 (patch)
tree       ca022afc03390f4d96618946d26a8c5e347590e6 /llvm/lib
parent     91d5bb1ee5e795dbad776856188f60a195d05d5a (diff)
Make all pointers to TargetRegisterClass const since they are all pointers to static data that should not be modified.
llvm-svn: 151134
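
The rationale in the commit message is worth spelling out: every TargetRegisterClass instance is static data emitted by TableGen, so handing out non-const pointers invites callers to mutate tables shared by the whole backend. Below is a minimal, self-contained sketch of the idiom; the RegClass type and getRegClassFor helper are hypothetical stand-ins for the LLVM ones, not actual LLVM API:

    #include <cstdio>

    // Stand-in for llvm::TargetRegisterClass: in LLVM these descriptors are
    // static tables generated by TableGen and shared by every user.
    struct RegClass {
      unsigned ID;
      unsigned SizeInBytes;
    };

    static const RegClass GPR32 = {0, 4};  // static, immutable descriptor
    static const RegClass GPR64 = {1, 8};

    // Returning a pointer-to-const lets the compiler reject any caller that
    // tries to write through the pointer -- the point of this commit.
    const RegClass *getRegClassFor(bool is64Bit) {
      return is64Bit ? &GPR64 : &GPR32;
    }

    int main() {
      const RegClass *RC = getRegClassFor(false);
      // RC->SizeInBytes = 16;  // now a compile error, not silent corruption
      std::printf("class %u, %u bytes\n", RC->ID, RC->SizeInBytes);
      return 0;
    }

Propagating const through every signature in the diff below is what lets the compiler enforce this at each call site.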
Diffstat (limited to 'llvm/lib')
 llvm/lib/CodeGen/PostRASchedulerList.cpp      |  6
 llvm/lib/CodeGen/SelectionDAG/FastISel.cpp    |  4
 llvm/lib/Target/ARM/ARMFastISel.cpp           | 16
 llvm/lib/Target/ARM/ARMISelLowering.cpp       | 14
 llvm/lib/Target/ARM/ARMISelLowering.h         |  2
 llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp |  4
 llvm/lib/Target/Mips/MipsISelLowering.cpp     |  6
 llvm/lib/Target/PTX/PTXISelLowering.cpp       | 29
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp   |  2
 llvm/lib/Target/X86/X86FastISel.cpp           |  2
 llvm/lib/Target/X86/X86ISelLowering.cpp       |  4
 llvm/lib/Target/X86/X86ISelLowering.h         |  2
 llvm/lib/Target/X86/X86InstrInfo.cpp          |  2
 llvm/lib/Target/X86/X86VZeroUpper.cpp         |  2
 14 files changed, 44 insertions(+), 51 deletions(-)
diff --git a/llvm/lib/CodeGen/PostRASchedulerList.cpp b/llvm/lib/CodeGen/PostRASchedulerList.cpp
index a52aa75221a..1f7833c47df 100644
--- a/llvm/lib/CodeGen/PostRASchedulerList.cpp
+++ b/llvm/lib/CodeGen/PostRASchedulerList.cpp
@@ -134,7 +134,7 @@ namespace {
                       MachineFunction &MF, MachineLoopInfo &MLI,
                       MachineDominatorTree &MDT, AliasAnalysis *AA,
                       const RegisterClassInfo&,
                       TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
-                      SmallVectorImpl<TargetRegisterClass*> &CriticalPathRCs);
+                      SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs);
 
     ~SchedulePostRATDList();
 
@@ -184,7 +184,7 @@ SchedulePostRATDList::SchedulePostRATDList(
   MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
   AliasAnalysis *AA, const RegisterClassInfo &RCI,
   TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
-  SmallVectorImpl<TargetRegisterClass*> &CriticalPathRCs)
+  SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs)
   : ScheduleDAGInstrs(MF, MLI, MDT, /*IsPostRA=*/true), Topo(SUnits), AA(AA),
     KillIndices(TRI->getNumRegs())
 {
@@ -216,7 +216,7 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
   // Check for explicit enable/disable of post-ra scheduling.
   TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
     TargetSubtargetInfo::ANTIDEP_NONE;
-  SmallVector<TargetRegisterClass*, 4> CriticalPathRCs;
+  SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;
   if (EnablePostRAScheduler.getPosition() > 0) {
     if (!EnablePostRAScheduler)
       return false;
diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index f20a714d2b8..dce3389b014 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -725,8 +725,8 @@ bool FastISel::SelectBitCast(const User *I) {
   // First, try to perform the bitcast by inserting a reg-reg copy.
   unsigned ResultReg = 0;
   if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
-    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
-    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
+    const TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
+    const TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
     // Don't attempt a cross-class copy. It will likely fail.
     if (SrcClass == DstClass) {
       ResultReg = createResultReg(DstClass);
diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp
index 51c44d0adb0..458bb1d2d94 100644
--- a/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -728,7 +728,7 @@ unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
   // This will get lowered later into the correct offsets and registers
   // via rewriteXFrameIndex.
   if (SI != FuncInfo.StaticAllocaMap.end()) {
-    TargetRegisterClass* RC = TLI.getRegClassFor(VT);
+    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
     unsigned ResultReg = createResultReg(RC);
     unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
@@ -911,8 +911,8 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {
   // put the alloca address into a register, set the base type back to
   // register and continue. This should almost never happen.
   if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
-    TargetRegisterClass *RC = isThumb2 ? ARM::tGPRRegisterClass :
-                              ARM::GPRRegisterClass;
+    const TargetRegisterClass *RC = isThumb2 ? ARM::tGPRRegisterClass
+                                             : ARM::GPRRegisterClass;
     unsigned ResultReg = createResultReg(RC);
     unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
@@ -987,7 +987,7 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
   unsigned Opc;
   bool useAM3 = false;
   bool needVMOV = false;
-  TargetRegisterClass *RC;
+  const TargetRegisterClass *RC;
   switch (VT.getSimpleVT().SimpleTy) {
     // This is mostly going to be Neon/vector support.
     default: return false;
@@ -1490,8 +1490,8 @@ bool ARMFastISel::SelectCmp(const Instruction *I) {
   // Now set a register based on the comparison. Explicitly set the predicates
   // here.
   unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
-  TargetRegisterClass *RC = isThumb2 ? ARM::rGPRRegisterClass
-                                     : ARM::GPRRegisterClass;
+  const TargetRegisterClass *RC = isThumb2 ? ARM::rGPRRegisterClass
+                                           : ARM::GPRRegisterClass;
   unsigned DestReg = createResultReg(RC);
   Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
   unsigned ZeroReg = TargetMaterializeConstant(Zero);
@@ -1955,7 +1955,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
     // For this move we copy into two registers and then move into the
     // double fp reg we want.
     EVT DestVT = RVLocs[0].getValVT();
-    TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
+    const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
     unsigned ResultReg = createResultReg(DstRC);
     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                             TII.get(ARM::VMOVDRR), ResultReg)
@@ -1975,7 +1975,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
     if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
       CopyVT = MVT::i32;
 
-    TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
+    const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
     unsigned ResultReg = createResultReg(DstRC);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
             TII.get(TargetOpcode::COPY),
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 370d3aacdaf..dd0d2f8d07b 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -1011,7 +1011,7 @@ EVT ARMTargetLowering::getSetCCResultType(EVT VT) const {
 
 /// getRegClassFor - Return the register class that should be used for the
 /// specified value type.
-TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
+const TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
   // Map v4i64 to QQ registers but do not make the type legal. Similarly map
   // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
   // load / store 4 to 8 consecutive D registers.
@@ -2422,7 +2422,7 @@ ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
   MachineFunction &MF = DAG.getMachineFunction();
   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
 
-  TargetRegisterClass *RC;
+  const TargetRegisterClass *RC;
   if (AFI->isThumb1OnlyFunction())
     RC = ARM::tGPRRegisterClass;
   else
@@ -2508,7 +2508,7 @@ ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
 
     SmallVector<SDValue, 4> MemOps;
     for (; firstRegToSaveIndex < 4; ++firstRegToSaveIndex) {
-      TargetRegisterClass *RC;
+      const TargetRegisterClass *RC;
       if (AFI->isThumb1OnlyFunction())
         RC = ARM::tGPRRegisterClass;
       else
@@ -2591,7 +2591,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
           ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
 
       } else {
-        TargetRegisterClass *RC;
+        const TargetRegisterClass *RC;
 
         if (RegVT == MVT::f32)
           RC = ARM::SPRRegisterClass;
@@ -5299,7 +5299,7 @@ ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
                   BB->end());
   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
 
-  TargetRegisterClass *TRC =
+  const TargetRegisterClass *TRC =
     isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
   unsigned scratch = MRI.createVirtualRegister(TRC);
   unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);
@@ -5409,7 +5409,7 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,
                   BB->end());
   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
 
-  TargetRegisterClass *TRC =
+  const TargetRegisterClass *TRC =
     isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
   unsigned scratch = MRI.createVirtualRegister(TRC);
   unsigned scratch2 = MRI.createVirtualRegister(TRC);
@@ -5519,7 +5519,7 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
                   BB->end());
   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
 
-  TargetRegisterClass *TRC =
+  const TargetRegisterClass *TRC =
     isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
   unsigned storesuccess = MRI.createVirtualRegister(TRC);
 
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index b8dc4bf5310..527156aac41 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -345,7 +345,7 @@ namespace llvm {
 
     /// getRegClassFor - Return the register class that should be used for the
     /// specified value type.
-    virtual TargetRegisterClass *getRegClassFor(EVT VT) const;
+    virtual const TargetRegisterClass *getRegClassFor(EVT VT) const;
 
     /// getMaximalGlobalOffset - Returns the maximal possible offset which can
     /// be used for loads / stores from the global.
diff --git a/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp b/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp
index 9d2805ccb04..4b5e82d466b 100644
--- a/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp
+++ b/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp
@@ -896,7 +896,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
     if (VA.isRegLoc()) {
       MVT RegVT = VA.getLocVT();
       ArgRegEnd = VA.getLocReg();
-      TargetRegisterClass *RC = 0;
+      const TargetRegisterClass *RC;
       if (RegVT == MVT::i32)
         RC = MBlaze::GPRRegisterClass;
 
@@ -964,7 +964,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
       StackPtr = DAG.getRegister(StackReg, getPointerTy());
 
       // The last register argument that must be saved is MBlaze::R10
-      TargetRegisterClass *RC = MBlaze::GPRRegisterClass;
+      const TargetRegisterClass *RC = MBlaze::GPRRegisterClass;
 
       unsigned Begin = getMBlazeRegisterNumbering(MBlaze::R5);
       unsigned Start = getMBlazeRegisterNumbering(ArgRegEnd+1);
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 240b6c9c045..a93e0ac5a68 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -710,7 +710,7 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const
 // MachineFunction as a live in value. It also creates a corresponding
 // virtual register for it.
 static unsigned
-AddLiveIn(MachineFunction &MF, unsigned PReg, TargetRegisterClass *RC)
+AddLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
 {
   assert(RC->contains(PReg) && "Not the correct regclass!");
   unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
@@ -2601,7 +2601,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
     if (IsRegLoc) {
       EVT RegVT = VA.getLocVT();
       unsigned ArgReg = VA.getLocReg();
-      TargetRegisterClass *RC = 0;
+      const TargetRegisterClass *RC;
 
       if (RegVT == MVT::i32)
         RC = Mips::CPURegsRegisterClass;
@@ -2684,7 +2684,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
     const unsigned *ArgRegs = IsO32 ? O32IntRegs : Mips64IntRegs;
     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs, NumOfRegs);
     int FirstRegSlotOffset = IsO32 ? 0 : -64 ; // offset of $a0's slot.
-    TargetRegisterClass *RC
+    const TargetRegisterClass *RC
       = IsO32 ? Mips::CPURegsRegisterClass : Mips::CPU64RegsRegisterClass;
     unsigned RegSize = RC->getSize();
     int RegSlotOffset = FirstRegSlotOffset + Idx * RegSize;
diff --git a/llvm/lib/Target/PTX/PTXISelLowering.cpp b/llvm/lib/Target/PTX/PTXISelLowering.cpp
index d5ce5a8e3f8..321b6584106 100644
--- a/llvm/lib/Target/PTX/PTXISelLowering.cpp
+++ b/llvm/lib/Target/PTX/PTXISelLowering.cpp
@@ -240,32 +240,25 @@ SDValue PTXTargetLowering::
   } else {
     for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
-      EVT                  RegVT = Ins[i].VT;
-      TargetRegisterClass* TRC   = getRegClassFor(RegVT);
-      unsigned             RegType;
+      EVT                        RegVT = Ins[i].VT;
+      const TargetRegisterClass* TRC   = getRegClassFor(RegVT);
+      unsigned                   RegType;
 
       // Determine which register class we need
-      if (RegVT == MVT::i1) {
+      if (RegVT == MVT::i1)
         RegType = PTXRegisterType::Pred;
-      }
-      else if (RegVT == MVT::i16) {
+      else if (RegVT == MVT::i16)
         RegType = PTXRegisterType::B16;
-      }
-      else if (RegVT == MVT::i32) {
+      else if (RegVT == MVT::i32)
         RegType = PTXRegisterType::B32;
-      }
-      else if (RegVT == MVT::i64) {
+      else if (RegVT == MVT::i64)
         RegType = PTXRegisterType::B64;
-      }
-      else if (RegVT == MVT::f32) {
+      else if (RegVT == MVT::f32)
         RegType = PTXRegisterType::F32;
-      }
-      else if (RegVT == MVT::f64) {
+      else if (RegVT == MVT::f64)
         RegType = PTXRegisterType::F64;
-      }
-      else {
+      else
         llvm_unreachable("Unknown parameter type");
-      }
 
       // Use a unique index in the instruction to prevent instruction folding.
       // Yes, this is a hack.
@@ -326,7 +319,7 @@ SDValue PTXTargetLowering::
   } else {
     for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
       EVT RegVT = Outs[i].VT;
-      TargetRegisterClass* TRC = 0;
+      const TargetRegisterClass* TRC;
       unsigned RegType;
 
       // Determine which register class we need
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 1231afc116a..7180b224f56 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1698,7 +1698,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
 
     // Arguments stored in registers.
     if (VA.isRegLoc()) {
-      TargetRegisterClass *RC;
+      const TargetRegisterClass *RC;
       EVT ValVT = VA.getValVT();
 
       switch (ValVT.getSimpleVT().SimpleTy) {
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index efc4c06456d..f90764e8627 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -2104,7 +2104,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
   if (!X86SelectAddress(C, AM))
     return 0;
   unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
-  TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
+  const TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
   unsigned ResultReg = createResultReg(RC);
   addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg), AM);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 9cadad9b4a3..8c645972e67 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1829,7 +1829,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
 
     if (VA.isRegLoc()) {
       EVT RegVT = VA.getLocVT();
-      TargetRegisterClass *RC = NULL;
+      const TargetRegisterClass *RC;
       if (RegVT == MVT::i32)
         RC = X86::GR32RegisterClass;
       else if (Is64Bit && RegVT == MVT::i64)
@@ -11209,7 +11209,7 @@ X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
                                                        unsigned CXchgOpc,
                                                        unsigned notOpc,
                                                        unsigned EAXreg,
-                                                       TargetRegisterClass *RC,
+                                                 const TargetRegisterClass *RC,
                                                        bool invSrc) const {
   // For the atomic bitwise operator, we generate
   //   thisMBB:
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 4a8d05577c9..00b17d37956 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -832,7 +832,7 @@ namespace llvm {
                                                     unsigned cxchgOpc,
                                                     unsigned notOpc,
                                                     unsigned EAXreg,
-                                                    TargetRegisterClass *RC,
+                                              const TargetRegisterClass *RC,
                                                     bool invSrc = false) const;
 
     MachineBasicBlock *EmitAtomicBit6432WithCustomInserter(
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 2ff26b13858..5a479f0e64c 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -1835,7 +1835,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
   case X86::ADD32rr_DB: {
     assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
     unsigned Opc;
-    TargetRegisterClass *RC;
+    const TargetRegisterClass *RC;
     if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB) {
       Opc = X86::LEA64r;
       RC = X86::GR64_NOSPRegisterClass;
diff --git a/llvm/lib/Target/X86/X86VZeroUpper.cpp b/llvm/lib/Target/X86/X86VZeroUpper.cpp
index f8c30eb9b90..2fd78a7231c 100644
--- a/llvm/lib/Target/X86/X86VZeroUpper.cpp
+++ b/llvm/lib/Target/X86/X86VZeroUpper.cpp
@@ -145,7 +145,7 @@ bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
   // to insert any VZEROUPPER instructions. This is constant-time, so it is
   // cheap in the common case of no ymm use.
   bool YMMUsed = false;
-  TargetRegisterClass *RC = X86::VR256RegisterClass;
+  const TargetRegisterClass *RC = X86::VR256RegisterClass;
   for (TargetRegisterClass::iterator i = RC->begin(), e = RC->end();
        i != e; i++) {
     if (MRI.isPhysRegUsed(*i)) {