diff options
| author | Anton Korobeynikov <asl@math.spbu.ru> | 2007-04-17 09:20:00 +0000 |
|---|---|---|
| committer | Anton Korobeynikov <asl@math.spbu.ru> | 2007-04-17 09:20:00 +0000 |
| commit | 8b7aab009e564bb38fce5b94b1b506923de29125 (patch) | |
| tree | 9eed91ae85fcc1bb13332f8389fc8a05a551761f /llvm/lib/Target/X86 | |
| parent | 8e846873502be42321200816575c2ca8159959c8 (diff) | |
| download | bcm5719-llvm-8b7aab009e564bb38fce5b94b1b506923de29125.tar.gz bcm5719-llvm-8b7aab009e564bb38fce5b94b1b506923de29125.zip | |
Implemented correct stack probing on mingw/cygwin for dynamic allocas.
Also, fixed the static case in the presence of an EAX livein. This fixes PR331.
PS: Why don't we still have push/pop instructions? :)
llvm-svn: 36195
Diffstat (limited to 'llvm/lib/Target/X86')
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 36 | ||||
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.h | 1 | ||||
| -rw-r--r-- | llvm/lib/Target/X86/X86InstrInfo.td | 3 | ||||
| -rw-r--r-- | llvm/lib/Target/X86/X86RegisterInfo.cpp | 33 |
4 files changed, 68 insertions, 5 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 664c7e06387..209b17a5c90 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -237,7 +237,10 @@ X86TargetLowering::X86TargetLowering(TargetMachine &TM) setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); if (Subtarget->is64Bit()) setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand); - setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand); + if (Subtarget->isTargetCygMing()) + setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); + else + setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); if (X86ScalarSSE) { // Set up the FP register classes. @@ -3401,6 +3404,36 @@ SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { } } +SDOperand X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, + SelectionDAG &DAG) { + // Get the inputs. + SDOperand Chain = Op.getOperand(0); + SDOperand Size = Op.getOperand(1); + // FIXME: Ensure alignment here + + TargetLowering::ArgListTy Args; + TargetLowering::ArgListEntry Entry; + MVT::ValueType IntPtr = getPointerTy(); + MVT::ValueType SPTy = (Subtarget->is64Bit() ? 
MVT::i64 : MVT::i32); + const Type *IntPtrTy = getTargetData()->getIntPtrType(); + + Entry.Node = Size; + Entry.Ty = IntPtrTy; + Entry.isInReg = true; // Should pass in EAX + Args.push_back(Entry); + std::pair<SDOperand, SDOperand> CallResult = + LowerCallTo(Chain, IntPtrTy, false, false, CallingConv::C, false, + DAG.getExternalSymbol("_alloca", IntPtr), Args, DAG); + + SDOperand SP = DAG.getCopyFromReg(CallResult.second, X86StackPtr, SPTy); + + std::vector<MVT::ValueType> Tys; + Tys.push_back(SPTy); + Tys.push_back(MVT::Other); + SDOperand Ops[2] = { SP, CallResult.second }; + return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2); +} + SDOperand X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { MachineFunction &MF = DAG.getMachineFunction(); @@ -4002,6 +4035,7 @@ SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); + case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); } return SDOperand(); } diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h index 7d40e30b397..8b9c269dca4 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.h +++ b/llvm/lib/Target/X86/X86ISelLowering.h @@ -401,6 +401,7 @@ namespace llvm { SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG); SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG); SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG); + SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG); SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG); SDOperand LowerREADCYCLCECOUNTER(SDOperand Op, SelectionDAG &DAG); SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG); diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td index 8e4e7d7ecb4..bfdaff6b1a8 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.td 
+++ b/llvm/lib/Target/X86/X86InstrInfo.td @@ -477,6 +477,9 @@ def LEAVE : I<0xC9, RawFrm, def POP32r : I<0x58, AddRegFrm, (ops GR32:$reg), "pop{l} $reg", []>, Imp<[ESP],[ESP]>; +def PUSH32r : I<0x50, AddRegFrm, + (ops GR32:$reg), "push{l} $reg", []>, Imp<[ESP],[ESP]>; + def MovePCtoStack : I<0, Pseudo, (ops piclabel:$label), "call $label", []>; diff --git a/llvm/lib/Target/X86/X86RegisterInfo.cpp b/llvm/lib/Target/X86/X86RegisterInfo.cpp index 3737c0e79e7..cd2a0d4a616 100644 --- a/llvm/lib/Target/X86/X86RegisterInfo.cpp +++ b/llvm/lib/Target/X86/X86RegisterInfo.cpp @@ -1039,14 +1039,39 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const { if (NumBytes) { // adjust stack pointer: ESP -= numbytes if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) { + // Check, whether EAX is livein for this function + bool isEAXAlive = false; + for (MachineFunction::livein_iterator II = MF.livein_begin(), + EE = MF.livein_end(); (II != EE) && !isEAXAlive; ++II) { + unsigned Reg = II->first; + isEAXAlive = (Reg == X86::EAX || Reg == X86::AX || + Reg == X86::AH || Reg == X86::AL); + } + // Function prologue calls _alloca to probe the stack when allocating // more than 4k bytes in one go. Touching the stack at 4K increments is // necessary to ensure that the guard pages used by the OS virtual memory // manager are allocated in correct sequence. - MI = BuildMI(TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes); - MBB.insert(MBBI, MI); - MI = BuildMI(TII.get(X86::CALLpcrel32)).addExternalSymbol("_alloca"); - MBB.insert(MBBI, MI); + if (!isEAXAlive) { + MI = BuildMI(TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes); + MBB.insert(MBBI, MI); + MI = BuildMI(TII.get(X86::CALLpcrel32)).addExternalSymbol("_alloca"); + MBB.insert(MBBI, MI); + } else { + // Save EAX + MI = BuildMI(TII.get(X86::PUSH32r), X86::EAX); + MBB.insert(MBBI, MI); + // Allocate NumBytes-4 bytes on stack. We'll also use 4 already + // allocated bytes for EAX. 
+ MI = BuildMI(TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes-4); + MBB.insert(MBBI, MI); + MI = BuildMI(TII.get(X86::CALLpcrel32)).addExternalSymbol("_alloca"); + MBB.insert(MBBI, MI); + // Restore EAX + MI = addRegOffset(BuildMI(TII.get(X86::MOV32rm), X86::EAX), + StackPtr, NumBytes-4); + MBB.insert(MBBI, MI); + } } else { unsigned Opc = (NumBytes < 128) ? (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) : |

