| author | Eli Friedman <eli.friedman@gmail.com> | 2011-05-17 02:36:59 +0000 | 
|---|---|---|
| committer | Eli Friedman <eli.friedman@gmail.com> | 2011-05-17 02:36:59 +0000 | 
| commit | 7335e8a720ce469b2aab3c50aedb6ae251d29a81 (patch) | |
| tree | 3f8d02b70dc65c3d5fcf996d0db654f335679d95 /llvm/lib/Target/X86/X86FastISel.cpp | |
| parent | d42411fa19e92ec57cf24a89b8a4e979c0468574 (diff) | |
Back out r131444 and r131438; they're breaking nightly tests.  I'll look into
it more tomorrow.
llvm-svn: 131451
Diffstat (limited to 'llvm/lib/Target/X86/X86FastISel.cpp')
-rw-r--r--  llvm/lib/Target/X86/X86FastISel.cpp | 91

1 file changed, 48 insertions(+), 43 deletions(-)
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index e817f233660..ebdc8f8898f 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -1414,6 +1414,14 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
   if (Subtarget->IsCalleePop(isVarArg, CC))
     return false;

+  // Handle *simple* calls for now.
+  const Type *RetTy = CS.getType();
+  MVT RetVT;
+  if (RetTy->isVoidTy())
+    RetVT = MVT::isVoid;
+  else if (!isTypeLegal(RetTy, RetVT, true))
+    return false;
+
   // Materialize callee address in a register. FIXME: GV address can be
   // handled with a CALLpcrel32 instead.
   X86AddressMode CalleeAM;
@@ -1428,6 +1436,13 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
   } else
     return false;

+  // Allow calls which produce i1 results.
+  bool AndToI1 = false;
+  if (RetVT == MVT::i1) {
+    RetVT = MVT::i8;
+    AndToI1 = true;
+  }
+
   // Deal with call operands first.
   SmallVector<const Value *, 8> ArgVals;
   SmallVector<unsigned, 8> Args;
@@ -1682,72 +1697,62 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp))
     .addImm(NumBytes).addImm(NumBytesCallee);

-  // Build info for return calling conv lowering code.
-  // FIXME: This is practically a copy-paste from TargetLowering::LowerCallTo.
-  SmallVector<ISD::InputArg, 32> Ins;
-  SmallVector<EVT, 4> RetTys;
-  ComputeValueVTs(TLI, I->getType(), RetTys);
-  for (unsigned i = 0, e = RetTys.size(); i != e; ++i) {
-    EVT VT = RetTys[i];
-    EVT RegisterVT = TLI.getRegisterType(I->getParent()->getContext(), VT);
-    unsigned NumRegs = TLI.getNumRegisters(I->getParent()->getContext(), VT);
-    for (unsigned j = 0; j != NumRegs; ++j) {
-      ISD::InputArg MyFlags;
-      MyFlags.VT = RegisterVT.getSimpleVT();
-      MyFlags.Used = !CS.getInstruction()->use_empty();
-      if (CS.paramHasAttr(0, Attribute::SExt))
-        MyFlags.Flags.setSExt();
-      if (CS.paramHasAttr(0, Attribute::ZExt))
-        MyFlags.Flags.setZExt();
-      if (CS.paramHasAttr(0, Attribute::InReg))
-        MyFlags.Flags.setInReg();
-      Ins.push_back(MyFlags);
-    }
-  }
-
-  // Now handle call return values.
+  // Now handle call return value (if any).
   SmallVector<unsigned, 4> UsedRegs;
-  SmallVector<CCValAssign, 16> RVLocs;
-  CCState CCRetInfo(CC, false, TM, RVLocs, I->getParent()->getContext());
-  unsigned ResultReg = FuncInfo.CreateRegs(I->getType());
-  CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);
-  for (unsigned i = 0; i != RVLocs.size(); ++i) {
-    EVT CopyVT = RVLocs[i].getValVT();
-    unsigned CopyReg = ResultReg + i;
+  if (RetVT != MVT::isVoid) {
+    SmallVector<CCValAssign, 16> RVLocs;
+    CCState CCInfo(CC, false, TM, RVLocs, I->getParent()->getContext());
+    CCInfo.AnalyzeCallResult(RetVT, RetCC_X86);
+
+    // Copy all of the result registers out of their specified physreg.
+    assert(RVLocs.size() == 1 && "Can't handle multi-value calls!");
+    EVT CopyVT = RVLocs[0].getValVT();
+    TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

     // If this is a call to a function that returns an fp value on the x87 fp
     // stack, but where we prefer to use the value in xmm registers, copy it
     // out as F80 and use a truncate to move it from fp stack reg to xmm reg.
-    if ((RVLocs[i].getLocReg() == X86::ST0 ||
-         RVLocs[i].getLocReg() == X86::ST1) &&
+    if ((RVLocs[0].getLocReg() == X86::ST0 ||
+         RVLocs[0].getLocReg() == X86::ST1) &&
         isScalarFPTypeInSSEReg(RVLocs[0].getValVT())) {
       CopyVT = MVT::f80;
-      CopyReg = createResultReg(X86::RFP80RegisterClass);
+      DstRC = X86::RFP80RegisterClass;
     }

+    unsigned ResultReg = createResultReg(DstRC);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
-            ResultReg+i).addReg(RVLocs[i].getLocReg());
-    UsedRegs.push_back(RVLocs[i].getLocReg());
+            ResultReg).addReg(RVLocs[0].getLocReg());
+    UsedRegs.push_back(RVLocs[0].getLocReg());

-    if (CopyVT != RVLocs[i].getValVT()) {
+    if (CopyVT != RVLocs[0].getValVT()) {
       // Round the F80 the right size, which also moves to the appropriate xmm
       // register. This is accomplished by storing the F80 value in memory and
       // then loading it back. Ewww...
-      EVT ResVT = RVLocs[i].getValVT();
+      EVT ResVT = RVLocs[0].getValVT();
       unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
       unsigned MemSize = ResVT.getSizeInBits()/8;
       int FI = MFI.CreateStackObject(MemSize, MemSize, false);
       addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(Opc)), FI)
-        .addReg(CopyReg);
+        .addReg(ResultReg);
+      DstRC = ResVT == MVT::f32
+        ? X86::FR32RegisterClass : X86::FR64RegisterClass;
       Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
+      ResultReg = createResultReg(DstRC);
       addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                                TII.get(Opc), ResultReg + i), FI);
+                                TII.get(Opc), ResultReg), FI);
     }
-  }

-  if (RVLocs.size())
-    UpdateValueMap(I, ResultReg, RVLocs.size());
+    if (AndToI1) {
+      // Mask out all but lowest bit for some call which produces an i1.
+      unsigned AndResult = createResultReg(X86::GR8RegisterClass);
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+              TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1);
+      ResultReg = AndResult;
+    }
+
+    UpdateValueMap(I, ResultReg);
+  }

   // Set all unused physreg defs as dead.
   static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
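For context, a minimal sketch (not part of this commit; the function name `check` is hypothetical) of the kind of call the restored `AndToI1` path handles: a call whose LLVM IR result type is `i1`. Clang models a `_Bool` return value as `i1`, so at -O0 fast-isel widens the result to `MVT::i8`, copies it out of the return register, and masks the low bit with `AND8ri` before updating the value map.

```c
/* Hypothetical example (not from this commit): a call returning _Bool.
 * Clang gives the call an i1 result type in LLVM IR, so when fast-isel
 * selects it at -O0, the restored X86SelectCall code treats the result
 * as i8 and masks the low bit with AND8ri before recording it. */
#include <stdbool.h>

bool check(int x); /* defined elsewhere; becomes an i1-returning call */

int caller(int x) {
  if (check(x)) /* this i1 result exercises the AndToI1 handling */
    return 1;
  return 0;
}
```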

