diff options
| author | Mon P Wang <wangmp@apple.com> | 2008-06-25 08:15:39 +0000 |
|---|---|---|
| committer | Mon P Wang <wangmp@apple.com> | 2008-06-25 08:15:39 +0000 |
| commit | 6a490371c923d98143d574730ec9010bbbb89139 (patch) | |
| tree | 79b5b1309f0962c3d01214272dfadeeb8c26eba8 /llvm/lib/Target | |
| parent | 2e2001d8b9786ccb6703011810817f3751f0e787 (diff) | |
| download | bcm5719-llvm-6a490371c923d98143d574730ec9010bbbb89139.tar.gz bcm5719-llvm-6a490371c923d98143d574730ec9010bbbb89139.zip | |
Added MemOperands to Atomic operations since Atomics touch memory.
Added abstract class MemSDNode for any Node that has an associated MemOperand
Changed atomic.lcs => atomic.cmp.swap, atomic.las => atomic.load.add, and
atomic.lss => atomic.load.sub
llvm-svn: 52706
Diffstat (limited to 'llvm/lib/Target')
| -rw-r--r-- | llvm/lib/Target/Alpha/AlphaInstrInfo.td | 8 | ||||
| -rw-r--r-- | llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 16 | ||||
| -rw-r--r-- | llvm/lib/Target/PowerPC/PPCISelLowering.h | 4 | ||||
| -rw-r--r-- | llvm/lib/Target/TargetSelectionDAG.td | 71 | ||||
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 34 | ||||
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.h | 6 | ||||
| -rw-r--r-- | llvm/lib/Target/X86/X86Instr64bit.td | 2 | ||||
| -rw-r--r-- | llvm/lib/Target/X86/X86InstrInfo.td | 10 |
8 files changed, 77 insertions, 74 deletions
diff --git a/llvm/lib/Target/Alpha/AlphaInstrInfo.td b/llvm/lib/Target/Alpha/AlphaInstrInfo.td index ddefe858a26..42bd8edaff5 100644 --- a/llvm/lib/Target/Alpha/AlphaInstrInfo.td +++ b/llvm/lib/Target/Alpha/AlphaInstrInfo.td @@ -160,14 +160,14 @@ def MEMLABEL : PseudoInstAlpha<(outs), (ins s64imm:$i, s64imm:$j, s64imm:$k, s64 let usesCustomDAGSchedInserter = 1 in { // Expanded by the scheduler. def CAS32 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$cmp, GPRC:$swp), "", - [(set GPRC:$dst, (atomic_lcs_32 GPRC:$ptr, GPRC:$cmp, GPRC:$swp))], s_pseudo>; + [(set GPRC:$dst, (atomic_cmp_swap_32 GPRC:$ptr, GPRC:$cmp, GPRC:$swp))], s_pseudo>; def CAS64 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$cmp, GPRC:$swp), "", - [(set GPRC:$dst, (atomic_lcs_64 GPRC:$ptr, GPRC:$cmp, GPRC:$swp))], s_pseudo>; + [(set GPRC:$dst, (atomic_cmp_swap_64 GPRC:$ptr, GPRC:$cmp, GPRC:$swp))], s_pseudo>; def LAS32 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$swp), "", - [(set GPRC:$dst, (atomic_las_32 GPRC:$ptr, GPRC:$swp))], s_pseudo>; + [(set GPRC:$dst, (atomic_load_add_32 GPRC:$ptr, GPRC:$swp))], s_pseudo>; def LAS64 :PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$swp), "", - [(set GPRC:$dst, (atomic_las_64 GPRC:$ptr, GPRC:$swp))], s_pseudo>; + [(set GPRC:$dst, (atomic_load_add_64 GPRC:$ptr, GPRC:$swp))], s_pseudo>; def SWAP32 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$swp), "", [(set GPRC:$dst, (atomic_swap_32 GPRC:$ptr, GPRC:$swp))], s_pseudo>; diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp index d4f824108be..9432a749b81 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -204,12 +204,12 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom); setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom); - setOperationAction(ISD::ATOMIC_LAS , 
MVT::i32 , Custom); - setOperationAction(ISD::ATOMIC_LCS , MVT::i32 , Custom); + setOperationAction(ISD::ATOMIC_LOAD_ADD , MVT::i32 , Custom); + setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i32 , Custom); setOperationAction(ISD::ATOMIC_SWAP , MVT::i32 , Custom); if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) { - setOperationAction(ISD::ATOMIC_LAS , MVT::i64 , Custom); - setOperationAction(ISD::ATOMIC_LCS , MVT::i64 , Custom); + setOperationAction(ISD::ATOMIC_LOAD_ADD , MVT::i64 , Custom); + setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i64 , Custom); setOperationAction(ISD::ATOMIC_SWAP , MVT::i64 , Custom); } @@ -2721,7 +2721,7 @@ SDOperand PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, return DAG.getNode(PPCISD::DYNALLOC, VTs, Ops, 3); } -SDOperand PPCTargetLowering::LowerAtomicLAS(SDOperand Op, SelectionDAG &DAG) { +SDOperand PPCTargetLowering::LowerAtomicLOAD_ADD(SDOperand Op, SelectionDAG &DAG) { MVT VT = Op.Val->getValueType(0); SDOperand Chain = Op.getOperand(0); SDOperand Ptr = Op.getOperand(1); @@ -2757,7 +2757,7 @@ SDOperand PPCTargetLowering::LowerAtomicLAS(SDOperand Op, SelectionDAG &DAG) { OutOps, 2); } -SDOperand PPCTargetLowering::LowerAtomicLCS(SDOperand Op, SelectionDAG &DAG) { +SDOperand PPCTargetLowering::LowerAtomicCMP_SWAP(SDOperand Op, SelectionDAG &DAG) { MVT VT = Op.Val->getValueType(0); SDOperand Chain = Op.getOperand(0); SDOperand Ptr = Op.getOperand(1); @@ -3942,8 +3942,8 @@ SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget); - case ISD::ATOMIC_LAS: return LowerAtomicLAS(Op, DAG); - case ISD::ATOMIC_LCS: return LowerAtomicLCS(Op, DAG); + case ISD::ATOMIC_LOAD_ADD: return LowerAtomicLOAD_ADD(Op, DAG); + case ISD::ATOMIC_CMP_SWAP: return LowerAtomicCMP_SWAP(Op, DAG); case ISD::ATOMIC_SWAP: return LowerAtomicSWAP(Op, DAG); case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); diff --git 
a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h index 34012ffb10d..e3ec7b0e3be 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.h +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h @@ -366,8 +366,8 @@ namespace llvm { SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget); SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerAtomicLAS(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerAtomicLCS(SDOperand Op, SelectionDAG &DAG); + SDOperand LowerAtomicLOAD_ADD(SDOperand Op, SelectionDAG &DAG); + SDOperand LowerAtomicCMP_SWAP(SDOperand Op, SelectionDAG &DAG); SDOperand LowerAtomicSWAP(SDOperand Op, SelectionDAG &DAG); SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG); SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG); diff --git a/llvm/lib/Target/TargetSelectionDAG.td b/llvm/lib/Target/TargetSelectionDAG.td index 38a604653f5..c30933d0f9a 100644 --- a/llvm/lib/Target/TargetSelectionDAG.td +++ b/llvm/lib/Target/TargetSelectionDAG.td @@ -220,6 +220,7 @@ def SDNPOptInFlag : SDNodeProperty; // Optionally read a flag operand def SDNPMayStore : SDNodeProperty; // May write to memory, sets 'mayStore'. def SDNPMayLoad : SDNodeProperty; // May read memory, sets 'mayLoad'. def SDNPSideEffect : SDNodeProperty; // Sets 'HasUnmodelledSideEffects'. +def SDNPMemOperand : SDNodeProperty; // Touches memory, has assoc MemOperand //===----------------------------------------------------------------------===// // Selection DAG Node definitions. 
@@ -353,39 +354,39 @@ def membarrier : SDNode<"ISD::MEMBARRIER" , STDMemBarrier, [SDNPHasChain, SDNPSideEffect]>; // Do not use atomic_* directly, use atomic_*_size (see below) -def atomic_lcs : SDNode<"ISD::ATOMIC_LCS" , STDAtomic3, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>; -def atomic_las : SDNode<"ISD::ATOMIC_LAS" , STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>; -def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>; -def atomic_lss : SDNode<"ISD::ATOMIC_LSS" , STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>; +def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , STDAtomic3, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; +def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , STDAtomic2, + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>; + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; def atomic_load_or : SDNode<"ISD::ATOMIC_LOAD_OR" , STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>; + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; def atomic_load_xor : SDNode<"ISD::ATOMIC_LOAD_XOR" , STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>; + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; def atomic_load_nand: SDNode<"ISD::ATOMIC_LOAD_NAND", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>; + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; def atomic_load_min : SDNode<"ISD::ATOMIC_LOAD_MIN", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>; + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; def atomic_load_max : 
SDNode<"ISD::ATOMIC_LOAD_MAX", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>; + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>; + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", STDAtomic2, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>; + [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; // Do not use ld, st directly. Use load, extload, sextload, zextload, store, // and truncst (see below). def ld : SDNode<"ISD::LOAD" , SDTLoad, - [SDNPHasChain, SDNPMayLoad]>; + [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>; def st : SDNode<"ISD::STORE" , SDTStore, - [SDNPHasChain, SDNPMayStore]>; + [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>; def ist : SDNode<"ISD::STORE" , SDTIStore, - [SDNPHasChain, SDNPMayStore]>; + [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>; def vector_shuffle : SDNode<"ISD::VECTOR_SHUFFLE", SDTVecShuffle, []>; def build_vector : SDNode<"ISD::BUILD_VECTOR", SDTypeProfile<1, 0, []>, []>; @@ -764,51 +765,51 @@ def post_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset), }]>; //Atomic patterns -def atomic_lcs_8 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp), - (atomic_lcs node:$ptr, node:$cmp, node:$swp), [{ +def atomic_cmp_swap_8 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp), + (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{ if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N)) return V->getVT() == MVT::i8; return false; }]>; -def atomic_lcs_16 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp), - (atomic_lcs node:$ptr, node:$cmp, node:$swp), [{ +def atomic_cmp_swap_16 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp), + (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{ if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N)) return V->getVT() == MVT::i16; return false; }]>; -def atomic_lcs_32 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp), - 
(atomic_lcs node:$ptr, node:$cmp, node:$swp), [{ +def atomic_cmp_swap_32 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp), + (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{ if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N)) return V->getVT() == MVT::i32; return false; }]>; -def atomic_lcs_64 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp), - (atomic_lcs node:$ptr, node:$cmp, node:$swp), [{ +def atomic_cmp_swap_64 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp), + (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{ if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N)) return V->getVT() == MVT::i64; return false; }]>; -def atomic_las_8 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_las node:$ptr, node:$inc), [{ +def atomic_load_add_8 : PatFrag<(ops node:$ptr, node:$inc), + (atomic_load_add node:$ptr, node:$inc), [{ if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N)) return V->getVT() == MVT::i8; return false; }]>; -def atomic_las_16 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_las node:$ptr, node:$inc), [{ +def atomic_load_add_16 : PatFrag<(ops node:$ptr, node:$inc), + (atomic_load_add node:$ptr, node:$inc), [{ if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N)) return V->getVT() == MVT::i16; return false; }]>; -def atomic_las_32 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_las node:$ptr, node:$inc), [{ +def atomic_load_add_32 : PatFrag<(ops node:$ptr, node:$inc), + (atomic_load_add node:$ptr, node:$inc), [{ if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N)) return V->getVT() == MVT::i32; return false; }]>; -def atomic_las_64 : PatFrag<(ops node:$ptr, node:$inc), - (atomic_las node:$ptr, node:$inc), [{ +def atomic_load_add_64 : PatFrag<(ops node:$ptr, node:$inc), + (atomic_load_add node:$ptr, node:$inc), [{ if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N)) return V->getVT() == MVT::i64; return false; diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 93df72a02b8..ae7f6e77c06 100644 --- 
a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -292,11 +292,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::MEMBARRIER , MVT::Other, Expand); // Expand certain atomics - setOperationAction(ISD::ATOMIC_LCS , MVT::i8, Custom); - setOperationAction(ISD::ATOMIC_LCS , MVT::i16, Custom); - setOperationAction(ISD::ATOMIC_LCS , MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_LCS , MVT::i64, Custom); - setOperationAction(ISD::ATOMIC_LSS , MVT::i32, Expand); + setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i8, Custom); + setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i16, Custom); + setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i32, Custom); + setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i64, Custom); + setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i32, Expand); // Use the default ISD::LOCATION, ISD::DECLARE expansion. setOperationAction(ISD::LOCATION, MVT::Other, Expand); @@ -5655,7 +5655,7 @@ SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) { return Op; } -SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) { +SDOperand X86TargetLowering::LowerCMP_SWAP(SDOperand Op, SelectionDAG &DAG) { MVT T = cast<AtomicSDNode>(Op.Val)->getVT(); unsigned Reg = 0; unsigned size = 0; @@ -5669,7 +5669,7 @@ SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) { if (Subtarget->is64Bit()) { Reg = X86::RAX; size = 8; } else //Should go away when LowerType stuff lands - return SDOperand(ExpandATOMIC_LCS(Op.Val, DAG), 0); + return SDOperand(ExpandATOMIC_CMP_SWAP(Op.Val, DAG), 0); break; }; SDOperand cpIn = DAG.getCopyToReg(Op.getOperand(0), Reg, @@ -5686,9 +5686,9 @@ SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) { return cpOut; } -SDNode* X86TargetLowering::ExpandATOMIC_LCS(SDNode* Op, SelectionDAG &DAG) { +SDNode* X86TargetLowering::ExpandATOMIC_CMP_SWAP(SDNode* Op, SelectionDAG &DAG) { MVT T = 
cast<AtomicSDNode>(Op)->getVT(); - assert (T == MVT::i64 && "Only know how to expand i64 CAS"); + assert (T == MVT::i64 && "Only know how to expand i64 Cmp and Swap"); SDOperand cpInL, cpInH; cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3), DAG.getConstant(0, MVT::i32)); @@ -5722,13 +5722,15 @@ SDNode* X86TargetLowering::ExpandATOMIC_LCS(SDNode* Op, SelectionDAG &DAG) { return DAG.getNode(ISD::MERGE_VALUES, Tys, ResultVal, cpOutH.getValue(1)).Val; } -SDNode* X86TargetLowering::ExpandATOMIC_LSS(SDNode* Op, SelectionDAG &DAG) { +SDNode* X86TargetLowering::ExpandATOMIC_LOAD_SUB(SDNode* Op, SelectionDAG &DAG) { MVT T = cast<AtomicSDNode>(Op)->getVT(); - assert (T == MVT::i32 && "Only know how to expand i32 LSS"); + assert (T == MVT::i32 && "Only know how to expand i32 Atomic Load Sub"); SDOperand negOp = DAG.getNode(ISD::SUB, T, DAG.getConstant(0, T), Op->getOperand(2)); - return DAG.getAtomic(ISD::ATOMIC_LAS, Op->getOperand(0), - Op->getOperand(1), negOp, T).Val; + return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, Op->getOperand(0), + Op->getOperand(1), negOp, T, + cast<AtomicSDNode>(Op)->getSrcValue(), + cast<AtomicSDNode>(Op)->getAlignment()).Val; } /// LowerOperation - Provide custom lowering hooks for some operations. 
@@ -5736,7 +5738,7 @@ SDNode* X86TargetLowering::ExpandATOMIC_LSS(SDNode* Op, SelectionDAG &DAG) { SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { switch (Op.getOpcode()) { default: assert(0 && "Should not custom lower this!"); - case ISD::ATOMIC_LCS: return LowerLCS(Op,DAG); + case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG); case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); @@ -5788,8 +5790,8 @@ SDNode *X86TargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) { default: assert(0 && "Should not custom lower this!"); case ISD::FP_TO_SINT: return ExpandFP_TO_SINT(N, DAG); case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG); - case ISD::ATOMIC_LCS: return ExpandATOMIC_LCS(N, DAG); - case ISD::ATOMIC_LSS: return ExpandATOMIC_LSS(N,DAG); + case ISD::ATOMIC_CMP_SWAP: return ExpandATOMIC_CMP_SWAP(N, DAG); + case ISD::ATOMIC_LOAD_SUB: return ExpandATOMIC_LOAD_SUB(N,DAG); } } diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h index f2b851e66e4..dff4beadbf7 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.h +++ b/llvm/lib/Target/X86/X86ISelLowering.h @@ -541,11 +541,11 @@ namespace llvm { SDOperand LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG); SDOperand LowerCTLZ(SDOperand Op, SelectionDAG &DAG); SDOperand LowerCTTZ(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerLCS(SDOperand Op, SelectionDAG &DAG); + SDOperand LowerCMP_SWAP(SDOperand Op, SelectionDAG &DAG); SDNode *ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG); SDNode *ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG); - SDNode *ExpandATOMIC_LCS(SDNode *N, SelectionDAG &DAG); - SDNode *ExpandATOMIC_LSS(SDNode *N, SelectionDAG &DAG); + SDNode *ExpandATOMIC_CMP_SWAP(SDNode *N, SelectionDAG &DAG); + SDNode *ExpandATOMIC_LOAD_SUB(SDNode *N, SelectionDAG &DAG); SDOperand 
EmitTargetCodeForMemset(SelectionDAG &DAG, SDOperand Chain, diff --git a/llvm/lib/Target/X86/X86Instr64bit.td b/llvm/lib/Target/X86/X86Instr64bit.td index 8398e9aa2a7..23a403068bf 100644 --- a/llvm/lib/Target/X86/X86Instr64bit.td +++ b/llvm/lib/Target/X86/X86Instr64bit.td @@ -1124,7 +1124,7 @@ def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap), let Constraints = "$val = $dst", Defs = [EFLAGS] in { def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val), "lock xadd $val, $ptr", - [(set GR64:$dst, (atomic_las_64 addr:$ptr, GR64:$val))]>, + [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>, TB, LOCK; def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val), "xchg $val, $ptr", diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td index 646655ef49c..7a4e796cadb 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.td +++ b/llvm/lib/Target/X86/X86InstrInfo.td @@ -2614,19 +2614,19 @@ def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap), let Constraints = "$val = $dst", Defs = [EFLAGS] in { def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val), "lock xadd{l}\t{$val, $ptr|$ptr, $val}", - [(set GR32:$dst, (atomic_las_32 addr:$ptr, GR32:$val))]>, + [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))]>, TB, LOCK; def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val), "lock xadd{w}\t{$val, $ptr|$ptr, $val}", - [(set GR16:$dst, (atomic_las_16 addr:$ptr, GR16:$val))]>, + [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))]>, TB, OpSize, LOCK; def LXADD8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val), "lock xadd{b}\t{$val, $ptr|$ptr, $val}", - [(set GR8:$dst, (atomic_las_8 addr:$ptr, GR8:$val))]>, + [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))]>, TB, LOCK; } -// Atomic exchange and and, or, xor +// Atomic exchange, and, or, xor let Constraints = "$val = 
$dst", Defs = [EFLAGS], usesCustomDAGSchedInserter = 1 in { def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val), @@ -2639,7 +2639,7 @@ def ATOMXOR32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val), "#ATOMXOR32 PSUEDO!", [(set GR32:$dst, (atomic_load_xor addr:$ptr, GR32:$val))]>; def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val), - "#ATOMXOR32 PSUEDO!", + "#ATOMNAND32 PSUEDO!", [(set GR32:$dst, (atomic_load_nand addr:$ptr, GR32:$val))]>; def ATOMMIN32: I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val), |

