| | | |
|---|---|---|
| author | Mon P Wang <wangmp@apple.com> | 2008-06-25 08:15:39 +0000 |
| committer | Mon P Wang <wangmp@apple.com> | 2008-06-25 08:15:39 +0000 |
| commit | 6a490371c923d98143d574730ec9010bbbb89139 (patch) | |
| tree | 79b5b1309f0962c3d01214272dfadeeb8c26eba8 /llvm/lib/Target/X86 | |
| parent | 2e2001d8b9786ccb6703011810817f3751f0e787 (diff) | |
| download | bcm5719-llvm-6a490371c923d98143d574730ec9010bbbb89139.tar.gz bcm5719-llvm-6a490371c923d98143d574730ec9010bbbb89139.zip | |
Added MemOperands to atomic operations, since atomics touch memory.
Added the abstract class MemSDNode for any node that has an associated MemOperand.
Changed atomic.lcs => atomic.cmp.swap, atomic.las => atomic.load.add, and
atomic.lss => atomic.load.sub.
llvm-svn: 52706
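The renamed node types map onto the standard atomic read-modify-write primitives. As a point of reference only (this is not part of the patch and not LLVM API), the sketch below expresses the semantics of ATOMIC_CMP_SWAP, ATOMIC_LOAD_ADD, and ATOMIC_LOAD_SUB with C++11 std::atomic; the "load" in the new names refers to the value the location held before the operation, which each primitive returns. Variable names are hypothetical.

```cpp
// Illustration only: the semantics of the renamed atomic nodes, expressed
// with C++11 std::atomic rather than LLVM's SelectionDAG API.
#include <atomic>
#include <cstdio>

int main() {
  std::atomic<int> counter(10);

  // ATOMIC_LOAD_ADD: atomically add and return the *previous* value.
  int before_add = counter.fetch_add(5);   // returns 10, counter becomes 15

  // ATOMIC_LOAD_SUB: atomically subtract and return the previous value.
  int before_sub = counter.fetch_sub(3);   // returns 15, counter becomes 12

  // ATOMIC_CMP_SWAP: if counter == expected, store 42; 'expected' is
  // updated to the value that was actually observed in memory.
  int expected = 12;
  bool swapped = counter.compare_exchange_strong(expected, 42);

  std::printf("%d %d %d %d\n", before_add, before_sub,
              (int)swapped, counter.load());   // prints: 10 15 1 42
  return 0;
}
```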
Diffstat (limited to 'llvm/lib/Target/X86')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 34 |
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.h | 6 |
| -rw-r--r-- | llvm/lib/Target/X86/X86Instr64bit.td | 2 |
| -rw-r--r-- | llvm/lib/Target/X86/X86InstrInfo.td | 10 |
4 files changed, 27 insertions, 25 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 93df72a02b8..ae7f6e77c06 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -292,11 +292,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
   setOperationAction(ISD::MEMBARRIER , MVT::Other, Expand);
 
   // Expand certain atomics
-  setOperationAction(ISD::ATOMIC_LCS , MVT::i8, Custom);
-  setOperationAction(ISD::ATOMIC_LCS , MVT::i16, Custom);
-  setOperationAction(ISD::ATOMIC_LCS , MVT::i32, Custom);
-  setOperationAction(ISD::ATOMIC_LCS , MVT::i64, Custom);
-  setOperationAction(ISD::ATOMIC_LSS , MVT::i32, Expand);
+  setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i8, Custom);
+  setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i16, Custom);
+  setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i32, Custom);
+  setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i64, Custom);
+  setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i32, Expand);
 
   // Use the default ISD::LOCATION, ISD::DECLARE expansion.
   setOperationAction(ISD::LOCATION, MVT::Other, Expand);
@@ -5655,7 +5655,7 @@ SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) {
   return Op;
 }
 
-SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) {
+SDOperand X86TargetLowering::LowerCMP_SWAP(SDOperand Op, SelectionDAG &DAG) {
   MVT T = cast<AtomicSDNode>(Op.Val)->getVT();
   unsigned Reg = 0;
   unsigned size = 0;
@@ -5669,7 +5669,7 @@ SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) {
     if (Subtarget->is64Bit()) {
       Reg = X86::RAX; size = 8;
     } else //Should go away when LowerType stuff lands
-      return SDOperand(ExpandATOMIC_LCS(Op.Val, DAG), 0);
+      return SDOperand(ExpandATOMIC_CMP_SWAP(Op.Val, DAG), 0);
     break;
   };
   SDOperand cpIn = DAG.getCopyToReg(Op.getOperand(0), Reg,
@@ -5686,9 +5686,9 @@ SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) {
   return cpOut;
 }
 
-SDNode* X86TargetLowering::ExpandATOMIC_LCS(SDNode* Op, SelectionDAG &DAG) {
+SDNode* X86TargetLowering::ExpandATOMIC_CMP_SWAP(SDNode* Op, SelectionDAG &DAG) {
   MVT T = cast<AtomicSDNode>(Op)->getVT();
-  assert (T == MVT::i64 && "Only know how to expand i64 CAS");
+  assert (T == MVT::i64 && "Only know how to expand i64 Cmp and Swap");
   SDOperand cpInL, cpInH;
   cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3),
                       DAG.getConstant(0, MVT::i32));
@@ -5722,13 +5722,15 @@ SDNode* X86TargetLowering::ExpandATOMIC_LCS(SDNode* Op, SelectionDAG &DAG) {
   return DAG.getNode(ISD::MERGE_VALUES, Tys, ResultVal, cpOutH.getValue(1)).Val;
 }
 
-SDNode* X86TargetLowering::ExpandATOMIC_LSS(SDNode* Op, SelectionDAG &DAG) {
+SDNode* X86TargetLowering::ExpandATOMIC_LOAD_SUB(SDNode* Op, SelectionDAG &DAG) {
   MVT T = cast<AtomicSDNode>(Op)->getVT();
-  assert (T == MVT::i32 && "Only know how to expand i32 LSS");
+  assert (T == MVT::i32 && "Only know how to expand i32 Atomic Load Sub");
   SDOperand negOp = DAG.getNode(ISD::SUB, T,
                                 DAG.getConstant(0, T), Op->getOperand(2));
-  return DAG.getAtomic(ISD::ATOMIC_LAS, Op->getOperand(0),
-                       Op->getOperand(1), negOp, T).Val;
+  return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, Op->getOperand(0),
+                       Op->getOperand(1), negOp, T,
+                       cast<AtomicSDNode>(Op)->getSrcValue(),
+                       cast<AtomicSDNode>(Op)->getAlignment()).Val;
 }
 
 /// LowerOperation - Provide custom lowering hooks for some operations.
@@ -5736,7 +5738,7 @@ SDNode* X86TargetLowering::ExpandATOMIC_LSS(SDNode* Op, SelectionDAG &DAG) {
 SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
   switch (Op.getOpcode()) {
   default: assert(0 && "Should not custom lower this!");
-  case ISD::ATOMIC_LCS:         return LowerLCS(Op,DAG);
+  case ISD::ATOMIC_CMP_SWAP:    return LowerCMP_SWAP(Op,DAG);
   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
@@ -5788,8 +5790,8 @@ SDNode *X86TargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) {
   default: assert(0 && "Should not custom lower this!");
   case ISD::FP_TO_SINT:       return ExpandFP_TO_SINT(N, DAG);
   case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG);
-  case ISD::ATOMIC_LCS:       return ExpandATOMIC_LCS(N, DAG);
-  case ISD::ATOMIC_LSS:       return ExpandATOMIC_LSS(N,DAG);
+  case ISD::ATOMIC_CMP_SWAP:  return ExpandATOMIC_CMP_SWAP(N, DAG);
+  case ISD::ATOMIC_LOAD_SUB:  return ExpandATOMIC_LOAD_SUB(N,DAG);
   }
 }
 
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index f2b851e66e4..dff4beadbf7 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -541,11 +541,11 @@ namespace llvm {
     SDOperand LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG);
     SDOperand LowerCTLZ(SDOperand Op, SelectionDAG &DAG);
     SDOperand LowerCTTZ(SDOperand Op, SelectionDAG &DAG);
-    SDOperand LowerLCS(SDOperand Op, SelectionDAG &DAG);
+    SDOperand LowerCMP_SWAP(SDOperand Op, SelectionDAG &DAG);
     SDNode *ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG);
     SDNode *ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG);
-    SDNode *ExpandATOMIC_LCS(SDNode *N, SelectionDAG &DAG);
-    SDNode *ExpandATOMIC_LSS(SDNode *N, SelectionDAG &DAG);
+    SDNode *ExpandATOMIC_CMP_SWAP(SDNode *N, SelectionDAG &DAG);
+    SDNode *ExpandATOMIC_LOAD_SUB(SDNode *N, SelectionDAG &DAG);
 
     SDOperand EmitTargetCodeForMemset(SelectionDAG &DAG,
                                       SDOperand Chain,
diff --git a/llvm/lib/Target/X86/X86Instr64bit.td b/llvm/lib/Target/X86/X86Instr64bit.td
index 8398e9aa2a7..23a403068bf 100644
--- a/llvm/lib/Target/X86/X86Instr64bit.td
+++ b/llvm/lib/Target/X86/X86Instr64bit.td
@@ -1124,7 +1124,7 @@ def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
 let Constraints = "$val = $dst", Defs = [EFLAGS] in {
 def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
                "lock xadd $val, $ptr",
-               [(set GR64:$dst, (atomic_las_64 addr:$ptr, GR64:$val))]>,
+               [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
                 TB, LOCK;
 def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
                "xchg $val, $ptr",
diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
index 646655ef49c..7a4e796cadb 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -2614,19 +2614,19 @@ def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap),
 let Constraints = "$val = $dst", Defs = [EFLAGS] in {
 def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
               "lock xadd{l}\t{$val, $ptr|$ptr, $val}",
-              [(set GR32:$dst, (atomic_las_32 addr:$ptr, GR32:$val))]>,
+              [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))]>,
                 TB, LOCK;
 def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
               "lock xadd{w}\t{$val, $ptr|$ptr, $val}",
-              [(set GR16:$dst, (atomic_las_16 addr:$ptr, GR16:$val))]>,
+              [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))]>,
                 TB, OpSize, LOCK;
 def LXADD8  : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
               "lock xadd{b}\t{$val, $ptr|$ptr, $val}",
-              [(set GR8:$dst, (atomic_las_8 addr:$ptr, GR8:$val))]>,
+              [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))]>,
                 TB, LOCK;
 }
 
-// Atomic exchange and and, or, xor
+// Atomic exchange, and, or, xor
 let Constraints = "$val = $dst", Defs = [EFLAGS],
                   usesCustomDAGSchedInserter = 1 in {
 def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
@@ -2639,7 +2639,7 @@ def ATOMXOR32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                "#ATOMXOR32 PSUEDO!",
                [(set GR32:$dst, (atomic_load_xor addr:$ptr, GR32:$val))]>;
 def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
-               "#ATOMXOR32 PSUEDO!",
+               "#ATOMNAND32 PSUEDO!",
                [(set GR32:$dst, (atomic_load_nand addr:$ptr, GR32:$val))]>;
 def ATOMMIN32: I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
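One detail worth noting in the X86ISelLowering.cpp hunks above: ExpandATOMIC_LOAD_SUB never emits an atomic subtract. It negates the operand (0 - val) and re-emits the node as ATOMIC_LOAD_ADD, and with this patch it also forwards the node's getSrcValue() and getAlignment() so the newly attached MemOperand information survives the rewrite. The snippet below is an illustration only, written with C++11 std::atomic rather than LLVM's DAG APIs, of why that negation trick is valid for two's-complement integers; the variable names are made up.

```cpp
// Illustration only (not LLVM code): an atomic "load subtract" has the same
// effect as an atomic "load add" of the negated operand, which is exactly
// the rewrite ExpandATOMIC_LOAD_SUB performs at the SelectionDAG level.
#include <atomic>
#include <cassert>

int main() {
  std::atomic<int> a(100), b(100);
  const int x = 7;

  int old_a = a.fetch_sub(x);   // direct atomic subtract; returns prior value
  int old_b = b.fetch_add(-x);  // add of the negation (0 - x); same semantics

  assert(old_a == 100 && old_b == 100);  // both return the value before the op
  assert(a.load() == 93 && b.load() == 93);
  return 0;
}
```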

