author | Tim Northover <tnorthover@apple.com> | 2016-08-02 20:22:36 +0000
---|---|---
committer | Tim Northover <tnorthover@apple.com> | 2016-08-02 20:22:36 +0000
commit | 1021d89398ea8262b20270609fb8f4eacc9edacd (patch)
tree | de03d50bf86bdcc879fd8f61e08284867f24d516 /llvm/lib
parent | 84c557ad3ec0b020e511d0f1792b68b71e815d8a (diff)
download | bcm5719-llvm-1021d89398ea8262b20270609fb8f4eacc9edacd.tar.gz bcm5719-llvm-1021d89398ea8262b20270609fb8f4eacc9edacd.zip
AArch64: properly calculate cmpxchg status in FastISel.
We were relying on the misleadingly-named $status result to actually be the
status. In reality it's just a scratch register that may or may not hold a
valid value (and is the inverse of the real status anyway). For "cmpxchg
strong" loops like these, success can instead be determined by comparing the
value loaded against the one we wanted to see.
Should fix PR28819.
llvm-svn: 277513
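In effect, the fix recomputes the success bit from the loaded value: a SUBS compares it against the desired value, and a CSINC of WZR then materializes the i1 result. A minimal sketch of the semantics being computed, using illustrative names rather than LLVM API:

```cpp
#include <cstdint>

// Sketch of what the emitted SUBS + CSINC pair computes for a strong
// cmpxchg. SUBS discards its arithmetic result into WZR/XZR but sets the
// flags; CSINC Wd, WZR, WZR, NE then yields 1 on EQ and 0 on NE — i.e. the
// operation succeeded iff the value loaded equals the value expected.
inline bool cmpxchgSucceeded(uint64_t OldVal, uint64_t Desired) {
  return OldVal == Desired;
}
```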
Diffstat (limited to 'llvm/lib')
-rw-r--r-- | llvm/lib/Target/AArch64/AArch64FastISel.cpp | 27
-rw-r--r-- | llvm/lib/Target/AArch64/AArch64InstrAtomics.td | 14
2 files changed, 28 insertions(+), 13 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index c618c4f60e5..a2bb2b2922a 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -4955,14 +4955,16 @@ bool AArch64FastISel::selectAtomicCmpXchg(const AtomicCmpXchgInst *I) {
     return false;
 
   const TargetRegisterClass *ResRC;
-  unsigned Opc;
+  unsigned Opc, CmpOpc;
   // This only supports i32/i64, because i8/i16 aren't legal, and the generic
   // extractvalue selection doesn't support that.
   if (VT == MVT::i32) {
     Opc = AArch64::CMP_SWAP_32;
+    CmpOpc = AArch64::SUBSWrs;
     ResRC = &AArch64::GPR32RegClass;
   } else if (VT == MVT::i64) {
     Opc = AArch64::CMP_SWAP_64;
+    CmpOpc = AArch64::SUBSXrs;
     ResRC = &AArch64::GPR64RegClass;
   } else {
     return false;
   }
@@ -4979,14 +4981,27 @@ bool AArch64FastISel::selectAtomicCmpXchg(const AtomicCmpXchgInst *I) {
 
   const unsigned ResultReg1 = createResultReg(ResRC);
   const unsigned ResultReg2 = createResultReg(&AArch64::GPR32RegClass);
+  const unsigned ScratchReg = createResultReg(&AArch64::GPR32RegClass);
 
   // FIXME: MachineMemOperand doesn't support cmpxchg yet.
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
-      .addReg(ResultReg1, RegState::Define)
-      .addReg(ResultReg2, RegState::Define)
-      .addReg(AddrReg)
-      .addReg(DesiredReg)
-      .addReg(NewReg);
+      .addDef(ResultReg1)
+      .addDef(ScratchReg)
+      .addUse(AddrReg)
+      .addUse(DesiredReg)
+      .addUse(NewReg);
+
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
+      .addDef(VT == MVT::i32 ? AArch64::WZR : AArch64::XZR)
+      .addUse(ResultReg1)
+      .addUse(DesiredReg)
+      .addImm(0);
+
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr))
+      .addDef(ResultReg2)
+      .addUse(AArch64::WZR)
+      .addUse(AArch64::WZR)
+      .addImm(AArch64CC::NE);
 
   assert((ResultReg1 + 1) == ResultReg2 && "Nonconsecutive result registers.");
   updateValueMap(I, ResultReg1, 2);
diff --git a/llvm/lib/Target/AArch64/AArch64InstrAtomics.td b/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
index 59de62ad287..867074c3c37 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
@@ -377,28 +377,28 @@ def : Pat<(int_aarch64_clrex), (CLREX 0xf)>;
 // significantly more naive than the standard expansion: we conservatively
 // assume seq_cst, strong cmpxchg and omit clrex on failure.
 
-let Constraints = "@earlyclobber $Rd,@earlyclobber $status",
+let Constraints = "@earlyclobber $Rd,@earlyclobber $scratch",
     mayLoad = 1, mayStore = 1 in {
-def CMP_SWAP_8 : Pseudo<(outs GPR32:$Rd, GPR32:$status),
+def CMP_SWAP_8 : Pseudo<(outs GPR32:$Rd, GPR32:$scratch),
                         (ins GPR64:$addr, GPR32:$desired, GPR32:$new), []>,
                  Sched<[WriteAtomic]>;
-def CMP_SWAP_16 : Pseudo<(outs GPR32:$Rd, GPR32:$status),
+def CMP_SWAP_16 : Pseudo<(outs GPR32:$Rd, GPR32:$scratch),
                          (ins GPR64:$addr, GPR32:$desired, GPR32:$new), []>,
                   Sched<[WriteAtomic]>;
-def CMP_SWAP_32 : Pseudo<(outs GPR32:$Rd, GPR32:$status),
+def CMP_SWAP_32 : Pseudo<(outs GPR32:$Rd, GPR32:$scratch),
                          (ins GPR64:$addr, GPR32:$desired, GPR32:$new), []>,
                   Sched<[WriteAtomic]>;
-def CMP_SWAP_64 : Pseudo<(outs GPR64:$Rd, GPR32:$status),
+def CMP_SWAP_64 : Pseudo<(outs GPR64:$Rd, GPR32:$scratch),
                          (ins GPR64:$addr, GPR64:$desired, GPR64:$new), []>,
                   Sched<[WriteAtomic]>;
 }
 
-let Constraints = "@earlyclobber $RdLo,@earlyclobber $RdHi,@earlyclobber $status",
+let Constraints = "@earlyclobber $RdLo,@earlyclobber $RdHi,@earlyclobber $scratch",
     mayLoad = 1, mayStore = 1 in
-def CMP_SWAP_128 : Pseudo<(outs GPR64:$RdLo, GPR64:$RdHi, GPR32:$status),
+def CMP_SWAP_128 : Pseudo<(outs GPR64:$RdLo, GPR64:$RdHi, GPR32:$scratch),
                           (ins GPR64:$addr, GPR64:$desiredLo, GPR64:$desiredHi,
                                 GPR64:$newLo, GPR64:$newHi), []>,
                    Sched<[WriteAtomic]>;
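For reference, a small C++ program of the kind that reaches this path when compiled at -O0 (where FastISel handles the cmpxchg); this is an illustrative sketch, not the test case from PR28819:

```cpp
#include <atomic>
#include <cassert>

int main() {
  std::atomic<long> A{42};
  long Expected = 42;
  // compare_exchange_strong lowers to a cmpxchg whose i1 success result
  // FastISel previously derived from the pseudo's scratch register; the
  // flag must be true exactly when the loaded value matched Expected.
  bool OK = A.compare_exchange_strong(Expected, 7);
  assert(OK && A.load() == 7);
  return 0;
}
```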