Diffstat (limited to 'llvm/lib/Target/AArch64/AArch64FastISel.cpp')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FastISel.cpp | 43
1 file changed, 40 insertions, 3 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index e2ab7ab79be..ac98e6674e1 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -185,6 +185,8 @@ private:
                 MachineMemOperand *MMO = nullptr);
   bool emitStore(MVT VT, unsigned SrcReg, Address Addr,
                  MachineMemOperand *MMO = nullptr);
+  bool emitStoreRelease(MVT VT, unsigned SrcReg, unsigned AddrReg,
+                        MachineMemOperand *MMO = nullptr);
   unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
   unsigned emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt);
   unsigned emitAdd(MVT RetVT, const Value *LHS, const Value *RHS,
@@ -1997,6 +1999,28 @@ bool AArch64FastISel::selectLoad(const Instruction *I) {
   return true;
 }
 
+bool AArch64FastISel::emitStoreRelease(MVT VT, unsigned SrcReg,
+                                       unsigned AddrReg,
+                                       MachineMemOperand *MMO) {
+  unsigned Opc;
+  switch (VT.SimpleTy) {
+  default: return false;
+  case MVT::i8: Opc = AArch64::STLRB; break;
+  case MVT::i16: Opc = AArch64::STLRH; break;
+  case MVT::i32: Opc = AArch64::STLRW; break;
+  case MVT::i64: Opc = AArch64::STLRX; break;
+  }
+
+  const MCInstrDesc &II = TII.get(Opc);
+  SrcReg = constrainOperandRegClass(II, SrcReg, 0);
+  AddrReg = constrainOperandRegClass(II, AddrReg, 1);
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
+      .addReg(SrcReg)
+      .addReg(AddrReg)
+      .addMemOperand(MMO);
+  return true;
+}
+
 bool AArch64FastISel::emitStore(MVT VT, unsigned SrcReg, Address Addr,
                                 MachineMemOperand *MMO) {
   if (!TLI.allowsMisalignedMemoryAccesses(VT))
@@ -2071,8 +2095,7 @@ bool AArch64FastISel::selectStore(const Instruction *I) {
   // Verify we have a legal type before going any further. Currently, we handle
   // simple types that will directly fit in a register (i32/f32/i64/f64) or
   // those that can be sign or zero-extended to a basic operation (i1/i8/i16).
-  if (!isTypeSupported(Op0->getType(), VT, /*IsVectorAllowed=*/true) ||
-      cast<StoreInst>(I)->isAtomic())
+  if (!isTypeSupported(Op0->getType(), VT, /*IsVectorAllowed=*/true))
     return false;
 
   const Value *PtrV = I->getOperand(1);
@@ -2109,9 +2132,23 @@ bool AArch64FastISel::selectStore(const Instruction *I) {
   if (!SrcReg)
     return false;
 
+  auto *SI = cast<StoreInst>(I);
+
+  // Try to emit a STLR for seq_cst/release.
+  if (SI->isAtomic()) {
+    AtomicOrdering Ord = SI->getOrdering();
+    // The non-atomic instructions are sufficient for relaxed stores.
+    if (isReleaseOrStronger(Ord)) {
+      // The STLR addressing mode only supports a base reg; pass that directly.
+      unsigned AddrReg = getRegForValue(PtrV);
+      return emitStoreRelease(VT, SrcReg, AddrReg,
+                              createMachineMemOperandFor(I));
+    }
+  }
+
   // See if we can handle this address.
   Address Addr;
-  if (!computeAddress(I->getOperand(1), Addr, I->getOperand(0)->getType()))
+  if (!computeAddress(PtrV, Addr, Op0->getType()))
     return false;
 
   if (!emitStore(VT, SrcReg, Addr, createMachineMemOperandFor(I)))
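
For reference, a minimal sketch (not part of the patch; the function name and the use of the
__atomic_store_n builtin are illustrative) of source whose IR exercises the new path. Clang
lowers the builtin call below to roughly "store atomic i32 %v, i32* %p release, align 4", and
with this change FastISel at -O0 should select that store through emitStoreRelease() to a
single STLRW instead of falling back to SelectionDAG.

// Hypothetical example: a release store of a register-sized integer.
void publish(int *p, int v) {
  // Release ordering => isReleaseOrStronger() is true => STLR path.
  __atomic_store_n(p, v, __ATOMIC_RELEASE);
}

A seq_cst store takes the same STLR path, since isReleaseOrStronger() also accepts
SequentiallyConsistent; a relaxed (monotonic) store now falls through to the ordinary
emitStore() path instead, per the in-diff comment that the non-atomic instructions are
sufficient for relaxed stores.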

