author    Alex Bradbury <asb@lowrisc.org>  2018-09-19 10:54:22 +0000
committer Alex Bradbury <asb@lowrisc.org>  2018-09-19 10:54:22 +0000
commit    21aea51e71614b15545c69f84310b938520b069c (patch)
tree      628636b1c733777c5152513fba3c7bc2da48b37a /llvm/include
parent    e8d8aee537d4c2ecee173a46d14b7720b200bf64 (diff)
[RISCV] Codegen for i8, i16, and i32 atomicrmw with RV32A
Introduce a new RISCVExpandPseudoInsts pass to expand atomic
pseudo-instructions after register allocation. This is necessary in order to
ensure that register spills aren't introduced between LL and SC, thus breaking
the forward progress guarantee for the operation. AArch64 does something
similar for CmpXchg (though only at O0), and Mips is moving towards this
approach (see D31287).

See also this mailing list post from James Knight, which summarises the
issues with lowering to ll/sc in IR or pre-RA:
http://lists.llvm.org/pipermail/llvm-dev/2016-May/099490.html

See the accompanying RFC thread for an overview of the lowering strategy:
http://lists.llvm.org/pipermail/llvm-dev/2018-June/123993.html

Differential Revision: https://reviews.llvm.org/D47882

llvm-svn: 342534
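As a concrete illustration of the opt-in, here is a minimal sketch of how a
backend can request the new expansion kind via the existing
shouldExpandAtomicRMWInIR hook. The width check is an assumption for
illustration, not the literal heuristic from this patch:

    // Sketch: returning MaskedIntrinsic asks AtomicExpandPass to compute the
    // aligned address, mask, and shift amount for a sub-word atomicrmw and
    // then call the emitMaskedAtomicRMWIntrinsic hook added below.
    TargetLowering::AtomicExpansionKind
    RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
      unsigned Size = AI->getType()->getPrimitiveSizeInBits();
      if (Size == 8 || Size == 16)
        return AtomicExpansionKind::MaskedIntrinsic;
      return AtomicExpansionKind::None;
    }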
Diffstat (limited to 'llvm/include')
 llvm/include/llvm/CodeGen/TargetLowering.h | 12 ++++++++++++
 llvm/include/llvm/IR/Intrinsics.td         |  1 +
 llvm/include/llvm/IR/IntrinsicsRISCV.td    | 39 +++++++++++++++++++++++++++++
 3 files changed, 52 insertions(+), 0 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 17603023ee3..37d0572b9d1 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -163,6 +163,7 @@ public:
     LLOnly,  // Expand the (load) instruction into just a load-linked, which has
              // greater atomic guarantees than a normal load.
     CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
+    MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
   };
 
   /// Enum that specifies when a multiplication should be expanded.
@@ -1562,6 +1563,17 @@ public:
     llvm_unreachable("Store conditional unimplemented on this target");
   }
 
+  /// Perform a masked atomicrmw using a target-specific intrinsic. This
+  /// represents the core LL/SC loop which will be lowered at a late stage by
+  /// the backend.
+  virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilder<> &Builder,
+                                              AtomicRMWInst *AI,
+                                              Value *AlignedAddr, Value *Incr,
+                                              Value *Mask, Value *ShiftAmt,
+                                              AtomicOrdering Ord) const {
+    llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
+  }
+
   /// Inserts in the IR a target-specific intrinsic specifying a fence.
   /// It is called by AtomicExpandPass before expanding an
   /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
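A hedged sketch of how a backend might implement the new hook, assuming the
intrinsic operand order defined in IntrinsicsRISCV.td below (aligned pointer,
shifted operand, mask, ordering); a real override would also dispatch on
AI->getOperation() rather than hard-coding the 'add' intrinsic:

    // Sketch: emit a call to the target intrinsic that stands in for the
    // entire LL/SC loop until the post-RA expansion pass lowers it.
    Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
        IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr,
        Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
      // Encode the ordering as an immediate operand so the expansion pass
      // can pick the aq/rl bits for the LR.W/SC.W pair.
      Value *Ordering = Builder.getInt32(static_cast<uint32_t>(Ord));
      Type *Tys[] = {AlignedAddr->getType()};
      Function *LrScLoop = Intrinsic::getDeclaration(
          AI->getModule(), Intrinsic::riscv_masked_atomicrmw_add_i32, Tys);
      return Builder.CreateCall(LrScLoop, {AlignedAddr, Incr, Mask, Ordering});
    }

Keeping the loop opaque as a single intrinsic call (and, after instruction
selection, a single pseudo-instruction) is what prevents register allocation
from inserting spills between LR and SC.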
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 0cec754dd64..b405e86ef40 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1008,3 +1008,4 @@ include "llvm/IR/IntrinsicsAMDGPU.td"
 include "llvm/IR/IntrinsicsBPF.td"
 include "llvm/IR/IntrinsicsSystemZ.td"
 include "llvm/IR/IntrinsicsWebAssembly.td"
+include "llvm/IR/IntrinsicsRISCV.td"
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
new file mode 100644
index 00000000000..b656622b363
--- /dev/null
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -0,0 +1,39 @@
+//===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the RISCV-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+let TargetPrefix = "riscv" in {
+
+//===----------------------------------------------------------------------===//
+// Atomics
+
+class MaskedAtomicRMW32Intrinsic
+    : Intrinsic<[llvm_i32_ty],
+                [llvm_anyptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+                [IntrArgMemOnly, NoCapture<0>]>;
+
+class MaskedAtomicRMW32WithSextIntrinsic
+    : Intrinsic<[llvm_i32_ty],
+                [llvm_anyptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+                 llvm_i32_ty],
+                [IntrArgMemOnly, NoCapture<0>]>;
+
+def int_riscv_masked_atomicrmw_xchg_i32 : MaskedAtomicRMW32Intrinsic;
+def int_riscv_masked_atomicrmw_add_i32 : MaskedAtomicRMW32Intrinsic;
+def int_riscv_masked_atomicrmw_sub_i32 : MaskedAtomicRMW32Intrinsic;
+def int_riscv_masked_atomicrmw_nand_i32 : MaskedAtomicRMW32Intrinsic;
+def int_riscv_masked_atomicrmw_max_i32 : MaskedAtomicRMW32WithSextIntrinsic;
+def int_riscv_masked_atomicrmw_min_i32 : MaskedAtomicRMW32WithSextIntrinsic;
+def int_riscv_masked_atomicrmw_umax_i32 : MaskedAtomicRMW32Intrinsic;
+def int_riscv_masked_atomicrmw_umin_i32 : MaskedAtomicRMW32Intrinsic;
+
+} // TargetPrefix = "riscv"
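Two notes on the definitions above. The WithSext variants back the signed
min/max operations: they take one extra i32 operand so the expanded loop can
sign-extend the loaded sub-word field before the signed comparison (plausibly
the shift amount, as the hook's ShiftAmt parameter suggests), while the final
i32 operand of every variant carries the atomic ordering passed through from
the hook's Ord parameter. Because the pointer is llvm_anyptr_ty, each
intrinsic is overloaded on the pointer type, so declarations get a mangled
suffix (e.g., a name along the lines of
@llvm.riscv.masked.atomicrmw.add.i32.p0i32; the exact suffix here is an
assumption). IntrArgMemOnly plus NoCapture<0> tells the optimizer that a call
only touches memory through its pointer argument and does not capture it.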