author | Justin Lebar <jlebar@google.com> | 2017-11-07 22:10:54 +0000
---|---|---
committer | Justin Lebar <jlebar@google.com> | 2017-11-07 22:10:54 +0000
commit | da9e0bd3a238f80b05a3070c8a8a3cadadc12e5f (patch) |
tree | 011bda2e7fcd416411315d3d88afd8309e9aeb08 /llvm |
parent | 3ae8dfda06ff63d16673b1db9e0ce73ce0b8a3f5 (diff) |
[NVPTX] Implement __nvvm_atom_add_gen_d builtin.
Summary:
This just seems to have been an oversight. We already supported the f64
atomic add with an explicit scope (e.g. "cta"), but not the scopeless
version.
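
For illustration (not part of the original patch), here is a minimal CUDA sketch of how the scopeless builtin might be used from device code next to the already-supported "cta"-scoped form; the kernel and variable names are invented, and an sm_60+ target is assumed:

```cuda
// Hypothetical usage sketch; assumes clang CUDA compilation for sm_60+.
__global__ void accumulate(double *sum, double v) {
  // Scopeless f64 atomic add -- the builtin this patch makes work.
  __nvvm_atom_add_gen_d(sum, v);
  // The "cta"-scoped variant was already supported before this change.
  __nvvm_atom_cta_add_gen_d(sum, v);
}
```

Both builtins lower to the nvvm atomic-load-add intrinsic, which the changes below pattern-match to the appropriate `atom.add.f64` PTX instruction.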
Reviewers: tra
Subscribers: jholewinski, sanjoy, cfe-commits, llvm-commits, hiraditya
Differential Revision: https://reviews.llvm.org/D39638
llvm-svn: 317623
Diffstat (limited to 'llvm')
-rw-r--r-- | llvm/include/llvm/IR/IntrinsicsNVVM.td | 7
-rw-r--r-- | llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 1
-rw-r--r-- | llvm/lib/Target/NVPTX/NVPTXIntrinsics.td | 13
-rw-r--r-- | llvm/test/CodeGen/NVPTX/atomics-sm60.ll | 19
4 files changed, 39 insertions(+), 1 deletion(-)
diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index 7ba1a3eb2e5..249419d15d3 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -683,10 +683,15 @@ let TargetPrefix = "nvvm" in {
       Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
 
 
-// Atomic not available as an llvm intrinsic.
+// Atomics not available as llvm intrinsics.
   def int_nvvm_atomic_load_add_f32 : Intrinsic<[llvm_float_ty],
       [LLVMAnyPointerType<llvm_float_ty>, llvm_float_ty],
       [IntrArgMemOnly, NoCapture<0>]>;
+  // Atomic add of f64 requires sm_60.
+  def int_nvvm_atomic_load_add_f64 : Intrinsic<[llvm_double_ty],
+      [LLVMAnyPointerType<llvm_double_ty>, llvm_double_ty],
+      [IntrArgMemOnly, NoCapture<0>]>;
+
   def int_nvvm_atomic_load_inc_32 : Intrinsic<[llvm_i32_ty],
       [LLVMAnyPointerType<llvm_i32_ty>, llvm_i32_ty],
       [IntrArgMemOnly, NoCapture<0>]>;
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 7b9acb20b75..ac4f2544fc3 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -3449,6 +3449,7 @@ bool NVPTXTargetLowering::getTgtMemIntrinsic(
   }
 
   case Intrinsic::nvvm_atomic_load_add_f32:
+  case Intrinsic::nvvm_atomic_load_add_f64:
   case Intrinsic::nvvm_atomic_load_inc_32:
   case Intrinsic::nvvm_atomic_load_dec_32:
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index f745b6f6635..478f3e9d057 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -1095,6 +1095,12 @@ def atomic_load_add_f32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
   (int_nvvm_atomic_load_add_f32 node:$a, node:$b)>;
 def atomic_load_add_f32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
   (int_nvvm_atomic_load_add_f32 node:$a, node:$b)>;
+def atomic_load_add_f64_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
+  (int_nvvm_atomic_load_add_f64 node:$a, node:$b)>;
+def atomic_load_add_f64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
+  (int_nvvm_atomic_load_add_f64 node:$a, node:$b)>;
+def atomic_load_add_f64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
+  (int_nvvm_atomic_load_add_f64 node:$a, node:$b)>;
 
 defm INT_PTX_ATOM_ADD_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".u32", ".add",
   atomic_load_add_32_g, i32imm, imm, hasAtomRedG32>;
@@ -1121,6 +1127,13 @@ defm INT_PTX_ATOM_ADD_S_F32 : F_ATOMIC_2<Float32Regs, ".shared", ".f32", ".add",
   atomic_load_add_f32_s, f32imm, fpimm, hasAtomAddF32>;
 defm INT_PTX_ATOM_ADD_GEN_F32 : F_ATOMIC_2<Float32Regs, "", ".f32", ".add",
   atomic_load_add_f32_gen, f32imm, fpimm, hasAtomAddF32>;
+defm INT_PTX_ATOM_ADD_G_F64 : F_ATOMIC_2<Float64Regs, ".global", ".f64", ".add",
+  atomic_load_add_f64_g, f64imm, fpimm, hasAtomAddF64>;
+defm INT_PTX_ATOM_ADD_S_F64 : F_ATOMIC_2<Float64Regs, ".shared", ".f64", ".add",
+  atomic_load_add_f64_s, f64imm, fpimm, hasAtomAddF64>;
+defm INT_PTX_ATOM_ADD_GEN_F64 : F_ATOMIC_2<Float64Regs, "", ".f64", ".add",
+  atomic_load_add_f64_gen, f64imm, fpimm, hasAtomAddF64>;
+
 
 // atom_sub
 def atomic_load_sub_32_g: ATOMIC_GLOBAL_CHK<(ops node:$a, node:$b),
diff --git a/llvm/test/CodeGen/NVPTX/atomics-sm60.ll b/llvm/test/CodeGen/NVPTX/atomics-sm60.ll
new file mode 100644
index 00000000000..0b5bafb780c
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/atomics-sm60.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_60 | FileCheck %s
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_60 | FileCheck %s
+
+; CHECK-LABEL .func test(
+define void @test(double* %dp0, double addrspace(1)* %dp1, double addrspace(3)* %dp3, double %d) {
+; CHECK: atom.add.f64
+  %r1 = call double @llvm.nvvm.atomic.load.add.f64.p0f64(double* %dp0, double %d)
+; CHECK: atom.global.add.f64
+  %r2 = call double @llvm.nvvm.atomic.load.add.f64.p1f64(double addrspace(1)* %dp1, double %d)
+; CHECK: atom.shared.add.f64
+  %ret = call double @llvm.nvvm.atomic.load.add.f64.p3f64(double addrspace(3)* %dp3, double %d)
+  ret void
+}
+
+declare double @llvm.nvvm.atomic.load.add.f64.p0f64(double* nocapture, double) #1
+declare double @llvm.nvvm.atomic.load.add.f64.p1f64(double addrspace(1)* nocapture, double) #1
+declare double @llvm.nvvm.atomic.load.add.f64.p3f64(double addrspace(3)* nocapture, double) #1
+
+attributes #1 = { argmemonly nounwind }