author    Nicolai Haehnle <nhaehnle@gmail.com>  2016-02-18 16:44:18 +0000
committer Nicolai Haehnle <nhaehnle@gmail.com>  2016-02-18 16:44:18 +0000
commit    f2c64db55a851c41db9af373949b26d5a226973a (patch)
tree      fd516c3c899015d1fcf582d3f369a04004b64f12
parent    7a545536f56e0c7049c6f614b97e7573ceedf778 (diff)
AMDGPU/SI: add llvm.amdgcn.image.load/store[.mip] intrinsics
Summary:
These correspond to IMAGE_LOAD/STORE[_MIP] and are going to be used by Mesa
for the GL_ARB_shader_image_load_store extension.

IMAGE_LOAD is already matched by llvm.SI.image.load. That intrinsic has a
legacy name and pretends not to read memory.

Differential Revision: http://reviews.llvm.org/D17276

llvm-svn: 261224
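For context, a minimal IR-level usage sketch of the new intrinsics; the
argument values are illustrative, and the signatures mirror the declarations
added in the test below:

    declare <4 x float> @llvm.amdgcn.image.load.v4i32(<4 x i32>, <8 x i32>, i32, i1, i1, i1, i1)
    declare void @llvm.amdgcn.image.store.v4i32(<4 x float>, <4 x i32>, <8 x i32>, i32, i1, i1, i1, i1)

    ; Load all four components (dmask = 15) from the image described by the
    ; v8i32 resource %rsrc at coordinates %coords, with r128/da/glc/slc left
    ; at 0, then store the result back to the same location.
    %tex = call <4 x float> @llvm.amdgcn.image.load.v4i32(<4 x i32> %coords, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
    call void @llvm.amdgcn.image.store.v4i32(<4 x float> %tex, <4 x i32> %coords, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)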
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsAMDGPU.td       |  29
-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.cpp      |   7
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.td          |  39
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstructions.td       |  59
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.ll  | 111
5 files changed, 215 insertions(+), 30 deletions(-)
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index ef3c24080b8..54725c5b071 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -143,6 +143,35 @@ def int_amdgcn_cubetc : GCCBuiltin<"__builtin_amdgcn_cubetc">,
[llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]
>;
+class AMDGPUImageLoad : Intrinsic <
+ [llvm_v4f32_ty], // vdata(VGPR)
+ [llvm_anyint_ty, // vaddr(VGPR)
+ llvm_v8i32_ty, // rsrc(SGPR)
+ llvm_i32_ty, // dmask(imm)
+ llvm_i1_ty, // r128(imm)
+ llvm_i1_ty, // da(imm)
+ llvm_i1_ty, // glc(imm)
+ llvm_i1_ty], // slc(imm)
+ [IntrReadMem]>;
+
+def int_amdgcn_image_load : AMDGPUImageLoad;
+def int_amdgcn_image_load_mip : AMDGPUImageLoad;
+
+class AMDGPUImageStore : Intrinsic <
+ [],
+ [llvm_v4f32_ty, // vdata(VGPR)
+ llvm_anyint_ty, // vaddr(VGPR)
+ llvm_v8i32_ty, // rsrc(SGPR)
+ llvm_i32_ty, // dmask(imm)
+ llvm_i1_ty, // r128(imm)
+ llvm_i1_ty, // da(imm)
+ llvm_i1_ty, // glc(imm)
+ llvm_i1_ty], // slc(imm)
+ []>;
+
+def int_amdgcn_image_store : AMDGPUImageStore;
+def int_amdgcn_image_store_mip : AMDGPUImageStore;
+
def int_amdgcn_read_workdim : AMDGPUReadPreloadRegisterIntrinsic <
"__builtin_amdgcn_read_workdim">;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 818956b1e06..6306decee5c 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -2766,12 +2766,13 @@ SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
SelectionDAG &DAG) const {
const SIInstrInfo *TII =
static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
+ unsigned Opcode = Node->getMachineOpcode();
- if (TII->isMIMG(Node->getMachineOpcode()))
+ if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore())
adjustWritemask(Node, DAG);
- if (Node->getMachineOpcode() == AMDGPU::INSERT_SUBREG ||
- Node->getMachineOpcode() == AMDGPU::REG_SEQUENCE) {
+ if (Opcode == AMDGPU::INSERT_SUBREG ||
+ Opcode == AMDGPU::REG_SEQUENCE) {
legalizeTargetIndependentNode(Node, DAG);
return Node;
}
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index aad98429bce..55006df69f8 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -2905,12 +2905,12 @@ class MIMG_Helper <bits<7> op, dag outs, dag ins, string asm,
class MIMG_NoSampler_Helper <bits<7> op, string asm,
RegisterClass dst_rc,
- RegisterClass src_rc,
+ RegisterClass addr_rc,
string dns=""> : MIMG_Helper <
op,
(outs dst_rc:$vdata),
(ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
- i1imm:$tfe, i1imm:$lwe, i1imm:$slc, src_rc:$vaddr,
+ i1imm:$tfe, i1imm:$lwe, i1imm:$slc, addr_rc:$vaddr,
SReg_256:$srsrc),
asm#" $vdata, $dmask, $unorm, $glc, $da, $r128,"
#" $tfe, $lwe, $slc, $vaddr, $srsrc",
@@ -2937,6 +2937,41 @@ multiclass MIMG_NoSampler <bits<7> op, string asm> {
defm _V4 : MIMG_NoSampler_Src_Helper <op, asm, VReg_128, 4>;
}
+class MIMG_Store_Helper <bits<7> op, string asm,
+ RegisterClass data_rc,
+ RegisterClass addr_rc> : MIMG_Helper <
+ op,
+ (outs),
+ (ins data_rc:$vdata, i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
+ i1imm:$tfe, i1imm:$lwe, i1imm:$slc, addr_rc:$vaddr,
+ SReg_256:$srsrc),
+ asm#" $vdata, $dmask, $unorm, $glc, $da, $r128,"
+ #" $tfe, $lwe, $slc, $vaddr, $srsrc"> {
+ let ssamp = 0;
+ let mayLoad = 1; // TableGen requires this for matching with the intrinsics
+ let mayStore = 1;
+ let hasSideEffects = 1;
+ let hasPostISelHook = 0;
+}
+
+multiclass MIMG_Store_Addr_Helper <bits<7> op, string asm,
+ RegisterClass data_rc,
+ int channels> {
+ def _V1 : MIMG_Store_Helper <op, asm, data_rc, VGPR_32>,
+ MIMG_Mask<asm#"_V1", channels>;
+ def _V2 : MIMG_Store_Helper <op, asm, data_rc, VReg_64>,
+ MIMG_Mask<asm#"_V2", channels>;
+ def _V4 : MIMG_Store_Helper <op, asm, data_rc, VReg_128>,
+ MIMG_Mask<asm#"_V4", channels>;
+}
+
+multiclass MIMG_Store <bits<7> op, string asm> {
+ defm _V1 : MIMG_Store_Addr_Helper <op, asm, VGPR_32, 1>;
+ defm _V2 : MIMG_Store_Addr_Helper <op, asm, VReg_64, 2>;
+ defm _V3 : MIMG_Store_Addr_Helper <op, asm, VReg_96, 3>;
+ defm _V4 : MIMG_Store_Addr_Helper <op, asm, VReg_128, 4>;
+}
+
class MIMG_Sampler_Helper <bits<7> op, string asm,
RegisterClass dst_rc,
RegisterClass src_rc,
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 627f66a43d6..a3af5ee5d33 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -1063,8 +1063,8 @@ defm IMAGE_LOAD_MIP : MIMG_NoSampler <0x00000001, "image_load_mip">;
//def IMAGE_LOAD_PCK_SGN : MIMG_NoPattern_ <"image_load_pck_sgn", 0x00000003>;
//def IMAGE_LOAD_MIP_PCK : MIMG_NoPattern_ <"image_load_mip_pck", 0x00000004>;
//def IMAGE_LOAD_MIP_PCK_SGN : MIMG_NoPattern_ <"image_load_mip_pck_sgn", 0x00000005>;
-//def IMAGE_STORE : MIMG_NoPattern_ <"image_store", 0x00000008>;
-//def IMAGE_STORE_MIP : MIMG_NoPattern_ <"image_store_mip", 0x00000009>;
+defm IMAGE_STORE : MIMG_Store <0x00000008, "image_store">;
+defm IMAGE_STORE_MIP : MIMG_Store <0x00000009, "image_store_mip">;
//def IMAGE_STORE_PCK : MIMG_NoPattern_ <"image_store_pck", 0x0000000a>;
//def IMAGE_STORE_MIP_PCK : MIMG_NoPattern_ <"image_store_mip_pck", 0x0000000b>;
defm IMAGE_GET_RESINFO : MIMG_NoSampler <0x0000000e, "image_get_resinfo">;
@@ -2230,8 +2230,8 @@ multiclass SampleRawPatterns<SDPatternOperator name, string opcode> {
// Image only
class ImagePattern<SDPatternOperator name, MIMG opcode, ValueType vt> : Pat <
- (name vt:$addr, v8i32:$rsrc, i32:$dmask, i32:$unorm,
- i32:$r128, i32:$da, i32:$glc, i32:$slc, i32:$tfe, i32:$lwe),
+ (name vt:$addr, v8i32:$rsrc, imm:$dmask, imm:$unorm,
+ imm:$r128, imm:$da, imm:$glc, imm:$slc, imm:$tfe, imm:$lwe),
(opcode (as_i32imm $dmask), (as_i1imm $unorm), (as_i1imm $glc), (as_i1imm $da),
(as_i1imm $r128), (as_i1imm $tfe), (as_i1imm $lwe), (as_i1imm $slc),
$addr, $rsrc)
@@ -2243,6 +2243,32 @@ multiclass ImagePatterns<SDPatternOperator name, string opcode> {
def : ImagePattern<name, !cast<MIMG>(opcode # _V4_V4), v4i32>;
}
+class ImageLoadPattern<SDPatternOperator name, MIMG opcode, ValueType vt> : Pat <
+ (name vt:$addr, v8i32:$rsrc, imm:$dmask, imm:$r128, imm:$da, imm:$glc,
+ imm:$slc),
+ (opcode (as_i32imm $dmask), 1, (as_i1imm $glc), (as_i1imm $da),
+ (as_i1imm $r128), 0, 0, (as_i1imm $slc), $addr, $rsrc)
+>;
+
+multiclass ImageLoadPatterns<SDPatternOperator name, string opcode> {
+ def : ImageLoadPattern<name, !cast<MIMG>(opcode # _V4_V1), i32>;
+ def : ImageLoadPattern<name, !cast<MIMG>(opcode # _V4_V2), v2i32>;
+ def : ImageLoadPattern<name, !cast<MIMG>(opcode # _V4_V4), v4i32>;
+}
+
+class ImageStorePattern<SDPatternOperator name, MIMG opcode, ValueType vt> : Pat <
+ (name v4f32:$data, vt:$addr, v8i32:$rsrc, i32:$dmask, imm:$r128, imm:$da,
+ imm:$glc, imm:$slc),
+ (opcode $data, (as_i32imm $dmask), 1, (as_i1imm $glc), (as_i1imm $da),
+ (as_i1imm $r128), 0, 0, (as_i1imm $slc), $addr, $rsrc)
+>;
+
+multiclass ImageStorePatterns<SDPatternOperator name, string opcode> {
+ def : ImageStorePattern<name, !cast<MIMG>(opcode # _V4_V1), i32>;
+ def : ImageStorePattern<name, !cast<MIMG>(opcode # _V4_V2), v2i32>;
+ def : ImageStorePattern<name, !cast<MIMG>(opcode # _V4_V4), v4i32>;
+}
+
// Basic sample
defm : SampleRawPatterns<int_SI_image_sample, "IMAGE_SAMPLE">;
defm : SampleRawPatterns<int_SI_image_sample_cl, "IMAGE_SAMPLE_CL">;
@@ -2339,6 +2365,10 @@ def : SampleRawPattern<int_SI_getlod, IMAGE_GET_LOD_V4_V4, v4i32>;
def : ImagePattern<int_SI_getresinfo, IMAGE_GET_RESINFO_V4_V1, i32>;
defm : ImagePatterns<int_SI_image_load, "IMAGE_LOAD">;
defm : ImagePatterns<int_SI_image_load_mip, "IMAGE_LOAD_MIP">;
+defm : ImageLoadPatterns<int_amdgcn_image_load, "IMAGE_LOAD">;
+defm : ImageLoadPatterns<int_amdgcn_image_load_mip, "IMAGE_LOAD_MIP">;
+defm : ImageStorePatterns<int_amdgcn_image_store, "IMAGE_STORE">;
+defm : ImageStorePatterns<int_amdgcn_image_store_mip, "IMAGE_STORE_MIP">;
/* SIsample for simple 1D texture lookup */
def : Pat <
@@ -2420,27 +2450,6 @@ defm : SamplePatterns<IMAGE_SAMPLE_V4_V16, IMAGE_SAMPLE_C_V4_V16,
IMAGE_SAMPLE_D_V4_V16, IMAGE_SAMPLE_C_D_V4_V16,
v16i32>;
-/* int_SI_imageload for texture fetches consuming varying address parameters */
-class ImageLoadPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat <
- (name addr_type:$addr, v8i32:$rsrc, imm),
- (opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc)
->;
-
-class ImageLoadArrayPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat <
- (name addr_type:$addr, v8i32:$rsrc, TEX_ARRAY),
- (opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc)
->;
-
-class ImageLoadMSAAPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat <
- (name addr_type:$addr, v8i32:$rsrc, TEX_MSAA),
- (opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc)
->;
-
-class ImageLoadArrayMSAAPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat <
- (name addr_type:$addr, v8i32:$rsrc, TEX_ARRAY_MSAA),
- (opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc)
->;
-
/********** ============================================ **********/
/********** Extraction, Insertion, Building and Casting **********/
/********** ============================================ **********/
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.ll
new file mode 100644
index 00000000000..23c415b8d1d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.ll
@@ -0,0 +1,111 @@
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
+
+;CHECK-LABEL: {{^}}image_load_v4i32:
+;CHECK: image_load v[0:3], 15, -1, 0, 0, 0, 0, 0, 0, v[0:3], s[0:7]
+;CHECK: s_waitcnt vmcnt(0)
+define <4 x float> @image_load_v4i32(<8 x i32> inreg %rsrc, <4 x i32> %c) #0 {
+main_body:
+ %tex = call <4 x float> @llvm.amdgcn.image.load.v4i32(<4 x i32> %c, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
+ ret <4 x float> %tex
+}
+
+;CHECK-LABEL: {{^}}image_load_v2i32:
+;CHECK: image_load v[0:3], 15, -1, 0, 0, 0, 0, 0, 0, v[0:1], s[0:7]
+;CHECK: s_waitcnt vmcnt(0)
+define <4 x float> @image_load_v2i32(<8 x i32> inreg %rsrc, <2 x i32> %c) #0 {
+main_body:
+ %tex = call <4 x float> @llvm.amdgcn.image.load.v2i32(<2 x i32> %c, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
+ ret <4 x float> %tex
+}
+
+;CHECK-LABEL: {{^}}image_load_i32:
+;CHECK: image_load v[0:3], 15, -1, 0, 0, 0, 0, 0, 0, v0, s[0:7]
+;CHECK: s_waitcnt vmcnt(0)
+define <4 x float> @image_load_i32(<8 x i32> inreg %rsrc, i32 %c) #0 {
+main_body:
+ %tex = call <4 x float> @llvm.amdgcn.image.load.i32(i32 %c, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
+ ret <4 x float> %tex
+}
+
+;CHECK-LABEL: {{^}}image_load_mip:
+;CHECK: image_load_mip v[0:3], 15, -1, 0, 0, 0, 0, 0, 0, v[0:3], s[0:7]
+;CHECK: s_waitcnt vmcnt(0)
+define <4 x float> @image_load_mip(<8 x i32> inreg %rsrc, <4 x i32> %c) #0 {
+main_body:
+ %tex = call <4 x float> @llvm.amdgcn.image.load.mip.v4i32(<4 x i32> %c, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
+ ret <4 x float> %tex
+}
+
+;CHECK-LABEL: {{^}}image_load_1:
+;CHECK: image_load v0, 1, -1, 0, 0, 0, 0, 0, 0, v[0:3], s[0:7]
+;CHECK: s_waitcnt vmcnt(0)
+define float @image_load_1(<8 x i32> inreg %rsrc, <4 x i32> %c) #0 {
+main_body:
+ %tex = call <4 x float> @llvm.amdgcn.image.load.v4i32(<4 x i32> %c, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
+ %elt = extractelement <4 x float> %tex, i32 0
+; Only first component used, test that dmask etc. is changed accordingly
+ ret float %elt
+}
+
+;CHECK-LABEL: {{^}}image_store_v4i32:
+;CHECK: image_store v[0:3], 15, -1, 0, 0, 0, 0, 0, 0, v[4:7], s[0:7]
+define void @image_store_v4i32(<8 x i32> inreg %rsrc, <4 x float> %data, <4 x i32> %coords) #0 {
+main_body:
+ call void @llvm.amdgcn.image.store.v4i32(<4 x float> %data, <4 x i32> %coords, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
+ ret void
+}
+
+;CHECK-LABEL: {{^}}image_store_v2i32:
+;CHECK: image_store v[0:3], 15, -1, 0, 0, 0, 0, 0, 0, v[4:5], s[0:7]
+define void @image_store_v2i32(<8 x i32> inreg %rsrc, <4 x float> %data, <2 x i32> %coords) #0 {
+main_body:
+ call void @llvm.amdgcn.image.store.v2i32(<4 x float> %data, <2 x i32> %coords, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
+ ret void
+}
+
+;CHECK-LABEL: {{^}}image_store_i32:
+;CHECK: image_store v[0:3], 15, -1, 0, 0, 0, 0, 0, 0, v4, s[0:7]
+define void @image_store_i32(<8 x i32> inreg %rsrc, <4 x float> %data, i32 %coords) #0 {
+main_body:
+ call void @llvm.amdgcn.image.store.i32(<4 x float> %data, i32 %coords, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
+ ret void
+}
+
+;CHECK-LABEL: {{^}}image_store_mip:
+;CHECK: image_store_mip v[0:3], 15, -1, 0, 0, 0, 0, 0, 0, v[4:7], s[0:7]
+define void @image_store_mip(<8 x i32> inreg %rsrc, <4 x float> %data, <4 x i32> %coords) #0 {
+main_body:
+ call void @llvm.amdgcn.image.store.mip.v4i32(<4 x float> %data, <4 x i32> %coords, <8 x i32> %rsrc, i32 15, i1 0, i1 0, i1 0, i1 0)
+ ret void
+}
+
+; Ideally, the register allocator would avoid the wait here
+;
+;CHECK-LABEL: {{^}}image_store_wait:
+;CHECK: image_store v[0:3], 15, -1, 0, 0, 0, 0, 0, 0, v4, s[0:7]
+;CHECK: s_waitcnt vmcnt(0) expcnt(0)
+;CHECK: image_load v[0:3], 15, -1, 0, 0, 0, 0, 0, 0, v4, s[8:15]
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: image_store v[0:3], 15, -1, 0, 0, 0, 0, 0, 0, v4, s[16:23]
+define void @image_store_wait(<8 x i32> inreg, <8 x i32> inreg, <8 x i32> inreg, <4 x float>, i32) #0 {
+main_body:
+ call void @llvm.amdgcn.image.store.i32(<4 x float> %3, i32 %4, <8 x i32> %0, i32 15, i1 0, i1 0, i1 0, i1 0)
+ %data = call <4 x float> @llvm.amdgcn.image.load.i32(i32 %4, <8 x i32> %1, i32 15, i1 0, i1 0, i1 0, i1 0)
+ call void @llvm.amdgcn.image.store.i32(<4 x float> %data, i32 %4, <8 x i32> %2, i32 15, i1 0, i1 0, i1 0, i1 0)
+ ret void
+}
+
+declare void @llvm.amdgcn.image.store.i32(<4 x float>, i32, <8 x i32>, i32, i1, i1, i1, i1) #1
+declare void @llvm.amdgcn.image.store.v2i32(<4 x float>, <2 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #1
+declare void @llvm.amdgcn.image.store.v4i32(<4 x float>, <4 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #1
+declare void @llvm.amdgcn.image.store.mip.v4i32(<4 x float>, <4 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #1
+
+declare <4 x float> @llvm.amdgcn.image.load.i32(i32, <8 x i32>, i32, i1, i1, i1, i1) #2
+declare <4 x float> @llvm.amdgcn.image.load.v2i32(<2 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #2
+declare <4 x float> @llvm.amdgcn.image.load.v4i32(<4 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #2
+declare <4 x float> @llvm.amdgcn.image.load.mip.v4i32(<4 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #2
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind }
+attributes #2 = { nounwind readonly }