Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.dim.ll')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.dim.ll | 438
1 file changed, 434 insertions(+), 4 deletions(-)
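
For readers skimming the diff, here is a minimal standalone sketch (not part of the patch; the function name @tfe_sketch is illustrative) of the struct-returning intrinsic form the new TFE/LWE tests exercise. The intrinsic declaration is the one added at the bottom of the patch; as the tests below show, a texfailctrl operand of 1 selects tfe, 2 selects lwe, and 3 selects both, and the i32 member of the returned struct carries the error flag that the tests store to %out.

; Sketch only: load four channels from a 1D image with TFE enabled and
; hand the error flag back through a global pointer.
define amdgpu_ps <4 x float> @tfe_sketch(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s) {
main_body:
  %v = call {<4 x float>, i32} @llvm.amdgcn.image.load.1d.v4f32i32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
  %v.vec = extractvalue {<4 x float>, i32} %v, 0   ; texel data
  %v.err = extractvalue {<4 x float>, i32} %v, 1   ; TFE status/error flag
  store i32 %v.err, i32 addrspace(1)* %out, align 4
  ret <4 x float> %v.vec
}

declare {<4 x float>, i32} @llvm.amdgcn.image.load.1d.v4f32i32.i32(i32, i32, <8 x i32>, i32, i32)
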
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.dim.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.dim.ll
index bf93ffa937a..b297acab36c 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.dim.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.dim.ll
@@ -1,6 +1,7 @@
-; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI %s
-; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,SIVI,PRT %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,SIVI,PRT %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX900,PRT %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-enable-prt-strict-null -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX900,NOPRT %s
; GCN-LABEL: {{^}}load_1d:
; GCN: image_load v[0:3], v0, s[0:7] dmask:0xf unorm{{$}}
@@ -10,6 +11,52 @@ main_body:
ret <4 x float> %v
}
+; GCN-LABEL: {{^}}load_1d_tfe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load v[0:7], v{{[0-9]+}}, s[0:7] dmask:0xf unorm tfe{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_1d_tfe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s) {
+main_body:
+ %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.1d.v4f32i32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
+ %v.vec = extractvalue {<4 x float>, i32} %v, 0
+ %v.err = extractvalue {<4 x float>, i32} %v, 1
+ store i32 %v.err, i32 addrspace(1)* %out, align 4
+ ret <4 x float> %v.vec
+}
+
+; GCN-LABEL: {{^}}load_1d_lwe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load v[0:7], v{{[0-9]+}}, s[0:7] dmask:0xf unorm lwe{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_1d_lwe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s) {
+main_body:
+ %v = call {<4 x float>, i32} @llvm.amdgcn.image.load.1d.v4f32i32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 2, i32 0)
+ %v.vec = extractvalue {<4 x float>, i32} %v, 0
+ %v.err = extractvalue {<4 x float>, i32} %v, 1
+ store i32 %v.err, i32 addrspace(1)* %out, align 4
+ ret <4 x float> %v.vec
+}
+
; GCN-LABEL: {{^}}load_2d:
; GCN: image_load v[0:3], v[0:1], s[0:7] dmask:0xf unorm{{$}}
define amdgpu_ps <4 x float> @load_2d(<8 x i32> inreg %rsrc, i32 %s, i32 %t) {
@@ -18,6 +65,29 @@ main_body:
ret <4 x float> %v
}
+; GCN-LABEL: {{^}}load_2d_tfe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load v[0:7], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf unorm tfe{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_2d_tfe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t) {
+main_body:
+ %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.2d.v4f32i32.i32(i32 15, i32 %s, i32 %t, <8 x i32> %rsrc, i32 1, i32 0)
+ %v.vec = extractvalue {<4 x float>, i32} %v, 0
+ %v.err = extractvalue {<4 x float>, i32} %v, 1
+ store i32 %v.err, i32 addrspace(1)* %out, align 4
+ ret <4 x float> %v.vec
+}
+
; GCN-LABEL: {{^}}load_3d:
; GCN: image_load v[0:3], v[0:3], s[0:7] dmask:0xf unorm{{$}}
define amdgpu_ps <4 x float> @load_3d(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %r) {
@@ -26,6 +96,29 @@ main_body:
ret <4 x float> %v
}
+; GCN-LABEL: {{^}}load_3d_tfe_lwe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load v[0:7], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf unorm tfe lwe{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_3d_tfe_lwe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t, i32 %r) {
+main_body:
+ %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.3d.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %r, <8 x i32> %rsrc, i32 3, i32 0)
+ %v.vec = extractvalue {<4 x float>, i32} %v, 0
+ %v.err = extractvalue {<4 x float>, i32} %v, 1
+ store i32 %v.err, i32 addrspace(1)* %out, align 4
+ ret <4 x float> %v.vec
+}
+
; GCN-LABEL: {{^}}load_cube:
; GCN: image_load v[0:3], v[0:3], s[0:7] dmask:0xf unorm da{{$}}
define amdgpu_ps <4 x float> @load_cube(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %slice) {
@@ -34,6 +127,29 @@ main_body:
ret <4 x float> %v
}
+; GCN-LABEL: {{^}}load_cube_lwe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load v[0:7], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf unorm lwe da{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_cube_lwe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t, i32 %slice) {
+main_body:
+ %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.cube.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %slice, <8 x i32> %rsrc, i32 2, i32 0)
+ %v.vec = extractvalue {<4 x float>, i32} %v, 0
+ %v.err = extractvalue {<4 x float>, i32} %v, 1
+ store i32 %v.err, i32 addrspace(1)* %out, align 4
+ ret <4 x float> %v.vec
+}
+
; GCN-LABEL: {{^}}load_1darray:
; GCN: image_load v[0:3], v[0:1], s[0:7] dmask:0xf unorm da{{$}}
define amdgpu_ps <4 x float> @load_1darray(<8 x i32> inreg %rsrc, i32 %s, i32 %slice) {
@@ -42,6 +158,29 @@ main_body:
ret <4 x float> %v
}
+; GCN-LABEL: {{^}}load_1darray_tfe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load v[0:7], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf unorm tfe da{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_1darray_tfe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %slice) {
+main_body:
+ %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.1darray.v4f32i32.i32(i32 15, i32 %s, i32 %slice, <8 x i32> %rsrc, i32 1, i32 0)
+ %v.vec = extractvalue {<4 x float>, i32} %v, 0
+ %v.err = extractvalue {<4 x float>, i32} %v, 1
+ store i32 %v.err, i32 addrspace(1)* %out, align 4
+ ret <4 x float> %v.vec
+}
+
; GCN-LABEL: {{^}}load_2darray:
; GCN: image_load v[0:3], v[0:3], s[0:7] dmask:0xf unorm da{{$}}
define amdgpu_ps <4 x float> @load_2darray(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %slice) {
@@ -50,6 +189,29 @@ main_body:
ret <4 x float> %v
}
+; GCN-LABEL: {{^}}load_2darray_lwe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load v[0:7], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf unorm lwe da{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_2darray_lwe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t, i32 %slice) {
+main_body:
+ %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.2darray.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %slice, <8 x i32> %rsrc, i32 2, i32 0)
+ %v.vec = extractvalue {<4 x float>, i32} %v, 0
+ %v.err = extractvalue {<4 x float>, i32} %v, 1
+ store i32 %v.err, i32 addrspace(1)* %out, align 4
+ ret <4 x float> %v.vec
+}
+
; GCN-LABEL: {{^}}load_2dmsaa:
; GCN: image_load v[0:3], v[0:3], s[0:7] dmask:0xf unorm{{$}}
define amdgpu_ps <4 x float> @load_2dmsaa(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %fragid) {
@@ -58,6 +220,29 @@ main_body:
ret <4 x float> %v
}
+; GCN-LABEL: {{^}}load_2dmsaa_both:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load v[0:7], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf unorm tfe lwe{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_2dmsaa_both(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t, i32 %fragid) {
+main_body:
+ %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.2dmsaa.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %fragid, <8 x i32> %rsrc, i32 3, i32 0)
+ %v.vec = extractvalue {<4 x float>, i32} %v, 0
+ %v.err = extractvalue {<4 x float>, i32} %v, 1
+ store i32 %v.err, i32 addrspace(1)* %out, align 4
+ ret <4 x float> %v.vec
+}
+
; GCN-LABEL: {{^}}load_2darraymsaa:
; GCN: image_load v[0:3], v[0:3], s[0:7] dmask:0xf unorm da{{$}}
define amdgpu_ps <4 x float> @load_2darraymsaa(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %slice, i32 %fragid) {
@@ -66,6 +251,29 @@ main_body:
ret <4 x float> %v
}
+; GCN-LABEL: {{^}}load_2darraymsaa_tfe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load v[0:7], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf unorm tfe da{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_2darraymsaa_tfe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t, i32 %slice, i32 %fragid) {
+main_body:
+ %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.2darraymsaa.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %slice, i32 %fragid, <8 x i32> %rsrc, i32 1, i32 0)
+ %v.vec = extractvalue {<4 x float>, i32} %v, 0
+ %v.err = extractvalue {<4 x float>, i32} %v, 1
+ store i32 %v.err, i32 addrspace(1)* %out, align 4
+ ret <4 x float> %v.vec
+}
+
; GCN-LABEL: {{^}}load_mip_1d:
; GCN: image_load_mip v[0:3], v[0:1], s[0:7] dmask:0xf unorm{{$}}
define amdgpu_ps <4 x float> @load_mip_1d(<8 x i32> inreg %rsrc, i32 %s, i32 %mip) {
@@ -74,6 +282,29 @@ main_body:
ret <4 x float> %v
}
+; GCN-LABEL: {{^}}load_mip_1d_lwe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load_mip v[0:7], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf unorm lwe{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_mip_1d_lwe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %mip) {
+main_body:
+ %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.mip.1d.v4f32i32.i32(i32 15, i32 %s, i32 %mip, <8 x i32> %rsrc, i32 2, i32 0)
+ %v.vec = extractvalue {<4 x float>, i32} %v, 0
+ %v.err = extractvalue {<4 x float>, i32} %v, 1
+ store i32 %v.err, i32 addrspace(1)* %out, align 4
+ ret <4 x float> %v.vec
+}
+
; GCN-LABEL: {{^}}load_mip_2d:
; GCN: image_load_mip v[0:3], v[0:3], s[0:7] dmask:0xf unorm{{$}}
define amdgpu_ps <4 x float> @load_mip_2d(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %mip) {
@@ -82,6 +313,191 @@ main_body:
ret <4 x float> %v
}
+; GCN-LABEL: {{^}}load_mip_2d_tfe:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v4, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT-NOT: v_mov_b32_e32 v3
+; GCN: image_load_mip v[0:7], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf unorm tfe{{$}}
+; SIVI: buffer_store_dword v4, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v4
+define amdgpu_ps <4 x float> @load_mip_2d_tfe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t, i32 %mip) {
+main_body:
+ %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.mip.2d.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %mip, <8 x i32> %rsrc, i32 1, i32 0)
+ %v.vec = extractvalue {<4 x float>, i32} %v, 0
+ %v.err = extractvalue {<4 x float>, i32} %v, 1
+ store i32 %v.err, i32 addrspace(1)* %out, align 4
+ ret <4 x float> %v.vec
+}
+
+; Make sure that error flag is returned even with dmask 0
+; GCN-LABEL: {{^}}load_1d_V2_tfe_dmask0:
+; GCN: v_mov_b32_e32 v1, 0
+; PRT-DAG: v_mov_b32_e32 v2, v1
+; PRT: image_load v[1:2], v0, s[0:7] dmask:0x1 unorm tfe{{$}}
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT: image_load v[0:1], v0, s[0:7] dmask:0x1 unorm tfe{{$}}
+define amdgpu_ps float @load_1d_V2_tfe_dmask0(<8 x i32> inreg %rsrc, i32 %s) {
+main_body:
+ %v = call {<2 x float>,i32} @llvm.amdgcn.image.load.1d.v2f32i32.i32(i32 0, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
+ %v.err = extractvalue {<2 x float>, i32} %v, 1
+ %vv = bitcast i32 %v.err to float
+ ret float %vv
+}
+
+; GCN-LABEL: {{^}}load_1d_V1_tfe_dmask0:
+; GCN: v_mov_b32_e32 v1, 0
+; PRT-DAG: v_mov_b32_e32 v2, v1
+; PRT: image_load v[1:2], v0, s[0:7] dmask:0x1 unorm tfe{{$}}
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT: image_load v[0:1], v0, s[0:7] dmask:0x1 unorm tfe{{$}}
+define amdgpu_ps float @load_1d_V1_tfe_dmask0(<8 x i32> inreg %rsrc, i32 %s) {
+main_body:
+ %v = call {float,i32} @llvm.amdgcn.image.load.1d.f32i32.i32(i32 0, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
+ %v.err = extractvalue {float, i32} %v, 1
+ %vv = bitcast i32 %v.err to float
+ ret float %vv
+}
+
+; GCN-LABEL: {{^}}load_mip_2d_tfe_dmask0:
+; GCN: v_mov_b32_e32 v3, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v3
+; PRT: image_load_mip v[3:4], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0x1 unorm tfe{{$}}
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT: image_load_mip v[2:3], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0x1 unorm tfe{{$}}
+define amdgpu_ps float @load_mip_2d_tfe_dmask0(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %mip) {
+main_body:
+ %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.mip.2d.v4f32i32.i32(i32 0, i32 %s, i32 %t, i32 %mip, <8 x i32> %rsrc, i32 1, i32 0)
+ %v.err = extractvalue {<4 x float>, i32} %v, 1
+ %vv = bitcast i32 %v.err to float
+ ret float %vv
+}
+
+; Do not make dmask 0 even if no result (other than tfe) is used.
+; GCN-LABEL: {{^}}load_mip_2d_tfe_nouse:
+; GCN: v_mov_b32_e32 v3, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v3
+; PRT: image_load_mip v[3:4], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0x1 unorm tfe{{$}}
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT: image_load_mip v[2:3], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0x1 unorm tfe{{$}}
+define amdgpu_ps float @load_mip_2d_tfe_nouse(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %mip) {
+main_body:
+ %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.mip.2d.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %mip, <8 x i32> %rsrc, i32 1, i32 0)
+ %v.err = extractvalue {<4 x float>, i32} %v, 1
+ %vv = bitcast i32 %v.err to float
+ ret float %vv
+}
+
+; GCN-LABEL: {{^}}load_mip_2d_tfe_nouse_V2:
+; GCN: v_mov_b32_e32 v3, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v3
+; PRT: image_load_mip v[3:4], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0x1 unorm tfe{{$}}
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT: image_load_mip v[2:3], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0x1 unorm tfe{{$}}
+define amdgpu_ps float @load_mip_2d_tfe_nouse_V2(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %mip) {
+main_body:
+ %v = call {<2 x float>,i32} @llvm.amdgcn.image.load.mip.2d.v2f32i32.i32(i32 6, i32 %s, i32 %t, i32 %mip, <8 x i32> %rsrc, i32 1, i32 0)
+ %v.err = extractvalue {<2 x float>, i32} %v, 1
+ %vv = bitcast i32 %v.err to float
+ ret float %vv
+}
+
+; GCN-LABEL: {{^}}load_mip_2d_tfe_nouse_V1:
+; GCN: v_mov_b32_e32 v3, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v3
+; PRT: image_load_mip v[3:4], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0x2 unorm tfe{{$}}
+; NOPRT-NOT: v_mov_b32_e32 v2
+; NOPRT: image_load_mip v[2:3], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0x2 unorm tfe{{$}}
+define amdgpu_ps float @load_mip_2d_tfe_nouse_V1(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %mip) {
+main_body:
+ %v = call {float, i32} @llvm.amdgcn.image.load.mip.2d.f32i32.i32(i32 2, i32 %s, i32 %t, i32 %mip, <8 x i32> %rsrc, i32 1, i32 0)
+ %v.err = extractvalue {float, i32} %v, 1
+ %vv = bitcast i32 %v.err to float
+ ret float %vv
+}
+
+; Check for dmask being materially smaller than return type
+; GCN-LABEL: {{^}}load_1d_tfe_V4_dmask3:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v3, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; NOPRT-NOT: v_mov_b32_e32 v2
+; GCN: image_load v[0:3], v{{[0-9]+}}, s[0:7] dmask:0x7 unorm tfe{{$}}
+; SIVI: buffer_store_dword v3, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v3
+define amdgpu_ps <4 x float> @load_1d_tfe_V4_dmask3(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s) {
+main_body:
+ %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.1d.v4f32i32.i32(i32 7, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
+ %v.vec = extractvalue {<4 x float>, i32} %v, 0
+ %v.err = extractvalue {<4 x float>, i32} %v, 1
+ store i32 %v.err, i32 addrspace(1)* %out, align 4
+ ret <4 x float> %v.vec
+}
+
+; GCN-LABEL: {{^}}load_1d_tfe_V4_dmask2:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v2, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; NOPRT-NOT: v_mov_b32_e32 v1
+; GCN: image_load v[0:3], v{{[0-9]+}}, s[0:7] dmask:0x6 unorm tfe{{$}}
+; SIVI: buffer_store_dword v2, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v2
+define amdgpu_ps <4 x float> @load_1d_tfe_V4_dmask2(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s) {
+main_body:
+ %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.1d.v4f32i32.i32(i32 6, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
+ %v.vec = extractvalue {<4 x float>, i32} %v, 0
+ %v.err = extractvalue {<4 x float>, i32} %v, 1
+ store i32 %v.err, i32 addrspace(1)* %out, align 4
+ ret <4 x float> %v.vec
+}
+
+; GCN-LABEL: {{^}}load_1d_tfe_V4_dmask1:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v1, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; GCN: image_load v[0:1], v{{[0-9]+}}, s[0:7] dmask:0x8 unorm tfe{{$}}
+; SIVI: buffer_store_dword v1, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v1
+define amdgpu_ps <4 x float> @load_1d_tfe_V4_dmask1(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s) {
+main_body:
+ %v = call {<4 x float>,i32} @llvm.amdgcn.image.load.1d.v4f32i32.i32(i32 8, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
+ %v.vec = extractvalue {<4 x float>, i32} %v, 0
+ %v.err = extractvalue {<4 x float>, i32} %v, 1
+ store i32 %v.err, i32 addrspace(1)* %out, align 4
+ ret <4 x float> %v.vec
+}
+
+; GCN-LABEL: {{^}}load_1d_tfe_V2_dmask1:
+; PRT: v_mov_b32_e32 v0, 0
+; PRT-DAG: v_mov_b32_e32 v{{[0-9]+}}, v0
+; NOPRT: v_mov_b32_e32 v1, 0
+; NOPRT-NOT: v_mov_b32_e32 v0
+; GCN: image_load v[0:1], v{{[0-9]+}}, s[0:7] dmask:0x8 unorm tfe{{$}}
+; SIVI: buffer_store_dword v1, off, s[8:11], 0
+; GFX900: global_store_dword v[{{[0-9]+:[0-9]+}}], v1
+define amdgpu_ps <2 x float> @load_1d_tfe_V2_dmask1(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s) {
+main_body:
+ %v = call {<2 x float>,i32} @llvm.amdgcn.image.load.1d.v2f32i32.i32(i32 8, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
+ %v.vec = extractvalue {<2 x float>, i32} %v, 0
+ %v.err = extractvalue {<2 x float>, i32} %v, 1
+ store i32 %v.err, i32 addrspace(1)* %out, align 4
+ ret <2 x float> %v.vec
+}
+
+
; GCN-LABEL: {{^}}load_mip_3d:
; GCN: image_load_mip v[0:3], v[0:3], s[0:7] dmask:0xf unorm{{$}}
define amdgpu_ps <4 x float> @load_mip_3d(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %r, i32 %mip) {
@@ -404,23 +820,37 @@ define amdgpu_ps float @image_load_mmo(<8 x i32> inreg %rsrc, float addrspace(3)
store float 0.000000e+00, float addrspace(3)* %lds
%c0 = extractelement <2 x i32> %c, i32 0
%c1 = extractelement <2 x i32> %c, i32 1
- %tex = call float @llvm.amdgcn.image.load.2d.f32.i32(i32 15, i32 %c0, i32 %c1, <8 x i32> %rsrc, i32 0, i32 0)
+ %tex = call float @llvm.amdgcn.image.load.2d.f32.i32(i32 1, i32 %c0, i32 %c1, <8 x i32> %rsrc, i32 0, i32 0)
%tmp2 = getelementptr float, float addrspace(3)* %lds, i32 4
store float 0.000000e+00, float addrspace(3)* %tmp2
ret float %tex
}
declare <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i32(i32, i32, <8 x i32>, i32, i32) #1
+declare {float,i32} @llvm.amdgcn.image.load.1d.f32i32.i32(i32, i32, <8 x i32>, i32, i32) #1
+declare {<2 x float>,i32} @llvm.amdgcn.image.load.1d.v2f32i32.i32(i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.1d.v4f32i32.i32(i32, i32, <8 x i32>, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.load.2d.v4f32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.2d.v4f32i32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.load.3d.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.3d.v4f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.load.cube.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.cube.v4f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.load.1darray.v4f32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.1darray.v4f32i32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.load.2darray.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.2darray.v4f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.load.2dmsaa.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.2dmsaa.v4f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.load.2darraymsaa.v4f32.i32(i32, i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.2darraymsaa.v4f32i32.i32(i32, i32, i32, i32, i32, <8 x i32>, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.mip.1d.v4f32i32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.load.mip.2d.v4f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<2 x float>,i32} @llvm.amdgcn.image.load.mip.2d.v2f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {float,i32} @llvm.amdgcn.image.load.mip.2d.f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.load.mip.3d.v4f32.i32(i32, i32, i32, i32, i32, <8 x i32>, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.load.mip.cube.v4f32.i32(i32, i32, i32, i32, i32, <8 x i32>, i32, i32) #1
declare <4 x float> @llvm.amdgcn.image.load.mip.1darray.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1