author     Matt Arsenault <Matthew.Arsenault@amd.com>   2019-05-03 15:37:07 +0000
committer  Matt Arsenault <Matthew.Arsenault@amd.com>   2019-05-03 15:37:07 +0000
commit     657ef48a882299c3216aa616e0e56366151fb8d0 (patch)
tree       2603965ea12bb28a203bd9a584fa30ee0c600c12
parent     cfd0ca38b09b50fab0b1dfd6533b16bc2993c676 (diff)
download   bcm5719-llvm-657ef48a882299c3216aa616e0e56366151fb8d0.tar.gz
           bcm5719-llvm-657ef48a882299c3216aa616e0e56366151fb8d0.zip
AMDGPU: Select VOP3 form of sub
The VOP3 form should always be the preferred selection form, to be shrunk
later. The r600 sub test needs to be split out because it asserts on the
arguments in the new test during calling convention lowering.

llvm-svn: 359899
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP2Instructions.td |   4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/r600.sub.ll       | 152
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sub.ll            |  90
3 files changed, 196 insertions, 50 deletions
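
For context before the diff itself, below is a minimal illustrative sketch (not part of the patch; the function name, the choice of -mcpu=fiji, and the exact registers in the check line are assumptions) of the case the change targets: a divergent sub between a VGPR and an SGPR while vcc is held live by inline asm. Selecting the VOP3 (e64) form up front lets the carry-out land in an arbitrary SGPR pair rather than forcing copies from/to vcc, and the instruction can still be shrunk back to an e32 encoding later when the extra operands are not needed.

; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck %s
; The e64 form can write its carry-out to an arbitrary SGPR pair instead of
; clobbering vcc, which the inline asm below keeps live:
; CHECK: v_subrev_u32_e64 v0, s{{\[[0-9]+:[0-9]+\]}}, s0, v0
define amdgpu_ps void @vop3_sub_sketch(i32 inreg %s, i32 %v) {
  %vcc = call i64 asm sideeffect "; def vcc", "={vcc}"()   ; hold vcc across the sub
  %sub = sub i32 %v, %s                                    ; divergent VGPR - SGPR sub
  store i32 %sub, i32 addrspace(3)* undef                  ; keep the result live (LDS store)
  call void asm sideeffect "; use vcc", "{vcc}"(i64 %vcc)
  ret void
}
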
diff --git a/llvm/lib/Target/AMDGPU/VOP2Instructions.td b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
index 3d7c75a3a04..f7e51cfa913 100644
--- a/llvm/lib/Target/AMDGPU/VOP2Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
@@ -525,12 +525,12 @@ def : DivergentBinOp<shl, V_LSHLREV_B32_e64>;
let SubtargetPredicate = HasAddNoCarryInsts in {
def : DivergentBinOp<add, V_ADD_U32_e32>;
- def : DivergentBinOp<sub, V_SUB_U32_e32>;
+ def : DivergentClampingBinOp<sub, V_SUB_U32_e64>;
}
let SubtargetPredicate = isGFX6GFX7GFX8GFX9, Predicates = [isGFX6GFX7GFX8GFX9] in {
def : DivergentBinOp<add, V_ADD_I32_e32>;
-def : DivergentBinOp<sub, V_SUB_I32_e32>;
+def : DivergentClampingBinOp<sub, V_SUB_I32_e64>;
def : DivergentBinOp<adde, V_ADDC_U32_e32>;
def : DivergentBinOp<sube, V_SUBB_U32_e32>;
diff --git a/llvm/test/CodeGen/AMDGPU/r600.sub.ll b/llvm/test/CodeGen/AMDGPU/r600.sub.ll
new file mode 100644
index 00000000000..2ded4f64328
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/r600.sub.ll
@@ -0,0 +1,152 @@
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=EG,FUNC %s
+
+declare i32 @llvm.r600.read.tidig.x() readnone
+
+; FUNC-LABEL: {{^}}s_sub_i32:
+define amdgpu_kernel void @s_sub_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+ %result = sub i32 %a, %b
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_sub_imm_i32:
+define amdgpu_kernel void @s_sub_imm_i32(i32 addrspace(1)* %out, i32 %a) {
+ %result = sub i32 1234, %a
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_sub_i32:
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+define amdgpu_kernel void @test_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
+ %a = load i32, i32 addrspace(1)* %in
+ %b = load i32, i32 addrspace(1)* %b_ptr
+ %result = sub i32 %a, %b
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_sub_imm_i32:
+; EG: SUB_INT
+define amdgpu_kernel void @test_sub_imm_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+ %a = load i32, i32 addrspace(1)* %in
+ %result = sub i32 123, %a
+ store i32 %result, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_sub_v2i32:
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+define amdgpu_kernel void @test_sub_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+ %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
+ %a = load <2 x i32>, <2 x i32> addrspace(1) * %in
+ %b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
+ %result = sub <2 x i32> %a, %b
+ store <2 x i32> %result, <2 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_sub_v4i32:
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+define amdgpu_kernel void @test_sub_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
+ %a = load <4 x i32>, <4 x i32> addrspace(1) * %in
+ %b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
+ %result = sub <4 x i32> %a, %b
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_sub_i16:
+define amdgpu_kernel void @test_sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %gep = getelementptr i16, i16 addrspace(1)* %in, i32 %tid
+ %b_ptr = getelementptr i16, i16 addrspace(1)* %gep, i32 1
+ %a = load volatile i16, i16 addrspace(1)* %gep
+ %b = load volatile i16, i16 addrspace(1)* %b_ptr
+ %result = sub i16 %a, %b
+ store i16 %result, i16 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_sub_v2i16:
+define amdgpu_kernel void @test_sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %gep = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in, i32 %tid
+ %b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %gep, i16 1
+ %a = load <2 x i16>, <2 x i16> addrspace(1)* %gep
+ %b = load <2 x i16>, <2 x i16> addrspace(1)* %b_ptr
+ %result = sub <2 x i16> %a, %b
+ store <2 x i16> %result, <2 x i16> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_sub_v4i16:
+define amdgpu_kernel void @test_sub_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %gep = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %in, i32 %tid
+ %b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %gep, i16 1
+ %a = load <4 x i16>, <4 x i16> addrspace(1) * %gep
+ %b = load <4 x i16>, <4 x i16> addrspace(1) * %b_ptr
+ %result = sub <4 x i16> %a, %b
+ store <4 x i16> %result, <4 x i16> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}s_sub_i64:
+; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
+; EG-DAG: SUB_INT {{[* ]*}}
+; EG-DAG: SUBB_UINT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT {{[* ]*}}
+define amdgpu_kernel void @s_sub_i64(i64 addrspace(1)* noalias %out, i64 %a, i64 %b) nounwind {
+ %result = sub i64 %a, %b
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_sub_i64:
+; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
+; EG-DAG: SUB_INT {{[* ]*}}
+; EG-DAG: SUBB_UINT
+; EG-DAG: SUB_INT
+; EG-DAG: SUB_INT {{[* ]*}}
+define amdgpu_kernel void @v_sub_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %inA, i64 addrspace(1)* noalias %inB) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x() readnone
+ %a_ptr = getelementptr i64, i64 addrspace(1)* %inA, i32 %tid
+ %b_ptr = getelementptr i64, i64 addrspace(1)* %inB, i32 %tid
+ %a = load i64, i64 addrspace(1)* %a_ptr
+ %b = load i64, i64 addrspace(1)* %b_ptr
+ %result = sub i64 %a, %b
+ store i64 %result, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_test_sub_v2i64:
+define amdgpu_kernel void @v_test_sub_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* noalias %inA, <2 x i64> addrspace(1)* noalias %inB) {
+ %tid = call i32 @llvm.r600.read.tidig.x() readnone
+ %a_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inA, i32 %tid
+ %b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inB, i32 %tid
+ %a = load <2 x i64>, <2 x i64> addrspace(1)* %a_ptr
+ %b = load <2 x i64>, <2 x i64> addrspace(1)* %b_ptr
+ %result = sub <2 x i64> %a, %b
+ store <2 x i64> %result, <2 x i64> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_test_sub_v4i64:
+define amdgpu_kernel void @v_test_sub_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* noalias %inA, <4 x i64> addrspace(1)* noalias %inB) {
+ %tid = call i32 @llvm.r600.read.tidig.x() readnone
+ %a_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %inA, i32 %tid
+ %b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %inB, i32 %tid
+ %a = load <4 x i64>, <4 x i64> addrspace(1)* %a_ptr
+ %b = load <4 x i64>, <4 x i64> addrspace(1)* %b_ptr
+ %result = sub <4 x i64> %a, %b
+ store <4 x i64> %result, <4 x i64> addrspace(1)* %out
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/sub.ll b/llvm/test/CodeGen/AMDGPU/sub.ll
index 4bd346dc586..485b374454d 100644
--- a/llvm/test/CodeGen/AMDGPU/sub.ll
+++ b/llvm/test/CodeGen/AMDGPU/sub.ll
@@ -1,11 +1,10 @@
-; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI,FUNC %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,GFX89,FUNC %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9,GFX89,FUNC %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=EG,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,GFX89 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9,GFX89 %s
-declare i32 @llvm.r600.read.tidig.x() readnone
+declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone speculatable
-; FUNC-LABEL: {{^}}s_sub_i32:
+; GCN-LABEL: {{^}}s_sub_i32:
; GCN: s_load_dwordx2
; GCN: s_load_dwordx2 s{{\[}}[[A:[0-9]+]]:[[B:[0-9]+]]{{\]}}
; GCN: s_sub_i32 s{{[0-9]+}}, s[[A]], s[[B]]
@@ -15,7 +14,7 @@ define amdgpu_kernel void @s_sub_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
ret void
}
-; FUNC-LABEL: {{^}}s_sub_imm_i32:
+; GCN-LABEL: {{^}}s_sub_imm_i32:
; GCN: s_load_dword [[A:s[0-9]+]]
; GCN: s_sub_i32 s{{[0-9]+}}, 0x4d2, [[A]]
define amdgpu_kernel void @s_sub_imm_i32(i32 addrspace(1)* %out, i32 %a) {
@@ -24,9 +23,7 @@ define amdgpu_kernel void @s_sub_imm_i32(i32 addrspace(1)* %out, i32 %a) {
ret void
}
-; FUNC-LABEL: {{^}}test_sub_i32:
-; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
+; GCN-LABEL: {{^}}test_sub_i32:
; SI: v_subrev_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
; GFX9: v_sub_u32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define amdgpu_kernel void @test_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
@@ -38,9 +35,7 @@ define amdgpu_kernel void @test_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)
ret void
}
-; FUNC-LABEL: {{^}}test_sub_imm_i32:
-; EG: SUB_INT
-
+; GCN-LABEL: {{^}}test_sub_imm_i32:
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc, 0x7b, v{{[0-9]+}}
; GFX9: v_sub_u32_e32 v{{[0-9]+}}, 0x7b, v{{[0-9]+}}
define amdgpu_kernel void @test_sub_imm_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
@@ -50,10 +45,7 @@ define amdgpu_kernel void @test_sub_imm_i32(i32 addrspace(1)* %out, i32 addrspac
ret void
}
-; FUNC-LABEL: {{^}}test_sub_v2i32:
-; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
+; GCN-LABEL: {{^}}test_sub_v2i32:
; SI: v_sub_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
; SI: v_sub_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
@@ -68,12 +60,7 @@ define amdgpu_kernel void @test_sub_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32
ret void
}
-; FUNC-LABEL: {{^}}test_sub_v4i32:
-; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
+; GCN-LABEL: {{^}}test_sub_v4i32:
; SI: v_sub_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
; SI: v_sub_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
; SI: v_sub_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
@@ -92,11 +79,11 @@ define amdgpu_kernel void @test_sub_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32
ret void
}
-; FUNC-LABEL: {{^}}test_sub_i16:
+; GCN-LABEL: {{^}}test_sub_i16:
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc,
; GFX89: v_sub_u16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define amdgpu_kernel void @test_sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
- %tid = call i32 @llvm.r600.read.tidig.x()
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr i16, i16 addrspace(1)* %in, i32 %tid
%b_ptr = getelementptr i16, i16 addrspace(1)* %gep, i32 1
%a = load volatile i16, i16 addrspace(1)* %gep
@@ -106,13 +93,13 @@ define amdgpu_kernel void @test_sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)
ret void
}
-; FUNC-LABEL: {{^}}test_sub_v2i16:
+; GCN-LABEL: {{^}}test_sub_v2i16:
; VI: v_sub_u16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; VI: v_sub_u16_sdwa v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GFX9: v_pk_sub_i16
define amdgpu_kernel void @test_sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
- %tid = call i32 @llvm.r600.read.tidig.x()
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in, i32 %tid
%b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %gep, i16 1
%a = load <2 x i16>, <2 x i16> addrspace(1)* %gep
@@ -122,7 +109,7 @@ define amdgpu_kernel void @test_sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16
ret void
}
-; FUNC-LABEL: {{^}}test_sub_v4i16:
+; GCN-LABEL: {{^}}test_sub_v4i16:
; VI: v_sub_u16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; VI: v_sub_u16_sdwa v{{[0-9]+, v[0-9]+, v[0-9]+}}
; VI: v_sub_u16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
@@ -131,7 +118,7 @@ define amdgpu_kernel void @test_sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16
; GFX9: v_pk_sub_i16
; GFX9: v_pk_sub_i16
define amdgpu_kernel void @test_sub_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
- %tid = call i32 @llvm.r600.read.tidig.x()
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %in, i32 %tid
%b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %gep, i16 1
%a = load <4 x i16>, <4 x i16> addrspace(1) * %gep
@@ -141,22 +128,16 @@ define amdgpu_kernel void @test_sub_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16
ret void
}
-; FUNC-LABEL: {{^}}s_sub_i64:
+; GCN-LABEL: {{^}}s_sub_i64:
; GCN: s_sub_u32
; GCN: s_subb_u32
-
-; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
-; EG-DAG: SUB_INT {{[* ]*}}
-; EG-DAG: SUBB_UINT
-; EG-DAG: SUB_INT
-; EG-DAG: SUB_INT {{[* ]*}}
define amdgpu_kernel void @s_sub_i64(i64 addrspace(1)* noalias %out, i64 %a, i64 %b) nounwind {
%result = sub i64 %a, %b
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
}
-; FUNC-LABEL: {{^}}v_sub_i64:
+; GCN-LABEL: {{^}}v_sub_i64:
; SI: v_sub_i32_e32
; SI: v_subb_u32_e32
@@ -165,14 +146,8 @@ define amdgpu_kernel void @s_sub_i64(i64 addrspace(1)* noalias %out, i64 %a, i64
; GFX9: v_sub_co_u32_e32
; GFX9: v_subb_co_u32_e32
-
-; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
-; EG-DAG: SUB_INT {{[* ]*}}
-; EG-DAG: SUBB_UINT
-; EG-DAG: SUB_INT
-; EG-DAG: SUB_INT {{[* ]*}}
define amdgpu_kernel void @v_sub_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %inA, i64 addrspace(1)* noalias %inB) nounwind {
- %tid = call i32 @llvm.r600.read.tidig.x() readnone
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() readnone
%a_ptr = getelementptr i64, i64 addrspace(1)* %inA, i32 %tid
%b_ptr = getelementptr i64, i64 addrspace(1)* %inB, i32 %tid
%a = load i64, i64 addrspace(1)* %a_ptr
@@ -182,7 +157,7 @@ define amdgpu_kernel void @v_sub_i64(i64 addrspace(1)* noalias %out, i64 addrspa
ret void
}
-; FUNC-LABEL: {{^}}v_test_sub_v2i64:
+; GCN-LABEL: {{^}}v_test_sub_v2i64:
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc,
; SI: v_subb_u32_e32 v{{[0-9]+}}, vcc,
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc,
@@ -198,7 +173,7 @@ define amdgpu_kernel void @v_sub_i64(i64 addrspace(1)* noalias %out, i64 addrspa
; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_subb_co_u32_e32 v{{[0-9]+}}, vcc,
define amdgpu_kernel void @v_test_sub_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* noalias %inA, <2 x i64> addrspace(1)* noalias %inB) {
- %tid = call i32 @llvm.r600.read.tidig.x() readnone
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() readnone
%a_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inA, i32 %tid
%b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inB, i32 %tid
%a = load <2 x i64>, <2 x i64> addrspace(1)* %a_ptr
@@ -208,7 +183,7 @@ define amdgpu_kernel void @v_test_sub_v2i64(<2 x i64> addrspace(1)* %out, <2 x i
ret void
}
-; FUNC-LABEL: {{^}}v_test_sub_v4i64:
+; GCN-LABEL: {{^}}v_test_sub_v4i64:
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc,
; SI: v_subb_u32_e32 v{{[0-9]+}}, vcc,
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc,
@@ -236,7 +211,7 @@ define amdgpu_kernel void @v_test_sub_v2i64(<2 x i64> addrspace(1)* %out, <2 x i
; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_subb_co_u32_e32 v{{[0-9]+}}, vcc,
define amdgpu_kernel void @v_test_sub_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* noalias %inA, <4 x i64> addrspace(1)* noalias %inB) {
- %tid = call i32 @llvm.r600.read.tidig.x() readnone
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() readnone
%a_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %inA, i32 %tid
%b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %inB, i32 %tid
%a = load <4 x i64>, <4 x i64> addrspace(1)* %a_ptr
@@ -245,3 +220,22 @@ define amdgpu_kernel void @v_test_sub_v4i64(<4 x i64> addrspace(1)* %out, <4 x i
store <4 x i64> %result, <4 x i64> addrspace(1)* %out
ret void
}
+
+; Make sure the VOP3 form of sub is initially selected. Otherwise a pair
+; of copies from/to VCC would be necessary.
+
+; GCN-LABEL: {{^}}sub_select_vop3:
+; SI: v_subrev_i32_e64 v0, s[0:1], s0, v0
+; VI: v_subrev_u32_e64 v0, s[0:1], s0, v0
+; GFX9: v_subrev_u32_e32 v0, s0, v0
+
+; GCN: ; def vcc
+; GCN: ds_write_b32
+; GCN: ; use vcc
+define amdgpu_ps void @sub_select_vop3(i32 inreg %s, i32 %v) {
+ %vcc = call i64 asm sideeffect "; def vcc", "={vcc}"()
+ %sub = sub i32 %v, %s
+ store i32 %sub, i32 addrspace(3)* undef
+ call void asm sideeffect "; use vcc", "{vcc}"(i64 %vcc)
+ ret void
+}
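
The comment above explains why the VOP3 form is selected first when vcc is busy. As a companion, here is a minimal sketch of the common case (not part of the patch; the function name and the exact registers in the check line are assumptions): when none of the extra VOP3 operands are needed, the initially selected e64 form is expected to be shrunk back to the 32-bit encoding, matching the existing v_sub_u32_e32 check in test_sub_i32 earlier in this file.

; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s
; With no VOP3-only operands needed, the sub shrinks back to the e32 encoding:
; CHECK: v_sub_u32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_ps void @sub_shrinks_to_e32_sketch(i32 %a, i32 %b) {
  %sub = sub i32 %a, %b                    ; divergent sub of two VGPR arguments
  store i32 %sub, i32 addrspace(3)* undef  ; keep the result live (LDS store)
  ret void
}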