author    Matt Arsenault <Matthew.Arsenault@amd.com>  2019-06-20 21:58:24 +0000
committer Matt Arsenault <Matthew.Arsenault@amd.com>  2019-06-20 21:58:24 +0000
commit    d88db6d7fc942947ad4a068b38c5b5af7d5d1751 (patch)
tree      670dc13f6ccde2424eded38229f3b60061f5e6a2 /llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
parent    25f08a17c318e8ffbd30ecbab4d3ea5c5105ddbc (diff)
download  bcm5719-llvm-d88db6d7fc942947ad4a068b38c5b5af7d5d1751.tar.gz
          bcm5719-llvm-d88db6d7fc942947ad4a068b38c5b5af7d5d1751.zip
AMDGPU: Always use s33 for global scratch wave offset
Every called function could possibly need this to calculate the absolute address of stack objects, and this avoids inserting a copy around every call site in the kernel. It's also somewhat cleaner to keep this in a callee-saved SGPR.

llvm-svn: 363990
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll | 66
1 file changed, 33 insertions, 33 deletions
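
The functions checked in the hunks below all share one shape: a call whose multi-element return value is only consumed in a later basic block, so the lowered copies of the return registers must stay live across the block boundary (which is what makes the callee-saved SGPR spills and the s5/s33-vs-s36 shuffling around the call visible at all). A minimal IR sketch of that pattern follows; the function names match the CHECK labels in the diff, but the body and the attribute group are illustrative assumptions, not copied from the test file.

; Hedged sketch of the pattern this test exercises (assumed body, not the
; actual test source): the <2 x float> result is produced in one block and
; only extracted in the next, so both return-value registers cross the edge.
declare <2 x float> @func_v2f32()

define float @call_split_type_used_outside_block_v2f32() #0 {
bb0:
  %split.ret = call <2 x float> @func_v2f32()
  br label %bb1

bb1:
  %elt0 = extractelement <2 x float> %split.ret, i32 0
  ret float %elt0
}

attributes #0 = { nounwind }  ; assumed; the real test's attributes may differ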
diff --git a/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll b/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
index 7fd669c98c5..3deef3a0126 100644
--- a/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
+++ b/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
@@ -32,20 +32,20 @@ define float @call_split_type_used_outside_block_v2f32() #0 {
; GCN-NEXT: s_or_saveexec_b64 s[6:7], -1
; GCN-NEXT: buffer_store_dword v32, off, s[0:3], s5 ; 4-byte Folded Spill
; GCN-NEXT: s_mov_b64 exec, s[6:7]
-; GCN-NEXT: v_writelane_b32 v32, s33, 0
-; GCN-NEXT: v_writelane_b32 v32, s34, 1
-; GCN-NEXT: v_writelane_b32 v32, s35, 2
+; GCN-NEXT: v_writelane_b32 v32, s34, 0
+; GCN-NEXT: v_writelane_b32 v32, s35, 1
+; GCN-NEXT: v_writelane_b32 v32, s36, 2
; GCN-NEXT: s_getpc_b64 s[6:7]
; GCN-NEXT: s_add_u32 s6, s6, func_v2f32@rel32@lo+4
; GCN-NEXT: s_addc_u32 s7, s7, func_v2f32@rel32@hi+4
; GCN-NEXT: s_mov_b64 s[34:35], s[30:31]
-; GCN-NEXT: s_mov_b32 s33, s5
+; GCN-NEXT: s_mov_b32 s36, s5
; GCN-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GCN-NEXT: s_mov_b32 s5, s36
; GCN-NEXT: s_mov_b64 s[30:31], s[34:35]
-; GCN-NEXT: v_readlane_b32 s35, v32, 2
-; GCN-NEXT: s_mov_b32 s5, s33
-; GCN-NEXT: v_readlane_b32 s34, v32, 1
-; GCN-NEXT: v_readlane_b32 s33, v32, 0
+; GCN-NEXT: v_readlane_b32 s36, v32, 2
+; GCN-NEXT: v_readlane_b32 s35, v32, 1
+; GCN-NEXT: v_readlane_b32 s34, v32, 0
; GCN-NEXT: s_or_saveexec_b64 s[6:7], -1
; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s5 ; 4-byte Folded Reload
; GCN-NEXT: s_mov_b64 exec, s[6:7]
@@ -70,20 +70,20 @@ define float @call_split_type_used_outside_block_v3f32() #0 {
; GCN-NEXT: s_or_saveexec_b64 s[6:7], -1
; GCN-NEXT: buffer_store_dword v32, off, s[0:3], s5 ; 4-byte Folded Spill
; GCN-NEXT: s_mov_b64 exec, s[6:7]
-; GCN-NEXT: v_writelane_b32 v32, s33, 0
-; GCN-NEXT: v_writelane_b32 v32, s34, 1
-; GCN-NEXT: v_writelane_b32 v32, s35, 2
+; GCN-NEXT: v_writelane_b32 v32, s34, 0
+; GCN-NEXT: v_writelane_b32 v32, s35, 1
+; GCN-NEXT: v_writelane_b32 v32, s36, 2
; GCN-NEXT: s_getpc_b64 s[6:7]
; GCN-NEXT: s_add_u32 s6, s6, func_v3f32@rel32@lo+4
; GCN-NEXT: s_addc_u32 s7, s7, func_v3f32@rel32@hi+4
; GCN-NEXT: s_mov_b64 s[34:35], s[30:31]
-; GCN-NEXT: s_mov_b32 s33, s5
+; GCN-NEXT: s_mov_b32 s36, s5
; GCN-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GCN-NEXT: s_mov_b32 s5, s36
; GCN-NEXT: s_mov_b64 s[30:31], s[34:35]
-; GCN-NEXT: v_readlane_b32 s35, v32, 2
-; GCN-NEXT: s_mov_b32 s5, s33
-; GCN-NEXT: v_readlane_b32 s34, v32, 1
-; GCN-NEXT: v_readlane_b32 s33, v32, 0
+; GCN-NEXT: v_readlane_b32 s36, v32, 2
+; GCN-NEXT: v_readlane_b32 s35, v32, 1
+; GCN-NEXT: v_readlane_b32 s34, v32, 0
; GCN-NEXT: s_or_saveexec_b64 s[6:7], -1
; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s5 ; 4-byte Folded Reload
; GCN-NEXT: s_mov_b64 exec, s[6:7]
@@ -108,20 +108,20 @@ define half @call_split_type_used_outside_block_v4f16() #0 {
; GCN-NEXT: s_or_saveexec_b64 s[6:7], -1
; GCN-NEXT: buffer_store_dword v32, off, s[0:3], s5 ; 4-byte Folded Spill
; GCN-NEXT: s_mov_b64 exec, s[6:7]
-; GCN-NEXT: v_writelane_b32 v32, s33, 0
-; GCN-NEXT: v_writelane_b32 v32, s34, 1
-; GCN-NEXT: v_writelane_b32 v32, s35, 2
+; GCN-NEXT: v_writelane_b32 v32, s34, 0
+; GCN-NEXT: v_writelane_b32 v32, s35, 1
+; GCN-NEXT: v_writelane_b32 v32, s36, 2
; GCN-NEXT: s_getpc_b64 s[6:7]
; GCN-NEXT: s_add_u32 s6, s6, func_v4f16@rel32@lo+4
; GCN-NEXT: s_addc_u32 s7, s7, func_v4f16@rel32@hi+4
; GCN-NEXT: s_mov_b64 s[34:35], s[30:31]
-; GCN-NEXT: s_mov_b32 s33, s5
+; GCN-NEXT: s_mov_b32 s36, s5
; GCN-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GCN-NEXT: s_mov_b32 s5, s36
; GCN-NEXT: s_mov_b64 s[30:31], s[34:35]
-; GCN-NEXT: v_readlane_b32 s35, v32, 2
-; GCN-NEXT: s_mov_b32 s5, s33
-; GCN-NEXT: v_readlane_b32 s34, v32, 1
-; GCN-NEXT: v_readlane_b32 s33, v32, 0
+; GCN-NEXT: v_readlane_b32 s36, v32, 2
+; GCN-NEXT: v_readlane_b32 s35, v32, 1
+; GCN-NEXT: v_readlane_b32 s34, v32, 0
; GCN-NEXT: s_or_saveexec_b64 s[6:7], -1
; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s5 ; 4-byte Folded Reload
; GCN-NEXT: s_mov_b64 exec, s[6:7]
@@ -146,21 +146,21 @@ define { i32, half } @call_split_type_used_outside_block_struct() #0 {
; GCN-NEXT: s_or_saveexec_b64 s[6:7], -1
; GCN-NEXT: buffer_store_dword v32, off, s[0:3], s5 ; 4-byte Folded Spill
; GCN-NEXT: s_mov_b64 exec, s[6:7]
-; GCN-NEXT: v_writelane_b32 v32, s33, 0
-; GCN-NEXT: v_writelane_b32 v32, s34, 1
-; GCN-NEXT: v_writelane_b32 v32, s35, 2
+; GCN-NEXT: v_writelane_b32 v32, s34, 0
+; GCN-NEXT: v_writelane_b32 v32, s35, 1
+; GCN-NEXT: v_writelane_b32 v32, s36, 2
; GCN-NEXT: s_getpc_b64 s[6:7]
; GCN-NEXT: s_add_u32 s6, s6, func_struct@rel32@lo+4
; GCN-NEXT: s_addc_u32 s7, s7, func_struct@rel32@hi+4
; GCN-NEXT: s_mov_b64 s[34:35], s[30:31]
-; GCN-NEXT: s_mov_b32 s33, s5
+; GCN-NEXT: s_mov_b32 s36, s5
; GCN-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GCN-NEXT: s_mov_b64 s[30:31], s[34:35]
-; GCN-NEXT: v_readlane_b32 s35, v32, 2
-; GCN-NEXT: s_mov_b32 s5, s33
-; GCN-NEXT: v_readlane_b32 s34, v32, 1
+; GCN-NEXT: s_mov_b32 s5, s36
; GCN-NEXT: v_mov_b32_e32 v1, v4
-; GCN-NEXT: v_readlane_b32 s33, v32, 0
+; GCN-NEXT: s_mov_b64 s[30:31], s[34:35]
+; GCN-NEXT: v_readlane_b32 s36, v32, 2
+; GCN-NEXT: v_readlane_b32 s35, v32, 1
+; GCN-NEXT: v_readlane_b32 s34, v32, 0
; GCN-NEXT: s_or_saveexec_b64 s[6:7], -1
; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s5 ; 4-byte Folded Reload
; GCN-NEXT: s_mov_b64 exec, s[6:7]