Diffstat (limited to 'llvm/test/CodeGen')
 -rw-r--r-- llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands.ll  | 230
 -rw-r--r-- llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands.mir | 239
 2 files changed, 469 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands.ll b/llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands.ll
new file mode 100644
index 00000000000..965e7a5ebd1
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands.ll
@@ -0,0 +1,230 @@
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck %s
+; RUN: llc -O0 -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck %s --check-prefix=CHECK-O0
+
+; Test that we correctly legalize VGPR Rsrc operands in MUBUF instructions.
+
+; CHECK-LABEL: mubuf_vgpr
+; CHECK: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
+; CHECK: [[LOOPBB:BB[0-9]+_[0-9]+]]:
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC0:[0-9]+]], v0
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC1:[0-9]+]], v1
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC2:[0-9]+]], v2
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC3:[0-9]+]], v3
+; CHECK: v_cmp_eq_u64_e32 vcc, s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v[0:1]
+; CHECK: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v[2:3]
+; CHECK: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], vcc, [[CMP0]]
+; CHECK: s_and_saveexec_b64 [[CMP]], [[CMP]]
+; CHECK: s_waitcnt vmcnt(0)
+; CHECK: buffer_load_format_x [[RES:v[0-9]+]], v4, s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, 0 idxen
+; CHECK: s_xor_b64 exec, exec, [[CMP]]
+; CHECK: s_cbranch_execnz [[LOOPBB]]
+; CHECK: s_mov_b64 exec, [[SAVEEXEC]]
+; CHECK: v_mov_b32_e32 v0, [[RES]]
+define float @mubuf_vgpr(<4 x i32> %i, i32 %c) #0 {
+  %call = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %i, i32 %c, i32 0, i1 zeroext false, i1 zeroext false) #1
+  ret float %call
+}
+
+; CHECK-LABEL: mubuf_vgpr_adjacent_in_block
+
+; CHECK: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
+; CHECK: [[LOOPBB0:BB[0-9]+_[0-9]+]]:
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC0:[0-9]+]], v0
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC1:[0-9]+]], v1
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC2:[0-9]+]], v2
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC3:[0-9]+]], v3
+; CHECK: v_cmp_eq_u64_e32 vcc, s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v[0:1]
+; CHECK: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v[2:3]
+; CHECK: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], vcc, [[CMP0]]
+; CHECK: s_and_saveexec_b64 [[CMP]], [[CMP]]
+; CHECK: s_waitcnt vmcnt(0)
+; CHECK: buffer_load_format_x [[RES0:v[0-9]+]], v8, s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, 0 idxen
+; CHECK: s_xor_b64 exec, exec, [[CMP]]
+; CHECK: s_cbranch_execnz [[LOOPBB0]]
+
+; CHECK: s_mov_b64 exec, [[SAVEEXEC]]
+; FIXME: redundant s_mov
+; CHECK: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
+
+; CHECK: [[LOOPBB1:BB[0-9]+_[0-9]+]]:
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC0:[0-9]+]], v4
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC1:[0-9]+]], v5
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC2:[0-9]+]], v6
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC3:[0-9]+]], v7
+; CHECK: v_cmp_eq_u64_e32 vcc, s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v[4:5]
+; CHECK: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v[6:7]
+; CHECK: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], vcc, [[CMP0]]
+; CHECK: s_and_saveexec_b64 [[CMP]], [[CMP]]
+; CHECK: s_waitcnt vmcnt(0)
+; CHECK: buffer_load_format_x [[RES1:v[0-9]+]], v8, s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, 0 idxen
+; CHECK: s_xor_b64 exec, exec, [[CMP]]
+; CHECK: s_cbranch_execnz [[LOOPBB1]]
+
+; CHECK: s_mov_b64 exec, [[SAVEEXEC]]
+; CHECK-DAG: global_store_dword v[9:10], [[RES0]], off
+; CHECK-DAG: global_store_dword v[11:12], [[RES1]], off
+
+define void @mubuf_vgpr_adjacent_in_block(<4 x i32> %i, <4 x i32> %j, i32 %c, float addrspace(1)* %out0, float addrspace(1)* %out1) #0 {
+entry:
+  %val0 = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %i, i32 %c, i32 0, i1 zeroext false, i1 zeroext false) #1
+  %val1 = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %j, i32 %c, i32 0, i1 zeroext false, i1 zeroext false) #1
+  store volatile float %val0, float addrspace(1)* %out0
+  store volatile float %val1, float addrspace(1)* %out1
+  ret void
+}
+
+; CHECK-LABEL: mubuf_vgpr_outside_entry
+
+; CHECK-DAG: v_mov_b32_e32 [[IDX:v[0-9]+]], s4
+; CHECK-DAG: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
+
+; CHECK: [[LOOPBB0:BB[0-9]+_[0-9]+]]:
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC0:[0-9]+]], v0
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC1:[0-9]+]], v1
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC2:[0-9]+]], v2
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC3:[0-9]+]], v3
+; CHECK: v_cmp_eq_u64_e32 vcc, s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v[0:1]
+; CHECK: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v[2:3]
+; CHECK: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], vcc, [[CMP0]]
+; CHECK: s_and_saveexec_b64 [[CMP]], [[CMP]]
+; CHECK: s_waitcnt vmcnt(0)
+; CHECK: buffer_load_format_x [[RES:v[0-9]+]], [[IDX]], s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, 0 idxen
+; CHECK: s_xor_b64 exec, exec, [[CMP]]
+; CHECK: s_cbranch_execnz [[LOOPBB0]]
+
+; CHECK: s_mov_b64 exec, [[SAVEEXEC]]
+; CHECK: s_cbranch_execz [[TERMBB:BB[0-9]+_[0-9]+]]
+
+; CHECK: BB{{[0-9]+_[0-9]+}}:
+; CHECK-DAG: v_mov_b32_e32 [[IDX:v[0-9]+]], s4
+; CHECK-DAG: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
+
+; CHECK: [[LOOPBB1:BB[0-9]+_[0-9]+]]:
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC0:[0-9]+]], v4
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC1:[0-9]+]], v5
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC2:[0-9]+]], v6
+; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC3:[0-9]+]], v7
+; CHECK: v_cmp_eq_u64_e32 vcc, s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v[4:5]
+; CHECK: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v[6:7]
+; CHECK: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], vcc, [[CMP0]]
+; CHECK: s_and_saveexec_b64 [[CMP]], [[CMP]]
+; CHECK: s_waitcnt vmcnt(0)
+; CHECK: buffer_load_format_x [[RES]], [[IDX]], s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, 0 idxen
+; CHECK: s_xor_b64 exec, exec, [[CMP]]
+; CHECK: s_cbranch_execnz [[LOOPBB1]]
+
+; CHECK: s_mov_b64 exec, [[SAVEEXEC]]
+
+; CHECK: [[TERMBB]]:
+; CHECK: global_store_dword v[11:12], [[RES]], off
+
+; Confirm spills do not occur between the XOR and branch that terminate the
+; waterfall loop BBs.
+
+; CHECK-O0-LABEL: mubuf_vgpr_outside_entry
+
+; CHECK-O0-DAG: s_mov_b32 [[IDX_S:s[0-9]+]], s4
+; CHECK-O0-DAG: v_mov_b32_e32 [[IDX_V:v[0-9]+]], [[IDX_S]]
+; CHECK-O0-DAG: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
+; CHECK-O0-DAG: buffer_store_dword [[IDX_V]], off, s[0:3], s5 offset:[[IDX_OFF:[0-9]+]] ; 4-byte Folded Spill
+
+; CHECK-O0: [[LOOPBB0:BB[0-9]+_[0-9]+]]:
+; CHECK-O0: buffer_load_dword v[[VRSRC0:[0-9]+]], {{.*}} ; 4-byte Folded Reload
+; CHECK-O0: s_waitcnt vmcnt(0)
+; CHECK-O0: buffer_load_dword v[[VRSRC1:[0-9]+]], {{.*}} ; 4-byte Folded Reload
+; CHECK-O0: s_waitcnt vmcnt(0)
+; CHECK-O0: buffer_load_dword v[[VRSRC2:[0-9]+]], {{.*}} ; 4-byte Folded Reload
+; CHECK-O0: s_waitcnt vmcnt(0)
+; CHECK-O0: buffer_load_dword v[[VRSRC3:[0-9]+]], {{.*}} ; 4-byte Folded Reload
+; CHECK-O0: s_waitcnt vmcnt(0)
+; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP0:[0-9]+]], v[[VRSRC0]]
+; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP1:[0-9]+]], v[[VRSRC1]]
+; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP2:[0-9]+]], v[[VRSRC2]]
+; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP3:[0-9]+]], v[[VRSRC3]]
+; CHECK-O0-DAG: s_mov_b32 s[[SRSRC0:[0-9]+]], s[[SRSRCTMP0]]
+; CHECK-O0-DAG: s_mov_b32 s[[SRSRC1:[0-9]+]], s[[SRSRCTMP1]]
+; CHECK-O0-DAG: s_mov_b32 s[[SRSRC2:[0-9]+]], s[[SRSRCTMP2]]
+; CHECK-O0-DAG: s_mov_b32 s[[SRSRC3:[0-9]+]], s[[SRSRCTMP3]]
+; CHECK-O0: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v{{\[}}[[VRSRC0]]:[[VRSRC1]]{{\]}}
+; CHECK-O0: v_cmp_eq_u64_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v{{\[}}[[VRSRC2]]:[[VRSRC3]]{{\]}}
+; CHECK-O0: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[CMP0]], [[CMP1]]
+; CHECK-O0: s_and_saveexec_b64 [[CMP]], [[CMP]]
+; CHECK-O0: buffer_load_dword [[IDX:v[0-9]+]], off, s[0:3], s5 offset:[[IDX_OFF]] ; 4-byte Folded Reload
+; CHECK-O0: buffer_load_format_x [[RES:v[0-9]+]], [[IDX]], s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, {{.*}} idxen
+; CHECK-O0: s_waitcnt vmcnt(0)
+; CHECK-O0: buffer_store_dword [[RES]], off, s[0:3], s5 offset:[[RES_OFF_TMP:[0-9]+]] ; 4-byte Folded Spill
+; CHECK-O0: s_xor_b64 exec, exec, [[CMP]]
+; CHECK-O0-NEXT: s_cbranch_execnz [[LOOPBB0]]
+
+; CHECK-O0: s_mov_b64 exec, [[SAVEEXEC]]
+; CHECK-O0: buffer_load_dword [[RES:v[0-9]+]], off, s[0:3], s5 offset:[[RES_OFF_TMP]] ; 4-byte Folded Reload
+; CHECK-O0: buffer_store_dword [[RES]], off, s[0:3], s5 offset:[[RES_OFF:[0-9]+]] ; 4-byte Folded Spill
+; CHECK-O0: s_cbranch_execz [[TERMBB:BB[0-9]+_[0-9]+]]
+
+; CHECK-O0: BB{{[0-9]+_[0-9]+}}:
+; CHECK-O0-DAG: s_mov_b64 s{{\[}}[[SAVEEXEC0:[0-9]+]]:[[SAVEEXEC1:[0-9]+]]{{\]}}, exec
+; CHECK-O0-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s5 offset:[[IDX_OFF:[0-9]+]] ; 4-byte Folded Spill
+; CHECK-O0: v_writelane_b32 [[VSAVEEXEC:v[0-9]+]], s[[SAVEEXEC0]], [[SAVEEXEC_IDX0:[0-9]+]]
+; CHECK-O0: v_writelane_b32 [[VSAVEEXEC:v[0-9]+]], s[[SAVEEXEC1]], [[SAVEEXEC_IDX1:[0-9]+]]
+
+; CHECK-O0: [[LOOPBB1:BB[0-9]+_[0-9]+]]:
+; CHECK-O0: buffer_load_dword v[[VRSRC0:[0-9]+]], {{.*}} ; 4-byte Folded Reload
+; CHECK-O0: s_waitcnt vmcnt(0)
+; CHECK-O0: buffer_load_dword v[[VRSRC1:[0-9]+]], {{.*}} ; 4-byte Folded Reload
+; CHECK-O0: s_waitcnt vmcnt(0)
+; CHECK-O0: buffer_load_dword v[[VRSRC2:[0-9]+]], {{.*}} ; 4-byte Folded Reload
+; CHECK-O0: s_waitcnt vmcnt(0)
+; CHECK-O0: buffer_load_dword v[[VRSRC3:[0-9]+]], {{.*}} ; 4-byte Folded Reload
+; CHECK-O0: s_waitcnt vmcnt(0)
+; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP0:[0-9]+]], v[[VRSRC0]]
+; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP1:[0-9]+]], v[[VRSRC1]]
+; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP2:[0-9]+]], v[[VRSRC2]]
+; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP3:[0-9]+]], v[[VRSRC3]]
+; CHECK-O0-DAG: s_mov_b32 s[[SRSRC0:[0-9]+]], s[[SRSRCTMP0]]
+; CHECK-O0-DAG: s_mov_b32 s[[SRSRC1:[0-9]+]], s[[SRSRCTMP1]]
+; CHECK-O0-DAG: s_mov_b32 s[[SRSRC2:[0-9]+]], s[[SRSRCTMP2]]
+; CHECK-O0-DAG: s_mov_b32 s[[SRSRC3:[0-9]+]], s[[SRSRCTMP3]]
+; CHECK-O0: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v{{\[}}[[VRSRC0]]:[[VRSRC1]]{{\]}}
+; CHECK-O0: v_cmp_eq_u64_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v{{\[}}[[VRSRC2]]:[[VRSRC3]]{{\]}}
+; CHECK-O0: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[CMP0]], [[CMP1]]
+; CHECK-O0: s_and_saveexec_b64 [[CMP]], [[CMP]]
+; CHECK-O0: buffer_load_dword [[IDX:v[0-9]+]], off, s[0:3], s5 offset:[[IDX_OFF]] ; 4-byte Folded Reload
+; CHECK-O0: buffer_load_format_x [[RES:v[0-9]+]], [[IDX]], s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, {{.*}} idxen
+; CHECK-O0: s_waitcnt vmcnt(0)
+; CHECK-O0: buffer_store_dword [[RES]], off, s[0:3], s5 offset:[[RES_OFF_TMP:[0-9]+]] ; 4-byte Folded Spill
+; CHECK-O0: s_xor_b64 exec, exec, [[CMP]]
+; CHECK-O0-NEXT: s_cbranch_execnz [[LOOPBB1]]
+
+; CHECK-O0: v_readlane_b32 s[[SAVEEXEC0:[0-9]+]], [[VSAVEEXEC]], [[SAVEEXEC_IDX0]]
+; CHECK-O0: v_readlane_b32 s[[SAVEEXEC1:[0-9]+]], [[VSAVEEXEC]], [[SAVEEXEC_IDX1]]
+; CHECK-O0: s_mov_b64 exec, s{{\[}}[[SAVEEXEC0]]:[[SAVEEXEC1]]{{\]}}
+; CHECK-O0: buffer_load_dword [[RES:v[0-9]+]], off, s[0:3], s5 offset:[[RES_OFF_TMP]] ; 4-byte Folded Reload
+; CHECK-O0: buffer_store_dword [[RES]], off, s[0:3], s5 offset:[[RES_OFF]] ; 4-byte Folded Spill
+
+; CHECK-O0: [[TERMBB]]:
+; CHECK-O0: buffer_load_dword [[RES:v[0-9]+]], off, s[0:3], s5 offset:[[RES_OFF]] ; 4-byte Folded Reload
+; CHECK-O0: global_store_dword v[{{[0-9]+:[0-9]+}}], [[RES]], off
+
+define void @mubuf_vgpr_outside_entry(<4 x i32> %i, <4 x i32> %j, i32 %c, float addrspace(1)* %in, float addrspace(1)* %out) #0 {
+entry:
+  %live.out.reg = call i32 asm sideeffect "s_mov_b32 $0, 17", "={s4}" ()
+  %val0 = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %i, i32 %live.out.reg, i32 0, i1 zeroext false, i1 zeroext false) #1
+  %idx = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %cmp = icmp eq i32 %idx, 0
+  br i1 %cmp, label %bb1, label %bb2
+
+bb1:
+  %val1 = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %j, i32 %live.out.reg, i32 0, i1 zeroext false, i1 zeroext false) #1
+  br label %bb2
+
+bb2:
+  %val = phi float [ %val0, %entry ], [ %val1, %bb1 ]
+  store volatile float %val, float addrspace(1)* %out
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+declare float @llvm.amdgcn.buffer.load.format.f32(<4 x i32>, i32, i32, i1, i1) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands.mir b/llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands.mir
new file mode 100644
index 00000000000..8ab1956604d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands.mir
@@ -0,0 +1,239 @@
+# RUN: llc -march=amdgcn -mcpu=gfx700 -verify-machineinstrs --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=COMMON,ADDR64
+# RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=COMMON,NO-ADDR64
+
+# Test that we correctly legalize VGPR Rsrc operands in MUBUF instructions.
+#
+# On ADDR64 hardware we optimize the _ADDR64 and _OFFSET cases to avoid
+# needing a waterfall. For all other instruction variants, and when we are
+# on non-ADDR64 hardware, we emit a waterfall loop.
+
+# COMMON-LABEL: name: idxen
+# COMMON-LABEL: bb.0:
+# COMMON-NEXT: successors: %bb.1({{.*}})
+# COMMON: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
+# COMMON: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
+# COMMON-LABEL: bb.1:
+# COMMON-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
+# COMMON: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
+# COMMON: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
+# COMMON: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
+# COMMON: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
+# COMMON: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
+# COMMON: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
+# COMMON: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
+# COMMON: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
+# COMMON: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
+# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, implicit $exec
+# COMMON: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
+# COMMON: S_CBRANCH_EXECNZ %bb.1, implicit $exec
+# COMMON-LABEL: bb.2:
+# COMMON: $exec = S_MOV_B64 [[SAVEEXEC]]
+---
+name: idxen
+liveins:
+  - { reg: '$vgpr0', virtual-reg: '%0' }
+  - { reg: '$vgpr1', virtual-reg: '%1' }
+  - { reg: '$vgpr2', virtual-reg: '%2' }
+  - { reg: '$vgpr3', virtual-reg: '%3' }
+  - { reg: '$vgpr4', virtual-reg: '%4' }
+  - { reg: '$sgpr30_sgpr31', virtual-reg: '%5' }
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
+    %5:sreg_64 = COPY $sgpr30_sgpr31
+    %4:vgpr_32 = COPY $vgpr4
+    %3:vgpr_32 = COPY $vgpr3
+    %2:vgpr_32 = COPY $vgpr2
+    %1:vgpr_32 = COPY $vgpr1
+    %0:vgpr_32 = COPY $vgpr0
+    %6:sreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
+    %7:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN %4, killed %6, 0, 0, 0, 0, 0, implicit $exec
+    $sgpr30_sgpr31 = COPY %5
+    $vgpr0 = COPY %7
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
+...
+
+# COMMON-LABEL: name: offen
+# COMMON-LABEL: bb.0:
+# COMMON-NEXT: successors: %bb.1({{.*}})
+# COMMON: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
+# COMMON: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
+# COMMON-LABEL: bb.1:
+# COMMON-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
+# COMMON: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
+# COMMON: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
+# COMMON: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
+# COMMON: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
+# COMMON: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
+# COMMON: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
+# COMMON: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
+# COMMON: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
+# COMMON: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
+# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, implicit $exec
+# COMMON: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
+# COMMON: S_CBRANCH_EXECNZ %bb.1, implicit $exec
+# COMMON-LABEL: bb.2:
+# COMMON: $exec = S_MOV_B64 [[SAVEEXEC]]
+---
+name: offen
+liveins:
+  - { reg: '$vgpr0', virtual-reg: '%0' }
+  - { reg: '$vgpr1', virtual-reg: '%1' }
+  - { reg: '$vgpr2', virtual-reg: '%2' }
+  - { reg: '$vgpr3', virtual-reg: '%3' }
+  - { reg: '$vgpr4', virtual-reg: '%4' }
+  - { reg: '$sgpr30_sgpr31', virtual-reg: '%5' }
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
+    %5:sreg_64 = COPY $sgpr30_sgpr31
+    %4:vgpr_32 = COPY $vgpr4
+    %3:vgpr_32 = COPY $vgpr3
+    %2:vgpr_32 = COPY $vgpr2
+    %1:vgpr_32 = COPY $vgpr1
+    %0:vgpr_32 = COPY $vgpr0
+    %6:sreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
+    %7:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFEN %4, killed %6, 0, 0, 0, 0, 0, implicit $exec
+    $sgpr30_sgpr31 = COPY %5
+    $vgpr0 = COPY %7
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
+...
+
+# COMMON-LABEL: name: bothen
+# COMMON-LABEL: bb.0:
+# COMMON-NEXT: successors: %bb.1({{.*}})
+# COMMON: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
+# COMMON: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
+# COMMON-LABEL: bb.1:
+# COMMON-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
+# COMMON: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
+# COMMON: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
+# COMMON: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
+# COMMON: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
+# COMMON: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
+# COMMON: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
+# COMMON: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
+# COMMON: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
+# COMMON: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
+# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_BOTHEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, implicit $exec
+# COMMON: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
+# COMMON: S_CBRANCH_EXECNZ %bb.1, implicit $exec
+# COMMON-LABEL: bb.2:
+# COMMON: $exec = S_MOV_B64 [[SAVEEXEC]]
+---
+name: bothen
+liveins:
+  - { reg: '$vgpr0', virtual-reg: '%0' }
+  - { reg: '$vgpr1', virtual-reg: '%1' }
+  - { reg: '$vgpr2', virtual-reg: '%2' }
+  - { reg: '$vgpr3', virtual-reg: '%3' }
+  - { reg: '$vgpr4_vgpr5', virtual-reg: '%4' }
+  - { reg: '$sgpr30_sgpr31', virtual-reg: '%5' }
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
+    %5:sreg_64 = COPY $sgpr30_sgpr31
+    %4:vreg_64 = COPY $vgpr4_vgpr5
+    %3:vgpr_32 = COPY $vgpr3
+    %2:vgpr_32 = COPY $vgpr2
+    %1:vgpr_32 = COPY $vgpr1
+    %0:vgpr_32 = COPY $vgpr0
+    %6:sreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
+    %7:vgpr_32 = BUFFER_LOAD_FORMAT_X_BOTHEN %4, killed %6, 0, 0, 0, 0, 0, implicit $exec
+    $sgpr30_sgpr31 = COPY %5
+    $vgpr0 = COPY %7
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
+...
+
+# COMMON-LABEL: name: addr64
+# COMMON-LABEL: bb.0:
+# COMMON: %12:vreg_64 = COPY %8.sub0_sub1
+# COMMON: %13:sreg_64 = S_MOV_B64 0
+# COMMON: %14:sgpr_32 = S_MOV_B32 0
+# COMMON: %15:sgpr_32 = S_MOV_B32 61440
+# COMMON: %16:sreg_128 = REG_SEQUENCE %13, %subreg.sub0_sub1, %14, %subreg.sub2, %15, %subreg.sub3
+# COMMON: %9:vgpr_32 = V_ADD_I32_e32 %12.sub0, %4.sub0, implicit-def $vcc, implicit $exec
+# COMMON: %10:vgpr_32 = V_ADDC_U32_e32 %12.sub1, %4.sub1, implicit-def $vcc, implicit $vcc, implicit $exec
+# COMMON: %11:vreg_64 = REG_SEQUENCE %9, %subreg.sub0, %10, %subreg.sub1
+# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_ADDR64 %11, killed %16, 0, 0, 0, 0, 0, implicit $exec
+---
+name: addr64
+liveins:
+  - { reg: '$vgpr0', virtual-reg: '%0' }
+  - { reg: '$vgpr1', virtual-reg: '%1' }
+  - { reg: '$vgpr2', virtual-reg: '%2' }
+  - { reg: '$vgpr3', virtual-reg: '%3' }
+  - { reg: '$vgpr4_vgpr5', virtual-reg: '%4' }
+  - { reg: '$sgpr30_sgpr31', virtual-reg: '%5' }
body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
+    %5:sreg_64 = COPY $sgpr30_sgpr31
+    %4:vreg_64 = COPY $vgpr4_vgpr5
+    %3:vgpr_32 = COPY $vgpr3
+    %2:vgpr_32 = COPY $vgpr2
+    %1:vgpr_32 = COPY $vgpr1
+    %0:vgpr_32 = COPY $vgpr0
+    %6:sreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
+    %7:vgpr_32 = BUFFER_LOAD_FORMAT_X_ADDR64 %4, killed %6, 0, 0, 0, 0, 0, implicit $exec
+    $sgpr30_sgpr31 = COPY %5
+    $vgpr0 = COPY %7
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
+...
+
+# COMMON-LABEL: name: offset
+# COMMON-LABEL: bb.0:
+
+# NO-ADDR64-NEXT: successors: %bb.1({{.*}})
+# NO-ADDR64: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
+# NO-ADDR64: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
+# NO-ADDR64-LABEL: bb.1:
+# NO-ADDR64-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
+# NO-ADDR64: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
+# NO-ADDR64: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
+# NO-ADDR64: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
+# NO-ADDR64: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
+# NO-ADDR64: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
+# NO-ADDR64: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
+# NO-ADDR64: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
+# NO-ADDR64: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
+# NO-ADDR64: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
+# NO-ADDR64: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFSET killed [[SRSRC]], 0, 0, 0, 0, 0, implicit $exec
+# NO-ADDR64: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
+# NO-ADDR64: S_CBRANCH_EXECNZ %bb.1, implicit $exec
+# NO-ADDR64-LABEL: bb.2:
+# NO-ADDR64: $exec = S_MOV_B64 [[SAVEEXEC]]
+
+# ADDR64: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
+# ADDR64: [[RSRCPTR:%[0-9]+]]:vreg_64 = COPY [[VRSRC]].sub0_sub1
+# ADDR64: [[ZERO64:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+# ADDR64: [[RSRCFMTLO:%[0-9]+]]:sgpr_32 = S_MOV_B32 0
+# ADDR64: [[RSRCFMTHI:%[0-9]+]]:sgpr_32 = S_MOV_B32 61440
+# ADDR64: [[ZERORSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[ZERO64]], %subreg.sub0_sub1, [[RSRCFMTLO]], %subreg.sub2, [[RSRCFMTHI]], %subreg.sub3
+# ADDR64: [[VADDR64:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[RSRCPTR]].sub0, %subreg.sub0, [[RSRCPTR]].sub1, %subreg.sub1
+# ADDR64: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_ADDR64 [[VADDR64]], [[ZERORSRC]], 0, 0, 0, 0, 0, implicit $exec
+
+---
+name: offset
+liveins:
+  - { reg: '$vgpr0', virtual-reg: '%0' }
+  - { reg: '$vgpr1', virtual-reg: '%1' }
+  - { reg: '$vgpr2', virtual-reg: '%2' }
+  - { reg: '$vgpr3', virtual-reg: '%3' }
+  - { reg: '$vgpr4_vgpr5', virtual-reg: '%4' }
+  - { reg: '$sgpr30_sgpr31', virtual-reg: '%5' }
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
+    %5:sreg_64 = COPY $sgpr30_sgpr31
+    %4:vreg_64 = COPY $vgpr4_vgpr5
+    %3:vgpr_32 = COPY $vgpr3
+    %2:vgpr_32 = COPY $vgpr2
+    %1:vgpr_32 = COPY $vgpr1
+    %0:vgpr_32 = COPY $vgpr0
+    %6:sreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
+    %7:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFSET killed %6, 0, 0, 0, 0, 0, implicit $exec
+    $sgpr30_sgpr31 = COPY %5
+    $vgpr0 = COPY %7
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
+...
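
For reference, the waterfall idiom that all of these CHECK lines match is easier to read as plain GCN assembly than as FileCheck patterns. The sketch below is hand-written rather than compiler output, and the register assignments (rsrc in v[0:3], index in v4, result in v8, masks in s[8:9] and s[10:11]) are illustrative only:

; Waterfall loop over a non-uniform VGPR rsrc (illustrative registers).
s_mov_b64 s[8:9], exec                        ; save the full exec mask
loop:
v_readfirstlane_b32 s4, v0                    ; scalarize the rsrc held by
v_readfirstlane_b32 s5, v1                    ; the first active lane
v_readfirstlane_b32 s6, v2
v_readfirstlane_b32 s7, v3
v_cmp_eq_u64_e32 vcc, s[4:5], v[0:1]          ; select every lane whose rsrc
v_cmp_eq_u64_e64 s[10:11], s[6:7], v[2:3]     ; matches the scalarized copy
s_and_b64 s[10:11], vcc, s[10:11]
s_and_saveexec_b64 s[10:11], s[10:11]         ; restrict exec to those lanes
buffer_load_format_x v8, v4, s[4:7], 0 idxen  ; rsrc is now uniform in SGPRs
s_xor_b64 exec, exec, s[10:11]                ; retire the serviced lanes
s_cbranch_execnz loop                         ; loop while any lane remains
s_mov_b64 exec, s[8:9]                        ; restore the original exec mask

Each iteration services every lane that shares the first active lane's rsrc, so the loop runs exactly once when the rsrc is uniform and at most once per distinct rsrc value otherwise.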
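The addr64 and offset tests exercise the loop-free path instead: on ADDR64-capable targets the pass moves the base pointer out of the VGPR rsrc into the 64-bit VADDR of an _ADDR64 variant (adding it to the existing address with V_ADD_I32/V_ADDC_U32 in the addr64 case, using it directly in the offset case) and substitutes a constant rsrc whose only non-zero field is dword 3, the default buffer format (61440 = 0xf000). Roughly, for the offset case, again with illustrative registers and assuming the old rsrc sits in v[0:3]:

; Loop-free rewrite on ADDR64 hardware (illustrative registers, sketch only).
s_mov_b64 s[4:5], 0                                ; new rsrc base pointer = 0
s_mov_b32 s6, 0                                    ; rsrc dword 2 = 0
s_mov_b32 s7, 0xf000                               ; rsrc dword 3 = 61440
buffer_load_format_x v4, v[0:1], s[4:7], 0 addr64  ; old base rides in vaddr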

