| author | Puyan Lotfi <puyan@puyan.org> | 2019-12-11 01:39:17 -0500 |
|---|---|---|
| committer | Puyan Lotfi <puyan@puyan.org> | 2019-12-11 22:11:49 -0500 |
| commit | f5b7a468375ddeaa555a90fccc50098e4ca1e2b1 (patch) | |
| tree | 0bea40576211b69387aa42c5829b42ecbc745f5c /llvm/test/CodeGen/MIR/AMDGPU | |
| parent | 7aa5c160885c92c95ad84216de9b9b02dbc95936 (diff) | |
| download | bcm5719-llvm-f5b7a468375ddeaa555a90fccc50098e4ca1e2b1.tar.gz | bcm5719-llvm-f5b7a468375ddeaa555a90fccc50098e4ca1e2b1.zip |
[llvm][MIRVRegNamerUtils] Adding hashing on memoperands.
Hash the memoperands as well, so the MIRCanonicalization pass no longer
hits hash collisions on nearly identical memory-accessing instructions
whose memoperands actually differ.
Differential Revision: https://reviews.llvm.org/D71328
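
For context on the mechanism: the general shape of such a fix is to fold each instruction's MachineMemOperand fields into the per-instruction hash used for canonical vreg naming. The sketch below is a minimal illustration, not the committed patch: the helper name `hashMemOperands` is hypothetical, and the committed change may fold in more or different fields, but `llvm::hash_combine` (from `llvm/ADT/Hashing.h`) and the accessors shown are real LLVM APIs.

```cpp
#include "llvm/ADT/Hashing.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"

// Hypothetical sketch: combine the memoperand fields that can differ
// between otherwise-identical memory instructions into a single value
// that feeds the instruction's canonical-name hash.
static llvm::hash_code hashMemOperands(const llvm::MachineInstr &MI) {
  llvm::hash_code Hash(0);
  for (const llvm::MachineMemOperand *MMO : MI.memoperands()) {
    // getFlags() distinguishes e.g. non-temporal / dereferenceable /
    // invariant accesses; getAddrSpace() distinguishes loads of the
    // same width from different address spaces.
    Hash = llvm::hash_combine(Hash, MMO->getFlags(), MMO->getAddrSpace(),
                              MMO->getOffset());
  }
  return Hash;
}
```

In the test added below, the six S_LOAD_DWORDX2_IMM instructions are identical except for exactly these memoperand details, which is what previously made their hashes collide.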
Diffstat (limited to 'llvm/test/CodeGen/MIR/AMDGPU')
| -rw-r--r-- | llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir | 42 |
1 file changed, 42 insertions, 0 deletions
```diff
diff --git a/llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir b/llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir
new file mode 100644
index 00000000000..ea2f7de2687
--- /dev/null
+++ b/llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir
@@ -0,0 +1,42 @@
+# RUN: llc -march=amdgcn -mcpu=tahiti -run-pass mir-canonicalizer -o - %s | FileCheck %s
+--- |
+  target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+
+  define amdgpu_kernel void @f(i32 addrspace(1)* nocapture %arg) {
+    unreachable
+  }
+...
+---
+name: f
+alignment: 1
+registers:
+  - { id: 0, class: sgpr_64 }
+  - { id: 1, class: sreg_64_xexec }
+  - { id: 2, class: sreg_64_xexec }
+  - { id: 3, class: sreg_64_xexec }
+  - { id: 4, class: sreg_64_xexec }
+  - { id: 5, class: sreg_64_xexec }
+  - { id: 6, class: sreg_64_xexec }
+liveins:
+  - { reg: '$sgpr4_sgpr5', virtual-reg: '%4' }
+body: |
+  bb.0:
+    liveins: $sgpr4_sgpr5
+
+    ; CHECK: COPY
+    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
+    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
+    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
+    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
+    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
+    ; CHECK-NEXT: %bb0_{{[0-9]+}}__1:sreg_64_xexec = S_LOAD_DWORDX2_IMM
+
+    %0 = COPY $sgpr4_sgpr5
+    %1 = S_LOAD_DWORDX2_IMM %0, 0, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(4)* undef`)
+    %2 = S_LOAD_DWORDX2_IMM %0, 0, 0, 0 :: (             dereferenceable invariant load 8 from `i64 addrspace(4)* undef`)
+    %3 = S_LOAD_DWORDX2_IMM %0, 0, 0, 0 :: (                             invariant load 8 from `i64 addrspace(4)* undef`)
+    %4 = S_LOAD_DWORDX2_IMM %0, 0, 0, 0 :: (                                       load 8 from `i64 addrspace(4)* undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 0, 0, 0 :: (                                       load 8 from `i64 addrspace(2)* undef`)
+    %6 = S_LOAD_DWORDX2_IMM %0, 0, 0, 0 :: (                                       load 8 from `i64 addrspace(1)* undef`)
+
+...
```
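
To run the check locally, the test's own RUN line can be executed with `%s` expanded to the test path (the flags below are taken verbatim from that RUN line; nothing here is specific to this patch):

```sh
llc -march=amdgcn -mcpu=tahiti -run-pass mir-canonicalizer -o - \
    llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir \
  | FileCheck llvm/test/CodeGen/MIR/AMDGPU/mircanon-memoperands.mir
```

Note that every expected name ends in the `__1` suffix; since the vreg namer appends a counter to disambiguate colliding hashes, six distinct `__1` names are consistent with all six loads now hashing uniquely.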

