| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2019-01-27 15:57:23 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2019-01-27 15:57:23 +0000 |
| commit | fdfb7d78f128943fb3f20296fd0dfdf73f62295a (patch) | |
| tree | d15bf1723ecb1b38549b258a95726eeb3b6d74c8 /llvm/test/CodeGen | |
| parent | d35424a2b3b1e1045e6044fe11b1bc6e02ec418c (diff) | |
GlobalISel: Verify load/store has a pointer input
I expected this to be verified automatically, but it seems nothing
actually uses the fact that the type index was declared as a "ptype"
in the generic opcode definitions.
llvm-svn: 352319
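For context, the new requirement lives in the GlobalISel MachineVerifier: the address operand of G_LOAD/G_STORE must carry a pointer LLT (p0, p1, ...) rather than a plain scalar such as s64, which is why the tests below switch their address vregs to p1 and spell out the address space on the memory operands. The snippet is a minimal sketch of that check, not the verbatim upstream code; the helper name hasPointerAddress and its packaging as a free function are illustrative assumptions, and header locations vary between LLVM releases.

// Sketch only: models the pointer-type requirement this commit verifies.
// The helper name and free-function form are illustrative, not the actual
// MachineVerifier code; header paths differ across LLVM releases.
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"

using namespace llvm;

// Returns true if a generic load/store addresses memory through a
// pointer-typed vreg (p0, p1, ...) rather than a scalar such as s64.
// G_ZEXTLOAD/G_SEXTLOAD use the same operand layout.
static bool hasPointerAddress(const MachineInstr &MI,
                              const MachineRegisterInfo &MRI) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    // For both opcodes, operand 0 is the value and operand 1 is the address.
    LLT PtrTy = MRI.getType(MI.getOperand(1).getReg());
    return PtrTy.isValid() && PtrTy.isPointer();
  }
  default:
    return true; // Not a generic memory instruction; nothing to check.
  }
}

In tree, a failed check is reported through the verifier's usual "*** Bad machine code: ... ***" diagnostics, the same format the CHECK lines in the deleted invalid-extload.mir test below match.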
Diffstat (limited to 'llvm/test/CodeGen')
14 files changed, 38 insertions, 61 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir
index f1c19f124ff..f241c2dad1e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir
@@ -21,7 +21,7 @@ body: |
     %1:vgpr(s32) = COPY $vgpr0
     ; GCN: [[VGPR1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     %2:vgpr(s32) = COPY $vgpr1
-    %3:vgpr(s64) = COPY $vgpr3_vgpr4
+    %3:vgpr(p1) = COPY $vgpr3_vgpr4
     ; cvt_pkrtz vs
     ; GCN: V_CVT_PKRTZ_F16_F32_e64 0, [[VGPR0]], 0, [[SGPR0]]
@@ -38,8 +38,8 @@ body: |
     %7:vgpr(s32) = G_BITCAST %4
     %8:vgpr(s32) = G_BITCAST %5
     %9:vgpr(s32) = G_BITCAST %6
-    G_STORE %7, %3 :: (store 4 into %ir.global0)
-    G_STORE %8, %3 :: (store 4 into %ir.global0)
-    G_STORE %9, %3 :: (store 4 into %ir.global0)
+    G_STORE %7, %3 :: (store 4 into %ir.global0, addrspace 1)
+    G_STORE %8, %3 :: (store 4 into %ir.global0, addrspace 1)
+    G_STORE %9, %3 :: (store 4 into %ir.global0, addrspace 1)
 ...
 ---
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir
index 86692c3e1d6..a9126854c21 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir
@@ -20,7 +20,7 @@ body: |
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s32) = COPY $sgpr1
     %2:vgpr(s32) = COPY $vgpr0
-    %3:vgpr(s64) = COPY $vgpr3_vgpr4
+    %3:vgpr(p1) = COPY $vgpr3_vgpr4
     ; GCN: [[C1:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1
     ; GCN: [[C4096:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4096
@@ -80,7 +80,7 @@ body: |
     %17:vgpr(s32) = G_ASHR %16, %5
-    G_STORE %17, %3 :: (store 4 into %ir.global0)
+    G_STORE %17, %3 :: (store 4 into %ir.global0, addrspace 1)
 ...
 ---
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-bitcast.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-bitcast.mir
index 88f811160c0..157e7b703d7 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-bitcast.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-bitcast.mir
@@ -18,9 +18,9 @@ body: |
   bb.0:
     liveins: $sgpr0, $vgpr3_vgpr4
     %0:vgpr(s32) = COPY $vgpr0
-    %1:vgpr(s64) = COPY $vgpr3_vgpr4
+    %1:vgpr(p1) = COPY $vgpr3_vgpr4
     %2:vgpr(<2 x s16>) = G_BITCAST %0
     %3:vgpr(s32) = G_BITCAST %2
-    G_STORE %3, %1 :: (store 4 into %ir.global0)
+    G_STORE %3, %1 :: (store 4 into %ir.global0, addrspace 1)
 ...
 ---
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-constant.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-constant.mir
index f848edaf667..19ad8729a02 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-constant.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-constant.mir
@@ -14,8 +14,8 @@ body: |
   bb.0:
     liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
     ; GCN-LABEL: name: constant
-    %0:vgpr(s64) = COPY $vgpr0_vgpr1
-    %1:vgpr(s64) = COPY $vgpr2_vgpr3
+    %0:vgpr(p1) = COPY $vgpr0_vgpr1
+    %1:vgpr(p1) = COPY $vgpr2_vgpr3
     ; GCN: %{{[0-9]+}}:sreg_32 = S_MOV_B32 1
     %2:sreg_32(s32) = G_CONSTANT i32 1
@@ -49,13 +49,13 @@ body: |
     ; GCN: %{{[0-9]+}}:vreg_64 = REG_SEQUENCE [[LO3]], %subreg.sub0, [[HI3]], %subreg.sub1
     %9:vgpr(s64) = G_FCONSTANT double 1.0
-    G_STORE %2, %0 :: (volatile store 4 into %ir.global0)
-    G_STORE %4, %0 :: (volatile store 4 into %ir.global0)
-    G_STORE %6, %0 :: (volatile store 4 into %ir.global0)
-    G_STORE %8, %0 :: (volatile store 4 into %ir.global0)
-    G_STORE %3, %1 :: (volatile store 8 into %ir.global1)
-    G_STORE %5, %1 :: (volatile store 8 into %ir.global1)
-    G_STORE %7, %1 :: (volatile store 8 into %ir.global1)
-    G_STORE %9, %1 :: (volatile store 8 into %ir.global1)
+    G_STORE %2, %0 :: (volatile store 4 into %ir.global0, addrspace 1)
+    G_STORE %4, %0 :: (volatile store 4 into %ir.global0, addrspace 1)
+    G_STORE %6, %0 :: (volatile store 4 into %ir.global0, addrspace 1)
+    G_STORE %8, %0 :: (volatile store 4 into %ir.global0, addrspace 1)
+    G_STORE %3, %1 :: (volatile store 8 into %ir.global1, addrspace 1)
+    G_STORE %5, %1 :: (volatile store 8 into %ir.global1, addrspace 1)
+    G_STORE %7, %1 :: (volatile store 8 into %ir.global1, addrspace 1)
+    G_STORE %9, %1 :: (volatile store 8 into %ir.global1, addrspace 1)
 ...
 ---
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir
index 495acf63472..5c169ca6b61 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy.mir
@@ -19,8 +19,8 @@ body: |
     ; GCN: [[COPY1:%[0-9]+]]:vreg_64 = COPY [[COPY]]
     ; GCN: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN: FLAT_STORE_DWORD [[COPY1]], [[DEF]], 0, 0, 0, implicit $exec, implicit $flat_scr
-    %0:sgpr(s64) = COPY $sgpr2_sgpr3
-    %1:vgpr(s64) = COPY %0
+    %0:sgpr(p1) = COPY $sgpr2_sgpr3
+    %1:vgpr(p1) = COPY %0
     %2:vgpr(s32) = G_IMPLICIT_DEF
     G_STORE %2, %1 :: (store 4 into %ir.global0)
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.mir
index 01a59f05a61..74c83e76aec 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.mir
@@ -16,7 +16,7 @@ body: |
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s32) = COPY $vgpr1
-    %3:vgpr(s64) = COPY $vgpr3_vgpr4
+    %3:vgpr(p1) = COPY $vgpr3_vgpr4
     ; fadd vs
     ; GCN: V_ADD_F32_e64
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir
index b7e472abc93..f28caf2de08 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir
@@ -16,7 +16,7 @@ body: |
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s32) = COPY $vgpr1
-    %3:vgpr(s64) = COPY $vgpr3_vgpr4
+    %3:vgpr(p1) = COPY $vgpr3_vgpr4
     ; fmul vs
     ; GCN: V_MUL_F32_e64
@@ -30,8 +30,8 @@ body: |
     ; GCN: V_MUL_F32_e64
     %6:vgpr(s32) = G_FMUL %1, %2
-    G_STORE %4, %3 :: (store 4 into %ir.global0)
-    G_STORE %5, %3 :: (store 4 into %ir.global0)
-    G_STORE %6, %3 :: (store 4 into %ir.global0)
+    G_STORE %4, %3 :: (store 4 into %ir.global0, addrspace 1)
+    G_STORE %5, %3 :: (store 4 into %ir.global0, addrspace 1)
+    G_STORE %6, %3 :: (store 4 into %ir.global0, addrspace 1)
 ...
 ---
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir
index 07f19c4b34d..950e7a50897 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir
@@ -20,7 +20,7 @@ body: |
     ; GCN: [[VGPR:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr0
-    %2:vgpr(s64) = COPY $vgpr3_vgpr4
+    %2:vgpr(p1) = COPY $vgpr3_vgpr4
     ; fptoui s
     ; GCN: V_CVT_U32_F32_e64 0, [[SGPR]], 0, 0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir
index 0596d8321a6..d6ead4ba808 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir
@@ -14,9 +14,9 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
     ; GCN: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN: FLAT_STORE_DWORD [[COPY]], [[DEF]], 0, 0, 0, implicit $exec, implicit $flat_scr
-    %0:vgpr(s64) = COPY $vgpr3_vgpr4
+    %0:vgpr(p1) = COPY $vgpr3_vgpr4
     %1:vgpr(s32) = G_IMPLICIT_DEF
-    G_STORE %1, %0 :: (store 4)
+    G_STORE %1, %0 :: (store 4, addrspace 1)
 ...
 ---
@@ -31,9 +31,9 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
     ; GCN: [[DEF:%[0-9]+]]:vreg_64 = IMPLICIT_DEF
     ; GCN: FLAT_STORE_DWORDX2 [[COPY]], [[DEF]], 0, 0, 0, implicit $exec, implicit $flat_scr
-    %0:vgpr(s64) = COPY $vgpr3_vgpr4
+    %0:vgpr(p1) = COPY $vgpr3_vgpr4
     %1:vgpr(s64) = G_IMPLICIT_DEF
-    G_STORE %1, %0 :: (store 8)
+    G_STORE %1, %0 :: (store 8, addrspace 1)
 ---
 ---
@@ -63,7 +63,7 @@ body: |
     ; GCN: FLAT_STORE_DWORD [[DEF]], [[V_MOV_B32_e32_]], 0, 0, 0, implicit $exec, implicit $flat_scr
     %0:vgpr(p1) = G_IMPLICIT_DEF
     %1:vgpr(s32) = G_CONSTANT 4
-    G_STORE %1, %0 :: (store 4)
+    G_STORE %1, %0 :: (store 4, addrspace 1)
 ...
 ---
@@ -79,7 +79,7 @@ body: |
     ; GCN: FLAT_STORE_DWORD [[DEF]], [[V_MOV_B32_e32_]], 0, 0, 0, implicit $exec, implicit $flat_scr
     %0:vgpr(p3) = G_IMPLICIT_DEF
     %1:vgpr(s32) = G_CONSTANT 4
-    G_STORE %1, %0 :: (store 4)
+    G_STORE %1, %0 :: (store 4, addrspace 1)
 ...
 ---
@@ -95,5 +95,5 @@ body: |
     ; GCN: FLAT_STORE_DWORD [[DEF]], [[V_MOV_B32_e32_]], 0, 0, 0, implicit $exec, implicit $flat_scr
     %0:vgpr(p4) = G_IMPLICIT_DEF
     %1:vgpr(s32) = G_CONSTANT 4
-    G_STORE %1, %0 :: (store 4)
+    G_STORE %1, %0 :: (store 4, addrspace 1)
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-maxnum.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-maxnum.mir
index a473259201d..67fe61df3e3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-maxnum.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-maxnum.mir
@@ -19,7 +19,7 @@ body: |
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s32) = COPY $vgpr1
-    %3:vgpr(s64) = COPY $vgpr3_vgpr4
+    %3:vgpr(p1) = COPY $vgpr3_vgpr4
     ; GCN: [[SGPR64_0:%[0-9]+]]:sreg_64_xexec = COPY $sgpr10_sgpr11
     ; GCN: [[VGPR64_0:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-minnum.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-minnum.mir
index 0bfe9bb7217..f8132d7a46e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-minnum.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-minnum.mir
@@ -19,7 +19,7 @@ body: |
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s32) = COPY $vgpr1
-    %3:vgpr(s64) = COPY $vgpr3_vgpr4
+    %3:vgpr(p1) = COPY $vgpr3_vgpr4
     ; GCN: [[SGPR64_0:%[0-9]+]]:sreg_64_xexec = COPY $sgpr10_sgpr11
     ; GCN: [[VGPR64_0:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-or.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-or.mir
index 60e1a4cdd5c..4d0de1dff5a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-or.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-or.mir
@@ -19,7 +19,7 @@ body: |
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s32) = COPY $sgpr1
     %2:vgpr(s32) = COPY $vgpr0
-    %3:vgpr(s64) = COPY $vgpr3_vgpr4
+    %3:vgpr(p1) = COPY $vgpr3_vgpr4
     %4:sgpr(s32) = G_CONSTANT i32 1
     %5:sgpr(s32) = G_CONSTANT i32 4096
@@ -39,7 +39,7 @@ body: |
     ; GCN: [[VV:%[0-9]+]]:vgpr_32 = V_OR_B32_e32 [[SV]], [[VGPR0]]
     %9:vgpr(s32) = G_OR %8, %2
-    G_STORE %9, %3 :: (store 4 into %ir.global0)
+    G_STORE %9, %3 :: (store 4 into %ir.global0, addrspace 1)
 ...
 ---
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir
index 42fc095985a..bedcaf01f36 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir
@@ -20,7 +20,7 @@ body: |
     ; GCN: [[VGPR:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr0
-    %2:vgpr(s64) = COPY $vgpr3_vgpr4
+    %2:vgpr(p1) = COPY $vgpr3_vgpr4
     ; sitofp s
     ; GCN: V_CVT_F32_I32_e64 [[SGPR]], 0, 0
diff --git a/llvm/test/CodeGen/MIR/AArch64/invalid-extload.mir b/llvm/test/CodeGen/MIR/AArch64/invalid-extload.mir
deleted file mode 100644
index cce2639dded..00000000000
--- a/llvm/test/CodeGen/MIR/AArch64/invalid-extload.mir
+++ /dev/null
@@ -1,23 +0,0 @@
-# RUN: not llc -mtriple=aarch64-none-linux-gnu -run-pass none -o - %s 2>&1 | FileCheck %s
-
-# CHECK: *** Bad machine code: Generic extload must have a narrower memory type ***
-# CHECK: *** Bad machine code: Generic extload must have a narrower memory type ***
-# CHECK: *** Bad machine code: Generic extload must have a narrower memory type ***
-# CHECK: *** Bad machine code: Generic extload must have a narrower memory type ***
-# CHECK: *** Bad machine code: Generic instruction accessing memory must have one mem operand ***
-# CHECK: *** Bad machine code: Generic instruction accessing memory must have one mem operand ***
-
----
-name: invalid_extload_memory_sizes
-body: |
-  bb.0:
-
-    %0:_(p0) = COPY $x0
-    %1:_(s64) = G_ZEXTLOAD %0(p0) :: (load 8)
-    %2:_(s64) = G_ZEXTLOAD %0(p0) :: (load 16)
-    %3:_(s64) = G_SEXTLOAD %0(p0) :: (load 8)
-    %4:_(s64) = G_SEXTLOAD %0(p0) :: (load 16)
-    %5:_(s64) = G_ZEXTLOAD %0(p0)
-    %6:_(s64) = G_SEXTLOAD %0(p0)
-
-...

