Diffstat (limited to 'llvm/test')

 llvm/test/CodeGen/AMDGPU/load-constant-i16.ll | 138
 llvm/test/CodeGen/AMDGPU/load-global-i16.ll   | 331
 llvm/test/CodeGen/AMDGPU/store-private.ll     | 743

 3 files changed, 1003 insertions, 209 deletions
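All three tests exercise the same load-widen-store (or truncate-store) shape that the FileCheck patterns below refer to. For orientation, a minimal sketch of the kind of function being checked — the function name here is illustrative, not one from the diff:

  define void @example_zextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(2)* %in) {
    ; load 4 x i16 from constant memory (addrspace 2), zero-extend each
    ; lane to i32, then store the widened vector to global memory (addrspace 1)
    %load = load <4 x i16>, <4 x i16> addrspace(2)* %in
    %ext = zext <4 x i16> %load to <4 x i32>
    store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
    ret void
  }

The EG/CM check lines then pin down how the r600 backend lowers the widening (BFE_UINT/AND_INT for zext, BFE_INT/ASHR for sext) and where the result is stored (MEM_RAT_CACHELESS).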
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll index 628d285141b..eb79767e62b 100644 --- a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll +++ b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll @@ -137,8 +137,8 @@ define void @constant_sextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x ; v2i16 is naturally 4 byte aligned ; EG: VTX_READ_32 [[DST:T[0-9]\.[XYZW]]], [[DST]], 0, #1 -; TODO: This should use DST, but for some there are redundant MOVs -; EG: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, {{PV.[XYZW]}}, literal +; EG: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], literal +; EG: 16 ; EG: 16 define void @constant_zextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(2)* %in) #0 { %load = load <2 x i16>, <2 x i16> addrspace(2)* %in @@ -153,11 +153,11 @@ define void @constant_zextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x ; GCN-DAG: s_sext_i32_i16 ; v2i16 is naturally 4 byte aligned +; EG: MEM_RAT_CACHELESS STORE_RAW [[ST:T[0-9]]].XY, {{T[0-9].[XYZW]}}, ; EG: VTX_READ_32 [[DST:T[0-9]\.[XYZW]]], [[DST]], 0, #1 -; TODO: These should use DST, but for some there are redundant MOVs -; TODO: We should also use ASHR instead of LSHR + BFE -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{PV.[XYZW]}}, 0.0, literal -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{PV.[XYZW]}}, 0.0, literal +; EG-DAG: BFE_INT {{[* ]*}}[[ST]].X, [[DST]], 0.0, literal +; TODO: We should use ASHR instead of LSHR + BFE +; EG-DAG: BFE_INT {{[* ]*}}[[ST]].Y, {{PV\.[XYZW]}}, 0.0, literal ; EG-DAG: 16 ; EG-DAG: 16 define void @constant_sextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(2)* %in) #0 { @@ -167,16 +167,23 @@ define void @constant_sextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x ret void } -; FUNC-LABEL: {{^}}constant_constant_zextload_v3i16_to_v3i32: +; FUNC-LABEL: {{^}}constant_zextload_v3i16_to_v3i32: ; GCN: s_load_dwordx2 ; v3i16 is naturally 8 byte aligned -; EG-DAG: VTX_READ_32 [[DST_HI:T[0-9]\.[XYZW]]], [[DST_HI]], 0, #1 -; EG-DAG: VTX_READ_16 [[DST_LO:T[0-9]\.[XYZW]]], [[DST_LO]], 4, #1 +; EG-DAG: MEM_RAT_CACHELESS STORE_RAW [[ST_LO:T[0-9]]].XY, {{T[0-9].[XYZW]}}, +; EG-DAG: MEM_RAT_CACHELESS STORE_RAW [[ST_HI:T[0-9]]].X, {{T[0-9].[XYZW]}}, +; EG: CF_END +; EG-DAG: VTX_READ_32 [[DST_LO:T[0-9]\.[XYZW]]], {{T[0-9]\.[XYZW]}}, 0, #1 +; EG-DAG: VTX_READ_16 [[DST_HI:T[0-9]\.[XYZW]]], {{T[0-9]\.[XYZW]}}, 4, #1 ; TODO: This should use DST, but for some there are redundant MOVs -; EG: LSHR {{[* ]*}}{{T[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, literal -; EG: 16 -define void @constant_constant_zextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(2)* %in) { +; EG-DAG: LSHR {{[* ]*}}[[ST_LO]].Y, {{T[0-9]\.[XYZW]}}, literal +; EG-DAG: 16 +; EG-DAG: AND_INT {{[* ]*}}[[ST_LO]].X, {{T[0-9]\.[XYZW]}}, literal +; EG-DAG: AND_INT {{[* ]*}}[[ST_HI]].X, {{T[0-9]\.[XYZW]}}, literal +; EG-DAG: 65535 +; EG-DAG: 65535 +define void @constant_zextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(2)* %in) { entry: %ld = load <3 x i16>, <3 x i16> addrspace(2)* %in %ext = zext <3 x i16> %ld to <3 x i32> @@ -184,19 +191,20 @@ entry: ret void } -; FUNC-LABEL: {{^}}constant_constant_sextload_v3i16_to_v3i32: +; FUNC-LABEL: {{^}}constant_sextload_v3i16_to_v3i32: ; GCN: s_load_dwordx2 +; EG-DAG: MEM_RAT_CACHELESS STORE_RAW [[ST_LO:T[0-9]]].XY, {{T[0-9].[XYZW]}}, +; EG-DAG: MEM_RAT_CACHELESS STORE_RAW [[ST_HI:T[0-9]]].X, {{T[0-9].[XYZW]}}, ; v3i16 is naturally 8 byte aligned -; EG-DAG: 
VTX_READ_32 [[DST_HI:T[0-9]\.[XYZW]]], [[DST_HI]], 0, #1 -; EG-DAG: VTX_READ_16 [[DST_LO:T[0-9]\.[XYZW]]], [[DST_LO]], 4, #1 -; TODO: These should use DST, but for some there are redundant MOVs -; EG-DAG: ASHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{PV.[XYZW]}}, literal -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, 0.0, literal -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, 0.0, literal +; EG-DAG: VTX_READ_32 [[DST_HI:T[0-9]\.[XYZW]]], [[PTR:T[0-9]\.[XYZW]]], 0, #1 +; EG-DAG: VTX_READ_16 [[DST_LO:T[0-9]\.[XYZW]]], {{T[0-9]\.[XYZW]}}, 4, #1 +; EG-DAG: ASHR {{[* ]*}}[[ST_LO]].Y, {{T[0-9]\.[XYZW]}}, literal +; EG-DAG: BFE_INT {{[* ]*}}[[ST_LO]].X, {{T[0-9]\.[XYZW]}}, 0.0, literal +; EG-DAG: BFE_INT {{[* ]*}}[[ST_HI]].X, {{T[0-9]\.[XYZW]}}, 0.0, literal ; EG-DAG: 16 ; EG-DAG: 16 -define void @constant_constant_sextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(2)* %in) { +define void @constant_sextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(2)* %in) { entry: %ld = load <3 x i16>, <3 x i16> addrspace(2)* %in %ext = sext <3 x i16> %ld to <3 x i32> @@ -204,20 +212,24 @@ entry: ret void } -; FUNC-LABEL: {{^}}constant_constant_zextload_v4i16_to_v4i32: +; FUNC-LABEL: {{^}}constant_zextload_v4i16_to_v4i32: ; GCN: s_load_dwordx2 ; GCN-DAG: s_and_b32 ; GCN-DAG: s_lshr_b32 ; v4i16 is naturally 8 byte aligned -; EG: VTX_READ_64 [[DST:T[0-9]\.XY]], {{T[0-9].[XYZW]}}, 0, #1 -; TODO: These should use DST, but for some there are redundant MOVs -; EG-DAG: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, {{PV.[XYZW]}}, literal +; EG: MEM_RAT_CACHELESS STORE_RAW [[ST:T[0-9]]].XYZW, {{T[0-9].[XYZW]}} +; EG: VTX_READ_64 [[LD:T[0-9]]].XY, {{T[0-9].[XYZW]}}, 0, #1 +; TODO: This should use LD, but for some there are redundant MOVs +; EG-DAG: BFE_UINT {{[* ]*}}[[ST]].Y, {{.*\.[XYZW]}}, literal +; EG-DAG: BFE_UINT {{[* ]*}}[[ST]].W, {{.*\.[XYZW]}}, literal ; EG-DAG: 16 -; EG-DAG: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, literal -; EG-DAG: AND_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, literal ; EG-DAG: 16 -define void @constant_constant_zextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(2)* %in) #0 { +; EG-DAG: AND_INT {{[* ]*}}[[ST]].X, {{T[0-9]\.[XYZW]}}, literal +; EG-DAG: AND_INT {{[* ]*}}[[ST]].Z, {{T[0-9]\.[XYZW]}}, literal +; EG-DAG: 65535 +; EG-DAG: 65535 +define void @constant_zextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(2)* %in) #0 { %load = load <4 x i16>, <4 x i16> addrspace(2)* %in %ext = zext <4 x i16> %load to <4 x i32> store <4 x i32> %ext, <4 x i32> addrspace(1)* %out @@ -230,13 +242,14 @@ define void @constant_constant_zextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* % ; GCN-DAG: s_sext_i32_i16 ; v4i16 is naturally 8 byte aligned -; EG: VTX_READ_64 [[DST:T[0-9]\.XY]], {{T[0-9].[XYZW]}}, 0, #1 -; TODO: These should use DST, but for some there are redundant MOVs +; EG: MEM_RAT_CACHELESS STORE_RAW [[ST:T[0-9]]].XYZW, {{T[0-9]\.[XYZW]}}, +; EG: VTX_READ_64 [[DST:T[0-9]]].XY, {{T[0-9].[XYZW]}}, 0, #1 +; TODO: This should use LD, but for some there are redundant MOVs +; EG-DAG: BFE_INT {{[* ]*}}[[ST]].X, {{.*}}, 0.0, literal +; EG-DAG: BFE_INT {{[* ]*}}[[ST]].Z, {{.*}}, 0.0, literal ; TODO: We should use ASHR instead of LSHR + BFE -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal -; EG-DAG: BFE_INT {{[* 
]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal +; EG-DAG: BFE_INT {{[* ]*}}[[ST]].Y, {{.*}}, 0.0, literal +; EG-DAG: BFE_INT {{[* ]*}}[[ST]].W, {{.*}}, 0.0, literal ; EG-DAG: 16 ; EG-DAG: 16 ; EG-DAG: 16 @@ -254,24 +267,27 @@ define void @constant_sextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x ; GCN-DAG: s_lshr_b32 ; v8i16 is naturally 16 byte aligned -; EG: VTX_READ_128 [[DST:T[0-9]\.XYZW]], {{T[0-9].[XYZW]}}, 0, #1 -; TODO: These should use DST, but for some there are redundant MOVs -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: 16 -; EG-DAG: 16 -; EG-DAG: 16 -; EG-DAG: 16 +; EG: MEM_RAT_CACHELESS STORE_RAW [[ST_HI:T[0-9]]].XYZW, {{T[0-9]+.[XYZW]}}, +; EG: MEM_RAT_CACHELESS STORE_RAW [[ST_LO:T[0-9]]].XYZW, {{T[0-9]+.[XYZW]}}, +; EG: VTX_READ_128 [[DST:T[0-9]]].XYZW, {{T[0-9].[XYZW]}}, 0, #1 +; TODO: These should use LSHR instead of BFE_UINT +; TODO: This should use DST, but for some there are redundant MOVs +; EG-DAG: BFE_UINT {{[* ]*}}[[ST_LO]].Y, {{.*}}, literal +; EG-DAG: BFE_UINT {{[* ]*}}[[ST_LO]].W, {{.*}}, literal +; EG-DAG: BFE_UINT {{[* ]*}}[[ST_HI]].Y, {{.*}}, literal +; EG-DAG: BFE_UINT {{[* ]*}}[[ST_HI]].W, {{.*}}, literal +; EG-DAG: AND_INT {{[* ]*}}[[ST_LO]].X, {{.*}}, literal +; EG-DAG: AND_INT {{[* ]*}}[[ST_LO]].Z, {{.*}}, literal +; EG-DAG: AND_INT {{[* ]*}}[[ST_HI]].X, {{.*}}, literal +; EG-DAG: AND_INT {{[* ]*}}[[ST_HI]].Z, {{.*}}, literal ; EG-DAG: 16 ; EG-DAG: 16 ; EG-DAG: 16 ; EG-DAG: 16 +; EG-DAG: 65535 +; EG-DAG: 65535 +; EG-DAG: 65535 +; EG-DAG: 65535 define void @constant_zextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(2)* %in) #0 { %load = load <8 x i16>, <8 x i16> addrspace(2)* %in %ext = zext <8 x i16> %load to <8 x i32> @@ -285,17 +301,19 @@ define void @constant_zextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x ; GCN-DAG: s_sext_i32_i16 ; v8i16 is naturally 16 byte aligned -; EG: VTX_READ_128 [[DST:T[0-9]\.XYZW]], {{T[0-9].[XYZW]}}, 0, #1 -; TODO: These should use DST, but for some there are redundant MOVs -; TODO: We should use ASHR instead of LSHR + BFE -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal +; EG: MEM_RAT_CACHELESS STORE_RAW [[ST_HI:T[0-9]]].XYZW, {{T[0-9]+.[XYZW]}}, +; EG: MEM_RAT_CACHELESS STORE_RAW [[ST_LO:T[0-9]]].XYZW, {{T[0-9]+.[XYZW]}}, +; EG: VTX_READ_128 [[DST:T[0-9]]].XYZW, {{T[0-9].[XYZW]}}, 0, #1 +; TODO: 4 of these should use ASHR instead of LSHR + BFE_INT +; TODO: This should use DST, but for some there are redundant MOVs +; EG-DAG: BFE_INT {{[* ]*}}[[ST_LO]].Y, {{.*}}, 0.0, literal +; EG-DAG: BFE_INT {{[* ]*}}[[ST_LO]].W, {{.*}}, 0.0, literal +; EG-DAG: BFE_INT {{[* 
]*}}[[ST_HI]].Y, {{.*}}, 0.0, literal +; EG-DAG: BFE_INT {{[* ]*}}[[ST_HI]].W, {{.*}}, 0.0, literal +; EG-DAG: BFE_INT {{[* ]*}}[[ST_LO]].X, {{.*}}, 0.0, literal +; EG-DAG: BFE_INT {{[* ]*}}[[ST_LO]].Z, {{.*}}, 0.0, literal +; EG-DAG: BFE_INT {{[* ]*}}[[ST_HI]].X, {{.*}}, 0.0, literal +; EG-DAG: BFE_INT {{[* ]*}}[[ST_HI]].Z, {{.*}}, 0.0, literal ; EG-DAG: 16 ; EG-DAG: 16 ; EG-DAG: 16 @@ -444,7 +462,7 @@ define void @constant_zextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace( ; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 ; EG: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal -; TODO: Why not 15 ? +; TODO: These could be expanded earlier using ASHR 15 ; EG: 31 define void @constant_sextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(2)* %in) #0 { %a = load i16, i16 addrspace(2)* %in @@ -468,7 +486,7 @@ define void @constant_zextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x ; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 ; EG: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal -; TODO: Why not 15 ? +; TODO: These could be expanded earlier using ASHR 15 ; EG: 31 define void @constant_sextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(2)* %in) #0 { %load = load <1 x i16>, <1 x i16> addrspace(2)* %in diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll index f398dd32e06..7bd131e6516 100644 --- a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll +++ b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll @@ -1,8 +1,8 @@ ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-NOHSA,GCN-NOHSA-SI,FUNC %s ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-HSA,FUNC %s ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-NOHSA,GCN-NOHSA-VI,FUNC %s -; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s -; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s +; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=EGCM -check-prefix=FUNC %s +; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=CM -check-prefix=EGCM -check-prefix=FUNC %s ; FIXME: r600 is broken because the bigger testcases spill and it's not implemented @@ -10,7 +10,7 @@ ; GCN-NOHSA: buffer_load_ushort v{{[0-9]+}} ; GCN-HSA: flat_load_ushort -; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 +; EGCM: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 define void @global_load_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) { entry: %ld = load i16, i16 addrspace(1)* %in @@ -22,7 +22,7 @@ entry: ; GCN-NOHSA: buffer_load_dword v ; GCN-HSA: flat_load_dword v -; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 +; EGCM: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 define void @global_load_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) { entry: %ld = load <2 x i16>, <2 x i16> addrspace(1)* %in @@ -34,8 +34,8 @@ entry: ; GCN-NOHSA: buffer_load_dwordx2 v ; GCN-HSA: flat_load_dwordx2 v -; EG-DAG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 -; EG-DAG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 4, #1 +; EGCM-DAG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 +; EGCM-DAG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 4, #1 define void @global_load_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> addrspace(1)* %in) { entry: %ld = load <3 x i16>, <3 x i16> addrspace(1)* %in @@ -47,7 
+47,7 @@ entry: ; GCN-NOHSA: buffer_load_dwordx2 ; GCN-HSA: flat_load_dwordx2 -; EG: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1 +; EGCM: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1 define void @global_load_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) { entry: %ld = load <4 x i16>, <4 x i16> addrspace(1)* %in @@ -59,7 +59,7 @@ entry: ; GCN-NOHSA: buffer_load_dwordx4 ; GCN-HSA: flat_load_dwordx4 -; EG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1 +; EGCM: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1 define void @global_load_v8i16(<8 x i16> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) { entry: %ld = load <8 x i16>, <8 x i16> addrspace(1)* %in @@ -74,8 +74,8 @@ entry: ; GCN-HSA: flat_load_dwordx4 ; GCN-HSA: flat_load_dwordx4 -; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1 -; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1 +; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1 +; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1 define void @global_load_v16i16(<16 x i16> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) { entry: %ld = load <16 x i16>, <16 x i16> addrspace(1)* %in @@ -90,7 +90,7 @@ entry: ; GCN-HSA: flat_load_ushort ; GCN-HSA: flat_store_dword -; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 +; EGCM: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 define void @global_zextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in) #0 { %a = load i16, i16 addrspace(1)* %in %ext = zext i16 %a to i32 @@ -105,9 +105,9 @@ define void @global_zextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1) ; GCN-HSA: flat_load_sshort ; GCN-HSA: flat_store_dword -; EG: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], T{{[0-9]+}}.X, 0, #1 -; EG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], 0.0, literal -; EG: 16 +; EGCM: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], T{{[0-9]+}}.X, 0, #1 +; EGCM: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], 0.0, literal +; EGCM: 16 define void @global_sextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in) #0 { %a = load i16, i16 addrspace(1)* %in %ext = sext i16 %a to i32 @@ -119,7 +119,7 @@ define void @global_sextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1) ; GCN-NOHSA: buffer_load_ushort ; GCN-HSA: flat_load_ushort -; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 +; EGCM: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 define void @global_zextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(1)* %in) #0 { %load = load <1 x i16>, <1 x i16> addrspace(1)* %in %ext = zext <1 x i16> %load to <1 x i32> @@ -131,9 +131,9 @@ define void @global_zextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i ; GCN-NOHSA: buffer_load_sshort ; GCN-HSA: flat_load_sshort -; EG: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], T{{[0-9]+}}.X, 0, #1 -; EG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], 0.0, literal -; EG: 16 +; EGCM: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], T{{[0-9]+}}.X, 0, #1 +; EGCM: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], 0.0, literal +; EGCM: 16 define void @global_sextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(1)* %in) #0 { %load = load <1 x i16>, <1 x i16> addrspace(1)* %in %ext = sext <1 x i16> %load to <1 x i32> @@ -145,10 +145,9 @@ define void @global_sextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i ; GCN-NOHSA: buffer_load_dword ; GCN-HSA: flat_load_dword -; EG: VTX_READ_32 [[DST:T[0-9]\.[XYZW]]], [[DST]], 0, #1 -; TODO: This should use DST, but for some there are redundant MOVs -; EG: 
BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, {{PV.[XYZW]}}, literal -; EG: 16 +; EGCM: VTX_READ_32 [[DST:T[0-9]\.[XYZW]]], [[DST]], 0, #1 +; EGCM: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], literal +; EGCM: 16 define void @global_zextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 { %load = load <2 x i16>, <2 x i16> addrspace(1)* %in %ext = zext <2 x i16> %load to <2 x i32> @@ -161,13 +160,14 @@ define void @global_zextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i ; GCN-HSA: flat_load_dword -; EG: VTX_READ_32 [[DST:T[0-9]\.[XYZW]]], [[DST]], 0, #1 -; TODO: These should use DST, but for some there are redundant MOVs -; TODO: We should also use ASHR instead of LSHR + BFE -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{PV.[XYZW]}}, 0.0, literal -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{PV.[XYZW]}}, 0.0, literal -; EG-DAG: 16 -; EG-DAG: 16 +; EG: MEM_RAT_CACHELESS STORE_RAW [[ST:T[0-9]]].XY, {{T[0-9]\.[XYZW]}}, +; CM: MEM_RAT_CACHELESS STORE_DWORD [[ST:T[0-9]]], {{T[0-9]\.[XYZW]}} +; EGCM: VTX_READ_32 [[DST:T[0-9].[XYZW]]], [[DST]], 0, #1 +; TODO: This should use ASHR instead of LSHR + BFE +; EGCM-DAG: BFE_INT {{[* ]*}}[[ST]].X, [[DST]], 0.0, literal +; EGCM-DAG: BFE_INT {{[* ]*}}[[ST]].Y, {{PV.[XYZW]}}, 0.0, literal +; EGCM-DAG: 16 +; EGCM-DAG: 16 define void @global_sextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 { %load = load <2 x i16>, <2 x i16> addrspace(1)* %in %ext = sext <2 x i16> %load to <2 x i32> @@ -175,16 +175,22 @@ define void @global_sextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i ret void } -; FUNC-LABEL: {{^}}global_global_zextload_v3i16_to_v3i32: +; FUNC-LABEL: {{^}}global_zextload_v3i16_to_v3i32: ; GCN-NOHSA: buffer_load_dwordx2 ; GCN-HSA: flat_load_dwordx2 -; EG-DAG: VTX_READ_32 [[DST_HI:T[0-9]\.[XYZW]]], [[DST_HI]], 0, #1 -; EG-DAG: VTX_READ_16 [[DST_LO:T[0-9]\.[XYZW]]], [[DST_LO]], 4, #1 +; CM: MEM_RAT_CACHELESS STORE_DWORD [[ST_HI:T[0-9]]].X, {{T[0-9]\.[XYZW]}} +; CM: MEM_RAT_CACHELESS STORE_DWORD [[ST_LO:T[0-9]]], {{T[0-9]\.[XYZW]}} +; EG: MEM_RAT_CACHELESS STORE_RAW [[ST_HI:T[0-9]]].X, {{T[0-9]\.[XYZW]}}, +; EG: MEM_RAT_CACHELESS STORE_RAW [[ST_LO:T[0-9]]].XY, {{T[0-9]\.[XYZW]}}, +; EGCM-DAG: VTX_READ_32 [[DST_LO:T[0-9]\.[XYZW]]], {{T[0-9]\.[XYZW]}}, 0, #1 +; EGCM-DAG: VTX_READ_16 [[DST_HI:T[0-9]\.[XYZW]]], {{T[0-9]\.[XYZW]}}, 4, #1 ; TODO: This should use DST, but for some there are redundant MOVs -; EG: LSHR {{[* ]*}}{{T[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, literal -; EG: 16 -define void @global_global_zextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(1)* %in) { +; EGCM: LSHR {{[* ]*}}[[ST_LO]].Y, {{T[0-9]\.[XYZW]}}, literal +; EGCM: 16 +; EGCM: AND_INT {{[* ]*}}[[ST_LO]].X, {{T[0-9]\.[XYZW]}}, literal +; EGCM: AND_INT {{[* ]*}}[[ST_HI]].X, [[DST_HI]], literal +define void @global_zextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(1)* %in) { entry: %ld = load <3 x i16>, <3 x i16> addrspace(1)* %in %ext = zext <3 x i16> %ld to <3 x i32> @@ -192,19 +198,23 @@ entry: ret void } -; FUNC-LABEL: {{^}}global_global_sextload_v3i16_to_v3i32: +; FUNC-LABEL: {{^}}global_sextload_v3i16_to_v3i32: ; GCN-NOHSA: buffer_load_dwordx2 ; GCN-HSA: flat_load_dwordx2 -; EG-DAG: VTX_READ_32 [[DST_HI:T[0-9]\.[XYZW]]], [[DST_HI]], 0, #1 -; EG-DAG: VTX_READ_16 [[DST_LO:T[0-9]\.[XYZW]]], [[DST_LO]], 4, #1 -; TODO: These should use DST, but for some there are redundant MOVs -; EG-DAG: ASHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{PV.[XYZW]}}, literal -; EG-DAG: 
BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, 0.0, literal -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, 0.0, literal -; EG-DAG: 16 -; EG-DAG: 16 -define void @global_global_sextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(1)* %in) { +; CM: MEM_RAT_CACHELESS STORE_DWORD [[ST_HI:T[0-9]]].X, {{T[0-9]\.[XYZW]}} +; CM: MEM_RAT_CACHELESS STORE_DWORD [[ST_LO:T[0-9]]], {{T[0-9]\.[XYZW]}} +; EG: MEM_RAT_CACHELESS STORE_RAW [[ST_HI:T[0-9]]].X, {{T[0-9]\.[XYZW]}}, +; EG: MEM_RAT_CACHELESS STORE_RAW [[ST_LO:T[0-9]]].XY, {{T[0-9]\.[XYZW]}}, +; EGCM-DAG: VTX_READ_32 [[DST_LO:T[0-9]\.[XYZW]]], {{T[0-9].[XYZW]}}, 0, #1 +; EGCM-DAG: VTX_READ_16 [[DST_HI:T[0-9]\.[XYZW]]], {{T[0-9].[XYZW]}}, 4, #1 +; TODO: This should use DST, but for some there are redundant MOVs +; EGCM-DAG: ASHR {{[* ]*}}[[ST_LO]].Y, {{T[0-9]\.[XYZW]}}, literal +; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_LO]].X, {{T[0-9]\.[XYZW]}}, 0.0, literal +; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_HI]].X, [[DST_HI]], 0.0, literal +; EGCM-DAG: 16 +; EGCM-DAG: 16 +define void @global_sextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(1)* %in) { entry: %ld = load <3 x i16>, <3 x i16> addrspace(1)* %in %ext = sext <3 x i16> %ld to <3 x i32> @@ -212,19 +222,22 @@ entry: ret void } -; FUNC-LABEL: {{^}}global_global_zextload_v4i16_to_v4i32: +; FUNC-LABEL: {{^}}global_zextload_v4i16_to_v4i32: ; GCN-NOHSA: buffer_load_dwordx2 ; GCN-HSA: flat_load_dwordx2 -; EG: VTX_READ_64 [[DST:T[0-9]\.XY]], {{T[0-9].[XYZW]}}, 0, #1 -; TODO: These should use DST, but for some there are redundant MOVs -; EG-DAG: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, literal -; EG-DAG: 16 -; EG-DAG: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, literal -; EG-DAG: AND_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, literal -; EG-DAG: 16 -define void @global_global_zextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 { +; CM: MEM_RAT_CACHELESS STORE_DWORD [[ST:T[0-9]]], {{T[0-9]\.[XYZW]}} +; EG: MEM_RAT_CACHELESS STORE_RAW [[ST:T[0-9]]].XYZW, {{T[0-9]\.[XYZW]}}, +; EGCM: VTX_READ_64 [[DST:T[0-9]]].XY, {{T[0-9].[XYZW]}}, 0, #1 +; TODO: This should use DST, but for some there are redundant MOVs +; EGCM-DAG: BFE_UINT {{[* ]*}}[[ST]].Y, {{.*}}, literal +; EGCM-DAG: 16 +; EGCM-DAG: BFE_UINT {{[* ]*}}[[ST]].W, {{.*}}, literal +; EGCM-DAG: AND_INT {{[* ]*}}[[ST]].X, {{.*}}, literal +; EGCM-DAG: AND_INT {{[* ]*}}[[ST]].Z, {{.*}}, literal +; EGCM-DAG: 16 +define void @global_zextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 { %load = load <4 x i16>, <4 x i16> addrspace(1)* %in %ext = zext <4 x i16> %load to <4 x i32> store <4 x i32> %ext, <4 x i32> addrspace(1)* %out @@ -236,17 +249,19 @@ define void @global_global_zextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, ; GCN-HSA: flat_load_dwordx2 -; EG: VTX_READ_64 [[DST:T[0-9]\.XY]], {{T[0-9].[XYZW]}}, 0, #1 -; TODO: These should use DST, but for some there are redundant MOVs +; CM: MEM_RAT_CACHELESS STORE_DWORD [[ST:T[0-9]]], {{T[0-9]\.[XYZW]}} +; EG: MEM_RAT_CACHELESS STORE_RAW [[ST:T[0-9]]].XYZW, {{T[0-9]\.[XYZW]}}, +; EGCM: VTX_READ_64 [[DST:T[0-9]]].XY, {{T[0-9].[XYZW]}}, 0, #1 ; TODO: We should use ASHR instead of LSHR + BFE -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal -; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, 
literal -; EG-DAG: 16 -; EG-DAG: 16 -; EG-DAG: 16 -; EG-DAG: 16 +; TODO: This should use DST, but for some there are redundant MOVs +; EGCM-DAG: BFE_INT {{[* ]*}}[[ST]].X, {{.*}}, 0.0, literal +; EGCM-DAG: BFE_INT {{[* ]*}}[[ST]].Y, {{.*}}, 0.0, literal +; EGCM-DAG: BFE_INT {{[* ]*}}[[ST]].Z, {{.*}}, 0.0, literal +; EGCM-DAG: BFE_INT {{[* ]*}}[[ST]].W, {{.*}}, 0.0, literal +; EGCM-DAG: 16 +; EGCM-DAG: 16 +; EGCM-DAG: 16 +; EGCM-DAG: 16 define void @global_sextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 { %load = load <4 x i16>, <4 x i16> addrspace(1)* %in %ext = sext <4 x i16> %load to <4 x i32> @@ -258,16 +273,29 @@ define void @global_sextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i ; GCN-NOHSA: buffer_load_dwordx4 ; GCN-HSA: flat_load_dwordx4 -; EG: VTX_READ_128 [[DST:T[0-9]\.XYZW]], {{T[0-9].[XYZW]}}, 0, #1 -; TODO: These should use DST, but for some there are redundant MOVs -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: 16 -; EG-DAG: 16 -; EG-DAG: 16 -; EG-DAG: 16 +; CM-DAG: MEM_RAT_CACHELESS STORE_DWORD [[ST_LO:T[0-9]]], {{T[0-9]\.[XYZW]}} +; CM-DAG: MEM_RAT_CACHELESS STORE_DWORD [[ST_HI:T[0-9]]], {{T[0-9]\.[XYZW]}} +; EG-DAG: MEM_RAT_CACHELESS STORE_RAW [[ST_LO:T[0-9]]].XYZW, {{T[0-9]\.[XYZW]}}, +; EG-DAG: MEM_RAT_CACHELESS STORE_RAW [[ST_HI:T[0-9]]].XYZW, {{T[0-9]\.[XYZW]}}, +; EGCM: CF_END +; EGCM: VTX_READ_128 [[DST:T[0-9]]].XYZW, {{T[0-9].[XYZW]}}, 0, #1 +; TODO: These should use LSHR instead of BFE_UINT +; EGCM-DAG: BFE_UINT {{[* ]*}}[[ST_LO]].Y, {{.*}}, literal +; EGCM-DAG: BFE_UINT {{[* ]*}}[[ST_LO]].W, {{.*}}, literal +; EGCM-DAG: BFE_UINT {{[* ]*}}[[ST_HI]].Y, {{.*}}, literal +; EGCM-DAG: BFE_UINT {{[* ]*}}[[ST_HI]].W, {{.*}}, literal +; EGCM-DAG: AND_INT {{[* ]*}}[[ST_LO]].X, {{.*}}, literal +; EGCM-DAG: AND_INT {{[* ]*}}[[ST_LO]].Z, {{.*}}, literal +; EGCM-DAG: AND_INT {{[* ]*}}[[ST_HI]].X, {{.*}}, literal +; EGCM-DAG: AND_INT {{[* ]*}}[[ST_HI]].Z, {{.*}}, literal +; EGCM-DAG: 65535 +; EGCM-DAG: 65535 +; EGCM-DAG: 65535 +; EGCM-DAG: 65535 +; EGCM-DAG: 16 +; EGCM-DAG: 16 +; EGCM-DAG: 16 +; EGCM-DAG: 16 define void @global_zextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) #0 { %load = load <8 x i16>, <8 x i16> addrspace(1)* %in %ext = zext <8 x i16> %load to <8 x i32> @@ -279,24 +307,29 @@ define void @global_zextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i ; GCN-NOHSA: buffer_load_dwordx4 ; GCN-HSA: flat_load_dwordx4 -; EG: VTX_READ_128 [[DST:T[0-9]\.XYZW]], {{T[0-9].[XYZW]}}, 0, #1 -; TODO: These should use DST, but for some there are redundant MOVs -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal -; EG-DAG: 16 -; EG-DAG: 16 -; EG-DAG: 16 -; EG-DAG: 16 -; EG-DAG: 16 -; EG-DAG: 16 -; EG-DAG: 16 -; EG-DAG: 16 +; CM-DAG: MEM_RAT_CACHELESS STORE_DWORD [[ST_LO:T[0-9]]], {{T[0-9]\.[XYZW]}} +; CM-DAG: MEM_RAT_CACHELESS STORE_DWORD [[ST_HI:T[0-9]]], 
{{T[0-9]\.[XYZW]}} +; EG-DAG: MEM_RAT_CACHELESS STORE_RAW [[ST_LO:T[0-9]]].XYZW, {{T[0-9]\.[XYZW]}}, +; EG-DAG: MEM_RAT_CACHELESS STORE_RAW [[ST_HI:T[0-9]]].XYZW, {{T[0-9]\.[XYZW]}}, +; EGCM: CF_END +; EGCM: VTX_READ_128 [[DST:T[0-9]]].XYZW, {{T[0-9].[XYZW]}}, 0, #1 +; TODO: These should use ASHR instead of LSHR + BFE_INT +; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_LO]].Y, {{.*}}, 0.0, literal +; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_LO]].W, {{.*}}, 0.0, literal +; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_HI]].Y, {{.*}}, 0.0, literal +; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_HI]].W, {{.*}}, 0.0, literal +; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_LO]].X, {{.*}}, 0.0, literal +; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_LO]].Z, {{.*}}, 0.0, literal +; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_HI]].X, {{.*}}, 0.0, literal +; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_HI]].Z, {{.*}}, 0.0, literal +; EGCM-DAG: 16 +; EGCM-DAG: 16 +; EGCM-DAG: 16 +; EGCM-DAG: 16 +; EGCM-DAG: 16 +; EGCM-DAG: 16 +; EGCM-DAG: 16 +; EGCM-DAG: 16 define void @global_sextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) #0 { %load = load <8 x i16>, <8 x i16> addrspace(1)* %in %ext = sext <8 x i16> %load to <8 x i32> @@ -311,8 +344,8 @@ define void @global_sextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i ; GCN-HSA: flat_load_dwordx4 ; GCN-HSA: flat_load_dwordx4 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1 define void @global_zextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) #0 { %load = load <16 x i16>, <16 x i16> addrspace(1)* %in %ext = zext <16 x i16> %load to <16 x i32> @@ -322,8 +355,8 @@ define void @global_zextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 ; FUNC-LABEL: {{^}}global_sextload_v16i16_to_v16i32: -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1 define void @global_sextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) #0 { %load = load <16 x i16>, <16 x i16> addrspace(1)* %in %ext = sext <16 x i16> %load to <16 x i32> @@ -342,10 +375,10 @@ define void @global_sextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 ; GCN-HSA: flat_load_dwordx4 ; GCN-HSA: flat_load_dwordx4 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 32, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 48, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 32, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 48, #1 define void @global_zextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(1)* %in) #0 { %load = load <32 x i16>, <32 x i16> addrspace(1)* %in %ext = zext <32 x i16> %load to <32 x i32> @@ -364,10 +397,10 @@ define void @global_zextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 ; GCN-HSA: flat_load_dwordx4 ; GCN-HSA: 
flat_load_dwordx4 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 32, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 48, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 32, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 48, #1 define void @global_sextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(1)* %in) #0 { %load = load <32 x i16>, <32 x i16> addrspace(1)* %in %ext = sext <32 x i16> %load to <32 x i32> @@ -394,14 +427,14 @@ define void @global_sextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 ; GCN-HSA: flat_load_dwordx4 ; GCN-HSA: flat_load_dwordx4 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 32, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 48, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 64, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 80, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 96, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 112, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 32, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 48, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 64, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 80, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 96, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 112, #1 define void @global_zextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(1)* %in) #0 { %load = load <64 x i16>, <64 x i16> addrspace(1)* %in %ext = zext <64 x i16> %load to <64 x i32> @@ -411,14 +444,14 @@ define void @global_zextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 ; FUNC-LABEL: {{^}}global_sextload_v64i16_to_v64i32: -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 32, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 48, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 64, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 80, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 96, #1 -; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 112, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 32, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 48, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 64, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 80, #1 +; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 96, #1 +; EGCM-DAG: VTX_READ_128 
{{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 112, #1 define void @global_sextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(1)* %in) #0 { %load = load <64 x i16>, <64 x i16> addrspace(1)* %in %ext = sext <64 x i16> %load to <64 x i32> @@ -434,8 +467,8 @@ define void @global_sextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 ; GCN-NOHSA: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]] ; GCN-HSA: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO]]:[[HI]]{{\]}} -; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 -; EG: MOV {{.*}}, 0.0 +; EGCM: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 +; EGCM: MOV {{.*}}, 0.0 define void @global_zextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) #0 { %a = load i16, i16 addrspace(1)* %in %ext = zext i16 %a to i64 @@ -458,10 +491,10 @@ define void @global_zextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1) ; GCN-NOHSA: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]] ; GCN-HSA: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO]]:[[HI]]{{\]}} -; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 -; EG: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal -; TODO: Why not 15 ? -; EG: 31 +; EGCM: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 +; EGCM: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal +; TODO: These could be expanded earlier using ASHR 15 +; EGCM: 31 define void @global_sextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) #0 { %a = load i16, i16 addrspace(1)* %in %ext = sext i16 %a to i64 @@ -471,8 +504,8 @@ define void @global_sextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1) ; FUNC-LABEL: {{^}}global_zextload_v1i16_to_v1i64: -; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 -; EG: MOV {{.*}}, 0.0 +; EGCM: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 +; EGCM: MOV {{.*}}, 0.0 define void @global_zextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(1)* %in) #0 { %load = load <1 x i16>, <1 x i16> addrspace(1)* %in %ext = zext <1 x i16> %load to <1 x i64> @@ -482,10 +515,10 @@ define void @global_zextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i ; FUNC-LABEL: {{^}}global_sextload_v1i16_to_v1i64: -; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 -; EG: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal -; TODO: Why not 15 ? 
-; EG: 31 +; EGCM: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 +; EGCM: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal +; TODO: These could be expanded earlier using ASHR 15 +; EGCM: 31 define void @global_sextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(1)* %in) #0 { %load = load <1 x i16>, <1 x i16> addrspace(1)* %in %ext = sext <1 x i16> %load to <1 x i64> @@ -503,7 +536,7 @@ define void @global_zextload_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i ; FUNC-LABEL: {{^}}global_sextload_v2i16_to_v2i64: -; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 +; EGCM: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1 define void @global_sextload_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 { %load = load <2 x i16>, <2 x i16> addrspace(1)* %in %ext = sext <2 x i16> %load to <2 x i64> @@ -513,7 +546,7 @@ define void @global_sextload_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i ; FUNC-LABEL: {{^}}global_zextload_v4i16_to_v4i64: -; EG: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1 +; EGCM: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1 define void @global_zextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 { %load = load <4 x i16>, <4 x i16> addrspace(1)* %in %ext = zext <4 x i16> %load to <4 x i64> @@ -523,7 +556,7 @@ define void @global_zextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i ; FUNC-LABEL: {{^}}global_sextload_v4i16_to_v4i64: -; EG: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1 +; EGCM: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1 define void @global_sextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 { %load = load <4 x i16>, <4 x i16> addrspace(1)* %in %ext = sext <4 x i16> %load to <4 x i64> @@ -533,7 +566,7 @@ define void @global_sextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i ; FUNC-LABEL: {{^}}global_zextload_v8i16_to_v8i64: -; EG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1 +; EGCM: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1 define void @global_zextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) #0 { %load = load <8 x i16>, <8 x i16> addrspace(1)* %in %ext = zext <8 x i16> %load to <8 x i64> @@ -543,7 +576,7 @@ define void @global_zextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i ; FUNC-LABEL: {{^}}global_sextload_v8i16_to_v8i64: -; EG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1 +; EGCM: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1 define void @global_sextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) #0 { %load = load <8 x i16>, <8 x i16> addrspace(1)* %in %ext = sext <8 x i16> %load to <8 x i64> @@ -553,8 +586,8 @@ define void @global_sextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i ; FUNC-LABEL: {{^}}global_zextload_v16i16_to_v16i64: -; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1 -; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1 +; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1 +; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1 define void @global_zextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) #0 { %load = load <16 x i16>, <16 x i16> addrspace(1)* %in %ext = zext <16 x i16> %load to <16 x i64> @@ -564,8 +597,8 @@ define void @global_zextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 ; FUNC-LABEL: {{^}}global_sextload_v16i16_to_v16i64: -; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, 
T{{[0-9]+}}.X, 0, #1 -; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1 +; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1 +; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1 define void @global_sextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) #0 { %load = load <16 x i16>, <16 x i16> addrspace(1)* %in %ext = sext <16 x i16> %load to <16 x i64> @@ -575,10 +608,10 @@ define void @global_sextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 ; FUNC-LABEL: {{^}}global_zextload_v32i16_to_v32i64: -; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1 -; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1 -; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 32, #1 -; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 48, #1 +; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1 +; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1 +; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 32, #1 +; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 48, #1 define void @global_zextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(1)* %in) #0 { %load = load <32 x i16>, <32 x i16> addrspace(1)* %in %ext = zext <32 x i16> %load to <32 x i64> @@ -588,10 +621,10 @@ define void @global_zextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 ; FUNC-LABEL: {{^}}global_sextload_v32i16_to_v32i64: -; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1 -; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1 -; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 32, #1 -; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 48, #1 +; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1 +; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1 +; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 32, #1 +; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 48, #1 define void @global_sextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(1)* %in) #0 { %load = load <32 x i16>, <32 x i16> addrspace(1)* %in %ext = sext <32 x i16> %load to <32 x i64> diff --git a/llvm/test/CodeGen/AMDGPU/store-private.ll b/llvm/test/CodeGen/AMDGPU/store-private.ll new file mode 100644 index 00000000000..33d27f24e9c --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/store-private.ll @@ -0,0 +1,743 @@ +; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s +; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s +; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s +; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=CM -check-prefix=FUNC %s + +; FUNC-LABEL: {{^}}store_i1: +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, + +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, + +; SI: buffer_store_byte +define void @store_i1(i1 addrspace(0)* %out) { +entry: + store i1 true, i1 addrspace(0)* %out + ret void +} + +; i8 store +; FUNC-LABEL: {{^}}store_i8: +; EG: LSHR * [[ADDRESS:T[0-9]\.[XYZW]]], KC0[2].Y, literal.x +; EG-NEXT: 2 +; EG: MOVA_INT * AR.x (MASKED) +; EG: MOV [[OLD:T[0-9]\.[XYZW]]], {{.*}}AR.x + +; IG 0: Get the byte index and truncate the value +; EG: AND_INT * 
T{{[0-9]}}.[[BI_CHAN:[XYZW]]], KC0[2].Y, literal.x +; EG: LSHL * T{{[0-9]}}.[[SHIFT_CHAN:[XYZW]]], PV.[[BI_CHAN]], literal.x +; EG-NEXT: 3(4.203895e-45) +; EG: AND_INT * T{{[0-9]}}.[[TRUNC_CHAN:[XYZW]]], KC0[2].Z, literal.x +; EG-NEXT: 255(3.573311e-43) + +; EG: NOT_INT +; EG: AND_INT {{[\* ]*}}[[CLR_CHAN:T[0-9]\.[XYZW]]], {{.*}}[[OLD]] +; EG: OR_INT * [[RES:T[0-9]\.[XYZW]]] +; TODO: Is the reload necessary? +; EG: MOVA_INT * AR.x (MASKED), [[ADDRESS]] +; EG: MOV * T(0 + AR.x).X+, [[RES]] + +; SI: buffer_store_byte + +define void @store_i8(i8 addrspace(0)* %out, i8 %in) { +entry: + store i8 %in, i8 addrspace(0)* %out + ret void +} + +; i16 store +; FUNC-LABEL: {{^}}store_i16: +; EG: LSHR * [[ADDRESS:T[0-9]\.[XYZW]]], KC0[2].Y, literal.x +; EG-NEXT: 2 +; EG: MOVA_INT * AR.x (MASKED) +; EG: MOV [[OLD:T[0-9]\.[XYZW]]], {{.*}}AR.x + +; IG 0: Get the byte index and truncate the value +; EG: AND_INT * T{{[0-9]}}.[[BI_CHAN:[XYZW]]], KC0[2].Y, literal.x +; EG: LSHL * T{{[0-9]}}.[[SHIFT_CHAN:[XYZW]]], PV.[[BI_CHAN]], literal.x +; EG-NEXT: 3(4.203895e-45) +; EG: AND_INT * T{{[0-9]}}.[[TRUNC_CHAN:[XYZW]]], KC0[2].Z, literal.x +; EG-NEXT: 65535(9.183409e-41) + +; EG: NOT_INT +; EG: AND_INT {{[\* ]*}}[[CLR_CHAN:T[0-9]\.[XYZW]]], {{.*}}[[OLD]] +; EG: OR_INT * [[RES:T[0-9]\.[XYZW]]] +; TODO: Is the reload necessary? +; EG: MOVA_INT * AR.x (MASKED), [[ADDRESS]] +; EG: MOV * T(0 + AR.x).X+, [[RES]] + +; SI: buffer_store_short +define void @store_i16(i16 addrspace(0)* %out, i16 %in) { +entry: + store i16 %in, i16 addrspace(0)* %out + ret void +} + +; FUNC-LABEL: {{^}}store_i24: +; SI: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 16 +; SI-DAG: buffer_store_byte +; SI-DAG: buffer_store_short + +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store can be eliminated +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, + +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store can be eliminated +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +define void @store_i24(i24 addrspace(0)* %out, i24 %in) { +entry: + store i24 %in, i24 addrspace(0)* %out + ret void +} + +; FUNC-LABEL: {{^}}store_i25: +; SI: s_and_b32 [[AND:s[0-9]+]], s{{[0-9]+}}, 0x1ffffff{{$}} +; SI: v_mov_b32_e32 [[VAND:v[0-9]+]], [[AND]] +; SI: buffer_store_dword [[VAND]] + +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; EG-NOT: MOVA_INT + +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; CM-NOT: MOVA_INT +define void @store_i25(i25 addrspace(0)* %out, i25 %in) { +entry: + store i25 %in, i25 addrspace(0)* %out + ret void +} + +; FUNC-LABEL: {{^}}store_v2i8: +; v2i8 is naturally 2B aligned, treat as i16 +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; EG-NOT: MOVA_INT + +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; CM-NOT: MOVA_INT + +; SI: buffer_store_short +define void @store_v2i8(<2 x i8> addrspace(0)* %out, <2 x i32> %in) { +entry: + %0 = trunc <2 x i32> %in to <2 x i8> + store <2 x i8> %0, <2 x i8> addrspace(0)* %out + ret void +} + +; FUNC-LABEL: {{^}}store_v2i8_unaligned: +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, 
T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, + +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, + +; SI: buffer_store_byte +define void @store_v2i8_unaligned(<2 x i8> addrspace(0)* %out, <2 x i32> %in) { +entry: + %0 = trunc <2 x i32> %in to <2 x i8> + store <2 x i8> %0, <2 x i8> addrspace(0)* %out, align 1 + ret void +} + + +; FUNC-LABEL: {{^}}store_v2i16: +; v2i8 is naturally 2B aligned, treat as i16 +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; EG-NOT: MOVA_INT + +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; CM-NOT: MOVA_INT + +; SI: buffer_store_dword +define void @store_v2i16(<2 x i16> addrspace(0)* %out, <2 x i32> %in) { +entry: + %0 = trunc <2 x i32> %in to <2 x i16> + store <2 x i16> %0, <2 x i16> addrspace(0)* %out + ret void +} + +; FUNC-LABEL: {{^}}store_v2i16_unaligned: +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, + +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, + +; SI: buffer_store_short +; SI: buffer_store_short +define void @store_v2i16_unaligned(<2 x i16> addrspace(0)* %out, <2 x i32> %in) { +entry: + %0 = trunc <2 x i32> %in to <2 x i16> + store <2 x i16> %0, <2 x i16> addrspace(0)* %out, align 2 + ret void +} + +; FUNC-LABEL: {{^}}store_v4i8: +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; EG-NOT: MOVA_INT + +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; CM-NOT: MOVA_INT + +; SI: buffer_store_dword +define void @store_v4i8(<4 x i8> addrspace(0)* %out, <4 x i32> %in) { +entry: + %0 = trunc <4 x i32> %in to <4 x i8> + store <4 x i8> %0, <4 x i8> addrspace(0)* %out + ret void +} + +; FUNC-LABEL: {{^}}store_v4i8_unaligned: +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, + +; CM: MOVA_INT +; CM: MOV {{[\* 
]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, + +; SI: buffer_store_byte +; SI: buffer_store_byte +; SI: buffer_store_byte +; SI: buffer_store_byte +; SI-NOT: buffer_store_dword +define void @store_v4i8_unaligned(<4 x i8> addrspace(0)* %out, <4 x i32> %in) { +entry: + %0 = trunc <4 x i32> %in to <4 x i8> + store <4 x i8> %0, <4 x i8> addrspace(0)* %out, align 1 + ret void +} + +; FUNC-LABEL: {{^}}store_v8i8_unaligned: +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, + +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store 
cannot be eliminated, +; they might be different locations +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, + +; SI: buffer_store_byte +; SI: buffer_store_byte +; SI: buffer_store_byte +; SI: buffer_store_byte +; SI: buffer_store_byte +; SI: buffer_store_byte +; SI: buffer_store_byte +; SI: buffer_store_byte +; SI-NOT: buffer_store_dword +define void @store_v8i8_unaligned(<8 x i8> addrspace(0)* %out, <8 x i32> %in) { +entry: + %0 = trunc <8 x i32> %in to <8 x i8> + store <8 x i8> %0, <8 x i8> addrspace(0)* %out, align 1 + ret void +} + +; FUNC-LABEL: {{^}}store_v4i8_halfaligned: +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, + +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; TODO: This load and store cannot be eliminated, +; they might be different locations +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, + +; SI: buffer_store_short +; SI: buffer_store_short +; SI-NOT: buffer_store_dword +define void @store_v4i8_halfaligned(<4 x i8> addrspace(0)* %out, <4 x i32> %in) { +entry: + %0 = trunc <4 x i32> %in to <4 x i8> + store <4 x i8> %0, <4 x i8> addrspace(0)* %out, align 2 + ret void +} + +; floating-point store +; FUNC-LABEL: {{^}}store_f32: +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, + +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, + +; SI: buffer_store_dword + +define void @store_f32(float addrspace(0)* %out, float %in) { + store float %in, float addrspace(0)* %out + ret void +} + +; FUNC-LABEL: {{^}}store_v4i16: +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, + +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, + +;TODO: why not x2? +; XSI: buffer_store_dwordx2 +; SI: buffer_store_dword +; SI: buffer_store_dword +define void @store_v4i16(<4 x i16> addrspace(0)* %out, <4 x i32> %in) { +entry: + %0 = trunc <4 x i32> %in to <4 x i16> + store <4 x i16> %0, <4 x i16> addrspace(0)* %out + ret void +} + +; vec2 floating-point stores +; FUNC-LABEL: {{^}}store_v2f32: +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, +; EG: MOVA_INT +; EG: MOV {{[\* ]*}}T(0 + AR.x).X+, + +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, +; CM: MOVA_INT +; CM: MOV {{[\* ]*}}T(0 + AR.x).X+, + +;TODO: why not x2? 
+; floating-point store
+; FUNC-LABEL: {{^}}store_f32:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; SI: buffer_store_dword
+
+define void @store_f32(float addrspace(0)* %out, float %in) {
+  store float %in, float addrspace(0)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}store_v4i16:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; TODO: why not x2?
+; XSI: buffer_store_dwordx2
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+define void @store_v4i16(<4 x i16> addrspace(0)* %out, <4 x i32> %in) {
+entry:
+  %0 = trunc <4 x i32> %in to <4 x i16>
+  store <4 x i16> %0, <4 x i16> addrspace(0)* %out
+  ret void
+}
+
+; vec2 floating-point stores
+; FUNC-LABEL: {{^}}store_v2f32:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; TODO: why not x2?
+; XSI: buffer_store_dwordx2
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+
+define void @store_v2f32(<2 x float> addrspace(0)* %out, float %a, float %b) {
+entry:
+  %0 = insertelement <2 x float> <float 0.0, float 0.0>, float %a, i32 0
+  %1 = insertelement <2 x float> %0, float %b, i32 1
+  store <2 x float> %1, <2 x float> addrspace(0)* %out
+  ret void
+}
+
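A note on the X-prefixed lines: assuming the file follows the usual LLVM convention for disabled checks, no RUN line passes an XSI prefix to FileCheck, so those lines are inert placeholders that record the output expected once the scalarized dword pairs are merged. If merging were implemented, a run line such as this hypothetical one would activate them:

; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=SI,XSI %s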
+; FUNC-LABEL: {{^}}store_v3i32:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; TODO: why not x2?
+; XSI-DAG: buffer_store_dwordx2
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+
+define void @store_v3i32(<3 x i32> addrspace(0)* %out, <3 x i32> %a) nounwind {
+  store <3 x i32> %a, <3 x i32> addrspace(0)* %out, align 16
+  ret void
+}
+
+; FUNC-LABEL: {{^}}store_v4i32:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; TODO: why not x4?
+; XSI: buffer_store_dwordx4
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+define void @store_v4i32(<4 x i32> addrspace(0)* %out, <4 x i32> %in) {
+entry:
+  store <4 x i32> %in, <4 x i32> addrspace(0)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}store_v4i32_unaligned:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; TODO: why not x4?
+; XSI: buffer_store_dwordx4
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+define void @store_v4i32_unaligned(<4 x i32> addrspace(0)* %out, <4 x i32> %in) {
+entry:
+  store <4 x i32> %in, <4 x i32> addrspace(0)* %out, align 4
+  ret void
+}
+
+; v4f32 store
+; FUNC-LABEL: {{^}}store_v4f32:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; TODO: why not x4?
+; XSI: buffer_store_dwordx4
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+define void @store_v4f32(<4 x float> addrspace(0)* %out, <4 x float> addrspace(0)* %in) {
+  %1 = load <4 x float>, <4 x float> addrspace(0)* %in
+  store <4 x float> %1, <4 x float> addrspace(0)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}store_i64_i8:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; SI: buffer_store_byte
+define void @store_i64_i8(i8 addrspace(0)* %out, i64 %in) {
+entry:
+  %0 = trunc i64 %in to i8
+  store i8 %0, i8 addrspace(0)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}store_i64_i16:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; SI: buffer_store_short
+define void @store_i64_i16(i16 addrspace(0)* %out, i64 %in) {
+entry:
+  %0 = trunc i64 %in to i16
+  store i16 %0, i16 addrspace(0)* %out
+  ret void
+}
+
+; The stores in this function are combined by the optimizer to create a
+; 64-bit store with 32-bit alignment. This is legal and the legalizer
+; should not try to split the 64-bit store back into 2 32-bit stores.
+
+; FUNC-LABEL: {{^}}vecload2:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; TODO: why not x2?
+; XSI: buffer_store_dwordx2
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+define void @vecload2(i32 addrspace(0)* nocapture %out, i32 addrspace(2)* nocapture %mem) #0 {
+entry:
+  %0 = load i32, i32 addrspace(2)* %mem, align 4
+  %arrayidx1.i = getelementptr inbounds i32, i32 addrspace(2)* %mem, i64 1
+  %1 = load i32, i32 addrspace(2)* %arrayidx1.i, align 4
+  store i32 %0, i32 addrspace(0)* %out, align 4
+  %arrayidx1 = getelementptr inbounds i32, i32 addrspace(0)* %out, i64 1
+  store i32 %1, i32 addrspace(0)* %arrayidx1, align 4
+  ret void
+}
+
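The combined form that the comment above describes would look roughly like this in IR (a hypothetical sketch; the optimizer performs the equivalent merge internally during DAG combining rather than rewriting the IR):

define void @merged_store_sketch(i32 addrspace(0)* %out, i64 %merged) {
entry:
  %p64 = bitcast i32 addrspace(0)* %out to i64 addrspace(0)*
  ; One 64-bit store with only 32-bit alignment: legal, and the legalizer
  ; must keep it whole rather than splitting it back into two dword stores.
  store i64 %merged, i64 addrspace(0)* %p64, align 4
  ret void
}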
+; When i128 was a legal type, this program generated "cannot select" errors:
+
+; FUNC-LABEL: {{^}}"i128-const-store":
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; TODO: why not x4?
+; XSI: buffer_store_dwordx4
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+define void @i128-const-store(i32 addrspace(0)* %out) {
+entry:
+  store i32 1, i32 addrspace(0)* %out, align 4
+  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(0)* %out, i64 1
+  store i32 1, i32 addrspace(0)* %arrayidx2, align 4
+  %arrayidx4 = getelementptr inbounds i32, i32 addrspace(0)* %out, i64 2
+  store i32 2, i32 addrspace(0)* %arrayidx4, align 4
+  %arrayidx6 = getelementptr inbounds i32, i32 addrspace(0)* %out, i64 3
+  store i32 2, i32 addrspace(0)* %arrayidx6, align 4
+  ret void
+}
+
+
+attributes #0 = { nounwind }