author    Matt Arsenault <Matthew.Arsenault@amd.com>  2017-06-28 21:38:50 +0000
committer Matt Arsenault <Matthew.Arsenault@amd.com>  2017-06-28 21:38:50 +0000
commit    7c525903efd25646f3d4d8f3888d9ed8b49ad843 (patch)
tree      1bd48ef3d06147f4f39e509c4915b21aaf8b7394 /llvm/test/CodeGen/AMDGPU/si-sgpr-spill.ll
parent    b3c88339ad9608fc0829010a053f47bb1851994a (diff)
AMDGPU: Remove SITypeRewriter
This was an old workaround for using v16i8 in some old intrinsics for resource descriptors.

llvm-svn: 306603
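In short, the removed pass rewrote <4 x i32> resource descriptors into <16 x i8> to match the old intrinsic signatures; with it gone, the tests use the <4 x i32> form directly. A minimal before/after sketch distilled from the diff below (value names %rsrc and %ptr and the offset 0 are illustrative, not from the test):

    ; before: descriptor loaded and passed as <16 x i8>
    %rsrc = load <16 x i8>, <16 x i8> addrspace(2)* %ptr, !tbaa !0
    %v = call float @llvm.SI.load.const(<16 x i8> %rsrc, i32 0)

    ; after: descriptor used directly as <4 x i32>
    %rsrc = load <4 x i32>, <4 x i32> addrspace(2)* %ptr, !tbaa !0
    %v = call float @llvm.SI.load.const.v4i32(<4 x i32> %rsrc, i32 0)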
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/si-sgpr-spill.ll')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/si-sgpr-spill.ll | 398
1 file changed, 199 insertions(+), 199 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/si-sgpr-spill.ll b/llvm/test/CodeGen/AMDGPU/si-sgpr-spill.ll
index 8731e74d63a..3e70f2c7782 100644
--- a/llvm/test/CodeGen/AMDGPU/si-sgpr-spill.ll
+++ b/llvm/test/CodeGen/AMDGPU/si-sgpr-spill.ll
@@ -24,81 +24,81 @@
; GCN: s_endpgm
; TOVGPR: ScratchSize: 0{{$}}
-define amdgpu_ps void @main([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) {
+define amdgpu_ps void @main([17 x <4 x i32>] addrspace(2)* byval %arg, [32 x <4 x i32>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) {
main_body:
- %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg, i64 0, i32 0
- %tmp21 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
- %tmp22 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 96)
- %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 100)
- %tmp24 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 104)
- %tmp25 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 112)
- %tmp26 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 116)
- %tmp27 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 120)
- %tmp28 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 128)
- %tmp29 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 132)
- %tmp30 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 140)
- %tmp31 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 144)
- %tmp32 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 160)
- %tmp33 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 176)
- %tmp34 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 180)
- %tmp35 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 184)
- %tmp36 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 192)
- %tmp37 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 196)
- %tmp38 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 200)
- %tmp39 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 208)
- %tmp40 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 212)
- %tmp41 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 216)
- %tmp42 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 224)
- %tmp43 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 240)
- %tmp44 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 244)
- %tmp45 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 248)
- %tmp46 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 256)
- %tmp47 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 272)
- %tmp48 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 276)
- %tmp49 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 280)
- %tmp50 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 288)
- %tmp51 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 292)
- %tmp52 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 296)
- %tmp53 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 304)
- %tmp54 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 308)
- %tmp55 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 312)
- %tmp56 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 368)
- %tmp57 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 372)
- %tmp58 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 376)
- %tmp59 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 384)
+ %tmp = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(2)* %arg, i64 0, i32 0
+ %tmp21 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !0
+ %tmp22 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 96)
+ %tmp23 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 100)
+ %tmp24 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 104)
+ %tmp25 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 112)
+ %tmp26 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 116)
+ %tmp27 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 120)
+ %tmp28 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 128)
+ %tmp29 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 132)
+ %tmp30 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 140)
+ %tmp31 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 144)
+ %tmp32 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 160)
+ %tmp33 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 176)
+ %tmp34 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 180)
+ %tmp35 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 184)
+ %tmp36 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 192)
+ %tmp37 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 196)
+ %tmp38 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 200)
+ %tmp39 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 208)
+ %tmp40 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 212)
+ %tmp41 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 216)
+ %tmp42 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 224)
+ %tmp43 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 240)
+ %tmp44 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 244)
+ %tmp45 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 248)
+ %tmp46 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 256)
+ %tmp47 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 272)
+ %tmp48 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 276)
+ %tmp49 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 280)
+ %tmp50 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 288)
+ %tmp51 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 292)
+ %tmp52 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 296)
+ %tmp53 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 304)
+ %tmp54 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 308)
+ %tmp55 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 312)
+ %tmp56 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 368)
+ %tmp57 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 372)
+ %tmp58 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 376)
+ %tmp59 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 384)
%tmp60 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 0
%tmp61 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp60, !tbaa !0
- %tmp62 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 0
- %tmp63 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp62, !tbaa !0
- %tmp63.bc = bitcast <16 x i8> %tmp63 to <4 x i32>
+ %tmp62 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 0
+ %tmp63 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp62, !tbaa !0
+ %tmp63.bc = bitcast <4 x i32> %tmp63 to <4 x i32>
%tmp64 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 1
%tmp65 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp64, !tbaa !0
- %tmp66 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 1
- %tmp67 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp66, !tbaa !0
+ %tmp66 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 1
+ %tmp67 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp66, !tbaa !0
%tmp68 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 2
%tmp69 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp68, !tbaa !0
- %tmp70 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 2
- %tmp71 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp70, !tbaa !0
+ %tmp70 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 2
+ %tmp71 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp70, !tbaa !0
%tmp72 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 3
%tmp73 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp72, !tbaa !0
- %tmp74 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 3
- %tmp75 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp74, !tbaa !0
+ %tmp74 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 3
+ %tmp75 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp74, !tbaa !0
%tmp76 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 4
%tmp77 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp76, !tbaa !0
- %tmp78 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 4
- %tmp79 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp78, !tbaa !0
+ %tmp78 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 4
+ %tmp79 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp78, !tbaa !0
%tmp80 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 5
%tmp81 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp80, !tbaa !0
- %tmp82 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 5
- %tmp83 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp82, !tbaa !0
+ %tmp82 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 5
+ %tmp83 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp82, !tbaa !0
%tmp84 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 6
%tmp85 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp84, !tbaa !0
- %tmp86 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 6
- %tmp87 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp86, !tbaa !0
+ %tmp86 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 6
+ %tmp87 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp86, !tbaa !0
%tmp88 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 7
%tmp89 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp88, !tbaa !0
- %tmp90 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 7
- %tmp91 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp90, !tbaa !0
+ %tmp90 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 7
+ %tmp91 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp90, !tbaa !0
%i.i = extractelement <2 x i32> %arg6, i32 0
%j.i = extractelement <2 x i32> %arg6, i32 1
%i.f.i = bitcast i32 %i.i to float
@@ -410,7 +410,7 @@ IF67: ; preds = %LOOP65
%tmp274 = insertelement <8 x i32> %tmp273, i32 %tmp268, i32 5
%tmp275 = insertelement <8 x i32> %tmp274, i32 undef, i32 6
%tmp276 = insertelement <8 x i32> %tmp275, i32 undef, i32 7
- %tmp67.bc = bitcast <16 x i8> %tmp67 to <4 x i32>
+ %tmp67.bc = bitcast <4 x i32> %tmp67 to <4 x i32>
%tmp276.bc = bitcast <8 x i32> %tmp276 to <8 x float>
%tmp277 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp276.bc, <8 x i32> %tmp65, <4 x i32> %tmp67.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp278 = extractelement <4 x float> %tmp277, i32 0
@@ -432,7 +432,7 @@ IF67: ; preds = %LOOP65
%tmp294 = insertelement <8 x i32> %tmp293, i32 %tmp288, i32 5
%tmp295 = insertelement <8 x i32> %tmp294, i32 undef, i32 6
%tmp296 = insertelement <8 x i32> %tmp295, i32 undef, i32 7
- %tmp83.bc = bitcast <16 x i8> %tmp83 to <4 x i32>
+ %tmp83.bc = bitcast <4 x i32> %tmp83 to <4 x i32>
%tmp296.bc = bitcast <8 x i32> %tmp296 to <8 x float>
%tmp297 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp296.bc, <8 x i32> %tmp81, <4 x i32> %tmp83.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp298 = extractelement <4 x float> %tmp297, i32 0
@@ -452,7 +452,7 @@ IF67: ; preds = %LOOP65
%tmp312 = insertelement <8 x i32> %tmp311, i32 %tmp306, i32 5
%tmp313 = insertelement <8 x i32> %tmp312, i32 undef, i32 6
%tmp314 = insertelement <8 x i32> %tmp313, i32 undef, i32 7
- %tmp79.bc = bitcast <16 x i8> %tmp79 to <4 x i32>
+ %tmp79.bc = bitcast <4 x i32> %tmp79 to <4 x i32>
%tmp314.bc = bitcast <8 x i32> %tmp314 to <8 x float>
%tmp315 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp314.bc, <8 x i32> %tmp77, <4 x i32> %tmp79.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp316 = extractelement <4 x float> %tmp315, i32 0
@@ -515,7 +515,7 @@ IF67: ; preds = %LOOP65
%tmp372 = insertelement <8 x i32> %tmp371, i32 %tmp366, i32 5
%tmp373 = insertelement <8 x i32> %tmp372, i32 undef, i32 6
%tmp374 = insertelement <8 x i32> %tmp373, i32 undef, i32 7
- %tmp71.bc = bitcast <16 x i8> %tmp71 to <4 x i32>
+ %tmp71.bc = bitcast <4 x i32> %tmp71 to <4 x i32>
%tmp374.bc = bitcast <8 x i32> %tmp374 to <8 x float>
%tmp375 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp374.bc, <8 x i32> %tmp69, <4 x i32> %tmp71.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp376 = extractelement <4 x float> %tmp375, i32 0
@@ -571,7 +571,7 @@ IF67: ; preds = %LOOP65
%tmp426 = insertelement <8 x i32> %tmp425, i32 %tmp420, i32 5
%tmp427 = insertelement <8 x i32> %tmp426, i32 undef, i32 6
%tmp428 = insertelement <8 x i32> %tmp427, i32 undef, i32 7
- %tmp87.bc = bitcast <16 x i8> %tmp87 to <4 x i32>
+ %tmp87.bc = bitcast <4 x i32> %tmp87 to <4 x i32>
%tmp428.bc = bitcast <8 x i32> %tmp428 to <8 x float>
%tmp429 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp428.bc, <8 x i32> %tmp85, <4 x i32> %tmp87.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp430 = extractelement <4 x float> %tmp429, i32 0
@@ -624,7 +624,7 @@ IF67: ; preds = %LOOP65
%tmp467 = insertelement <4 x i32> %tmp466, i32 %tmp464, i32 1
%tmp468 = insertelement <4 x i32> %tmp467, i32 %tmp465, i32 2
%tmp469 = insertelement <4 x i32> %tmp468, i32 undef, i32 3
- %tmp91.bc = bitcast <16 x i8> %tmp91 to <4 x i32>
+ %tmp91.bc = bitcast <4 x i32> %tmp91 to <4 x i32>
%tmp469.bc = bitcast <4 x i32> %tmp469 to <4 x float>
%tmp470 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %tmp469.bc, <8 x i32> %tmp89, <4 x i32> %tmp91.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
%tmp471 = extractelement <4 x float> %tmp470, i32 0
@@ -727,7 +727,7 @@ IF67: ; preds = %LOOP65
%tmp568 = insertelement <8 x i32> %tmp567, i32 %tmp562, i32 5
%tmp569 = insertelement <8 x i32> %tmp568, i32 undef, i32 6
%tmp570 = insertelement <8 x i32> %tmp569, i32 undef, i32 7
- %tmp75.bc = bitcast <16 x i8> %tmp75 to <4 x i32>
+ %tmp75.bc = bitcast <4 x i32> %tmp75 to <4 x i32>
%tmp570.bc = bitcast <8 x i32> %tmp570 to <8 x float>
%tmp571 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp570.bc, <8 x i32> %tmp73, <4 x i32> %tmp75.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp572 = extractelement <4 x float> %tmp571, i32 0
@@ -778,149 +778,149 @@ ENDIF66: ; preds = %LOOP65
; GCN-LABEL: {{^}}main1:
; GCN: s_endpgm
; TOVGPR: ScratchSize: 0{{$}}
-define amdgpu_ps void @main1([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
+define amdgpu_ps void @main1([17 x <4 x i32>] addrspace(2)* byval %arg, [32 x <4 x i32>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
main_body:
- %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg, i64 0, i32 0
- %tmp21 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
- %tmp22 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 0)
- %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 4)
- %tmp24 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 8)
- %tmp25 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 12)
- %tmp26 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 28)
- %tmp27 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 48)
- %tmp28 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 52)
- %tmp29 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 56)
- %tmp30 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 64)
- %tmp31 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 68)
- %tmp32 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 72)
- %tmp33 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 76)
- %tmp34 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 128)
- %tmp35 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 132)
- %tmp36 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 144)
- %tmp37 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 148)
- %tmp38 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 152)
- %tmp39 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 160)
- %tmp40 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 164)
- %tmp41 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 168)
- %tmp42 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 172)
- %tmp43 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 176)
- %tmp44 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 180)
- %tmp45 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 184)
- %tmp46 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 192)
- %tmp47 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 196)
- %tmp48 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 200)
- %tmp49 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 208)
- %tmp50 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 212)
- %tmp51 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 216)
- %tmp52 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 220)
- %tmp53 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 236)
- %tmp54 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 240)
- %tmp55 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 244)
- %tmp56 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 248)
- %tmp57 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 252)
- %tmp58 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 256)
- %tmp59 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 260)
- %tmp60 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 264)
- %tmp61 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 268)
- %tmp62 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 272)
- %tmp63 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 276)
- %tmp64 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 280)
- %tmp65 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 284)
- %tmp66 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 288)
- %tmp67 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 292)
- %tmp68 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 464)
- %tmp69 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 468)
- %tmp70 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 472)
- %tmp71 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 496)
- %tmp72 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 500)
- %tmp73 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 504)
- %tmp74 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 512)
- %tmp75 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 516)
- %tmp76 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 524)
- %tmp77 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 532)
- %tmp78 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 536)
- %tmp79 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 540)
- %tmp80 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 544)
- %tmp81 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 548)
- %tmp82 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 552)
- %tmp83 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 556)
- %tmp84 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 560)
- %tmp85 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 564)
- %tmp86 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 568)
- %tmp87 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 572)
- %tmp88 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 576)
- %tmp89 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 580)
- %tmp90 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 584)
- %tmp91 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 588)
- %tmp92 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 592)
- %tmp93 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 596)
- %tmp94 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 600)
- %tmp95 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 604)
- %tmp96 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 608)
- %tmp97 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 612)
- %tmp98 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 616)
- %tmp99 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 624)
- %tmp100 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 628)
- %tmp101 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 632)
- %tmp102 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 636)
- %tmp103 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 640)
- %tmp104 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 644)
- %tmp105 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 648)
- %tmp106 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 652)
- %tmp107 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 656)
- %tmp108 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 660)
- %tmp109 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 664)
- %tmp110 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 668)
- %tmp111 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 672)
- %tmp112 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 676)
- %tmp113 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 680)
- %tmp114 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 684)
- %tmp115 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 688)
- %tmp116 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 692)
- %tmp117 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 696)
- %tmp118 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 700)
- %tmp119 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 704)
- %tmp120 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 708)
- %tmp121 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 712)
- %tmp122 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 716)
- %tmp123 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 864)
- %tmp124 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 868)
+ %tmp = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(2)* %arg, i64 0, i32 0
+ %tmp21 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !0
+ %tmp22 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 0)
+ %tmp23 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 4)
+ %tmp24 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 8)
+ %tmp25 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 12)
+ %tmp26 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 28)
+ %tmp27 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 48)
+ %tmp28 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 52)
+ %tmp29 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 56)
+ %tmp30 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 64)
+ %tmp31 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 68)
+ %tmp32 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 72)
+ %tmp33 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 76)
+ %tmp34 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 128)
+ %tmp35 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 132)
+ %tmp36 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 144)
+ %tmp37 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 148)
+ %tmp38 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 152)
+ %tmp39 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 160)
+ %tmp40 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 164)
+ %tmp41 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 168)
+ %tmp42 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 172)
+ %tmp43 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 176)
+ %tmp44 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 180)
+ %tmp45 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 184)
+ %tmp46 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 192)
+ %tmp47 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 196)
+ %tmp48 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 200)
+ %tmp49 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 208)
+ %tmp50 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 212)
+ %tmp51 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 216)
+ %tmp52 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 220)
+ %tmp53 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 236)
+ %tmp54 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 240)
+ %tmp55 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 244)
+ %tmp56 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 248)
+ %tmp57 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 252)
+ %tmp58 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 256)
+ %tmp59 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 260)
+ %tmp60 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 264)
+ %tmp61 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 268)
+ %tmp62 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 272)
+ %tmp63 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 276)
+ %tmp64 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 280)
+ %tmp65 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 284)
+ %tmp66 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 288)
+ %tmp67 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 292)
+ %tmp68 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 464)
+ %tmp69 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 468)
+ %tmp70 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 472)
+ %tmp71 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 496)
+ %tmp72 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 500)
+ %tmp73 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 504)
+ %tmp74 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 512)
+ %tmp75 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 516)
+ %tmp76 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 524)
+ %tmp77 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 532)
+ %tmp78 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 536)
+ %tmp79 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 540)
+ %tmp80 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 544)
+ %tmp81 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 548)
+ %tmp82 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 552)
+ %tmp83 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 556)
+ %tmp84 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 560)
+ %tmp85 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 564)
+ %tmp86 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 568)
+ %tmp87 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 572)
+ %tmp88 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 576)
+ %tmp89 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 580)
+ %tmp90 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 584)
+ %tmp91 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 588)
+ %tmp92 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 592)
+ %tmp93 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 596)
+ %tmp94 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 600)
+ %tmp95 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 604)
+ %tmp96 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 608)
+ %tmp97 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 612)
+ %tmp98 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 616)
+ %tmp99 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 624)
+ %tmp100 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 628)
+ %tmp101 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 632)
+ %tmp102 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 636)
+ %tmp103 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 640)
+ %tmp104 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 644)
+ %tmp105 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 648)
+ %tmp106 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 652)
+ %tmp107 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 656)
+ %tmp108 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 660)
+ %tmp109 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 664)
+ %tmp110 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 668)
+ %tmp111 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 672)
+ %tmp112 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 676)
+ %tmp113 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 680)
+ %tmp114 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 684)
+ %tmp115 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 688)
+ %tmp116 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 692)
+ %tmp117 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 696)
+ %tmp118 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 700)
+ %tmp119 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 704)
+ %tmp120 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 708)
+ %tmp121 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 712)
+ %tmp122 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 716)
+ %tmp123 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 864)
+ %tmp124 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 868)
%tmp125 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 0
%tmp126 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp125, !tbaa !0
- %tmp127 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 0
- %tmp128 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp127, !tbaa !0
+ %tmp127 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 0
+ %tmp128 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp127, !tbaa !0
%tmp129 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 1
%tmp130 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp129, !tbaa !0
- %tmp131 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 1
- %tmp132 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp131, !tbaa !0
+ %tmp131 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 1
+ %tmp132 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp131, !tbaa !0
%tmp133 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 2
%tmp134 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp133, !tbaa !0
- %tmp135 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 2
- %tmp136 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp135, !tbaa !0
+ %tmp135 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 2
+ %tmp136 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp135, !tbaa !0
%tmp137 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 3
%tmp138 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp137, !tbaa !0
- %tmp139 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 3
- %tmp140 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp139, !tbaa !0
+ %tmp139 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 3
+ %tmp140 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp139, !tbaa !0
%tmp141 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 4
%tmp142 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp141, !tbaa !0
- %tmp143 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 4
- %tmp144 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp143, !tbaa !0
+ %tmp143 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 4
+ %tmp144 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp143, !tbaa !0
%tmp145 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 5
%tmp146 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp145, !tbaa !0
- %tmp147 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 5
- %tmp148 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp147, !tbaa !0
+ %tmp147 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 5
+ %tmp148 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp147, !tbaa !0
%tmp149 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 6
%tmp150 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp149, !tbaa !0
- %tmp151 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 6
- %tmp152 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp151, !tbaa !0
+ %tmp151 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 6
+ %tmp152 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp151, !tbaa !0
%tmp153 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 7
%tmp154 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp153, !tbaa !0
- %tmp155 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 7
- %tmp156 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp155, !tbaa !0
+ %tmp155 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 7
+ %tmp156 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp155, !tbaa !0
%tmp157 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 8
%tmp158 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp157, !tbaa !0
- %tmp159 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 8
- %tmp160 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp159, !tbaa !0
+ %tmp159 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 8
+ %tmp160 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp159, !tbaa !0
%tmp161 = fcmp ugt float %arg17, 0.000000e+00
%tmp162 = select i1 %tmp161, float 1.000000e+00, float 0.000000e+00
%i.i = extractelement <2 x i32> %arg6, i32 0
@@ -1144,7 +1144,7 @@ main_body:
%tmp222 = bitcast float %p2.i126 to i32
%tmp223 = insertelement <2 x i32> undef, i32 %tmp221, i32 0
%tmp224 = insertelement <2 x i32> %tmp223, i32 %tmp222, i32 1
- %tmp132.bc = bitcast <16 x i8> %tmp132 to <4 x i32>
+ %tmp132.bc = bitcast <4 x i32> %tmp132 to <4 x i32>
%tmp224.bc = bitcast <2 x i32> %tmp224 to <2 x float>
%tmp225 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %tmp224.bc, <8 x i32> %tmp130, <4 x i32> %tmp132.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp226 = extractelement <4 x float> %tmp225, i32 0
@@ -1218,7 +1218,7 @@ LOOP: ; preds = %LOOP, %main_body
%tmp279 = insertelement <4 x i32> %tmp278, i32 %tmp277, i32 1
%tmp280 = insertelement <4 x i32> %tmp279, i32 0, i32 2
%tmp281 = insertelement <4 x i32> %tmp280, i32 undef, i32 3
- %tmp148.bc = bitcast <16 x i8> %tmp148 to <4 x i32>
+ %tmp148.bc = bitcast <4 x i32> %tmp148 to <4 x i32>
%tmp281.bc = bitcast <4 x i32> %tmp281 to <4 x float>
%tmp282 = call <4 x float> @llvm.amdgcn.image.sample.l.v4f32.v4f32.v8i32(<4 x float> %tmp281.bc, <8 x i32> %tmp146, <4 x i32> %tmp148.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp283 = extractelement <4 x float> %tmp282, i32 3
@@ -1283,7 +1283,7 @@ IF189: ; preds = %LOOP
%tmp339 = bitcast float %tmp335 to i32
%tmp340 = insertelement <2 x i32> undef, i32 %tmp338, i32 0
%tmp341 = insertelement <2 x i32> %tmp340, i32 %tmp339, i32 1
- %tmp136.bc = bitcast <16 x i8> %tmp136 to <4 x i32>
+ %tmp136.bc = bitcast <4 x i32> %tmp136 to <4 x i32>
%a.bc.i = bitcast <2 x i32> %tmp341 to <2 x float>
%tmp0 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i, <8 x i32> %tmp134, <4 x i32> %tmp136.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp343 = extractelement <4 x float> %tmp0, i32 0
@@ -1317,7 +1317,7 @@ IF189: ; preds = %LOOP
%tmp359 = bitcast float %tmp337 to i32
%tmp360 = insertelement <2 x i32> undef, i32 %tmp358, i32 0
%tmp361 = insertelement <2 x i32> %tmp360, i32 %tmp359, i32 1
- %tmp152.bc = bitcast <16 x i8> %tmp152 to <4 x i32>
+ %tmp152.bc = bitcast <4 x i32> %tmp152 to <4 x i32>
%a.bc.i3 = bitcast <2 x i32> %tmp361 to <2 x float>
%tmp1 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i3, <8 x i32> %tmp150, <4 x i32> %tmp152.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp363 = extractelement <4 x float> %tmp1, i32 2
@@ -1329,7 +1329,7 @@ IF189: ; preds = %LOOP
%tmp369 = bitcast float %tmp311 to i32
%tmp370 = insertelement <2 x i32> undef, i32 %tmp368, i32 0
%tmp371 = insertelement <2 x i32> %tmp370, i32 %tmp369, i32 1
- %tmp140.bc = bitcast <16 x i8> %tmp140 to <4 x i32>
+ %tmp140.bc = bitcast <4 x i32> %tmp140 to <4 x i32>
%a.bc.i2 = bitcast <2 x i32> %tmp371 to <2 x float>
%tmp2 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i2, <8 x i32> %tmp138, <4 x i32> %tmp140.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp373 = extractelement <4 x float> %tmp2, i32 0
@@ -1347,7 +1347,7 @@ IF189: ; preds = %LOOP
%tmp383 = bitcast float %tmp321 to i32
%tmp384 = insertelement <2 x i32> undef, i32 %tmp382, i32 0
%tmp385 = insertelement <2 x i32> %tmp384, i32 %tmp383, i32 1
- %tmp144.bc = bitcast <16 x i8> %tmp144 to <4 x i32>
+ %tmp144.bc = bitcast <4 x i32> %tmp144 to <4 x i32>
%a.bc.i1 = bitcast <2 x i32> %tmp385 to <2 x float>
%tmp3 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i1, <8 x i32> %tmp142, <4 x i32> %tmp144.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp387 = extractelement <4 x float> %tmp3, i32 0
@@ -1446,7 +1446,7 @@ ENDIF197: ; preds = %IF198, %IF189
%tmp467 = bitcast float %tmp220 to i32
%tmp468 = insertelement <2 x i32> undef, i32 %tmp466, i32 0
%tmp469 = insertelement <2 x i32> %tmp468, i32 %tmp467, i32 1
- %tmp160.bc = bitcast <16 x i8> %tmp160 to <4 x i32>
+ %tmp160.bc = bitcast <4 x i32> %tmp160 to <4 x i32>
%tmp469.bc = bitcast <2 x i32> %tmp469 to <2 x float>
%tmp470 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %tmp469.bc, <8 x i32> %tmp158, <4 x i32> %tmp160.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp471 = extractelement <4 x float> %tmp470, i32 0
@@ -1465,7 +1465,7 @@ ENDIF197: ; preds = %IF198, %IF189
%tmp484 = bitcast float %p2.i138 to i32
%tmp485 = insertelement <2 x i32> undef, i32 %tmp483, i32 0
%tmp486 = insertelement <2 x i32> %tmp485, i32 %tmp484, i32 1
- %tmp156.bc = bitcast <16 x i8> %tmp156 to <4 x i32>
+ %tmp156.bc = bitcast <4 x i32> %tmp156 to <4 x i32>
%tmp486.bc = bitcast <2 x i32> %tmp486 to <2 x float>
%tmp487 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %tmp486.bc, <8 x i32> %tmp154, <4 x i32> %tmp156.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp488 = extractelement <4 x float> %tmp487, i32 0
@@ -1674,7 +1674,7 @@ ENDIF209: ; preds = %ELSE214, %ELSE211,
%tmp657 = insertelement <4 x i32> %tmp656, i32 %tmp654, i32 1
%tmp658 = insertelement <4 x i32> %tmp657, i32 %tmp655, i32 2
%tmp659 = insertelement <4 x i32> %tmp658, i32 undef, i32 3
- %tmp128.bc = bitcast <16 x i8> %tmp128 to <4 x i32>
+ %tmp128.bc = bitcast <4 x i32> %tmp128 to <4 x i32>
%tmp659.bc = bitcast <4 x i32> %tmp659 to <4 x float>
%tmp660 = call <4 x float> @llvm.amdgcn.image.sample.l.v4f32.v4f32.v8i32(<4 x float> %tmp659.bc, <8 x i32> %tmp126, <4 x i32> %tmp128.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp661 = extractelement <4 x float> %tmp660, i32 0
@@ -1869,7 +1869,7 @@ declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float>, <8
declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
declare <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
declare <4 x float> @llvm.amdgcn.image.sample.l.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }