diff options
| author | Yaxun Liu <Yaxun.Liu@amd.com> | 2018-02-13 18:00:25 +0000 |
|---|---|---|
| committer | Yaxun Liu <Yaxun.Liu@amd.com> | 2018-02-13 18:00:25 +0000 |
| commit | 0124b5484cdd254571e8b17b0ac510aec5edf1a5 (patch) | |
| tree | cab176450e673f3e1d9caedcd290edd1884660f4 /llvm/test/CodeGen/AMDGPU/image-schedule.ll | |
| parent | 18c0247852f717b3d72cbf0f8635e3108ff10613 (diff) | |
| download | bcm5719-llvm-0124b5484cdd254571e8b17b0ac510aec5edf1a5.tar.gz bcm5719-llvm-0124b5484cdd254571e8b17b0ac510aec5edf1a5.zip | |
[AMDGPU] Change constant addr space to 4
Differential Revision: https://reviews.llvm.org/D43170
llvm-svn: 325030
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/image-schedule.ll')
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/image-schedule.ll | 20 |
1 file changed, 10 insertions, 10 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/image-schedule.ll b/llvm/test/CodeGen/AMDGPU/image-schedule.ll index 856ba04a791..6f8060f1d55 100644 --- a/llvm/test/CodeGen/AMDGPU/image-schedule.ll +++ b/llvm/test/CodeGen/AMDGPU/image-schedule.ll @@ -20,21 +20,21 @@ define dllexport amdgpu_cs void @_amdgpu_cs_main(i32 inreg %arg, i32 inreg %arg1 %.0.vec.insert = insertelement <2 x i32> undef, i32 %arg2, i32 0 %.4.vec.insert = shufflevector <2 x i32> %.0.vec.insert, <2 x i32> %tmp6, <2 x i32> <i32 0, i32 3> %tmp7 = bitcast <2 x i32> %.4.vec.insert to i64 - %tmp8 = inttoptr i64 %tmp7 to [4294967295 x i8] addrspace(2)* + %tmp8 = inttoptr i64 %tmp7 to [4294967295 x i8] addrspace(4)* %tmp9 = add <3 x i32> %arg3, %arg5 - %tmp10 = getelementptr [4294967295 x i8], [4294967295 x i8] addrspace(2)* %tmp8, i64 0, i64 32 - %tmp11 = bitcast i8 addrspace(2)* %tmp10 to <8 x i32> addrspace(2)*, !amdgpu.uniform !0 - %tmp12 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp11, align 16 + %tmp10 = getelementptr [4294967295 x i8], [4294967295 x i8] addrspace(4)* %tmp8, i64 0, i64 32 + %tmp11 = bitcast i8 addrspace(4)* %tmp10 to <8 x i32> addrspace(4)*, !amdgpu.uniform !0 + %tmp12 = load <8 x i32>, <8 x i32> addrspace(4)* %tmp11, align 16 %tmp13 = shufflevector <3 x i32> %tmp9, <3 x i32> undef, <2 x i32> <i32 0, i32 1> %tmp14 = call <4 x float> @llvm.amdgcn.image.load.v4f32.v2i32.v8i32(<2 x i32> %tmp13, <8 x i32> %tmp12, i32 15, i1 false, i1 false, i1 false, i1 false) #0 - %tmp15 = inttoptr i64 %tmp7 to <8 x i32> addrspace(2)* - %tmp16 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp15, align 16 + %tmp15 = inttoptr i64 %tmp7 to <8 x i32> addrspace(4)* + %tmp16 = load <8 x i32>, <8 x i32> addrspace(4)* %tmp15, align 16 call void @llvm.amdgcn.image.store.v4f32.v2i32.v8i32(<4 x float> %tmp14, <2 x i32> %tmp13, <8 x i32> %tmp16, i32 15, i1 false, i1 false, i1 false, i1 false) #0 - %tmp17 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp15, align 16 + %tmp17 = load <8 x i32>, <8 x i32> addrspace(4)* %tmp15, align 16 %tmp18 = call <4 x float> @llvm.amdgcn.image.load.v4f32.v2i32.v8i32(<2 x i32> %tmp13, <8 x i32> %tmp17, i32 15, i1 false, i1 false, i1 false, i1 false) #0 - %tmp19 = getelementptr [4294967295 x i8], [4294967295 x i8] addrspace(2)* %tmp8, i64 0, i64 64 - %tmp20 = bitcast i8 addrspace(2)* %tmp19 to <8 x i32> addrspace(2)*, !amdgpu.uniform !0 - %tmp21 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp20, align 16 + %tmp19 = getelementptr [4294967295 x i8], [4294967295 x i8] addrspace(4)* %tmp8, i64 0, i64 64 + %tmp20 = bitcast i8 addrspace(4)* %tmp19 to <8 x i32> addrspace(4)*, !amdgpu.uniform !0 + %tmp21 = load <8 x i32>, <8 x i32> addrspace(4)* %tmp20, align 16 call void @llvm.amdgcn.image.store.v4f32.v2i32.v8i32(<4 x float> %tmp18, <2 x i32> %tmp13, <8 x i32> %tmp21, i32 15, i1 false, i1 false, i1 false, i1 false) #0 ret void } |

