author    Aaron Watry <awatry@gmail.com>    2013-07-16 14:29:01 +0000
committer Aaron Watry <awatry@gmail.com>    2013-07-16 14:29:01 +0000
commit    99a2f3b27475fd717618ce464646e0dcd35e2b14 (patch)
tree      754eef3bd26da0f0d4a71e9ef5b2ab0e238d1280  /libclc/generic/lib/shared/vload_impl.ll
parent    4cb7cf276df36a15e9b416f3d0b55d5604615112 (diff)
Fix and re-enable R600 vload/vstore assembly
The assembly optimizations were making unsafe assumptions about which address spaces had which identifiers. Also, fix vload/vstore with 64-bit pointers, which was previously broken on Radeon SI. This version still only provides assembly versions of int/uint 2/4/8/16 for global loads and stores on R600, but it does so in a way that can easily be extended to the private/local/constant address spaces and handled on other architectures as well.

v2:
1) Leave v[load|store]_impl.ll in generic/lib
2) Remove the vload_if.ll and vstore_if.ll interfaces
3) Fix address+offset calculations
4) Remove the offset from the assembly arg list

llvm-svn: 186416
Diffstat (limited to 'libclc/generic/lib/shared/vload_impl.ll')
-rw-r--r--  libclc/generic/lib/shared/vload_impl.ll  50
1 file changed, 20 insertions(+), 30 deletions(-)
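For context, here is a minimal caller-side sketch (an assumption for illustration, not code from this commit; the wrapper name @example_vload2_caller and the i64 offset type are hypothetical). With the offset removed from the helper's argument list, the caller is expected to form the element address itself, using getelementptr rather than the old ptrtoint/add/inttoptr sequence, so the same code also works with 64-bit pointers on SI:

; Hypothetical wrapper: compute the element address, then call the helper.
define <2 x i32> @example_vload2_caller(i64 %offset, i32 addrspace(1)* %base) nounwind readonly {
  ; scale the vload offset by the vector width (2 elements)
  %scaled = shl i64 %offset, 1
  ; form the element pointer with getelementptr; this stays correct for
  ; both 32-bit and 64-bit pointers, unlike ptrtoint/add/inttoptr on i32
  %addr = getelementptr i32 addrspace(1)* %base, i64 %scaled
  %ret = call <2 x i32> @__clc_vload2_i32__addr1(i32 addrspace(1)* %addr)
  ret <2 x i32> %ret
}
declare <2 x i32> @__clc_vload2_i32__addr1(i32 addrspace(1)* nocapture) nounwind readonly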
diff --git a/libclc/generic/lib/shared/vload_impl.ll b/libclc/generic/lib/shared/vload_impl.ll
index ae719e0ae6d..2e70e5f2029 100644
--- a/libclc/generic/lib/shared/vload_impl.ll
+++ b/libclc/generic/lib/shared/vload_impl.ll
@@ -1,43 +1,33 @@
; This provides optimized implementations of vload4/8/16 for 32-bit int/uint
-define <2 x i32> @__clc_vload2_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <2 x i32> addrspace(1)*
- %4 = load <2 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <2 x i32> %4
+define <2 x i32> @__clc_vload2_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <2 x i32> addrspace(1)*
+ %2 = load <2 x i32> addrspace(1)* %1, align 4, !tbaa !3
+ ret <2 x i32> %2
}
-define <3 x i32> @__clc_vload3_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <3 x i32> addrspace(1)*
- %4 = load <3 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <3 x i32> %4
+define <3 x i32> @__clc_vload3_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <3 x i32> addrspace(1)*
+ %2 = load <3 x i32> addrspace(1)* %1, align 4, !tbaa !3
+ ret <3 x i32> %2
}
-define <4 x i32> @__clc_vload4_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <4 x i32> addrspace(1)*
- %4 = load <4 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <4 x i32> %4
+define <4 x i32> @__clc_vload4_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <4 x i32> addrspace(1)*
+ %2 = load <4 x i32> addrspace(1)* %1, align 4, !tbaa !3
+ ret <4 x i32> %2
}
-define <8 x i32> @__clc_vload8_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <8 x i32> addrspace(1)*
- %4 = load <8 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <8 x i32> %4
+define <8 x i32> @__clc_vload8_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <8 x i32> addrspace(1)*
+ %2 = load <8 x i32> addrspace(1)* %1, align 4, !tbaa !3
+ ret <8 x i32> %2
}
-define <16 x i32> @__clc_vload16_impl_i32__global(i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
- %1 = ptrtoint i32 addrspace(1)* %addr to i32
- %2 = add i32 %1, %offset
- %3 = inttoptr i32 %2 to <16 x i32> addrspace(1)*
- %4 = load <16 x i32> addrspace(1)* %3, align 4, !tbaa !3
- ret <16 x i32> %4
+define <16 x i32> @__clc_vload16_i32__addr1(i32 addrspace(1)* nocapture %addr) nounwind readonly alwaysinline {
+ %1 = bitcast i32 addrspace(1)* %addr to <16 x i32> addrspace(1)*
+ %2 = load <16 x i32> addrspace(1)* %1, align 4, !tbaa !3
+ ret <16 x i32> %2
}
!1 = metadata !{metadata !"char", metadata !5}
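The store side (vstore_impl.ll, not shown in this diff) follows the same pattern. A minimal sketch, assuming its helpers are named analogously to the load helpers above:

; Assumed counterpart in vstore_impl.ll: bitcast the element pointer to a
; vector pointer and store through it, mirroring the load-side change.
define void @__clc_vstore2_i32__addr1(<2 x i32> %vec, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
  %1 = bitcast i32 addrspace(1)* %addr to <2 x i32> addrspace(1)*
  store <2 x i32> %vec, <2 x i32> addrspace(1)* %1, align 4, !tbaa !3
  ret void
}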