| field | value |
|---|---|
| author | Tom Stellard <thomas.stellard@amd.com> 2013-06-26 18:22:20 +0000 |
| committer | Tom Stellard <thomas.stellard@amd.com> 2013-06-26 18:22:20 +0000 |
| commit | 64b3bbae1ead6ed15a9aa3e697f90582404de6d8 (patch) |
| tree | f5e70e1a11c6e71ff585a6d20db118a5113cd64b /libclc/generic/lib/shared/vstore_impl.ll |
| parent | 922ac056e3ee8f39eaff1a56781e3331dc7a43d9 (diff) |
libclc: Add assembly versions of vstore for global [u]int4/8/16
The assembly should be generic, but currently R600 only supports 32-bit stores
of [u]int1/4, and only the global address space appears to be well-supported.
R600 lowers the 8/16 component stores to multiple 4-component stores.
The unoptimized C versions of the remaining functions are left in place.
Patch by: Aaron Watry
llvm-svn: 185009
Diffstat (limited to 'libclc/generic/lib/shared/vstore_impl.ll')
| mode | file | lines added |
|---|---|---|
| -rw-r--r-- | libclc/generic/lib/shared/vstore_impl.ll | 50 |

1 file changed, 50 insertions, 0 deletions
```diff
diff --git a/libclc/generic/lib/shared/vstore_impl.ll b/libclc/generic/lib/shared/vstore_impl.ll
new file mode 100644
index 00000000000..3baab5eb99a
--- /dev/null
+++ b/libclc/generic/lib/shared/vstore_impl.ll
@@ -0,0 +1,50 @@
+; This provides optimized implementations of vstore4/8/16 for 32-bit int/uint
+
+define void @__clc_vstore2_impl_i32__global(<2 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+  %1 = ptrtoint i32 addrspace(1)* %addr to i32
+  %2 = add i32 %1, %offset
+  %3 = inttoptr i32 %2 to <2 x i32> addrspace(1)*
+  store <2 x i32> %vec, <2 x i32> addrspace(1)* %3, align 4, !tbaa !3
+  ret void
+}
+
+define void @__clc_vstore3_impl_i32__global(<3 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+  %1 = ptrtoint i32 addrspace(1)* %addr to i32
+  %2 = add i32 %1, %offset
+  %3 = inttoptr i32 %2 to <3 x i32> addrspace(1)*
+  store <3 x i32> %vec, <3 x i32> addrspace(1)* %3, align 4, !tbaa !3
+  ret void
+}
+
+define void @__clc_vstore4_impl_i32__global(<4 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+  %1 = ptrtoint i32 addrspace(1)* %addr to i32
+  %2 = add i32 %1, %offset
+  %3 = inttoptr i32 %2 to <4 x i32> addrspace(1)*
+  store <4 x i32> %vec, <4 x i32> addrspace(1)* %3, align 4, !tbaa !3
+  ret void
+}
+
+define void @__clc_vstore8_impl_i32__global(<8 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+  %1 = ptrtoint i32 addrspace(1)* %addr to i32
+  %2 = add i32 %1, %offset
+  %3 = inttoptr i32 %2 to <8 x i32> addrspace(1)*
+  store <8 x i32> %vec, <8 x i32> addrspace(1)* %3, align 4, !tbaa !3
+  ret void
+}
+
+define void @__clc_vstore16_impl_i32__global(<16 x i32> %vec, i32 %offset, i32 addrspace(1)* nocapture %addr) nounwind alwaysinline {
+  %1 = ptrtoint i32 addrspace(1)* %addr to i32
+  %2 = add i32 %1, %offset
+  %3 = inttoptr i32 %2 to <16 x i32> addrspace(1)*
+  store <16 x i32> %vec, <16 x i32> addrspace(1)* %3, align 4, !tbaa !3
+  ret void
+}
+
+
+!1 = metadata !{metadata !"char", metadata !5}
+!2 = metadata !{metadata !"short", metadata !5}
+!3 = metadata !{metadata !"int", metadata !5}
+!4 = metadata !{metadata !"long", metadata !5}
+!5 = metadata !{metadata !"omnipotent char", metadata !6}
+!6 = metadata !{metadata !"Simple C/C++ TBAA"}
+
```
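These IR entry points are not called directly by OpenCL programs; libclc's C-side `vstore` overloads forward to them. The commit's companion C changes are not shown in this diffstat, so the sketch below is only a hypothetical illustration of what such a wrapper could look like, assuming the declaration matches the IR signature above and that the second argument is a byte offset (the IR adds `%offset` directly to the pointer reinterpreted as an integer); the `_CLC_OVERLOAD`/`_CLC_DEF` macros are libclc conventions from `clc/clc.h`.

```c
#include <clc/clc.h>

// Hypothetical declaration mirroring the IR definition above; in libclc this
// would live in a shared header or an extern declaration (assumption).
void __clc_vstore4_impl_i32__global(int4 vec, int offset, __global int *addr);

// Hypothetical wrapper sketch (not from this commit): forwards vstore4 to the
// assembly implementation. vstore4's offset counts int4 elements, so it is
// converted to a byte offset before the call (assumption based on the IR).
_CLC_OVERLOAD _CLC_DEF void vstore4(int4 vec, size_t offset,
                                    __global int *mem) {
  __clc_vstore4_impl_i32__global(vec, (int)(offset * 4 * sizeof(int)), mem);
}
```

A split like this lets the generic C implementations remain the fallback for other types and address spaces, while targets such as R600 get hand-written IR only for the cases the backend stores efficiently.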

