author    Matt Arsenault <Matthew.Arsenault@amd.com>  2016-07-01 22:47:50 +0000
committer Matt Arsenault <Matthew.Arsenault@amd.com>  2016-07-01 22:47:50 +0000
commit    327bb5ad82328bb92907bb2acf87e2282593e1e4 (patch)
tree      96f20ec1942050bdab64dc965a2f9f5e292994ce /llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll
parent    591ff8376bbf3ee0cc6f6006300878ae96475a49 (diff)
AMDGPU: Improve load/store of illegal types.
There was previously a combine to handle the simple copy case. Split this into handling loads and stores separately. We may want to change how this handles some of the vector extloads, since this can result in large code size increases.

llvm-svn: 274394
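For context, the accesses this combine targets look roughly like the IR below. This is a hypothetical, simplified variant of the test updated in the diff; the function name illegal_type_roundtrip and the <2 x i16> destination are illustrative and not taken from the patch:

; Hypothetical sketch: an i32 load bitcast to the illegal type <2 x i16>
; and stored back. The intent of the combine is for the value to be
; written with a single dword-sized store rather than a series of
; buffer_store_short instructions.
define void @illegal_type_roundtrip(<2 x i16> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
  %val = load i32, i32 addrspace(1)* %in, align 4
  %vec = bitcast i32 %val to <2 x i16>
  store <2 x i16> %vec, <2 x i16> addrspace(1)* %out, align 4
  ret void
}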
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll | 21
1 file changed, 8 insertions(+), 13 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll b/llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll
index 0970e5d3063..55b392a3272 100644
--- a/llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll
+++ b/llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll
@@ -1,15 +1,14 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-
+; XXX - Why the packing?
; FUNC-LABEL: {{^}}scalar_to_vector_v2i32:
; SI: buffer_load_dword [[VAL:v[0-9]+]],
-; SI: v_lshrrev_b32_e32 [[RESULT:v[0-9]+]], 16, [[VAL]]
-; SI: buffer_store_short [[RESULT]]
-; SI: buffer_store_short [[RESULT]]
-; SI: buffer_store_short [[RESULT]]
-; SI: buffer_store_short [[RESULT]]
-; SI: s_endpgm
+; SI: v_lshrrev_b32_e32 [[SHR:v[0-9]+]], 16, [[VAL]]
+; SI: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 16, [[SHR]]
+; SI: v_or_b32_e32 v[[OR:[0-9]+]], [[SHL]], [[SHR]]
+; SI: v_mov_b32_e32 v[[COPY:[0-9]+]], v[[OR]]
+; SI: buffer_store_dwordx2 v{{\[}}[[OR]]:[[COPY]]{{\]}}
define void @scalar_to_vector_v2i32(<4 x i16> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%tmp1 = load i32, i32 addrspace(1)* %in, align 4
%bc = bitcast i32 %tmp1 to <2 x i16>
@@ -21,11 +20,7 @@ define void @scalar_to_vector_v2i32(<4 x i16> addrspace(1)* %out, i32 addrspace(
; FUNC-LABEL: {{^}}scalar_to_vector_v2f32:
; SI: buffer_load_dword [[VAL:v[0-9]+]],
; SI: v_lshrrev_b32_e32 [[RESULT:v[0-9]+]], 16, [[VAL]]
-; SI: buffer_store_short [[RESULT]]
-; SI: buffer_store_short [[RESULT]]
-; SI: buffer_store_short [[RESULT]]
-; SI: buffer_store_short [[RESULT]]
-; SI: s_endpgm
+; SI: buffer_store_dwordx2
define void @scalar_to_vector_v2f32(<4 x i16> addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tmp1 = load float, float addrspace(1)* %in, align 4
%bc = bitcast float %tmp1 to <2 x i16>