author    Matt Arsenault <Matthew.Arsenault@amd.com>  2016-05-02 20:07:26 +0000
committer Matt Arsenault <Matthew.Arsenault@amd.com>  2016-05-02 20:07:26 +0000
commit    2b957b5a6f228797a871426d48098fee479883ef (patch)
tree      b4bf9a9ce48d543f320cbc6e212beb630a07f17c /llvm/test/CodeGen/AMDGPU/imm.ll
parent    5002a67ef2b0e05350220ba16a2b43f92001799c (diff)
download  bcm5719-llvm-2b957b5a6f228797a871426d48098fee479883ef.tar.gz
          bcm5719-llvm-2b957b5a6f228797a871426d48098fee479883ef.zip
AMDGPU: Make i64 loads/stores promote to v2i32
Now that unaligned access expansion should not attempt to produce i64 accesses, we can remove the hack in PreprocessISelDAG where this was done. This allows splitting i64 private accesses while still allowing the new add nodes that index the vector components to be folded with the base pointer arithmetic.

llvm-svn: 268293
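As a hedged illustration (not part of this commit), here is a minimal IR sketch of the kind of i64 access affected by the promotion; the function name store_imm_i64 is hypothetical, and the expected selection mirrors the f64 pattern in the diff below:

; Assumed example: with i64 loads/stores promoted to v2i32, a constant i64
; store to a global pointer is expected to select into two 32-bit moves
; feeding a single buffer_store_dwordx2, rather than a 64-bit move.
define void @store_imm_i64(i64 addrspace(1)* %out) {
  store i64 0, i64 addrspace(1)* %out
  ret void
}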
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/imm.ll')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/imm.ll  2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/test/CodeGen/AMDGPU/imm.ll b/llvm/test/CodeGen/AMDGPU/imm.ll
index 298cb419e4f..674eceee812 100644
--- a/llvm/test/CodeGen/AMDGPU/imm.ll
+++ b/llvm/test/CodeGen/AMDGPU/imm.ll
@@ -510,7 +510,7 @@ define void @add_inline_imm_64_f64(double addrspace(1)* %out, double %x) {
; CHECK-LABEL: {{^}}store_inline_imm_0.0_f64:
; CHECK: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0
-; CHECK: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0
+; CHECK: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], v[[LO_VREG]]{{$}}
; CHECK: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
define void @store_inline_imm_0.0_f64(double addrspace(1)* %out) {
store double 0.0, double addrspace(1)* %out