| field | value | date |
|---|---|---|
| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2019-02-05 00:26:12 +0000 |
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2019-02-05 00:26:12 +0000 |
| commit | 7f09fd6b045da9fd62529fede180ac3e48a88305 | |
| tree | 8f92d36285915a37bd117ce8c0610a82f1a34a80 | |
| parent | d4e37afe450ae1822d65223b297a4b518b9eb268 | |
GlobalISel: Consolidate load/store legalization
The fewerElementsVector implementation for loads and stores handles
the scalar reduction case just as well, so drop the redundant code in
narrowScalar. This also adds support for narrowing scalars whose size
breaks down irregularly (e.g. an s96 split into an s64 and an s32).
llvm-svn: 353125
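
The breakdowns exercised by the updated tests below all follow one pattern: a wide scalar is split into the widest legal power-of-two pieces, with a smaller remainder piece at the end (s96 into s64 + s32, s160 into two s64s + s32, s224 into three s64s + s32). The following is a minimal, self-contained C++ sketch of that splitting strategy; it models the breakdown only, and is not LegalizerHelper's actual code (the names `Piece` and `splitScalar` are illustrative).

```cpp
#include <iostream>
#include <vector>

// One piece of a narrowed scalar load/store: its size in bits and its
// bit offset into the original wide value.
struct Piece {
  unsigned SizeInBits;
  unsigned OffsetInBits;
};

// Split a scalar of WideSize bits into pieces no larger than NarrowSize
// bits (NarrowSize is assumed to be a power of two). Full-width pieces
// come first; an irregular remainder (e.g. the s32 tail of an s96) is
// emitted last. Illustrative model, not LLVM's implementation.
static std::vector<Piece> splitScalar(unsigned WideSize,
                                      unsigned NarrowSize) {
  std::vector<Piece> Pieces;
  unsigned Offset = 0;
  while (WideSize - Offset >= NarrowSize) {
    Pieces.push_back({NarrowSize, Offset});
    Offset += NarrowSize;
  }
  // Remainder piece for sizes that are not a multiple of NarrowSize; a
  // fuller implementation would keep halving until each tail is legal.
  if (Offset < WideSize)
    Pieces.push_back({WideSize - Offset, Offset});
  return Pieces;
}

int main() {
  // The wide scalar sizes used by the AMDGPU tests in this commit,
  // narrowed to 64-bit pieces.
  for (unsigned Wide : {96u, 160u, 224u}) {
    std::cout << "s" << Wide << " ->";
    for (const Piece &P : splitScalar(Wide, 64))
      std::cout << " s" << P.SizeInBits << "@" << P.OffsetInBits;
    std::cout << "\n";
  }
  return 0;
}
```

The printed bit offsets (0, 64, 128, 192) match the G_EXTRACT/G_INSERT offsets in the updated SI and VI check lines below.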
Diffstat (limited to 'llvm/test/CodeGen')
4 files changed, 91 insertions, 54 deletions
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
index da3aa3c1009..c75771a8425 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
@@ -205,7 +205,7 @@ define void @nonpow2_load_narrowing() {
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3:_(s96), %0:_(p0) :: (store 12 into %ir.c, align 16) (in function: nonpow2_store_narrowing
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %4:_(s64) = G_EXTRACT %3:_(s96), 0 (in function: nonpow2_store_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_store_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_store_narrowing:
 define void @nonpow2_store_narrowing(i96* %c) {
@@ -215,7 +215,7 @@ define void @nonpow2_store_narrowing(i96* %c) {
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0:_(s96), %1:_(p0) :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_constant_narrowing)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0:_(s96) = G_CONSTANT i96 0 (in function: nonpow2_constant_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_constant_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_constant_narrowing:
 define void @nonpow2_constant_narrowing() {
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load.mir
index 2115e82d148..bfccdb17757 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load.mir
@@ -243,7 +243,7 @@ body: |
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(s128) = G_LOAD %0 :: (load 4, addrspace 1, align 4)
     $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %1
-    $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %1
+...
 ---
@@ -255,12 +255,11 @@ body: |
     ; SI-LABEL: name: test_load_global_s96_align4
     ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; SI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p1) :: (load 8, align 4, addrspace 1)
-    ; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY [[LOAD]](s64)
-    ; SI: [[DEF:%[0-9]+]]:_(s96) = G_IMPLICIT_DEF
-    ; SI: [[INSERT:%[0-9]+]]:_(s96) = G_INSERT [[DEF]], [[COPY1]](s64), 0
     ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; SI: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C]](s64)
     ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p1) :: (load 4, addrspace 1)
+    ; SI: [[DEF:%[0-9]+]]:_(s96) = G_IMPLICIT_DEF
+    ; SI: [[INSERT:%[0-9]+]]:_(s96) = G_INSERT [[DEF]], [[LOAD]](s64), 0
     ; SI: [[INSERT1:%[0-9]+]]:_(s96) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64
     ; SI: $vgpr0_vgpr1_vgpr2 = COPY [[INSERT1]](s96)
     ; VI-LABEL: name: test_load_global_s96_align4
@@ -285,28 +284,28 @@ body: |
     ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; SI: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C]](s64)
     ; SI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP]](p1) :: (load 8, align 4, addrspace 1)
-    ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[LOAD]](s64), [[LOAD1]](s64)
-    ; SI: [[DEF:%[0-9]+]]:_(s160) = G_IMPLICIT_DEF
-    ; SI: [[INSERT:%[0-9]+]]:_(s160) = G_INSERT [[DEF]], [[MV]](s128), 0
     ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; SI: [[GEP1:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C1]](s64)
     ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p1) :: (load 4, addrspace 1)
-    ; SI: [[INSERT1:%[0-9]+]]:_(s160) = G_INSERT [[INSERT]], [[LOAD2]](s32), 128
-    ; SI: S_NOP 0, implicit [[INSERT1]](s160)
+    ; SI: [[DEF:%[0-9]+]]:_(s160) = G_IMPLICIT_DEF
+    ; SI: [[INSERT:%[0-9]+]]:_(s160) = G_INSERT [[DEF]], [[LOAD]](s64), 0
+    ; SI: [[INSERT1:%[0-9]+]]:_(s160) = G_INSERT [[INSERT]], [[LOAD1]](s64), 64
+    ; SI: [[INSERT2:%[0-9]+]]:_(s160) = G_INSERT [[INSERT1]], [[LOAD2]](s32), 128
+    ; SI: S_NOP 0, implicit [[INSERT2]](s160)
     ; VI-LABEL: name: test_load_global_s160_align4
     ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; VI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p1) :: (load 8, align 4, addrspace 1)
     ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; VI: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C]](s64)
     ; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP]](p1) :: (load 8, align 4, addrspace 1)
-    ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[LOAD]](s64), [[LOAD1]](s64)
-    ; VI: [[DEF:%[0-9]+]]:_(s160) = G_IMPLICIT_DEF
-    ; VI: [[INSERT:%[0-9]+]]:_(s160) = G_INSERT [[DEF]], [[MV]](s128), 0
     ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; VI: [[GEP1:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C1]](s64)
     ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p1) :: (load 4, addrspace 1)
-    ; VI: [[INSERT1:%[0-9]+]]:_(s160) = G_INSERT [[INSERT]], [[LOAD2]](s32), 128
-    ; VI: S_NOP 0, implicit [[INSERT1]](s160)
+    ; VI: [[DEF:%[0-9]+]]:_(s160) = G_IMPLICIT_DEF
+    ; VI: [[INSERT:%[0-9]+]]:_(s160) = G_INSERT [[DEF]], [[LOAD]](s64), 0
+    ; VI: [[INSERT1:%[0-9]+]]:_(s160) = G_INSERT [[INSERT]], [[LOAD1]](s64), 64
+    ; VI: [[INSERT2:%[0-9]+]]:_(s160) = G_INSERT [[INSERT1]], [[LOAD2]](s32), 128
+    ; VI: S_NOP 0, implicit [[INSERT2]](s160)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(s160) = G_LOAD %0 :: (load 20, addrspace 1, align 4)
     S_NOP 0, implicit %1
@@ -327,16 +326,17 @@ body: |
     ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; SI: [[GEP1:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C1]](s64)
     ; SI: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p1) :: (load 8, align 4, addrspace 1)
-    ; SI: [[MV:%[0-9]+]]:_(s192) = G_MERGE_VALUES [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64)
-    ; SI: [[DEF:%[0-9]+]]:_(s224) = G_IMPLICIT_DEF
-    ; SI: [[INSERT:%[0-9]+]]:_(s224) = G_INSERT [[DEF]], [[MV]](s192), 0
     ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
     ; SI: [[GEP2:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C2]](s64)
     ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p1) :: (load 4, addrspace 1)
-    ; SI: [[INSERT1:%[0-9]+]]:_(s224) = G_INSERT [[INSERT]], [[LOAD3]](s32), 192
+    ; SI: [[DEF:%[0-9]+]]:_(s224) = G_IMPLICIT_DEF
+    ; SI: [[INSERT:%[0-9]+]]:_(s224) = G_INSERT [[DEF]], [[LOAD]](s64), 0
+    ; SI: [[INSERT1:%[0-9]+]]:_(s224) = G_INSERT [[INSERT]], [[LOAD1]](s64), 64
+    ; SI: [[INSERT2:%[0-9]+]]:_(s224) = G_INSERT [[INSERT1]], [[LOAD2]](s64), 128
+    ; SI: [[INSERT3:%[0-9]+]]:_(s224) = G_INSERT [[INSERT2]], [[LOAD3]](s32), 192
     ; SI: [[DEF1:%[0-9]+]]:_(s256) = G_IMPLICIT_DEF
-    ; SI: [[INSERT2:%[0-9]+]]:_(s256) = G_INSERT [[DEF1]], [[INSERT1]](s224), 0
-    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](s256)
+    ; SI: [[INSERT4:%[0-9]+]]:_(s256) = G_INSERT [[DEF1]], [[INSERT3]](s224), 0
+    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT4]](s256)
     ; VI-LABEL: name: test_load_global_s224_align4
     ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; VI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p1) :: (load 8, align 4, addrspace 1)
@@ -346,16 +346,17 @@ body: |
     ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; VI: [[GEP1:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C1]](s64)
     ; VI: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p1) :: (load 8, align 4, addrspace 1)
-    ; VI: [[MV:%[0-9]+]]:_(s192) = G_MERGE_VALUES [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64)
-    ; VI: [[DEF:%[0-9]+]]:_(s224) = G_IMPLICIT_DEF
-    ; VI: [[INSERT:%[0-9]+]]:_(s224) = G_INSERT [[DEF]], [[MV]](s192), 0
     ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
     ; VI: [[GEP2:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C2]](s64)
    ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p1) :: (load 4, addrspace 1)
-    ; VI: [[INSERT1:%[0-9]+]]:_(s224) = G_INSERT [[INSERT]], [[LOAD3]](s32), 192
+    ; VI: [[DEF:%[0-9]+]]:_(s224) = G_IMPLICIT_DEF
+    ; VI: [[INSERT:%[0-9]+]]:_(s224) = G_INSERT [[DEF]], [[LOAD]](s64), 0
+    ; VI: [[INSERT1:%[0-9]+]]:_(s224) = G_INSERT [[INSERT]], [[LOAD1]](s64), 64
+    ; VI: [[INSERT2:%[0-9]+]]:_(s224) = G_INSERT [[INSERT1]], [[LOAD2]](s64), 128
+    ; VI: [[INSERT3:%[0-9]+]]:_(s224) = G_INSERT [[INSERT2]], [[LOAD3]](s32), 192
     ; VI: [[DEF1:%[0-9]+]]:_(s256) = G_IMPLICIT_DEF
-    ; VI: [[INSERT2:%[0-9]+]]:_(s256) = G_INSERT [[DEF1]], [[INSERT1]](s224), 0
-    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](s256)
+    ; VI: [[INSERT4:%[0-9]+]]:_(s256) = G_INSERT [[DEF1]], [[INSERT3]](s224), 0
+    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT4]](s256)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(s224) = G_LOAD %0 :: (load 28, addrspace 1, align 4)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir
index 90cdbab9632..d5fabac9855 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir
@@ -258,13 +258,6 @@ body: |
   bb.0:
     liveins: $vgpr0_vgpr1, $vgpr2
 
-    ; CHECK-LABEL: name: test_store_global_i1
-    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
-    ; CHECK: G_STORE [[AND]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(s32) = COPY $vgpr2
     %2:_(s1) = G_TRUNC %1
@@ -277,11 +270,16 @@ body: |
   bb.0:
     liveins: $vgpr0_vgpr1, $vgpr2
 
-    ; CHECK-LABEL: name: test_store_global_i8
-    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
-    ; CHECK: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
+    ; SI-LABEL: name: test_store_global_i8
+    ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; SI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
+    ; VI-LABEL: name: test_store_global_i8
+    ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; VI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(s32) = COPY $vgpr2
     %2:_(s8) = G_TRUNC %1
@@ -294,11 +292,16 @@ body: |
   bb.0:
     liveins: $vgpr0_vgpr1, $vgpr2
 
-    ; CHECK-LABEL: name: test_store_global_i16
-    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
-    ; CHECK: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
+    ; SI-LABEL: name: test_store_global_i16
+    ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; SI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
+    ; VI-LABEL: name: test_store_global_i16
+    ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; VI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(s32) = COPY $vgpr2
     %2:_(s16) = G_TRUNC %1
@@ -306,15 +309,44 @@ body: |
 ...
 ---
+name: test_store_global_96
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4
+
+    ; SI-LABEL: name: test_store_global_96
+    ; SI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+    ; SI: [[COPY1:%[0-9]+]]:_(p1) = COPY $vgpr3_vgpr4
+    ; SI: [[EXTRACT:%[0-9]+]]:_(s64) = G_EXTRACT [[COPY]](s96), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY]](s96), 64
+    ; SI: G_STORE [[EXTRACT]](s64), [[COPY1]](p1) :: (store 8, align 16, addrspace 1)
+    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; SI: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[COPY1]], [[C]](s64)
+    ; SI: G_STORE [[EXTRACT1]](s32), [[GEP]](p1) :: (store 4, align 8, addrspace 1)
+    ; VI-LABEL: name: test_store_global_96
+    ; VI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+    ; VI: [[COPY1:%[0-9]+]]:_(p1) = COPY $vgpr3_vgpr4
+    ; VI: G_STORE [[COPY]](s96), [[COPY1]](p1) :: (store 12, align 16, addrspace 1)
+    %0:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+    %1:_(p1) = COPY $vgpr3_vgpr4
+
+    G_STORE %0, %1 :: (store 12, addrspace 1, align 16)
+...
+
+---
 name: test_store_global_i128
 body: |
   bb.0:
     liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
 
-    ; CHECK-LABEL: name: test_store_global_i128
-    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
-    ; CHECK: G_STORE [[COPY1]](s128), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; SI-LABEL: name: test_store_global_i128
+    ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; SI: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
+    ; SI: G_STORE [[COPY1]](s128), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; VI-LABEL: name: test_store_global_i128
+    ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; VI: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
+    ; VI: G_STORE [[COPY1]](s128), [[COPY]](p1) :: (store 16, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
     G_STORE %1, %0 :: (store 16, addrspace 1)
@@ -326,10 +358,14 @@ body: |
   bb.0:
     liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
 
-    ; CHECK-LABEL: name: test_store_global_v2s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
-    ; CHECK: G_STORE [[COPY1]](<2 x s64>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; SI-LABEL: name: test_store_global_v2s64
+    ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; SI: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
+    ; SI: G_STORE [[COPY1]](<2 x s64>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; VI-LABEL: name: test_store_global_v2s64
+    ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; VI: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
+    ; VI: G_STORE [[COPY1]](<2 x s64>), [[COPY]](p1) :: (store 16, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
     G_STORE %1, %0 :: (store 16, addrspace 1)
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll b/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
index 92bd661286c..8083af34736 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
@@ -8,7 +8,7 @@
 ; the fallback path.
 
 ; Check that we fallback on invoke translation failures.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s80), %0:_(p0) :: (store 10 into %ir.ptr, align 16) (in function: test_x86_fp80_dump)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %1:_(s80) = G_FCONSTANT x86_fp80 0xK4002A000000000000000
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_x86_fp80_dump
 ; FALLBACK-WITH-REPORT-OUT-LABEL: test_x86_fp80_dump:
 define void @test_x86_fp80_dump(x86_fp80* %ptr){

