Diffstat (limited to 'llvm/test')
 llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll     | 26 +++++++++++++-------------
 llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll           |  8 ++++----
 llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll      |  6 +++---
 llvm/test/CodeGen/AMDGPU/extload-align.ll                  |  2 +-
 llvm/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll        |  4 ++--
 llvm/test/CodeGen/ARM/ldrd-memoper.ll                      |  2 +-
 llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir         |  4 ++--
 llvm/test/CodeGen/ARM/single-issue-r52.mir                 |  2 +-
 llvm/test/CodeGen/Hexagon/post-inc-aa-metadata.ll          |  2 +-
 llvm/test/CodeGen/PowerPC/byval-agg-info.ll                |  4 ++--
 llvm/test/CodeGen/PowerPC/combine_loads_from_build_pair.ll |  8 ++++----
 llvm/test/CodeGen/PowerPC/unal-vec-negarith.ll             |  4 ++--
 llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll        |  2 +-
 llvm/test/CodeGen/X86/merge-store-partially-alias-loads.ll |  8 ++++----
 llvm/test/CodeGen/X86/stack-protector-weight.ll            | 12 ++++++------
 15 files changed, 47 insertions(+), 47 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
index 269a08e934f..70d355bef15 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
@@ -46,7 +46,7 @@ define [1 x double] @constant() {
; The key problem here is that we may fail to create an MBB referenced by a
; PHI. If so, we cannot complete the G_PHI and mustn't try or bad things
; happen.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %6:gpr(s32), %2:gpr(p0); mem:ST4[%addr] (in function: pending_phis)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %6:gpr(s32), %2:gpr(p0) :: (store seq_cst 4 into %ir.addr) (in function: pending_phis)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for pending_phis
; FALLBACK-WITH-REPORT-OUT-LABEL: pending_phis:
define i32 @pending_phis(i1 %tst, i32 %val, i32* %addr) {
@@ -65,7 +65,7 @@ false:
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0:_(s24) = G_LOAD %1:_(p0); mem:LD3[undef](align=1) (in function: odd_type_load)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0:_(s24) = G_LOAD %1:_(p0) :: (load 3 from `i24* undef`, align 1) (in function: odd_type_load)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_type_load
; FALLBACK-WITH-REPORT-OUT-LABEL: odd_type_load
define i32 @odd_type_load() {
@@ -76,7 +76,7 @@ entry:
}
; General legalizer inability to handle types whose size wasn't a power of 2.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s42), %0:_(p0); mem:ST6[%addr](align=8) (in function: odd_type)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s42), %0:_(p0) :: (store 6 into %ir.addr, align 8) (in function: odd_type)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_type
; FALLBACK-WITH-REPORT-OUT-LABEL: odd_type:
define void @odd_type(i42* %addr) {
@@ -85,7 +85,7 @@ define void @odd_type(i42* %addr) {
ret void
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(<7 x s32>), %0:_(p0); mem:ST28[%addr](align=32) (in function: odd_vector)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(<7 x s32>), %0:_(p0) :: (store 28 into %ir.addr, align 32) (in function: odd_vector)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_vector
; FALLBACK-WITH-REPORT-OUT-LABEL: odd_vector:
define void @odd_vector(<7 x i32>* %addr) {
@@ -104,7 +104,7 @@ define i128 @sequence_sizes([8 x i8] %in) {
}
; Just to make sure we don't accidentally emit a normal load/store.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %2:gpr(s64) = G_LOAD %0:gpr(p0); mem:LD8[%addr] (in function: atomic_ops)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %2:gpr(s64) = G_LOAD %0:gpr(p0) :: (load seq_cst 8 from %ir.addr) (in function: atomic_ops)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for atomic_ops
; FALLBACK-WITH-REPORT-LABEL: atomic_ops:
define i64 @atomic_ops(i64* %addr) {
@@ -169,7 +169,7 @@ end:
br label %block
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0:_(<2 x p0>), %4:_(p0); mem:ST16[undef] (in function: vector_of_pointers_insertelement)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0:_(<2 x p0>), %4:_(p0) :: (store 16 into `<2 x i16*>* undef`) (in function: vector_of_pointers_insertelement)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_insertelement
; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_insertelement:
define void @vector_of_pointers_insertelement() {
@@ -185,7 +185,7 @@ end:
br label %block
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s96), %3:_(p0); mem:ST12[undef](align=4) (in function: nonpow2_insertvalue_narrowing)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s96), %3:_(p0) :: (store 12 into `%struct96* undef`, align 4) (in function: nonpow2_insertvalue_narrowing)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_insertvalue_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_insertvalue_narrowing:
%struct96 = type { float, float, float }
@@ -195,7 +195,7 @@ define void @nonpow2_insertvalue_narrowing(float %a) {
ret void
}
-; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing)
+; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %4 :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_add_narrowing)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_add_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_add_narrowing:
define void @nonpow2_add_narrowing() {
@@ -206,7 +206,7 @@ define void @nonpow2_add_narrowing() {
ret void
}
-; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing)
+; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %4 :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_add_narrowing)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_or_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_or_narrowing:
define void @nonpow2_or_narrowing() {
@@ -217,7 +217,7 @@ define void @nonpow2_or_narrowing() {
ret void
}
-; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0, %1; mem:ST12[undef](align=16) (in function: nonpow2_load_narrowing)
+; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0, %1 :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_load_narrowing)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_load_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_load_narrowing:
define void @nonpow2_load_narrowing() {
@@ -226,7 +226,7 @@ define void @nonpow2_load_narrowing() {
ret void
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3:_(s96), %0:_(p0); mem:ST12[%c](align=16) (in function: nonpow2_store_narrowing
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3:_(s96), %0:_(p0) :: (store 12 into %ir.c, align 16) (in function: nonpow2_store_narrowing
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_store_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_store_narrowing:
define void @nonpow2_store_narrowing(i96* %c) {
@@ -236,7 +236,7 @@ define void @nonpow2_store_narrowing(i96* %c) {
ret void
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0:_(s96), %1:_(p0); mem:ST12[undef](align=16) (in function: nonpow2_constant_narrowing)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0:_(s96), %1:_(p0) :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_constant_narrowing)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_constant_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_constant_narrowing:
define void @nonpow2_constant_narrowing() {
@@ -246,7 +246,7 @@ define void @nonpow2_constant_narrowing() {
; Currently can't handle vector lengths that aren't an exact multiple of
; natively supported vector lengths. Test that the fall-back works for those.
-; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: <unknown>:0:0: unable to legalize instruction: %1:_(<7 x s64>) = G_ADD %0, %0; (in function: nonpow2_vector_add_fewerelements
+; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: <unknown>:0:0: unable to legalize instruction: %1:_(<7 x s64>) = G_ADD %0, %0 (in function: nonpow2_vector_add_fewerelements
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %2:_(s64) = G_EXTRACT_VECTOR_ELT %1:_(<7 x s64>), %3:_(s64) (in function: nonpow2_vector_add_fewerelements)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_vector_add_fewerelements
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_vector_add_fewerelements:
diff --git a/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll b/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll
index 5d6c5a7b2ca..6d9b7d4bb4f 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll
@@ -130,10 +130,10 @@ entry:
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: stp_volatile:%bb.0
; CHECK-NOT: Cluster ld/st
-; CHECK:SU(2): STRXui %1:gpr64, %0:gpr64common, 3; mem:Volatile
-; CHECK:SU(3): STRXui %1:gpr64, %0:gpr64common, 2; mem:Volatile
-; CHECK:SU(4): STRXui %1:gpr64, %0:gpr64common, 1; mem:Volatile
-; CHECK:SU(5): STRXui %1:gpr64, %0:gpr64common, 4; mem:Volatile
+; CHECK:SU(2): STRXui %1:gpr64, %0:gpr64common, 3 :: (volatile
+; CHECK:SU(3): STRXui %1:gpr64, %0:gpr64common, 2 :: (volatile
+; CHECK:SU(4): STRXui %1:gpr64, %0:gpr64common, 1 :: (volatile
+; CHECK:SU(5): STRXui %1:gpr64, %0:gpr64common, 4 :: (volatile
define i64 @stp_volatile(i64* nocapture %P, i64 %v) {
entry:
%arrayidx = getelementptr inbounds i64, i64* %P, i64 3
diff --git a/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll b/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
index 7990138ffad..6d81d9acd86 100644
--- a/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
@@ -5,14 +5,14 @@
;
; CHECK: ********** MI Scheduling **********
; CHECK: misched_bug:%bb.0 entry
-; CHECK: SU(2): %2:gpr32 = LDRWui %0:gpr64common, 1; mem:LD4[%ptr1_plus1]
+; CHECK: SU(2): %2:gpr32 = LDRWui %0:gpr64common, 1 :: (load 4 from %ir.ptr1_plus1)
; CHECK: Successors:
; CHECK-NEXT: SU(5): Data Latency=4 Reg=%2
; CHECK-NEXT: SU(4): Ord Latency=0
-; CHECK: SU(3): STRWui $wzr, %0:gpr64common, 0; mem:ST4[%ptr1]
+; CHECK: SU(3): STRWui $wzr, %0:gpr64common, 0 :: (store 4 into %ir.ptr1)
; CHECK: Successors:
; CHECK: SU(4): Ord Latency=0
-; CHECK: SU(4): STRWui $wzr, %1:gpr64common, 0; mem:ST4[%ptr2]
+; CHECK: SU(4): STRWui $wzr, %1:gpr64common, 0 :: (store 4 into %ir.ptr2)
; CHECK: SU(5): $w0 = COPY %2
; CHECK: ** ScheduleDAGMI::schedule picking next node
define i32 @misched_bug(i32* %ptr1, i32* %ptr2) {
diff --git a/llvm/test/CodeGen/AMDGPU/extload-align.ll b/llvm/test/CodeGen/AMDGPU/extload-align.ll
index 4df4b265b23..ed6890e5c82 100644
--- a/llvm/test/CodeGen/AMDGPU/extload-align.ll
+++ b/llvm/test/CodeGen/AMDGPU/extload-align.ll
@@ -7,7 +7,7 @@ target datalayout = "A5"
; size and not 4 corresponding to the sign-extended size (i32).
; DEBUG: {{^}}# Machine code for function extload_align:
-; DEBUG: mem:LD2[<unknown>(addrspace=5)]
+; DEBUG: (load 2, addrspace 5)
; DEBUG: {{^}}# End machine code for function extload_align.
define amdgpu_kernel void @extload_align(i32 addrspace(5)* %out, i32 %index) #0 {
diff --git a/llvm/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll b/llvm/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll
index ed5255bfbeb..208cf8db99c 100644
--- a/llvm/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll
+++ b/llvm/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll
@@ -5,11 +5,11 @@
; latency regardless of whether they are barriers or not.
; CHECK: ** List Scheduling
-; CHECK: SU(2){{.*}}STR{{.*}}Volatile
+; CHECK: SU(2){{.*}}STR{{.*}}(volatile
; CHECK-NOT: SU({{.*}}): Ord
; CHECK: SU(3): Ord Latency=1
; CHECK-NOT: SU({{.*}}): Ord
-; CHECK: SU(3){{.*}}LDR{{.*}}Volatile
+; CHECK: SU(3){{.*}}LDR{{.*}}(volatile
; CHECK-NOT: SU({{.*}}): Ord
; CHECK: SU(2): Ord Latency=1
; CHECK-NOT: SU({{.*}}): Ord
diff --git a/llvm/test/CodeGen/ARM/ldrd-memoper.ll b/llvm/test/CodeGen/ARM/ldrd-memoper.ll
index 744fbd5efb8..78121adcfeb 100644
--- a/llvm/test/CodeGen/ARM/ldrd-memoper.ll
+++ b/llvm/test/CodeGen/ARM/ldrd-memoper.ll
@@ -5,7 +5,7 @@
@b = external global i64*
-; CHECK: Formed {{.*}} t2LDRD{{.*}} mem:LD4[%0] LD4[%0+4]
+; CHECK: Formed {{.*}} t2LDRD{{.*}} (load 4 from %ir.0), (load 4 from %ir.0 + 4)
define i64 @t(i64 %a) nounwind readonly {
entry:
%0 = load i64*, i64** @b, align 4
diff --git a/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir b/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir
index ea8cecc5a11..78e2ab035b9 100644
--- a/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir
+++ b/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir
@@ -42,7 +42,7 @@
# CHECK_SWIFT: Latency : 2
# CHECK_R52: Latency : 2
#
-# CHECK: SU(3): %3:rgpr = t2LDRi12 %2:rgpr, 0, 14, $noreg; mem:LD4[@g1](dereferenceable)
+# CHECK: SU(3): %3:rgpr = t2LDRi12 %2:rgpr, 0, 14, $noreg :: (dereferenceable load 4 from @g1)
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 3
# CHECK_R52: Latency : 4
@@ -57,7 +57,7 @@
# CHECK_SWIFT: Latency : 14
# CHECK_R52: Latency : 8
-# CHECK: SU(8): t2STRi12 %7:rgpr, %2:rgpr, 0, 14, $noreg; mem:ST4[@g1]
+# CHECK: SU(8): t2STRi12 %7:rgpr, %2:rgpr, 0, 14, $noreg :: (store 4 into @g1)
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 0
# CHECK_R52: Latency : 4
diff --git a/llvm/test/CodeGen/ARM/single-issue-r52.mir b/llvm/test/CodeGen/ARM/single-issue-r52.mir
index ba43f02101c..22be6a0eade 100644
--- a/llvm/test/CodeGen/ARM/single-issue-r52.mir
+++ b/llvm/test/CodeGen/ARM/single-issue-r52.mir
@@ -20,7 +20,7 @@
# CHECK: ********** MI Scheduling **********
# CHECK: ScheduleDAGMILive::schedule starting
-# CHECK: SU(1): %1:qqpr = VLD4d8Pseudo %0:gpr, 8, 14, $noreg; mem:LD32[%A](align=8)
+# CHECK: SU(1): %1:qqpr = VLD4d8Pseudo %0:gpr, 8, 14, $noreg :: (load 32 from %ir.A, align 8)
# CHECK: Latency : 8
# CHECK: Single Issue : true;
# CHECK: SU(2): %4:dpr = VADDv8i8 %1.dsub_0:qqpr, %1.dsub_1:qqpr, 14, $noreg
diff --git a/llvm/test/CodeGen/Hexagon/post-inc-aa-metadata.ll b/llvm/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
index fd327cc53f9..940dc899124 100644
--- a/llvm/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
+++ b/llvm/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
@@ -3,7 +3,7 @@
; Check that the generated post-increment load has TBAA information.
; CHECK-LABEL: Machine code for function fred:
-; CHECK: = V6_vL32b_pi %{{[0-9]+}}{{[^,]*}}, 64; mem:LD64[{{.*}}](tbaa=
+; CHECK: = V6_vL32b_pi %{{[0-9]+}}{{[^,]*}}, 64 :: (load 64{{.*}}!tbaa
target triple = "hexagon"
diff --git a/llvm/test/CodeGen/PowerPC/byval-agg-info.ll b/llvm/test/CodeGen/PowerPC/byval-agg-info.ll
index ec53dee3155..21aa5821c88 100644
--- a/llvm/test/CodeGen/PowerPC/byval-agg-info.ll
+++ b/llvm/test/CodeGen/PowerPC/byval-agg-info.ll
@@ -12,6 +12,6 @@ entry:
}
; Make sure that the MMO on the store has no offset from the byval
-; variable itself (we used to have mem:ST8[%v+64]).
-; CHECK: STD killed renamable $x5, 176, $x1; mem:ST8[%v](align=16)
+; variable itself (we used to have (store 8 into %ir.v + 64)).
+; CHECK: STD killed renamable $x5, 176, $x1 :: (store 8 into %ir.v, align 16)
diff --git a/llvm/test/CodeGen/PowerPC/combine_loads_from_build_pair.ll b/llvm/test/CodeGen/PowerPC/combine_loads_from_build_pair.ll
index 45cc740d1ea..37240c575fa 100644
--- a/llvm/test/CodeGen/PowerPC/combine_loads_from_build_pair.ll
+++ b/llvm/test/CodeGen/PowerPC/combine_loads_from_build_pair.ll
@@ -9,13 +9,13 @@ define i64 @func1(i64 %p1, i64 %p2, i64 %p3, i64 %p4, { i64, i8* } %struct) {
; so we expect the LD8 to load from the address used in the original HIBITS
; load.
; CHECK-LABEL: Initial selection DAG:
-; CHECK-DAG: [[LOBITS:t[0-9]+]]: i32,ch = load<LD4[FixedStack-2]>
-; CHECK-DAG: [[HIBITS:t[0-9]+]]: i32,ch = load<LD4[FixedStack-1]>
+; CHECK-DAG: [[LOBITS:t[0-9]+]]: i32,ch = load<(load 4 from %fixed-stack.1)>
+; CHECK-DAG: [[HIBITS:t[0-9]+]]: i32,ch = load<(load 4 from %fixed-stack.2)>
; CHECK: Combining: t{{[0-9]+}}: i64 = build_pair [[LOBITS]], [[HIBITS]]
; CHECK-NEXT: Creating new node
-; CHECK-SAME: load<LD8[FixedStack-1]
+; CHECK-SAME: load<(load 8 from %fixed-stack.2, align 4)>
; CHECK-NEXT: into
-; CHECK-SAME: load<LD8[FixedStack-1]
+; CHECK-SAME: load<(load 8 from %fixed-stack.2, align 4)>
; CHECK-LABEL: Optimized lowered selection DAG:
%result = extractvalue {i64, i8* } %struct, 0
ret i64 %result
diff --git a/llvm/test/CodeGen/PowerPC/unal-vec-negarith.ll b/llvm/test/CodeGen/PowerPC/unal-vec-negarith.ll
index 98bf2bc4794..625b9b4b41d 100644
--- a/llvm/test/CodeGen/PowerPC/unal-vec-negarith.ll
+++ b/llvm/test/CodeGen/PowerPC/unal-vec-negarith.ll
@@ -9,8 +9,8 @@ entry:
%r = load <16 x i8>, <16 x i8>* %p, align 1
ret <16 x i8> %r
-; CHECK-NOT: v4i32,ch = llvm.ppc.altivec.lvx{{.*}}<LD31[%p+4294967281](align=1)>
-; CHECK: v4i32,ch = llvm.ppc.altivec.lvx{{.*}}<LD31[%p+-15](align=1)>
+; CHECK-NOT: v4i32,ch = llvm.ppc.altivec.lvx{{.*}}<(load 31 from %ir.p + 4294967281, align 1)>
+; CHECK: v4i32,ch = llvm.ppc.altivec.lvx{{.*}}<(load 31 from %ir.p - 15, align 1)>
}
attributes #0 = { nounwind "target-cpu"="pwr7" }
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll b/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
index 959d45a0112..92bd661286c 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
@@ -8,7 +8,7 @@
; the fallback path.
; Check that we fallback on invoke translation failures.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s80), %0:_(p0); mem:ST10[%ptr](align=16) (in function: test_x86_fp80_dump)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s80), %0:_(p0) :: (store 10 into %ir.ptr, align 16) (in function: test_x86_fp80_dump)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_x86_fp80_dump
; FALLBACK-WITH-REPORT-OUT-LABEL: test_x86_fp80_dump:
define void @test_x86_fp80_dump(x86_fp80* %ptr){
diff --git a/llvm/test/CodeGen/X86/merge-store-partially-alias-loads.ll b/llvm/test/CodeGen/X86/merge-store-partially-alias-loads.ll
index 8e3c4305d50..8e139b11be9 100644
--- a/llvm/test/CodeGen/X86/merge-store-partially-alias-loads.ll
+++ b/llvm/test/CodeGen/X86/merge-store-partially-alias-loads.ll
@@ -18,12 +18,12 @@
; DBGDAG-DAG: [[BASEPTR:t[0-9]+]]: i64,ch = CopyFromReg [[ENTRYTOKEN]],
; DBGDAG-DAG: [[ADDPTR:t[0-9]+]]: i64 = add {{(nuw )?}}[[BASEPTR]], Constant:i64<2>
-; DBGDAG-DAG: [[LD2:t[0-9]+]]: i16,ch = load<LD2[%tmp81](align=1)> [[ENTRYTOKEN]], [[BASEPTR]], undef:i64
-; DBGDAG-DAG: [[LD1:t[0-9]+]]: i8,ch = load<LD1[%tmp12]> [[ENTRYTOKEN]], [[ADDPTR]], undef:i64
+; DBGDAG-DAG: [[LD2:t[0-9]+]]: i16,ch = load<(load 2 from %ir.tmp81, align 1)> [[ENTRYTOKEN]], [[BASEPTR]], undef:i64
+; DBGDAG-DAG: [[LD1:t[0-9]+]]: i8,ch = load<(load 1 from %ir.tmp12)> [[ENTRYTOKEN]], [[ADDPTR]], undef:i64
-; DBGDAG-DAG: [[ST1:t[0-9]+]]: ch = store<ST1[%tmp14]> [[ENTRYTOKEN]], [[LD1]], t{{[0-9]+}}, undef:i64
+; DBGDAG-DAG: [[ST1:t[0-9]+]]: ch = store<(store 1 into %ir.tmp14)> [[ENTRYTOKEN]], [[LD1]], t{{[0-9]+}}, undef:i64
; DBGDAG-DAG: [[LOADTOKEN:t[0-9]+]]: ch = TokenFactor [[LD2]]:1, [[LD1]]:1
-; DBGDAG-DAG: [[ST2:t[0-9]+]]: ch = store<ST2[%tmp10](align=1)> [[LOADTOKEN]], [[LD2]], t{{[0-9]+}}, undef:i64
+; DBGDAG-DAG: [[ST2:t[0-9]+]]: ch = store<(store 2 into %ir.tmp10, align 1)> [[LOADTOKEN]], [[LD2]], t{{[0-9]+}}, undef:i64
; DBGDAG: X86ISD::RET_FLAG t{{[0-9]+}},
diff --git a/llvm/test/CodeGen/X86/stack-protector-weight.ll b/llvm/test/CodeGen/X86/stack-protector-weight.ll
index c58371b094a..9ec929521d6 100644
--- a/llvm/test/CodeGen/X86/stack-protector-weight.ll
+++ b/llvm/test/CodeGen/X86/stack-protector-weight.ll
@@ -16,16 +16,16 @@
; DARWIN-IR: CALL64pcrel32 @__stack_chk_fail
; MSVC-SELDAG: # Machine code for function test_branch_weights:
-; MSVC-SELDAG: mem:Volatile LD4[@__security_cookie]
-; MSVC-SELDAG: ST4[FixedStack0]
-; MSVC-SELDAG: LD4[FixedStack0]
+; MSVC-SELDAG: :: (volatile load 4 from @__security_cookie)
+; MSVC-SELDAG: (store 4 into stack)
+; MSVC-SELDAG: (volatile load 4 from %stack.0.StackGuardSlot)
; MSVC-SELDAG: CALLpcrel32 @__security_check_cookie
; MSVC always uses selection DAG now.
; MSVC-IR: # Machine code for function test_branch_weights:
-; MSVC-IR: mem:Volatile LD4[@__security_cookie]
-; MSVC-IR: ST4[FixedStack0]
-; MSVC-IR: LD4[FixedStack0]
+; MSVC-IR: :: (volatile load 4 from @__security_cookie)
+; MSVC-IR: (store 4 into stack)
+; MSVC-IR: (volatile load 4 from %stack.0.StackGuardSlot)
; MSVC-IR: CALLpcrel32 @__security_check_cookie
define i32 @test_branch_weights(i32 %n) #0 {