author    Stephen Lin <stephenwlin@gmail.com>  2013-07-14 06:24:09 +0000
committer Stephen Lin <stephenwlin@gmail.com>  2013-07-14 06:24:09 +0000
commit    d24ab20e9b11d2076d8b9d5cd96f41a6b9c399fb (patch)
tree      5876bdcdf9901ec662b77209cca2b615c0417711 /llvm/test/CodeGen/AArch64
parent    8d304d5c73602bc6d6581a74a553e839d65320ff (diff)
Mass update to CodeGen tests to use CHECK-LABEL for labels corresponding to function definitions for more informative error messages. No functionality change and all updated tests passed locally.
This update was done with the following bash script:

  find test/CodeGen -name "*.ll" | \
  while read NAME; do
    echo "$NAME"
    if ! grep -q "^; *RUN: *llc.*debug" $NAME; then
      TEMP=`mktemp -t temp`
      cp $NAME $TEMP
      sed -n "s/^define [^@]*@\([A-Za-z0-9_]*\)(.*$/\1/p" < $NAME | \
      while read FUNC; do
        sed -i '' "s/;\(.*\)\([A-Za-z0-9_-]*\):\( *\)$FUNC: *\$/;\1\2-LABEL:\3$FUNC:/g" $TEMP
      done
      sed -i '' "s/;\(.*\)-LABEL-LABEL:/;\1-LABEL:/" $TEMP
      sed -i '' "s/;\(.*\)-NEXT-LABEL:/;\1-NEXT:/" $TEMP
      sed -i '' "s/;\(.*\)-NOT-LABEL:/;\1-NOT:/" $TEMP
      sed -i '' "s/;\(.*\)-DAG-LABEL:/;\1-DAG:/" $TEMP
      mv $TEMP $NAME
    fi
  done

llvm-svn: 186280
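For context (not part of the commit itself), here is a minimal sketch of why CHECK-LABEL gives more informative error messages; the functions @f and @g and the expected AArch64 instructions are hypothetical. FileCheck splits the tool's output at each line matched by a CHECK-LABEL directive and confines the other CHECK directives to the block they fall in, so a failing check is reported against the correct function rather than silently matching similar text emitted for a later function:

  ; RUN: llc -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s

  define i32 @f(i32 %a) {
  ; CHECK-LABEL: f:
  ; CHECK: add w0, w0, #1
    %r = add i32 %a, 1
    ret i32 %r
  }

  define i32 @g(i32 %a) {
  ; CHECK-LABEL: g:
  ; CHECK: sub w0, w0, #1
    %r = sub i32 %a, 1
    ret i32 %r
  }

With a plain "; CHECK: f:" line, a check that fails inside @f could still find its text somewhere in @g's assembly and report a confusing mismatch far from the real problem; with CHECK-LABEL the search is pinned inside @f's block.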
Diffstat (limited to 'llvm/test/CodeGen/AArch64')
-rw-r--r--  llvm/test/CodeGen/AArch64/addsub.ll                    |  8
-rw-r--r--  llvm/test/CodeGen/AArch64/addsub_ext.ll                |  6
-rw-r--r--  llvm/test/CodeGen/AArch64/atomic-ops-not-barriers.ll   |  2
-rw-r--r--  llvm/test/CodeGen/AArch64/basic-pic.ll                 | 10
-rw-r--r--  llvm/test/CodeGen/AArch64/bitfield-insert.ll           |  2
-rw-r--r--  llvm/test/CodeGen/AArch64/breg.ll                      |  2
-rw-r--r--  llvm/test/CodeGen/AArch64/callee-save.ll               |  2
-rw-r--r--  llvm/test/CodeGen/AArch64/code-model-large-abs.ll      | 10
-rw-r--r--  llvm/test/CodeGen/AArch64/compare-branch.ll            |  2
-rw-r--r--  llvm/test/CodeGen/AArch64/dp1.ll                       | 28
-rw-r--r--  llvm/test/CodeGen/AArch64/dp2.ll                       | 24
-rw-r--r--  llvm/test/CodeGen/AArch64/extract.ll                   | 10
-rw-r--r--  llvm/test/CodeGen/AArch64/fastcc-reserved.ll           |  4
-rw-r--r--  llvm/test/CodeGen/AArch64/fastcc.ll                    | 12
-rw-r--r--  llvm/test/CodeGen/AArch64/floatdp_1source.ll           |  6
-rw-r--r--  llvm/test/CodeGen/AArch64/fp-dp3.ll                    |  8
-rw-r--r--  llvm/test/CodeGen/AArch64/fpimm.ll                     |  4
-rw-r--r--  llvm/test/CodeGen/AArch64/func-argpassing.ll           | 20
-rw-r--r--  llvm/test/CodeGen/AArch64/func-calls.ll                |  8
-rw-r--r--  llvm/test/CodeGen/AArch64/got-abuse.ll                 |  2
-rw-r--r--  llvm/test/CodeGen/AArch64/i128-align.ll                |  4
-rw-r--r--  llvm/test/CodeGen/AArch64/ldst-regoffset.ll            | 14
-rw-r--r--  llvm/test/CodeGen/AArch64/ldst-unscaledimm.ll          | 10
-rw-r--r--  llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll          | 10
-rw-r--r--  llvm/test/CodeGen/AArch64/literal_pools.ll             |  4
-rw-r--r--  llvm/test/CodeGen/AArch64/local_vars.ll                |  4
-rw-r--r--  llvm/test/CodeGen/AArch64/logical_shifted_reg.ll       |  6
-rw-r--r--  llvm/test/CodeGen/AArch64/regress-tail-livereg.ll      |  2
-rw-r--r--  llvm/test/CodeGen/AArch64/sibling-call.ll              | 16
-rw-r--r--  llvm/test/CodeGen/AArch64/tail-call.ll                 | 14
30 files changed, 127 insertions(+), 127 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/addsub.ll b/llvm/test/CodeGen/AArch64/addsub.ll
index c0e1cc95f27..4d46d04b80f 100644
--- a/llvm/test/CodeGen/AArch64/addsub.ll
+++ b/llvm/test/CodeGen/AArch64/addsub.ll
@@ -9,7 +9,7 @@
; Add pure 12-bit immediates:
define void @add_small() {
-; CHECK: add_small:
+; CHECK-LABEL: add_small:
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #4095
%val32 = load i32* @var_i32
@@ -26,7 +26,7 @@ define void @add_small() {
; Add 12-bit immediates, shifted left by 12 bits
define void @add_med() {
-; CHECK: add_med:
+; CHECK-LABEL: add_med:
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #3567, lsl #12
%val32 = load i32* @var_i32
@@ -43,7 +43,7 @@ define void @add_med() {
; Subtract 12-bit immediates
define void @sub_small() {
-; CHECK: sub_small:
+; CHECK-LABEL: sub_small:
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, #4095
%val32 = load i32* @var_i32
@@ -60,7 +60,7 @@ define void @sub_small() {
; Subtract 12-bit immediates, shifted left by 12 bits
define void @sub_med() {
-; CHECK: sub_med:
+; CHECK-LABEL: sub_med:
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, #3567, lsl #12
%val32 = load i32* @var_i32
diff --git a/llvm/test/CodeGen/AArch64/addsub_ext.ll b/llvm/test/CodeGen/AArch64/addsub_ext.ll
index 0bda0f9191e..f0e11c65224 100644
--- a/llvm/test/CodeGen/AArch64/addsub_ext.ll
+++ b/llvm/test/CodeGen/AArch64/addsub_ext.ll
@@ -6,7 +6,7 @@
@var64 = global i64 0
define void @addsub_i8rhs() {
-; CHECK: addsub_i8rhs:
+; CHECK-LABEL: addsub_i8rhs:
%val8_tmp = load i8* @var8
%lhs32 = load i32* @var32
%lhs64 = load i64* @var64
@@ -81,7 +81,7 @@ end:
}
define void @addsub_i16rhs() {
-; CHECK: addsub_i16rhs:
+; CHECK-LABEL: addsub_i16rhs:
%val16_tmp = load i16* @var16
%lhs32 = load i32* @var32
%lhs64 = load i64* @var64
@@ -159,7 +159,7 @@ end:
; example), but the remaining instructions are probably not idiomatic
; in the face of "add/sub (shifted register)" so I don't intend to.
define void @addsub_i32rhs() {
-; CHECK: addsub_i32rhs:
+; CHECK-LABEL: addsub_i32rhs:
%val32_tmp = load i32* @var32
%lhs64 = load i64* @var64
diff --git a/llvm/test/CodeGen/AArch64/atomic-ops-not-barriers.ll b/llvm/test/CodeGen/AArch64/atomic-ops-not-barriers.ll
index 9888a742e32..da095a0a42c 100644
--- a/llvm/test/CodeGen/AArch64/atomic-ops-not-barriers.ll
+++ b/llvm/test/CodeGen/AArch64/atomic-ops-not-barriers.ll
@@ -1,7 +1,7 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
define i32 @foo(i32* %var, i1 %cond) {
-; CHECK: foo:
+; CHECK-LABEL: foo:
br i1 %cond, label %atomic_ver, label %simple_ver
simple_ver:
%oldval = load i32* %var
diff --git a/llvm/test/CodeGen/AArch64/basic-pic.ll b/llvm/test/CodeGen/AArch64/basic-pic.ll
index 1f0b2826411..1b14be2801b 100644
--- a/llvm/test/CodeGen/AArch64/basic-pic.ll
+++ b/llvm/test/CodeGen/AArch64/basic-pic.ll
@@ -6,7 +6,7 @@
; CHECK-ELF: RELOCATION RECORDS FOR [.rela.text]
define i32 @get_globalvar() {
-; CHECK: get_globalvar:
+; CHECK-LABEL: get_globalvar:
%val = load i32* @var
; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
@@ -19,7 +19,7 @@ define i32 @get_globalvar() {
}
define i32* @get_globalvaraddr() {
-; CHECK: get_globalvaraddr:
+; CHECK-LABEL: get_globalvaraddr:
%val = load i32* @var
; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
@@ -33,7 +33,7 @@ define i32* @get_globalvaraddr() {
@hiddenvar = hidden global i32 0
define i32 @get_hiddenvar() {
-; CHECK: get_hiddenvar:
+; CHECK-LABEL: get_hiddenvar:
%val = load i32* @hiddenvar
; CHECK: adrp x[[HI:[0-9]+]], hiddenvar
@@ -45,7 +45,7 @@ define i32 @get_hiddenvar() {
}
define i32* @get_hiddenvaraddr() {
-; CHECK: get_hiddenvaraddr:
+; CHECK-LABEL: get_hiddenvaraddr:
%val = load i32* @hiddenvar
; CHECK: adrp [[HI:x[0-9]+]], hiddenvar
@@ -57,7 +57,7 @@ define i32* @get_hiddenvaraddr() {
}
define void()* @get_func() {
-; CHECK: get_func:
+; CHECK-LABEL: get_func:
ret void()* bitcast(void()*()* @get_func to void()*)
; CHECK: adrp x[[GOTHI:[0-9]+]], :got:get_func
diff --git a/llvm/test/CodeGen/AArch64/bitfield-insert.ll b/llvm/test/CodeGen/AArch64/bitfield-insert.ll
index fe2aedb45a6..1f046087abc 100644
--- a/llvm/test/CodeGen/AArch64/bitfield-insert.ll
+++ b/llvm/test/CodeGen/AArch64/bitfield-insert.ll
@@ -6,7 +6,7 @@
%struct.foo = type { i8, [2 x i8], i8 }
define [1 x i64] @from_clang([1 x i64] %f.coerce, i32 %n) nounwind readnone {
-; CHECK: from_clang:
+; CHECK-LABEL: from_clang:
; CHECK: bfi w0, w1, #3, #4
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/breg.ll b/llvm/test/CodeGen/AArch64/breg.ll
index 38ed4734e1b..1ed5b9b755d 100644
--- a/llvm/test/CodeGen/AArch64/breg.ll
+++ b/llvm/test/CodeGen/AArch64/breg.ll
@@ -3,7 +3,7 @@
@stored_label = global i8* null
define void @foo() {
-; CHECK: foo:
+; CHECK-LABEL: foo:
%lab = load i8** @stored_label
indirectbr i8* %lab, [label %otherlab, label %retlab]
; CHECK: adrp {{x[0-9]+}}, stored_label
diff --git a/llvm/test/CodeGen/AArch64/callee-save.ll b/llvm/test/CodeGen/AArch64/callee-save.ll
index c66aa5bfc51..52243b05b4b 100644
--- a/llvm/test/CodeGen/AArch64/callee-save.ll
+++ b/llvm/test/CodeGen/AArch64/callee-save.ll
@@ -3,7 +3,7 @@
@var = global float 0.0
define void @foo() {
-; CHECK: foo:
+; CHECK-LABEL: foo:
; CHECK: stp d14, d15, [sp
; CHECK: stp d12, d13, [sp
diff --git a/llvm/test/CodeGen/AArch64/code-model-large-abs.ll b/llvm/test/CodeGen/AArch64/code-model-large-abs.ll
index a365568e11e..b387f285d1d 100644
--- a/llvm/test/CodeGen/AArch64/code-model-large-abs.ll
+++ b/llvm/test/CodeGen/AArch64/code-model-large-abs.ll
@@ -6,7 +6,7 @@
@var64 = global i64 0
define i8* @global_addr() {
-; CHECK: global_addr:
+; CHECK-LABEL: global_addr:
ret i8* @var8
; The movz/movk calculation should end up returned directly in x0.
; CHECK: movz x0, #:abs_g3:var8
@@ -17,7 +17,7 @@ define i8* @global_addr() {
}
define i8 @global_i8() {
-; CHECK: global_i8:
+; CHECK-LABEL: global_i8:
%val = load i8* @var8
ret i8 %val
; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var8
@@ -28,7 +28,7 @@ define i8 @global_i8() {
}
define i16 @global_i16() {
-; CHECK: global_i16:
+; CHECK-LABEL: global_i16:
%val = load i16* @var16
ret i16 %val
; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var16
@@ -39,7 +39,7 @@ define i16 @global_i16() {
}
define i32 @global_i32() {
-; CHECK: global_i32:
+; CHECK-LABEL: global_i32:
%val = load i32* @var32
ret i32 %val
; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var32
@@ -50,7 +50,7 @@ define i32 @global_i32() {
}
define i64 @global_i64() {
-; CHECK: global_i64:
+; CHECK-LABEL: global_i64:
%val = load i64* @var64
ret i64 %val
; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var64
diff --git a/llvm/test/CodeGen/AArch64/compare-branch.ll b/llvm/test/CodeGen/AArch64/compare-branch.ll
index 62f4cd58d47..75efd9d4a0d 100644
--- a/llvm/test/CodeGen/AArch64/compare-branch.ll
+++ b/llvm/test/CodeGen/AArch64/compare-branch.ll
@@ -4,7 +4,7 @@
@var64 = global i64 0
define void @foo() {
-; CHECK: foo:
+; CHECK-LABEL: foo:
%val1 = load volatile i32* @var32
%tst1 = icmp eq i32 %val1, 0
diff --git a/llvm/test/CodeGen/AArch64/dp1.ll b/llvm/test/CodeGen/AArch64/dp1.ll
index 83aa8b4f663..6a8d55cdc7e 100644
--- a/llvm/test/CodeGen/AArch64/dp1.ll
+++ b/llvm/test/CodeGen/AArch64/dp1.ll
@@ -4,7 +4,7 @@
@var64 = global i64 0
define void @rev_i32() {
-; CHECK: rev_i32:
+; CHECK-LABEL: rev_i32:
%val0_tmp = load i32* @var32
%val1_tmp = call i32 @llvm.bswap.i32(i32 %val0_tmp)
; CHECK: rev {{w[0-9]+}}, {{w[0-9]+}}
@@ -13,7 +13,7 @@ define void @rev_i32() {
}
define void @rev_i64() {
-; CHECK: rev_i64:
+; CHECK-LABEL: rev_i64:
%val0_tmp = load i64* @var64
%val1_tmp = call i64 @llvm.bswap.i64(i64 %val0_tmp)
; CHECK: rev {{x[0-9]+}}, {{x[0-9]+}}
@@ -22,7 +22,7 @@ define void @rev_i64() {
}
define void @rev32_i64() {
-; CHECK: rev32_i64:
+; CHECK-LABEL: rev32_i64:
%val0_tmp = load i64* @var64
%val1_tmp = shl i64 %val0_tmp, 32
%val5_tmp = sub i64 64, 32
@@ -35,7 +35,7 @@ define void @rev32_i64() {
}
define void @rev16_i32() {
-; CHECK: rev16_i32:
+; CHECK-LABEL: rev16_i32:
%val0_tmp = load i32* @var32
%val1_tmp = shl i32 %val0_tmp, 16
%val2_tmp = lshr i32 %val0_tmp, 16
@@ -47,7 +47,7 @@ define void @rev16_i32() {
}
define void @clz_zerodef_i32() {
-; CHECK: clz_zerodef_i32:
+; CHECK-LABEL: clz_zerodef_i32:
%val0_tmp = load i32* @var32
%val4_tmp = call i32 @llvm.ctlz.i32(i32 %val0_tmp, i1 0)
; CHECK: clz {{w[0-9]+}}, {{w[0-9]+}}
@@ -56,7 +56,7 @@ define void @clz_zerodef_i32() {
}
define void @clz_zerodef_i64() {
-; CHECK: clz_zerodef_i64:
+; CHECK-LABEL: clz_zerodef_i64:
%val0_tmp = load i64* @var64
%val4_tmp = call i64 @llvm.ctlz.i64(i64 %val0_tmp, i1 0)
; CHECK: clz {{x[0-9]+}}, {{x[0-9]+}}
@@ -65,7 +65,7 @@ define void @clz_zerodef_i64() {
}
define void @clz_zeroundef_i32() {
-; CHECK: clz_zeroundef_i32:
+; CHECK-LABEL: clz_zeroundef_i32:
%val0_tmp = load i32* @var32
%val4_tmp = call i32 @llvm.ctlz.i32(i32 %val0_tmp, i1 1)
; CHECK: clz {{w[0-9]+}}, {{w[0-9]+}}
@@ -74,7 +74,7 @@ define void @clz_zeroundef_i32() {
}
define void @clz_zeroundef_i64() {
-; CHECK: clz_zeroundef_i64:
+; CHECK-LABEL: clz_zeroundef_i64:
%val0_tmp = load i64* @var64
%val4_tmp = call i64 @llvm.ctlz.i64(i64 %val0_tmp, i1 1)
; CHECK: clz {{x[0-9]+}}, {{x[0-9]+}}
@@ -83,7 +83,7 @@ define void @clz_zeroundef_i64() {
}
define void @cttz_zerodef_i32() {
-; CHECK: cttz_zerodef_i32:
+; CHECK-LABEL: cttz_zerodef_i32:
%val0_tmp = load i32* @var32
%val4_tmp = call i32 @llvm.cttz.i32(i32 %val0_tmp, i1 0)
; CHECK: rbit [[REVERSED:w[0-9]+]], {{w[0-9]+}}
@@ -93,7 +93,7 @@ define void @cttz_zerodef_i32() {
}
define void @cttz_zerodef_i64() {
-; CHECK: cttz_zerodef_i64:
+; CHECK-LABEL: cttz_zerodef_i64:
%val0_tmp = load i64* @var64
%val4_tmp = call i64 @llvm.cttz.i64(i64 %val0_tmp, i1 0)
; CHECK: rbit [[REVERSED:x[0-9]+]], {{x[0-9]+}}
@@ -103,7 +103,7 @@ define void @cttz_zerodef_i64() {
}
define void @cttz_zeroundef_i32() {
-; CHECK: cttz_zeroundef_i32:
+; CHECK-LABEL: cttz_zeroundef_i32:
%val0_tmp = load i32* @var32
%val4_tmp = call i32 @llvm.cttz.i32(i32 %val0_tmp, i1 1)
; CHECK: rbit [[REVERSED:w[0-9]+]], {{w[0-9]+}}
@@ -113,7 +113,7 @@ define void @cttz_zeroundef_i32() {
}
define void @cttz_zeroundef_i64() {
-; CHECK: cttz_zeroundef_i64:
+; CHECK-LABEL: cttz_zeroundef_i64:
%val0_tmp = load i64* @var64
%val4_tmp = call i64 @llvm.cttz.i64(i64 %val0_tmp, i1 1)
; CHECK: rbit [[REVERSED:x[0-9]+]], {{x[0-9]+}}
@@ -125,7 +125,7 @@ define void @cttz_zeroundef_i64() {
; These two are just compilation tests really: the operation's set to Expand in
; ISelLowering.
define void @ctpop_i32() {
-; CHECK: ctpop_i32:
+; CHECK-LABEL: ctpop_i32:
%val0_tmp = load i32* @var32
%val4_tmp = call i32 @llvm.ctpop.i32(i32 %val0_tmp)
store volatile i32 %val4_tmp, i32* @var32
@@ -133,7 +133,7 @@ define void @ctpop_i32() {
}
define void @ctpop_i64() {
-; CHECK: ctpop_i64:
+; CHECK-LABEL: ctpop_i64:
%val0_tmp = load i64* @var64
%val4_tmp = call i64 @llvm.ctpop.i64(i64 %val0_tmp)
store volatile i64 %val4_tmp, i64* @var64
diff --git a/llvm/test/CodeGen/AArch64/dp2.ll b/llvm/test/CodeGen/AArch64/dp2.ll
index a5ebc2f33cc..48b0701ad1f 100644
--- a/llvm/test/CodeGen/AArch64/dp2.ll
+++ b/llvm/test/CodeGen/AArch64/dp2.ll
@@ -6,7 +6,7 @@
@var64_1 = global i64 0
define void @rorv_i64() {
-; CHECK: rorv_i64:
+; CHECK-LABEL: rorv_i64:
%val0_tmp = load i64* @var64_0
%val1_tmp = load i64* @var64_1
%val2_tmp = sub i64 64, %val1_tmp
@@ -19,7 +19,7 @@ define void @rorv_i64() {
}
define void @asrv_i64() {
-; CHECK: asrv_i64:
+; CHECK-LABEL: asrv_i64:
%val0_tmp = load i64* @var64_0
%val1_tmp = load i64* @var64_1
%val4_tmp = ashr i64 %val0_tmp, %val1_tmp
@@ -29,7 +29,7 @@ define void @asrv_i64() {
}
define void @lsrv_i64() {
-; CHECK: lsrv_i64:
+; CHECK-LABEL: lsrv_i64:
%val0_tmp = load i64* @var64_0
%val1_tmp = load i64* @var64_1
%val4_tmp = lshr i64 %val0_tmp, %val1_tmp
@@ -39,7 +39,7 @@ define void @lsrv_i64() {
}
define void @lslv_i64() {
-; CHECK: lslv_i64:
+; CHECK-LABEL: lslv_i64:
%val0_tmp = load i64* @var64_0
%val1_tmp = load i64* @var64_1
%val4_tmp = shl i64 %val0_tmp, %val1_tmp
@@ -49,7 +49,7 @@ define void @lslv_i64() {
}
define void @udiv_i64() {
-; CHECK: udiv_i64:
+; CHECK-LABEL: udiv_i64:
%val0_tmp = load i64* @var64_0
%val1_tmp = load i64* @var64_1
%val4_tmp = udiv i64 %val0_tmp, %val1_tmp
@@ -59,7 +59,7 @@ define void @udiv_i64() {
}
define void @sdiv_i64() {
-; CHECK: sdiv_i64:
+; CHECK-LABEL: sdiv_i64:
%val0_tmp = load i64* @var64_0
%val1_tmp = load i64* @var64_1
%val4_tmp = sdiv i64 %val0_tmp, %val1_tmp
@@ -70,7 +70,7 @@ define void @sdiv_i64() {
define void @lsrv_i32() {
-; CHECK: lsrv_i32:
+; CHECK-LABEL: lsrv_i32:
%val0_tmp = load i32* @var32_0
%val1_tmp = load i32* @var32_1
%val2_tmp = add i32 1, %val1_tmp
@@ -81,7 +81,7 @@ define void @lsrv_i32() {
}
define void @lslv_i32() {
-; CHECK: lslv_i32:
+; CHECK-LABEL: lslv_i32:
%val0_tmp = load i32* @var32_0
%val1_tmp = load i32* @var32_1
%val2_tmp = add i32 1, %val1_tmp
@@ -92,7 +92,7 @@ define void @lslv_i32() {
}
define void @rorv_i32() {
-; CHECK: rorv_i32:
+; CHECK-LABEL: rorv_i32:
%val0_tmp = load i32* @var32_0
%val6_tmp = load i32* @var32_1
%val1_tmp = add i32 1, %val6_tmp
@@ -106,7 +106,7 @@ define void @rorv_i32() {
}
define void @asrv_i32() {
-; CHECK: asrv_i32:
+; CHECK-LABEL: asrv_i32:
%val0_tmp = load i32* @var32_0
%val1_tmp = load i32* @var32_1
%val2_tmp = add i32 1, %val1_tmp
@@ -117,7 +117,7 @@ define void @asrv_i32() {
}
define void @sdiv_i32() {
-; CHECK: sdiv_i32:
+; CHECK-LABEL: sdiv_i32:
%val0_tmp = load i32* @var32_0
%val1_tmp = load i32* @var32_1
%val4_tmp = sdiv i32 %val0_tmp, %val1_tmp
@@ -127,7 +127,7 @@ define void @sdiv_i32() {
}
define void @udiv_i32() {
-; CHECK: udiv_i32:
+; CHECK-LABEL: udiv_i32:
%val0_tmp = load i32* @var32_0
%val1_tmp = load i32* @var32_1
%val4_tmp = udiv i32 %val0_tmp, %val1_tmp
diff --git a/llvm/test/CodeGen/AArch64/extract.ll b/llvm/test/CodeGen/AArch64/extract.ll
index 06267816a4e..62d9ed2fc9d 100644
--- a/llvm/test/CodeGen/AArch64/extract.ll
+++ b/llvm/test/CodeGen/AArch64/extract.ll
@@ -1,7 +1,7 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
define i64 @ror_i64(i64 %in) {
-; CHECK: ror_i64:
+; CHECK-LABEL: ror_i64:
%left = shl i64 %in, 19
%right = lshr i64 %in, 45
%val5 = or i64 %left, %right
@@ -10,7 +10,7 @@ define i64 @ror_i64(i64 %in) {
}
define i32 @ror_i32(i32 %in) {
-; CHECK: ror_i32:
+; CHECK-LABEL: ror_i32:
%left = shl i32 %in, 9
%right = lshr i32 %in, 23
%val5 = or i32 %left, %right
@@ -19,7 +19,7 @@ define i32 @ror_i32(i32 %in) {
}
define i32 @extr_i32(i32 %lhs, i32 %rhs) {
-; CHECK: extr_i32:
+; CHECK-LABEL: extr_i32:
%left = shl i32 %lhs, 6
%right = lshr i32 %rhs, 26
%val = or i32 %left, %right
@@ -31,7 +31,7 @@ define i32 @extr_i32(i32 %lhs, i32 %rhs) {
}
define i64 @extr_i64(i64 %lhs, i64 %rhs) {
-; CHECK: extr_i64:
+; CHECK-LABEL: extr_i64:
%right = lshr i64 %rhs, 40
%left = shl i64 %lhs, 24
%val = or i64 %right, %left
@@ -45,7 +45,7 @@ define i64 @extr_i64(i64 %lhs, i64 %rhs) {
; Regression test: a bad experimental pattern crept into git which optimised
; this pattern to a single EXTR.
define i32 @extr_regress(i32 %a, i32 %b) {
-; CHECK: extr_regress:
+; CHECK-LABEL: extr_regress:
%sh1 = shl i32 %a, 14
%sh2 = lshr i32 %b, 14
diff --git a/llvm/test/CodeGen/AArch64/fastcc-reserved.ll b/llvm/test/CodeGen/AArch64/fastcc-reserved.ll
index e40aa3033bd..c6c050570dd 100644
--- a/llvm/test/CodeGen/AArch64/fastcc-reserved.ll
+++ b/llvm/test/CodeGen/AArch64/fastcc-reserved.ll
@@ -7,7 +7,7 @@
declare fastcc void @will_pop([8 x i32], i32 %val)
define fastcc void @foo(i32 %in) {
-; CHECK: foo:
+; CHECK-LABEL: foo:
%addr = alloca i8, i32 %in
@@ -34,7 +34,7 @@ define fastcc void @foo(i32 %in) {
declare void @wont_pop([8 x i32], i32 %val)
define void @foo1(i32 %in) {
-; CHECK: foo1:
+; CHECK-LABEL: foo1:
%addr = alloca i8, i32 %in
; Normal frame setup again
diff --git a/llvm/test/CodeGen/AArch64/fastcc.ll b/llvm/test/CodeGen/AArch64/fastcc.ll
index 41cde94edc1..a4cd37858ee 100644
--- a/llvm/test/CodeGen/AArch64/fastcc.ll
+++ b/llvm/test/CodeGen/AArch64/fastcc.ll
@@ -5,10 +5,10 @@
; stack, so try to make sure this is respected.
define fastcc void @func_stack0() {
-; CHECK: func_stack0:
+; CHECK-LABEL: func_stack0:
; CHECK: sub sp, sp, #48
-; CHECK-TAIL: func_stack0:
+; CHECK-TAIL-LABEL: func_stack0:
; CHECK-TAIL: sub sp, sp, #48
@@ -45,10 +45,10 @@ define fastcc void @func_stack0() {
}
define fastcc void @func_stack8([8 x i32], i32 %stacked) {
-; CHECK: func_stack8:
+; CHECK-LABEL: func_stack8:
; CHECK: sub sp, sp, #48
-; CHECK-TAIL: func_stack8:
+; CHECK-TAIL-LABEL: func_stack8:
; CHECK-TAIL: sub sp, sp, #48
@@ -84,10 +84,10 @@ define fastcc void @func_stack8([8 x i32], i32 %stacked) {
}
define fastcc void @func_stack32([8 x i32], i128 %stacked0, i128 %stacked1) {
-; CHECK: func_stack32:
+; CHECK-LABEL: func_stack32:
; CHECK: sub sp, sp, #48
-; CHECK-TAIL: func_stack32:
+; CHECK-TAIL-LABEL: func_stack32:
; CHECK-TAIL: sub sp, sp, #48
diff --git a/llvm/test/CodeGen/AArch64/floatdp_1source.ll b/llvm/test/CodeGen/AArch64/floatdp_1source.ll
index c94ba9b57b5..3d7f8f0369f 100644
--- a/llvm/test/CodeGen/AArch64/floatdp_1source.ll
+++ b/llvm/test/CodeGen/AArch64/floatdp_1source.ll
@@ -26,7 +26,7 @@ declare float @nearbyintf(float) readonly
declare double @nearbyint(double) readonly
define void @simple_float() {
-; CHECK: simple_float:
+; CHECK-LABEL: simple_float:
%val1 = load volatile float* @varfloat
%valabs = call float @fabsf(float %val1)
@@ -65,7 +65,7 @@ define void @simple_float() {
}
define void @simple_double() {
-; CHECK: simple_double:
+; CHECK-LABEL: simple_double:
%val1 = load volatile double* @vardouble
%valabs = call double @fabs(double %val1)
@@ -104,7 +104,7 @@ define void @simple_double() {
}
define void @converts() {
-; CHECK: converts:
+; CHECK-LABEL: converts:
%val16 = load volatile half* @varhalf
%val32 = load volatile float* @varfloat
diff --git a/llvm/test/CodeGen/AArch64/fp-dp3.ll b/llvm/test/CodeGen/AArch64/fp-dp3.ll
index 09d90727c81..3a9a6fc5471 100644
--- a/llvm/test/CodeGen/AArch64/fp-dp3.ll
+++ b/llvm/test/CodeGen/AArch64/fp-dp3.ll
@@ -46,7 +46,7 @@ define float @test_fnmsub(float %a, float %b, float %c) {
define double @testd_fmadd(double %a, double %b, double %c) {
; CHECK-LABEL: testd_fmadd:
-; CHECK-NOFAST: testd_fmadd:
+; CHECK-NOFAST-LABEL: testd_fmadd:
%val = call double @llvm.fma.f64(double %a, double %b, double %c)
; CHECK: fmadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
; CHECK-NOFAST: fmadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
@@ -55,7 +55,7 @@ define double @testd_fmadd(double %a, double %b, double %c) {
define double @testd_fmsub(double %a, double %b, double %c) {
; CHECK-LABEL: testd_fmsub:
-; CHECK-NOFAST: testd_fmsub:
+; CHECK-NOFAST-LABEL: testd_fmsub:
%nega = fsub double -0.0, %a
%val = call double @llvm.fma.f64(double %nega, double %b, double %c)
; CHECK: fmsub {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
@@ -65,7 +65,7 @@ define double @testd_fmsub(double %a, double %b, double %c) {
define double @testd_fnmadd(double %a, double %b, double %c) {
; CHECK-LABEL: testd_fnmadd:
-; CHECK-NOFAST: testd_fnmadd:
+; CHECK-NOFAST-LABEL: testd_fnmadd:
%negc = fsub double -0.0, %c
%val = call double @llvm.fma.f64(double %a, double %b, double %negc)
; CHECK: fnmadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
@@ -75,7 +75,7 @@ define double @testd_fnmadd(double %a, double %b, double %c) {
define double @testd_fnmsub(double %a, double %b, double %c) {
; CHECK-LABEL: testd_fnmsub:
-; CHECK-NOFAST: testd_fnmsub:
+; CHECK-NOFAST-LABEL: testd_fnmsub:
%nega = fsub double -0.0, %a
%negc = fsub double -0.0, %c
%val = call double @llvm.fma.f64(double %nega, double %b, double %negc)
diff --git a/llvm/test/CodeGen/AArch64/fpimm.ll b/llvm/test/CodeGen/AArch64/fpimm.ll
index fd28aeef929..ccf7c8ae4a3 100644
--- a/llvm/test/CodeGen/AArch64/fpimm.ll
+++ b/llvm/test/CodeGen/AArch64/fpimm.ll
@@ -4,7 +4,7 @@
@varf64 = global double 0.0
define void @check_float() {
-; CHECK: check_float:
+; CHECK-LABEL: check_float:
%val = load float* @varf32
%newval1 = fadd float %val, 8.5
@@ -19,7 +19,7 @@ define void @check_float() {
}
define void @check_double() {
-; CHECK: check_double:
+; CHECK-LABEL: check_double:
%val = load double* @varf64
%newval1 = fadd double %val, 8.5
diff --git a/llvm/test/CodeGen/AArch64/func-argpassing.ll b/llvm/test/CodeGen/AArch64/func-argpassing.ll
index 736710792f4..15f8e768abb 100644
--- a/llvm/test/CodeGen/AArch64/func-argpassing.ll
+++ b/llvm/test/CodeGen/AArch64/func-argpassing.ll
@@ -11,7 +11,7 @@
@varstruct = global %myStruct zeroinitializer
define void @take_i8s(i8 %val1, i8 %val2) {
-; CHECK: take_i8s:
+; CHECK-LABEL: take_i8s:
store i8 %val2, i8* @var8
; Not using w1 may be technically allowed, but it would indicate a
; problem in itself.
@@ -20,7 +20,7 @@ define void @take_i8s(i8 %val1, i8 %val2) {
}
define void @add_floats(float %val1, float %val2) {
-; CHECK: add_floats:
+; CHECK-LABEL: add_floats:
%newval = fadd float %val1, %val2
; CHECK: fadd [[ADDRES:s[0-9]+]], s0, s1
store float %newval, float* @varfloat
@@ -31,7 +31,7 @@ define void @add_floats(float %val1, float %val2) {
; byval pointers should be allocated to the stack and copied as if
; with memcpy.
define void @take_struct(%myStruct* byval %structval) {
-; CHECK: take_struct:
+; CHECK-LABEL: take_struct:
%addr0 = getelementptr %myStruct* %structval, i64 0, i32 2
%addr1 = getelementptr %myStruct* %structval, i64 0, i32 0
@@ -51,7 +51,7 @@ define void @take_struct(%myStruct* byval %structval) {
; %structval should be at sp + 16
define void @check_byval_align(i32* byval %ignore, %myStruct* byval align 16 %structval) {
-; CHECK: check_byval_align:
+; CHECK-LABEL: check_byval_align:
%addr0 = getelementptr %myStruct* %structval, i64 0, i32 2
%addr1 = getelementptr %myStruct* %structval, i64 0, i32 0
@@ -72,7 +72,7 @@ define void @check_byval_align(i32* byval %ignore, %myStruct* byval align 16 %st
}
define i32 @return_int() {
-; CHECK: return_int:
+; CHECK-LABEL: return_int:
%val = load i32* @var32
ret i32 %val
; CHECK: ldr w0, [{{x[0-9]+}}, #:lo12:var32]
@@ -81,7 +81,7 @@ define i32 @return_int() {
}
define double @return_double() {
-; CHECK: return_double:
+; CHECK-LABEL: return_double:
ret double 3.14
; CHECK: ldr d0, [{{x[0-9]+}}, #:lo12:.LCPI
}
@@ -90,7 +90,7 @@ define double @return_double() {
; small enough to go into registers. Not all that pretty, but it
; works.
define [2 x i64] @return_struct() {
-; CHECK: return_struct:
+; CHECK-LABEL: return_struct:
%addr = bitcast %myStruct* @varstruct to [2 x i64]*
%val = load [2 x i64]* %addr
ret [2 x i64] %val
@@ -107,7 +107,7 @@ define [2 x i64] @return_struct() {
; structs larger than 16 bytes, but C semantics can still be provided
; if LLVM does it to %myStruct too. So this is the simplest check
define void @return_large_struct(%myStruct* sret %retval) {
-; CHECK: return_large_struct:
+; CHECK-LABEL: return_large_struct:
%addr0 = getelementptr %myStruct* %retval, i64 0, i32 0
%addr1 = getelementptr %myStruct* %retval, i64 0, i32 1
%addr2 = getelementptr %myStruct* %retval, i64 0, i32 2
@@ -128,7 +128,7 @@ define void @return_large_struct(%myStruct* sret %retval) {
define i32 @struct_on_stack(i8 %var0, i16 %var1, i32 %var2, i64 %var3, i128 %var45,
i32* %var6, %myStruct* byval %struct, i32* byval %stacked,
double %notstacked) {
-; CHECK: struct_on_stack:
+; CHECK-LABEL: struct_on_stack:
%addr = getelementptr %myStruct* %struct, i64 0, i32 0
%val64 = load i64* %addr
store i64 %val64, i64* @var64
@@ -148,7 +148,7 @@ define i32 @struct_on_stack(i8 %var0, i16 %var1, i32 %var2, i64 %var3, i128 %var
define void @stacked_fpu(float %var0, double %var1, float %var2, float %var3,
float %var4, float %var5, float %var6, float %var7,
float %var8) {
-; CHECK: stacked_fpu:
+; CHECK-LABEL: stacked_fpu:
store float %var8, float* @varfloat
; Beware as above: the offset would be different on big-endian
; machines if the first ldr were changed to use s-registers.
diff --git a/llvm/test/CodeGen/AArch64/func-calls.ll b/llvm/test/CodeGen/AArch64/func-calls.ll
index 13b689c4088..b12130b1470 100644
--- a/llvm/test/CodeGen/AArch64/func-calls.ll
+++ b/llvm/test/CodeGen/AArch64/func-calls.ll
@@ -17,7 +17,7 @@ declare void @take_i8s(i8 %val1, i8 %val2)
declare void @take_floats(float %val1, float %val2)
define void @simple_args() {
-; CHECK: simple_args:
+; CHECK-LABEL: simple_args:
%char1 = load i8* @var8
%char2 = load i8* @var8_2
call void @take_i8s(i8 %char1, i8 %char2)
@@ -41,7 +41,7 @@ declare [2 x i64] @return_smallstruct()
declare void @return_large_struct(%myStruct* sret %retval)
define void @simple_rets() {
-; CHECK: simple_rets:
+; CHECK-LABEL: simple_rets:
%int = call i32 @return_int()
store i32 %int, i32* @var32
@@ -106,7 +106,7 @@ declare void @check_i128_regalign(i32 %val0, i128 %val1)
define void @check_i128_align() {
-; CHECK: check_i128_align:
+; CHECK-LABEL: check_i128_align:
%val = load i128* @var128
call void @check_i128_stackalign(i32 0, i32 1, i32 2, i32 3,
i32 4, i32 5, i32 6, i32 7,
@@ -130,7 +130,7 @@ define void @check_i128_align() {
@fptr = global void()* null
define void @check_indirect_call() {
-; CHECK: check_indirect_call:
+; CHECK-LABEL: check_indirect_call:
%func = load void()** @fptr
call void %func()
; CHECK: ldr [[FPTR:x[0-9]+]], [{{x[0-9]+}}, #:lo12:fptr]
diff --git a/llvm/test/CodeGen/AArch64/got-abuse.ll b/llvm/test/CodeGen/AArch64/got-abuse.ll
index c474e5845a6..8b06031c88f 100644
--- a/llvm/test/CodeGen/AArch64/got-abuse.ll
+++ b/llvm/test/CodeGen/AArch64/got-abuse.ll
@@ -13,7 +13,7 @@ declare void @consume(i32)
declare void @func()
define void @foo() nounwind {
-; CHECK: foo:
+; CHECK-LABEL: foo:
entry:
call void @consume(i32 ptrtoint (void ()* @func to i32))
; CHECK: adrp x[[ADDRHI:[0-9]+]], :got:func
diff --git a/llvm/test/CodeGen/AArch64/i128-align.ll b/llvm/test/CodeGen/AArch64/i128-align.ll
index 8eeaa2fd96b..21ca7eda66b 100644
--- a/llvm/test/CodeGen/AArch64/i128-align.ll
+++ b/llvm/test/CodeGen/AArch64/i128-align.ll
@@ -5,7 +5,7 @@
@var = global %struct zeroinitializer
define i64 @check_size() {
-; CHECK: check_size:
+; CHECK-LABEL: check_size:
%starti = ptrtoint %struct* @var to i64
%endp = getelementptr %struct* @var, i64 1
@@ -17,7 +17,7 @@ define i64 @check_size() {
}
define i64 @check_field() {
-; CHECK: check_field:
+; CHECK-LABEL: check_field:
%starti = ptrtoint %struct* @var to i64
%endp = getelementptr %struct* @var, i64 0, i32 1
diff --git a/llvm/test/CodeGen/AArch64/ldst-regoffset.ll b/llvm/test/CodeGen/AArch64/ldst-regoffset.ll
index 45935129fd7..c83fb52146c 100644
--- a/llvm/test/CodeGen/AArch64/ldst-regoffset.ll
+++ b/llvm/test/CodeGen/AArch64/ldst-regoffset.ll
@@ -9,7 +9,7 @@
@var_double = global double 0.0
define void @ldst_8bit(i8* %base, i32 %off32, i64 %off64) {
-; CHECK: ldst_8bit:
+; CHECK-LABEL: ldst_8bit:
%addr8_sxtw = getelementptr i8* %base, i32 %off32
%val8_sxtw = load volatile i8* %addr8_sxtw
@@ -37,7 +37,7 @@ define void @ldst_8bit(i8* %base, i32 %off32, i64 %off64) {
define void @ldst_16bit(i16* %base, i32 %off32, i64 %off64) {
-; CHECK: ldst_16bit:
+; CHECK-LABEL: ldst_16bit:
%addr8_sxtwN = getelementptr i16* %base, i32 %off32
%val8_sxtwN = load volatile i16* %addr8_sxtwN
@@ -91,7 +91,7 @@ define void @ldst_16bit(i16* %base, i32 %off32, i64 %off64) {
}
define void @ldst_32bit(i32* %base, i32 %off32, i64 %off64) {
-; CHECK: ldst_32bit:
+; CHECK-LABEL: ldst_32bit:
%addr_sxtwN = getelementptr i32* %base, i32 %off32
%val_sxtwN = load volatile i32* %addr_sxtwN
@@ -143,7 +143,7 @@ define void @ldst_32bit(i32* %base, i32 %off32, i64 %off64) {
}
define void @ldst_64bit(i64* %base, i32 %off32, i64 %off64) {
-; CHECK: ldst_64bit:
+; CHECK-LABEL: ldst_64bit:
%addr_sxtwN = getelementptr i64* %base, i32 %off32
%val_sxtwN = load volatile i64* %addr_sxtwN
@@ -191,7 +191,7 @@ define void @ldst_64bit(i64* %base, i32 %off32, i64 %off64) {
}
define void @ldst_float(float* %base, i32 %off32, i64 %off64) {
-; CHECK: ldst_float:
+; CHECK-LABEL: ldst_float:
%addr_sxtwN = getelementptr float* %base, i32 %off32
%val_sxtwN = load volatile float* %addr_sxtwN
@@ -238,7 +238,7 @@ define void @ldst_float(float* %base, i32 %off32, i64 %off64) {
}
define void @ldst_double(double* %base, i32 %off32, i64 %off64) {
-; CHECK: ldst_double:
+; CHECK-LABEL: ldst_double:
%addr_sxtwN = getelementptr double* %base, i32 %off32
%val_sxtwN = load volatile double* %addr_sxtwN
@@ -286,7 +286,7 @@ define void @ldst_double(double* %base, i32 %off32, i64 %off64) {
define void @ldst_128bit(fp128* %base, i32 %off32, i64 %off64) {
-; CHECK: ldst_128bit:
+; CHECK-LABEL: ldst_128bit:
%addr_sxtwN = getelementptr fp128* %base, i32 %off32
%val_sxtwN = load volatile fp128* %addr_sxtwN
diff --git a/llvm/test/CodeGen/AArch64/ldst-unscaledimm.ll b/llvm/test/CodeGen/AArch64/ldst-unscaledimm.ll
index 78a3c83c3dd..03dedccce29 100644
--- a/llvm/test/CodeGen/AArch64/ldst-unscaledimm.ll
+++ b/llvm/test/CodeGen/AArch64/ldst-unscaledimm.ll
@@ -11,7 +11,7 @@
@varptr = global i8* null
define void @ldst_8bit() {
-; CHECK: ldst_8bit:
+; CHECK-LABEL: ldst_8bit:
; No architectural support for loads to 16-bit or 8-bit since we
; promote i8 during lowering.
@@ -72,7 +72,7 @@ define void @ldst_8bit() {
}
define void @ldst_16bit() {
-; CHECK: ldst_16bit:
+; CHECK-LABEL: ldst_16bit:
; No architectural support for loads to 16-bit or 16-bit since we
; promote i16 during lowering.
@@ -140,7 +140,7 @@ define void @ldst_16bit() {
}
define void @ldst_32bit() {
-; CHECK: ldst_32bit:
+; CHECK-LABEL: ldst_32bit:
%addr_8bit = load i8** @varptr
@@ -186,7 +186,7 @@ define void @ldst_32bit() {
}
define void @ldst_float() {
-; CHECK: ldst_float:
+; CHECK-LABEL: ldst_float:
%addr_8bit = load i8** @varptr
%addrfp_8 = getelementptr i8* %addr_8bit, i64 -5
@@ -202,7 +202,7 @@ define void @ldst_float() {
}
define void @ldst_double() {
-; CHECK: ldst_double:
+; CHECK-LABEL: ldst_double:
%addr_8bit = load i8** @varptr
%addrfp_8 = getelementptr i8* %addr_8bit, i64 4
diff --git a/llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll b/llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll
index 1e7540d9be0..77cef4eea0d 100644
--- a/llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll
+++ b/llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll
@@ -9,7 +9,7 @@
@var_double = global double 0.0
define void @ldst_8bit() {
-; CHECK: ldst_8bit:
+; CHECK-LABEL: ldst_8bit:
; No architectural support for loads to 16-bit or 8-bit since we
; promote i8 during lowering.
@@ -63,7 +63,7 @@ define void @ldst_8bit() {
}
define void @ldst_16bit() {
-; CHECK: ldst_16bit:
+; CHECK-LABEL: ldst_16bit:
; No architectural support for load volatiles to 16-bit promote i16 during
; lowering.
@@ -117,7 +117,7 @@ define void @ldst_16bit() {
}
define void @ldst_32bit() {
-; CHECK: ldst_32bit:
+; CHECK-LABEL: ldst_32bit:
; Straight 32-bit load/store
%val32_noext = load volatile i32* @var_32bit
@@ -225,7 +225,7 @@ define void @ldst_complex_offsets() {
}
define void @ldst_float() {
-; CHECK: ldst_float:
+; CHECK-LABEL: ldst_float:
%valfp = load volatile float* @var_float
; CHECK: adrp {{x[0-9]+}}, var_float
@@ -238,7 +238,7 @@ define void @ldst_float() {
}
define void @ldst_double() {
-; CHECK: ldst_double:
+; CHECK-LABEL: ldst_double:
%valfp = load volatile double* @var_double
; CHECK: adrp {{x[0-9]+}}, var_double
diff --git a/llvm/test/CodeGen/AArch64/literal_pools.ll b/llvm/test/CodeGen/AArch64/literal_pools.ll
index 9cfa8c5426e..b82f29046d3 100644
--- a/llvm/test/CodeGen/AArch64/literal_pools.ll
+++ b/llvm/test/CodeGen/AArch64/literal_pools.ll
@@ -5,7 +5,7 @@
@var64 = global i64 0
define void @foo() {
-; CHECK: foo:
+; CHECK-LABEL: foo:
%val32 = load i32* @var32
%val64 = load i64* @var64
@@ -60,7 +60,7 @@ define void @foo() {
@vardouble = global double 0.0
define void @floating_lits() {
-; CHECK: floating_lits:
+; CHECK-LABEL: floating_lits:
%floatval = load float* @varfloat
%newfloat = fadd float %floatval, 128.0
diff --git a/llvm/test/CodeGen/AArch64/local_vars.ll b/llvm/test/CodeGen/AArch64/local_vars.ll
index 5cbf5a37ec5..b5cef859e35 100644
--- a/llvm/test/CodeGen/AArch64/local_vars.ll
+++ b/llvm/test/CodeGen/AArch64/local_vars.ll
@@ -24,7 +24,7 @@ define void @trivial_func() nounwind {
}
define void @trivial_fp_func() {
-; CHECK-WITHFP: trivial_fp_func:
+; CHECK-WITHFP-LABEL: trivial_fp_func:
; CHECK-WITHFP: sub sp, sp, #16
; CHECK-WITHFP: stp x29, x30, [sp]
@@ -43,7 +43,7 @@ define void @trivial_fp_func() {
define void @stack_local() {
%local_var = alloca i64
-; CHECK: stack_local:
+; CHECK-LABEL: stack_local:
; CHECK: sub sp, sp, #16
%val = load i64* @var
diff --git a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
index bbbfcc1b911..a08ba20c7f1 100644
--- a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
+++ b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
@@ -7,7 +7,7 @@
@var2_64 = global i64 0
define void @logical_32bit() {
-; CHECK: logical_32bit:
+; CHECK-LABEL: logical_32bit:
%val1 = load i32* @var1_32
%val2 = load i32* @var2_32
@@ -97,7 +97,7 @@ define void @logical_32bit() {
}
define void @logical_64bit() {
-; CHECK: logical_64bit:
+; CHECK-LABEL: logical_64bit:
%val1 = load i64* @var1_64
%val2 = load i64* @var2_64
@@ -190,7 +190,7 @@ define void @logical_64bit() {
}
define void @flag_setting() {
-; CHECK: flag_setting:
+; CHECK-LABEL: flag_setting:
%val1 = load i64* @var1_64
%val2 = load i64* @var2_64
diff --git a/llvm/test/CodeGen/AArch64/regress-tail-livereg.ll b/llvm/test/CodeGen/AArch64/regress-tail-livereg.ll
index 32167752cb3..053249c6855 100644
--- a/llvm/test/CodeGen/AArch64/regress-tail-livereg.ll
+++ b/llvm/test/CodeGen/AArch64/regress-tail-livereg.ll
@@ -4,7 +4,7 @@
declare void @bar()
define void @foo() {
-; CHECK: foo:
+; CHECK-LABEL: foo:
%func = load void()** @var
; Calling a function encourages @foo to use a callee-saved register,
diff --git a/llvm/test/CodeGen/AArch64/sibling-call.ll b/llvm/test/CodeGen/AArch64/sibling-call.ll
index 6df4e7e3e52..20f1062a44d 100644
--- a/llvm/test/CodeGen/AArch64/sibling-call.ll
+++ b/llvm/test/CodeGen/AArch64/sibling-call.ll
@@ -5,7 +5,7 @@ declare void @callee_stack8([8 x i32], i64)
declare void @callee_stack16([8 x i32], i64, i64)
define void @caller_to0_from0() nounwind {
-; CHECK: caller_to0_from0:
+; CHECK-LABEL: caller_to0_from0:
; CHECK-NEXT: // BB
tail call void @callee_stack0()
ret void
@@ -13,7 +13,7 @@ define void @caller_to0_from0() nounwind {
}
define void @caller_to0_from8([8 x i32], i64) nounwind{
-; CHECK: caller_to0_from8:
+; CHECK-LABEL: caller_to0_from8:
; CHECK-NEXT: // BB
tail call void @callee_stack0()
@@ -22,7 +22,7 @@ define void @caller_to0_from8([8 x i32], i64) nounwind{
}
define void @caller_to8_from0() {
-; CHECK: caller_to8_from0:
+; CHECK-LABEL: caller_to8_from0:
; Caller isn't going to clean up any extra stack we allocate, so it
; can't be a tail call.
@@ -32,7 +32,7 @@ define void @caller_to8_from0() {
}
define void @caller_to8_from8([8 x i32], i64 %a) {
-; CHECK: caller_to8_from8:
+; CHECK-LABEL: caller_to8_from8:
; CHECK-NOT: sub sp, sp,
; This should reuse our stack area for the 42
@@ -43,7 +43,7 @@ define void @caller_to8_from8([8 x i32], i64 %a) {
}
define void @caller_to16_from8([8 x i32], i64 %a) {
-; CHECK: caller_to16_from8:
+; CHECK-LABEL: caller_to16_from8:
; Shouldn't be a tail call: we can't use SP+8 because our caller might
; have something there. This may sound obvious but implementation does
@@ -54,7 +54,7 @@ define void @caller_to16_from8([8 x i32], i64 %a) {
}
define void @caller_to8_from24([8 x i32], i64 %a, i64 %b, i64 %c) {
-; CHECK: caller_to8_from24:
+; CHECK-LABEL: caller_to8_from24:
; CHECK-NOT: sub sp, sp
; Reuse our area, putting "42" at incoming sp
@@ -65,7 +65,7 @@ define void @caller_to8_from24([8 x i32], i64 %a, i64 %b, i64 %c) {
}
define void @caller_to16_from16([8 x i32], i64 %a, i64 %b) {
-; CHECK: caller_to16_from16:
+; CHECK-LABEL: caller_to16_from16:
; CHECK-NOT: sub sp, sp,
; Here we want to make sure that both loads happen before the stores:
@@ -85,7 +85,7 @@ define void @caller_to16_from16([8 x i32], i64 %a, i64 %b) {
@func = global void(i32)* null
define void @indirect_tail() {
-; CHECK: indirect_tail:
+; CHECK-LABEL: indirect_tail:
; CHECK-NOT: sub sp, sp
%fptr = load void(i32)** @func
diff --git a/llvm/test/CodeGen/AArch64/tail-call.ll b/llvm/test/CodeGen/AArch64/tail-call.ll
index f323b151ad1..81885f10851 100644
--- a/llvm/test/CodeGen/AArch64/tail-call.ll
+++ b/llvm/test/CodeGen/AArch64/tail-call.ll
@@ -5,7 +5,7 @@ declare fastcc void @callee_stack8([8 x i32], i64)
declare fastcc void @callee_stack16([8 x i32], i64, i64)
define fastcc void @caller_to0_from0() nounwind {
-; CHECK: caller_to0_from0:
+; CHECK-LABEL: caller_to0_from0:
; CHECK-NEXT: // BB
tail call fastcc void @callee_stack0()
ret void
@@ -13,7 +13,7 @@ define fastcc void @caller_to0_from0() nounwind {
}
define fastcc void @caller_to0_from8([8 x i32], i64) {
-; CHECK: caller_to0_from8:
+; CHECK-LABEL: caller_to0_from8:
tail call fastcc void @callee_stack0()
ret void
@@ -22,7 +22,7 @@ define fastcc void @caller_to0_from8([8 x i32], i64) {
}
define fastcc void @caller_to8_from0() {
-; CHECK: caller_to8_from0:
+; CHECK-LABEL: caller_to8_from0:
; CHECK: sub sp, sp, #32
; Key point is that the "42" should go #16 below incoming stack
@@ -35,7 +35,7 @@ define fastcc void @caller_to8_from0() {
}
define fastcc void @caller_to8_from8([8 x i32], i64 %a) {
-; CHECK: caller_to8_from8:
+; CHECK-LABEL: caller_to8_from8:
; CHECK: sub sp, sp, #16
; Key point is that the "%a" should go where at SP on entry.
@@ -47,7 +47,7 @@ define fastcc void @caller_to8_from8([8 x i32], i64 %a) {
}
define fastcc void @caller_to16_from8([8 x i32], i64 %a) {
-; CHECK: caller_to16_from8:
+; CHECK-LABEL: caller_to16_from8:
; CHECK: sub sp, sp, #16
; Important point is that the call reuses the "dead" argument space
@@ -63,7 +63,7 @@ define fastcc void @caller_to16_from8([8 x i32], i64 %a) {
define fastcc void @caller_to8_from24([8 x i32], i64 %a, i64 %b, i64 %c) {
-; CHECK: caller_to8_from24:
+; CHECK-LABEL: caller_to8_from24:
; CHECK: sub sp, sp, #16
; Key point is that the "%a" should go where at #16 above SP on entry.
@@ -76,7 +76,7 @@ define fastcc void @caller_to8_from24([8 x i32], i64 %a, i64 %b, i64 %c) {
define fastcc void @caller_to16_from16([8 x i32], i64 %a, i64 %b) {
-; CHECK: caller_to16_from16:
+; CHECK-LABEL: caller_to16_from16:
; CHECK: sub sp, sp, #16
; Here we want to make sure that both loads happen before the stores: