author     David Blaikie <dblaikie@gmail.com>  2015-04-16 23:24:18 +0000
committer  David Blaikie <dblaikie@gmail.com>  2015-04-16 23:24:18 +0000
commit     23af64846f29e8249c717cad08ae64afc2ba647b (patch)
tree       0748fde05fcaace0e05cbb14d32cd1300c257363 /llvm/test/CodeGen/AArch64
parent     e68c0085199bb4e1aa036023952a0ece59afeaea (diff)
[opaque pointer type] Add textual IR support for explicit type parameter to the call instruction
See r230786 and r230794 for similar changes to gep and load
respectively.
Call is a bit different because it often doesn't have a single explicit
type - usually the type is deduced from the arguments, and just the
return type is explicit. In those cases there's no need to change the
IR.
When that's not the case, the IR usually contains the pointer type of
the first operand - but since typed pointers are going away, that
representation is insufficient so I'm just stripping the "pointerness"
of the explicit type away.
This does make the IR a bit weird - it /sort of/ reads like the type of
the first operand: "call void () %x(" but %x is actually of type "void
()*" and will eventually be just of type "ptr". But this seems not too
bad and I don't think it would benefit from repeating the type
("void (), void () * %x(" and then eventually "void (), ptr %x(") as has
been done with gep and load.
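To make that concrete, here is a before/after sketch of the indirect-call case quoted above, reusing the same %x of type "void ()*" (an illustration, not one of the migrated tests):

  ; before: the explicit type, when printed, was the pointer type of the callee
  call void ()* %x()
  ; after: the pointerness is stripped from the explicit type; %x itself
  ; still has type "void ()*" until typed pointers go away entirely
  call void () %x()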
This also has a side benefit: since the explicit type is no longer a
pointer, there's no ambiguity between an explicit type and a function
that returns a function pointer. Previously this case needed an explicit
type (eg: a function returning a void() function was written as
"call void () () * @x(" rather than "call void () * @x(" because of the
ambiguity between a function returning a pointer to a void() function
and a function returning void).
No ambiguity means even function pointer return types can just be
written alone, without writing the whole function's type.
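A sketch of that disambiguation, reusing the @x example quoted above (a function returning a pointer to a void() function; illustrative only):

  ; before: the whole function type was required to avoid the ambiguity
  %fp = call void () () * @x()
  ; after: the return type alone is enough, because an explicit type can
  ; no longer be a pointer type
  %fp = call void ()* @x()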
This leaves /only/ the varargs case where the explicit type is required.
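For example, from the arm64-aapcs.ll change in the diff below:

  ; before: varargs call spelled with the pointer-to-function type
  call void (i32, ...)* @variadic(i32 0, i64 1, double 2.0)
  ; after: the function type is still written out, minus the trailing "*"
  call void (i32, ...) @variadic(i32 0, i64 1, double 2.0)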
Given the special type syntax in call instructions, the regex-fu used
for migration was a bit more involved in its own unique way (as every
one of these is) so here it is. Use it in conjunction with the apply.sh
script and associated find/xargs commands I've provided in r230786 to
migrate your out of tree tests. Do let me know if any of this doesn't
cover your cases & we can iterate on a more general script/regexes to
help others with out of tree tests.
About 9 test cases couldn't be automatically migrated - half of those
were functions returning function pointers, where I just had to manually
delete the function argument types now that we didn't need an explicit
function type there. The other half were typedefs of function types used
in calls - just had to manually drop the * from those.
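A sketch of that last manual case, using a hypothetical named function type %fnty and callee %callee standing in for the actual tests:

  %fnty = type void (i32)
  ; before: the call was written through the named type's pointer type
  call %fnty* %callee(i32 0)
  ; after: manually drop the "*" so the explicit type is the function type
  call %fnty %callee(i32 0)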
import sys
import re

# Group 1 captures "call" plus the explicit type; group 2 captures the callee
# expression (an @global or %value, possibly quoted, a FileCheck [[VAR]] or
# {{...}} pattern, or undef/inttoptr/bitcast/null/asm) and the rest of the line.
pat = re.compile(r'((?:=|:|^|\s)call\s(?:[^@]*?))(\s*$|\s*(?:(?:\[\[[a-zA-Z0-9_]+\]\]|[@%](?:(")?[\\\?@a-zA-Z0-9_.]*?(?(3)"|)|{{.*}}))(?:\(|$)|undef|inttoptr|bitcast|null|asm).*$)')

# Only rewrite when the explicit type ends in a function type followed by "*";
# leave addrspace-qualified pointer types alone.
addrspace_end = re.compile(r"addrspace\(\d+\)\s*\*$")
func_end = re.compile(r"(?:void.*|\)\s*)\*$")

def conv(match, line):
    if not match or re.search(addrspace_end, match.group(1)) or not re.search(func_end, match.group(1)):
        return line
    # Strip the trailing "*" (and any whitespace before it) from the explicit type.
    return line[:match.start()] + match.group(1)[:match.group(1).rfind('*')].rstrip() + match.group(2) + line[match.end():]

# Reads IR on stdin, writes the migrated IR to stdout.
for line in sys.stdin:
    sys.stdout.write(conv(re.search(pat, line), line))
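For instance, fed a line like the following on stdin (a simplified stand-in; %fmt replaces the getelementptr string constants the real tests use), the script rewrites

  %call = call i32 (i8*, ...)* @printf(i8* %fmt, double %0, i64 %conv)

to

  %call = call i32 (i8*, ...) @printf(i8* %fmt, double %0, i64 %conv)

and leaves lines alone when the explicit type doesn't end in a function type followed by "*" (including addrspace-qualified pointers).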
llvm-svn: 235145
Diffstat (limited to 'llvm/test/CodeGen/AArch64')
-rw-r--r--  llvm/test/CodeGen/AArch64/argument-blocks.ll               |  2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll       |  8
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-aapcs.ll                   |  2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll             |  6
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-anyregcc-crash.ll          |  2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-anyregcc.ll                | 16
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fcopysign.ll               |  2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-join-reserved.ll           |  2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-patchpoint-scratch-regs.ll |  2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-patchpoint-webkit_jscc.ll  |  8
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-patchpoint.ll              | 16
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-stackmap-nops.ll           |  2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-stackmap.ll                | 22
-rw-r--r--  llvm/test/CodeGen/AArch64/br-to-eh-lpad.ll                 |  8
-rw-r--r--  llvm/test/CodeGen/AArch64/dag-combine-invaraints.ll        |  2
-rw-r--r--  llvm/test/CodeGen/AArch64/stackmap-liveness.ll             |  2
16 files changed, 51 insertions, 51 deletions
diff --git a/llvm/test/CodeGen/AArch64/argument-blocks.ll b/llvm/test/CodeGen/AArch64/argument-blocks.ll
index f1dcfa67d0e..3169abc2dcb 100644
--- a/llvm/test/CodeGen/AArch64/argument-blocks.ll
+++ b/llvm/test/CodeGen/AArch64/argument-blocks.ll
@@ -64,7 +64,7 @@ define void @test_varargs_stackalign() {
 ; CHECK-LABEL: test_varargs_stackalign:
 ; CHECK-DARWINPCS: stp {{w[0-9]+}}, {{w[0-9]+}}, [sp, #16]
-  call void(...)* @callee([3 x float] undef, [2 x float] [float 1.0, float 2.0])
+  call void(...) @callee([3 x float] undef, [2 x float] [float 1.0, float 2.0])
   ret void
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll b/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
index 41e22e95f62..b760261f788 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
@@ -16,11 +16,11 @@ entry:
   %0 = load double, double* %d.addr, align 8
   %1 = load double, double* %d.addr, align 8
   %conv = fptoui double %1 to i64
-  %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), double %0, i64 %conv)
+  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), double %0, i64 %conv)
   %2 = load double, double* %d.addr, align 8
   %3 = load double, double* %d.addr, align 8
   %conv1 = fptoui double %3 to i32
-  %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str1, i32 0, i32 0), double %2, i32 %conv1)
+  %call2 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str1, i32 0, i32 0), double %2, i32 %conv1)
   ret void
 }
@@ -37,12 +37,12 @@ entry:
   %conv = fpext float %0 to double
   %1 = load float, float* %f.addr, align 4
   %conv1 = fptoui float %1 to i64
-  %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str2, i32 0, i32 0), double %conv, i64 %conv1)
+  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str2, i32 0, i32 0), double %conv, i64 %conv1)
   %2 = load float, float* %f.addr, align 4
   %conv2 = fpext float %2 to double
   %3 = load float, float* %f.addr, align 4
   %conv3 = fptoui float %3 to i32
-  %call4 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str3, i32 0, i32 0), double %conv2, i32 %conv3)
+  %call4 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str3, i32 0, i32 0), double %conv2, i32 %conv3)
   ret void
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-aapcs.ll b/llvm/test/CodeGen/AArch64/arm64-aapcs.ll
index 41c3ad5766c..390a3c75ff8 100644
--- a/llvm/test/CodeGen/AArch64/arm64-aapcs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-aapcs.ll
@@ -78,7 +78,7 @@ declare void @variadic(i32 %a, ...)
 ; Under AAPCS variadic functions have the same calling convention as
 ; others. The extra arguments should go in registers rather than on the stack.
 define void @test_variadic() {
-  call void(i32, ...)* @variadic(i32 0, i64 1, double 2.0)
+  call void(i32, ...) @variadic(i32 0, i64 1, double 2.0)
 ; CHECK: fmov d0, #2.0
 ; CHECK: orr w1, wzr, #0x1
 ; CHECK: bl variadic
diff --git a/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll b/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll
index f95fec66158..03414b56144 100644
--- a/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll
@@ -94,7 +94,7 @@ define i32 @main() nounwind ssp {
   %10 = load i32, i32* %a10, align 4
   %11 = load i32, i32* %a11, align 4
   %12 = load i32, i32* %a12, align 4
-  call void (i32, i32, i32, i32, i32, i32, i32, i32, i32, ...)* @fn9(i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12)
+  call void (i32, i32, i32, i32, i32, i32, i32, i32, i32, ...) @fn9(i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12)
   ret i32 0
 }
@@ -133,7 +133,7 @@ entry:
   store <4 x i32> %y, <4 x i32>* %y.addr, align 16
   %0 = load i32, i32* %x.addr, align 4
   %1 = load <4 x i32>, <4 x i32>* %y.addr, align 16
-  call void (i8*, ...)* @foo(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %0, <4 x i32> %1)
+  call void (i8*, ...) @foo(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %0, <4 x i32> %1)
   ret void
 }
@@ -186,6 +186,6 @@ entry:
   %1 = load i32, i32* %x.addr, align 4
   %2 = bitcast %struct.s41* %s41 to i128*
   %3 = load i128, i128* %2, align 1
-  call void (i8*, ...)* @foo2(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %1, i128 %3)
+  call void (i8*, ...) @foo2(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %1, i128 %3)
   ret void
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-anyregcc-crash.ll b/llvm/test/CodeGen/AArch64/arm64-anyregcc-crash.ll
index 241cf974c05..56c62d5fe7d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-anyregcc-crash.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-anyregcc-crash.ll
@@ -8,7 +8,7 @@ define i64 @anyreglimit(i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i6
                         i64 %v17, i64 %v18, i64 %v19, i64 %v20, i64 %v21, i64 %v22, i64 %v23, i64 %v24,
                         i64 %v25, i64 %v26, i64 %v27, i64 %v28, i64 %v29, i64 %v30, i64 %v31, i64 %v32) {
 entry:
-  %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 12, i32 15, i8* inttoptr (i64 0 to i8*), i32 32,
+  %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 12, i32 15, i8* inttoptr (i64 0 to i8*), i32 32,
                 i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i64 %v7, i64 %v8,
                 i64 %v9, i64 %v10, i64 %v11, i64 %v12, i64 %v13, i64 %v14, i64 %v15, i64 %v16,
                 i64 %v17, i64 %v18, i64 %v19, i64 %v20, i64 %v21, i64 %v22, i64 %v23, i64 %v24,
diff --git a/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll b/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll
index e26875d52f9..2a2f4519604 100644
--- a/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll
@@ -55,7 +55,7 @@
 ; CHECK-NEXT: .long 3
 define i64 @test() nounwind ssp uwtable {
 entry:
-  call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 0, i32 16, i8* null, i32 2, i32 1, i32 2, i64 3)
+  call anyregcc void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 0, i32 16, i8* null, i32 2, i32 1, i32 2, i64 3)
   ret i64 0
 }
@@ -77,7 +77,7 @@ entry:
 define i64 @property_access1(i8* %obj) nounwind ssp uwtable {
 entry:
   %f = inttoptr i64 281474417671919 to i8*
-  %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 1, i32 20, i8* %f, i32 1, i8* %obj)
+  %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 1, i32 20, i8* %f, i32 1, i8* %obj)
   ret i64 %ret
 }
@@ -100,7 +100,7 @@ define i64 @property_access2() nounwind ssp uwtable {
 entry:
   %obj = alloca i64, align 8
   %f = inttoptr i64 281474417671919 to i8*
-  %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 2, i32 20, i8* %f, i32 1, i64* %obj)
+  %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 20, i8* %f, i32 1, i64* %obj)
   ret i64 %ret
 }
@@ -123,7 +123,7 @@ define i64 @property_access3() nounwind ssp uwtable {
 entry:
   %obj = alloca i64, align 8
   %f = inttoptr i64 281474417671919 to i8*
-  %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 3, i32 20, i8* %f, i32 0, i64* %obj)
+  %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 3, i32 20, i8* %f, i32 0, i64* %obj)
   ret i64 %ret
 }
@@ -205,7 +205,7 @@ entry:
 define i64 @anyreg_test1(i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13) nounwind ssp uwtable {
 entry:
   %f = inttoptr i64 281474417671919 to i8*
-  %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 4, i32 20, i8* %f, i32 13, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
+  %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 4, i32 20, i8* %f, i32 13, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
   ret i64 %ret
 }
@@ -287,7 +287,7 @@ entry:
 define i64 @anyreg_test2(i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13) nounwind ssp uwtable {
 entry:
   %f = inttoptr i64 281474417671919 to i8*
-  %ret = call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* %f, i32 8, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
+  %ret = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* %f, i32 8, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a10, i8* %a11, i8* %a12, i8* %a13)
   ret i64 %ret
 }
@@ -315,7 +315,7 @@ entry:
 ; CHECK-NEXT: .long 0
 define i64 @patchpoint_spilldef(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
 entry:
-  %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 12, i32 16, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2)
+  %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 12, i32 16, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2)
   tail call void asm sideeffect "nop", "~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x30},~{x31}"() nounwind
   ret i64 %result
 }
@@ -355,7 +355,7 @@ entry:
 define i64 @patchpoint_spillargs(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
 entry:
   tail call void asm sideeffect "nop", "~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x30},~{x31}"() nounwind
-  %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 13, i32 16, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
+  %result = tail call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 13, i32 16, i8* inttoptr (i64 0 to i8*), i32 2, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
   ret i64 %result
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-fcopysign.ll b/llvm/test/CodeGen/AArch64/arm64-fcopysign.ll
index 66241df9444..feffd41f002 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fcopysign.ll
@@ -39,7 +39,7 @@ entry:
 ; CHECK: fcvt s0, d0
 ; CHECK: movi.4s v[[CONST:[0-9]+]], #0x80, lsl #24
 ; CHECK: bit.16b v{{[0-9]+}}, v0, v[[CONST]]
-  %0 = tail call double (...)* @bar() nounwind
+  %0 = tail call double (...) @bar() nounwind
   %1 = fptrunc double %0 to float
   %2 = tail call float @copysignf(float 5.000000e-01, float %1) nounwind readnone
   %3 = fadd float %1, %2
diff --git a/llvm/test/CodeGen/AArch64/arm64-join-reserved.ll b/llvm/test/CodeGen/AArch64/arm64-join-reserved.ll
index e99168b5eba..dee03448354 100644
--- a/llvm/test/CodeGen/AArch64/arm64-join-reserved.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-join-reserved.ll
@@ -10,7 +10,7 @@ target triple = "arm64-apple-macosx10"
 ; CHECK: ret
 define void @g() nounwind ssp {
 entry:
-  tail call void (i32, ...)* @f(i32 0, i32 0) nounwind
+  tail call void (i32, ...) @f(i32 0, i32 0) nounwind
   ret void
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-patchpoint-scratch-regs.ll b/llvm/test/CodeGen/AArch64/arm64-patchpoint-scratch-regs.ll
index 5a740d83df3..2651f119412 100644
--- a/llvm/test/CodeGen/AArch64/arm64-patchpoint-scratch-regs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-patchpoint-scratch-regs.ll
@@ -9,7 +9,7 @@ define void @clobberScratch(i32* %p) {
   %v = load i32, i32* %p
   tail call void asm sideeffect "nop", "~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x30},~{x31}"() nounwind
-  tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 5, i32 20, i8* null, i32 0, i32* %p, i32 %v)
+  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 5, i32 20, i8* null, i32 0, i32* %p, i32 %v)
   store i32 %v, i32* %p
   ret void
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-patchpoint-webkit_jscc.ll b/llvm/test/CodeGen/AArch64/arm64-patchpoint-webkit_jscc.ll
index 8f79f80ba33..b8236c5b247 100644
--- a/llvm/test/CodeGen/AArch64/arm64-patchpoint-webkit_jscc.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-patchpoint-webkit_jscc.ll
@@ -23,9 +23,9 @@ entry:
 ; FAST-NEXT: movk x16, #0xbeef
 ; FAST-NEXT: blr x16
   %resolveCall2 = inttoptr i64 281474417671919 to i8*
-  %result = tail call webkit_jscc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* %resolveCall2, i32 2, i64 %p4, i64 %p2)
+  %result = tail call webkit_jscc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* %resolveCall2, i32 2, i64 %p4, i64 %p2)
   %resolveCall3 = inttoptr i64 244837814038255 to i8*
-  tail call webkit_jscc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 6, i32 20, i8* %resolveCall3, i32 2, i64 %p4, i64 %result)
+  tail call webkit_jscc void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 6, i32 20, i8* %resolveCall3, i32 2, i64 %p4, i64 %result)
   ret void
 }
@@ -59,7 +59,7 @@ entry:
 ; FAST-NEXT: movk x16, #0xbeef
 ; FAST-NEXT: blr x16
   %call = inttoptr i64 281474417671919 to i8*
-  %result = call webkit_jscc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 7, i32 20, i8* %call, i32 6, i64 %callee, i64 2, i64 undef, i32 4, i32 undef, i64 6)
+  %result = call webkit_jscc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 7, i32 20, i8* %call, i32 6, i64 %callee, i64 2, i64 undef, i32 4, i32 undef, i64 6)
   ret i64 %result
 }
@@ -101,7 +101,7 @@ entry:
 ; FAST-NEXT: movk x16, #0xbeef
 ; FAST-NEXT: blr x16
   %call = inttoptr i64 281474417671919 to i8*
-  %result = call webkit_jscc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 7, i32 20, i8* %call, i32 10, i64 %callee, i64 2, i64 undef, i32 4, i32 undef, i64 6, i32 undef, i32 8, i32 undef, i64 10)
+  %result = call webkit_jscc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 7, i32 20, i8* %call, i32 10, i64 %callee, i64 2, i64 undef, i32 4, i32 undef, i64 6, i32 undef, i32 8, i32 undef, i64 10)
   ret i64 %result
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll b/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
index cf066532a62..d9ec7e50ff8 100644
--- a/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
@@ -16,9 +16,9 @@ entry:
 ; CHECK-NEXT: blr x16
 ; CHECK: ret
   %resolveCall2 = inttoptr i64 244837814094590 to i8*
-  %result = tail call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 2, i32 20, i8* %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
+  %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 20, i8* %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
   %resolveCall3 = inttoptr i64 244837814094591 to i8*
-  tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 3, i32 20, i8* %resolveCall3, i32 2, i64 %p1, i64 %result)
+  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 3, i32 20, i8* %resolveCall3, i32 2, i64 %p1, i64 %result)
   ret i64 %result
 }
@@ -38,7 +38,7 @@ entry:
   store i64 11, i64* %metadata
   store i64 12, i64* %metadata
   store i64 13, i64* %metadata
-  call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
+  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
   ret void
 }
@@ -51,14 +51,14 @@ entry:
   %tmp80 = add i64 %tmp79, -16
   %tmp81 = inttoptr i64 %tmp80 to i64*
   %tmp82 = load i64, i64* %tmp81, align 8
-  tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 14, i32 8, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
-  tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 15, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
+  tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 14, i32 8, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
+  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 15, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
   %tmp83 = load i64, i64* %tmp33, align 8
   %tmp84 = add i64 %tmp83, -24
   %tmp85 = inttoptr i64 %tmp84 to i64*
   %tmp86 = load i64, i64* %tmp85, align 8
-  tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 17, i32 8, i64 %arg, i64 %tmp10, i64 %tmp86)
-  tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 18, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
+  tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 17, i32 8, i64 %arg, i64 %tmp10, i64 %tmp86)
+  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 18, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
   ret i64 10
 }
@@ -74,7 +74,7 @@ entry:
 ; CHECK-NEXT: nop
 ; CHECK-NEXT: ldp
 ; CHECK-NEXT: ret
-  %result = tail call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* null, i32 2, i64 %p1, i64 %p2)
+  %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* null, i32 2, i64 %p1, i64 %p2)
   ret void
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-stackmap-nops.ll b/llvm/test/CodeGen/AArch64/arm64-stackmap-nops.ll
index 5915b64edf0..2647ac44296 100644
--- a/llvm/test/CodeGen/AArch64/arm64-stackmap-nops.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-stackmap-nops.ll
@@ -8,7 +8,7 @@ entry:
 ; CHECK: nop
 ; CHECK-NEXT: nop
 ; CHECK-NOT: nop
-  tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 0, i32 16)
+  tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 0, i32 16)
   ret void
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-stackmap.ll b/llvm/test/CodeGen/AArch64/arm64-stackmap.ll
index 29e44846035..1a4df7a6f2d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-stackmap.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-stackmap.ll
@@ -78,7 +78,7 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 define void @constantargs() {
 entry:
   %0 = inttoptr i64 244837814094590 to i8*
-  tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 1, i32 20, i8* %0, i32 0, i64 65535, i64 65536, i64 4294967295, i64 4294967296)
+  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 20, i8* %0, i32 0, i64 65535, i64 65536, i64 4294967295, i64 4294967296)
   ret void
 }
@@ -100,7 +100,7 @@ entry:
 ; Runtime void->void call.
   call void inttoptr (i64 244837814094590 to void ()*)()
 ; Followed by inline OSR patchpoint with 12-byte shadow and 2 live vars.
-  call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 3, i32 12, i64 %a, i64 %b)
+  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 3, i32 12, i64 %a, i64 %b)
   ret void
 }
@@ -126,7 +126,7 @@ entry:
 cold:
 ; OSR patchpoint with 12-byte nop-slide and 2 live vars.
   %thunk = inttoptr i64 244837814094590 to i8*
-  call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 4, i32 20, i8* %thunk, i32 0, i64 %a, i64 %b)
+  call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 4, i32 20, i8* %thunk, i32 0, i64 %a, i64 %b)
   unreachable
 ret:
   ret void
@@ -142,7 +142,7 @@ ret:
 define i64 @propertyRead(i64* %obj) {
 entry:
   %resolveRead = inttoptr i64 244837814094590 to i8*
-  %result = call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* %resolveRead, i32 1, i64* %obj)
+  %result = call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* %resolveRead, i32 1, i64* %obj)
   %add = add i64 %result, 3
   ret i64 %add
 }
@@ -162,7 +162,7 @@ entry:
 define void @propertyWrite(i64 %dummy1, i64* %obj, i64 %dummy2, i64 %a) {
 entry:
   %resolveWrite = inttoptr i64 244837814094590 to i8*
-  call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 6, i32 20, i8* %resolveWrite, i32 2, i64* %obj, i64 %a)
+  call anyregcc void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 6, i32 20, i8* %resolveWrite, i32 2, i64* %obj, i64 %a)
   ret void
 }
@@ -184,7 +184,7 @@ entry:
 define void @jsVoidCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
 entry:
   %resolveCall = inttoptr i64 244837814094590 to i8*
-  call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 7, i32 20, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+  call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 7, i32 20, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
   ret void
 }
@@ -206,7 +206,7 @@ entry:
 define i64 @jsIntCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) {
 entry:
   %resolveCall = inttoptr i64 244837814094590 to i8*
-  %result = call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 8, i32 20, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
+  %result = call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 8, i32 20, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2)
   %add = add i64 %result, 3
   ret i64 %add
 }
@@ -226,7 +226,7 @@ entry:
 ; CHECK-NEXT: .short 29
 define void @spilledValue(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27) {
 entry:
-  call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 11, i32 20, i8* null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27)
+  call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 20, i8* null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27)
   ret void
 }
@@ -245,7 +245,7 @@ entry:
 ; CHECK-NEXT: .short 29
 define webkit_jscc void @spilledStackMapValue(i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27, i64 %l28, i64 %l29) {
 entry:
-  call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 12, i32 16, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27, i64 %l28, i64 %l29)
+  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 12, i32 16, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27, i64 %l28, i64 %l29)
   ret void
 }
@@ -263,7 +263,7 @@ entry:
 ; CHECK-NEXT: .long 33
 define void @liveConstant() {
-  tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 15, i32 8, i32 33)
+  tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 15, i32 8, i32 33)
   ret void
 }
@@ -280,7 +280,7 @@ define void @liveConstant() {
 ; CHECK-NEXT: .long -{{[0-9]+}}
 define void @clobberLR(i32 %a) {
   tail call void asm sideeffect "nop", "~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x31}"() nounwind
-  tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 16, i32 8, i32 %a)
+  tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 16, i32 8, i32 %a)
   ret void
 }
diff --git a/llvm/test/CodeGen/AArch64/br-to-eh-lpad.ll b/llvm/test/CodeGen/AArch64/br-to-eh-lpad.ll
index e948b87a63d..f304ba4ca28 100644
--- a/llvm/test/CodeGen/AArch64/br-to-eh-lpad.ll
+++ b/llvm/test/CodeGen/AArch64/br-to-eh-lpad.ll
@@ -30,12 +30,12 @@ invoke.cont7:
   unreachable
 if.end50.thread:
-  tail call void (i8*, ...)* @printf(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @.str1, i64 0, i64 0), i32 125)
-  tail call void (i8*, ...)* @printf(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @.str1, i64 0, i64 0), i32 128)
+  tail call void (i8*, ...) @printf(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @.str1, i64 0, i64 0), i32 125)
+  tail call void (i8*, ...) @printf(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @.str1, i64 0, i64 0), i32 128)
   unreachable
 invoke.cont33:
-  tail call void (i8*, ...)* @printf(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @.str1, i64 0, i64 0), i32 119)
+  tail call void (i8*, ...) @printf(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @.str1, i64 0, i64 0), i32 119)
   unreachable
 invoke.cont41:
@@ -51,7 +51,7 @@ lpad40:
   br label %finally.catchall
 finally.catchall:
-  tail call void (i8*, ...)* @printf(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @.str1, i64 0, i64 0), i32 125)
+  tail call void (i8*, ...) @printf(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @.str1, i64 0, i64 0), i32 125)
   unreachable
 }
diff --git a/llvm/test/CodeGen/AArch64/dag-combine-invaraints.ll b/llvm/test/CodeGen/AArch64/dag-combine-invaraints.ll
index 36141331633..ac2d057ff3c 100644
--- a/llvm/test/CodeGen/AArch64/dag-combine-invaraints.ll
+++ b/llvm/test/CodeGen/AArch64/dag-combine-invaraints.ll
@@ -20,7 +20,7 @@ main_:
   %DHSelect = select i1 %tmp8, i32 %tmp9, i32 %tmp10
   store i32 %DHSelect, i32* %i32X, align 4
   %tmp15 = load i32, i32* %i32X, align 4
-  %tmp17 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str2, i32 0, i32 0), i32 %tmp15)
+  %tmp17 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str2, i32 0, i32 0), i32 %tmp15)
   ret i32 0
 ; CHECK: main:
diff --git a/llvm/test/CodeGen/AArch64/stackmap-liveness.ll b/llvm/test/CodeGen/AArch64/stackmap-liveness.ll
index 91dcbb20cd8..6b37aac16f9 100644
--- a/llvm/test/CodeGen/AArch64/stackmap-liveness.ll
+++ b/llvm/test/CodeGen/AArch64/stackmap-liveness.ll
@@ -39,7 +39,7 @@ define i64 @stackmap_liveness(i1 %c) {
 ; Align
 ; CHECK-NEXT: .align 3
   %1 = select i1 %c, i64 1, i64 2
-  call anyregcc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 1, i32 32, i8* null, i32 0)
+  call anyregcc void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 32, i8* null, i32 0)
   ret i64 %1
 }