From cf12c7815fed11f789076bd0cd647fe1dfd78b45 Mon Sep 17 00:00:00 2001
From: Amara Emerson
Date: Fri, 19 Jul 2019 00:24:45 +0000
Subject: [GlobalISel] Translate calls to memcpy et al to G_INTRINSIC_W_SIDE_EFFECTs and legalize later.

I plan on adding memcpy optimizations in the GlobalISel pipeline, but we
can't do that unless we delay lowering to actual function calls. This patch
changes the translator to generate G_INTRINSIC_W_SIDE_EFFECTS for these
functions, and then has each target use the new custom legalizer hook for
intrinsics to specify that it wants them expanded into libcalls.

Differential Revision: https://reviews.llvm.org/D64895

llvm-svn: 366516
---
 .../AArch64/GlobalISel/arm64-irtranslator.ll     | 27 +++----
 .../AArch64/GlobalISel/legalize-memcpy-et-al.mir | 91 ++++++++++++++++++++++
 .../CodeGen/Mips/GlobalISel/irtranslator/call.ll | 14 +---
 .../x86_64-irtranslator-struct-return.ll         | 63 +++------------
 4 files changed, 116 insertions(+), 79 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-et-al.mir

(limited to 'llvm/test')

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
index 6891adc5493..d031380b38e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -1130,24 +1130,29 @@ define void @test_memcpy(i8* %dst, i8* %src, i64 %size) {
 ; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
-; CHECK: $x0 = COPY [[DST]]
-; CHECK: $x1 = COPY [[SRC]]
-; CHECK: $x2 = COPY [[SIZE]]
-; CHECK: BL &memcpy, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2
+; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[DST]](p0), [[SRC]](p0), [[SIZE]](s64) :: (store 1 into %ir.dst), (load 1 from %ir.src)
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
   ret void
 }
 
+declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)*, i8 addrspace(1)*, i64, i1)
+define void @test_memcpy_nonzero_as(i8 addrspace(1)* %dst, i8 addrspace(1) * %src, i64 %size) {
+; CHECK-LABEL: name: test_memcpy_nonzero_as
+; CHECK: [[DST:%[0-9]+]]:_(p1) = COPY $x0
+; CHECK: [[SRC:%[0-9]+]]:_(p1) = COPY $x1
+; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
+; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[DST]](p1), [[SRC]](p1), [[SIZE]](s64) :: (store 1 into %ir.dst, addrspace 1), (load 1 from %ir.src, addrspace 1)
+  call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 %size, i1 0)
+  ret void
+}
+
 declare void @llvm.memmove.p0i8.p0i8.i64(i8*, i8*, i64, i1)
 define void @test_memmove(i8* %dst, i8* %src, i64 %size) {
 ; CHECK-LABEL: name: test_memmove
 ; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
-; CHECK: $x0 = COPY [[DST]]
-; CHECK: $x1 = COPY [[SRC]]
-; CHECK: $x2 = COPY [[SIZE]]
-; CHECK: BL &memmove, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2
+; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), [[DST]](p0), [[SRC]](p0), [[SIZE]](s64) :: (store 1 into %ir.dst), (load 1 from %ir.src)
   call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
   ret void
 }
@@ -1159,11 +1164,7 @@ define void @test_memset(i8* %dst, i8 %val, i64 %size) {
 ; CHECK: [[SRC_C:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: 
[[SRC:%[0-9]+]]:_(s8) = G_TRUNC [[SRC_C]] ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2 -; CHECK: $x0 = COPY [[DST]] -; CHECK: [[SRC_TMP:%[0-9]+]]:_(s32) = G_ANYEXT [[SRC]] -; CHECK: $w1 = COPY [[SRC_TMP]] -; CHECK: $x2 = COPY [[SIZE]] -; CHECK: BL &memset, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $w1, implicit $x2 +; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), [[DST]](p0), [[SRC]](s8), [[SIZE]](s64) :: (store 1 into %ir.dst) call void @llvm.memset.p0i8.i64(i8* %dst, i8 %val, i64 %size, i1 0) ret void } diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-et-al.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-et-al.mir new file mode 100644 index 00000000000..46836a3f291 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-et-al.mir @@ -0,0 +1,91 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -march=aarch64 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s +--- +name: test_memcpy +tracksRegLiveness: true +body: | + bb.1: + liveins: $w2, $x0, $x1 + + ; CHECK-LABEL: name: test_memcpy + ; CHECK: liveins: $w2, $x0, $x1 + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1 + ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2 + ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32) + ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp + ; CHECK: $x0 = COPY [[COPY]](p0) + ; CHECK: $x1 = COPY [[COPY1]](p0) + ; CHECK: $x2 = COPY [[ZEXT]](s64) + ; CHECK: BL &memcpy, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2 + ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp + ; CHECK: RET_ReallyLR + %0:_(p0) = COPY $x0 + %1:_(p0) = COPY $x1 + %2:_(s32) = COPY $w2 + %4:_(s1) = G_CONSTANT i1 false + %3:_(s64) = G_ZEXT %2(s32) + G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %3(s64), %4(s1) + RET_ReallyLR + +... +--- +name: test_memmove +tracksRegLiveness: true +body: | + bb.1: + liveins: $w2, $x0, $x1 + + ; CHECK-LABEL: name: test_memmove + ; CHECK: liveins: $w2, $x0, $x1 + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1 + ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2 + ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32) + ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp + ; CHECK: $x0 = COPY [[COPY]](p0) + ; CHECK: $x1 = COPY [[COPY1]](p0) + ; CHECK: $x2 = COPY [[ZEXT]](s64) + ; CHECK: BL &memmove, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2 + ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp + ; CHECK: RET_ReallyLR + %0:_(p0) = COPY $x0 + %1:_(p0) = COPY $x1 + %2:_(s32) = COPY $w2 + %4:_(s1) = G_CONSTANT i1 false + %3:_(s64) = G_ZEXT %2(s32) + G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %3(s64), %4(s1) + RET_ReallyLR + +... 
+--- +name: test_memset +tracksRegLiveness: true +body: | + bb.1: + liveins: $w1, $w2, $x0 + + ; CHECK-LABEL: name: test_memset + ; CHECK: liveins: $w1, $w2, $x0 + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 + ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2 + ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32) + ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp + ; CHECK: $x0 = COPY [[COPY]](p0) + ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; CHECK: $w1 = COPY [[COPY3]](s32) + ; CHECK: $x2 = COPY [[ZEXT]](s64) + ; CHECK: BL &memset, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $w1, implicit $x2 + ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp + ; CHECK: RET_ReallyLR + %0:_(p0) = COPY $x0 + %1:_(s32) = COPY $w1 + %2:_(s32) = COPY $w2 + %5:_(s1) = G_CONSTANT i1 false + %3:_(s8) = G_TRUNC %1(s32) + %4:_(s64) = G_ZEXT %2(s32) + G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %3(s8), %4(s64), %5(s1) + RET_ReallyLR + +... diff --git a/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/call.ll b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/call.ll index c76750208fc..b8475835478 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/call.ll +++ b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/call.ll @@ -153,12 +153,7 @@ define void @call_symbol(i8* nocapture readonly %src, i8* nocapture %dest, i32 s ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0 ; MIPS32: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1 ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2 - ; MIPS32: ADJCALLSTACKDOWN 16, 0, implicit-def $sp, implicit $sp - ; MIPS32: $a0 = COPY [[COPY1]](p0) - ; MIPS32: $a1 = COPY [[COPY]](p0) - ; MIPS32: $a2 = COPY [[COPY2]](s32) - ; MIPS32: JAL &memcpy, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2 - ; MIPS32: ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp + ; MIPS32: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[COPY1]](p0), [[COPY]](p0), [[COPY2]](s32) :: (store 1 into %ir.dest), (load 1 from %ir.src) ; MIPS32: RetRA ; MIPS32_PIC-LABEL: name: call_symbol ; MIPS32_PIC: bb.1.entry: @@ -166,12 +161,7 @@ define void @call_symbol(i8* nocapture readonly %src, i8* nocapture %dest, i32 s ; MIPS32_PIC: [[COPY:%[0-9]+]]:_(p0) = COPY $a0 ; MIPS32_PIC: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1 ; MIPS32_PIC: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2 - ; MIPS32_PIC: ADJCALLSTACKDOWN 16, 0, implicit-def $sp, implicit $sp - ; MIPS32_PIC: $a0 = COPY [[COPY1]](p0) - ; MIPS32_PIC: $a1 = COPY [[COPY]](p0) - ; MIPS32_PIC: $a2 = COPY [[COPY2]](s32) - ; MIPS32_PIC: JAL &memcpy, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2 - ; MIPS32_PIC: ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp + ; MIPS32_PIC: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[COPY1]](p0), [[COPY]](p0), [[COPY2]](s32) :: (store 1 into %ir.dest), (load 1 from %ir.src) ; MIPS32_PIC: RetRA entry: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 %length, i1 false) diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll b/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll index 8b96798823c..4d70bbec05c 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll +++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll @@ -19,12 +19,7 @@ define float @test_return_f1(float %f.coerce) { ; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX 
%stack.0.retval ; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.f ; ALL: G_STORE [[TRUNC]](s32), [[FRAME_INDEX1]](p0) :: (store 4 into %ir.coerce.dive2) - ; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp - ; ALL: $rdi = COPY [[FRAME_INDEX]](p0) - ; ALL: $rsi = COPY [[FRAME_INDEX1]](p0) - ; ALL: $rdx = COPY [[C]](s64) - ; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx - ; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp + ; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64) :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4) ; ALL: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %ir.coerce.dive13) ; ALL: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD]](s32) ; ALL: $xmm0 = COPY [[ANYEXT]](s128) @@ -54,12 +49,7 @@ define double @test_return_d1(double %d.coerce) { ; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval ; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.d ; ALL: G_STORE [[TRUNC]](s64), [[FRAME_INDEX1]](p0) :: (store 8 into %ir.coerce.dive2) - ; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp - ; ALL: $rdi = COPY [[FRAME_INDEX]](p0) - ; ALL: $rsi = COPY [[FRAME_INDEX1]](p0) - ; ALL: $rdx = COPY [[C]](s64) - ; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx - ; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp + ; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64) :: (store 1 into %ir.0, align 8), (load 1 from %ir.1, align 8) ; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load 8 from %ir.coerce.dive13) ; ALL: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD]](s64) ; ALL: $xmm0 = COPY [[ANYEXT]](s128) @@ -92,12 +82,7 @@ define { double, double } @test_return_d2(double %d.coerce0, double %d.coerce1) ; ALL: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; ALL: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX1]], [[C1]](s64) ; ALL: G_STORE [[TRUNC1]](s64), [[GEP]](p0) :: (store 8 into %ir.2) - ; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp - ; ALL: $rdi = COPY [[FRAME_INDEX]](p0) - ; ALL: $rsi = COPY [[FRAME_INDEX1]](p0) - ; ALL: $rdx = COPY [[C]](s64) - ; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx - ; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp + ; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64) :: (store 1 into %ir.3, align 8), (load 1 from %ir.4, align 8) ; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load 8 from %ir.5) ; ALL: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX]], [[C1]](s64) ; ALL: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p0) :: (load 8 from %ir.5 + 8) @@ -131,12 +116,7 @@ define i32 @test_return_i1(i32 %i.coerce) { ; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval ; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.i ; ALL: G_STORE [[COPY]](s32), 
[[FRAME_INDEX1]](p0) :: (store 4 into %ir.coerce.dive2) - ; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp - ; ALL: $rdi = COPY [[FRAME_INDEX]](p0) - ; ALL: $rsi = COPY [[FRAME_INDEX1]](p0) - ; ALL: $rdx = COPY [[C]](s64) - ; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx - ; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp + ; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64) :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4) ; ALL: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %ir.coerce.dive13) ; ALL: $eax = COPY [[LOAD]](s32) ; ALL: RET 0, implicit $eax @@ -162,12 +142,7 @@ define i64 @test_return_i2(i64 %i.coerce) { ; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval ; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.i ; ALL: G_STORE [[COPY]](s64), [[FRAME_INDEX1]](p0) :: (store 8 into %ir.0, align 4) - ; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp - ; ALL: $rdi = COPY [[FRAME_INDEX]](p0) - ; ALL: $rsi = COPY [[FRAME_INDEX1]](p0) - ; ALL: $rdx = COPY [[C]](s64) - ; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx - ; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp + ; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64) :: (store 1 into %ir.1, align 4), (load 1 from %ir.2, align 4) ; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load 8 from %ir.3, align 4) ; ALL: $rax = COPY [[LOAD]](s64) ; ALL: RET 0, implicit $rax @@ -199,24 +174,9 @@ define { i64, i32 } @test_return_i3(i64 %i.coerce0, i32 %i.coerce1) { ; ALL: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; ALL: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX2]], [[C1]](s64) ; ALL: G_STORE [[COPY1]](s32), [[GEP]](p0) :: (store 4 into %ir.1) - ; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp - ; ALL: $rdi = COPY [[FRAME_INDEX1]](p0) - ; ALL: $rsi = COPY [[FRAME_INDEX2]](p0) - ; ALL: $rdx = COPY [[C]](s64) - ; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx - ; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp - ; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp - ; ALL: $rdi = COPY [[FRAME_INDEX]](p0) - ; ALL: $rsi = COPY [[FRAME_INDEX1]](p0) - ; ALL: $rdx = COPY [[C]](s64) - ; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx - ; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp - ; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp - ; ALL: $rdi = COPY [[FRAME_INDEX3]](p0) - ; ALL: $rsi = COPY [[FRAME_INDEX]](p0) - ; ALL: $rdx = COPY [[C]](s64) - ; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx - ; ALL: 
ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp + ; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX1]](p0), [[FRAME_INDEX2]](p0), [[C]](s64) :: (store 1 into %ir.2, align 4), (load 1 from %ir.3, align 4) + ; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64) :: (store 1 into %ir.4, align 4), (load 1 from %ir.5, align 4) + ; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX3]](p0), [[FRAME_INDEX]](p0), [[C]](s64) :: (store 1 into %ir.6, align 8), (load 1 from %ir.7, align 4) ; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX3]](p0) :: (load 8 from %ir.tmp) ; ALL: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX3]], [[C1]](s64) ; ALL: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load 4 from %ir.tmp + 8, align 8) @@ -258,12 +218,7 @@ define { i64, i64 } @test_return_i4(i64 %i.coerce0, i64 %i.coerce1) { ; ALL: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 ; ALL: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX1]], [[C1]](s64) ; ALL: G_STORE [[COPY1]](s64), [[GEP]](p0) :: (store 8 into %ir.2, align 4) - ; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp - ; ALL: $rdi = COPY [[FRAME_INDEX]](p0) - ; ALL: $rsi = COPY [[FRAME_INDEX1]](p0) - ; ALL: $rdx = COPY [[C]](s64) - ; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx - ; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp + ; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64) :: (store 1 into %ir.3, align 4), (load 1 from %ir.4, align 4) ; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load 8 from %ir.5, align 4) ; ALL: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX]], [[C1]](s64) ; ALL: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p0) :: (load 8 from %ir.5 + 8, align 4) -- cgit v1.2.3
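
Note (not part of the patch above, which is limited to llvm/test): the target-side half of the change is easiest to picture as a custom intrinsic-legalization hook that turns the new G_INTRINSIC_W_SIDE_EFFECTS instructions back into libcalls, which is what the legalize-memcpy-et-al.mir checks expect after the legalizer runs. The sketch below illustrates that shape; the hook and helper names (legalizeIntrinsic, createMemLibcall) and their signatures are assumptions based on the GlobalISel API of this era, not code copied from this commit.

// Sketch only: assumed hook/helper names and signatures, not the actual
// lowering code from this commit (the diff above shows only the tests).
#include "AArch64LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Intrinsics.h"

using namespace llvm;

bool AArch64LegalizerInfo::legalizeIntrinsic(MachineInstr &MI,
                                             MachineRegisterInfo &MRI,
                                             MachineIRBuilder &MIRBuilder) const {
  switch (MI.getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
    // Expand G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.mem*) into a call to
    // &memcpy/&memmove/&memset, the sequence the CHECK lines in
    // legalize-memcpy-et-al.mir look for after this pass.
    if (createMemLibcall(MIRBuilder, MRI, MI) ==
        LegalizerHelper::UnableToLegalize)
      return false;
    MI.eraseFromParent();
    return true;
  default:
    return true;
  }
}

The IRTranslator side simply stops emitting the call sequence at translation time, which is why the irtranslator tests above now check for a single G_INTRINSIC_W_SIDE_EFFECTS instruction instead of the ADJCALLSTACK/BL bracket.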