summary refs log tree commit diff stats
path: root/llvm/test
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/test')
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll14
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll11
-rw-r--r--llvm/test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll49
-rw-r--r--llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll290
4 files changed, 309 insertions, 55 deletions
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll
index 0902e29e4a1..e3995b2cbad 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll
@@ -97,18 +97,10 @@ define void @test_struct_formal({double, i64, i8} %in, {double, i64, i8}* %addr)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[ADDR]], [[CST2]](s64)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.addr + 16, align 8)
-; CHECK: [[IMPDEF:%[0-9]+]]:_(s192) = G_IMPLICIT_DEF
-; CHECK: [[INS1:%[0-9]+]]:_(s192) = G_INSERT [[IMPDEF]], [[LD1]](s64), 0
-; CHECK: [[INS2:%[0-9]+]]:_(s192) = G_INSERT [[INS1]], [[LD2]](s64), 64
-; CHECK: [[VAL:%[0-9]+]]:_(s192) = G_INSERT [[INS2]], [[LD3]](s32), 128
-; CHECK: [[DBL:%[0-9]+]]:_(s64) = G_EXTRACT [[VAL]](s192), 0
-; CHECK: [[I64:%[0-9]+]]:_(s64) = G_EXTRACT [[VAL]](s192), 64
-; CHECK: [[I32:%[0-9]+]]:_(s32) = G_EXTRACT [[VAL]](s192), 128
-
-; CHECK: $d0 = COPY [[DBL]](s64)
-; CHECK: $x0 = COPY [[I64]](s64)
-; CHECK: $w1 = COPY [[I32]](s32)
+; CHECK: $d0 = COPY [[LD1]](s64)
+; CHECK: $x0 = COPY [[LD2]](s64)
+; CHECK: $w1 = COPY [[LD3]](s32)
; CHECK: RET_ReallyLR implicit $d0, implicit $x0, implicit $w1
define {double, i64, i32} @test_struct_return({double, i64, i32}* %addr) {
%val = load {double, i64, i32}, {double, i64, i32}* %addr
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
index 6b8827f095d..5edf9d96ce3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
@@ -19,20 +19,15 @@ declare i32 @llvm.eh.typeid.for(i8*)
; CHECK: [[BAD]].{{[a-z]+}} (landing-pad):
; CHECK: EH_LABEL
-; CHECK: [[PTR:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[PTR_RET:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEL_PTR:%[0-9]+]]:_(p0) = COPY $x1
-; CHECK: [[SEL:%[0-9]+]]:_(s32) = G_PTRTOINT [[SEL_PTR]]
-; CHECK: [[UNDEF:%[0-9]+]]:_(s128) = G_IMPLICIT_DEF
-; CHECK: [[VAL_WITH_PTR:%[0-9]+]]:_(s128) = G_INSERT [[UNDEF]], [[PTR]](p0), 0
-; CHECK: [[PTR_SEL:%[0-9]+]]:_(s128) = G_INSERT [[VAL_WITH_PTR]], [[SEL]](s32), 64
-; CHECK: [[PTR_RET:%[0-9]+]]:_(s64) = G_EXTRACT [[PTR_SEL]](s128), 0
-; CHECK: [[SEL_RET:%[0-9]+]]:_(s32) = G_EXTRACT [[PTR_SEL]](s128), 64
+; CHECK: [[SEL_RET:%[0-9]+]]:_(s32) = G_PTRTOINT [[SEL_PTR]]
; CHECK: $x0 = COPY [[PTR_RET]]
; CHECK: $w1 = COPY [[SEL_RET]]
; CHECK: [[GOOD]].{{[a-z]+}}:
; CHECK: [[SEL:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-; CHECK: {{%[0-9]+}}:_(s128) = G_INSERT {{%[0-9]+}}, [[SEL]](s32), 64
+; CHECK: $w1 = COPY [[SEL]]
define { i8*, i32 } @bar() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
%res32 = invoke i32 @foo(i32 42) to label %continue unwind label %broken
diff --git a/llvm/test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll b/llvm/test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll
index ff307af1e17..14ddeda39f0 100644
--- a/llvm/test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll
+++ b/llvm/test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll
@@ -209,17 +209,12 @@ define arm_aapcscc [3 x i32] @test_tiny_int_arrays([2 x i32] %arr) {
; CHECK: [[EXT3:%[0-9]+]]:_(s32) = G_EXTRACT [[RES_ARR]](s96), 0
; CHECK: [[EXT4:%[0-9]+]]:_(s32) = G_EXTRACT [[RES_ARR]](s96), 32
; CHECK: [[EXT5:%[0-9]+]]:_(s32) = G_EXTRACT [[RES_ARR]](s96), 64
-; CHECK: [[IMPDEF2:%[0-9]+]]:_(s96) = G_IMPLICIT_DEF
-; CHECK: [[INS3:%[0-9]+]]:_(s96) = G_INSERT [[IMPDEF2]], [[EXT3]](s32), 0
-; CHECK: [[INS4:%[0-9]+]]:_(s96) = G_INSERT [[INS3]], [[EXT4]](s32), 32
-; CHECK: [[INS5:%[0-9]+]]:_(s96) = G_INSERT [[INS4]], [[EXT5]](s32), 64
-; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32), [[R2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INS5]](s96)
; FIXME: This doesn't seem correct with regard to the AAPCS docs (which say
; that composite types larger than 4 bytes should be passed through memory),
; but it's what DAGISel does. We should fix it in the common code for both.
-; CHECK: $r0 = COPY [[R0]]
-; CHECK: $r1 = COPY [[R1]]
-; CHECK: $r2 = COPY [[R2]]
+; CHECK: $r0 = COPY [[EXT3]]
+; CHECK: $r1 = COPY [[EXT4]]
+; CHECK: $r2 = COPY [[EXT5]]
; CHECK: BX_RET 14, $noreg, implicit $r0, implicit $r1, implicit $r2
entry:
%r = notail call arm_aapcscc [3 x i32] @tiny_int_arrays_target([2 x i32] %arr)
@@ -354,12 +349,8 @@ define arm_aapcscc [2 x float] @test_fp_arrays_aapcs([3 x double] %arr) {
; CHECK: ADJCALLSTACKUP 8, 0, 14, $noreg, implicit-def $sp, implicit $sp
; CHECK: [[EXT4:%[0-9]+]]:_(s32) = G_EXTRACT [[R_MERGED]](s64), 0
; CHECK: [[EXT5:%[0-9]+]]:_(s32) = G_EXTRACT [[R_MERGED]](s64), 32
-; CHECK: [[IMPDEF2:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
-; CHECK: [[INS4:%[0-9]+]]:_(s64) = G_INSERT [[IMPDEF2]], [[EXT4]](s32), 0
-; CHECK: [[INS5:%[0-9]+]]:_(s64) = G_INSERT [[INS4]], [[EXT5]](s32), 32
-; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INS5]](s64)
-; CHECK: $r0 = COPY [[R0]]
-; CHECK: $r1 = COPY [[R1]]
+; CHECK: $r0 = COPY [[EXT4]]
+; CHECK: $r1 = COPY [[EXT5]]
; CHECK: BX_RET 14, $noreg, implicit $r0, implicit $r1
entry:
%r = notail call arm_aapcscc [2 x float] @fp_arrays_aapcs_target([3 x double] %arr)
@@ -453,16 +444,10 @@ define arm_aapcs_vfpcc [4 x float] @test_fp_arrays_aapcs_vfp([3 x double] %x, [3
; CHECK: [[EXT12:%[0-9]+]]:_(s32) = G_EXTRACT [[R_MERGED]](s128), 32
; CHECK: [[EXT13:%[0-9]+]]:_(s32) = G_EXTRACT [[R_MERGED]](s128), 64
; CHECK: [[EXT14:%[0-9]+]]:_(s32) = G_EXTRACT [[R_MERGED]](s128), 96
-; CHECK: [[IMPDEF4:%[0-9]+]]:_(s128) = G_IMPLICIT_DEF
-; CHECK: [[INS11:%[0-9]+]]:_(s128) = G_INSERT [[IMPDEF4]], [[EXT11]](s32), 0
-; CHECK: [[INS12:%[0-9]+]]:_(s128) = G_INSERT [[INS11]], [[EXT12]](s32), 32
-; CHECK: [[INS13:%[0-9]+]]:_(s128) = G_INSERT [[INS12]], [[EXT13]](s32), 64
-; CHECK: [[INS14:%[0-9]+]]:_(s128) = G_INSERT [[INS13]], [[EXT14]](s32), 96
-; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32), [[R2:%[0-9]+]]:_(s32), [[R3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INS14]](s128)
-; CHECK: $s0 = COPY [[R0]]
-; CHECK: $s1 = COPY [[R1]]
-; CHECK: $s2 = COPY [[R2]]
-; CHECK: $s3 = COPY [[R3]]
+; CHECK: $s0 = COPY [[EXT11]]
+; CHECK: $s1 = COPY [[EXT12]]
+; CHECK: $s2 = COPY [[EXT13]]
+; CHECK: $s3 = COPY [[EXT14]]
; CHECK: BX_RET 14, $noreg, implicit $s0, implicit $s1, implicit $s2, implicit $s3
entry:
%r = notail call arm_aapcs_vfpcc [4 x float] @fp_arrays_aapcs_vfp_target([3 x double] %x, [3 x float] %y, [4 x double] %z)
@@ -512,12 +497,8 @@ define arm_aapcscc [2 x i32*] @test_tough_arrays([6 x [4 x i32]] %arr) {
; CHECK: ADJCALLSTACKUP 80, 0, 14, $noreg, implicit-def $sp, implicit $sp
; CHECK: [[EXT1:%[0-9]+]]:_(p0) = G_EXTRACT [[RES_ARR]](s64), 0
; CHECK: [[EXT2:%[0-9]+]]:_(p0) = G_EXTRACT [[RES_ARR]](s64), 32
-; CHECK: [[IMPDEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
-; CHECK: [[INS2:%[0-9]+]]:_(s64) = G_INSERT [[IMPDEF]], [[EXT1]](p0), 0
-; CHECK: [[INS3:%[0-9]+]]:_(s64) = G_INSERT [[INS2]], [[EXT2]](p0), 32
-; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INS3]](s64)
-; CHECK: $r0 = COPY [[R0]]
-; CHECK: $r1 = COPY [[R1]]
+; CHECK: $r0 = COPY [[EXT1]]
+; CHECK: $r1 = COPY [[EXT2]]
; CHECK: BX_RET 14, $noreg, implicit $r0, implicit $r1
entry:
%r = notail call arm_aapcscc [2 x i32*] @tough_arrays_target([6 x [4 x i32]] %arr)
@@ -548,12 +529,8 @@ define arm_aapcscc {i32, i32} @test_structs({i32, i32} %x) {
; CHECK: ADJCALLSTACKUP 0, 0, 14, $noreg, implicit-def $sp, implicit $sp
; CHECK: [[EXT3:%[0-9]+]]:_(s32) = G_EXTRACT [[R]](s64), 0
; CHECK: [[EXT4:%[0-9]+]]:_(s32) = G_EXTRACT [[R]](s64), 32
-; CHECK: [[IMPDEF2:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
-; CHECK: [[INS3:%[0-9]+]]:_(s64) = G_INSERT [[IMPDEF2]], [[EXT3]](s32), 0
-; CHECK: [[INS4:%[0-9]+]]:_(s64) = G_INSERT [[INS3]], [[EXT4]](s32), 32
-; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INS4]](s64)
-; CHECK: $r0 = COPY [[R0]](s32)
-; CHECK: $r1 = COPY [[R1]](s32)
+; CHECK: $r0 = COPY [[EXT3]](s32)
+; CHECK: $r1 = COPY [[EXT4]](s32)
; CHECK: BX_RET 14, $noreg, implicit $r0, implicit $r1
%r = notail call arm_aapcscc {i32, i32} @structs_target({i32, i32} %x)
ret {i32, i32} %r
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll b/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll
new file mode 100644
index 00000000000..3a025e6aa19
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll
@@ -0,0 +1,290 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -stop-after=irtranslator < %s -o - | FileCheck %s --check-prefix=ALL
+
+%struct.f1 = type { float }
+%struct.d1 = type { double }
+%struct.d2 = type { double, double }
+%struct.i1 = type { i32 }
+%struct.i2 = type { i32, i32 }
+%struct.i3 = type { i32, i32, i32 }
+%struct.i4 = type { i32, i32, i32, i32 }
+
+define float @test_return_f1(float %f.coerce) {
+ ; ALL-LABEL: name: test_return_f1
+ ; ALL: bb.1.entry:
+ ; ALL: liveins: $xmm0
+ ; ALL: [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
+ ; ALL: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s128)
+ ; ALL: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+ ; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
+ ; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.f
+ ; ALL: G_STORE [[TRUNC]](s32), [[FRAME_INDEX1]](p0) :: (store 4 into %ir.coerce.dive2)
+ ; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; ALL: $rdi = COPY [[FRAME_INDEX]](p0)
+ ; ALL: $rsi = COPY [[FRAME_INDEX1]](p0)
+ ; ALL: $rdx = COPY [[C]](s64)
+ ; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx
+ ; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; ALL: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %ir.coerce.dive13)
+ ; ALL: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD]](s32)
+ ; ALL: $xmm0 = COPY [[ANYEXT]](s128)
+ ; ALL: RET 0, implicit $xmm0
+entry:
+ %retval = alloca %struct.f1, align 4
+ %f = alloca %struct.f1, align 4
+ %coerce.dive = getelementptr inbounds %struct.f1, %struct.f1* %f, i32 0, i32 0
+ store float %f.coerce, float* %coerce.dive, align 4
+ %0 = bitcast %struct.f1* %retval to i8*
+ %1 = bitcast %struct.f1* %f to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 4, i1 false)
+ %coerce.dive1 = getelementptr inbounds %struct.f1, %struct.f1* %retval, i32 0, i32 0
+ %2 = load float, float* %coerce.dive1, align 4
+ ret float %2
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
+
+define double @test_return_d1(double %d.coerce) {
+ ; ALL-LABEL: name: test_return_d1
+ ; ALL: bb.1.entry:
+ ; ALL: liveins: $xmm0
+ ; ALL: [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
+ ; ALL: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY]](s128)
+ ; ALL: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+ ; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
+ ; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.d
+ ; ALL: G_STORE [[TRUNC]](s64), [[FRAME_INDEX1]](p0) :: (store 8 into %ir.coerce.dive2)
+ ; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; ALL: $rdi = COPY [[FRAME_INDEX]](p0)
+ ; ALL: $rsi = COPY [[FRAME_INDEX1]](p0)
+ ; ALL: $rdx = COPY [[C]](s64)
+ ; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx
+ ; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load 8 from %ir.coerce.dive13)
+ ; ALL: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD]](s64)
+ ; ALL: $xmm0 = COPY [[ANYEXT]](s128)
+ ; ALL: RET 0, implicit $xmm0
+entry:
+ %retval = alloca %struct.d1, align 8
+ %d = alloca %struct.d1, align 8
+ %coerce.dive = getelementptr inbounds %struct.d1, %struct.d1* %d, i32 0, i32 0
+ store double %d.coerce, double* %coerce.dive, align 8
+ %0 = bitcast %struct.d1* %retval to i8*
+ %1 = bitcast %struct.d1* %d to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %0, i8* align 8 %1, i64 8, i1 false)
+ %coerce.dive1 = getelementptr inbounds %struct.d1, %struct.d1* %retval, i32 0, i32 0
+ %2 = load double, double* %coerce.dive1, align 8
+ ret double %2
+}
+
+define { double, double } @test_return_d2(double %d.coerce0, double %d.coerce1) {
+ ; ALL-LABEL: name: test_return_d2
+ ; ALL: bb.1.entry:
+ ; ALL: liveins: $xmm0, $xmm1
+ ; ALL: [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
+ ; ALL: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY]](s128)
+ ; ALL: [[COPY1:%[0-9]+]]:_(s128) = COPY $xmm1
+ ; ALL: [[TRUNC1:%[0-9]+]]:_(s64) = G_TRUNC [[COPY1]](s128)
+ ; ALL: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+ ; ALL: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+ ; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
+ ; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.d
+ ; ALL: G_STORE [[TRUNC]](s64), [[FRAME_INDEX1]](p0) :: (store 8 into %ir.1)
+ ; ALL: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX1]], [[C]](s64)
+ ; ALL: G_STORE [[TRUNC1]](s64), [[GEP]](p0) :: (store 8 into %ir.2)
+ ; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; ALL: $rdi = COPY [[FRAME_INDEX]](p0)
+ ; ALL: $rsi = COPY [[FRAME_INDEX1]](p0)
+ ; ALL: $rdx = COPY [[C1]](s64)
+ ; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx
+ ; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load 8 from %ir.5)
+ ; ALL: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+ ; ALL: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX]], [[C2]](s64)
+ ; ALL: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p0) :: (load 8 from %ir.5 + 8)
+ ; ALL: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD]](s64)
+ ; ALL: $xmm0 = COPY [[ANYEXT]](s128)
+ ; ALL: [[ANYEXT1:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD1]](s64)
+ ; ALL: $xmm1 = COPY [[ANYEXT1]](s128)
+ ; ALL: RET 0, implicit $xmm0, implicit $xmm1
+entry:
+ %retval = alloca %struct.d2, align 8
+ %d = alloca %struct.d2, align 8
+ %0 = bitcast %struct.d2* %d to { double, double }*
+ %1 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 0
+ store double %d.coerce0, double* %1, align 8
+ %2 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 1
+ store double %d.coerce1, double* %2, align 8
+ %3 = bitcast %struct.d2* %retval to i8*
+ %4 = bitcast %struct.d2* %d to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %3, i8* align 8 %4, i64 16, i1 false)
+ %5 = bitcast %struct.d2* %retval to { double, double }*
+ %6 = load { double, double }, { double, double }* %5, align 8
+ ret { double, double } %6
+}
+
+define i32 @test_return_i1(i32 %i.coerce) {
+ ; ALL-LABEL: name: test_return_i1
+ ; ALL: bb.1.entry:
+ ; ALL: liveins: $edi
+ ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; ALL: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+ ; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
+ ; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.i
+ ; ALL: G_STORE [[COPY]](s32), [[FRAME_INDEX1]](p0) :: (store 4 into %ir.coerce.dive2)
+ ; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; ALL: $rdi = COPY [[FRAME_INDEX]](p0)
+ ; ALL: $rsi = COPY [[FRAME_INDEX1]](p0)
+ ; ALL: $rdx = COPY [[C]](s64)
+ ; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx
+ ; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; ALL: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %ir.coerce.dive13)
+ ; ALL: $eax = COPY [[LOAD]](s32)
+ ; ALL: RET 0, implicit $eax
+entry:
+ %retval = alloca %struct.i1, align 4
+ %i = alloca %struct.i1, align 4
+ %coerce.dive = getelementptr inbounds %struct.i1, %struct.i1* %i, i32 0, i32 0
+ store i32 %i.coerce, i32* %coerce.dive, align 4
+ %0 = bitcast %struct.i1* %retval to i8*
+ %1 = bitcast %struct.i1* %i to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 4, i1 false)
+ %coerce.dive1 = getelementptr inbounds %struct.i1, %struct.i1* %retval, i32 0, i32 0
+ %2 = load i32, i32* %coerce.dive1, align 4
+ ret i32 %2
+}
+
+define i64 @test_return_i2(i64 %i.coerce) {
+ ; ALL-LABEL: name: test_return_i2
+ ; ALL: bb.1.entry:
+ ; ALL: liveins: $rdi
+ ; ALL: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi
+ ; ALL: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+ ; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
+ ; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.i
+ ; ALL: G_STORE [[COPY]](s64), [[FRAME_INDEX1]](p0) :: (store 8 into %ir.0, align 4)
+ ; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; ALL: $rdi = COPY [[FRAME_INDEX]](p0)
+ ; ALL: $rsi = COPY [[FRAME_INDEX1]](p0)
+ ; ALL: $rdx = COPY [[C]](s64)
+ ; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx
+ ; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load 8 from %ir.3, align 4)
+ ; ALL: $rax = COPY [[LOAD]](s64)
+ ; ALL: RET 0, implicit $rax
+entry:
+ %retval = alloca %struct.i2, align 4
+ %i = alloca %struct.i2, align 4
+ %0 = bitcast %struct.i2* %i to i64*
+ store i64 %i.coerce, i64* %0, align 4
+ %1 = bitcast %struct.i2* %retval to i8*
+ %2 = bitcast %struct.i2* %i to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 %2, i64 8, i1 false)
+ %3 = bitcast %struct.i2* %retval to i64*
+ %4 = load i64, i64* %3, align 4
+ ret i64 %4
+}
+
+define { i64, i32 } @test_return_i3(i64 %i.coerce0, i32 %i.coerce1) {
+ ; ALL-LABEL: name: test_return_i3
+ ; ALL: bb.1.entry:
+ ; ALL: liveins: $esi, $rdi
+ ; ALL: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi
+ ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
+ ; ALL: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+ ; ALL: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
+ ; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
+ ; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.i
+ ; ALL: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.2.coerce
+ ; ALL: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.3.tmp
+ ; ALL: G_STORE [[COPY]](s64), [[FRAME_INDEX2]](p0) :: (store 8 into %ir.0, align 4)
+ ; ALL: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX2]], [[C]](s64)
+ ; ALL: G_STORE [[COPY1]](s32), [[GEP]](p0) :: (store 4 into %ir.1)
+ ; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; ALL: $rdi = COPY [[FRAME_INDEX1]](p0)
+ ; ALL: $rsi = COPY [[FRAME_INDEX2]](p0)
+ ; ALL: $rdx = COPY [[C1]](s64)
+ ; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx
+ ; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; ALL: $rdi = COPY [[FRAME_INDEX]](p0)
+ ; ALL: $rsi = COPY [[FRAME_INDEX1]](p0)
+ ; ALL: $rdx = COPY [[C1]](s64)
+ ; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx
+ ; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; ALL: $rdi = COPY [[FRAME_INDEX3]](p0)
+ ; ALL: $rsi = COPY [[FRAME_INDEX]](p0)
+ ; ALL: $rdx = COPY [[C1]](s64)
+ ; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx
+ ; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX3]](p0) :: (load 8 from %ir.tmp)
+ ; ALL: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+ ; ALL: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX3]], [[C2]](s64)
+ ; ALL: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load 4 from %ir.tmp + 8, align 8)
+ ; ALL: $rax = COPY [[LOAD]](s64)
+ ; ALL: $edx = COPY [[LOAD1]](s32)
+ ; ALL: RET 0, implicit $rax, implicit $edx
+entry:
+ %retval = alloca %struct.i3, align 4
+ %i = alloca %struct.i3, align 4
+ %coerce = alloca { i64, i32 }, align 4
+ %tmp = alloca { i64, i32 }, align 8
+ %0 = getelementptr inbounds { i64, i32 }, { i64, i32 }* %coerce, i32 0, i32 0
+ store i64 %i.coerce0, i64* %0, align 4
+ %1 = getelementptr inbounds { i64, i32 }, { i64, i32 }* %coerce, i32 0, i32 1
+ store i32 %i.coerce1, i32* %1, align 4
+ %2 = bitcast %struct.i3* %i to i8*
+ %3 = bitcast { i64, i32 }* %coerce to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %2, i8* align 4 %3, i64 12, i1 false)
+ %4 = bitcast %struct.i3* %retval to i8*
+ %5 = bitcast %struct.i3* %i to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %4, i8* align 4 %5, i64 12, i1 false)
+ %6 = bitcast { i64, i32 }* %tmp to i8*
+ %7 = bitcast %struct.i3* %retval to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %6, i8* align 4 %7, i64 12, i1 false)
+ %8 = load { i64, i32 }, { i64, i32 }* %tmp, align 8
+ ret { i64, i32 } %8
+}
+
+define { i64, i64 } @test_return_i4(i64 %i.coerce0, i64 %i.coerce1) {
+ ; ALL-LABEL: name: test_return_i4
+ ; ALL: bb.1.entry:
+ ; ALL: liveins: $rdi, $rsi
+ ; ALL: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi
+ ; ALL: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi
+ ; ALL: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+ ; ALL: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+ ; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
+ ; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.i
+ ; ALL: G_STORE [[COPY]](s64), [[FRAME_INDEX1]](p0) :: (store 8 into %ir.1, align 4)
+ ; ALL: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX1]], [[C]](s64)
+ ; ALL: G_STORE [[COPY1]](s64), [[GEP]](p0) :: (store 8 into %ir.2, align 4)
+ ; ALL: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; ALL: $rdi = COPY [[FRAME_INDEX]](p0)
+ ; ALL: $rsi = COPY [[FRAME_INDEX1]](p0)
+ ; ALL: $rdx = COPY [[C1]](s64)
+ ; ALL: CALL64pcrel32 &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx
+ ; ALL: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load 8 from %ir.5, align 4)
+ ; ALL: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+ ; ALL: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX]], [[C2]](s64)
+ ; ALL: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p0) :: (load 8 from %ir.5 + 8, align 4)
+ ; ALL: $rax = COPY [[LOAD]](s64)
+ ; ALL: $rdx = COPY [[LOAD1]](s64)
+ ; ALL: RET 0, implicit $rax, implicit $rdx
+entry:
+ %retval = alloca %struct.i4, align 4
+ %i = alloca %struct.i4, align 4
+ %0 = bitcast %struct.i4* %i to { i64, i64 }*
+ %1 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 0
+ store i64 %i.coerce0, i64* %1, align 4
+ %2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 1
+ store i64 %i.coerce1, i64* %2, align 4
+ %3 = bitcast %struct.i4* %retval to i8*
+ %4 = bitcast %struct.i4* %i to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %3, i8* align 4 %4, i64 16, i1 false)
+ %5 = bitcast %struct.i4* %retval to { i64, i64 }*
+ %6 = load { i64, i64 }, { i64, i64 }* %5, align 4
+ ret { i64, i64 } %6
+}
OpenPOWER on IntegriCloud