| author | Tim Northover <tnorthover@apple.com> | 2016-07-22 22:13:36 +0000 |
|---|---|---|
| committer | Tim Northover <tnorthover@apple.com> | 2016-07-22 22:13:36 +0000 |
| commit | 98a56eb7f43e220a1f7a5d4dc1f9c32bf5a353ae (patch) | |
| tree | fa6c4a1b442bd6377566abc8570a160bf817320b /llvm/test/CodeGen | |
| parent | e3a032a7408ccff451cb5c081511797e05dca162 (diff) | |
GlobalISel: allow multiple types on MachineInstrs.
llvm-svn: 276481
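The visible effect on the MIR printer and parser is syntactic: the single type that used to follow a generic opcode is now printed inside braces, which leaves room for an instruction to carry more than one type. A minimal before/after sketch, using the `G_ADD` lines from the X86 test in the diff below (the register names and sizes are just the ones that test happens to use):

```llvm
; Before this commit: one bare type token after the generic opcode.
%0(32) = G_ADD s32 %edi, %edi

; After this commit: the type is written as a brace-enclosed list, so a
; generic MachineInstr can later carry several types.
%0(32) = G_ADD { s32 } %edi, %edi
```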
Diffstat (limited to 'llvm/test/CodeGen')
5 files changed, 52 insertions, 52 deletions
```diff
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
index e02d46308d3..032a527e3e7 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -9,7 +9,7 @@ target triple = "aarch64-apple-ios"
 ; CHECK: name: addi64
 ; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
 ; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = COPY %x1
-; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_ADD s64 [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_ADD { s64 } [[ARG1]], [[ARG2]]
 ; CHECK-NEXT: %x0 = COPY [[RES]]
 ; CHECK-NEXT: RET_ReallyLR implicit %x0
 define i64 @addi64(i64 %arg1, i64 %arg2) {
@@ -23,9 +23,9 @@ define i64 @addi64(i64 %arg1, i64 %arg2) {
 ; CHECK-NEXT: - { id: 0, name: ptr1, offset: 0, size: 8, alignment: 8 }
 ; CHECK-NEXT: - { id: 1, name: ptr2, offset: 0, size: 8, alignment: 1 }
 ; CHECK-NEXT: - { id: 2, name: ptr3, offset: 0, size: 128, alignment: 8 }
-; CHECK: %{{[0-9]+}}(64) = G_FRAME_INDEX p0 0
-; CHECK: %{{[0-9]+}}(64) = G_FRAME_INDEX p0 1
-; CHECK: %{{[0-9]+}}(64) = G_FRAME_INDEX p0 2
+; CHECK: %{{[0-9]+}}(64) = G_FRAME_INDEX { p0 } 0
+; CHECK: %{{[0-9]+}}(64) = G_FRAME_INDEX { p0 } 1
+; CHECK: %{{[0-9]+}}(64) = G_FRAME_INDEX { p0 } 2
 define void @allocai64() {
   %ptr1 = alloca i64
   %ptr2 = alloca i64, align 1
@@ -44,7 +44,7 @@ define void @allocai64() {
 ; CHECK-NEXT: successors: %[[END:[0-9a-zA-Z._-]+]]({{0x[a-f0-9]+ / 0x[a-f0-9]+}} = 100.00%)
 ;
 ; Check that we emit the correct branch.
-; CHECK: G_BR unsized %[[END]]
+; CHECK: G_BR { unsized } %[[END]]
 ;
 ; Check that end contains the return instruction.
 ; CHECK: [[END]]:
@@ -59,7 +59,7 @@ end:
 ; CHECK: name: ori64
 ; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
 ; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = COPY %x1
-; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_OR s64 [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_OR { s64 } [[ARG1]], [[ARG2]]
 ; CHECK-NEXT: %x0 = COPY [[RES]]
 ; CHECK-NEXT: RET_ReallyLR implicit %x0
 define i64 @ori64(i64 %arg1, i64 %arg2) {
@@ -70,7 +70,7 @@ define i64 @ori64(i64 %arg1, i64 %arg2) {
 ; CHECK: name: ori32
 ; CHECK: [[ARG1:%[0-9]+]](32) = COPY %w0
 ; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %w1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_OR s32 [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_OR { s32 } [[ARG1]], [[ARG2]]
 ; CHECK-NEXT: %w0 = COPY [[RES]]
 ; CHECK-NEXT: RET_ReallyLR implicit %w0
 define i32 @ori32(i32 %arg1, i32 %arg2) {
@@ -82,7 +82,7 @@ define i32 @ori32(i32 %arg1, i32 %arg2) {
 ; CHECK: name: andi64
 ; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
 ; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = COPY %x1
-; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_AND s64 [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_AND { s64 } [[ARG1]], [[ARG2]]
 ; CHECK-NEXT: %x0 = COPY [[RES]]
 ; CHECK-NEXT: RET_ReallyLR implicit %x0
 define i64 @andi64(i64 %arg1, i64 %arg2) {
@@ -93,7 +93,7 @@ define i64 @andi64(i64 %arg1, i64 %arg2) {
 ; CHECK: name: andi32
 ; CHECK: [[ARG1:%[0-9]+]](32) = COPY %w0
 ; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %w1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_AND s32 [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_AND { s32 } [[ARG1]], [[ARG2]]
 ; CHECK-NEXT: %w0 = COPY [[RES]]
 ; CHECK-NEXT: RET_ReallyLR implicit %w0
 define i32 @andi32(i32 %arg1, i32 %arg2) {
@@ -105,7 +105,7 @@ define i32 @andi32(i32 %arg1, i32 %arg2) {
 ; CHECK: name: subi64
 ; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
 ; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = COPY %x1
-; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_SUB s64 [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_SUB { s64 } [[ARG1]], [[ARG2]]
 ; CHECK-NEXT: %x0 = COPY [[RES]]
 ; CHECK-NEXT: RET_ReallyLR implicit %x0
 define i64 @subi64(i64 %arg1, i64 %arg2) {
@@ -116,7 +116,7 @@ define i64 @subi64(i64 %arg1, i64 %arg2) {
 ; CHECK: name: subi32
 ; CHECK: [[ARG1:%[0-9]+]](32) = COPY %w0
 ; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %w1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_SUB s32 [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_SUB { s32 } [[ARG1]], [[ARG2]]
 ; CHECK-NEXT: %w0 = COPY [[RES]]
 ; CHECK-NEXT: RET_ReallyLR implicit %w0
 define i32 @subi32(i32 %arg1, i32 %arg2) {
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir
index 6eccd085858..8ec8a9f964a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir
@@ -68,8 +68,8 @@ registers:
 body: |
   bb.0.entry:
     liveins: %x0
-    ; CHECK: %0(32) = G_ADD s32 %w0
-    %0(32) = G_ADD s32 %w0, %w0
+    ; CHECK: %0(32) = G_ADD { s32 } %w0
+    %0(32) = G_ADD { s32 } %w0, %w0
 ...

 ---
@@ -85,8 +85,8 @@ registers:
 body: |
   bb.0.entry:
     liveins: %d0
-    ; CHECK: %0(64) = G_ADD <2 x s32> %d0
-    %0(64) = G_ADD <2 x s32> %d0, %d0
+    ; CHECK: %0(64) = G_ADD { <2 x s32> } %d0
+    %0(64) = G_ADD { <2 x s32> } %d0, %d0
 ...

 ---
@@ -107,9 +107,9 @@ body: |
     liveins: %s0, %x0
     ; CHECK: %0(32) = COPY %s0
     ; CHECK-NEXT: %2(32) = COPY %0
-    ; CHECK-NEXT: %1(32) = G_ADD s32 %2, %w0
+    ; CHECK-NEXT: %1(32) = G_ADD { s32 } %2, %w0
     %0(32) = COPY %s0
-    %1(32) = G_ADD s32 %0, %w0
+    %1(32) = G_ADD { s32 } %0, %w0
 ...

 # Check that we repair the assignment for %0 differently for both uses.
@@ -129,9 +129,9 @@ body: |
     ; CHECK: %0(32) = COPY %s0
     ; CHECK-NEXT: %2(32) = COPY %0
     ; CHECK-NEXT: %3(32) = COPY %0
-    ; CHECK-NEXT: %1(32) = G_ADD s32 %2, %3
+    ; CHECK-NEXT: %1(32) = G_ADD { s32 } %2, %3
     %0(32) = COPY %s0
-    %1(32) = G_ADD s32 %0, %0
+    %1(32) = G_ADD { s32 } %0, %0
 ...

 ---
@@ -152,10 +152,10 @@ body: |
   bb.0.entry:
     liveins: %w0
     ; CHECK: %0(32) = COPY %w0
-    ; CHECK-NEXT: %2(32) = G_ADD s32 %0, %w0
+    ; CHECK-NEXT: %2(32) = G_ADD { s32 } %0, %w0
     ; CHECK-NEXT: %1(32) = COPY %2
     %0(32) = COPY %w0
-    %1(32) = G_ADD s32 %0, %w0
+    %1(32) = G_ADD { s32 } %0, %w0
 ...

 ---
@@ -187,7 +187,7 @@ body: |

   bb.1.then:
     successors: %bb.2.end
-    %3(32) = G_ADD s32 %0, %0
+    %3(32) = G_ADD { s32 } %0, %0

   bb.2.end:
     %4(32) = PHI %0, %bb.0.entry, %3, %bb.1.then
@@ -211,9 +211,9 @@ body: |
     liveins: %w0, %s0
     ; CHECK: %0(32) = COPY %w0
     ; CHECK-NEXT: %2(32) = COPY %s0
-    ; CHECK-NEXT: %1(32) = G_ADD s32 %0, %2
+    ; CHECK-NEXT: %1(32) = G_ADD { s32 } %0, %2
     %0(32) = COPY %w0
-    %1(32) = G_ADD s32 %0, %s0
+    %1(32) = G_ADD { s32 } %0, %s0
 ...

 ---
@@ -229,10 +229,10 @@ body: |
   bb.0.entry:
     liveins: %w0
     ; CHECK: %0(32) = COPY %w0
-    ; CHECK-NEXT: %1(32) = G_ADD s32 %0, %0
+    ; CHECK-NEXT: %1(32) = G_ADD { s32 } %0, %0
     ; CHECK-NEXT: %s0 = COPY %1
     %0(32) = COPY %w0
-    %s0 = G_ADD s32 %0, %0
+    %s0 = G_ADD { s32 } %0, %0
 ...

 ---
@@ -271,13 +271,13 @@ body: |
     ; FAST-NEXT: %3(64) = COPY %0
     ; FAST-NEXT: %4(64) = COPY %1
     ; The mapping of G_OR is on FPR.
-    ; FAST-NEXT: %2(64) = G_OR <2 x s32> %3, %4
+    ; FAST-NEXT: %2(64) = G_OR { <2 x s32> } %3, %4
     ; Greedy mode remapped the instruction on the GPR bank.
-    ; GREEDY-NEXT: %2(64) = G_OR <2 x s32> %0, %1
+    ; GREEDY-NEXT: %2(64) = G_OR { <2 x s32> } %0, %1

     %0(64) = COPY %x0
     %1(64) = COPY %x1
-    %2(64) = G_OR <2 x s32> %0, %1
+    %2(64) = G_OR { <2 x s32> } %0, %1
 ...

 ---
@@ -317,13 +317,13 @@ body: |
     ; FAST-NEXT: %3(64) = COPY %0
     ; FAST-NEXT: %4(64) = COPY %1
     ; The mapping of G_OR is on FPR.
-    ; FAST-NEXT: %2(64) = G_OR <2 x s32> %3, %4
+    ; FAST-NEXT: %2(64) = G_OR { <2 x s32> } %3, %4
     ; Greedy mode remapped the instruction on the GPR bank.
-    ; GREEDY-NEXT: %3(64) = G_OR <2 x s32> %0, %1
+    ; GREEDY-NEXT: %3(64) = G_OR { <2 x s32> } %0, %1
     ; We need to keep %2 into FPR because we do not know anything about it.
     ; GREEDY-NEXT: %2(64) = COPY %3

     %0(64) = COPY %x0
     %1(64) = COPY %x1
-    %2(64) = G_OR <2 x s32> %0, %1
+    %2(64) = G_OR { <2 x s32> } %0, %1
 ...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-add.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-add.mir
index cf777eea604..928a4951799 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-add.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-add.mir
@@ -21,14 +21,14 @@ body: |
   bb.0.entry:
     liveins: %q0, %q1, %q2, %q3
     ; CHECK-LABEL: name: test_vector_add
-    ; CHECK-DAG: [[LHS_LO:%.*]](128), [[LHS_HI:%.*]](128) = G_EXTRACT <2 x s64> %0, 0, 128
-    ; CHECK-DAG: [[RHS_LO:%.*]](128), [[RHS_HI:%.*]](128) = G_EXTRACT <2 x s64> %1, 0, 128
-    ; CHECK: [[RES_LO:%.*]](128) = G_ADD <2 x s64> [[LHS_LO]], [[RHS_LO]]
-    ; CHECK: [[RES_HI:%.*]](128) = G_ADD <2 x s64> [[LHS_HI]], [[RHS_HI]]
-    ; CHECK: %2(256) = G_SEQUENCE <4 x s64> [[RES_LO]], [[RES_HI]]
+    ; CHECK-DAG: [[LHS_LO:%.*]](128), [[LHS_HI:%.*]](128) = G_EXTRACT { <2 x s64> } %0, 0, 128
+    ; CHECK-DAG: [[RHS_LO:%.*]](128), [[RHS_HI:%.*]](128) = G_EXTRACT { <2 x s64> } %1, 0, 128
+    ; CHECK: [[RES_LO:%.*]](128) = G_ADD { <2 x s64> } [[LHS_LO]], [[RHS_LO]]
+    ; CHECK: [[RES_HI:%.*]](128) = G_ADD { <2 x s64> } [[LHS_HI]], [[RHS_HI]]
+    ; CHECK: %2(256) = G_SEQUENCE { <4 x s64> } [[RES_LO]], [[RES_HI]]

-    %0(256) = G_SEQUENCE <4 x s64> %q0, %q1
-    %1(256) = G_SEQUENCE <4 x s64> %q2, %q3
-    %2(256) = G_ADD <4 x s64> %0, %1
-    %q0, %q1 = G_EXTRACT <2 x s64> %2, 0, 128
+    %0(256) = G_SEQUENCE { <4 x s64> } %q0, %q1
+    %1(256) = G_SEQUENCE { <4 x s64> } %q2, %q3
+    %2(256) = G_ADD { <4 x s64> } %0, %1
+    %q0, %q1 = G_EXTRACT { <2 x s64> } %2, 0, 128
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/amdgpu-irtranslator.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/amdgpu-irtranslator.ll
index b03f030f44f..2a948864a98 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/amdgpu-irtranslator.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/amdgpu-irtranslator.ll
@@ -5,7 +5,7 @@

 ; Tests for add.
 ; CHECK: name: addi32
-; CHECK: G_ADD s32
+; CHECK: G_ADD { s32 }
 define i32 @addi32(i32 %arg1, i32 %arg2) {
   %res = add i32 %arg1, %arg2
   ret i32 %res
diff --git a/llvm/test/CodeGen/MIR/X86/generic-virtual-registers.mir b/llvm/test/CodeGen/MIR/X86/generic-virtual-registers.mir
index 09dc3ae3476..fb2fbea0d9a 100644
--- a/llvm/test/CodeGen/MIR/X86/generic-virtual-registers.mir
+++ b/llvm/test/CodeGen/MIR/X86/generic-virtual-registers.mir
@@ -33,16 +33,16 @@ registers:
 body: |
   bb.0.entry:
     liveins: %edi
-    ; CHECK: %0(32) = G_ADD s32 %edi
-    %0(32) = G_ADD s32 %edi, %edi
-    ; CHECK: %1(64) = G_ADD <2 x s32> %edi
-    %1(64) = G_ADD <2 x s32> %edi, %edi
-    ; CHECK: %2(64) = G_ADD s64 %edi
-    %2(64) = G_ADD s64 %edi, %edi
+    ; CHECK: %0(32) = G_ADD { s32 } %edi
+    %0(32) = G_ADD { s32 } %edi, %edi
+    ; CHECK: %1(64) = G_ADD { <2 x s32> } %edi
+    %1(64) = G_ADD { <2 x s32> } %edi, %edi
+    ; CHECK: %2(64) = G_ADD { s64 } %edi
+    %2(64) = G_ADD { s64 } %edi, %edi
     ; G_ADD is actually not a valid operand for structure type,
     ; but that is the only one we have for now for testing.
-    ; CHECK: %3(64) = G_ADD s64 %edi
-    %3(64) = G_ADD s64 %edi, %edi
-    ; CHECK: %4(48) = G_ADD s48 %edi
-    %4(48) = G_ADD s48 %edi, %edi
+    ; CHECK: %3(64) = G_ADD { s64 } %edi
+    %3(64) = G_ADD { s64 } %edi, %edi
+    ; CHECK: %4(48) = G_ADD { s48 } %edi
+    %4(48) = G_ADD { s48 } %edi, %edi
 ...
```

