author    Matthias Braun <matze@braunis.de>  2016-11-11 01:34:21 +0000
committer Matthias Braun <matze@braunis.de>  2016-11-11 01:34:21 +0000
commit    325cd2c98ab64b8dbee7f493818ba01e0143d34b (patch)
tree      d9e65626f00ca77244351153fbe4b3d3a68408c8 /llvm/test/CodeGen
parent    a89d8ff0adc2782c53301733e308aa5954d7eff3 (diff)
ScheduleDAGInstrs: Add condjump deps to addSchedBarrierDeps()
addSchedBarrierDeps() is supposed to add use operands to the ExitSU node.
The current implementation adds uses for call/barrier instructions, and the
MBB live-outs in all other cases, but the use operands of conditional jump
instructions were missed.

Also added code to macrofusion to set the latencies between nodes to zero,
to avoid problems with the fused nodes lingering around in the pending list.

Differential Revision: https://reviews.llvm.org/D25140

llvm-svn: 286544
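The scheduling issue is easiest to see in miniature. The sketch below is a
toy model, not LLVM's actual ScheduleDAGInstrs code: the Inst struct, the
instruction list, and the lastDef map are all invented for illustration. It
shows why the exit node of a scheduling region must record the register uses
of a terminating conditional branch — without that dependence, nothing stops
the scheduler from moving the producer of a branch operand past the branch
that consumes it, which is the reordering the updated CHECK lines below pin
down.

    // Toy model (hypothetical names, not LLVM's classes): a scheduling
    // region ends in a conditional branch; the exit node must depend on
    // the defs of the branch's use registers.
    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    struct Inst {
      std::string name;
      std::vector<std::string> defs, uses;
      bool isCondBranch = false;
    };

    int main() {
      // Mirrors the AArch64 loops below: sub defines "iv", cbnz uses it.
      std::vector<Inst> region = {
          {"bl",   {"w0"},  {}},
          {"add",  {"sum"}, {"w0", "sum"}},
          {"sub",  {"iv"},  {"iv"}},
          {"cbnz", {},      {"iv"}, /*isCondBranch=*/true},
      };

      // Last def seen for each register while walking the region in order.
      std::map<std::string, size_t> lastDef;
      std::vector<std::pair<size_t, std::string>> exitDeps; // (producer, reg)

      for (size_t i = 0; i < region.size(); ++i) {
        const Inst &I = region[i];
        // The fix in spirit: for a conditional branch ending the region,
        // record each of its register uses as a dependence of the exit
        // node, so the producing instruction cannot be scheduled past it.
        if (I.isCondBranch)
          for (const std::string &Reg : I.uses)
            if (auto It = lastDef.find(Reg); It != lastDef.end())
              exitDeps.push_back({It->second, Reg});
        for (const std::string &Reg : I.defs)
          lastDef[Reg] = i;
      }

      for (auto &[Producer, Reg] : exitDeps)
        std::cout << "ExitSU depends on " << region[Producer].name
                  << " (reg " << Reg << ")\n";   // -> sub (reg iv)
    }

In the real patch, addSchedBarrierDeps() walks the use operands of the exit
instruction and registers them on ExitSU; the macrofusion change
additionally zeroes the latency between fused nodes. The test updates below
are the fallout: compares and branch-operand producers now stay adjacent to
the branches that consume them, so many CHECK/CHECK-NEXT lines swap order.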
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll  | 28
-rw-r--r--  llvm/test/CodeGen/AArch64/misched-fusion.ll         |  2
-rw-r--r--  llvm/test/CodeGen/AArch64/neg-imm.ll                |  6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/branch-relaxation.ll       |  6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/cndmask-no-def-vcc.ll      |  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll  |  8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/skip-if-dead.ll            |  1
-rw-r--r--  llvm/test/CodeGen/AMDGPU/wqm.ll                     |  2
-rw-r--r--  llvm/test/CodeGen/ARM/arm-shrink-wrapping.ll        |  1
-rw-r--r--  llvm/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll    |  6
-rw-r--r--  llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll     |  2
-rw-r--r--  llvm/test/CodeGen/SystemZ/int-cmp-48.ll             |  2
12 files changed, 41 insertions(+), 27 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll b/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
index e39fae6d578..255cd8e4a0d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
@@ -78,8 +78,8 @@ declare i32 @doSomething(i32, i32*)
; Next BB.
; CHECK: [[LOOP:LBB[0-9_]+]]: ; %for.body
; CHECK: bl _something
-; CHECK-NEXT: add [[SUM]], w0, [[SUM]]
; CHECK-NEXT: sub [[IV]], [[IV]], #1
+; CHECK-NEXT: add [[SUM]], w0, [[SUM]]
; CHECK-NEXT: cbnz [[IV]], [[LOOP]]
;
; Next BB.
@@ -144,8 +144,8 @@ declare i32 @something(...)
; Next BB.
; CHECK: [[LOOP_LABEL:LBB[0-9_]+]]: ; %for.body
; CHECK: bl _something
-; CHECK-NEXT: add [[SUM]], w0, [[SUM]]
; CHECK-NEXT: sub [[IV]], [[IV]], #1
+; CHECK-NEXT: add [[SUM]], w0, [[SUM]]
; CHECK-NEXT: cbnz [[IV]], [[LOOP_LABEL]]
; Next BB.
; CHECK: ; %for.end
@@ -188,8 +188,8 @@ for.end: ; preds = %for.body
;
; CHECK: [[LOOP_LABEL:LBB[0-9_]+]]: ; %for.body
; CHECK: bl _something
-; CHECK-NEXT: add [[SUM]], w0, [[SUM]]
; CHECK-NEXT: sub [[IV]], [[IV]], #1
+; CHECK-NEXT: add [[SUM]], w0, [[SUM]]
; CHECK-NEXT: cbnz [[IV]], [[LOOP_LABEL]]
; Next BB.
; CHECK: bl _somethingElse
@@ -259,8 +259,8 @@ declare void @somethingElse(...)
;
; CHECK: [[LOOP_LABEL:LBB[0-9_]+]]: ; %for.body
; CHECK: bl _something
-; CHECK-NEXT: add [[SUM]], w0, [[SUM]]
; CHECK-NEXT: sub [[IV]], [[IV]], #1
+; CHECK-NEXT: add [[SUM]], w0, [[SUM]]
; CHECK-NEXT: cbnz [[IV]], [[LOOP_LABEL]]
; Next BB.
; CHECK: lsl w0, [[SUM]], #3
@@ -333,32 +333,32 @@ entry:
;
; Sum is merged with the returned register.
; CHECK: add [[VA_BASE:x[0-9]+]], sp, #16
-; CHECK-NEXT: str [[VA_BASE]], [sp, #8]
; CHECK-NEXT: cmp w1, #1
+; CHECK-NEXT: str [[VA_BASE]], [sp, #8]
+; CHECK-NEXT: mov [[SUM:w0]], wzr
; CHECK-NEXT: b.lt [[IFEND_LABEL:LBB[0-9_]+]]
-; CHECK: mov [[SUM:w0]], wzr
;
; CHECK: [[LOOP_LABEL:LBB[0-9_]+]]: ; %for.body
; CHECK: ldr [[VA_ADDR:x[0-9]+]], [sp, #8]
; CHECK-NEXT: add [[NEXT_VA_ADDR:x[0-9]+]], [[VA_ADDR]], #8
; CHECK-NEXT: str [[NEXT_VA_ADDR]], [sp, #8]
; CHECK-NEXT: ldr [[VA_VAL:w[0-9]+]], {{\[}}[[VA_ADDR]]]
-; CHECK-NEXT: add [[SUM]], [[SUM]], [[VA_VAL]]
; CHECK-NEXT: sub w1, w1, #1
+; CHECK-NEXT: add [[SUM]], [[SUM]], [[VA_VAL]]
; CHECK-NEXT: cbnz w1, [[LOOP_LABEL]]
+; DISABLE-NEXT: b [[IFEND_LABEL]]
;
-; DISABLE-NEXT: b
; DISABLE: [[ELSE_LABEL]]: ; %if.else
; DISABLE: lsl w0, w1, #1
;
-; ENABLE: [[ELSE_LABEL]]: ; %if.else
-; ENABLE: lsl w0, w1, #1
-; ENABLE-NEXT: ret
-;
; CHECK: [[IFEND_LABEL]]:
; Epilogue code.
; CHECK: add sp, sp, #16
; CHECK-NEXT: ret
+;
+; ENABLE: [[ELSE_LABEL]]: ; %if.else
+; ENABLE-NEXT: lsl w0, w1, #1
+; ENABLE-NEXT: ret
define i32 @variadicFunc(i32 %cond, i32 %count, ...) #0 {
entry:
%ap = alloca i8*, align 8
@@ -413,9 +413,9 @@ declare void @llvm.va_end(i8*)
;
; CHECK: [[LOOP_LABEL:LBB[0-9_]+]]: ; %for.body
; Inline asm statement.
-; CHECK: add x19, x19, #1
; CHECK: sub [[IV]], [[IV]], #1
-; CHECK-NEXT: cbnz [[IV]], [[LOOP_LABEL]]
+; CHECK: add x19, x19, #1
+; CHECK: cbnz [[IV]], [[LOOP_LABEL]]
; Next BB.
; CHECK: mov w0, wzr
; Epilogue code.
diff --git a/llvm/test/CodeGen/AArch64/misched-fusion.ll b/llvm/test/CodeGen/AArch64/misched-fusion.ll
index d5140c62f66..d5dd9c757df 100644
--- a/llvm/test/CodeGen/AArch64/misched-fusion.ll
+++ b/llvm/test/CodeGen/AArch64/misched-fusion.ll
@@ -1,4 +1,4 @@
-; RUN: llc -o - %s -mattr=+arith-cbz-fusion,+use-postra-scheduler | FileCheck %s
+; RUN: llc -o - %s -mattr=+arith-cbz-fusion | FileCheck %s
; RUN: llc -o - %s -mcpu=cyclone | FileCheck %s
target triple = "arm64-apple-ios"
diff --git a/llvm/test/CodeGen/AArch64/neg-imm.ll b/llvm/test/CodeGen/AArch64/neg-imm.ll
index 375d3dbfd0d..46bded78cc5 100644
--- a/llvm/test/CodeGen/AArch64/neg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/neg-imm.ll
@@ -30,9 +30,9 @@ if.then3:
for.inc:
; CHECK_LABEL: %for.inc
-; CHECK: add
-; CHECK-NEXT: cmp
-; CHECK: b.le
+; CHECK: cmp
+; CHECK-NEXT: add
+; CHECK-NEXT: b.le
; CHECK_LABEL: %for.cond.cleanup
%inc = add nsw i32 %x.015, 1
%cmp1 = icmp sgt i32 %x.015, %px
diff --git a/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll b/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
index c298911504a..e12a0b798ee 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
@@ -171,6 +171,7 @@ bb3:
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: v_add_i32_e32 [[INC:v[0-9]+]], vcc, 1, [[LOOPIDX]]
; GCN-NEXT: v_cmp_gt_i32_e32 vcc, 10, [[INC]]
+; GCN-NEXT: s_and_b64 vcc, exec, vcc
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
@@ -178,7 +179,6 @@ bb3:
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND
-; GCN-NEXT: s_and_b64 vcc, exec, vcc
; GCN-NEXT: s_cbranch_vccz [[ENDBB:BB[0-9]+_[0-9]+]]
; GCN-NEXT: [[LONG_JUMP:BB[0-9]+_[0-9]+]]: ; %bb2
@@ -426,6 +426,8 @@ endif:
; GCN-NEXT: s_setpc_b64 vcc
; GCN-NEXT: [[LOOP_BODY]]: ; %loop_body
+; GCN: s_mov_b64 vcc, -1{{$}}
+; GCN: ;;#ASMSTART
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
@@ -433,7 +435,6 @@ endif:
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: ;;#ASMEND
-; GCN-NEXT: s_mov_b64 vcc, -1{{$}}
; GCN-NEXT: s_cbranch_vccz [[RET]]
; GCN-NEXT: [[LONGBB:BB[0-9]+_[0-9]+]]: ; %loop_body
@@ -493,6 +494,7 @@ ret:
; GCN: [[LONG_BR_DEST0]]
; GCN: s_cmp_eq_u32
+; GCN-NEXT: ; implicit-def
; GCN-NEXT: s_cbranch_scc0
; GCN: s_setpc_b64
diff --git a/llvm/test/CodeGen/AMDGPU/cndmask-no-def-vcc.ll b/llvm/test/CodeGen/AMDGPU/cndmask-no-def-vcc.ll
index bea565e9fb4..9b4b61cf728 100644
--- a/llvm/test/CodeGen/AMDGPU/cndmask-no-def-vcc.ll
+++ b/llvm/test/CodeGen/AMDGPU/cndmask-no-def-vcc.ll
@@ -7,7 +7,7 @@ declare i1 @llvm.amdgcn.class.f32(float, i32)
; GCN-LABEL: {{^}}vcc_shrink_vcc_def:
; GCN: v_cmp_eq_u32_e64 vcc, s{{[0-9]+}}, 0{{$}}
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}, vcc
-; GCN: v_cndmask_b32_e64 v1, 0, 1, s{{\[[0-9]+:[0-9]+\]}}
+; GCN: v_cndmask_b32_e64 v0, 0, 1, s{{\[[0-9]+:[0-9]+\]}}
define void @vcc_shrink_vcc_def(float %arg, i32 %arg1, float %arg2, i32 %arg3) {
bb0:
%tmp = icmp sgt i32 %arg1, 4
@@ -34,7 +34,7 @@ bb2:
; GCN-LABEL: {{^}}preserve_condition_undef_flag:
; GCN-NOT: vcc
; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}, vcc
-; GCN: v_cndmask_b32_e64 v1, 0, 1, s{{\[[0-9]+:[0-9]+\]}}
+; GCN: v_cndmask_b32_e64 v0, 0, 1, s{{\[[0-9]+:[0-9]+\]}}
define void @preserve_condition_undef_flag(float %arg, i32 %arg1, float %arg2) {
bb0:
%tmp = icmp sgt i32 %arg1, 4
diff --git a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
index dd7c9d35820..3a933306c64 100644
--- a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
+++ b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
@@ -78,6 +78,8 @@ entry:
; MOVREL: v_movrels_b32_e32 v{{[0-9]}}, v0
; IDXMODE: s_addk_i32 [[ADD_IDX:s[0-9]+]], 0xfe00{{$}}
+; IDXMODE: v_mov_b32_e32 v2, 2
+; IDXMODE: v_mov_b32_e32 v3, 3
; IDXMODE-NEXT: s_set_gpr_idx_on [[ADD_IDX]], src0{{$}}
; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; IDXMODE-NEXT: s_set_gpr_idx_off
@@ -95,6 +97,10 @@ entry:
; MOVREL: v_movrels_b32_e32 v{{[0-9]}}, v0
; IDXMODE: s_addk_i32 [[ADD_IDX:s[0-9]+]], 0xfe00{{$}}
+; IDXMODE: v_mov_b32_e32 v0,
+; IDXMODE: v_mov_b32_e32 v1,
+; IDXMODE: v_mov_b32_e32 v2,
+; IDXMODE: v_mov_b32_e32 v3,
; IDXMODE-NEXT: s_set_gpr_idx_on [[ADD_IDX]], src0{{$}}
; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; IDXMODE-NEXT: s_set_gpr_idx_off
@@ -572,12 +578,12 @@ bb7: ; preds = %bb4, %bb1
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a80000
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41b00000
; GCN-DAG: s_load_dword [[ARG:s[0-9]+]]
+; IDXMODE-DAG: s_add_i32 [[ARG_ADD:s[0-9]+]], [[ARG]], -16
; MOVREL-DAG: s_add_i32 m0, [[ARG]], -16
; MOVREL: v_movreld_b32_e32 v[[VEC0_ELT0]], 4.0
; GCN-NOT: m0
-; IDXMODE-DAG: s_add_i32 [[ARG_ADD:s[0-9]+]], [[ARG]], -16
; IDXMODE: s_set_gpr_idx_on [[ARG_ADD]], dst
; IDXMODE: v_mov_b32_e32 v[[VEC0_ELT0]], 4.0
; IDXMODE: s_set_gpr_idx_off
diff --git a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
index 3ab0ee15e3d..ee344b277c0 100644
--- a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
+++ b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
@@ -138,6 +138,7 @@ exit:
; CHECK-LABEL: {{^}}test_kill_control_flow_remainder:
; CHECK: s_cmp_lg_u32 s{{[0-9]+}}, 0
+; CHECK-NEXT: v_mov_b32_e32 v{{[0-9]+}}, 0
; CHECK-NEXT: s_cbranch_scc1 [[RETURN_BB:BB[0-9]+_[0-9]+]]
; CHECK-NEXT: ; BB#1: ; %bb
diff --git a/llvm/test/CodeGen/AMDGPU/wqm.ll b/llvm/test/CodeGen/AMDGPU/wqm.ll
index 4def7357efe..3f7b2b284c5 100644
--- a/llvm/test/CodeGen/AMDGPU/wqm.ll
+++ b/llvm/test/CodeGen/AMDGPU/wqm.ll
@@ -213,8 +213,8 @@ END:
;CHECK: image_sample
;CHECK: s_and_b64 exec, exec, [[ORIG]]
;CHECK: image_sample
-;CHECK: store
;CHECK: v_cmp
+;CHECK: store
define amdgpu_ps float @test_control_flow_3(<8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, i32 %idx, i32 %coord) {
main_body:
%tex = call <4 x float> @llvm.SI.image.sample.i32(i32 %coord, <8 x i32> %rsrc, <4 x i32> %sampler, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
diff --git a/llvm/test/CodeGen/ARM/arm-shrink-wrapping.ll b/llvm/test/CodeGen/ARM/arm-shrink-wrapping.ll
index 4ab090f22b7..be6863d64a5 100644
--- a/llvm/test/CodeGen/ARM/arm-shrink-wrapping.ll
+++ b/llvm/test/CodeGen/ARM/arm-shrink-wrapping.ll
@@ -644,6 +644,7 @@ declare double @llvm.pow.f64(double, double)
; CHECK: push
;
; DISABLE: tst{{(\.w)?}} r2, #1
+; DISABLE-NEXT: vst1.64
; DISABLE-NEXT: beq [[BB13:LBB[0-9_]+]]
;
; CHECK: bl{{x?}} _pow
diff --git a/llvm/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll b/llvm/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
index 387c5f692ec..08e39ed0511 100644
--- a/llvm/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
@@ -210,6 +210,8 @@ for.end: ; preds = %for.body
; CHECK: mflr {{[0-9]+}}
;
; DISABLE: cmplwi 0, 3, 0
+; DISABLE-NEXT: std
+; DISABLE-NEXT: std
; DISABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
;
; Loop preheader
@@ -290,6 +292,8 @@ declare void @somethingElse(...)
; CHECK: mflr {{[0-9]+}}
;
; DISABLE: cmplwi 0, 3, 0
+; DISABLE-NEXT: std
+; DISABLE-NEXT: std
; DISABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
;
; CHECK: bl somethingElse
@@ -377,8 +381,8 @@ entry:
; ENABLE-DAG: li [[IV:[0-9]+]], 10
; ENABLE-DAG: std 14, -[[STACK_OFFSET:[0-9]+]](1) # 8-byte Folded Spill
;
-; DISABLE: std 14, -[[STACK_OFFSET:[0-9]+]](1) # 8-byte Folded Spill
; DISABLE: cmplwi 0, 3, 0
+; DISABLE-NEXT: std 14, -[[STACK_OFFSET:[0-9]+]](1) # 8-byte Folded Spill
; DISABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
; DISABLE: li [[IV:[0-9]+]], 10
;
diff --git a/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll b/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll
index b38c955e1f6..ff6da8288fc 100644
--- a/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll
+++ b/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll
@@ -56,9 +56,9 @@ bb5: ; preds = %bb, %entry
define i32 @test_inlineasm(i32 %a) nounwind {
entry:
;CHECK-LABEL: test_inlineasm:
+;CHECK: cmp
;CHECK: sethi
;CHECK: !NO_APP
-;CHECK-NEXT: cmp
;CHECK-NEXT: ble
;CHECK-NEXT: mov
tail call void asm sideeffect "sethi 0, %g0", ""() nounwind
diff --git a/llvm/test/CodeGen/SystemZ/int-cmp-48.ll b/llvm/test/CodeGen/SystemZ/int-cmp-48.ll
index 277423b8cc0..2a6d9d5fcaf 100644
--- a/llvm/test/CodeGen/SystemZ/int-cmp-48.ll
+++ b/llvm/test/CodeGen/SystemZ/int-cmp-48.ll
@@ -29,8 +29,8 @@ exit:
define void @f2(i8 *%src) {
; CHECK-LABEL: f2:
; CHECK: llc [[REG:%r[0-5]]], 0(%r2)
-; CHECK: mvi 0(%r2), 0
; CHECK: tmll [[REG]], 1
+; CHECK: mvi 0(%r2), 0
; CHECK: ber %r14
; CHECK: br %r14
entry: