From 51f48295cbe8fa3a44db263b528dd9f7bae7bf9a Mon Sep 17 00:00:00 2001
From: Guozhi Wei
Date: Thu, 22 Aug 2019 16:21:32 +0000
Subject: [MBP] Disable aggressive loop rotate in plain mode

Patch https://reviews.llvm.org/D43256 introduced a more aggressive loop layout
optimization which depends on profile information. If profile information is
not available, the statically estimated profile information (generated by
BranchProbabilityInfo.cpp) is used. If the user program doesn't behave as
BranchProbabilityInfo.cpp expects, the layout may be worse.

To be conservative, this patch restores the original layout algorithm in plain
mode. But users can still try the aggressive layout optimization with
-force-precise-rotation-cost=true.

Differential Revision: https://reviews.llvm.org/D65673

llvm-svn: 369664
---
 llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll
index 2be99267c4e..be5d8d47205 100644
--- a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll
+++ b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll
@@ -3,11 +3,11 @@
 ; GCN-LABEL: {{^}}negated_cond:
 ; GCN: BB0_1:
 ; GCN: v_cmp_eq_u32_e64 [[CC:[^,]+]],
-; GCN: BB0_3:
+; GCN: BB0_2:
 ; GCN-NOT: v_cndmask_b32
 ; GCN-NOT: v_cmp
 ; GCN: s_andn2_b64 vcc, exec, [[CC]]
-; GCN: s_cbranch_vccnz BB0_2
+; GCN: s_cbranch_vccnz BB0_4
 define amdgpu_kernel void @negated_cond(i32 addrspace(1)* %arg1) {
 bb:
   br label %bb1
@@ -36,11 +36,11 @@ bb4:
 
 ; GCN-LABEL: {{^}}negated_cond_dominated_blocks:
 ; GCN: v_cmp_eq_u32_e64 [[CC:[^,]+]],
-; GCN: %bb4
+; GCN: BB1_1:
 ; GCN-NOT: v_cndmask_b32
 ; GCN-NOT: v_cmp
 ; GCN: s_andn2_b64 vcc, exec, [[CC]]
-; GCN: s_cbranch_vccnz BB1_1
+; GCN: s_cbranch_vccz BB1_3
 define amdgpu_kernel void @negated_cond_dominated_blocks(i32 addrspace(1)* %arg1) {
 bb:
   br label %bb2
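
Note on trying the aggressive layout (not part of the patch): the commit message names
-force-precise-rotation-cost=true as the way to opt back into the profile-cost-driven
loop rotation. The RUN lines below are a hypothetical sketch of how a lit test such as
the one touched here could exercise both modes; the exact llc target flags and the
GCN / GCN-AGGR check prefixes are illustrative assumptions, not taken from the test file.

; Hypothetical RUN lines (sketch only); -march choice and check prefixes are assumed.
; Plain mode: with no real profile data, the pass falls back to the original
; (non-aggressive) loop rotation after this patch.
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s -check-prefix=GCN
; Opt back into the aggressive, precise-cost loop rotation:
; RUN: llc -march=amdgcn -force-precise-rotation-cost=true -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=GCN-AGGR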