summaryrefslogtreecommitdiffstats
path: root/llvm/test
diff options
context:
space:
mode:
authorJatin Bhateja <jatin.bhateja@gmail.com>2017-12-01 14:07:38 +0000
committerJatin Bhateja <jatin.bhateja@gmail.com>2017-12-01 14:07:38 +0000
commit328199ec26435e364fde303ff384a64f8fb0b63c (patch)
tree7500ceb65f55af6ba3bf967b0c70eef20a216d79 /llvm/test
parent412a991b102fac799e743b52371f575ebf6901ee (diff)
downloadbcm5719-llvm-328199ec26435e364fde303ff384a64f8fb0b63c.tar.gz
bcm5719-llvm-328199ec26435e364fde303ff384a64f8fb0b63c.zip
[X86] Improvement in CodeGen instruction selection for LEAs.
Summary: 1/ Operand folding during complex pattern matching for LEAs has been extended, such that it promotes Scale to accommodate similar operand appearing in the DAG e.g. T1 = A + B T2 = T1 + 10 T3 = T2 + A For the above DAG rooted at T3, X86AddressMode will now look like Base = B , Index = A , Scale = 2 , Disp = 10 2/ During OptimizeLEAPass down the pipeline factorization is now performed over LEAs so that if there is an opportunity then complex LEAs (having 3 operands) could be factored out e.g. leal 1(%rax,%rcx,1), %rdx leal 1(%rax,%rcx,2), %rcx will be factored as follows leal 1(%rax,%rcx,1), %rdx leal (%rdx,%rcx) , %edx 3/ Aggressive operand folding for AM based selection for LEAs is sensitive to loops, thus avoiding creation of any complex LEAs within a loop. 4/ Simplify LEA converts (lea (BASE,1,INDEX,0) --> add (BASE, INDEX) which offers better throughput. PR32755 will be taken care of by this patch. Previous patch revisions : r313343 , r314886 Reviewers: lsaba, RKSimon, craig.topper, qcolombet, jmolloy, jbhateja Reviewed By: lsaba, RKSimon, jbhateja Subscribers: jmolloy, spatel, igorb, llvm-commits Differential Revision: https://reviews.llvm.org/D35014 llvm-svn: 319543
Diffstat (limited to 'llvm/test')
-rw-r--r--llvm/test/CodeGen/X86/GlobalISel/callingconv.ll2
-rw-r--r--llvm/test/CodeGen/X86/GlobalISel/gep.ll34
-rw-r--r--llvm/test/CodeGen/X86/GlobalISel/memop-scalar.ll2
-rw-r--r--llvm/test/CodeGen/X86/lea-opt-cse1.ll12
-rw-r--r--llvm/test/CodeGen/X86/lea-opt-cse2.ll40
-rw-r--r--llvm/test/CodeGen/X86/lea-opt-cse3.ll34
-rw-r--r--llvm/test/CodeGen/X86/lea-opt-cse4.ll68
-rw-r--r--llvm/test/CodeGen/X86/mul-constant-i16.ll12
-rw-r--r--llvm/test/CodeGen/X86/mul-constant-i32.ll15
-rw-r--r--llvm/test/CodeGen/X86/mul-constant-i64.ll9
-rw-r--r--llvm/test/CodeGen/X86/mul-constant-result.ll14
-rw-r--r--llvm/test/CodeGen/X86/umul-with-overflow.ll16
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll6
13 files changed, 113 insertions, 151 deletions
diff --git a/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll b/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll
index 4100a7217ac..a55ff862e2b 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll
@@ -388,7 +388,7 @@ define void @test_variadic_call_2(i8** %addr_ptr, double* %val_ptr) {
; X32-NEXT: movl 4(%ecx), %ecx
; X32-NEXT: movl %eax, (%esp)
; X32-NEXT: movl $4, %eax
-; X32-NEXT: leal (%esp,%eax), %eax
+; X32-NEXT: addl %esp, %eax
; X32-NEXT: movl %edx, 4(%esp)
; X32-NEXT: movl %ecx, 4(%eax)
; X32-NEXT: calll variadic_callee
diff --git a/llvm/test/CodeGen/X86/GlobalISel/gep.ll b/llvm/test/CodeGen/X86/GlobalISel/gep.ll
index ee66accc77d..95ad8d4eb30 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/gep.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/gep.ll
@@ -5,10 +5,10 @@
define i32* @test_gep_i8(i32 *%arr, i8 %ind) {
; X64_GISEL-LABEL: test_gep_i8:
; X64_GISEL: # BB#0:
-; X64_GISEL-NEXT: movq $4, %rax
-; X64_GISEL-NEXT: movsbq %sil, %rcx
-; X64_GISEL-NEXT: imulq %rax, %rcx
-; X64_GISEL-NEXT: leaq (%rdi,%rcx), %rax
+; X64_GISEL-NEXT: movq $4, %rcx
+; X64_GISEL-NEXT: movsbq %sil, %rax
+; X64_GISEL-NEXT: imulq %rcx, %rax
+; X64_GISEL-NEXT: addq %rdi, %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i8:
@@ -25,7 +25,7 @@ define i32* @test_gep_i8_const(i32 *%arr) {
; X64_GISEL-LABEL: test_gep_i8_const:
; X64_GISEL: # BB#0:
; X64_GISEL-NEXT: movq $80, %rax
-; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT: addq %rdi, %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i8_const:
@@ -39,10 +39,10 @@ define i32* @test_gep_i8_const(i32 *%arr) {
define i32* @test_gep_i16(i32 *%arr, i16 %ind) {
; X64_GISEL-LABEL: test_gep_i16:
; X64_GISEL: # BB#0:
-; X64_GISEL-NEXT: movq $4, %rax
-; X64_GISEL-NEXT: movswq %si, %rcx
-; X64_GISEL-NEXT: imulq %rax, %rcx
-; X64_GISEL-NEXT: leaq (%rdi,%rcx), %rax
+; X64_GISEL-NEXT: movq $4, %rcx
+; X64_GISEL-NEXT: movswq %si, %rax
+; X64_GISEL-NEXT: imulq %rcx, %rax
+; X64_GISEL-NEXT: addq %rdi, %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i16:
@@ -59,7 +59,7 @@ define i32* @test_gep_i16_const(i32 *%arr) {
; X64_GISEL-LABEL: test_gep_i16_const:
; X64_GISEL: # BB#0:
; X64_GISEL-NEXT: movq $80, %rax
-; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT: addq %rdi, %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i16_const:
@@ -73,10 +73,10 @@ define i32* @test_gep_i16_const(i32 *%arr) {
define i32* @test_gep_i32(i32 *%arr, i32 %ind) {
; X64_GISEL-LABEL: test_gep_i32:
; X64_GISEL: # BB#0:
-; X64_GISEL-NEXT: movq $4, %rax
-; X64_GISEL-NEXT: movslq %esi, %rcx
-; X64_GISEL-NEXT: imulq %rax, %rcx
-; X64_GISEL-NEXT: leaq (%rdi,%rcx), %rax
+; X64_GISEL-NEXT: movq $4, %rcx
+; X64_GISEL-NEXT: movslq %esi, %rax
+; X64_GISEL-NEXT: imulq %rcx, %rax
+; X64_GISEL-NEXT: addq %rdi, %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i32:
@@ -92,7 +92,7 @@ define i32* @test_gep_i32_const(i32 *%arr) {
; X64_GISEL-LABEL: test_gep_i32_const:
; X64_GISEL: # BB#0:
; X64_GISEL-NEXT: movq $20, %rax
-; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT: addq %rdi, %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i32_const:
@@ -108,7 +108,7 @@ define i32* @test_gep_i64(i32 *%arr, i64 %ind) {
; X64_GISEL: # BB#0:
; X64_GISEL-NEXT: movq $4, %rax
; X64_GISEL-NEXT: imulq %rsi, %rax
-; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT: addq %rdi, %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i64:
@@ -123,7 +123,7 @@ define i32* @test_gep_i64_const(i32 *%arr) {
; X64_GISEL-LABEL: test_gep_i64_const:
; X64_GISEL: # BB#0:
; X64_GISEL-NEXT: movq $20, %rax
-; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT: addq %rdi, %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i64_const:
diff --git a/llvm/test/CodeGen/X86/GlobalISel/memop-scalar.ll b/llvm/test/CodeGen/X86/GlobalISel/memop-scalar.ll
index 2097a3b0bfc..c73a43177e3 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/memop-scalar.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/memop-scalar.ll
@@ -181,7 +181,7 @@ define i32 @test_gep_folding_largeGepIndex(i32* %arr, i32 %val) {
; ALL-LABEL: test_gep_folding_largeGepIndex:
; ALL: # BB#0:
; ALL-NEXT: movabsq $228719476720, %rax # imm = 0x3540BE3FF0
-; ALL-NEXT: leaq (%rdi,%rax), %rax
+; ALL-NEXT: addq %rdi, %rax
; ALL-NEXT: movl %esi, (%rax)
; ALL-NEXT: movl (%rax), %eax
; ALL-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/lea-opt-cse1.ll b/llvm/test/CodeGen/X86/lea-opt-cse1.ll
index 05b47690e81..512740ce075 100644
--- a/llvm/test/CodeGen/X86/lea-opt-cse1.ll
+++ b/llvm/test/CodeGen/X86/lea-opt-cse1.ll
@@ -9,27 +9,21 @@ define void @test_func(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr {
; X64: # BB#0: # %entry
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: movl 16(%rdi), %ecx
-; X64-NEXT: leal (%rax,%rcx), %edx
; X64-NEXT: leal 1(%rax,%rcx), %eax
; X64-NEXT: movl %eax, 12(%rdi)
-; X64-NEXT: leal 1(%rcx,%rdx), %eax
+; X64-NEXT: addq %ecx, %eax
; X64-NEXT: movl %eax, 16(%rdi)
; X64-NEXT: retq
;
; X86-LABEL: test_func:
; X86: # BB#0: # %entry
-; X86-NEXT: pushl %esi
-; X86-NEXT: .cfi_def_cfa_offset 8
-; X86-NEXT: .cfi_offset %esi, -8
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl (%eax), %ecx
; X86-NEXT: movl 16(%eax), %edx
-; X86-NEXT: leal 1(%ecx,%edx), %esi
+; X86-NEXT: leal 1(%ecx,%edx), %ecx
+; X86-NEXT: movl %ecx, 12(%eax)
; X86-NEXT: addl %edx, %ecx
-; X86-NEXT: movl %esi, 12(%eax)
-; X86-NEXT: leal 1(%edx,%ecx), %ecx
; X86-NEXT: movl %ecx, 16(%eax)
-; X86-NEXT: popl %esi
; X86-NEXT: retl
entry:
%h0 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 0
diff --git a/llvm/test/CodeGen/X86/lea-opt-cse2.ll b/llvm/test/CodeGen/X86/lea-opt-cse2.ll
index 865dd49a6e1..4226e3d25c8 100644
--- a/llvm/test/CodeGen/X86/lea-opt-cse2.ll
+++ b/llvm/test/CodeGen/X86/lea-opt-cse2.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s -check-prefix=X64
-; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s -check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+slow-3ops-lea | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+slow-3ops-lea | FileCheck %s -check-prefix=X86
%struct.SA = type { i32 , i32 , i32 , i32 , i32};
@@ -10,43 +10,39 @@ define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: .LBB0_1: # %loop
; X64-NEXT: # =>This Inner Loop Header: Depth=1
-; X64-NEXT: movl (%rdi), %eax
-; X64-NEXT: movl 16(%rdi), %ecx
-; X64-NEXT: leal 1(%rax,%rcx), %edx
-; X64-NEXT: movl %edx, 12(%rdi)
+; X64-NEXT: movl 16(%rdi), %eax
+; X64-NEXT: movl (%rdi), %ecx
+; X64-NEXT: addl %eax, %ecx
+; X64-NEXT: incl %ecx
+; X64-NEXT: movl %ecx, 12(%rdi)
; X64-NEXT: decl %esi
; X64-NEXT: jne .LBB0_1
; X64-NEXT: # BB#2: # %exit
-; X64-NEXT: addl %ecx, %eax
-; X64-NEXT: leal 1(%rcx,%rax), %eax
-; X64-NEXT: movl %eax, 16(%rdi)
+; X64-NEXT: addl %eax, %ecx
+; X64-NEXT: movl %ecx, 16(%rdi)
; X64-NEXT: retq
;
; X86-LABEL: foo:
; X86: # BB#0: # %entry
-; X86-NEXT: pushl %edi
-; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: pushl %esi
-; X86-NEXT: .cfi_def_cfa_offset 12
-; X86-NEXT: .cfi_offset %esi, -12
-; X86-NEXT: .cfi_offset %edi, -8
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %esi, -8
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: .p2align 4, 0x90
; X86-NEXT: .LBB0_1: # %loop
; X86-NEXT: # =>This Inner Loop Header: Depth=1
-; X86-NEXT: movl (%eax), %edx
-; X86-NEXT: movl 16(%eax), %esi
-; X86-NEXT: leal 1(%edx,%esi), %edi
-; X86-NEXT: movl %edi, 12(%eax)
+; X86-NEXT: movl 16(%eax), %edx
+; X86-NEXT: movl (%eax), %esi
+; X86-NEXT: addl %edx, %esi
+; X86-NEXT: incl %esi
+; X86-NEXT: movl %esi, 12(%eax)
; X86-NEXT: decl %ecx
; X86-NEXT: jne .LBB0_1
; X86-NEXT: # BB#2: # %exit
-; X86-NEXT: addl %esi, %edx
-; X86-NEXT: leal 1(%esi,%edx), %ecx
-; X86-NEXT: movl %ecx, 16(%eax)
+; X86-NEXT: addl %edx, %esi
+; X86-NEXT: movl %esi, 16(%eax)
; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
; X86-NEXT: retl
entry:
br label %loop
diff --git a/llvm/test/CodeGen/X86/lea-opt-cse3.ll b/llvm/test/CodeGen/X86/lea-opt-cse3.ll
index 48ab3130bf0..57c20cceed1 100644
--- a/llvm/test/CodeGen/X86/lea-opt-cse3.ll
+++ b/llvm/test/CodeGen/X86/lea-opt-cse3.ll
@@ -8,7 +8,7 @@ define i32 @foo(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 4(%rdi,%rsi,2), %ecx
-; X64-NEXT: leal 4(%rdi,%rsi,4), %eax
+; X64-NEXT: leal (%ecx,%esi,2), %eax
; X64-NEXT: imull %ecx, %eax
; X64-NEXT: retq
;
@@ -16,9 +16,9 @@ define i32 @foo(i32 %a, i32 %b) local_unnamed_addr #0 {
; X86: # BB#0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: leal 4(%ecx,%eax,2), %edx
-; X86-NEXT: leal 4(%ecx,%eax,4), %eax
-; X86-NEXT: imull %edx, %eax
+; X86-NEXT: leal 4(%ecx,%eax,2), %ecx
+; X86-NEXT: leal (%ecx,%eax,2), %eax
+; X86-NEXT: imull %ecx, %eax
; X86-NEXT: retl
entry:
%mul = shl i32 %b, 1
@@ -36,7 +36,7 @@ define i32 @foo1(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 4(%rdi,%rsi,4), %ecx
-; X64-NEXT: leal 4(%rdi,%rsi,8), %eax
+; X64-NEXT: leal (%ecx,%esi,4), %eax
; X64-NEXT: imull %ecx, %eax
; X64-NEXT: retq
;
@@ -44,9 +44,9 @@ define i32 @foo1(i32 %a, i32 %b) local_unnamed_addr #0 {
; X86: # BB#0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: leal 4(%ecx,%eax,4), %edx
-; X86-NEXT: leal 4(%ecx,%eax,8), %eax
-; X86-NEXT: imull %edx, %eax
+; X86-NEXT: leal 4(%ecx,%eax,4), %ecx
+; X86-NEXT: leal (%ecx,%eax,4), %eax
+; X86-NEXT: imull %ecx, %eax
; X86-NEXT: retl
entry:
%mul = shl i32 %b, 2
@@ -68,29 +68,23 @@ define i32 @foo1_mult_basic_blocks(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-NEXT: cmpl $10, %ecx
; X64-NEXT: je .LBB2_2
; X64-NEXT: # BB#1: # %mid
-; X64-NEXT: leal 4(%rdi,%rsi,8), %eax
-; X64-NEXT: imull %eax, %ecx
-; X64-NEXT: movl %ecx, %eax
+; X64-NEXT: leal (%ecx,%esi,4), %eax
+; X64-NEXT: imull %ecx, %eax
; X64-NEXT: .LBB2_2: # %exit
; X64-NEXT: retq
;
; X86-LABEL: foo1_mult_basic_blocks:
; X86: # BB#0: # %entry
-; X86-NEXT: pushl %esi
-; X86-NEXT: .cfi_def_cfa_offset 8
-; X86-NEXT: .cfi_offset %esi, -8
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: leal 4(%esi,%edx,4), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal 4(%eax,%edx,4), %ecx
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: cmpl $10, %ecx
; X86-NEXT: je .LBB2_2
; X86-NEXT: # BB#1: # %mid
-; X86-NEXT: leal 4(%esi,%edx,8), %eax
-; X86-NEXT: imull %eax, %ecx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: leal (%ecx,%edx,4), %eax
+; X86-NEXT: imull %ecx, %eax
; X86-NEXT: .LBB2_2: # %exit
-; X86-NEXT: popl %esi
; X86-NEXT: retl
entry:
%mul = shl i32 %b, 2
diff --git a/llvm/test/CodeGen/X86/lea-opt-cse4.ll b/llvm/test/CodeGen/X86/lea-opt-cse4.ll
index 31f31a73d44..6f1fe282f92 100644
--- a/llvm/test/CodeGen/X86/lea-opt-cse4.ll
+++ b/llvm/test/CodeGen/X86/lea-opt-cse4.ll
@@ -1,41 +1,31 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s -check-prefix=X64
-; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s -check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+slow-3ops-lea | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+slow-3ops-lea | FileCheck %s -check-prefix=X86
%struct.SA = type { i32 , i32 , i32 , i32 , i32};
define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
; X64-LABEL: foo:
; X64: # BB#0: # %entry
-; X64-NEXT: movl 16(%rdi), %eax
-; X64-NEXT: movl (%rdi), %ecx
-; X64-NEXT: addl %eax, %ecx
-; X64-NEXT: addl %eax, %ecx
-; X64-NEXT: addl %eax, %ecx
-; X64-NEXT: leal (%rcx,%rax), %edx
-; X64-NEXT: leal 1(%rax,%rcx), %ecx
-; X64-NEXT: movl %ecx, 12(%rdi)
-; X64-NEXT: leal 1(%rax,%rdx), %eax
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: movl 16(%rdi), %ecx
+; X64-NEXT: leal (%rax,%rcx,4), %eax
+; X64-NEXT: addl $1, %eax
+; X64-NEXT: movl %eax, 12(%rdi)
+; X64-NEXT: addl %ecx, %eax
; X64-NEXT: movl %eax, 16(%rdi)
; X64-NEXT: retq
;
; X86-LABEL: foo:
; X86: # BB#0: # %entry
-; X86-NEXT: pushl %esi
-; X86-NEXT: .cfi_def_cfa_offset 8
-; X86-NEXT: .cfi_offset %esi, -8
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl 16(%eax), %ecx
-; X86-NEXT: movl (%eax), %edx
-; X86-NEXT: addl %ecx, %edx
-; X86-NEXT: addl %ecx, %edx
-; X86-NEXT: addl %ecx, %edx
-; X86-NEXT: leal 1(%ecx,%edx), %esi
-; X86-NEXT: addl %ecx, %edx
-; X86-NEXT: movl %esi, 12(%eax)
-; X86-NEXT: leal 1(%ecx,%edx), %ecx
+; X86-NEXT: movl (%eax), %ecx
+; X86-NEXT: movl 16(%eax), %edx
+; X86-NEXT: leal (%ecx,%edx,4), %ecx
+; X86-NEXT: addl $1, %ecx
+; X86-NEXT: movl %ecx, 12(%eax)
+; X86-NEXT: addl %edx, %ecx
; X86-NEXT: movl %ecx, 16(%eax)
-; X86-NEXT: popl %esi
; X86-NEXT: retl
entry:
%h0 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 0
@@ -62,15 +52,15 @@ define void @foo_loop(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: .LBB1_1: # %loop
; X64-NEXT: # =>This Inner Loop Header: Depth=1
-; X64-NEXT: movl (%rdi), %ecx
; X64-NEXT: movl 16(%rdi), %eax
-; X64-NEXT: leal 1(%rcx,%rax), %edx
-; X64-NEXT: movl %edx, 12(%rdi)
+; X64-NEXT: movl (%rdi), %ecx
+; X64-NEXT: addl %eax, %ecx
+; X64-NEXT: incl %ecx
+; X64-NEXT: movl %ecx, 12(%rdi)
; X64-NEXT: decl %esi
; X64-NEXT: jne .LBB1_1
; X64-NEXT: # BB#2: # %exit
; X64-NEXT: addl %eax, %ecx
-; X64-NEXT: leal 1(%rax,%rcx), %ecx
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: addl %eax, %ecx
@@ -82,26 +72,23 @@ define void @foo_loop(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0
;
; X86-LABEL: foo_loop:
; X86: # BB#0: # %entry
-; X86-NEXT: pushl %edi
-; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: pushl %esi
-; X86-NEXT: .cfi_def_cfa_offset 12
-; X86-NEXT: .cfi_offset %esi, -12
-; X86-NEXT: .cfi_offset %edi, -8
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %esi, -8
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: .p2align 4, 0x90
; X86-NEXT: .LBB1_1: # %loop
; X86-NEXT: # =>This Inner Loop Header: Depth=1
-; X86-NEXT: movl (%eax), %esi
; X86-NEXT: movl 16(%eax), %ecx
-; X86-NEXT: leal 1(%esi,%ecx), %edi
-; X86-NEXT: movl %edi, 12(%eax)
-; X86-NEXT: decl %edx
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: incl %edx
+; X86-NEXT: movl %edx, 12(%eax)
+; X86-NEXT: decl %esi
; X86-NEXT: jne .LBB1_1
; X86-NEXT: # BB#2: # %exit
-; X86-NEXT: addl %ecx, %esi
-; X86-NEXT: leal 1(%ecx,%esi), %edx
+; X86-NEXT: addl %ecx, %edx
; X86-NEXT: addl %ecx, %edx
; X86-NEXT: addl %ecx, %edx
; X86-NEXT: addl %ecx, %edx
@@ -110,7 +97,6 @@ define void @foo_loop(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0
; X86-NEXT: addl %ecx, %edx
; X86-NEXT: movl %edx, 16(%eax)
; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
; X86-NEXT: retl
entry:
br label %loop
diff --git a/llvm/test/CodeGen/X86/mul-constant-i16.ll b/llvm/test/CodeGen/X86/mul-constant-i16.ll
index c3b822ac214..72731b3ba67 100644
--- a/llvm/test/CodeGen/X86/mul-constant-i16.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-i16.ll
@@ -558,11 +558,10 @@ define i16 @test_mul_by_28(i16 %x) {
define i16 @test_mul_by_29(i16 %x) {
; X86-LABEL: test_mul_by_29:
; X86: # BB#0:
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: leal (%ecx,%ecx,8), %eax
-; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: addl %ecx, %eax
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal (%eax,%eax,8), %ecx
+; X86-NEXT: leal (%ecx,%ecx,2), %ecx
+; X86-NEXT: leal (%ecx,%eax,2), %eax
; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
@@ -571,8 +570,7 @@ define i16 @test_mul_by_29(i16 %x) {
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
-; X64-NEXT: addl %edi, %eax
-; X64-NEXT: addl %edi, %eax
+; X64-NEXT: leal (%rax,%rdi,2), %eax
; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 29
diff --git a/llvm/test/CodeGen/X86/mul-constant-i32.ll b/llvm/test/CodeGen/X86/mul-constant-i32.ll
index 228dd5e5f37..c8ac6a70927 100644
--- a/llvm/test/CodeGen/X86/mul-constant-i32.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-i32.ll
@@ -1457,11 +1457,10 @@ define i32 @test_mul_by_28(i32 %x) {
define i32 @test_mul_by_29(i32 %x) {
; X86-LABEL: test_mul_by_29:
; X86: # BB#0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: leal (%ecx,%ecx,8), %eax
-; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: addl %ecx, %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: leal (%eax,%eax,8), %ecx
+; X86-NEXT: leal (%ecx,%ecx,2), %ecx
+; X86-NEXT: leal (%ecx,%eax,2), %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_29:
@@ -1469,8 +1468,7 @@ define i32 @test_mul_by_29(i32 %x) {
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
-; X64-HSW-NEXT: addl %edi, %eax # sched: [1:0.25]
-; X64-HSW-NEXT: addl %edi, %eax # sched: [1:0.25]
+; X64-HSW-NEXT: leal (%rax,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_29:
@@ -1478,8 +1476,7 @@ define i32 @test_mul_by_29(i32 %x) {
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
-; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50]
-; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50]
+; X64-JAG-NEXT: leal (%rax,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_29:
diff --git a/llvm/test/CodeGen/X86/mul-constant-i64.ll b/llvm/test/CodeGen/X86/mul-constant-i64.ll
index 98568a6fc8e..8e171f973e0 100644
--- a/llvm/test/CodeGen/X86/mul-constant-i64.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-i64.ll
@@ -1523,8 +1523,7 @@ define i64 @test_mul_by_29(i64 %x) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,8), %ecx
; X86-NEXT: leal (%ecx,%ecx,2), %ecx
-; X86-NEXT: addl %eax, %ecx
-; X86-NEXT: addl %eax, %ecx
+; X86-NEXT: leal (%ecx,%eax,2), %ecx
; X86-NEXT: movl $29, %eax
; X86-NEXT: mull {{[0-9]+}}(%esp)
; X86-NEXT: addl %ecx, %edx
@@ -1534,16 +1533,14 @@ define i64 @test_mul_by_29(i64 %x) {
; X64-HSW: # BB#0:
; X64-HSW-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; X64-HSW-NEXT: leaq (%rax,%rax,2), %rax # sched: [1:0.50]
-; X64-HSW-NEXT: addq %rdi, %rax # sched: [1:0.25]
-; X64-HSW-NEXT: addq %rdi, %rax # sched: [1:0.25]
+; X64-HSW-NEXT: leaq (%rax,%rdi,2), %rax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_29:
; X64-JAG: # BB#0:
; X64-JAG-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rax,%rax,2), %rax # sched: [1:0.50]
-; X64-JAG-NEXT: addq %rdi, %rax # sched: [1:0.50]
-; X64-JAG-NEXT: addq %rdi, %rax # sched: [1:0.50]
+; X64-JAG-NEXT: leaq (%rax,%rdi,2), %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_29:
diff --git a/llvm/test/CodeGen/X86/mul-constant-result.ll b/llvm/test/CodeGen/X86/mul-constant-result.ll
index 6e74c1d4e9e..f5846ab37ba 100644
--- a/llvm/test/CodeGen/X86/mul-constant-result.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-result.ll
@@ -164,8 +164,7 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X86-NEXT: .LBB0_35:
; X86-NEXT: leal (%eax,%eax,8), %ecx
; X86-NEXT: leal (%ecx,%ecx,2), %ecx
-; X86-NEXT: addl %eax, %ecx
-; X86-NEXT: addl %ecx, %eax
+; X86-NEXT: leal (%ecx,%eax,2), %eax
; X86-NEXT: popl %esi
; X86-NEXT: retl
; X86-NEXT: .LBB0_36:
@@ -323,16 +322,17 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X64-HSW-NEXT: .LBB0_31:
; X64-HSW-NEXT: leal (%rax,%rax,8), %ecx
; X64-HSW-NEXT: leal (%rcx,%rcx,2), %ecx
-; X64-HSW-NEXT: jmp .LBB0_17
-; X64-HSW-NEXT: .LBB0_32:
-; X64-HSW-NEXT: leal (%rax,%rax,8), %ecx
-; X64-HSW-NEXT: leal (%rcx,%rcx,2), %ecx
-; X64-HSW-NEXT: addl %eax, %ecx
; X64-HSW-NEXT: .LBB0_17:
; X64-HSW-NEXT: addl %eax, %ecx
; X64-HSW-NEXT: movl %ecx, %eax
; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
+; X64-HSW-NEXT: .LBB0_32:
+; X64-HSW-NEXT: leal (%rax,%rax,8), %ecx
+; X64-HSW-NEXT: leal (%rcx,%rcx,2), %ecx
+; X64-HSW-NEXT: leal (%rcx,%rax,2), %eax
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_33:
; X64-HSW-NEXT: movl %eax, %ecx
; X64-HSW-NEXT: shll $5, %ecx
diff --git a/llvm/test/CodeGen/X86/umul-with-overflow.ll b/llvm/test/CodeGen/X86/umul-with-overflow.ll
index 2e877a0b6e0..70b7daa6c16 100644
--- a/llvm/test/CodeGen/X86/umul-with-overflow.ll
+++ b/llvm/test/CodeGen/X86/umul-with-overflow.ll
@@ -40,10 +40,10 @@ define i32 @test2(i32 %a, i32 %b) nounwind readnone {
; X64-NEXT: leal (%rdi,%rdi), %eax
; X64-NEXT: retq
entry:
- %tmp0 = add i32 %b, %a
- %tmp1 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %tmp0, i32 2)
- %tmp2 = extractvalue { i32, i1 } %tmp1, 0
- ret i32 %tmp2
+ %tmp0 = add i32 %b, %a
+ %tmp1 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %tmp0, i32 2)
+ %tmp2 = extractvalue { i32, i1 } %tmp1, 0
+ ret i32 %tmp2
}
define i32 @test3(i32 %a, i32 %b) nounwind readnone {
@@ -64,8 +64,8 @@ define i32 @test3(i32 %a, i32 %b) nounwind readnone {
; X64-NEXT: mull %ecx
; X64-NEXT: retq
entry:
- %tmp0 = add i32 %b, %a
- %tmp1 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %tmp0, i32 4)
- %tmp2 = extractvalue { i32, i1 } %tmp1, 0
- ret i32 %tmp2
+ %tmp0 = add i32 %b, %a
+ %tmp1 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %tmp0, i32 4)
+ %tmp2 = extractvalue { i32, i1 } %tmp1, 0
+ ret i32 %tmp2
}
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
index 7c01432914f..3f6d234c929 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
@@ -13,14 +13,14 @@
; X64-NEXT: .p2align
; X64: %loop
; no complex address modes
-; X64-NOT: (%{{[^)]+}},%{{[^)]+}},
+; X64-NOT: [1-9]+(%{{[^)]+}},%{{[^)]+}},
;
; X32: @simple
; no expensive address computation in the preheader
; X32-NOT: imul
; X32: %loop
; no complex address modes
-; X32-NOT: (%{{[^)]+}},%{{[^)]+}},
+; X32-NOT: [1-9]+(%{{[^)]+}},%{{[^)]+}},
define i32 @simple(i32* %a, i32* %b, i32 %x) nounwind {
entry:
br label %loop
@@ -103,7 +103,7 @@ exit:
; X32-NOT: mov{{.*}}(%esp){{$}}
; X32: %for.body{{$}}
; no complex address modes
-; X32-NOT: (%{{[^)]+}},%{{[^)]+}},
+; X32-NOT: [1-9]+(%{{[^)]+}},%{{[^)]+}},
; no reloads
; X32-NOT: (%esp)
define void @extrastride(i8* nocapture %main, i32 %main_stride, i32* nocapture %res, i32 %x, i32 %y, i32 %z) nounwind {
OpenPOWER on IntegriCloud