diff options
| author | Sanjay Patel <spatel@rotateright.com> | 2019-02-03 16:16:48 +0000 |
|---|---|---|
| committer | Sanjay Patel <spatel@rotateright.com> | 2019-02-03 16:16:48 +0000 |
| commit | 837552fe9f216d53beedd0a62f9a94854934e46e (patch) | |
| tree | 36e2d7d8577c8f47498901bd8e34c9fde0f74687 /llvm/test/CodeGen | |
| parent | 18b73a655bb611534165fe40e417fde89684f776 (diff) | |
| download | bcm5719-llvm-837552fe9f216d53beedd0a62f9a94854934e46e.tar.gz bcm5719-llvm-837552fe9f216d53beedd0a62f9a94854934e46e.zip | |
[PatternMatch] add special-case uaddo matching for increment-by-one (2nd try)
This is the most important uaddo problem mentioned in PR31754:
https://bugs.llvm.org/show_bug.cgi?id=31754
...but that was overcome in x86 codegen with D57637.
That patch also corrects the inc vs. add regressions seen with the previous attempt at this.
Still, we want to make this matcher complete, so we can potentially canonicalize the pattern
even if it's an 'add 1' operation.
Pattern matching, however, shouldn't assume that we have canonicalized IR, so we match 4
commuted variants of uaddo.
There's also a test with a crazy type to show that the existing CGP transform based on this
matcher is not limited by target legality checks.
I'm not sure if the Hexagon diff means the test is no longer testing what it intended to
test, but that should be solvable in a follow-up.
Differential Revision: https://reviews.llvm.org/D57516
llvm-svn: 352998
Diffstat (limited to 'llvm/test/CodeGen')
| -rw-r--r-- | llvm/test/CodeGen/Hexagon/swp-epilog-phi5.ll | 4 | ||||
| -rw-r--r-- | llvm/test/CodeGen/X86/copy-eflags.ll | 20 | ||||
| -rw-r--r-- | llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll | 6 |
3 files changed, 15 insertions, 15 deletions
diff --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi5.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi5.ll
index 2222deed08e..3dcecad5456 100644
--- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi5.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi5.ll
@@ -9,8 +9,8 @@
 ; CHECK: loop0
 ; CHECK: [[REG0:r([0-9]+)]] += mpyi
-; CHECK-NOT: r{{[0-9]+}} += add([[REG0]],#8)
-; CHECK: endloop1
+; CHECK: [[REG2:r([0-9]+)]] = add([[REG1:r([0-9]+)]],add([[REG0]],#8
+; CHECK: endloop0
 
 %s.0 = type { %s.1*, %s.4*, %s.7*, i8*, i8, i32, %s.8*, i32, i32, i32, i8, i8, i32, i32, double, i8, i8, i8, i8, i8, i8, i8, i8, i32, i8, i8, i8, i32, i32, i32, i32, i32, i32, i8**, i32, i32, i32, i32, i32, [64 x i32]*, [4 x %s.9*], [4 x %s.10*], [4 x %s.10*], i32, %s.23*, i8, i8, [16 x i8], [16 x i8], [16 x i8], i32, i8, i8, i8, i8, i16, i16, i8, i8, i8, %s.11*, i32, i32, i32, i32, i8*, i32, [4 x %s.23*], i32, i32, i32, [10 x i32], i32, i32, i32, i32, i32, %s.12*, %s.13*, %s.14*, %s.15*, %s.16*, %s.17*, %s.18*, %s.19*, %s.20*, %s.21*, %s.22* }
 %s.1 = type { void (%s.2*)*, void (%s.2*, i32)*, void (%s.2*)*, void (%s.2*, i8*)*, void (%s.2*)*, i32, %s.3, i32, i32, i8**, i32, i8**, i32, i32 }
diff --git a/llvm/test/CodeGen/X86/copy-eflags.ll b/llvm/test/CodeGen/X86/copy-eflags.ll
index 1e9a598c651..018ea8bbee2 100644
--- a/llvm/test/CodeGen/X86/copy-eflags.ll
+++ b/llvm/test/CodeGen/X86/copy-eflags.ll
@@ -102,13 +102,13 @@ define i32 @test2(i32* %ptr) nounwind {
 ; X32-NEXT: calll external
 ; X32-NEXT: addl $4, %esp
 ; X32-NEXT: testb %bl, %bl
-; X32-NEXT: je .LBB1_1
-; X32-NEXT: # %bb.2: # %else
-; X32-NEXT: xorl %eax, %eax
+; X32-NEXT: jne .LBB1_2
+; X32-NEXT: # %bb.1: # %then
+; X32-NEXT: movl $64, %eax
 ; X32-NEXT: popl %ebx
 ; X32-NEXT: retl
-; X32-NEXT: .LBB1_1: # %then
-; X32-NEXT: movl $64, %eax
+; X32-NEXT: .LBB1_2: # %else
+; X32-NEXT: xorl %eax, %eax
 ; X32-NEXT: popl %ebx
 ; X32-NEXT: retl
 ;
@@ -120,13 +120,13 @@ define i32 @test2(i32* %ptr) nounwind {
 ; X64-NEXT: movl $42, %edi
 ; X64-NEXT: callq external
 ; X64-NEXT: testb %bl, %bl
-; X64-NEXT: je .LBB1_1
-; X64-NEXT: # %bb.2: # %else
-; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: jne .LBB1_2
+; X64-NEXT: # %bb.1: # %then
+; X64-NEXT: movl $64, %eax
 ; X64-NEXT: popq %rbx
 ; X64-NEXT: retq
-; X64-NEXT: .LBB1_1: # %then
-; X64-NEXT: movl $64, %eax
+; X64-NEXT: .LBB1_2: # %else
+; X64-NEXT: xorl %eax, %eax
 ; X64-NEXT: popq %rbx
 ; X64-NEXT: retq
 entry:
diff --git a/llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll b/llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll
index 482c08632cb..410d736af73 100644
--- a/llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll
+++ b/llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll
@@ -177,11 +177,11 @@ define void @example_inc(%struct.obj2* %o) nounwind uwtable ssp {
 ; CHECK-NEXT: jne .LBB4_4
 ; CHECK-NEXT: # %bb.3: # %if.end2
 ; CHECK-NEXT: incb 14(%rdi)
-; CHECK-NEXT: je .LBB4_5
+; CHECK-NEXT: jne .LBB4_4
+; CHECK-NEXT: # %bb.5: # %if.end4
+; CHECK-NEXT: jmp other # TAILCALL
 ; CHECK-NEXT: .LBB4_4: # %return
 ; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB4_5: # %if.end4
-; CHECK-NEXT: jmp other # TAILCALL
 entry:
 %s64 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 0
 %0 = load i64, i64* %s64, align 8

