author    Sanjay Patel <spatel@rotateright.com>    2015-11-09 21:16:49 +0000
committer Sanjay Patel <spatel@rotateright.com>    2015-11-09 21:16:49 +0000
commit    32538d68117772c93c059a35523ea974397a8c56 (patch)
tree      8866ae4fbcd4832e29a1544ef0674a6b15d156f3 /llvm/test/CodeGen/X86/or-lea.ll
parent    65bc2b12233248f086461bea4d57bd2920c624c8 (diff)
[x86] try harder to match bitwise 'or' into an LEA
The motivation for this patch starts with the epic fail example in PR18007:
https://llvm.org/bugs/show_bug.cgi?id=18007
...unfortunately, this patch makes no difference for that case, but it solves some simpler cases. We'll get there some day. :)

The current 'or' matching code was using computeKnownBits() via isBaseWithConstantOffset() -> MaskedValueIsZero(), but that's an unnecessarily limited use. We can do more by copying the logic in ValueTracking's haveNoCommonBitsSet(), so we can treat the 'or' as if it was an 'add'.

There's a TODO comment here because we should lift the bit-checking logic into a helper function, so it's not duplicated in DAGCombiner.

An example of the better LEA matching:

leal (%rdi,%rdi), %eax
andl $1, %esi
orl %esi, %eax

Becomes:

andl $1, %esi
leal (%rsi,%rdi,2), %eax

Differential Revision: http://reviews.llvm.org/D13956

llvm-svn: 252515
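As a standalone illustration (not the DAGCombiner code this patch actually touches), here is a minimal C++ sketch of the identity the transform relies on: when two values share no set bits, their bitwise 'or' produces no carries and therefore equals their sum, which is what lets `(x << 1) | (y & 1)` fold into a single scaled LEA. The helper names `noCommonBits` and `or_shift1_and1` are made up for this example.

#include <cassert>
#include <cstdint>

// If a and b share no set bits, adding them generates no carries,
// so a | b == a + b. The patch uses this to treat the 'or' as an
// 'add' and match it into an LEA addressing mode.
static bool noCommonBits(uint32_t a, uint32_t b) { return (a & b) == 0; }

// Mirrors the or_shift1_and1 test pattern from the diff below.
static uint32_t or_shift1_and1(uint32_t x, uint32_t y) {
  uint32_t shl = x << 1;  // bit 0 is always clear after the shift
  uint32_t msk = y & 1;   // only bit 0 can be set after the mask
  assert(noCommonBits(shl, msk));
  return shl | msk;       // same value as shl + msk -> leal (%rsi,%rdi,2), %eax
}

int main() {
  // Sanity check: the 'or' form and the 'add' form agree.
  for (uint32_t x = 0; x < 1000; ++x)
    for (uint32_t y = 0; y < 4; ++y)
      assert(or_shift1_and1(x, y) == (x << 1) + (y & 1));
  return 0;
}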
Diffstat (limited to 'llvm/test/CodeGen/X86/or-lea.ll')
-rw-r--r--  llvm/test/CodeGen/X86/or-lea.ll | 18
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/llvm/test/CodeGen/X86/or-lea.ll b/llvm/test/CodeGen/X86/or-lea.ll
index bd117207e6c..f28cc8569cf 100644
--- a/llvm/test/CodeGen/X86/or-lea.ll
+++ b/llvm/test/CodeGen/X86/or-lea.ll
@@ -8,9 +8,8 @@
define i32 @or_shift1_and1(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift1_and1:
; CHECK: # BB#0:
-; CHECK-NEXT: addl %edi, %edi
; CHECK-NEXT: andl $1, %esi
-; CHECK-NEXT: leal (%rsi,%rdi), %eax
+; CHECK-NEXT: leal (%rsi,%rdi,2), %eax
; CHECK-NEXT: retq
%shl = shl i32 %x, 1
@@ -22,9 +21,8 @@ define i32 @or_shift1_and1(i32 %x, i32 %y) {
define i32 @or_shift1_and1_swapped(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift1_and1_swapped:
; CHECK: # BB#0:
-; CHECK-NEXT: leal (%rdi,%rdi), %eax
; CHECK-NEXT: andl $1, %esi
-; CHECK-NEXT: orl %esi, %eax
+; CHECK-NEXT: leal (%rsi,%rdi,2), %eax
; CHECK-NEXT: retq
%shl = shl i32 %x, 1
@@ -36,9 +34,8 @@ define i32 @or_shift1_and1_swapped(i32 %x, i32 %y) {
define i32 @or_shift2_and1(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift2_and1:
; CHECK: # BB#0:
-; CHECK-NEXT: leal (,%rdi,4), %eax
; CHECK-NEXT: andl $1, %esi
-; CHECK-NEXT: orl %esi, %eax
+; CHECK-NEXT: leal (%rsi,%rdi,4), %eax
; CHECK-NEXT: retq
%shl = shl i32 %x, 2
@@ -50,9 +47,8 @@ define i32 @or_shift2_and1(i32 %x, i32 %y) {
define i32 @or_shift3_and1(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift3_and1:
; CHECK: # BB#0:
-; CHECK-NEXT: leal (,%rdi,8), %eax
; CHECK-NEXT: andl $1, %esi
-; CHECK-NEXT: orl %esi, %eax
+; CHECK-NEXT: leal (%rsi,%rdi,8), %eax
; CHECK-NEXT: retq
%shl = shl i32 %x, 3
@@ -64,9 +60,8 @@ define i32 @or_shift3_and1(i32 %x, i32 %y) {
define i32 @or_shift3_and7(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift3_and7:
; CHECK: # BB#0:
-; CHECK-NEXT: leal (,%rdi,8), %eax
; CHECK-NEXT: andl $7, %esi
-; CHECK-NEXT: orl %esi, %eax
+; CHECK-NEXT: leal (%rsi,%rdi,8), %eax
; CHECK-NEXT: retq
%shl = shl i32 %x, 3
@@ -112,9 +107,8 @@ define i32 @or_shift3_and8(i32 %x, i32 %y) {
define i64 @or_shift1_and1_64(i64 %x, i64 %y) {
; CHECK-LABEL: or_shift1_and1_64:
; CHECK: # BB#0:
-; CHECK-NEXT: addq %rdi, %rdi
; CHECK-NEXT: andl $1, %esi
-; CHECK-NEXT: leaq (%rsi,%rdi), %rax
+; CHECK-NEXT: leaq (%rsi,%rdi,2), %rax
; CHECK-NEXT: retq
%shl = shl i64 %x, 1