diff options
author | Nirav Dave <niravd@google.com> | 2017-04-24 15:37:20 +0000 |
---|---|---|
committer | Nirav Dave <niravd@google.com> | 2017-04-24 15:37:20 +0000 |
commit | c799f3a809fce45d61f8600a15c251bc8ceea25f (patch) | |
tree | b67924bf6025634659adf8292b0a427dba48d9f9 | |
parent | b4f6a95680333f735836f3be9b43299e6c3ab1e8 (diff) | |
download | bcm5719-llvm-c799f3a809fce45d61f8600a15c251bc8ceea25f.tar.gz bcm5719-llvm-c799f3a809fce45d61f8600a15c251bc8ceea25f.zip |
[SDAG] Teach Chain Analysis about BaseIndexOffset addressing.
While we use BaseIndexOffset in FindBetterNeighborChains to
appropriately recognize that two addresses are almost the same and
should be improved concurrently, we do not use it in isAlias, relying
on the non-index-aware FindBaseOffset instead. Adding a BaseIndexOffset
check in isAlias should allow indexed stores to be merged.
FindBaseOffset will be excised in a subsequent patch.
Reviewers: jyknight, aditya_nandakumar, bogner
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D31987
llvm-svn: 301187
-rw-r--r-- | llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 15 | ||||
-rw-r--r-- | llvm/test/CodeGen/X86/i256-add.ll | 49 | ||||
-rw-r--r-- | llvm/test/CodeGen/X86/merge_store.ll | 11 |
3 files changed, 39 insertions, 36 deletions
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 4702d63cb61..91a56f81e97 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -16088,6 +16088,19 @@ bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const { if (Op1->isInvariant() && Op0->writeMem()) return false; + unsigned NumBytes0 = Op0->getMemoryVT().getSizeInBits() >> 3; + unsigned NumBytes1 = Op1->getMemoryVT().getSizeInBits() >> 3; + + // Check for BaseIndexOffset matching. + BaseIndexOffset BasePtr0 = BaseIndexOffset::match(Op0->getBasePtr(), DAG); + BaseIndexOffset BasePtr1 = BaseIndexOffset::match(Op1->getBasePtr(), DAG); + if (BasePtr0.equalBaseIndex(BasePtr1)) + return !((BasePtr0.Offset + NumBytes0 <= BasePtr1.Offset) || + (BasePtr1.Offset + NumBytes1 <= BasePtr0.Offset)); + + // FIXME: findBaseOffset and ConstantValue/GlobalValue/FrameIndex analysis + // modified to use BaseIndexOffset. + // Gather base node and offset information. SDValue Base0, Base1; int64_t Offset0, Offset1; @@ -16099,8 +16112,6 @@ bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const { Base1, Offset1, GV1, CV1); // If they have the same base address, then check to see if they overlap. 
- unsigned NumBytes0 = Op0->getMemoryVT().getSizeInBits() >> 3; - unsigned NumBytes1 = Op1->getMemoryVT().getSizeInBits() >> 3; if (Base0 == Base1 || (GV0 && (GV0 == GV1)) || (CV0 && (CV0 == CV1))) return !((Offset0 + NumBytes0) <= Offset1 || (Offset1 + NumBytes1) <= Offset0); diff --git a/llvm/test/CodeGen/X86/i256-add.ll b/llvm/test/CodeGen/X86/i256-add.ll index a745f652d06..7b2656897e0 100644 --- a/llvm/test/CodeGen/X86/i256-add.ll +++ b/llvm/test/CodeGen/X86/i256-add.ll @@ -12,34 +12,35 @@ define void @add(i256* %p, i256* %q) nounwind { ; X32-NEXT: subl $12, %esp ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X32-NEXT: movl 8(%ecx), %edx -; X32-NEXT: movl (%ecx), %ebx -; X32-NEXT: movl 4(%ecx), %edi +; X32-NEXT: movl 8(%ecx), %edi +; X32-NEXT: movl (%ecx), %edx +; X32-NEXT: movl 4(%ecx), %ebx ; X32-NEXT: movl 28(%eax), %esi ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill ; X32-NEXT: movl 24(%eax), %ebp -; X32-NEXT: addl (%eax), %ebx -; X32-NEXT: adcl 4(%eax), %edi -; X32-NEXT: adcl 8(%eax), %edx +; X32-NEXT: addl (%eax), %edx ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl 20(%eax), %esi +; X32-NEXT: adcl 4(%eax), %ebx +; X32-NEXT: adcl 8(%eax), %edi +; X32-NEXT: movl %edi, (%esp) # 4-byte Spill +; X32-NEXT: movl 20(%eax), %edi ; X32-NEXT: movl 12(%eax), %edx -; X32-NEXT: movl 16(%eax), %eax +; X32-NEXT: movl 16(%eax), %esi ; X32-NEXT: adcl 12(%ecx), %edx -; X32-NEXT: adcl 16(%ecx), %eax -; X32-NEXT: adcl 20(%ecx), %esi -; X32-NEXT: adcl 24(%ecx), %ebp -; X32-NEXT: movl %ebp, (%esp) # 4-byte Spill +; X32-NEXT: adcl 16(%ecx), %esi +; X32-NEXT: adcl 20(%ecx), %edi +; X32-NEXT: movl %ebp, %eax +; X32-NEXT: adcl 24(%ecx), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebp # 4-byte Reload ; X32-NEXT: adcl %ebp, 28(%ecx) +; X32-NEXT: movl (%esp), %ebp # 4-byte Reload +; X32-NEXT: movl %ebp, 8(%ecx) +; X32-NEXT: movl %ebx, 4(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload ; X32-NEXT: movl 
%ebx, (%ecx) -; X32-NEXT: movl %edi, 4(%ecx) -; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload -; X32-NEXT: movl %edi, 8(%ecx) ; X32-NEXT: movl %edx, 12(%ecx) -; X32-NEXT: movl %eax, 16(%ecx) -; X32-NEXT: movl %esi, 20(%ecx) -; X32-NEXT: movl (%esp), %eax # 4-byte Reload +; X32-NEXT: movl %esi, 16(%ecx) +; X32-NEXT: movl %edi, 20(%ecx) ; X32-NEXT: movl %eax, 24(%ecx) ; X32-NEXT: addl $12, %esp ; X32-NEXT: popl %esi @@ -58,9 +59,9 @@ define void @add(i256* %p, i256* %q) nounwind { ; X64-NEXT: adcq 8(%rsi), %rdx ; X64-NEXT: adcq 16(%rsi), %rax ; X64-NEXT: adcq %r8, 24(%rdi) -; X64-NEXT: movq %rcx, (%rdi) -; X64-NEXT: movq %rdx, 8(%rdi) ; X64-NEXT: movq %rax, 16(%rdi) +; X64-NEXT: movq %rdx, 8(%rdi) +; X64-NEXT: movq %rcx, (%rdi) ; X64-NEXT: retq %a = load i256, i256* %p %b = load i256, i256* %q @@ -96,9 +97,9 @@ define void @sub(i256* %p, i256* %q) nounwind { ; X32-NEXT: sbbl 24(%esi), %eax ; X32-NEXT: movl 28(%esi), %esi ; X32-NEXT: sbbl %esi, 28(%ecx) -; X32-NEXT: movl %ebx, (%ecx) -; X32-NEXT: movl %ebp, 4(%ecx) ; X32-NEXT: movl %edi, 8(%ecx) +; X32-NEXT: movl %ebp, 4(%ecx) +; X32-NEXT: movl %ebx, (%ecx) ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload ; X32-NEXT: movl %esi, 12(%ecx) ; X32-NEXT: movl (%esp), %esi # 4-byte Reload @@ -122,9 +123,9 @@ define void @sub(i256* %p, i256* %q) nounwind { ; X64-NEXT: sbbq 8(%rsi), %rdx ; X64-NEXT: sbbq 16(%rsi), %rax ; X64-NEXT: sbbq %r8, 24(%rdi) -; X64-NEXT: movq %rcx, (%rdi) -; X64-NEXT: movq %rdx, 8(%rdi) ; X64-NEXT: movq %rax, 16(%rdi) +; X64-NEXT: movq %rdx, 8(%rdi) +; X64-NEXT: movq %rcx, (%rdi) ; X64-NEXT: retq %a = load i256, i256* %p %b = load i256, i256* %q diff --git a/llvm/test/CodeGen/X86/merge_store.ll b/llvm/test/CodeGen/X86/merge_store.ll index dcb7bd010e5..f4c4c6d3606 100644 --- a/llvm/test/CodeGen/X86/merge_store.ll +++ b/llvm/test/CodeGen/X86/merge_store.ll @@ -29,17 +29,8 @@ entry: ret void } - - ;; CHECK-LABEL: indexed-store-merge - -;; We should be able to merge the 4 consecutive stores. 
-;; FIXMECHECK: movl $0, 2(%rsi,%rdi) - -;; CHECK: movb $0, 2(%rsi,%rdi) -;; CHECK: movb $0, 3(%rsi,%rdi) -;; CHECK: movb $0, 4(%rsi,%rdi) -;; CHECK: movb $0, 5(%rsi,%rdi) +;; CHECK: movl $0, 2(%rsi,%rdi) ;; CHECK: movb $0, (%rsi) define void @indexed-store-merge(i64 %p, i8* %v) { entry: |