author    | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-02-27 19:57:45 +0000
committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-02-27 19:57:45 +0000
commit    | 982224cfb8de39337e977219b3571c5a87aca4ce (patch)
tree      | bab1498538efc732c8ef02877d6e289e7729e7e4
parent    | b7692bc3e9ad2691fc07261904b88fb15f30696b (diff)
DAGCombiner: Don't unnecessarily swap operands in ReassociateOps
In the case where op = add, y = base_ptr, and x = offset, this
transform:
(op y, (op x, c1)) -> (op (op x, y), c1)
breaks the canonical form of add by putting the base pointer in the
second operand and the offset in the first.
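For example (a hypothetical instance of the pattern above, with c1 = 1024): the old
rewrite turned (add %base, (add %offset, 1024)) into (add (add %offset, %base), 1024),
leaving the offset as the first operand of the outer add; preserving the original
operand order gives (add (add %base, %offset), 1024), with the base pointer still first.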
This fix is important for the R600 target, because for some address
spaces the base pointer and the offset are stored in separate register
classes. The old pattern caused the ISel code for matching addressing
modes to put the base pointer and offset in the wrong register classes,
which required non-trivial code transformations to fix.
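(The new AMDGPU test added below exercises exactly this: all four buffer_store_dword
instructions must reuse the same vector-register address [[VADDR]] and the same scalar
registers [[SADDR]].)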
llvm-svn: 262148
-rw-r--r-- | llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp          | 4
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/dagcombine-reassociate-bug.ll | 33
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/shl_add_constant.ll           | 2
-rw-r--r-- | llvm/test/CodeGen/X86/add-nsw-sext.ll                  | 6
-rw-r--r-- | llvm/test/CodeGen/X86/combine-multiplies.ll            | 10
5 files changed, 44 insertions, 11 deletions
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 9178aefac49..c977811bbbf 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -846,9 +846,9 @@ SDValue DAGCombiner::ReassociateOps(unsigned Opc, SDLoc DL,
         return SDValue();
       }
       if (N1.hasOneUse()) {
-        // reassoc. (op y, (op x, c1)) -> (op (op x, y), c1) iff x+c1 has one
+        // reassoc. (op x, (op y, c1)) -> (op (op x, y), c1) iff x+c1 has one
         // use
-        SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N1.getOperand(0), N0);
+        SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N0, N1.getOperand(0));
         if (!OpNode.getNode())
           return SDValue();
         AddToWorklist(OpNode.getNode());
diff --git a/llvm/test/CodeGen/AMDGPU/dagcombine-reassociate-bug.ll b/llvm/test/CodeGen/AMDGPU/dagcombine-reassociate-bug.ll
new file mode 100644
index 00000000000..fc2834effb1
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/dagcombine-reassociate-bug.ll
@@ -0,0 +1,33 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
+
+; Test for a bug where DAGCombiner::ReassociateOps() was creating adds
+; with offset in the first operand and base pointers in the second.
+
+; CHECK-LABEL: {{^}}store_same_base_ptr:
+; CHECK: buffer_store_dword v{{[0-9]+}}, [[VADDR:v\[[0-9]+:[0-9]+\]]], [[SADDR:s\[[0-9]+:[0-9]+\]]]
+; CHECK: buffer_store_dword v{{[0-9]+}}, [[VADDR]], [[SADDR]]
+; CHECK: buffer_store_dword v{{[0-9]+}}, [[VADDR]], [[SADDR]]
+; CHECK: buffer_store_dword v{{[0-9]+}}, [[VADDR]], [[SADDR]]
+
+define void @store_same_base_ptr(i32 addrspace(1)* %out) {
+entry:
+  %id = call i32 @llvm.amdgcn.workitem.id.x() #0
+  %offset = sext i32 %id to i64
+  %offset0 = add i64 %offset, 1027
+  %ptr0 = getelementptr i32, i32 addrspace(1)* %out, i64 %offset0
+  store i32 3, i32 addrspace(1)* %ptr0
+  %offset1 = add i64 %offset, 1026
+  %ptr1 = getelementptr i32, i32 addrspace(1)* %out, i64 %offset1
+  store i32 2, i32 addrspace(1)* %ptr1
+  %offset2 = add i64 %offset, 1025
+  %ptr2 = getelementptr i32, i32 addrspace(1)* %out, i64 %offset2
+  store i32 1, i32 addrspace(1)* %ptr2
+  %offset3 = add i64 %offset, 1024
+  %ptr3 = getelementptr i32, i32 addrspace(1)* %out, i64 %offset3
+  store i32 0, i32 addrspace(1)* %ptr3
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+attributes #0 = { nounwind readnone }
diff --git a/llvm/test/CodeGen/AMDGPU/shl_add_constant.ll b/llvm/test/CodeGen/AMDGPU/shl_add_constant.ll
index ec17dbaaced..b1a3f8fbdc6 100644
--- a/llvm/test/CodeGen/AMDGPU/shl_add_constant.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl_add_constant.ll
@@ -73,7 +73,7 @@ define void @test_add_shl_add_constant(i32 addrspace(1)* %out, i32 %x, i32 %y) #
 ; SI-DAG: s_load_dword [[X:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
 ; SI-DAG: s_load_dword [[Y:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
 ; SI: s_lshl_b32 [[SHL3:s[0-9]+]], [[X]], 3
-; SI: s_add_i32 [[TMP:s[0-9]+]], [[SHL3]], [[Y]]
+; SI: s_add_i32 [[TMP:s[0-9]+]], [[Y]], [[SHL3]]
 ; SI: s_add_i32 [[RESULT:s[0-9]+]], [[TMP]], 0x3d8
 ; SI: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[RESULT]]
 ; SI: buffer_store_dword [[VRESULT]]
diff --git a/llvm/test/CodeGen/X86/add-nsw-sext.ll b/llvm/test/CodeGen/X86/add-nsw-sext.ll
index 0a6f6c315c1..658c58b3d61 100644
--- a/llvm/test/CodeGen/X86/add-nsw-sext.ll
+++ b/llvm/test/CodeGen/X86/add-nsw-sext.ll
@@ -25,7 +25,7 @@ define i64 @add_nsw_sext_add(i32 %i, i64 %x) {
 ; CHECK-LABEL: add_nsw_sext_add:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: movslq %edi, %rax
-; CHECK-NEXT: leaq 5(%rax,%rsi), %rax
+; CHECK-NEXT: leaq 5(%rsi,%rax), %rax
 ; CHECK-NEXT: retq
 
   %add = add nsw i32 %i, 5
@@ -72,7 +72,7 @@ define i8* @gep8(i32 %i, i8* %x) {
 ; CHECK-LABEL: gep8:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: movslq %edi, %rax
-; CHECK-NEXT: leaq 5(%rax,%rsi), %rax
+; CHECK-NEXT: leaq 5(%rsi,%rax), %rax
 ; CHECK-NEXT: retq
 
   %add = add nsw i32 %i, 5
@@ -127,7 +127,7 @@ define i128* @gep128(i32 %i, i128* %x) {
 ; CHECK: # BB#0:
 ; CHECK-NEXT: movslq %edi, %rax
 ; CHECK-NEXT: shlq $4, %rax
-; CHECK-NEXT: leaq 80(%rax,%rsi), %rax
+; CHECK-NEXT: leaq 80(%rsi,%rax), %rax
 ; CHECK-NEXT: retq
 
   %add = add nsw i32 %i, 5
diff --git a/llvm/test/CodeGen/X86/combine-multiplies.ll b/llvm/test/CodeGen/X86/combine-multiplies.ll
index 5e51edbf52f..15528cd0714 100644
--- a/llvm/test/CodeGen/X86/combine-multiplies.ll
+++ b/llvm/test/CodeGen/X86/combine-multiplies.ll
@@ -31,10 +31,10 @@
 ;
 ; CHECK-LABEL: testCombineMultiplies
 ; CHECK: imull $400, [[ARG1:%[a-z]+]], [[MUL:%[a-z]+]] # imm = 0x190
-; CHECK-NEXT: leal ([[MUL]],[[ARG2:%[a-z]+]]), [[LEA:%[a-z]+]]
+; CHECK-NEXT: leal ([[ARG2:%[a-z]+]],[[MUL]]), [[LEA:%[a-z]+]]
 ; CHECK-NEXT: movl $11, {{[0-9]+}}([[LEA]],[[ARG1]],4)
-; CHECK-NEXT: movl $22, {{[0-9]+}}([[MUL]],[[ARG2]])
-; CHECK-NEXT: movl $33, {{[0-9]+}}([[MUL]],[[ARG2]])
+; CHECK-NEXT: movl $22, {{[0-9]+}}([[ARG2]],[[MUL]])
+; CHECK-NEXT: movl $33, {{[0-9]+}}([[ARG2]],[[MUL]])
 ; CHECK: retl
 ;
@@ -109,7 +109,7 @@ entry:
 ; CHECK-NEXT: movdqa [[C242]], v2
 ; CHECK-NEXT: [[C726]], v3
 ; CHECK-NEXT: [[C11]], x
-; CHECK-NEXT: retl
+; CHECK-NEXT: retl
 
 @v2 = common global <4 x i32> zeroinitializer, align 16
 @v3 = common global <4 x i32> zeroinitializer, align 16
@@ -148,7 +148,7 @@ entry:
 ; CHECK-NEXT: movdqa [[C242]], v2
 ; CHECK-NEXT: [[C726]], v3
 ; CHECK-NEXT: [[C11]], x
-; CHECK-NEXT: retl
+; CHECK-NEXT: retl
 
 ; Function Attrs: nounwind
 define void @testCombineMultiplies_non_splat(<4 x i32> %v1) {
 entry:
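As a standalone illustration only (not part of the patch, and not LLVM's actual API),
the operand-order invariant the fix preserves can be modeled with a toy expression
tree; every name in this sketch is hypothetical:

```cpp
#include <cassert>
#include <memory>
#include <string>

// Toy stand-in for an SDNode: a labeled binary tree (hypothetical).
struct Expr {
  std::string label;              // "add" for interior nodes, a name for leaves
  std::shared_ptr<Expr> lhs, rhs; // null for leaves
};
using ExprPtr = std::shared_ptr<Expr>;

static ExprPtr leaf(const std::string &name) {
  return std::make_shared<Expr>(Expr{name, nullptr, nullptr});
}
static ExprPtr add(ExprPtr a, ExprPtr b) {
  return std::make_shared<Expr>(Expr{"add", std::move(a), std::move(b)});
}

// Models the fixed combine: (add x, (add y, c1)) -> (add (add x, y), c1).
// The first operand of the input (x, the base pointer in the motivating
// case) stays the first operand of the result's inner add.
static ExprPtr reassociate(ExprPtr n0, ExprPtr n1) {
  return add(add(std::move(n0), n1->lhs), n1->rhs);
}

int main() {
  ExprPtr base = leaf("base"), offset = leaf("offset"), c1 = leaf("1024");
  ExprPtr out = reassociate(base, add(offset, c1));
  // Canonical order preserved: base first, offset second, constant last.
  assert(out->lhs->lhs->label == "base");
  assert(out->lhs->rhs->label == "offset");
  assert(out->rhs->label == "1024");
  return 0;
}
```

The new dagcombine-reassociate-bug.ll test checks the same property at the
machine-code level, where all four stores must share the same base registers.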