author: Sanjay Patel <spatel@rotateright.com> 2018-09-02 14:22:54 +0000
committer: Sanjay Patel <spatel@rotateright.com> 2018-09-02 14:22:54 +0000
commit: ca36eb4e33e487d8ae0e77fd38218a73c8656433 (patch)
tree: 78e3e00ce7a52cf3d3ef67a5a1a76728de37c15b /llvm/test/Transforms/Reassociate
parent: d7a62444754e095377f84731658b9d8a5ecae471 (diff)
[Reassociate] swap binop operands to increase factoring potential
If we have a pair of binops feeding another pair of binops, rearrange the operands so the matching pair is together, because that allows easy factorization folds to happen in instcombine:

((X << S) & Y) & (Z << S)
--> ((X << S) & (Z << S)) & Y   (reassociation)
--> ((X & Z) << S) & Y          (factorize the shift out of the 'and' ops)

This is part of solving PR37098: https://bugs.llvm.org/show_bug.cgi?id=37098

Note that there's an instcombine version of this patch attached there, but we're trying to give instcombine less responsibility in order to improve compile-time efficiency.

For reasons I still don't completely understand, reassociate does this kind of transform sometimes, but misses everything in my motivating cases.

On its own, this patch glues an independent cleanup chunk onto the end of the existing RewriteExprTree() loop. We can build on it and do something stronger to better order the full expression tree, like D40049. That might be an alternative to the proposal to add a separate reassociation pass like D41574.

Differential Revision: https://reviews.llvm.org/D45842

llvm-svn: 341288
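To make the two-step chain above concrete, here is a minimal sketch in LLVM IR. The function name @factor_shl and the commented post-instcombine result are illustrative assumptions, not part of this patch; the exact output depends on the pass pipeline and LLVM version (e.g. opt -reassociate -instcombine).

; Input: ((X << S) & Y) & (Z << S). The operands of the two 'and' ops
; are mismatched, so the common shift cannot be factored directly.
define i8 @factor_shl(i8 %x, i8 %y, i8 %z, i8 %shamt) {
  %sx = shl i8 %x, %shamt   ; X << S
  %sz = shl i8 %z, %shamt   ; Z << S
  %a = and i8 %sx, %y       ; (X << S) & Y
  %r = and i8 %a, %sz       ; ((X << S) & Y) & (Z << S)
  ret i8 %r
}
; After reassociate pairs %sx with %sz, instcombine can factor the shift:
;   %xz = and i8 %x, %z
;   %s = shl i8 %xz, %shamt
;   %r = and i8 %s, %y      ; ((X & Z) << S) & Y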
Diffstat (limited to 'llvm/test/Transforms/Reassociate')
-rw-r--r--  llvm/test/Transforms/Reassociate/matching-binops.ll | 80
1 file changed, 45 insertions(+), 35 deletions(-)
diff --git a/llvm/test/Transforms/Reassociate/matching-binops.ll b/llvm/test/Transforms/Reassociate/matching-binops.ll
index 4771e3c8e1c..23cb79df21c 100644
--- a/llvm/test/Transforms/Reassociate/matching-binops.ll
+++ b/llvm/test/Transforms/Reassociate/matching-binops.ll
@@ -16,8 +16,8 @@ define i8 @and_shl(i8 %x, i8 %y, i8 %z, i8 %shamt) {
; CHECK-LABEL: @and_shl(
; CHECK-NEXT: [[SX:%.*]] = shl i8 [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT: [[SY:%.*]] = shl i8 [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT: [[A:%.*]] = and i8 [[SX]], [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = and i8 [[A]], [[SY]]
+; CHECK-NEXT: [[A:%.*]] = and i8 [[SX]], [[SY]]
+; CHECK-NEXT: [[R:%.*]] = and i8 [[A]], [[Z:%.*]]
; CHECK-NEXT: ret i8 [[R]]
;
%sx = shl i8 %x, %shamt
@@ -31,8 +31,8 @@ define i8 @or_shl(i8 %x, i8 %y, i8 %z, i8 %shamt) {
; CHECK-LABEL: @or_shl(
; CHECK-NEXT: [[SX:%.*]] = shl i8 [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT: [[SY:%.*]] = shl i8 [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT: [[A:%.*]] = or i8 [[SX]], [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = or i8 [[A]], [[SY]]
+; CHECK-NEXT: [[A:%.*]] = or i8 [[SX]], [[SY]]
+; CHECK-NEXT: [[R:%.*]] = or i8 [[A]], [[Z:%.*]]
; CHECK-NEXT: ret i8 [[R]]
;
%sx = shl i8 %x, %shamt
@@ -46,8 +46,8 @@ define i8 @xor_shl(i8 %x, i8 %y, i8 %z, i8 %shamt) {
; CHECK-LABEL: @xor_shl(
; CHECK-NEXT: [[SX:%.*]] = shl i8 [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT: [[SY:%.*]] = shl i8 [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT: [[A:%.*]] = xor i8 [[SX]], [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = xor i8 [[A]], [[SY]]
+; CHECK-NEXT: [[A:%.*]] = xor i8 [[SX]], [[SY]]
+; CHECK-NEXT: [[R:%.*]] = xor i8 [[A]], [[Z:%.*]]
; CHECK-NEXT: ret i8 [[R]]
;
%sx = shl i8 %x, %shamt
@@ -61,8 +61,8 @@ define i8 @and_lshr(i8 %x, i8 %y, i8 %z, i8 %shamt) {
; CHECK-LABEL: @and_lshr(
; CHECK-NEXT: [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT: [[SY:%.*]] = lshr i8 [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT: [[A:%.*]] = and i8 [[SX]], [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = and i8 [[A]], [[SY]]
+; CHECK-NEXT: [[A:%.*]] = and i8 [[SX]], [[SY]]
+; CHECK-NEXT: [[R:%.*]] = and i8 [[A]], [[Z:%.*]]
; CHECK-NEXT: ret i8 [[R]]
;
%sx = lshr i8 %x, %shamt
@@ -76,8 +76,8 @@ define i8 @or_lshr(i8 %x, i8 %y, i8 %z, i8 %shamt) {
; CHECK-LABEL: @or_lshr(
; CHECK-NEXT: [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT: [[SY:%.*]] = lshr i8 [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT: [[A:%.*]] = or i8 [[SX]], [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = or i8 [[A]], [[SY]]
+; CHECK-NEXT: [[A:%.*]] = or i8 [[SX]], [[SY]]
+; CHECK-NEXT: [[R:%.*]] = or i8 [[A]], [[Z:%.*]]
; CHECK-NEXT: ret i8 [[R]]
;
%sx = lshr i8 %x, %shamt
@@ -91,8 +91,8 @@ define i8 @xor_lshr(i8 %x, i8 %y, i8 %z, i8 %shamt) {
; CHECK-LABEL: @xor_lshr(
; CHECK-NEXT: [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT: [[SY:%.*]] = lshr i8 [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT: [[A:%.*]] = xor i8 [[SX]], [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = xor i8 [[A]], [[SY]]
+; CHECK-NEXT: [[A:%.*]] = xor i8 [[SX]], [[SY]]
+; CHECK-NEXT: [[R:%.*]] = xor i8 [[A]], [[Z:%.*]]
; CHECK-NEXT: ret i8 [[R]]
;
%sx = lshr i8 %x, %shamt
@@ -106,8 +106,8 @@ define i8 @and_ashr(i8 %x, i8 %y, i8 %z, i8 %shamt) {
; CHECK-LABEL: @and_ashr(
; CHECK-NEXT: [[SX:%.*]] = ashr i8 [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT: [[SY:%.*]] = ashr i8 [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT: [[A:%.*]] = and i8 [[SX]], [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = and i8 [[A]], [[SY]]
+; CHECK-NEXT: [[A:%.*]] = and i8 [[SX]], [[SY]]
+; CHECK-NEXT: [[R:%.*]] = and i8 [[A]], [[Z:%.*]]
; CHECK-NEXT: ret i8 [[R]]
;
%sx = ashr i8 %x, %shamt
@@ -121,8 +121,8 @@ define i8 @or_ashr(i8 %x, i8 %y, i8 %z, i8 %shamt) {
; CHECK-LABEL: @or_ashr(
; CHECK-NEXT: [[SX:%.*]] = ashr i8 [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT: [[SY:%.*]] = ashr i8 [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT: [[A:%.*]] = or i8 [[SX]], [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = or i8 [[A]], [[SY]]
+; CHECK-NEXT: [[A:%.*]] = or i8 [[SX]], [[SY]]
+; CHECK-NEXT: [[R:%.*]] = or i8 [[A]], [[Z:%.*]]
; CHECK-NEXT: ret i8 [[R]]
;
%sx = ashr i8 %x, %shamt
@@ -138,8 +138,8 @@ define <2 x i8> @xor_ashr(<2 x i8> %x, <2 x i8> %y, <2 x i8> %z, <2 x i8> %shamt
; CHECK-LABEL: @xor_ashr(
; CHECK-NEXT: [[SX:%.*]] = ashr <2 x i8> [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT: [[SY:%.*]] = ashr <2 x i8> [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT: [[A:%.*]] = xor <2 x i8> [[SX]], [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = xor <2 x i8> [[A]], [[SY]]
+; CHECK-NEXT: [[A:%.*]] = xor <2 x i8> [[SX]], [[SY]]
+; CHECK-NEXT: [[R:%.*]] = xor <2 x i8> [[A]], [[Z:%.*]]
; CHECK-NEXT: ret <2 x i8> [[R]]
;
%sx = ashr <2 x i8> %x, %shamt
@@ -203,19 +203,29 @@ define i8 @xor_lshr_multiuse(i8 %x, i8 %y, i8 %z, i8 %shamt) {
}
; Math ops work too. Change instruction positions too to verify placement.
+; We only care about extra uses of the first associative value - in this
+; case, it's %a. Everything else can have extra uses.
+
+declare void @use(i8)
define i8 @add_lshr(i8 %x, i8 %y, i8 %z, i8 %shamt) {
; CHECK-LABEL: @add_lshr(
; CHECK-NEXT: [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]]
-; CHECK-NEXT: [[A:%.*]] = add i8 [[SX]], [[Z:%.*]]
; CHECK-NEXT: [[SY:%.*]] = lshr i8 [[Y:%.*]], [[SHAMT]]
-; CHECK-NEXT: [[R:%.*]] = add i8 [[A]], [[SY]]
-; CHECK-NEXT: ret i8 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[SX]], [[SY]]
+; CHECK-NEXT: [[TMP2:%.*]] = add i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: call void @use(i8 [[SX]])
+; CHECK-NEXT: call void @use(i8 [[SY]])
+; CHECK-NEXT: call void @use(i8 [[TMP2]])
+; CHECK-NEXT: ret i8 [[TMP2]]
;
%sx = lshr i8 %x, %shamt
%a = add i8 %sx, %z
%sy = lshr i8 %y, %shamt
%r = add i8 %a, %sy
+ call void @use(i8 %sx)
+ call void @use(i8 %sy)
+ call void @use(i8 %r)
ret i8 %r
}
@@ -225,8 +235,8 @@ define i8 @mul_sub(i8 %x, i8 %y, i8 %z, i8 %m) {
; CHECK-LABEL: @mul_sub(
; CHECK-NEXT: [[SX:%.*]] = sub i8 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[SY:%.*]] = sub i8 [[Y:%.*]], [[M]]
-; CHECK-NEXT: [[A:%.*]] = mul nsw i8 [[SX]], [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = mul nuw i8 [[A]], [[SY]]
+; CHECK-NEXT: [[A:%.*]] = mul i8 [[SX]], [[SY]]
+; CHECK-NEXT: [[R:%.*]] = mul i8 [[A]], [[Z:%.*]]
; CHECK-NEXT: ret i8 [[R]]
;
%sx = sub i8 %x, %m
@@ -239,9 +249,9 @@ define i8 @mul_sub(i8 %x, i8 %y, i8 %z, i8 %m) {
define i8 @add_mul(i8 %x, i8 %y, i8 %z, i8 %m) {
; CHECK-LABEL: @add_mul(
; CHECK-NEXT: [[SX:%.*]] = mul nuw i8 [[X:%.*]], 42
-; CHECK-NEXT: [[A:%.*]] = add nuw i8 [[Z:%.*]], [[SX]]
; CHECK-NEXT: [[SY:%.*]] = mul nsw i8 [[M:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[R:%.*]] = add nsw i8 [[A]], [[SY]]
+; CHECK-NEXT: [[A:%.*]] = add i8 [[SX]], [[SY]]
+; CHECK-NEXT: [[R:%.*]] = add i8 [[A]], [[Z:%.*]]
; CHECK-NEXT: ret i8 [[R]]
;
%sx = mul nuw i8 %x, 42
@@ -257,9 +267,9 @@ define i8 @add_mul(i8 %x, i8 %y, i8 %z, i8 %m) {
define float @fadd_fmul(float %x, float %y, float %z, float %m) {
; CHECK-LABEL: @fadd_fmul(
; CHECK-NEXT: [[SX:%.*]] = fmul float [[X:%.*]], [[M:%.*]]
-; CHECK-NEXT: [[A:%.*]] = fadd fast float [[SX]], [[Z:%.*]]
; CHECK-NEXT: [[SY:%.*]] = fmul float [[Y:%.*]], [[M]]
-; CHECK-NEXT: [[R:%.*]] = fadd fast float [[A]], [[SY]]
+; CHECK-NEXT: [[A:%.*]] = fadd fast float [[SX]], [[SY]]
+; CHECK-NEXT: [[R:%.*]] = fadd fast float [[A]], [[Z:%.*]]
; CHECK-NEXT: ret float [[R]]
;
%sx = fmul float %x, %m
@@ -273,8 +283,8 @@ define float @fmul_fdiv(float %x, float %y, float %z, float %m) {
; CHECK-LABEL: @fmul_fdiv(
; CHECK-NEXT: [[SX:%.*]] = fdiv float [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[SY:%.*]] = fdiv float [[Y:%.*]], 4.200000e+01
-; CHECK-NEXT: [[A:%.*]] = fmul fast float [[SY]], [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = fmul fast float [[A]], [[SX]]
+; CHECK-NEXT: [[A:%.*]] = fmul fast float [[SY]], [[SX]]
+; CHECK-NEXT: [[R:%.*]] = fmul fast float [[A]], [[Z:%.*]]
; CHECK-NEXT: ret float [[R]]
;
%sx = fdiv float %x, %m
@@ -284,7 +294,7 @@ define float @fmul_fdiv(float %x, float %y, float %z, float %m) {
ret float %r
}
-; Verify that debug info for modified instructions gets discarded (references become undef).
+; Verify that debug info for modified instructions is not invalid.
define i32 @and_shl_dbg(i32 %x, i32 %y, i32 %z, i32 %shamt) {
; CHECK-LABEL: @and_shl_dbg(
@@ -296,11 +306,11 @@ define i32 @and_shl_dbg(i32 %x, i32 %y, i32 %z, i32 %shamt) {
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 [[SHL]], metadata !16, metadata !DIExpression()), !dbg !25
; CHECK-NEXT: [[SHL1:%.*]] = shl i32 [[Y]], [[SHAMT]], !dbg !26
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 [[SHL1]], metadata !17, metadata !DIExpression()), !dbg !27
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHL]], [[Z]], !dbg !28
-; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 [[AND]], metadata !18, metadata !DIExpression()), !dbg !29
-; CHECK-NEXT: [[AND2:%.*]] = and i32 [[AND]], [[SHL1]], !dbg !30
-; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 [[AND2]], metadata !19, metadata !DIExpression()), !dbg !31
-; CHECK-NEXT: ret i32 [[AND2]], !dbg !32
+; CHECK-NEXT: call void @llvm.dbg.value(metadata !2, metadata !18, metadata !DIExpression()), !dbg !28
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[SHL]], [[SHL1]], !dbg !29
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[Z]], !dbg !29
+; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 [[TMP2]], metadata !19, metadata !DIExpression()), !dbg !30
+; CHECK-NEXT: ret i32 [[TMP2]], !dbg !31
;
call void @llvm.dbg.value(metadata i32 %x, metadata !13, metadata !DIExpression()), !dbg !21
call void @llvm.dbg.value(metadata i32 %y, metadata !14, metadata !DIExpression()), !dbg !22