summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r-- llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp | 9
-rw-r--r-- llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-f.ll | 16
2 files changed, 13 insertions, 12 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 48e9cfea9b3..2427d795c47 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -75,11 +75,12 @@ reassociateShiftAmtsOfTwoSameDirectionShifts(BinaryOperator *Sh0,
// c) (x & (-1 >> MaskShAmt)) << ShiftShAmt
// d) (x & ((-1 << MaskShAmt) >> MaskShAmt)) << ShiftShAmt
// e) ((x << MaskShAmt) l>> MaskShAmt) << ShiftShAmt
+// f) ((x << MaskShAmt) a>> MaskShAmt) << ShiftShAmt
// All these patterns can be simplified to just:
// x << ShiftShAmt
// iff:
-// a,b) (MaskShAmt+ShiftShAmt) u>= bitwidth(x)
-// c,d,e) (ShiftShAmt-MaskShAmt) s>= 0 (i.e. ShiftShAmt u>= MaskShAmt)
+// a,b) (MaskShAmt+ShiftShAmt) u>= bitwidth(x)
+// c,d,e,f) (ShiftShAmt-MaskShAmt) s>= 0 (i.e. ShiftShAmt u>= MaskShAmt)
static Instruction *
dropRedundantMaskingOfLeftShiftInput(BinaryOperator *OuterShift,
const SimplifyQuery &SQ) {
@@ -117,8 +118,8 @@ dropRedundantMaskingOfLeftShiftInput(BinaryOperator *OuterShift,
return nullptr;
// All good, we can do this fold.
} else if (match(Masked, m_c_And(m_CombineOr(MaskC, MaskD), m_Value(X))) ||
- match(Masked, m_LShr(m_Shl(m_Value(X), m_Value(MaskShAmt)),
- m_Deferred(MaskShAmt)))) {
+ match(Masked, m_Shr(m_Shl(m_Value(X), m_Value(MaskShAmt)),
+ m_Deferred(MaskShAmt)))) {
// Can we simplify (ShiftShAmt-MaskShAmt) ?
Value *ShAmtsDiff =
SimplifySubInst(ShiftShAmt, MaskShAmt, /*IsNSW=*/false, /*IsNUW=*/false,
diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-f.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-f.ll
index cfdbe39c5cf..0e1e56869f0 100644
--- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-f.ll
+++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-f.ll
@@ -21,7 +21,7 @@ define i32 @t0_basic(i32 %x, i32 %nbits) {
; CHECK-NEXT: [[T1:%.*]] = ashr i32 [[T0]], [[NBITS]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
-; CHECK-NEXT: [[T2:%.*]] = shl i32 [[T1]], [[NBITS]]
+; CHECK-NEXT: [[T2:%.*]] = shl i32 [[X]], [[NBITS]]
; CHECK-NEXT: ret i32 [[T2]]
;
%t0 = shl i32 %x, %nbits
@@ -40,7 +40,7 @@ define i32 @t1_bigger_shift(i32 %x, i32 %nbits) {
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: call void @use32(i32 [[T2]])
-; CHECK-NEXT: [[T3:%.*]] = shl i32 [[T1]], [[T2]]
+; CHECK-NEXT: [[T3:%.*]] = shl i32 [[X]], [[T2]]
; CHECK-NEXT: ret i32 [[T3]]
;
%t0 = shl i32 %x, %nbits
@@ -65,7 +65,7 @@ define <3 x i32> @t2_vec_splat(<3 x i32> %x, <3 x i32> %nbits) {
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T0]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T1]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T2]])
-; CHECK-NEXT: [[T3:%.*]] = shl <3 x i32> [[T1]], [[T2]]
+; CHECK-NEXT: [[T3:%.*]] = shl <3 x i32> [[X]], [[T2]]
; CHECK-NEXT: ret <3 x i32> [[T3]]
;
%t0 = shl <3 x i32> %x, %nbits
@@ -86,7 +86,7 @@ define <3 x i32> @t3_vec_nonsplat(<3 x i32> %x, <3 x i32> %nbits) {
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T0]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T1]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T2]])
-; CHECK-NEXT: [[T3:%.*]] = shl <3 x i32> [[T1]], [[T2]]
+; CHECK-NEXT: [[T3:%.*]] = shl <3 x i32> [[X]], [[T2]]
; CHECK-NEXT: ret <3 x i32> [[T3]]
;
%t0 = shl <3 x i32> %x, %nbits
@@ -107,7 +107,7 @@ define <3 x i32> @t4_vec_undef(<3 x i32> %x, <3 x i32> %nbits) {
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T0]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T1]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T2]])
-; CHECK-NEXT: [[T3:%.*]] = shl <3 x i32> [[T1]], [[T2]]
+; CHECK-NEXT: [[T3:%.*]] = shl <3 x i32> [[X]], [[T2]]
; CHECK-NEXT: ret <3 x i32> [[T3]]
;
%t0 = shl <3 x i32> %x, %nbits
@@ -128,7 +128,7 @@ define i32 @t5_nuw(i32 %x, i32 %nbits) {
; CHECK-NEXT: [[T1:%.*]] = ashr i32 [[T0]], [[NBITS]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
-; CHECK-NEXT: [[T2:%.*]] = shl nuw i32 [[T1]], [[NBITS]]
+; CHECK-NEXT: [[T2:%.*]] = shl i32 [[X]], [[NBITS]]
; CHECK-NEXT: ret i32 [[T2]]
;
%t0 = shl i32 %x, %nbits
@@ -145,7 +145,7 @@ define i32 @t6_nsw(i32 %x, i32 %nbits) {
; CHECK-NEXT: [[T1:%.*]] = ashr i32 [[T0]], [[NBITS]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
-; CHECK-NEXT: [[T2:%.*]] = shl nsw i32 [[T1]], [[NBITS]]
+; CHECK-NEXT: [[T2:%.*]] = shl i32 [[X]], [[NBITS]]
; CHECK-NEXT: ret i32 [[T2]]
;
%t0 = shl i32 %x, %nbits
@@ -162,7 +162,7 @@ define i32 @t7_nuw_nsw(i32 %x, i32 %nbits) {
; CHECK-NEXT: [[T1:%.*]] = ashr i32 [[T0]], [[NBITS]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
-; CHECK-NEXT: [[T2:%.*]] = shl nuw nsw i32 [[T1]], [[NBITS]]
+; CHECK-NEXT: [[T2:%.*]] = shl i32 [[X]], [[NBITS]]
; CHECK-NEXT: ret i32 [[T2]]
;
%t0 = shl i32 %x, %nbits
OpenPOWER on IntegriCloud