4 files changed, 34 insertions, 5 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index c832ca5644a..08aedb3200a 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -200,8 +200,21 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
       if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
           (DemandedMask & (~LHSKnownZero)))
         return I->getOperand(1);
+    } else if (I->getOpcode() == Instruction::Xor) {
+      // We can simplify (X^Y) -> X or Y in the user's context if we know that
+      // only bits from X or Y are demanded.
+
+      ComputeMaskedBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth+1);
+      ComputeMaskedBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth+1);
+
+      // If all of the demanded bits are known zero on one side, return the
+      // other.
+      if ((DemandedMask & RHSKnownZero) == DemandedMask)
+        return I->getOperand(0);
+      if ((DemandedMask & LHSKnownZero) == DemandedMask)
+        return I->getOperand(1);
     }
-
+
     // Compute the KnownZero/KnownOne bits to simplify things downstream.
     ComputeMaskedBits(I, KnownZero, KnownOne, Depth);
     return 0;
diff --git a/llvm/test/Transforms/InstCombine/2010-11-01-lshr-mask.ll b/llvm/test/Transforms/InstCombine/2010-11-01-lshr-mask.ll
index 5efad8a789f..80016219791 100644
--- a/llvm/test/Transforms/InstCombine/2010-11-01-lshr-mask.ll
+++ b/llvm/test/Transforms/InstCombine/2010-11-01-lshr-mask.ll
@@ -5,8 +5,8 @@ define i32 @main(i32 %argc) nounwind ssp {
 entry:
   %tmp3151 = trunc i32 %argc to i8
-; CHECK: %tmp3162 = shl i8 %tmp3151, 5
-; CHECK: and i8 %tmp3162, 64
+; CHECK: %0 = shl i8 %tmp3151, 5
+; CHECK: and i8 %0, 64
 ; CHECK-NOT: shl
 ; CHECK-NOT: shr
   %tmp3161 = or i8 %tmp3151, -17
diff --git a/llvm/test/Transforms/InstCombine/shift.ll b/llvm/test/Transforms/InstCombine/shift.ll
index fad0bd7ede3..32867761a3f 100644
--- a/llvm/test/Transforms/InstCombine/shift.ll
+++ b/llvm/test/Transforms/InstCombine/shift.ll
@@ -523,9 +523,9 @@ entry:
   %tmp51 = xor i8 %tmp50, %tmp5
   %tmp52 = and i8 %tmp51, -128
   %tmp53 = lshr i8 %tmp52, 7
-; CHECK: lshr i8 %tmp51, 7
   %tmp54 = mul i8 %tmp53, 16
-; CHECK: shl nuw nsw i8 %tmp53, 4
+; CHECK: %0 = shl i8 %tmp4, 2
+; CHECK: %tmp54 = and i8 %0, 16
   %tmp55 = xor i8 %tmp54, %tmp51
 ; CHECK: ret i8 %tmp551
   ret i8 %tmp55
diff --git a/llvm/test/Transforms/InstCombine/xor2.ll b/llvm/test/Transforms/InstCombine/xor2.ll
index 3c992467967..4ff3b1a4552 100644
--- a/llvm/test/Transforms/InstCombine/xor2.ll
+++ b/llvm/test/Transforms/InstCombine/xor2.ll
@@ -66,3 +66,19 @@ test5:
 ; CHECK: lshr i32 %val1, 8
 ; CHECK: ret
 }
+
+; defect-1 in rdar://12329730
+; Simplify (X^Y) -> X or Y in the user's context if we know that
+; only bits from X or Y are demanded.
+; e.g. "x ^ 1234" can be optimized into x in the context of "t >> 16";
+; put another way, t >> 16 -> x >> 16.
+; unsigned foo(unsigned x) { unsigned t = x ^ 1234; return (t >> 16) + t; }
+define i32 @test6(i32 %x) {
+  %xor = xor i32 %x, 1234
+  %shr = lshr i32 %xor, 16
+  %add = add i32 %shr, %xor
+  ret i32 %add
+; CHECK: @test6
+; CHECK: lshr i32 %x, 16
+; CHECK: ret
+}
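
Why the new Xor rule fires on test6: the constant 1234 (0x4D2) has all of its set bits below bit 16, so every bit demanded by the "lshr ..., 16" user is known zero in the constant operand, and the xor can be replaced by %x in that context (the add still demands the low bits, so the xor itself remains for that use). Below is a minimal standalone C++ sketch of that check, using plain uint32_t masks in place of LLVM's APInt/ComputeMaskedBits machinery; the mask values are illustrative assumptions, not the pass's actual data structures.

// Standalone sketch: the patch's "all demanded bits known zero on one
// side" test for xor, modeled with plain 32-bit masks.
#include <cassert>
#include <cstdint>
#include <initializer_list>

int main() {
  const uint32_t C = 1234;                   // t = x ^ 1234, as in test6
  const uint32_t DemandedMask = 0xFFFF0000u; // bits demanded by "t >> 16"
  const uint32_t RHSKnownZero = ~C;          // a constant's clear bits are known zero

  // The patch's check: every demanded bit is known zero on the RHS,
  // so "x ^ C" may be replaced by "x" in this user's context.
  assert((DemandedMask & RHSKnownZero) == DemandedMask);

  // Consequently (x ^ C) >> 16 == x >> 16 for every x, which is exactly
  // the rewrite that test6's CHECK lines expect.
  for (uint32_t x : {0u, 1234u, 0xFFFFu, 0xDEADBEEFu})
    assert(((x ^ C) >> 16) == (x >> 16));
  return 0;
}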