-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp             | 25
-rw-r--r--  llvm/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll  | 36
2 files changed, 61 insertions, 0 deletions
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 2f5ee0c56c6..f98f0dd292a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -848,6 +848,31 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
     }
     break;
   }
+  case ISD::BUILD_PAIR: {
+    EVT HalfVT = Op.getOperand(0).getValueType();
+    unsigned HalfBitWidth = HalfVT.getScalarSizeInBits();
+
+    APInt MaskLo = NewMask.getLoBits(HalfBitWidth).trunc(HalfBitWidth);
+    APInt MaskHi = NewMask.getHiBits(HalfBitWidth).trunc(HalfBitWidth);
+
+    APInt KnownZeroLo, KnownOneLo;
+    APInt KnownZeroHi, KnownOneHi;
+
+    if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownZeroLo,
+                             KnownOneLo, TLO, Depth + 1))
+      return true;
+
+    if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownZeroHi,
+                             KnownOneHi, TLO, Depth + 1))
+      return true;
+
+    KnownZero = KnownZeroLo.zext(BitWidth) |
+                KnownZeroHi.zext(BitWidth).shl(HalfBitWidth);
+
+    KnownOne = KnownOneLo.zext(BitWidth) |
+               KnownOneHi.zext(BitWidth).shl(HalfBitWidth);
+    break;
+  }
   case ISD::ZERO_EXTEND: {
     unsigned OperandBitWidth =
       Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
diff --git a/llvm/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll b/llvm/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
new file mode 100644
index 00000000000..d9f60ea1a4d
--- /dev/null
+++ b/llvm/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
@@ -0,0 +1,36 @@
+; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
+
+; 64-bit select was originally lowered with a build_pair, and this
+; could be simplified to 1 cndmask instead of 2, but that broke when
+; it started being implemented with a v2i32 build_vector and
+; bitcasting.
+define void @trunc_select_i64(i32 addrspace(1)* %out, i64 %a, i64 %b, i32 %c) {
+  %cmp = icmp eq i32 %c, 0
+  %select = select i1 %cmp, i64 %a, i64 %b
+  %trunc = trunc i64 %select to i32
+  store i32 %trunc, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; SI-LABEL: @trunc_load_alloca_i64:
+; SI: V_MOVRELS_B32
+; SI-NOT: V_MOVRELS_B32
+; SI: S_ENDPGM
+define void @trunc_load_alloca_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) {
+  %idx = add i32 %a, %b
+  %alloca = alloca i64, i32 4
+  %gep0 = getelementptr i64* %alloca, i64 0
+  %gep1 = getelementptr i64* %alloca, i64 1
+  %gep2 = getelementptr i64* %alloca, i64 2
+  %gep3 = getelementptr i64* %alloca, i64 3
+  store i64 24, i64* %gep0, align 8
+  store i64 9334, i64* %gep1, align 8
+  store i64 3935, i64* %gep2, align 8
+  store i64 9342, i64* %gep3, align 8
+  %gep = getelementptr i64* %alloca, i32 %idx
+  %load = load i64* %gep, align 8
+  %mask = and i64 %load, 4294967296
+  %add = add i64 %mask, -1
+  store i64 %add, i64 addrspace(1)* %out, align 4
+  ret void
+}
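
For context, the new BUILD_PAIR case splits the 64-bit demanded mask into its two 32-bit halves, simplifies each operand against its own half, and recombines the per-half known bits by zero-extending the low result and shifting the high result back into place. Below is a minimal standalone sketch of that arithmetic using plain integers in place of llvm::APInt; it is not part of the patch, and the known-zero values are made up for illustration.

#include <cstdint>
#include <cstdio>

// Hypothetical illustration (not from the patch): split a 64-bit
// demanded mask into 32-bit halves, then recombine per-half
// known-zero results, mirroring the APInt getLoBits/getHiBits/trunc
// and zext/shl steps in the diff above.
int main() {
  const unsigned HalfBitWidth = 32;
  uint64_t NewMask = 0x00000000FFFFFFFFull; // only the low half is demanded

  // Mirrors NewMask.getLoBits(HalfBitWidth).trunc(HalfBitWidth) and
  // NewMask.getHiBits(HalfBitWidth).trunc(HalfBitWidth).
  uint32_t MaskLo = (uint32_t)NewMask;
  uint32_t MaskHi = (uint32_t)(NewMask >> HalfBitWidth);

  // Made-up results: pretend the recursive SimplifyDemandedBits calls
  // reported these known-zero bits for each half.
  uint32_t KnownZeroLo = 0xFFFF0000u; // low operand: top 16 bits known zero
  uint32_t KnownZeroHi = 0xFFFFFFFFu; // high half: nothing demanded

  // Mirrors KnownZeroLo.zext(BitWidth) |
  //         KnownZeroHi.zext(BitWidth).shl(HalfBitWidth).
  uint64_t KnownZero = (uint64_t)KnownZeroLo |
                       ((uint64_t)KnownZeroHi << HalfBitWidth);

  printf("MaskLo=%#x MaskHi=%#x KnownZero=%#llx\n", (unsigned)MaskLo,
         (unsigned)MaskHi, (unsigned long long)KnownZero);
  return 0;
}

KnownOne is recombined the same way. This per-half split is what lets a truncated 64-bit select, as in the first test above, demand only the low half of the pair so the high-half operand can be simplified away.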

