-rw-r--r--   llvm/lib/Target/AMDGPU/SIISelLowering.cpp          | 88
-rw-r--r--   llvm/lib/Target/AMDGPU/SIISelLowering.h            |  2
-rw-r--r--   llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll   | 80
3 files changed, 161 insertions, 9 deletions
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 325dbac7e61..ad9c5e49320 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -475,6 +475,9 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
   }
 
   setTargetDAGCombine(ISD::ADD);
+  setTargetDAGCombine(ISD::ADDCARRY);
+  setTargetDAGCombine(ISD::SUB);
+  setTargetDAGCombine(ISD::SUBCARRY);
   setTargetDAGCombine(ISD::FADD);
   setTargetDAGCombine(ISD::FSUB);
   setTargetDAGCombine(ISD::FMINNUM);
@@ -4859,19 +4862,81 @@ SDValue SITargetLowering::performAddCombine(SDNode *N,
   // add x, sext (setcc) => subcarry x, 0, setcc
   unsigned Opc = LHS.getOpcode();
   if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
-      Opc == ISD::ANY_EXTEND)
+      Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
     std::swap(RHS, LHS);
 
   Opc = RHS.getOpcode();
-  if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
-      Opc == ISD::ANY_EXTEND) {
+  switch (Opc) {
+  default: break;
+  case ISD::ZERO_EXTEND:
+  case ISD::SIGN_EXTEND:
+  case ISD::ANY_EXTEND: {
     auto Cond = RHS.getOperand(0);
-    if (Cond.getOpcode() == ISD::SETCC) {
-      SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
-      SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
-      Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
-      return DAG.getNode(Opc, SL, VTList, Args);
-    }
+    if (Cond.getOpcode() != ISD::SETCC) break;
+    SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
+    SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
+    Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
+    return DAG.getNode(Opc, SL, VTList, Args);
+  }
+  case ISD::ADDCARRY: {
+    // add x, (addcarry y, 0, cc) => addcarry x, y, cc
+    auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
+    if (!C || C->getZExtValue() != 0) break;
+    SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
+    return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
+  }
+  }
+  return SDValue();
+}
+
+SDValue SITargetLowering::performSubCombine(SDNode *N,
+                                            DAGCombinerInfo &DCI) const {
+  SelectionDAG &DAG = DCI.DAG;
+  EVT VT = N->getValueType(0);
+
+  if (VT != MVT::i32)
+    return SDValue();
+
+  SDLoc SL(N);
+  SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
+
+  unsigned Opc = LHS.getOpcode();
+  if (Opc != ISD::SUBCARRY)
+    std::swap(RHS, LHS);
+
+  if (LHS.getOpcode() == ISD::SUBCARRY) {
+    // sub (subcarry x, 0, cc), y => subcarry x, y, cc
+    auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
+    if (!C || C->getZExtValue() != 0)
+      return SDValue();
+    SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
+    return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
+  }
+  return SDValue();
+}
+
+SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
+  DAGCombinerInfo &DCI) const {
+
+  if (N->getValueType(0) != MVT::i32)
+    return SDValue();
+
+  auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  if (!C || C->getZExtValue() != 0)
+    return SDValue();
+
+  SelectionDAG &DAG = DCI.DAG;
+  SDValue LHS = N->getOperand(0);
+
+  // addcarry (add x, y), 0, cc => addcarry x, y, cc
+  // subcarry (sub x, y), 0, cc => subcarry x, y, cc
+  unsigned LHSOpc = LHS.getOpcode();
+  unsigned Opc = N->getOpcode();
+  if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
+      (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
+    SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
+    return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
   }
   return SDValue();
 }
@@ -5048,6 +5113,11 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
     return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
   case ISD::ADD:
     return performAddCombine(N, DCI);
+  case ISD::SUB:
+    return performSubCombine(N, DCI);
+  case ISD::ADDCARRY:
+  case ISD::SUBCARRY:
+    return performAddCarrySubCarryCombine(N, DCI);
   case ISD::FADD:
     return performFAddCombine(N, DCI);
   case ISD::FSUB:
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index 215aba8c607..24f88e632d3 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -109,6 +109,8 @@ class SITargetLowering final : public AMDGPUTargetLowering {
   unsigned getFusedOpcode(const SelectionDAG &DAG,
                           const SDNode *N0, const SDNode *N1) const;
   SDValue performAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+  SDValue performAddCarrySubCarryCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+  SDValue performSubCombine(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue performFAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue performFSubCombine(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue performSetCCCombine(SDNode *N, DAGCombinerInfo &DCI) const;
diff --git a/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll b/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
index 06cc7fcad06..6026a047d88 100644
--- a/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
+++ b/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
@@ -36,6 +36,86 @@ bb:
   ret void
 }
 
+; GCN-LABEL: {{^}}add_adde:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
+; GCN-NOT: v_cndmask
+; GCN-NOT: v_add
+
+define amdgpu_kernel void @add_adde(i32 addrspace(1)* nocapture %arg, i32 %a) {
+bb:
+  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
+  %v = load i32, i32 addrspace(1)* %gep, align 4
+  %cmp = icmp ugt i32 %x, %y
+  %ext = zext i1 %cmp to i32
+  %adde = add i32 %v, %ext
+  %add2 = add i32 %adde, %a
+  store i32 %add2, i32 addrspace(1)* %gep, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}adde_add:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
+; GCN-NOT: v_cndmask
+; GCN-NOT: v_add
+
+define amdgpu_kernel void @adde_add(i32 addrspace(1)* nocapture %arg, i32 %a) {
+bb:
+  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
+  %v = load i32, i32 addrspace(1)* %gep, align 4
+  %cmp = icmp ugt i32 %x, %y
+  %ext = zext i1 %cmp to i32
+  %add = add i32 %v, %a
+  %adde = add i32 %add, %ext
+  store i32 %adde, i32 addrspace(1)* %gep, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}sub_sube:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_subb_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
+; GCN-NOT: v_cndmask
+; GCN-NOT: v_sub
+
+define amdgpu_kernel void @sub_sube(i32 addrspace(1)* nocapture %arg, i32 %a) {
+bb:
+  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
+  %v = load i32, i32 addrspace(1)* %gep, align 4
+  %cmp = icmp ugt i32 %x, %y
+  %ext = sext i1 %cmp to i32
+  %adde = add i32 %v, %ext
+  %sub = sub i32 %adde, %a
+  store i32 %sub, i32 addrspace(1)* %gep, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}sube_sub:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_subb_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
+; GCN-NOT: v_cndmask
+; GCN-NOT: v_sub
+
+define amdgpu_kernel void @sube_sub(i32 addrspace(1)* nocapture %arg, i32 %a) {
+bb:
+  %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
+  %v = load i32, i32 addrspace(1)* %gep, align 4
+  %cmp = icmp ugt i32 %x, %y
+  %ext = sext i1 %cmp to i32
+  %sub = sub i32 %v, %a
+  %adde = add i32 %sub, %ext
+  store i32 %adde, i32 addrspace(1)* %gep, align 4
+  ret void
+}
+
 declare i32 @llvm.amdgcn.workitem.id.x() #0
 declare i32 @llvm.amdgcn.workitem.id.y() #0
 
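For orientation, here is a rough sketch of how the new combines compose with the pre-existing zext/sext(setcc) combine on the patterns the tests above exercise. The arrow notation is illustrative shorthand based on the comments in the patch, not literal SelectionDAG dumps:

; zext path (@add_adde, @adde_add):
;   add x, (zext (setcc ...))    -->  addcarry x, 0, (setcc ...)   ; existing combine
;   add x, (addcarry y, 0, cc)   -->  addcarry x, y, cc            ; new ADD combine
;   addcarry (add x, y), 0, cc   -->  addcarry x, y, cc            ; new ADDCARRY combine
;
; sext path (@sub_sube, @sube_sub):
;   add x, (sext (setcc ...))    -->  subcarry x, 0, (setcc ...)   ; existing combine
;   sub (subcarry x, 0, cc), y   -->  subcarry x, y, cc            ; new SUB combine
;   subcarry (sub x, y), 0, cc   -->  subcarry x, y, cc            ; new SUBCARRY combine

Either way the chain should select to a single v_addc_u32 or v_subb_u32 that consumes the compare result as carry-in, which is what the GCN CHECK lines verify. The test file's RUN lines are outside this hunk; presumably they run llc for an amdgcn target and pipe the output into FileCheck with the GCN prefix.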

