-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 59
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.h   |  3
-rw-r--r--  llvm/test/CodeGen/AMDGPU/xnor.ll       | 86
3 files changed, 140 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index d4e47c63dca..fe3f93a4809 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -876,7 +876,7 @@ void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineFunction *MF = MBB.getParent();
SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
MachineFrameInfo &FrameInfo = MF->getFrameInfo();
- DebugLoc DL = MBB.findDebugLoc(MI);
+ const DebugLoc &DL = MBB.findDebugLoc(MI);
unsigned Size = FrameInfo.getObjectSize(FrameIndex);
unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
@@ -977,7 +977,7 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineFunction *MF = MBB.getParent();
SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
MachineFrameInfo &FrameInfo = MF->getFrameInfo();
- DebugLoc DL = MBB.findDebugLoc(MI);
+ const DebugLoc &DL = MBB.findDebugLoc(MI);
unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
unsigned Size = FrameInfo.getObjectSize(FrameIndex);
unsigned SpillSize = TRI->getSpillSize(*RC);
@@ -1032,7 +1032,7 @@ unsigned SIInstrInfo::calculateLDSSpillAddress(
MachineFunction *MF = MBB.getParent();
SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
- DebugLoc DL = MBB.findDebugLoc(MI);
+ const DebugLoc &DL = MBB.findDebugLoc(MI);
unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize();
unsigned WavefrontSize = ST.getWavefrontSize();
@@ -1040,7 +1040,7 @@ unsigned SIInstrInfo::calculateLDSSpillAddress(
if (!MFI->hasCalculatedTID()) {
MachineBasicBlock &Entry = MBB.getParent()->front();
MachineBasicBlock::iterator Insert = Entry.front();
- DebugLoc DL = Insert->getDebugLoc();
+ const DebugLoc &DL = Insert->getDebugLoc();
TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass,
*MF);
@@ -4162,7 +4162,10 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
continue;
case AMDGPU::S_XNOR_B64:
- splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
+ if (ST.hasDLInsts())
+ splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
+ else
+ splitScalar64BitXnor(Worklist, Inst, MDT);
Inst.eraseFromParent();
continue;
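
Aside, not part of the patch: the dispatch above chooses between two lowering
strategies. Subtargets with DL instructions (e.g. gfx906, the GCN-DL prefix in
the tests below) provide v_xnor_b32, so splitting the 64-bit XNOR into two
32-bit XNORs maps directly onto VALU xnors; without them, the new
splitScalar64BitXnor path keeps the NOT on the scalar unit. A rough sketch of
the resulting machine code, with illustrative registers:

    ; with DL instructions: two 32-bit xnors
    v_xnor_b32 v0, s0, v0
    v_xnor_b32 v1, s1, v1

    ; without DL instructions: scalar not, vector xor
    s_not_b64  s[0:1], s[0:1]
    v_xor_b32  v0, s0, v0
    v_xor_b32  v1, s1, v1
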
@@ -4753,13 +4756,55 @@ void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist,
addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}
+void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist,
+ MachineInstr &Inst,
+ MachineDominatorTree *MDT) const {
+ MachineBasicBlock &MBB = *Inst.getParent();
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+
+ MachineOperand &Dest = Inst.getOperand(0);
+ MachineOperand &Src0 = Inst.getOperand(1);
+ MachineOperand &Src1 = Inst.getOperand(2);
+ const DebugLoc &DL = Inst.getDebugLoc();
+
+ MachineBasicBlock::iterator MII = Inst;
+
+ const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
+
+ unsigned Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+
+ MachineOperand* Op0;
+ MachineOperand* Op1;
+
+ if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) {
+ Op0 = &Src0;
+ Op1 = &Src1;
+ } else {
+ Op0 = &Src1;
+ Op1 = &Src0;
+ }
+
+ BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm)
+ .add(*Op0);
+
+ unsigned NewDest = MRI.createVirtualRegister(DestRC);
+
+ MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest)
+ .addReg(Interm)
+ .add(*Op1);
+
+ MRI.replaceRegWith(Dest.getReg(), NewDest);
+
+ Worklist.insert(&Xor);
+}
+
void SIInstrInfo::splitScalar64BitBCNT(
SetVectorType &Worklist, MachineInstr &Inst) const {
MachineBasicBlock &MBB = *Inst.getParent();
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
MachineBasicBlock::iterator MII = Inst;
- DebugLoc DL = Inst.getDebugLoc();
+ const DebugLoc &DL = Inst.getDebugLoc();
MachineOperand &Dest = Inst.getOperand(0);
MachineOperand &Src = Inst.getOperand(1);
@@ -4795,7 +4840,7 @@ void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist,
MachineBasicBlock &MBB = *Inst.getParent();
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
MachineBasicBlock::iterator MII = Inst;
- DebugLoc DL = Inst.getDebugLoc();
+ const DebugLoc &DL = Inst.getDebugLoc();
MachineOperand &Dest = Inst.getOperand(0);
uint32_t Imm = Inst.getOperand(2).getImm();
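
A sketch of what the new splitScalar64BitXnor above emits, with register
numbers invented here for illustration: the SGPR source, if any, is steered
into the NOT so the negation can stay on the SALU, and only the trailing XOR
is pushed back onto the worklist (Worklist.insert(&Xor)) for further VALU
lowering if its users demand it:

    s_not_b64 s[4:5], s[0:1]          ; Interm: negation stays scalar
    s_xor_b64 s[6:7], s[4:5], s[2:3]  ; NewDest: re-queued, split to two
                                      ; v_xor_b32 later if moved to the VALU
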
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index c78fec6bc37..ccccd993e6a 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -107,6 +107,9 @@ private:
unsigned Opcode,
MachineDominatorTree *MDT = nullptr) const;
+ void splitScalar64BitXnor(SetVectorType &Worklist, MachineInstr &Inst,
+ MachineDominatorTree *MDT = nullptr) const;
+
void splitScalar64BitBCNT(SetVectorType &Worklist,
MachineInstr &Inst) const;
void splitScalar64BitBFE(SetVectorType &Worklist,
diff --git a/llvm/test/CodeGen/AMDGPU/xnor.ll b/llvm/test/CodeGen/AMDGPU/xnor.ll
index 103cb3487ca..716bcb5922e 100644
--- a/llvm/test/CodeGen/AMDGPU/xnor.ll
+++ b/llvm/test/CodeGen/AMDGPU/xnor.ll
@@ -74,9 +74,9 @@ entry:
; GCN-LABEL: {{^}}vector_xnor_i64_one_use
; GCN-NOT: s_xnor_b64
; GCN: v_not_b32
-; GCN: v_xor_b32
; GCN: v_not_b32
; GCN: v_xor_b32
+; GCN: v_xor_b32
; GCN-DL: v_xnor_b32
; GCN-DL: v_xnor_b32
define i64 @vector_xnor_i64_one_use(i64 %a, i64 %b) {
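
The reordered checks above reflect the new expansion order: the 64-bit NOT is
now materialized as a whole before the 64-bit XOR, instead of a not+xor pair
per 32-bit half, so both v_not_b32 halves precede both v_xor_b32 halves.
Roughly, with illustrative registers:

    v_not_b32 v2, v2
    v_not_b32 v3, v3
    v_xor_b32 v0, v0, v2
    v_xor_b32 v1, v1, v3
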
@@ -110,5 +110,89 @@ define amdgpu_kernel void @xnor_v_s_i32_one_use(i32 addrspace(1)* %out, i32 %s)
ret void
}
+; GCN-LABEL: {{^}}xnor_i64_s_v_one_use
+; GCN-NOT: s_xnor_b64
+; GCN: s_not_b64
+; GCN: v_xor_b32
+; GCN: v_xor_b32
+; GCN-DL: v_xnor_b32
+; GCN-DL: v_xnor_b32
+define amdgpu_kernel void @xnor_i64_s_v_one_use(
+ i64 addrspace(1)* %r0, i64 %a) {
+entry:
+ %b32 = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %b64 = zext i32 %b32 to i64
+ %b = shl i64 %b64, 29
+ %xor = xor i64 %a, %b
+ %r0.val = xor i64 %xor, -1
+ store i64 %r0.val, i64 addrspace(1)* %r0
+ ret void
+}
+
+; GCN-LABEL: {{^}}xnor_i64_v_s_one_use
+; GCN-NOT: s_xnor_b64
+; GCN: s_not_b64
+; GCN: v_xor_b32
+; GCN: v_xor_b32
+; GCN-DL: v_xnor_b32
+; GCN-DL: v_xnor_b32
+define amdgpu_kernel void @xnor_i64_v_s_one_use(
+ i64 addrspace(1)* %r0, i64 %a) {
+entry:
+ %b32 = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %b64 = zext i32 %b32 to i64
+ %b = shl i64 %b64, 29
+ %xor = xor i64 %b, %a
+ %r0.val = xor i64 %xor, -1
+ store i64 %r0.val, i64 addrspace(1)* %r0
+ ret void
+}
+
+; GCN-LABEL: {{^}}vector_xor_na_b_i32_one_use
+; GCN-NOT: s_xnor_b32
+; GCN: v_not_b32
+; GCN: v_xor_b32
+; GCN-DL: v_xnor_b32
+define i32 @vector_xor_na_b_i32_one_use(i32 %a, i32 %b) {
+entry:
+ %na = xor i32 %a, -1
+ %r = xor i32 %na, %b
+ ret i32 %r
+}
+
+; GCN-LABEL: {{^}}vector_xor_a_nb_i32_one_use
+; GCN-NOT: s_xnor_b32
+; GCN: v_not_b32
+; GCN: v_xor_b32
+; GCN-DL: v_xnor_b32
+define i32 @vector_xor_a_nb_i32_one_use(i32 %a, i32 %b) {
+entry:
+ %nb = xor i32 %b, -1
+ %r = xor i32 %a, %nb
+ ret i32 %r
+}
+
+; GCN-LABEL: {{^}}scalar_xor_a_nb_i64_one_use
+; GCN: s_xnor_b64
+define amdgpu_kernel void @scalar_xor_a_nb_i64_one_use(
+ i64 addrspace(1)* %r0, i64 %a, i64 %b) {
+entry:
+ %nb = xor i64 %b, -1
+ %r0.val = xor i64 %a, %nb
+ store i64 %r0.val, i64 addrspace(1)* %r0
+ ret void
+}
+
+; GCN-LABEL: {{^}}scalar_xor_na_b_i64_one_use
+; GCN: s_xnor_b64
+define amdgpu_kernel void @scalar_xor_na_b_i64_one_use(
+ i64 addrspace(1)* %r0, i64 %a, i64 %b) {
+entry:
+ %na = xor i64 %a, -1
+ %r0.val = xor i64 %na, %b
+ store i64 %r0.val, i64 addrspace(1)* %r0
+ ret void
+}
+
; Function Attrs: nounwind readnone
declare i32 @llvm.amdgcn.workitem.id.x() #0
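
The two scalar_xor_*_i64_one_use tests above pin down the complement of the
change: when both operands are uniform, nothing needs to move to the VALU, and
xor+not still folds into a single scalar instruction, e.g. (registers
illustrative):

    s_xnor_b64 s[0:1], s[0:1], s[2:3]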