Diffstat (limited to 'llvm/lib/Target/AMDGPU/SIInstructions.td')
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstructions.td | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 63d2239637c..94506f2fcd0 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -848,7 +848,7 @@ defm V_READLANE_B32 : VOP2SI_3VI_m <
vop3 <0x001, 0x289>,
"v_readlane_b32",
(outs SReg_32:$vdst),
- (ins VGPR_32:$src0, SCSrc_32:$src1),
+ (ins VGPR_32:$src0, SCSrc_b32:$src1),
"v_readlane_b32 $vdst, $src0, $src1",
[(set i32:$vdst, (int_amdgcn_readlane i32:$src0, i32:$src1))]
>;
@@ -857,7 +857,7 @@ defm V_WRITELANE_B32 : VOP2SI_3VI_m <
vop3 <0x002, 0x28a>,
"v_writelane_b32",
(outs VGPR_32:$vdst),
- (ins SReg_32:$src0, SCSrc_32:$src1),
+ (ins SReg_32:$src0, SCSrc_b32:$src1),
"v_writelane_b32 $vdst, $src0, $src1"
>;
@@ -1179,7 +1179,7 @@ let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC] in {
// For use in patterns
def V_CNDMASK_B64_PSEUDO : VOP3Common <(outs VReg_64:$vdst),
- (ins VSrc_64:$src0, VSrc_64:$src1, SSrc_64:$src2), "", []> {
+ (ins VSrc_b64:$src0, VSrc_b64:$src1, SSrc_b64:$src2), "", []> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let usesCustomInserter = 1;
@@ -1187,7 +1187,7 @@ def V_CNDMASK_B64_PSEUDO : VOP3Common <(outs VReg_64:$vdst),
// 64-bit vector move instruction. This is mainly used by the SIFoldOperands
// pass to enable folding of inline immediates.
-def V_MOV_B64_PSEUDO : PseudoInstSI <(outs VReg_64:$vdst), (ins VSrc_64:$src0)> {
+def V_MOV_B64_PSEUDO : PseudoInstSI <(outs VReg_64:$vdst), (ins VSrc_b64:$src0)> {
let VALU = 1;
}
} // End let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC]
@@ -1263,14 +1263,14 @@ def SI_ELSE_BREAK : CFPseudoInstSI <
let Uses = [EXEC], Defs = [EXEC,VCC] in {
def SI_KILL : PseudoInstSI <
- (outs), (ins VSrc_32:$src),
+ (outs), (ins VSrc_b32:$src),
[(AMDGPUkill i32:$src)]> {
let isConvergent = 1;
let usesCustomInserter = 1;
}
def SI_KILL_TERMINATOR : SPseudoInstSI <
- (outs), (ins VSrc_32:$src)> {
+ (outs), (ins VSrc_b32:$src)> {
let isTerminator = 1;
}
@@ -1288,7 +1288,7 @@ def SI_PS_LIVE : PseudoInstSI <
// s_mov_b32 rather than a copy of another initialized
// register. MachineCSE skips copies, and we don't want to have to
// fold operands before it runs.
-def SI_INIT_M0 : SPseudoInstSI <(outs), (ins SSrc_32:$src)> {
+def SI_INIT_M0 : SPseudoInstSI <(outs), (ins SSrc_b32:$src)> {
let Defs = [M0];
let usesCustomInserter = 1;
let isAsCheapAsAMove = 1;
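
Note on the change: the patch switches these pseudo and VOP definitions from the older untyped source-operand classes (SCSrc_32, VSrc_32, VSrc_64, SSrc_32, SSrc_64) to the type-suffixed _b32/_b64 variants. As a minimal, illustrative sketch only (the def name and operand-type string below are assumptions, not copied from SIRegisterInfo.td), a _b32-style scalar source operand that accepts either a register or an inline constant can be declared roughly like this:

// Illustrative only: a 32-bit source operand in the style of the
// renamed *_b32 classes, accepting an SGPR or an inline constant.
def SCSrc_b32_example : RegisterOperand<SReg_32> {
  let OperandNamespace = "AMDGPU";
  let OperandType = "OPERAND_REG_INLINE_C_INT32";
}

The _b32/_b64 suffix encodes the operand's bit width and integer interpretation in the class name, which is what the replacements above rely on.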