 -rw-r--r--  llvm/include/llvm/CodeGen/TargetInstrInfo.h | 4
 -rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 8
 -rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 29
 -rw-r--r--  llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp | 5
 -rw-r--r--  llvm/lib/Target/Lanai/LanaiInstrInfo.cpp | 6
 -rw-r--r--  llvm/lib/Target/PowerPC/PPCInstrInfo.cpp | 10
 -rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 3
 -rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp | 5
 -rw-r--r--  llvm/test/CodeGen/AArch64/machine-sink-getmemoperandwithoffset.mir | 65
 9 files changed, 105 insertions, 30 deletions
diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
index dbc48f4546d..e48d6bf625e 100644
--- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -1235,6 +1235,10 @@ public:
/// Get the base operand and byte offset of an instruction that reads/writes
/// memory.
+ /// It returns false if MI does not read/write memory.
+ /// It returns false if no base operand and offset were found.
+ /// It is not guaranteed to always recognize base operands and offsets in
+ /// all cases.
virtual bool getMemOperandWithOffset(const MachineInstr &MI,
const MachineOperand *&BaseOp,
int64_t &Offset,
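
The new comment turns the hook into a best-effort query rather than an assert-on-misuse API, so callers have to treat a false return as "no base/offset information available" and not as a programmer error. A minimal caller-side sketch under that contract (the wrapper and its name are hypothetical, not part of this patch):

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"
    #include "llvm/CodeGen/TargetRegisterInfo.h"
    using namespace llvm;

    // Hypothetical wrapper: succeeds only when the target hook produced a
    // usable base operand and byte offset for this instruction.
    static bool getSimpleBaseAndOffset(const MachineInstr &MI,
                                       const TargetInstrInfo &TII,
                                       const TargetRegisterInfo *TRI,
                                       const MachineOperand *&BaseOp,
                                       int64_t &Offset) {
      // False covers non-memory instructions, unrecognized addressing
      // modes, and base operand kinds the target declines to report.
      if (!TII.getMemOperandWithOffset(MI, BaseOp, Offset, TRI))
        return false;
      // Even on success, the base may be a register or a frame index (see
      // the AArch64 change below), so callers still check the operand kind.
      return BaseOp->isReg() || BaseOp->isFI();
    }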
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 103928ac43d..18c098bcaca 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -1982,6 +1982,9 @@ bool AArch64InstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
const MachineOperand *&BaseOp,
int64_t &Offset,
const TargetRegisterInfo *TRI) const {
+ if (!LdSt.mayLoadOrStore())
+ return false;
+
unsigned Width;
return getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI);
}
@@ -2026,9 +2029,8 @@ bool AArch64InstrInfo::getMemOperandWithOffsetWidth(
Offset = LdSt.getOperand(3).getImm() * Scale;
}
- assert((BaseOp->isReg() || BaseOp->isFI()) &&
- "getMemOperandWithOffset only supports base "
- "operands of type register or frame index.");
+ if (!BaseOp->isReg() && !BaseOp->isFI())
+ return false;
return true;
}
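
The same two-part change repeats in every target hook below: an early mayLoadOrStore() guard at the top, and a false return wherever an assert used to fire on an unexpected base operand. A generic sketch of the shape, for a made-up target whose loads and stores keep the base in operand 1 and an immediate offset in operand 2 (illustrative only; the real operand layouts are target-specific, as the hunks show):

    // Illustrative pattern only; MyTargetInstrInfo and the operand indices
    // are invented to show the guard/return structure of this patch.
    bool MyTargetInstrInfo::getMemOperandWithOffset(
        const MachineInstr &LdSt, const MachineOperand *&BaseOp,
        int64_t &Offset, const TargetRegisterInfo *TRI) const {
      // New: non-memory instructions report failure instead of asserting.
      if (!LdSt.mayLoadOrStore())
        return false;

      BaseOp = &LdSt.getOperand(1);          // assumed base operand slot
      Offset = LdSt.getOperand(2).getImm();  // assumed immediate offset slot

      // New: unsupported base operand kinds also report failure.
      if (!BaseOp->isReg() && !BaseOp->isFI())
        return false;
      return true;
    }

Returning false keeps the hook safe to call speculatively from target-independent passes such as MachineSink, at the cost of callers having to cope with "unknown" even for genuine loads and stores.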
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 5e39e7c119b..79481a1a0d2 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -260,6 +260,9 @@ bool SIInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
const MachineOperand *&BaseOp,
int64_t &Offset,
const TargetRegisterInfo *TRI) const {
+ if (!LdSt.mayLoadOrStore())
+ return false;
+
unsigned Opc = LdSt.getOpcode();
if (isDS(LdSt)) {
@@ -270,12 +273,11 @@ bool SIInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
// TODO: ds_consume/ds_append use M0 for the base address. Is it safe to
// report that here?
- if (!BaseOp)
+ if (!BaseOp || !BaseOp->isReg())
return false;
Offset = OffsetImm->getImm();
- assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
- "operands of type register.");
+
return true;
}
@@ -307,9 +309,11 @@ bool SIInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
EltSize *= 64;
BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
+ if (!BaseOp->isReg())
+ return false;
+
Offset = EltSize * Offset0;
- assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
- "operands of type register.");
+
return true;
}
@@ -346,12 +350,12 @@ bool SIInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
getNamedOperand(LdSt, AMDGPU::OpName::offset);
BaseOp = AddrReg;
Offset = OffsetImm->getImm();
-
if (SOffset) // soffset can be an inline immediate.
Offset += SOffset->getImm();
- assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
- "operands of type register.");
+ if (!BaseOp->isReg())
+ return false;
+
return true;
}
@@ -364,8 +368,9 @@ bool SIInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
const MachineOperand *SBaseReg = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
BaseOp = SBaseReg;
Offset = OffsetImm->getImm();
- assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
- "operands of type register.");
+ if (!BaseOp->isReg())
+ return false;
+
return true;
}
@@ -383,8 +388,8 @@ bool SIInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
}
Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
- assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
- "operands of type register.");
+ if (!BaseOp->isReg())
+ return false;
return true;
}
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index c3230da1053..de22bc02724 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -2945,10 +2945,7 @@ bool HexagonInstrInfo::getMemOperandWithOffset(
const TargetRegisterInfo *TRI) const {
unsigned AccessSize = 0;
BaseOp = getBaseAndOffset(LdSt, Offset, AccessSize);
- assert((!BaseOp || BaseOp->isReg()) &&
- "getMemOperandWithOffset only supports base "
- "operands of type register.");
- return BaseOp != nullptr;
+ return BaseOp != nullptr && BaseOp->isReg();
}
/// Can these instructions execute at the same time in a bundle.
diff --git a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
index 306cdc77d0d..4ce72f9621a 100644
--- a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
+++ b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
@@ -788,8 +788,10 @@ bool LanaiInstrInfo::getMemOperandWithOffsetWidth(
BaseOp = &LdSt.getOperand(1);
Offset = LdSt.getOperand(2).getImm();
- assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
- "operands of type register.");
+
+ if (!BaseOp->isReg())
+ return false;
+
return true;
}
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 4d5257363ea..00fe43fc8b9 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -4263,12 +4263,10 @@ MachineInstr *PPCInstrInfo::findLoopInstr(
// Return true if get the base operand, byte offset of an instruction and the
// memory width. Width is the size of memory that is being loaded/stored.
bool PPCInstrInfo::getMemOperandWithOffsetWidth(
- const MachineInstr &LdSt,
- const MachineOperand *&BaseReg,
- int64_t &Offset,
- unsigned &Width,
- const TargetRegisterInfo *TRI) const {
- assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
+ const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
+ unsigned &Width, const TargetRegisterInfo *TRI) const {
+ if (!LdSt.mayLoadOrStore())
+ return false;
// Handle only loads/stores with base register followed by immediate offset.
if (LdSt.getNumExplicitOperands() != 3)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 11c446386ca..e41af1f462b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -562,7 +562,8 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
unsigned &Width, const TargetRegisterInfo *TRI) const {
- assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
+ if (!LdSt.mayLoadOrStore())
+ return false;
// Here we assume the standard RISC-V ISA, which uses a base+offset
// addressing mode. You'll need to relax these conditions to support custom
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 7f0058492c9..c70d09686b7 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -3218,8 +3218,9 @@ bool X86InstrInfo::getMemOperandWithOffset(
Offset = DispMO.getImm();
- assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
- "operands of type register.");
+ if (!BaseOp->isReg())
+ return false;
+
return true;
}
diff --git a/llvm/test/CodeGen/AArch64/machine-sink-getmemoperandwithoffset.mir b/llvm/test/CodeGen/AArch64/machine-sink-getmemoperandwithoffset.mir
new file mode 100644
index 00000000000..3a21333a2b5
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/machine-sink-getmemoperandwithoffset.mir
@@ -0,0 +1,65 @@
+# RUN: llc -mtriple=aarch64-none-linux-gnu -run-pass machine-sink -o - %s | FileCheck %s
+--- |
+ define i8 @g() {
+ else.7:
+ br i1 undef, label %then.8, label %else.8, !make.implicit !0
+
+ then.8: ; preds = %else.8, %else.7
+ %merge = phi i8 [ 1, %else.7 ], [ 0, %else.8 ]
+ ret i8 %merge ;1 ;%merge
+
+ else.8: ; preds = %else.7
+ %icmp.8 = icmp eq i64 undef, undef
+ ; ret i8 0 ; added
+ br i1 %icmp.8, label %else.11, label %then.8
+
+ else.11: ; preds = %else.8
+ ret i8 undef
+ }
+
+ !0 = !{}
+...
+---
+name: g
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gpr32all }
+ - { id: 1, class: gpr32all }
+ - { id: 2, class: gpr32 }
+ - { id: 3, class: gpr32 }
+ - { id: 4, class: gpr32all }
+ - { id: 5, class: gpr32 }
+ - { id: 6, class: gpr32all }
+body: |
+ ; Just check that the pass didn't crash/assert.
+ ; CHECK-LABEL: name: g
+ bb.0.else.7:
+ successors: %bb.1, %bb.2
+
+ %2:gpr32 = MOVi32imm 1
+ ; Sinking the below COPY instruction caused an assert to trigger before
+ ; requiring getMemOperandWithOffset to return false rather than assert
+ ; when handling non-memory operations.
+ %1:gpr32all = COPY %2
+ %3:gpr32 = COPY $wzr
+ CBNZW %3, %bb.2
+ B %bb.1
+
+ bb.1.then.8:
+ %0:gpr32all = PHI %1, %bb.0, %4, %bb.2
+ $w0 = COPY %0
+ RET_ReallyLR implicit $w0
+
+ bb.2.else.8:
+ successors: %bb.3, %bb.1
+
+ %5:gpr32 = COPY $wzr
+ %4:gpr32all = COPY %5
+ CBNZW %5, %bb.1
+ B %bb.3
+
+ bb.3.else.11:
+ %6:gpr32all = IMPLICIT_DEF
+ $w0 = COPY %6
+ RET_ReallyLR implicit $w0
+...
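
The MIR above exercises the machine-sink path that, per the comment in bb.0, handed a plain COPY to getMemOperandWithOffset while handling the !make.implicit block and tripped the AArch64 assert before this patch. A hedged sketch of how such a probe now degrades gracefully (the loop and helper are illustrative, not the actual MachineSink code):

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"
    #include "llvm/CodeGen/TargetRegisterInfo.h"
    using namespace llvm;

    // Illustrative only: ask the hook about every instruction in a block,
    // memory or not, the way a sinking heuristic might while deciding
    // whether moving an instruction would break an implicit null check.
    static void probeBlockMemOperands(const MachineBasicBlock &MBB,
                                      const TargetInstrInfo &TII,
                                      const TargetRegisterInfo *TRI) {
      for (const MachineInstr &MI : MBB) {
        const MachineOperand *BaseOp = nullptr;
        int64_t Offset = 0;
        // A COPY, PHI, or branch now simply yields "no info" here instead
        // of hitting the removed assert in the target implementation.
        if (!TII.getMemOperandWithOffset(MI, BaseOp, Offset, TRI))
          continue;
        // ... BaseOp and Offset would feed the null-check analysis here ...
        (void)BaseOp;
        (void)Offset;
      }
    }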