Diffstat (limited to 'llvm/lib/Target/PowerPC')
-rw-r--r--  llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp | 24
-rw-r--r--  llvm/lib/Target/PowerPC/PPCInstrInfo.cpp        |  6
-rw-r--r--  llvm/lib/Target/PowerPC/PPCMIPeephole.cpp       |  6
-rw-r--r--  llvm/lib/Target/PowerPC/PPCQPXLoadSplat.cpp     |  4
-rw-r--r--  llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp     | 18
5 files changed, 29 insertions(+), 29 deletions(-)
diff --git a/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp b/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
index cd078972307..48b94a53823 100644
--- a/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
+++ b/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
@@ -62,9 +62,9 @@ namespace llvm {
/// %bb.0: derived from LLVM BB %entry
/// Live Ins: %f1 %f3 %x6
/// <SNIP1>
-/// %0<def> = COPY %f1; F8RC:%0
-/// %5<def> = CMPLWI %4<kill>, 0; CRRC:%5 GPRC:%4
-/// %8<def> = LXSDX %zero8, %7<kill>, %rm<imp-use>;
+/// %0 = COPY %f1; F8RC:%0
+/// %5 = CMPLWI killed %4, 0; CRRC:%5 GPRC:%4
+/// %8 = LXSDX %zero8, killed %7, implicit %rm;
/// mem:LD8[ConstantPool] F8RC:%8 G8RC:%7
/// BCC 76, %5, <%bb.2>; CRRC:%5
/// Successors according to CFG: %bb.1(?%) %bb.2(?%)
@@ -75,7 +75,7 @@ namespace llvm {
///
/// %bb.2: derived from LLVM BB %entry
/// Predecessors according to CFG: %bb.0 %bb.1
-/// %9<def> = PHI %8, <%bb.1>, %0, <%bb.0>;
+/// %9 = PHI %8, <%bb.1>, %0, <%bb.0>;
/// F8RC:%9,%8,%0
/// <SNIP2>
/// BCC 76, %5, <%bb.4>; CRRC:%5
@@ -87,10 +87,10 @@ namespace llvm {
///
/// %bb.4: derived from LLVM BB %entry
/// Predecessors according to CFG: %bb.2 %bb.3
-/// %13<def> = PHI %12, <%bb.3>, %2, <%bb.2>;
+/// %13 = PHI %12, <%bb.3>, %2, <%bb.2>;
/// F8RC:%13,%12,%2
/// <SNIP3>
-/// BLR8 %lr8<imp-use>, %rm<imp-use>, %f1<imp-use>
+/// BLR8 implicit %lr8, implicit %rm, implicit %f1
///
/// When this pattern is detected, branch coalescing will try to collapse
/// it by moving code in %bb.2 to %bb.0 and/or %bb.4 and removing %bb.3.
@@ -100,9 +100,9 @@ namespace llvm {
/// %bb.0: derived from LLVM BB %entry
/// Live Ins: %f1 %f3 %x6
/// <SNIP1>
-/// %0<def> = COPY %f1; F8RC:%0
-/// %5<def> = CMPLWI %4<kill>, 0; CRRC:%5 GPRC:%4
-/// %8<def> = LXSDX %zero8, %7<kill>, %rm<imp-use>;
+/// %0 = COPY %f1; F8RC:%0
+/// %5 = CMPLWI killed %4, 0; CRRC:%5 GPRC:%4
+/// %8 = LXSDX %zero8, killed %7, implicit %rm;
/// mem:LD8[ConstantPool] F8RC:%8 G8RC:%7
/// <SNIP2>
/// BCC 76, %5, <%bb.4>; CRRC:%5
@@ -115,12 +115,12 @@ namespace llvm {
///
/// %bb.4: derived from LLVM BB %entry
/// Predecessors according to CFG: %bb.0 %bb.1
-/// %9<def> = PHI %8, <%bb.1>, %0, <%bb.0>;
+/// %9 = PHI %8, <%bb.1>, %0, <%bb.0>;
/// F8RC:%9,%8,%0
-/// %13<def> = PHI %12, <%bb.1>, %2, <%bb.0>;
+/// %13 = PHI %12, <%bb.1>, %2, <%bb.0>;
/// F8RC:%13,%12,%2
/// <SNIP3>
-/// BLR8 %lr8<imp-use>, %rm<imp-use>, %f1<imp-use>
+/// BLR8 implicit %lr8, implicit %rm, implicit %f1
///
/// Branch Coalescing does not split blocks, it moves everything in the same
/// direction ensuring it does not break use/definition semantics.
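
The collapse described above can only happen when both conditional branches test the same condition. As a rough sketch of that one check (not the pass's actual validation logic, and with all the PHI, clobber, and block-shape checks omitted), one could compare the conditions returned by the generic TargetInstrInfo::analyzeBranch hook; the helper name below is illustrative:

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

// Sketch: true if both blocks end in an analyzable conditional branch on
// an identical condition, which is the precondition for merging the code
// that sits between them.
static bool branchesOnSameCondition(MachineBasicBlock &BB1,
                                    MachineBasicBlock &BB2,
                                    const TargetInstrInfo *TII) {
  MachineBasicBlock *TBB1 = nullptr, *FBB1 = nullptr;
  MachineBasicBlock *TBB2 = nullptr, *FBB2 = nullptr;
  SmallVector<MachineOperand, 4> Cond1, Cond2;
  // analyzeBranch returns true when it cannot decompose the terminators.
  if (TII->analyzeBranch(BB1, TBB1, FBB1, Cond1, /*AllowModify=*/false) ||
      TII->analyzeBranch(BB2, TBB2, FBB2, Cond2, /*AllowModify=*/false))
    return false;
  // Unconditional branches leave Cond empty; nothing to coalesce then.
  if (Cond1.empty() || Cond1.size() != Cond2.size())
    return false;
  for (unsigned I = 0, E = Cond1.size(); I != E; ++I) {
    const MachineOperand &A = Cond1[I], &B = Cond2[I];
    if (A.isImm() && B.isImm() && A.getImm() == B.getImm())
      continue;
    if (A.isReg() && B.isReg() && A.getReg() == B.getReg())
      continue;
    return false;
  }
  return true;
}

For PPC, Cond typically carries the BO-style predicate immediate and the CR register, which lines up with the BCC 76, %5 operands in the example blocks above.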
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 15cc1c76760..fcc38e233b2 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -2315,10 +2315,10 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt,
// For a method return value, we check the ZExt/SExt flags in attribute.
// We assume the following code sequence for method call.
- // ADJCALLSTACKDOWN 32, %r1<imp-def,dead>, %r1<imp-use>
+ // ADJCALLSTACKDOWN 32, implicit dead %r1, implicit %r1
// BL8_NOP <ga:@func>,...
- // ADJCALLSTACKUP 32, 0, %r1<imp-def,dead>, %r1<imp-use>
- // %5<def> = COPY %x3; G8RC:%5
+ // ADJCALLSTACKUP 32, 0, implicit dead %r1, implicit %r1
+ // %5 = COPY %x3; G8RC:%5
if (SrcReg == PPC::X3) {
const MachineBasicBlock *MBB = MI.getParent();
MachineBasicBlock::const_instr_iterator II =
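
As a loose illustration of the attribute check the comment describes for a method return value, a sketch could walk from the COPY of %x3 back to the preceding call and query the callee's return attribute, as below; the helper name and the exact AttributeList query are assumptions made for illustration, not the code in PPCInstrInfo.cpp:

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"

using namespace llvm;

// Sketch: given the COPY that moves a call's return value out of %x3,
// find the most recent call in the block and test whether the callee's
// return value is marked signext/zeroext.
static bool calleeReturnExtended(const MachineInstr &CopyMI, bool SignExt) {
  const MachineInstr *Call = nullptr;
  for (const MachineInstr &MI : *CopyMI.getParent()) {
    if (&MI == &CopyMI)
      break;                  // only consider instructions before the COPY
    if (MI.isCall())
      Call = &MI;             // e.g. the BL8_NOP in the sequence above
  }
  if (!Call)
    return false;
  for (const MachineOperand &MO : Call->operands())
    if (MO.isGlobal())
      if (const auto *F = dyn_cast<Function>(MO.getGlobal()))
        return F->getAttributes().hasRetAttr(SignExt ? Attribute::SExt
                                                     : Attribute::ZExt);
  return false;               // indirect call, or no visible callee
}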
diff --git a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
index c6fcea7c956..05eb7563893 100644
--- a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
+++ b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
@@ -585,8 +585,8 @@ bool PPCMIPeephole::simplifyCode(void) {
// We can eliminate RLDICL (e.g. for zero-extension)
// if all bits to clear are already zero in the input.
// This code assume following code sequence for zero-extension.
- // %6<def> = COPY %5:sub_32; (optional)
- // %8<def> = IMPLICIT_DEF;
+ // %6 = COPY %5:sub_32; (optional)
+ // %8 = IMPLICIT_DEF;
// %7<def,tied1> = INSERT_SUBREG %8<tied0>, %6, sub_32;
if (!EnableZExtElimination) break;
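
For the zero-extension case the comment singles out, the elimination boils down to: if the RLDICL only clears bits that are provably zero in its input, replace it with a COPY. A simplified sketch follows; the SrcIsZeroExtended callback is a placeholder for whatever extension analysis the pass actually consults, and only the plain SH=0/MB=32 form is handled:

#include "PPC.h"                          // PPC::RLDICL opcode enum
#include "llvm/ADT/STLExtras.h"           // llvm::function_ref
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

// Sketch: turn "%8 = RLDICL %7, 0, 32" into "%8 = COPY %7" when the top
// 32 bits of %7 are already known to be zero, so the clear performed by
// the RLDICL is a no-op.
static bool tryEraseRedundantRLDICL(
    MachineInstr &MI, MachineRegisterInfo &MRI, const TargetInstrInfo *TII,
    function_ref<bool(const MachineInstr &)> SrcIsZeroExtended) {
  if (MI.getOpcode() != PPC::RLDICL)
    return false;
  int64_t SH = MI.getOperand(2).getImm();  // rotate amount
  int64_t MB = MI.getOperand(3).getImm();  // first bit that is kept
  if (SH != 0 || MB != 32)
    return false;                          // not the simple zext form
  MachineInstr *DefMI = MRI.getVRegDef(MI.getOperand(1).getReg());
  if (!DefMI || !SrcIsZeroExtended(*DefMI))
    return false;
  // All cleared bits are already zero: degrade the rotate to a COPY.
  BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
          TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg())
      .add(MI.getOperand(1));
  MI.eraseFromParent();
  return true;
}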
@@ -685,7 +685,7 @@ bool PPCMIPeephole::simplifyCode(void) {
DEBUG(dbgs() << "Optimizing LI to ADDI: ");
DEBUG(LiMI->dump());
- // There could be repeated registers in the PHI, e.g: %1<def> =
+ // There could be repeated registers in the PHI, e.g: %1 =
// PHI %6, <%bb.2>, %8, <%bb.3>, %8, <%bb.6>; So if we've
// already replaced the def instruction, skip.
if (LiMI->getOpcode() == PPC::ADDI || LiMI->getOpcode() == PPC::ADDI8)
diff --git a/llvm/lib/Target/PowerPC/PPCQPXLoadSplat.cpp b/llvm/lib/Target/PowerPC/PPCQPXLoadSplat.cpp
index 10394166ddf..544c7f2aeef 100644
--- a/llvm/lib/Target/PowerPC/PPCQPXLoadSplat.cpp
+++ b/llvm/lib/Target/PowerPC/PPCQPXLoadSplat.cpp
@@ -79,8 +79,8 @@ bool PPCQPXLoadSplat::runOnMachineFunction(MachineFunction &MF) {
}
// We're looking for a sequence like this:
- // %f0<def> = LFD 0, %x3<kill>, %qf0<imp-def>; mem:LD8[%a](tbaa=!2)
- // %qf1<def> = QVESPLATI %qf0<kill>, 0, %rm<imp-use>
+ // %f0 = LFD 0, killed %x3, implicit-def %qf0; mem:LD8[%a](tbaa=!2)
+ // %qf1 = QVESPLATI killed %qf0, 0, implicit %rm
for (auto SI = Splats.begin(); SI != Splats.end();) {
MachineInstr *SMI = *SI;
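
The loop above iterates over the splat candidates gathered into Splats. As a rough sketch of the matching half of the job (pairing a splat with the scalar load that feeds it in the same block), one could scan the block as below; the helper name is illustrative, and the actual rewrite into a single load-and-splat instruction is omitted:

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

using namespace llvm;

// Sketch: find the load in SplatMI's block whose (possibly implicit)
// definition of SrcReg reaches the splat, i.e. nothing else clobbers
// SrcReg before the splat reads it.
static MachineInstr *findFeedingLoad(MachineInstr *SplatMI, Register SrcReg,
                                     const TargetRegisterInfo *TRI) {
  MachineInstr *Feeder = nullptr;
  for (MachineInstr &MI : *SplatMI->getParent()) {
    if (&MI == SplatMI)
      return Feeder;                         // reached the splat
    if (MI.modifiesRegister(SrcReg, TRI))
      Feeder = MI.mayLoad() ? &MI : nullptr; // a non-load def breaks the pattern
  }
  return nullptr;
}

Here SrcReg would be the register the splat reads, e.g. the %qf0 implicitly defined by the LFD in the sequence above (physical registers, as in the example).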
diff --git a/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp b/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
index 4d001c0210d..422bb7ba305 100644
--- a/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
+++ b/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
@@ -90,21 +90,21 @@ protected:
// This pass is run after register coalescing, and so we're looking for
// a situation like this:
// ...
- // %5<def> = COPY %9; VSLRC:%5,%9
+ // %5 = COPY %9; VSLRC:%5,%9
// %5<def,tied1> = XSMADDADP %5<tied0>, %17, %16,
- // %rm<imp-use>; VSLRC:%5,%17,%16
+ // implicit %rm; VSLRC:%5,%17,%16
// ...
// %9<def,tied1> = XSMADDADP %9<tied0>, %17, %19,
- // %rm<imp-use>; VSLRC:%9,%17,%19
+ // implicit %rm; VSLRC:%9,%17,%19
// ...
// Where we can eliminate the copy by changing from the A-type to the
// M-type instruction. Specifically, for this example, this means:
// %5<def,tied1> = XSMADDADP %5<tied0>, %17, %16,
- // %rm<imp-use>; VSLRC:%5,%17,%16
+ // implicit %rm; VSLRC:%5,%17,%16
// is replaced by:
// %16<def,tied1> = XSMADDMDP %16<tied0>, %18, %9,
- // %rm<imp-use>; VSLRC:%16,%18,%9
- // and we remove: %5<def> = COPY %9; VSLRC:%5,%9
+ // implicit %rm; VSLRC:%16,%18,%9
+ // and we remove: %5 = COPY %9; VSLRC:%5,%9
SlotIndex FMAIdx = LIS->getInstructionIndex(MI);
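
Stripped of the LiveIntervals bookkeeping and the legality checks that this code performs, the A-type to M-type rewrite amounts to: retarget the FMA at the killed multiplicand register, move the copy's source into the addend slot, and drop the copy. A simplified sketch, in which AltOpc is assumed to be the matching M-type opcode (XSMADDMDP for XSMADDADP), the operand indices follow the example in the comment (killed multiplicand in the second multiplicand slot), and every precondition (the multiplicand is killed here, the copy's source is still live and unmodified) is assumed to hold:

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

// Sketch: rewrite
//   %5 = COPY %9
//   %5 = XSMADDADP %5(tied), %17, %16      ; %5 := %17 * %16 + %9
// into
//   %16 = XSMADDMDP %16(tied), %17, %9     ; %16 := %16 * %17 + %9
// and make later readers of %5 read %16 instead, so the COPY dies.
static void rewriteAtoM(MachineInstr &FMA, MachineInstr &AddendCopy,
                        MachineRegisterInfo &MRI, const TargetInstrInfo *TII,
                        unsigned AltOpc) {
  Register OldDst    = FMA.getOperand(0).getReg();        // %5
  Register KilledMul = FMA.getOperand(3).getReg();        // %16, killed here
  Register CopySrc   = AddendCopy.getOperand(1).getReg(); // %9
  FMA.setDesc(TII->get(AltOpc));           // A-type -> M-type opcode
  FMA.getOperand(0).setReg(KilledMul);     // result lands in the killed reg
  FMA.getOperand(1).setReg(KilledMul);     // tied multiplicand operand
  FMA.getOperand(3).setReg(CopySrc);       // addend comes straight from %9
  AddendCopy.eraseFromParent();            // the COPY is no longer needed
  MRI.replaceRegWith(OldDst, KilledMul);   // downstream uses of %5 -> %16
}

The real pass also keeps LiveIntervals up to date (note the SlotIndex query above) and performs the legality checks this sketch assumes away.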
@@ -150,10 +150,10 @@ protected:
// walking the MIs we may as well test liveness here.
//
// FIXME: There is a case that occurs in practice, like this:
- // %9<def> = COPY %f1; VSSRC:%9
+ // %9 = COPY %f1; VSSRC:%9
// ...
- // %6<def> = COPY %9; VSSRC:%6,%9
- // %7<def> = COPY %9; VSSRC:%7,%9
+ // %6 = COPY %9; VSSRC:%6,%9
+ // %7 = COPY %9; VSSRC:%7,%9
// %9<def,tied1> = XSMADDASP %9<tied0>, %1, %4; VSSRC:
// %6<def,tied1> = XSMADDASP %6<tied0>, %1, %2; VSSRC:
// %7<def,tied1> = XSMADDASP %7<tied0>, %1, %3; VSSRC: