author     Ulrich Weigand <ulrich.weigand@de.ibm.com>   2015-05-05 19:28:34 +0000
committer  Ulrich Weigand <ulrich.weigand@de.ibm.com>   2015-05-05 19:28:34 +0000
commit     49506d78e7f437785f4d3f68063f4aa9c622bb2c (patch)
tree       739e77ae549aaedcd76923e16478bc36747296cd /llvm/lib
parent     80b3af7ab3f8e76507cc4491be1460f1b1d8adb2 (diff)
[SystemZ] Add CodeGen support for scalar f64 ops in vector registers
The z13 vector facility includes some instructions that operate only on the
high f64 in a v2f64, effectively extending the FP register set from 16 to 32
registers. It's still better to use the old instructions if the operands
happen to fit though, since the older instructions have a shorter encoding.

Based on a patch by Richard Sandiford.

llvm-svn: 236524
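To make the register-numbering trade-off concrete, here is a minimal C++ sketch (illustrative only, not LLVM code) of the rule the patch applies later in SystemZShortenInst: the classic FP encodings can only name registers 0-15, which overlap the traditional FPR file, so they are preferred only when every register operand happens to land in that range.

    // Illustrative sketch; RegNo stands for a hardware register number 0-31.
    // The helper name is hypothetical and not part of LLVM.
    bool canUseClassicFPEncoding(unsigned DstRegNo, unsigned SrcRegNo) {
      // Classic FP instructions (e.g. ADBR) only encode registers 0-15; the
      // vector-facility forms (e.g. WFADB) reach all 32 registers but have a
      // longer encoding, so prefer the classic form whenever both fit.
      return DstRegNo < 16 && SrcRegNo < 16;
    }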
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZ.td                2
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp    44
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZISelLowering.cpp  11
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZInstrFP.td        14
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZInstrFormats.td   22
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp     10
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZInstrVector.td    42
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZShortenInst.cpp  139
8 files changed, 261 insertions, 23 deletions
diff --git a/llvm/lib/Target/SystemZ/SystemZ.td b/llvm/lib/Target/SystemZ/SystemZ.td
index 4f3f18ed499..d4d636d3479 100644
--- a/llvm/lib/Target/SystemZ/SystemZ.td
+++ b/llvm/lib/Target/SystemZ/SystemZ.td
@@ -40,8 +40,8 @@ include "SystemZOperands.td"
include "SystemZPatterns.td"
include "SystemZInstrFormats.td"
include "SystemZInstrInfo.td"
-include "SystemZInstrFP.td"
include "SystemZInstrVector.td"
+include "SystemZInstrFP.td"
def SystemZInstrInfo : InstrInfo {}
diff --git a/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp b/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
index 026a75f2140..f16488063af 100644
--- a/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
@@ -80,6 +80,27 @@ static const MCSymbolRefExpr *getGlobalOffsetTable(MCContext &Context) {
Context);
}
+// MI loads the high part of a vector from memory. Return an instruction
+// that uses replicating vector load Opcode to do the same thing.
+static MCInst lowerSubvectorLoad(const MachineInstr *MI, unsigned Opcode) {
+ return MCInstBuilder(Opcode)
+ .addReg(SystemZMC::getRegAsVR128(MI->getOperand(0).getReg()))
+ .addReg(MI->getOperand(1).getReg())
+ .addImm(MI->getOperand(2).getImm())
+ .addReg(MI->getOperand(3).getReg());
+}
+
+// MI stores the high part of a vector to memory. Return an instruction
+// that uses elemental vector store Opcode to do the same thing.
+static MCInst lowerSubvectorStore(const MachineInstr *MI, unsigned Opcode) {
+ return MCInstBuilder(Opcode)
+ .addReg(SystemZMC::getRegAsVR128(MI->getOperand(0).getReg()))
+ .addReg(MI->getOperand(1).getReg())
+ .addImm(MI->getOperand(2).getImm())
+ .addReg(MI->getOperand(3).getReg())
+ .addImm(0);
+}
+
void SystemZAsmPrinter::EmitInstruction(const MachineInstr *MI) {
SystemZMCInstLower Lower(MF->getContext(), *this);
MCInst LoweredMI;
@@ -158,6 +179,29 @@ void SystemZAsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addReg(SystemZMC::getRegAsGR64(MI->getOperand(2).getReg()));
break;
+ case SystemZ::VLR32:
+ case SystemZ::VLR64:
+ LoweredMI = MCInstBuilder(SystemZ::VLR)
+ .addReg(SystemZMC::getRegAsVR128(MI->getOperand(0).getReg()))
+ .addReg(SystemZMC::getRegAsVR128(MI->getOperand(1).getReg()));
+ break;
+
+ case SystemZ::VL32:
+ LoweredMI = lowerSubvectorLoad(MI, SystemZ::VLREPF);
+ break;
+
+ case SystemZ::VL64:
+ LoweredMI = lowerSubvectorLoad(MI, SystemZ::VLREPG);
+ break;
+
+ case SystemZ::VST32:
+ LoweredMI = lowerSubvectorStore(MI, SystemZ::VSTEF);
+ break;
+
+ case SystemZ::VST64:
+ LoweredMI = lowerSubvectorStore(MI, SystemZ::VSTEG);
+ break;
+
case SystemZ::LFER:
LoweredMI = MCInstBuilder(SystemZ::VLGVF)
.addReg(SystemZMC::getRegAsGR64(MI->getOperand(0).getReg()))
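The replicating load chosen by lowerSubvectorLoad above is safe for scalar code because VLREP writes the loaded value into every element, so element 0 -- the only lane that scalar f64/f32 operations in vector registers actually use -- receives the loaded value. A rough, self-contained C++ model of that equivalence (purely illustrative; not LLVM or hardware code):

    #include <array>
    #include <cstdint>

    // Model of VLREPG: replicate the loaded doubleword into both elements.
    std::array<uint64_t, 2> vlrepg(uint64_t MemDoubleword) {
      return {MemDoubleword, MemDoubleword};
    }

    // Scalar f64 code only reads element 0 (the lane that overlaps the
    // traditional FPR), so the copy left in element 1 is harmless.
    uint64_t scalarF64Bits(const std::array<uint64_t, 2> &Vec) {
      return Vec[0];
    }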
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 391cb8c6fc9..ff79a48179f 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -91,9 +91,14 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &tm,
addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
else
addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
- addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
- addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
- addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
+ addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
+ if (Subtarget.hasVector()) {
+ addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass);
+ addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass);
+ } else {
+ addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
+ addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
+ }
addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);
if (Subtarget.hasVector()) {
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrFP.td b/llvm/lib/Target/SystemZ/SystemZInstrFP.td
index efa29fa9c00..27fbd7df288 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrFP.td
+++ b/llvm/lib/Target/SystemZ/SystemZInstrFP.td
@@ -46,9 +46,14 @@ let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in {
defm LTDBR : LoadAndTestRRE<"ltdb", 0xB312, FP64>;
defm LTXBR : LoadAndTestRRE<"ltxb", 0xB342, FP128>;
}
-defm : CompareZeroFP<LTEBRCompare, FP32>;
-defm : CompareZeroFP<LTDBRCompare, FP64>;
-defm : CompareZeroFP<LTXBRCompare, FP128>;
+// Note that the comparison against zero operation is not available if we
+// have vector support, since load-and-test instructions will partially
+// clobber the target (vector) register.
+let Predicates = [FeatureNoVector] in {
+ defm : CompareZeroFP<LTEBRCompare, FP32>;
+ defm : CompareZeroFP<LTDBRCompare, FP64>;
+ defm : CompareZeroFP<LTXBRCompare, FP128>;
+}
// Moves between 64-bit integer and floating-point registers.
def LGDR : UnaryRRE<"lgd", 0xB3CD, bitconvert, GR64, FP64>;
@@ -98,6 +103,9 @@ let canFoldAsLoad = 1, SimpleBDXLoad = 1 in {
defm LE : UnaryRXPair<"le", 0x78, 0xED64, load, FP32, 4>;
defm LD : UnaryRXPair<"ld", 0x68, 0xED65, load, FP64, 8>;
+ // For z13 we prefer LDE over LE to avoid partial register dependencies.
+ def LDE32 : UnaryRXE<"lde", 0xED24, null_frag, FP32, 4>;
+
// These instructions are split after register allocation, so we don't
// want a custom inserter.
let Has20BitOffset = 1, HasIndex = 1, Is128Bit = 1 in {
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrFormats.td b/llvm/lib/Target/SystemZ/SystemZInstrFormats.td
index dc9dfa801fd..71eb9986499 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrFormats.td
+++ b/llvm/lib/Target/SystemZ/SystemZInstrFormats.td
@@ -2151,10 +2151,13 @@ class PrefetchRILPC<string mnemonic, bits<12> opcode,
// A floating-point load-and-test operation. Create both a normal unary
// operation and one that acts as a comparison against zero.
+// Note that the comparison against zero operation is not available if we
+// have vector support, since load-and-test instructions will partially
+// clobber the target (vector) register.
multiclass LoadAndTestRRE<string mnemonic, bits<16> opcode,
RegisterOperand cls> {
def "" : UnaryRRE<mnemonic, opcode, null_frag, cls, cls>;
- let isCodeGenOnly = 1 in
+ let isCodeGenOnly = 1, Predicates = [FeatureNoVector] in
def Compare : CompareRRE<mnemonic, opcode, null_frag, cls, cls>;
}
@@ -2401,6 +2404,23 @@ class Alias<int size, dag outs, dag ins, list<dag> pattern>
class UnaryAliasVRS<RegisterOperand cls1, RegisterOperand cls2>
: Alias<6, (outs cls1:$src1), (ins cls2:$src2), []>;
+// An alias of a UnaryVRR*, but with different register sizes.
+class UnaryAliasVRR<SDPatternOperator operator, TypedReg tr1, TypedReg tr2>
+ : Alias<6, (outs tr1.op:$V1), (ins tr2.op:$V2),
+ [(set tr1.op:$V1, (tr1.vt (operator (tr2.vt tr2.op:$V2))))]>;
+
+// An alias of a UnaryVRX, but with different register sizes.
+class UnaryAliasVRX<SDPatternOperator operator, TypedReg tr,
+ AddressingMode mode = bdxaddr12only>
+ : Alias<6, (outs tr.op:$V1), (ins mode:$XBD2),
+ [(set tr.op:$V1, (tr.vt (operator mode:$XBD2)))]>;
+
+// An alias of a StoreVRX, but with different register sizes.
+class StoreAliasVRX<SDPatternOperator operator, TypedReg tr,
+ AddressingMode mode = bdxaddr12only>
+ : Alias<6, (outs), (ins tr.op:$V1, mode:$XBD2),
+ [(operator (tr.vt tr.op:$V1), mode:$XBD2)]>;
+
// An alias of a BinaryRI, but with different register sizes.
class BinaryAliasRI<SDPatternOperator operator, RegisterOperand cls,
Immediate imm>
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 63101a9d000..8dbd9df32b7 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -578,6 +578,10 @@ SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
Opcode = SystemZ::LDR;
else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
Opcode = SystemZ::LXR;
+ else if (SystemZ::VR32BitRegClass.contains(DestReg, SrcReg))
+ Opcode = SystemZ::VLR32;
+ else if (SystemZ::VR64BitRegClass.contains(DestReg, SrcReg))
+ Opcode = SystemZ::VLR64;
else if (SystemZ::VR128BitRegClass.contains(DestReg, SrcReg))
Opcode = SystemZ::VLR;
else
@@ -1118,6 +1122,12 @@ void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
} else if (RC == &SystemZ::FP128BitRegClass) {
LoadOpcode = SystemZ::LX;
StoreOpcode = SystemZ::STX;
+ } else if (RC == &SystemZ::VR32BitRegClass) {
+ LoadOpcode = SystemZ::VL32;
+ StoreOpcode = SystemZ::VST32;
+ } else if (RC == &SystemZ::VR64BitRegClass) {
+ LoadOpcode = SystemZ::VL64;
+ StoreOpcode = SystemZ::VST64;
} else if (RC == &SystemZ::VF128BitRegClass ||
RC == &SystemZ::VR128BitRegClass) {
LoadOpcode = SystemZ::VL;
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrVector.td b/llvm/lib/Target/SystemZ/SystemZInstrVector.td
index b6c8042b3c8..8abaeb69a20 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrVector.td
+++ b/llvm/lib/Target/SystemZ/SystemZInstrVector.td
@@ -14,6 +14,8 @@
let Predicates = [FeatureVector] in {
// Register move.
def VLR : UnaryVRRa<"vlr", 0xE756, null_frag, v128any, v128any>;
+ def VLR32 : UnaryAliasVRR<null_frag, v32eb, v32eb>;
+ def VLR64 : UnaryAliasVRR<null_frag, v64db, v64db>;
// Load GR from VR element.
def VLGVB : BinaryVRSc<"vlgvb", 0xE721, null_frag, v128b, 0>;
@@ -123,6 +125,13 @@ let Predicates = [FeatureVector] in {
def : Pat<(v2f64 (z_replicate_loadf64 bdxaddr12only:$addr)),
(VLREPG bdxaddr12only:$addr)>;
+ // Use VLREP to load subvectors. These patterns use "12pair" because
+ // LEY and LDY offer full 20-bit displacement fields. It's often better
+ // to use those instructions rather than force a 20-bit displacement
+ // into a GPR temporary.
+ def VL32 : UnaryAliasVRX<load, v32eb, bdxaddr12pair>;
+ def VL64 : UnaryAliasVRX<load, v64db, bdxaddr12pair>;
+
// Load logical element and zero.
def VLLEZB : UnaryVRX<"vllezb", 0xE704, z_vllezi8, v128b, 1, 0>;
def VLLEZH : UnaryVRX<"vllezh", 0xE704, z_vllezi16, v128h, 2, 1>;
@@ -193,6 +202,13 @@ let Predicates = [FeatureVector] in {
imm32zx1:$index),
(VSTEG VR128:$val, bdxaddr12only:$addr, imm32zx1:$index)>;
+ // Use VSTE to store subvectors. These patterns use "12pair" because
+ // STEY and STDY offer full 20-bit displacement fields. It's often better
+ // to use those instructions rather than force a 20-bit displacement
+ // into a GPR temporary.
+ def VST32 : StoreAliasVRX<store, v32eb, bdxaddr12pair>;
+ def VST64 : StoreAliasVRX<store, v64db, bdxaddr12pair>;
+
// Scatter element.
def VSCEF : StoreBinaryVRV<"vscef", 0xE71B, 4, imm32zx2>;
def VSCEG : StoreBinaryVRV<"vsceg", 0xE71A, 8, imm32zx1>;
@@ -778,7 +794,7 @@ multiclass VectorRounding<Instruction insn, TypedReg tr> {
let Predicates = [FeatureVector] in {
// Add.
def VFADB : BinaryVRRc<"vfadb", 0xE7E3, fadd, v128db, v128db, 3, 0>;
- def WFADB : BinaryVRRc<"wfadb", 0xE7E3, null_frag, v64db, v64db, 3, 8>;
+ def WFADB : BinaryVRRc<"wfadb", 0xE7E3, fadd, v64db, v64db, 3, 8>;
// Convert from fixed 64-bit.
def VCDGB : TernaryVRRa<"vcdgb", 0xE7C3, null_frag, v128db, v128g, 3, 0>;
@@ -804,53 +820,55 @@ let Predicates = [FeatureVector] in {
// Divide.
def VFDDB : BinaryVRRc<"vfddb", 0xE7E5, fdiv, v128db, v128db, 3, 0>;
- def WFDDB : BinaryVRRc<"wfddb", 0xE7E5, null_frag, v64db, v64db, 3, 8>;
+ def WFDDB : BinaryVRRc<"wfddb", 0xE7E5, fdiv, v64db, v64db, 3, 8>;
// Load FP integer.
def VFIDB : TernaryVRRa<"vfidb", 0xE7C7, null_frag, v128db, v128db, 3, 0>;
def WFIDB : TernaryVRRa<"wfidb", 0xE7C7, null_frag, v64db, v64db, 3, 8>;
defm : VectorRounding<VFIDB, v128db>;
+ defm : VectorRounding<WFIDB, v64db>;
// Load lengthened.
def VLDEB : UnaryVRRa<"vldeb", 0xE7C4, z_vextend, v128db, v128eb, 2, 0>;
- def WLDEB : UnaryVRRa<"wldeb", 0xE7C4, null_frag, v64db, v32eb, 2, 8>;
+ def WLDEB : UnaryVRRa<"wldeb", 0xE7C4, fextend, v64db, v32eb, 2, 8>;
// Load rounded.
def VLEDB : TernaryVRRa<"vledb", 0xE7C5, null_frag, v128eb, v128db, 3, 0>;
def WLEDB : TernaryVRRa<"wledb", 0xE7C5, null_frag, v32eb, v64db, 3, 8>;
def : Pat<(v4f32 (z_vround (v2f64 VR128:$src))), (VLEDB VR128:$src, 0, 0)>;
+ def : FPConversion<WLEDB, fround, v32eb, v64db, 0, 0>;
// Multiply.
def VFMDB : BinaryVRRc<"vfmdb", 0xE7E7, fmul, v128db, v128db, 3, 0>;
- def WFMDB : BinaryVRRc<"wfmdb", 0xE7E7, null_frag, v64db, v64db, 3, 8>;
+ def WFMDB : BinaryVRRc<"wfmdb", 0xE7E7, fmul, v64db, v64db, 3, 8>;
// Multiply and add.
def VFMADB : TernaryVRRe<"vfmadb", 0xE78F, fma, v128db, v128db, 0, 3>;
- def WFMADB : TernaryVRRe<"wfmadb", 0xE78F, null_frag, v64db, v64db, 8, 3>;
+ def WFMADB : TernaryVRRe<"wfmadb", 0xE78F, fma, v64db, v64db, 8, 3>;
// Multiply and subtract.
def VFMSDB : TernaryVRRe<"vfmsdb", 0xE78E, fms, v128db, v128db, 0, 3>;
- def WFMSDB : TernaryVRRe<"wfmsdb", 0xE78E, null_frag, v64db, v64db, 8, 3>;
+ def WFMSDB : TernaryVRRe<"wfmsdb", 0xE78E, fms, v64db, v64db, 8, 3>;
// Load complement.
def VFLCDB : UnaryVRRa<"vflcdb", 0xE7CC, fneg, v128db, v128db, 3, 0, 0>;
- def WFLCDB : UnaryVRRa<"wflcdb", 0xE7CC, null_frag, v64db, v64db, 3, 8, 0>;
+ def WFLCDB : UnaryVRRa<"wflcdb", 0xE7CC, fneg, v64db, v64db, 3, 8, 0>;
// Load negative.
def VFLNDB : UnaryVRRa<"vflndb", 0xE7CC, fnabs, v128db, v128db, 3, 0, 1>;
- def WFLNDB : UnaryVRRa<"wflndb", 0xE7CC, null_frag, v64db, v64db, 3, 8, 1>;
+ def WFLNDB : UnaryVRRa<"wflndb", 0xE7CC, fnabs, v64db, v64db, 3, 8, 1>;
// Load positive.
def VFLPDB : UnaryVRRa<"vflpdb", 0xE7CC, fabs, v128db, v128db, 3, 0, 2>;
- def WFLPDB : UnaryVRRa<"wflpdb", 0xE7CC, null_frag, v64db, v64db, 3, 8, 2>;
+ def WFLPDB : UnaryVRRa<"wflpdb", 0xE7CC, fabs, v64db, v64db, 3, 8, 2>;
// Square root.
def VFSQDB : UnaryVRRa<"vfsqdb", 0xE7CE, fsqrt, v128db, v128db, 3, 0>;
- def WFSQDB : UnaryVRRa<"wfsqdb", 0xE7CE, null_frag, v64db, v64db, 3, 8>;
+ def WFSQDB : UnaryVRRa<"wfsqdb", 0xE7CE, fsqrt, v64db, v64db, 3, 8>;
// Subtract.
def VFSDB : BinaryVRRc<"vfsdb", 0xE7E2, fsub, v128db, v128db, 3, 0>;
- def WFSDB : BinaryVRRc<"wfsdb", 0xE7E2, null_frag, v64db, v64db, 3, 8>;
+ def WFSDB : BinaryVRRc<"wfsdb", 0xE7E2, fsub, v64db, v64db, 3, 8>;
// Test data class immediate.
let Defs = [CC] in {
@@ -866,7 +884,7 @@ let Predicates = [FeatureVector] in {
let Predicates = [FeatureVector] in {
// Compare scalar.
let Defs = [CC] in
- def WFCDB : CompareVRRa<"wfcdb", 0xE7CB, null_frag, v64db, 3>;
+ def WFCDB : CompareVRRa<"wfcdb", 0xE7CB, z_fcmp, v64db, 3>;
// Compare and signal scalar.
let Defs = [CC] in
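The "12pair" comments above come down to a displacement-range trade-off: the vector memory forms used for VL32/VL64/VST32/VST64 (VLREP/VSTE) only encode a 12-bit unsigned displacement, while LEY/LDY/STEY/STDY carry a 20-bit signed one, so for offsets outside the 12-bit range it is usually better to fall back to those FP instructions than to materialize the address in a GPR. A small sketch of the two ranges (helper names are illustrative, not LLVM API):

    #include <cstdint>

    // 12-bit unsigned displacement, as encoded by the vector loads/stores.
    bool fitsShortDisplacement(int64_t D) {
      return D >= 0 && D < (1 << 12);
    }

    // 20-bit signed displacement, as offered by LEY/LDY/STEY/STDY.
    bool fitsLongDisplacement(int64_t D) {
      return D >= -(1 << 19) && D < (1 << 19);
    }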
diff --git a/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp b/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp
index ec7a8c40d18..d1a17c5500d 100644
--- a/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp
@@ -15,6 +15,7 @@
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
using namespace llvm;
@@ -36,6 +37,10 @@ public:
private:
bool shortenIIF(MachineInstr &MI, unsigned *GPRMap, unsigned LiveOther,
unsigned LLIxL, unsigned LLIxH);
+ bool shortenOn0(MachineInstr &MI, unsigned Opcode);
+ bool shortenOn01(MachineInstr &MI, unsigned Opcode);
+ bool shortenOn001(MachineInstr &MI, unsigned Opcode);
+ bool shortenFPConv(MachineInstr &MI, unsigned Opcode);
const SystemZInstrInfo *TII;
@@ -97,6 +102,64 @@ bool SystemZShortenInst::shortenIIF(MachineInstr &MI, unsigned *GPRMap,
return false;
}
+// Change MI's opcode to Opcode if register operand 0 has a 4-bit encoding.
+bool SystemZShortenInst::shortenOn0(MachineInstr &MI, unsigned Opcode) {
+ if (SystemZMC::getFirstReg(MI.getOperand(0).getReg()) < 16) {
+ MI.setDesc(TII->get(Opcode));
+ return true;
+ }
+ return false;
+}
+
+// Change MI's opcode to Opcode if register operands 0 and 1 have a
+// 4-bit encoding.
+bool SystemZShortenInst::shortenOn01(MachineInstr &MI, unsigned Opcode) {
+ if (SystemZMC::getFirstReg(MI.getOperand(0).getReg()) < 16 &&
+ SystemZMC::getFirstReg(MI.getOperand(1).getReg()) < 16) {
+ MI.setDesc(TII->get(Opcode));
+ return true;
+ }
+ return false;
+}
+
+// Change MI's opcode to Opcode if register operands 0, 1 and 2 have a
+// 4-bit encoding and if operands 0 and 1 are tied.
+bool SystemZShortenInst::shortenOn001(MachineInstr &MI, unsigned Opcode) {
+ if (SystemZMC::getFirstReg(MI.getOperand(0).getReg()) < 16 &&
+ MI.getOperand(1).getReg() == MI.getOperand(0).getReg() &&
+ SystemZMC::getFirstReg(MI.getOperand(2).getReg()) < 16) {
+ MI.setDesc(TII->get(Opcode));
+ return true;
+ }
+ return false;
+}
+
+// MI is a vector-style conversion instruction with the operand order:
+// destination, source, exact-suppress, rounding-mode. If both registers
+// have a 4-bit encoding then change it to Opcode, which has operand order:
+// destination, rounding-mode, source, exact-suppress.
+bool SystemZShortenInst::shortenFPConv(MachineInstr &MI, unsigned Opcode) {
+ if (SystemZMC::getFirstReg(MI.getOperand(0).getReg()) < 16 &&
+ SystemZMC::getFirstReg(MI.getOperand(1).getReg()) < 16) {
+ MachineOperand Dest(MI.getOperand(0));
+ MachineOperand Src(MI.getOperand(1));
+ MachineOperand Suppress(MI.getOperand(2));
+ MachineOperand Mode(MI.getOperand(3));
+ MI.RemoveOperand(3);
+ MI.RemoveOperand(2);
+ MI.RemoveOperand(1);
+ MI.RemoveOperand(0);
+ MI.setDesc(TII->get(Opcode));
+ MachineInstrBuilder(*MI.getParent()->getParent(), &MI)
+ .addOperand(Dest)
+ .addOperand(Mode)
+ .addOperand(Src)
+ .addOperand(Suppress);
+ return true;
+ }
+ return false;
+}
+
// Process all instructions in MBB. Return true if something changed.
bool SystemZShortenInst::processBlock(MachineBasicBlock &MBB) {
bool Changed = false;
@@ -117,13 +180,83 @@ bool SystemZShortenInst::processBlock(MachineBasicBlock &MBB) {
// Iterate backwards through the block looking for instructions to change.
for (auto MBBI = MBB.rbegin(), MBBE = MBB.rend(); MBBI != MBBE; ++MBBI) {
MachineInstr &MI = *MBBI;
- unsigned Opcode = MI.getOpcode();
- if (Opcode == SystemZ::IILF)
+ switch (MI.getOpcode()) {
+ case SystemZ::IILF:
Changed |= shortenIIF(MI, LowGPRs, LiveHigh, SystemZ::LLILL,
SystemZ::LLILH);
- else if (Opcode == SystemZ::IIHF)
+ break;
+
+ case SystemZ::IIHF:
Changed |= shortenIIF(MI, HighGPRs, LiveLow, SystemZ::LLIHL,
SystemZ::LLIHH);
+ break;
+
+ case SystemZ::WFADB:
+ Changed |= shortenOn001(MI, SystemZ::ADBR);
+ break;
+
+ case SystemZ::WFDDB:
+ Changed |= shortenOn001(MI, SystemZ::DDBR);
+ break;
+
+ case SystemZ::WFIDB:
+ Changed |= shortenFPConv(MI, SystemZ::FIDBRA);
+ break;
+
+ case SystemZ::WLDEB:
+ Changed |= shortenOn01(MI, SystemZ::LDEBR);
+ break;
+
+ case SystemZ::WLEDB:
+ Changed |= shortenFPConv(MI, SystemZ::LEDBRA);
+ break;
+
+ case SystemZ::WFMDB:
+ Changed |= shortenOn001(MI, SystemZ::MDBR);
+ break;
+
+ case SystemZ::WFLCDB:
+ Changed |= shortenOn01(MI, SystemZ::LCDBR);
+ break;
+
+ case SystemZ::WFLNDB:
+ Changed |= shortenOn01(MI, SystemZ::LNDBR);
+ break;
+
+ case SystemZ::WFLPDB:
+ Changed |= shortenOn01(MI, SystemZ::LPDBR);
+ break;
+
+ case SystemZ::WFSQDB:
+ Changed |= shortenOn01(MI, SystemZ::SQDBR);
+ break;
+
+ case SystemZ::WFSDB:
+ Changed |= shortenOn001(MI, SystemZ::SDBR);
+ break;
+
+ case SystemZ::WFCDB:
+ Changed |= shortenOn01(MI, SystemZ::CDBR);
+ break;
+
+ case SystemZ::VL32:
+ // For z13 we prefer LDE over LE to avoid partial register dependencies.
+ Changed |= shortenOn0(MI, SystemZ::LDE32);
+ break;
+
+ case SystemZ::VST32:
+ Changed |= shortenOn0(MI, SystemZ::STE);
+ break;
+
+ case SystemZ::VL64:
+ Changed |= shortenOn0(MI, SystemZ::LD);
+ break;
+
+ case SystemZ::VST64:
+ Changed |= shortenOn0(MI, SystemZ::STD);
+ break;
+ }
+
unsigned UsedLow = 0;
unsigned UsedHigh = 0;
for (auto MOI = MI.operands_begin(), MOE = MI.operands_end();