diff options
author | Craig Topper <craig.topper@gmail.com> | 2016-07-22 05:00:35 +0000 |
---|---|---|
committer | Craig Topper <craig.topper@gmail.com> | 2016-07-22 05:00:35 +0000 |
commit | ab13b33dedb60ecd8f19afb0d9d519a4cd15be16 (patch) | |
tree | 2c8d6032e9eac69a1454af0dd8bcdf29dc632d8a /llvm/lib/Target/X86/X86InstrInfo.cpp | |
parent | 522a91181af75702f1526640d8fd356e22ea383a (diff) | |
download | bcm5719-llvm-ab13b33dedb60ecd8f19afb0d9d519a4cd15be16.tar.gz bcm5719-llvm-ab13b33dedb60ecd8f19afb0d9d519a4cd15be16.zip |
[AVX512] Update X86InstrInfo::foldMemoryOperandCustom to handle the EVEX encoded instructions too.
llvm-svn: 276390
Diffstat (limited to 'llvm/lib/Target/X86/X86InstrInfo.cpp')
-rw-r--r-- | llvm/lib/Target/X86/X86InstrInfo.cpp | 12 |
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp index e64f4548031..052191a3bff 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.cpp +++ b/llvm/lib/Target/X86/X86InstrInfo.cpp @@ -5780,6 +5780,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom( switch (MI.getOpcode()) { case X86::INSERTPSrr: case X86::VINSERTPSrr: + case X86::VINSERTPSZrr: // Attempt to convert the load of inserted vector into a fold load // of a single float. if (OpNum == 2) { @@ -5793,8 +5794,9 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom( int PtrOffset = SrcIdx * 4; unsigned NewImm = (DstIdx << 4) | ZMask; unsigned NewOpCode = - (MI.getOpcode() == X86::VINSERTPSrr ? X86::VINSERTPSrm - : X86::INSERTPSrm); + (MI.getOpcode() == X86::VINSERTPSZrr) ? X86::VINSERTPSZrm : + (MI.getOpcode() == X86::VINSERTPSrr) ? X86::VINSERTPSrm : + X86::INSERTPSrm; MachineInstr *NewMI = FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset); NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm); @@ -5804,6 +5806,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom( break; case X86::MOVHLPSrr: case X86::VMOVHLPSrr: + case X86::VMOVHLPSZrr: // Move the upper 64-bits of the second operand to the lower 64-bits. // To fold the load, adjust the pointer to the upper and use (V)MOVLPS. // TODO: In most cases AVX doesn't have a 8-byte alignment requirement. @@ -5811,8 +5814,9 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom( unsigned RCSize = getRegClass(MI.getDesc(), OpNum, &RI, MF)->getSize(); if (Size <= RCSize && 8 <= Align) { unsigned NewOpCode = - (MI.getOpcode() == X86::VMOVHLPSrr ? X86::VMOVLPSrm - : X86::MOVLPSrm); + (MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm : + (MI.getOpcode() == X86::VMOVHLPSrr) ? X86::VMOVLPSrm : + X86::MOVLPSrm; MachineInstr *NewMI = FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, 8); return NewMI; |