| author | Craig Topper <craig.topper@gmail.com> | 2016-07-31 20:20:05 +0000 |
|---|---|---|
| committer | Craig Topper <craig.topper@gmail.com> | 2016-07-31 20:20:05 +0000 |
| commit | 7afdc0fb25e04f3d824fd81c8872cd57daab0b7b | |
| tree | 850042a35f07bafa03fb4ff3630842c3dccb9975 /llvm/lib/Target | |
| parent | 4c53e603604fc77c74efe8a2eb4c8e0b3f3323c4 | |
[AVX512] Always use EVEX encodings for 128/256-bit move instructions in getLoadStoreRegOpcode if VLX is supported.
llvm-svn: 277305
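
The idea behind the change: once VLX (AVX512VL) is available, the EVEX-encoded 128/256-bit moves can do everything the VEX/SSE forms do and can additionally reach XMM16-XMM31, so the opcode choice becomes a single feature-ordered chain instead of branching on the register class. Below is a minimal standalone sketch of that chain for the 16-byte load case (plain C++; the enum, the `Names` table, and `select128BitLoad` are illustrative stand-ins, not LLVM's actual `X86::` opcode enum or API):

```cpp
#include <cstdio>

// Hypothetical stand-ins for LLVM's X86 opcode enumerators (illustration only).
enum Opcode { MOVAPSrm, MOVUPSrm,            // legacy SSE encodings
              VMOVAPSrm, VMOVUPSrm,          // VEX encodings (AVX)
              VMOVAPSZ128rm, VMOVUPSZ128rm };// EVEX encodings (AVX-512 + VLX)

static const char *Names[] = {"MOVAPSrm", "MOVUPSrm", "VMOVAPSrm",
                              "VMOVUPSrm", "VMOVAPSZ128rm", "VMOVUPSZ128rm"};

// Mirrors the post-patch ternary chain for a 16-byte reload:
// EVEX form if VLX is available, else VEX form with AVX, else legacy SSE.
static Opcode select128BitLoad(bool HasVLX, bool HasAVX, bool IsAligned) {
  if (IsAligned)
    return HasVLX ? VMOVAPSZ128rm : HasAVX ? VMOVAPSrm : MOVAPSrm;
  return HasVLX ? VMOVUPSZ128rm : HasAVX ? VMOVUPSrm : MOVUPSrm;
}

int main() {
  // SSE-only, AVX, and AVX-512VL subtargets; aligned stack slot in each case.
  printf("%s\n", Names[select128BitLoad(false, false, true)]); // MOVAPSrm
  printf("%s\n", Names[select128BitLoad(false, true, true)]);  // VMOVAPSrm
  printf("%s\n", Names[select128BitLoad(true, true, true)]);   // VMOVAPSZ128rm
}
```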
Diffstat (limited to 'llvm/lib/Target')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/lib/Target/X86/X86InstrInfo.cpp | 39 |

1 file changed, 15 insertions(+), 24 deletions(-)
```diff
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index f8240f5e939..dda11809208 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -4840,6 +4840,7 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
                                       bool load) {
   bool HasAVX = STI.hasAVX();
   bool HasAVX512 = STI.hasAVX512();
+  bool HasVLX = STI.hasVLX();
 
   if (HasAVX512 && isMaskRegClass(RC))
     return getLoadStoreMaskRegOpcode(RC, load);
@@ -4884,38 +4885,28 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
     assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass");
     return load ? X86::LD_Fp80m : X86::ST_FpP80m;
   case 16: {
-    assert((X86::VR128RegClass.hasSubClassEq(RC) ||
-            X86::VR128XRegClass.hasSubClassEq(RC))&& "Unknown 16-byte regclass");
+    assert(X86::VR128XRegClass.hasSubClassEq(RC) && "Unknown 16-byte regclass");
     // If stack is realigned we can use aligned stores.
-    if (X86::VR128RegClass.hasSubClassEq(RC)) {
-      if (isStackAligned)
-        return load ? (HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
-                    : (HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
-      else
-        return load ? (HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
-                    : (HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
-    }
-    assert(STI.hasVLX() && "Using extended register requires VLX");
     if (isStackAligned)
-      return load ? X86::VMOVAPSZ128rm : X86::VMOVAPSZ128mr;
+      return load ?
+        (HasVLX ? X86::VMOVAPSZ128rm : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm):
+        (HasVLX ? X86::VMOVAPSZ128mr : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
     else
-      return load ? X86::VMOVUPSZ128rm : X86::VMOVUPSZ128mr;
+      return load ?
+        (HasVLX ? X86::VMOVUPSZ128rm : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm):
+        (HasVLX ? X86::VMOVUPSZ128mr : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
   }
   case 32:
-    assert((X86::VR256RegClass.hasSubClassEq(RC) ||
-            X86::VR256XRegClass.hasSubClassEq(RC)) && "Unknown 32-byte regclass");
+    assert(X86::VR256XRegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass");
     // If stack is realigned we can use aligned stores.
-    if (X86::VR256RegClass.hasSubClassEq(RC)) {
-      if (isStackAligned)
-        return load ? X86::VMOVAPSYrm : X86::VMOVAPSYmr;
-      else
-        return load ? X86::VMOVUPSYrm : X86::VMOVUPSYmr;
-    }
-    assert(STI.hasVLX() && "Using extended register requires VLX");
     if (isStackAligned)
-      return load ? X86::VMOVAPSZ256rm : X86::VMOVAPSZ256mr;
+      return load ?
+        (HasVLX ? X86::VMOVAPSZ256rm : X86::VMOVAPSYrm) :
+        (HasVLX ? X86::VMOVAPSZ256mr : X86::VMOVAPSYmr);
     else
-      return load ? X86::VMOVUPSZ256rm : X86::VMOVUPSZ256mr;
+      return load ?
+        (HasVLX ? X86::VMOVUPSZ256rm : X86::VMOVUPSYrm) :
+        (HasVLX ? X86::VMOVUPSZ256mr : X86::VMOVUPSYmr);
   case 64:
     assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass");
     assert(STI.hasAVX512() && "Using 512-bit register requires AVX512");
```
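Two design consequences are visible in the diff: the `STI.hasVLX()` asserts disappear because the VLX check is now folded into the opcode selection itself, and the separate VR128/VR128X (and VR256/VR256X) paths collapse into one, since VR128 is a subclass of VR128X and the EVEX-encoded `Z128`/`Z256` forms can address the extended XMM16-XMM31/YMM16-YMM31 registers that the VEX encodings cannot. So spills and reloads of extended registers no longer need their own code path.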

