diff options
| author | Craig Topper <craig.topper@gmail.com> | 2016-05-10 05:28:04 +0000 |
|---|---|---|
| committer | Craig Topper <craig.topper@gmail.com> | 2016-05-10 05:28:04 +0000 |
| commit | 3e0c038a84c9d78d8ae27da7688d007306331c88 (patch) | |
| tree | 5b42a216bbe730a24f2a218084361a80786e8c59 /llvm/lib | |
| parent | 9f8e50cdb4461c76c908e4906b271985d0b14c94 (diff) | |
| download | bcm5719-llvm-3e0c038a84c9d78d8ae27da7688d007306331c88.tar.gz bcm5719-llvm-3e0c038a84c9d78d8ae27da7688d007306331c88.zip | |
[X86][AVX512] Strengthen the assertions from r269001. We need VLX to use the 128/256-bit move opcodes for extended registers.
llvm-svn: 269019
Diffstat (limited to 'llvm/lib')
| -rw-r--r-- | llvm/lib/Target/X86/X86InstrInfo.cpp | 5 |
1 files changed, 3 insertions, 2 deletions
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp index 4b507022868..45f3727a705 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.cpp +++ b/llvm/lib/Target/X86/X86InstrInfo.cpp @@ -4653,7 +4653,7 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg, return load ? (HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm) : (HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr); } - assert(STI.hasAVX512() && "Using extended register requires AVX512"); + assert(STI.hasVLX() && "Using extended register requires VLX"); if (isStackAligned) return load ? X86::VMOVAPSZ128rm : X86::VMOVAPSZ128mr; else @@ -4669,13 +4669,14 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg, else return load ? X86::VMOVUPSYrm : X86::VMOVUPSYmr; } - assert(STI.hasAVX512() && "Using extended register requires AVX512"); + assert(STI.hasVLX() && "Using extended register requires VLX"); if (isStackAligned) return load ? X86::VMOVAPSZ256rm : X86::VMOVAPSZ256mr; else return load ? X86::VMOVUPSZ256rm : X86::VMOVUPSZ256mr; case 64: assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass"); + assert(STI.hasAVX512() && "Using 512-bit register requires AVX512"); if (isStackAligned) return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr; else |

