-rw-r--r--  llvm/lib/Target/X86/X86InstrFragmentsSIMD.td |  5 -----
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp         | 12 ------------
-rw-r--r--  llvm/lib/Target/X86/X86InstrSSE.td           | 23 -----------------------
3 files changed, 0 insertions, 40 deletions
diff --git a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
index 976567e1a40..d2a190530ee 100644
--- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -677,11 +677,6 @@ def alignedload512 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
   return cast<LoadSDNode>(N)->getAlignment() >= 64;
 }]>;
 
-def alignedloadfsf32 : PatFrag<(ops node:$ptr),
-                               (f32 (alignedload node:$ptr))>;
-def alignedloadfsf64 : PatFrag<(ops node:$ptr),
-                               (f64 (alignedload node:$ptr))>;
-
 // 128-bit aligned load pattern fragments
 // NOTE: all 128-bit integer vector loads are promoted to v2i64
 def alignedloadv4f32 : PatFrag<(ops node:$ptr),
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 7c08efab14a..4b28c3b627d 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -2595,10 +2595,6 @@ bool X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
   case X86::VMOVDQUYrm:
   case X86::MMX_MOVD64rm:
   case X86::MMX_MOVQ64rm:
-  case X86::FsVMOVAPSrm:
-  case X86::FsVMOVAPDrm:
-  case X86::FsMOVAPSrm:
-  case X86::FsMOVAPDrm:
   // AVX-512
   case X86::VMOVSSZrm:
   case X86::VMOVSDZrm:
@@ -6760,8 +6756,6 @@ X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
   case X86::MOVSDrm:
   case X86::MMX_MOVD64rm:
   case X86::MMX_MOVQ64rm:
-  case X86::FsMOVAPSrm:
-  case X86::FsMOVAPDrm:
   case X86::MOVAPSrm:
   case X86::MOVUPSrm:
   case X86::MOVAPDrm:
@@ -6771,8 +6765,6 @@ X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
   // AVX load instructions
   case X86::VMOVSSrm:
   case X86::VMOVSDrm:
-  case X86::FsVMOVAPSrm:
-  case X86::FsVMOVAPDrm:
   case X86::VMOVAPSrm:
   case X86::VMOVUPSrm:
   case X86::VMOVAPDrm:
@@ -6837,8 +6829,6 @@ X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
   case X86::MOVSDrm:
   case X86::MMX_MOVD64rm:
   case X86::MMX_MOVQ64rm:
-  case X86::FsMOVAPSrm:
-  case X86::FsMOVAPDrm:
   case X86::MOVAPSrm:
   case X86::MOVUPSrm:
   case X86::MOVAPDrm:
@@ -6848,8 +6838,6 @@ X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
   // AVX load instructions
   case X86::VMOVSSrm:
   case X86::VMOVSDrm:
-  case X86::FsVMOVAPSrm:
-  case X86::FsVMOVAPDrm:
   case X86::VMOVAPSrm:
   case X86::VMOVUPSrm:
   case X86::VMOVAPDrm:
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 5a6bada5e51..a3d76034a5a 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -1083,29 +1083,6 @@ let Predicates = [UseSSE1] in {
             (MOVUPSmr addr:$dst, VR128:$src)>;
 }
 
-// Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
-// bits are disregarded. FIXME: Set encoding to pseudo!
-let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in {
-let isCodeGenOnly = 1 in {
-  def FsVMOVAPSrm : VPSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
-                         "movaps\t{$src, $dst|$dst, $src}",
-                         [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
-                         IIC_SSE_MOVA_P_RM>, VEX;
-  def FsVMOVAPDrm : VPDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
-                         "movapd\t{$src, $dst|$dst, $src}",
-                         [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
-                         IIC_SSE_MOVA_P_RM>, VEX;
-  def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
-                       "movaps\t{$src, $dst|$dst, $src}",
-                       [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
-                       IIC_SSE_MOVA_P_RM>;
-  def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
-                       "movapd\t{$src, $dst|$dst, $src}",
-                       [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
-                       IIC_SSE_MOVA_P_RM>;
-}
-}
-
 //===----------------------------------------------------------------------===//
 // SSE 1 & 2 - Move Low packed FP Instructions
 //===----------------------------------------------------------------------===//