Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/X86/X86InstrAVX512.td | 23
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp  |  1
-rw-r--r--  llvm/lib/Target/X86/X86InstrSSE.td    | 29
3 files changed, 44 insertions, 9 deletions
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index f428b201adc..6ceb5517863 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -11871,6 +11871,12 @@ multiclass AVX512_scalar_math_fp_patterns<SDNode Op, string OpcPrefix, SDNode Mo
                         _.FRC:$src)))),
             (!cast<Instruction>("V"#OpcPrefix#Zrr_Int) _.VT:$dst,
              (_.VT (COPY_TO_REGCLASS _.FRC:$src, VR128X)))>;
+  def : Pat<(MoveNode
+             (_.VT VR128X:$dst),
+             (_.VT (scalar_to_vector
+                    (Op (_.EltVT (extractelt (_.VT VR128X:$dst), (iPTR 0))),
+                        (_.ScalarLdFrag addr:$src))))),
+            (!cast<Instruction>("V"#OpcPrefix#Zrm_Int) _.VT:$dst, addr:$src)>;
 
   // extracted masked scalar math op with insert via movss
   def : Pat<(MoveNode (_.VT VR128X:$src1),
@@ -11884,6 +11890,16 @@ multiclass AVX512_scalar_math_fp_patterns<SDNode Op, string OpcPrefix, SDNode Mo
              (_.VT (COPY_TO_REGCLASS _.FRC:$src0, VR128X)),
              VK1WM:$mask, _.VT:$src1,
              (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)))>;
+  def : Pat<(MoveNode (_.VT VR128X:$src1),
+             (scalar_to_vector
+              (X86selects VK1WM:$mask,
+                          (Op (_.EltVT
+                               (extractelt (_.VT VR128X:$src1), (iPTR 0))),
+                              (_.ScalarLdFrag addr:$src2)),
+                          _.FRC:$src0))),
+            (!cast<Instruction>("V"#OpcPrefix#Zrm_Intk)
+             (_.VT (COPY_TO_REGCLASS _.FRC:$src0, VR128X)),
+             VK1WM:$mask, _.VT:$src1, addr:$src2)>;
 
   // extracted masked scalar math op with insert via movss
   def : Pat<(MoveNode (_.VT VR128X:$src1),
@@ -11895,6 +11911,13 @@ multiclass AVX512_scalar_math_fp_patterns<SDNode Op, string OpcPrefix, SDNode Mo
             (!cast<I>("V"#OpcPrefix#Zrr_Intkz)
              VK1WM:$mask, _.VT:$src1,
              (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)))>;
+  def : Pat<(MoveNode (_.VT VR128X:$src1),
+             (scalar_to_vector
+              (X86selects VK1WM:$mask,
+                          (Op (_.EltVT
+                               (extractelt (_.VT VR128X:$src1), (iPTR 0))),
+                              (_.ScalarLdFrag addr:$src2)), (_.EltVT ZeroFP)))),
+            (!cast<I>("V"#OpcPrefix#Zrm_Intkz) VK1WM:$mask, _.VT:$src1, addr:$src2)>;
 }
 }
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 4d1791d6728..2fe438e3def 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -4685,6 +4685,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
                                                   &RI, MF);
     unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
     if (Size < RCSize) {
+      // FIXME: Allow scalar intrinsic instructions like ADDSSrm_Int.
       // Check if it's safe to fold the load. If the size of the object is
       // narrower than the load width, then it's not.
       if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4)
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 65849373e91..7d93d1bd985 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -2692,7 +2692,8 @@ let isCodeGenOnly = 1 in {
 // patterns we have to try to match.
 multiclass scalar_math_patterns<SDNode Op, string OpcPrefix, SDNode Move,
                                 ValueType VT, ValueType EltTy,
-                                RegisterClass RC, Predicate BasePredicate> {
+                                RegisterClass RC, PatFrag ld_frag,
+                                Predicate BasePredicate> {
   let Predicates = [BasePredicate] in {
     // extracted scalar math op with insert via movss/movsd
     def : Pat<(VT (Move (VT VR128:$dst),
@@ -2701,6 +2702,11 @@ multiclass scalar_math_patterns<SDNode Op, string OpcPrefix, SDNode Move,
                          RC:$src))))),
               (!cast<Instruction>(OpcPrefix#rr_Int) VT:$dst,
                (VT (COPY_TO_REGCLASS RC:$src, VR128)))>;
+    def : Pat<(VT (Move (VT VR128:$dst),
+                        (VT (scalar_to_vector
+                             (Op (EltTy (extractelt (VT VR128:$dst), (iPTR 0))),
+                                 (ld_frag addr:$src)))))),
+              (!cast<Instruction>(OpcPrefix#rm_Int) VT:$dst, addr:$src)>;
   }
 
   // Repeat for AVX versions of the instructions.
@@ -2712,18 +2718,23 @@ multiclass scalar_math_patterns<SDNode Op, string OpcPrefix, SDNode Move,
                          RC:$src))))),
               (!cast<Instruction>("V"#OpcPrefix#rr_Int) VT:$dst,
                (VT (COPY_TO_REGCLASS RC:$src, VR128)))>;
+    def : Pat<(VT (Move (VT VR128:$dst),
+                        (VT (scalar_to_vector
+                             (Op (EltTy (extractelt (VT VR128:$dst), (iPTR 0))),
+                                 (ld_frag addr:$src)))))),
+              (!cast<Instruction>("V"#OpcPrefix#rm_Int) VT:$dst, addr:$src)>;
   }
 }
 
-defm : scalar_math_patterns<fadd, "ADDSS", X86Movss, v4f32, f32, FR32, UseSSE1>;
-defm : scalar_math_patterns<fsub, "SUBSS", X86Movss, v4f32, f32, FR32, UseSSE1>;
-defm : scalar_math_patterns<fmul, "MULSS", X86Movss, v4f32, f32, FR32, UseSSE1>;
-defm : scalar_math_patterns<fdiv, "DIVSS", X86Movss, v4f32, f32, FR32, UseSSE1>;
+defm : scalar_math_patterns<fadd, "ADDSS", X86Movss, v4f32, f32, FR32, loadf32, UseSSE1>;
+defm : scalar_math_patterns<fsub, "SUBSS", X86Movss, v4f32, f32, FR32, loadf32, UseSSE1>;
+defm : scalar_math_patterns<fmul, "MULSS", X86Movss, v4f32, f32, FR32, loadf32, UseSSE1>;
+defm : scalar_math_patterns<fdiv, "DIVSS", X86Movss, v4f32, f32, FR32, loadf32, UseSSE1>;
 
-defm : scalar_math_patterns<fadd, "ADDSD", X86Movsd, v2f64, f64, FR64, UseSSE2>;
-defm : scalar_math_patterns<fsub, "SUBSD", X86Movsd, v2f64, f64, FR64, UseSSE2>;
-defm : scalar_math_patterns<fmul, "MULSD", X86Movsd, v2f64, f64, FR64, UseSSE2>;
-defm : scalar_math_patterns<fdiv, "DIVSD", X86Movsd, v2f64, f64, FR64, UseSSE2>;
+defm : scalar_math_patterns<fadd, "ADDSD", X86Movsd, v2f64, f64, FR64, loadf64, UseSSE2>;
+defm : scalar_math_patterns<fsub, "SUBSD", X86Movsd, v2f64, f64, FR64, loadf64, UseSSE2>;
+defm : scalar_math_patterns<fmul, "MULSD", X86Movsd, v2f64, f64, FR64, loadf64, UseSSE2>;
+defm : scalar_math_patterns<fdiv, "DIVSD", X86Movsd, v2f64, f64, FR64, loadf64, UseSSE2>;
 
 /// Unop Arithmetic
 /// In addition, we also have a special variant of the scalar form here to
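
Note: the effect of the new SSE/AVX rm_Int patterns can be illustrated with a small
intrinsics example. This is a minimal sketch assuming the standard <xmmintrin.h>
intrinsics; the before/after instruction sequences in the comments are inferred from
the patterns above, not taken from this commit's tests:

#include <xmmintrin.h>

// The DAG for this function has the shape
//   (X86Movss $x, (scalar_to_vector (fadd (extractelt $x, 0), (loadf32 $p))))
// which is the form the new ADDSSrm_Int/VADDSSrm_Int patterns match.
// Before (assumed): movss (%rdi), %xmm1 ; addss %xmm1, %xmm0
// After  (assumed): addss (%rdi), %xmm0   (load folded into the scalar op)
__m128 add_lane0(__m128 x, const float *p) {
  return _mm_add_ss(x, _mm_set_ss(*p));
}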
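
The masked AVX512 Zrm_Intk/Zrm_Intkz patterns cover the analogous merge- and
zero-masked cases. A sketch using the AVX512F scalar mask intrinsics from
<immintrin.h>; again, the folded forms shown in the comments are an assumption
based on the patterns, not output captured from this change:

#include <immintrin.h>

// Merge-masked: shaped like (MoveNode $src1, (scalar_to_vector
//   (X86selects $mask, (fadd (extractelt $src1, 0), (load $p)), $src0)))
// and can now select to (assumed): vaddss (%rdi), %xmm1, %xmm0 {%k1}
__m128 add_lane0_mask(__m128 src0, __mmask8 k, __m128 x, const float *p) {
  return _mm_mask_add_ss(src0, k, x, _mm_set_ss(*p));
}

// Zero-masked: same shape with (_.EltVT ZeroFP) as the false value,
// selecting to (assumed): vaddss (%rdi), %xmm1, %xmm0 {%k1}{z}
__m128 add_lane0_maskz(__mmask8 k, __m128 x, const float *p) {
  return _mm_maskz_add_ss(k, x, _mm_set_ss(*p));
}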

