author     Michael Kuperstein <mkuper@google.com>   2016-08-18 20:08:15 +0000
committer  Michael Kuperstein <mkuper@google.com>   2016-08-18 20:08:15 +0000
commit     2bc3d4d46c5f19d8433fd088fa95d18f9707bde8 (patch)
tree       1bf251351a4472649c63fb3f1bc7f2f056386f1f /llvm/lib/Target/X86
parent     dea5ccb04b8be312456a5bdb6483cfb0fcb5b962 (diff)
[SelectionDAG] Rename fextend -> fpextend, fround -> fpround, frnd -> fround
The names of the tablegen defs now match the names of the ISD nodes.
This makes the world a slightly saner place, as previously "fround" matched
ISD::FP_ROUND and not ISD::FROUND.
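For context, the renamed defs themselves live outside this directory (the diffstat below is limited to llvm/lib/Target/X86). A minimal sketch of the three SDNode defs as they read after this change, assuming the usual declarations in include/llvm/Target/TargetSelectionDAG.td:

def fpextend : SDNode<"ISD::FP_EXTEND", SDTFPExtendOp>;  // fpext: f32 -> f64 etc.
def fpround  : SDNode<"ISD::FP_ROUND",  SDTFPRoundOp>;   // fptrunc: f64 -> f32 etc.
def fround   : SDNode<"ISD::FROUND",    SDTFPUnaryOp>;   // llvm.round: round to nearest integer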
Differential Revision: https://reviews.llvm.org/D23597
llvm-svn: 279129
Diffstat (limited to 'llvm/lib/Target/X86')
-rw-r--r--  llvm/lib/Target/X86/X86InstrAVX512.td   | 16
-rw-r--r--  llvm/lib/Target/X86/X86InstrFPStack.td  | 12
-rw-r--r--  llvm/lib/Target/X86/X86InstrSSE.td      | 28
3 files changed, 28 insertions, 28 deletions
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index b387d0cd7fd..d92af7b0a83 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -5595,11 +5595,11 @@ defm VCVTSD2SS : avx512_cvt_fp_scalar_sd2ss<0x5A, "vcvtsd2ss", X86fround,
 defm VCVTSS2SD : avx512_cvt_fp_scalar_ss2sd<0x5A, "vcvtss2sd", X86fpext,
                                             X86fpextRnd,f32x_info, f64x_info >;
-def : Pat<(f64 (fextend FR32X:$src)),
+def : Pat<(f64 (fpextend FR32X:$src)),
           (COPY_TO_REGCLASS (VCVTSS2SDZrr (COPY_TO_REGCLASS FR32X:$src, VR128X),
                       (COPY_TO_REGCLASS FR32X:$src, VR128X)), VR128X)>,
           Requires<[HasAVX512]>;
-def : Pat<(f64 (fextend (loadf32 addr:$src))),
+def : Pat<(f64 (fpextend (loadf32 addr:$src))),
           (COPY_TO_REGCLASS (VCVTSS2SDZrm (v4f32 (IMPLICIT_DEF)), addr:$src), VR128X)>,
           Requires<[HasAVX512]>;
@@ -5612,7 +5612,7 @@ def : Pat<(f64 (extloadf32 addr:$src)),
           (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)), VR128X)>,
           Requires<[HasAVX512, OptForSpeed]>;
-def : Pat<(f32 (fround FR64X:$src)),
+def : Pat<(f32 (fpround FR64X:$src)),
           (COPY_TO_REGCLASS (VCVTSD2SSZrr (COPY_TO_REGCLASS FR64X:$src, VR128X),
                       (COPY_TO_REGCLASS FR64X:$src, VR128X)), VR128X)>,
           Requires<[HasAVX512]>;
@@ -5666,14 +5666,14 @@ multiclass avx512_vcvt_fp_rc<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
 // Extend Float to Double
 multiclass avx512_cvtps2pd<bits<8> opc, string OpcodeStr> {
   let Predicates = [HasAVX512] in {
-    defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8f64_info, v8f32x_info, fextend>,
+    defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8f64_info, v8f32x_info, fpextend>,
             avx512_vcvt_fp_sae<opc, OpcodeStr, v8f64_info, v8f32x_info,
                                X86vfpextRnd>, EVEX_V512;
   }
   let Predicates = [HasVLX] in {
     defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v2f64x_info, v4f32x_info,
                                X86vfpext, "{1to2}">, EVEX_V128;
-    defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4f64x_info, v4f32x_info, fextend>,
+    defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4f64x_info, v4f32x_info, fpextend>,
                 EVEX_V256;
   }
 }
@@ -5681,14 +5681,14 @@ multiclass avx512_cvtps2pd<bits<8> opc, string OpcodeStr> {
 // Truncate Double to Float
 multiclass avx512_cvtpd2ps<bits<8> opc, string OpcodeStr> {
   let Predicates = [HasAVX512] in {
-    defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8f32x_info, v8f64_info, fround>,
+    defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8f32x_info, v8f64_info, fpround>,
            avx512_vcvt_fp_rc<opc, OpcodeStr, v8f32x_info, v8f64_info,
                              X86vfproundRnd>, EVEX_V512;
   }
   let Predicates = [HasVLX] in {
     defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v2f64x_info,
                                X86vfpround, "{1to2}", "{x}">, EVEX_V128;
-    defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v4f64x_info, fround,
+    defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v4f64x_info, fpround,
                 "{1to4}", "{y}">, EVEX_V256;
   }
 }
@@ -6025,7 +6025,7 @@ def : Pat<(v4f64 (uint_to_fp (v4i32 VR128X:$src1))),
 }
 let Predicates = [HasAVX512] in {
-  def : Pat<(v8f32 (fround (loadv8f64 addr:$src))),
+  def : Pat<(v8f32 (fpround (loadv8f64 addr:$src))),
             (VCVTPD2PSZrm addr:$src)>;
   def : Pat<(v8f64 (extloadv8f32 addr:$src)),
             (VCVTPS2PDZrm addr:$src)>;
diff --git a/llvm/lib/Target/X86/X86InstrFPStack.td b/llvm/lib/Target/X86/X86InstrFPStack.td
index 078dab41502..10f3839ea8e 100644
--- a/llvm/lib/Target/X86/X86InstrFPStack.td
+++ b/llvm/lib/Target/X86/X86InstrFPStack.td
@@ -711,19 +711,19 @@ def : Pat<(X86fildflag addr:$src, i64), (ILD_Fp64m64 addr:$src)>;
 // FP extensions map onto simple pseudo-value conversions if they are to/from
 // the FP stack.
-def : Pat<(f64 (fextend RFP32:$src)), (COPY_TO_REGCLASS RFP32:$src, RFP64)>,
+def : Pat<(f64 (fpextend RFP32:$src)), (COPY_TO_REGCLASS RFP32:$src, RFP64)>,
          Requires<[FPStackf32]>;
-def : Pat<(f80 (fextend RFP32:$src)), (COPY_TO_REGCLASS RFP32:$src, RFP80)>,
+def : Pat<(f80 (fpextend RFP32:$src)), (COPY_TO_REGCLASS RFP32:$src, RFP80)>,
          Requires<[FPStackf32]>;
-def : Pat<(f80 (fextend RFP64:$src)), (COPY_TO_REGCLASS RFP64:$src, RFP80)>,
+def : Pat<(f80 (fpextend RFP64:$src)), (COPY_TO_REGCLASS RFP64:$src, RFP80)>,
          Requires<[FPStackf64]>;
 // FP truncations map onto simple pseudo-value conversions if they are to/from
 // the FP stack. We have validated that only value-preserving truncations make
 // it through isel.
-def : Pat<(f32 (fround RFP64:$src)), (COPY_TO_REGCLASS RFP64:$src, RFP32)>,
+def : Pat<(f32 (fpround RFP64:$src)), (COPY_TO_REGCLASS RFP64:$src, RFP32)>,
          Requires<[FPStackf32]>;
-def : Pat<(f32 (fround RFP80:$src)), (COPY_TO_REGCLASS RFP80:$src, RFP32)>,
+def : Pat<(f32 (fpround RFP80:$src)), (COPY_TO_REGCLASS RFP80:$src, RFP32)>,
          Requires<[FPStackf32]>;
-def : Pat<(f64 (fround RFP80:$src)), (COPY_TO_REGCLASS RFP80:$src, RFP64)>,
+def : Pat<(f64 (fpround RFP80:$src)), (COPY_TO_REGCLASS RFP80:$src, RFP64)>,
          Requires<[FPStackf64]>;
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 3f69f7e95c0..4269d930ca2 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -1799,16 +1799,16 @@ def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
                     Sched<[WriteCvtF2FLd, ReadAfterLd]>;
 }
-def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
+def : Pat<(f32 (fpround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
          Requires<[UseAVX]>;
 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
-                     [(set FR32:$dst, (fround FR64:$src))],
+                     [(set FR32:$dst, (fpround FR64:$src))],
                      IIC_SSE_CVT_Scalar_RR>, Sched<[WriteCvtF2F]>;
 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
                    "cvtsd2ss\t{$src, $dst|$dst, $src}",
-                   [(set FR32:$dst, (fround (loadf64 addr:$src)))],
+                   [(set FR32:$dst, (fpround (loadf64 addr:$src)))],
                    IIC_SSE_CVT_Scalar_RM>,
                    XD, Requires<[UseSSE2, OptForSize]>,
                    Sched<[WriteCvtF2FLd]>;
@@ -1865,9 +1865,9 @@ def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
                    Sched<[WriteCvtF2FLd, ReadAfterLd]>;
 }
-def : Pat<(f64 (fextend FR32:$src)),
+def : Pat<(f64 (fpextend FR32:$src)),
           (VCVTSS2SDrr FR32:$src, FR32:$src)>, Requires<[UseAVX]>;
-def : Pat<(fextend (loadf32 addr:$src)),
+def : Pat<(fpextend (loadf32 addr:$src)),
           (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[UseAVX]>;
 def : Pat<(extloadf32 addr:$src),
@@ -1879,7 +1879,7 @@ def : Pat<(extloadf32 addr:$src),
 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
                    "cvtss2sd\t{$src, $dst|$dst, $src}",
-                   [(set FR64:$dst, (fextend FR32:$src))],
+                   [(set FR64:$dst, (fpextend FR32:$src))],
                    IIC_SSE_CVT_Scalar_RR>, XS,
                    Requires<[UseSSE2]>, Sched<[WriteCvtF2F]>;
 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
@@ -1888,12 +1888,12 @@ def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
                    IIC_SSE_CVT_Scalar_RM>, XS,
                    Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;
-// extload f32 -> f64. This matches load+fextend because we have a hack in
+// extload f32 -> f64. This matches load+fpextend because we have a hack in
 // the isel (PreprocessForFPConvert) that can introduce loads after dag
 // combine.
-// Since these loads aren't folded into the fextend, we have to match it
+// Since these loads aren't folded into the fpextend, we have to match it
 // explicitly here.
-def : Pat<(fextend (loadf32 addr:$src)),
+def : Pat<(fpextend (loadf32 addr:$src)),
           (CVTSS2SDrm addr:$src)>, Requires<[UseSSE2]>;
 def : Pat<(extloadf32 addr:$src),
           (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[UseSSE2, OptForSpeed]>;
@@ -2269,26 +2269,26 @@ let Predicates = [HasAVX] in {
 }
 let Predicates = [HasAVX, NoVLX] in {
-  // Match fround and fextend for 128/256-bit conversions
+  // Match fpround and fpextend for 128/256-bit conversions
   def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
             (VCVTPD2PSrr VR128:$src)>;
   def : Pat<(v4f32 (X86vfpround (loadv2f64 addr:$src))),
             (VCVTPD2PSXrm addr:$src)>;
-  def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
+  def : Pat<(v4f32 (fpround (v4f64 VR256:$src))),
            (VCVTPD2PSYrr VR256:$src)>;
-  def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
+  def : Pat<(v4f32 (fpround (loadv4f64 addr:$src))),
            (VCVTPD2PSYrm addr:$src)>;
   def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
             (VCVTPS2PDrr VR128:$src)>;
-  def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
+  def : Pat<(v4f64 (fpextend (v4f32 VR128:$src))),
             (VCVTPS2PDYrr VR128:$src)>;
   def : Pat<(v4f64 (extloadv4f32 addr:$src)),
             (VCVTPS2PDYrm addr:$src)>;
 }
 let Predicates = [UseSSE2] in {
-  // Match fround and fextend for 128 conversions
+  // Match fpround and fpextend for 128 conversions
   def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
             (CVTPD2PSrr VR128:$src)>;
   def : Pat<(v4f32 (X86vfpround (memopv2f64 addr:$src))),
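To summarize the mapping this patch establishes, and to show how a pattern's intent now reads unambiguously, here is a hedged sketch (the VCVTSD2SSrr pattern is taken from the diff above; HYPOTHETICAL_ROUND is a made-up instruction name, not from this patch):

// Before this change:             After:
//   fextend -> ISD::FP_EXTEND       fpextend -> ISD::FP_EXTEND
//   fround  -> ISD::FP_ROUND        fpround  -> ISD::FP_ROUND
//   frnd    -> ISD::FROUND          fround   -> ISD::FROUND

def : Pat<(f32 (fpround FR64:$src)),            // ISD::FP_ROUND (fptrunc)
          (VCVTSD2SSrr FR64:$src, FR64:$src)>;
def : Pat<(f32 (fround FR32:$src)),             // ISD::FROUND (llvm.round)
          (HYPOTHETICAL_ROUND FR32:$src)>;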