diff options
| author | Craig Topper <craig.topper@intel.com> | 2018-07-09 16:03:01 +0000 |
|---|---|---|
| committer | Craig Topper <craig.topper@intel.com> | 2018-07-09 16:03:01 +0000 |
| commit | 16ee4b4957789e3d5d1cff2180053c70155a227b (patch) | |
| tree | 3ee271cb66028010a3a9263eafb61b0cce078848 /llvm/lib | |
| parent | 22330c700b4769fe476e541b0afbdcf08197df01 (diff) | |
| download | bcm5719-llvm-16ee4b4957789e3d5d1cff2180053c70155a227b.tar.gz bcm5719-llvm-16ee4b4957789e3d5d1cff2180053c70155a227b.zip | |
[X86] Remove some patterns that seem to be unreachable.
These patterns mapped (v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))) to a MOVSD and a zeroing XOR. But the complexity of a pattern for (v2f64 (X86vzmovl (v2f64))) that selects MOVQ is artificially high and hides this MOVSD pattern.
Weirder still, the SSE version of the pattern was explicitly blocked on SSE41, and yet we had copied it to AVX and AVX512.
llvm-svn: 336556
Diffstat (limited to 'llvm/lib')
| -rw-r--r-- | llvm/lib/Target/X86/X86InstrAVX512.td | 3 | ||||
| -rw-r--r-- | llvm/lib/Target/X86/X86InstrSSE.td | 9 |
2 files changed, 0 insertions, 12 deletions
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td index ff5bcef9264..9c07d82550c 100644 --- a/llvm/lib/Target/X86/X86InstrAVX512.td +++ b/llvm/lib/Target/X86/X86InstrAVX512.td @@ -4338,9 +4338,6 @@ let Predicates = [HasAVX512] in { (VMOVSSZrr (v4f32 (AVX512_128_SET0)), VR128X:$src)>; def : Pat<(v4i32 (X86vzmovl (v4i32 VR128X:$src))), (VMOVSSZrr (v4i32 (AVX512_128_SET0)), VR128X:$src)>; - def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64X:$src)))), - (VMOVSDZrr (v2f64 (AVX512_128_SET0)), - (COPY_TO_REGCLASS FR64X:$src, VR128X))>; } // Move low f32 and clear high bits. diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td index 4644c62666c..dc15b964854 100644 --- a/llvm/lib/Target/X86/X86InstrSSE.td +++ b/llvm/lib/Target/X86/X86InstrSSE.td @@ -352,13 +352,6 @@ let Predicates = [UseSSE1] in { } let Predicates = [UseSSE2] in { - let Predicates = [NoSSE41], AddedComplexity = 15 in { - // Move scalar to XMM zero-extended, zeroing a VR128 then do a - // MOVSD to the lower bits. - def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))), - (MOVSDrr (v2f64 (V_SET0)), (COPY_TO_REGCLASS FR64:$src, VR128))>; - } - let AddedComplexity = 20 in { // MOVSDrm already zeros the high parts of the register. def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))), @@ -6513,8 +6506,6 @@ let Predicates = [UseAVX] in { (VBLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>; def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))), (VPBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>; - def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))), - (VMOVSDrr (v2f64 (V_SET0)), (COPY_TO_REGCLASS FR64:$src, VR128))>; // Move low f32 and clear high bits. def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))), |

