author     Sanjay Patel <spatel@rotateright.com>  2018-12-22 16:59:02 +0000
committer  Sanjay Patel <spatel@rotateright.com>  2018-12-22 16:59:02 +0000
commit     52c02d70e276aa1e138b9f43988de60838aacb8c (patch)
tree       5f3ffd03e51948e70083a8d1fbcb7235e275691a /llvm/lib/Target
parent     c682c197741484e9bc07611e460f6feb4f40edf9 (diff)
[x86] add load fold patterns for movddup with vzext_load
The missed load folding noticed in D55898 is visible independent of that change
either with an adjusted IR pattern to start or with AVX2/AVX512 (where the build
vector becomes a broadcast first; movddup is not produced until we get into isel
via tablegen patterns).

Differential Revision: https://reviews.llvm.org/D55936

llvm-svn: 350005
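As a rough illustration (a minimal sketch, not the regression test from this commit; the function name and exact IR shape are assumptions), IR along these lines loads a 64-bit scalar into a zeroed 128-bit vector and then splats the low element. That is the kind of input where the splat operand can reach isel as a zero-extending vector load (X86vzload) feeding a movddup/broadcast, which the added patterns let fold into a single VMOVDDUP with a memory operand; whether it actually selects that way depends on the subtarget and earlier DAG combines:

define <2 x double> @splat_low_f64_from_mem(double* %p) {
  ; 64-bit scalar load inserted into a zero vector: the upper lane must be
  ; zero, so the DAG may represent this as a zero-extending vector load
  ; (X86vzload) rather than a plain scalar load.
  %ld = load double, double* %p, align 8
  %ins = insertelement <2 x double> zeroinitializer, double %ld, i32 0
  ; splat lane 0 into both lanes -> movddup (or a v2f64 broadcast on AVX2/AVX512)
  %dup = shufflevector <2 x double> %ins, <2 x double> undef, <2 x i32> zeroinitializer
  ret <2 x double> %dup
}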
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/X86/X86InstrAVX512.td  2
-rw-r--r--  llvm/lib/Target/X86/X86InstrSSE.td     6
2 files changed, 8 insertions, 0 deletions
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 7e60b9caf05..105ca2e87d7 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -11217,6 +11217,8 @@ def : Pat<(v2f64 (X86VBroadcast f64:$src)),
(VMOVDDUPZ128rr (v2f64 (COPY_TO_REGCLASS FR64X:$src, VR128X)))>;
def : Pat<(v2f64 (X86VBroadcast (loadv2f64 addr:$src))),
(VMOVDDUPZ128rm addr:$src)>;
+def : Pat<(v2f64 (X86VBroadcast (v2f64 (X86vzload addr:$src)))),
+ (VMOVDDUPZ128rm addr:$src)>;
def : Pat<(vselect (v2i1 VK2WM:$mask), (v2f64 (X86VBroadcast f64:$src)),
(v2f64 VR128X:$src0)),
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 94cd5a611f2..e2bcd18ce66 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -4669,12 +4669,16 @@ defm MOVDDUP : sse3_replicate_dfp<"movddup", SchedWriteFShuffle>;
let Predicates = [HasAVX, NoVLX] in {
def : Pat<(X86Movddup (loadv2f64 addr:$src)),
(VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
+ def : Pat<(X86Movddup (v2f64 (X86vzload addr:$src))),
+ (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
}
let Predicates = [UseSSE3] in {
// No need for aligned memory as this only loads 64-bits.
def : Pat<(X86Movddup (loadv2f64 addr:$src)),
(MOVDDUPrm addr:$src)>;
+ def : Pat<(X86Movddup (v2f64 (X86vzload addr:$src))),
+ (MOVDDUPrm addr:$src)>;
}
//===---------------------------------------------------------------------===//
@@ -8034,6 +8038,8 @@ let Predicates = [HasAVX, NoVLX] in {
(VMOVDDUPrr VR128:$src)>;
def : Pat<(v2f64 (X86VBroadcast (loadv2f64 addr:$src))),
(VMOVDDUPrm addr:$src)>;
+ def : Pat<(v2f64 (X86VBroadcast (v2f64 (X86vzload addr:$src)))),
+ (VMOVDDUPrm addr:$src)>;
}
let Predicates = [HasAVX1Only] in {