summaryrefslogtreecommitdiffstats
path: root/llvm/lib/Target
diff options
context:
space:
mode:
authorCraig Topper <craig.topper@intel.com>2018-07-18 05:10:53 +0000
committerCraig Topper <craig.topper@intel.com>2018-07-18 05:10:53 +0000
commit95063a45b84a330d6ee378256593c244f89df9d3 (patch)
tree53058358b51533a4123a71da0ced67930c4b4dc9 /llvm/lib/Target
parent1425e10cc6f8306cba01589c9e3f8459fa7e2ffe (diff)
downloadbcm5719-llvm-95063a45b84a330d6ee378256593c244f89df9d3.tar.gz
bcm5719-llvm-95063a45b84a330d6ee378256593c244f89df9d3.zip
[X86] Remove patterns that mix X86ISD::MOVLHPS/MOVHLPS with v2i64/v2f64 types.
The X86ISD::MOVLHPS/MOVHLPS nodes should now only be emitted in SSE1-only mode. This means that the v2i64/v2f64 types would be illegal, thus we don't need these patterns. llvm-svn: 337349
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--llvm/lib/Target/X86/X86InstrAVX512.td7
-rw-r--r--llvm/lib/Target/X86/X86InstrSSE.td26
2 files changed, 0 insertions, 33 deletions
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 2035e49720f..b239f230915 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -6464,13 +6464,6 @@ defm VMOVLPDZ128 : avx512_mov_hilo_packed<0x12, "vmovlpd", X86Movsd,
v2f64x_info>, EVEX_CD8<64, CD8VT1>, PD, VEX_W;
let Predicates = [HasAVX512] in {
- // VMOVHPS patterns
- def : Pat<(X86Movlhps VR128X:$src1,
- (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
- (VMOVHPSZ128rm VR128X:$src1, addr:$src2)>;
- def : Pat<(X86Movlhps VR128X:$src1,
- (bc_v4f32 (v2i64 (X86vzload addr:$src2)))),
- (VMOVHPSZ128rm VR128X:$src1, addr:$src2)>;
// VMOVHPD patterns
def : Pat<(v2f64 (X86Unpckl VR128X:$src1,
(bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index af40b009d97..06a8799b13f 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -766,14 +766,6 @@ def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
} // SchedRW
let Predicates = [UseAVX] in {
- // VMOVHPS patterns
- def : Pat<(X86Movlhps VR128:$src1,
- (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
- (VMOVHPSrm VR128:$src1, addr:$src2)>;
- def : Pat<(X86Movlhps VR128:$src1,
- (bc_v4f32 (v2i64 (X86vzload addr:$src2)))),
- (VMOVHPSrm VR128:$src1, addr:$src2)>;
-
// Also handle an i64 load because that may get selected as a faster way to
// load the data.
def : Pat<(v2f64 (X86Unpckl VR128:$src1,
@@ -781,25 +773,12 @@ let Predicates = [UseAVX] in {
(VMOVHPDrm VR128:$src1, addr:$src2)>;
def : Pat<(store (f64 (extractelt
- (bc_v2f64 (v4f32 (X86Movhlps VR128:$src, VR128:$src))),
- (iPTR 0))), addr:$dst),
- (VMOVHPDmr addr:$dst, VR128:$src)>;
-
- def : Pat<(store (f64 (extractelt
(v2f64 (X86VPermilpi VR128:$src, (i8 1))),
(iPTR 0))), addr:$dst),
(VMOVHPDmr addr:$dst, VR128:$src)>;
}
let Predicates = [UseSSE1] in {
- // MOVHPS patterns
- def : Pat<(X86Movlhps VR128:$src1,
- (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
- (MOVHPSrm VR128:$src1, addr:$src2)>;
- def : Pat<(X86Movlhps VR128:$src1,
- (bc_v4f32 (v2i64 (X86vzload addr:$src2)))),
- (MOVHPSrm VR128:$src1, addr:$src2)>;
-
// This pattern helps select MOVHPS on SSE1 only targets. With SSE2 we'll
// end up with a movsd or blend instead of shufp.
// No need for aligned load, we're only loading 64-bits.
@@ -817,11 +796,6 @@ let Predicates = [UseSSE2] in {
(MOVHPDrm VR128:$src1, addr:$src2)>;
def : Pat<(store (f64 (extractelt
- (bc_v2f64 (v4f32 (X86Movhlps VR128:$src, VR128:$src))),
- (iPTR 0))), addr:$dst),
- (MOVHPDmr addr:$dst, VR128:$src)>;
-
- def : Pat<(store (f64 (extractelt
(v2f64 (X86Shufp VR128:$src, VR128:$src, (i8 1))),
(iPTR 0))), addr:$dst),
(MOVHPDmr addr:$dst, VR128:$src)>;
OpenPOWER on IntegriCloud