Diffstat (limited to 'llvm/lib/Target/X86')
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp |  5
-rw-r--r--  llvm/lib/Target/X86/X86InstrAVX512.td   | 24
-rw-r--r--  llvm/lib/Target/X86/X86InstrSSE.td      | 64
3 files changed, 1 insertion(+), 92 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index afd8a498445..b020644d07e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -9889,10 +9889,7 @@ static SDValue lowerVectorShuffleAsElementInsertion(
V1Mask[V2Index] = -1;
if (!isNoopShuffleMask(V1Mask))
return SDValue();
- // This is essentially a special case blend operation, but if we have
- // general purpose blend operations, they are always faster. Bail and let
- // the rest of the lowering handle these as blends.
- if (Subtarget.hasSSE41())
+ if (!VT.is128BitVector())
return SDValue();
// Otherwise, use MOVSD or MOVSS.
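For context, a hedged C illustration (not part of the patch; the function name is made up, the intrinsics are the standard Intel ones): inserting a scalar into lane 0 of a 128-bit vector is modelled as a shuffle that lowerVectorShuffleAsElementInsertion handles. Previously the function bailed out whenever SSE4.1 was available so the generic lowering could use BLENDPS/BLENDPD; after this change it only bails for vectors wider than 128 bits and can emit MOVSS/MOVSD directly.

#include <immintrin.h>

/* Illustrative only: lane-0 insertion expressed as a shuffle. */
__m128 insert_lane0(__m128 v, float s) {
  /* _mm_move_ss is a <4 x float> shuffle that takes lane 0 from the
     second operand; with this change it can lower to MOVSS even when
     SSE4.1 blends are available. */
  return _mm_move_ss(v, _mm_set_ss(s));
}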
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 824f5099d8c..5fd8694eb3b 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -9732,23 +9732,11 @@ multiclass AVX512_scalar_math_f32_patterns<SDNode Op, string OpcPrefix> {
(!cast<I>("V"#OpcPrefix#SSZrr_Int) v4f32:$dst,
(COPY_TO_REGCLASS FR32X:$src, VR128X))>;
- // extracted scalar math op with insert via blend
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128X:$dst), (v4f32 (scalar_to_vector
- (Op (f32 (extractelt (v4f32 VR128X:$dst), (iPTR 0))),
- FR32X:$src))), (i8 1))),
- (!cast<I>("V"#OpcPrefix#SSZrr_Int) v4f32:$dst,
- (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
-
// vector math op with insert via movss
def : Pat<(v4f32 (X86Movss (v4f32 VR128X:$dst),
(Op (v4f32 VR128X:$dst), (v4f32 VR128X:$src)))),
(!cast<I>("V"#OpcPrefix#SSZrr_Int) v4f32:$dst, v4f32:$src)>;
- // vector math op with insert via blend
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128X:$dst),
- (Op (v4f32 VR128X:$dst), (v4f32 VR128X:$src)), (i8 1))),
- (!cast<I>("V"#OpcPrefix#SSZrr_Int) v4f32:$dst, v4f32:$src)>;
-
// extracted masked scalar math op with insert via movss
def : Pat<(X86Movss (v4f32 VR128X:$src1),
(scalar_to_vector
@@ -9776,23 +9764,11 @@ multiclass AVX512_scalar_math_f64_patterns<SDNode Op, string OpcPrefix> {
(!cast<I>("V"#OpcPrefix#SDZrr_Int) v2f64:$dst,
(COPY_TO_REGCLASS FR64X:$src, VR128X))>;
- // extracted scalar math op with insert via blend
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128X:$dst), (v2f64 (scalar_to_vector
- (Op (f64 (extractelt (v2f64 VR128X:$dst), (iPTR 0))),
- FR64X:$src))), (i8 1))),
- (!cast<I>("V"#OpcPrefix#SDZrr_Int) v2f64:$dst,
- (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
-
// vector math op with insert via movsd
def : Pat<(v2f64 (X86Movsd (v2f64 VR128X:$dst),
(Op (v2f64 VR128X:$dst), (v2f64 VR128X:$src)))),
(!cast<I>("V"#OpcPrefix#SDZrr_Int) v2f64:$dst, v2f64:$src)>;
- // vector math op with insert via blend
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128X:$dst),
- (Op (v2f64 VR128X:$dst), (v2f64 VR128X:$src)), (i8 1))),
- (!cast<I>("V"#OpcPrefix#SDZrr_Int) v2f64:$dst, v2f64:$src)>;
-
// extracted masked scalar math op with insert via movss
def : Pat<(X86Movsd (v2f64 VR128X:$src1),
(scalar_to_vector
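The patterns deleted above (and their SSE/AVX counterparts below) matched the same dataflow with the insertion expressed as an X86Blendi node. A hedged C sketch of that dataflow, using the f64 case just removed (function name illustrative, intrinsics standard): extract lane 0, do scalar math, merge the result back into lane 0. After the lowering change, the merge is an X86Movsd node even on SSE4.1/AVX-512 targets, so only the MOVSD-based patterns remain reachable.

#include <immintrin.h>

/* Illustrative only: "extracted scalar math op with insert via movsd". */
__m128d add_to_lane0(__m128d dst, double s) {
  double lo = _mm_cvtsd_f64(dst);    /* extractelt dst, 0 */
  __m128d sum = _mm_set_sd(lo + s);  /* scalar add, then scalar_to_vector */
  return _mm_move_sd(dst, sum);      /* expected to fold to one (V)ADDSD */
}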
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index ba0c9bfec73..bbaa6f4df07 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -2911,22 +2911,6 @@ multiclass scalar_math_f32_patterns<SDNode Op, string OpcPrefix> {
(!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
}
- // With SSE 4.1, blendi is preferred to movsd, so match that too.
- let Predicates = [UseSSE41] in {
- // extracted scalar math op with insert via blend
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
- (Op (f32 (extractelt (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))), (i8 1))),
- (!cast<I>(OpcPrefix#SSrr_Int) v4f32:$dst,
- (COPY_TO_REGCLASS FR32:$src, VR128))>;
-
- // vector math op with insert via blend
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
- (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
- (!cast<I>(OpcPrefix#SSrr_Int)v4f32:$dst, v4f32:$src)>;
-
- }
-
// Repeat everything for AVX.
let Predicates = [UseAVX] in {
// extracted scalar math op with insert via movss
@@ -2936,22 +2920,10 @@ multiclass scalar_math_f32_patterns<SDNode Op, string OpcPrefix> {
(!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst,
(COPY_TO_REGCLASS FR32:$src, VR128))>;
- // extracted scalar math op with insert via blend
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
- (Op (f32 (extractelt (v4f32 VR128:$dst), (iPTR 0))),
- FR32:$src))), (i8 1))),
- (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst,
- (COPY_TO_REGCLASS FR32:$src, VR128))>;
-
// vector math op with insert via movss
def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
(Op (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
(!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
-
- // vector math op with insert via blend
- def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
- (Op (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
- (!cast<I>("V"#OpcPrefix#SSrr_Int) v4f32:$dst, v4f32:$src)>;
}
}
@@ -2975,21 +2947,6 @@ multiclass scalar_math_f64_patterns<SDNode Op, string OpcPrefix> {
(!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
}
- // With SSE 4.1, blendi is preferred to movsd, so match those too.
- let Predicates = [UseSSE41] in {
- // extracted scalar math op with insert via blend
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
- (Op (f64 (extractelt (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))), (i8 1))),
- (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst,
- (COPY_TO_REGCLASS FR64:$src, VR128))>;
-
- // vector math op with insert via blend
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
- (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
- (!cast<I>(OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
- }
-
// Repeat everything for AVX.
let Predicates = [UseAVX] in {
// extracted scalar math op with insert via movsd
@@ -2999,22 +2956,10 @@ multiclass scalar_math_f64_patterns<SDNode Op, string OpcPrefix> {
(!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst,
(COPY_TO_REGCLASS FR64:$src, VR128))>;
- // extracted scalar math op with insert via blend
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector
- (Op (f64 (extractelt (v2f64 VR128:$dst), (iPTR 0))),
- FR64:$src))), (i8 1))),
- (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst,
- (COPY_TO_REGCLASS FR64:$src, VR128))>;
-
// vector math op with insert via movsd
def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
(Op (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
(!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
-
- // vector math op with insert via blend
- def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
- (Op (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
- (!cast<I>("V"#OpcPrefix#SDrr_Int) v2f64:$dst, v2f64:$src)>;
}
}
@@ -3301,19 +3246,10 @@ multiclass scalar_unary_math_patterns<Intrinsic Intr, string OpcPrefix,
(!cast<I>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
}
- // With SSE 4.1, blendi is preferred to movs*, so match that too.
- let Predicates = [UseSSE41] in {
- def : Pat<(VT (X86Blendi VT:$dst, (Intr VT:$src), (i8 1))),
- (!cast<I>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
- }
-
// Repeat for AVX versions of the instructions.
let Predicates = [HasAVX] in {
def : Pat<(VT (Move VT:$dst, (Intr VT:$src))),
(!cast<I>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;
-
- def : Pat<(VT (X86Blendi VT:$dst, (Intr VT:$src), (i8 1))),
- (!cast<I>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;
}
}
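Similarly for scalar_unary_math_patterns: the retained Move-based patterns fold a scalar unary op merged into lane 0 of another vector into the _Int form of the instruction, and the X86Blendi variants are dropped because the merge no longer lowers to a blend. A hedged C sketch (function name illustrative; whether the fold fires depends on how the intrinsic is lowered):

#include <immintrin.h>

/* Illustrative only: scalar sqrt of src merged into lane 0 of dst. */
__m128 sqrt_into_lane0(__m128 dst, __m128 src) {
  return _mm_move_ss(dst, _mm_sqrt_ss(src));  /* may select SQRTSS dst, src */
}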