Diffstat (limited to 'llvm/lib'):
 llvm/lib/Target/X86/X86ISelLowering.cpp    |   6
 llvm/lib/Target/X86/X86InstrAVX512.td      | 290
 llvm/lib/Target/X86/X86InstrCompiler.td    |   2
 llvm/lib/Target/X86/X86InstrFMA.td         |  24
 llvm/lib/Target/X86/X86InstrSSE.td         | 292
 llvm/lib/Target/X86/X86InstrVecCompiler.td |   2
 llvm/lib/Target/X86/X86RegisterInfo.td     |  10
 7 files changed, 328 insertions(+), 298 deletions(-)
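
Taken together, the hunks below make one mechanical change: the dedicated FR128 register class is deleted and f128 becomes one more value type carried by the VR128 family (see the X86RegisterInfo.td hunks at the bottom). On the C++ side, the two places that mapped f128 to FR128 now hand out VR128, and the f128 CMOV pseudo is renamed to match its new definition in X86InstrCompiler.td. Everything else is fallout in the pattern files: once a register class lists an extra value type, TableGen can no longer infer a unique result type for class-constrained output nodes such as COPY_TO_REGCLASS and EXTRACT_SUBREG, so those nodes gain explicit value-type casts. A before/after sketch adapted from the VBROADCASTSS hunk in X86InstrSSE.td (the commentary is mine, not the patch's):

    // Before: the copy's result type could be left implicit.
    def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
    // After: with f128 added to VR128's type list, the intended type is
    // pinned explicitly so type inference stays unambiguous.
    def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
              (VBROADCASTSSrr (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)))>;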
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 98995a73980..37338ab5796 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -613,7 +613,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// Long double always uses X87, except f128 in MMX.
if (UseX87) {
if (Subtarget.is64Bit() && Subtarget.hasMMX()) {
- addRegisterClass(MVT::f128, &X86::FR128RegClass);
+ addRegisterClass(MVT::f128, &X86::VR128RegClass);
ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
setOperationAction(ISD::FABS , MVT::f128, Custom);
setOperationAction(ISD::FNEG , MVT::f128, Custom);
@@ -3078,7 +3078,7 @@ SDValue X86TargetLowering::LowerFormalArguments(
else if (RegVT == MVT::f80)
RC = &X86::RFP80RegClass;
else if (RegVT == MVT::f128)
- RC = &X86::FR128RegClass;
+ RC = &X86::VR128RegClass;
else if (RegVT.is512BitVector())
RC = &X86::VR512RegClass;
else if (RegVT.is256BitVector())
@@ -28544,7 +28544,7 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
return EmitLoweredTLSCall(MI, BB);
case X86::CMOV_FR32:
case X86::CMOV_FR64:
- case X86::CMOV_FR128:
+ case X86::CMOV_F128:
case X86::CMOV_GR8:
case X86::CMOV_GR16:
case X86::CMOV_GR32:
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 36c859a63c8..0791df57d2f 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -1118,18 +1118,18 @@ multiclass avx512_broadcast_scalar<bits<8> opc, string OpcodeStr,
X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo> {
def : Pat<(DestInfo.VT (X86VBroadcast SrcInfo.FRC:$src)),
(!cast<Instruction>(Name#DestInfo.ZSuffix#r)
- (COPY_TO_REGCLASS SrcInfo.FRC:$src, SrcInfo.RC))>;
+ (SrcInfo.VT (COPY_TO_REGCLASS SrcInfo.FRC:$src, SrcInfo.RC)))>;
def : Pat<(DestInfo.VT (vselect DestInfo.KRCWM:$mask,
(X86VBroadcast SrcInfo.FRC:$src),
DestInfo.RC:$src0)),
(!cast<Instruction>(Name#DestInfo.ZSuffix#rk)
DestInfo.RC:$src0, DestInfo.KRCWM:$mask,
- (COPY_TO_REGCLASS SrcInfo.FRC:$src, SrcInfo.RC))>;
+ (SrcInfo.VT (COPY_TO_REGCLASS SrcInfo.FRC:$src, SrcInfo.RC)))>;
def : Pat<(DestInfo.VT (vselect DestInfo.KRCWM:$mask,
(X86VBroadcast SrcInfo.FRC:$src),
DestInfo.ImmAllZerosV)),
(!cast<Instruction>(Name#DestInfo.ZSuffix#rkz)
- DestInfo.KRCWM:$mask, (COPY_TO_REGCLASS SrcInfo.FRC:$src, SrcInfo.RC))>;
+ DestInfo.KRCWM:$mask, (SrcInfo.VT (COPY_TO_REGCLASS SrcInfo.FRC:$src, SrcInfo.RC)))>;
}
// Split version to allow mask and broadcast node to be different types. This
@@ -1328,10 +1328,11 @@ defm VPBROADCASTQr : avx512_int_broadcast_reg_vl<0x7C, avx512vl_i64_info,
// automatically does the extract.
multiclass avx512_int_broadcast_rm_lowering<string Name,
X86VectorVTInfo DestInfo,
- X86VectorVTInfo SrcInfo> {
+ X86VectorVTInfo SrcInfo,
+ X86VectorVTInfo ExtInfo> {
def : Pat<(DestInfo.VT (X86VBroadcast (SrcInfo.VT SrcInfo.RC:$src))),
(!cast<Instruction>(Name#DestInfo.ZSuffix#"r")
- (EXTRACT_SUBREG (SrcInfo.VT SrcInfo.RC:$src), sub_xmm))>;
+ (ExtInfo.VT (EXTRACT_SUBREG (SrcInfo.VT SrcInfo.RC:$src), sub_xmm)))>;
}
multiclass avx512_int_broadcast_rm_vl<bits<8> opc, string OpcodeStr,
@@ -1339,15 +1340,15 @@ multiclass avx512_int_broadcast_rm_vl<bits<8> opc, string OpcodeStr,
let Predicates = [prd] in {
defm Z : avx512_broadcast_rm<opc, OpcodeStr, NAME, WriteShuffle256,
WriteShuffle256Ld, _.info512, _.info128>,
- avx512_int_broadcast_rm_lowering<NAME, _.info512, _.info256>,
+ avx512_int_broadcast_rm_lowering<NAME, _.info512, _.info256, _.info128>,
EVEX_V512;
// Defined separately to avoid redefinition.
- defm Z_Alt : avx512_int_broadcast_rm_lowering<NAME, _.info512, _.info512>;
+ defm Z_Alt : avx512_int_broadcast_rm_lowering<NAME, _.info512, _.info512, _.info128>;
}
let Predicates = [prd, HasVLX] in {
defm Z256 : avx512_broadcast_rm<opc, OpcodeStr, NAME, WriteShuffle256,
WriteShuffle256Ld, _.info256, _.info128>,
- avx512_int_broadcast_rm_lowering<NAME, _.info256, _.info256>,
+ avx512_int_broadcast_rm_lowering<NAME, _.info256, _.info256, _.info128>,
EVEX_V256;
defm Z128 : avx512_broadcast_rm<opc, OpcodeStr, NAME, WriteShuffle,
WriteShuffleXLd, _.info128, _.info128>,
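
The new ExtInfo parameter exists only to name the 128-bit type that EXTRACT_SUBREG produces at sub_xmm; every instantiation above passes _.info128. Expanded for the 256-bit self-broadcast case, and assuming the usual VPBROADCASTD instantiation over avx512vl_i32_info (my expansion, not a line from the patch), the lowering pattern becomes:

    def : Pat<(v8i32 (X86VBroadcast (v8i32 VR256X:$src))),
              (VPBROADCASTDZ256r
                (v4i32 (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)))>;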
@@ -1677,20 +1678,20 @@ defm VBROADCASTF32X2 : avx512_common_broadcast_32x2<0x19, "vbroadcastf32x2",
let Predicates = [HasVLX] in {
def : Pat<(v8f32 (X86VBroadcast (v8f32 VR256X:$src))),
- (VBROADCASTSSZ256r (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm))>;
+ (VBROADCASTSSZ256r (v4f32 (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)))>;
def : Pat<(v4f64 (X86VBroadcast (v4f64 VR256X:$src))),
- (VBROADCASTSDZ256r (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm))>;
+ (VBROADCASTSDZ256r (v2f64 (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)))>;
}
def : Pat<(v16f32 (X86VBroadcast (v16f32 VR512:$src))),
- (VBROADCASTSSZr (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
+ (VBROADCASTSSZr (v4f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm)))>;
def : Pat<(v16f32 (X86VBroadcast (v8f32 VR256X:$src))),
- (VBROADCASTSSZr (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm))>;
+ (VBROADCASTSSZr (v4f32 (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)))>;
def : Pat<(v8f64 (X86VBroadcast (v8f64 VR512:$src))),
- (VBROADCASTSDZr (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm))>;
+ (VBROADCASTSDZr (v2f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm)))>;
def : Pat<(v8f64 (X86VBroadcast (v4f64 VR256X:$src))),
- (VBROADCASTSDZr (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm))>;
+ (VBROADCASTSDZr (v2f64 (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)))>;
//===----------------------------------------------------------------------===//
// AVX-512 BROADCAST MASK TO VECTOR REGISTER
@@ -3730,7 +3731,7 @@ multiclass masked_move_for_extract<string InstrStr, X86VectorVTInfo From,
To.RC:$src0)),
(Cast.VT (!cast<Instruction>(InstrStr#"rrk")
Cast.RC:$src0, Cast.KRCWM:$mask,
- (EXTRACT_SUBREG From.RC:$src, To.SubRegIdx)))>;
+ (To.VT (EXTRACT_SUBREG From.RC:$src, To.SubRegIdx))))>;
def : Pat<(Cast.VT (vselect Cast.KRCWM:$mask,
(bitconvert
@@ -3739,7 +3740,7 @@ multiclass masked_move_for_extract<string InstrStr, X86VectorVTInfo From,
Cast.ImmAllZerosV)),
(Cast.VT (!cast<Instruction>(InstrStr#"rrkz")
Cast.KRCWM:$mask,
- (EXTRACT_SUBREG From.RC:$src, To.SubRegIdx)))>;
+ (To.VT (EXTRACT_SUBREG From.RC:$src, To.SubRegIdx))))>;
}
@@ -4005,10 +4006,10 @@ def : Pat<(_.VT (OpNode _.RC:$src0,
(_.EltVT _.FRC:$src1),
(_.EltVT _.FRC:$src2))))))),
(!cast<Instruction>(InstrStr#rrk)
- (COPY_TO_REGCLASS _.FRC:$src2, _.RC),
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src2, _.RC)),
VK1WM:$mask,
(_.VT _.RC:$src0),
- (COPY_TO_REGCLASS _.FRC:$src1, _.RC))>;
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src1, _.RC)))>;
def : Pat<(_.VT (OpNode _.RC:$src0,
(_.VT (scalar_to_vector
@@ -4018,7 +4019,7 @@ def : Pat<(_.VT (OpNode _.RC:$src0,
(!cast<Instruction>(InstrStr#rrkz)
VK1WM:$mask,
(_.VT _.RC:$src0),
- (COPY_TO_REGCLASS _.FRC:$src1, _.RC))>;
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src1, _.RC)))>;
}
multiclass avx512_store_scalar_lowering<string InstrStr, AVX512VLVectorVTInfo _,
@@ -4234,22 +4235,24 @@ defm : avx512_load_scalar_lowering_subreg2<"VMOVSDZ", avx512vl_f64_info,
(iPTR 0))), GR8, sub_8bit>;
def : Pat<(f32 (X86selects VK1WM:$mask, (f32 FR32X:$src1), (f32 FR32X:$src2))),
- (COPY_TO_REGCLASS (VMOVSSZrrk (COPY_TO_REGCLASS FR32X:$src2, VR128X),
+ (COPY_TO_REGCLASS (v4f32 (VMOVSSZrrk
+ (v4f32 (COPY_TO_REGCLASS FR32X:$src2, VR128X)),
VK1WM:$mask, (v4f32 (IMPLICIT_DEF)),
- (COPY_TO_REGCLASS FR32X:$src1, VR128X)), FR32X)>;
+ (v4f32 (COPY_TO_REGCLASS FR32X:$src1, VR128X)))), FR32X)>;
def : Pat<(f32 (X86selects VK1WM:$mask, (f32 FR32X:$src1), fp32imm0)),
- (COPY_TO_REGCLASS (VMOVSSZrrkz VK1WM:$mask, (v4f32 (IMPLICIT_DEF)),
- (COPY_TO_REGCLASS FR32X:$src1, VR128X)), FR32X)>;
+ (COPY_TO_REGCLASS (v4f32 (VMOVSSZrrkz VK1WM:$mask, (v4f32 (IMPLICIT_DEF)),
+ (v4f32 (COPY_TO_REGCLASS FR32X:$src1, VR128X)))), FR32X)>;
def : Pat<(f64 (X86selects VK1WM:$mask, (f64 FR64X:$src1), (f64 FR64X:$src2))),
- (COPY_TO_REGCLASS (VMOVSDZrrk (COPY_TO_REGCLASS FR64X:$src2, VR128X),
+ (COPY_TO_REGCLASS (v2f64 (VMOVSDZrrk
+ (v2f64 (COPY_TO_REGCLASS FR64X:$src2, VR128X)),
VK1WM:$mask, (v2f64 (IMPLICIT_DEF)),
- (COPY_TO_REGCLASS FR64X:$src1, VR128X)), FR64X)>;
+ (v2f64 (COPY_TO_REGCLASS FR64X:$src1, VR128X)))), FR64X)>;
def : Pat<(f64 (X86selects VK1WM:$mask, (f64 FR64X:$src1), fpimm0)),
- (COPY_TO_REGCLASS (VMOVSDZrrkz VK1WM:$mask, (v2f64 (IMPLICIT_DEF)),
- (COPY_TO_REGCLASS FR64X:$src1, VR128X)), FR64X)>;
+ (COPY_TO_REGCLASS (v2f64 (VMOVSDZrrkz VK1WM:$mask, (v2f64 (IMPLICIT_DEF)),
+ (v2f64 (COPY_TO_REGCLASS FR64X:$src1, VR128X)))), FR64X)>;
let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in {
def VMOVSSZrr_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
@@ -4334,38 +4337,40 @@ let Predicates = [HasAVX512, OptForSize] in {
// Move low f32 and clear high bits.
def : Pat<(v8f32 (X86vzmovl (v8f32 VR256X:$src))),
(SUBREG_TO_REG (i32 0),
- (VMOVSSZrr (v4f32 (AVX512_128_SET0)),
- (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)), sub_xmm)>;
+ (v4f32 (VMOVSSZrr (v4f32 (AVX512_128_SET0)),
+ (v4f32 (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)))), sub_xmm)>;
def : Pat<(v8i32 (X86vzmovl (v8i32 VR256X:$src))),
(SUBREG_TO_REG (i32 0),
- (VMOVSSZrr (v4i32 (AVX512_128_SET0)),
- (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)), sub_xmm)>;
+ (v4i32 (VMOVSSZrr (v4i32 (AVX512_128_SET0)),
+ (v4i32 (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)))), sub_xmm)>;
def : Pat<(v4f64 (X86vzmovl (v4f64 VR256X:$src))),
(SUBREG_TO_REG (i32 0),
- (VMOVSDZrr (v2f64 (AVX512_128_SET0)),
- (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)), sub_xmm)>;
+ (v2f64 (VMOVSDZrr (v2f64 (AVX512_128_SET0)),
+ (v2f64 (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)))), sub_xmm)>;
def : Pat<(v4i64 (X86vzmovl (v4i64 VR256X:$src))),
- (SUBREG_TO_REG (i32 0), (VMOVSDZrr (v2i64 (AVX512_128_SET0)),
- (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)), sub_xmm)>;
+ (SUBREG_TO_REG (i32 0),
+ (v2i64 (VMOVSDZrr (v2i64 (AVX512_128_SET0)),
+ (v2i64 (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)))), sub_xmm)>;
def : Pat<(v16f32 (X86vzmovl (v16f32 VR512:$src))),
(SUBREG_TO_REG (i32 0),
- (VMOVSSZrr (v4f32 (AVX512_128_SET0)),
- (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm)), sub_xmm)>;
+ (v4f32 (VMOVSSZrr (v4f32 (AVX512_128_SET0)),
+ (v4f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm)))), sub_xmm)>;
def : Pat<(v16i32 (X86vzmovl (v16i32 VR512:$src))),
(SUBREG_TO_REG (i32 0),
- (VMOVSSZrr (v4i32 (AVX512_128_SET0)),
- (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm)), sub_xmm)>;
+ (v4i32 (VMOVSSZrr (v4i32 (AVX512_128_SET0)),
+ (v4i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm)))), sub_xmm)>;
def : Pat<(v8f64 (X86vzmovl (v8f64 VR512:$src))),
(SUBREG_TO_REG (i32 0),
- (VMOVSDZrr (v2f64 (AVX512_128_SET0)),
- (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm)), sub_xmm)>;
+ (v2f64 (VMOVSDZrr (v2f64 (AVX512_128_SET0)),
+ (v2f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm)))), sub_xmm)>;
def : Pat<(v8i64 (X86vzmovl (v8i64 VR512:$src))),
- (SUBREG_TO_REG (i32 0), (VMOVSDZrr (v2i64 (AVX512_128_SET0)),
- (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm)), sub_xmm)>;
+ (SUBREG_TO_REG (i32 0),
+ (v2i64 (VMOVSDZrr (v2i64 (AVX512_128_SET0)),
+ (v2i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm)))), sub_xmm)>;
}
@@ -4374,25 +4379,25 @@ let Predicates = [HasAVX512, OptForSize] in {
let Predicates = [HasAVX512, OptForSpeed] in {
def : Pat<(v16f32 (X86vzmovl (v16f32 VR512:$src))),
(SUBREG_TO_REG (i32 0),
- (VBLENDPSrri (v4f32 (V_SET0)),
- (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm),
- (i8 1)), sub_xmm)>;
+ (v4f32 (VBLENDPSrri (v4f32 (V_SET0)),
+ (v4f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm)),
+ (i8 1))), sub_xmm)>;
def : Pat<(v16i32 (X86vzmovl (v16i32 VR512:$src))),
(SUBREG_TO_REG (i32 0),
- (VPBLENDWrri (v4i32 (V_SET0)),
- (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm),
- (i8 3)), sub_xmm)>;
+ (v4i32 (VPBLENDWrri (v4i32 (V_SET0)),
+ (v4i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm)),
+ (i8 3))), sub_xmm)>;
def : Pat<(v8f64 (X86vzmovl (v8f64 VR512:$src))),
(SUBREG_TO_REG (i32 0),
- (VBLENDPDrri (v2f64 (V_SET0)),
- (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm),
- (i8 1)), sub_xmm)>;
+ (v2f64 (VBLENDPDrri (v2f64 (V_SET0)),
+ (v2f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm)),
+ (i8 1))), sub_xmm)>;
def : Pat<(v8i64 (X86vzmovl (v8i64 VR512:$src))),
(SUBREG_TO_REG (i32 0),
- (VPBLENDWrri (v2i64 (V_SET0)),
- (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm),
- (i8 0xf)), sub_xmm)>;
+ (v2i64 (VPBLENDWrri (v2i64 (V_SET0)),
+ (v2i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm)),
+ (i8 0xf))), sub_xmm)>;
}
let Predicates = [HasAVX512] in {
@@ -4421,7 +4426,7 @@ let Predicates = [HasAVX512] in {
// 256-bit types
def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
(v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
- (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
+ (SUBREG_TO_REG (i32 0), (v4i32 (VMOVDI2PDIZrm addr:$src)), sub_xmm)>;
def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
(v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
(SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
@@ -4437,7 +4442,7 @@ let Predicates = [HasAVX512] in {
// 512-bit types
def : Pat<(v16i32 (X86vzmovl (insert_subvector undef,
(v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
- (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
+ (SUBREG_TO_REG (i32 0), (v4i32 (VMOVDI2PDIZrm addr:$src)), sub_xmm)>;
def : Pat<(v16f32 (X86vzmovl (insert_subvector undef,
(v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
(SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
@@ -4451,7 +4456,7 @@ let Predicates = [HasAVX512] in {
def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
(v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
- (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>;
+ (SUBREG_TO_REG (i64 0), (v2i64 (VMOVQI2PQIZrm addr:$src)), sub_xmm)>;
// Extract and store.
def : Pat<(store (f32 (extractelt (v4f32 VR128X:$src), (iPTR 0))),
@@ -4487,11 +4492,11 @@ let Predicates = [HasAVX512] in {
def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
(v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
- (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>;
+ (SUBREG_TO_REG (i64 0), (v2i64 (VMOV64toPQIZrr GR64:$src)), sub_xmm)>;
def : Pat<(v8i64 (X86vzmovl (insert_subvector undef,
(v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
- (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>;
+ (SUBREG_TO_REG (i64 0), (v2i64 (VMOV64toPQIZrr GR64:$src)), sub_xmm)>;
// AVX 128-bit movd/movq instruction write zeros in the high 128-bit part.
def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector (zextloadi64i32 addr:$src))))),
@@ -4503,7 +4508,7 @@ let Predicates = [HasAVX512] in {
def : Pat<(v4i32 (X86vzload addr:$src)),
(VMOVDI2PDIZrm addr:$src)>;
def : Pat<(v8i32 (X86vzload addr:$src)),
- (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
+ (SUBREG_TO_REG (i32 0), (v4i32 (VMOVDI2PDIZrm addr:$src)), sub_xmm)>;
def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
(VMOVQI2PQIZrm addr:$src)>;
def : Pat<(v2f64 (X86vzmovl (v2f64 VR128X:$src))),
@@ -4511,21 +4516,21 @@ let Predicates = [HasAVX512] in {
def : Pat<(v2i64 (X86vzload addr:$src)),
(VMOVQI2PQIZrm addr:$src)>;
def : Pat<(v4i64 (X86vzload addr:$src)),
- (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>;
+ (SUBREG_TO_REG (i64 0), (v2i64 (VMOVQI2PQIZrm addr:$src)), sub_xmm)>;
// Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
(v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
- (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src), sub_xmm)>;
+ (SUBREG_TO_REG (i32 0), (v4i32 (VMOVDI2PDIZrr GR32:$src)), sub_xmm)>;
def : Pat<(v16i32 (X86vzmovl (insert_subvector undef,
(v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
- (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src), sub_xmm)>;
+ (SUBREG_TO_REG (i32 0), (v4i32 (VMOVDI2PDIZrr GR32:$src)), sub_xmm)>;
// Use regular 128-bit instructions to match 512-bit scalar_to_vec+zext.
def : Pat<(v16i32 (X86vzload addr:$src)),
- (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
+ (SUBREG_TO_REG (i32 0), (v4i32 (VMOVDI2PDIZrm addr:$src)), sub_xmm)>;
def : Pat<(v8i64 (X86vzload addr:$src)),
- (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>;
+ (SUBREG_TO_REG (i64 0), (v2i64 (VMOVQI2PQIZrm addr:$src)), sub_xmm)>;
}
//===----------------------------------------------------------------------===//
@@ -5506,38 +5511,46 @@ defm : avx512_fp_logical_lowering_sizes<"VPANDN", X86andnp>;
let Predicates = [HasVLX,HasDQI] in {
// Use packed logical operations for scalar ops.
def : Pat<(f64 (X86fand FR64X:$src1, FR64X:$src2)),
- (COPY_TO_REGCLASS (VANDPDZ128rr
- (COPY_TO_REGCLASS FR64X:$src1, VR128X),
- (COPY_TO_REGCLASS FR64X:$src2, VR128X)), FR64X)>;
+ (COPY_TO_REGCLASS
+ (v2f64 (VANDPDZ128rr (v2f64 (COPY_TO_REGCLASS FR64X:$src1, VR128X)),
+ (v2f64 (COPY_TO_REGCLASS FR64X:$src2, VR128X)))),
+ FR64X)>;
def : Pat<(f64 (X86for FR64X:$src1, FR64X:$src2)),
- (COPY_TO_REGCLASS (VORPDZ128rr
- (COPY_TO_REGCLASS FR64X:$src1, VR128X),
- (COPY_TO_REGCLASS FR64X:$src2, VR128X)), FR64X)>;
+ (COPY_TO_REGCLASS
+ (v2f64 (VORPDZ128rr (v2f64 (COPY_TO_REGCLASS FR64X:$src1, VR128X)),
+ (v2f64 (COPY_TO_REGCLASS FR64X:$src2, VR128X)))),
+ FR64X)>;
def : Pat<(f64 (X86fxor FR64X:$src1, FR64X:$src2)),
- (COPY_TO_REGCLASS (VXORPDZ128rr
- (COPY_TO_REGCLASS FR64X:$src1, VR128X),
- (COPY_TO_REGCLASS FR64X:$src2, VR128X)), FR64X)>;
+ (COPY_TO_REGCLASS
+ (v2f64 (VXORPDZ128rr (v2f64 (COPY_TO_REGCLASS FR64X:$src1, VR128X)),
+ (v2f64 (COPY_TO_REGCLASS FR64X:$src2, VR128X)))),
+ FR64X)>;
def : Pat<(f64 (X86fandn FR64X:$src1, FR64X:$src2)),
- (COPY_TO_REGCLASS (VANDNPDZ128rr
- (COPY_TO_REGCLASS FR64X:$src1, VR128X),
- (COPY_TO_REGCLASS FR64X:$src2, VR128X)), FR64X)>;
+ (COPY_TO_REGCLASS
+ (v2f64 (VANDNPDZ128rr (v2f64 (COPY_TO_REGCLASS FR64X:$src1, VR128X)),
+ (v2f64 (COPY_TO_REGCLASS FR64X:$src2, VR128X)))),
+ FR64X)>;
def : Pat<(f32 (X86fand FR32X:$src1, FR32X:$src2)),
- (COPY_TO_REGCLASS (VANDPSZ128rr
- (COPY_TO_REGCLASS FR32X:$src1, VR128X),
- (COPY_TO_REGCLASS FR32X:$src2, VR128X)), FR32X)>;
+ (COPY_TO_REGCLASS
+ (v4f32 (VANDPSZ128rr (v4f32 (COPY_TO_REGCLASS FR32X:$src1, VR128X)),
+ (v4f32 (COPY_TO_REGCLASS FR32X:$src2, VR128X)))),
+ FR32X)>;
def : Pat<(f32 (X86for FR32X:$src1, FR32X:$src2)),
- (COPY_TO_REGCLASS (VORPSZ128rr
- (COPY_TO_REGCLASS FR32X:$src1, VR128X),
- (COPY_TO_REGCLASS FR32X:$src2, VR128X)), FR32X)>;
+ (COPY_TO_REGCLASS
+ (v4f32 (VORPSZ128rr (v4f32 (COPY_TO_REGCLASS FR32X:$src1, VR128X)),
+ (v4f32 (COPY_TO_REGCLASS FR32X:$src2, VR128X)))),
+ FR32X)>;
def : Pat<(f32 (X86fxor FR32X:$src1, FR32X:$src2)),
- (COPY_TO_REGCLASS (VXORPSZ128rr
- (COPY_TO_REGCLASS FR32X:$src1, VR128X),
- (COPY_TO_REGCLASS FR32X:$src2, VR128X)), FR32X)>;
+ (COPY_TO_REGCLASS
+ (v4f32 (VXORPSZ128rr (v4f32 (COPY_TO_REGCLASS FR32X:$src1, VR128X)),
+ (v4f32 (COPY_TO_REGCLASS FR32X:$src2, VR128X)))),
+ FR32X)>;
def : Pat<(f32 (X86fandn FR32X:$src1, FR32X:$src2)),
- (COPY_TO_REGCLASS (VANDNPSZ128rr
- (COPY_TO_REGCLASS FR32X:$src1, VR128X),
- (COPY_TO_REGCLASS FR32X:$src2, VR128X)), FR32X)>;
+ (COPY_TO_REGCLASS
+ (v4f32 (VANDNPSZ128rr (v4f32 (COPY_TO_REGCLASS FR32X:$src1, VR128X)),
+ (v4f32 (COPY_TO_REGCLASS FR32X:$src2, VR128X)))),
+ FR32X)>;
}
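
These patterns (and the parallel HasAVX/UseSSE1/UseSSE2 blocks in X86InstrSSE.td further down) exist because SSE/AVX have no scalar FP logic instructions: a scalar and/or/xor/andn is performed by the 128-bit packed form on the whole XMM register, and only element 0 of the result is read back. Roughly, the first pattern selects to the following (register names illustrative):

    // (f64 (X86fand FR64X:$a, FR64X:$b)) becomes, once the same-register
    // copies introduced by COPY_TO_REGCLASS coalesce away:
    //   vandpd %xmm1, %xmm0, %xmm0   ; packed AND; lanes above 0 are don't-care
    // and the f64 result is simply the low element of %xmm0.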
multiclass avx512_fp_scalef_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -6836,36 +6849,36 @@ multiclass avx512_scalar_fma_patterns<SDNode Op, SDNode RndOp, string Prefix,
(_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0))),
_.FRC:$src3))))),
(!cast<I>(Prefix#"213"#Suffix#"Zr_Int")
- VR128X:$src1, (COPY_TO_REGCLASS _.FRC:$src2, VR128X),
- (COPY_TO_REGCLASS _.FRC:$src3, VR128X))>;
+ VR128X:$src1, (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)),
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src3, VR128X)))>;
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
(Op _.FRC:$src2, _.FRC:$src3,
(_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0)))))))),
(!cast<I>(Prefix#"231"#Suffix#"Zr_Int")
- VR128X:$src1, (COPY_TO_REGCLASS _.FRC:$src2, VR128X),
- (COPY_TO_REGCLASS _.FRC:$src3, VR128X))>;
+ VR128X:$src1, (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)),
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src3, VR128X)))>;
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
(Op _.FRC:$src2,
(_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0))),
(_.ScalarLdFrag addr:$src3)))))),
(!cast<I>(Prefix#"213"#Suffix#"Zm_Int")
- VR128X:$src1, (COPY_TO_REGCLASS _.FRC:$src2, VR128X),
+ VR128X:$src1, (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)),
addr:$src3)>;
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
(Op (_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0))),
(_.ScalarLdFrag addr:$src3), _.FRC:$src2))))),
(!cast<I>(Prefix#"132"#Suffix#"Zm_Int")
- VR128X:$src1, (COPY_TO_REGCLASS _.FRC:$src2, VR128X),
+ VR128X:$src1, (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)),
addr:$src3)>;
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
(Op _.FRC:$src2, (_.ScalarLdFrag addr:$src3),
(_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0)))))))),
(!cast<I>(Prefix#"231"#Suffix#"Zm_Int")
- VR128X:$src1, (COPY_TO_REGCLASS _.FRC:$src2, VR128X),
+ VR128X:$src1, (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)),
addr:$src3)>;
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
@@ -6876,8 +6889,8 @@ multiclass avx512_scalar_fma_patterns<SDNode Op, SDNode RndOp, string Prefix,
(_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0)))))))),
(!cast<I>(Prefix#"213"#Suffix#"Zr_Intk")
VR128X:$src1, VK1WM:$mask,
- (COPY_TO_REGCLASS _.FRC:$src2, VR128X),
- (COPY_TO_REGCLASS _.FRC:$src3, VR128X))>;
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)),
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src3, VR128X)))>;
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
(X86selects VK1WM:$mask,
@@ -6887,7 +6900,7 @@ multiclass avx512_scalar_fma_patterns<SDNode Op, SDNode RndOp, string Prefix,
(_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0)))))))),
(!cast<I>(Prefix#"213"#Suffix#"Zm_Intk")
VR128X:$src1, VK1WM:$mask,
- (COPY_TO_REGCLASS _.FRC:$src2, VR128X), addr:$src3)>;
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)), addr:$src3)>;
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
(X86selects VK1WM:$mask,
@@ -6896,7 +6909,7 @@ multiclass avx512_scalar_fma_patterns<SDNode Op, SDNode RndOp, string Prefix,
(_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0)))))))),
(!cast<I>(Prefix#"132"#Suffix#"Zm_Intk")
VR128X:$src1, VK1WM:$mask,
- (COPY_TO_REGCLASS _.FRC:$src2, VR128X), addr:$src3)>;
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)), addr:$src3)>;
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
(X86selects VK1WM:$mask,
@@ -6905,8 +6918,8 @@ multiclass avx512_scalar_fma_patterns<SDNode Op, SDNode RndOp, string Prefix,
(_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0)))))))),
(!cast<I>(Prefix#"231"#Suffix#"Zr_Intk")
VR128X:$src1, VK1WM:$mask,
- (COPY_TO_REGCLASS _.FRC:$src2, VR128X),
- (COPY_TO_REGCLASS _.FRC:$src3, VR128X))>;
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)),
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src3, VR128X)))>;
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
(X86selects VK1WM:$mask,
@@ -6915,7 +6928,7 @@ multiclass avx512_scalar_fma_patterns<SDNode Op, SDNode RndOp, string Prefix,
(_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0)))))))),
(!cast<I>(Prefix#"231"#Suffix#"Zm_Intk")
VR128X:$src1, VK1WM:$mask,
- (COPY_TO_REGCLASS _.FRC:$src2, VR128X), addr:$src3)>;
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)), addr:$src3)>;
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
(X86selects VK1WM:$mask,
@@ -6925,8 +6938,8 @@ multiclass avx512_scalar_fma_patterns<SDNode Op, SDNode RndOp, string Prefix,
(_.EltVT ZeroFP)))))),
(!cast<I>(Prefix#"213"#Suffix#"Zr_Intkz")
VR128X:$src1, VK1WM:$mask,
- (COPY_TO_REGCLASS _.FRC:$src2, VR128X),
- (COPY_TO_REGCLASS _.FRC:$src3, VR128X))>;
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)),
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src3, VR128X)))>;
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
(X86selects VK1WM:$mask,
@@ -6935,8 +6948,8 @@ multiclass avx512_scalar_fma_patterns<SDNode Op, SDNode RndOp, string Prefix,
(_.EltVT ZeroFP)))))),
(!cast<I>(Prefix#"231"#Suffix#"Zr_Intkz")
VR128X:$src1, VK1WM:$mask,
- (COPY_TO_REGCLASS _.FRC:$src2, VR128X),
- (COPY_TO_REGCLASS _.FRC:$src3, VR128X))>;
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)),
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src3, VR128X)))>;
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
(X86selects VK1WM:$mask,
@@ -6946,7 +6959,7 @@ multiclass avx512_scalar_fma_patterns<SDNode Op, SDNode RndOp, string Prefix,
(_.EltVT ZeroFP)))))),
(!cast<I>(Prefix#"213"#Suffix#"Zm_Intkz")
VR128X:$src1, VK1WM:$mask,
- (COPY_TO_REGCLASS _.FRC:$src2, VR128X), addr:$src3)>;
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)), addr:$src3)>;
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
(X86selects VK1WM:$mask,
@@ -6955,7 +6968,7 @@ multiclass avx512_scalar_fma_patterns<SDNode Op, SDNode RndOp, string Prefix,
(_.EltVT ZeroFP)))))),
(!cast<I>(Prefix#"132"#Suffix#"Zm_Intkz")
VR128X:$src1, VK1WM:$mask,
- (COPY_TO_REGCLASS _.FRC:$src2, VR128X), addr:$src3)>;
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)), addr:$src3)>;
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
(X86selects VK1WM:$mask,
@@ -6964,7 +6977,7 @@ multiclass avx512_scalar_fma_patterns<SDNode Op, SDNode RndOp, string Prefix,
(_.EltVT ZeroFP)))))),
(!cast<I>(Prefix#"231"#Suffix#"Zm_Intkz")
VR128X:$src1, VK1WM:$mask,
- (COPY_TO_REGCLASS _.FRC:$src2, VR128X), addr:$src3)>;
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)), addr:$src3)>;
// Patterns with rounding mode.
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
@@ -6972,16 +6985,16 @@ multiclass avx512_scalar_fma_patterns<SDNode Op, SDNode RndOp, string Prefix,
(_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0))),
_.FRC:$src3, (i32 imm:$rc)))))),
(!cast<I>(Prefix#"213"#Suffix#"Zrb_Int")
- VR128X:$src1, (COPY_TO_REGCLASS _.FRC:$src2, VR128X),
- (COPY_TO_REGCLASS _.FRC:$src3, VR128X), imm:$rc)>;
+ VR128X:$src1, (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)),
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src3, VR128X)), imm:$rc)>;
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
(RndOp _.FRC:$src2, _.FRC:$src3,
(_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0))),
(i32 imm:$rc)))))),
(!cast<I>(Prefix#"231"#Suffix#"Zrb_Int")
- VR128X:$src1, (COPY_TO_REGCLASS _.FRC:$src2, VR128X),
- (COPY_TO_REGCLASS _.FRC:$src3, VR128X), imm:$rc)>;
+ VR128X:$src1, (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)),
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src3, VR128X)), imm:$rc)>;
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
(X86selects VK1WM:$mask,
@@ -6991,8 +7004,8 @@ multiclass avx512_scalar_fma_patterns<SDNode Op, SDNode RndOp, string Prefix,
(_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0)))))))),
(!cast<I>(Prefix#"213"#Suffix#"Zrb_Intk")
VR128X:$src1, VK1WM:$mask,
- (COPY_TO_REGCLASS _.FRC:$src2, VR128X),
- (COPY_TO_REGCLASS _.FRC:$src3, VR128X), imm:$rc)>;
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)),
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src3, VR128X)), imm:$rc)>;
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
(X86selects VK1WM:$mask,
@@ -7002,8 +7015,8 @@ multiclass avx512_scalar_fma_patterns<SDNode Op, SDNode RndOp, string Prefix,
(_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0)))))))),
(!cast<I>(Prefix#"231"#Suffix#"Zrb_Intk")
VR128X:$src1, VK1WM:$mask,
- (COPY_TO_REGCLASS _.FRC:$src2, VR128X),
- (COPY_TO_REGCLASS _.FRC:$src3, VR128X), imm:$rc)>;
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)),
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src3, VR128X)), imm:$rc)>;
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
(X86selects VK1WM:$mask,
@@ -7013,8 +7026,8 @@ multiclass avx512_scalar_fma_patterns<SDNode Op, SDNode RndOp, string Prefix,
(_.EltVT ZeroFP)))))),
(!cast<I>(Prefix#"213"#Suffix#"Zrb_Intkz")
VR128X:$src1, VK1WM:$mask,
- (COPY_TO_REGCLASS _.FRC:$src2, VR128X),
- (COPY_TO_REGCLASS _.FRC:$src3, VR128X), imm:$rc)>;
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)),
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src3, VR128X)), imm:$rc)>;
def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector
(X86selects VK1WM:$mask,
@@ -7024,8 +7037,8 @@ multiclass avx512_scalar_fma_patterns<SDNode Op, SDNode RndOp, string Prefix,
(_.EltVT ZeroFP)))))),
(!cast<I>(Prefix#"231"#Suffix#"Zrb_Intkz")
VR128X:$src1, VK1WM:$mask,
- (COPY_TO_REGCLASS _.FRC:$src2, VR128X),
- (COPY_TO_REGCLASS _.FRC:$src3, VR128X), imm:$rc)>;
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)),
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src3, VR128X)), imm:$rc)>;
}
}
@@ -8467,16 +8480,17 @@ let Predicates = [HasVLX] in {
// more consistent with other instructions, which are always controlled by it.
// It's encoded as 0b100.
def : Pat<(fp_to_f16 FR32X:$src),
- (i16 (EXTRACT_SUBREG (VMOVPDI2DIZrr (VCVTPS2PHZ128rr
- (COPY_TO_REGCLASS FR32X:$src, VR128X), 4)), sub_16bit))>;
+ (i16 (EXTRACT_SUBREG (VMOVPDI2DIZrr (v8i16 (VCVTPS2PHZ128rr
+ (v4f32 (COPY_TO_REGCLASS FR32X:$src, VR128X)), 4))), sub_16bit))>;
def : Pat<(f16_to_fp GR16:$src),
- (f32 (COPY_TO_REGCLASS (VCVTPH2PSZ128rr
- (COPY_TO_REGCLASS (MOVSX32rr16 GR16:$src), VR128X)), FR32X)) >;
+ (f32 (COPY_TO_REGCLASS (v4f32 (VCVTPH2PSZ128rr
+ (v8i16 (COPY_TO_REGCLASS (MOVSX32rr16 GR16:$src), VR128X)))), FR32X)) >;
def : Pat<(f16_to_fp (i16 (fp_to_f16 FR32X:$src))),
- (f32 (COPY_TO_REGCLASS (VCVTPH2PSZ128rr
- (VCVTPS2PHZ128rr (COPY_TO_REGCLASS FR32X:$src, VR128X), 4)), FR32X)) >;
+ (f32 (COPY_TO_REGCLASS (v4f32 (VCVTPH2PSZ128rr
+ (v8i16 (VCVTPS2PHZ128rr
+ (v4f32 (COPY_TO_REGCLASS FR32X:$src, VR128X)), 4)))), FR32X)) >;
}
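
For reference, the literal 4 passed to the VCVTPS2PH instructions here (and in the matching F16C block in X86InstrSSE.td below) encodes the choice the comment above describes:

    // VCVTPS2PH imm8 layout (Intel SDM):
    //   imm8[1:0] = rounding control: 00 nearest-even, 01 down, 10 up, 11 truncate
    //   imm8[2]   = 1 -> ignore imm8[1:0] and round per MXCSR.RC
    // hence the literal 4 is 0b100: "use the MXCSR rounding mode".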
// Unordered/Ordered scalar fp compare with Sae and set EFLAGS
@@ -10798,17 +10812,17 @@ let Predicates = [HasVLX] in {
def : Pat<(v2f64 (X86VBroadcast (loadf64 addr:$src))),
(VMOVDDUPZ128rm addr:$src)>;
def : Pat<(v2f64 (X86VBroadcast f64:$src)),
- (VMOVDDUPZ128rr (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
+ (VMOVDDUPZ128rr (v2f64 (COPY_TO_REGCLASS FR64X:$src, VR128X)))>;
def : Pat<(v2f64 (X86VBroadcast (loadv2f64 addr:$src))),
(VMOVDDUPZ128rm addr:$src)>;
def : Pat<(vselect (v2i1 VK2WM:$mask), (v2f64 (X86VBroadcast f64:$src)),
(v2f64 VR128X:$src0)),
(VMOVDDUPZ128rrk VR128X:$src0, VK2WM:$mask,
- (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
+ (v2f64 (COPY_TO_REGCLASS FR64X:$src, VR128X)))>;
def : Pat<(vselect (v2i1 VK2WM:$mask), (v2f64 (X86VBroadcast f64:$src)),
(bitconvert (v4i32 immAllZerosV))),
- (VMOVDDUPZ128rrkz VK2WM:$mask, (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
+ (VMOVDDUPZ128rrkz VK2WM:$mask, (v2f64 (COPY_TO_REGCLASS FR64X:$src, VR128X)))>;
def : Pat<(vselect (v2i1 VK2WM:$mask), (v2f64 (X86VBroadcast (loadf64 addr:$src))),
(v2f64 VR128X:$src0)),
@@ -11521,7 +11535,7 @@ multiclass AVX512_scalar_math_fp_patterns<SDNode Op, string OpcPrefix, SDNode Mo
(Op (_.EltVT (extractelt (_.VT VR128X:$dst), (iPTR 0))),
_.FRC:$src)))),
(!cast<Instruction>("V"#OpcPrefix#Zrr_Int) _.VT:$dst,
- (COPY_TO_REGCLASS _.FRC:$src, VR128X))>;
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src, VR128X)))>;
// extracted masked scalar math op with insert via movss
def : Pat<(MoveNode (_.VT VR128X:$src1),
@@ -11532,9 +11546,9 @@ multiclass AVX512_scalar_math_fp_patterns<SDNode Op, string OpcPrefix, SDNode Mo
_.FRC:$src2),
_.FRC:$src0))),
(!cast<Instruction>("V"#OpcPrefix#Zrr_Intk)
- (COPY_TO_REGCLASS _.FRC:$src0, VR128X),
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src0, VR128X)),
VK1WM:$mask, _.VT:$src1,
- (COPY_TO_REGCLASS _.FRC:$src2, VR128X))>;
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)))>;
// extracted masked scalar math op with insert via movss
def : Pat<(MoveNode (_.VT VR128X:$src1),
@@ -11543,9 +11557,9 @@ multiclass AVX512_scalar_math_fp_patterns<SDNode Op, string OpcPrefix, SDNode Mo
(Op (_.EltVT
(extractelt (_.VT VR128X:$src1), (iPTR 0))),
_.FRC:$src2), (_.EltVT ZeroFP)))),
- (!cast<Instruction>("V"#OpcPrefix#Zrr_Intkz)
- VK1WM:$mask, _.VT:$src1,
- (COPY_TO_REGCLASS _.FRC:$src2, VR128X))>;
+ (!cast<I>("V"#OpcPrefix#Zrr_Intkz)
+ VK1WM:$mask, _.VT:$src1,
+ (_.VT (COPY_TO_REGCLASS _.FRC:$src2, VR128X)))>;
}
}
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index f1aa4faeeb8..f360c0a6a63 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -563,7 +563,7 @@ let usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS] in {
defm _FR32 : CMOVrr_PSEUDO<FR32, f32>;
defm _FR64 : CMOVrr_PSEUDO<FR64, f64>;
- defm _FR128 : CMOVrr_PSEUDO<FR128, f128>;
+ defm _F128 : CMOVrr_PSEUDO<VR128, f128>;
defm _V4F32 : CMOVrr_PSEUDO<VR128, v4f32>;
defm _V2F64 : CMOVrr_PSEUDO<VR128, v2f64>;
defm _V2I64 : CMOVrr_PSEUDO<VR128, v2i64>;
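
The defm suffix becomes the pseudo's opcode name, which is why the custom-inserter case in X86ISelLowering.cpp changed from X86::CMOV_FR128 to X86::CMOV_F128: with FR128 gone, the f128 pseudo is instantiated over VR128 and named after the value type rather than the deleted class. For context, the multiclass being instantiated looks roughly like this (paraphrased from X86InstrCompiler.td; the exact operand list may differ):

    multiclass CMOVrr_PSEUDO<RegisterClass RC, ValueType VT> {
      def CMOV#NAME : I<0, Pseudo,
                        (outs RC:$dst), (ins RC:$t, RC:$f, i8imm:$cond),
                        "#CMOV_"#NAME#" PSEUDO!",
                        [(set RC:$dst, (VT (X86cmov RC:$t, RC:$f,
                                            imm:$cond, EFLAGS)))]>;
    }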
diff --git a/llvm/lib/Target/X86/X86InstrFMA.td b/llvm/lib/Target/X86/X86InstrFMA.td
index f418bfae35a..7a35d07495c 100644
--- a/llvm/lib/Target/X86/X86InstrFMA.td
+++ b/llvm/lib/Target/X86/X86InstrFMA.td
@@ -337,29 +337,29 @@ multiclass scalar_fma_patterns<SDNode Op, string Prefix, string Suffix,
(EltVT (extractelt (VT VR128:$src1), (iPTR 0))),
RC:$src3))))),
(!cast<Instruction>(Prefix#"213"#Suffix#"r_Int")
- VR128:$src1, (COPY_TO_REGCLASS RC:$src2, VR128),
- (COPY_TO_REGCLASS RC:$src3, VR128))>;
+ VR128:$src1, (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
+ (VT (COPY_TO_REGCLASS RC:$src3, VR128)))>;
def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
(Op RC:$src2,
(EltVT (extractelt (VT VR128:$src1), (iPTR 0))),
(mem_frag addr:$src3)))))),
(!cast<Instruction>(Prefix#"213"#Suffix#"m_Int")
- VR128:$src1, (COPY_TO_REGCLASS RC:$src2, VR128),
+ VR128:$src1, (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
addr:$src3)>;
def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
(Op (EltVT (extractelt (VT VR128:$src1), (iPTR 0))),
(mem_frag addr:$src3), RC:$src2))))),
(!cast<Instruction>(Prefix#"132"#Suffix#"m_Int")
- VR128:$src1, (COPY_TO_REGCLASS RC:$src2, VR128),
+ VR128:$src1, (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
addr:$src3)>;
def : Pat<(VT (Move (VT VR128:$src1), (VT (scalar_to_vector
(Op RC:$src2, (mem_frag addr:$src3),
(EltVT (extractelt (VT VR128:$src1), (iPTR 0)))))))),
(!cast<Instruction>(Prefix#"231"#Suffix#"m_Int")
- VR128:$src1, (COPY_TO_REGCLASS RC:$src2, VR128),
+ VR128:$src1, (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
addr:$src3)>;
}
}
@@ -598,23 +598,23 @@ multiclass scalar_fma4_patterns<SDNode Op, string Name,
def : Pat<(VT (X86vzmovl (VT (scalar_to_vector
(Op RC:$src1, RC:$src2, RC:$src3))))),
(!cast<Instruction>(Name#"rr_Int")
- (COPY_TO_REGCLASS RC:$src1, VR128),
- (COPY_TO_REGCLASS RC:$src2, VR128),
- (COPY_TO_REGCLASS RC:$src3, VR128))>;
+ (VT (COPY_TO_REGCLASS RC:$src1, VR128)),
+ (VT (COPY_TO_REGCLASS RC:$src2, VR128)),
+ (VT (COPY_TO_REGCLASS RC:$src3, VR128)))>;
def : Pat<(VT (X86vzmovl (VT (scalar_to_vector
(Op RC:$src1, RC:$src2,
(mem_frag addr:$src3)))))),
(!cast<Instruction>(Name#"rm_Int")
- (COPY_TO_REGCLASS RC:$src1, VR128),
- (COPY_TO_REGCLASS RC:$src2, VR128), addr:$src3)>;
+ (VT (COPY_TO_REGCLASS RC:$src1, VR128)),
+ (VT (COPY_TO_REGCLASS RC:$src2, VR128)), addr:$src3)>;
def : Pat<(VT (X86vzmovl (VT (scalar_to_vector
(Op RC:$src1, (mem_frag addr:$src2),
RC:$src3))))),
(!cast<Instruction>(Name#"mr_Int")
- (COPY_TO_REGCLASS RC:$src1, VR128), addr:$src2,
- (COPY_TO_REGCLASS RC:$src3, VR128))>;
+ (VT (COPY_TO_REGCLASS RC:$src1, VR128)), addr:$src2,
+ (VT (COPY_TO_REGCLASS RC:$src3, VR128)))>;
}
}
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index ae4b3ce83a3..2d03a8d4854 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -308,21 +308,23 @@ let Predicates = [UseAVX, OptForSize] in {
// Move low f32 and clear high bits.
def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
(SUBREG_TO_REG (i32 0),
- (VMOVSSrr (v4f32 (V_SET0)),
- (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm)), sub_xmm)>;
+ (v4f32 (VMOVSSrr (v4f32 (V_SET0)),
+ (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm)))), sub_xmm)>;
def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
(SUBREG_TO_REG (i32 0),
- (VMOVSSrr (v4i32 (V_SET0)),
- (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm)), sub_xmm)>;
+ (v4i32 (VMOVSSrr (v4i32 (V_SET0)),
+ (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm)))), sub_xmm)>;
def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
(SUBREG_TO_REG (i32 0),
- (VMOVSDrr (v2f64 (V_SET0)),
- (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm)), sub_xmm)>;
+ (v2f64 (VMOVSDrr (v2f64 (V_SET0)),
+ (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm)))),
+ sub_xmm)>;
def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),
(SUBREG_TO_REG (i32 0),
- (VMOVSDrr (v2i64 (V_SET0)),
- (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm)), sub_xmm)>;
+ (v2i64 (VMOVSDrr (v2i64 (V_SET0)),
+ (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm)))),
+ sub_xmm)>;
}
let Predicates = [UseSSE1] in {
@@ -2421,78 +2423,94 @@ let Predicates = [HasAVX1Only] in {
let Predicates = [HasAVX, NoVLX_Or_NoDQI] in {
// Use packed logical operations for scalar ops.
def : Pat<(f64 (X86fand FR64:$src1, FR64:$src2)),
- (COPY_TO_REGCLASS (VANDPDrr
- (COPY_TO_REGCLASS FR64:$src1, VR128),
- (COPY_TO_REGCLASS FR64:$src2, VR128)), FR64)>;
+ (COPY_TO_REGCLASS
+ (v2f64 (VANDPDrr (v2f64 (COPY_TO_REGCLASS FR64:$src1, VR128)),
+ (v2f64 (COPY_TO_REGCLASS FR64:$src2, VR128)))),
+ FR64)>;
def : Pat<(f64 (X86for FR64:$src1, FR64:$src2)),
- (COPY_TO_REGCLASS (VORPDrr
- (COPY_TO_REGCLASS FR64:$src1, VR128),
- (COPY_TO_REGCLASS FR64:$src2, VR128)), FR64)>;
+ (COPY_TO_REGCLASS
+ (v2f64 (VORPDrr (v2f64 (COPY_TO_REGCLASS FR64:$src1, VR128)),
+ (v2f64 (COPY_TO_REGCLASS FR64:$src2, VR128)))),
+ FR64)>;
def : Pat<(f64 (X86fxor FR64:$src1, FR64:$src2)),
- (COPY_TO_REGCLASS (VXORPDrr
- (COPY_TO_REGCLASS FR64:$src1, VR128),
- (COPY_TO_REGCLASS FR64:$src2, VR128)), FR64)>;
+ (COPY_TO_REGCLASS
+ (v2f64 (VXORPDrr (v2f64 (COPY_TO_REGCLASS FR64:$src1, VR128)),
+ (v2f64 (COPY_TO_REGCLASS FR64:$src2, VR128)))),
+ FR64)>;
def : Pat<(f64 (X86fandn FR64:$src1, FR64:$src2)),
- (COPY_TO_REGCLASS (VANDNPDrr
- (COPY_TO_REGCLASS FR64:$src1, VR128),
- (COPY_TO_REGCLASS FR64:$src2, VR128)), FR64)>;
+ (COPY_TO_REGCLASS
+ (v2f64 (VANDNPDrr (v2f64 (COPY_TO_REGCLASS FR64:$src1, VR128)),
+ (v2f64 (COPY_TO_REGCLASS FR64:$src2, VR128)))),
+ FR64)>;
def : Pat<(f32 (X86fand FR32:$src1, FR32:$src2)),
- (COPY_TO_REGCLASS (VANDPSrr
- (COPY_TO_REGCLASS FR32:$src1, VR128),
- (COPY_TO_REGCLASS FR32:$src2, VR128)), FR32)>;
+ (COPY_TO_REGCLASS
+ (v4f32 (VANDPSrr (v4f32 (COPY_TO_REGCLASS FR32:$src1, VR128)),
+ (v4f32 (COPY_TO_REGCLASS FR32:$src2, VR128)))),
+ FR32)>;
def : Pat<(f32 (X86for FR32:$src1, FR32:$src2)),
- (COPY_TO_REGCLASS (VORPSrr
- (COPY_TO_REGCLASS FR32:$src1, VR128),
- (COPY_TO_REGCLASS FR32:$src2, VR128)), FR32)>;
+ (COPY_TO_REGCLASS
+ (v4f32 (VORPSrr (v4f32 (COPY_TO_REGCLASS FR32:$src1, VR128)),
+ (v4f32 (COPY_TO_REGCLASS FR32:$src2, VR128)))),
+ FR32)>;
def : Pat<(f32 (X86fxor FR32:$src1, FR32:$src2)),
- (COPY_TO_REGCLASS (VXORPSrr
- (COPY_TO_REGCLASS FR32:$src1, VR128),
- (COPY_TO_REGCLASS FR32:$src2, VR128)), FR32)>;
+ (COPY_TO_REGCLASS
+ (v4f32 (VXORPSrr (v4f32 (COPY_TO_REGCLASS FR32:$src1, VR128)),
+ (v4f32 (COPY_TO_REGCLASS FR32:$src2, VR128)))),
+ FR32)>;
def : Pat<(f32 (X86fandn FR32:$src1, FR32:$src2)),
- (COPY_TO_REGCLASS (VANDNPSrr
- (COPY_TO_REGCLASS FR32:$src1, VR128),
- (COPY_TO_REGCLASS FR32:$src2, VR128)), FR32)>;
+ (COPY_TO_REGCLASS
+ (v4f32 (VANDNPSrr (v4f32 (COPY_TO_REGCLASS FR32:$src1, VR128)),
+ (v4f32 (COPY_TO_REGCLASS FR32:$src2, VR128)))),
+ FR32)>;
}
let Predicates = [UseSSE1] in {
// Use packed logical operations for scalar ops.
def : Pat<(f32 (X86fand FR32:$src1, FR32:$src2)),
- (COPY_TO_REGCLASS (ANDPSrr
- (COPY_TO_REGCLASS FR32:$src1, VR128),
- (COPY_TO_REGCLASS FR32:$src2, VR128)), FR32)>;
+ (COPY_TO_REGCLASS
+ (v4f32 (ANDPSrr (v4f32 (COPY_TO_REGCLASS FR32:$src1, VR128)),
+ (v4f32 (COPY_TO_REGCLASS FR32:$src2, VR128)))),
+ FR32)>;
def : Pat<(f32 (X86for FR32:$src1, FR32:$src2)),
- (COPY_TO_REGCLASS (ORPSrr
- (COPY_TO_REGCLASS FR32:$src1, VR128),
- (COPY_TO_REGCLASS FR32:$src2, VR128)), FR32)>;
+ (COPY_TO_REGCLASS
+ (v4f32 (ORPSrr (v4f32 (COPY_TO_REGCLASS FR32:$src1, VR128)),
+ (v4f32 (COPY_TO_REGCLASS FR32:$src2, VR128)))),
+ FR32)>;
def : Pat<(f32 (X86fxor FR32:$src1, FR32:$src2)),
- (COPY_TO_REGCLASS (XORPSrr
- (COPY_TO_REGCLASS FR32:$src1, VR128),
- (COPY_TO_REGCLASS FR32:$src2, VR128)), FR32)>;
+ (COPY_TO_REGCLASS
+ (v4f32 (XORPSrr (v4f32 (COPY_TO_REGCLASS FR32:$src1, VR128)),
+ (v4f32 (COPY_TO_REGCLASS FR32:$src2, VR128)))),
+ FR32)>;
def : Pat<(f32 (X86fandn FR32:$src1, FR32:$src2)),
- (COPY_TO_REGCLASS (ANDNPSrr
- (COPY_TO_REGCLASS FR32:$src1, VR128),
- (COPY_TO_REGCLASS FR32:$src2, VR128)), FR32)>;
+ (COPY_TO_REGCLASS
+ (v4f32 (ANDNPSrr (v4f32 (COPY_TO_REGCLASS FR32:$src1, VR128)),
+ (v4f32 (COPY_TO_REGCLASS FR32:$src2, VR128)))),
+ FR32)>;
}
let Predicates = [UseSSE2] in {
// Use packed logical operations for scalar ops.
def : Pat<(f64 (X86fand FR64:$src1, FR64:$src2)),
- (COPY_TO_REGCLASS (ANDPDrr
- (COPY_TO_REGCLASS FR64:$src1, VR128),
- (COPY_TO_REGCLASS FR64:$src2, VR128)), FR64)>;
+ (COPY_TO_REGCLASS
+ (v2f64 (ANDPDrr (v2f64 (COPY_TO_REGCLASS FR64:$src1, VR128)),
+ (v2f64 (COPY_TO_REGCLASS FR64:$src2, VR128)))),
+ FR64)>;
def : Pat<(f64 (X86for FR64:$src1, FR64:$src2)),
- (COPY_TO_REGCLASS (ORPDrr
- (COPY_TO_REGCLASS FR64:$src1, VR128),
- (COPY_TO_REGCLASS FR64:$src2, VR128)), FR64)>;
+ (COPY_TO_REGCLASS
+ (v2f64 (ORPDrr (v2f64 (COPY_TO_REGCLASS FR64:$src1, VR128)),
+ (v2f64 (COPY_TO_REGCLASS FR64:$src2, VR128)))),
+ FR64)>;
def : Pat<(f64 (X86fxor FR64:$src1, FR64:$src2)),
- (COPY_TO_REGCLASS (XORPDrr
- (COPY_TO_REGCLASS FR64:$src1, VR128),
- (COPY_TO_REGCLASS FR64:$src2, VR128)), FR64)>;
+ (COPY_TO_REGCLASS
+ (v2f64 (XORPDrr (v2f64 (COPY_TO_REGCLASS FR64:$src1, VR128)),
+ (v2f64 (COPY_TO_REGCLASS FR64:$src2, VR128)))),
+ FR64)>;
def : Pat<(f64 (X86fandn FR64:$src1, FR64:$src2)),
- (COPY_TO_REGCLASS (ANDNPDrr
- (COPY_TO_REGCLASS FR64:$src1, VR128),
- (COPY_TO_REGCLASS FR64:$src2, VR128)), FR64)>;
+ (COPY_TO_REGCLASS
+ (v2f64 (ANDNPDrr (v2f64 (COPY_TO_REGCLASS FR64:$src1, VR128)),
+ (v2f64 (COPY_TO_REGCLASS FR64:$src2, VR128)))),
+ FR64)>;
}
// Patterns for packed operations when we don't have integer type available.
@@ -2679,7 +2697,7 @@ multiclass scalar_math_patterns<SDNode Op, string OpcPrefix, SDNode Move,
(Op (EltTy (extractelt (VT VR128:$dst), (iPTR 0))),
RC:$src))))),
(!cast<Instruction>(OpcPrefix#rr_Int) VT:$dst,
- (COPY_TO_REGCLASS RC:$src, VR128))>;
+ (VT (COPY_TO_REGCLASS RC:$src, VR128)))>;
}
// Repeat for AVX versions of the instructions.
@@ -2690,7 +2708,7 @@ multiclass scalar_math_patterns<SDNode Op, string OpcPrefix, SDNode Move,
(Op (EltTy (extractelt (VT VR128:$dst), (iPTR 0))),
RC:$src))))),
(!cast<Instruction>("V"#OpcPrefix#rr_Int) VT:$dst,
- (COPY_TO_REGCLASS RC:$src, VR128))>;
+ (VT (COPY_TO_REGCLASS RC:$src, VR128)))>;
}
}
@@ -4163,7 +4181,7 @@ let Predicates = [UseAVX] in {
def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
(v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
- (SUBREG_TO_REG (i64 0), (VMOV64toPQIrr GR64:$src), sub_xmm)>;
+ (SUBREG_TO_REG (i64 0), (v2i64 (VMOV64toPQIrr GR64:$src)), sub_xmm)>;
// AVX 128-bit movd/movq instructions write zeros in the high 128-bit part.
// These instructions also write zeros in the high part of a 256-bit register.
def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector (zextloadi64i32 addr:$src))))),
@@ -4176,13 +4194,13 @@ let Predicates = [UseAVX] in {
(VMOVDI2PDIrm addr:$src)>;
def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
(v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
- (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrm addr:$src), sub_xmm)>;
+ (SUBREG_TO_REG (i32 0), (v4i32 (VMOVDI2PDIrm addr:$src)), sub_xmm)>;
def : Pat<(v8i32 (X86vzload addr:$src)),
- (SUBREG_TO_REG (i64 0), (VMOVDI2PDIrm addr:$src), sub_xmm)>;
+ (SUBREG_TO_REG (i64 0), (v4i32 (VMOVDI2PDIrm addr:$src)), sub_xmm)>;
// Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
(v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
- (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src), sub_xmm)>;
+ (SUBREG_TO_REG (i32 0), (v4i32 (VMOVDI2PDIrr GR32:$src)), sub_xmm)>;
}
let Predicates = [UseSSE2] in {
@@ -4276,9 +4294,9 @@ let Predicates = [UseAVX] in {
(VMOVQI2PQIrm addr:$src)>;
def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
(v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
- (SUBREG_TO_REG (i64 0), (VMOVQI2PQIrm addr:$src), sub_xmm)>;
+ (SUBREG_TO_REG (i64 0), (v2i64 (VMOVQI2PQIrm addr:$src)), sub_xmm)>;
def : Pat<(v4i64 (X86vzload addr:$src)),
- (SUBREG_TO_REG (i64 0), (VMOVQI2PQIrm addr:$src), sub_xmm)>;
+ (SUBREG_TO_REG (i64 0), (v2i64 (VMOVQI2PQIrm addr:$src)), sub_xmm)>;
}
let Predicates = [UseSSE2] in {
@@ -6448,25 +6466,25 @@ let Predicates = [HasAVX, OptForSpeed] in {
// Move low f32 and clear high bits.
def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
(SUBREG_TO_REG (i32 0),
- (VBLENDPSrri (v4f32 (V_SET0)),
- (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm),
- (i8 1)), sub_xmm)>;
+ (v4f32 (VBLENDPSrri (v4f32 (V_SET0)),
+ (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm)),
+ (i8 1))), sub_xmm)>;
def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
(SUBREG_TO_REG (i32 0),
- (VPBLENDWrri (v4i32 (V_SET0)),
- (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm),
- (i8 3)), sub_xmm)>;
+ (v4i32 (VPBLENDWrri (v4i32 (V_SET0)),
+ (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm)),
+ (i8 3))), sub_xmm)>;
def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
(SUBREG_TO_REG (i32 0),
- (VBLENDPDrri (v2f64 (V_SET0)),
- (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm),
- (i8 1)), sub_xmm)>;
+ (v2f64 (VBLENDPDrri (v2f64 (V_SET0)),
+ (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm)),
+ (i8 1))), sub_xmm)>;
def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),
(SUBREG_TO_REG (i32 0),
- (VPBLENDWrri (v2i64 (V_SET0)),
- (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm),
- (i8 0xf)), sub_xmm)>;
+ (v2i64 (VPBLENDWrri (v2i64 (V_SET0)),
+ (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm)),
+ (i8 0xf))), sub_xmm)>;
}
// Prefer a movss or movsd over a blendps when optimizing for size. These were
@@ -7111,10 +7129,10 @@ def MOVNTSD : I<0x2B, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
} // SchedRW
def : Pat<(nontemporalstore FR32:$src, addr:$dst),
- (MOVNTSS addr:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
+ (MOVNTSS addr:$dst, (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)))>;
def : Pat<(nontemporalstore FR64:$src, addr:$dst),
- (MOVNTSD addr:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
+ (MOVNTSD addr:$dst, (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))>;
} // AddedComplexity
} // HasSSE4A
@@ -7535,16 +7553,16 @@ let Predicates = [HasF16C, NoVLX] in {
// more consistent with other instructions, which are always controlled by it.
// It's encoded as 0b100.
def : Pat<(fp_to_f16 FR32:$src),
- (i16 (EXTRACT_SUBREG (VMOVPDI2DIrr (VCVTPS2PHrr
- (COPY_TO_REGCLASS FR32:$src, VR128), 4)), sub_16bit))>;
+ (i16 (EXTRACT_SUBREG (VMOVPDI2DIrr (v8i16 (VCVTPS2PHrr
+ (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 4))), sub_16bit))>;
def : Pat<(f16_to_fp GR16:$src),
- (f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
- (COPY_TO_REGCLASS (MOVSX32rr16 GR16:$src), VR128)), FR32)) >;
+ (f32 (COPY_TO_REGCLASS (v4f32 (VCVTPH2PSrr
+ (v4i32 (COPY_TO_REGCLASS (MOVSX32rr16 GR16:$src), VR128)))), FR32)) >;
def : Pat<(f16_to_fp (i16 (fp_to_f16 FR32:$src))),
- (f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
- (VCVTPS2PHrr (COPY_TO_REGCLASS FR32:$src, VR128), 4)), FR32)) >;
+ (f32 (COPY_TO_REGCLASS (v4f32 (VCVTPH2PSrr
+ (v8i16 (VCVTPS2PHrr (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 4)))), FR32)) >;
}
//===----------------------------------------------------------------------===//
@@ -7718,45 +7736,45 @@ let Predicates = [HasAVX2, NoVLX] in {
// Provide fallback in case the load node that is used in the patterns above
// is used by additional users, which prevents the pattern selection.
def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
- (VBROADCASTSSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
+ (VBROADCASTSSrr (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)))>;
def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
- (VBROADCASTSSYrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
+ (VBROADCASTSSYrr (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)))>;
def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
- (VBROADCASTSDYrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
+ (VBROADCASTSDYrr (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))>;
}
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
def : Pat<(v16i8 (X86VBroadcast GR8:$src)),
- (VPBROADCASTBrr (COPY_TO_REGCLASS
+ (VPBROADCASTBrr (v16i8 (COPY_TO_REGCLASS
(i32 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
GR8:$src, sub_8bit)),
- VR128))>;
+ VR128)))>;
def : Pat<(v32i8 (X86VBroadcast GR8:$src)),
- (VPBROADCASTBYrr (COPY_TO_REGCLASS
+ (VPBROADCASTBYrr (v16i8 (COPY_TO_REGCLASS
(i32 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
GR8:$src, sub_8bit)),
- VR128))>;
+ VR128)))>;
def : Pat<(v8i16 (X86VBroadcast GR16:$src)),
- (VPBROADCASTWrr (COPY_TO_REGCLASS
+ (VPBROADCASTWrr (v8i16 (COPY_TO_REGCLASS
(i32 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
GR16:$src, sub_16bit)),
- VR128))>;
+ VR128)))>;
def : Pat<(v16i16 (X86VBroadcast GR16:$src)),
- (VPBROADCASTWYrr (COPY_TO_REGCLASS
+ (VPBROADCASTWYrr (v8i16 (COPY_TO_REGCLASS
(i32 (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
GR16:$src, sub_16bit)),
- VR128))>;
+ VR128)))>;
}
let Predicates = [HasAVX2, NoVLX] in {
def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
- (VPBROADCASTDrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
+ (VPBROADCASTDrr (v4i32 (COPY_TO_REGCLASS GR32:$src, VR128)))>;
def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
- (VPBROADCASTDYrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
+ (VPBROADCASTDYrr (v4i32 (COPY_TO_REGCLASS GR32:$src, VR128)))>;
def : Pat<(v2i64 (X86VBroadcast GR64:$src)),
- (VPBROADCASTQrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
+ (VPBROADCASTQrr (v2i64 (COPY_TO_REGCLASS GR64:$src, VR128)))>;
def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
- (VPBROADCASTQYrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
+ (VPBROADCASTQYrr (v2i64 (COPY_TO_REGCLASS GR64:$src, VR128)))>;
}
// AVX1 broadcast patterns
@@ -7774,7 +7792,7 @@ def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
let Predicates = [HasAVX, NoVLX] in {
// 128bit broadcasts:
def : Pat<(v2f64 (X86VBroadcast f64:$src)),
- (VMOVDDUPrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
+ (VMOVDDUPrr (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))>;
def : Pat<(v2f64 (X86VBroadcast (loadf64 addr:$src))),
(VMOVDDUPrm addr:$src)>;
@@ -7786,29 +7804,29 @@ let Predicates = [HasAVX, NoVLX] in {
let Predicates = [HasAVX1Only] in {
def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
- (VPERMILPSri (COPY_TO_REGCLASS FR32:$src, VR128), 0)>;
+ (VPERMILPSri (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 0)>;
def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
(VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
- (VPERMILPSri (COPY_TO_REGCLASS FR32:$src, VR128), 0), sub_xmm),
- (VPERMILPSri (COPY_TO_REGCLASS FR32:$src, VR128), 0), 1)>;
+ (v4f32 (VPERMILPSri (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 0)), sub_xmm),
+ (v4f32 (VPERMILPSri (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 0)), 1)>;
def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
(VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
- (VMOVDDUPrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_xmm),
- (VMOVDDUPrr (COPY_TO_REGCLASS FR64:$src, VR128)), 1)>;
+ (v2f64 (VMOVDDUPrr (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))), sub_xmm),
+ (v2f64 (VMOVDDUPrr (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))), 1)>;
def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
- (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0)>;
+ (VPSHUFDri (v4i32 (COPY_TO_REGCLASS GR32:$src, VR128)), 0)>;
def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
(VINSERTF128rr (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
- (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), sub_xmm),
- (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), 1)>;
+ (v4i32 (VPSHUFDri (v4i32 (COPY_TO_REGCLASS GR32:$src, VR128)), 0)), sub_xmm),
+ (v4i32 (VPSHUFDri (v4i32 (COPY_TO_REGCLASS GR32:$src, VR128)), 0)), 1)>;
def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
(VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
- (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), sub_xmm),
- (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), 1)>;
+ (v4i32 (VPSHUFDri (v4i32 (COPY_TO_REGCLASS GR64:$src, VR128)), 0x44)), sub_xmm),
+ (v4i32 (VPSHUFDri (v4i32 (COPY_TO_REGCLASS GR64:$src, VR128)), 0x44)), 1)>;
def : Pat<(v2i64 (X86VBroadcast i64:$src)),
- (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44)>;
+ (VPSHUFDri (v4i32 (COPY_TO_REGCLASS GR64:$src, VR128)), 0x44)>;
def : Pat<(v2i64 (X86VBroadcast (loadi64 addr:$src))),
(VMOVDDUPrm addr:$src)>;
}
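
The shuffle immediates in this AVX1-only block do the broadcasting, two bits per destination lane:

    // 0x00 = 0b00'00'00'00 -> lanes {0,0,0,0}: splat the low 32-bit element
    // 0x44 = 0b01'00'01'00 -> lanes {0,1,0,1}: repeat the low 64 bits, i.e.
    //        a 64-bit broadcast expressed as a 32-bit VPSHUFD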
@@ -7991,7 +8009,7 @@ multiclass maskmov_lowering<string InstrStr, RegisterClass RC, ValueType VT,
def: Pat<(VT (X86mload addr:$ptr, (MaskVT RC:$mask), (VT RC:$src0))),
(!cast<Instruction>(BlendStr#"rr")
RC:$src0,
- (!cast<Instruction>(InstrStr#"rm") RC:$mask, addr:$ptr),
+ (VT (!cast<Instruction>(InstrStr#"rm") RC:$mask, addr:$ptr)),
RC:$mask)>;
}
let Predicates = [HasAVX] in {
@@ -8171,49 +8189,49 @@ let Predicates = [UseAVX2] in {
}
//===----------------------------------------------------------------------===//
-// Extra selection patterns for FR128, f128, f128mem
+// Extra selection patterns for f128, f128mem
// movaps is shorter than movdqa. movaps is in SSE and movdqa is in SSE2.
-def : Pat<(alignedstore (f128 FR128:$src), addr:$dst),
- (MOVAPSmr addr:$dst, (COPY_TO_REGCLASS (f128 FR128:$src), VR128))>;
-def : Pat<(store (f128 FR128:$src), addr:$dst),
- (MOVUPSmr addr:$dst, (COPY_TO_REGCLASS (f128 FR128:$src), VR128))>;
+def : Pat<(alignedstore (f128 VR128:$src), addr:$dst),
+ (MOVAPSmr addr:$dst, (COPY_TO_REGCLASS (f128 VR128:$src), VR128))>;
+def : Pat<(store (f128 VR128:$src), addr:$dst),
+ (MOVUPSmr addr:$dst, (COPY_TO_REGCLASS (f128 VR128:$src), VR128))>;
def : Pat<(alignedloadf128 addr:$src),
- (COPY_TO_REGCLASS (MOVAPSrm addr:$src), FR128)>;
+ (COPY_TO_REGCLASS (MOVAPSrm addr:$src), VR128)>;
def : Pat<(loadf128 addr:$src),
- (COPY_TO_REGCLASS (MOVUPSrm addr:$src), FR128)>;
+ (COPY_TO_REGCLASS (MOVUPSrm addr:$src), VR128)>;
// andps is shorter than andpd or pand. andps is SSE and andpd/pand are in SSE2
-def : Pat<(X86fand FR128:$src1, (memopf128 addr:$src2)),
+def : Pat<(f128 (X86fand VR128:$src1, (memopf128 addr:$src2))),
(COPY_TO_REGCLASS
- (ANDPSrm (COPY_TO_REGCLASS FR128:$src1, VR128), f128mem:$src2),
- FR128)>;
+ (ANDPSrm (COPY_TO_REGCLASS VR128:$src1, VR128), f128mem:$src2),
+ VR128)>;
-def : Pat<(X86fand FR128:$src1, FR128:$src2),
+def : Pat<(f128 (X86fand VR128:$src1, VR128:$src2)),
(COPY_TO_REGCLASS
- (ANDPSrr (COPY_TO_REGCLASS FR128:$src1, VR128),
- (COPY_TO_REGCLASS FR128:$src2, VR128)), FR128)>;
+ (ANDPSrr (COPY_TO_REGCLASS VR128:$src1, VR128),
+ (COPY_TO_REGCLASS VR128:$src2, VR128)), VR128)>;
-def : Pat<(X86for FR128:$src1, (memopf128 addr:$src2)),
+def : Pat<(f128 (X86for VR128:$src1, (memopf128 addr:$src2))),
(COPY_TO_REGCLASS
- (ORPSrm (COPY_TO_REGCLASS FR128:$src1, VR128), f128mem:$src2),
- FR128)>;
+ (ORPSrm (COPY_TO_REGCLASS VR128:$src1, VR128), f128mem:$src2),
+ VR128)>;
-def : Pat<(X86for FR128:$src1, FR128:$src2),
+def : Pat<(f128 (X86for VR128:$src1, VR128:$src2)),
(COPY_TO_REGCLASS
- (ORPSrr (COPY_TO_REGCLASS FR128:$src1, VR128),
- (COPY_TO_REGCLASS FR128:$src2, VR128)), FR128)>;
+ (ORPSrr (COPY_TO_REGCLASS VR128:$src1, VR128),
+ (COPY_TO_REGCLASS VR128:$src2, VR128)), VR128)>;
-def : Pat<(X86fxor FR128:$src1, (memopf128 addr:$src2)),
+def : Pat<(f128 (X86fxor VR128:$src1, (memopf128 addr:$src2))),
(COPY_TO_REGCLASS
- (XORPSrm (COPY_TO_REGCLASS FR128:$src1, VR128), f128mem:$src2),
- FR128)>;
+ (XORPSrm (COPY_TO_REGCLASS VR128:$src1, VR128), f128mem:$src2),
+ VR128)>;
-def : Pat<(X86fxor FR128:$src1, FR128:$src2),
+def : Pat<(f128 (X86fxor VR128:$src1, VR128:$src2)),
(COPY_TO_REGCLASS
- (XORPSrr (COPY_TO_REGCLASS FR128:$src1, VR128),
- (COPY_TO_REGCLASS FR128:$src2, VR128)), FR128)>;
+ (XORPSrr (COPY_TO_REGCLASS VR128:$src1, VR128),
+ (COPY_TO_REGCLASS VR128:$src2, VR128)), VR128)>;
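
After the class swap, the COPY_TO_REGCLASS nodes in this f128 block no longer move values between register classes: the operands already live in VR128, so the copies only re-type the value for the v4f32-flavored MOVAPS/ANDPS/ORPS/XORPS encodings and reduce to plain COPYs that the coalescer removes. Presumably they are kept so the output patterns' shape does not change; e.g. the register-register AND effectively selects to (my simplification):

    // (f128 (X86fand VR128:$src1, VR128:$src2)) -> ANDPSrr VR128:$src1, VR128:$src2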
//===----------------------------------------------------------------------===//
// GFNI instructions
diff --git a/llvm/lib/Target/X86/X86InstrVecCompiler.td b/llvm/lib/Target/X86/X86InstrVecCompiler.td
index ae36391fa35..322bdb74e2d 100644
--- a/llvm/lib/Target/X86/X86InstrVecCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrVecCompiler.td
@@ -263,7 +263,7 @@ multiclass subvec_zero_lowering<string MoveStr,
def : Pat<(DstTy (insert_subvector (bitconvert (ZeroTy immAllZerosV)),
(SrcTy RC:$src), (iPTR 0))),
(SUBREG_TO_REG (i64 0),
- (!cast<Instruction>("VMOV"#MoveStr#"rr") RC:$src), SubIdx)>;
+ (SrcTy (!cast<Instruction>("VMOV"#MoveStr#"rr") RC:$src)), SubIdx)>;
}
let Predicates = [HasAVX, NoVLX] in {
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.td b/llvm/lib/Target/X86/X86RegisterInfo.td
index b17b3d89cd7..ee9e7891f9f 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.td
+++ b/llvm/lib/Target/X86/X86RegisterInfo.td
@@ -504,8 +504,6 @@ def FR32 : RegisterClass<"X86", [f32], 32, (sequence "XMM%u", 0, 15)>;
def FR64 : RegisterClass<"X86", [f64], 64, (add FR32)>;
-def FR128 : RegisterClass<"X86", [f128], 128, (add FR32)>;
-
// FIXME: This sets up the floating point register files as though they are f64
// values, though they really are f80 values. This will cause us to spill
@@ -527,16 +525,16 @@ def RST : RegisterClass<"X86", [f80, f64, f32], 32, (sequence "ST%u", 0, 7)> {
// Generic vector registers: VR64 and VR128.
// Ensure that float types are declared first - only float is legal on SSE1.
def VR64: RegisterClass<"X86", [x86mmx], 64, (sequence "MM%u", 0, 7)>;
-def VR128 : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64],
+def VR128 : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64, f128],
128, (add FR32)>;
def VR256 : RegisterClass<"X86", [v8f32, v4f64, v32i8, v16i16, v8i32, v4i64],
256, (sequence "YMM%u", 0, 15)>;
// Special classes that help the assembly parser choose some alternate
// instructions to favor 2-byte VEX encodings.
-def VR128L : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64],
+def VR128L : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64, f128],
128, (sequence "XMM%u", 0, 7)>;
-def VR128H : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64],
+def VR128H : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64, f128],
128, (sequence "XMM%u", 8, 15)>;
def VR256L : RegisterClass<"X86", [v8f32, v4f64, v32i8, v16i16, v8i32, v4i64],
256, (sequence "YMM%u", 0, 7)>;
@@ -567,7 +565,7 @@ def FR32X : RegisterClass<"X86", [f32], 32, (sequence "XMM%u", 0, 31)>;
def FR64X : RegisterClass<"X86", [f64], 64, (add FR32X)>;
// Extended VR128 and VR256 for AVX-512 instructions
-def VR128X : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64],
+def VR128X : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64, f128],
128, (add FR32X)>;
def VR256X : RegisterClass<"X86", [v8f32, v4f64, v32i8, v16i16, v8i32, v4i64],
256, (sequence "YMM%u", 0, 31)>;
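
Two notes on the register-class hunks. First, FR128 spanned the same sixteen XMM registers as FR32 (via add FR32), so deleting it loses no registers; the patch only folds its single value type into the type lists of VR128, VR128L, VR128H and VR128X. Second, f128 is appended at the end of each list, leaving v4f32 first, which preserves the ordering constraint stated in the comment above ("only float is legal on SSE1"). The resulting base definition, assembled from the '+' lines above:

    def VR128 : RegisterClass<"X86",
                              [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64, f128],
                              128, (add FR32)>;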