author     Craig Topper <craig.topper@intel.com>   2017-08-27 19:03:36 +0000
committer  Craig Topper <craig.topper@intel.com>   2017-08-27 19:03:36 +0000
commit     80075a5fb782989f2b14213e44b4e689f9929789 (patch)
tree       ef6cfef621c368147aacfa4f009769a9b54c23d7 /llvm/lib
parent     55bc389aeb2495163b21559da5816f776a4bb16a (diff)
download   bcm5719-llvm-80075a5fb782989f2b14213e44b4e689f9929789.tar.gz
           bcm5719-llvm-80075a5fb782989f2b14213e44b4e689f9929789.zip
[AVX512] Add more patterns for using masked moves for subvector extracts of the lowest subvector. This time with bitcasts between the vselect and the extract.
llvm-svn: 311856
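
For illustration, a minimal sketch (not part of this commit; the function names are made up) of the kind of source these patterns target, built from standard AVX-512VL intrinsics and compiled with AVX-512VL enabled (e.g. -march=skylake-avx512). Applying a dword-granularity mask to the low half of a 256-bit vector is what introduces the bitcast between the vselect and the extract_subvector in the selection DAG:

    #include <immintrin.h>

    /* Merge-masked: keep dwords of the low 128-bit half of v where the
       corresponding bit of k is set, dwords of src0 elsewhere. The cast
       intrinsic is a free extract of the low subvector; with the patterns
       below, the whole expression can select to a single merge-masked
       VMOVDQA32 (the "rrk" form) on the xmm subregister instead of an
       extract followed by a blend. */
    __m128i low_half_merge(__m256i v, __mmask8 k, __m128i src0) {
        return _mm_mask_mov_epi32(src0, k, _mm256_castsi256_si128(v));
    }

    /* Zero-masked variant: unselected lanes are zeroed, matching the
       Cast.ImmAllZerosV patterns (the "rrkz" form). */
    __m128i low_half_zero(__m256i v, __mmask8 k) {
        return _mm_maskz_mov_epi32(k, _mm256_castsi256_si128(v));
    }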
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/X86/X86InstrAVX512.td | 181
1 file changed, 58 insertions, 123 deletions
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index f9a1c6ddb6c..12b8b0b1349 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -3845,138 +3845,73 @@ let Predicates = [HasVLX] in {
(VMOVDQU32Z256mr addr:$dst, (v32i8 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
}
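+// Match a vselect of a bitconvert of an extract of the lowest subvector of a
+// wider vector, and lower it to a masked move of the corresponding
+// subregister. From is the wide source type, To the extracted subvector type,
+// and Cast the type the mask operates on; the first pattern covers the
+// merge-masked (rrk) form, the second the zero-masked (rrkz) form.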
+multiclass masked_move_for_extract<string InstrStr, X86VectorVTInfo From,
+ X86VectorVTInfo To, X86VectorVTInfo Cast> {
+ def : Pat<(Cast.VT (vselect Cast.KRCWM:$mask,
+ (bitconvert
+ (To.VT (extract_subvector
+ (From.VT From.RC:$src), (iPTR 0)))),
+ To.RC:$src0)),
+ (Cast.VT (!cast<Instruction>(InstrStr#"rrk")
+ Cast.RC:$src0, Cast.KRCWM:$mask,
+ (EXTRACT_SUBREG From.RC:$src, To.SubRegIdx)))>;
+
+ def : Pat<(Cast.VT (vselect Cast.KRCWM:$mask,
+ (bitconvert
+ (To.VT (extract_subvector
+ (From.VT From.RC:$src), (iPTR 0)))),
+ Cast.ImmAllZerosV)),
+ (Cast.VT (!cast<Instruction>(InstrStr#"rrkz")
+ Cast.KRCWM:$mask,
+ (EXTRACT_SUBREG From.RC:$src, To.SubRegIdx)))>;
+}
+
+
let Predicates = [HasVLX] in {
// A masked extract from the first 128-bits of a 256-bit vector can be
// implemented with masked move.
-def : Pat<(v2i64 (vselect VK2WM:$mask,
- (extract_subvector (v4i64 VR256X:$src), (iPTR 0)),
- VR128X:$src0)),
- (v2i64 (VMOVDQA64Z128rrk VR128X:$src0, VK2WM:$mask,
- (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)))>;
-def : Pat<(v4i32 (vselect VK4WM:$mask,
- (extract_subvector (v8i32 VR256X:$src), (iPTR 0)),
- VR128X:$src0)),
- (v4i32 (VMOVDQA32Z128rrk VR128X:$src0, VK4WM:$mask,
- (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)))>;
-def : Pat<(v2f64 (vselect VK2WM:$mask,
- (extract_subvector (v4f64 VR256X:$src), (iPTR 0)),
- VR128X:$src0)),
- (v2f64 (VMOVAPDZ128rrk VR128X:$src0, VK2WM:$mask,
- (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)))>;
-def : Pat<(v4f32 (vselect VK4WM:$mask,
- (extract_subvector (v8f32 VR256X:$src), (iPTR 0)),
- VR128X:$src0)),
- (v4f32 (VMOVAPSZ128rrk VR128X:$src0, VK4WM:$mask,
- (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)))>;
-
-def : Pat<(v2i64 (vselect VK2WM:$mask,
- (extract_subvector (v4i64 VR256X:$src), (iPTR 0)),
- (bitconvert (v4i32 immAllZerosV)))),
- (v2i64 (VMOVDQA64Z128rrkz VK2WM:$mask,
- (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)))>;
-def : Pat<(v4i32 (vselect VK4WM:$mask,
- (extract_subvector (v8i32 VR256X:$src), (iPTR 0)),
- (bitconvert (v4i32 immAllZerosV)))),
- (v4i32 (VMOVDQA32Z128rrkz VK4WM:$mask,
- (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)))>;
-def : Pat<(v2f64 (vselect VK2WM:$mask,
- (extract_subvector (v4f64 VR256X:$src), (iPTR 0)),
- (bitconvert (v4i32 immAllZerosV)))),
- (v2f64 (VMOVAPDZ128rrkz VK2WM:$mask,
- (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)))>;
-def : Pat<(v4f32 (vselect VK4WM:$mask,
- (extract_subvector (v8f32 VR256X:$src), (iPTR 0)),
- (bitconvert (v4i32 immAllZerosV)))),
- (v4f32 (VMOVAPSZ128rrkz VK4WM:$mask,
- (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)))>;
+defm : masked_move_for_extract<"VMOVDQA64Z128", v4i64x_info, v2i64x_info, v2i64x_info>;
+defm : masked_move_for_extract<"VMOVDQA64Z128", v8i32x_info, v4i32x_info, v2i64x_info>;
+defm : masked_move_for_extract<"VMOVDQA64Z128", v16i16x_info, v8i16x_info, v2i64x_info>;
+defm : masked_move_for_extract<"VMOVDQA64Z128", v32i8x_info, v16i8x_info, v2i64x_info>;
+defm : masked_move_for_extract<"VMOVDQA32Z128", v4i64x_info, v2i64x_info, v4i32x_info>;
+defm : masked_move_for_extract<"VMOVDQA32Z128", v8i32x_info, v4i32x_info, v4i32x_info>;
+defm : masked_move_for_extract<"VMOVDQA32Z128", v16i16x_info, v8i16x_info, v4i32x_info>;
+defm : masked_move_for_extract<"VMOVDQA32Z128", v32i8x_info, v16i8x_info, v4i32x_info>;
+defm : masked_move_for_extract<"VMOVAPDZ128", v4f64x_info, v2f64x_info, v2f64x_info>;
+defm : masked_move_for_extract<"VMOVAPDZ128", v8f32x_info, v4f32x_info, v2f64x_info>;
+defm : masked_move_for_extract<"VMOVAPSZ128", v4f64x_info, v2f64x_info, v4f32x_info>;
+defm : masked_move_for_extract<"VMOVAPSZ128", v8f32x_info, v4f32x_info, v4f32x_info>;
// A masked extract from the first 128-bits of a 512-bit vector can be
// implemented with masked move.
-def : Pat<(v2i64 (vselect VK2WM:$mask,
- (extract_subvector (v8i64 VR512:$src), (iPTR 0)),
- VR128X:$src0)),
- (v2i64 (VMOVDQA64Z128rrk VR128X:$src0, VK2WM:$mask,
- (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm)))>;
-def : Pat<(v4i32 (vselect VK4WM:$mask,
- (extract_subvector (v16i32 VR512:$src), (iPTR 0)),
- VR128X:$src0)),
- (v4i32 (VMOVDQA32Z128rrk VR128X:$src0, VK4WM:$mask,
- (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm)))>;
-def : Pat<(v2f64 (vselect VK2WM:$mask,
- (extract_subvector (v8f64 VR512:$src), (iPTR 0)),
- VR128X:$src0)),
- (v2f64 (VMOVAPDZ128rrk VR128X:$src0, VK2WM:$mask,
- (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm)))>;
-def : Pat<(v4f32 (vselect VK4WM:$mask,
- (extract_subvector (v16f32 VR512:$src), (iPTR 0)),
- VR128X:$src0)),
- (v4f32 (VMOVAPSZ128rrk VR128X:$src0, VK4WM:$mask,
- (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm)))>;
-
-def : Pat<(v2i64 (vselect VK2WM:$mask,
- (extract_subvector (v8i64 VR512:$src), (iPTR 0)),
- (bitconvert (v4i32 immAllZerosV)))),
- (v2i64 (VMOVDQA64Z128rrkz VK2WM:$mask,
- (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm)))>;
-def : Pat<(v4i32 (vselect VK4WM:$mask,
- (extract_subvector (v16i32 VR512:$src), (iPTR 0)),
- (bitconvert (v4i32 immAllZerosV)))),
- (v4i32 (VMOVDQA32Z128rrkz VK4WM:$mask,
- (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm)))>;
-def : Pat<(v2f64 (vselect VK2WM:$mask,
- (extract_subvector (v8f64 VR512:$src), (iPTR 0)),
- (bitconvert (v4i32 immAllZerosV)))),
- (v2f64 (VMOVAPDZ128rrkz VK2WM:$mask,
- (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm)))>;
-def : Pat<(v4f32 (vselect VK4WM:$mask,
- (extract_subvector (v16f32 VR512:$src), (iPTR 0)),
- (bitconvert (v4i32 immAllZerosV)))),
- (v4f32 (VMOVAPSZ128rrkz VK4WM:$mask,
- (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm)))>;
+defm : masked_move_for_extract<"VMOVDQA64Z128", v8i64_info, v2i64x_info, v2i64x_info>;
+defm : masked_move_for_extract<"VMOVDQA64Z128", v16i32_info, v4i32x_info, v2i64x_info>;
+defm : masked_move_for_extract<"VMOVDQA64Z128", v32i16_info, v8i16x_info, v2i64x_info>;
+defm : masked_move_for_extract<"VMOVDQA64Z128", v64i8_info, v16i8x_info, v2i64x_info>;
+defm : masked_move_for_extract<"VMOVDQA32Z128", v8i64_info, v2i64x_info, v4i32x_info>;
+defm : masked_move_for_extract<"VMOVDQA32Z128", v16i32_info, v4i32x_info, v4i32x_info>;
+defm : masked_move_for_extract<"VMOVDQA32Z128", v32i16_info, v8i16x_info, v4i32x_info>;
+defm : masked_move_for_extract<"VMOVDQA32Z128", v64i8_info, v16i8x_info, v4i32x_info>;
+defm : masked_move_for_extract<"VMOVAPDZ128", v8f64_info, v2f64x_info, v2f64x_info>;
+defm : masked_move_for_extract<"VMOVAPDZ128", v16f32_info, v4f32x_info, v2f64x_info>;
+defm : masked_move_for_extract<"VMOVAPSZ128", v8f64_info, v2f64x_info, v4f32x_info>;
+defm : masked_move_for_extract<"VMOVAPSZ128", v16f32_info, v4f32x_info, v4f32x_info>;
// A masked extract from the first 256-bits of a 512-bit vector can be
// implemented with masked move.
-def : Pat<(v4i64 (vselect VK4WM:$mask,
- (extract_subvector (v8i64 VR512:$src), (iPTR 0)),
- VR256X:$src0)),
- (v4i64 (VMOVDQA64Z256rrk VR256X:$src0, VK4WM:$mask,
- (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm)))>;
-def : Pat<(v8i32 (vselect VK8WM:$mask,
- (extract_subvector (v16i32 VR512:$src), (iPTR 0)),
- VR256X:$src0)),
- (v8i32 (VMOVDQA32Z256rrk VR256X:$src0, VK8WM:$mask,
- (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm)))>;
-def : Pat<(v4f64 (vselect VK4WM:$mask,
- (extract_subvector (v8f64 VR512:$src), (iPTR 0)),
- VR256X:$src0)),
- (v4f64 (VMOVAPDZ256rrk VR256X:$src0, VK4WM:$mask,
- (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm)))>;
-def : Pat<(v8f32 (vselect VK8WM:$mask,
- (extract_subvector (v16f32 VR512:$src), (iPTR 0)),
- VR256X:$src0)),
- (v8f32 (VMOVAPSZ256rrk VR256X:$src0, VK8WM:$mask,
- (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm)))>;
-
-def : Pat<(v4i64 (vselect VK4WM:$mask,
- (extract_subvector (v8i64 VR512:$src), (iPTR 0)),
- (bitconvert (v8i32 immAllZerosV)))),
- (v4i64 (VMOVDQA64Z256rrkz VK4WM:$mask,
- (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm)))>;
-def : Pat<(v8i32 (vselect VK8WM:$mask,
- (extract_subvector (v16i32 VR512:$src), (iPTR 0)),
- (bitconvert (v8i32 immAllZerosV)))),
- (v8i32 (VMOVDQA32Z256rrkz VK8WM:$mask,
- (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm)))>;
-def : Pat<(v4f64 (vselect VK4WM:$mask,
- (extract_subvector (v8f64 VR512:$src), (iPTR 0)),
- (bitconvert (v8i32 immAllZerosV)))),
- (v4f64 (VMOVAPDZ256rrkz VK4WM:$mask,
- (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm)))>;
-def : Pat<(v8f32 (vselect VK8WM:$mask,
- (extract_subvector (v16f32 VR512:$src), (iPTR 0)),
- (bitconvert (v8i32 immAllZerosV)))),
- (v8f32 (VMOVAPSZ256rrkz VK8WM:$mask,
- (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm)))>;
+defm : masked_move_for_extract<"VMOVDQA64Z256", v8i64_info, v4i64x_info, v4i64x_info>;
+defm : masked_move_for_extract<"VMOVDQA64Z256", v16i32_info, v8i32x_info, v4i64x_info>;
+defm : masked_move_for_extract<"VMOVDQA64Z256", v32i16_info, v16i16x_info, v4i64x_info>;
+defm : masked_move_for_extract<"VMOVDQA64Z256", v64i8_info, v32i8x_info, v4i64x_info>;
+defm : masked_move_for_extract<"VMOVDQA32Z256", v8i64_info, v4i64x_info, v8i32x_info>;
+defm : masked_move_for_extract<"VMOVDQA32Z256", v16i32_info, v8i32x_info, v8i32x_info>;
+defm : masked_move_for_extract<"VMOVDQA32Z256", v32i16_info, v16i16x_info, v8i32x_info>;
+defm : masked_move_for_extract<"VMOVDQA32Z256", v64i8_info, v32i8x_info, v8i32x_info>;
+defm : masked_move_for_extract<"VMOVAPDZ256", v8f64_info, v4f64x_info, v4f64x_info>;
+defm : masked_move_for_extract<"VMOVAPDZ256", v16f32_info, v8f32x_info, v4f64x_info>;
+defm : masked_move_for_extract<"VMOVAPSZ256", v8f64_info, v4f64x_info, v8f32x_info>;
+defm : masked_move_for_extract<"VMOVAPSZ256", v16f32_info, v8f32x_info, v8f32x_info>;
}
// Move Int Doubleword to Packed Double Int
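
The 512-bit groups follow the same shape. A companion sketch (again illustrative, not from the commit) exercising the 128-from-512 and 256-from-512 instantiations:

    #include <immintrin.h>

    /* Low 128-bit quarter of a 512-bit vector, zero-masked at dword
       granularity: can now select to a single VMOVDQA32Z128rrkz on the
       xmm subregister. */
    __m128i low_quarter_zero(__m512i v, __mmask8 k) {
        return _mm_maskz_mov_epi32(k, _mm512_castsi512_si128(v));
    }

    /* Low 256-bit half of a 512-bit vector, merge-masked at dword
       granularity: can now select to a single VMOVDQA32Z256rrk on the
       ymm subregister. */
    __m256i low_half_merge512(__m512i v, __mmask8 k, __m256i src0) {
        return _mm256_mask_mov_epi32(src0, k, _mm512_castsi512_si256(v));
    }

Note the design choice in the multiclass: the instruction and the masking granularity are chosen by the Cast type (VMOVDQA32/VMOVAPS for dword masks, VMOVDQA64/VMOVAPD for qword masks), independently of the payload element type given by From and To. That is what lets byte and word payloads, which have no masked full-register move of their own in AVX-512F/VL, reuse the dword and qword moves, and it is why twelve defm lines per size class can replace the eight hand-written merge and zero patterns while also adding coverage for the i16 and i8 element types.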