author    Igor Breger <igor.breger@intel.com>  2016-05-01 08:40:00 +0000
committer Igor Breger <igor.breger@intel.com>  2016-05-01 08:40:00 +0000
commit 131008fbcb6ca3a3c3d46efef9af17e4ba253006
tree   84be11c20506826609b1ed4fedb63feed7b532eb /llvm/lib/Target/X86
parent e430de8be62e2789d6fe7f3c8ef23f00462c9440
Change the AVX512 broadcastsd/ss patterns' interaction with spilling. The new implementation takes a scalar register and generates the vector directly, without a COPY_TO_REGCLASS that would turn the value into a VR128 register. The issue is that during register allocation we may spill a scalar value using 128-bit loads and stores, wasting cache bandwidth.
Differential Revision: http://reviews.llvm.org/D19579

llvm-svn: 268190
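To make the cache-bandwidth point concrete, here is a minimal C++ sketch (not part of the patch; it assumes a compiler targeting AVX-512, e.g. with -mavx512f):

    #include <immintrin.h>

    // Broadcast a scalar float into all 16 lanes of a 512-bit vector.
    // This lowers to X86VBroadcast of a scalar (FR32X) value, i.e.
    // vbroadcastss. Previously the scalar was first copied into a VR128X
    // register via COPY_TO_REGCLASS, so spilling it used 128-bit loads and
    // stores; with the codegen-only *_s variants added below, the value
    // stays in a scalar register class and a spill reload can be folded
    // into a memory-operand vbroadcastss.
    __m512 splat(float x) {
      return _mm512_set1_ps(x);
    }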
Diffstat (limited to 'llvm/lib/Target/X86')
-rw-r--r--  llvm/lib/Target/X86/X86InstrAVX512.td | 90
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp  | 86
-rw-r--r--  llvm/lib/Target/X86/X86InstrSSE.td    | 32
3 files changed, 98 insertions, 110 deletions
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index f23cbb7b43d..8212fedd5b7 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -821,6 +821,36 @@ def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
//===---------------------------------------------------------------------===//
// AVX-512 BROADCAST
//---
+// broadcast with a scalar argument.
+multiclass avx512_broadcast_scalar<bits<8> opc, string OpcodeStr,
+ X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo> {
+
+ let isCodeGenOnly = 1 in {
+ def r_s : I< opc, MRMSrcReg, (outs DestInfo.RC:$dst),
+ (ins SrcInfo.FRC:$src), OpcodeStr#"\t{$src, $dst|$dst, $src}",
+ [(set DestInfo.RC:$dst, (DestInfo.VT (X86VBroadcast SrcInfo.FRC:$src)))]>,
+ Requires<[HasAVX512]>, T8PD, EVEX;
+
+ let Constraints = "$src0 = $dst" in
+ def rk_s : I< opc, MRMSrcReg, (outs DestInfo.RC:$dst),
+ (ins DestInfo.RC:$src0, DestInfo.KRCWM:$mask, SrcInfo.FRC:$src),
+ OpcodeStr#"\t{$src, $dst {${mask}} |$dst {${mask}}, $src}",
+ [(set DestInfo.RC:$dst,
+ (vselect DestInfo.KRCWM:$mask,
+ (DestInfo.VT (X86VBroadcast SrcInfo.FRC:$src)),
+ DestInfo.RC:$src0))]>,
+ Requires<[HasAVX512]>, T8PD, EVEX, EVEX_K;
+
+ def rkz_s : I< opc, MRMSrcReg, (outs DestInfo.RC:$dst),
+ (ins DestInfo.KRCWM:$mask, SrcInfo.FRC:$src),
+ OpcodeStr#"\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}",
+ [(set DestInfo.RC:$dst,
+ (vselect DestInfo.KRCWM:$mask,
+ (DestInfo.VT (X86VBroadcast SrcInfo.FRC:$src)),
+ DestInfo.ImmAllZerosV))]>,
+ Requires<[HasAVX512]>, T8PD, EVEX, EVEX_KZ;
+ } // let isCodeGenOnly = 1 in
+}
multiclass avx512_broadcast_rm<bits<8> opc, string OpcodeStr,
X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo> {
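For reference, the three new forms in the hunk above correspond to the unmasked, merge-masked, and zero-masked broadcasts visible at the source level. A hedged C++ sketch (note these intrinsics broadcast from the low lane of an XMM argument; the *_s instructions are the codegen-only scalar-register counterparts):

    #include <immintrin.h>

    // Merge-masking, matching the rk_s pattern (EVEX_K): lanes whose mask
    // bit is 0 keep the corresponding element of src0.
    __m512 splat_merge(__m512 src0, __mmask16 k, __m128 a) {
      return _mm512_mask_broadcastss_ps(src0, k, a);
    }

    // Zero-masking, matching the rkz_s pattern (EVEX_KZ): lanes whose mask
    // bit is 0 become 0.0f (ImmAllZerosV in the pattern above).
    __m512 splat_zero(__mmask16 k, __m128 a) {
      return _mm512_maskz_broadcastss_ps(k, a);
    }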
@@ -840,10 +870,12 @@ multiclass avx512_broadcast_rm<bits<8> opc, string OpcodeStr,
multiclass avx512_fp_broadcast_vl<bits<8> opc, string OpcodeStr,
AVX512VLVectorVTInfo _> {
defm Z : avx512_broadcast_rm<opc, OpcodeStr, _.info512, _.info128>,
+ avx512_broadcast_scalar<opc, OpcodeStr, _.info512, _.info128>,
EVEX_V512;
let Predicates = [HasVLX] in {
defm Z256 : avx512_broadcast_rm<opc, OpcodeStr, _.info256, _.info128>,
+ avx512_broadcast_scalar<opc, OpcodeStr, _.info256, _.info128>,
EVEX_V256;
}
}
@@ -852,8 +884,10 @@ let ExeDomain = SSEPackedSingle in {
defm VBROADCASTSS : avx512_fp_broadcast_vl<0x18, "vbroadcastss",
avx512vl_f32_info>;
let Predicates = [HasVLX] in {
- defm VBROADCASTSSZ128 : avx512_broadcast_rm<0x18, "vbroadcastss",
- v4f32x_info, v4f32x_info>, EVEX_V128;
+ defm VBROADCASTSSZ128 :
+ avx512_broadcast_rm<0x18, "vbroadcastss", v4f32x_info, v4f32x_info>,
+ avx512_broadcast_scalar<0x18, "vbroadcastss", v4f32x_info, v4f32x_info>,
+ EVEX_V128;
}
}
@@ -862,50 +896,6 @@ let ExeDomain = SSEPackedDouble in {
avx512vl_f64_info>, VEX_W;
}
-// avx512_broadcast_pat introduces patterns for broadcast with a scalar argument.
-// Later, we can canonize broadcast instructions before ISel phase and
-// eliminate additional patterns on ISel.
-// SrcRC_v and SrcRC_s are RegisterClasses for vector and scalar
-// representations of source
-multiclass avx512_broadcast_pat<string InstName, SDNode OpNode,
- X86VectorVTInfo _, RegisterClass SrcRC_v,
- RegisterClass SrcRC_s> {
- def : Pat<(_.VT (OpNode (_.EltVT SrcRC_s:$src))),
- (!cast<Instruction>(InstName##"r")
- (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;
-
- let AddedComplexity = 30 in {
- def : Pat<(_.VT (vselect _.KRCWM:$mask,
- (OpNode (_.EltVT SrcRC_s:$src)), _.RC:$src0)),
- (!cast<Instruction>(InstName##"rk") _.RC:$src0, _.KRCWM:$mask,
- (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;
-
- def : Pat<(_.VT(vselect _.KRCWM:$mask,
- (OpNode (_.EltVT SrcRC_s:$src)), _.ImmAllZerosV)),
- (!cast<Instruction>(InstName##"rkz") _.KRCWM:$mask,
- (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;
- }
-}
-
-defm : avx512_broadcast_pat<"VBROADCASTSSZ", X86VBroadcast, v16f32_info,
- VR128X, FR32X>;
-defm : avx512_broadcast_pat<"VBROADCASTSDZ", X86VBroadcast, v8f64_info,
- VR128X, FR64X>;
-
-let Predicates = [HasVLX] in {
- defm : avx512_broadcast_pat<"VBROADCASTSSZ256", X86VBroadcast,
- v8f32x_info, VR128X, FR32X>;
- defm : avx512_broadcast_pat<"VBROADCASTSSZ128", X86VBroadcast,
- v4f32x_info, VR128X, FR32X>;
- defm : avx512_broadcast_pat<"VBROADCASTSDZ256", X86VBroadcast,
- v4f64x_info, VR128X, FR64X>;
-}
-
-def : Pat<(v16f32 (X86VBroadcast (loadf32 addr:$src))),
- (VBROADCASTSSZm addr:$src)>;
-def : Pat<(v8f64 (X86VBroadcast (loadf64 addr:$src))),
- (VBROADCASTSDZm addr:$src)>;
-
def : Pat<(int_x86_avx512_vbroadcast_ss_512 addr:$src),
(VBROADCASTSSZm addr:$src)>;
def : Pat<(int_x86_avx512_vbroadcast_sd_512 addr:$src),
@@ -1091,14 +1081,6 @@ def : Pat<(v8f64 (X86VBroadcast (v8f64 VR512:$src))),
def : Pat<(v8f64 (X86VBroadcast (v4f64 VR256X:$src))),
(VBROADCASTSDZr (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm))>;
-// Provide fallback in case the load node that is used in the patterns above
-// is used by additional users, which prevents the pattern selection.
-def : Pat<(v16f32 (X86VBroadcast FR32X:$src)),
- (VBROADCASTSSZr (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
-def : Pat<(v8f64 (X86VBroadcast FR64X:$src)),
- (VBROADCASTSDZr (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
-
-
//===----------------------------------------------------------------------===//
// AVX-512 BROADCAST MASK TO VECTOR REGISTER
//---
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 7554ab439e7..3757e1a315c 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -805,50 +805,54 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::TZMSK64rr, X86::TZMSK64rm, 0 },
// AVX-512 foldable instructions
- { X86::VMOV64toPQIZrr, X86::VMOVQI2PQIZrm, 0 },
- { X86::VMOVDI2SSZrr, X86::VMOVDI2SSZrm, 0 },
- { X86::VMOVAPDZrr, X86::VMOVAPDZrm, TB_ALIGN_64 },
- { X86::VMOVAPSZrr, X86::VMOVAPSZrm, TB_ALIGN_64 },
- { X86::VMOVDQA32Zrr, X86::VMOVDQA32Zrm, TB_ALIGN_64 },
- { X86::VMOVDQA64Zrr, X86::VMOVDQA64Zrm, TB_ALIGN_64 },
- { X86::VMOVDQU8Zrr, X86::VMOVDQU8Zrm, 0 },
- { X86::VMOVDQU16Zrr, X86::VMOVDQU16Zrm, 0 },
- { X86::VMOVDQU32Zrr, X86::VMOVDQU32Zrm, 0 },
- { X86::VMOVDQU64Zrr, X86::VMOVDQU64Zrm, 0 },
- { X86::VMOVUPDZrr, X86::VMOVUPDZrm, 0 },
- { X86::VMOVUPSZrr, X86::VMOVUPSZrm, 0 },
- { X86::VPABSDZrr, X86::VPABSDZrm, 0 },
- { X86::VPABSQZrr, X86::VPABSQZrm, 0 },
- { X86::VBROADCASTSSZr, X86::VBROADCASTSSZm, TB_NO_REVERSE },
- { X86::VBROADCASTSDZr, X86::VBROADCASTSDZm, TB_NO_REVERSE },
+ { X86::VMOV64toPQIZrr, X86::VMOVQI2PQIZrm, 0 },
+ { X86::VMOVDI2SSZrr, X86::VMOVDI2SSZrm, 0 },
+ { X86::VMOVAPDZrr, X86::VMOVAPDZrm, TB_ALIGN_64 },
+ { X86::VMOVAPSZrr, X86::VMOVAPSZrm, TB_ALIGN_64 },
+ { X86::VMOVDQA32Zrr, X86::VMOVDQA32Zrm, TB_ALIGN_64 },
+ { X86::VMOVDQA64Zrr, X86::VMOVDQA64Zrm, TB_ALIGN_64 },
+ { X86::VMOVDQU8Zrr, X86::VMOVDQU8Zrm, 0 },
+ { X86::VMOVDQU16Zrr, X86::VMOVDQU16Zrm, 0 },
+ { X86::VMOVDQU32Zrr, X86::VMOVDQU32Zrm, 0 },
+ { X86::VMOVDQU64Zrr, X86::VMOVDQU64Zrm, 0 },
+ { X86::VMOVUPDZrr, X86::VMOVUPDZrm, 0 },
+ { X86::VMOVUPSZrr, X86::VMOVUPSZrm, 0 },
+ { X86::VPABSDZrr, X86::VPABSDZrm, 0 },
+ { X86::VPABSQZrr, X86::VPABSQZrm, 0 },
+ { X86::VBROADCASTSSZr, X86::VBROADCASTSSZm, TB_NO_REVERSE },
+ { X86::VBROADCASTSSZr_s, X86::VBROADCASTSSZm, TB_NO_REVERSE },
+ { X86::VBROADCASTSDZr, X86::VBROADCASTSDZm, TB_NO_REVERSE },
+ { X86::VBROADCASTSDZr_s, X86::VBROADCASTSDZm, TB_NO_REVERSE },
// AVX-512 foldable instructions (256-bit versions)
- { X86::VMOVAPDZ256rr, X86::VMOVAPDZ256rm, TB_ALIGN_32 },
- { X86::VMOVAPSZ256rr, X86::VMOVAPSZ256rm, TB_ALIGN_32 },
- { X86::VMOVDQA32Z256rr, X86::VMOVDQA32Z256rm, TB_ALIGN_32 },
- { X86::VMOVDQA64Z256rr, X86::VMOVDQA64Z256rm, TB_ALIGN_32 },
- { X86::VMOVDQU8Z256rr, X86::VMOVDQU8Z256rm, 0 },
- { X86::VMOVDQU16Z256rr, X86::VMOVDQU16Z256rm, 0 },
- { X86::VMOVDQU32Z256rr, X86::VMOVDQU32Z256rm, 0 },
- { X86::VMOVDQU64Z256rr, X86::VMOVDQU64Z256rm, 0 },
- { X86::VMOVUPDZ256rr, X86::VMOVUPDZ256rm, 0 },
- { X86::VMOVUPSZ256rr, X86::VMOVUPSZ256rm, 0 },
- { X86::VBROADCASTSSZ256r, X86::VBROADCASTSSZ256m, TB_NO_REVERSE },
- { X86::VBROADCASTSDZ256r, X86::VBROADCASTSDZ256m, TB_NO_REVERSE },
-
- // AVX-512 foldable instructions (256-bit versions)
- { X86::VMOVAPDZ128rr, X86::VMOVAPDZ128rm, TB_ALIGN_16 },
- { X86::VMOVAPSZ128rr, X86::VMOVAPSZ128rm, TB_ALIGN_16 },
- { X86::VMOVDQA32Z128rr, X86::VMOVDQA32Z128rm, TB_ALIGN_16 },
- { X86::VMOVDQA64Z128rr, X86::VMOVDQA64Z128rm, TB_ALIGN_16 },
- { X86::VMOVDQU8Z128rr, X86::VMOVDQU8Z128rm, 0 },
- { X86::VMOVDQU16Z128rr, X86::VMOVDQU16Z128rm, 0 },
- { X86::VMOVDQU32Z128rr, X86::VMOVDQU32Z128rm, 0 },
- { X86::VMOVDQU64Z128rr, X86::VMOVDQU64Z128rm, 0 },
- { X86::VMOVUPDZ128rr, X86::VMOVUPDZ128rm, 0 },
- { X86::VMOVUPSZ128rr, X86::VMOVUPSZ128rm, 0 },
- { X86::VBROADCASTSSZ128r, X86::VBROADCASTSSZ128m, TB_NO_REVERSE },
+ { X86::VMOVAPDZ256rr, X86::VMOVAPDZ256rm, TB_ALIGN_32 },
+ { X86::VMOVAPSZ256rr, X86::VMOVAPSZ256rm, TB_ALIGN_32 },
+ { X86::VMOVDQA32Z256rr, X86::VMOVDQA32Z256rm, TB_ALIGN_32 },
+ { X86::VMOVDQA64Z256rr, X86::VMOVDQA64Z256rm, TB_ALIGN_32 },
+ { X86::VMOVDQU8Z256rr, X86::VMOVDQU8Z256rm, 0 },
+ { X86::VMOVDQU16Z256rr, X86::VMOVDQU16Z256rm, 0 },
+ { X86::VMOVDQU32Z256rr, X86::VMOVDQU32Z256rm, 0 },
+ { X86::VMOVDQU64Z256rr, X86::VMOVDQU64Z256rm, 0 },
+ { X86::VMOVUPDZ256rr, X86::VMOVUPDZ256rm, 0 },
+ { X86::VMOVUPSZ256rr, X86::VMOVUPSZ256rm, 0 },
+ { X86::VBROADCASTSSZ256r, X86::VBROADCASTSSZ256m, TB_NO_REVERSE },
+ { X86::VBROADCASTSSZ256r_s, X86::VBROADCASTSSZ256m, TB_NO_REVERSE },
+ { X86::VBROADCASTSDZ256r, X86::VBROADCASTSDZ256m, TB_NO_REVERSE },
+ { X86::VBROADCASTSDZ256r_s, X86::VBROADCASTSDZ256m, TB_NO_REVERSE },
+ // AVX-512 foldable instructions (128-bit versions)
+ { X86::VMOVAPDZ128rr, X86::VMOVAPDZ128rm, TB_ALIGN_16 },
+ { X86::VMOVAPSZ128rr, X86::VMOVAPSZ128rm, TB_ALIGN_16 },
+ { X86::VMOVDQA32Z128rr, X86::VMOVDQA32Z128rm, TB_ALIGN_16 },
+ { X86::VMOVDQA64Z128rr, X86::VMOVDQA64Z128rm, TB_ALIGN_16 },
+ { X86::VMOVDQU8Z128rr, X86::VMOVDQU8Z128rm, 0 },
+ { X86::VMOVDQU16Z128rr, X86::VMOVDQU16Z128rm, 0 },
+ { X86::VMOVDQU32Z128rr, X86::VMOVDQU32Z128rm, 0 },
+ { X86::VMOVDQU64Z128rr, X86::VMOVDQU64Z128rm, 0 },
+ { X86::VMOVUPDZ128rr, X86::VMOVUPDZ128rm, 0 },
+ { X86::VMOVUPSZ128rr, X86::VMOVUPSZ128rm, 0 },
+ { X86::VBROADCASTSSZ128r, X86::VBROADCASTSSZ128m, TB_NO_REVERSE },
+ { X86::VBROADCASTSSZ128r_s, X86::VBROADCASTSSZ128m, TB_NO_REVERSE },
// F16C foldable instructions
{ X86::VCVTPH2PSrr, X86::VCVTPH2PSrm, 0 },
{ X86::VCVTPH2PSYrr, X86::VCVTPH2PSYrm, 0 },
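The new *_s entries above are what allow the register allocator to rewrite a broadcast whose scalar source was spilled into a single memory-operand broadcast. As a simplified, self-contained sketch of the reg-to-mem lookup such a table feeds (the opcode values and function name are hypothetical, for illustration only; LLVM's real tables carry more flags and are consulted through X86InstrInfo):

    #include <cstdint>

    struct FoldEntry {
      unsigned RegOpc;  // register-form opcode, e.g. VBROADCASTSSZr_s
      unsigned MemOpc;  // memory-form opcode, e.g. VBROADCASTSSZm
      uint32_t Flags;   // e.g. TB_NO_REVERSE: memory form cannot be unfolded
    };

    // Hypothetical opcode numbers, for illustration only.
    enum : unsigned { VBROADCASTSSZr_s = 1, VBROADCASTSSZm = 2 };
    constexpr uint32_t TB_NO_REVERSE = 1u << 0;

    static const FoldEntry FoldTable[] = {
        {VBROADCASTSSZr_s, VBROADCASTSSZm, TB_NO_REVERSE},
    };

    // Return the memory-form opcode for RegOpc, or 0 if it cannot fold.
    unsigned lookupFold(unsigned RegOpc) {
      for (const FoldEntry &E : FoldTable)
        if (E.RegOpc == RegOpc)
          return E.MemOpc;
      return 0;
    }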
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index da559d32ebe..23c66c99f5e 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -8396,17 +8396,19 @@ let Predicates = [HasAVX2] in {
def : Pat<(v4f64 (X86VBroadcast (v4f64 VR256:$src))),
(VBROADCASTSDYrr (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src),
sub_xmm)))>;
+}
-// Provide fallback in case the load node that is used in the patterns above
-// is used by additional users, which prevents the pattern selection.
- let AddedComplexity = 20 in {
- def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
- (VBROADCASTSSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
- def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
- (VBROADCASTSSYrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
- def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
- (VBROADCASTSDYrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
- }
+let Predicates = [HasAVX2, NoVLX] in {
+ // Provide fallback in case the load node that is used in the patterns above
+ // is used by additional users, which prevents the pattern selection.
+ let AddedComplexity = 20 in {
+ def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
+ (VBROADCASTSSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
+ def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
+ (VBROADCASTSSYrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
+ def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
+ (VBROADCASTSDYrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
+ }
}
let Predicates = [HasAVX2, NoVLX_Or_NoBWI], AddedComplexity = 20 in {
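The fallback comment above covers cases like the following hedged example, where the loaded scalar has a second user, so the load cannot be folded into the broadcast and ISel selects the register-operand form instead:

    #include <immintrin.h>

    // The load of *p has two users (the store and the broadcast), so it is
    // not folded into vbroadcastss; the register-operand fallback pattern
    // (broadcast from an XMM register) is selected instead.
    __m256 splat_and_keep(const float *p, float *out) {
      float x = *p;
      *out = x;
      return _mm256_set1_ps(x);
    }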
@@ -8458,6 +8460,11 @@ def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
// is used by additional users, which prevents the pattern selection.
let Predicates = [HasAVX], AddedComplexity = 20 in {
// 128bit broadcasts:
+ def : Pat<(v2f64 (X86VBroadcast f64:$src)),
+ (VMOVDDUPrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
+}
+
+let Predicates = [HasAVX, NoVLX], AddedComplexity = 20 in {
def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
(VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0)>;
def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
@@ -8469,11 +8476,6 @@ let Predicates = [HasAVX], AddedComplexity = 20 in {
(VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), sub_xmm),
(VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), 1)>;
- def : Pat<(v2f64 (X86VBroadcast f64:$src)),
- (VMOVDDUPrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
-}
-
-let Predicates = [HasAVX, NoVLX], AddedComplexity = 20 in {
def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
(VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0)>;
def : Pat<(v8i32 (X86VBroadcast GR32:$src)),