| author | Tony Jiang <jtony@ca.ibm.com> | 2016-11-15 14:25:56 +0000 |
|---|---|---|
| committer | Tony Jiang <jtony@ca.ibm.com> | 2016-11-15 14:25:56 +0000 |
| commit | 5f850cd1b10f4dffc86285c2f71ab34cba9e929d (patch) | |
| tree | 8bf2ea3b1da693627ba11aa9c7b97ab612fe3e59 /llvm/lib/Target/PowerPC | |
| parent | 3776e7620153d3530b0ceaca628382ecb80c9a20 (diff) | |
[PowerPC] Implement BE VSX load/store builtins - llvm portion.
This patch implements all the overloads for vec_xl_be and vec_xst_be. On BE,
they behave exactly the same as vec_xl and vec_xst, therefore they are simply
implemented by defining matching macros. On LE, they are implemented by
defining new builtins and intrinsics. For int/float/long long/double, it is
just a load (lxvw4x/lxvd2x) or store (stxvw4x/stxvd2x). For char/short, we
also need some extra shuffling before or after calling the builtins to get the
desired BE element order. For int128, simply call vec_xl or vec_xst.
llvm-svn: 286967
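For context, a minimal usage sketch of the clang-side builtins that sit on top of these intrinsics (the clang portion lands separately). The function name and compile flags below are illustrative assumptions, not part of this commit:

```c
/* Hedged usage sketch: assumes <altivec.h> from a clang that carries the
 * matching clang-side change, built with e.g. -mcpu=power8 -mvsx.        */
#include <altivec.h>

vector signed int copy_in_be_order(signed int *in, signed int *out) {
  /* vec_xl_be loads 16 bytes in BE element order on either endianness; on LE
   * it is expected to lower to the new llvm.ppc.vsx.lxvw4x.be intrinsic and
   * hence to a plain lxvw4x with no element swap.                          */
  vector signed int v = vec_xl_be(0, in);
  /* vec_xst_be stores in BE element order; on LE it should map to
   * llvm.ppc.vsx.stxvw4x.be / stxvw4x.                                     */
  vec_xst_be(v, 0, out);
  return v;
}
```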
Diffstat (limited to 'llvm/lib/Target/PowerPC')
| -rw-r--r-- | llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 8 |
|---|---|---|
| -rw-r--r-- | llvm/lib/Target/PowerPC/PPCInstrVSX.td | 7 |

2 files changed, 15 insertions, 0 deletions
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 22f71109690..7468b8baba1 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -9784,9 +9784,11 @@ static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
     case Intrinsic::ppc_altivec_lvx:
     case Intrinsic::ppc_altivec_lvxl:
     case Intrinsic::ppc_vsx_lxvw4x:
+    case Intrinsic::ppc_vsx_lxvw4x_be:
       VT = MVT::v4i32;
       break;
     case Intrinsic::ppc_vsx_lxvd2x:
+    case Intrinsic::ppc_vsx_lxvd2x_be:
       VT = MVT::v2f64;
       break;
     case Intrinsic::ppc_altivec_lvebx:
@@ -9833,6 +9835,12 @@ static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
     case Intrinsic::ppc_vsx_stxvd2x:
       VT = MVT::v2f64;
       break;
+    case Intrinsic::ppc_vsx_stxvw4x_be:
+      VT = MVT::v4i32;
+      break;
+    case Intrinsic::ppc_vsx_stxvd2x_be:
+      VT = MVT::v2f64;
+      break;
     case Intrinsic::ppc_altivec_stvebx:
       VT = MVT::i8;
       break;
diff --git a/llvm/lib/Target/PowerPC/PPCInstrVSX.td b/llvm/lib/Target/PowerPC/PPCInstrVSX.td
index 98ac3b77469..deaecbd9c5b 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrVSX.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrVSX.td
@@ -1020,6 +1020,10 @@ let Predicates = [HasVSX, HasOnlySwappingMemOps] in {
             (STXVD2X $rS, xoaddr:$dst)>;
   def : Pat<(int_ppc_vsx_stxvw4x v4i32:$rS, xoaddr:$dst),
             (STXVW4X $rS, xoaddr:$dst)>;
+  def : Pat<(int_ppc_vsx_stxvd2x_be v2f64:$rS, xoaddr:$dst),
+            (STXVD2X $rS, xoaddr:$dst)>;
+  def : Pat<(int_ppc_vsx_stxvw4x_be v4i32:$rS, xoaddr:$dst),
+            (STXVW4X $rS, xoaddr:$dst)>;
   def : Pat<(PPCstxvd2x v2f64:$rS, xoaddr:$dst), (STXVD2X $rS, xoaddr:$dst)>;
 }
 let Predicates = [IsBigEndian, HasVSX, HasOnlySwappingMemOps] in {
@@ -1848,6 +1852,9 @@ let Predicates = [IsLittleEndian, HasVSX] in
   def : Pat<(f64 (vector_extract v2f64:$S, i64:$Idx)),
             (f64 VectorExtractions.LE_VARIABLE_DOUBLE)>;

+  def : Pat<(v4i32 (int_ppc_vsx_lxvw4x_be xoaddr:$src)), (LXVW4X xoaddr:$src)>;
+  def : Pat<(v2f64 (int_ppc_vsx_lxvd2x_be xoaddr:$src)), (LXVD2X xoaddr:$src)>;
+
 let Predicates = [IsLittleEndian, HasDirectMove] in {
   // v16i8 scalar <-> vector conversions (LE)
   def : Pat<(v16i8 (scalar_to_vector i32:$A)),

