author | Zaara Syeda <syzaara@ca.ibm.com> | 2016-11-15 18:04:13 +0000
---|---|---
committer | Zaara Syeda <syzaara@ca.ibm.com> | 2016-11-15 18:04:13 +0000
commit | c1d2952388c46d5ae54fb8cd2152aa8cdd471d3b (patch) |
tree | c7f6f3d75f3dc9939c8c792f493e4b7f0e13a33b /clang/lib |
parent | a19c9e60e9aa50286ba340aa13bba8f541b25703 (diff) |
vector load store with length (left justified) clang portion
llvm-svn: 286994
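For context, the sketch below shows how the vec_xl_len/vec_xst_len overloads that this commit adds to altivec.h are meant to be used from C. It is illustrative only: the function and buffer names are invented for this example, and it assumes a 64-bit POWER9 target (e.g. -mcpu=power9), since the builtins lower to the ISA 3.0 lxvl/stxvl instructions.

```c
/* Hypothetical example, not part of this commit: process only the first n
 * bytes of a buffer without reading or writing past it. */
#include <altivec.h>
#include <stddef.h>

void increment_first_n(unsigned char *buf, size_t n) {
  /* Load the first n bytes; the remaining vector bytes read as zero. */
  vector unsigned char v = vec_xl_len(buf, n);
  v = vec_add(v, vec_splats((unsigned char)1));
  /* Store only the first n bytes back; bytes past n are left untouched. */
  vec_xst_len(v, buf, n);
}
```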
Diffstat (limited to 'clang/lib')
| mode | path | changes |
|---|---|---|
| -rw-r--r-- | clang/lib/CodeGen/CGBuiltin.cpp | 39 |
| -rw-r--r-- | clang/lib/Headers/altivec.h | 159 |
2 files changed, 191 insertions, 7 deletions
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 77a3c3a9a8d..fd8f17e0efe 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -7923,11 +7923,17 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
   case PPC::BI__builtin_vsx_lxvw4x:
   case PPC::BI__builtin_vsx_lxvd2x_be:
   case PPC::BI__builtin_vsx_lxvw4x_be:
+  case PPC::BI__builtin_vsx_lxvl:
+  case PPC::BI__builtin_vsx_lxvll:
   {
-    Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
-
-    Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
-    Ops.pop_back();
+    if(BuiltinID == PPC::BI__builtin_vsx_lxvl ||
+       BuiltinID == PPC::BI__builtin_vsx_lxvll){
+      Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
+    }else {
+      Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
+      Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
+      Ops.pop_back();
+    }
 
     switch (BuiltinID) {
     default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
@@ -7964,6 +7970,12 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
     case PPC::BI__builtin_vsx_lxvw4x_be:
       ID = Intrinsic::ppc_vsx_lxvw4x_be;
       break;
+    case PPC::BI__builtin_vsx_lxvl:
+      ID = Intrinsic::ppc_vsx_lxvl;
+      break;
+    case PPC::BI__builtin_vsx_lxvll:
+      ID = Intrinsic::ppc_vsx_lxvll;
+      break;
     }
     llvm::Function *F = CGM.getIntrinsic(ID);
     return Builder.CreateCall(F, Ops, "");
@@ -7979,10 +7991,17 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
   case PPC::BI__builtin_vsx_stxvw4x:
   case PPC::BI__builtin_vsx_stxvd2x_be:
   case PPC::BI__builtin_vsx_stxvw4x_be:
+  case PPC::BI__builtin_vsx_stxvl:
+  case PPC::BI__builtin_vsx_stxvll:
   {
-    Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
-    Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
-    Ops.pop_back();
+    if(BuiltinID == PPC::BI__builtin_vsx_stxvl ||
+       BuiltinID == PPC::BI__builtin_vsx_stxvll ){
+      Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
+    }else {
+      Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
+      Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
+      Ops.pop_back();
+    }
 
     switch (BuiltinID) {
     default: llvm_unreachable("Unsupported st intrinsic!");
@@ -8013,6 +8032,12 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
     case PPC::BI__builtin_vsx_stxvw4x_be:
       ID = Intrinsic::ppc_vsx_stxvw4x_be;
       break;
+    case PPC::BI__builtin_vsx_stxvl:
+      ID = Intrinsic::ppc_vsx_stxvl;
+      break;
+    case PPC::BI__builtin_vsx_stxvll:
+      ID = Intrinsic::ppc_vsx_stxvll;
+      break;
     }
     llvm::Function *F = CGM.getIntrinsic(ID);
     return Builder.CreateCall(F, Ops, "");
diff --git a/clang/lib/Headers/altivec.h b/clang/lib/Headers/altivec.h
index 5d10836517b..f1df6a13a6a 100644
--- a/clang/lib/Headers/altivec.h
+++ b/clang/lib/Headers/altivec.h
@@ -17,6 +17,7 @@
  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
+ *
  *===----------------------------------------------------------------------===*/
 
 #ifndef __ALTIVEC_H
@@ -54,6 +55,10 @@
 #define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
 
+#ifdef __POWER9_VECTOR__
+#include <stddef.h>
+#endif
+
 static __inline__ vector signed char __ATTRS_o_ai vec_perm(
     vector signed char __a, vector signed char __b, vector unsigned char __c);
@@ -2650,6 +2655,160 @@ vec_insert_exp(vector unsigned int __a, vector unsigned int __b) {
   return __builtin_vsx_xviexpsp(__a,__b);
 }
 
+#if defined(__powerpc64__)
+static __inline__ vector signed char __ATTRS_o_ai vec_xl_len(signed char *__a,
+                                                             size_t __b) {
+  return (vector signed char)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_xl_len(unsigned char *__a, size_t __b) {
+  return (vector unsigned char)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector signed short __ATTRS_o_ai vec_xl_len(signed short *__a,
+                                                              size_t __b) {
+  return (vector signed short)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_xl_len(unsigned short *__a, size_t __b) {
+  return (vector unsigned short)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector signed int __ATTRS_o_ai vec_xl_len(signed int *__a,
+                                                            size_t __b) {
+  return (vector signed int)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai vec_xl_len(unsigned int *__a,
+                                                              size_t __b) {
+  return (vector unsigned int)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_xl_len(float *__a, size_t __b) {
+  return (vector float)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_xl_len(signed __int128 *__a, size_t __b) {
+  return (vector signed __int128)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_len(unsigned __int128 *__a, size_t __b) {
+  return (vector unsigned __int128)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_xl_len(signed long long *__a, size_t __b) {
+  return (vector signed long long)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_xl_len(unsigned long long *__a, size_t __b) {
+  return (vector unsigned long long)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_xl_len(double *__a,
+                                                        size_t __b) {
+  return (vector double)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_xl_len_r(unsigned char *__a,
+                                                          size_t __b) {
+  vector unsigned char __res =
+      (vector unsigned char)__builtin_vsx_lxvll(__a, (__b << 56));
+#ifdef __LITTLE_ENDIAN__
+  vector unsigned char __mask =
+      (vector unsigned char)__builtin_altivec_lvsr(16 - __b, (int *)NULL);
+  __res = (vector unsigned char)__builtin_altivec_vperm_4si(
+      (vector int)__res, (vector int)__res, __mask);
+#endif
+  return __res;
+}
+
+// vec_xst_len
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned char __a,
+                                                unsigned char *__b,
+                                                size_t __c) {
+  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed char __a,
+                                                signed char *__b, size_t __c) {
+  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed short __a,
+                                                signed short *__b, size_t __c) {
+  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned short __a,
+                                                unsigned short *__b,
+                                                size_t __c) {
+  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed int __a,
+                                                signed int *__b, size_t __c) {
+  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned int __a,
+                                                unsigned int *__b, size_t __c) {
+  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector float __a, float *__b,
+                                                size_t __c) {
+  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed __int128 __a,
+                                                signed __int128 *__b,
+                                                size_t __c) {
+  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned __int128 __a,
+                                                unsigned __int128 *__b,
+                                                size_t __c) {
+  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed long long __a,
+                                                signed long long *__b,
+                                                size_t __c) {
+  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned long long __a,
+                                                unsigned long long *__b,
+                                                size_t __c) {
+  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector double __a, double *__b,
+                                                size_t __c) {
+  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len_r(vector unsigned char __a,
+                                                  unsigned char *__b,
+                                                  size_t __c) {
+#ifdef __LITTLE_ENDIAN__
+  vector unsigned char __mask =
+      (vector unsigned char)__builtin_altivec_lvsl(16 - __c, (int *)NULL);
+  vector unsigned char __res =
+      __builtin_altivec_vperm_4si((vector int)__a, (vector int)__a, __mask);
+  return __builtin_vsx_stxvll((vector int)__res, __b, (__c << 56));
+#else
+  return __builtin_vsx_stxvll((vector int)__a, __b, (__c << 56));
+#endif
+}
+#endif
 #endif
 
 /* vec_cpsgn */
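A note on the `(__b << 56)` / `(__c << 56)` that appears in every wrapper above: the lxvl/lxvll and stxvl/stxvll instructions take the byte count in the most significant byte (bits 0:7) of the length operand, so the size_t argument is shifted up by 56 bits before it reaches the builtin. The snippet below is a rough reference model of the left-justified load under that encoding; it is an assumption-laden illustration with invented names, not the compiler's implementation, and it assumes counts above 16 behave like 16.

```c
/* Reference model of a left-justified load-with-length (illustrative only).
 * Uses big-endian byte numbering of the 16-byte result. */
#include <string.h>

static void model_lxvl(unsigned char out[16], const unsigned char *p,
                       unsigned long long len_reg) {
  /* The byte count lives in bits 0:7 of the length operand, which is why
   * the altivec.h wrappers pass (__b << 56). */
  unsigned len = (unsigned)(len_reg >> 56);
  if (len > 16)
    len = 16;          /* assumption: counts above 16 load the full vector */
  memset(out, 0, 16);  /* bytes past the length come back as zero */
  memcpy(out, p, len); /* the leftmost len bytes are taken from memory */
}
```

The right-justified variants (vec_xl_len_r/vec_xst_len_r) layer an lvsr/lvsl permute on top of lxvll/stxvll on little-endian targets, as the diff above shows.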