author     Nemanja Ivanovic <nemanja.i.ibm@gmail.com>    2015-06-26 19:27:20 +0000
committer  Nemanja Ivanovic <nemanja.i.ibm@gmail.com>    2015-06-26 19:27:20 +0000
commit     2f1f926e34ded4fcab55c112ee039991569f6089 (patch)
tree       ed022e04a2dd93640fa96254cb07fd37cbdfcbce /clang/lib
parent     f502a428e6ea11fe222d369d2212e661f702ed73 (diff)
Add missing builtins to altivec.h for ABI compliance (vol. 1)
This patch corresponds to review: http://reviews.llvm.org/D10637

This is the first round of additions of missing builtins listed in the ABI document. More to come (this builds on what seurer already added).

This patch adds:
vector signed long long vec_abs(vector signed long long)
vector double vec_abs(vector double)
vector signed long long vec_add(vector signed long long, vector signed long long)
vector unsigned long long vec_add(vector unsigned long long, vector unsigned long long)
vector double vec_add(vector double, vector double)
vector double vec_and(vector bool long long, vector double)
vector double vec_and(vector double, vector bool long long)
vector double vec_and(vector double, vector double)
vector signed long long vec_and(vector signed long long, vector signed long long)
vector double vec_andc(vector bool long long, vector double)
vector double vec_andc(vector double, vector bool long long)
vector double vec_andc(vector double, vector double)
vector signed long long vec_andc(vector signed long long, vector signed long long)
vector double vec_ceil(vector double)
vector bool long long vec_cmpeq(vector double, vector double)
vector bool long long vec_cmpge(vector double, vector double)
vector bool long long vec_cmpge(vector signed long long, vector signed long long)
vector bool long long vec_cmpge(vector unsigned long long, vector unsigned long long)
vector bool long long vec_cmpgt(vector double, vector double)
vector bool long long vec_cmple(vector double, vector double)
vector bool long long vec_cmple(vector signed long long, vector signed long long)
vector bool long long vec_cmple(vector unsigned long long, vector unsigned long long)
vector bool long long vec_cmplt(vector double, vector double)
vector bool long long vec_cmplt(vector signed long long, vector signed long long)
vector bool long long vec_cmplt(vector unsigned long long, vector unsigned long long)

llvm-svn: 240821
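As a quick illustration (not part of the patch), here is a minimal usage sketch of a few of the new overloads. The file and function names are hypothetical, and it assumes a POWER8/VSX target, e.g. built with clang -target powerpc64le-unknown-linux-gnu -mcpu=pwr8 -maltivec -c example.c:

/* example.c -- illustration only; exercises overloads added by this patch. */
#include <altivec.h>

/* 64-bit integer addition now resolves through the generic vec_add overload. */
vector signed long long add64(vector signed long long a,
                              vector signed long long b) {
  return vec_add(a, b);
}

/* Double-precision helpers: the vec_add, vec_abs, vec_cmpgt and the
   double/bool long long vec_and overloads used here are all new in this patch. */
vector double keep_if_bigger(vector double x, vector double y) {
  vector double sum = vec_add(x, y);
  vector bool long long mask = vec_cmpgt(vec_abs(sum), y);
  return vec_and(sum, mask);   /* zero out lanes where |x + y| <= y */
}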
Diffstat (limited to 'clang/lib')
-rw-r--r--  clang/lib/CodeGen/CGBuiltin.cpp    7
-rw-r--r--  clang/lib/Headers/altivec.h      192
2 files changed, 186 insertions, 13 deletions
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 20a9532de13..5e58bcc2ce9 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -6560,6 +6560,13 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, "");
}
+ case PPC::BI__builtin_vsx_xvrspip:
+ case PPC::BI__builtin_vsx_xvrdpip:
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ ID = Intrinsic::ceil;
+ llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
+ return Builder.CreateCall(F, X);
}
}
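For context, the new case above routes both round-toward-plus-infinity builtins to the target-independent ceil intrinsic rather than a PPC-specific one. A hedged sketch of how that surfaces through the header (the function name is illustrative; assumes a VSX-enabled target as above):

#include <altivec.h>

/* With VSX, vec_ceil(vector double) expands to __builtin_vsx_xvrdpip (see the
   altivec.h hunk below), which the CodeGen case added above lowers to a call
   to the generic llvm.ceil.v2f64 intrinsic in the emitted IR.               */
vector double round_up(vector double x) {
  return vec_ceil(x);
}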
diff --git a/clang/lib/Headers/altivec.h b/clang/lib/Headers/altivec.h
index 28df8905720..be0a1dbc118 100644
--- a/clang/lib/Headers/altivec.h
+++ b/clang/lib/Headers/altivec.h
@@ -110,14 +110,28 @@ static vector signed int __ATTRS_o_ai vec_abs(vector signed int __a) {
return __builtin_altivec_vmaxsw(__a, -__a);
}
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed long long __ATTRS_o_ai
+vec_abs(vector signed long long __a) {
+ return __builtin_altivec_vmaxsd(__a, -__a);
+}
+#endif
+
static vector float __ATTRS_o_ai vec_abs(vector float __a) {
vector unsigned int __res =
(vector unsigned int)__a & (vector unsigned int)(0x7FFFFFFF);
return (vector float)__res;
}
-/* vec_abss */
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector double __ATTRS_o_ai vec_abs(vector double __a) {
+ vector unsigned long long __res = { 0x7FFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFFF };
+  __res &= (vector unsigned long long)__a;
+ return (vector double)__res;
+}
+#endif
+/* vec_abss */
#define __builtin_altivec_abss_v16qi vec_abss
#define __builtin_altivec_abss_v8hi vec_abss
#define __builtin_altivec_abss_v4si vec_abss
@@ -226,6 +240,16 @@ static vector unsigned int __ATTRS_o_ai vec_add(vector unsigned int __a,
}
#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed long long __ATTRS_o_ai
+vec_add(vector signed long long __a, vector signed long long __b) {
+ return __a + __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_add(vector unsigned long long __a, vector unsigned long long __b) {
+ return __a + __b;
+}
+
static vector signed __int128 __ATTRS_o_ai vec_add(vector signed __int128 __a,
vector signed __int128 __b) {
return __a + __b;
@@ -241,6 +265,13 @@ static vector float __ATTRS_o_ai vec_add(vector float __a, vector float __b) {
return __a + __b;
}
+#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_add(vector double __a, vector double __b) {
+ return __a + __b;
+}
+#endif // __VSX__
+
/* vec_vaddubm */
#define __builtin_altivec_vaddubm vec_vaddubm
@@ -746,6 +777,24 @@ static vector float __ATTRS_o_ai vec_and(vector float __a,
}
#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_and(vector bool long long __a, vector double __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & (vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static vector double __ATTRS_o_ai vec_and(vector double __a, vector bool long long __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & (vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static vector double __ATTRS_o_ai vec_and(vector double __a, vector double __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & (vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
static vector signed long long __ATTRS_o_ai
vec_and(vector signed long long __a, vector signed long long __b) {
return __a & __b;
@@ -1068,6 +1117,26 @@ static vector float __ATTRS_o_ai vec_andc(vector float __a,
}
#ifdef __VSX__
+static vector double __ATTRS_o_ai
+vec_andc(vector bool long long __a, vector double __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & ~(vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static vector double __ATTRS_o_ai
+vec_andc(vector double __a, vector bool long long __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & ~(vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
+static vector double __ATTRS_o_ai vec_andc(vector double __a, vector double __b) {
+ vector unsigned long long __res =
+ (vector unsigned long long)__a & ~(vector unsigned long long)__b;
+ return (vector double)__res;
+}
+
static vector signed long long __ATTRS_o_ai
vec_andc(vector signed long long __a, vector signed long long __b) {
return __a & ~__b;
@@ -1338,11 +1407,20 @@ vec_vavguw(vector unsigned int __a, vector unsigned int __b) {
/* vec_ceil */
-static vector float __attribute__((__always_inline__))
-vec_ceil(vector float __a) {
+static vector float __ATTRS_o_ai vec_ceil(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvrspip(__a);
+#else
return __builtin_altivec_vrfip(__a);
+#endif
}
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_ceil(vector double __a) {
+ return __builtin_vsx_xvrdpip(__a);
+}
+#endif
+
/* vec_vrfip */
static vector float __attribute__((__always_inline__))
@@ -1414,16 +1492,56 @@ vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
static vector bool int __ATTRS_o_ai vec_cmpeq(vector float __a,
vector float __b) {
+#ifdef __VSX__
+ return (vector bool int)__builtin_vsx_xvcmpeqsp(__a, __b);
+#else
return (vector bool int)__builtin_altivec_vcmpeqfp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static vector bool long long __ATTRS_o_ai
+vec_cmpeq(vector double __a, vector double __b) {
+ return (vector bool long long)__builtin_vsx_xvcmpeqdp(__a, __b);
}
+#endif
/* vec_cmpge */
-static vector bool int __attribute__((__always_inline__))
+static vector bool int __ATTRS_o_ai
vec_cmpge(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return (vector bool int)__builtin_vsx_xvcmpgesp(__a, __b);
+#else
return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static vector bool long long __ATTRS_o_ai
+vec_cmpge(vector double __a, vector double __b) {
+ return (vector bool long long)__builtin_vsx_xvcmpgedp(__a, __b);
+}
+#endif
+
+#ifdef __POWER8_VECTOR__
+/* Forward declarations as the functions are used here */
+static vector bool long long __ATTRS_o_ai
+vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b);
+static vector bool long long __ATTRS_o_ai
+vec_cmpgt(vector signed long long __a, vector signed long long __b);
+
+static vector bool long long __ATTRS_o_ai
+vec_cmpge(vector signed long long __a, vector signed long long __b) {
+ return ~(vec_cmpgt(__b, __a));
}
+static vector bool long long __ATTRS_o_ai
+vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+#endif
+
/* vec_vcmpgefp */
static vector bool int __attribute__((__always_inline__))
@@ -1476,9 +1594,19 @@ vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
static vector bool int __ATTRS_o_ai vec_cmpgt(vector float __a,
vector float __b) {
+#ifdef __VSX__
+ return (vector bool int)__builtin_vsx_xvcmpgtsp(__a, __b);
+#else
return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b);
+#endif
}
+#ifdef __VSX__
+static vector bool long long __ATTRS_o_ai
+vec_cmpgt(vector double __a, vector double __b) {
+ return (vector bool long long)__builtin_vsx_xvcmpgtdp(__a, __b);
+}
+#endif
/* vec_vcmpgtsb */
static vector bool char __attribute__((__always_inline__))
@@ -1530,47 +1658,85 @@ vec_vcmpgtfp(vector float __a, vector float __b) {
/* vec_cmple */
-static vector bool int __attribute__((__always_inline__))
+static vector bool int __ATTRS_o_ai
vec_cmple(vector float __a, vector float __b) {
- return (vector bool int)__builtin_altivec_vcmpgefp(__b, __a);
+ return vec_cmpge(__b, __a);
}
+#ifdef __VSX__
+static vector bool long long __ATTRS_o_ai
+vec_cmple(vector double __a, vector double __b) {
+ return vec_cmpge(__b, __a);
+}
+#endif
+
+#ifdef __POWER8_VECTOR__
+static vector bool long long __ATTRS_o_ai
+vec_cmple(vector signed long long __a, vector signed long long __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_cmpge(__b, __a);
+}
+#endif
+
/* vec_cmplt */
static vector bool char __ATTRS_o_ai vec_cmplt(vector signed char __a,
vector signed char __b) {
- return (vector bool char)__builtin_altivec_vcmpgtsb(__b, __a);
+ return vec_cmpgt(__b, __a);
}
static vector bool char __ATTRS_o_ai vec_cmplt(vector unsigned char __a,
vector unsigned char __b) {
- return (vector bool char)__builtin_altivec_vcmpgtub(__b, __a);
+ return vec_cmpgt(__b, __a);
}
static vector bool short __ATTRS_o_ai vec_cmplt(vector short __a,
vector short __b) {
- return (vector bool short)__builtin_altivec_vcmpgtsh(__b, __a);
+ return vec_cmpgt(__b, __a);
}
static vector bool short __ATTRS_o_ai vec_cmplt(vector unsigned short __a,
vector unsigned short __b) {
- return (vector bool short)__builtin_altivec_vcmpgtuh(__b, __a);
+ return vec_cmpgt(__b, __a);
}
static vector bool int __ATTRS_o_ai vec_cmplt(vector int __a, vector int __b) {
- return (vector bool int)__builtin_altivec_vcmpgtsw(__b, __a);
+ return vec_cmpgt(__b, __a);
}
static vector bool int __ATTRS_o_ai vec_cmplt(vector unsigned int __a,
vector unsigned int __b) {
- return (vector bool int)__builtin_altivec_vcmpgtuw(__b, __a);
+ return vec_cmpgt(__b, __a);
}
static vector bool int __ATTRS_o_ai vec_cmplt(vector float __a,
vector float __b) {
- return (vector bool int)__builtin_altivec_vcmpgtfp(__b, __a);
+ return vec_cmpgt(__b, __a);
}
+#ifdef __VSX__
+static vector bool long long __ATTRS_o_ai
+vec_cmplt(vector double __a, vector double __b) {
+ return vec_cmpgt(__b, __a);
+}
+#endif
+
+#ifdef __POWER8_VECTOR__
+static vector bool long long __ATTRS_o_ai
+vec_cmplt(vector signed long long __a, vector signed long long __b) {
+ return vec_cmpgt(__b, __a);
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_cmpgt(__b, __a);
+}
+#endif
+
/* vec_ctf */
static vector float __ATTRS_o_ai vec_ctf(vector int __a, int __b) {
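Finally, a small sketch of the comparison identities the header now relies on: the less-than overloads swap operands of vec_cmpgt, and the new 64-bit vec_cmpge is the complement of vec_cmpgt with swapped operands. Function names are illustrative; assumes a POWER8 target as in the earlier example:

#include <altivec.h>

/* vec_cmplt(a, b) is implemented as vec_cmpgt(b, a); the new 64-bit
   vec_cmpge(a, b) is implemented as ~vec_cmpgt(b, a).                */
vector bool long long lt64(vector signed long long a,
                           vector signed long long b) {
  return vec_cmplt(a, b);
}

vector bool long long ge64(vector signed long long a,
                           vector signed long long b) {
  return vec_cmpge(a, b);
}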