Diffstat (limited to 'clang')
-rw-r--r--  clang/include/clang/Basic/BuiltinsPPC.def  |   9
-rw-r--r--  clang/lib/Headers/altivec.h                 | 734
-rw-r--r--  clang/test/CodeGen/builtins-ppc-altivec.c   |  72
-rw-r--r--  clang/test/CodeGen/builtins-ppc-crypto.c    | 102
-rw-r--r--  clang/test/CodeGen/builtins-ppc-p8vector.c  | 479
5 files changed, 993 insertions, 403 deletions
diff --git a/clang/include/clang/Basic/BuiltinsPPC.def b/clang/include/clang/Basic/BuiltinsPPC.def
index 3e2861d43e8..5681c1f2ae1 100644
--- a/clang/include/clang/Basic/BuiltinsPPC.def
+++ b/clang/include/clang/Basic/BuiltinsPPC.def
@@ -280,12 +280,21 @@ BUILTIN(__builtin_vsx_xvrspip, "V4fV4f", "")
BUILTIN(__builtin_vsx_xvcmpeqdp, "V2ULLiV2dV2d", "")
BUILTIN(__builtin_vsx_xvcmpeqsp, "V4UiV4fV4f", "")
+BUILTIN(__builtin_vsx_xvcmpeqdp_p, "iiV2dV2d", "")
+BUILTIN(__builtin_vsx_xvcmpeqsp_p, "iiV4fV4f", "")
+
BUILTIN(__builtin_vsx_xvcmpgedp, "V2ULLiV2dV2d", "")
BUILTIN(__builtin_vsx_xvcmpgesp, "V4UiV4fV4f", "")
+BUILTIN(__builtin_vsx_xvcmpgedp_p, "iiV2dV2d", "")
+BUILTIN(__builtin_vsx_xvcmpgesp_p, "iiV4fV4f", "")
+
BUILTIN(__builtin_vsx_xvcmpgtdp, "V2ULLiV2dV2d", "")
BUILTIN(__builtin_vsx_xvcmpgtsp, "V4UiV4fV4f", "")
+BUILTIN(__builtin_vsx_xvcmpgtdp_p, "iiV2dV2d", "")
+BUILTIN(__builtin_vsx_xvcmpgtsp_p, "iiV4fV4f", "")
+
BUILTIN(__builtin_vsx_xvrdpim, "V2dV2d", "")
BUILTIN(__builtin_vsx_xvrspim, "V4fV4f", "")
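
The "_p" suffix marks the predicate form of each comparison. The prototype
string "iiV2dV2d" decodes as: int return, an int CR6 predicate code, and two
vector double operands ("iiV4fV4f" is the single-precision analogue). A
minimal sketch of a direct call, assuming a VSX-enabled target (the function
name is illustrative; these builtins are normally reached through the
vec_all_ and vec_any_ wrappers added to altivec.h below):

  #include <altivec.h>
  int all_lanes_equal(vector double a, vector double b) {
    return __builtin_vsx_xvcmpeqdp_p(__CR6_LT, a, b); /* 1 iff every lane compares equal */
  }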
diff --git a/clang/lib/Headers/altivec.h b/clang/lib/Headers/altivec.h
index 8eb11dc3afb..453aa312ddf 100644
--- a/clang/lib/Headers/altivec.h
+++ b/clang/lib/Headers/altivec.h
@@ -278,6 +278,38 @@ vec_add(vector double __a, vector double __b) {
}
#endif // __VSX__
+/* vec_adde */
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed __int128 __ATTRS_o_ai
+vec_adde(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vaddeuqm(__a, __b, __c);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_adde(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vaddeuqm(__a, __b, __c);
+}
+#endif
+
+/* vec_addec */
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed __int128 __ATTRS_o_ai
+vec_addec(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vaddecuq(__a, __b, __c);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_addec(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vaddecuq(__a, __b, __c);
+}
+#endif
+
/* vec_vaddubm */
#define __builtin_altivec_vaddubm vec_vaddubm
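
The three entry points form a carry chain for 128-bit lanes: vec_addc yields
the carry-out of a + b, vec_adde adds a + b plus a carry-in, and vec_addec
yields the carry-out of that three-operand sum. A sketch of a 256-bit add
built from paired lanes, assuming the vec_add overload for __int128 that the
header already provides (the function name and hi/lo pairing are
illustrative):

  #if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
  static void add256(vector unsigned __int128 alo, vector unsigned __int128 ahi,
                     vector unsigned __int128 blo, vector unsigned __int128 bhi,
                     vector unsigned __int128 *rlo, vector unsigned __int128 *rhi) {
    vector unsigned __int128 c = vec_addc(alo, blo); /* carry-out of the low halves */
    *rlo = vec_add(alo, blo);                        /* low 128 bits of the sum */
    *rhi = vec_adde(ahi, bhi, c);                    /* high halves plus carry-in */
  }
  #endif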
@@ -390,6 +422,12 @@ vec_vaddfp(vector float __a, vector float __b) {
/* vec_addc */
+static vector signed int __ATTRS_o_ai vec_addc(vector signed int __a,
+ vector signed int __b) {
+ return (vector signed int)__builtin_altivec_vaddcuw((vector unsigned int)__a,
+ (vector unsigned int)__b);
+}
+
static vector unsigned int __ATTRS_o_ai vec_addc(vector unsigned int __a,
vector unsigned int __b) {
return __builtin_altivec_vaddcuw(__a, __b);
@@ -398,7 +436,9 @@ static vector unsigned int __ATTRS_o_ai vec_addc(vector unsigned int __a,
#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
static vector signed __int128 __ATTRS_o_ai
vec_addc(vector signed __int128 __a, vector signed __int128 __b) {
- return __builtin_altivec_vaddcuq(__a, __b);
+ return (vector signed __int128)__builtin_altivec_vaddcuq(
+ (vector unsigned __int128)__a,
+ (vector unsigned __int128)__b);
}
static vector unsigned __int128 __ATTRS_o_ai
@@ -1512,48 +1552,6 @@ vec_cmpeq(vector double __a, vector double __b) {
}
#endif
-/* vec_cmpge */
-
-static vector bool int __ATTRS_o_ai
-vec_cmpge(vector float __a, vector float __b) {
-#ifdef __VSX__
- return (vector bool int)__builtin_vsx_xvcmpgesp(__a, __b);
-#else
- return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static vector bool long long __ATTRS_o_ai
-vec_cmpge(vector double __a, vector double __b) {
- return (vector bool long long)__builtin_vsx_xvcmpgedp(__a, __b);
-}
-#endif
-
-#ifdef __POWER8_VECTOR__
-/* Forwrad declarations as the functions are used here */
-static vector bool long long __ATTRS_o_ai
-vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b);
-static vector bool long long __ATTRS_o_ai
-vec_cmpgt(vector signed long long __a, vector signed long long __b);
-
-static vector bool long long __ATTRS_o_ai
-vec_cmpge(vector signed long long __a, vector signed long long __b) {
- return ~(vec_cmpgt(__b, __a));
-}
-
-static vector bool long long __ATTRS_o_ai
-vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
- return ~(vec_cmpgt(__b, __a));
-}
-#endif
-
-/* vec_vcmpgefp */
-
-static vector bool int __attribute__((__always_inline__))
-vec_vcmpgefp(vector float __a, vector float __b) {
- return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
-}
/* vec_cmpgt */
@@ -1613,6 +1611,74 @@ vec_cmpgt(vector double __a, vector double __b) {
return (vector bool long long)__builtin_vsx_xvcmpgtdp(__a, __b);
}
#endif
+
+/* vec_cmpge */
+
+static vector bool char __ATTRS_o_ai
+vec_cmpge (vector signed char __a, vector signed char __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_cmpge (vector unsigned char __a, vector unsigned char __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmpge (vector signed short __a, vector signed short __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmpge (vector unsigned short __a, vector unsigned short __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmpge (vector signed int __a, vector signed int __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmpge (vector unsigned int __a, vector unsigned int __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmpge(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return (vector bool int)__builtin_vsx_xvcmpgesp(__a, __b);
+#else
+ return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static vector bool long long __ATTRS_o_ai
+vec_cmpge(vector double __a, vector double __b) {
+ return (vector bool long long)__builtin_vsx_xvcmpgedp(__a, __b);
+}
+#endif
+
+#ifdef __POWER8_VECTOR__
+static vector bool long long __ATTRS_o_ai
+vec_cmpge(vector signed long long __a, vector signed long long __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
+ return ~(vec_cmpgt(__b, __a));
+}
+#endif
+
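
None of the integer overloads needs a dedicated instruction: on integer
lanes, a >= b is exactly the bitwise complement of b > a, so each one reuses
the vec_cmpgt path above. A sketch of the mask in use (name illustrative):

  /* Select the lane-wise maximum using the vec_cmpge mask. */
  static vector signed int lane_max(vector signed int a, vector signed int b) {
    vector bool int ge = vec_cmpge(a, b); /* all-ones where a[i] >= b[i] */
    return vec_sel(b, a, ge);             /* take a where the mask is set */
  }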
+/* vec_vcmpgefp */
+
+static vector bool int __attribute__((__always_inline__))
+vec_vcmpgefp(vector float __a, vector float __b) {
+ return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
+}
+
/* vec_vcmpgtsb */
static vector bool char __attribute__((__always_inline__))
@@ -1664,6 +1730,36 @@ vec_vcmpgtfp(vector float __a, vector float __b) {
/* vec_cmple */
+static vector bool char __ATTRS_o_ai
+vec_cmple (vector signed char __a, vector signed char __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_cmple (vector unsigned char __a, vector unsigned char __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmple (vector signed short __a, vector signed short __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmple (vector unsigned short __a, vector unsigned short __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmple (vector signed int __a, vector signed int __b) {
+ return vec_cmpge(__b, __a);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmple (vector unsigned int __a, vector unsigned int __b) {
+ return vec_cmpge(__b, __a);
+}
+
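
Each vec_cmple overload simply forwards to vec_cmpge with the operands
swapped (a <= b iff b >= a). A one-function sketch (name illustrative):

  static vector bool short lanes_le(vector signed short a, vector signed short b) {
    return vec_cmple(a, b); /* equivalent to vec_cmpge(b, a) */
  }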
static vector bool int __ATTRS_o_ai
vec_cmple(vector float __a, vector float __b) {
return vec_cmpge(__b, __a);
@@ -1837,6 +1933,20 @@ vec_vctuxs(vector float __a, int __b) {
return __builtin_altivec_vctuxs(__a, __b);
}
+/* vec_double */
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_double (vector signed long long __a) {
+ vector double __ret = { __a[0], __a[1] };
+ return __ret;
+}
+
+static vector double __ATTRS_o_ai vec_double (vector unsigned long long __a) {
+ vector double __ret = { __a[0], __a[1] };
+ return __ret;
+}
+#endif
+
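
vec_double widens each 64-bit integer lane to double through an element-wise
initializer, which Clang lowers to one sitofp/uitofp per lane (checked in the
p8vector test below). A sketch that scales the converted lanes, assuming the
vec_splats(double) overload added later in this patch (names illustrative):

  #ifdef __VSX__
  static vector double to_seconds(vector unsigned long long ticks, double hz) {
    return vec_double(ticks) / vec_splats(hz); /* uitofp per lane, then lane-wise divide */
  }
  #endif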
/* vec_div */
/* Integer vector divides (vectors are scalarized, elements divided
@@ -1942,34 +2052,16 @@ static vector signed char __ATTRS_o_ai vec_eqv(vector signed char __a,
(vector unsigned int)__b);
}
-static vector signed char __ATTRS_o_ai vec_eqv(vector bool char __a,
- vector signed char __b) {
- return (vector signed char)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
-static vector signed char __ATTRS_o_ai vec_eqv(vector signed char __a,
- vector bool char __b) {
- return (vector signed char)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
static vector unsigned char __ATTRS_o_ai vec_eqv(vector unsigned char __a,
vector unsigned char __b) {
return (vector unsigned char)__builtin_vsx_xxleqv((vector unsigned int)__a,
(vector unsigned int)__b);
}
-static vector unsigned char __ATTRS_o_ai vec_eqv(vector bool char __a,
- vector unsigned char __b) {
- return (vector unsigned char)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
-static vector unsigned char __ATTRS_o_ai vec_eqv(vector unsigned char __a,
- vector bool char __b) {
- return (vector unsigned char)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
+static vector bool char __ATTRS_o_ai vec_eqv(vector bool char __a,
+ vector bool char __b) {
+ return (vector bool char)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
}
static vector signed short __ATTRS_o_ai vec_eqv(vector signed short __a,
@@ -1978,70 +2070,33 @@ static vector signed short __ATTRS_o_ai vec_eqv(vector signed short __a,
(vector unsigned int)__b);
}
-static vector signed short __ATTRS_o_ai vec_eqv(vector bool short __a,
- vector signed short __b) {
- return (vector signed short)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
-static vector signed short __ATTRS_o_ai vec_eqv(vector signed short __a,
- vector bool short __b) {
- return (vector signed short)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
static vector unsigned short __ATTRS_o_ai vec_eqv(vector unsigned short __a,
vector unsigned short __b) {
return (vector unsigned short)__builtin_vsx_xxleqv((vector unsigned int)__a,
(vector unsigned int)__b);
}
-static vector unsigned short __ATTRS_o_ai vec_eqv(vector bool short __a,
- vector unsigned short __b) {
- return (vector unsigned short)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
-static vector unsigned short __ATTRS_o_ai vec_eqv(vector unsigned short __a,
- vector bool short __b) {
- return (vector unsigned short)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
-static vector signed int __ATTRS_o_ai vec_eqv(vector signed int __a,
- vector signed int __b) {
- return (vector signed int)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
-static vector signed int __ATTRS_o_ai vec_eqv(vector bool int __a,
- vector signed int __b) {
- return (vector signed int)__builtin_vsx_xxleqv((vector unsigned int)__a,
+static vector bool short __ATTRS_o_ai vec_eqv(vector bool short __a,
+ vector bool short __b) {
+ return (vector bool short)__builtin_vsx_xxleqv((vector unsigned int)__a,
(vector unsigned int)__b);
}
static vector signed int __ATTRS_o_ai vec_eqv(vector signed int __a,
- vector bool int __b) {
+ vector signed int __b) {
return (vector signed int)__builtin_vsx_xxleqv((vector unsigned int)__a,
(vector unsigned int)__b);
}
static vector unsigned int __ATTRS_o_ai vec_eqv(vector unsigned int __a,
vector unsigned int __b) {
- return __builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
-static vector unsigned int __ATTRS_o_ai vec_eqv(vector bool int __a,
- vector unsigned int __b) {
- return __builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
+ return __builtin_vsx_xxleqv(__a, __b);
}
-static vector unsigned int __ATTRS_o_ai vec_eqv(vector unsigned int __a,
- vector bool int __b) {
- return __builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
+static vector bool int __ATTRS_o_ai vec_eqv(vector bool int __a,
+ vector bool int __b) {
+ return (vector bool int)__builtin_vsx_xxleqv((vector unsigned int)__a,
+ (vector unsigned int)__b);
}
static vector signed long long __ATTRS_o_ai
@@ -2050,33 +2105,15 @@ vec_eqv(vector signed long long __a, vector signed long long __b) {
__builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
}
-static vector signed long long __ATTRS_o_ai
-vec_eqv(vector bool long long __a, vector signed long long __b) {
- return (vector signed long long)
- __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
-}
-
-static vector signed long long __ATTRS_o_ai
-vec_eqv(vector signed long long __a, vector bool long long __b) {
- return (vector signed long long)
- __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
-}
-
static vector unsigned long long __ATTRS_o_ai
vec_eqv(vector unsigned long long __a, vector unsigned long long __b) {
return (vector unsigned long long)
__builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
}
-static vector unsigned long long __ATTRS_o_ai
-vec_eqv(vector bool long long __a, vector unsigned long long __b) {
- return (vector unsigned long long)
- __builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
-}
-
-static vector unsigned long long __ATTRS_o_ai
-vec_eqv(vector unsigned long long __a, vector bool long long __b) {
- return (vector unsigned long long)
+static vector bool long long __ATTRS_o_ai
+vec_eqv(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)
__builtin_vsx_xxleqv((vector unsigned int)__a, (vector unsigned int)__b);
}
@@ -2085,35 +2122,11 @@ static vector float __ATTRS_o_ai vec_eqv(vector float __a, vector float __b) {
(vector unsigned int)__b);
}
-static vector float __ATTRS_o_ai vec_eqv(vector bool int __a,
- vector float __b) {
- return (vector float)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
-static vector float __ATTRS_o_ai vec_eqv(vector float __a,
- vector bool int __b) {
- return (vector float)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
static vector double __ATTRS_o_ai vec_eqv(vector double __a,
vector double __b) {
return (vector double)__builtin_vsx_xxleqv((vector unsigned int)__a,
(vector unsigned int)__b);
}
-
-static vector double __ATTRS_o_ai vec_eqv(vector bool long long __a,
- vector double __b) {
- return (vector double)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
-
-static vector double __ATTRS_o_ai vec_eqv(vector double __a,
- vector bool long long __b) {
- return (vector double)__builtin_vsx_xxleqv((vector unsigned int)__a,
- (vector unsigned int)__b);
-}
#endif
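
Every vec_eqv overload funnels through __builtin_vsx_xxleqv on <4 x i32>,
computing the bitwise equivalence ~(a ^ b); the lane type only affects the
casts. Dropping the mixed bool/non-bool overloads and adding the matching
bool-bool ones keeps overload resolution unambiguous under lax vector
conversions. A sketch (name illustrative):

  /* All-ones exactly where the two inputs agree bit-for-bit. */
  static vector unsigned int same_bits(vector unsigned int a, vector unsigned int b) {
    return vec_eqv(a, b);
  }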
/* vec_expte */
@@ -2815,6 +2828,38 @@ static vector unsigned char __ATTRS_o_ai vec_lvsr(int __a, const float *__b) {
#endif
/* vec_madd */
+static vector signed short __ATTRS_o_ai
+vec_mladd(vector signed short, vector signed short, vector signed short);
+static vector signed short __ATTRS_o_ai
+vec_mladd(vector signed short, vector unsigned short, vector unsigned short);
+static vector signed short __ATTRS_o_ai
+vec_mladd(vector unsigned short, vector signed short, vector signed short);
+static vector unsigned short __ATTRS_o_ai
+vec_mladd(vector unsigned short, vector unsigned short, vector unsigned short);
+
+static vector signed short __ATTRS_o_ai
+vec_madd(vector signed short __a, vector signed short __b,
+ vector signed short __c) {
+ return vec_mladd(__a, __b, __c);
+}
+
+static vector signed short __ATTRS_o_ai
+vec_madd(vector signed short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return vec_mladd(__a, __b, __c);
+}
+
+static vector signed short __ATTRS_o_ai
+vec_madd(vector unsigned short __a, vector signed short __b,
+ vector signed short __c) {
+ return vec_mladd(__a, __b, __c);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_madd(vector unsigned short __a, vector unsigned short __b,
+ vector unsigned short __c) {
+ return vec_mladd(__a, __b, __c);
+}
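
The integer vec_madd overloads are thin wrappers over vec_mladd, a modular
multiply-low plus add; the forward declarations above are needed because
vec_mladd is defined later in the header. A sketch (name illustrative):

  /* Per-lane (a*b + c) modulo 2^16 on halfword lanes. */
  static vector signed short mul_add16(vector signed short a, vector signed short b,
                                       vector signed short c) {
    return vec_madd(a, b, c); /* forwards to vec_mladd(a, b, c) */
  }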
static vector float __ATTRS_o_ai
vec_madd(vector float __a, vector float __b, vector float __c) {
@@ -3256,6 +3301,16 @@ vec_mergeh(vector bool long long __a, vector unsigned long long __b) {
0x10, 0x11, 0x12, 0x13,
0x14, 0x15, 0x16, 0x17));
}
+
+static vector bool long long __ATTRS_o_ai
+vec_mergeh(vector bool long long __a, vector bool long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17));
+}
+
static vector double __ATTRS_o_ai vec_mergeh(vector double __a,
vector double __b) {
return vec_perm(__a, __b,
@@ -3519,6 +3574,14 @@ vec_mergel(vector bool long long __a, vector unsigned long long __b) {
0x18, 0X19, 0x1A, 0x1B,
0x1C, 0x1D, 0x1E, 0x1F));
}
+static vector bool long long __ATTRS_o_ai
+vec_mergel(vector bool long long __a, vector bool long long __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x18, 0X19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
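
For 64-bit lanes both merges are expressed through vec_perm: bytes
0x00-0x07/0x10-0x17 pick the first doubleword of each input (mergeh) and
0x08-0x0F/0x18-0x1F the second (mergel). A sketch (name illustrative):

  /* Interleave the element-0 lanes of two predicate vectors. */
  static vector bool long long merge_high(vector bool long long a,
                                          vector bool long long b) {
    return vec_mergeh(a, b); /* result = { a[0], b[0] } */
  }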
static vector double __ATTRS_o_ai
vec_mergel(vector double __a, vector double __b) {
return vec_perm(__a, __b,
@@ -4439,6 +4502,11 @@ static vector unsigned char __ATTRS_o_ai vec_nand(vector bool char __a,
return ~(__a & __b);
}
+static vector bool char __ATTRS_o_ai vec_nand(vector bool char __a,
+ vector bool char __b) {
+ return ~(__a & __b);
+}
+
static vector signed short __ATTRS_o_ai vec_nand(vector signed short __a,
vector signed short __b) {
return ~(__a & __b);
@@ -4465,8 +4533,8 @@ static vector unsigned short __ATTRS_o_ai vec_nand(vector unsigned short __a,
}
-static vector unsigned short __ATTRS_o_ai vec_nand(vector bool short __a,
- vector unsigned short __b) {
+static vector bool short __ATTRS_o_ai vec_nand(vector bool short __a,
+ vector bool short __b) {
return ~(__a & __b);
}
@@ -4501,6 +4569,11 @@ static vector unsigned int __ATTRS_o_ai vec_nand(vector bool int __a,
return ~(__a & __b);
}
+static vector bool int __ATTRS_o_ai vec_nand(vector bool int __a,
+ vector bool int __b) {
+ return ~(__a & __b);
+}
+
static vector signed long long __ATTRS_o_ai
vec_nand(vector signed long long __a, vector signed long long __b) {
return ~(__a & __b);
@@ -4531,6 +4604,11 @@ vec_nand(vector bool long long __a, vector unsigned long long __b) {
return ~(__a & __b);
}
+static vector bool long long __ATTRS_o_ai
+vec_nand(vector bool long long __a, vector bool long long __b) {
+ return ~(__a & __b);
+}
+
#endif
/* vec_nmadd */
@@ -4909,6 +4987,11 @@ static vector unsigned char __ATTRS_o_ai vec_orc(vector bool char __a,
return __a | ~__b;
}
+static vector bool char __ATTRS_o_ai vec_orc(vector bool char __a,
+ vector bool char __b) {
+ return __a | ~__b;
+}
+
static vector signed short __ATTRS_o_ai vec_orc(vector signed short __a,
vector signed short __b) {
return __a | ~__b;
@@ -4939,6 +5022,11 @@ vec_orc(vector bool short __a, vector unsigned short __b) {
return __a | ~__b;
}
+static vector bool short __ATTRS_o_ai vec_orc(vector bool short __a,
+ vector bool short __b) {
+ return __a | ~__b;
+}
+
static vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
vector signed int __b) {
return __a | ~__b;
@@ -4969,6 +5057,11 @@ static vector unsigned int __ATTRS_o_ai vec_orc(vector bool int __a,
return __a | ~__b;
}
+static vector bool int __ATTRS_o_ai vec_orc(vector bool int __a,
+ vector bool int __b) {
+ return __a | ~__b;
+}
+
static vector signed long long __ATTRS_o_ai
vec_orc(vector signed long long __a, vector signed long long __b) {
return __a | ~__b;
@@ -4998,6 +5091,11 @@ static vector unsigned long long __ATTRS_o_ai
vec_orc(vector bool long long __a, vector unsigned long long __b) {
return __a | ~__b;
}
+
+static vector bool long long __ATTRS_o_ai
+vec_orc(vector bool long long __a, vector bool long long __b) {
+ return __a | ~__b;
+}
#endif
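
vec_nand and vec_orc are plain vector-extension expressions, ~(a & b) and
a | ~b, so the new bool-typed overloads need no extra builtins. A sketch
(name illustrative):

  /* Lanes that are not set in both inputs. */
  static vector bool int not_both(vector bool int a, vector bool int b) {
    return vec_nand(a, b); /* ~(a & b), lane-wise */
  }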
/* vec_vor */
@@ -9191,17 +9289,27 @@ vec_sub(vector unsigned __int128 __a, vector unsigned __int128 __b) {
}
#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static vector float __ATTRS_o_ai vec_sub(vector float __a, vector float __b) {
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_sub(vector signed long long __a, vector signed long long __b) {
+ return __a - __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_sub(vector unsigned long long __a, vector unsigned long long __b) {
return __a - __b;
}
-#ifdef __VSX__
static vector double __ATTRS_o_ai
vec_sub(vector double __a, vector double __b) {
return __a - __b;
}
#endif
+static vector float __ATTRS_o_ai vec_sub(vector float __a, vector float __b) {
+ return __a - __b;
+}
+
/* vec_vsububm */
#define __builtin_altivec_vsububm vec_vsububm
@@ -10390,7 +10498,12 @@ static unsigned char __ATTRS_o_ai vec_extract(vector unsigned char __a,
return __a[__b];
}
-static short __ATTRS_o_ai vec_extract(vector short __a, int __b) {
+static unsigned char __ATTRS_o_ai vec_extract(vector bool char __a,
+ int __b) {
+ return __a[__b];
+}
+
+static signed short __ATTRS_o_ai vec_extract(vector signed short __a, int __b) {
return __a[__b];
}
@@ -10399,7 +10512,12 @@ static unsigned short __ATTRS_o_ai vec_extract(vector unsigned short __a,
return __a[__b];
}
-static int __ATTRS_o_ai vec_extract(vector int __a, int __b) {
+static unsigned short __ATTRS_o_ai vec_extract(vector bool short __a,
+ int __b) {
+ return __a[__b];
+}
+
+static signed int __ATTRS_o_ai vec_extract(vector signed int __a, int __b) {
return __a[__b];
}
@@ -10407,6 +10525,31 @@ static unsigned int __ATTRS_o_ai vec_extract(vector unsigned int __a, int __b) {
return __a[__b];
}
+static unsigned int __ATTRS_o_ai vec_extract(vector bool int __a, int __b) {
+ return __a[__b];
+}
+
+#ifdef __VSX__
+static signed long long __ATTRS_o_ai vec_extract(vector signed long long __a,
+ int __b) {
+ return __a[__b];
+}
+
+static unsigned long long __ATTRS_o_ai
+vec_extract(vector unsigned long long __a, int __b) {
+ return __a[__b];
+}
+
+static unsigned long long __ATTRS_o_ai vec_extract(vector bool long long __a,
+ int __b) {
+ return __a[__b];
+}
+
+static double __ATTRS_o_ai vec_extract(vector double __a, int __b) {
+ return __a[__b];
+}
+#endif
+
static float __ATTRS_o_ai vec_extract(vector float __a, int __b) {
return __a[__b];
}
@@ -10427,8 +10570,16 @@ static vector unsigned char __ATTRS_o_ai vec_insert(unsigned char __a,
return __b;
}
-static vector short __ATTRS_o_ai vec_insert(short __a, vector short __b,
- int __c) {
+static vector bool char __ATTRS_o_ai vec_insert(unsigned char __a,
+ vector bool char __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector signed short __ATTRS_o_ai vec_insert(signed short __a,
+ vector signed short __b,
+ int __c) {
__b[__c] = __a;
return __b;
}
@@ -10440,7 +10591,16 @@ static vector unsigned short __ATTRS_o_ai vec_insert(unsigned short __a,
return __b;
}
-static vector int __ATTRS_o_ai vec_insert(int __a, vector int __b, int __c) {
+static vector bool short __ATTRS_o_ai vec_insert(unsigned short __a,
+ vector bool short __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector signed int __ATTRS_o_ai vec_insert(signed int __a,
+ vector signed int __b,
+ int __c) {
__b[__c] = __a;
return __b;
}
@@ -10452,6 +10612,38 @@ static vector unsigned int __ATTRS_o_ai vec_insert(unsigned int __a,
return __b;
}
+static vector bool int __ATTRS_o_ai vec_insert(unsigned int __a,
+ vector bool int __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai
+vec_insert(signed long long __a, vector signed long long __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_insert(unsigned long long __a, vector unsigned long long __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+
+static vector bool long long __ATTRS_o_ai
+vec_insert(unsigned long long __a, vector bool long long __b, int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+static vector double __ATTRS_o_ai vec_insert(double __a, vector double __b,
+ int __c) {
+ __b[__c] = __a;
+ return __b;
+}
+#endif
+
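
vec_extract and vec_insert are defined with ordinary subscripting, so they
lower to extractelement/insertelement for the new bool, 64-bit, and double
element types as well. Note that vec_insert returns the updated copy; __b is
passed by value, so nothing is mutated in place. A sketch (name illustrative):

  #ifdef __VSX__
  /* Swap the two lanes of a vector double through scalar accesses. */
  static vector double swap_lanes(vector double v) {
    double lo = vec_extract(v, 0);
    v = vec_insert(vec_extract(v, 1), v, 0);
    return vec_insert(lo, v, 1);
  }
  #endif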
static vector float __ATTRS_o_ai vec_insert(float __a, vector float __b,
int __c) {
__b[__c] = __a;
@@ -11376,6 +11568,33 @@ static vector unsigned int __ATTRS_o_ai vec_splats(unsigned int __a) {
return (vector unsigned int)(__a);
}
+#ifdef __VSX__
+static vector signed long long __ATTRS_o_ai vec_splats(signed long long __a) {
+ return (vector signed long long)(__a);
+}
+
+static vector unsigned long long __ATTRS_o_ai
+vec_splats(unsigned long long __a) {
+ return (vector unsigned long long)(__a);
+}
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector signed __int128 __ATTRS_o_ai vec_splats(signed __int128 __a) {
+ return (vector signed __int128)(__a);
+}
+
+static vector unsigned __int128 __ATTRS_o_ai
+vec_splats(unsigned __int128 __a) {
+ return (vector unsigned __int128)(__a);
+}
+
+#endif
+
+static vector double __ATTRS_o_ai vec_splats(double __a) {
+ return (vector double)(__a);
+}
+#endif
+
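
vec_splats broadcasts a scalar by building a vector from a single
initializer; these overloads extend it to 64-bit, __int128, and double
element types. A sketch (name illustrative):

  #ifdef __VSX__
  static vector double scale(vector double v, double k) {
    return v * vec_splats(k); /* lane-wise multiply by the broadcast scalar */
  }
  #endif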
static vector float __ATTRS_o_ai vec_splats(float __a) {
return (vector float)(__a);
}
@@ -11546,8 +11765,18 @@ static int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_all_eq(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_LT, __a, __b);
+#else
return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_eq(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_LT, __a, __b);
}
+#endif
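
The first argument to each _p builtin is a CR6 predicate code: __CR6_LT asks
"did every lane compare true", __CR6_EQ asks "did no lane compare true", and
the _REV forms are their negations (the constants are defined near the top of
altivec.h). The VSX paths reuse the same codes with the xvcmp*_p builtins
added above. A sketch (name illustrative):

  #ifdef __VSX__
  /* Fold a whole-vector comparison of doubles to a scalar truth value. */
  static int all_equal_dp(vector double a, vector double b) {
    return vec_all_eq(a, b); /* xvcmpeqdp. sets CR6; the _p builtin reads it */
  }
  #endif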
/* vec_all_ge */
@@ -11698,8 +11927,18 @@ static int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_all_ge(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __a, __b);
+#else
return __builtin_altivec_vcmpgefp_p(__CR6_LT, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_ge(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __a, __b);
}
+#endif
/* vec_all_gt */
@@ -11850,8 +12089,18 @@ static int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_all_gt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __a, __b);
+#else
return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_gt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __a, __b);
}
+#endif
/* vec_all_in */
@@ -12010,9 +12259,19 @@ static int __ATTRS_o_ai vec_all_le(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_all_le(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __b, __a);
+#else
return __builtin_altivec_vcmpgefp_p(__CR6_LT, __b, __a);
+#endif
}
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_le(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __b, __a);
+}
+#endif
+
/* vec_all_lt */
static int __ATTRS_o_ai vec_all_lt(vector signed char __a,
@@ -12163,15 +12422,35 @@ static int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_all_lt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __b, __a);
+#else
return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_lt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __b, __a);
}
+#endif
/* vec_all_nan */
-static int __attribute__((__always_inline__)) vec_all_nan(vector float __a) {
+static int __ATTRS_o_ai vec_all_nan(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __a);
+#else
return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __a);
+#endif
}
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_nan(vector double __a) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __a);
+}
+#endif
+
/* vec_all_ne */
static int __ATTRS_o_ai vec_all_ne(vector signed char __a,
@@ -12337,22 +12616,54 @@ static int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_all_ne(vector float __a, vector float __b) {
+#ifdef __VSX__
+  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __b);
+#else
return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __b);
+#endif
}
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_all_ne(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __b);
+}
+#endif
+
/* vec_all_nge */
-static int __attribute__((__always_inline__))
+static int __ATTRS_o_ai
vec_all_nge(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_EQ, __a, __b);
+#else
return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai
+vec_all_nge(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_EQ, __a, __b);
}
+#endif
/* vec_all_ngt */
-static int __attribute__((__always_inline__))
+static int __ATTRS_o_ai
vec_all_ngt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ, __a, __b);
+#else
return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai
+vec_all_ngt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ, __a, __b);
}
+#endif
/* vec_all_nle */
@@ -12540,8 +12851,18 @@ static int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_any_eq(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ_REV, __a, __b);
+#else
return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_eq(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ_REV, __a, __b);
}
+#endif
/* vec_any_ge */
@@ -12700,9 +13021,19 @@ static int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_any_ge(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __a, __b);
+#else
return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __a, __b);
+#endif
}
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_ge(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
/* vec_any_gt */
static int __ATTRS_o_ai vec_any_gt(vector signed char __a,
@@ -12860,8 +13191,18 @@ static int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_any_gt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __a, __b);
+#else
return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_gt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __a, __b);
}
+#endif
/* vec_any_le */
@@ -13020,8 +13361,18 @@ static int __ATTRS_o_ai vec_any_le(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_any_le(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __b, __a);
+#else
return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_le(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __b, __a);
}
+#endif
/* vec_any_lt */
@@ -13180,8 +13531,18 @@ static int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_any_lt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __b, __a);
+#else
return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_lt(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __b, __a);
}
+#endif
/* vec_any_nan */
@@ -13354,9 +13715,19 @@ static int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
#endif
static int __ATTRS_o_ai vec_any_ne(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_LT_REV, __a, __b);
+#else
return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __b);
+#endif
}
+#ifdef __VSX__
+static int __ATTRS_o_ai vec_any_ne(vector double __a, vector double __b) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_LT_REV, __a, __b);
+}
+#endif
+
/* vec_any_nge */
static int __attribute__((__always_inline__))
@@ -13416,6 +13787,12 @@ to the inconsistency (__builtin_crypto_ prefix on builtins that cannot be
removed with -mno-crypto). This is under development.
*/
#ifdef __CRYPTO__
+#define vec_sbox_be __builtin_altivec_crypto_vsbox
+#define vec_cipher_be __builtin_altivec_crypto_vcipher
+#define vec_cipherlast_be __builtin_altivec_crypto_vcipherlast
+#define vec_ncipher_be __builtin_altivec_crypto_vncipher
+#define vec_ncipherlast_be __builtin_altivec_crypto_vncipherlast
+
static vector unsigned long long __attribute__((__always_inline__))
__builtin_crypto_vsbox(vector unsigned long long __a) {
return __builtin_altivec_crypto_vsbox(__a);
@@ -13447,6 +13824,11 @@ __builtin_crypto_vncipherlast(vector unsigned long long __a,
#define __builtin_crypto_vshasigmad __builtin_altivec_crypto_vshasigmad
#define __builtin_crypto_vshasigmaw __builtin_altivec_crypto_vshasigmaw
+
+#define vec_shasigma_be(X, Y, Z) \
+ _Generic((X), vector unsigned int: __builtin_crypto_vshasigmaw, \
+ vector unsigned long long: __builtin_crypto_vshasigmad) \
+((X), (Y), (Z))
#endif
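
vec_shasigma_be uses C11 _Generic to dispatch on the element width: vector
unsigned int selects __builtin_crypto_vshasigmaw and vector unsigned long
long selects __builtin_crypto_vshasigmad, with the function-type and
sigma-selection immediates passed through unchanged. A sketch using the same
immediates as the crypto test below (name illustrative):

  #ifdef __CRYPTO__
  static vector unsigned int sigma_w(vector unsigned int x) {
    return vec_shasigma_be(x, 1, 15); /* expands to __builtin_crypto_vshasigmaw */
  }
  #endif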
#ifdef __POWER8_VECTOR__
@@ -13494,8 +13876,9 @@ __builtin_crypto_vpmsumb(vector unsigned int __a, vector unsigned int __b) {
return __builtin_altivec_crypto_vpmsumw(__a, __b);
}
-static vector unsigned long long __ATTRS_o_ai __builtin_crypto_vpmsumb(
- vector unsigned long long __a, vector unsigned long long __b) {
+static vector unsigned long long __ATTRS_o_ai
+__builtin_crypto_vpmsumb(vector unsigned long long __a,
+ vector unsigned long long __b) {
return __builtin_altivec_crypto_vpmsumd(__a, __b);
}
@@ -13504,6 +13887,9 @@ static vector signed char __ATTRS_o_ai vec_vgbbd (vector signed char __a)
return __builtin_altivec_vgbbd((vector unsigned char) __a);
}
+#define vec_pmsum_be __builtin_crypto_vpmsumb
+#define vec_gb __builtin_altivec_vgbbd
+
static vector unsigned char __ATTRS_o_ai vec_vgbbd (vector unsigned char __a)
{
return __builtin_altivec_vgbbd(__a);
@@ -13521,6 +13907,14 @@ vec_vbpermq (vector unsigned char __a, vector unsigned char __b)
{
return __builtin_altivec_vbpermq(__a, __b);
}
+
+#ifdef __powerpc64__
+static vector unsigned long long __attribute__((__always_inline__))
+vec_bperm (vector unsigned __int128 __a, vector unsigned char __b) {
+ return __builtin_altivec_vbpermq((vector unsigned char) __a,
+ (vector unsigned char) __b);
+}
+#endif
#endif
#undef __ATTRS_o_ai
diff --git a/clang/test/CodeGen/builtins-ppc-altivec.c b/clang/test/CodeGen/builtins-ppc-altivec.c
index 32166b50f98..9539d6ca7af 100644
--- a/clang/test/CodeGen/builtins-ppc-altivec.c
+++ b/clang/test/CodeGen/builtins-ppc-altivec.c
@@ -940,6 +940,30 @@ void test2() {
// CHECK-LE: @llvm.ppc.altivec.vcmpeqfp
/* vec_cmpge */
+ res_vbc = vec_cmpge(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb
+
+ res_vbc = vec_cmpge(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub
+
+ res_vbs = vec_cmpge(vs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh
+
+ res_vbs = vec_cmpge(vus, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh
+
+ res_vbi = vec_cmpge(vi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw
+
+ res_vbi = vec_cmpge(vui, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw
+
res_vbi = vec_cmpge(vf, vf);
// CHECK: @llvm.ppc.altivec.vcmpgefp
// CHECK-LE: @llvm.ppc.altivec.vcmpgefp
@@ -1010,6 +1034,30 @@ void test5() {
// CHECK-LE: @llvm.ppc.altivec.vcmpgtfp
/* vec_cmple */
+ res_vbc = vec_cmple(vsc, vsc);
+// CHECK: @llvm.ppc.altivec.vcmpgtsb
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsb
+
+ res_vbc = vec_cmple(vuc, vuc);
+// CHECK: @llvm.ppc.altivec.vcmpgtub
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtub
+
+ res_vbs = vec_cmple(vs, vs);
+// CHECK: @llvm.ppc.altivec.vcmpgtsh
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsh
+
+ res_vbs = vec_cmple(vus, vus);
+// CHECK: @llvm.ppc.altivec.vcmpgtuh
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuh
+
+ res_vbi = vec_cmple(vi, vi);
+// CHECK: @llvm.ppc.altivec.vcmpgtsw
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtsw
+
+ res_vbi = vec_cmple(vui, vui);
+// CHECK: @llvm.ppc.altivec.vcmpgtuw
+// CHECK-LE: @llvm.ppc.altivec.vcmpgtuw
+
res_vbi = vec_cmple(vf, vf);
// CHECK: @llvm.ppc.altivec.vcmpgefp
// CHECK-LE: @llvm.ppc.altivec.vcmpgefp
@@ -5666,6 +5714,10 @@ void test6() {
// CHECK: extractelement <16 x i8>
// CHECK-LE: extractelement <16 x i8>
+ res_uc = vec_extract(vbc, param_i);
+// CHECK: extractelement <16 x i8>
+// CHECK-LE: extractelement <16 x i8>
+
res_s = vec_extract(vs, param_i);
// CHECK: extractelement <8 x i16>
// CHECK-LE: extractelement <8 x i16>
@@ -5674,6 +5726,10 @@ void test6() {
// CHECK: extractelement <8 x i16>
// CHECK-LE: extractelement <8 x i16>
+ res_us = vec_extract(vbs, param_i);
+// CHECK: extractelement <8 x i16>
+// CHECK-LE: extractelement <8 x i16>
+
res_i = vec_extract(vi, param_i);
// CHECK: extractelement <4 x i32>
// CHECK-LE: extractelement <4 x i32>
@@ -5682,6 +5738,10 @@ void test6() {
// CHECK: extractelement <4 x i32>
// CHECK-LE: extractelement <4 x i32>
+ res_ui = vec_extract(vbi, param_i);
+// CHECK: extractelement <4 x i32>
+// CHECK-LE: extractelement <4 x i32>
+
res_f = vec_extract(vf, param_i);
// CHECK: extractelement <4 x float>
// CHECK-LE: extractelement <4 x float>
@@ -5695,6 +5755,10 @@ void test6() {
// CHECK: insertelement <16 x i8>
// CHECK-LE: insertelement <16 x i8>
+ res_vbc = vec_insert(param_uc, vbc, param_i);
+// CHECK: insertelement <16 x i8>
+// CHECK-LE: insertelement <16 x i8>
+
res_vs = vec_insert(param_s, vs, param_i);
// CHECK: insertelement <8 x i16>
// CHECK-LE: insertelement <8 x i16>
@@ -5703,6 +5767,10 @@ void test6() {
// CHECK: insertelement <8 x i16>
// CHECK-LE: insertelement <8 x i16>
+ res_vbs = vec_insert(param_us, vbs, param_i);
+// CHECK: insertelement <8 x i16>
+// CHECK-LE: insertelement <8 x i16>
+
res_vi = vec_insert(param_i, vi, param_i);
// CHECK: insertelement <4 x i32>
// CHECK-LE: insertelement <4 x i32>
@@ -5711,6 +5779,10 @@ void test6() {
// CHECK: insertelement <4 x i32>
// CHECK-LE: insertelement <4 x i32>
+ res_vbi = vec_insert(param_ui, vbi, param_i);
+// CHECK: insertelement <4 x i32>
+// CHECK-LE: insertelement <4 x i32>
+
res_vf = vec_insert(param_f, vf, param_i);
// CHECK: insertelement <4 x float>
// CHECK-LE: insertelement <4 x float>
diff --git a/clang/test/CodeGen/builtins-ppc-crypto.c b/clang/test/CodeGen/builtins-ppc-crypto.c
index 6daf608b6ff..60bdc4982d9 100644
--- a/clang/test/CodeGen/builtins-ppc-crypto.c
+++ b/clang/test/CodeGen/builtins-ppc-crypto.c
@@ -6,10 +6,6 @@
// RUN: %clang_cc1 -faltivec -triple powerpc64-unknown-unknown \
// RUN: -target-feature +crypto -target-feature +power8-vector \
// RUN: -emit-llvm %s -o - | FileCheck %s
-
-// RUN: %clang_cc1 -faltivec -triple powerpc-unknown-unknown \
-// RUN: -target-feature +crypto -target-feature +power8-vector \
-// RUN: -emit-llvm %s -o - | FileCheck %s
#include <altivec.h>
#define B_INIT1 { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, \
0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10 };
@@ -130,7 +126,7 @@ vector unsigned long long test_vcipherlast(void)
// CHECK: @llvm.ppc.altivec.crypto.vcipherlast
}
-// CHECK: @llvm.ppc.altivec.crypto.vncipher
+// CHECK-LABEL: @test_vncipher
vector unsigned long long test_vncipher(void)
{
vector unsigned long long a = D_INIT1
@@ -302,3 +298,99 @@ vector unsigned long long test_vshasigmad_e(void)
// CHECK: @llvm.ppc.altivec.crypto.vshasigmad
}
+// CHECK-LABEL: @test_vec_sbox_be
+vector unsigned char test_vec_sbox_be(void)
+{
+ vector unsigned char a = B_INIT1
+ return vec_sbox_be(a);
+// CHECK: @llvm.ppc.altivec.crypto.vsbox
+}
+
+// CHECK-LABEL: @test_vec_cipher_be
+vector unsigned char test_vec_cipher_be(void)
+{
+ vector unsigned char a = B_INIT1
+ vector unsigned char b = B_INIT2
+ return vec_cipher_be(a, b);
+// CHECK: @llvm.ppc.altivec.crypto.vcipher
+}
+
+// CHECK-LABEL: @test_vec_cipherlast_be
+vector unsigned char test_vec_cipherlast_be(void)
+{
+ vector unsigned char a = B_INIT1
+ vector unsigned char b = B_INIT2
+ return vec_cipherlast_be(a, b);
+// CHECK: @llvm.ppc.altivec.crypto.vcipherlast
+}
+
+// CHECK-LABEL: @test_vec_ncipher_be
+vector unsigned char test_vec_ncipher_be(void)
+{
+ vector unsigned char a = B_INIT1
+ vector unsigned char b = B_INIT2
+ return vec_ncipher_be(a, b);
+// CHECK: @llvm.ppc.altivec.crypto.vncipher
+}
+
+// CHECK-LABEL: @test_vec_ncipherlast_be
+vector unsigned char test_vec_ncipherlast_be(void)
+{
+ vector unsigned char a = B_INIT1
+ vector unsigned char b = B_INIT2
+ return vec_ncipherlast_be(a, b);
+// CHECK: @llvm.ppc.altivec.crypto.vncipherlast
+}
+
+// CHECK-LABEL: @test_vec_shasigma_bew
+vector unsigned int test_vec_shasigma_bew(void)
+{
+ vector unsigned int a = W_INIT1
+ return vec_shasigma_be(a, 1, 15);
+// CHECK: @llvm.ppc.altivec.crypto.vshasigmaw
+}
+
+// CHECK-LABEL: @test_vec_shasigma_bed
+vector unsigned long long test_vec_shasigma_bed(void)
+{
+ vector unsigned long long a = D_INIT2
+ return vec_shasigma_be(a, 1, 15);
+// CHECK: @llvm.ppc.altivec.crypto.vshasigmad
+}
+
+// CHECK-LABEL: @test_vec_pmsum_beb
+vector unsigned short test_vec_pmsum_beb(void)
+{
+ vector unsigned char a = B_INIT1
+ vector unsigned char b = B_INIT2
+ return vec_pmsum_be(a, b);
+// CHECK: @llvm.ppc.altivec.crypto.vpmsumb
+}
+
+// CHECK-LABEL: @test_vec_pmsum_beh
+vector unsigned int test_vec_pmsum_beh(void)
+{
+ vector unsigned short a = H_INIT1
+ vector unsigned short b = H_INIT2
+ return vec_pmsum_be(a, b);
+// CHECK: @llvm.ppc.altivec.crypto.vpmsumh
+}
+
+// CHECK-LABEL: @test_vec_pmsum_bew
+vector unsigned long long test_vec_pmsum_bew(void)
+{
+ vector unsigned int a = W_INIT1
+ vector unsigned int b = W_INIT2
+ return vec_pmsum_be(a, b);
+// CHECK: @llvm.ppc.altivec.crypto.vpmsumw
+}
+
+// CHECK-LABEL: @test_vec_pmsum_bed
+vector unsigned __int128 test_vec_pmsum_bed(void)
+{
+ vector unsigned long long a = D_INIT1
+ vector unsigned long long b = D_INIT2
+ return vec_pmsum_be(a, b);
+// CHECK: @llvm.ppc.altivec.crypto.vpmsumd
+}
+
diff --git a/clang/test/CodeGen/builtins-ppc-p8vector.c b/clang/test/CodeGen/builtins-ppc-p8vector.c
index 208dd4347f6..29503f0134b 100644
--- a/clang/test/CodeGen/builtins-ppc-p8vector.c
+++ b/clang/test/CodeGen/builtins-ppc-p8vector.c
@@ -7,6 +7,13 @@
// (vec_cmpge, vec_cmple). Without this option, there is only one overload so
// it is selected.
+void dummy() { }
+signed int si;
+signed long long sll;
+unsigned long long ull;
+signed __int128 sx;
+unsigned __int128 ux;
+double d;
vector signed char vsc = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5 };
vector unsigned char vuc = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5 };
vector bool char vbc = { 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1 };
@@ -23,10 +30,17 @@ vector signed long long vsll = { 1, 2 };
vector unsigned long long vull = { 1, 2 };
vector bool long long vbll = { 1, 0 };
+vector signed __int128 vsx = { 1 };
+vector unsigned __int128 vux = { 1 };
+
vector float vfa = { 1.e-4f, -132.23f, -22.1, 32.00f };
vector double vda = { 1.e-11, -132.23e10 };
int res_i;
+double res_d;
+signed long long res_sll;
+unsigned long long res_ull;
+
vector signed char res_vsc;
vector unsigned char res_vuc;
vector bool char res_vbc;
@@ -43,7 +57,10 @@ vector signed long long res_vsll;
vector unsigned long long res_vull;
vector bool long long res_vbll;
-vector double res_vf;
+vector signed __int128 res_vsx;
+vector unsigned __int128 res_vux;
+
+vector float res_vf;
vector double res_vd;
// CHECK-LABEL: define void @test1
@@ -73,6 +90,37 @@ void test1() {
// CHECK-LE: add <2 x i64>
// CHECK-PPC: error: call to 'vec_add' is ambiguous
+ /* vec_addc */
+ res_vsi = vec_addc(vsi, vsi);
+// CHECK: @llvm.ppc.altivec.vaddcuw
+// CHECK-LE: @llvm.ppc.altivec.vaddcuw
+
+ res_vui = vec_addc(vui, vui);
+// CHECK: @llvm.ppc.altivec.vaddcuw
+// CHECK-LE: @llvm.ppc.altivec.vaddcuw
+
+ res_vsx = vec_addc(vsx, vsx);
+// CHECK: @llvm.ppc.altivec.vaddcuq
+// CHECK-LE: @llvm.ppc.altivec.vaddcuq
+
+ res_vux = vec_addc(vux, vux);
+// CHECK: @llvm.ppc.altivec.vaddcuq
+// CHECK-LE: @llvm.ppc.altivec.vaddcuq
+
+ /* vec_adde */
+ res_vsx = vec_adde(vsx, vsx, vsx);
+// CHECK: @llvm.ppc.altivec.vaddeuqm
+// CHECK-LE: @llvm.ppc.altivec.vaddeuqm
+
+ res_vux = vec_adde(vux, vux, vux);
+// CHECK: @llvm.ppc.altivec.vaddeuqm
+// CHECK-LE: @llvm.ppc.altivec.vaddeuqm
+
+ /* vec_addec */
+ res_vsx = vec_addec(vsx, vsx, vsx);
+// CHECK: @llvm.ppc.altivec.vaddecuq
+// CHECK-LE: @llvm.ppc.altivec.vaddecuq
+
/* vec_mergee */
res_vbi = vec_mergee(vbi, vbi);
// CHECK: @llvm.ppc.altivec.vperm
@@ -156,6 +204,15 @@ void test1() {
// CHECK-LE: call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %{{[0-9]*}}, <2 x i64> %{{[0-9]*}})
// CHECK-PPC: error: call to 'vec_cmplt' is ambiguous
+ /* vec_double */
+ res_vd = vec_double(vsll);
+// CHECK: sitofp i64 {{.+}} to double
+// CHECK-LE: sitofp i64 {{.+}} to double
+
+ res_vd = vec_double(vull);
+// CHECK: uitofp i64 {{.+}} to double
+// CHECK-LE: uitofp i64 {{.+}} to double
+
/* vec_eqv */
res_vsc = vec_eqv(vsc, vsc);
// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
@@ -168,18 +225,7 @@ void test1() {
// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
// CHECK-PPC: error: assigning to
- res_vsc = vec_eqv(vbc, vsc);
-// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
-// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
-// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
-// CHECK: bitcast <4 x i32> [[T3]] to <16 x i8>
-// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
-// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
-// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
-// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
-// CHECK-PPC: error: assigning to
-
- res_vsc = vec_eqv(vsc, vbc);
+ res_vsc = vec_eqv(vbc, vbc);
// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
@@ -201,28 +247,6 @@ void test1() {
// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
// CHECK-PPC: error: assigning to
- res_vuc = vec_eqv(vbc, vuc);
-// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
-// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
-// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
-// CHECK: bitcast <4 x i32> [[T3]] to <16 x i8>
-// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
-// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
-// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
-// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
-// CHECK-PPC: error: assigning to
-
- res_vuc = vec_eqv(vuc, vbc);
-// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
-// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
-// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
-// CHECK: bitcast <4 x i32> [[T3]] to <16 x i8>
-// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
-// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
-// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
-// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
-// CHECK-PPC: error: assigning to
-
res_vss = vec_eqv(vss, vss);
// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
@@ -234,18 +258,7 @@ void test1() {
// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
// CHECK-PPC: error: assigning to
- res_vss = vec_eqv(vbs, vss);
-// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
-// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
-// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
-// CHECK: bitcast <4 x i32> [[T3]] to <8 x i16>
-// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
-// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
-// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
-// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
-// CHECK-PPC: error: assigning to
-
- res_vss = vec_eqv(vss, vbs);
+ res_vss = vec_eqv(vbs, vbs);
// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
@@ -267,39 +280,12 @@ void test1() {
// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
// CHECK-PPC: error: assigning to
- res_vus = vec_eqv(vbs, vus);
-// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
-// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
-// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
-// CHECK: bitcast <4 x i32> [[T3]] to <8 x i16>
-// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
-// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
-// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
-// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
-// CHECK-PPC: error: assigning to
-
- res_vus = vec_eqv(vus, vbs);
-// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
-// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
-// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
-// CHECK: bitcast <4 x i32> [[T3]] to <8 x i16>
-// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
-// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
-// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
-// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
-// CHECK-PPC: error: assigning to
-
res_vsi = vec_eqv(vsi, vsi);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
// CHECK-PPC: error: assigning to
- res_vsi = vec_eqv(vbi, vsi);
-// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
-// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
-// CHECK-PPC: error: assigning to
-
- res_vsi = vec_eqv(vsi, vbi);
+ res_vsi = vec_eqv(vbi, vbi);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
// CHECK-PPC: error: assigning to
@@ -309,16 +295,6 @@ void test1() {
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
// CHECK-PPC: error: assigning to
- res_vui = vec_eqv(vbi, vui);
-// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
-// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
-// CHECK-PPC: error: assigning to
-
- res_vui = vec_eqv(vui, vbi);
-// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
-// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
-// CHECK-PPC: error: assigning to
-
res_vsll = vec_eqv(vsll, vsll);
// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
@@ -330,18 +306,7 @@ void test1() {
// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
// CHECK-PPC: error: assigning to
- res_vsll = vec_eqv(vbll, vsll);
-// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
-// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
-// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
-// CHECK: bitcast <4 x i32> [[T3]] to <2 x i64>
-// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
-// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
-// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
-// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
-// CHECK-PPC: error: assigning to
-
- res_vsll = vec_eqv(vsll, vbll);
+ res_vsll = vec_eqv(vbll, vbll);
// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
@@ -363,28 +328,6 @@ void test1() {
// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
// CHECK-PPC: error: assigning to
- res_vull = vec_eqv(vbll, vull);
-// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
-// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
-// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
-// CHECK: bitcast <4 x i32> [[T3]] to <2 x i64>
-// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
-// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
-// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
-// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
-// CHECK-PPC: error: assigning to
-
- res_vull = vec_eqv(vull, vbll);
-// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
-// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
-// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
-// CHECK: bitcast <4 x i32> [[T3]] to <2 x i64>
-// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
-// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
-// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
-// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
-// CHECK-PPC: error: assigning to
-
res_vf = vec_eqv(vfa, vfa);
// CHECK: [[T1:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
@@ -396,23 +339,6 @@ void test1() {
// CHECK-LE: bitcast <4 x i32> [[T3]] to <4 x float>
// CHECK-PPC: error: assigning to
- res_vf = vec_eqv(vbi, vfa);
-// CHECK: [[T2:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
-// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.+}}, <4 x i32> [[T2]])
-// CHECK: bitcast <4 x i32> [[T3]] to <4 x float>
-// CHECK-LE: [[T2:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
-// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.+}}, <4 x i32> [[T2]])
-// CHECK-PPC: error: assigning to
-
- res_vf = vec_eqv(vfa, vbi);
-// CHECK: [[T1:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
-// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32>
-// CHECK: bitcast <4 x i32> [[T3]] to <4 x float>
-// CHECK-LE: [[T1:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
-// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32>
-// CHECK-LE: bitcast <4 x i32> [[T3]] to <4 x float>
-// CHECK-PPC: error: assigning to
-
res_vd = vec_eqv(vda, vda);
// CHECK: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
@@ -424,24 +350,41 @@ void test1() {
// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x double>
// CHECK-PPC: error: assigning to
- res_vd = vec_eqv(vbll, vda);
-// CHECK: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
-// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.+}}, <4 x i32> [[T2]])
-// CHECK: bitcast <4 x i32> [[T3]] to <2 x double>
-// CHECK-LE: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
-// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.+}}, <4 x i32> [[T2]])
-// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x double>
-// CHECK-PPC: error: assigning to
+ /* vec_extract */
+ res_sll = vec_extract(vsll, si);
+// CHECK: extractelement <2 x i64>
+// CHECK-LE: extractelement <2 x i64>
- res_vd = vec_eqv(vda, vbll);
-// CHECK: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
-// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32>
-// CHECK: bitcast <4 x i32> [[T3]] to <2 x double>
-// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
-// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32>
-// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x double>
-// CHECK-PPC: error: assigning to
+ res_ull = vec_extract(vull, si);
+// CHECK: extractelement <2 x i64>
+// CHECK-LE: extractelement <2 x i64>
+
+ res_ull = vec_extract(vbll, si);
+// CHECK: extractelement <2 x i64>
+// CHECK-LE: extractelement <2 x i64>
+
+ res_d = vec_extract(vda, si);
+// CHECK: extractelement <2 x double>
+// CHECK-LE: extractelement <2 x double>
+
+ /* vec_insert */
+ res_vsll = vec_insert(sll, vsll, si);
+// CHECK: insertelement <2 x i64>
+// CHECK-LE: insertelement <2 x i64>
+
+ res_vbll = vec_insert(ull, vbll, si);
+// CHECK: insertelement <2 x i64>
+// CHECK-LE: insertelement <2 x i64>
+
+ res_vull = vec_insert(ull, vull, si);
+// CHECK: insertelement <2 x i64>
+// CHECK-LE: insertelement <2 x i64>
+
+ res_vd = vec_insert(d, vda, si);
+// CHECK: insertelement <2 x double>
+// CHECK-LE: insertelement <2 x double>
+
+ /* vec_cntlz */
res_vsc = vec_cntlz(vsc);
// CHECK: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false)
// CHECK-LE: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false)
@@ -512,6 +455,10 @@ void test1() {
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_all_eq' is ambiguous
+ res_i = vec_all_eq(vda, vda);
+// CHECK: @llvm.ppc.vsx.xvcmpeqdp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p
+
/* vec_all_ne */
res_i = vec_all_ne(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
@@ -548,6 +495,24 @@ void test1() {
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_all_ne' is ambiguous
+ dummy();
+// CHECK: @dummy
+
+ res_i = vec_all_ne(vda, vda);
+// CHECK: @llvm.ppc.vsx.xvcmpeqdp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p
+
+ dummy();
+// CHECK: @dummy
+
+ res_i = vec_all_nge(vda, vda);
+// CHECK: @llvm.ppc.vsx.xvcmpgedp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p
+
+ res_i = vec_all_ngt(vda, vda);
+// CHECK: @llvm.ppc.vsx.xvcmpgtdp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p
+
/* vec_any_eq */
res_i = vec_any_eq(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
@@ -584,6 +549,10 @@ void test1() {
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_any_eq' is ambiguous
+ res_i = vec_any_eq(vda, vda);
+// CHECK: @llvm.ppc.vsx.xvcmpeqdp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p
+
/* vec_any_ne */
res_i = vec_any_ne(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
@@ -620,6 +589,10 @@ void test1() {
// CHECK-LE: @llvm.ppc.altivec.vcmpequd.p
// CHECK-PPC: error: call to 'vec_any_ne' is ambiguous
+ res_i = vec_any_ne(vda, vda);
+// CHECK: @llvm.ppc.vsx.xvcmpeqdp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p
+
/* vec_all_ge */
res_i = vec_all_ge(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -656,6 +629,10 @@ void test1() {
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_ge' is ambiguous
+ res_i = vec_all_ge(vda, vda);
+// CHECK: @llvm.ppc.vsx.xvcmpgedp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p
+
/* vec_all_gt */
res_i = vec_all_gt(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -692,6 +669,10 @@ void test1() {
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_gt' is ambiguous
+ res_i = vec_all_gt(vda, vda);
+// CHECK: @llvm.ppc.vsx.xvcmpgtdp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p
+
/* vec_all_le */
res_i = vec_all_le(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -728,6 +709,10 @@ void test1() {
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_le' is ambiguous
+ res_i = vec_all_le(vda, vda);
+// CHECK: @llvm.ppc.vsx.xvcmpgedp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p
+
/* vec_all_lt */
res_i = vec_all_lt(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -764,6 +749,14 @@ void test1() {
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_all_lt' is ambiguous
+ res_i = vec_all_lt(vda, vda);
+// CHECK: @llvm.ppc.vsx.xvcmpgtdp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p
+
+ res_i = vec_all_nan(vda);
+// CHECK: @llvm.ppc.vsx.xvcmpeqdp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p
+
/* vec_any_ge */
res_i = vec_any_ge(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -800,6 +793,10 @@ void test1() {
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_ge' is ambiguous
+ res_i = vec_any_ge(vda, vda);
+// CHECK: @llvm.ppc.vsx.xvcmpgedp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p
+
/* vec_any_gt */
res_i = vec_any_gt(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -836,6 +833,10 @@ void test1() {
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_gt' is ambiguous
+ res_i = vec_any_gt(vda, vda);
+// CHECK: @llvm.ppc.vsx.xvcmpgtdp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p
+
/* vec_any_le */
res_i = vec_any_le(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -872,6 +873,10 @@ void test1() {
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_le' is ambiguous
+ res_i = vec_any_le(vda, vda);
+// CHECK: @llvm.ppc.vsx.xvcmpgedp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p
+
/* vec_any_lt */
res_i = vec_any_lt(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -908,6 +913,10 @@ void test1() {
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud.p
// CHECK-PPC: error: call to 'vec_any_lt' is ambiguous
+ res_i = vec_any_lt(vda, vda);
+// CHECK: @llvm.ppc.vsx.xvcmpgtdp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p
+
/* vec_max */
res_vsll = vec_max(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vmaxsd
@@ -939,6 +948,15 @@ void test1() {
// CHECK-LE: @llvm.ppc.altivec.vmaxud
// CHECK-PPC: error: call to 'vec_max' is ambiguous
+ /* vec_mergeh */
+ res_vbll = vec_mergeh(vbll, vbll);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
+ res_vbll = vec_mergel(vbll, vbll);
+// CHECK: @llvm.ppc.altivec.vperm
+// CHECK-LE: @llvm.ppc.altivec.vperm
+
/* vec_min */
res_vsll = vec_min(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vminsd
@@ -1058,6 +1076,28 @@ void test1() {
// CHECK-LE: ashr <2 x i64>
// CHECK-PPC: error: call to 'vec_sra' is ambiguous
+ /* vec_splats */
+ res_vsll = vec_splats(sll);
+// CHECK: insertelement <2 x i64>
+// CHECK-LE: insertelement <2 x i64>
+
+ res_vull = vec_splats(ull);
+// CHECK: insertelement <2 x i64>
+// CHECK-LE: insertelement <2 x i64>
+
+ res_vsx = vec_splats(sx);
+// CHECK: insertelement <1 x i128>
+// CHECK-LE: insertelement <1 x i128>
+
+ res_vux = vec_splats(ux);
+// CHECK: insertelement <1 x i128>
+// CHECK-LE: insertelement <1 x i128>
+
+ res_vd = vec_splats(d);
+// CHECK: insertelement <2 x double>
+// CHECK-LE: insertelement <2 x double>
+
/* vec_unpackh */
res_vsll = vec_unpackh(vsi);
// CHECK: llvm.ppc.altivec.vupkhsw
@@ -1177,13 +1217,7 @@ void test1() {
// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-PPC: warning: implicit declaration of function 'vec_nand' is invalid in C99
- res_vsc = vec_nand(vsc, vbc);
-// CHECK: [[T1:%.+]] = and <16 x i8>
-// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-// CHECK-LE: [[T1:%.+]] = and <16 x i8>
-// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-
- res_vsc = vec_nand(vbc, vsc);
+ res_vsc = vec_nand(vbc, vbc);
// CHECK: [[T1:%.+]] = and <16 x i8>
// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-LE: [[T1:%.+]] = and <16 x i8>
@@ -1195,31 +1229,13 @@ void test1() {
// CHECK-LE: [[T1:%.+]] = and <16 x i8>
// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
- res_vuc = vec_nand(vuc, vbc);
-// CHECK: [[T1:%.+]] = and <16 x i8>
-// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-// CHECK-LE: [[T1:%.+]] = and <16 x i8>
-// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-
- res_vuc = vec_nand(vbc, vuc);
-// CHECK: [[T1:%.+]] = and <16 x i8>
-// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-// CHECK-LE: [[T1:%.+]] = and <16 x i8>
-// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-
res_vss = vec_nand(vss, vss);
// CHECK: [[T1:%.+]] = and <8 x i16>
// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-LE: [[T1:%.+]] = and <8 x i16>
// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
- res_vss = vec_nand(vss, vbs);
-// CHECK: [[T1:%.+]] = and <8 x i16>
-// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
-// CHECK-LE: [[T1:%.+]] = and <8 x i16>
-// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
-
- res_vss = vec_nand(vbs, vss);
+ res_vss = vec_nand(vbs, vbs);
// CHECK: [[T1:%.+]] = and <8 x i16>
// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-LE: [[T1:%.+]] = and <8 x i16>
@@ -1231,31 +1247,13 @@ void test1() {
// CHECK-LE: [[T1:%.+]] = and <8 x i16>
// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
- res_vus = vec_nand(vus, vbs);
-// CHECK: [[T1:%.+]] = and <8 x i16>
-// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
-// CHECK-LE: [[T1:%.+]] = and <8 x i16>
-// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
-
- res_vus = vec_nand(vbs, vus);
-// CHECK: [[T1:%.+]] = and <8 x i16>
-// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
-// CHECK-LE: [[T1:%.+]] = and <8 x i16>
-// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
-
res_vsi = vec_nand(vsi, vsi);
// CHECK: [[T1:%.+]] = and <4 x i32>
// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-LE: [[T1:%.+]] = and <4 x i32>
// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
- res_vsi = vec_nand(vsi, vbi);
-// CHECK: [[T1:%.+]] = and <4 x i32>
-// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
-// CHECK-LE: [[T1:%.+]] = and <4 x i32>
-// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
-
- res_vsi = vec_nand(vbi, vsi);
+ res_vsi = vec_nand(vbi, vbi);
// CHECK: [[T1:%.+]] = and <4 x i32>
// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-LE: [[T1:%.+]] = and <4 x i32>
@@ -1267,31 +1265,13 @@ void test1() {
// CHECK-LE: [[T1:%.+]] = and <4 x i32>
// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
- res_vui = vec_nand(vui, vbi);
-// CHECK: [[T1:%.+]] = and <4 x i32>
-// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
-// CHECK-LE: [[T1:%.+]] = and <4 x i32>
-// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
-
- res_vui = vec_nand(vbi, vui);
-// CHECK: [[T1:%.+]] = and <4 x i32>
-// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
-// CHECK-LE: [[T1:%.+]] = and <4 x i32>
-// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
-
res_vsll = vec_nand(vsll, vsll);
// CHECK: [[T1:%.+]] = and <2 x i64>
// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
// CHECK-LE: [[T1:%.+]] = and <2 x i64>
// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
- res_vsll = vec_nand(vsll, vbll);
-// CHECK: [[T1:%.+]] = and <2 x i64>
-// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
-// CHECK-LE: [[T1:%.+]] = and <2 x i64>
-// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
-
- res_vsll = vec_nand(vbll, vsll);
+ res_vsll = vec_nand(vbll, vbll);
// CHECK: [[T1:%.+]] = and <2 x i64>
// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
// CHECK-LE: [[T1:%.+]] = and <2 x i64>
@@ -1303,18 +1283,6 @@ void test1() {
// CHECK-LE: [[T1:%.+]] = and <2 x i64>
// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
- res_vull = vec_nand(vull, vbll);
-// CHECK: [[T1:%.+]] = and <2 x i64>
-// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
-// CHECK-LE: [[T1:%.+]] = and <2 x i64>
-// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
-
- res_vull = vec_nand(vbll, vull);
-// CHECK: [[T1:%.+]] = and <2 x i64>
-// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
-// CHECK-LE: [[T1:%.+]] = and <2 x i64>
-// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
-
/* vec_orc */
res_vsc = vec_orc(vsc, vsc);
// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
@@ -1353,6 +1321,12 @@ void test1() {
// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]
+ res_vbc = vec_orc(vbc, vbc);
+// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK: or <16 x i8> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]
+
res_vss = vec_orc(vss, vss);
// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK: or <8 x i16> {{%.+}}, [[T1]]
@@ -1389,6 +1363,12 @@ void test1() {
// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]
+ res_vbs = vec_orc(vbs, vbs);
+// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK: or <8 x i16> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]
+
res_vsi = vec_orc(vsi, vsi);
// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK: or <4 x i32> {{%.+}}, [[T1]]
@@ -1425,6 +1405,12 @@ void test1() {
// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]
+ res_vbi = vec_orc(vbi, vbi);
+// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK: or <4 x i32> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]
+
res_vsll = vec_orc(vsll, vsll);
// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK: or <2 x i64> {{%.+}}, [[T1]]
@@ -1461,6 +1447,33 @@ void test1() {
// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]
+ res_vbll = vec_orc(vbll, vbll);
+// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
+// CHECK: or <2 x i64> {{%.+}}, [[T1]]
+// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
+// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]
+
+ /* vec_sub */
+ res_vsll = vec_sub(vsll, vsll);
+// CHECK: sub <2 x i64>
+// CHECK-LE: sub <2 x i64>
+
+ res_vull = vec_sub(vull, vull);
+// CHECK: sub <2 x i64>
+// CHECK-LE: sub <2 x i64>
+
+ res_vd = vec_sub(vda, vda);
+// CHECK: fsub <2 x double>
+// CHECK-LE: fsub <2 x double>
+
+ res_vsx = vec_sub(vsx, vsx);
+// CHECK: sub <1 x i128>
+// CHECK-LE: sub <1 x i128>
+
+ res_vux = vec_sub(vux, vux);
+// CHECK: sub <1 x i128>
+// CHECK-LE: sub <1 x i128>
+
/* vec_vbpermq */
res_vsll = vec_vbpermq(vsc, vsc);
// CHECK: llvm.ppc.altivec.vbpermq
@@ -1480,4 +1493,14 @@ void test1() {
// CHECK: llvm.ppc.altivec.vgbbd
// CHECK-LE: llvm.ppc.altivec.vgbbd
// CHECK-PPC: warning: implicit declaration of function 'vec_vgbbd'
+
+ res_vuc = vec_gb(vuc);
+// CHECK: llvm.ppc.altivec.vgbbd
+// CHECK-LE: llvm.ppc.altivec.vgbbd
+// CHECK-PPC: warning: implicit declaration of function 'vec_gb'
+
+ res_vull = vec_bperm(vux, vux);
+// CHECK: llvm.ppc.altivec.vbpermq
+// CHECK-LE: llvm.ppc.altivec.vbpermq
+// CHECK-PPC: warning: implicit declaration of function 'vec_bperm'
}