author    Bill Schmidt <wschmidt@linux.vnet.ibm.com>  2014-10-06 19:02:20 +0000
committer Bill Schmidt <wschmidt@linux.vnet.ibm.com>  2014-10-06 19:02:20 +0000
commit    cad3a5f7d4742a25dcd7dd1b5b87aca2f43f8311 (patch)
tree      3715f72a650d0e8fd6878660f9dce0ddc89d4662 /clang/lib
parent    43ce71f1b13bd4325b4699f8a4573e2888168067 (diff)
[PATCH][Power] Fix (and deprecate) vec_lvsl and vec_lvsr for little endian
Use of the vec_lvsl and vec_lvsr interfaces is discouraged for little endian targets, since Power8 hardware is a minimum requirement, and Power8 provides reasonable performance for unaligned vector loads and stores. Until now we have not provided "correct" (i.e., big-endian-compatible) code generation for these interfaces, as doing so produces poorly performing code. However, this has become the source of too many questions.

With this patch, LLVM will now produce compatible code for these interfaces, but will also produce a deprecation warning message for PPC64LE when one of them is used. This should make the porting direction clearer to programmers. A similar patch has recently been committed to GCC.

This patch includes a test for the warning message. There is a companion patch that adds two unit tests to projects/test-suite.

llvm-svn: 219137
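For context, a minimal sketch (not part of the patch; the function names and the unaligned-load scenario are illustrative) contrasting the deprecated lvsl/vperm idiom that now draws the warning on PPC64LE with the plain-assignment form the warning message recommends:

#include <altivec.h>

/* Deprecated idiom: build a permute mask with vec_lvsl, then merge two
 * 16-byte-aligned loads into one unaligned load.  With this patch the
 * vec_lvsl call triggers the deprecation warning on PPC64LE. */
static vector unsigned char
load_unaligned_old(const unsigned char *p)
{
  vector unsigned char mask = vec_lvsl(0, p);
  vector unsigned char lo   = vec_ld(0, p);
  vector unsigned char hi   = vec_ld(15, p);
  return vec_perm(lo, hi, mask);
}

/* Recommended replacement: a simple assignment through a vector pointer.
 * Since Power8 is the minimum hardware level for PPC64LE, the compiler
 * can lower this to an efficient unaligned vector load. */
static vector unsigned char
load_unaligned_new(const unsigned char *p)
{
  return *(const vector unsigned char *)p;
}

The assignment form leaves the choice of load sequence to the compiler, which is why it both performs well on Power8 and stays correct across endiannesses.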
Diffstat (limited to 'clang/lib')
-rw-r--r--  clang/lib/Headers/altivec.h | 182
1 file changed, 182 insertions, 0 deletions
diff --git a/clang/lib/Headers/altivec.h b/clang/lib/Headers/altivec.h
index f9fc64af3e7..373eded482f 100644
--- a/clang/lib/Headers/altivec.h
+++ b/clang/lib/Headers/altivec.h
@@ -2253,91 +2253,273 @@ vec_vlogefp(vector float __a)
/* vec_lvsl */
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const signed char *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const signed char *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const unsigned char *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const unsigned char *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const short *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const short *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const unsigned short *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const unsigned short *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const int *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const int *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const unsigned int *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const unsigned int *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsl(int __a, const float *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsl(int __a, const float *__b)
{
return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
}
+#endif
/* vec_lvsr */
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const signed char *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const signed char *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const unsigned char *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const unsigned char *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const short *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const short *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const unsigned short *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const unsigned short *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const int *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const int *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const unsigned int *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const unsigned int *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
+#ifdef __LITTLE_ENDIAN__
+static vector unsigned char __ATTRS_o_ai
+__attribute__((deprecated("use assignment for unaligned little endian \
+loads/stores")))
+vec_lvsr(int __a, const float *__b)
+{
+ vector unsigned char mask =
+ (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
+ vector unsigned char reverse = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+ return vec_perm(mask, mask, reverse);
+}
+#else
static vector unsigned char __ATTRS_o_ai
vec_lvsr(int __a, const float *__b)
{
return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
}
+#endif
/* vec_madd */