author    Nemanja Ivanovic <nemanjai@ca.ibm.com>  2019-11-07 20:40:32 -0600
committer Nemanja Ivanovic <nemanjai@ca.ibm.com>  2019-11-07 20:40:32 -0600
commit    070e4027b02453f0962e5b61335a517581c5528f (patch)
tree      cd94e8263f9eb2fa5cd74c569bd81899a6e52375
parent    9af28400d6ff59614dad2584f5de03be2e294151 (diff)
download  bcm5719-llvm-070e4027b02453f0962e5b61335a517581c5528f.tar.gz
          bcm5719-llvm-070e4027b02453f0962e5b61335a517581c5528f.zip
[PowerPC][Altivec] Emit correct builtin for single precision vec_all_ne

We currently emit a double precision comparison (xvcmpeqdp) for the
single precision overload of vec_all_ne, whereas we need to emit the
single precision version (xvcmpeqsp).

Differential revision: https://reviews.llvm.org/D64024
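
The root cause is that the old header's vector cast reinterprets bits
rather than converting values: the four float lanes get treated as two
doubles, so the predicate answers a question about the wrong lanes. A
minimal reproducer sketch, assuming a PowerPC target with VSX (the
compile command and input values below are illustrative, not taken from
the patch):

/* Reproducer sketch; assumes something like
 *   clang -target powerpc64le-unknown-linux-gnu -mvsx repro.c
 * Input values are illustrative. */
#include <altivec.h>
#include <stdio.h>

int main(void) {
  /* Lanes 0 and 2 compare equal as floats, so vec_all_ne must
   * return 0. */
  vector float a = (vector float){1.0f, 2.0f, 3.0f, 4.0f};
  vector float b = (vector float){1.0f, 9.0f, 3.0f, 9.0f};

  /* With the old header, both operands were bit-cast to vector double
   * and compared with xvcmpeqdp: neither reinterpreted 64-bit pattern
   * matches, so the buggy overload answered 1 ("all lanes differ"). */
  printf("vec_all_ne = %d\n", vec_all_ne(a, b));
  return 0;
}

With the fixed header the call lowers to xvcmpeqsp and prints 0 here,
because lanes 0 and 2 compare equal.
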
-rw-r--r--  clang/lib/Headers/altivec.h                  2
-rw-r--r--  clang/test/CodeGen/builtins-ppc-p8vector.c  57
2 files changed, 58 insertions(+), 1 deletion(-)
diff --git a/clang/lib/Headers/altivec.h b/clang/lib/Headers/altivec.h
index 8352f8f740c..77a0e494df3 100644
--- a/clang/lib/Headers/altivec.h
+++ b/clang/lib/Headers/altivec.h
@@ -14784,7 +14784,7 @@ static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
static __inline__ int __ATTRS_o_ai vec_all_ne(vector float __a,
vector float __b) {
#ifdef __VSX__
- return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, (vector double)__a, (vector double)__b);
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __b);
#else
return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __b);
#endif
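
For context on the __CR6_EQ argument above: the _p predicate builtins
return one bit of the CR6 field set by the dot-form compare, and
vec_all_ne selects the "all comparisons false" bit. A scalar model of
that logic, purely as an illustrative sketch:

/* Scalar model of the predicate behind vec_all_ne for vector float:
 * "all lanes not equal" holds exactly when the element-wise equality
 * compare is false in every lane, which is what __CR6_EQ selects.
 * Illustrative only; the hardware test is the CR6 update done by the
 * xvcmpeqsp. instruction. */
static int model_vec_all_ne_f32(const float a[4], const float b[4]) {
  for (int i = 0; i < 4; ++i)
    if (a[i] == b[i])
      return 0; /* some lane compared equal, so not "all ne" */
  return 1;
}
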
diff --git a/clang/test/CodeGen/builtins-ppc-p8vector.c b/clang/test/CodeGen/builtins-ppc-p8vector.c
index a686b0a0796..d494e463105 100644
--- a/clang/test/CodeGen/builtins-ppc-p8vector.c
+++ b/clang/test/CodeGen/builtins-ppc-p8vector.c
@@ -469,6 +469,10 @@ void test1() {
// CHECK: @llvm.ppc.vsx.xvcmpeqdp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p
+ res_i = vec_all_eq(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpeqsp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpeqsp.p
+
/* vec_all_ne */
res_i = vec_all_ne(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
@@ -515,6 +519,13 @@ void test1() {
dummy();
// CHECK: @dummy
+ res_i = vec_all_ne(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpeqsp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpeqsp.p
+
+ dummy();
+// CHECK: @dummy
+
res_i = vec_all_nge(vda, vda);
// CHECK: @llvm.ppc.vsx.xvcmpgedp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p
@@ -563,6 +574,10 @@ void test1() {
// CHECK: @llvm.ppc.vsx.xvcmpeqdp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p
+ res_i = vec_any_eq(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpeqsp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpeqsp.p
+
/* vec_any_ne */
res_i = vec_any_ne(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpequd.p
@@ -603,6 +618,10 @@ void test1() {
// CHECK: @llvm.ppc.vsx.xvcmpeqdp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p
+ res_i = vec_any_ne(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpeqsp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpeqsp.p
+
/* vec_all_ge */
res_i = vec_all_ge(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -643,6 +662,10 @@ void test1() {
// CHECK: @llvm.ppc.vsx.xvcmpgedp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p
+ res_i = vec_all_ge(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpgesp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgesp.p
+
/* vec_all_gt */
res_i = vec_all_gt(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -683,6 +706,10 @@ void test1() {
// CHECK: @llvm.ppc.vsx.xvcmpgtdp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p
+ res_i = vec_all_gt(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpgtsp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgtsp.p
+
/* vec_all_le */
res_i = vec_all_le(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -723,6 +750,10 @@ void test1() {
// CHECK: @llvm.ppc.vsx.xvcmpgedp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p
+ res_i = vec_all_le(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpgesp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgesp.p
+
/* vec_all_lt */
res_i = vec_all_lt(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -763,10 +794,18 @@ void test1() {
// CHECK: @llvm.ppc.vsx.xvcmpgtdp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p
+ res_i = vec_all_lt(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpgtsp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgtsp.p
+
res_i = vec_all_nan(vda);
// CHECK: @llvm.ppc.vsx.xvcmpeqdp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p
+ res_i = vec_all_nan(vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpeqsp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpeqsp.p
+
/* vec_any_ge */
res_i = vec_any_ge(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -807,6 +846,10 @@ void test1() {
// CHECK: @llvm.ppc.vsx.xvcmpgedp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p
+ res_i = vec_any_ge(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpgesp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgesp.p
+
/* vec_any_gt */
res_i = vec_any_gt(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -887,6 +930,10 @@ void test1() {
// CHECK: @llvm.ppc.vsx.xvcmpgedp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p
+ res_i = vec_any_le(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpgesp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgesp.p
+
/* vec_any_lt */
res_i = vec_any_lt(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd.p
@@ -927,6 +974,10 @@ void test1() {
// CHECK: @llvm.ppc.vsx.xvcmpgtdp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p
+ res_i = vec_any_lt(vfa, vfa);
+// CHECK: @llvm.ppc.vsx.xvcmpgtsp.p
+// CHECK-LE: @llvm.ppc.vsx.xvcmpgtsp.p
+
/* vec_max */
res_vsll = vec_max(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vmaxsd
@@ -1309,6 +1360,12 @@ void test1() {
// CHECK-LE: [[T1:%.+]] = and <2 x i64>
// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
+ res_vf = vec_nand(vfa, vfa);
+// CHECK: [[T1:%.+]] = and <4 x i32>
+// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
+// CHECK-LE: [[T1:%.+]] = and <4 x i32>
+// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
+
/* vec_orc */
res_vsc = vec_orc(vsc, vsc);
// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>