diff options
| field | value | |
|---|---|---|
| author | Nemanja Ivanovic <nemanja.i.ibm@gmail.com> | 2016-11-01 14:46:20 +0000 |
| committer | Nemanja Ivanovic <nemanja.i.ibm@gmail.com> | 2016-11-01 14:46:20 +0000 |
| commit | 05ce4ca0dd8c43d86f3c5f9b4f55c9ce95ee5a73 (patch) | |
| tree | 8444471416c47db7a5045bb8a103c1b7caf34021 /clang | |
| parent | 69587324e8e9b37a5a55ef2ff9536b8af76749a8 (diff) | |
| download | bcm5719-llvm-05ce4ca0dd8c43d86f3c5f9b4f55c9ce95ee5a73.tar.gz bcm5719-llvm-05ce4ca0dd8c43d86f3c5f9b4f55c9ce95ee5a73.zip | |
[PowerPC] Implement vector shift builtins - clang portion
This patch corresponds to review https://reviews.llvm.org/D26092.
Committing on behalf of Tony Jiang.
llvm-svn: 285694
Diffstat (limited to 'clang')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | clang/include/clang/Basic/BuiltinsPPC.def | 4 |
| -rw-r--r-- | clang/lib/Headers/altivec.h | 219 |
| -rw-r--r-- | clang/test/CodeGen/builtins-ppc-altivec.c | 73 |
| -rw-r--r-- | clang/test/CodeGen/builtins-ppc-p9vector.c | 21 |
| -rw-r--r-- | clang/test/CodeGen/builtins-ppc-vsx.c | 136 |

5 files changed, 439 insertions, 14 deletions
diff --git a/clang/include/clang/Basic/BuiltinsPPC.def b/clang/include/clang/Basic/BuiltinsPPC.def index a4eb1c3710c..125c708a54d 100644 --- a/clang/include/clang/Basic/BuiltinsPPC.def +++ b/clang/include/clang/Basic/BuiltinsPPC.def @@ -289,6 +289,10 @@ BUILTIN(__builtin_altivec_vabsdub, "V16UcV16UcV16Uc", "")  BUILTIN(__builtin_altivec_vabsduh, "V8UsV8UsV8Us", "")  BUILTIN(__builtin_altivec_vabsduw, "V4UiV4UiV4Ui", "") +// P9 Shift built-ins. +BUILTIN(__builtin_altivec_vslv, "V16UcV16UcV16Uc", "") +BUILTIN(__builtin_altivec_vsrv, "V16UcV16UcV16Uc", "") +  // VSX built-ins.  BUILTIN(__builtin_vsx_lxvd2x, "V2divC*", "") diff --git a/clang/lib/Headers/altivec.h b/clang/lib/Headers/altivec.h index 3c049a3d3d8..b2608cfb46c 100644 --- a/clang/lib/Headers/altivec.h +++ b/clang/lib/Headers/altivec.h @@ -7703,6 +7703,145 @@ static __inline__ vector float __ATTRS_o_ai vec_sld(vector float __a,  #endif  } +#ifdef __VSX__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_sld(vector bool long long __a, vector bool long long __b, +        unsigned const int __c) { +  unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ +  return vec_perm( +      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, +                                       20 - __d, 21 - __d, 22 - __d, 23 - __d, +                                       24 - __d, 25 - __d, 26 - __d, 27 - __d, +                                       28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else +  return vec_perm( +      __a, __b, +      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, +                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, +                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_sld(vector signed long long __a, vector signed long long __b, +        unsigned const int __c) { +  unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ +  
return vec_perm( +      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, +                                       20 - __d, 21 - __d, 22 - __d, 23 - __d, +                                       24 - __d, 25 - __d, 26 - __d, 27 - __d, +                                       28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else +  return vec_perm( +      __a, __b, +      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, +                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, +                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sld(vector unsigned long long __a, vector unsigned long long __b, +        unsigned const int __c) { +  unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ +  return vec_perm( +      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, +                                       20 - __d, 21 - __d, 22 - __d, 23 - __d, +                                       24 - __d, 25 - __d, 26 - __d, 27 - __d, +                                       28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else +  return vec_perm( +      __a, __b, +      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, +                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, +                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector double __ATTRS_o_ai vec_sld(vector double __a, +                                                     vector double __b, +                                                     unsigned const int __c) { +  unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ +  return vec_perm( +      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, +                                       20 - __d, 21 - __d, 22 - __d, 23 - __d, +                                    
   24 - __d, 25 - __d, 26 - __d, 27 - __d, +                                       28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else +  return vec_perm( +      __a, __b, +      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, +                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, +                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} +#endif + +/* vec_sldw */ +static __inline__ vector signed char __ATTRS_o_ai vec_sldw( +    vector signed char __a, vector signed char __b, unsigned const int __c) { +  return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sldw(vector unsigned char __a, vector unsigned char __b, +         unsigned const int __c) { +  return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector signed short __ATTRS_o_ai vec_sldw( +    vector signed short __a, vector signed short __b, unsigned const int __c) { +  return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sldw(vector unsigned short __a, vector unsigned short __b, +         unsigned const int __c) { +  return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_sldw(vector signed int __a, vector signed int __b, unsigned const int __c) { +  return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai vec_sldw( +    vector unsigned int __a, vector unsigned int __b, unsigned const int __c) { +  return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_sldw(vector signed long long __a, vector signed long long __b, +         unsigned const int __c) { +  return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sldw(vector unsigned long long __a, vector unsigned long long __b, + 
        unsigned const int __c) { +  return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} +#endif + +#ifdef __POWER9_VECTOR__ +/* vec_slv */ +static __inline__ vector unsigned char __ATTRS_o_ai +vec_slv(vector unsigned char __a, vector unsigned char __b) { +  return __builtin_altivec_vslv(__a, __b); +} + +/* vec_srv */ +static __inline__ vector unsigned char __ATTRS_o_ai +vec_srv(vector unsigned char __a, vector unsigned char __b) { +  return __builtin_altivec_vsrv(__a, __b); +} +#endif +  /* vec_vsldoi */  static __inline__ vector signed char __ATTRS_o_ai @@ -8026,6 +8165,20 @@ vec_sll(vector bool int __a, vector unsigned int __b) {                                                  (vector int)__b);  } +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_sll(vector signed long long __a, vector unsigned char __b) { +  return (vector signed long long)__builtin_altivec_vsl((vector int)__a, +                                                        (vector int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sll(vector unsigned long long __a, vector unsigned char __b) { +  return (vector unsigned long long)__builtin_altivec_vsl((vector int)__a, +                                                          (vector int)__b); +} +#endif +  /* vec_vsl */  static __inline__ vector signed char __ATTRS_o_ai @@ -8289,6 +8442,32 @@ static __inline__ vector float __ATTRS_o_ai vec_slo(vector float __a,    return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);  } +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_slo(vector signed long long __a, vector signed char __b) { +  return (vector signed long long)__builtin_altivec_vslo((vector int)__a, +                                                         (vector int)__b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_slo(vector signed long long __a, vector unsigned char __b) { +  return (vector signed long 
long)__builtin_altivec_vslo((vector int)__a, +                                                         (vector int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_slo(vector unsigned long long __a, vector signed char __b) { +  return (vector unsigned long long)__builtin_altivec_vslo((vector int)__a, +                                                           (vector int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_slo(vector unsigned long long __a, vector unsigned char __b) { +  return (vector unsigned long long)__builtin_altivec_vslo((vector int)__a, +                                                           (vector int)__b); +} +#endif +  /* vec_vslo */  static __inline__ vector signed char __ATTRS_o_ai @@ -9023,6 +9202,20 @@ vec_srl(vector bool int __a, vector unsigned int __b) {                                                  (vector int)__b);  } +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_srl(vector signed long long __a, vector unsigned char __b) { +  return (vector signed long long)__builtin_altivec_vsr((vector int)__a, +                                                        (vector int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_srl(vector unsigned long long __a, vector unsigned char __b) { +  return (vector unsigned long long)__builtin_altivec_vsr((vector int)__a, +                                                          (vector int)__b); +} +#endif +  /* vec_vsr */  static __inline__ vector signed char __ATTRS_o_ai @@ -9286,6 +9479,32 @@ static __inline__ vector float __ATTRS_o_ai vec_sro(vector float __a,    return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);  } +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_sro(vector signed long long __a, vector signed char __b) { +  return (vector signed long long)__builtin_altivec_vsro((vector int)__a, +                                        
                 (vector int)__b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_sro(vector signed long long __a, vector unsigned char __b) { +  return (vector signed long long)__builtin_altivec_vsro((vector int)__a, +                                                         (vector int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sro(vector unsigned long long __a, vector signed char __b) { +  return (vector unsigned long long)__builtin_altivec_vsro((vector int)__a, +                                                           (vector int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sro(vector unsigned long long __a, vector unsigned char __b) { +  return (vector unsigned long long)__builtin_altivec_vsro((vector int)__a, +                                                           (vector int)__b); +} +#endif +  /* vec_vsro */  static __inline__ vector signed char __ATTRS_o_ai diff --git a/clang/test/CodeGen/builtins-ppc-altivec.c b/clang/test/CodeGen/builtins-ppc-altivec.c index 8f0de015d7b..abe49f17dff 100644 --- a/clang/test/CodeGen/builtins-ppc-altivec.c +++ b/clang/test/CodeGen/builtins-ppc-altivec.c @@ -3499,6 +3499,79 @@ void test6() {  // CHECK-LE: sub nsw i32 31  // CHECK-LE: @llvm.ppc.altivec.vperm +  /* vec_sldw */ +  res_vsc = vec_sldw(vsc, vsc, 0); +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 +  // CHECK: @llvm.ppc.altivec.vperm +  // CHECK-LE: sub nsw i32 16 +  // CHECK-LE: sub nsw i32 17 +  // CHECK-LE: sub nsw i32 18 +  // CHECK-LE: sub nsw i32 31 +  // CHECK-LE: @llvm.ppc.altivec.vperm + +  res_vuc = vec_sldw(vuc, vuc, 0); +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 +  // CHECK: 
@llvm.ppc.altivec.vperm +  // CHECK-LE: sub nsw i32 16 +  // CHECK-LE: sub nsw i32 17 +  // CHECK-LE: sub nsw i32 18 +  // CHECK-LE: sub nsw i32 31 +  // CHECK-LE: @llvm.ppc.altivec.vperm + +  res_vi = vec_sldw(vi, vi, 0); +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 +  // CHECK: @llvm.ppc.altivec.vperm +  // CHECK-LE: sub nsw i32 16 +  // CHECK-LE: sub nsw i32 17 +  // CHECK-LE: sub nsw i32 18 +  // CHECK-LE: sub nsw i32 31 +  // CHECK-LE: @llvm.ppc.altivec.vperm + +  res_vui = vec_sldw(vui, vui, 0); +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 +  // CHECK: @llvm.ppc.altivec.vperm +  // CHECK-LE: sub nsw i32 16 +  // CHECK-LE: sub nsw i32 17 +  // CHECK-LE: sub nsw i32 18 +  // CHECK-LE: sub nsw i32 31 +  // CHECK-LE: @llvm.ppc.altivec.vperm + +  res_vs = vec_sldw(vs, vs, 0); +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 +  // CHECK: @llvm.ppc.altivec.vperm +  // CHECK-LE: sub nsw i32 16 +  // CHECK-LE: sub nsw i32 17 +  // CHECK-LE: sub nsw i32 18 +  // CHECK-LE: sub nsw i32 31 +  // CHECK-LE: @llvm.ppc.altivec.vperm + +  res_vus = vec_sldw(vus, vus, 0); +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 +  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 +  // CHECK: @llvm.ppc.altivec.vperm +  // CHECK-LE: sub nsw i32 16 +  // CHECK-LE: sub nsw i32 17 +  // CHECK-LE: sub nsw i32 18 +  // CHECK-LE: sub nsw i32 31 +  // CHECK-LE: @llvm.ppc.altivec.vperm +    res_vsc = vec_vsldoi(vsc, vsc, 0);  // CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1  // CHECK: add nsw 
i32 {{[0-9a-zA-Z%.]+}}, 2 diff --git a/clang/test/CodeGen/builtins-ppc-p9vector.c b/clang/test/CodeGen/builtins-ppc-p9vector.c index 5cbdde2341d..32c84f1c7f4 100644 --- a/clang/test/CodeGen/builtins-ppc-p9vector.c +++ b/clang/test/CodeGen/builtins-ppc-p9vector.c @@ -756,7 +756,6 @@ signed int test62(void) {  // CHECK-LE-NEXT: ret i32    return vec_cnttz_lsbb (vuca);  } -  vector unsigned int test63(void) {  // CHECK-BE: @llvm.ppc.altivec.vprtybw(<4 x i32>  // CHECK-BE-NEXT: ret <4 x i32> @@ -764,7 +763,6 @@ vector unsigned int test63(void) {  // CHECK-NEXT: ret <4 x i32>    return vec_parity_lsbb (vuia);  } -  vector unsigned int test64(void) {  // CHECK-BE: @llvm.ppc.altivec.vprtybw(<4 x i32>  // CHECK-BE-NEXT: ret <4 x i32> @@ -772,7 +770,6 @@ vector unsigned int test64(void) {  // CHECK-NEXT: ret <4 x i32>    return vec_parity_lsbb (vsia);  } -  vector unsigned long long test65(void) {  // CHECK-BE: @llvm.ppc.altivec.vprtybd(<2 x i64>  // CHECK-BE-NEXT: ret <2 x i64> @@ -780,7 +777,6 @@ vector unsigned long long test65(void) {  // CHECK-NEXT: ret <2 x i64>    return vec_parity_lsbb (vula);  } -  vector unsigned long long test66(void) {  // CHECK-BE: @llvm.ppc.altivec.vprtybd(<2 x i64>  // CHECK-BE-NEXT: ret <2 x i64> @@ -795,7 +791,6 @@ vector unsigned __int128 test67(void) {  // CHECK-NEXT: ret <1 x i128>    return vec_parity_lsbb (vui128a);  } -  vector unsigned __int128 test68(void) {  // CHECK-BE: @llvm.ppc.altivec.vprtybq(<1 x i128>  // CHECK-BE-NEXT: ret <1 x i128> @@ -803,7 +798,6 @@ vector unsigned __int128 test68(void) {  // CHECK-NEXT: ret <1 x i128>    return vec_parity_lsbb (vsi128a);  } -  vector unsigned char test69(void) {  // CHECK-BE: call <16 x i8> @llvm.ppc.altivec.vabsdub(<16 x i8> {{.+}}, <16 x i8> {{.+}})  // CHECK: call <16 x i8> @llvm.ppc.altivec.vabsdub(<16 x i8> {{.+}}, <16 x i8> {{.+}}) @@ -819,3 +813,18 @@ vector unsigned int test71(void) {  // CHECK: call <4 x i32> @llvm.ppc.altivec.vabsduw(<4 x i32> {{.+}}, <4 x i32> {{.+}})    
return vec_absd(vuia, vuib);  } +vector unsigned char test72(void) { +// CHECK-BE: @llvm.ppc.altivec.vslv(<16 x i8> +// CHECK-BE-NEXT: ret <16 x i8> +// CHECK: @llvm.ppc.altivec.vslv(<16 x i8> +// CHECK-NEXT: ret <16 x i8> +  return vec_slv (vuca, vucb); +} +vector unsigned char test73(void) { +// CHECK-BE: @llvm.ppc.altivec.vsrv(<16 x i8> +// CHECK-BE-NEXT: ret <16 x i8> +// CHECK: @llvm.ppc.altivec.vsrv(<16 x i8> +// CHECK-NEXT: ret <16 x i8> +  return vec_srv (vuca, vucb); +} + diff --git a/clang/test/CodeGen/builtins-ppc-vsx.c b/clang/test/CodeGen/builtins-ppc-vsx.c index bacc70a62f7..0f0247b79f1 100644 --- a/clang/test/CodeGen/builtins-ppc-vsx.c +++ b/clang/test/CodeGen/builtins-ppc-vsx.c @@ -1082,20 +1082,20 @@ void test1() {  // CHECK-LE: fmul <2 x double>    res_vbll = vec_reve(vbll); -  // CHECK: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0> -  // CHECK-LE: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0> +// CHECK: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0> +// CHECK-LE: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>    res_vsll = vec_reve(vsll); -  // CHECK: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0> -  // CHECK-LE: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0> +// CHECK: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0> +// CHECK-LE: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>    res_vull = vec_reve(vull); -  // CHECK: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0> -  // CHECK-LE: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0> +// CHECK: shufflevector <2 x i64> %{{[0-9]+}}, <2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0> +// CHECK-LE: shufflevector <2 x i64> %{{[0-9]+}}, 
<2 x i64> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>    res_vd = vec_reve(vd); -  // CHECK: shufflevector <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0> -  // CHECK-LE: shufflevector <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0> +// CHECK: shufflevector <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0> +// CHECK-LE: shufflevector <2 x double> %{{[0-9]+}}, <2 x double> %{{[0-9]+}}, <2 x i32> <i32 1, i32 0>    res_vbll = vec_revb(vbll);  // CHECK: store <16 x i8> <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8>, <16 x i8>* {{%.+}}, align 16 @@ -1128,4 +1128,124 @@ void test1() {  // CHECK-LE: store <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8>* {{%.+}}, align 16  // CHECK-LE: xor <16 x i8>  // CHECK-LE: call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> {{%.+}}, <4 x i32> {{%.+}}, <16 x i8> {{%.+}}) + +  res_vbll = vec_sld(vbll, vbll, 0); +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 +// CHECK: @llvm.ppc.altivec.vperm +// CHECK-LE: sub nsw i32 16 +// CHECK-LE: sub nsw i32 17 +// CHECK-LE: sub nsw i32 18 +// CHECK-LE: sub nsw i32 31 +// CHECK-LE: @llvm.ppc.altivec.vperm + +  res_vsll = vec_sld(vsll, vsll, 0); +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 +// CHECK: @llvm.ppc.altivec.vperm +// CHECK-LE: sub nsw i32 16 +// CHECK-LE: sub nsw i32 17 +// CHECK-LE: sub nsw i32 18 +// CHECK-LE: sub nsw i32 31 +// CHECK-LE: @llvm.ppc.altivec.vperm + +  res_vull = vec_sld(vull, vull, 0); +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 +// CHECK: add 
nsw i32 {{[0-9a-zA-Z%.]+}}, 3 +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 +// CHECK: @llvm.ppc.altivec.vperm +// CHECK-LE: sub nsw i32 16 +// CHECK-LE: sub nsw i32 17 +// CHECK-LE: sub nsw i32 18 +// CHECK-LE: sub nsw i32 31 +// CHECK-LE: @llvm.ppc.altivec.vperm + +  res_vd = vec_sld(vd, vd, 0); +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 +// CHECK: @llvm.ppc.altivec.vperm +// CHECK-LE: sub nsw i32 16 +// CHECK-LE: sub nsw i32 17 +// CHECK-LE: sub nsw i32 18 +// CHECK-LE: sub nsw i32 31 +// CHECK-LE: @llvm.ppc.altivec.vperm + +  res_vsll = vec_sldw(vsll, vsll, 0); +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 +// CHECK: @llvm.ppc.altivec.vperm +// CHECK-LE: sub nsw i32 16 +// CHECK-LE: sub nsw i32 17 +// CHECK-LE: sub nsw i32 18 +// CHECK-LE: sub nsw i32 31 +// CHECK-LE: @llvm.ppc.altivec.vperm + +  res_vull = vec_sldw(vull, vull, 0); +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 1 +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 2 +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 3 +// CHECK: add nsw i32 {{[0-9a-zA-Z%.]+}}, 15 +// CHECK: @llvm.ppc.altivec.vperm +// CHECK-LE: sub nsw i32 16 +// CHECK-LE: sub nsw i32 17 +// CHECK-LE: sub nsw i32 18 +// CHECK-LE: sub nsw i32 31 +// CHECK-LE: @llvm.ppc.altivec.vperm + +  res_vsll = vec_sll(vsll, vuc); +// CHECK: @llvm.ppc.altivec.vsl +// CHECK-LE: @llvm.ppc.altivec.vsl + +res_vull = vec_sll(vull, vuc); +// CHECK: @llvm.ppc.altivec.vsl +// CHECK-LE: @llvm.ppc.altivec.vsl + +res_vsll = vec_slo(vsll, vsc); +// CHECK: @llvm.ppc.altivec.vslo +// CHECK-LE: @llvm.ppc.altivec.vslo + +  res_vsll = vec_slo(vsll, vuc); +// CHECK: @llvm.ppc.altivec.vslo +// CHECK-LE: @llvm.ppc.altivec.vslo + +  res_vull = vec_slo(vull, vsc); +// CHECK: @llvm.ppc.altivec.vslo +// CHECK-LE: 
@llvm.ppc.altivec.vslo + +  res_vull = vec_slo(vull, vuc); +// CHECK: @llvm.ppc.altivec.vslo +// CHECK-LE: @llvm.ppc.altivec.vslo + +  res_vsll = vec_srl(vsll, vuc); +// CHECK: @llvm.ppc.altivec.vsr +// CHECK-LE: @llvm.ppc.altivec.vsr + +  res_vull = vec_srl(vull, vuc); +// CHECK: @llvm.ppc.altivec.vsr +// CHECK-LE: @llvm.ppc.altivec.vsr + +  res_vsll = vec_sro(vsll, vsc); +// CHECK: @llvm.ppc.altivec.vsro +// CHECK-LE: @llvm.ppc.altivec.vsro + +  res_vsll = vec_sro(vsll, vuc); +// CHECK: @llvm.ppc.altivec.vsro +// CHECK-LE: @llvm.ppc.altivec.vsro + +  res_vull = vec_sro(vull, vsc); +// CHECK: @llvm.ppc.altivec.vsro +// CHECK-LE: @llvm.ppc.altivec.vsro + +  res_vull = vec_sro(vull, vuc); +// CHECK: @llvm.ppc.altivec.vsro +// CHECK-LE: @llvm.ppc.altivec.vsro  }  | 

