author     Craig Topper <craig.topper@gmail.com>   2016-05-17 04:41:38 +0000
committer  Craig Topper <craig.topper@gmail.com>   2016-05-17 04:41:38 +0000
commit     0bb4664a88c650757b1e1ea8dd449ec19d4a5d7d (patch)
tree       91b880189e0dc4e53bf635a3c3bae44a4644768e
parent     41ad25a0f981768243c5481d84c025e65550a868 (diff)
[AVX512] Add parentheses around macro arguments in AVX512ER intrinsics. Remove leading underscores from macro argument names. Add explicit typecasts to all macro arguments and return values. And finally reformat after all the adjustments.
This is a mostly mechanical change accomplished with a script. I tried to split out any changes to the typecasts that already existed into separate commits.

llvm-svn: 269741
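Why the parentheses matter: without them, an expression passed as a macro argument can rebind against operators inside the expansion. A minimal illustration with hypothetical macros (not taken from the header):

  #include <stdio.h>

  /* Hypothetical macros for illustration only; not part of avx512erintrin.h. */
  #define SQUARE_BAD(X)  (X * X)        /* argument is not parenthesized */
  #define SQUARE_GOOD(X) ((X) * (X))    /* argument is parenthesized */

  int main(void) {
    int a = 3;
    /* SQUARE_BAD(a + 1) expands to (a + 1 * a + 1) == 7, not 16. */
    printf("%d %d\n", SQUARE_BAD(a + 1), SQUARE_GOOD(a + 1));
    return 0;
  }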
-rw-r--r--   clang/lib/Headers/avx512erintrin.h   132
1 file changed, 66 insertions(+), 66 deletions(-)
diff --git a/clang/lib/Headers/avx512erintrin.h b/clang/lib/Headers/avx512erintrin.h
index 8c57c727afd..b24ba7b182a 100644
--- a/clang/lib/Headers/avx512erintrin.h
+++ b/clang/lib/Headers/avx512erintrin.h
@@ -31,66 +31,66 @@
#define _mm512_exp2a23_round_pd(A, R) __extension__ ({ \
(__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (R)); })
+ (__mmask8)-1, (int)(R)); })
#define _mm512_mask_exp2a23_round_pd(S, M, A, R) __extension__ ({ \
(__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(S), \
- (__mmask8)(M), (R)); })
+ (__v8df)(__m512d)(S), (__mmask8)(M), \
+ (int)(R)); })
#define _mm512_maskz_exp2a23_round_pd(M, A, R) __extension__ ({ \
(__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)(M), (R)); })
+ (__mmask8)(M), (int)(R)); })
#define _mm512_exp2a23_pd(A) \
- _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION)
+ _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION)
#define _mm512_mask_exp2a23_pd(S, M, A) \
- _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+ _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
#define _mm512_maskz_exp2a23_pd(M, A) \
- _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
+ _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
#define _mm512_exp2a23_round_ps(A, R) __extension__ ({ \
(__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (R)); })
+ (__mmask16)-1, (int)(R)); })
#define _mm512_mask_exp2a23_round_ps(S, M, A, R) __extension__ ({ \
(__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(S), \
- (__mmask16)(M), (R)); })
+ (__v16sf)(__m512)(S), (__mmask16)(M), \
+ (int)(R)); })
#define _mm512_maskz_exp2a23_round_ps(M, A, R) __extension__ ({ \
(__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(M), (R)); })
+ (__mmask16)(M), (int)(R)); })
#define _mm512_exp2a23_ps(A) \
- _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION)
+ _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION)
#define _mm512_mask_exp2a23_ps(S, M, A) \
- _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+ _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
#define _mm512_maskz_exp2a23_ps(M, A) \
- _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
+ _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
// rsqrt28
#define _mm512_rsqrt28_round_pd(A, R) __extension__ ({ \
(__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (R)); })
+ (__mmask8)-1, (int)(R)); })
#define _mm512_mask_rsqrt28_round_pd(S, M, A, R) __extension__ ({ \
(__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(S), \
- (__mmask8)(M), (R)); })
+ (__v8df)(__m512d)(S), (__mmask8)(M), \
+ (int)(R)); })
#define _mm512_maskz_rsqrt28_round_pd(M, A, R) __extension__ ({ \
(__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)(M), (R)); })
+ (__mmask8)(M), (int)(R)); })
#define _mm512_rsqrt28_pd(A) \
_mm512_rsqrt28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
@@ -104,17 +104,17 @@
#define _mm512_rsqrt28_round_ps(A, R) __extension__ ({ \
(__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (R)); })
+ (__mmask16)-1, (int)(R)); })
#define _mm512_mask_rsqrt28_round_ps(S, M, A, R) __extension__ ({ \
(__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(S), \
- (__mmask16)(M), (R)); })
+ (__v16sf)(__m512)(S), (__mmask16)(M), \
+ (int)(R)); })
#define _mm512_maskz_rsqrt28_round_ps(M, A, R) __extension__ ({ \
(__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(M), (R)); })
+ (__mmask16)(M), (int)(R)); })
#define _mm512_rsqrt28_ps(A) \
_mm512_rsqrt28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
@@ -127,21 +127,21 @@
#define _mm_rsqrt28_round_ss(A, B, R) __extension__ ({ \
(__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (R)); })
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)); })
#define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) __extension__ ({ \
(__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(S), \
- (__mmask8)(M), (R)); })
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(S), \
+ (__mmask8)(M), (int)(R)); })
#define _mm_maskz_rsqrt28_round_ss(M, A, B, R) __extension__ ({ \
(__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(M), (R)); })
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(M), (int)(R)); })
#define _mm_rsqrt28_ss(A, B) \
_mm_rsqrt28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -154,21 +154,21 @@
#define _mm_rsqrt28_round_sd(A, B, R) __extension__ ({ \
(__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (R)); })
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
#define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) __extension__ ({ \
(__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(S), \
- (__mmask8)(M), (R)); })
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(S), \
+ (__mmask8)(M), (int)(R)); })
#define _mm_maskz_rsqrt28_round_sd(M, A, B, R) __extension__ ({ \
(__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(M), (R)); })
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(M), (int)(R)); })
#define _mm_rsqrt28_sd(A, B) \
_mm_rsqrt28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -183,17 +183,17 @@
#define _mm512_rcp28_round_pd(A, R) __extension__ ({ \
(__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (R)); })
+ (__mmask8)-1, (int)(R)); })
#define _mm512_mask_rcp28_round_pd(S, M, A, R) __extension__ ({ \
(__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(S), \
- (__mmask8)(M), (R)); })
+ (__v8df)(__m512d)(S), (__mmask8)(M), \
+ (int)(R)); })
#define _mm512_maskz_rcp28_round_pd(M, A, R) __extension__ ({ \
(__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)(M), (R)); })
+ (__mmask8)(M), (int)(R)); })
#define _mm512_rcp28_pd(A) \
_mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
@@ -207,17 +207,17 @@
#define _mm512_rcp28_round_ps(A, R) __extension__ ({ \
(__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (R)); })
+ (__mmask16)-1, (int)(R)); })
#define _mm512_mask_rcp28_round_ps(S, M, A, R) __extension__ ({ \
(__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(S), \
- (__mmask16)(M), (R)); })
+ (__v16sf)(__m512)(S), (__mmask16)(M), \
+ (int)(R)); })
#define _mm512_maskz_rcp28_round_ps(M, A, R) __extension__ ({ \
(__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(M), (R)); })
+ (__mmask16)(M), (int)(R)); })
#define _mm512_rcp28_ps(A) \
_mm512_rcp28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
@@ -230,21 +230,21 @@
#define _mm_rcp28_round_ss(A, B, R) __extension__ ({ \
(__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (R)); })
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)); })
#define _mm_mask_rcp28_round_ss(S, M, A, B, R) __extension__ ({ \
(__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(S), \
- (__mmask8)(M), (R)); })
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(S), \
+ (__mmask8)(M), (int)(R)); })
#define _mm_maskz_rcp28_round_ss(M, A, B, R) __extension__ ({ \
(__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(M), (R)); })
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(M), (int)(R)); })
#define _mm_rcp28_ss(A, B) \
_mm_rcp28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -257,21 +257,21 @@
#define _mm_rcp28_round_sd(A, B, R) __extension__ ({ \
(__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (R)); })
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)); })
#define _mm_mask_rcp28_round_sd(S, M, A, B, R) __extension__ ({ \
(__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(S), \
- (__mmask8)(M), (R)); })
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(S), \
+ (__mmask8)(M), (int)(R)); })
#define _mm_maskz_rcp28_round_sd(M, A, B, R) __extension__ ({ \
(__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(M), (R)); })
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(M), (int)(R)); })
#define _mm_rcp28_sd(A, B) \
_mm_rcp28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
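The explicit casts added throughout serve two purposes: (__m512d)(A) pins the expected argument type so a caller passing the wrong vector type gets a clear conversion diagnostic, and (int)(R) guarantees the rounding-mode argument reaches the builtin as an int regardless of the caller's integer type. A hedged usage sketch (requires an AVX512ER-capable target, e.g. building with clang -mavx512er):

  #include <immintrin.h>

  /* Approximate 2^x elementwise on 8 doubles using the current rounding mode. */
  __m512d exp2_demo(__m512d x) {
    /* The (int)(R) cast inside the macro accepts any integer-typed
       rounding constant here, including enum or unsigned values. */
    return _mm512_exp2a23_round_pd(x, _MM_FROUND_CUR_DIRECTION);
  }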