path: root/clang/lib/Headers/xopintrin.h
author     Craig Topper <craig.topper@intel.com>    2018-05-31 00:51:20 +0000
committer  Craig Topper <craig.topper@intel.com>    2018-05-31 00:51:20 +0000
commit     c6338679446a62d64c747531a8e11250dbedc8b0 (patch)
tree       b3f7d4ee8b999001400c2c828fa1e74253d8f395 /clang/lib/Headers/xopintrin.h
parent     7744c7f137bc7c079353f0ca40b7fa2b26bcdb3b (diff)
download   bcm5719-llvm-c6338679446a62d64c747531a8e11250dbedc8b0.tar.gz
           bcm5719-llvm-c6338679446a62d64c747531a8e11250dbedc8b0.zip
[X86] Remove __extension__ from macro intrinsics when it's not needed.
I think this is a holdover from when we used to declare variables inside the macros, and it has been copied and pasted forward for years every time a new macro intrinsic gets added. Interestingly, this made some IRGen tests slightly more optimized: we now return a zeroinitializer directly instead of going through a store+load. It also removed a bogus error message on another test.

llvm-svn: 333613
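For context, a minimal standalone sketch (simplified, hypothetical names; not the actual header or builtins) of the two macro forms this patch converts between: the old GNU statement-expression form wrapped in __extension__, and the plain cast expression that replaces it. Since the macro body is a single expression and declares no locals, the statement expression (and the __extension__ that silences -pedantic for it) buys nothing, and both forms yield the same value in expression context.

#include <stdio.h>

typedef int v4si __attribute__((vector_size(16)));

/* Stand-in for an __builtin_ia32_* call so the sketch is self-contained. */
static inline v4si fake_vprotdi(v4si a, int n) { (void)n; return a; }

/* Old style: GNU statement expression, marked __extension__ to suppress
 * pedantic warnings. The block evaluates to its last expression. */
#define roti_old(A, N) __extension__ ({ \
  (v4si)fake_vprotdi((v4si)(A), (N)); })

/* New style: a plain expression. No block is needed because the macro
 * no longer declares local variables. */
#define roti_new(A, N) \
  (v4si)fake_vprotdi((v4si)(A), (N))

int main(void) {
  v4si x = {1, 2, 3, 4};
  v4si a = roti_old(x, 1);
  v4si b = roti_new(x, 1);
  printf("%d %d\n", a[0], b[0]);  /* both forms produce the same value */
  return 0;
}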
Diffstat (limited to 'clang/lib/Headers/xopintrin.h')
-rw-r--r--  clang/lib/Headers/xopintrin.h | 64 +-
1 file changed, 32 insertions(+), 32 deletions(-)
diff --git a/clang/lib/Headers/xopintrin.h b/clang/lib/Headers/xopintrin.h
index 4a34f770d58..34887dc79fb 100644
--- a/clang/lib/Headers/xopintrin.h
+++ b/clang/lib/Headers/xopintrin.h
@@ -237,17 +237,17 @@ _mm_rot_epi64(__m128i __A, __m128i __B)
return (__m128i)__builtin_ia32_vprotq((__v2di)__A, (__v2di)__B);
}
-#define _mm_roti_epi8(A, N) __extension__ ({ \
- (__m128i)__builtin_ia32_vprotbi((__v16qi)(__m128i)(A), (N)); })
+#define _mm_roti_epi8(A, N) \
+ (__m128i)__builtin_ia32_vprotbi((__v16qi)(__m128i)(A), (N))
-#define _mm_roti_epi16(A, N) __extension__ ({ \
- (__m128i)__builtin_ia32_vprotwi((__v8hi)(__m128i)(A), (N)); })
+#define _mm_roti_epi16(A, N) \
+ (__m128i)__builtin_ia32_vprotwi((__v8hi)(__m128i)(A), (N))
-#define _mm_roti_epi32(A, N) __extension__ ({ \
- (__m128i)__builtin_ia32_vprotdi((__v4si)(__m128i)(A), (N)); })
+#define _mm_roti_epi32(A, N) \
+ (__m128i)__builtin_ia32_vprotdi((__v4si)(__m128i)(A), (N))
-#define _mm_roti_epi64(A, N) __extension__ ({ \
- (__m128i)__builtin_ia32_vprotqi((__v2di)(__m128i)(A), (N)); })
+#define _mm_roti_epi64(A, N) \
+ (__m128i)__builtin_ia32_vprotqi((__v2di)(__m128i)(A), (N))
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_shl_epi8(__m128i __A, __m128i __B)
@@ -297,37 +297,37 @@ _mm_sha_epi64(__m128i __A, __m128i __B)
return (__m128i)__builtin_ia32_vpshaq((__v2di)__A, (__v2di)__B);
}
-#define _mm_com_epu8(A, B, N) __extension__ ({ \
+#define _mm_com_epu8(A, B, N) \
(__m128i)__builtin_ia32_vpcomub((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (N)); })
+ (__v16qi)(__m128i)(B), (N))
-#define _mm_com_epu16(A, B, N) __extension__ ({ \
+#define _mm_com_epu16(A, B, N) \
(__m128i)__builtin_ia32_vpcomuw((__v8hi)(__m128i)(A), \
- (__v8hi)(__m128i)(B), (N)); })
+ (__v8hi)(__m128i)(B), (N))
-#define _mm_com_epu32(A, B, N) __extension__ ({ \
+#define _mm_com_epu32(A, B, N) \
(__m128i)__builtin_ia32_vpcomud((__v4si)(__m128i)(A), \
- (__v4si)(__m128i)(B), (N)); })
+ (__v4si)(__m128i)(B), (N))
-#define _mm_com_epu64(A, B, N) __extension__ ({ \
+#define _mm_com_epu64(A, B, N) \
(__m128i)__builtin_ia32_vpcomuq((__v2di)(__m128i)(A), \
- (__v2di)(__m128i)(B), (N)); })
+ (__v2di)(__m128i)(B), (N))
-#define _mm_com_epi8(A, B, N) __extension__ ({ \
+#define _mm_com_epi8(A, B, N) \
(__m128i)__builtin_ia32_vpcomb((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (N)); })
+ (__v16qi)(__m128i)(B), (N))
-#define _mm_com_epi16(A, B, N) __extension__ ({ \
+#define _mm_com_epi16(A, B, N) \
(__m128i)__builtin_ia32_vpcomw((__v8hi)(__m128i)(A), \
- (__v8hi)(__m128i)(B), (N)); })
+ (__v8hi)(__m128i)(B), (N))
-#define _mm_com_epi32(A, B, N) __extension__ ({ \
+#define _mm_com_epi32(A, B, N) \
(__m128i)__builtin_ia32_vpcomd((__v4si)(__m128i)(A), \
- (__v4si)(__m128i)(B), (N)); })
+ (__v4si)(__m128i)(B), (N))
-#define _mm_com_epi64(A, B, N) __extension__ ({ \
+#define _mm_com_epi64(A, B, N) \
(__m128i)__builtin_ia32_vpcomq((__v2di)(__m128i)(A), \
- (__v2di)(__m128i)(B), (N)); })
+ (__v2di)(__m128i)(B), (N))
#define _MM_PCOMCTRL_LT 0
#define _MM_PCOMCTRL_LE 1
@@ -722,24 +722,24 @@ _mm_comtrue_epi64(__m128i __A, __m128i __B)
return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_TRUE);
}
-#define _mm_permute2_pd(X, Y, C, I) __extension__ ({ \
+#define _mm_permute2_pd(X, Y, C, I) \
(__m128d)__builtin_ia32_vpermil2pd((__v2df)(__m128d)(X), \
(__v2df)(__m128d)(Y), \
- (__v2di)(__m128i)(C), (I)); })
+ (__v2di)(__m128i)(C), (I))
-#define _mm256_permute2_pd(X, Y, C, I) __extension__ ({ \
+#define _mm256_permute2_pd(X, Y, C, I) \
(__m256d)__builtin_ia32_vpermil2pd256((__v4df)(__m256d)(X), \
(__v4df)(__m256d)(Y), \
- (__v4di)(__m256i)(C), (I)); })
+ (__v4di)(__m256i)(C), (I))
-#define _mm_permute2_ps(X, Y, C, I) __extension__ ({ \
+#define _mm_permute2_ps(X, Y, C, I) \
(__m128)__builtin_ia32_vpermil2ps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \
- (__v4si)(__m128i)(C), (I)); })
+ (__v4si)(__m128i)(C), (I))
-#define _mm256_permute2_ps(X, Y, C, I) __extension__ ({ \
+#define _mm256_permute2_ps(X, Y, C, I) \
(__m256)__builtin_ia32_vpermil2ps256((__v8sf)(__m256)(X), \
(__v8sf)(__m256)(Y), \
- (__v8si)(__m256i)(C), (I)); })
+ (__v8si)(__m256i)(C), (I))
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_frcz_ss(__m128 __A)
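For reference, a small usage sketch of a few of the affected intrinsics. Because each macro now expands to a plain cast expression, it can appear anywhere an expression is expected (initializers, return statements, call arguments), exactly as before. The sketch assumes a target with XOP support (e.g. compile with -mxop); the function name is illustrative only.

#include <x86intrin.h>

/* Rotate each 32-bit lane left by an immediate and compare unsigned bytes
 * for less-than; both intrinsics are macros wrapping XOP builtins. */
__m128i rotate_and_compare(__m128i a, __m128i b) {
  __m128i r  = _mm_roti_epi32(a, 3);                  /* immediate rotate     */
  __m128i lt = _mm_com_epu8(a, b, _MM_PCOMCTRL_LT);   /* vpcomub, LT predicate */
  return _mm_xor_si128(r, lt);
}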