diff options
author | Eli Friedman <eli.friedman@gmail.com> | 2008-05-14 20:32:22 +0000 |
---|---|---|
committer | Eli Friedman <eli.friedman@gmail.com> | 2008-05-14 20:32:22 +0000 |
commit | f0d0e9e1742df600a07c9ea5c8aa40244ffce17f (patch) | |
tree | 9d3d4947ed97942b39fbd50ce160979232a7b192 /clang/lib/Headers | |
parent | 7402017b9aafae35ca3b9ec48c09cb61755ca614 (diff) | |
download | bcm5719-llvm-f0d0e9e1742df600a07c9ea5c8aa40244ffce17f.tar.gz bcm5719-llvm-f0d0e9e1742df600a07c9ea5c8aa40244ffce17f.zip |
Use double-underscores in front of all non-keywords to protect against
#define pollution.
llvm-svn: 51128
Diffstat (limited to 'clang/lib/Headers')
-rw-r--r-- | clang/lib/Headers/mmintrin.devel.h | 268 |
1 file changed, 134 insertions(+), 134 deletions(-)
diff --git a/clang/lib/Headers/mmintrin.devel.h b/clang/lib/Headers/mmintrin.devel.h index 1125bc639df..858f441d6a6 100644 --- a/clang/lib/Headers/mmintrin.devel.h +++ b/clang/lib/Headers/mmintrin.devel.h @@ -30,296 +30,296 @@ #error "MMX instruction set not enabled" #else -typedef long long __m64 __attribute__((vector_size(8))); +typedef long long __m64 __attribute__((__vector_size__(8))); -typedef int __v2si __attribute__((vector_size(8))); -typedef short __v4hi __attribute__((vector_size(8))); -typedef char __v8qi __attribute__((vector_size(8))); +typedef int __v2si __attribute__((__vector_size__(8))); +typedef short __v4hi __attribute__((__vector_size__(8))); +typedef char __v8qi __attribute__((__vector_size__(8))); inline void __attribute__((__always_inline__)) _mm_empty() { __builtin_ia32_emms(); } -inline __m64 __attribute__((__always_inline__)) _mm_cvtsi32_si64(int i) +inline __m64 __attribute__((__always_inline__)) _mm_cvtsi32_si64(int __i) { - return (__m64)(__v2si){i, 0}; + return (__m64)(__v2si){__i, 0}; } -inline int __attribute__((__always_inline__)) _mm_cvtsi64_si32(__m64 m) +inline int __attribute__((__always_inline__)) _mm_cvtsi64_si32(__m64 __m) { - __v2si __mmx_var2 = (__v2si)m; + __v2si __mmx_var2 = (__v2si)__m; return __mmx_var2[0]; } -inline __m64 __attribute__((__always_inline__)) _mm_cvtsi64_m64(long long i) +inline __m64 __attribute__((__always_inline__)) _mm_cvtsi64_m64(long long __i) { - return (__m64)i; + return (__m64)__i; } -inline long long __attribute__((__always_inline__)) _mm_cvtm64_si64(__m64 m) +inline long long __attribute__((__always_inline__)) _mm_cvtm64_si64(__m64 __m) { - return (long long)m; + return (long long)__m; } -inline __m64 __attribute__((__always_inline__)) _mm_packs_pi16(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_packs_pi16(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_packsswb((__v4hi)m1, (__v4hi)m2); + return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2); } 
-inline __m64 __attribute__((__always_inline__)) _mm_packs_pi32(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_packs_pi32(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_packssdw((__v2si)m1, (__v2si)m2); + return (__m64)__builtin_ia32_packssdw((__v2si)__m1, (__v2si)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_packs_pu16(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_packs_pu16(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_packuswb((__v4hi)m1, (__v4hi)m2); + return (__m64)__builtin_ia32_packuswb((__v4hi)__m1, (__v4hi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_unpackhi_pi8(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_unpackhi_pi8(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_shufflevector((__v8qi)m1, (__v8qi)m2, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7); + return (__m64)__builtin_shufflevector((__v8qi)__m1, (__v8qi)__m2, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7); } -inline __m64 __attribute__((__always_inline__)) _mm_unpackhi_pi16(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_unpackhi_pi16(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_shufflevector((__v4hi)m1, (__v4hi)m2, 2, 4+2, 3, 4+3); + return (__m64)__builtin_shufflevector((__v4hi)__m1, (__v4hi)__m2, 2, 4+2, 3, 4+3); } -inline __m64 __attribute__((__always_inline__)) _mm_unpackhi_pi32(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_unpackhi_pi32(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_shufflevector((__v2si)m1, (__v2si)m2, 1, 2+1); + return (__m64)__builtin_shufflevector((__v2si)__m1, (__v2si)__m2, 1, 2+1); } -inline __m64 __attribute__((__always_inline__)) _mm_unpacklo_pi8(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_unpacklo_pi8(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_shufflevector((__v8qi)m1, (__v8qi)m2, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3); + return 
(__m64)__builtin_shufflevector((__v8qi)__m1, (__v8qi)__m2, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3); } -inline __m64 __attribute__((__always_inline__)) _mm_unpacklo_pi16(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_unpacklo_pi16(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_shufflevector((__v4hi)m1, (__v4hi)m2, 0, 4+0, 1, 4+1); + return (__m64)__builtin_shufflevector((__v4hi)__m1, (__v4hi)__m2, 0, 4+0, 1, 4+1); } -inline __m64 __attribute__((__always_inline__)) _mm_unpacklo_pi32(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_unpacklo_pi32(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_shufflevector((__v2si)m1, (__v2si)m2, 0, 2+0); + return (__m64)__builtin_shufflevector((__v2si)__m1, (__v2si)__m2, 0, 2+0); } -inline __m64 __attribute__((__always_inline__)) _mm_add_pi8(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_add_pi8(__m64 __m1, __m64 __m2) { - return (__m64)((__v8qi)m1 + (__v8qi)m2); + return (__m64)((__v8qi)__m1 + (__v8qi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_add_pi16(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_add_pi16(__m64 __m1, __m64 __m2) { - return (__m64)((__v4hi)m1 + (__v4hi)m2); + return (__m64)((__v4hi)__m1 + (__v4hi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_add_pi32(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_add_pi32(__m64 __m1, __m64 __m2) { - return (__m64)((__v2si)m1 + (__v2si)m2); + return (__m64)((__v2si)__m1 + (__v2si)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_adds_pi8(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_adds_pi8(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_paddsb((__v8qi)m1, (__v8qi)m2); + return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_adds_pi16(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) 
_mm_adds_pi16(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_paddsw((__v4hi)m1, (__v4hi)m2); + return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_adds_pu8(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_adds_pu8(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_paddusb((__v8qi)m1, (__v8qi)m2); + return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_adds_pu16(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_adds_pu16(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_paddusw((__v4hi)m1, (__v4hi)m2); + return (__m64)__builtin_ia32_paddusw((__v4hi)__m1, (__v4hi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_sub_pi8(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_sub_pi8(__m64 __m1, __m64 __m2) { - return (__m64)((__v8qi)m1 - (__v8qi)m2); + return (__m64)((__v8qi)__m1 - (__v8qi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_sub_pi16(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_sub_pi16(__m64 __m1, __m64 __m2) { - return (__m64)((__v4hi)m1 - (__v4hi)m2); + return (__m64)((__v4hi)__m1 - (__v4hi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_sub_pi32(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_sub_pi32(__m64 __m1, __m64 __m2) { - return (__m64)((__v2si)m1 - (__v2si)m2); + return (__m64)((__v2si)__m1 - (__v2si)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_subs_pi8(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_subs_pi8(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_psubsb((__v8qi)m1, (__v8qi)m2); + return (__m64)__builtin_ia32_psubsb((__v8qi)__m1, (__v8qi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_subs_pi16(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) 
_mm_subs_pi16(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_psubsw((__v4hi)m1, (__v4hi)m2); + return (__m64)__builtin_ia32_psubsw((__v4hi)__m1, (__v4hi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_subs_pu8(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_subs_pu8(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_psubusb((__v8qi)m1, (__v8qi)m2); + return (__m64)__builtin_ia32_psubusb((__v8qi)__m1, (__v8qi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_subs_pu16(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_subs_pu16(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_psubusw((__v4hi)m1, (__v4hi)m2); + return (__m64)__builtin_ia32_psubusw((__v4hi)__m1, (__v4hi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_madd_pi16(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_madd_pi16(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pmaddwd((__v4hi)m1, (__v4hi)m2); + return (__m64)__builtin_ia32_pmaddwd((__v4hi)__m1, (__v4hi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_mulhi_pi16(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_mulhi_pi16(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pmulhw((__v4hi)m1, (__v4hi)m2); + return (__m64)__builtin_ia32_pmulhw((__v4hi)__m1, (__v4hi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_mullo_pi16(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_mullo_pi16(__m64 __m1, __m64 __m2) { - return (__m64)((__v4hi)m1 * (__v4hi)m2); + return (__m64)((__v4hi)__m1 * (__v4hi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_sll_pi16(__m64 m, __m64 count) +inline __m64 __attribute__((__always_inline__)) _mm_sll_pi16(__m64 __m, __m64 __count) { - return (__m64)__builtin_ia32_psllw((__v4hi)m, count); + return (__m64)__builtin_ia32_psllw((__v4hi)__m, __count); } -inline __m64 
__attribute__((__always_inline__)) _mm_slli_pi16(__m64 m, int count) +inline __m64 __attribute__((__always_inline__)) _mm_slli_pi16(__m64 __m, int __count) { - return (__m64)__builtin_ia32_psllwi((__v4hi)m, count); + return (__m64)__builtin_ia32_psllwi((__v4hi)__m, __count); } -inline __m64 __attribute__((__always_inline__)) _mm_sll_pi32(__m64 m, __m64 count) +inline __m64 __attribute__((__always_inline__)) _mm_sll_pi32(__m64 __m, __m64 __count) { - return (__m64)__builtin_ia32_pslld((__v2si)m, count); + return (__m64)__builtin_ia32_pslld((__v2si)__m, __count); } -inline __m64 __attribute__((__always_inline__)) _mm_slli_pi32(__m64 m, int count) +inline __m64 __attribute__((__always_inline__)) _mm_slli_pi32(__m64 __m, int __count) { - return (__m64)__builtin_ia32_pslldi((__v2si)m, count); + return (__m64)__builtin_ia32_pslldi((__v2si)__m, __count); } -inline __m64 __attribute__((__always_inline__)) _mm_sll_pi64(__m64 m, __m64 count) +inline __m64 __attribute__((__always_inline__)) _mm_sll_pi64(__m64 __m, __m64 __count) { - return __builtin_ia32_psllq(m, count); + return __builtin_ia32_psllq(__m, __count); } -inline __m64 __attribute__((__always_inline__)) _mm_slli_pi64(__m64 m, int count) +inline __m64 __attribute__((__always_inline__)) _mm_slli_pi64(__m64 __m, int __count) { - return __builtin_ia32_psllqi(m, count); + return __builtin_ia32_psllqi(__m, __count); } -inline __m64 __attribute__((__always_inline__)) _mm_sra_pi16(__m64 m, __m64 count) +inline __m64 __attribute__((__always_inline__)) _mm_sra_pi16(__m64 __m, __m64 __count) { - return (__m64)__builtin_ia32_psraw((__v4hi)m, count); + return (__m64)__builtin_ia32_psraw((__v4hi)__m, __count); } -inline __m64 __attribute__((__always_inline__)) _mm_srai_pi16(__m64 m, int count) +inline __m64 __attribute__((__always_inline__)) _mm_srai_pi16(__m64 __m, int __count) { - return (__m64)__builtin_ia32_psrawi((__v4hi)m, count); + return (__m64)__builtin_ia32_psrawi((__v4hi)__m, __count); } -inline __m64 
__attribute__((__always_inline__)) _mm_sra_pi32(__m64 m, __m64 count) +inline __m64 __attribute__((__always_inline__)) _mm_sra_pi32(__m64 __m, __m64 __count) { - return (__m64)__builtin_ia32_psrad((__v2si)m, count); + return (__m64)__builtin_ia32_psrad((__v2si)__m, __count); } -inline __m64 __attribute__((__always_inline__)) _mm_srai_pi32(__m64 m, int count) +inline __m64 __attribute__((__always_inline__)) _mm_srai_pi32(__m64 __m, int __count) { - return (__m64)__builtin_ia32_psradi((__v2si)m, count); + return (__m64)__builtin_ia32_psradi((__v2si)__m, __count); } -inline __m64 __attribute__((__always_inline__)) _mm_srl_pi16(__m64 m, __m64 count) +inline __m64 __attribute__((__always_inline__)) _mm_srl_pi16(__m64 __m, __m64 __count) { - return (__m64)__builtin_ia32_psrlw((__v4hi)m, count); + return (__m64)__builtin_ia32_psrlw((__v4hi)__m, __count); } -inline __m64 __attribute__((__always_inline__)) _mm_srli_pi16(__m64 m, int count) +inline __m64 __attribute__((__always_inline__)) _mm_srli_pi16(__m64 __m, int __count) { - return (__m64)__builtin_ia32_psrlwi((__v4hi)m, count); + return (__m64)__builtin_ia32_psrlwi((__v4hi)__m, __count); } -inline __m64 __attribute__((__always_inline__)) _mm_srl_pi32(__m64 m, __m64 count) +inline __m64 __attribute__((__always_inline__)) _mm_srl_pi32(__m64 __m, __m64 __count) { - return (__m64)__builtin_ia32_psrld((__v2si)m, count); + return (__m64)__builtin_ia32_psrld((__v2si)__m, __count); } -inline __m64 __attribute__((__always_inline__)) _mm_srli_pi32(__m64 m, int count) +inline __m64 __attribute__((__always_inline__)) _mm_srli_pi32(__m64 __m, int __count) { - return (__m64)__builtin_ia32_psrldi((__v2si)m, count); + return (__m64)__builtin_ia32_psrldi((__v2si)__m, __count); } -inline __m64 __attribute__((__always_inline__)) _mm_srl_pi64(__m64 m, __m64 count) +inline __m64 __attribute__((__always_inline__)) _mm_srl_pi64(__m64 __m, __m64 __count) { - return (__m64)__builtin_ia32_psrlq(m, count); + return 
(__m64)__builtin_ia32_psrlq(__m, __count); } -inline __m64 __attribute__((__always_inline__)) _mm_srli_pi64(__m64 m, int count) +inline __m64 __attribute__((__always_inline__)) _mm_srli_pi64(__m64 __m, int __count) { - return __builtin_ia32_psrlqi(m, count); + return __builtin_ia32_psrlqi(__m, __count); } -inline __m64 __attribute__((__always_inline__)) _mm_and_si64(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_and_si64(__m64 __m1, __m64 __m2) { - return m1 & m2; + return __m1 & __m2; } -inline __m64 __attribute__((__always_inline__)) _mm_andnot_si64(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_andnot_si64(__m64 __m1, __m64 __m2) { - return ~m1 & m2; + return ~__m1 & __m2; } -inline __m64 __attribute__((__always_inline__)) _mm_or_si64(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_or_si64(__m64 __m1, __m64 __m2) { - return m1 | m2; + return __m1 | __m2; } -inline __m64 __attribute__((__always_inline__)) _mm_xor_si64(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_xor_si64(__m64 __m1, __m64 __m2) { - return m1 ^ m2; + return __m1 ^ __m2; } -inline __m64 __attribute__((__always_inline__)) _mm_cmpeq_pi8(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_cmpeq_pi8(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pcmpeqb((__v8qi)m1, (__v8qi)m2); + return (__m64)__builtin_ia32_pcmpeqb((__v8qi)__m1, (__v8qi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_cmpeq_pi16(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_cmpeq_pi16(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pcmpeqw((__v4hi)m1, (__v4hi)m2); + return (__m64)__builtin_ia32_pcmpeqw((__v4hi)__m1, (__v4hi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_cmpeq_pi32(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_cmpeq_pi32(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pcmpeqd((__v2si)m1, 
(__v2si)m2); + return (__m64)__builtin_ia32_pcmpeqd((__v2si)__m1, (__v2si)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_cmpgt_pi8(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_cmpgt_pi8(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pcmpgtb((__v8qi)m1, (__v8qi)m2); + return (__m64)__builtin_ia32_pcmpgtb((__v8qi)__m1, (__v8qi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_cmpgt_pi16(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_cmpgt_pi16(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pcmpgtw((__v4hi)m1, (__v4hi)m2); + return (__m64)__builtin_ia32_pcmpgtw((__v4hi)__m1, (__v4hi)__m2); } -inline __m64 __attribute__((__always_inline__)) _mm_cmpgt_pi32(__m64 m1, __m64 m2) +inline __m64 __attribute__((__always_inline__)) _mm_cmpgt_pi32(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pcmpgtd((__v2si)m1, (__v2si)m2); + return (__m64)__builtin_ia32_pcmpgtd((__v2si)__m1, (__v2si)__m2); } inline __m64 __attribute__((__always_inline__)) _mm_setzero_si64() @@ -327,49 +327,49 @@ inline __m64 __attribute__((__always_inline__)) _mm_setzero_si64() return (__m64){ 0LL }; } -inline __m64 __attribute__((__always_inline__)) _mm_set_pi32(int i1, int i0) +inline __m64 __attribute__((__always_inline__)) _mm_set_pi32(int __i1, int __i0) { - return (__m64)(__v2si){ i0, i1 }; + return (__m64)(__v2si){ __i0, __i1 }; } -inline __m64 __attribute__((__always_inline__)) _mm_set_pi16(short s3, short s2, short s1, short s0) +inline __m64 __attribute__((__always_inline__)) _mm_set_pi16(short __s3, short __s2, short __s1, short __s0) { - return (__m64)(__v4hi){ s0, s1, s2, s3 }; + return (__m64)(__v4hi){ __s0, __s1, __s2, __s3 }; } -inline __m64 __attribute__((__always_inline__)) _mm_set_pi8(char b7, char b6, char b5, char b4, char b3, char b2, char b1, char b0) +inline __m64 __attribute__((__always_inline__)) _mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, 
char __b1, char __b0) { - return (__m64)(__v8qi){ b0, b1, b2, b3, b4, b5, b6, b7 }; + return (__m64)(__v8qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7 }; } -inline __m64 __attribute__((__always_inline__)) _mm_set1_pi32(int i) +inline __m64 __attribute__((__always_inline__)) _mm_set1_pi32(int __i) { - return (__m64)(__v2si){ i, i }; + return (__m64)(__v2si){ __i, __i }; } -inline __m64 __attribute__((__always_inline__)) _mm_set1_pi16(short s) +inline __m64 __attribute__((__always_inline__)) _mm_set1_pi16(short __s) { - return (__m64)(__v4hi){ s }; + return (__m64)(__v4hi){ __s }; } -inline __m64 __attribute__((__always_inline__)) _mm_set1_pi8(char b) +inline __m64 __attribute__((__always_inline__)) _mm_set1_pi8(char __b) { - return (__m64)(__v8qi){ b }; + return (__m64)(__v8qi){ __b }; } -inline __m64 __attribute__((__always_inline__)) _mm_setr_pi32(int i1, int i0) +inline __m64 __attribute__((__always_inline__)) _mm_setr_pi32(int __i1, int __i0) { - return (__m64)(__v2si){ i1, i0 }; + return (__m64)(__v2si){ __i1, __i0 }; } -inline __m64 __attribute__((__always_inline__)) _mm_setr_pi16(short s3, short s2, short s1, short s0) +inline __m64 __attribute__((__always_inline__)) _mm_setr_pi16(short __s3, short __s2, short __s1, short __s0) { - return (__m64)(__v4hi){ s3, s2, s1, s0 }; + return (__m64)(__v4hi){ __s3, __s2, __s1, __s0 }; } -inline __m64 __attribute__((__always_inline__)) _mm_setr_pi8(char b7, char b6, char b5, char b4, char b3, char b2, char b1, char b0) +inline __m64 __attribute__((__always_inline__)) _mm_setr_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0) { - return (__m64)(__v8qi){ b7, b6, b5, b4, b3, b2, b1, b0 }; + return (__m64)(__v8qi){ __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0 }; } #endif /* __MMX__ */ |