| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-07-04 21:30:47 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-07-04 21:30:47 +0000 |
| commit | 427154db2a58ff02233943a1bb38e1b7276626b0 (patch) | |
| tree | 10d8df24ae04e60aba4dddfe6da4df018d230a09 | |
| parent | 5aebb86ac1b09a7dd05f5236fd79047fe3d5e553 (diff) | |
| download | bcm5719-llvm-427154db2a58ff02233943a1bb38e1b7276626b0.tar.gz bcm5719-llvm-427154db2a58ff02233943a1bb38e1b7276626b0.zip | |
[X86][AVX512] Converted the VSHUFPD intrinsics to generic IR
llvm-svn: 274523
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | clang/lib/Headers/avx512fintrin.h | 37 |
| -rw-r--r-- | clang/lib/Headers/avx512vlintrin.h | 70 |
| -rw-r--r-- | clang/test/CodeGen/avx512f-builtins.c | 8 |
| -rw-r--r-- | clang/test/CodeGen/avx512vl-builtins.c | 24 |

4 files changed, 74 insertions, 65 deletions
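The patch replaces the target-specific `__builtin_ia32_shufpd{128,256,512}_mask` (and the 128/256-bit masked `shufps`) builtins with the generic `__builtin_shufflevector`, applying the write/zero mask afterwards via the `__builtin_ia32_selectp*` builtins. As a rough standalone illustration of the index mapping encoded by the new `_mm512_shuffle_pd` macro in the diff below (this helper program is not part of the patch), the sketch prints which element of the concatenated `<A, B>` vector each result lane takes for a given immediate:

```c
#include <stdio.h>

/* Illustrative only: mirrors the index arithmetic of the new
 * _mm512_shuffle_pd macro in the diff below.  Even result lanes read
 * from A (indices 0..7 of the concatenated <A, B> vector), odd result
 * lanes read from B (indices 8..15); bit i of the immediate picks the
 * low or high double within the corresponding 128-bit lane. */
int main(void) {
  const unsigned imm = 4; /* same immediate the updated 512-bit tests use */
  for (int lane = 0; lane < 8; ++lane) {
    unsigned bit  = (imm >> lane) & 1;         /* ((M & (1 << lane)) >> lane) */
    unsigned base = (lane & 1) ? 8u : 0u;      /* odd lanes come from B       */
    unsigned idx  = (lane & ~1u) + bit + base; /* index into <A, B>           */
    printf("result[%d] = concat(A,B)[%u]\n", lane, idx);
  }
  /* For imm == 4 this prints indices 0, 8, 3, 10, 4, 12, 6, 14 -- the same
   * shuffle mask the updated CHECK lines in avx512f-builtins.c expect. */
  return 0;
}
```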
diff --git a/clang/lib/Headers/avx512fintrin.h b/clang/lib/Headers/avx512fintrin.h
index c0d44984fd7..4bd5a8d7dbb 100644
--- a/clang/lib/Headers/avx512fintrin.h
+++ b/clang/lib/Headers/avx512fintrin.h
@@ -5950,6 +5950,7 @@ _mm512_kmov (__mmask16 __A)
 #define _mm_cvt_roundsd_si64(A, R) __extension__ ({ \
   (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)); })
 
+
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_mask2_permutex2var_epi32 (__m512i __A, __m512i __I, __mmask16 __U,
                  __m512i __B)
@@ -7166,23 +7167,27 @@ _mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
                                           (__v8di)_mm512_setzero_si512(), \
                                           (__mmask8)(U)); })
 
-#define _mm512_shuffle_pd(M, V, imm) __extension__ ({ \
-  (__m512d)__builtin_ia32_shufpd512_mask((__v8df)(__m512d)(M), \
-                                         (__v8df)(__m512d)(V), (int)(imm), \
-                                         (__v8df)_mm512_undefined_pd(), \
-                                         (__mmask8)-1); })
-
-#define _mm512_mask_shuffle_pd(W, U, M, V, imm) __extension__ ({ \
-  (__m512d)__builtin_ia32_shufpd512_mask((__v8df)(__m512d)(M), \
-                                         (__v8df)(__m512d)(V), (int)(imm), \
-                                         (__v8df)(__m512d)(W), \
-                                         (__mmask8)(U)); })
+#define _mm512_shuffle_pd(A, B, M) __extension__ ({ \
+  (__m512d)__builtin_shufflevector((__v8df)(__m512d)(A), \
+                                   (__v8df)(__m512d)(B), \
+                                   (((M) & 0x01) >> 0) + 0, \
+                                   (((M) & 0x02) >> 1) + 8, \
+                                   (((M) & 0x04) >> 2) + 2, \
+                                   (((M) & 0x08) >> 3) + 10, \
+                                   (((M) & 0x10) >> 4) + 4, \
+                                   (((M) & 0x20) >> 5) + 12, \
+                                   (((M) & 0x40) >> 6) + 6, \
+                                   (((M) & 0x80) >> 7) + 14); })
+
+#define _mm512_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \
+  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                       (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
+                                       (__v8df)(__m512d)(W)); })
 
-#define _mm512_maskz_shuffle_pd(U, M, V, imm) __extension__ ({ \
-  (__m512d)__builtin_ia32_shufpd512_mask((__v8df)(__m512d)(M), \
-                                         (__v8df)(__m512d)(V), (int)(imm), \
-                                         (__v8df)_mm512_setzero_pd(), \
-                                         (__mmask8)(U)); })
+#define _mm512_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \
+  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                       (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
+                                       (__v8df)_mm512_setzero_pd()); })
 
 #define _mm512_shuffle_ps(M, V, imm) __extension__ ({ \
   (__m512)__builtin_ia32_shufps512_mask((__v16sf)(__m512)(M), \
diff --git a/clang/lib/Headers/avx512vlintrin.h b/clang/lib/Headers/avx512vlintrin.h
index b5b371823a3..2e6f9fc83ba 100644
--- a/clang/lib/Headers/avx512vlintrin.h
+++ b/clang/lib/Headers/avx512vlintrin.h
@@ -7374,51 +7374,45 @@ _mm256_maskz_sra_epi64 (__mmask8 __U, __m256i __A, __m128i __B)
                                          (__v4di)_mm256_setzero_si256(), \
                                          (__mmask8)(U)); })
 
-#define _mm_mask_shuffle_pd(W, U, A, B, imm) __extension__ ({ \
-  (__m128d)__builtin_ia32_shufpd128_mask((__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), (int)(imm), \
-                                         (__v2df)(__m128d)(W), \
-                                         (__mmask8)(U)); })
+#define _mm_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \
+  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+                                       (__v2df)_mm_shuffle_pd((A), (B), (M)), \
+                                       (__v2df)(__m128d)(W)); })
 
-#define _mm_maskz_shuffle_pd(U, A, B, imm) __extension__ ({ \
-  (__m128d)__builtin_ia32_shufpd128_mask((__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), (int)(imm), \
-                                         (__v2df)_mm_setzero_pd(), \
-                                         (__mmask8)(U)); })
+#define _mm_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \
+  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+                                       (__v2df)_mm_shuffle_pd((A), (B), (M)), \
+                                       (__v2df)_mm_setzero_pd()); })
 
-#define _mm256_mask_shuffle_pd(W, U, A, B, imm) __extension__ ({ \
-  (__m256d)__builtin_ia32_shufpd256_mask((__v4df)(__m256d)(A), \
-                                         (__v4df)(__m256d)(B), (int)(imm), \
-                                         (__v4df)(__m256d)(W), \
-                                         (__mmask8)(U)); })
+#define _mm256_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \
+  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+                                       (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
+                                       (__v4df)(__m256d)(W)); })
 
-#define _mm256_maskz_shuffle_pd(U, A, B, imm) __extension__ ({ \
-  (__m256d)__builtin_ia32_shufpd256_mask((__v4df)(__m256d)(A), \
-                                         (__v4df)(__m256d)(B), (int)(imm), \
-                                         (__v4df)_mm256_setzero_pd(), \
-                                         (__mmask8)(U)); })
+#define _mm256_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \
+  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+                                       (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
+                                       (__v4df)_mm256_setzero_pd()); })
 
-#define _mm_mask_shuffle_ps(W, U, A, B, imm) __extension__ ({ \
-  (__m128)__builtin_ia32_shufps128_mask((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), (int)(imm), \
-                                        (__v4sf)(__m128)(W), (__mmask8)(U)); })
+#define _mm_mask_shuffle_ps(W, U, A, B, M) __extension__ ({ \
+  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+                                      (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
+                                      (__v4sf)(__m128)(W)); })
 
-#define _mm_maskz_shuffle_ps(U, A, B, imm) __extension__ ({ \
-  (__m128)__builtin_ia32_shufps128_mask((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), (int)(imm), \
-                                        (__v4sf)_mm_setzero_ps(), \
-                                        (__mmask8)(U)); })
+#define _mm_maskz_shuffle_ps(U, A, B, M) __extension__ ({ \
+  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+                                      (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
+                                      (__v4sf)_mm_setzero_ps()); })
 
-#define _mm256_mask_shuffle_ps(W, U, A, B, imm) __extension__ ({ \
-  (__m256)__builtin_ia32_shufps256_mask((__v8sf)(__m256)(A), \
-                                        (__v8sf)(__m256)(B), (int)(imm), \
-                                        (__v8sf)(__m256)(W), (__mmask8)(U)); })
+#define _mm256_mask_shuffle_ps(W, U, A, B, M) __extension__ ({ \
+  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+                                      (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
+                                      (__v8sf)(__m256)(W)); })
 
-#define _mm256_maskz_shuffle_ps(U, A, B, imm) __extension__ ({ \
-  (__m256)__builtin_ia32_shufps256_mask((__v8sf)(__m256)(A), \
-                                        (__v8sf)(__m256)(B), (int)(imm), \
-                                        (__v8sf)_mm256_setzero_ps(), \
-                                        (__mmask8)(U)); })
+#define _mm256_maskz_shuffle_ps(U, A, B, M) __extension__ ({ \
+  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+                                      (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
+                                      (__v8sf)_mm256_setzero_ps()); })
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_rsqrt14_pd (__m128d __A)
diff --git a/clang/test/CodeGen/avx512f-builtins.c b/clang/test/CodeGen/avx512f-builtins.c
index 1a24cafa573..a475e0eaaad 100644
--- a/clang/test/CodeGen/avx512f-builtins.c
+++ b/clang/test/CodeGen/avx512f-builtins.c
@@ -4220,19 +4220,21 @@ __m512i test_mm512_maskz_shuffle_i64x2(__mmask8 __U, __m512i __A, __m512i __B) {
 
 __m512d test_mm512_shuffle_pd(__m512d __M, __m512d __V) {
   // CHECK-LABEL: @test_mm512_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.512
+  // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 3, i32 10, i32 4, i32 12, i32 6, i32 14>
   return _mm512_shuffle_pd(__M, __V, 4);
 }
 
 __m512d test_mm512_mask_shuffle_pd(__m512d __W, __mmask8 __U, __m512d __M, __m512d __V) {
   // CHECK-LABEL: @test_mm512_mask_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.512
+  // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 3, i32 10, i32 4, i32 12, i32 6, i32 14>
+  // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
   return _mm512_mask_shuffle_pd(__W, __U, __M, __V, 4);
 }
 
 __m512d test_mm512_maskz_shuffle_pd(__mmask8 __U, __m512d __M, __m512d __V) {
   // CHECK-LABEL: @test_mm512_maskz_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.512
+  // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 3, i32 10, i32 4, i32 12, i32 6, i32 14>
+  // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
   return _mm512_maskz_shuffle_pd(__U, __M, __V, 4);
 }
 
diff --git a/clang/test/CodeGen/avx512vl-builtins.c b/clang/test/CodeGen/avx512vl-builtins.c
index 7e1c9908743..b4024ba8c64 100644
--- a/clang/test/CodeGen/avx512vl-builtins.c
+++ b/clang/test/CodeGen/avx512vl-builtins.c
@@ -5189,49 +5189,57 @@ __m256i test_mm256_maskz_shuffle_i64x2(__mmask8 __U, __m256i __A, __m256i __B) {
 
 __m128d test_mm_mask_shuffle_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
   // CHECK-LABEL: @test_mm_mask_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.128
+  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
+  // CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
   return _mm_mask_shuffle_pd(__W, __U, __A, __B, 3);
 }
 
 __m128d test_mm_maskz_shuffle_pd(__mmask8 __U, __m128d __A, __m128d __B) {
   // CHECK-LABEL: @test_mm_maskz_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.128
+  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
+  // CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
   return _mm_maskz_shuffle_pd(__U, __A, __B, 3);
 }
 
 __m256d test_mm256_mask_shuffle_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
   // CHECK-LABEL: @test_mm256_mask_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.256
+  // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 1, i32 5, i32 2, i32 6>
+  // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
   return _mm256_mask_shuffle_pd(__W, __U, __A, __B, 3);
 }
 
 __m256d test_mm256_maskz_shuffle_pd(__mmask8 __U, __m256d __A, __m256d __B) {
   // CHECK-LABEL: @test_mm256_maskz_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.256
+  // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 1, i32 5, i32 2, i32 6>
+  // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
   return _mm256_maskz_shuffle_pd(__U, __A, __B, 3);
 }
 
 __m128 test_mm_mask_shuffle_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
   // CHECK-LABEL: @test_mm_mask_shuffle_ps
-  // CHECK: @llvm.x86.avx512.mask.shuf.ps.128
+  // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 4, i32 4>
+  // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
   return _mm_mask_shuffle_ps(__W, __U, __A, __B, 4);
 }
 
 __m128 test_mm_maskz_shuffle_ps(__mmask8 __U, __m128 __A, __m128 __B) {
   // CHECK-LABEL: @test_mm_maskz_shuffle_ps
-  // CHECK: @llvm.x86.avx512.mask.shuf.ps.128
+  // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 4, i32 4>
+  // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
   return _mm_maskz_shuffle_ps(__U, __A, __B, 4);
 }
 
 __m256 test_mm256_mask_shuffle_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
   // CHECK-LABEL: @test_mm256_mask_shuffle_ps
-  // CHECK: @llvm.x86.avx512.mask.shuf.ps.256
+  // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 8, i32 8, i32 4, i32 5, i32 12, i32 12>
+  // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
   return _mm256_mask_shuffle_ps(__W, __U, __A, __B, 4);
 }
 
 __m256 test_mm256_maskz_shuffle_ps(__mmask8 __U, __m256 __A, __m256 __B) {
   // CHECK-LABEL: @test_mm256_maskz_shuffle_ps
-  // CHECK: @llvm.x86.avx512.mask.shuf.ps.256
+  // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 8, i32 8, i32 4, i32 5, i32 12, i32 12>
+  // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
   return _mm256_maskz_shuffle_ps(__U, __A, __B, 4);
 }
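For context, here is a minimal usage sketch (not from the commit; it assumes an AVX-512F-capable toolchain and a build flag along the lines of `-mavx512f`) exercising one of the masked forms whose lowering changes here. After this commit clang emits a generic `shufflevector` followed by a `select` on the mask bits for this call, rather than the `@llvm.x86.avx512.mask.shuf.pd.512` intrinsic:

```c
#include <stdio.h>
#include <immintrin.h>

int main(void) {
  /* _mm512_set_pd takes elements from highest to lowest, so a[0] == 0.0. */
  __m512d a = _mm512_set_pd(7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0);
  __m512d b = _mm512_set_pd(17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0);
  __m512d w = _mm512_set1_pd(-1.0); /* pass-through value for masked-off lanes */

  /* Immediate 4 selects <a0, b0, a3, b2, a4, b4, a6, b6>; mask 0x0F keeps the
   * shuffle result in lanes 0..3 and the pass-through W in lanes 4..7. */
  __m512d r = _mm512_mask_shuffle_pd(w, 0x0F, a, b, 4);

  double out[8];
  _mm512_storeu_pd(out, r);
  for (int i = 0; i < 8; ++i)
    printf("%g ", out[i]);          /* expected: 0 10 3 12 -1 -1 -1 -1 */
  printf("\n");
  return 0;
}
```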