-rw-r--r-- | llvm/test/CodeGen/X86/avx-intrinsics-fast-isel.ll | 675
1 file changed, 675 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/avx-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
new file mode 100644
index 00000000000..61e2e4c1b44
--- /dev/null
+++ b/llvm/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
@@ -0,0 +1,675 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=avx,aes,pclmul | FileCheck %s --check-prefix=ALL --check-prefix=X32
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=avx,aes,pclmul | FileCheck %s --check-prefix=ALL --check-prefix=X64
+
+; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx-builtins.c
+
+define <4 x double> @test_mm256_add_pd(<4 x double> %a0, <4 x double> %a1) {
+; X32-LABEL: test_mm256_add_pd:
+; X32: # BB#0:
+; X32-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_add_pd:
+; X64: # BB#0:
+; X64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
+ %res = fadd <4 x double> %a0, %a1
+ ret <4 x double> %res
+}
+
+define <8 x float> @test_mm256_add_ps(<8 x float> %a0, <8 x float> %a1) {
+; X32-LABEL: test_mm256_add_ps:
+; X32: # BB#0:
+; X32-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_add_ps:
+; X64: # BB#0:
+; X64-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
+ %res = fadd <8 x float> %a0, %a1
+ ret <8 x float> %res
+}
+
+define <4 x double> @test_mm256_addsub_pd(<4 x double> %a0, <4 x double> %a1) {
+; X32-LABEL: test_mm256_addsub_pd:
+; X32: # BB#0:
+; X32-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_addsub_pd:
+; X64: # BB#0:
+; X64-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
+ %res = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1)
+ ret <4 x double> %res
+}
+declare <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double>, <4 x double>) nounwind readnone
+
+define <8 x float> @test_mm256_addsub_ps(<8 x float> %a0, <8 x float> %a1) {
+; X32-LABEL: test_mm256_addsub_ps:
+; X32: # BB#0:
+; X32-NEXT: vaddsubps %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_addsub_ps:
+; X64: # BB#0:
+; X64-NEXT: vaddsubps %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
+ %res = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1)
+ ret <8 x float> %res
+}
+declare <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float>, <8 x float>) nounwind readnone
+
+define <4 x double> @test_mm256_and_pd(<4 x double> %a0, <4 x double> %a1) {
+; X32-LABEL: test_mm256_and_pd:
+; X32: # BB#0:
+; X32-NEXT: vandps %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_and_pd:
+; X64: # BB#0:
+; X64-NEXT: vandps %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
+ %1 = bitcast <4 x double> %a0 to <4 x i64>
+ %2 = bitcast <4 x double> %a1 to <4 x i64>
+ %res = and <4 x i64> %1, %2
+ %bc = bitcast <4 x i64> %res to <4 x double>
+ ret <4 x double> %bc
+}
+
+define <8 x float> @test_mm256_and_ps(<8 x float> %a0, <8 x float> %a1) {
+; X32-LABEL: test_mm256_and_ps:
+; X32: # BB#0:
+; X32-NEXT: vandps %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_and_ps:
+; X64: # BB#0:
+; X64-NEXT: vandps %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
+ %1 = bitcast <8 x float> %a0 to <8 x i32>
+ %2 = bitcast <8 x float> %a1 to <8 x i32>
+ %res = and <8 x i32> %1, %2
+ %bc = bitcast <8 x i32> %res to <8 x float>
+ ret <8 x float> %bc
+}
+
+define <4 x double> @test_mm256_andnot_pd(<4 x double> %a0, <4 x double> %a1) {
+; X32-LABEL: test_mm256_andnot_pd:
+; X32: # BB#0:
+; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X32-NEXT: vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; X32-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; X32-NEXT: vandps %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_andnot_pd:
+; X64: # BB#0:
+; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X64-NEXT: vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; X64-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; X64-NEXT: vandps %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
+ %1 = bitcast <4 x double> %a0 to <4 x i64>
+ %2 = bitcast <4 x double> %a1 to <4 x i64>
+ %3 = xor <4 x i64> %1, <i64 -1, i64 -1, i64 -1, i64 -1>
+ %res = and <4 x i64> %3, %2
+ %bc = bitcast <4 x i64> %res to <4 x double>
+ ret <4 x double> %bc
+}
+
+define <8 x float> @test_mm256_andnot_ps(<8 x float> %a0, <8 x float> %a1) {
+; X32-LABEL: test_mm256_andnot_ps:
+; X32: # BB#0:
+; X32-NEXT: vandnps %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_andnot_ps:
+; X64: # BB#0:
+; X64-NEXT: vandnps %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
+ %1 = bitcast <8 x float> %a0 to <8 x i32>
+ %2 = bitcast <8 x float> %a1 to <8 x i32>
+ %3 = xor <8 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+ %res = and <8 x i32> %3, %2
+ %bc = bitcast <8 x i32> %res to <8 x float>
+ ret <8 x float> %bc
+}
+
+define <4 x double> @test_mm256_blend_pd(<4 x double> %a0, <4 x double> %a1) {
+; X32-LABEL: test_mm256_blend_pd:
+; X32: # BB#0:
+; X32-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_blend_pd:
+; X64: # BB#0:
+; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
+; X64-NEXT: retq
+ %res = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
+ ret <4 x double> %res
+}
+
+define <8 x float> @test_mm256_blend_ps(<8 x float> %a0, <8 x float> %a1) {
+; X32-LABEL: test_mm256_blend_ps:
+; X32: # BB#0:
+; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6],ymm1[7]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_blend_ps:
+; X64: # BB#0:
+; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4,5,6],ymm1[7]
+; X64-NEXT: retq
+ %res = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 15>
+ ret <8 x float> %res
+}
+
+define <4 x double> @test_mm256_blendv_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
+; X32-LABEL: test_mm256_blendv_pd:
+; X32: # BB#0:
+; X32-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_blendv_pd:
+; X64: # BB#0:
+; X64-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
+ %res = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) ; <<4 x double>> [#uses=1]
+ ret <4 x double> %res
+}
+declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
+
+define <8 x float> @test_mm256_blendv_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
+; X32-LABEL: test_mm256_blendv_ps:
+; X32: # BB#0:
+; X32-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_blendv_ps:
+; X64: # BB#0:
+; X64-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
+ %res = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) ; <<8 x float>> [#uses=1]
+ ret <8 x float> %res
+}
+declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
+
+define <4 x double> @test_mm256_broadcast_pd(i8* %a0) {
+; X32-LABEL: test_mm256_broadcast_pd:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastf128 (%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_broadcast_pd:
+; X64: # BB#0:
+; X64-NEXT: vbroadcastf128 (%rdi), %ymm0
+; X64-NEXT: retq
+ %res = call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %a0) ; <<4 x double>> [#uses=1]
+ ret <4 x double> %res
+}
+declare <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8*) nounwind readonly
+
+define <8 x float> @test_mm256_broadcast_ps(i8* %a0) {
+; X32-LABEL: test_mm256_broadcast_ps:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastf128 (%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_broadcast_ps:
+; X64: # BB#0:
+; X64-NEXT: vbroadcastf128 (%rdi), %ymm0
+; X64-NEXT: retq
+ %res = call <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8* %a0) ; <<8 x float>> [#uses=1]
+ ret <8 x float> %res
+}
+declare <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8*) nounwind readonly
+
+; TODO test_mm256_broadcast_sd
+; TODO test_mm_broadcast_ss
+; TODO test_mm256_broadcast_sd
+; TODO test_mm256_castpd_ps
+; TODO test_mm256_castpd_si256
+; TODO test_mm256_castpd128_pd256
+; TODO test_mm256_castpd256_pd128
+; TODO test_mm256_castps_pd
+; TODO test_mm256_castps_si256
+; TODO test_mm256_castps128_ps256
+; TODO test_mm256_castps256_ps128
+; TODO test_mm256_castsi128_si256
+; TODO test_mm256_castsi256_pd
+; TODO test_mm256_castsi256_ps
+; TODO test_mm256_castsi256_si128
+; TODO test_mm256_ceil_pd
+; TODO test_mm256_ceil_ps
+; TODO test_mm_cmp_pd
+; TODO test_mm256_cmp_pd
+; TODO test _mm_cmp_ps
+; TODO test _mm256_cmp_ps
+; TODO test_mm_cmp_sd
+; TODO test_mm_cmp_ss
+; TODO test_mm256_cvtepi32_pd
+; TODO test_mm256_cvtepi32_ps
+; TODO test_mm256_cvtpd_epi32
+; TODO test_mm256_cvtpd_ps
+; TODO test_mm256_cvtps_epi32
+; TODO test_mm256_cvtps_pd
+; TODO test_mm256_cvttpd_epi32
+; TODO test_mm256_cvttps_epi32
+
+define <4 x double> @test_mm256_div_pd(<4 x double> %a0, <4 x double> %a1) {
+; X32-LABEL: test_mm256_div_pd:
+; X32: # BB#0:
+; X32-NEXT: vdivpd %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_div_pd:
+; X64: # BB#0:
+; X64-NEXT: vdivpd %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
+ %res = fdiv <4 x double> %a0, %a1
+ ret <4 x double> %res
+}
+
+define <8 x float> @test_mm256_div_ps(<8 x float> %a0, <8 x float> %a1) {
+; X32-LABEL: test_mm256_div_ps:
+; X32: # BB#0:
+; X32-NEXT: vdivps %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_div_ps:
+; X64: # BB#0:
+; X64-NEXT: vdivps %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
+ %res = fdiv <8 x float> %a0, %a1
+ ret <8 x float> %res
+}
+
+; TODO test_mm256_dp_ps
+; TODO test_mm256_extract_epi16
+; TODO test_mm256_extract_epi32
+; TODO test_mm256_extract_epi64
+; TODO test_mm256_extract_epi8
+; TODO test_mm256_extractf128_pd
+; TODO test_mm256_extractf128_ps
+; TODO test_mm256_extractf128_si256
+; TODO test_mm256_floor_pd
+; TODO test_mm256_floor_ps
+; TODO test_mm256_hadd_pd
+; TODO test_mm256_hadd_ps
+; TODO test_mm256_hsub_pd
+; TODO test_mm256_hsub_ps
+; TODO test_mm256_insert_epi16
+; TODO test_mm256_insert_epi32
+; TODO test_mm256_insert_epi64
+; TODO test_mm256_insert_epi8
+; TODO test_mm256_insertf128_pd
+; TODO test_mm256_insertf128_ps
+; TODO test_mm256_insertf128_si256
+; TODO test_mm256_lddqu_si256
+; TODO test_mm256_load_pd
+; TODO test_mm256_load_ps
+; TODO test_mm256_load_si256
+; TODO test_mm256_loadu_pd
+; TODO test_mm256_loadu_ps
+; TODO test_mm256_loadu_si256
+; TODO test_mm256_loadu2_m128
+; TODO test_mm256_loadu2_m128d
+; TODO test_mm256_loadu2_m128i
+; TODO test_mm_maskload_pd
+; TODO test_mm256_maskload_pd
+; TODO test_mm_maskload_ps
+; TODO test_mm256_maskload_ps
+; TODO test_mm_maskstore_pd
+; TODO test_mm256_maskstore_pd
+; TODO test_mm_maskstore_ps
+; TODO test_mm256_maskstore_ps
+; TODO test_mm256_max_pd
+; TODO test_mm256_max_ps
+; TODO test_mm256_min_pd
+; TODO test_mm256_min_ps
+; TODO test_mm256_movedup_pd
+; TODO test_mm256_movehdup_ps
+; TODO test_mm256_moveldup_ps
+; TODO test_mm256_movemask_pd
+; TODO test_mm256_movemask_ps
+
+define <4 x double> @test_mm256_mul_pd(<4 x double> %a0, <4 x double> %a1) {
+; X32-LABEL: test_mm256_mul_pd:
+; X32: # BB#0:
+; X32-NEXT: vmulpd %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mul_pd:
+; X64: # BB#0:
+; X64-NEXT: vmulpd %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
+ %res = fmul <4 x double> %a0, %a1
+ ret <4 x double> %res
+}
+
+define <8 x float> @test_mm256_mul_ps(<8 x float> %a0, <8 x float> %a1) {
+; X32-LABEL: test_mm256_mul_ps:
+; X32: # BB#0:
+; X32-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_mul_ps:
+; X64: # BB#0:
+; X64-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
+ %res = fmul <8 x float> %a0, %a1
+ ret <8 x float> %res
+}
+
+; TODO test_mm256_or_pd
+; TODO test_mm256_or_ps
+; TODO test_mm_permute_pd
+; TODO test_mm256_permute_pd
+; TODO test_mm_permute_ps
+; TODO test_mm256_permute_ps
+
+define <4 x double> @test_mm256_permute2f128_pd(<4 x double> %a0, <4 x double> %a1) {
+; X32-LABEL: test_mm256_permute2f128_pd:
+; X32: # BB#0:
+; X32-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm1[0,1]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_permute2f128_pd:
+; X64: # BB#0:
+; X64-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm1[0,1]
+; X64-NEXT: retq
+ %res = call <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double> %a0, <4 x double> %a1, i8 44)
+ ret <4 x double> %res
+}
+declare <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double>, <4 x double>, i8) nounwind readnone
+
+; PR26667
+define <8 x float> @test_mm256_permute2f128_ps(<8 x float> %a0, <8 x float> %a1) {
+; X32-LABEL: test_mm256_permute2f128_ps:
+; X32: # BB#0:
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_permute2f128_ps:
+; X64: # BB#0:
+; X64-NEXT: retq
+ %res = call <8 x float> @llvm.x86.avx.vperm2f128.ps.256(<8 x float> %a0, <8 x float> %a1, i8 50)
+ ret <8 x float> %res
+}
+declare <8 x float> @llvm.x86.avx.vperm2f128.ps.256(<8 x float>, <8 x float>, i8) nounwind readnone
+
+define <4 x i64> @test_mm256_permute2f128_si256(<4 x i64> %a0, <4 x i64> %a1) {
+; X32-LABEL: test_mm256_permute2f128_si256:
+; X32: # BB#0:
+; X32-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_permute2f128_si256:
+; X64: # BB#0:
+; X64-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1]
+; X64-NEXT: retq
+ %1 = bitcast <4 x i64> %a0 to <8 x i32>
+ %2 = bitcast <4 x i64> %a1 to <8 x i32>
+ %res = call <8 x i32> @llvm.x86.avx.vperm2f128.si.256(<8 x i32> %1, <8 x i32> %2, i8 35)
+ %bc = bitcast <8 x i32> %res to <4 x i64>
+ ret <4 x i64> %bc
+}
+declare <8 x i32> @llvm.x86.avx.vperm2f128.si.256(<8 x i32>, <8 x i32>, i8) nounwind readnone
+
+; TODO test_mm_permutevar_pd
+; TODO test_mm256_permutevar_pd
+; TODO test_mm_permutevar_ps
+; TODO test_mm256_permutevar_ps
+; TODO test_mm256_rcp_ps
+; TODO test_mm256_round_pd
+; TODO test_mm256_round_ps
+; TODO test_mm256_rsqrt_ps
+; TODO test_mm256_set_epi16
+; TODO test_mm256_set_epi32
+; TODO test_mm256_set_epi64x
+; TODO test_mm256_set_epi8
+; TODO test_mm256_set_m128
+; TODO test_mm256_set_m128d
+; TODO test_mm256_set_m128i
+; TODO test_mm256_set_pd
+; TODO test_mm256_set_ps
+; TODO test_mm256_set1_epi16
+; TODO test_mm256_set1_epi32
+; TODO test_mm256_set1_epi64x
+; TODO test_mm256_set1_epi8
+; TODO test_mm256_set1_pd
+; TODO test_mm256_set1_ps
+; TODO test_mm256_setr_epi16
+; TODO test_mm256_setr_epi32
+; TODO test_mm256_setr_epi64x
+; TODO test_mm256_setr_epi8
+; TODO test_mm256_setr_m128
+; TODO test_mm256_setr_m128d
+; TODO test_mm256_setr_m128i
+; TODO test_mm256_setr_pd
+; TODO test_mm256_setr_ps
+; TODO test_mm256_setzero_pd
+; TODO test_mm256_setzero_ps
+; TODO test_mm256_setzero_si256
+; TODO test_mm256_shuffle_pd
+; TODO test_mm256_shuffle_ps
+; TODO test_mm256_sqrt_pd
+; TODO test_mm256_sqrt_ps
+; TODO test_mm256_store_pd
+; TODO test_mm256_store_ps
+; TODO test_mm256_store_si256
+; TODO test_mm256_storeu_pd
+; TODO test_mm256_storeu_ps
+; TODO test_mm256_storeu_si256
+; TODO test_mm256_storeu2_m128
+; TODO test_mm256_storeu2_m128d
+; TODO test_mm256_storeu2_m128i
+; TODO test_mm256_stream_pd
+; TODO test_mm256_stream_ps
+; TODO test_mm256_stream_si256
+
+define <4 x double> @test_mm256_sub_pd(<4 x double> %a0, <4 x double> %a1) {
+; X32-LABEL: test_mm256_sub_pd:
+; X32: # BB#0:
+; X32-NEXT: vsubpd %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_sub_pd:
+; X64: # BB#0:
+; X64-NEXT: vsubpd %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
+ %res = fsub <4 x double> %a0, %a1
+ ret <4 x double> %res
+}
+
+define <8 x float> @test_mm256_sub_ps(<8 x float> %a0, <8 x float> %a1) {
+; X32-LABEL: test_mm256_sub_ps:
+; X32: # BB#0:
+; X32-NEXT: vsubps %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_sub_ps:
+; X64: # BB#0:
+; X64-NEXT: vsubps %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
+ %res = fsub <8 x float> %a0, %a1
+ ret <8 x float> %res
+}
+
+; TODO test_mm_testc_pd
+; TODO test_mm256_testc_pd
+; TODO test_mm_testc_ps
+; TODO test_mm256_testc_ps
+; TODO test_mm256_testc_si256
+; TODO test_mm_testnzc_pd
+; TODO test_mm256_testnzc_pd
+; TODO test_mm_testnzc_ps
+; TODO test_mm256_testnzc_ps
+; TODO test_mm256_testnzc_si256
+; TODO test_mm_testz_pd
+; TODO test_mm256_testz_pd
+; TODO test_mm_testz_ps
+; TODO test_mm256_testz_ps
+; TODO test_mm256_testz_si256
+
+define <2 x double> @test_mm_undefined_pd() {
+; X32-LABEL: test_mm_undefined_pd:
+; X32: # BB#0:
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_undefined_pd:
+; X64: # BB#0:
+; X64-NEXT: retq
+ ret <2 x double> undef
+}
+
+define <4 x double> @test_mm256_undefined_pd() {
+; X32-LABEL: test_mm256_undefined_pd:
+; X32: # BB#0:
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_undefined_pd:
+; X64: # BB#0:
+; X64-NEXT: retq
+ ret <4 x double> undef
+}
+
+define <4 x float> @test_mm_undefined_ps() {
+; X32-LABEL: test_mm_undefined_ps:
+; X32: # BB#0:
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_undefined_ps:
+; X64: # BB#0:
+; X64-NEXT: retq
+ ret <4 x float> undef
+}
+
+define <8 x float> @test_mm256_undefined_ps() {
+; X32-LABEL: test_mm256_undefined_ps:
+; X32: # BB#0:
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_undefined_ps:
+; X64: # BB#0:
+; X64-NEXT: retq
+ ret <8 x float> undef
+}
+
+define <2 x i64> @test_mm_undefined_si256() {
+; X32-LABEL: test_mm_undefined_si256:
+; X32: # BB#0:
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm_undefined_si256:
+; X64: # BB#0:
+; X64-NEXT: retq
+ ret <2 x i64> undef
+}
+
+define <4 x i64> @test_mm256_undefined_si256() {
+; X32-LABEL: test_mm256_undefined_si256:
+; X32: # BB#0:
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_undefined_si256:
+; X64: # BB#0:
+; X64-NEXT: retq
+ ret <4 x i64> undef
+}
+
+define <4 x double> @test_mm256_unpackhi_pd(<4 x double> %a0, <4 x double> %a1) {
+; X32-LABEL: test_mm256_unpackhi_pd:
+; X32: # BB#0:
+; X32-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_unpackhi_pd:
+; X64: # BB#0:
+; X64-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; X64-NEXT: retq
+ %res = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x double> %res
+}
+
+define <8 x float> @test_mm256_unpackhi_ps(<8 x float> %a0, <8 x float> %a1) {
+; X32-LABEL: test_mm256_unpackhi_ps:
+; X32: # BB#0:
+; X32-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_unpackhi_ps:
+; X64: # BB#0:
+; X64-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; X64-NEXT: retq
+ %res = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x float> %res
+}
+
+define <4 x double> @test_mm256_unpacklo_pd(<4 x double> %a0, <4 x double> %a1) {
+; X32-LABEL: test_mm256_unpacklo_pd:
+; X32: # BB#0:
+; X32-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_unpacklo_pd:
+; X64: # BB#0:
+; X64-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; X64-NEXT: retq
+ %res = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x double> %res
+}
+
+define <8 x float> @test_mm256_unpacklo_ps(<8 x float> %a0, <8 x float> %a1) {
+; X32-LABEL: test_mm256_unpacklo_ps:
+; X32: # BB#0:
+; X32-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_unpacklo_ps:
+; X64: # BB#0:
+; X64-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; X64-NEXT: retq
+ %res = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
+ ret <8 x float> %res
+}
+
+define <4 x double> @test_mm256_xor_pd(<4 x double> %a0, <4 x double> %a1) {
+; X32-LABEL: test_mm256_xor_pd:
+; X32: # BB#0:
+; X32-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_xor_pd:
+; X64: # BB#0:
+; X64-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
+ %1 = bitcast <4 x double> %a0 to <4 x i64>
+ %2 = bitcast <4 x double> %a1 to <4 x i64>
+ %res = xor <4 x i64> %1, %2
+ %bc = bitcast <4 x i64> %res to <4 x double>
+ ret <4 x double> %bc
+}
+
+define <8 x float> @test_mm256_xor_ps(<8 x float> %a0, <8 x float> %a1) {
+; X32-LABEL: test_mm256_xor_ps:
+; X32: # BB#0:
+; X32-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm256_xor_ps:
+; X64: # BB#0:
+; X64-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
+ %1 = bitcast <8 x float> %a0 to <8 x i32>
+ %2 = bitcast <8 x float> %a1 to <8 x i32>
+ %res = xor <8 x i32> %1, %2
+ %bc = bitcast <8 x i32> %res to <8 x float>
+ ret <8 x float> %bc
+}
+
+; TODO test_mm256_zeroall
+; TODO test_mm256_zeroupper
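
Editorial note (not part of the patch): the second NOTE in the file says the hand-written IR should match what clang emits for clang/test/CodeGen/avx-builtins.c. A minimal C sketch of that correspondence, using the standard <immintrin.h> intrinsics; the function names and compile flags below are illustrative assumptions, not contents of that clang test:

    // Illustrative sketch only; compiling with e.g. `clang -mavx -emit-llvm -S`
    // produces IR of the same shape as the functions in this test file.
    #include <immintrin.h>

    __m256d sketch_add_pd(__m256d a, __m256d b) {
      // Becomes a plain fadd <4 x double>, cf. test_mm256_add_pd above.
      return _mm256_add_pd(a, b);
    }

    __m256d sketch_andnot_pd(__m256d a, __m256d b) {
      // Becomes an xor with all-ones followed by an and on <4 x i64>,
      // cf. test_mm256_andnot_pd above.
      return _mm256_andnot_pd(a, b);
    }

The CHECK lines are then regenerated from such IR with utils/update_llc_test_checks.py, as the first NOTE in the file states.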