| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-01-18 18:38:32 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-01-18 18:38:32 +0000 |
| commit | 3c8e2bf830c567c59b9d16bf52774e0c0a756f2e | |
| tree | 93c2e16f4ede7ccea484ea41733e3e2bb14d21c8 /llvm/test | |
| parent | f84f118eb855f7a879fffbd40616c6d542480637 | |
[X86][AVX] Add 256/512-bit slow PMULLD tests
llvm-svn: 322874
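Every test in this file exercises the same pattern: a narrow integer vector is zero-extended to `<N x i32>` and multiplied by the uniform constant 18778. On CPUs with the slow-pmulld feature (such as Silvermont) this should be lowered through cheaper pmullw/pmulhw sequences rather than pmulld, and the new RUN lines extend that coverage to 256/512-bit vectors on AVX2/AVX512 targets. A reduced sketch of the tested IR (the function name here is hypothetical; the body mirrors test_mul_v8i32_v8i8 from the diff below):

```llvm
; Sketch of the pattern under test: zero-extend, then multiply by a
; uniform constant. Compiled for a generic AVX2 target.
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2

define <8 x i32> @mul_zext_v8i8(<8 x i8> %A) {
  %z = zext <8 x i8> %A to <8 x i32>
  %m = mul nuw nsw <8 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778>
  ret <8 x i32> %m
}
```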
Diffstat (limited to 'llvm/test')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/slow-pmulld.ll | 792 |

1 file changed, 768 insertions(+), 24 deletions(-)
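Any of the RUN lines below can be reproduced by hand. A sketch for the new AVX2 x86-64 configuration, assuming llc and FileCheck from an LLVM build are on PATH and the command is run from the source root:

```sh
# Compile the test file and pipe the assembly into FileCheck, using the
# same prefixes as the corresponding RUN line.
T=llvm/test/CodeGen/X86/slow-pmulld.ll
llc < "$T" -mtriple=x86_64-unknown-unknown -mattr=+avx2 \
  | FileCheck "$T" --check-prefixes=AVX-64,AVX2-64
```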
```diff
diff --git a/llvm/test/CodeGen/X86/slow-pmulld.ll b/llvm/test/CodeGen/X86/slow-pmulld.ll
index bf7829d826f..b12b7432991 100644
--- a/llvm/test/CodeGen/X86/slow-pmulld.ll
+++ b/llvm/test/CodeGen/X86/slow-pmulld.ll
@@ -1,43 +1,241 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mcpu=silvermont | FileCheck %s --check-prefix=CHECK32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=silvermont | FileCheck %s --check-prefix=CHECK64
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE4-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE4-64
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mcpu=silvermont | FileCheck %s --check-prefixes=CHECK32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=silvermont | FileCheck %s --check-prefixes=CHECK64
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE4-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE4-64
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX-32,AVX2-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX-64,AVX2-64
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+avx512dq | FileCheck %s --check-prefixes=AVX-32,AVX512-32,AVX512DQ-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq | FileCheck %s --check-prefixes=AVX-64,AVX512-64,AVX512DQ-64
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=AVX-32,AVX512-32,AVX512BW-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=AVX-64,AVX512-64,AVX512BW-64
 
 ; Make sure that the slow-pmulld feature can be used without SSE4.1.
 ; RUN: llc < %s -mtriple=i386-unknown-unknown -mcpu=silvermont -mattr=-sse4.1
 
-define <4 x i32> @foo(<4 x i8> %A) {
-; CHECK32-LABEL: foo:
+define <4 x i32> @test_mul_v4i32_v4i8(<4 x i8> %A) {
+; CHECK32-LABEL: test_mul_v4i32_v4i8:
 ; CHECK32: # %bb.0:
 ; CHECK32-NEXT: pand {{\.LCPI.*}}, %xmm0
 ; CHECK32-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0
 ; CHECK32-NEXT: retl
 ;
-; CHECK64-LABEL: foo:
+; CHECK64-LABEL: test_mul_v4i32_v4i8:
 ; CHECK64: # %bb.0:
 ; CHECK64-NEXT: pand {{.*}}(%rip), %xmm0
 ; CHECK64-NEXT: pmaddwd {{.*}}(%rip), %xmm0
 ; CHECK64-NEXT: retq
 ;
-; SSE4-32-LABEL: foo:
+; SSE4-32-LABEL: test_mul_v4i32_v4i8:
 ; SSE4-32: # %bb.0:
 ; SSE4-32-NEXT: pand {{\.LCPI.*}}, %xmm0
 ; SSE4-32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
 ; SSE4-32-NEXT: retl
 ;
-; SSE4-64-LABEL: foo:
+; SSE4-64-LABEL: test_mul_v4i32_v4i8:
 ; SSE4-64: # %bb.0:
 ; SSE4-64-NEXT: pand {{.*}}(%rip), %xmm0
 ; SSE4-64-NEXT: pmulld {{.*}}(%rip), %xmm0
 ; SSE4-64-NEXT: retq
+;
+; AVX-32-LABEL: test_mul_v4i32_v4i8:
+; AVX-32: # %bb.0:
+; AVX-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX-32-NEXT: vpbroadcastd {{.*#+}} xmm1 = [18778,18778,18778,18778]
+; AVX-32-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_mul_v4i32_v4i8:
+; AVX-64: # %bb.0:
+; AVX-64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [18778,18778,18778,18778]
+; AVX-64-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT: retq
   %z = zext <4 x i8> %A to <4 x i32>
   %m = mul nuw nsw <4 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778>
   ret <4 x i32> %m
 }
 
-define <4 x i32> @foo16(<4 x i16> %A) {
-; CHECK32-LABEL: foo16:
+define <8 x i32> @test_mul_v8i32_v8i8(<8 x i8> %A) {
+; CHECK32-LABEL: test_mul_v8i32_v8i8:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: movdqa %xmm0, %xmm1
+; CHECK32-NEXT: pand {{\.LCPI.*}}, %xmm1
+; CHECK32-NEXT: movdqa {{.*#+}} xmm0 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; CHECK32-NEXT: movdqa %xmm1, %xmm2
+; CHECK32-NEXT: pmullw %xmm0, %xmm1
+; CHECK32-NEXT: pmulhw %xmm0, %xmm2
+; CHECK32-NEXT: movdqa %xmm1, %xmm0
+; CHECK32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; CHECK32-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; CHECK32-NEXT: retl
+;
+; CHECK64-LABEL: test_mul_v8i32_v8i8:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: movdqa %xmm0, %xmm1
+; CHECK64-NEXT: pand {{.*}}(%rip), %xmm1
+; CHECK64-NEXT: movdqa {{.*#+}} xmm0 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; CHECK64-NEXT: movdqa %xmm1, %xmm2
+; CHECK64-NEXT: pmullw %xmm0, %xmm1
+; CHECK64-NEXT: pmulhw %xmm0, %xmm2
+; CHECK64-NEXT: movdqa %xmm1, %xmm0
+; CHECK64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; CHECK64-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; CHECK64-NEXT: retq
+;
+; SSE4-32-LABEL: test_mul_v8i32_v8i8:
+; SSE4-32: # %bb.0:
+; SSE4-32-NEXT: pand {{\.LCPI.*}}, %xmm0
+; SSE4-32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE4-32-NEXT: movdqa {{.*#+}} xmm2 = [18778,18778,18778,18778]
+; SSE4-32-NEXT: pmulld %xmm2, %xmm0
+; SSE4-32-NEXT: pmulld %xmm2, %xmm1
+; SSE4-32-NEXT: retl
+;
+; SSE4-64-LABEL: test_mul_v8i32_v8i8:
+; SSE4-64: # %bb.0:
+; SSE4-64-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE4-64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE4-64-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE4-64-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE4-64-NEXT: movdqa {{.*#+}} xmm2 = [18778,18778,18778,18778]
+; SSE4-64-NEXT: pmulld %xmm2, %xmm0
+; SSE4-64-NEXT: pmulld %xmm2, %xmm1
+; SSE4-64-NEXT: retq
+;
+; AVX-32-LABEL: test_mul_v8i32_v8i8:
+; AVX-32: # %bb.0:
+; AVX-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX-32-NEXT: vpbroadcastd {{.*#+}} ymm1 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; AVX-32-NEXT: vpmulld %ymm1, %ymm0, %ymm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_mul_v8i32_v8i8:
+; AVX-64: # %bb.0:
+; AVX-64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX-64-NEXT: vpbroadcastd {{.*#+}} ymm1 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; AVX-64-NEXT: vpmulld %ymm1, %ymm0, %ymm0
+; AVX-64-NEXT: retq
+  %z = zext <8 x i8> %A to <8 x i32>
+  %m = mul nuw nsw <8 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778>
+  ret <8 x i32> %m
+}
+
+define <16 x i32> @test_mul_v16i32_v16i8(<16 x i8> %A) {
+; CHECK32-LABEL: test_mul_v16i32_v16i8:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: movdqa {{.*#+}} xmm2 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; CHECK32-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; CHECK32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; CHECK32-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; CHECK32-NEXT: movdqa %xmm1, %xmm4
+; CHECK32-NEXT: movdqa %xmm3, %xmm5
+; CHECK32-NEXT: pmullw %xmm2, %xmm1
+; CHECK32-NEXT: pmullw %xmm2, %xmm3
+; CHECK32-NEXT: pmulhw %xmm2, %xmm4
+; CHECK32-NEXT: pmulhw %xmm2, %xmm5
+; CHECK32-NEXT: movdqa %xmm1, %xmm0
+; CHECK32-NEXT: movdqa %xmm3, %xmm2
+; CHECK32-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; CHECK32-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; CHECK32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; CHECK32-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; CHECK32-NEXT: retl
+;
+; CHECK64-LABEL: test_mul_v16i32_v16i8:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: movdqa {{.*#+}} xmm2 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; CHECK64-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; CHECK64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; CHECK64-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; CHECK64-NEXT: movdqa %xmm1, %xmm4
+; CHECK64-NEXT: movdqa %xmm3, %xmm5
+; CHECK64-NEXT: pmullw %xmm2, %xmm1
+; CHECK64-NEXT: pmullw %xmm2, %xmm3
+; CHECK64-NEXT: pmulhw %xmm2, %xmm4
+; CHECK64-NEXT: pmulhw %xmm2, %xmm5
+; CHECK64-NEXT: movdqa %xmm1, %xmm0
+; CHECK64-NEXT: movdqa %xmm3, %xmm2
+; CHECK64-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; CHECK64-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; CHECK64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; CHECK64-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; CHECK64-NEXT: retq
+;
+; SSE4-32-LABEL: test_mul_v16i32_v16i8:
+; SSE4-32: # %bb.0:
+; SSE4-32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE4-32-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE4-32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE4-32-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE4-32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE4-32-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE4-32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE4-32-NEXT: movdqa {{.*#+}} xmm4 = [18778,18778,18778,18778]
+; SSE4-32-NEXT: pmulld %xmm4, %xmm0
+; SSE4-32-NEXT: pmulld %xmm4, %xmm1
+; SSE4-32-NEXT: pmulld %xmm4, %xmm2
+; SSE4-32-NEXT: pmulld %xmm4, %xmm3
+; SSE4-32-NEXT: retl
+;
+; SSE4-64-LABEL: test_mul_v16i32_v16i8:
+; SSE4-64: # %bb.0:
+; SSE4-64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE4-64-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE4-64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE4-64-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE4-64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE4-64-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE4-64-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE4-64-NEXT: movdqa {{.*#+}} xmm4 = [18778,18778,18778,18778]
+; SSE4-64-NEXT: pmulld %xmm4, %xmm0
+; SSE4-64-NEXT: pmulld %xmm4, %xmm1
+; SSE4-64-NEXT: pmulld %xmm4, %xmm2
+; SSE4-64-NEXT: pmulld %xmm4, %xmm3
+; SSE4-64-NEXT: retq
+;
+; AVX2-32-LABEL: test_mul_v16i32_v16i8:
+; AVX2-32: # %bb.0:
+; AVX2-32-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-32-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
+; AVX2-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX2-32-NEXT: vpbroadcastd {{.*#+}} ymm2 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; AVX2-32-NEXT: vpmulld %ymm2, %ymm0, %ymm0
+; AVX2-32-NEXT: vpmulld %ymm2, %ymm1, %ymm1
+; AVX2-32-NEXT: retl
+;
+; AVX2-64-LABEL: test_mul_v16i32_v16i8:
+; AVX2-64: # %bb.0:
+; AVX2-64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-64-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
+; AVX2-64-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX2-64-NEXT: vpbroadcastd {{.*#+}} ymm2 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; AVX2-64-NEXT: vpmulld %ymm2, %ymm0, %ymm0
+; AVX2-64-NEXT: vpmulld %ymm2, %ymm1, %ymm1
+; AVX2-64-NEXT: retq
+;
+; AVX512-32-LABEL: test_mul_v16i32_v16i8:
+; AVX512-32: # %bb.0:
+; AVX512-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
+; AVX512-32-NEXT: retl
+;
+; AVX512-64-LABEL: test_mul_v16i32_v16i8:
+; AVX512-64: # %bb.0:
+; AVX512-64-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-64-NEXT: vpmulld {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-64-NEXT: retq
+  %z = zext <16 x i8> %A to <16 x i32>
+  %m = mul nuw nsw <16 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778>
+  ret <16 x i32> %m
+}
+
+define <4 x i32> @test_mul_v4i32_v4i16(<4 x i16> %A) {
+; CHECK32-LABEL: test_mul_v4i32_v4i16:
 ; CHECK32: # %bb.0:
 ; CHECK32-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; CHECK32-NEXT: movdqa {{.*#+}} xmm1 = <18778,18778,18778,18778,u,u,u,u>
@@ -47,7 +245,7 @@ define <4 x i32> @foo16(<4 x i16> %A) {
 ; CHECK32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
 ; CHECK32-NEXT: retl
 ;
-; CHECK64-LABEL: foo16:
+; CHECK64-LABEL: test_mul_v4i32_v4i16:
 ; CHECK64: # %bb.0:
 ; CHECK64-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; CHECK64-NEXT: movdqa {{.*#+}} xmm1 = <18778,18778,18778,18778,u,u,u,u>
@@ -57,82 +255,628 @@ define <4 x i32> @foo16(<4 x i16> %A) {
 ; CHECK64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
 ; CHECK64-NEXT: retq
 ;
-; SSE4-32-LABEL: foo16:
+; SSE4-32-LABEL: test_mul_v4i32_v4i16:
 ; SSE4-32: # %bb.0:
 ; SSE4-32-NEXT: pxor %xmm1, %xmm1
 ; SSE4-32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; SSE4-32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
 ; SSE4-32-NEXT: retl
 ;
-; SSE4-64-LABEL: foo16:
+; SSE4-64-LABEL: test_mul_v4i32_v4i16:
 ; SSE4-64: # %bb.0:
 ; SSE4-64-NEXT: pxor %xmm1, %xmm1
 ; SSE4-64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; SSE4-64-NEXT: pmulld {{.*}}(%rip), %xmm0
 ; SSE4-64-NEXT: retq
+;
+; AVX-32-LABEL: test_mul_v4i32_v4i16:
+; AVX-32: # %bb.0:
+; AVX-32-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; AVX-32-NEXT: vpbroadcastd {{.*#+}} xmm1 = [18778,18778,18778,18778]
+; AVX-32-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_mul_v4i32_v4i16:
+; AVX-64: # %bb.0:
+; AVX-64-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; AVX-64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [18778,18778,18778,18778]
+; AVX-64-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT: retq
   %z = zext <4 x i16> %A to <4 x i32>
   %m = mul nuw nsw <4 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778>
   ret <4 x i32> %m
 }
 
-define <4 x i32> @foo_os(<4 x i8> %A) minsize {
-; CHECK32-LABEL: foo_os:
+define <8 x i32> @test_mul_v8i32_v8i16(<8 x i16> %A) {
+; CHECK32-LABEL: test_mul_v8i32_v8i16:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: movdqa %xmm0, %xmm1
+; CHECK32-NEXT: movdqa {{.*#+}} xmm0 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; CHECK32-NEXT: movdqa %xmm1, %xmm2
+; CHECK32-NEXT: pmullw %xmm0, %xmm1
+; CHECK32-NEXT: pmulhuw %xmm0, %xmm2
+; CHECK32-NEXT: movdqa %xmm1, %xmm0
+; CHECK32-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; CHECK32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; CHECK32-NEXT: retl
+;
+; CHECK64-LABEL: test_mul_v8i32_v8i16:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: movdqa %xmm0, %xmm1
+; CHECK64-NEXT: movdqa {{.*#+}} xmm0 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; CHECK64-NEXT: movdqa %xmm1, %xmm2
+; CHECK64-NEXT: pmullw %xmm0, %xmm1
+; CHECK64-NEXT: pmulhuw %xmm0, %xmm2
+; CHECK64-NEXT: movdqa %xmm1, %xmm0
+; CHECK64-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; CHECK64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; CHECK64-NEXT: retq
+;
+; SSE4-32-LABEL: test_mul_v8i32_v8i16:
+; SSE4-32: # %bb.0:
+; SSE4-32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE4-32-NEXT: movdqa {{.*#+}} xmm2 = [18778,18778,18778,18778]
+; SSE4-32-NEXT: pmulld %xmm2, %xmm0
+; SSE4-32-NEXT: pmulld %xmm2, %xmm1
+; SSE4-32-NEXT: retl
+;
+; SSE4-64-LABEL: test_mul_v8i32_v8i16:
+; SSE4-64: # %bb.0:
+; SSE4-64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE4-64-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE4-64-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE4-64-NEXT: movdqa {{.*#+}} xmm2 = [18778,18778,18778,18778]
+; SSE4-64-NEXT: pmulld %xmm2, %xmm0
+; SSE4-64-NEXT: pmulld %xmm2, %xmm1
+; SSE4-64-NEXT: retq
+;
+; AVX-32-LABEL: test_mul_v8i32_v8i16:
+; AVX-32: # %bb.0:
+; AVX-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX-32-NEXT: vpbroadcastd {{.*#+}} ymm1 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; AVX-32-NEXT: vpmulld %ymm1, %ymm0, %ymm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_mul_v8i32_v8i16:
+; AVX-64: # %bb.0:
+; AVX-64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX-64-NEXT: vpbroadcastd {{.*#+}} ymm1 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; AVX-64-NEXT: vpmulld %ymm1, %ymm0, %ymm0
+; AVX-64-NEXT: retq
+  %z = zext <8 x i16> %A to <8 x i32>
+  %m = mul nuw nsw <8 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778>
+  ret <8 x i32> %m
+}
+
+define <16 x i32> @test_mul_v16i32_v16i16(<16 x i16> %A) {
+; CHECK32-LABEL: test_mul_v16i32_v16i16:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: movdqa %xmm1, %xmm3
+; CHECK32-NEXT: movdqa %xmm0, %xmm1
+; CHECK32-NEXT: movdqa {{.*#+}} xmm0 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; CHECK32-NEXT: movdqa %xmm1, %xmm2
+; CHECK32-NEXT: movdqa %xmm3, %xmm4
+; CHECK32-NEXT: pmullw %xmm0, %xmm1
+; CHECK32-NEXT: pmulhuw %xmm0, %xmm2
+; CHECK32-NEXT: pmullw %xmm0, %xmm3
+; CHECK32-NEXT: pmulhuw %xmm0, %xmm4
+; CHECK32-NEXT: movdqa %xmm1, %xmm0
+; CHECK32-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; CHECK32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; CHECK32-NEXT: movdqa %xmm3, %xmm2
+; CHECK32-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; CHECK32-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; CHECK32-NEXT: retl
+;
+; CHECK64-LABEL: test_mul_v16i32_v16i16:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: movdqa %xmm1, %xmm3
+; CHECK64-NEXT: movdqa %xmm0, %xmm1
+; CHECK64-NEXT: movdqa {{.*#+}} xmm0 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; CHECK64-NEXT: movdqa %xmm1, %xmm2
+; CHECK64-NEXT: movdqa %xmm3, %xmm4
+; CHECK64-NEXT: pmullw %xmm0, %xmm1
+; CHECK64-NEXT: pmulhuw %xmm0, %xmm2
+; CHECK64-NEXT: pmullw %xmm0, %xmm3
+; CHECK64-NEXT: pmulhuw %xmm0, %xmm4
+; CHECK64-NEXT: movdqa %xmm1, %xmm0
+; CHECK64-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; CHECK64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; CHECK64-NEXT: movdqa %xmm3, %xmm2
+; CHECK64-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; CHECK64-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; CHECK64-NEXT: retq
+;
+; SSE4-32-LABEL: test_mul_v16i32_v16i16:
+; SSE4-32: # %bb.0:
+; SSE4-32-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; SSE4-32-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE4-32-NEXT: movdqa {{.*#+}} xmm1 = [18778,18778,18778,18778]
+; SSE4-32-NEXT: pmulld %xmm1, %xmm0
+; SSE4-32-NEXT: pmulld %xmm1, %xmm2
+; SSE4-32-NEXT: pmulld %xmm1, %xmm4
+; SSE4-32-NEXT: pmulld %xmm1, %xmm3
+; SSE4-32-NEXT: movdqa %xmm4, %xmm1
+; SSE4-32-NEXT: retl
+;
+; SSE4-64-LABEL: test_mul_v16i32_v16i16:
+; SSE4-64: # %bb.0:
+; SSE4-64-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE4-64-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; SSE4-64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE4-64-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; SSE4-64-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE4-64-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE4-64-NEXT: movdqa {{.*#+}} xmm1 = [18778,18778,18778,18778]
+; SSE4-64-NEXT: pmulld %xmm1, %xmm0
+; SSE4-64-NEXT: pmulld %xmm1, %xmm2
+; SSE4-64-NEXT: pmulld %xmm1, %xmm4
+; SSE4-64-NEXT: pmulld %xmm1, %xmm3
+; SSE4-64-NEXT: movdqa %xmm4, %xmm1
+; SSE4-64-NEXT: retq
+;
+; AVX2-32-LABEL: test_mul_v16i32_v16i16:
+; AVX2-32: # %bb.0:
+; AVX2-32-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-32-NEXT: vpbroadcastd {{.*#+}} ymm2 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; AVX2-32-NEXT: vpmulld %ymm2, %ymm0, %ymm0
+; AVX2-32-NEXT: vpmulld %ymm2, %ymm1, %ymm1
+; AVX2-32-NEXT: retl
+;
+; AVX2-64-LABEL: test_mul_v16i32_v16i16:
+; AVX2-64: # %bb.0:
+; AVX2-64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-64-NEXT: vpbroadcastd {{.*#+}} ymm2 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; AVX2-64-NEXT: vpmulld %ymm2, %ymm0, %ymm0
+; AVX2-64-NEXT: vpmulld %ymm2, %ymm1, %ymm1
+; AVX2-64-NEXT: retq
+;
+; AVX512-32-LABEL: test_mul_v16i32_v16i16:
+; AVX512-32: # %bb.0:
+; AVX512-32-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
+; AVX512-32-NEXT: retl
+;
+; AVX512-64-LABEL: test_mul_v16i32_v16i16:
+; AVX512-64: # %bb.0:
+; AVX512-64-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512-64-NEXT: vpmulld {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-64-NEXT: retq
+  %z = zext <16 x i16> %A to <16 x i32>
+  %m = mul nuw nsw <16 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778>
+  ret <16 x i32> %m
+}
+
+;
+; MinSize Tests
+;
+
+define <4 x i32> @test_mul_v4i32_v4i8_minsize(<4 x i8> %A) minsize {
+; CHECK32-LABEL: test_mul_v4i32_v4i8_minsize:
 ; CHECK32: # %bb.0:
 ; CHECK32-NEXT: pand {{\.LCPI.*}}, %xmm0
 ; CHECK32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
 ; CHECK32-NEXT: retl
 ;
-; CHECK64-LABEL: foo_os:
+; CHECK64-LABEL: test_mul_v4i32_v4i8_minsize:
 ; CHECK64: # %bb.0:
 ; CHECK64-NEXT: pand {{.*}}(%rip), %xmm0
 ; CHECK64-NEXT: pmulld {{.*}}(%rip), %xmm0
 ; CHECK64-NEXT: retq
 ;
-; SSE4-32-LABEL: foo_os:
+; SSE4-32-LABEL: test_mul_v4i32_v4i8_minsize:
 ; SSE4-32: # %bb.0:
 ; SSE4-32-NEXT: pand {{\.LCPI.*}}, %xmm0
 ; SSE4-32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
 ; SSE4-32-NEXT: retl
 ;
-; SSE4-64-LABEL: foo_os:
+; SSE4-64-LABEL: test_mul_v4i32_v4i8_minsize:
 ; SSE4-64: # %bb.0:
 ; SSE4-64-NEXT: pand {{.*}}(%rip), %xmm0
 ; SSE4-64-NEXT: pmulld {{.*}}(%rip), %xmm0
 ; SSE4-64-NEXT: retq
+;
+; AVX-32-LABEL: test_mul_v4i32_v4i8_minsize:
+; AVX-32: # %bb.0:
+; AVX-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX-32-NEXT: vpbroadcastd {{.*#+}} xmm1 = [18778,18778,18778,18778]
+; AVX-32-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_mul_v4i32_v4i8_minsize:
+; AVX-64: # %bb.0:
+; AVX-64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [18778,18778,18778,18778]
+; AVX-64-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT: retq
   %z = zext <4 x i8> %A to <4 x i32>
   %m = mul nuw nsw <4 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778>
   ret <4 x i32> %m
 }
 
-define <4 x i32> @foo_os16(<4 x i16> %A) minsize {
-; CHECK32-LABEL: foo_os16:
+define <8 x i32> @test_mul_v8i32_v8i8_minsize(<8 x i8> %A) minsize {
+; CHECK32-LABEL: test_mul_v8i32_v8i8_minsize:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: pand {{\.LCPI.*}}, %xmm0
+; CHECK32-NEXT: movdqa {{.*#+}} xmm2 = [18778,18778,18778,18778]
+; CHECK32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; CHECK32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; CHECK32-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; CHECK32-NEXT: pmulld %xmm2, %xmm0
+; CHECK32-NEXT: pmulld %xmm2, %xmm1
+; CHECK32-NEXT: retl
+;
+; CHECK64-LABEL: test_mul_v8i32_v8i8_minsize:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: pand {{.*}}(%rip), %xmm0
+; CHECK64-NEXT: movdqa {{.*#+}} xmm2 = [18778,18778,18778,18778]
+; CHECK64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; CHECK64-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; CHECK64-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; CHECK64-NEXT: pmulld %xmm2, %xmm0
+; CHECK64-NEXT: pmulld %xmm2, %xmm1
+; CHECK64-NEXT: retq
+;
+; SSE4-32-LABEL: test_mul_v8i32_v8i8_minsize:
+; SSE4-32: # %bb.0:
+; SSE4-32-NEXT: pand {{\.LCPI.*}}, %xmm0
+; SSE4-32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE4-32-NEXT: movdqa {{.*#+}} xmm2 = [18778,18778,18778,18778]
+; SSE4-32-NEXT: pmulld %xmm2, %xmm0
+; SSE4-32-NEXT: pmulld %xmm2, %xmm1
+; SSE4-32-NEXT: retl
+;
+; SSE4-64-LABEL: test_mul_v8i32_v8i8_minsize:
+; SSE4-64: # %bb.0:
+; SSE4-64-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE4-64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE4-64-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE4-64-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE4-64-NEXT: movdqa {{.*#+}} xmm2 = [18778,18778,18778,18778]
+; SSE4-64-NEXT: pmulld %xmm2, %xmm0
+; SSE4-64-NEXT: pmulld %xmm2, %xmm1
+; SSE4-64-NEXT: retq
+;
+; AVX-32-LABEL: test_mul_v8i32_v8i8_minsize:
+; AVX-32: # %bb.0:
+; AVX-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX-32-NEXT: vpbroadcastd {{.*#+}} ymm1 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; AVX-32-NEXT: vpmulld %ymm1, %ymm0, %ymm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_mul_v8i32_v8i8_minsize:
+; AVX-64: # %bb.0:
+; AVX-64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX-64-NEXT: vpbroadcastd {{.*#+}} ymm1 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; AVX-64-NEXT: vpmulld %ymm1, %ymm0, %ymm0
+; AVX-64-NEXT: retq
+  %z = zext <8 x i8> %A to <8 x i32>
+  %m = mul nuw nsw <8 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778>
+  ret <8 x i32> %m
+}
+
+define <16 x i32> @test_mul_v16i32_v16i8_minsize(<16 x i8> %A) minsize {
+; CHECK32-LABEL: test_mul_v16i32_v16i8_minsize:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; CHECK32-NEXT: movdqa {{.*#+}} xmm5 = [18778,18778,18778,18778]
+; CHECK32-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,2,3]
+; CHECK32-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; CHECK32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; CHECK32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; CHECK32-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; CHECK32-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
+; CHECK32-NEXT: pmulld %xmm5, %xmm0
+; CHECK32-NEXT: pmulld %xmm5, %xmm1
+; CHECK32-NEXT: pmulld %xmm5, %xmm2
+; CHECK32-NEXT: pmulld %xmm5, %xmm3
+; CHECK32-NEXT: retl
+;
+; CHECK64-LABEL: test_mul_v16i32_v16i8_minsize:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; CHECK64-NEXT: movdqa {{.*#+}} xmm5 = [18778,18778,18778,18778]
+; CHECK64-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,2,3]
+; CHECK64-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; CHECK64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; CHECK64-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; CHECK64-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; CHECK64-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
+; CHECK64-NEXT: pmulld %xmm5, %xmm0
+; CHECK64-NEXT: pmulld %xmm5, %xmm1
+; CHECK64-NEXT: pmulld %xmm5, %xmm2
+; CHECK64-NEXT: pmulld %xmm5, %xmm3
+; CHECK64-NEXT: retq
+;
+; SSE4-32-LABEL: test_mul_v16i32_v16i8_minsize:
+; SSE4-32: # %bb.0:
+; SSE4-32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE4-32-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE4-32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE4-32-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE4-32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE4-32-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE4-32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE4-32-NEXT: movdqa {{.*#+}} xmm4 = [18778,18778,18778,18778]
+; SSE4-32-NEXT: pmulld %xmm4, %xmm0
+; SSE4-32-NEXT: pmulld %xmm4, %xmm1
+; SSE4-32-NEXT: pmulld %xmm4, %xmm2
+; SSE4-32-NEXT: pmulld %xmm4, %xmm3
+; SSE4-32-NEXT: retl
+;
+; SSE4-64-LABEL: test_mul_v16i32_v16i8_minsize:
+; SSE4-64: # %bb.0:
+; SSE4-64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE4-64-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE4-64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE4-64-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE4-64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE4-64-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE4-64-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE4-64-NEXT: movdqa {{.*#+}} xmm4 = [18778,18778,18778,18778]
+; SSE4-64-NEXT: pmulld %xmm4, %xmm0
+; SSE4-64-NEXT: pmulld %xmm4, %xmm1
+; SSE4-64-NEXT: pmulld %xmm4, %xmm2
+; SSE4-64-NEXT: pmulld %xmm4, %xmm3
+; SSE4-64-NEXT: retq
+;
+; AVX2-32-LABEL: test_mul_v16i32_v16i8_minsize:
+; AVX2-32: # %bb.0:
+; AVX2-32-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-32-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
+; AVX2-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX2-32-NEXT: vpbroadcastd {{.*#+}} ymm2 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; AVX2-32-NEXT: vpmulld %ymm2, %ymm0, %ymm0
+; AVX2-32-NEXT: vpmulld %ymm2, %ymm1, %ymm1
+; AVX2-32-NEXT: retl
+;
+; AVX2-64-LABEL: test_mul_v16i32_v16i8_minsize:
+; AVX2-64: # %bb.0:
+; AVX2-64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-64-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
+; AVX2-64-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX2-64-NEXT: vpbroadcastd {{.*#+}} ymm2 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; AVX2-64-NEXT: vpmulld %ymm2, %ymm0, %ymm0
+; AVX2-64-NEXT: vpmulld %ymm2, %ymm1, %ymm1
+; AVX2-64-NEXT: retq
+;
+; AVX512-32-LABEL: test_mul_v16i32_v16i8_minsize:
+; AVX512-32: # %bb.0:
+; AVX512-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
+; AVX512-32-NEXT: retl
+;
+; AVX512-64-LABEL: test_mul_v16i32_v16i8_minsize:
+; AVX512-64: # %bb.0:
+; AVX512-64-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-64-NEXT: vpmulld {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-64-NEXT: retq
+  %z = zext <16 x i8> %A to <16 x i32>
+  %m = mul nuw nsw <16 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778>
+  ret <16 x i32> %m
+}
+
+define <4 x i32> @test_mul_v4i32_v4i16_minsize(<4 x i16> %A) minsize {
+; CHECK32-LABEL: test_mul_v4i32_v4i16_minsize:
 ; CHECK32: # %bb.0:
 ; CHECK32-NEXT: pxor %xmm1, %xmm1
 ; CHECK32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; CHECK32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
 ; CHECK32-NEXT: retl
 ;
-; CHECK64-LABEL: foo_os16:
+; CHECK64-LABEL: test_mul_v4i32_v4i16_minsize:
 ; CHECK64: # %bb.0:
 ; CHECK64-NEXT: pxor %xmm1, %xmm1
 ; CHECK64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; CHECK64-NEXT: pmulld {{.*}}(%rip), %xmm0
 ; CHECK64-NEXT: retq
 ;
-; SSE4-32-LABEL: foo_os16:
+; SSE4-32-LABEL: test_mul_v4i32_v4i16_minsize:
 ; SSE4-32: # %bb.0:
 ; SSE4-32-NEXT: pxor %xmm1, %xmm1
 ; SSE4-32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; SSE4-32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
 ; SSE4-32-NEXT: retl
 ;
-; SSE4-64-LABEL: foo_os16:
+; SSE4-64-LABEL: test_mul_v4i32_v4i16_minsize:
 ; SSE4-64: # %bb.0:
 ; SSE4-64-NEXT: pxor %xmm1, %xmm1
 ; SSE4-64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; SSE4-64-NEXT: pmulld {{.*}}(%rip), %xmm0
 ; SSE4-64-NEXT: retq
+;
+; AVX-32-LABEL: test_mul_v4i32_v4i16_minsize:
+; AVX-32: # %bb.0:
+; AVX-32-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; AVX-32-NEXT: vpbroadcastd {{.*#+}} xmm1 = [18778,18778,18778,18778]
+; AVX-32-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_mul_v4i32_v4i16_minsize:
+; AVX-64: # %bb.0:
+; AVX-64-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; AVX-64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [18778,18778,18778,18778]
+; AVX-64-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX-64-NEXT: retq
   %z = zext <4 x i16> %A to <4 x i32>
   %m = mul nuw nsw <4 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778>
   ret <4 x i32> %m
 }
+
+define <8 x i32> @test_mul_v8i32_v8i16_minsize(<8 x i16> %A) minsize {
+; CHECK32-LABEL: test_mul_v8i32_v8i16_minsize:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: movdqa {{.*#+}} xmm2 = [18778,18778,18778,18778]
+; CHECK32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; CHECK32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; CHECK32-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; CHECK32-NEXT: pmulld %xmm2, %xmm0
+; CHECK32-NEXT: pmulld %xmm2, %xmm1
+; CHECK32-NEXT: retl
+;
+; CHECK64-LABEL: test_mul_v8i32_v8i16_minsize:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: movdqa {{.*#+}} xmm2 = [18778,18778,18778,18778]
+; CHECK64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; CHECK64-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; CHECK64-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; CHECK64-NEXT: pmulld %xmm2, %xmm0
+; CHECK64-NEXT: pmulld %xmm2, %xmm1
+; CHECK64-NEXT: retq
+;
+; SSE4-32-LABEL: test_mul_v8i32_v8i16_minsize:
+; SSE4-32: # %bb.0:
+; SSE4-32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE4-32-NEXT: movdqa {{.*#+}} xmm2 = [18778,18778,18778,18778]
+; SSE4-32-NEXT: pmulld %xmm2, %xmm0
+; SSE4-32-NEXT: pmulld %xmm2, %xmm1
+; SSE4-32-NEXT: retl
+;
+; SSE4-64-LABEL: test_mul_v8i32_v8i16_minsize:
+; SSE4-64: # %bb.0:
+; SSE4-64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE4-64-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE4-64-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE4-64-NEXT: movdqa {{.*#+}} xmm2 = [18778,18778,18778,18778]
+; SSE4-64-NEXT: pmulld %xmm2, %xmm0
+; SSE4-64-NEXT: pmulld %xmm2, %xmm1
+; SSE4-64-NEXT: retq
+;
+; AVX-32-LABEL: test_mul_v8i32_v8i16_minsize:
+; AVX-32: # %bb.0:
+; AVX-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX-32-NEXT: vpbroadcastd {{.*#+}} ymm1 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; AVX-32-NEXT: vpmulld %ymm1, %ymm0, %ymm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_mul_v8i32_v8i16_minsize:
+; AVX-64: # %bb.0:
+; AVX-64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX-64-NEXT: vpbroadcastd {{.*#+}} ymm1 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; AVX-64-NEXT: vpmulld %ymm1, %ymm0, %ymm0
+; AVX-64-NEXT: retq
+  %z = zext <8 x i16> %A to <8 x i32>
+  %m = mul nuw nsw <8 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778>
+  ret <8 x i32> %m
+}
+
+define <16 x i32> @test_mul_v16i32_v16i16_minsize(<16 x i16> %A) minsize {
+; CHECK32-LABEL: test_mul_v16i32_v16i16_minsize:
+; CHECK32: # %bb.0:
+; CHECK32-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; CHECK32-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; CHECK32-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; CHECK32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; CHECK32-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; CHECK32-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; CHECK32-NEXT: movdqa {{.*#+}} xmm1 = [18778,18778,18778,18778]
+; CHECK32-NEXT: pmulld %xmm1, %xmm4
+; CHECK32-NEXT: pmulld %xmm1, %xmm0
+; CHECK32-NEXT: pmulld %xmm1, %xmm2
+; CHECK32-NEXT: pmulld %xmm1, %xmm3
+; CHECK32-NEXT: movdqa %xmm4, %xmm1
+; CHECK32-NEXT: retl
+;
+; CHECK64-LABEL: test_mul_v16i32_v16i16_minsize:
+; CHECK64: # %bb.0:
+; CHECK64-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; CHECK64-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; CHECK64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; CHECK64-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; CHECK64-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; CHECK64-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; CHECK64-NEXT: movdqa {{.*#+}} xmm1 = [18778,18778,18778,18778]
+; CHECK64-NEXT: pmulld %xmm1, %xmm4
+; CHECK64-NEXT: pmulld %xmm1, %xmm0
+; CHECK64-NEXT: pmulld %xmm1, %xmm2
+; CHECK64-NEXT: pmulld %xmm1, %xmm3
+; CHECK64-NEXT: movdqa %xmm4, %xmm1
+; CHECK64-NEXT: retq
+;
+; SSE4-32-LABEL: test_mul_v16i32_v16i16_minsize:
+; SSE4-32: # %bb.0:
+; SSE4-32-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; SSE4-32-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE4-32-NEXT: movdqa {{.*#+}} xmm1 = [18778,18778,18778,18778]
+; SSE4-32-NEXT: pmulld %xmm1, %xmm0
+; SSE4-32-NEXT: pmulld %xmm1, %xmm2
+; SSE4-32-NEXT: pmulld %xmm1, %xmm4
+; SSE4-32-NEXT: pmulld %xmm1, %xmm3
+; SSE4-32-NEXT: movdqa %xmm4, %xmm1
+; SSE4-32-NEXT: retl
+;
+; SSE4-64-LABEL: test_mul_v16i32_v16i16_minsize:
+; SSE4-64: # %bb.0:
+; SSE4-64-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE4-64-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; SSE4-64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE4-64-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; SSE4-64-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE4-64-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE4-64-NEXT: movdqa {{.*#+}} xmm1 = [18778,18778,18778,18778]
+; SSE4-64-NEXT: pmulld %xmm1, %xmm0
+; SSE4-64-NEXT: pmulld %xmm1, %xmm2
+; SSE4-64-NEXT: pmulld %xmm1, %xmm4
+; SSE4-64-NEXT: pmulld %xmm1, %xmm3
+; SSE4-64-NEXT: movdqa %xmm4, %xmm1
+; SSE4-64-NEXT: retq
+;
+; AVX2-32-LABEL: test_mul_v16i32_v16i16_minsize:
+; AVX2-32: # %bb.0:
+; AVX2-32-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-32-NEXT: vpbroadcastd {{.*#+}} ymm2 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; AVX2-32-NEXT: vpmulld %ymm2, %ymm0, %ymm0
+; AVX2-32-NEXT: vpmulld %ymm2, %ymm1, %ymm1
+; AVX2-32-NEXT: retl
+;
+; AVX2-64-LABEL: test_mul_v16i32_v16i16_minsize:
+; AVX2-64: # %bb.0:
+; AVX2-64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-64-NEXT: vpbroadcastd {{.*#+}} ymm2 = [18778,18778,18778,18778,18778,18778,18778,18778]
+; AVX2-64-NEXT: vpmulld %ymm2, %ymm0, %ymm0
+; AVX2-64-NEXT: vpmulld %ymm2, %ymm1, %ymm1
+; AVX2-64-NEXT: retq
+;
+; AVX512-32-LABEL: test_mul_v16i32_v16i16_minsize:
+; AVX512-32: # %bb.0:
+; AVX512-32-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
+; AVX512-32-NEXT: retl
+;
+; AVX512-64-LABEL: test_mul_v16i32_v16i16_minsize:
+; AVX512-64: # %bb.0:
+; AVX512-64-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512-64-NEXT: vpmulld {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-64-NEXT: retq
+  %z = zext <16 x i16> %A to <16 x i32>
+  %m = mul nuw nsw <16 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778>
+  ret <16 x i32> %m
+}
```
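As the NOTE at the top of the test records, the CHECK blocks are autogenerated, so a change like this (renamed functions plus new RUN lines) is followed by regenerating the assertions rather than editing them by hand. A sketch, assuming a built llc in build/bin; the --llc-binary flag can be dropped when llc is already on PATH:

```sh
# Regenerate all CHECK lines in the test from the current llc output.
llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
  llvm/test/CodeGen/X86/slow-pmulld.ll
```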

