 llvm/test/CodeGen/X86/madd.ll | 496
 1 file changed, 421 insertions(+), 75 deletions(-)
diff --git a/llvm/test/CodeGen/X86/madd.ll b/llvm/test/CodeGen/X86/madd.ll
index c6e5d440809..d21fd4e0ca4 100644
--- a/llvm/test/CodeGen/X86/madd.ll
+++ b/llvm/test/CodeGen/X86/madd.ll
@@ -1,8 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=AVX,AVX512,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX256,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX256,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=AVX,AVX256,AVX512,AVX512BW
 
 define i32 @_Z10test_shortPsS_i(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
 ; SSE2-LABEL: _Z10test_shortPsS_i:
@@ -30,29 +31,55 @@ define i32 @_Z10test_shortPsS_i(i16* nocapture readonly, i16* nocapture readonly
 ; SSE2-NEXT: movd %xmm1, %eax
 ; SSE2-NEXT: retq
 ;
-; AVX-LABEL: _Z10test_shortPsS_i:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: movl %edx, %eax
-; AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
-; AVX-NEXT: xorl %ecx, %ecx
-; AVX-NEXT: .p2align 4, 0x90
-; AVX-NEXT: .LBB0_1: # %vector.body
-; AVX-NEXT: # =>This Inner Loop Header: Depth=1
-; AVX-NEXT: vmovdqu (%rsi,%rcx,2), %xmm1
-; AVX-NEXT: vpmaddwd (%rdi,%rcx,2), %xmm1, %xmm1
-; AVX-NEXT: vpaddd %ymm0, %ymm1, %ymm0
-; AVX-NEXT: addq $8, %rcx
-; AVX-NEXT: cmpq %rcx, %rax
-; AVX-NEXT: jne .LBB0_1
-; AVX-NEXT: # %bb.2: # %middle.block
-; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX-NEXT: vphaddd %ymm0, %ymm0, %ymm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; AVX1-LABEL: _Z10test_shortPsS_i:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: movl %edx, %eax
+; AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: xorl %ecx, %ecx
+; AVX1-NEXT: .p2align 4, 0x90
+; AVX1-NEXT: .LBB0_1: # %vector.body
+; AVX1-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX1-NEXT: vmovdqu (%rsi,%rcx,2), %xmm1
+; AVX1-NEXT: vpmaddwd (%rdi,%rcx,2), %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm1
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT: addq $8, %rcx
+; AVX1-NEXT: cmpq %rcx, %rax
+; AVX1-NEXT: jne .LBB0_1
+; AVX1-NEXT: # %bb.2: # %middle.block
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX256-LABEL: _Z10test_shortPsS_i:
+; AVX256: # %bb.0: # %entry
+; AVX256-NEXT: movl %edx, %eax
+; AVX256-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; AVX256-NEXT: xorl %ecx, %ecx
+; AVX256-NEXT: .p2align 4, 0x90
+; AVX256-NEXT: .LBB0_1: # %vector.body
+; AVX256-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX256-NEXT: vmovdqu (%rsi,%rcx,2), %xmm1
+; AVX256-NEXT: vpmaddwd (%rdi,%rcx,2), %xmm1, %xmm1
+; AVX256-NEXT: vpaddd %ymm0, %ymm1, %ymm0
+; AVX256-NEXT: addq $8, %rcx
+; AVX256-NEXT: cmpq %rcx, %rax
+; AVX256-NEXT: jne .LBB0_1
+; AVX256-NEXT: # %bb.2: # %middle.block
+; AVX256-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX256-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX256-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX256-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX256-NEXT: vphaddd %ymm0, %ymm0, %ymm0
+; AVX256-NEXT: vmovd %xmm0, %eax
+; AVX256-NEXT: vzeroupper
+; AVX256-NEXT: retq
 entry:
   %3 = zext i32 %2 to i64
   br label %vector.body
@@ -131,6 +158,50 @@ define i32 @_Z10test_shortPsS_i_512(i16* nocapture readonly, i16* nocapture read
 ; SSE2-NEXT: movd %xmm1, %eax
 ; SSE2-NEXT: retq
 ;
+; AVX1-LABEL: _Z10test_shortPsS_i_512:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: movl %edx, %eax
+; AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: xorl %ecx, %ecx
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: .p2align 4, 0x90
+; AVX1-NEXT: .LBB1_1: # %vector.body
+; AVX1-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX1-NEXT: vpmovsxwd 8(%rdi,%rcx,2), %xmm2
+; AVX1-NEXT: vpmovsxwd (%rdi,%rcx,2), %xmm3
+; AVX1-NEXT: vpackssdw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpmovsxwd 24(%rdi,%rcx,2), %xmm3
+; AVX1-NEXT: vpmovsxwd 16(%rdi,%rcx,2), %xmm4
+; AVX1-NEXT: vpackssdw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpmovsxwd 8(%rsi,%rcx,2), %xmm4
+; AVX1-NEXT: vpmovsxwd (%rsi,%rcx,2), %xmm5
+; AVX1-NEXT: vpackssdw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpmaddwd %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpmovsxwd 24(%rsi,%rcx,2), %xmm4
+; AVX1-NEXT: vpmovsxwd 16(%rsi,%rcx,2), %xmm5
+; AVX1-NEXT: vpackssdw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpmaddwd %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT: vpaddd %xmm0, %xmm2, %xmm2
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT: addq $16, %rcx
+; AVX1-NEXT: cmpq %rcx, %rax
+; AVX1-NEXT: jne .LBB1_1
+; AVX1-NEXT: # %bb.2: # %middle.block
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
 ; AVX2-LABEL: _Z10test_shortPsS_i_512:
 ; AVX2: # %bb.0: # %entry
 ; AVX2-NEXT: movl %edx, %eax
@@ -249,30 +320,62 @@ define i32 @test_unsigned_short(i16* nocapture readonly, i16* nocapture readonly
 ; SSE2-NEXT: movd %xmm0, %eax
 ; SSE2-NEXT: retq
 ;
-; AVX-LABEL: test_unsigned_short:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: movl %edx, %eax
-; AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
-; AVX-NEXT: xorl %ecx, %ecx
-; AVX-NEXT: .p2align 4, 0x90
-; AVX-NEXT: .LBB2_1: # %vector.body
-; AVX-NEXT: # =>This Inner Loop Header: Depth=1
-; AVX-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX-NEXT: vpmulld %ymm1, %ymm2, %ymm1
-; AVX-NEXT: vpaddd %ymm0, %ymm1, %ymm0
-; AVX-NEXT: addq $8, %rcx
-; AVX-NEXT: cmpq %rcx, %rax
-; AVX-NEXT: jne .LBB2_1
-; AVX-NEXT: # %bb.2: # %middle.block
-; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX-NEXT: vphaddd %ymm0, %ymm0, %ymm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; AVX1-LABEL: test_unsigned_short:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: movl %edx, %eax
+; AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: xorl %ecx, %ecx
+; AVX1-NEXT: .p2align 4, 0x90
+; AVX1-NEXT: .LBB2_1: # %vector.body
+; AVX1-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmulld %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmulld %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: addq $8, %rcx
+; AVX1-NEXT: cmpq %rcx, %rax
+; AVX1-NEXT: jne .LBB2_1
+; AVX1-NEXT: # %bb.2: # %middle.block
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX256-LABEL: test_unsigned_short:
+; AVX256: # %bb.0: # %entry
+; AVX256-NEXT: movl %edx, %eax
+; AVX256-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; AVX256-NEXT: xorl %ecx, %ecx
+; AVX256-NEXT: .p2align 4, 0x90
+; AVX256-NEXT: .LBB2_1: # %vector.body
+; AVX256-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX256-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX256-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX256-NEXT: vpmulld %ymm1, %ymm2, %ymm1
+; AVX256-NEXT: vpaddd %ymm0, %ymm1, %ymm0
+; AVX256-NEXT: addq $8, %rcx
+; AVX256-NEXT: cmpq %rcx, %rax
+; AVX256-NEXT: jne .LBB2_1
+; AVX256-NEXT: # %bb.2: # %middle.block
+; AVX256-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX256-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX256-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX256-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX256-NEXT: vphaddd %ymm0, %ymm0, %ymm0
+; AVX256-NEXT: vmovd %xmm0, %eax
+; AVX256-NEXT: vzeroupper
+; AVX256-NEXT: retq
 entry:
   %3 = zext i32 %2 to i64
   br label %vector.body
@@ -351,6 +454,52 @@ define i32 @test_unsigned_short_512(i16* nocapture readonly, i16* nocapture read
 ; SSE2-NEXT: movd %xmm1, %eax
 ; SSE2-NEXT: retq
 ;
+; AVX1-LABEL: test_unsigned_short_512:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: movl %edx, %eax
+; AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: xorl %ecx, %ecx
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: .p2align 4, 0x90
+; AVX1-NEXT: .LBB3_1: # %vector.body
+; AVX1-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmulld %xmm2, %xmm6, %xmm2
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmulld %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmulld %xmm4, %xmm6, %xmm4
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmulld %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT: vpaddd %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpaddd %xmm0, %xmm5, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: addq $16, %rcx
+; AVX1-NEXT: cmpq %rcx, %rax
+; AVX1-NEXT: jne .LBB3_1
+; AVX1-NEXT: # %bb.2: # %middle.block
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
 ; AVX2-LABEL: test_unsigned_short_512:
 ; AVX2: # %bb.0: # %entry
 ; AVX2-NEXT: movl %edx, %eax
@@ -495,6 +644,42 @@ define i32 @_Z9test_charPcS_i(i8* nocapture readonly, i8* nocapture readonly, i3
 ; SSE2-NEXT: movd %xmm1, %eax
 ; SSE2-NEXT: retq
 ;
+; AVX1-LABEL: _Z9test_charPcS_i:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: movl %edx, %eax
+; AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: xorl %ecx, %ecx
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: .p2align 4, 0x90
+; AVX1-NEXT: .LBB4_1: # %vector.body
+; AVX1-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX1-NEXT: vpmovsxbw (%rdi,%rcx), %xmm2
+; AVX1-NEXT: vpmovsxbw 8(%rdi,%rcx), %xmm3
+; AVX1-NEXT: vpmovsxbw (%rsi,%rcx), %xmm4
+; AVX1-NEXT: vpmaddwd %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpmovsxbw 8(%rsi,%rcx), %xmm4
+; AVX1-NEXT: vpmaddwd %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT: vpaddd %xmm0, %xmm2, %xmm2
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT: addq $16, %rcx
+; AVX1-NEXT: cmpq %rcx, %rax
+; AVX1-NEXT: jne .LBB4_1
+; AVX1-NEXT: # %bb.2: # %middle.block
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
 ; AVX2-LABEL: _Z9test_charPcS_i:
 ; AVX2: # %bb.0: # %entry
 ; AVX2-NEXT: movl %edx, %eax
@@ -669,6 +854,60 @@ define i32 @_Z9test_charPcS_i_512(i8* nocapture readonly, i8* nocapture readonly
 ; SSE2-NEXT: movd %xmm1, %eax
 ; SSE2-NEXT: retq
 ;
+; AVX1-LABEL: _Z9test_charPcS_i_512:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: movl %edx, %eax
+; AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: xorl %ecx, %ecx
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: .p2align 4, 0x90
+; AVX1-NEXT: .LBB5_1: # %vector.body
+; AVX1-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX1-NEXT: vpmovsxbw 24(%rdi,%rcx), %xmm4
+; AVX1-NEXT: vpmovsxbw 16(%rdi,%rcx), %xmm5
+; AVX1-NEXT: vpmovsxbw (%rdi,%rcx), %xmm6
+; AVX1-NEXT: vpmovsxbw 8(%rdi,%rcx), %xmm8
+; AVX1-NEXT: vpmovsxbw 24(%rsi,%rcx), %xmm7
+; AVX1-NEXT: vpmaddwd %xmm4, %xmm7, %xmm4
+; AVX1-NEXT: vpmovsxbw 16(%rsi,%rcx), %xmm7
+; AVX1-NEXT: vpmaddwd %xmm5, %xmm7, %xmm5
+; AVX1-NEXT: vpmovsxbw (%rsi,%rcx), %xmm7
+; AVX1-NEXT: vpmaddwd %xmm6, %xmm7, %xmm6
+; AVX1-NEXT: vpmovsxbw 8(%rsi,%rcx), %xmm7
+; AVX1-NEXT: vpmaddwd %xmm8, %xmm7, %xmm7
+; AVX1-NEXT: vpaddd %xmm2, %xmm7, %xmm7
+; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-NEXT: vpaddd %xmm0, %xmm6, %xmm6
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT: vpaddd %xmm1, %xmm5, %xmm5
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT: vpaddd %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX1-NEXT: addq $32, %rcx
+; AVX1-NEXT: cmpq %rcx, %rax
+; AVX1-NEXT: jne .LBB5_1
+; AVX1-NEXT: # %bb.2: # %middle.block
+; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm6, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpaddd %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
 ; AVX2-LABEL: _Z9test_charPcS_i_512:
 ; AVX2: # %bb.0: # %entry
 ; AVX2-NEXT: movl %edx, %eax
@@ -864,6 +1103,20 @@ define <4 x i32> @larger_mul(<16 x i16> %A, <16 x i16> %B) {
 ; SSE2-NEXT: paddd %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
+; AVX1-LABEL: larger_mul:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
+; AVX1-NEXT: vpackssdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
 ; AVX2-LABEL: larger_mul:
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
@@ -914,10 +1167,19 @@ define <8 x i32> @pmaddwd_16(<16 x i16> %A, <16 x i16> %B) {
 ; SSE2-NEXT: pmaddwd %xmm3, %xmm1
 ; SSE2-NEXT: retq
 ;
-; AVX-LABEL: pmaddwd_16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0
-; AVX-NEXT: retq
+; AVX1-LABEL: pmaddwd_16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpmaddwd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX256-LABEL: pmaddwd_16:
+; AVX256: # %bb.0:
+; AVX256-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0
+; AVX256-NEXT: retq
   %a = sext <16 x i16> %A to <16 x i32>
   %b = sext <16 x i16> %B to <16 x i32>
   %m = mul nsw <16 x i32> %a, %b
@@ -936,6 +1198,20 @@ define <16 x i32> @pmaddwd_32(<32 x i16> %A, <32 x i16> %B) {
 ; SSE2-NEXT: pmaddwd %xmm7, %xmm3
 ; SSE2-NEXT: retq
 ;
+; AVX1-LABEL: pmaddwd_32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm6
+; AVX1-NEXT: vpmaddwd %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
+; AVX1-NEXT: vpmaddwd %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vpmaddwd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
+; AVX1-NEXT: vpmaddwd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
 ; AVX2-LABEL: pmaddwd_32:
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vpmaddwd %ymm2, %ymm0, %ymm0
@@ -996,15 +1272,28 @@ define <4 x i32> @pmaddwd_negative1(<8 x i16> %A, <8 x i16> %B) {
 ; SSE2-NEXT: paddd %xmm2, %xmm0
 ; SSE2-NEXT: retq
 ;
-; AVX-LABEL: pmaddwd_negative1:
-; AVX: # %bb.0:
-; AVX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX-NEXT: vpmulld %ymm1, %ymm0, %ymm0
-; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX-NEXT: vphaddd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; AVX1-LABEL: pmaddwd_negative1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX1-NEXT: vpmulld %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vphaddd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX256-LABEL: pmaddwd_negative1:
+; AVX256: # %bb.0:
+; AVX256-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX256-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX256-NEXT: vpmulld %ymm1, %ymm0, %ymm0
+; AVX256-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX256-NEXT: vphaddd %xmm1, %xmm0, %xmm0
+; AVX256-NEXT: vzeroupper
+; AVX256-NEXT: retq
   %a = zext <8 x i16> %A to <8 x i32>
   %b = zext <8 x i16> %B to <8 x i32>
   %m = mul nuw <8 x i32> %a, %b
@@ -1044,14 +1333,24 @@ define <4 x i32> @pmaddwd_negative2(<8 x i16> %A) {
 ; SSE2-NEXT: paddd %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
-; AVX-LABEL: pmaddwd_negative2:
-; AVX: # %bb.0:
-; AVX-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX-NEXT: vpmulld {{.*}}(%rip), %ymm0, %ymm0
-; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX-NEXT: vphaddd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; AVX1-LABEL: pmaddwd_negative2:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vphaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX256-LABEL: pmaddwd_negative2:
+; AVX256: # %bb.0:
+; AVX256-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX256-NEXT: vpmulld {{.*}}(%rip), %ymm0, %ymm0
+; AVX256-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX256-NEXT: vphaddd %xmm1, %xmm0, %xmm0
+; AVX256-NEXT: vzeroupper
+; AVX256-NEXT: retq
   %a = sext <8 x i16> %A to <8 x i32>
   %m = mul nsw <8 x i32> %a, <i32 32768, i32 -32768, i32 0, i32 0, i32 1, i32 7, i32 42, i32 32>
   %odd = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -1086,10 +1385,19 @@ define <8 x i32> @jumbled_indices8(<16 x i16> %A, <16 x i16> %B) {
 ; SSE2-NEXT: pmaddwd %xmm3, %xmm1
 ; SSE2-NEXT: retq
 ;
-; AVX-LABEL: jumbled_indices8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0
-; AVX-NEXT: retq
+; AVX1-LABEL: jumbled_indices8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpmaddwd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX256-LABEL: jumbled_indices8:
+; AVX256: # %bb.0:
+; AVX256-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0
+; AVX256-NEXT: retq
   %exta = sext <16 x i16> %A to <16 x i32>
   %extb = sext <16 x i16> %B to <16 x i32>
   %m = mul <16 x i32> %exta, %extb
@@ -1108,6 +1416,20 @@ define <16 x i32> @jumbled_indices16(<32 x i16> %A, <32 x i16> %B) {
 ; SSE2-NEXT: pmaddwd %xmm7, %xmm3
 ; SSE2-NEXT: retq
 ;
+; AVX1-LABEL: jumbled_indices16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm6
+; AVX1-NEXT: vpmaddwd %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
+; AVX1-NEXT: vpmaddwd %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vpmaddwd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
+; AVX1-NEXT: vpmaddwd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
 ; AVX2-LABEL: jumbled_indices16:
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vpmaddwd %ymm2, %ymm0, %ymm0
@@ -1156,6 +1478,30 @@ define <32 x i32> @jumbled_indices32(<64 x i16> %A, <64 x i16> %B) {
 ; SSE2-NEXT: movq %rdi, %rax
 ; SSE2-NEXT: retq
 ;
+; AVX1-LABEL: jumbled_indices32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm9
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm10
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm11
+; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm12
+; AVX1-NEXT: vpmaddwd %xmm12, %xmm8, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm12
+; AVX1-NEXT: vpmaddwd %xmm12, %xmm9, %xmm9
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm12
+; AVX1-NEXT: vpmaddwd %xmm12, %xmm10, %xmm10
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm12
+; AVX1-NEXT: vpmaddwd %xmm12, %xmm11, %xmm11
+; AVX1-NEXT: vpmaddwd %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm0
+; AVX1-NEXT: vpmaddwd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm10, %ymm1, %ymm1
+; AVX1-NEXT: vpmaddwd %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm9, %ymm2, %ymm2
+; AVX1-NEXT: vpmaddwd %xmm7, %xmm3, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm8, %ymm3, %ymm3
+; AVX1-NEXT: retq
+;
 ; AVX2-LABEL: jumbled_indices32:
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vpmaddwd %ymm4, %ymm0, %ymm0