; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX256,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX256,AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=AVX,AVX256,AVX512,AVX512BW

define i32 @_Z10test_shortPsS_i(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
; SSE2-LABEL: _Z10test_shortPsS_i:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    movl %edx, %eax
; SSE2-NEXT:    pxor %xmm0, %xmm0
; SSE2-NEXT:    xorl %ecx, %ecx
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    .p2align 4, 0x90
; SSE2-NEXT:  .LBB0_1: # %vector.body
; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
; SSE2-NEXT:    movdqu (%rdi,%rcx,2), %xmm2
; SSE2-NEXT:    movdqu (%rsi,%rcx,2), %xmm3
; SSE2-NEXT:    pmaddwd %xmm2, %xmm3
; SSE2-NEXT:    paddd %xmm3, %xmm1
; SSE2-NEXT:    addq $8, %rcx
; SSE2-NEXT:    cmpq %rcx, %rax
; SSE2-NEXT:    jne .LBB0_1
; SSE2-NEXT:  # %bb.2: # %middle.block
; SSE2-NEXT:    paddd %xmm0, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE2-NEXT:    paddd %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT:    paddd %xmm0, %xmm1
; SSE2-NEXT:    movd %xmm1, %eax
; SSE2-NEXT:    retq
;
; AVX1-LABEL: _Z10test_shortPsS_i:
; AVX1:       # %bb.0: # %entry
; AVX1-NEXT:    movl %edx, %eax
; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX1-NEXT:    xorl %ecx, %ecx
; AVX1-NEXT:    .p2align 4, 0x90
; AVX1-NEXT:  .LBB0_1: # %vector.body
; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX1-NEXT:    vmovdqu (%rsi,%rcx,2), %xmm1
; AVX1-NEXT:    vpmaddwd (%rdi,%rcx,2), %xmm1, %xmm1
; AVX1-NEXT:    vpaddd %xmm0, %xmm1, %xmm1
; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX1-NEXT:    addq $8, %rcx
; AVX1-NEXT:    cmpq %rcx, %rax
; AVX1-NEXT:    jne .LBB0_1
; AVX1-NEXT:  # %bb.2: # %middle.block
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vmovd %xmm0, %eax
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX256-LABEL: _Z10test_shortPsS_i:
; AVX256:       # %bb.0: # %entry
; AVX256-NEXT:    movl %edx, %eax
; AVX256-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX256-NEXT:    xorl %ecx, %ecx
; AVX256-NEXT:    .p2align 4, 0x90
; AVX256-NEXT:  .LBB0_1: # %vector.body
; AVX256-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX256-NEXT:    vmovdqu (%rsi,%rcx,2), %xmm1
; AVX256-NEXT:    vpmaddwd (%rdi,%rcx,2), %xmm1, %xmm1
; AVX256-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
; AVX256-NEXT:    addq $8, %rcx
; AVX256-NEXT:    cmpq %rcx, %rax
; AVX256-NEXT:    jne .LBB0_1
; AVX256-NEXT:  # %bb.2: # %middle.block
; AVX256-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX256-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX256-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX256-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX256-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
; AVX256-NEXT:    vmovd %xmm0, %eax
; AVX256-NEXT:    vzeroupper
; AVX256-NEXT:    retq
entry:
  %3 = zext i32 %2 to i64
  br label %vector.body

vector.body:
  %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
  %vec.phi = phi <8 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
  %4 = getelementptr inbounds i16, i16* %0, i64 %index
  %5 = bitcast i16* %4 to <8 x i16>*
  %wide.load = load <8 x i16>, <8 x i16>* %5, align 2
  %6 = sext <8 x i16> %wide.load to <8 x i32>
  %7 = getelementptr inbounds i16, i16* %1, i64 %index
  %8 = bitcast i16* %7 to <8 x i16>*
  %wide.load14 = load <8 x i16>, <8 x i16>* %8, align 2
  %9 = sext <8 x i16> %wide.load14 to <8 x i32>
  %10 = mul nsw <8 x i32> %9, %6
  %11 = add nsw <8 x i32> %10, %vec.phi
  %index.next = add i64 %index, 8
  %12 = icmp eq i64 %index.next, %3
  br i1 %12, label %middle.block, label %vector.body

middle.block:
  %rdx.shuf = shufflevector <8 x i32> %11, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx = add <8 x i32> %11, %rdx.shuf
  %rdx.shuf15 = shufflevector <8 x i32> %bin.rdx, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx16 = add <8 x i32> %bin.rdx, %rdx.shuf15
  %rdx.shuf17 = shufflevector <8 x i32> %bin.rdx16, <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx18 = add <8 x i32> %bin.rdx16, %rdx.shuf17
  %13 = extractelement <8 x i32> %bin.rdx18, i32 0
  ret i32 %13
}

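; Same dot-product loop with a <16 x i32> accumulator; targets without
; 512-bit vectors are expected to split the pmaddwd/paddd chain.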
define i32 @_Z10test_shortPsS_i_512(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
; SSE2-LABEL: _Z10test_shortPsS_i_512:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    movl %edx, %eax
; SSE2-NEXT:    pxor %xmm0, %xmm0
; SSE2-NEXT:    xorl %ecx, %ecx
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    pxor %xmm3, %xmm3
; SSE2-NEXT:    pxor %xmm2, %xmm2
; SSE2-NEXT:    .p2align 4, 0x90
; SSE2-NEXT:  .LBB1_1: # %vector.body
; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
; SSE2-NEXT:    movdqu (%rdi,%rcx,2), %xmm4
; SSE2-NEXT:    movdqu 16(%rdi,%rcx,2), %xmm8
; SSE2-NEXT:    movdqu (%rsi,%rcx,2), %xmm6
; SSE2-NEXT:    movdqu 16(%rsi,%rcx,2), %xmm7
; SSE2-NEXT:    movdqa %xmm6, %xmm5
; SSE2-NEXT:    pmulhw %xmm4, %xmm5
; SSE2-NEXT:    pmullw %xmm4, %xmm6
; SSE2-NEXT:    movdqa %xmm6, %xmm4
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; SSE2-NEXT:    paddd %xmm4, %xmm0
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
; SSE2-NEXT:    paddd %xmm6, %xmm1
; SSE2-NEXT:    movdqa %xmm7, %xmm4
; SSE2-NEXT:    pmulhw %xmm8, %xmm4
; SSE2-NEXT:    pmullw %xmm8, %xmm7
; SSE2-NEXT:    movdqa %xmm7, %xmm5
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; SSE2-NEXT:    paddd %xmm5, %xmm3
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
; SSE2-NEXT:    paddd %xmm7, %xmm2
; SSE2-NEXT:    addq $16, %rcx
; SSE2-NEXT:    cmpq %rcx, %rax
; SSE2-NEXT:    jne .LBB1_1
; SSE2-NEXT:  # %bb.2: # %middle.block
; SSE2-NEXT:    paddd %xmm3, %xmm0
; SSE2-NEXT:    paddd %xmm2, %xmm1
; SSE2-NEXT:    paddd %xmm0, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE2-NEXT:    paddd %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT:    paddd %xmm0, %xmm1
; SSE2-NEXT:    movd %xmm1, %eax
; SSE2-NEXT:    retq
;
; AVX1-LABEL: _Z10test_shortPsS_i_512:
; AVX1:       # %bb.0: # %entry
; AVX1-NEXT:    movl %edx, %eax
; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX1-NEXT:    xorl %ecx, %ecx
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    .p2align 4, 0x90
; AVX1-NEXT:  .LBB1_1: # %vector.body
; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX1-NEXT:    vmovdqu (%rdi,%rcx,2), %ymm2
; AVX1-NEXT:    vmovdqu (%rsi,%rcx,2), %ymm3
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm5
; AVX1-NEXT:    vpmaddwd %xmm4, %xmm5, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT:    vpaddd %xmm5, %xmm4, %xmm4
; AVX1-NEXT:    vpmaddwd %xmm2, %xmm3, %xmm2
; AVX1-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
; AVX1-NEXT:    addq $16, %rcx
; AVX1-NEXT:    cmpq %rcx, %rax
; AVX1-NEXT:    jne .LBB1_1
; AVX1-NEXT:  # %bb.2: # %middle.block
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
; AVX1-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vmovd %xmm0, %eax
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: _Z10test_shortPsS_i_512:
; AVX2:       # %bb.0: # %entry
; AVX2-NEXT:    movl %edx, %eax
; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT:    xorl %ecx, %ecx
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    .p2align 4, 0x90
; AVX2-NEXT:  .LBB1_1: # %vector.body
; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX2-NEXT:    vmovdqu (%rsi,%rcx,2), %ymm2
; AVX2-NEXT:    vpmaddwd (%rdi,%rcx,2), %ymm2, %ymm2
; AVX2-NEXT:    vpaddd %ymm1, %ymm2, %ymm1
; AVX2-NEXT:    addq $16, %rcx
; AVX2-NEXT:    cmpq %rcx, %rax
; AVX2-NEXT:    jne .LBB1_1
; AVX2-NEXT:  # %bb.2: # %middle.block
; AVX2-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: _Z10test_shortPsS_i_512:
; AVX512:       # %bb.0: # %entry
; AVX512-NEXT:    movl %edx, %eax
; AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX512-NEXT:    xorl %ecx, %ecx
; AVX512-NEXT:    .p2align 4, 0x90
; AVX512-NEXT:  .LBB1_1: # %vector.body
; AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX512-NEXT:    vmovdqu (%rsi,%rcx,2), %ymm1
; AVX512-NEXT:    vpmaddwd (%rdi,%rcx,2), %ymm1, %ymm1
; AVX512-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
; AVX512-NEXT:    addq $16, %rcx
; AVX512-NEXT:    cmpq %rcx, %rax
; AVX512-NEXT:    jne .LBB1_1
; AVX512-NEXT:  # %bb.2: # %middle.block
; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
entry:
  %3 = zext i32 %2 to i64
  br label %vector.body

vector.body:
  %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
  %vec.phi = phi <16 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
  %4 = getelementptr inbounds i16, i16* %0, i64 %index
  %5 = bitcast i16* %4 to <16 x i16>*
  %wide.load = load <16 x i16>, <16 x i16>* %5, align 2
  %6 = sext <16 x i16> %wide.load to <16 x i32>
  %7 = getelementptr inbounds i16, i16* %1, i64 %index
  %8 = bitcast i16* %7 to <16 x i16>*
  %wide.load14 = load <16 x i16>, <16 x i16>* %8, align 2
  %9 = sext <16 x i16> %wide.load14 to <16 x i32>
  %10 = mul nsw <16 x i32> %9, %6
  %11 = add nsw <16 x i32> %10, %vec.phi
  %index.next = add i64 %index, 16
  %12 = icmp eq i64 %index.next, %3
  br i1 %12, label %middle.block, label %vector.body

middle.block:
  %rdx.shuf1 = shufflevector <16 x i32> %11, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx1 = add <16 x i32> %11, %rdx.shuf1
  %rdx.shuf = shufflevector <16 x i32> %bin.rdx1, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx = add <16 x i32> %bin.rdx1, %rdx.shuf
  %rdx.shuf15 = shufflevector <16 x i32> %bin.rdx, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx16 = add <16 x i32> %bin.rdx, %rdx.shuf15
  %rdx.shuf17 = shufflevector <16 x i32> %bin.rdx16, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx18 = add <16 x i32> %bin.rdx16, %rdx.shuf17
  %13 = extractelement <16 x i32> %bin.rdx18, i32 0
  ret i32 %13
}

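; The inputs here are zero-extended, so the multiply must not be matched to
; pmaddwd, which treats its operands as signed.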
define i32 @test_unsigned_short(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
; SSE2-LABEL: test_unsigned_short:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    movl %edx, %eax
; SSE2-NEXT:    pxor %xmm0, %xmm0
; SSE2-NEXT:    xorl %ecx, %ecx
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    .p2align 4, 0x90
; SSE2-NEXT:  .LBB2_1: # %vector.body
; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
; SSE2-NEXT:    movdqu (%rdi,%rcx,2), %xmm2
; SSE2-NEXT:    movdqu (%rsi,%rcx,2), %xmm3
; SSE2-NEXT:    movdqa %xmm3, %xmm4
; SSE2-NEXT:    pmulhuw %xmm2, %xmm4
; SSE2-NEXT:    pmullw %xmm2, %xmm3
; SSE2-NEXT:    movdqa %xmm3, %xmm2
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
; SSE2-NEXT:    paddd %xmm2, %xmm0
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
; SSE2-NEXT:    paddd %xmm3, %xmm1
; SSE2-NEXT:    addq $8, %rcx
; SSE2-NEXT:    cmpq %rcx, %rax
; SSE2-NEXT:    jne .LBB2_1
; SSE2-NEXT:  # %bb.2: # %middle.block
; SSE2-NEXT:    paddd %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT:    paddd %xmm0, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; SSE2-NEXT:    paddd %xmm1, %xmm0
; SSE2-NEXT:    movd %xmm0, %eax
; SSE2-NEXT:    retq
;
; AVX1-LABEL: test_unsigned_short:
; AVX1:       # %bb.0: # %entry
; AVX1-NEXT:    movl %edx, %eax
; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX1-NEXT:    xorl %ecx, %ecx
; AVX1-NEXT:    .p2align 4, 0x90
; AVX1-NEXT:  .LBB2_1: # %vector.body
; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT:    vpmulld %xmm1, %xmm3, %xmm1
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT:    vpmulld %xmm2, %xmm3, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT:    vpaddd %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT:    addq $8, %rcx
; AVX1-NEXT:    cmpq %rcx, %rax
; AVX1-NEXT:    jne .LBB2_1
; AVX1-NEXT:  # %bb.2: # %middle.block
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vmovd %xmm0, %eax
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX256-LABEL: test_unsigned_short:
; AVX256:       # %bb.0: # %entry
; AVX256-NEXT:    movl %edx, %eax
; AVX256-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX256-NEXT:    xorl %ecx, %ecx
; AVX256-NEXT:    .p2align 4, 0x90
; AVX256-NEXT:  .LBB2_1: # %vector.body
; AVX256-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX256-NEXT:    vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX256-NEXT:    vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX256-NEXT:    vpmulld %ymm1, %ymm2, %ymm1
; AVX256-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
; AVX256-NEXT:    addq $8, %rcx
; AVX256-NEXT:    cmpq %rcx, %rax
; AVX256-NEXT:    jne .LBB2_1
; AVX256-NEXT:  # %bb.2: # %middle.block
; AVX256-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX256-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX256-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX256-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX256-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
; AVX256-NEXT:    vmovd %xmm0, %eax
; AVX256-NEXT:    vzeroupper
; AVX256-NEXT:    retq
entry:
  %3 = zext i32 %2 to i64
  br label %vector.body

vector.body:
  %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
  %vec.phi = phi <8 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
  %4 = getelementptr inbounds i16, i16* %0, i64 %index
  %5 = bitcast i16* %4 to <8 x i16>*
  %wide.load = load <8 x i16>, <8 x i16>* %5, align 2
  %6 = zext <8 x i16> %wide.load to <8 x i32>
  %7 = getelementptr inbounds i16, i16* %1, i64 %index
  %8 = bitcast i16* %7 to <8 x i16>*
  %wide.load14 = load <8 x i16>, <8 x i16>* %8, align 2
  %9 = zext <8 x i16> %wide.load14 to <8 x i32>
  %10 = mul nsw <8 x i32> %9, %6
  %11 = add nsw <8 x i32> %10, %vec.phi
  %index.next = add i64 %index, 8
  %12 = icmp eq i64 %index.next, %3
  br i1 %12, label %middle.block, label %vector.body

middle.block:
  %rdx.shuf = shufflevector <8 x i32> %11, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx = add <8 x i32> %11, %rdx.shuf
  %rdx.shuf15 = shufflevector <8 x i32> %bin.rdx, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx16 = add <8 x i32> %bin.rdx, %rdx.shuf15
  %rdx.shuf17 = shufflevector <8 x i32> %bin.rdx16, <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx18 = add <8 x i32> %bin.rdx16, %rdx.shuf17
  %13 = extractelement <8 x i32> %bin.rdx18, i32 0
  ret i32 %13
}

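; 512-bit version of the unsigned test; pmaddwd should still not be used.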
define i32 @test_unsigned_short_512(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
; SSE2-LABEL: test_unsigned_short_512:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    movl %edx, %eax
; SSE2-NEXT:    pxor %xmm0, %xmm0
; SSE2-NEXT:    xorl %ecx, %ecx
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    pxor %xmm3, %xmm3
; SSE2-NEXT:    pxor %xmm2, %xmm2
; SSE2-NEXT:    .p2align 4, 0x90
; SSE2-NEXT:  .LBB3_1: # %vector.body
; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
; SSE2-NEXT:    movdqu (%rdi,%rcx,2), %xmm4
; SSE2-NEXT:    movdqu 16(%rdi,%rcx,2), %xmm8
; SSE2-NEXT:    movdqu (%rsi,%rcx,2), %xmm6
; SSE2-NEXT:    movdqu 16(%rsi,%rcx,2), %xmm7
; SSE2-NEXT:    movdqa %xmm6, %xmm5
; SSE2-NEXT:    pmulhuw %xmm4, %xmm5
; SSE2-NEXT:    pmullw %xmm4, %xmm6
; SSE2-NEXT:    movdqa %xmm6, %xmm4
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; SSE2-NEXT:    paddd %xmm4, %xmm0
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
; SSE2-NEXT:    paddd %xmm6, %xmm1
; SSE2-NEXT:    movdqa %xmm7, %xmm4
; SSE2-NEXT:    pmulhuw %xmm8, %xmm4
; SSE2-NEXT:    pmullw %xmm8, %xmm7
; SSE2-NEXT:    movdqa %xmm7, %xmm5
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; SSE2-NEXT:    paddd %xmm5, %xmm3
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
; SSE2-NEXT:    paddd %xmm7, %xmm2
; SSE2-NEXT:    addq $16, %rcx
; SSE2-NEXT:    cmpq %rcx, %rax
; SSE2-NEXT:    jne .LBB3_1
; SSE2-NEXT:  # %bb.2: # %middle.block
; SSE2-NEXT:    paddd %xmm3, %xmm0
; SSE2-NEXT:    paddd %xmm2, %xmm1
; SSE2-NEXT:    paddd %xmm0, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE2-NEXT:    paddd %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT:    paddd %xmm0, %xmm1
; SSE2-NEXT:    movd %xmm1, %eax
; SSE2-NEXT:    retq
;
; AVX1-LABEL: test_unsigned_short_512:
; AVX1:       # %bb.0: # %entry
; AVX1-NEXT:    movl %edx, %eax
; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX1-NEXT:    xorl %ecx, %ecx
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    .p2align 4, 0x90
; AVX1-NEXT:  .LBB3_1: # %vector.body
; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT:    vpmulld %xmm2, %xmm6, %xmm2
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT:    vpmulld %xmm3, %xmm6, %xmm3
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT:    vpmulld %xmm4, %xmm6, %xmm4
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT:    vpmulld %xmm5, %xmm6, %xmm5
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
; AVX1-NEXT:    vpaddd %xmm6, %xmm2, %xmm2
; AVX1-NEXT:    vpaddd %xmm1, %xmm3, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT:    vpaddd %xmm2, %xmm4, %xmm2
; AVX1-NEXT:    vpaddd %xmm0, %xmm5, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT:    addq $16, %rcx
; AVX1-NEXT:    cmpq %rcx, %rax
; AVX1-NEXT:    jne .LBB3_1
; AVX1-NEXT:  # %bb.2: # %middle.block
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
; AVX1-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vmovd %xmm0, %eax
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_unsigned_short_512:
; AVX2:       # %bb.0: # %entry
; AVX2-NEXT:    movl %edx, %eax
; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT:    xorl %ecx, %ecx
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    .p2align 4, 0x90
; AVX2-NEXT:  .LBB3_1: # %vector.body
; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT:    vpmulld %ymm2, %ymm4, %ymm2
; AVX2-NEXT:    vpaddd %ymm1, %ymm2, %ymm1
; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT:    vpmulld %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    addq $16, %rcx
; AVX2-NEXT:    cmpq %rcx, %rax
; AVX2-NEXT:    jne .LBB3_1
; AVX2-NEXT:  # %bb.2: # %middle.block
; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: test_unsigned_short_512:
; AVX512:       # %bb.0: # %entry
; AVX512-NEXT:    movl %edx, %eax
; AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX512-NEXT:    xorl %ecx, %ecx
; AVX512-NEXT:    .p2align 4, 0x90
; AVX512-NEXT:  .LBB3_1: # %vector.body
; AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX512-NEXT:    vpmovzxwd {{.*#+}} zmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512-NEXT:    vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512-NEXT:    vpmulld %zmm1, %zmm2, %zmm1
; AVX512-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
; AVX512-NEXT:    addq $16, %rcx
; AVX512-NEXT:    cmpq %rcx, %rax
; AVX512-NEXT:    jne .LBB3_1
; AVX512-NEXT:  # %bb.2: # %middle.block
; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
entry:
  %3 = zext i32 %2 to i64
  br label %vector.body

vector.body:
  %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
  %vec.phi = phi <16 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
  %4 = getelementptr inbounds i16, i16* %0, i64 %index
  %5 = bitcast i16* %4 to <16 x i16>*
  %wide.load = load <16 x i16>, <16 x i16>* %5, align 2
  %6 = zext <16 x i16> %wide.load to <16 x i32>
  %7 = getelementptr inbounds i16, i16* %1, i64 %index
  %8 = bitcast i16* %7 to <16 x i16>*
  %wide.load14 = load <16 x i16>, <16 x i16>* %8, align 2
  %9 = zext <16 x i16> %wide.load14 to <16 x i32>
  %10 = mul nsw <16 x i32> %9, %6
  %11 = add nsw <16 x i32> %10, %vec.phi
  %index.next = add i64 %index, 16
  %12 = icmp eq i64 %index.next, %3
  br i1 %12, label %middle.block, label %vector.body

middle.block:
  %rdx.shuf1 = shufflevector <16 x i32> %11, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx1 = add <16 x i32> %11, %rdx.shuf1
  %rdx.shuf = shufflevector <16 x i32> %bin.rdx1, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx = add <16 x i32> %bin.rdx1, %rdx.shuf
  %rdx.shuf15 = shufflevector <16 x i32> %bin.rdx, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx16 = add <16 x i32> %bin.rdx, %rdx.shuf15
  %rdx.shuf17 = shufflevector <16 x i32> %bin.rdx16, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx18 = add <16 x i32> %bin.rdx16, %rdx.shuf17
  %13 = extractelement <16 x i32> %bin.rdx18, i32 0
  ret i32 %13
}

define i32 @_Z9test_charPcS_i(i8* nocapture readonly, i8* nocapture readonly, i32) local_unnamed_addr #0 {
; SSE2-LABEL: _Z9test_charPcS_i:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    movl %edx, %eax
; SSE2-NEXT:    pxor %xmm0, %xmm0
; SSE2-NEXT:    xorl %ecx, %ecx
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    pxor %xmm3, %xmm3
; SSE2-NEXT:    pxor %xmm2, %xmm2
; SSE2-NEXT:    .p2align 4, 0x90
; SSE2-NEXT:  .LBB4_1: # %vector.body
; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
; SSE2-NEXT:    movq {{.*#+}} xmm4 = mem[0],zero
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm4
; SSE2-NEXT:    movq {{.*#+}} xmm5 = mem[0],zero
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm5
; SSE2-NEXT:    pmullw %xmm4, %xmm5
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; SSE2-NEXT:    psrad $16, %xmm4
; SSE2-NEXT:    paddd %xmm4, %xmm0
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
; SSE2-NEXT:    psrad $16, %xmm4
; SSE2-NEXT:    paddd %xmm4, %xmm1
; SSE2-NEXT:    movq {{.*#+}} xmm4 = mem[0],zero
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm4
; SSE2-NEXT:    movq {{.*#+}} xmm5 = mem[0],zero
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm5
; SSE2-NEXT:    pmullw %xmm4, %xmm5
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; SSE2-NEXT:    psrad $16, %xmm4
; SSE2-NEXT:    paddd %xmm4, %xmm3
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
; SSE2-NEXT:    psrad $16, %xmm4
; SSE2-NEXT:    paddd %xmm4, %xmm2
; SSE2-NEXT:    addq $16, %rcx
; SSE2-NEXT:    cmpq %rcx, %rax
; SSE2-NEXT:    jne .LBB4_1
; SSE2-NEXT:  # %bb.2: # %middle.block
; SSE2-NEXT:    paddd %xmm3, %xmm0
; SSE2-NEXT:    paddd %xmm2, %xmm1
; SSE2-NEXT:    paddd %xmm0, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE2-NEXT:    paddd %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT:    paddd %xmm0, %xmm1
; SSE2-NEXT:    movd %xmm1, %eax
; SSE2-NEXT:    retq
;
; AVX1-LABEL: _Z9test_charPcS_i:
; AVX1:       # %bb.0: # %entry
; AVX1-NEXT:    movl %edx, %eax
; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX1-NEXT:    xorl %ecx, %ecx
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    .p2align 4, 0x90
; AVX1-NEXT:  .LBB4_1: # %vector.body
; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX1-NEXT:    vpmovsxbw 8(%rdi,%rcx), %xmm2
; AVX1-NEXT:    vpmovsxbw 8(%rsi,%rcx), %xmm3
; AVX1-NEXT:    vpmaddwd %xmm2, %xmm3, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
; AVX1-NEXT:    vpmovsxbw (%rdi,%rcx), %xmm3
; AVX1-NEXT:    vpmovsxbw (%rsi,%rcx), %xmm4
; AVX1-NEXT:    vpmaddwd %xmm3, %xmm4, %xmm3
; AVX1-NEXT:    vpaddd %xmm1, %xmm3, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT:    addq $16, %rcx
; AVX1-NEXT:    cmpq %rcx, %rax
; AVX1-NEXT:    jne .LBB4_1
; AVX1-NEXT:  # %bb.2: # %middle.block
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
; AVX1-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vmovd %xmm0, %eax
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: _Z9test_charPcS_i:
; AVX2:       # %bb.0: # %entry
; AVX2-NEXT:    movl %edx, %eax
; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT:    xorl %ecx, %ecx
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    .p2align 4, 0x90
; AVX2-NEXT:  .LBB4_1: # %vector.body
; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX2-NEXT:    vpmovsxbw (%rdi,%rcx), %ymm2
; AVX2-NEXT:    vpmovsxbw (%rsi,%rcx), %ymm3
; AVX2-NEXT:    vpmaddwd %ymm2, %ymm3, %ymm2
; AVX2-NEXT:    vpaddd %ymm1, %ymm2, %ymm1
; AVX2-NEXT:    addq $16, %rcx
; AVX2-NEXT:    cmpq %rcx, %rax
; AVX2-NEXT:    jne .LBB4_1
; AVX2-NEXT:  # %bb.2: # %middle.block
; AVX2-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: _Z9test_charPcS_i:
; AVX512:       # %bb.0: # %entry
; AVX512-NEXT:    movl %edx, %eax
; AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX512-NEXT:    xorl %ecx, %ecx
; AVX512-NEXT:    .p2align 4, 0x90
; AVX512-NEXT:  .LBB4_1: # %vector.body
; AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX512-NEXT:    vpmovsxbw (%rdi,%rcx), %ymm1
; AVX512-NEXT:    vpmovsxbw (%rsi,%rcx), %ymm2
; AVX512-NEXT:    vpmaddwd %ymm1, %ymm2, %ymm1
; AVX512-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
; AVX512-NEXT:    addq $16, %rcx
; AVX512-NEXT:    cmpq %rcx, %rax
; AVX512-NEXT:    jne .LBB4_1
; AVX512-NEXT:  # %bb.2: # %middle.block
; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vmovd %xmm0, %eax
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
entry:
  %3 = zext i32 %2 to i64
  br label %vector.body

vector.body:
  %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
  %vec.phi = phi <16 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
  %4 = getelementptr inbounds i8, i8* %0, i64 %index
  %5 = bitcast i8* %4 to <16 x i8>*
  %wide.load = load <16 x i8>, <16 x i8>* %5, align 1
  %6 = sext <16 x i8> %wide.load to <16 x i32>
  %7 = getelementptr inbounds i8, i8* %1, i64 %index
  %8 = bitcast i8* %7 to <16 x i8>*
  %wide.load14 = load <16 x i8>, <16 x i8>* %8, align 1
  %9 = sext <16 x i8> %wide.load14 to <16 x i32>
  %10 = mul nsw <16 x i32> %9, %6
  %11 = add nsw <16 x i32> %10, %vec.phi
  %index.next = add i64 %index, 16
  %12 = icmp eq i64 %index.next, %3
  br i1 %12, label %middle.block, label %vector.body

middle.block:
  %rdx.shuf = shufflevector <16 x i32> %11, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx = add <16 x i32> %11, %rdx.shuf
  %rdx.shuf15 = shufflevector <16 x i32> %bin.rdx, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx16 = add <16 x i32> %bin.rdx, %rdx.shuf15
  %rdx.shuf17 = shufflevector <16 x i32> %bin.rdx16, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx18 = add <16 x i32> %bin.rdx16, %rdx.shuf17
  %rdx.shuf19 = shufflevector <16 x i32> %bin.rdx18, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx20 = add <16 x i32> %bin.rdx18, %rdx.shuf19
  %13 = extractelement <16 x i32> %bin.rdx20, i32 0
  ret i32 %13
}

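; 32-element version of the i8 test above.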
define i32 @_Z9test_charPcS_i_512(i8* nocapture readonly, i8* nocapture readonly, i32) local_unnamed_addr #0 {
; SSE2-LABEL: _Z9test_charPcS_i_512:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    movl %edx, %eax
; SSE2-NEXT:    pxor %xmm8, %xmm8
; SSE2-NEXT:    xorl %ecx, %ecx
; SSE2-NEXT:    pxor %xmm3, %xmm3
; SSE2-NEXT:    pxor %xmm9, %xmm9
; SSE2-NEXT:    pxor %xmm10, %xmm10
; SSE2-NEXT:    pxor %xmm4, %xmm4
; SSE2-NEXT:    pxor %xmm6, %xmm6
; SSE2-NEXT:    pxor %xmm5, %xmm5
; SSE2-NEXT:    pxor %xmm7, %xmm7
; SSE2-NEXT:    .p2align 4, 0x90
; SSE2-NEXT:  .LBB5_1: # %vector.body
; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT:    psraw $8, %xmm1
; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm0
; SSE2-NEXT:    pmullw %xmm1, %xmm0
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT:    psrad $16, %xmm1
; SSE2-NEXT:    paddd %xmm1, %xmm7
; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm1
; SSE2-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm2
; SSE2-NEXT:    pmullw %xmm1, %xmm2
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE2-NEXT:    psrad $16, %xmm1
; SSE2-NEXT:    paddd %xmm1, %xmm8
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE2-NEXT:    psrad $16, %xmm1
; SSE2-NEXT:    paddd %xmm1, %xmm3
; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm1
; SSE2-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm2
; SSE2-NEXT:    pmullw %xmm1, %xmm2
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE2-NEXT:    psrad $16, %xmm1
; SSE2-NEXT:    paddd %xmm1, %xmm9
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE2-NEXT:    psrad $16, %xmm1
; SSE2-NEXT:    paddd %xmm1, %xmm10
; SSE2-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm1
; SSE2-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm2
; SSE2-NEXT:    pmullw %xmm1, %xmm2
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE2-NEXT:    psrad $16, %xmm1
; SSE2-NEXT:    paddd %xmm1, %xmm4
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE2-NEXT:    psrad $16, %xmm1
; SSE2-NEXT:    paddd %xmm1, %xmm6
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    psrad $16, %xmm0
; SSE2-NEXT:    paddd %xmm0, %xmm5
; SSE2-NEXT:    addq $32, %rcx
; SSE2-NEXT:    cmpq %rcx, %rax
; SSE2-NEXT:    jne .LBB5_1
; SSE2-NEXT:  # %bb.2: # %middle.block
; SSE2-NEXT:    paddd %xmm6, %xmm3
; SSE2-NEXT:    paddd %xmm7, %xmm10
; SSE2-NEXT:    paddd %xmm3, %xmm10
; SSE2-NEXT:    paddd %xmm4, %xmm8
; SSE2-NEXT:    paddd %xmm5, %xmm9
; SSE2-NEXT:    paddd %xmm10, %xmm9
; SSE2-NEXT:    paddd %xmm8, %xmm9
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm9[2,3,0,1]
; SSE2-NEXT:    paddd %xmm9, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT:    paddd %xmm0, %xmm1
; SSE2-NEXT:    movd %xmm1, %eax
; SSE2-NEXT:    retq
;
; AVX1-LABEL: _Z9test_charPcS_i_512:
; AVX1:       # %bb.0: # %entry
; AVX1-NEXT:    movl %edx, %eax
; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX1-NEXT:    xorl %ecx, %ecx
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT:    .p2align 4, 0x90
; AVX1-NEXT:  .LBB5_1: # %vector.body
; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX1-NEXT:    vpmovsxbw 24(%rdi,%rcx), %xmm4
; AVX1-NEXT:    vpmovsxbw 16(%rdi,%rcx), %xmm5
; AVX1-NEXT:    vpmovsxbw (%rdi,%rcx), %xmm6
; AVX1-NEXT:    vpmovsxbw 8(%rdi,%rcx), %xmm8
; AVX1-NEXT:    vpmovsxbw 24(%rsi,%rcx), %xmm7
; AVX1-NEXT:    vpmaddwd %xmm4, %xmm7, %xmm4
; AVX1-NEXT:    vpmovsxbw 16(%rsi,%rcx), %xmm7
; AVX1-NEXT:    vpmaddwd %xmm5, %xmm7, %xmm5
; AVX1-NEXT:    vpmovsxbw (%rsi,%rcx), %xmm7
; AVX1-NEXT:    vpmaddwd %xmm6, %xmm7, %xmm6
; AVX1-NEXT:    vpmovsxbw 8(%rsi,%rcx), %xmm7
; AVX1-NEXT:    vpmaddwd %xmm8, %xmm7, %xmm7
; AVX1-NEXT:    vpaddd %xmm2, %xmm7, %xmm7
; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
; AVX1-NEXT:    vpaddd %xmm0, %xmm6, %xmm6
; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
; AVX1-NEXT:    vpaddd %xmm1, %xmm5, %xmm5
; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7]
; AVX1-NEXT:    vpaddd %xmm3, %xmm4, %xmm4
; AVX1-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; AVX1-NEXT:    addq $32, %rcx
; AVX1-NEXT:    cmpq %rcx, %rax
; AVX1-NEXT:    jne .LBB5_1
; AVX1-NEXT:  # %bb.2: # %middle.block
; AVX1-NEXT:    vpaddd %xmm3, %xmm2, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
; AVX1-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
; AVX1-NEXT:    vpaddd %xmm2, %xmm6, %xmm2
; AVX1-NEXT:    vpaddd %xmm2, %xmm5, %xmm2
; AVX1-NEXT:    vpaddd %xmm4, %xmm1, %xmm1
; AVX1-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vmovd %xmm0, %eax
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: _Z9test_charPcS_i_512:
; AVX2:       # %bb.0: # %entry
; AVX2-NEXT:    movl %edx, %eax
; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT:    xorl %ecx, %ecx
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT:    .p2align 4, 0x90
; AVX2-NEXT:  .LBB5_1: # %vector.body
; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX2-NEXT:    vpmovsxbw (%rdi,%rcx), %xmm4
; AVX2-NEXT:    vpmovsxbw 8(%rdi,%rcx), %xmm5
; AVX2-NEXT:    vpmovsxbw 16(%rdi,%rcx), %xmm6
; AVX2-NEXT:    vpmovsxbw 24(%rdi,%rcx), %xmm8
; AVX2-NEXT:    vpmovsxbw (%rsi,%rcx), %xmm7
; AVX2-NEXT:    vpmaddwd %xmm4, %xmm7, %xmm4
; AVX2-NEXT:    vpmovsxbw 8(%rsi,%rcx), %xmm7
; AVX2-NEXT:    vpmaddwd %xmm5, %xmm7, %xmm5
; AVX2-NEXT:    vpmovsxbw 16(%rsi,%rcx), %xmm7
; AVX2-NEXT:    vpmaddwd %xmm6, %xmm7, %xmm6
; AVX2-NEXT:    vpmovsxbw 24(%rsi,%rcx), %xmm7
; AVX2-NEXT:    vpmaddwd %xmm8, %xmm7, %xmm7
; AVX2-NEXT:    vpaddd %ymm3, %ymm7, %ymm3
; AVX2-NEXT:    vpaddd %ymm2, %ymm6, %ymm2
; AVX2-NEXT:    vpaddd %ymm1, %ymm5, %ymm1
; AVX2-NEXT:    vpaddd %ymm0, %ymm4, %ymm0
; AVX2-NEXT:    addq $32, %rcx
; AVX2-NEXT:    cmpq %rcx, %rax
; AVX2-NEXT:    jne .LBB5_1
; AVX2-NEXT:  # %bb.2: # %middle.block
; AVX2-NEXT:    vpaddd %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vpaddd %ymm3, %ymm1, %ymm1
; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
; AVX2-NEXT:    vmovd %xmm0, %eax
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: _Z9test_charPcS_i_512:
; AVX512F:       # %bb.0: # %entry
; AVX512F-NEXT:    movl %edx, %eax
; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX512F-NEXT:    xorl %ecx, %ecx
; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT:    .p2align 4, 0x90
; AVX512F-NEXT:  .LBB5_1: # %vector.body
; AVX512F-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX512F-NEXT:    vpmovsxbw (%rdi,%rcx), %ymm2
; AVX512F-NEXT:    vpmovsxbw 16(%rdi,%rcx), %ymm3
; AVX512F-NEXT:    vpmovsxbw (%rsi,%rcx), %ymm4
; AVX512F-NEXT:    vpmaddwd %ymm2, %ymm4, %ymm2
; AVX512F-NEXT:    vpmovsxbw 16(%rsi,%rcx), %ymm4
; AVX512F-NEXT:    vpmaddwd %ymm3, %ymm4, %ymm3
; AVX512F-NEXT:    vpaddd %zmm1, %zmm3, %zmm1
; AVX512F-NEXT:    vpaddd %zmm0, %zmm2, %zmm0
; AVX512F-NEXT:    addq $32, %rcx
; AVX512F-NEXT:    cmpq %rcx, %rax
; AVX512F-NEXT:    jne .LBB5_1
; AVX512F-NEXT:  # %bb.2: # %middle.block
; AVX512F-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512F-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512F-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT:    vmovd %xmm0, %eax
; AVX512F-NEXT:    vzeroupper
; AVX512F-NEXT:    retq
;
; AVX512BW-LABEL: _Z9test_charPcS_i_512:
; AVX512BW:       # %bb.0: # %entry
; AVX512BW-NEXT:    movl %edx, %eax
; AVX512BW-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX512BW-NEXT:    xorl %ecx, %ecx
; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT:    .p2align 4, 0x90
; AVX512BW-NEXT:  .LBB5_1: # %vector.body
; AVX512BW-NEXT:    # =>This Inner Loop Header: Depth=1
; AVX512BW-NEXT:    vpmovsxbw (%rdi,%rcx), %zmm2
; AVX512BW-NEXT:    vpmovsxbw (%rsi,%rcx), %zmm3
; AVX512BW-NEXT:    vpmaddwd %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT:    vpaddd %zmm1, %zmm2, %zmm1
; AVX512BW-NEXT:    addq $32, %rcx
; AVX512BW-NEXT:    cmpq %rcx, %rax
; AVX512BW-NEXT:    jne .LBB5_1
; AVX512BW-NEXT:  # %bb.2: # %middle.block
; AVX512BW-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
; AVX512BW-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512BW-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX512BW-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; AVX512BW-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    vmovd %xmm0, %eax
; AVX512BW-NEXT:    vzeroupper
; AVX512BW-NEXT:    retq
entry:
  %3 = zext i32 %2 to i64
  br label %vector.body

vector.body:
  %index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
  %vec.phi = phi <32 x i32> [ %11, %vector.body ], [ zeroinitializer, %entry ]
  %4 = getelementptr inbounds i8, i8* %0, i64 %index
  %5 = bitcast i8* %4 to <32 x i8>*
  %wide.load = load <32 x i8>, <32 x i8>* %5, align 1
  %6 = sext <32 x i8> %wide.load to <32 x i32>
  %7 = getelementptr inbounds i8, i8* %1, i64 %index
  %8 = bitcast i8* %7 to <32 x i8>*
  %wide.load14 = load <32 x i8>, <32 x i8>* %8, align 1
  %9 = sext <32 x i8> %wide.load14 to <32 x i32>
  %10 = mul nsw <32 x i32> %9, %6
  %11 = add nsw <32 x i32> %10, %vec.phi
  %index.next = add i64 %index, 32
  %12 = icmp eq i64 %index.next, %3
  br i1 %12, label %middle.block, label %vector.body

middle.block:
  %rdx.shuf1 = shufflevector <32 x i32> %11, <32 x i32> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx1 = add <32 x i32> %11, %rdx.shuf1
  %rdx.shuf = shufflevector <32 x i32> %bin.rdx1, <32 x i32> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx = add <32 x i32> %bin.rdx1, %rdx.shuf
  %rdx.shuf15 = shufflevector <32 x i32> %bin.rdx, <32 x i32> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx32 = add <32 x i32> %bin.rdx, %rdx.shuf15
  %rdx.shuf17 = shufflevector <32 x i32> %bin.rdx32, <32 x i32> undef, <32 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx18 = add <32 x i32> %bin.rdx32, %rdx.shuf17
  %rdx.shuf19 = shufflevector <32 x i32> %bin.rdx18, <32 x i32> undef, <32 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx20 = add <32 x i32> %bin.rdx18, %rdx.shuf19
  %13 = extractelement <32 x i32> %bin.rdx20, i32 0
  ret i32 %13
}

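; Basic form of the pattern: sign-extend to i32, multiply, then add the odd
; and even products pairwise.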
define <4 x i32> @pmaddwd_8(<8 x i16> %A, <8 x i16> %B) {
; SSE2-LABEL: pmaddwd_8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pmaddwd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: pmaddwd_8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = sext <8 x i16> %A to <8 x i32>
  %b = sext <8 x i16> %B to <8 x i32>
  %m = mul nsw <8 x i32> %a, %b
  %odd = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %even = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %ret = add <4 x i32> %odd, %even
  ret <4 x i32> %ret
}

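; Same as above with the operands of the final add commuted; the pattern
; should still be recognized.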
define <4 x i32> @pmaddwd_8_swapped(<8 x i16> %A, <8 x i16> %B) {
; SSE2-LABEL: pmaddwd_8_swapped:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pmaddwd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: pmaddwd_8_swapped:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = sext <8 x i16> %A to <8 x i32>
  %b = sext <8 x i16> %B to <8 x i32>
  %m = mul nsw <8 x i32> %a, %b
  %odd = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %even = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %ret = add <4 x i32> %even, %odd
  ret <4 x i32> %ret
}

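; Only the low eight of the sixteen products feed the final add, so the
; multiply is wider than the reduction.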
define <4 x i32> @larger_mul(<16 x i16> %A, <16 x i16> %B) {
; SSE2-LABEL: larger_mul:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    pmulhw %xmm2, %xmm1
; SSE2-NEXT:    pmullw %xmm2, %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; SSE2-NEXT:    paddd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX1-LABEL: larger_mul:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm2
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm0
; AVX1-NEXT:    vpackssdw %xmm0, %xmm2, %xmm0
; AVX1-NEXT:    vpmovsxwd %xmm1, %xmm2
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; AVX1-NEXT:    vpmovsxwd %xmm1, %xmm1
; AVX1-NEXT:    vpackssdw %xmm1, %xmm2, %xmm1
; AVX1-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: larger_mul:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT:    vpmovsxwd %xmm1, %ymm1
; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX512-LABEL: larger_mul:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpmovsxwd %ymm0, %zmm0
; AVX512-NEXT:    vpmovsxwd %ymm1, %zmm1
; AVX512-NEXT:    vpmulld %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vpextrd $2, %xmm0, %eax
; AVX512-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm1
; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm2
; AVX512-NEXT:    vmovd %xmm2, %eax
; AVX512-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
; AVX512-NEXT:    vpextrd $2, %xmm2, %eax
; AVX512-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm1
; AVX512-NEXT:    vpextrd $3, %xmm0, %eax
; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX512-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
; AVX512-NEXT:    vpextrd $1, %xmm2, %eax
; AVX512-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
; AVX512-NEXT:    vpextrd $3, %xmm2, %eax
; AVX512-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
; AVX512-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %a = sext <16 x i16> %A to <16 x i32>
  %b = sext <16 x i16> %B to <16 x i32>
  %m = mul nsw <16 x i32> %a, %b
  %odd = shufflevector <16 x i32> %m, <16 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %even = shufflevector <16 x i32> %m, <16 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %ret = add <4 x i32> %odd, %even
  ret <4 x i32> %ret
}

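; 256-bit version of the basic pattern.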
define <8 x i32> @pmaddwd_16(<16 x i16> %A, <16 x i16> %B) {
; SSE2-LABEL: pmaddwd_16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pmaddwd %xmm2, %xmm0
; SSE2-NEXT:    pmaddwd %xmm3, %xmm1
; SSE2-NEXT:    retq
;
; AVX1-LABEL: pmaddwd_16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT:    vpmaddwd %xmm3, %xmm2, %xmm2
; AVX1-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX256-LABEL: pmaddwd_16:
; AVX256:       # %bb.0:
; AVX256-NEXT:    vpmaddwd %ymm1, %ymm0, %ymm0
; AVX256-NEXT:    retq
  %a = sext <16 x i16> %A to <16 x i32>
  %b = sext <16 x i16> %B to <16 x i32>
  %m = mul nsw <16 x i32> %a, %b
  %odd = shufflevector <16 x i32> %m, <16 x i32> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  %even = shufflevector <16 x i32> %m, <16 x i32> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %ret = add <8 x i32> %odd, %even
  ret <8 x i32> %ret
}

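; 512-bit version of the basic pattern.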
define <16 x i32> @pmaddwd_32(<32 x i16> %A, <32 x i16> %B) {
; SSE2-LABEL: pmaddwd_32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pmaddwd %xmm4, %xmm0
; SSE2-NEXT:    pmaddwd %xmm5, %xmm1
; SSE2-NEXT:    pmaddwd %xmm6, %xmm2
; SSE2-NEXT:    pmaddwd %xmm7, %xmm3
; SSE2-NEXT:    retq
;
; AVX1-LABEL: pmaddwd_32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm6
; AVX1-NEXT:    vpmaddwd %xmm6, %xmm4, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm6
; AVX1-NEXT:    vpmaddwd %xmm6, %xmm5, %xmm5
; AVX1-NEXT:    vpmaddwd %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm0
; AVX1-NEXT:    vpmaddwd %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
; AVX1-NEXT:    retq
;
; AVX2-LABEL: pmaddwd_32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmaddwd %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vpmaddwd %ymm3, %ymm1, %ymm1
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: pmaddwd_32:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vpmaddwd %ymm3, %ymm1, %ymm1
; AVX512F-NEXT:    vpmaddwd %ymm2, %ymm0, %ymm0
; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT:    retq
;
; AVX512BW-LABEL: pmaddwd_32:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpmaddwd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %a = sext <32 x i16> %A to <32 x i32>
  %b = sext <32 x i16> %B to <32 x i32>
  %m = mul nsw <32 x i32> %a, %b
  %odd = shufflevector <32 x i32> %m, <32 x i32> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
  %even = shufflevector <32 x i32> %m, <32 x i32> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
  %ret = add <16 x i32> %odd, %even
  ret <16 x i32> %ret
}

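; One multiply operand is a build vector of constants that all fit in i16, so
; it should be folded into pmaddwd as a constant-pool operand.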
define <4 x i32> @pmaddwd_const(<8 x i16> %A) {
; SSE2-LABEL: pmaddwd_const:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pmaddwd {{.*}}(%rip), %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: pmaddwd_const:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmaddwd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = sext <8 x i16> %A to <8 x i32>
  %m = mul nsw <8 x i32> %a, <i32 32767, i32 -32768, i32 0, i32 0, i32 1, i32 7, i32 42, i32 32>
  %odd = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %even = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %ret = add <4 x i32> %odd, %even
  ret <4 x i32> %ret
}

; Do not select unsigned i16 multiplication
define <4 x i32> @pmaddwd_negative1(<8 x i16> %A, <8 x i16> %B) {
; SSE2-LABEL: pmaddwd_negative1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    pmulhuw %xmm1, %xmm2
; SSE2-NEXT:    pmullw %xmm1, %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE2-NEXT:    paddd %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; AVX1-LABEL: pmaddwd_negative1:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
; AVX1-NEXT:    vpmulld %xmm3, %xmm2, %xmm2
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX1-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vphaddd %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX256-LABEL: pmaddwd_negative1:
; AVX256:       # %bb.0:
; AVX256-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX256-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX256-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
; AVX256-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX256-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
; AVX256-NEXT:    vzeroupper
; AVX256-NEXT:    retq
  %a = zext <8 x i16> %A to <8 x i32>
  %b = zext <8 x i16> %B to <8 x i32>
  %m = mul nuw <8 x i32> %a, %b
  %odd = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %even = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %ret = add <4 x i32> %odd, %even
  ret <4 x i32> %ret
}

; Do not select if constant is too large
define <4 x i32> @pmaddwd_negative2(<8 x i16> %A) {
; SSE2-LABEL: pmaddwd_negative2:
; SSE2:       # %bb.0:
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT:    psrad $16, %xmm1
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psrad $16, %xmm0
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [1,7,42,32]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm3, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [32768,4294934528,0,0]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm3, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,2]
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,3],xmm4[1,3]
; SSE2-NEXT:    paddd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX1-LABEL: pmaddwd_negative2:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; AVX1-NEXT:    vpmovsxwd %xmm1, %xmm1
; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm0
; AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX256-LABEL: pmaddwd_negative2:
; AVX256:       # %bb.0:
; AVX256-NEXT:    vpmovsxwd %xmm0, %ymm0
; AVX256-NEXT:    vpmulld {{.*}}(%rip), %ymm0, %ymm0
; AVX256-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX256-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
; AVX256-NEXT:    vzeroupper
; AVX256-NEXT:    retq
  %a = sext <8 x i16> %A to <8 x i32>
  %m = mul nsw <8 x i32> %a, <i32 32768, i32 -32768, i32 0, i32 0, i32 1, i32 7, i32 42, i32 32>
  %odd = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %even = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %ret = add <4 x i32> %odd, %even
  ret <4 x i32> %ret
}

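; The shuffles may pull the products out of order, as long as lane k of the
; final add still sums the pair m[2k]+m[2k+1]; the masks in the tests below
; are one such jumbled pairing.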
define <4 x i32> @jumbled_indices4(<8 x i16> %A, <8 x i16> %B) {
; SSE2-LABEL: jumbled_indices4:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pmaddwd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: jumbled_indices4:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %exta = sext <8 x i16> %A to <8 x i32>
  %extb = sext <8 x i16> %B to <8 x i32>
  %m = mul <8 x i32> %exta, %extb
  %sa = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 0, i32 3, i32 5, i32 6>
  %sb = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 1, i32 2, i32 4, i32 7>
  %a = add <4 x i32> %sa, %sb
  ret <4 x i32> %a
}

define <8 x i32> @jumbled_indices8(<16 x i16> %A, <16 x i16> %B) {
; SSE2-LABEL: jumbled_indices8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pmaddwd %xmm2, %xmm0
; SSE2-NEXT:    pmaddwd %xmm3, %xmm1
; SSE2-NEXT:    retq
;
; AVX1-LABEL: jumbled_indices8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT:    vpmaddwd %xmm3, %xmm2, %xmm2
; AVX1-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX256-LABEL: jumbled_indices8:
; AVX256:       # %bb.0:
; AVX256-NEXT:    vpmaddwd %ymm1, %ymm0, %ymm0
; AVX256-NEXT:    retq
  %exta = sext <16 x i16> %A to <16 x i32>
  %extb = sext <16 x i16> %B to <16 x i32>
  %m = mul <16 x i32> %exta, %extb
  %sa = shufflevector <16 x i32> %m, <16 x i32> undef, <8 x i32> <i32 0, i32 3, i32 5, i32 6, i32 8, i32 11, i32 13, i32 14>
  %sb = shufflevector <16 x i32> %m, <16 x i32> undef, <8 x i32> <i32 1, i32 2, i32 4, i32 7, i32 9, i32 10, i32 12, i32 15>
  %a = add <8 x i32> %sa, %sb
  ret <8 x i32> %a
}

define <16 x i32> @jumbled_indices16(<32 x i16> %A, <32 x i16> %B) {
; SSE2-LABEL: jumbled_indices16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pmaddwd %xmm4, %xmm0
; SSE2-NEXT:    pmaddwd %xmm5, %xmm1
; SSE2-NEXT:    pmaddwd %xmm6, %xmm2
; SSE2-NEXT:    pmaddwd %xmm7, %xmm3
; SSE2-NEXT:    retq
;
; AVX1-LABEL: jumbled_indices16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm6
; AVX1-NEXT:    vpmaddwd %xmm6, %xmm4, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm6
; AVX1-NEXT:    vpmaddwd %xmm6, %xmm5, %xmm5
; AVX1-NEXT:    vpmaddwd %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm0
; AVX1-NEXT:    vpmaddwd %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
; AVX1-NEXT:    retq
;
; AVX2-LABEL: jumbled_indices16:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmaddwd %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vpmaddwd %ymm3, %ymm1, %ymm1
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: jumbled_indices16:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vpmaddwd %ymm3, %ymm1, %ymm1
; AVX512F-NEXT:    vpmaddwd %ymm2, %ymm0, %ymm0
; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT:    retq
;
; AVX512BW-LABEL: jumbled_indices16:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpmaddwd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT:    retq
  %exta = sext <32 x i16> %A to <32 x i32>
  %extb = sext <32 x i16> %B to <32 x i32>
  %m = mul <32 x i32> %exta, %extb
  %sa = shufflevector <32 x i32> %m, <32 x i32> undef, <16 x i32> <i32 0, i32 3, i32 5, i32 6, i32 8, i32 11, i32 13, i32 14, i32 16, i32 19, i32 21, i32 22, i32 24, i32 27, i32 29, i32 30>
  %sb = shufflevector <32 x i32> %m, <32 x i32> undef, <16 x i32> <i32 1, i32 2, i32 4, i32 7, i32 9, i32 10, i32 12, i32 15, i32 17, i32 18, i32 20, i32 23, i32 25, i32 26, i32 28, i32 31>
  %a = add <16 x i32> %sa, %sb
  ret <16 x i32> %a
}

define <32 x i32> @jumbled_indices32(<64 x i16> %A, <64 x i16> %B) {
; SSE2-LABEL: jumbled_indices32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pmaddwd {{[0-9]+}}(%rsp), %xmm0
; SSE2-NEXT:    pmaddwd {{[0-9]+}}(%rsp), %xmm1
; SSE2-NEXT:    pmaddwd {{[0-9]+}}(%rsp), %xmm2
; SSE2-NEXT:    pmaddwd {{[0-9]+}}(%rsp), %xmm3
; SSE2-NEXT:    pmaddwd {{[0-9]+}}(%rsp), %xmm4
; SSE2-NEXT:    pmaddwd {{[0-9]+}}(%rsp), %xmm5
; SSE2-NEXT:    pmaddwd {{[0-9]+}}(%rsp), %xmm6
; SSE2-NEXT:    pmaddwd {{[0-9]+}}(%rsp), %xmm7
; SSE2-NEXT:    movdqa %xmm7, 112(%rdi)
; SSE2-NEXT:    movdqa %xmm6, 96(%rdi)
; SSE2-NEXT:    movdqa %xmm5, 80(%rdi)
; SSE2-NEXT:    movdqa %xmm4, 64(%rdi)
; SSE2-NEXT:    movdqa %xmm3, 48(%rdi)
; SSE2-NEXT:    movdqa %xmm2, 32(%rdi)
; SSE2-NEXT:    movdqa %xmm1, 16(%rdi)
; SSE2-NEXT:    movdqa %xmm0, (%rdi)
; SSE2-NEXT:    movq %rdi, %rax
; SSE2-NEXT:    retq
;
; AVX1-LABEL: jumbled_indices32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm8
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm9
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm10
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm11
; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm12
; AVX1-NEXT:    vpmaddwd %xmm12, %xmm8, %xmm8
; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm12
; AVX1-NEXT:    vpmaddwd %xmm12, %xmm9, %xmm9
; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm12
; AVX1-NEXT:    vpmaddwd %xmm12, %xmm10, %xmm10
; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm12
; AVX1-NEXT:    vpmaddwd %xmm12, %xmm11, %xmm11
; AVX1-NEXT:    vpmaddwd %xmm4, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm0
; AVX1-NEXT:    vpmaddwd %xmm5, %xmm1, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm1, %ymm1
; AVX1-NEXT:    vpmaddwd %xmm6, %xmm2, %xmm2
; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm2, %ymm2
; AVX1-NEXT:    vpmaddwd %xmm7, %xmm3, %xmm3
; AVX1-NEXT:    vinsertf128 $1, %xmm8, %ymm3, %ymm3
; AVX1-NEXT:    retq
;
; AVX2-LABEL: jumbled_indices32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmaddwd %ymm4, %ymm0, %ymm0
; AVX2-NEXT:    vpmaddwd %ymm5, %ymm1, %ymm1
; AVX2-NEXT:    vpmaddwd %ymm6, %ymm2, %ymm2
; AVX2-NEXT:    vpmaddwd %ymm7, %ymm3, %ymm3
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: jumbled_indices32:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vpmaddwd %ymm5, %ymm1, %ymm1
; AVX512F-NEXT:    vpmaddwd %ymm4, %ymm0, %ymm0
; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT:    vpmaddwd %ymm7, %ymm3, %ymm1
; AVX512F-NEXT:    vpmaddwd %ymm6, %ymm2, %ymm2
; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
; AVX512F-NEXT:    retq
;
; AVX512BW-LABEL: jumbled_indices32:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpmaddwd %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT:    vpmaddwd %zmm3, %zmm1, %zmm1
; AVX512BW-NEXT:    retq
  %exta = sext <64 x i16> %A to <64 x i32>
  %extb = sext <64 x i16> %B to <64 x i32>
  %m = mul <64 x i32> %exta, %extb
  %sa = shufflevector <64 x i32> %m, <64 x i32> undef, <32 x i32> <i32 0, i32 3, i32 5, i32 6, i32 8, i32 11, i32 13, i32 14, i32 16, i32 19, i32 21, i32 22, i32 24, i32 27, i32 29, i32 30, i32 32, i32 35, i32 37, i32 38, i32 40, i32 43, i32 45, i32 46, i32 48, i32 51, i32 53, i32 54, i32 56, i32 59, i32 61, i32 62>
  %sb = shufflevector <64 x i32> %m, <64 x i32> undef, <32 x i32> <i32 1, i32 2, i32 4, i32 7, i32 9, i32 10, i32 12, i32 15, i32 17, i32 18, i32 20, i32 23, i32 25, i32 26, i32 28, i32 31, i32 33, i32 34, i32 36, i32 39, i32 41, i32 42, i32 44, i32 47, i32 49, i32 50, i32 52, i32 55, i32 57, i32 58, i32 60, i32 63>
  %a = add <32 x i32> %sa, %sb
  ret <32 x i32> %a
}