; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.8.0 -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.8.0 -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2

define void @test1(i16* nocapture %head) nounwind {
; SSE-LABEL: test1:
; SSE:       ## BB#0: ## %vector.ph
; SSE-NEXT:    movdqu (%rdi), %xmm0
; SSE-NEXT:    psubusw {{.*}}(%rip), %xmm0
; SSE-NEXT:    movdqu %xmm0, (%rdi)
; SSE-NEXT:    retq
;
; AVX-LABEL: test1:
; AVX:       ## BB#0: ## %vector.ph
; AVX-NEXT:    vmovdqu (%rdi), %xmm0
; AVX-NEXT:    vpsubusw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX-NEXT:    retq
vector.ph:
  %0 = getelementptr inbounds i16, i16* %head, i64 0
  %1 = bitcast i16* %0 to <8 x i16>*
  %2 = load <8 x i16>, <8 x i16>* %1, align 2
  %3 = icmp slt <8 x i16> %2, zeroinitializer
  %4 = xor <8 x i16> %2, <i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768>
  %5 = select <8 x i1> %3, <8 x i16> %4, <8 x i16> zeroinitializer
  store <8 x i16> %5, <8 x i16>* %1, align 2
  ret void
}

define void @test2(i16* nocapture %head) nounwind {
; SSE-LABEL: test2:
; SSE:       ## BB#0: ## %vector.ph
; SSE-NEXT:    movdqu (%rdi), %xmm0
; SSE-NEXT:    psubusw {{.*}}(%rip), %xmm0
; SSE-NEXT:    movdqu %xmm0, (%rdi)
; SSE-NEXT:    retq
;
; AVX-LABEL: test2:
; AVX:       ## BB#0: ## %vector.ph
; AVX-NEXT:    vmovdqu (%rdi), %xmm0
; AVX-NEXT:    vpsubusw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX-NEXT:    retq
vector.ph:
  %0 = getelementptr inbounds i16, i16* %head, i64 0
  %1 = bitcast i16* %0 to <8 x i16>*
  %2 = load <8 x i16>, <8 x i16>* %1, align 2
  %3 = icmp ugt <8 x i16> %2, <i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766>
  %4 = add <8 x i16> %2, <i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767>
  %5 = select <8 x i1> %3, <8 x i16> %4, <8 x i16> zeroinitializer
  store <8 x i16> %5, <8 x i16>* %1, align 2
  ret void
}

define void @test3(i16* nocapture %head, i16 zeroext %w) nounwind {
; SSE-LABEL: test3:
; SSE:       ## BB#0: ## %vector.ph
; SSE-NEXT:    movd %esi, %xmm0
; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE-NEXT:    movdqu (%rdi), %xmm1
; SSE-NEXT:    psubusw %xmm0, %xmm1
; SSE-NEXT:    movdqu %xmm1, (%rdi)
; SSE-NEXT:    retq
;
; AVX1-LABEL: test3:
; AVX1:       ## BB#0: ## %vector.ph
; AVX1-NEXT:    vmovd %esi, %xmm0
; AVX1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX1-NEXT:    vmovdqu (%rdi), %xmm1
; AVX1-NEXT:    vpsubusw %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test3:
; AVX2:       ## BB#0: ## %vector.ph
; AVX2-NEXT:    vmovd %esi, %xmm0
; AVX2-NEXT:    vpbroadcastw %xmm0, %xmm0
; AVX2-NEXT:    vmovdqu (%rdi), %xmm1
; AVX2-NEXT:    vpsubusw %xmm0, %xmm1, %xmm0
; AVX2-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX2-NEXT:    retq
vector.ph:
  %0 = insertelement <8 x i16> undef, i16 %w, i32 0
  %broadcast15 = shufflevector <8 x i16> %0, <8 x i16> undef, <8 x i32> zeroinitializer
  %1 = getelementptr inbounds i16, i16* %head, i64 0
  %2 = bitcast i16* %1 to <8 x i16>*
  %3 = load <8 x i16>, <8 x i16>* %2, align 2
  %4 = icmp ult <8 x i16> %3, %broadcast15
  %5 = sub <8 x i16> %3, %broadcast15
  %6 = select <8 x i1> %4, <8 x i16> zeroinitializer, <8 x i16> %5
  store <8 x i16> %6, <8 x i16>* %2, align 2
  ret void
}
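
; Tests 4-6 repeat the three saturating-subtract patterns above on <16 x i8>
; operands; per the CHECK lines, all of them should select psubusb.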
define void @test4(i8* nocapture %head) nounwind {
; SSE-LABEL: test4:
; SSE:       ## BB#0: ## %vector.ph
; SSE-NEXT:    movdqu (%rdi), %xmm0
; SSE-NEXT:    psubusb {{.*}}(%rip), %xmm0
; SSE-NEXT:    movdqu %xmm0, (%rdi)
; SSE-NEXT:    retq
;
; AVX-LABEL: test4:
; AVX:       ## BB#0: ## %vector.ph
; AVX-NEXT:    vmovdqu (%rdi), %xmm0
; AVX-NEXT:    vpsubusb {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX-NEXT:    retq
vector.ph:
  %0 = getelementptr inbounds i8, i8* %head, i64 0
  %1 = bitcast i8* %0 to <16 x i8>*
  %2 = load <16 x i8>, <16 x i8>* %1, align 1
  %3 = icmp slt <16 x i8> %2, zeroinitializer
  %4 = xor <16 x i8> %2, <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128>
  %5 = select <16 x i1> %3, <16 x i8> %4, <16 x i8> zeroinitializer
  store <16 x i8> %5, <16 x i8>* %1, align 1
  ret void
}

define void @test5(i8* nocapture %head) nounwind {
; SSE-LABEL: test5:
; SSE:       ## BB#0: ## %vector.ph
; SSE-NEXT:    movdqu (%rdi), %xmm0
; SSE-NEXT:    psubusb {{.*}}(%rip), %xmm0
; SSE-NEXT:    movdqu %xmm0, (%rdi)
; SSE-NEXT:    retq
;
; AVX-LABEL: test5:
; AVX:       ## BB#0: ## %vector.ph
; AVX-NEXT:    vmovdqu (%rdi), %xmm0
; AVX-NEXT:    vpsubusb {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX-NEXT:    retq
vector.ph:
  %0 = getelementptr inbounds i8, i8* %head, i64 0
  %1 = bitcast i8* %0 to <16 x i8>*
  %2 = load <16 x i8>, <16 x i8>* %1, align 1
  %3 = icmp ugt <16 x i8> %2, <i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126>
  %4 = add <16 x i8> %2, <i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127>
  %5 = select <16 x i1> %3, <16 x i8> %4, <16 x i8> zeroinitializer
  store <16 x i8> %5, <16 x i8>* %1, align 1
  ret void
}

define void @test6(i8* nocapture %head, i8 zeroext %w) nounwind {
; SSE2-LABEL: test6:
; SSE2:       ## BB#0: ## %vector.ph
; SSE2-NEXT:    movd %esi, %xmm0
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE2-NEXT:    movdqu (%rdi), %xmm1
; SSE2-NEXT:    psubusb %xmm0, %xmm1
; SSE2-NEXT:    movdqu %xmm1, (%rdi)
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: test6:
; SSSE3:       ## BB#0: ## %vector.ph
; SSSE3-NEXT:    movd %esi, %xmm0
; SSSE3-NEXT:    pxor %xmm1, %xmm1
; SSSE3-NEXT:    pshufb %xmm1, %xmm0
; SSSE3-NEXT:    movdqu (%rdi), %xmm1
; SSSE3-NEXT:    psubusb %xmm0, %xmm1
; SSSE3-NEXT:    movdqu %xmm1, (%rdi)
; SSSE3-NEXT:    retq
;
; AVX1-LABEL: test6:
; AVX1:       ## BB#0: ## %vector.ph
; AVX1-NEXT:    vmovd %esi, %xmm0
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vmovdqu (%rdi), %xmm1
; AVX1-NEXT:    vpsubusb %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test6:
; AVX2:       ## BB#0: ## %vector.ph
; AVX2-NEXT:    vmovd %esi, %xmm0
; AVX2-NEXT:    vpbroadcastb %xmm0, %xmm0
; AVX2-NEXT:    vmovdqu (%rdi), %xmm1
; AVX2-NEXT:    vpsubusb %xmm0, %xmm1, %xmm0
; AVX2-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX2-NEXT:    retq
vector.ph:
  %0 = insertelement <16 x i8> undef, i8 %w, i32 0
  %broadcast15 = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> zeroinitializer
  %1 = getelementptr inbounds i8, i8* %head, i64 0
  %2 = bitcast i8* %1 to <16 x i8>*
  %3 = load <16 x i8>, <16 x i8>* %2, align 1
  %4 = icmp ult <16 x i8> %3, %broadcast15
  %5 = sub <16 x i8> %3, %broadcast15
  %6 = select <16 x i1> %4, <16 x i8> zeroinitializer, <16 x i8> %5
  store <16 x i8> %6, <16 x i8>* %2, align 1
  ret void
}
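
; Tests 7-9 are 256-bit (<16 x i16>) versions of tests 1-3. SSE lowers them
; as two 128-bit psubusw ops and AVX2 as a single 256-bit vpsubusw; AVX1,
; which has no 256-bit integer ops, currently expands to a compare/mask
; sequence (vpmaxuw/vpcmpeqw for the variable-splat case in test9).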
define void @test7(i16* nocapture %head) nounwind {
; SSE-LABEL: test7:
; SSE:       ## BB#0: ## %vector.ph
; SSE-NEXT:    movdqu (%rdi), %xmm0
; SSE-NEXT:    movdqu 16(%rdi), %xmm1
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE-NEXT:    psubusw %xmm2, %xmm0
; SSE-NEXT:    psubusw %xmm2, %xmm1
; SSE-NEXT:    movdqu %xmm1, 16(%rdi)
; SSE-NEXT:    movdqu %xmm0, (%rdi)
; SSE-NEXT:    retq
;
; AVX1-LABEL: test7:
; AVX1:       ## BB#0: ## %vector.ph
; AVX1-NEXT:    vmovdqu (%rdi), %ymm0
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpcmpgtw %xmm1, %xmm2, %xmm1
; AVX1-NEXT:    vpcmpgtw %xmm0, %xmm2, %xmm2
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT:    vxorps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT:    vandps %ymm0, %ymm1, %ymm0
; AVX1-NEXT:    vmovups %ymm0, (%rdi)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test7:
; AVX2:       ## BB#0: ## %vector.ph
; AVX2-NEXT:    vmovdqu (%rdi), %ymm0
; AVX2-NEXT:    vpsubusw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT:    vmovdqu %ymm0, (%rdi)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
vector.ph:
  %0 = getelementptr inbounds i16, i16* %head, i64 0
  %1 = bitcast i16* %0 to <16 x i16>*
  %2 = load <16 x i16>, <16 x i16>* %1, align 2
  %3 = icmp slt <16 x i16> %2, zeroinitializer
  %4 = xor <16 x i16> %2, <i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768>
  %5 = select <16 x i1> %3, <16 x i16> %4, <16 x i16> zeroinitializer
  store <16 x i16> %5, <16 x i16>* %1, align 2
  ret void
}

define void @test8(i16* nocapture %head) nounwind {
; SSE-LABEL: test8:
; SSE:       ## BB#0: ## %vector.ph
; SSE-NEXT:    movdqu (%rdi), %xmm0
; SSE-NEXT:    movdqu 16(%rdi), %xmm1
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [32767,32767,32767,32767,32767,32767,32767,32767]
; SSE-NEXT:    psubusw %xmm2, %xmm0
; SSE-NEXT:    psubusw %xmm2, %xmm1
; SSE-NEXT:    movdqu %xmm1, 16(%rdi)
; SSE-NEXT:    movdqu %xmm0, (%rdi)
; SSE-NEXT:    retq
;
; AVX1-LABEL: test8:
; AVX1:       ## BB#0: ## %vector.ph
; AVX1-NEXT:    vmovdqu (%rdi), %ymm0
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm3
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [65534,65534,65534,65534,65534,65534,65534,65534]
; AVX1-NEXT:    vpcmpgtw %xmm4, %xmm3, %xmm3
; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm2
; AVX1-NEXT:    vpcmpgtw %xmm4, %xmm2, %xmm2
; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [32769,32769,32769,32769,32769,32769,32769,32769]
; AVX1-NEXT:    vpaddw %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vpaddw %xmm3, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT:    vandps %ymm0, %ymm2, %ymm0
; AVX1-NEXT:    vmovups %ymm0, (%rdi)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test8:
; AVX2:       ## BB#0: ## %vector.ph
; AVX2-NEXT:    vmovdqu (%rdi), %ymm0
; AVX2-NEXT:    vpsubusw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT:    vmovdqu %ymm0, (%rdi)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
vector.ph:
  %0 = getelementptr inbounds i16, i16* %head, i64 0
  %1 = bitcast i16* %0 to <16 x i16>*
  %2 = load <16 x i16>, <16 x i16>* %1, align 2
  %3 = icmp ugt <16 x i16> %2, <i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766>
  %4 = add <16 x i16> %2, <i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767>
  %5 = select <16 x i1> %3, <16 x i16> %4, <16 x i16> zeroinitializer
  store <16 x i16> %5, <16 x i16>* %1, align 2
  ret void
}

define void @test9(i16* nocapture %head, i16 zeroext %w) nounwind {
; SSE-LABEL: test9:
; SSE:       ## BB#0: ## %vector.ph
; SSE-NEXT:    movd %esi, %xmm0
; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE-NEXT:    movdqu (%rdi), %xmm1
; SSE-NEXT:    movdqu 16(%rdi), %xmm2
; SSE-NEXT:    psubusw %xmm0, %xmm1
; SSE-NEXT:    psubusw %xmm0, %xmm2
; SSE-NEXT:    movdqu %xmm2, 16(%rdi)
; SSE-NEXT:    movdqu %xmm1, (%rdi)
; SSE-NEXT:    retq
;
; AVX1-LABEL: test9:
; AVX1:       ## BB#0: ## %vector.ph
; AVX1-NEXT:    vmovdqu (%rdi), %ymm0
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vmovd %esi, %xmm2
; AVX1-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,0,0,0,4,5,6,7]
; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
; AVX1-NEXT:    vpsubw %xmm2, %xmm1, %xmm3
; AVX1-NEXT:    vpsubw %xmm2, %xmm0, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
; AVX1-NEXT:    vpmaxuw %xmm2, %xmm1, %xmm4
; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm1, %xmm1
; AVX1-NEXT:    vpmaxuw %xmm2, %xmm0, %xmm2
; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT:    vandps %ymm3, %ymm0, %ymm0
; AVX1-NEXT:    vmovups %ymm0, (%rdi)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test9:
; AVX2:       ## BB#0: ## %vector.ph
; AVX2-NEXT:    vmovd %esi, %xmm0
; AVX2-NEXT:    vpbroadcastw %xmm0, %ymm0
; AVX2-NEXT:    vmovdqu (%rdi), %ymm1
; AVX2-NEXT:    vpsubusw %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vmovdqu %ymm0, (%rdi)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
vector.ph:
  %0 = insertelement <16 x i16> undef, i16 %w, i32 0
  %broadcast15 = shufflevector <16 x i16> %0, <16 x i16> undef, <16 x i32> zeroinitializer
  %1 = getelementptr inbounds i16, i16* %head, i64 0
  %2 = bitcast i16* %1 to <16 x i16>*
  %3 = load <16 x i16>, <16 x i16>* %2, align 2
  %4 = icmp ult <16 x i16> %3, %broadcast15
  %5 = sub <16 x i16> %3, %broadcast15
  %6 = select <16 x i1> %4, <16 x i16> zeroinitializer, <16 x i16> %5
  store <16 x i16> %6, <16 x i16>* %2, align 2
  ret void
}
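
; Tests 10-12 are the 256-bit (<32 x i8>) versions of the same patterns:
; two psubusb ops for SSE, a single vpsubusb for AVX2.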
define void @test10(i8* nocapture %head) nounwind {
; SSE-LABEL: test10:
; SSE:       ## BB#0: ## %vector.ph
; SSE-NEXT:    movdqu (%rdi), %xmm0
; SSE-NEXT:    movdqu 16(%rdi), %xmm1
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; SSE-NEXT:    psubusb %xmm2, %xmm0
; SSE-NEXT:    psubusb %xmm2, %xmm1
; SSE-NEXT:    movdqu %xmm1, 16(%rdi)
; SSE-NEXT:    movdqu %xmm0, (%rdi)
; SSE-NEXT:    retq
;
; AVX1-LABEL: test10:
; AVX1:       ## BB#0: ## %vector.ph
; AVX1-NEXT:    vmovdqu (%rdi), %ymm0
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpcmpgtb %xmm1, %xmm2, %xmm1
; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm2, %xmm2
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT:    vxorps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT:    vandps %ymm0, %ymm1, %ymm0
; AVX1-NEXT:    vmovups %ymm0, (%rdi)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test10:
; AVX2:       ## BB#0: ## %vector.ph
; AVX2-NEXT:    vmovdqu (%rdi), %ymm0
; AVX2-NEXT:    vpsubusb {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT:    vmovdqu %ymm0, (%rdi)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
vector.ph:
  %0 = getelementptr inbounds i8, i8* %head, i64 0
  %1 = bitcast i8* %0 to <32 x i8>*
  %2 = load <32 x i8>, <32 x i8>* %1, align 1
  %3 = icmp slt <32 x i8> %2, zeroinitializer
  %4 = xor <32 x i8> %2, <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128>
  %5 = select <32 x i1> %3, <32 x i8> %4, <32 x i8> zeroinitializer
  store <32 x i8> %5, <32 x i8>* %1, align 1
  ret void
}

define void @test11(i8* nocapture %head) nounwind {
; SSE-LABEL: test11:
; SSE:       ## BB#0: ## %vector.ph
; SSE-NEXT:    movdqu (%rdi), %xmm0
; SSE-NEXT:    movdqu 16(%rdi), %xmm1
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE-NEXT:    psubusb %xmm2, %xmm0
; SSE-NEXT:    psubusb %xmm2, %xmm1
; SSE-NEXT:    movdqu %xmm1, 16(%rdi)
; SSE-NEXT:    movdqu %xmm0, (%rdi)
; SSE-NEXT:    retq
;
; AVX1-LABEL: test11:
; AVX1:       ## BB#0: ## %vector.ph
; AVX1-NEXT:    vmovdqu (%rdi), %ymm0
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm3
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [254,254,254,254,254,254,254,254,254,254,254,254,254,254,254,254]
; AVX1-NEXT:    vpcmpgtb %xmm4, %xmm3, %xmm3
; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm2
; AVX1-NEXT:    vpcmpgtb %xmm4, %xmm2, %xmm2
; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [129,129,129,129,129,129,129,129,129,129,129,129,129,129,129,129]
; AVX1-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT:    vandps %ymm0, %ymm2, %ymm0
; AVX1-NEXT:    vmovups %ymm0, (%rdi)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test11:
; AVX2:       ## BB#0: ## %vector.ph
; AVX2-NEXT:    vmovdqu (%rdi), %ymm0
; AVX2-NEXT:    vpsubusb {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT:    vmovdqu %ymm0, (%rdi)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
vector.ph:
  %0 = getelementptr inbounds i8, i8* %head, i64 0
  %1 = bitcast i8* %0 to <32 x i8>*
  %2 = load <32 x i8>, <32 x i8>* %1, align 1
  %3 = icmp ugt <32 x i8> %2, <i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126>
  %4 = add <32 x i8> %2, <i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127>
  %5 = select <32 x i1> %3, <32 x i8> %4, <32 x i8> zeroinitializer
  store <32 x i8> %5, <32 x i8>* %1, align 1
  ret void
}

define void @test12(i8* nocapture %head, i8 zeroext %w) nounwind {
; SSE2-LABEL: test12:
; SSE2:       ## BB#0: ## %vector.ph
; SSE2-NEXT:    movd %esi, %xmm0
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE2-NEXT:    movdqu (%rdi), %xmm1
; SSE2-NEXT:    movdqu 16(%rdi), %xmm2
; SSE2-NEXT:    psubusb %xmm0, %xmm1
; SSE2-NEXT:    psubusb %xmm0, %xmm2
; SSE2-NEXT:    movdqu %xmm2, 16(%rdi)
; SSE2-NEXT:    movdqu %xmm1, (%rdi)
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: test12:
; SSSE3:       ## BB#0: ## %vector.ph
; SSSE3-NEXT:    movd %esi, %xmm0
; SSSE3-NEXT:    pxor %xmm1, %xmm1
; SSSE3-NEXT:    pshufb %xmm1, %xmm0
; SSSE3-NEXT:    movdqu (%rdi), %xmm1
; SSSE3-NEXT:    movdqu 16(%rdi), %xmm2
; SSSE3-NEXT:    psubusb %xmm0, %xmm1
; SSSE3-NEXT:    psubusb %xmm0, %xmm2
; SSSE3-NEXT:    movdqu %xmm2, 16(%rdi)
; SSSE3-NEXT:    movdqu %xmm1, (%rdi)
; SSSE3-NEXT:    retq
;
; AVX1-LABEL: test12:
; AVX1:       ## BB#0: ## %vector.ph
; AVX1-NEXT:    vmovdqu (%rdi), %ymm0
; AVX1-NEXT:    vmovd %esi, %xmm1
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT:    vpsubb %xmm1, %xmm2, %xmm3
; AVX1-NEXT:    vpsubb %xmm1, %xmm0, %xmm4
; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
; AVX1-NEXT:    vpmaxub %xmm1, %xmm2, %xmm4
; AVX1-NEXT:    vpcmpeqb %xmm4, %xmm2, %xmm2
; AVX1-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
; AVX1-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT:    vandps %ymm3, %ymm0, %ymm0
; AVX1-NEXT:    vmovups %ymm0, (%rdi)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test12:
; AVX2:       ## BB#0: ## %vector.ph
; AVX2-NEXT:    vmovd %esi, %xmm0
; AVX2-NEXT:    vpbroadcastb %xmm0, %ymm0
; AVX2-NEXT:    vmovdqu (%rdi), %ymm1
; AVX2-NEXT:    vpsubusb %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vmovdqu %ymm0, (%rdi)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
vector.ph:
  %0 = insertelement <32 x i8> undef, i8 %w, i32 0
  %broadcast15 = shufflevector <32 x i8> %0, <32 x i8> undef, <32 x i32> zeroinitializer
  %1 = getelementptr inbounds i8, i8* %head, i64 0
  %2 = bitcast i8* %1 to <32 x i8>*
  %3 = load <32 x i8>, <32 x i8>* %2, align 1
  %4 = icmp ult <32 x i8> %3, %broadcast15
  %5 = sub <32 x i8> %3, %broadcast15
  %6 = select <32 x i1> %4, <32 x i8> zeroinitializer, <32 x i8> %5
  store <32 x i8> %6, <32 x i8>* %2, align 1
  ret void
}
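
; In tests 13-16 the narrow elements are zero-extended to i32, subtracted as
; <8 x i32> / <16 x i32>, and truncated back, so no psubus instruction is
; formed at the moment; the checks pin down the generic widened lowering.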
define void @test13(i16* nocapture %head, i32* nocapture %w) nounwind {
; SSE2-LABEL: test13:
; SSE2:       ## BB#0: ## %vector.ph
; SSE2-NEXT:    movdqu (%rdi), %xmm0
; SSE2-NEXT:    movdqu (%rsi), %xmm2
; SSE2-NEXT:    movdqu 16(%rsi), %xmm3
; SSE2-NEXT:    pxor %xmm4, %xmm4
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    movdqa %xmm0, %xmm5
; SSE2-NEXT:    psubd %xmm3, %xmm0
; SSE2-NEXT:    pxor %xmm4, %xmm3
; SSE2-NEXT:    pxor %xmm4, %xmm5
; SSE2-NEXT:    pcmpgtd %xmm5, %xmm3
; SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT:    movdqa %xmm2, %xmm5
; SSE2-NEXT:    pxor %xmm4, %xmm5
; SSE2-NEXT:    pxor %xmm1, %xmm4
; SSE2-NEXT:    pcmpgtd %xmm4, %xmm5
; SSE2-NEXT:    pshuflw {{.*#+}} xmm4 = xmm5[0,2,2,3,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
; SSE2-NEXT:    psubd %xmm2, %xmm1
; SSE2-NEXT:    pslld $16, %xmm0
; SSE2-NEXT:    psrad $16, %xmm0
; SSE2-NEXT:    pslld $16, %xmm1
; SSE2-NEXT:    psrad $16, %xmm1
; SSE2-NEXT:    packssdw %xmm0, %xmm1
; SSE2-NEXT:    pandn %xmm1, %xmm4
; SSE2-NEXT:    movdqu %xmm4, (%rdi)
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: test13:
; SSSE3:       ## BB#0: ## %vector.ph
; SSSE3-NEXT:    movdqu (%rdi), %xmm0
; SSSE3-NEXT:    movdqu (%rsi), %xmm2
; SSSE3-NEXT:    movdqu 16(%rsi), %xmm3
; SSSE3-NEXT:    pxor %xmm4, %xmm4
; SSSE3-NEXT:    movdqa %xmm0, %xmm1
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSSE3-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSSE3-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; SSSE3-NEXT:    movdqa %xmm0, %xmm5
; SSSE3-NEXT:    psubd %xmm3, %xmm0
; SSSE3-NEXT:    pxor %xmm4, %xmm3
; SSSE3-NEXT:    pxor %xmm4, %xmm5
; SSSE3-NEXT:    pcmpgtd %xmm5, %xmm3
; SSSE3-NEXT:    movdqa {{.*#+}} xmm5 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSSE3-NEXT:    pshufb %xmm5, %xmm3
; SSSE3-NEXT:    movdqa %xmm2, %xmm6
; SSSE3-NEXT:    pxor %xmm4, %xmm6
; SSSE3-NEXT:    pxor %xmm1, %xmm4
; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm6
; SSSE3-NEXT:    pshufb %xmm5, %xmm6
; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm3[0]
; SSSE3-NEXT:    psubd %xmm2, %xmm1
; SSSE3-NEXT:    pshufb %xmm5, %xmm0
; SSSE3-NEXT:    pshufb %xmm5, %xmm1
; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSSE3-NEXT:    pandn %xmm1, %xmm6
; SSSE3-NEXT:    movdqu %xmm6, (%rdi)
; SSSE3-NEXT:    retq
;
; AVX1-LABEL: test13:
; AVX1:       ## BB#0: ## %vector.ph
; AVX1-NEXT:    vmovdqu (%rsi), %ymm0
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT:    vpxor %xmm3, %xmm5, %xmm6
; AVX1-NEXT:    vpcmpgtd %xmm4, %xmm6, %xmm4
; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm6
; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm3
; AVX1-NEXT:    vpcmpgtd %xmm6, %xmm3, %xmm3
; AVX1-NEXT:    vpacksswb %xmm4, %xmm3, %xmm3
; AVX1-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    vpsubd %xmm5, %xmm2, %xmm1
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT:    vpandn %xmm0, %xmm3, %xmm0
; AVX1-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test13:
; AVX2:       ## BB#0: ## %vector.ph
; AVX2-NEXT:    vmovdqu (%rsi), %ymm0
; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm2
; AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm3
; AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm2
; AVX2-NEXT:    vpcmpgtd %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT:    vpacksswb %xmm3, %xmm2, %xmm2
; AVX2-NEXT:    vpsubd %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT:    vpandn %xmm0, %xmm2, %xmm0
; AVX2-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
vector.ph:
  %0 = getelementptr inbounds i16, i16* %head, i64 0
  %1 = bitcast i16* %0 to <8 x i16>*
  %2 = load <8 x i16>, <8 x i16>* %1, align 2
  %3 = getelementptr inbounds i32, i32* %w, i64 0
  %4 = bitcast i32* %3 to <8 x i32>*
  %5 = load <8 x i32>, <8 x i32>* %4, align 2
  %6 = zext <8 x i16> %2 to <8 x i32>
  %7 = icmp ult <8 x i32> %6, %5
  %8 = sub <8 x i32> %6, %5
  %9 = trunc <8 x i32> %8 to <8 x i16>
  %10 = select <8 x i1> %7, <8 x i16> zeroinitializer, <8 x i16> %9
  store <8 x i16> %10, <8 x i16>* %1, align 1
  ret void
}
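
; Same widened pattern as test13, but starting from <16 x i8> elements
; zero-extended to <16 x i32>.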
define void @test14(i8* nocapture %head, i32* nocapture %w) nounwind {
; SSE2-LABEL: test14:
; SSE2:       ## BB#0: ## %vector.ph
; SSE2-NEXT:    movdqu (%rdi), %xmm0
; SSE2-NEXT:    movdqu (%rsi), %xmm8
; SSE2-NEXT:    movdqu 16(%rsi), %xmm9
; SSE2-NEXT:    movdqu 32(%rsi), %xmm10
; SSE2-NEXT:    movdqu 48(%rsi), %xmm7
; SSE2-NEXT:    pxor %xmm3, %xmm3
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; SSE2-NEXT:    movdqa %xmm1, %xmm2
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
; SSE2-NEXT:    movdqa %xmm0, %xmm6
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    movdqa %xmm0, %xmm5
; SSE2-NEXT:    psubd %xmm7, %xmm0
; SSE2-NEXT:    pxor %xmm3, %xmm7
; SSE2-NEXT:    pxor %xmm3, %xmm5
; SSE2-NEXT:    pcmpgtd %xmm5, %xmm7
; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [255,255,255,255]
; SSE2-NEXT:    pand %xmm5, %xmm7
; SSE2-NEXT:    movdqa %xmm6, %xmm4
; SSE2-NEXT:    psubd %xmm10, %xmm6
; SSE2-NEXT:    pxor %xmm3, %xmm10
; SSE2-NEXT:    pxor %xmm3, %xmm4
; SSE2-NEXT:    pcmpgtd %xmm4, %xmm10
; SSE2-NEXT:    pand %xmm5, %xmm10
; SSE2-NEXT:    packuswb %xmm7, %xmm10
; SSE2-NEXT:    movdqa %xmm1, %xmm4
; SSE2-NEXT:    psubd %xmm9, %xmm1
; SSE2-NEXT:    pxor %xmm3, %xmm9
; SSE2-NEXT:    pxor %xmm3, %xmm4
; SSE2-NEXT:    pcmpgtd %xmm4, %xmm9
; SSE2-NEXT:    pand %xmm5, %xmm9
; SSE2-NEXT:    movdqa %xmm8, %xmm4
; SSE2-NEXT:    pxor %xmm3, %xmm4
; SSE2-NEXT:    pxor %xmm2, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
; SSE2-NEXT:    pand %xmm5, %xmm4
; SSE2-NEXT:    packuswb %xmm9, %xmm4
; SSE2-NEXT:    packuswb %xmm10, %xmm4
; SSE2-NEXT:    psubd %xmm8, %xmm2
; SSE2-NEXT:    pand %xmm5, %xmm0
; SSE2-NEXT:    pand %xmm5, %xmm6
; SSE2-NEXT:    packuswb %xmm0, %xmm6
; SSE2-NEXT:    pand %xmm5, %xmm1
; SSE2-NEXT:    pand %xmm5, %xmm2
; SSE2-NEXT:    packuswb %xmm1, %xmm2
; SSE2-NEXT:    packuswb %xmm6, %xmm2
; SSE2-NEXT:    pandn %xmm2, %xmm4
; SSE2-NEXT:    movdqu %xmm4, (%rdi)
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: test14:
; SSSE3:       ## BB#0: ## %vector.ph
; SSSE3-NEXT:    movdqu (%rdi), %xmm0
; SSSE3-NEXT:    movdqu (%rsi), %xmm8
; SSSE3-NEXT:    movdqu 16(%rsi), %xmm9
; SSSE3-NEXT:    movdqu 32(%rsi), %xmm10
; SSSE3-NEXT:    movdqu 48(%rsi), %xmm7
; SSSE3-NEXT:    pxor %xmm3, %xmm3
; SSSE3-NEXT:    movdqa %xmm0, %xmm1
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; SSSE3-NEXT:    movdqa %xmm1, %xmm2
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; SSSE3-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; SSSE3-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
; SSSE3-NEXT:    movdqa %xmm0, %xmm6
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
; SSSE3-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSSE3-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSSE3-NEXT:    movdqa %xmm0, %xmm5
; SSSE3-NEXT:    psubd %xmm7, %xmm0
; SSSE3-NEXT:    pxor %xmm3, %xmm7
; SSSE3-NEXT:    pxor %xmm3, %xmm5
; SSSE3-NEXT:    pcmpgtd %xmm5, %xmm7
; SSSE3-NEXT:    movdqa {{.*#+}} xmm5 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
; SSSE3-NEXT:    pshufb %xmm5, %xmm7
; SSSE3-NEXT:    movdqa %xmm6, %xmm4
; SSSE3-NEXT:    psubd %xmm10, %xmm6
; SSSE3-NEXT:    pxor %xmm3, %xmm10
; SSSE3-NEXT:    pxor %xmm3, %xmm4
; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm10
; SSSE3-NEXT:    pshufb %xmm5, %xmm10
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm10 = xmm10[0],xmm7[0],xmm10[1],xmm7[1]
; SSSE3-NEXT:    movdqa %xmm1, %xmm4
; SSSE3-NEXT:    psubd %xmm9, %xmm1
; SSSE3-NEXT:    pxor %xmm3, %xmm9
; SSSE3-NEXT:    pxor %xmm3, %xmm4
; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm9
; SSSE3-NEXT:    movdqa {{.*#+}} xmm4 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
; SSSE3-NEXT:    pshufb %xmm4, %xmm9
; SSSE3-NEXT:    movdqa %xmm8, %xmm5
; SSSE3-NEXT:    pxor %xmm3, %xmm5
; SSSE3-NEXT:    pxor %xmm2, %xmm3
; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm5
; SSSE3-NEXT:    pshufb %xmm4, %xmm5
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1]
; SSSE3-NEXT:    movsd {{.*#+}} xmm10 = xmm5[0],xmm10[1]
; SSSE3-NEXT:    psubd %xmm8, %xmm2
; SSSE3-NEXT:    movdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSSE3-NEXT:    pand %xmm3, %xmm0
; SSSE3-NEXT:    pand %xmm3, %xmm6
; SSSE3-NEXT:    packuswb %xmm0, %xmm6
; SSSE3-NEXT:    pand %xmm3, %xmm1
; SSSE3-NEXT:    pand %xmm3, %xmm2
; SSSE3-NEXT:    packuswb %xmm1, %xmm2
; SSSE3-NEXT:    packuswb %xmm6, %xmm2
; SSSE3-NEXT:    andnpd %xmm2, %xmm10
; SSSE3-NEXT:    movupd %xmm10, (%rdi)
; SSSE3-NEXT:    retq
;
; AVX1-LABEL: test14:
; AVX1:       ## BB#0: ## %vector.ph
; AVX1-NEXT:    vmovdqu (%rsi), %ymm0
; AVX1-NEXT:    vmovdqu 32(%rsi), %ymm1
; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm9 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm10 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
; AVX1-NEXT:    vpxor %xmm6, %xmm5, %xmm7
; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT:    vpxor %xmm6, %xmm2, %xmm3
; AVX1-NEXT:    vpcmpgtd %xmm7, %xmm3, %xmm3
; AVX1-NEXT:    vpxor %xmm6, %xmm10, %xmm7
; AVX1-NEXT:    vpxor %xmm6, %xmm1, %xmm4
; AVX1-NEXT:    vpcmpgtd %xmm7, %xmm4, %xmm4
; AVX1-NEXT:    vpacksswb %xmm3, %xmm4, %xmm11
; AVX1-NEXT:    vpxor %xmm6, %xmm9, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
; AVX1-NEXT:    vpxor %xmm6, %xmm7, %xmm3
; AVX1-NEXT:    vpcmpgtd %xmm4, %xmm3, %xmm3
; AVX1-NEXT:    vpxor %xmm6, %xmm8, %xmm4
; AVX1-NEXT:    vpxor %xmm6, %xmm0, %xmm6
; AVX1-NEXT:    vpcmpgtd %xmm4, %xmm6, %xmm4
; AVX1-NEXT:    vpacksswb %xmm3, %xmm4, %xmm3
; AVX1-NEXT:    vpacksswb %xmm11, %xmm3, %xmm3
; AVX1-NEXT:    vpsubd %xmm0, %xmm8, %xmm0
; AVX1-NEXT:    vpsubd %xmm7, %xmm9, %xmm4
; AVX1-NEXT:    vpsubd %xmm1, %xmm10, %xmm1
; AVX1-NEXT:    vpsubd %xmm2, %xmm5, %xmm2
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; AVX1-NEXT:    vpand %xmm5, %xmm2, %xmm2
; AVX1-NEXT:    vpand %xmm5, %xmm1, %xmm1
; AVX1-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpand %xmm5, %xmm4, %xmm2
; AVX1-NEXT:    vpand %xmm5, %xmm0, %xmm0
; AVX1-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test14:
; AVX2:       ## BB#0: ## %vector.ph
; AVX2-NEXT:    vmovdqu (%rsi), %ymm0
; AVX2-NEXT:    vmovdqu 32(%rsi), %ymm1
; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT:    vpxor %ymm4, %ymm3, %ymm5
; AVX2-NEXT:    vpxor %ymm4, %ymm1, %ymm6
; AVX2-NEXT:    vpcmpgtd %ymm5, %ymm6, %ymm5
; AVX2-NEXT:    vextracti128 $1, %ymm5, %xmm6
; AVX2-NEXT:    vpacksswb %xmm6, %xmm5, %xmm5
; AVX2-NEXT:    vpxor %ymm4, %ymm2, %ymm6
; AVX2-NEXT:    vpxor %ymm4, %ymm0, %ymm4
; AVX2-NEXT:    vpcmpgtd %ymm6, %ymm4, %ymm4
; AVX2-NEXT:    vextracti128 $1, %ymm4, %xmm6
; AVX2-NEXT:    vpacksswb %xmm6, %xmm4, %xmm4
; AVX2-NEXT:    vpacksswb %xmm5, %xmm4, %xmm4
; AVX2-NEXT:    vpsubd %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    vpsubd %ymm1, %ymm3, %ymm1
; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
; AVX2-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpblendvb %xmm4, %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
vector.ph:
  %0 = getelementptr inbounds i8, i8* %head, i64 0
  %1 = bitcast i8* %0 to <16 x i8>*
  %2 = load <16 x i8>, <16 x i8>* %1, align 2
  %3 = getelementptr inbounds i32, i32* %w, i64 0
  %4 = bitcast i32* %3 to <16 x i32>*
  %5 = load <16 x i32>, <16 x i32>* %4, align 2
  %6 = zext <16 x i8> %2 to <16 x i32>
  %7 = icmp ult <16 x i32> %6, %5
  %8 = sub <16 x i32> %6, %5
  %9 = trunc <16 x i32> %8 to <16 x i8>
  %10 = select <16 x i1> %7, <16 x i8> zeroinitializer, <16 x i8> %9
  store <16 x i8> %10, <16 x i8>* %1, align 1
  ret void
}
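
; test15 is test13 with the compare direction and select arms flipped
; (ugt, selecting the subtraction result instead of zero).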
define void @test15(i16* nocapture %head, i32* nocapture %w) nounwind {
; SSE2-LABEL: test15:
; SSE2:       ## BB#0: ## %vector.ph
; SSE2-NEXT:    movdqu (%rdi), %xmm0
; SSE2-NEXT:    movdqu (%rsi), %xmm2
; SSE2-NEXT:    movdqu 16(%rsi), %xmm3
; SSE2-NEXT:    pxor %xmm4, %xmm4
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    movdqa %xmm0, %xmm5
; SSE2-NEXT:    psubd %xmm3, %xmm0
; SSE2-NEXT:    pxor %xmm4, %xmm3
; SSE2-NEXT:    pxor %xmm4, %xmm5
; SSE2-NEXT:    pcmpgtd %xmm3, %xmm5
; SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm5[0,2,2,3,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT:    movdqa %xmm2, %xmm5
; SSE2-NEXT:    pxor %xmm4, %xmm5
; SSE2-NEXT:    pxor %xmm1, %xmm4
; SSE2-NEXT:    pcmpgtd %xmm5, %xmm4
; SSE2-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
; SSE2-NEXT:    psubd %xmm2, %xmm1
; SSE2-NEXT:    pslld $16, %xmm0
; SSE2-NEXT:    psrad $16, %xmm0
; SSE2-NEXT:    pslld $16, %xmm1
; SSE2-NEXT:    psrad $16, %xmm1
; SSE2-NEXT:    packssdw %xmm0, %xmm1
; SSE2-NEXT:    pand %xmm4, %xmm1
; SSE2-NEXT:    movdqu %xmm1, (%rdi)
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: test15:
; SSSE3:       ## BB#0: ## %vector.ph
; SSSE3-NEXT:    movdqu (%rdi), %xmm0
; SSSE3-NEXT:    movdqu (%rsi), %xmm2
; SSSE3-NEXT:    movdqu 16(%rsi), %xmm4
; SSSE3-NEXT:    pxor %xmm3, %xmm3
; SSSE3-NEXT:    movdqa %xmm0, %xmm1
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; SSSE3-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSSE3-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSSE3-NEXT:    movdqa %xmm0, %xmm5
; SSSE3-NEXT:    psubd %xmm4, %xmm0
; SSSE3-NEXT:    pxor %xmm3, %xmm4
; SSSE3-NEXT:    pxor %xmm3, %xmm5
; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm5
; SSSE3-NEXT:    movdqa {{.*#+}} xmm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSSE3-NEXT:    pshufb %xmm4, %xmm5
; SSSE3-NEXT:    movdqa %xmm2, %xmm6
; SSSE3-NEXT:    pxor %xmm3, %xmm6
; SSSE3-NEXT:    pxor %xmm1, %xmm3
; SSSE3-NEXT:    pcmpgtd %xmm6, %xmm3
; SSSE3-NEXT:    pshufb %xmm4, %xmm3
; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
; SSSE3-NEXT:    psubd %xmm2, %xmm1
; SSSE3-NEXT:    pshufb %xmm4, %xmm0
; SSSE3-NEXT:    pshufb %xmm4, %xmm1
; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSSE3-NEXT:    pand %xmm3, %xmm1
; SSSE3-NEXT:    movdqu %xmm1, (%rdi)
; SSSE3-NEXT:    retq
;
; AVX1-LABEL: test15:
; AVX1:       ## BB#0: ## %vector.ph
; AVX1-NEXT:    vmovdqu (%rsi), %ymm0
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT:    vpxor %xmm3, %xmm5, %xmm6
; AVX1-NEXT:    vpcmpgtd %xmm6, %xmm4, %xmm4
; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm6
; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm3
; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm6, %xmm3
; AVX1-NEXT:    vpacksswb %xmm4, %xmm3, %xmm3
; AVX1-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    vpsubd %xmm5, %xmm2, %xmm1
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT:    vpand %xmm0, %xmm3, %xmm0
; AVX1-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test15:
; AVX2:       ## BB#0: ## %vector.ph
; AVX2-NEXT:    vmovdqu (%rsi), %ymm0
; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm2
; AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm3
; AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm2
; AVX2-NEXT:    vpcmpgtd %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT:    vpacksswb %xmm3, %xmm2, %xmm2
; AVX2-NEXT:    vpsubd %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT:    vpand %xmm0, %xmm2, %xmm0
; AVX2-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
vector.ph:
  %0 = getelementptr inbounds i16, i16* %head, i64 0
  %1 = bitcast i16* %0 to <8 x i16>*
  %2 = load <8 x i16>, <8 x i16>* %1, align 2
  %3 = getelementptr inbounds i32, i32* %w, i64 0
  %4 = bitcast i32* %3 to <8 x i32>*
  %5 = load <8 x i32>, <8 x i32>* %4, align 2
  %6 = zext <8 x i16> %2 to <8 x i32>
  %7 = icmp ugt <8 x i32> %6, %5
  %8 = sub <8 x i32> %6, %5
  %9 = trunc <8 x i32> %8 to <8 x i16>
  %10 = select <8 x i1> %7, <8 x i16> %9, <8 x i16> zeroinitializer
  store <8 x i16> %10, <8 x i16>* %1, align 1
  ret void
}
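
; test16 is test15 with the icmp operands commuted (ult %5, %6); the codegen
; should be identical.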
define void @test16(i16* nocapture %head, i32* nocapture %w) nounwind {
; SSE2-LABEL: test16:
; SSE2:       ## BB#0: ## %vector.ph
; SSE2-NEXT:    movdqu (%rdi), %xmm0
; SSE2-NEXT:    movdqu (%rsi), %xmm2
; SSE2-NEXT:    movdqu 16(%rsi), %xmm3
; SSE2-NEXT:    pxor %xmm4, %xmm4
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    movdqa %xmm0, %xmm5
; SSE2-NEXT:    psubd %xmm3, %xmm0
; SSE2-NEXT:    pxor %xmm4, %xmm3
; SSE2-NEXT:    pxor %xmm4, %xmm5
; SSE2-NEXT:    pcmpgtd %xmm3, %xmm5
; SSE2-NEXT:    pshuflw {{.*#+}} xmm3 = xmm5[0,2,2,3,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT:    movdqa %xmm2, %xmm5
; SSE2-NEXT:    pxor %xmm4, %xmm5
; SSE2-NEXT:    pxor %xmm1, %xmm4
; SSE2-NEXT:    pcmpgtd %xmm5, %xmm4
; SSE2-NEXT:    pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7]
; SSE2-NEXT:    pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
; SSE2-NEXT:    psubd %xmm2, %xmm1
; SSE2-NEXT:    pslld $16, %xmm0
; SSE2-NEXT:    psrad $16, %xmm0
; SSE2-NEXT:    pslld $16, %xmm1
; SSE2-NEXT:    psrad $16, %xmm1
; SSE2-NEXT:    packssdw %xmm0, %xmm1
; SSE2-NEXT:    pand %xmm4, %xmm1
; SSE2-NEXT:    movdqu %xmm1, (%rdi)
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: test16:
; SSSE3:       ## BB#0: ## %vector.ph
; SSSE3-NEXT:    movdqu (%rdi), %xmm0
; SSSE3-NEXT:    movdqu (%rsi), %xmm2
; SSSE3-NEXT:    movdqu 16(%rsi), %xmm4
; SSSE3-NEXT:    pxor %xmm3, %xmm3
; SSSE3-NEXT:    movdqa %xmm0, %xmm1
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; SSSE3-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSSE3-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSSE3-NEXT:    movdqa %xmm0, %xmm5
; SSSE3-NEXT:    psubd %xmm4, %xmm0
; SSSE3-NEXT:    pxor %xmm3, %xmm4
; SSSE3-NEXT:    pxor %xmm3, %xmm5
; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm5
; SSSE3-NEXT:    movdqa {{.*#+}} xmm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSSE3-NEXT:    pshufb %xmm4, %xmm5
; SSSE3-NEXT:    movdqa %xmm2, %xmm6
; SSSE3-NEXT:    pxor %xmm3, %xmm6
; SSSE3-NEXT:    pxor %xmm1, %xmm3
; SSSE3-NEXT:    pcmpgtd %xmm6, %xmm3
; SSSE3-NEXT:    pshufb %xmm4, %xmm3
; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
; SSSE3-NEXT:    psubd %xmm2, %xmm1
; SSSE3-NEXT:    pshufb %xmm4, %xmm0
; SSSE3-NEXT:    pshufb %xmm4, %xmm1
; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSSE3-NEXT:    pand %xmm3, %xmm1
; SSSE3-NEXT:    movdqu %xmm1, (%rdi)
; SSSE3-NEXT:    retq
;
; AVX1-LABEL: test16:
; AVX1:       ## BB#0: ## %vector.ph
; AVX1-NEXT:    vmovdqu (%rsi), %ymm0
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT:    vpxor %xmm3, %xmm5, %xmm6
; AVX1-NEXT:    vpcmpgtd %xmm6, %xmm4, %xmm4
; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm6
; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm3
; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm6, %xmm3
; AVX1-NEXT:    vpacksswb %xmm4, %xmm3, %xmm3
; AVX1-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    vpsubd %xmm5, %xmm2, %xmm1
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT:    vpand %xmm0, %xmm3, %xmm0
; AVX1-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test16:
; AVX2:       ## BB#0: ## %vector.ph
; AVX2-NEXT:    vmovdqu (%rsi), %ymm0
; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm2
; AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm3
; AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm2
; AVX2-NEXT:    vpcmpgtd %ymm3, %ymm2, %ymm2
; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT:    vpacksswb %xmm3, %xmm2, %xmm2
; AVX2-NEXT:    vpsubd %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT:    vpand %xmm0, %xmm2, %xmm0
; AVX2-NEXT:    vmovdqu %xmm0, (%rdi)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
vector.ph:
  %0 = getelementptr inbounds i16, i16* %head, i64 0
  %1 = bitcast i16* %0 to <8 x i16>*
  %2 = load <8 x i16>, <8 x i16>* %1, align 2
  %3 = getelementptr inbounds i32, i32* %w, i64 0
  %4 = bitcast i32* %3 to <8 x i32>*
  %5 = load <8 x i32>, <8 x i32>* %4, align 2
  %6 = zext <8 x i16> %2 to <8 x i32>
  %7 = icmp ult <8 x i32> %5, %6
  %8 = sub <8 x i32> %6, %5
  %9 = trunc <8 x i32> %8 to <8 x i16>
  %10 = select <8 x i1> %7, <8 x i16> %9, <8 x i16> zeroinitializer
  store <8 x i16> %10, <8 x i16>* %1, align 1
  ret void
}