Diffstat (limited to 'llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll')
-rw-r--r--  llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll | 297
1 file changed, 297 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll
index eb59603a35a..8fe9c5b248c 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll
@@ -242,6 +242,137 @@ define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
}
;
+; sdiv by non-splat constant
+;
+
+define <32 x i8> @test_divconstant_32i8(<32 x i8> %a) nounwind {
+; AVX1-LABEL: test_divconstant_32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,0,0,0,0,0,1,1,1,0,0,1,0,0,1,1]
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm1, %xmm4, %xmm4
+; AVX1-NEXT: vpackuswb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm4
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm4, %xmm4
+; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpsraw $8, %xmm3, %xmm3
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpsraw $8, %xmm4, %xmm4
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm4, %xmm4
+; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
+; AVX1-NEXT: vpackuswb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlw $7, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,0,0,1,0,0,1,1,1,0,0,0,0,0,1]
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpmullw %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpand %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm5, %xmm5
+; AVX1-NEXT: vpand %xmm1, %xmm5, %xmm1
+; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm3
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpsraw $8, %xmm1, %xmm1
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpsraw $8, %xmm3, %xmm3
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
+; AVX1-NEXT: vpackuswb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2NOBW-LABEL: test_divconstant_32i8:
+; AVX2NOBW: # %bb.0:
+; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm1 = [1,1,0,0,1,0,0,1,1,1,0,0,0,0,0,1,1,0,0,0,0,0,1,1,1,0,0,1,0,0,1,1]
+; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
+; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX2NOBW-NEXT: vpmullw %ymm2, %ymm3, %ymm2
+; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2NOBW-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX2NOBW-NEXT: vpmullw %ymm1, %ymm4, %ymm1
+; AVX2NOBW-NEXT: vpand %ymm3, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2NOBW-NEXT: vpmovsxbw %xmm2, %ymm2
+; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX2NOBW-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX2NOBW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2NOBW-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX2NOBW-NEXT: vpsraw $8, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX2NOBW-NEXT: vpsraw $8, %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpackuswb %ymm1, %ymm2, %ymm1
+; AVX2NOBW-NEXT: vpsrlw $7, %ymm0, %ymm0
+; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2NOBW-NEXT: vpaddb %ymm0, %ymm1, %ymm0
+; AVX2NOBW-NEXT: retq
+;
+; AVX512BW-LABEL: test_divconstant_32i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
+; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpsrlw $7, %ymm0, %ymm1
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
+; AVX512BW-NEXT: vpsravw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: retq
+ %res = sdiv <32 x i8> %a, <i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 22, i8 21, i8 20, i8 19, i8 18, i8 17, i8 16, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7>
+ ret <32 x i8> %res
+}
+
+;
; srem by 7
;
@@ -544,3 +675,169 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
%res = srem <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
ret <32 x i8> %res
}
+
+;
+; srem by non-splat constant
+;
+
+define <32 x i8> @test_remconstant_32i8(<32 x i8> %a) nounwind {
+; AVX1-LABEL: test_remconstant_32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,0,0,0,0,0,1,1,1,0,0,1,0,0,1,1]
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm1, %xmm4, %xmm4
+; AVX1-NEXT: vpackuswb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpmovsxbw %xmm2, %xmm4
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm4, %xmm4
+; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
+; AVX1-NEXT: vpmovsxbw %xmm5, %xmm5
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm5, %xmm5
+; AVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
+; AVX1-NEXT: vpackuswb %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpaddb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpsraw $8, %xmm4, %xmm4
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm4, %xmm4
+; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpsraw $8, %xmm5, %xmm5
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm5, %xmm5
+; AVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
+; AVX1-NEXT: vpackuswb %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpsrlw $7, %xmm3, %xmm5
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vpand %xmm3, %xmm5, %xmm5
+; AVX1-NEXT: vpaddb %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7]
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
+; AVX1-NEXT: vpmullw %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vpand %xmm1, %xmm5, %xmm5
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm1, %xmm4, %xmm4
+; AVX1-NEXT: vpackuswb %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpsubb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1,1,0,0,1,0,0,1,1,1,0,0,0,0,0,1]
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpand %xmm1, %xmm4, %xmm4
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm5, %xmm5
+; AVX1-NEXT: vpand %xmm1, %xmm5, %xmm5
+; AVX1-NEXT: vpackuswb %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm5
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm5, %xmm5
+; AVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovsxbw %xmm6, %xmm6
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm6, %xmm6
+; AVX1-NEXT: vpsrlw $8, %xmm6, %xmm6
+; AVX1-NEXT: vpackuswb %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vpaddb %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpsraw $8, %xmm5, %xmm5
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm5, %xmm5
+; AVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpsraw $8, %xmm6, %xmm6
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm6, %xmm6
+; AVX1-NEXT: vpsrlw $8, %xmm6, %xmm6
+; AVX1-NEXT: vpackuswb %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vpsrlw $7, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpaddb %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22]
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; AVX1-NEXT: vpmullw %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm1, %xmm4, %xmm4
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpackuswb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2NOBW-LABEL: test_remconstant_32i8:
+; AVX2NOBW: # %bb.0:
+; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm2 = [1,1,0,0,1,0,0,1,1,1,0,0,0,0,0,1,1,0,0,0,0,0,1,1,1,0,0,1,0,0,1,1]
+; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX2NOBW-NEXT: vpmullw %ymm1, %ymm3, %ymm3
+; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2NOBW-NEXT: vpand %ymm1, %ymm3, %ymm3
+; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX2NOBW-NEXT: vpmullw %ymm2, %ymm4, %ymm2
+; AVX2NOBW-NEXT: vpand %ymm1, %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpackuswb %ymm3, %ymm2, %ymm2
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX2NOBW-NEXT: vpmovsxbw %xmm3, %ymm3
+; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm3, %ymm3
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm3, %ymm3
+; AVX2NOBW-NEXT: vpmovsxbw %xmm0, %ymm4
+; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm4, %ymm4
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm4, %ymm4
+; AVX2NOBW-NEXT: vpackuswb %ymm3, %ymm4, %ymm3
+; AVX2NOBW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,1,3]
+; AVX2NOBW-NEXT: vpaddb %ymm2, %ymm3, %ymm2
+; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX2NOBW-NEXT: vpsraw $8, %ymm3, %ymm3
+; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm3, %ymm3
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm3, %ymm3
+; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX2NOBW-NEXT: vpsraw $8, %ymm4, %ymm4
+; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm4, %ymm4
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm4, %ymm4
+; AVX2NOBW-NEXT: vpackuswb %ymm3, %ymm4, %ymm3
+; AVX2NOBW-NEXT: vpsrlw $7, %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpaddb %ymm2, %ymm3, %ymm2
+; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm4 = [7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7]
+; AVX2NOBW-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm4[8],ymm0[8],ymm4[9],ymm0[9],ymm4[10],ymm0[10],ymm4[11],ymm0[11],ymm4[12],ymm0[12],ymm4[13],ymm0[13],ymm4[14],ymm0[14],ymm4[15],ymm0[15],ymm4[24],ymm0[24],ymm4[25],ymm0[25],ymm4[26],ymm0[26],ymm4[27],ymm0[27],ymm4[28],ymm0[28],ymm4[29],ymm0[29],ymm4[30],ymm0[30],ymm4[31],ymm0[31]
+; AVX2NOBW-NEXT: vpmullw %ymm5, %ymm3, %ymm3
+; AVX2NOBW-NEXT: vpand %ymm1, %ymm3, %ymm3
+; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX2NOBW-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[16],ymm0[16],ymm4[17],ymm0[17],ymm4[18],ymm0[18],ymm4[19],ymm0[19],ymm4[20],ymm0[20],ymm4[21],ymm0[21],ymm4[22],ymm0[22],ymm4[23],ymm0[23]
+; AVX2NOBW-NEXT: vpmullw %ymm4, %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpand %ymm1, %ymm2, %ymm1
+; AVX2NOBW-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX2NOBW-NEXT: retq
+;
+; AVX512BW-LABEL: test_remconstant_32i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm2
+; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm2, %zmm2
+; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
+; AVX512BW-NEXT: vpmovwb %zmm2, %ymm2
+; AVX512BW-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX512BW-NEXT: vpsrlw $7, %ymm1, %ymm2
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
+; AVX512BW-NEXT: vpsravw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT: vpaddb %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: retq
+ %res = srem <32 x i8> %a, <i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 22, i8 21, i8 20, i8 19, i8 18, i8 17, i8 16, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7>
+ ret <32 x i8> %res
+}
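The tests above pin down how SelectionDAG expands signed division and remainder by constants into multiply-by-magic-reciprocal sequences. x86 has no vector i8 multiply or per-lane i8 shift, so each lane is widened to i16 (vpunpck*/vpmovsxbw), multiplied by a per-lane magic constant (vpmullw), narrowed back (vpsrlw $8 + vpackuswb), and then fixed up with an arithmetic post-shift and a sign-bit add; the 0/1 constant vectors such as [1,1,0,0,...] mask the dividend so that only the lanes whose magic constant requires an add-back contribute to the vpaddb. As a sketch of the arithmetic only, here is a scalar C model of one lane for divisor 7, using the magic constant -109 and post-shift 2 that LLVM uses for i8 division by 7 (cf. test_div7_32i8 earlier in this file); the sdiv7/srem7 names and the exhaustive check are illustrative, not part of the commit. It assumes >> of a negative value is an arithmetic shift, as on all mainstream compilers.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* One lane of the lowering above: signed i8 division by 7 via the
   "magic number" method (Hacker's Delight, ch. 10). Non-splat divisors
   just use a different (magic, add-back, post-shift) triple per lane. */
static int8_t sdiv7(int8_t a) {
    int16_t mulhi = (int16_t)(a * -109) >> 8;  /* vpmovsxbw + vpmullw + vpsrlw $8 */
    int8_t q = (int8_t)(mulhi + a);            /* vpaddb: add the dividend back   */
    int8_t post = (int8_t)(q >> 2);            /* arithmetic post-shift (the
                                                  vpsraw $8 / vpmullw / vpsrlw $8
                                                  sequence in the checks)         */
    return (int8_t)(post + ((uint8_t)q >> 7)); /* vpsrlw $7 + vpand + vpaddb:
                                                  round toward zero               */
}

/* srem is a - (a/d)*d, which is why the srem checks end with a vpmullw
   by the divisor vector followed by vpsubb. */
static int8_t srem7(int8_t a) {
    return (int8_t)(a - sdiv7(a) * 7);
}

int main(void) {
    for (int a = -128; a <= 127; ++a) {
        assert(sdiv7((int8_t)a) == (int8_t)(a / 7));
        assert(srem7((int8_t)a) == (int8_t)(a % 7));
    }
    puts("sdiv7/srem7 match a/7 and a%7 for every i8 value");
    return 0;
}

The same structure explains why the non-splat tests are so much longer than the splat ones: with 32 distinct (magic, add-back, shift) triples, every correction step needs its own constant-pool vector instead of a single broadcast value.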