Diffstat (limited to 'llvm/test/CodeGen/X86/combine-urem.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/combine-urem.ll | 42 |
1 file changed, 21 insertions, 21 deletions
diff --git a/llvm/test/CodeGen/X86/combine-urem.ll b/llvm/test/CodeGen/X86/combine-urem.ll
index 4c7716bbaeb..2530136c054 100644
--- a/llvm/test/CodeGen/X86/combine-urem.ll
+++ b/llvm/test/CodeGen/X86/combine-urem.ll
@@ -6,11 +6,11 @@
 ; fold (urem undef, x) -> 0
 define <4 x i32> @combine_vec_urem_undef0(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_urem_undef0:
-; SSE: # BB#0:
+; SSE: # %bb.0:
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: combine_vec_urem_undef0:
-; AVX: # BB#0:
+; AVX: # %bb.0:
 ; AVX-NEXT: retq
 %1 = urem <4 x i32> undef, %x
 ret <4 x i32> %1
@@ -19,11 +19,11 @@ define <4 x i32> @combine_vec_urem_undef0(<4 x i32> %x) {
 ; fold (urem x, undef) -> undef
 define <4 x i32> @combine_vec_urem_undef1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_urem_undef1:
-; SSE: # BB#0:
+; SSE: # %bb.0:
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: combine_vec_urem_undef1:
-; AVX: # BB#0:
+; AVX: # %bb.0:
 ; AVX-NEXT: retq
 %1 = urem <4 x i32> %x, undef
 ret <4 x i32> %1
@@ -32,17 +32,17 @@ define <4 x i32> @combine_vec_urem_undef1(<4 x i32> %x) {
 ; fold (urem x, pow2) -> (and x, (pow2-1))
 define <4 x i32> @combine_vec_urem_by_pow2a(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_urem_by_pow2a:
-; SSE: # BB#0:
+; SSE: # %bb.0:
 ; SSE-NEXT: andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX1-LABEL: combine_vec_urem_by_pow2a:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
 ; AVX1-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: combine_vec_urem_by_pow2a:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
 ; AVX2-NEXT: vbroadcastss {{.*#+}} xmm1 = [3,3,3,3]
 ; AVX2-NEXT: vandps %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: retq
@@ -52,12 +52,12 @@ define <4 x i32> @combine_vec_urem_by_pow2a(<4 x i32> %x) {
 
 define <4 x i32> @combine_vec_urem_by_pow2b(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_urem_by_pow2b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
 ; SSE-NEXT: andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: combine_vec_urem_by_pow2b:
-; AVX: # BB#0:
+; AVX: # %bb.0:
 ; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT: retq
 %1 = urem <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>
@@ -66,7 +66,7 @@ define <4 x i32> @combine_vec_urem_by_pow2b(<4 x i32> %x) {
 
 define <4 x i32> @combine_vec_urem_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_urem_by_pow2c:
-; SSE: # BB#0:
+; SSE: # %bb.0:
 ; SSE-NEXT: pslld $23, %xmm1
 ; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
 ; SSE-NEXT: cvttps2dq %xmm1, %xmm1
@@ -76,7 +76,7 @@ define <4 x i32> @combine_vec_urem_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-NEXT: retq
 ;
 ; AVX1-LABEL: combine_vec_urem_by_pow2c:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
 ; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
 ; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
@@ -86,7 +86,7 @@ define <4 x i32> @combine_vec_urem_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: combine_vec_urem_by_pow2c:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
 ; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
 ; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -100,7 +100,7 @@ define <4 x i32> @combine_vec_urem_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
 
 define <4 x i32> @combine_vec_urem_by_pow2d(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_urem_by_pow2d:
-; SSE: # BB#0:
+; SSE: # %bb.0:
 ; SSE-NEXT: movdqa %xmm1, %xmm2
 ; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
@@ -125,7 +125,7 @@ define <4 x i32> @combine_vec_urem_by_pow2d(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-NEXT: retq
 ;
 ; AVX1-LABEL: combine_vec_urem_by_pow2d:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
 ; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
 ; AVX1-NEXT: vpsrld %xmm2, %xmm3, %xmm2
@@ -145,7 +145,7 @@ define <4 x i32> @combine_vec_urem_by_pow2d(<4 x i32> %x, <4 x i32> %y) {
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: combine_vec_urem_by_pow2d:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; AVX2-NEXT: vpsrlvd %xmm1, %xmm2, %xmm1
 ; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -160,7 +160,7 @@ define <4 x i32> @combine_vec_urem_by_pow2d(<4 x i32> %x, <4 x i32> %y) {
 ; fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
 define <4 x i32> @combine_vec_urem_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_urem_by_shl_pow2a:
-; SSE: # BB#0:
+; SSE: # %bb.0:
 ; SSE-NEXT: pslld $23, %xmm1
 ; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
 ; SSE-NEXT: cvttps2dq %xmm1, %xmm1
@@ -171,7 +171,7 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-NEXT: retq
 ;
 ; AVX1-LABEL: combine_vec_urem_by_shl_pow2a:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
 ; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
 ; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
@@ -182,7 +182,7 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: combine_vec_urem_by_shl_pow2a:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [4,4,4,4]
 ; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
 ; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -196,7 +196,7 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
 
 define <4 x i32> @combine_vec_urem_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_urem_by_shl_pow2b:
-; SSE: # BB#0:
+; SSE: # %bb.0:
 ; SSE-NEXT: pslld $23, %xmm1
 ; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
 ; SSE-NEXT: cvttps2dq %xmm1, %xmm1
@@ -207,7 +207,7 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-NEXT: retq
 ;
 ; AVX1-LABEL: combine_vec_urem_by_shl_pow2b:
-; AVX1: # BB#0:
+; AVX1: # %bb.0:
 ; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
 ; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
@@ -218,7 +218,7 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: combine_vec_urem_by_shl_pow2b:
-; AVX2: # BB#0:
+; AVX2: # %bb.0:
 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [1,4,8,16]
 ; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
 ; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
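For context on the folds these tests exercise (the diff itself only renames the "BB#0" basic-block comment to "%bb.0" in the CHECK lines), here is a minimal scalar sketch in LLVM IR. The function names @urem_pow2 and @urem_shl_pow2 are illustrative only and not part of this commit; the tests above run through llc, where the DAGCombiner performs these folds during lowering.

; fold (urem x, pow2) -> (and x, (pow2-1)):
; a remainder by a power of two keeps only the low bits, so it lowers
; to a mask, which is where the andps/vandps in the checks come from.
define i32 @urem_pow2(i32 %x) {
  %r = urem i32 %x, 16        ; expected to lower as: and i32 %x, 15
  ret i32 %r
}

; fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1)):
; the divisor is still a power of two after the shift, so the mask is
; built dynamically; this matches the vpsllvd (shift), vpcmpeqd (all-ones
; = -1), and vpaddd sequences checked in the AVX2 output above.
define i32 @urem_shl_pow2(i32 %x, i32 %y) {
  %p = shl i32 4, %y          ; divisor: a power of two shifted by %y
  %r = urem i32 %x, %p        ; expected to fold to: x & ((4 << y) - 1)
  ret i32 %r
}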