 llvm/test/CodeGen/X86/combine-sdiv.ll | 67
 llvm/test/CodeGen/X86/combine-srem.ll | 68
 llvm/test/CodeGen/X86/combine-udiv.ll | 67
 llvm/test/CodeGen/X86/combine-urem.ll | 68
 4 files changed, 270 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/X86/combine-sdiv.ll b/llvm/test/CodeGen/X86/combine-sdiv.ll
index 7f0573c6175..e4e3dcf0406 100644
--- a/llvm/test/CodeGen/X86/combine-sdiv.ll
+++ b/llvm/test/CodeGen/X86/combine-sdiv.ll
@@ -106,6 +106,73 @@ define <4 x i32> @combine_vec_sdiv_by_minsigned(<4 x i32> %x) {
   ret <4 x i32> %1
 }
 
+; TODO fold (sdiv 0, x) -> 0
+define i32 @combine_sdiv_zero(i32 %x) {
+; CHECK-LABEL: combine_sdiv_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    xorl %edx, %edx
+; CHECK-NEXT:    idivl %edi
+; CHECK-NEXT:    retq
+  %1 = sdiv i32 0, %x
+  ret i32 %1
+}
+
+define <4 x i32> @combine_vec_sdiv_zero(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_sdiv_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pextrd $1, %xmm0, %ecx
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    idivl %ecx
+; SSE-NEXT:    movl %eax, %ecx
+; SSE-NEXT:    movd %xmm0, %esi
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    idivl %esi
+; SSE-NEXT:    movd %eax, %xmm1
+; SSE-NEXT:    pinsrd $1, %ecx, %xmm1
+; SSE-NEXT:    pextrd $2, %xmm0, %ecx
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    idivl %ecx
+; SSE-NEXT:    pinsrd $2, %eax, %xmm1
+; SSE-NEXT:    pextrd $3, %xmm0, %ecx
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    idivl %ecx
+; SSE-NEXT:    pinsrd $3, %eax, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_sdiv_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpextrd $1, %xmm0, %ecx
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    idivl %ecx
+; AVX-NEXT:    movl %eax, %ecx
+; AVX-NEXT:    vmovd %xmm0, %esi
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    idivl %esi
+; AVX-NEXT:    vmovd %eax, %xmm1
+; AVX-NEXT:    vpinsrd $1, %ecx, %xmm1, %xmm1
+; AVX-NEXT:    vpextrd $2, %xmm0, %ecx
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    idivl %ecx
+; AVX-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
+; AVX-NEXT:    vpextrd $3, %xmm0, %ecx
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    idivl %ecx
+; AVX-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %1 = sdiv <4 x i32> zeroinitializer, %x
+  ret <4 x i32> %1
+}
+
 ; TODO fold (sdiv x, x) -> 1
 define i32 @combine_sdiv_dupe(i32 %x) {
 ; CHECK-LABEL: combine_sdiv_dupe:
diff --git a/llvm/test/CodeGen/X86/combine-srem.ll b/llvm/test/CodeGen/X86/combine-srem.ll
index 0fe1aa2c88b..663d247c10a 100644
--- a/llvm/test/CodeGen/X86/combine-srem.ll
+++ b/llvm/test/CodeGen/X86/combine-srem.ll
@@ -100,6 +100,74 @@ define <4 x i32> @combine_vec_srem_by_minsigned(<4 x i32> %x) {
   ret <4 x i32> %1
 }
 
+; TODO fold (srem 0, x) -> 0
+define i32 @combine_srem_zero(i32 %x) {
+; CHECK-LABEL: combine_srem_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    xorl %edx, %edx
+; CHECK-NEXT:    idivl %edi
+; CHECK-NEXT:    movl %edx, %eax
+; CHECK-NEXT:    retq
+  %1 = srem i32 0, %x
+  ret i32 %1
+}
+
+define <4 x i32> @combine_vec_srem_zero(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_srem_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pextrd $1, %xmm0, %ecx
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    idivl %ecx
+; SSE-NEXT:    movl %edx, %ecx
+; SSE-NEXT:    movd %xmm0, %esi
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    idivl %esi
+; SSE-NEXT:    movd %edx, %xmm1
+; SSE-NEXT:    pinsrd $1, %ecx, %xmm1
+; SSE-NEXT:    pextrd $2, %xmm0, %ecx
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    idivl %ecx
+; SSE-NEXT:    pinsrd $2, %edx, %xmm1
+; SSE-NEXT:    pextrd $3, %xmm0, %ecx
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    idivl %ecx
+; SSE-NEXT:    pinsrd $3, %edx, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_srem_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpextrd $1, %xmm0, %ecx
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    idivl %ecx
+; AVX-NEXT:    movl %edx, %ecx
+; AVX-NEXT:    vmovd %xmm0, %esi
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    idivl %esi
+; AVX-NEXT:    vmovd %edx, %xmm1
+; AVX-NEXT:    vpinsrd $1, %ecx, %xmm1, %xmm1
+; AVX-NEXT:    vpextrd $2, %xmm0, %ecx
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    idivl %ecx
+; AVX-NEXT:    vpinsrd $2, %edx, %xmm1, %xmm1
+; AVX-NEXT:    vpextrd $3, %xmm0, %ecx
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    idivl %ecx
+; AVX-NEXT:    vpinsrd $3, %edx, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %1 = srem <4 x i32> zeroinitializer, %x
+  ret <4 x i32> %1
+}
+
 ; TODO fold (srem x, x) -> 0
 define i32 @combine_srem_dupe(i32 %x) {
 ; CHECK-LABEL: combine_srem_dupe:
diff --git a/llvm/test/CodeGen/X86/combine-udiv.ll b/llvm/test/CodeGen/X86/combine-udiv.ll
index 0b054c3bea5..17dd7a07870 100644
--- a/llvm/test/CodeGen/X86/combine-udiv.ll
+++ b/llvm/test/CodeGen/X86/combine-udiv.ll
@@ -76,6 +76,73 @@ define <4 x i32> @combine_vec_udiv_by_minsigned(<4 x i32> %x) {
   ret <4 x i32> %1
 }
 
+; TODO fold (udiv 0, x) -> 0
+define i32 @combine_udiv_zero(i32 %x) {
+; CHECK-LABEL: combine_udiv_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    xorl %edx, %edx
+; CHECK-NEXT:    divl %edi
+; CHECK-NEXT:    retq
+  %1 = udiv i32 0, %x
+  ret i32 %1
+}
+
+define <4 x i32> @combine_vec_udiv_zero(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_udiv_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pextrd $1, %xmm0, %ecx
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    divl %ecx
+; SSE-NEXT:    movl %eax, %ecx
+; SSE-NEXT:    movd %xmm0, %esi
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    divl %esi
+; SSE-NEXT:    movd %eax, %xmm1
+; SSE-NEXT:    pinsrd $1, %ecx, %xmm1
+; SSE-NEXT:    pextrd $2, %xmm0, %ecx
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    divl %ecx
+; SSE-NEXT:    pinsrd $2, %eax, %xmm1
+; SSE-NEXT:    pextrd $3, %xmm0, %ecx
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    divl %ecx
+; SSE-NEXT:    pinsrd $3, %eax, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_udiv_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpextrd $1, %xmm0, %ecx
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    divl %ecx
+; AVX-NEXT:    movl %eax, %ecx
+; AVX-NEXT:    vmovd %xmm0, %esi
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    divl %esi
+; AVX-NEXT:    vmovd %eax, %xmm1
+; AVX-NEXT:    vpinsrd $1, %ecx, %xmm1, %xmm1
+; AVX-NEXT:    vpextrd $2, %xmm0, %ecx
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    divl %ecx
+; AVX-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
+; AVX-NEXT:    vpextrd $3, %xmm0, %ecx
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    divl %ecx
+; AVX-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %1 = udiv <4 x i32> zeroinitializer, %x
+  ret <4 x i32> %1
+}
+
 ; TODO fold (udiv x, x) -> 1
 define i32 @combine_udiv_dupe(i32 %x) {
 ; CHECK-LABEL: combine_udiv_dupe:
diff --git a/llvm/test/CodeGen/X86/combine-urem.ll b/llvm/test/CodeGen/X86/combine-urem.ll
index 2c625f4bf68..203a7b97add 100644
--- a/llvm/test/CodeGen/X86/combine-urem.ll
+++ b/llvm/test/CodeGen/X86/combine-urem.ll
@@ -89,6 +89,74 @@ define <4 x i32> @combine_vec_urem_by_minsigned(<4 x i32> %x) {
   ret <4 x i32> %1
 }
 
+; TODO fold (urem 0, x) -> 0
+define i32 @combine_urem_zero(i32 %x) {
+; CHECK-LABEL: combine_urem_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    xorl %edx, %edx
+; CHECK-NEXT:    divl %edi
+; CHECK-NEXT:    movl %edx, %eax
+; CHECK-NEXT:    retq
+  %1 = urem i32 0, %x
+  ret i32 %1
+}
+
+define <4 x i32> @combine_vec_urem_zero(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_urem_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pextrd $1, %xmm0, %ecx
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    divl %ecx
+; SSE-NEXT:    movl %edx, %ecx
+; SSE-NEXT:    movd %xmm0, %esi
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    divl %esi
+; SSE-NEXT:    movd %edx, %xmm1
+; SSE-NEXT:    pinsrd $1, %ecx, %xmm1
+; SSE-NEXT:    pextrd $2, %xmm0, %ecx
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    divl %ecx
+; SSE-NEXT:    pinsrd $2, %edx, %xmm1
+; SSE-NEXT:    pextrd $3, %xmm0, %ecx
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    divl %ecx
+; SSE-NEXT:    pinsrd $3, %edx, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_urem_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpextrd $1, %xmm0, %ecx
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    divl %ecx
+; AVX-NEXT:    movl %edx, %ecx
+; AVX-NEXT:    vmovd %xmm0, %esi
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    divl %esi
+; AVX-NEXT:    vmovd %edx, %xmm1
+; AVX-NEXT:    vpinsrd $1, %ecx, %xmm1, %xmm1
+; AVX-NEXT:    vpextrd $2, %xmm0, %ecx
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    divl %ecx
+; AVX-NEXT:    vpinsrd $2, %edx, %xmm1, %xmm1
+; AVX-NEXT:    vpextrd $3, %xmm0, %ecx
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    xorl %edx, %edx
+; AVX-NEXT:    divl %ecx
+; AVX-NEXT:    vpinsrd $3, %edx, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %1 = urem <4 x i32> zeroinitializer, %x
+  ret <4 x i32> %1
+}
+
 ; TODO fold (urem x, x) -> 0
 define i32 @combine_urem_dupe(i32 %x) {
 ; CHECK-LABEL: combine_urem_dupe:
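
Each TODO above marks a combine that is not yet performed: a constant-zero dividend still goes through a full idivl/divl sequence, and the vector cases scalarize into four such divisions. The fold is sound because 0/x and 0%x are 0 for every non-zero x, and x == 0 is already undefined behavior for division, so 0 is a valid result for all defined inputs. A minimal sketch of what the scalar sdiv test could check once the fold lands, assuming the usual return-zero codegen (these CHECK lines are hypothetical, not output from this commit):

; Hypothetical post-fold check (assumption): the divide disappears and
; the function just returns 0 in %eax.
define i32 @combine_sdiv_zero(i32 %x) {
; CHECK-LABEL: combine_sdiv_zero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  %1 = sdiv i32 0, %x
  ret i32 %1
}

The vector variants would presumably collapse the same way, to a single register-zeroing idiom (e.g. xorps/vxorps of %xmm0) in place of the four scalarized divisions.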

