| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-01-14 15:08:51 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-01-14 15:08:51 +0000 |
| commit | 8c2e9e1fef3b9f7cf82e1463ae46a460c2bb73f5 (patch) | |
| tree | f9b7e90274eeb69c70e7638ba5d1fb92bc9b1ed3 /llvm/test/CodeGen/X86 | |
| parent | 7fc6882374f805ed780f07001e317c8b0dc7711a (diff) | |
[X86] Add sub saturation constant folding and self tests.
llvm-svn: 351071
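
For reference, the folds exercised by the new tests follow the documented semantics of the llvm.ssub.sat / llvm.usub.sat intrinsics: the exact difference is clamped to the signed range for ssub_sat and floored at zero for usub_sat. Below is a minimal C sketch of the scalar i32 semantics, assuming nothing beyond that description; the helper names are invented and this is not LLVM's constant-folding code.

```c
#include <stdint.h>

/* Reference semantics only: a sketch of the behaviour the tested folds rely on,
 * not LLVM's implementation. Helper names are illustrative. */

/* llvm.ssub.sat.i32: signed subtraction clamped to [INT32_MIN, INT32_MAX]. */
static int32_t ssub_sat_i32(int32_t a, int32_t b) {
    int64_t r = (int64_t)a - (int64_t)b;  /* exact difference in a wider type */
    if (r > INT32_MAX) return INT32_MAX;  /* saturate on positive overflow */
    if (r < INT32_MIN) return INT32_MIN;  /* saturate on negative overflow */
    return (int32_t)r;
}

/* llvm.usub.sat.i32: unsigned subtraction clamped at 0. */
static uint32_t usub_sat_i32(uint32_t a, uint32_t b) {
    return a > b ? a - b : 0u;
}
```

Under these semantics sub_sat(x, 0) == x and sub_sat(x, x) == 0 for both flavours, which is what the new combine_zero_* and combine_self_* tests pin down; the combine_constfold_* tests capture the current codegen for fully constant calls.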
Diffstat (limited to 'llvm/test/CodeGen/X86')
| -rw-r--r-- | llvm/test/CodeGen/X86/combine-sub-ssat.ll | 81 |
| -rw-r--r-- | llvm/test/CodeGen/X86/combine-sub-usat.ll | 73 |
2 files changed, 152 insertions, 2 deletions
```diff
diff --git a/llvm/test/CodeGen/X86/combine-sub-ssat.ll b/llvm/test/CodeGen/X86/combine-sub-ssat.ll
index eb146df5eb7..8e8b8091906 100644
--- a/llvm/test/CodeGen/X86/combine-sub-ssat.ll
+++ b/llvm/test/CodeGen/X86/combine-sub-ssat.ll
@@ -35,7 +35,56 @@ define <8 x i16> @combine_undef_v8i16(<8 x i16> %a0) {
   ret <8 x i16> %res
 }
 
-; fold (ssub_sat c, 0) -> x
+; fold (ssub_sat c1, c2) -> c3
+define i32 @combine_constfold_i32() {
+; CHECK-LABEL: combine_constfold_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl $100, %eax
+; CHECK-NEXT:    xorl %ecx, %ecx
+; CHECK-NEXT:    movl $100, %edx
+; CHECK-NEXT:    subl $2147483647, %edx # imm = 0x7FFFFFFF
+; CHECK-NEXT:    setns %cl
+; CHECK-NEXT:    addl $2147483647, %ecx # imm = 0x7FFFFFFF
+; CHECK-NEXT:    subl $2147483647, %eax # imm = 0x7FFFFFFF
+; CHECK-NEXT:    cmovol %ecx, %eax
+; CHECK-NEXT:    retq
+  %res = call i32 @llvm.ssub.sat.i32(i32 100, i32 2147483647)
+  ret i32 %res
+}
+
+define <8 x i16> @combine_constfold_v8i16() {
+; SSE-LABEL: combine_constfold_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [0,1,255,65535,65535,65281,32776,1]
+; SSE-NEXT:    psubsw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_constfold_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,255,65535,65535,65281,32776,1]
+; AVX-NEXT:    vpsubsw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> <i16 0, i16 1, i16 255, i16 65535, i16 -1, i16 -255, i16 -32760, i16 1>, <8 x i16> <i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535, i16 -10, i16 65535>)
+  ret <8 x i16> %res
+}
+
+define <8 x i16> @combine_constfold_undef_v8i16() {
+; SSE-LABEL: combine_constfold_undef_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa {{.*#+}} xmm0 = <u,1,u,65535,65535,65281,32776,1>
+; SSE-NEXT:    psubsw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_constfold_undef_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,1,u,65535,65535,65281,32776,1>
+; AVX-NEXT:    vpsubsw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> <i16 undef, i16 1, i16 undef, i16 65535, i16 -1, i16 -255, i16 -32760, i16 1>, <8 x i16> <i16 1, i16 undef, i16 undef, i16 65535, i16 1, i16 65535, i16 -10, i16 65535>)
+  ret <8 x i16> %res
+}
+
+; fold (ssub_sat x, 0) -> x
 define i32 @combine_zero_i32(i32 %a0) {
 ; CHECK-LABEL: combine_zero_i32:
 ; CHECK:       # %bb.0:
@@ -52,3 +101,33 @@ define <8 x i16> @combine_zero_v8i16(<8 x i16> %a0) {
   %1 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer)
   ret <8 x i16> %1
 }
+
+; fold (ssub_sat x, x) -> 0
+define i32 @combine_self_i32(i32 %a0) {
+; CHECK-LABEL: combine_self_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    movl %edi, %ecx
+; CHECK-NEXT:    subl %edi, %ecx
+; CHECK-NEXT:    setns %al
+; CHECK-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
+; CHECK-NEXT:    subl %edi, %edi
+; CHECK-NEXT:    cmovnol %edi, %eax
+; CHECK-NEXT:    retq
+  %1 = call i32 @llvm.ssub.sat.i32(i32 %a0, i32 %a0)
+  ret i32 %1
+}
+
+define <8 x i16> @combine_self_v8i16(<8 x i16> %a0) {
+; SSE-LABEL: combine_self_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubsw %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_self_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubsw %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a0, <8 x i16> %a0)
+  ret <8 x i16> %1
+}
diff --git a/llvm/test/CodeGen/X86/combine-sub-usat.ll b/llvm/test/CodeGen/X86/combine-sub-usat.ll
index 03c6e8840a8..2de91e5ee4e 100644
--- a/llvm/test/CodeGen/X86/combine-sub-usat.ll
+++ b/llvm/test/CodeGen/X86/combine-sub-usat.ll
@@ -35,7 +35,52 @@ define <8 x i16> @combine_undef_v8i16(<8 x i16> %a0) {
   ret <8 x i16> %res
 }
 
-; fold (usub_sat c, 0) -> x
+; fold (usub_sat c1, c2) -> c3
+define i32 @combine_constfold_i32() {
+; CHECK-LABEL: combine_constfold_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %ecx, %ecx
+; CHECK-NEXT:    movl $100, %eax
+; CHECK-NEXT:    subl $-1, %eax
+; CHECK-NEXT:    cmovbl %ecx, %eax
+; CHECK-NEXT:    retq
+  %res = call i32 @llvm.usub.sat.i32(i32 100, i32 4294967295)
+  ret i32 %res
+}
+
+define <8 x i16> @combine_constfold_v8i16() {
+; SSE-LABEL: combine_constfold_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [0,1,255,65535,65535,65281,1,1]
+; SSE-NEXT:    psubusw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_constfold_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,255,65535,65535,65281,1,1]
+; AVX-NEXT:    vpsubusw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> <i16 0, i16 1, i16 255, i16 65535, i16 -1, i16 -255, i16 -65535, i16 1>, <8 x i16> <i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535>)
+  ret <8 x i16> %res
+}
+
+define <8 x i16> @combine_constfold_undef_v8i16() {
+; SSE-LABEL: combine_constfold_undef_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa {{.*#+}} xmm0 = <u,1,u,65535,65535,65281,1,1>
+; SSE-NEXT:    psubusw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_constfold_undef_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,1,u,65535,65535,65281,1,1>
+; AVX-NEXT:    vpsubusw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> <i16 undef, i16 1, i16 undef, i16 65535, i16 -1, i16 -255, i16 -65535, i16 1>, <8 x i16> <i16 1, i16 undef, i16 undef, i16 65535, i16 1, i16 65535, i16 1, i16 65535>)
+  ret <8 x i16> %res
+}
+
+; fold (usub_sat x, 0) -> x
 define i32 @combine_zero_i32(i32 %a0) {
 ; CHECK-LABEL: combine_zero_i32:
 ; CHECK:       # %bb.0:
@@ -52,3 +97,29 @@ define <8 x i16> @combine_zero_v8i16(<8 x i16> %a0) {
   %1 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer)
   ret <8 x i16> %1
 }
+
+; fold (usub_sat x, x) -> 0
+define i32 @combine_self_i32(i32 %a0) {
+; CHECK-LABEL: combine_self_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    subl %edi, %edi
+; CHECK-NEXT:    cmovael %edi, %eax
+; CHECK-NEXT:    retq
+  %1 = call i32 @llvm.usub.sat.i32(i32 %a0, i32 %a0)
+  ret i32 %1
+}
+
+define <8 x i16> @combine_self_v8i16(<8 x i16> %a0) {
+; SSE-LABEL: combine_self_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubusw %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_self_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubusw %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a0, <8 x i16> %a0)
+  ret <8 x i16> %1
+}
```
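
For the two scalar combine_constfold_i32 tests above, the values a constant fold should eventually produce can be worked out directly. This is plain arithmetic under the intrinsic semantics, not something the committed CHECK lines assert; as checked in they still show the unfolded instruction sequences.

```c
#include <stdint.h>
#include <stdio.h>

/* Worked values for the scalar combine_constfold_i32 tests (illustrative,
 * not taken from the commit). */
int main(void) {
    /* ssub.sat.i32(100, 2147483647): the exact difference is -2147483547,
     * which is still >= INT32_MIN (-2147483648), so no saturation occurs. */
    int64_t s = (int64_t)100 - (int64_t)2147483647;
    printf("ssub.sat.i32(100, 2147483647)  = %lld\n", (long long)s);

    /* usub.sat.i32(100, 4294967295): 100 < 4294967295, so the result clamps
     * to the unsigned minimum, 0. */
    uint32_t a = 100u, b = 4294967295u;
    printf("usub.sat.i32(100, 4294967295) = %u\n", a > b ? a - b : 0u);
    return 0;
}
```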

