diff options
author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-11-06 16:36:29 +0000 |
---|---|---|
committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-11-06 16:36:29 +0000 |
commit | 3ac353cb51570049e0c9e0b174fb8dc78ed30473 (patch) | |
tree | dc873d0750f59dc1c48abd4b0be109dbd42ba124 /llvm/test/CodeGen/X86/known-bits-vector.ll | |
parent | 46de41330ca51f665f422dbcb7ea48076cdc345f (diff) | |
download | bcm5719-llvm-3ac353cb51570049e0c9e0b174fb8dc78ed30473.tar.gz bcm5719-llvm-3ac353cb51570049e0c9e0b174fb8dc78ed30473.zip |
[X86] Add knownbits vector xor test
In preparation for demandedelts support
llvm-svn: 286074
Diffstat (limited to 'llvm/test/CodeGen/X86/known-bits-vector.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/known-bits-vector.ll | 31 |
1 file changed, 31 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/known-bits-vector.ll b/llvm/test/CodeGen/X86/known-bits-vector.ll index 044070e0e90..0fd2899c130 100644 --- a/llvm/test/CodeGen/X86/known-bits-vector.ll +++ b/llvm/test/CodeGen/X86/known-bits-vector.ll @@ -112,3 +112,34 @@ define <4 x float> @knownbits_mask_or_shuffle_uitofp(<4 x i32> %a0) nounwind { %4 = uitofp <4 x i32> %3 to <4 x float> ret <4 x float> %4 } + +define <4 x float> @knownbits_mask_xor_shuffle_uitofp(<4 x i32> %a0) nounwind { +; X32-LABEL: knownbits_mask_xor_shuffle_uitofp: +; X32: # BB#0: +; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X32-NEXT: vpxor {{\.LCPI.*}}, %xmm0, %xmm0 +; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3] +; X32-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7] +; X32-NEXT: vpsrld $16, %xmm0, %xmm0 +; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7] +; X32-NEXT: vaddps {{\.LCPI.*}}, %xmm0, %xmm0 +; X32-NEXT: vaddps %xmm0, %xmm1, %xmm0 +; X32-NEXT: retl +; +; X64-LABEL: knownbits_mask_xor_shuffle_uitofp: +; X64: # BB#0: +; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; X64-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0 +; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3] +; X64-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7] +; X64-NEXT: vpsrld $16, %xmm0, %xmm0 +; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7] +; X64-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0 +; X64-NEXT: vaddps %xmm0, %xmm1, %xmm0 +; X64-NEXT: retq + %1 = and <4 x i32> %a0, <i32 -1, i32 -1, i32 255, i32 4085> + %2 = xor <4 x i32> %1, <i32 65535, i32 65535, i32 65535, i32 65535> + %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 2, i32 2, i32 3, i32 3> + %4 = uitofp <4 x i32> %3 to <4 x float> + ret <4 x float> %4 +} |