author    Nirav Dave <niravd@google.com>    2016-12-09 16:15:12 +0000
committer Nirav Dave <niravd@google.com>    2016-12-09 16:15:12 +0000
commit    fd51ff4fd810ea3eeaad09ae27546989c0ac4626 (patch)
tree      3395dde7ef23eac6ddadd127e778a221814ddcd6 /llvm/test/CodeGen/X86
parent    b9eb99f57070efd04c7a87ab875d4bc960b85730 (diff)
In visitSTORE, always use FindBetterChain, rather than only when UseAA is enabled.
Retrying after fixing an overly aggressive load-store forwarding optimization.
Simplify Consecutive Merge Store Candidate Search
Now that address aliasing is much less conservative, push through a
simplified store-merging search that only checks for parallel stores
through the chain subgraph. This is cleaner, as it separates the
handling of non-interfering loads/stores from the store-merging logic.
When merging stores, search up the chain through a single load, and
find all possible stores by looking down through a load and a
TokenFactor to all stores visited. This improves the quality of the
output SelectionDAG and generally the output CodeGen (with some
exceptions).
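To illustrate the kind of pattern the simplified search handles, here
is a minimal hypothetical input (not a test from this commit): two
adjacent constant stores separated on the chain by an independent
load. Walking up from the second store and back down through the
TokenFactor finds both stores as parallel merge candidates, so one
would expect them to merge into a single 64-bit store:

; Hypothetical example; the noalias hints mean the intervening load
; does not interfere, so the two i32 stores are parallel candidates.
define i32 @merge_across_load(i32* noalias %p, i32* noalias %q) {
  store i32 1, i32* %p, align 8
  %v = load i32, i32* %q, align 4        ; intervening, non-interfering load
  %p1 = getelementptr i32, i32* %p, i64 1
  store i32 2, i32* %p1, align 4
  ret i32 %v
}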
Additional Minor Changes:
1. Finish removing the unused AliasLoad code.
2. Unify the chain aggregation in the merged stores across
code paths.
3. Re-add the Store node to the worklist after calling
SimplifyDemandedBits.
4. Increase GatherAllAliasesMaxDepth from 6 to 18. That number is
arbitrary, but it seemed sufficient not to cause regressions in
tests.
This finishes the change that Matt Arsenault started in r246307 and
completes jyknight's original patch.
Many tests required changes because memory operations are now
reorderable; some tests that relied on the old ordering were changed
to use volatile memory operations.
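As a sketch of that kind of change (hypothetical code, modeled on the
copy-eflags.ll update in the diff below): a test that previously
checked the exact order of plain loads and stores now marks them
volatile, since volatile operations cannot be reordered relative to
one another:

; Plain ops may now be reordered or merged; volatile pins the order
; the test depends on.
define void @keep_order(i8* %a, i32* %c) {
  %v = load volatile i8, i8* %a
  %inc = add i8 %v, 1
  store volatile i8 %inc, i8* %a
  %w = load volatile i32, i32* %c
  %inc1 = add nsw i32 %w, 1
  store volatile i32 %inc1, i32* %c
  ret void
}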
Noteworthy tests:
CodeGen/AArch64/argument-blocks.ll -
It's not entirely clear what the test_varargs_stackalign test is
supposed to be asserting, but the new code looks right.
CodeGen/AArch64/arm64-memset-inline.ll -
CodeGen/AArch64/arm64-stur.ll -
CodeGen/ARM/memset-inline.ll -
The backend now generates *worse* code due to store merging
succeeding, as we do not lower a 16-byte constant-zero store efficiently.
CodeGen/AArch64/merge-store.ll -
Improved, but there still seems to be an extraneous vector insert
from an element to itself?
CodeGen/PowerPC/ppc64-align-long-double.ll -
Worse code emitted in this case, due to the improved store->load
forwarding.
CodeGen/X86/dag-merge-fast-accesses.ll -
CodeGen/X86/MergeConsecutiveStores.ll -
CodeGen/X86/stores-merging.ll -
CodeGen/Mips/load-store-left-right.ll -
Restored correct merging of non-aligned stores.
CodeGen/AMDGPU/promote-alloca-stored-pointer-value.ll -
Improved. Correctly merges buffer_store_dword calls.
CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll -
Improved. Sidesteps loading a stored value and
merges two stores.
CodeGen/X86/pr18023.ll -
This test has been removed, as it was asserting incorrect
behavior: non-volatile stores *CAN* be moved past volatile loads,
and now are (see the sketch after this list).
CodeGen/X86/vector-idiv.ll -
CodeGen/X86/vector-lzcnt-128.ll -
It's basically impossible to tell what these tests are actually
testing, but it looks like the code got better due to the memory
operations being recognized as non-aliasing.
CodeGen/X86/win32-eh.ll -
Both loads of the security cookie are now merged.
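Returning to the pr18023.ll entry above, a minimal sketch of the
semantics involved (hypothetical code, mirroring the pattern of the
removed test): the volatile load of %b is ordered only against other
volatile or potentially aliasing operations, so the two non-volatile
stores to the unrelated %p may legally be moved past it and collapsed:

; The volatile load does not pin the surrounding stores to %p.
define void @move_past_volatile(i32* noalias %p, i32* noalias %b) {
  store i32 1, i32* %p, align 4
  %v = load volatile i32, i32* %b, align 4
  store i32 0, i32* %p, align 4
  ret void
}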
Reviewers: arsenm, hfinkel, tstellarAMD, jyknight, nhaehnle
Subscribers: wdng, nhaehnle, nemanjai, arsenm, weimingz, niravd, RKSimon, aemerson, qcolombet, dsanders, resistor, tstellarAMD, t.p.northover, spatel
Differential Revision: https://reviews.llvm.org/D14834
llvm-svn: 289221
Diffstat (limited to 'llvm/test/CodeGen/X86')
25 files changed, 1054 insertions, 1255 deletions
diff --git a/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll b/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll index b7380196bd9..54a7763eb69 100644 --- a/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll +++ b/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -combiner-alias-analysis -march=x86-64 -mcpu=core2 | FileCheck %s +; RUN: llc < %s -march=x86-64 -mcpu=core2 | FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" target triple = "x86_64-apple-darwin10.4" diff --git a/llvm/test/CodeGen/X86/2012-11-28-merge-store-alias.ll b/llvm/test/CodeGen/X86/2012-11-28-merge-store-alias.ll index c16deeff3d9..2e8206a7591 100644 --- a/llvm/test/CodeGen/X86/2012-11-28-merge-store-alias.ll +++ b/llvm/test/CodeGen/X86/2012-11-28-merge-store-alias.ll @@ -3,8 +3,8 @@ ; CHECK: merge_stores_can ; CHECK: callq foo ; CHECK: xorps %xmm0, %xmm0 -; CHECK-NEXT: movl 36(%rsp), %ebp ; CHECK-NEXT: movups %xmm0 +; CHECK-NEXT: movl 36(%rsp), %ebp ; CHECK: callq foo ; CHECK: ret declare i32 @foo([10 x i32]* ) diff --git a/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll b/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll index b50253bf2b0..ee7ae24e288 100644 --- a/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll +++ b/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll @@ -292,16 +292,12 @@ block4: ; preds = %4, %.lr.ph ret void } -;; On x86, even unaligned copies should be merged to vector ops. -;; TODO: however, this cannot happen at the moment, due to brokenness -;; in MergeConsecutiveStores. See UseAA FIXME in DAGCombiner.cpp -;; visitSTORE. - +;; On x86, even unaligned copies can be merged to vector ops. 
; CHECK-LABEL: merge_loads_no_align: ; load: -; CHECK-NOT: vmovups ;; TODO +; CHECK: vmovups ; store: -; CHECK-NOT: vmovups ;; TODO +; CHECK: vmovups ; CHECK: ret define void @merge_loads_no_align(i32 %count, %struct.B* noalias nocapture %q, %struct.B* noalias nocapture %p) nounwind uwtable noinline ssp { %a1 = icmp sgt i32 %count, 0 @@ -549,8 +545,8 @@ define void @merge_vec_element_and_scalar_load([6 x i64]* %array) { ; CHECK-LABEL: merge_vec_element_and_scalar_load ; CHECK: movq (%rdi), %rax +; CHECK-NEXT: movq 8(%rdi), %rcx ; CHECK-NEXT: movq %rax, 32(%rdi) -; CHECK-NEXT: movq 8(%rdi), %rax -; CHECK-NEXT: movq %rax, 40(%rdi) +; CHECK-NEXT: movq %rcx, 40(%rdi) ; CHECK-NEXT: retq } diff --git a/llvm/test/CodeGen/X86/avx512-mask-op.ll b/llvm/test/CodeGen/X86/avx512-mask-op.ll index 4bfdd711deb..6ea64774528 100644 --- a/llvm/test/CodeGen/X86/avx512-mask-op.ll +++ b/llvm/test/CodeGen/X86/avx512-mask-op.ll @@ -1173,10 +1173,6 @@ define void @ktest_2(<32 x float> %in, float * %base) { ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2 ; KNL-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 -; KNL-NEXT: vpsllw $7, %ymm2, %ymm2 -; KNL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 -; KNL-NEXT: vpxor %ymm3, %ymm3, %ymm3 -; KNL-NEXT: vpcmpgtb %ymm2, %ymm3, %ymm2 ; KNL-NEXT: vmovups 4(%rdi), %zmm3 {%k2} {z} ; KNL-NEXT: vmovups 68(%rdi), %zmm4 {%k1} {z} ; KNL-NEXT: vcmpltps %zmm4, %zmm1, %k0 diff --git a/llvm/test/CodeGen/X86/chain_order.ll b/llvm/test/CodeGen/X86/chain_order.ll index 8c3aa6e1515..cc48e5b6149 100644 --- a/llvm/test/CodeGen/X86/chain_order.ll +++ b/llvm/test/CodeGen/X86/chain_order.ll @@ -11,9 +11,9 @@ define void @cftx020(double* nocapture %a) { ; CHECK-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] ; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ; CHECK-NEXT: vmovupd (%rdi), %xmm1 -; CHECK-NEXT: vsubpd 16(%rdi), %xmm1, %xmm1 ; CHECK-NEXT: vmovupd %xmm0, (%rdi) -; CHECK-NEXT: vmovupd %xmm1, 16(%rdi) +; CHECK-NEXT: vsubpd 16(%rdi), %xmm1, %xmm0 +; CHECK-NEXT: vmovupd %xmm0, 16(%rdi) ; CHECK-NEXT: retq entry: %0 = load double, double* %a, align 8 diff --git a/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll b/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll index 693bf2e17d5..f8833b733af 100644 --- a/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll +++ b/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll @@ -151,47 +151,47 @@ define <16 x i8> @_clearupper16xi8a(<16 x i8>) nounwind { ; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) ; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE-NEXT: movd %eax, %xmm0 -; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d -; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi -; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d -; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi ; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE-NEXT: movd %eax, %xmm1 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE-NEXT: movd %esi, %xmm0 ; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi -; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE-NEXT: movd %ecx, %xmm2 +; SSE-NEXT: movd %eax, %xmm0 +; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE-NEXT: movd %eax, %xmm2 ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = 
xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE-NEXT: movd %edx, %xmm0 -; SSE-NEXT: movd %esi, %xmm1 -; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE-NEXT: movd %edi, %xmm0 -; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE-NEXT: movd %edx, %xmm3 +; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE-NEXT: movd %eax, %xmm0 +; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE-NEXT: movd %eax, %xmm3 ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] -; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] -; SSE-NEXT: movd %r9d, %xmm0 +; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE-NEXT: movd %eax, %xmm0 +; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE-NEXT: movd %eax, %xmm1 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE-NEXT: movd %r8d, %xmm0 -; SSE-NEXT: movd %ecx, %xmm2 -; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] +; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] ; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero -; SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero -; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero +; SSE-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero ; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] +; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE-NEXT: movd %eax, %xmm2 +; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE-NEXT: movd %eax, %xmm3 +; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] +; SSE-NEXT: 
movzbl -{{[0-9]+}}(%rsp), %eax +; SSE-NEXT: movd %eax, %xmm2 +; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE-NEXT: movd %eax, %xmm4 +; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7] +; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] ; SSE-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/combiner-aa-0.ll b/llvm/test/CodeGen/X86/combiner-aa-0.ll deleted file mode 100644 index 403059d90ab..00000000000 --- a/llvm/test/CodeGen/X86/combiner-aa-0.ll +++ /dev/null @@ -1,20 +0,0 @@ -; RUN: llc < %s -march=x86-64 -combiner-global-alias-analysis -combiner-alias-analysis - -target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128" - %struct.Hash_Key = type { [4 x i32], i32 } -@g_flipV_hashkey = external global %struct.Hash_Key, align 16 ; <%struct.Hash_Key*> [#uses=1] - -define void @foo() nounwind { - %t0 = load i32, i32* undef, align 16 ; <i32> [#uses=1] - %t1 = load i32, i32* null, align 4 ; <i32> [#uses=1] - %t2 = srem i32 %t0, 32 ; <i32> [#uses=1] - %t3 = shl i32 1, %t2 ; <i32> [#uses=1] - %t4 = xor i32 %t3, %t1 ; <i32> [#uses=1] - store i32 %t4, i32* null, align 4 - %t5 = getelementptr %struct.Hash_Key, %struct.Hash_Key* @g_flipV_hashkey, i64 0, i32 0, i64 0 ; <i32*> [#uses=2] - %t6 = load i32, i32* %t5, align 4 ; <i32> [#uses=1] - %t7 = shl i32 1, undef ; <i32> [#uses=1] - %t8 = xor i32 %t7, %t6 ; <i32> [#uses=1] - store i32 %t8, i32* %t5, align 4 - unreachable -} diff --git a/llvm/test/CodeGen/X86/combiner-aa-1.ll b/llvm/test/CodeGen/X86/combiner-aa-1.ll deleted file mode 100644 index cc3e5ca1260..00000000000 --- a/llvm/test/CodeGen/X86/combiner-aa-1.ll +++ /dev/null @@ -1,23 +0,0 @@ -; RUN: llc < %s --combiner-alias-analysis --combiner-global-alias-analysis -; PR4880 - -target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32" -target triple = "i386-pc-linux-gnu" - -%struct.alst_node = type { %struct.node } -%struct.arg_node = type { %struct.node, i8*, %struct.alst_node* } -%struct.arglst_node = type { %struct.alst_node, %struct.arg_node*, %struct.arglst_node* } -%struct.lam_node = type { %struct.alst_node, %struct.arg_node*, %struct.alst_node* } -%struct.node = type { i32 (...)**, %struct.node* } - -define i32 @._ZN8lam_node18resolve_name_clashEP8arg_nodeP9alst_node._ZNK8lam_nodeeqERK8exp_node._ZN11arglst_nodeD0Ev(%struct.lam_node* %this.this, %struct.arg_node* %outer_arg, %struct.alst_node* %env.cmp, %struct.arglst_node* %this, i32 %functionID) { -comb_entry: - %.SV59 = alloca %struct.node* ; 
<%struct.node**> [#uses=1] - %0 = load i32 (...)**, i32 (...)*** null, align 4 ; <i32 (...)**> [#uses=1] - %1 = getelementptr inbounds i32 (...)*, i32 (...)** %0, i32 3 ; <i32 (...)**> [#uses=1] - %2 = load i32 (...)*, i32 (...)** %1, align 4 ; <i32 (...)*> [#uses=1] - store %struct.node* undef, %struct.node** %.SV59 - %3 = bitcast i32 (...)* %2 to i32 (%struct.node*)* ; <i32 (%struct.node*)*> [#uses=1] - %4 = tail call i32 %3(%struct.node* undef) ; <i32> [#uses=0] - unreachable -} diff --git a/llvm/test/CodeGen/X86/copy-eflags.ll b/llvm/test/CodeGen/X86/copy-eflags.ll index 796c1ecd8c7..d98d8a7839b 100644 --- a/llvm/test/CodeGen/X86/copy-eflags.ll +++ b/llvm/test/CodeGen/X86/copy-eflags.ll @@ -9,19 +9,22 @@ target triple = "i686-unknown-linux-gnu" @.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1 ; CHECK-LABEL: func: -; This tests whether eax is properly saved/restored around the lahf/sahf -; instruction sequences. +; This tests whether eax is properly saved/restored around the +; lahf/sahf instruction sequences. We make mem op volatile to prevent +; their reordering to avoid spills. + + define i32 @func() { entry: %bval = load i8, i8* @b %inc = add i8 %bval, 1 - store i8 %inc, i8* @b - %cval = load i32, i32* @c + store volatile i8 %inc, i8* @b + %cval = load volatile i32, i32* @c %inc1 = add nsw i32 %cval, 1 - store i32 %inc1, i32* @c - %aval = load i8, i8* @a + store volatile i32 %inc1, i32* @c + %aval = load volatile i8, i8* @a %inc2 = add i8 %aval, 1 - store i8 %inc2, i8* @a + store volatile i8 %inc2, i8* @a ; Copy flags produced by the incb of %inc1 to a register, need to save+restore ; eax around it. The flags will be reused by %tobool. ; CHECK: pushl %eax diff --git a/llvm/test/CodeGen/X86/dag-merge-fast-accesses.ll b/llvm/test/CodeGen/X86/dag-merge-fast-accesses.ll index 867881d83d3..e5dfccb278c 100644 --- a/llvm/test/CodeGen/X86/dag-merge-fast-accesses.ll +++ b/llvm/test/CodeGen/X86/dag-merge-fast-accesses.ll @@ -51,19 +51,11 @@ define void @merge_vec_element_store(<4 x double> %v, double* %ptr) { } -;; TODO: FAST *should* be: -;; movups (%rdi), %xmm0 -;; movups %xmm0, 40(%rdi) -;; ..but is not currently. See the UseAA FIXME in DAGCombiner.cpp -;; visitSTORE. 
- define void @merge_vec_load_and_stores(i64 *%ptr) { ; FAST-LABEL: merge_vec_load_and_stores: ; FAST: # BB#0: -; FAST-NEXT: movq (%rdi), %rax -; FAST-NEXT: movq 8(%rdi), %rcx -; FAST-NEXT: movq %rax, 40(%rdi) -; FAST-NEXT: movq %rcx, 48(%rdi) +; FAST-NEXT: movups (%rdi), %xmm0 +; FAST-NEXT: movups %xmm0, 40(%rdi) ; FAST-NEXT: retq ; ; SLOW-LABEL: merge_vec_load_and_stores: diff --git a/llvm/test/CodeGen/X86/dont-trunc-store-double-to-float.ll b/llvm/test/CodeGen/X86/dont-trunc-store-double-to-float.ll index 8a334d21631..05245d0d9e1 100644 --- a/llvm/test/CodeGen/X86/dont-trunc-store-double-to-float.ll +++ b/llvm/test/CodeGen/X86/dont-trunc-store-double-to-float.ll @@ -1,9 +1,9 @@ ; RUN: llc -march=x86 < %s | FileCheck %s ; CHECK-LABEL: @bar -; CHECK: movl $1074339512, -; CHECK: movl $1374389535, -; CHECK: movl $1078523331, +; CHECK-DAG: movl $1074339512, +; CHECK-DAG: movl $1374389535, +; CHECK-DAG: movl $1078523331, define void @bar() unnamed_addr { entry-block: %a = alloca double diff --git a/llvm/test/CodeGen/X86/extractelement-legalization-store-ordering.ll b/llvm/test/CodeGen/X86/extractelement-legalization-store-ordering.ll index 946516c8a46..d472cdad0ea 100644 --- a/llvm/test/CodeGen/X86/extractelement-legalization-store-ordering.ll +++ b/llvm/test/CodeGen/X86/extractelement-legalization-store-ordering.ll @@ -18,13 +18,13 @@ target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128" ; CHECK-NEXT: movdqa %xmm0, (%edx) ; CHECK-NEXT: shll $4, %ecx ; CHECK-NEXT: movl (%ecx,%edx), %esi -; CHECK-NEXT: movl 12(%ecx,%edx), %edi +; CHECK-NEXT: movl 4(%ecx,%edx), %edi ; CHECK-NEXT: movl 8(%ecx,%edx), %ebx -; CHECK-NEXT: movl 4(%ecx,%edx), %edx +; CHECK-NEXT: movl 12(%ecx,%edx), %edx ; CHECK-NEXT: movl %esi, 12(%eax,%ecx) -; CHECK-NEXT: movl %edx, (%eax,%ecx) +; CHECK-NEXT: movl %edi, (%eax,%ecx) ; CHECK-NEXT: movl %ebx, 8(%eax,%ecx) -; CHECK-NEXT: movl %edi, 4(%eax,%ecx) +; CHECK-NEXT: movl %edx, 4(%eax,%ecx) ; CHECK-NEXT: popl %esi ; CHECK-NEXT: popl %edi ; CHECK-NEXT: popl %ebx diff --git a/llvm/test/CodeGen/X86/i256-add.ll b/llvm/test/CodeGen/X86/i256-add.ll index 6164d898ca1..d2821dc3dce 100644 --- a/llvm/test/CodeGen/X86/i256-add.ll +++ b/llvm/test/CodeGen/X86/i256-add.ll @@ -2,17 +2,17 @@ ; RUN: grep adcl %t | count 7 ; RUN: grep sbbl %t | count 7 -define void @add(i256* %p, i256* %q) nounwind { +define void @add(i256* %p, i256* %q, i256* %r) nounwind { %a = load i256, i256* %p %b = load i256, i256* %q %c = add i256 %a, %b - store i256 %c, i256* %p + store i256 %c, i256* %r ret void } -define void @sub(i256* %p, i256* %q) nounwind { +define void @sub(i256* %p, i256* %q, i256* %r) nounwind { %a = load i256, i256* %p %b = load i256, i256* %q %c = sub i256 %a, %b - store i256 %c, i256* %p + store i256 %c, i256* %r ret void } diff --git a/llvm/test/CodeGen/X86/i386-shrink-wrapping.ll b/llvm/test/CodeGen/X86/i386-shrink-wrapping.ll index 2c3e384b70a..d4e099ac655 100644 --- a/llvm/test/CodeGen/X86/i386-shrink-wrapping.ll +++ b/llvm/test/CodeGen/X86/i386-shrink-wrapping.ll @@ -55,8 +55,7 @@ target triple = "i386-apple-macosx10.5" ; ; CHECK-NEXT: L_e$non_lazy_ptr, [[E:%[a-z]+]] ; CHECK-NEXT: movb [[D]], ([[E]]) -; CHECK-NEXT: L_f$non_lazy_ptr, [[F:%[a-z]+]] -; CHECK-NEXT: movsbl ([[F]]), [[CONV:%[a-z]+]] +; CHECK-NEXT: movsbl ([[E]]), [[CONV:%[a-z]+]] ; CHECK-NEXT: movl $6, [[CONV:%[a-z]+]] ; The eflags is used in the next instruction. 
; If that instruction disappear, we are not exercising the bug @@ -96,7 +95,7 @@ for.end: ; preds = %for.cond.preheader %.b3 = load i1, i1* @d, align 1 %tmp2 = select i1 %.b3, i8 0, i8 6 store i8 %tmp2, i8* @e, align 1 - %tmp3 = load i8, i8* @f, align 1 + %tmp3 = load i8, i8* @e, align 1 %conv = sext i8 %tmp3 to i32 %add = add nsw i32 %conv, 1 %rem = srem i32 %tmp1, %add diff --git a/llvm/test/CodeGen/X86/live-range-nosubreg.ll b/llvm/test/CodeGen/X86/live-range-nosubreg.ll index f28d59237b4..899a375221c 100644 --- a/llvm/test/CodeGen/X86/live-range-nosubreg.ll +++ b/llvm/test/CodeGen/X86/live-range-nosubreg.ll @@ -1,7 +1,6 @@ -; RUN: llc -march=x86-64 < %s | FileCheck %s +; RUN: llc -march=x86-64 < %s -; Check for a sane output. This testcase used to crash. See PR29132. -; CHECK: leal -1 +; This testcase used to crash. See PR29132. target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll b/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll index 1f2bd4bb0dc..5f07aa80b45 100644 --- a/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll +++ b/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll @@ -1037,12 +1037,12 @@ define <2 x i64> @merge_2i64_i64_12_volatile(i64* %ptr) nounwind uwtable noinlin define <4 x float> @merge_4f32_f32_2345_volatile(float* %ptr) nounwind uwtable noinline ssp { ; SSE2-LABEL: merge_4f32_f32_2345_volatile: ; SSE2: # BB#0: -; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero -; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero ; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] +; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] ; SSE2-NEXT: retq ; @@ -1065,13 +1065,13 @@ define <4 x float> @merge_4f32_f32_2345_volatile(float* %ptr) nounwind uwtable n ; X32-SSE1-LABEL: merge_4f32_f32_2345_volatile: ; X32-SSE1: # BB#0: ; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero -; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero -; X32-SSE1-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero -; X32-SSE1-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero -; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] -; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; X32-SSE1-DAG: movss 8(%eax), %[[R0:xmm[0-3]]] # [[R0]] = mem[0],zero,zero,zero +; X32-SSE1-DAG: movss 12(%eax), %[[R1:xmm[0-3]]] # [[R1]] = mem[0],zero,zero,zero +; X32-SSE1-DAG: movss 16(%eax), %[[R2:xmm[0-3]]] # [[R2]] = mem[0],zero,zero,zero +; X32-SSE1-DAG: movss 20(%eax), %[[R3:xmm[0-3]]] # [[R3]] = mem[0],zero,zero,zero +; X32-SSE1-DAG: unpcklps %[[R2]], %[[R0]] # [[R0]] = [[R0]][0],[[R2]][0],[[R0]][1],[[R2]][1] +; X32-SSE1-DAG: unpcklps %[[R3]], %[[R1]] # [[R1]] = [[R1]][0],[[R3]][0],[[R1]][1],[[R3]][1] +; X32-SSE1-DAG: unpcklps %[[R1]], %[[R0]] # [[R0]] = [[R0]][0],[[R1]][0],[[R0]][1],[[R1]][1] ; X32-SSE1-NEXT: retl ; ; X32-SSE41-LABEL: merge_4f32_f32_2345_volatile: diff --git a/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll 
b/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll index 48a277a9c95..79bce603d12 100644 --- a/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll +++ b/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll @@ -682,10 +682,10 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile(i16* %ptr) nounwind ; AVX1: # BB#0: ; AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0 ; AVX1-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm1 -; AVX1-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1 ; AVX1-NEXT: vpinsrw $4, 24(%rdi), %xmm0, %xmm0 ; AVX1-NEXT: vpinsrw $6, 28(%rdi), %xmm0, %xmm0 ; AVX1-NEXT: vpinsrw $7, 30(%rdi), %xmm0, %xmm0 +; AVX1-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; AVX1-NEXT: retq ; @@ -693,10 +693,10 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile(i16* %ptr) nounwind ; AVX2: # BB#0: ; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0 ; AVX2-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm1 -; AVX2-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1 ; AVX2-NEXT: vpinsrw $4, 24(%rdi), %xmm0, %xmm0 ; AVX2-NEXT: vpinsrw $6, 28(%rdi), %xmm0, %xmm0 ; AVX2-NEXT: vpinsrw $7, 30(%rdi), %xmm0, %xmm0 +; AVX2-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 ; AVX2-NEXT: retq ; @@ -704,10 +704,10 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile(i16* %ptr) nounwind ; AVX512F: # BB#0: ; AVX512F-NEXT: vpxor %xmm0, %xmm0, %xmm0 ; AVX512F-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm1 -; AVX512F-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1 ; AVX512F-NEXT: vpinsrw $4, 24(%rdi), %xmm0, %xmm0 ; AVX512F-NEXT: vpinsrw $6, 28(%rdi), %xmm0, %xmm0 ; AVX512F-NEXT: vpinsrw $7, 30(%rdi), %xmm0, %xmm0 +; AVX512F-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1 ; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 ; AVX512F-NEXT: retq ; @@ -716,10 +716,10 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile(i16* %ptr) nounwind ; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0 ; X32-AVX-NEXT: vpinsrw $0, (%eax), %xmm0, %xmm1 -; X32-AVX-NEXT: vpinsrw $3, 6(%eax), %xmm1, %xmm1 ; X32-AVX-NEXT: vpinsrw $4, 24(%eax), %xmm0, %xmm0 ; X32-AVX-NEXT: vpinsrw $6, 28(%eax), %xmm0, %xmm0 ; X32-AVX-NEXT: vpinsrw $7, 30(%eax), %xmm0, %xmm0 +; X32-AVX-NEXT: vpinsrw $3, 6(%eax), %xmm1, %xmm1 ; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; X32-AVX-NEXT: retl %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 0 diff --git a/llvm/test/CodeGen/X86/merge-store-partially-alias-loads.ll b/llvm/test/CodeGen/X86/merge-store-partially-alias-loads.ll index 735e64a076d..5a180229bb9 100644 --- a/llvm/test/CodeGen/X86/merge-store-partially-alias-loads.ll +++ b/llvm/test/CodeGen/X86/merge-store-partially-alias-loads.ll @@ -21,11 +21,11 @@ ; DBGDAG-DAG: [[LD2:t[0-9]+]]: i16,ch = load<LD2[%tmp81](align=1)> [[ENTRYTOKEN]], [[BASEPTR]], undef:i64 ; DBGDAG-DAG: [[LD1:t[0-9]+]]: i8,ch = load<LD1[%tmp12]> [[ENTRYTOKEN]], [[ADDPTR]], undef:i64 -; DBGDAG: [[LOADTOKEN:t[0-9]+]]: ch = TokenFactor [[LD2]]:1, [[LD1]]:1 +; DBGDAG-DAG: [[ST1:t[0-9]+]]: ch = store<ST1[%tmp14]> [[ENTRYTOKEN]], [[LD1]], t{{[0-9]+}}, undef:i64 +; DBGDAG-DAG: [[LOADTOKEN:t[0-9]+]]: ch = TokenFactor [[LD2]]:1, [[LD1]]:1 +; DBGDAG: [[ST2:t[0-9]+]]: ch = store<ST2[%tmp10](align=1)> [[LOADTOKEN]], [[LD2]], t{{[0-9]+}}, undef:i64 -; DBGDAG-DAG: [[ST2:t[0-9]+]]: ch = store<ST2[%tmp10](align=1)> [[LOADTOKEN]], [[LD2]], t{{[0-9]+}}, undef:i64 -; DBGDAG-DAG: [[ST1:t[0-9]+]]: ch = store<ST1[%tmp14]> [[ST2]], [[LD1]], t{{[0-9]+}}, undef:i64 -; DBGDAG: X86ISD::RET_FLAG [[ST1]], +; DBGDAG: X86ISD::RET_FLAG 
t{{[0-9]+}}, ; DBGDAG: Type-legalized selection DAG: BB#0 'merge_store_partial_overlap_load:' define void @merge_store_partial_overlap_load([4 x i8]* %tmp) { diff --git a/llvm/test/CodeGen/X86/pr18023.ll b/llvm/test/CodeGen/X86/pr18023.ll deleted file mode 100644 index c7ea20c281b..00000000000 --- a/llvm/test/CodeGen/X86/pr18023.ll +++ /dev/null @@ -1,31 +0,0 @@ -; RUN: llc < %s -mtriple x86_64-apple-macosx10.9.0 | FileCheck %s -; PR18023 - -; CHECK: movabsq $4294967296, %rcx -; CHECK: movq %rcx, (%rax) -; CHECK: movl $1, 4(%rax) -; CHECK: movl $0, 4(%rax) -; CHECK: movq $1, 4(%rax) - -@c = common global i32 0, align 4 -@a = common global [3 x i32] zeroinitializer, align 4 -@b = common global i32 0, align 4 -@.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1 - -define void @func() { - store i32 1, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 1), align 4 - store i32 0, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 0), align 4 - %1 = load volatile i32, i32* @b, align 4 - store i32 1, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 1), align 4 - store i32 0, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 1), align 4 - %2 = load volatile i32, i32* @b, align 4 - store i32 1, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 1), align 4 - store i32 0, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 2), align 4 - %3 = load volatile i32, i32* @b, align 4 - store i32 3, i32* @c, align 4 - %4 = load i32, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @a, i64 0, i64 1), align 4 - %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %4) - ret void -} - -declare i32 @printf(i8*, ...) 
diff --git a/llvm/test/CodeGen/X86/split-store.ll b/llvm/test/CodeGen/X86/split-store.ll index 8a276f8d2f7..1d3fba69ca7 100644 --- a/llvm/test/CodeGen/X86/split-store.ll +++ b/llvm/test/CodeGen/X86/split-store.ll @@ -1,8 +1,8 @@ ; RUN: llc -mtriple=x86_64-unknown-unknown < %s | FileCheck %s ; CHECK-LABEL: int32_float_pair -; CHECK: movl %edi, (%rsi) -; CHECK: movss %xmm0, 4(%rsi) +; CHECK-DAG: movl %edi, (%rsi) +; CHECK-DAG: movss %xmm0, 4(%rsi) define void @int32_float_pair(i32 %tmp1, float %tmp2, i64* %ref.tmp) { entry: %t0 = bitcast float %tmp2 to i32 @@ -15,8 +15,8 @@ entry: } ; CHECK-LABEL: float_int32_pair -; CHECK: movss %xmm0, (%rsi) -; CHECK: movl %edi, 4(%rsi) +; CHECK-DAG: movss %xmm0, (%rsi) +; CHECK-DAG: movl %edi, 4(%rsi) define void @float_int32_pair(float %tmp1, i32 %tmp2, i64* %ref.tmp) { entry: %t0 = bitcast float %tmp1 to i32 @@ -29,9 +29,9 @@ entry: } ; CHECK-LABEL: int16_float_pair -; CHECK: movzwl %di, %eax -; CHECK: movl %eax, (%rsi) -; CHECK: movss %xmm0, 4(%rsi) +; CHECK-DAG: movzwl %di, %eax +; CHECK-DAG: movl %eax, (%rsi) +; CHECK-DAG: movss %xmm0, 4(%rsi) define void @int16_float_pair(i16 signext %tmp1, float %tmp2, i64* %ref.tmp) { entry: %t0 = bitcast float %tmp2 to i32 @@ -44,9 +44,9 @@ entry: } ; CHECK-LABEL: int8_float_pair -; CHECK: movzbl %dil, %eax -; CHECK: movl %eax, (%rsi) -; CHECK: movss %xmm0, 4(%rsi) +; CHECK-DAG: movzbl %dil, %eax +; CHECK-DAG: movl %eax, (%rsi) +; CHECK-DAG: movss %xmm0, 4(%rsi) define void @int8_float_pair(i8 signext %tmp1, float %tmp2, i64* %ref.tmp) { entry: %t0 = bitcast float %tmp2 to i32 diff --git a/llvm/test/CodeGen/X86/stores-merging.ll b/llvm/test/CodeGen/X86/stores-merging.ll index 9e479bd71b9..dbfb06881d8 100644 --- a/llvm/test/CodeGen/X86/stores-merging.ll +++ b/llvm/test/CodeGen/X86/stores-merging.ll @@ -13,9 +13,9 @@ target triple = "x86_64-unknown-linux-gnu" ;; the same result in memory in the end. ; CHECK-LABEL: redundant_stores_merging: -; CHECK: movl $123, e+8(%rip) -; CHECK: movabsq $1958505086977, %rax +; CHECK: movabsq $528280977409, %rax ; CHECK: movq %rax, e+4(%rip) +; CHECK: movl $456, e+8(%rip) define void @redundant_stores_merging() { entry: store i32 1, i32* getelementptr inbounds (%structTy, %structTy* @e, i64 0, i32 1), align 4 @@ -26,9 +26,9 @@ entry: ;; This variant tests PR25154. ; CHECK-LABEL: redundant_stores_merging_reverse: -; CHECK: movl $123, e+8(%rip) -; CHECK: movabsq $1958505086977, %rax +; CHECK: movabsq $528280977409, %rax ; CHECK: movq %rax, e+4(%rip) +; CHECK: movl $456, e+8(%rip) define void @redundant_stores_merging_reverse() { entry: store i32 123, i32* getelementptr inbounds (%structTy, %structTy* @e, i64 0, i32 2), align 4 @@ -45,9 +45,8 @@ entry: ;; a movl, after the store to 3). 
;; CHECK-LABEL: overlapping_stores_merging: -;; CHECK: movw $0, b+2(%rip) +;; CHECK: movl $1, b(%rip) ;; CHECK: movw $2, b+3(%rip) -;; CHECK: movw $1, b(%rip) define void @overlapping_stores_merging() { entry: store i16 0, i16* bitcast (i8* getelementptr inbounds ([8 x i8], [8 x i8]* @b, i64 0, i64 2) to i16*), align 2 diff --git a/llvm/test/CodeGen/X86/vector-compare-results.ll b/llvm/test/CodeGen/X86/vector-compare-results.ll index 81112d25b32..213a3ec1bc8 100644 --- a/llvm/test/CodeGen/X86/vector-compare-results.ll +++ b/llvm/test/CodeGen/X86/vector-compare-results.ll @@ -327,98 +327,98 @@ define <32 x i1> @test_cmp_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind { ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 2(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; 
SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, (%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movq %rdi, %rax @@ -813,98 +813,98 @@ define <32 x i1> @test_cmp_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind { ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 2(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: 
movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, (%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movq %rdi, %rax @@ -1070,196 +1070,196 @@ define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind { ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) -; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) -; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; 
SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 6(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 4(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb 
%al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 2(%rdi) ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, (%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movq %rdi, %rax @@ -2153,98 +2153,98 @@ define <32 x i1> @test_cmp_v32f32(<32 x float> %a0, <32 x float> %a1) nounwind { ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb 
-{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 2(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, (%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movq %rdi, %rax @@ -2989,98 +2989,98 @@ define <32 x i1> @test_cmp_v32i32(<32 x i32> %a0, <32 x i32> %a1) nounwind { ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb 
-{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 2(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, (%rdi) ; SSE2-NEXT: andb $1, %al ; 
SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movq %rdi, %rax @@ -3513,196 +3513,196 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind { ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) -; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) -; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 6(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 
2(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 4(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 2(%rdi) ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: 
andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, (%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movq %rdi, %rax @@ -4671,392 +4671,392 @@ define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind { ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 14(%rdi) -; SSE2-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) -; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) -; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) -; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) -; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) -; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 14(%rdi) 
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 14(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 14(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 12(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 12(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: 
movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 10(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 10(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, 8(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 8(%rdi) ; SSE2-NEXT: 
andb $1, %al ; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 6(%rdi) ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), 
%al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 4(%rdi) ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 2(%rdi) ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb 
%al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 14(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 12(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 10(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 8(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 6(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 4(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, (%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movq %rdi, %rax @@ -6438,98 +6438,98 @@ define <32 x i1> @test_cmp_v32f64(<32 x double> %a0, <32 x double> %a1) nounwind ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, 2(%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: 
movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl +; SSE2-NEXT: andb $1, %cl +; SSE2-NEXT: movb %cl, (%rdi) ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movq %rdi, %rax @@ -7340,98 +7340,98 @@ define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind { ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al -; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al ; SSE2-NEXT: andb $1, %al -; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: movb 
-{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, 2(%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
; SSE2-NEXT: andb $1, %al
-; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: movb %cl, (%rdi)
; SSE2-NEXT: andb $1, %al
; SSE2-NEXT: movb %al, (%rdi)
; SSE2-NEXT: movq %rdi, %rax
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-variable-128.ll b/llvm/test/CodeGen/X86/vector-shuffle-variable-128.ll
index d130e7ff00b..2b79142ee57 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-variable-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-variable-128.ll
@@ -36,8 +36,8 @@ define <2 x i64> @var_shuffle_v2i64_v2i64_xx_i64(<2 x i64> %x, i32 %i0, i32 %i1)
; SSE-NEXT: movslq %edi, %rax
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movslq %esi, %rcx
-; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
@@ -48,7 +48,7 @@ define <2 x i64> @var_shuffle_v2i64_v2i64_xx_i64(<2 x i64> %x, i32 %i0, i32 %i1)
; AVX-NEXT: movslq %esi, %rcx
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: retq
%x0 = extractelement <2 x i64> %x, i32 %i0
%x1 = extractelement <2 x i64> %x, i32 %i1
@@ -67,10 +67,10 @@ define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i3
; SSE2-NEXT: movslq %ecx, %rcx
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
@@ -83,10 +83,10 @@ define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i3
; SSSE3-NEXT: movslq %ecx, %rcx
; SSSE3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSSE3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSSE3-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
@@ -136,10 +136,10 @@ define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i
; SSE2-NEXT: movslq %ecx, %rcx
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
@@ -152,10 +152,10 @@ define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i
; SSSE3-NEXT: movslq %ecx, %rcx
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
@@ -204,38 +204,38 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1
; SSE2-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
; SSE2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; SSE2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
-; SSE2-NEXT: movswq %di, %rax
+; SSE2-NEXT: movswq %di, %r10
; SSE2-NEXT: movswq %si, %rsi
-; SSE2-NEXT: movswq %dx, %rdx
-; SSE2-NEXT: movswq %cx, %r10
-; SSE2-NEXT: movswq %r8w, %r11
+; SSE2-NEXT: movswq %dx, %r11
+; SSE2-NEXT: movswq %cx, %rcx
+; SSE2-NEXT: movswq %r8w, %r8
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movswq %r9w, %r8
-; SSE2-NEXT: movswq {{[0-9]+}}(%rsp), %rcx
+; SSE2-NEXT: movswq %r9w, %rax
+; SSE2-NEXT: movswq {{[0-9]+}}(%rsp), %rdx
; SSE2-NEXT: movswq {{[0-9]+}}(%rsp), %rdi
-; SSE2-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
; SSE2-NEXT: movzwl -24(%rsp,%rdi,2), %edi
-; SSE2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; SSE2-NEXT: movzwl -24(%rsp,%rsi,2), %esi
-; SSE2-NEXT: movd %ecx, %xmm0
-; SSE2-NEXT: movzwl -24(%rsp,%rdx,2), %ecx
+; SSE2-NEXT: movd %edi,
%xmm0 +; SSE2-NEXT: movzwl -24(%rsp,%rcx,2), %ecx ; SSE2-NEXT: movd %ecx, %xmm1 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; SSE2-NEXT: movzwl -24(%rsp,%r10,2), %ecx +; SSE2-NEXT: movzwl -24(%rsp,%rax,2), %eax ; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: movzwl -24(%rsp,%r11,2), %eax +; SSE2-NEXT: movzwl -24(%rsp,%rsi,2), %eax ; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; SSE2-NEXT: movd %edi, %xmm1 -; SSE2-NEXT: movd %ecx, %xmm2 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSE2-NEXT: movd %esi, %xmm1 +; SSE2-NEXT: movzwl -24(%rsp,%rdx,2), %eax +; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: movzwl -24(%rsp,%r11,2), %eax +; SSE2-NEXT: movd %eax, %xmm1 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] ; SSE2-NEXT: movzwl -24(%rsp,%r8,2), %eax ; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; SSE2-NEXT: movzwl -24(%rsp,%r10,2), %eax +; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] ; SSE2-NEXT: retq ; ; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16: @@ -246,43 +246,42 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1 ; SSSE3-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def> ; SSSE3-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def> ; SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def> -; SSSE3-NEXT: movswq %di, %rax +; SSSE3-NEXT: movswq %di, %r10 ; SSSE3-NEXT: movswq %si, %rsi -; SSSE3-NEXT: movswq %dx, %rdx -; SSSE3-NEXT: movswq %cx, %r10 -; SSSE3-NEXT: movswq %r8w, %r11 +; SSSE3-NEXT: movswq %dx, %r11 +; SSSE3-NEXT: movswq %cx, %rcx +; SSSE3-NEXT: movswq %r8w, %r8 ; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movswq %r9w, %r8 -; SSSE3-NEXT: movswq {{[0-9]+}}(%rsp), %rcx +; SSSE3-NEXT: movswq %r9w, %rax +; SSSE3-NEXT: movswq {{[0-9]+}}(%rsp), %rdx ; SSSE3-NEXT: movswq {{[0-9]+}}(%rsp), %rdi -; SSSE3-NEXT: movzwl -24(%rsp,%rcx,2), %ecx ; SSSE3-NEXT: movzwl -24(%rsp,%rdi,2), %edi -; SSSE3-NEXT: movzwl -24(%rsp,%rax,2), %eax -; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %esi -; SSSE3-NEXT: movd %ecx, %xmm0 -; SSSE3-NEXT: movzwl -24(%rsp,%rdx,2), %ecx +; SSSE3-NEXT: movd %edi, %xmm0 +; SSSE3-NEXT: movzwl -24(%rsp,%rcx,2), %ecx ; SSSE3-NEXT: movd %ecx, %xmm1 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] -; SSSE3-NEXT: movzwl -24(%rsp,%r10,2), %ecx +; SSSE3-NEXT: movzwl -24(%rsp,%rax,2), %eax ; SSSE3-NEXT: movd %eax, %xmm0 -; SSSE3-NEXT: movzwl -24(%rsp,%r11,2), %eax +; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %eax ; SSSE3-NEXT: movd %eax, %xmm2 -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] -; SSSE3-NEXT: punpcklwd 
{{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; SSSE3-NEXT: movd %edi, %xmm1 -; SSSE3-NEXT: movd %ecx, %xmm2 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] -; SSSE3-NEXT: movd %esi, %xmm1 +; SSSE3-NEXT: movzwl -24(%rsp,%rdx,2), %eax +; SSSE3-NEXT: movd %eax, %xmm0 +; SSSE3-NEXT: movzwl -24(%rsp,%r11,2), %eax +; SSSE3-NEXT: movd %eax, %xmm1 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] ; SSSE3-NEXT: movzwl -24(%rsp,%r8,2), %eax ; SSSE3-NEXT: movd %eax, %xmm3 -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] -; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] +; SSSE3-NEXT: movzwl -24(%rsp,%r10,2), %eax +; SSSE3-NEXT: movd %eax, %xmm0 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] ; SSSE3-NEXT: retq ; ; SSE41-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16: ; SSE41: # BB#0: -; SSE41-NEXT: pushq %rbx ; SSE41-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def> ; SSE41-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def> ; SSE41-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def> @@ -290,26 +289,23 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1 ; SSE41-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def> ; SSE41-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def> ; SSE41-NEXT: movswq %di, %rax -; SSE41-NEXT: movswq %si, %rbx -; SSE41-NEXT: movswq %dx, %r11 +; SSE41-NEXT: movswq %si, %rsi +; SSE41-NEXT: movswq %dx, %rdx ; SSE41-NEXT: movswq %cx, %r10 ; SSE41-NEXT: movswq %r8w, %rdi ; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) ; SSE41-NEXT: movswq %r9w, %rcx -; SSE41-NEXT: movswq {{[0-9]+}}(%rsp), %rdx -; SSE41-NEXT: movswq {{[0-9]+}}(%rsp), %rsi -; SSE41-NEXT: movzwl -16(%rsp,%rdx,2), %edx -; SSE41-NEXT: movzwl -16(%rsp,%rsi,2), %esi -; SSE41-NEXT: movzwl -16(%rsp,%rax,2), %eax +; SSE41-NEXT: movswq {{[0-9]+}}(%rsp), %r8 +; SSE41-NEXT: movswq {{[0-9]+}}(%rsp), %r9 +; SSE41-NEXT: movzwl -24(%rsp,%rax,2), %eax ; SSE41-NEXT: movd %eax, %xmm0 -; SSE41-NEXT: pinsrw $1, -16(%rsp,%rbx,2), %xmm0 -; SSE41-NEXT: pinsrw $2, -16(%rsp,%r11,2), %xmm0 -; SSE41-NEXT: pinsrw $3, -16(%rsp,%r10,2), %xmm0 -; SSE41-NEXT: pinsrw $4, -16(%rsp,%rdi,2), %xmm0 -; SSE41-NEXT: pinsrw $5, -16(%rsp,%rcx,2), %xmm0 -; SSE41-NEXT: pinsrw $6, %edx, %xmm0 -; SSE41-NEXT: pinsrw $7, %esi, %xmm0 -; SSE41-NEXT: popq %rbx +; SSE41-NEXT: pinsrw $1, -24(%rsp,%rsi,2), %xmm0 +; SSE41-NEXT: pinsrw $2, -24(%rsp,%rdx,2), %xmm0 +; SSE41-NEXT: pinsrw $3, -24(%rsp,%r10,2), %xmm0 +; SSE41-NEXT: pinsrw $4, -24(%rsp,%rdi,2), %xmm0 +; SSE41-NEXT: pinsrw $5, -24(%rsp,%rcx,2), %xmm0 +; SSE41-NEXT: pinsrw $6, -24(%rsp,%r8,2), %xmm0 +; SSE41-NEXT: pinsrw $7, -24(%rsp,%r9,2), %xmm0 ; SSE41-NEXT: retq ; ; AVX-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16: @@ -331,8 +327,6 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1 ; AVX-NEXT: movswq %r9w, %rax ; AVX-NEXT: movswq {{[0-9]+}}(%rsp), %rsi ; AVX-NEXT: movswq {{[0-9]+}}(%rsp), %rdx -; AVX-NEXT: movzwl -24(%rsp,%rsi,2), %esi -; AVX-NEXT: movzwl 
-24(%rsp,%rdx,2), %edx ; AVX-NEXT: movzwl -24(%rsp,%r10,2), %ebx ; AVX-NEXT: vmovd %ebx, %xmm0 ; AVX-NEXT: vpinsrw $1, -24(%rsp,%r11,2), %xmm0, %xmm0 @@ -340,8 +334,8 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1 ; AVX-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0 ; AVX-NEXT: vpinsrw $4, -24(%rsp,%rdi,2), %xmm0, %xmm0 ; AVX-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm0, %xmm0 -; AVX-NEXT: vpinsrw $6, %esi, %xmm0, %xmm0 -; AVX-NEXT: vpinsrw $7, %edx, %xmm0, %xmm0 +; AVX-NEXT: vpinsrw $6, -24(%rsp,%rsi,2), %xmm0, %xmm0 +; AVX-NEXT: vpinsrw $7, -24(%rsp,%rdx,2), %xmm0, %xmm0 ; AVX-NEXT: popq %rbx ; AVX-NEXT: popq %r14 ; AVX-NEXT: retq @@ -377,67 +371,67 @@ define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 % ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %r10 ; SSE2-NEXT: leaq -{{[0-9]+}}(%rsp), %r11 ; SSE2-NEXT: movzbl (%r10,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm15 +; SSE2-NEXT: movd %eax, %xmm8 ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm8 +; SSE2-NEXT: movd %eax, %xmm15 ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax ; SSE2-NEXT: movd %eax, %xmm9 -; SSE2-NEXT: movsbq %dl, %rax +; SSE2-NEXT: movsbq %cl, %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax ; SSE2-NEXT: movd %eax, %xmm3 ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax ; SSE2-NEXT: movd %eax, %xmm10 -; SSE2-NEXT: movsbq %dil, %rax +; SSE2-NEXT: movsbq %r9b, %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: movd %eax, %xmm7 ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax ; SSE2-NEXT: movd %eax, %xmm11 -; SSE2-NEXT: movsbq %r8b, %rax +; SSE2-NEXT: movsbq %sil, %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm7 +; SSE2-NEXT: movd %eax, %xmm6 ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm2 +; SSE2-NEXT: movd %eax, %xmm12 ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm12 +; SSE2-NEXT: movd %eax, %xmm5 ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax ; SSE2-NEXT: movd %eax, %xmm13 -; SSE2-NEXT: movsbq %cl, %rax +; SSE2-NEXT: movsbq %dl, %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm6 +; SSE2-NEXT: movd %eax, %xmm4 ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax ; SSE2-NEXT: movd %eax, %xmm14 -; SSE2-NEXT: movsbq %sil, %rax +; SSE2-NEXT: movsbq %r8b, %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm5 +; SSE2-NEXT: movd %eax, %xmm1 ; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm4 -; SSE2-NEXT: movsbq %r9b, %rax +; SSE2-NEXT: movd %eax, %xmm2 +; SSE2-NEXT: movsbq %dil, %rax ; SSE2-NEXT: movzbl (%rax,%r11), %eax -; SSE2-NEXT: movd %eax, %xmm1 +; SSE2-NEXT: movd %eax, %xmm0 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = 
xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3],xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = 
xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
@@ -452,201 +446,157 @@ define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %r10
; SSSE3-NEXT: leaq -{{[0-9]+}}(%rsp), %r11
; SSSE3-NEXT: movzbl (%r10,%r11), %eax
-; SSSE3-NEXT: movd %eax, %xmm15
+; SSSE3-NEXT: movd %eax, %xmm8
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
-; SSSE3-NEXT: movd %eax, %xmm8
+; SSSE3-NEXT: movd %eax, %xmm15
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm9
-; SSSE3-NEXT: movsbq %dl, %rax
+; SSSE3-NEXT: movsbq %cl, %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm3
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm10
-; SSSE3-NEXT: movsbq %dil, %rax
+; SSSE3-NEXT: movsbq %r9b, %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
-; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: movd %eax, %xmm7
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm11
-; SSSE3-NEXT: movsbq %r8b, %rax
+; SSSE3-NEXT: movsbq %sil, %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
-; SSSE3-NEXT: movd %eax, %xmm7
+; SSSE3-NEXT: movd %eax, %xmm6
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
-; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: movd %eax, %xmm12
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
-; SSSE3-NEXT: movd %eax, %xmm12
+; SSSE3-NEXT: movd %eax, %xmm5
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm13
-; SSSE3-NEXT: movsbq %cl, %rax
+; SSSE3-NEXT: movsbq %dl, %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
-; SSSE3-NEXT: movd %eax, %xmm6
+; SSSE3-NEXT: movd %eax, %xmm4
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm14
-; SSSE3-NEXT: movsbq %sil, %rax
+; SSSE3-NEXT: movsbq %r8b, %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
-; SSSE3-NEXT: movd %eax, %xmm5
+; SSSE3-NEXT: movd %eax, %xmm1
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
-; SSSE3-NEXT: movd %eax, %xmm4
-; SSSE3-NEXT: movsbq %r9b, %rax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: movsbq %dil, %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
-; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3],xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE41: # BB#0:
-; SSE41-NEXT: pushq %rbp
-; SSE41-NEXT: pushq %r15
-; SSE41-NEXT: pushq %r14
-; SSE41-NEXT: pushq %r13
-; SSE41-NEXT: pushq %r12
-; SSE41-NEXT: pushq %rbx
; SSE41-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
; SSE41-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
; SSE41-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; SSE41-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
; SSE41-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; SSE41-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
-; SSE41-NEXT: movsbq %dil, %r15
-; SSE41-NEXT: movsbq %sil, %r14
-; SSE41-NEXT: movsbq %dl, %r11
-; SSE41-NEXT: movsbq %cl, %r10
-; SSE41-NEXT: movsbq %r8b, %r8
+; SSE41-NEXT: movsbq %dil, %rdi
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE41-NEXT: movsbq %r9b, %r9
-; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r12
-; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r13
-; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rbp
-; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rbx
; SSE41-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
-; SSE41-NEXT: movzbl (%r15,%rax), %ecx
-; SSE41-NEXT: movd %ecx, %xmm0
-; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r15
-; SSE41-NEXT: pinsrb $1, (%r14,%rax), %xmm0
-; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r14
-; SSE41-NEXT: pinsrb $2, (%r11,%rax), %xmm0
-; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r11
-; SSE41-NEXT: pinsrb $3, (%r10,%rax), %xmm0
-; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r10
-; SSE41-NEXT: pinsrb $4, (%r8,%rax), %xmm0
+; SSE41-NEXT: movzbl (%rdi,%rax), %edi
+; SSE41-NEXT: movd %edi, %xmm0
+; SSE41-NEXT: movsbq %sil, %rsi
+; SSE41-NEXT: pinsrb $1, (%rsi,%rax), %xmm0
+; SSE41-NEXT: movsbq %dl, %rdx
+; SSE41-NEXT: pinsrb $2, (%rdx,%rax), %xmm0
+; SSE41-NEXT: movsbq %cl, %rcx
+; SSE41-NEXT: pinsrb $3, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq %r8b, %rcx
+; SSE41-NEXT: pinsrb $4, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq %r9b, %rcx
+; SSE41-NEXT: pinsrb $5, (%rcx,%rax), %xmm0
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx
-; SSE41-NEXT: pinsrb $5, (%r9,%rax), %xmm0
-; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rdx
-; SSE41-NEXT: movzbl (%r12,%rax), %esi
-; SSE41-NEXT: movzbl (%r13,%rax), %edi
-; SSE41-NEXT: movzbl (%rbp,%rax), %ebp
-; SSE41-NEXT: movzbl (%rbx,%rax), %ebx
-; SSE41-NEXT: movzbl (%r15,%rax), %r8d
-; SSE41-NEXT: movzbl (%r14,%rax), %r9d
-; SSE41-NEXT: movzbl (%r11,%rax), %r11d
-; SSE41-NEXT: movzbl (%r10,%rax), %r10d
-; SSE41-NEXT: movzbl (%rcx,%rax), %ecx
-; SSE41-NEXT: movzbl (%rdx,%rax), %eax
-; SSE41-NEXT: pinsrb $6, %esi, %xmm0
-; SSE41-NEXT: pinsrb $7, %edi, %xmm0
-; SSE41-NEXT: pinsrb $8, %ebp, %xmm0
-; SSE41-NEXT: pinsrb $9, %ebx, %xmm0
-; SSE41-NEXT: pinsrb $10, %r8d, %xmm0
-; SSE41-NEXT: pinsrb $11, %r9d, %xmm0
-; SSE41-NEXT: pinsrb $12, %r11d, %xmm0
-; SSE41-NEXT: pinsrb $13, %r10d, %xmm0
-; SSE41-NEXT: pinsrb $14, %ecx, %xmm0
-; SSE41-NEXT: pinsrb $15, %eax, %xmm0
-; SSE41-NEXT: popq %rbx
-; SSE41-NEXT: popq %r12
-; SSE41-NEXT: popq %r13
-; SSE41-NEXT: popq %r14
-; SSE41-NEXT: popq %r15
-; SSE41-NEXT: popq %rbp
+; SSE41-NEXT: pinsrb $6, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx
+; SSE41-NEXT: pinsrb $7, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx
+; SSE41-NEXT: pinsrb $8, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx
+; SSE41-NEXT: pinsrb $9, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx
+; SSE41-NEXT: pinsrb $10, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx
+; SSE41-NEXT: pinsrb $11, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx
+; SSE41-NEXT: pinsrb $12, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx
+; SSE41-NEXT: pinsrb $13, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx
+; SSE41-NEXT: pinsrb $14, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx
+; SSE41-NEXT: pinsrb $15, (%rcx,%rax), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; AVX: # BB#0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: pushq %r15
-; AVX-NEXT: pushq %r14
-; AVX-NEXT: pushq %r13
-; AVX-NEXT: pushq %r12
-; AVX-NEXT: pushq %rbx
; AVX-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
; AVX-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
; AVX-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
; AVX-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
; AVX-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
-; AVX-NEXT: movsbq %dil, %r10
-; AVX-NEXT: movsbq %sil, %r11
-; AVX-NEXT: movsbq %dl, %r14
-; AVX-NEXT: movsbq %cl, %r15
-; AVX-NEXT: movsbq %r8b, %r8
+; AVX-NEXT: movsbq %dil, %rax
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: movsbq %r9b, %r9
-; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r12
-; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r13
-; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rbp
-; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx
; AVX-NEXT: leaq -{{[0-9]+}}(%rsp), %rdi
-; AVX-NEXT: movzbl (%r10,%rdi), %eax
+; AVX-NEXT: movzbl (%rax,%rdi), %eax
; AVX-NEXT: vmovd %eax, %xmm0
-; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r10
-; AVX-NEXT: vpinsrb $1, (%r11,%rdi), %xmm0, %xmm0
-; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r11
-; AVX-NEXT: vpinsrb $2, (%r14,%rdi), %xmm0, %xmm0
-; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r14
-; AVX-NEXT: vpinsrb $3, (%r15,%rdi), %xmm0, %xmm0
-; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r15
-; AVX-NEXT: vpinsrb $4, (%r8,%rdi), %xmm0, %xmm0
-; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r8
-; AVX-NEXT: vpinsrb $5, (%r9,%rdi), %xmm0, %xmm0
-; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rsi
-; AVX-NEXT: movzbl (%r12,%rdi), %edx
-; AVX-NEXT: movzbl (%r13,%rdi), %ebx
-; AVX-NEXT: movzbl (%rbp,%rdi), %ebp
-; AVX-NEXT: movzbl (%rcx,%rdi), %ecx
-; AVX-NEXT: movzbl (%r10,%rdi), %eax
-; AVX-NEXT: movzbl (%r11,%rdi), %r9d
-; AVX-NEXT: movzbl (%r14,%rdi), %r10d
-; AVX-NEXT: movzbl (%r15,%rdi), %r11d
-; AVX-NEXT: movzbl (%r8,%rdi), %r8d
-; AVX-NEXT: movzbl (%rsi,%rdi), %esi
-; AVX-NEXT: vpinsrb $6, %edx, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $7, %ebx, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $8, %ebp, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $11, %r9d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $12, %r10d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $13, %r11d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $14, %r8d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $15, %esi, %xmm0, %xmm0
-; AVX-NEXT: popq %rbx
-; AVX-NEXT: popq %r12
-; AVX-NEXT: popq %r13
-; AVX-NEXT: popq %r14
-; AVX-NEXT: popq %r15
-; AVX-NEXT: popq %rbp
+; AVX-NEXT: movsbq %sil, %rax
+; AVX-NEXT: vpinsrb $1, (%rax,%rdi), %xmm0, %xmm0
+; AVX-NEXT: movsbq %dl, %rax
+; AVX-NEXT: vpinsrb $2, (%rax,%rdi), %xmm0, %xmm0
+; AVX-NEXT: movsbq %cl, %rax
+; AVX-NEXT: vpinsrb $3, (%rax,%rdi), %xmm0, %xmm0
+; AVX-NEXT: movsbq %r8b, %rax
+; AVX-NEXT: vpinsrb $4, (%rax,%rdi), %xmm0, %xmm0
+; AVX-NEXT: movsbq %r9b, %rax
+; AVX-NEXT: vpinsrb $5, (%rax,%rdi), %xmm0, %xmm0
+; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
+; AVX-NEXT: vpinsrb $6, (%rax,%rdi), %xmm0, %xmm0
+; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
+; AVX-NEXT: vpinsrb $7, (%rax,%rdi), %xmm0, %xmm0
+; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
+; AVX-NEXT: vpinsrb $8, (%rax,%rdi), %xmm0, %xmm0
+; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
+; AVX-NEXT: vpinsrb $9, (%rax,%rdi), %xmm0, %xmm0
+; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
+; AVX-NEXT: vpinsrb $10, (%rax,%rdi), %xmm0, %xmm0
+; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
+; AVX-NEXT: vpinsrb $11, (%rax,%rdi), %xmm0, %xmm0
+; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
+; AVX-NEXT: vpinsrb $12, (%rax,%rdi), %xmm0, %xmm0
+; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
+; AVX-NEXT: vpinsrb $13, (%rax,%rdi), %xmm0, %xmm0
+; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
+; AVX-NEXT: vpinsrb $14, (%rax,%rdi), %xmm0, %xmm0
+; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
+; AVX-NEXT: vpinsrb $15, (%rax,%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <16 x i8> %x, i8 %i0
%x1 = extractelement <16 x i8> %x, i8 %i1
@@ -697,11 +647,11 @@ define <4 x i32> @mem_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32* %i) nounwi
; SSE2-NEXT: movslq 12(%rdi), %rsi
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
@@ -713,11 +663,11 @@ define <4 x i32> @mem_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32* %i) nounwi
; SSSE3-NEXT: movslq 12(%rdi), %rsi
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
@@ -767,270 +717,218 @@ define <4 x i32> @mem_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32* %i) nounwi
define <16 x i8> @mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8* %i) nounwind {
; SSE2-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE2: # BB#0:
-; SSE2-NEXT: movsbq (%rdi), %rcx
+; SSE2-NEXT: movsbq (%rdi), %rax
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
-; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
-; SSE2-NEXT: movd %ecx, %xmm0
-; SSE2-NEXT: movsbq 8(%rdi), %rcx
-; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
-; SSE2-NEXT: movd %ecx, %xmm8
-; SSE2-NEXT: movsbq 12(%rdi), %rcx
-; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
-; SSE2-NEXT: movd %ecx, %xmm9
-; SSE2-NEXT: movsbq 4(%rdi), %rcx
-; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
-; SSE2-NEXT: movd %ecx, %xmm3
-; SSE2-NEXT: movsbq 14(%rdi), %rcx
-; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
-; SSE2-NEXT: movd %ecx, %xmm10
-; SSE2-NEXT: movsbq 6(%rdi), %rcx
-; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
-; SSE2-NEXT: movd %ecx, %xmm5
-; SSE2-NEXT: movsbq 10(%rdi), %rcx
-; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
-; SSE2-NEXT: movd %ecx, %xmm11
-; SSE2-NEXT: movsbq 2(%rdi), %rcx
-; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
-; SSE2-NEXT: movd %ecx, %xmm7
-; SSE2-NEXT: movsbq 15(%rdi), %rcx
-; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
-; SSE2-NEXT: movd %ecx, %xmm12
-; SSE2-NEXT: movsbq 7(%rdi), %rcx
-; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
-; SSE2-NEXT: movd %ecx, %xmm2
-; SSE2-NEXT: movsbq 11(%rdi), %rcx
-; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
-; SSE2-NEXT: movd %ecx, %xmm13
-; SSE2-NEXT: movsbq 3(%rdi), %rcx
-; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
-; SSE2-NEXT: movd %ecx, %xmm6
-; SSE2-NEXT: movsbq 13(%rdi), %rcx
-; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
-; SSE2-NEXT: movd %ecx, %xmm14
-; SSE2-NEXT: movsbq 5(%rdi), %rcx
-; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
-; SSE2-NEXT: movd %ecx, %xmm4
-; SSE2-NEXT: movsbq 9(%rdi), %rcx
-; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
-; SSE2-NEXT: movd %ecx, %xmm15
-; SSE2-NEXT: movsbq 1(%rdi), %rcx
-; SSE2-NEXT: movzbl (%rcx,%rax), %eax
-; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
+; SSE2-NEXT: movsbq 15(%rdi), %rdx
+; SSE2-NEXT: leaq -{{[0-9]+}}(%rsp), %rcx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm8
+; SSE2-NEXT: movsbq 7(%rdi), %rdx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm15
+; SSE2-NEXT: movsbq 11(%rdi), %rdx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm9
+; SSE2-NEXT: movsbq 3(%rdi), %rdx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm3
+; SSE2-NEXT: movsbq 13(%rdi), %rdx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm10
+; SSE2-NEXT: movsbq 5(%rdi), %rdx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm7
+; SSE2-NEXT: movsbq 9(%rdi), %rdx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm11
+; SSE2-NEXT: movsbq 1(%rdi), %rdx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm6
+; SSE2-NEXT: movsbq 14(%rdi), %rdx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm12
+; SSE2-NEXT: movsbq 6(%rdi), %rdx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm5
+; SSE2-NEXT: movsbq 10(%rdi), %rdx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm13
+; SSE2-NEXT: movsbq 2(%rdi), %rdx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm4
+; SSE2-NEXT: movsbq 12(%rdi), %rdx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm14
+; SSE2-NEXT: movsbq 4(%rdi), %rdx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm1
+; SSE2-NEXT: movsbq 8(%rdi), %rdx
+; SSE2-NEXT: movzbl (%rdx,%rcx), %edx
+; SSE2-NEXT: movd %edx, %xmm2
+; SSE2-NEXT: movzbl (%rax,%rcx), %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3],xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3],xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movsbq (%rdi), %rcx
+; SSSE3-NEXT: movsbq (%rdi), %rax
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSSE3-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
-; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
-; SSSE3-NEXT: movd %ecx, %xmm0
-; SSSE3-NEXT: movsbq 8(%rdi), %rcx
-; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
-; SSSE3-NEXT: movd %ecx, %xmm8
-; SSSE3-NEXT: movsbq 12(%rdi), %rcx
-; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
-; SSSE3-NEXT: movd %ecx, %xmm9
-; SSSE3-NEXT: movsbq 4(%rdi), %rcx
-; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
-; SSSE3-NEXT: movd %ecx, %xmm3
-; SSSE3-NEXT: movsbq 14(%rdi), %rcx
-; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
-; SSSE3-NEXT: movd %ecx, %xmm10
-; SSSE3-NEXT: movsbq 6(%rdi), %rcx
-; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
-; SSSE3-NEXT: movd %ecx, %xmm5
-; SSSE3-NEXT: movsbq 10(%rdi), %rcx
-; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
-; SSSE3-NEXT: movd %ecx, %xmm11
-; SSSE3-NEXT: movsbq 2(%rdi), %rcx
-; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
-; SSSE3-NEXT: movd %ecx, %xmm7
-; SSSE3-NEXT: movsbq 15(%rdi), %rcx
-; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
-; SSSE3-NEXT: movd %ecx, %xmm12
-; SSSE3-NEXT: movsbq 7(%rdi), %rcx
-; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
-; SSSE3-NEXT: movd %ecx, %xmm2
-; SSSE3-NEXT: movsbq 11(%rdi), %rcx
-; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
-; SSSE3-NEXT: movd %ecx, %xmm13
-; SSSE3-NEXT: movsbq 3(%rdi), %rcx
-; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
-; SSSE3-NEXT: movd %ecx, %xmm6
-; SSSE3-NEXT: movsbq 13(%rdi), %rcx
-; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
-; SSSE3-NEXT: movd %ecx, %xmm14
-; SSSE3-NEXT: movsbq 5(%rdi), %rcx
-; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
-; SSSE3-NEXT: movd %ecx, %xmm4
-; SSSE3-NEXT: movsbq 9(%rdi), %rcx
-; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
-; SSSE3-NEXT: movd %ecx, %xmm15
-; SSSE3-NEXT: movsbq 1(%rdi), %rcx
-; SSSE3-NEXT: movzbl (%rcx,%rax), %eax
-; SSSE3-NEXT: movd %eax, %xmm1
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
+; SSSE3-NEXT: movsbq 15(%rdi), %rdx
+; SSSE3-NEXT: leaq -{{[0-9]+}}(%rsp), %rcx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm8
+; SSSE3-NEXT: movsbq 7(%rdi), %rdx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm15
+; SSSE3-NEXT: movsbq 11(%rdi), %rdx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm9
+; SSSE3-NEXT: movsbq 3(%rdi), %rdx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm3
+; SSSE3-NEXT: movsbq 13(%rdi), %rdx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm10
+; SSSE3-NEXT: movsbq 5(%rdi), %rdx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm7
+; SSSE3-NEXT: movsbq 9(%rdi), %rdx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm11
+; SSSE3-NEXT: movsbq 1(%rdi), %rdx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm6
+; SSSE3-NEXT: movsbq 14(%rdi), %rdx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm12
+; SSSE3-NEXT: movsbq 6(%rdi), %rdx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm5
+; SSSE3-NEXT: movsbq 10(%rdi), %rdx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm13
+; SSSE3-NEXT: movsbq 2(%rdi), %rdx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm4
+; SSSE3-NEXT: movsbq 12(%rdi), %rdx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm14
+; SSSE3-NEXT: movsbq 4(%rdi), %rdx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm1
+; SSSE3-NEXT: movsbq 8(%rdi), %rdx
+; SSSE3-NEXT: movzbl (%rdx,%rcx), %edx
+; SSSE3-NEXT: movd %edx, %xmm2
+; SSSE3-NEXT: movzbl (%rax,%rcx), %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3],xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3],xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE41: # BB#0:
-; SSE41-NEXT: pushq %rbp
-; SSE41-NEXT: pushq %r15
-; SSE41-NEXT: pushq %r14
-; SSE41-NEXT: pushq %r13
-; SSE41-NEXT: pushq %r12
-; SSE41-NEXT: pushq %rbx
-; SSE41-NEXT: movsbq (%rdi), %rax
+; SSE41-NEXT: movsbq (%rdi), %rcx
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE41-NEXT: movsbq 1(%rdi), %r15
-; SSE41-NEXT: movsbq 2(%rdi), %r8
-; SSE41-NEXT: movsbq 3(%rdi), %r9
-; SSE41-NEXT: movsbq 4(%rdi), %r10
-; SSE41-NEXT: movsbq 5(%rdi), %r11
-; SSE41-NEXT: movsbq 6(%rdi), %r14
-; SSE41-NEXT: movsbq 7(%rdi), %r12
-; SSE41-NEXT: movsbq 8(%rdi), %r13
-; SSE41-NEXT: movsbq 9(%rdi), %rdx
+; SSE41-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
+; SSE41-NEXT: movzbl (%rcx,%rax), %ecx
+; SSE41-NEXT: movd %ecx, %xmm0
+; SSE41-NEXT: movsbq 1(%rdi), %rcx
+; SSE41-NEXT: pinsrb $1, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq 2(%rdi), %rcx
+; SSE41-NEXT: pinsrb $2, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq 3(%rdi), %rcx
+; SSE41-NEXT: pinsrb $3, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq 4(%rdi), %rcx
+; SSE41-NEXT: pinsrb $4, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq 5(%rdi), %rcx
+; SSE41-NEXT: pinsrb $5, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq 6(%rdi), %rcx
+; SSE41-NEXT: pinsrb $6, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq 7(%rdi), %rcx
+; SSE41-NEXT: pinsrb $7, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq 8(%rdi), %rcx
+; SSE41-NEXT: pinsrb $8, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq 9(%rdi), %rcx
+; SSE41-NEXT: pinsrb $9, (%rcx,%rax), %xmm0
; SSE41-NEXT: movsbq 10(%rdi), %rcx
-; SSE41-NEXT: movsbq 11(%rdi), %rsi
-; SSE41-NEXT: movsbq 12(%rdi), %rbx
-; SSE41-NEXT: leaq -{{[0-9]+}}(%rsp), %rbp
-; SSE41-NEXT: movzbl (%rax,%rbp), %eax
-; SSE41-NEXT: movd %eax, %xmm0
-; SSE41-NEXT: movsbq 13(%rdi), %rax
-; SSE41-NEXT: pinsrb $1, (%r15,%rbp), %xmm0
-; SSE41-NEXT: movsbq 14(%rdi), %r15
-; SSE41-NEXT: movsbq 15(%rdi), %rdi
-; SSE41-NEXT: movzbl (%rdi,%rbp), %edi
-; SSE41-NEXT: movzbl (%r15,%rbp), %r15d
-; SSE41-NEXT: movzbl (%rax,%rbp), %eax
-; SSE41-NEXT: movzbl (%rbx,%rbp), %ebx
-; SSE41-NEXT: movzbl (%rsi,%rbp), %esi
-; SSE41-NEXT: movzbl (%rcx,%rbp), %ecx
-; SSE41-NEXT: movzbl (%rdx,%rbp), %edx
-; SSE41-NEXT: movzbl (%r13,%rbp), %r13d
-; SSE41-NEXT: movzbl (%r12,%rbp), %r12d
-; SSE41-NEXT: movzbl (%r14,%rbp), %r14d
-; SSE41-NEXT: movzbl (%r11,%rbp), %r11d
-; SSE41-NEXT: movzbl (%r10,%rbp), %r10d
-; SSE41-NEXT: movzbl (%r9,%rbp), %r9d
-; SSE41-NEXT: movzbl (%r8,%rbp), %ebp
-; SSE41-NEXT: pinsrb $2, %ebp, %xmm0
-; SSE41-NEXT: pinsrb $3, %r9d, %xmm0
-; SSE41-NEXT: pinsrb $4, %r10d, %xmm0
-; SSE41-NEXT: pinsrb $5, %r11d, %xmm0
-; SSE41-NEXT: pinsrb $6, %r14d, %xmm0
-; SSE41-NEXT: pinsrb $7, %r12d, %xmm0
-; SSE41-NEXT: pinsrb $8, %r13d, %xmm0
-; SSE41-NEXT: pinsrb $9, %edx, %xmm0
-; SSE41-NEXT: pinsrb $10, %ecx, %xmm0
-; SSE41-NEXT: pinsrb $11, %esi, %xmm0
-; SSE41-NEXT: pinsrb $12, %ebx, %xmm0
-; SSE41-NEXT: pinsrb $13, %eax, %xmm0
-; SSE41-NEXT: pinsrb $14, %r15d, %xmm0
-; SSE41-NEXT: pinsrb $15, %edi, %xmm0
-; SSE41-NEXT: popq %rbx
-; SSE41-NEXT: popq %r12
-; SSE41-NEXT: popq %r13
-; SSE41-NEXT: popq %r14
-; SSE41-NEXT: popq %r15
-; SSE41-NEXT: popq %rbp
+; SSE41-NEXT: pinsrb $10, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq 11(%rdi), %rcx
+; SSE41-NEXT: pinsrb $11, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq 12(%rdi), %rcx
+; SSE41-NEXT: pinsrb $12, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq 13(%rdi), %rcx
+; SSE41-NEXT: pinsrb $13, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq 14(%rdi), %rcx
+; SSE41-NEXT: pinsrb $14, (%rcx,%rax), %xmm0
+; SSE41-NEXT: movsbq 15(%rdi), %rcx
+; SSE41-NEXT: pinsrb $15, (%rcx,%rax), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; AVX: # BB#0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: pushq %r15
-; AVX-NEXT: pushq %r14
-; AVX-NEXT: pushq %r13
-; AVX-NEXT: pushq %r12
-; AVX-NEXT: pushq %rbx
-; AVX-NEXT: movsbq (%rdi), %rsi
+; AVX-NEXT: movsbq (%rdi), %rax
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX-NEXT: movsbq 1(%rdi), %r15
-; AVX-NEXT: movsbq 2(%rdi), %r8
-; AVX-NEXT: movsbq 3(%rdi), %r9
-; AVX-NEXT: movsbq 4(%rdi), %r10
-; AVX-NEXT: movsbq 5(%rdi), %r11
-; AVX-NEXT: movsbq 6(%rdi), %r14
-; AVX-NEXT: movsbq 7(%rdi), %r12
-; AVX-NEXT: movsbq 8(%rdi), %r13
-; AVX-NEXT: movsbq 9(%rdi), %rdx
+; AVX-NEXT: leaq -{{[0-9]+}}(%rsp), %rcx
+; AVX-NEXT: movzbl (%rax,%rcx), %eax
+; AVX-NEXT: vmovd %eax, %xmm0
+; AVX-NEXT: movsbq 1(%rdi), %rax
+; AVX-NEXT: vpinsrb $1, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movsbq 2(%rdi), %rax
+; AVX-NEXT: vpinsrb $2, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movsbq 3(%rdi), %rax
+; AVX-NEXT: vpinsrb $3, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movsbq 4(%rdi), %rax
+; AVX-NEXT: vpinsrb $4, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movsbq 5(%rdi), %rax
+; AVX-NEXT: vpinsrb $5, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movsbq 6(%rdi), %rax
+; AVX-NEXT: vpinsrb $6, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movsbq 7(%rdi), %rax
+; AVX-NEXT: vpinsrb $7, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movsbq 8(%rdi), %rax
+; AVX-NEXT: vpinsrb $8, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movsbq 9(%rdi), %rax
+; AVX-NEXT: vpinsrb $9, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: movsbq 10(%rdi), %rax
-; AVX-NEXT: movsbq 11(%rdi), %rcx
-; AVX-NEXT: movsbq 12(%rdi), %rbx
-; AVX-NEXT: leaq -{{[0-9]+}}(%rsp), %rbp
-; AVX-NEXT: movzbl (%rsi,%rbp), %esi
-; AVX-NEXT: vmovd %esi, %xmm0
-; AVX-NEXT: movsbq 13(%rdi), %rsi
-; AVX-NEXT: vpinsrb $1, (%r15,%rbp), %xmm0, %xmm0
-; AVX-NEXT: movsbq 14(%rdi), %r15
-; AVX-NEXT: movsbq 15(%rdi), %rdi
-; AVX-NEXT: movzbl (%rdi,%rbp), %edi
-; AVX-NEXT: movzbl (%r15,%rbp), %r15d
-; AVX-NEXT: movzbl (%rsi,%rbp), %esi
-; AVX-NEXT: movzbl (%rbx,%rbp), %ebx
-; AVX-NEXT: movzbl (%rcx,%rbp), %ecx
-; AVX-NEXT: movzbl (%rax,%rbp), %eax
-; AVX-NEXT: movzbl (%rdx,%rbp), %edx
-; AVX-NEXT: movzbl (%r13,%rbp), %r13d
-; AVX-NEXT: movzbl (%r12,%rbp), %r12d
-; AVX-NEXT: movzbl (%r14,%rbp), %r14d
-; AVX-NEXT: movzbl (%r11,%rbp), %r11d
-; AVX-NEXT: movzbl (%r10,%rbp), %r10d
-; AVX-NEXT: movzbl (%r9,%rbp), %r9d
-; AVX-NEXT: movzbl (%r8,%rbp), %ebp
-; AVX-NEXT: vpinsrb $2, %ebp, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $3, %r9d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $4, %r10d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $5, %r11d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $6, %r14d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $7, %r12d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $8, %r13d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $9, %edx, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $12, %ebx, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $13, %esi, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $14, %r15d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $15, %edi, %xmm0, %xmm0
-; AVX-NEXT: popq %rbx
-; AVX-NEXT: popq %r12
-; AVX-NEXT: popq %r13
-; AVX-NEXT: popq %r14
-; AVX-NEXT: popq %r15
-; AVX-NEXT: popq %rbp
+; AVX-NEXT: vpinsrb $10, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movsbq 11(%rdi), %rax
+; AVX-NEXT: vpinsrb $11, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movsbq 12(%rdi), %rax
+; AVX-NEXT: vpinsrb $12, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movsbq 13(%rdi), %rax
+; AVX-NEXT: vpinsrb $13, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movsbq 14(%rdi), %rax
+; AVX-NEXT: vpinsrb $14, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: movsbq 15(%rdi), %rax
+; AVX-NEXT: vpinsrb $15, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: retq
%p0 = getelementptr inbounds i8, i8* %i, i64 0
%p1 = getelementptr inbounds i8, i8* %i, i64 1
@@ -1159,27 +1057,27 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %
; SSE2-NEXT: movswq %r8w, %rdi
; SSE2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movswq %r9w, %rax
-; SSE2-NEXT: movzwl -24(%rsp,%rsi,2), %esi
; SSE2-NEXT: xorl %edx, %edx
; SSE2-NEXT: movd %edx, %xmm0
; SSE2-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
; SSE2-NEXT: movd %ecx, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: movd %esi, %xmm2
; SSE2-NEXT: movzwl -24(%rsp,%rax,2), %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: movzwl -24(%rsp,%rsi,2), %eax
; SSE2-NEXT: movd %eax, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-NEXT: movzwl -40(%rsp,%r10,2), %eax
-; SSE2-NEXT: movzwl -40(%rsp,%r11,2), %ecx
-; SSE2-NEXT: movd %ecx, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; SSE2-NEXT: movzwl -40(%rsp,%r11,2), %eax
+; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: movzwl -40(%rsp,%rdi,2), %eax
-; SSE2-NEXT: movd %eax, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: movzwl -40(%rsp,%r10,2), %eax
+; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
@@ -1198,27 +1096,27 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %
; SSSE3-NEXT: movswq %r8w, %rdi
; SSSE3-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movswq %r9w, %rax
-; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %esi
; SSSE3-NEXT: xorl %edx, %edx
; SSSE3-NEXT: movd %edx, %xmm0
; SSSE3-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
; SSSE3-NEXT: movd %ecx, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSSE3-NEXT: movd %esi, %xmm2
; SSSE3-NEXT: movzwl -24(%rsp,%rax,2), %eax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %eax
; SSSE3-NEXT: movd %eax, %xmm3
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSSE3-NEXT: movzwl -40(%rsp,%r10,2), %eax
-; SSSE3-NEXT: movzwl -40(%rsp,%r11,2), %ecx
-; SSSE3-NEXT: movd %ecx, %xmm1
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; SSSE3-NEXT: movzwl -40(%rsp,%r11,2), %eax
+; SSSE3-NEXT: movd %eax, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: movzwl -40(%rsp,%rdi,2), %eax
-; SSSE3-NEXT: movd %eax, %xmm3
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: movzwl -40(%rsp,%r10,2), %eax
+; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll b/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll
index bd59328aaf8..a34c596ced4 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
@@ -18,7 +18,7 @@ define <4 x double> @var_shuffle_v4f64_v4f64_xxxx_i64(<4 x double> %x, i64 %i0,
; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; ALL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; ALL-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
-; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; ALL-NEXT: movq %rbp, %rsp
; ALL-NEXT: popq %rbp
; ALL-NEXT: retq
@@ -67,7 +67,7 @@ define <4 x double> @var_shuffle_v4f64_v2f64_xxxx_i64(<2 x double> %x, i64 %i0,
; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; ALL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; ALL-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
-; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; ALL-NEXT: retq
%x0 = extractelement <2 x double> %x, i64 %i0
%x1 = extractelement <2 x double> %x, i64 %i1
@@ -90,11 +90,11 @@ define <4 x i64> @var_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64 %i0, i64 %i
; AVX1-NEXT: vmovaps %ymm0, (%rsp)
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: movq %rbp, %rsp
; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
@@ -108,11 +108,11 @@ define <4 x i64> @var_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64 %i0, i64 %i
; AVX2-NEXT: vmovaps %ymm0, (%rsp)
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: movq %rbp, %rsp
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
@@ -137,7 +137,7 @@ define <4 x i64> @var_shuffle_v4i64_v4i64_xx00_i64(<4 x i64> %x, i64 %i0, i64 %i
; AVX1-NEXT: vmovaps %ymm0, (%rsp)
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: movq %rbp, %rsp
@@ -153,7 +153,7 @@ define <4 x i64> @var_shuffle_v4i64_v4i64_xx00_i64(<4 x i64> %x, i64 %i0, i64 %i
; AVX2-NEXT: vmovaps %ymm0, (%rsp)
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: movq %rbp, %rsp
@@ -176,11 +176,11 @@ define <4 x i64> @var_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64 %i0, i64 %i
; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v4i64_v2i64_xxxx_i64:
@@ -188,11 +188,11 @@ define <4 x i64> @var_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64 %i0, i64 %i
; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: retq
%x0 = extractelement <2 x i64> %x, i64 %i0
%x1 = extractelement <2 x i64> %x, i64 %i1
@@ -210,29 +210,29 @@ define <8 x float> @var_shuffle_v8f32_v8f32_xxxxxxxx_i32(<8 x float> %x, i32 %i0
; AVX1: # BB#0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: andq $-32, %rsp
; AVX1-NEXT: subq $64, %rsp
; AVX1-NEXT: movslq %edi, %rax
-; AVX1-NEXT: movslq %esi, %rsi
-; AVX1-NEXT: movslq %edx, %rdx
-; AVX1-NEXT: movslq %ecx, %r11
-; AVX1-NEXT: movslq %r8d, %r10
+; AVX1-NEXT: movslq %esi, %rbx
+; AVX1-NEXT: movslq %edx, %r11
+; AVX1-NEXT: movslq %ecx, %r10
+; AVX1-NEXT: movslq %r8d, %rdi
; AVX1-NEXT: vmovaps %ymm0, (%rsp)
-; AVX1-NEXT: movslq %r9d, %r8
-; AVX1-NEXT: movslq 16(%rbp), %rdi
-; AVX1-NEXT: movslq 24(%rbp), %rcx
+; AVX1-NEXT: movslq %r9d, %rcx
+; AVX1-NEXT: movslq 16(%rbp), %rdx
+; AVX1-NEXT: movslq 24(%rbp), %rsi
; AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX1-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],mem[0]
-; AVX1-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],mem[0],xmm3[2,3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[0,1],xmm0[0],xmm3[3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],mem[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: leaq -8(%rbp), %rsp
+; AVX1-NEXT: popq %rbx
; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
;
@@ -284,26 +284,26 @@ define <8 x float> @var_shuffle_v8f32_v8f32_xxxxxxxx_i32(<8 x float> %x, i32 %i0
define <8 x float> @var_shuffle_v8f32_v4f32_xxxxxxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7) nounwind {
; ALL-LABEL: var_shuffle_v8f32_v4f32_xxxxxxxx_i32:
; ALL: # BB#0:
+; ALL-NEXT: pushq %rbx
; ALL-NEXT: movslq %edi, %rax
-; ALL-NEXT: movslq %esi, %rsi
-; ALL-NEXT: movslq %edx, %rdx
-; ALL-NEXT: movslq %ecx, %r11
-; ALL-NEXT: movslq %r8d, %r10
+; ALL-NEXT: movslq %esi, %rbx
+; ALL-NEXT: movslq %edx, %r11
+; ALL-NEXT: movslq %ecx, %r10
+; ALL-NEXT: movslq %r8d, %rdi
; ALL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; ALL-NEXT: movslq %r9d, %r8
-; ALL-NEXT: movslq {{[0-9]+}}(%rsp), %rdi
-; ALL-NEXT: movslq {{[0-9]+}}(%rsp), %rcx
+; ALL-NEXT: movslq %r9d, %rcx
+; ALL-NEXT: movslq {{[0-9]+}}(%rsp), %rdx
+; ALL-NEXT: movslq {{[0-9]+}}(%rsp), %rsi
; ALL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; ALL-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; ALL-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3]
-; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3]
-; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],mem[0]
-; ALL-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; ALL-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],mem[0],xmm3[2,3]
-; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[0,1],xmm0[0],xmm3[3]
-; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
-; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[2,3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],mem[0]
+; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT: popq %rbx
; ALL-NEXT: retq
%x0 = extractelement <4 x float> %x, i32 %i0
%x1 = extractelement <4 x float> %x, i32 %i1
@@ -336,26 +336,19 @@ define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x,
; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: movslq 40(%rbp), %rax
-; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movslq 48(%rbp), %rax
-; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movslq 56(%rbp), %rax
-; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movslq 64(%rbp), %rax
-; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movslq 72(%rbp), %rax
-; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movslq 80(%rbp), %rax
-; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movslq 88(%rbp), %rax
-; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movslq %edi, %rax
; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX1-NEXT: vmovd %eax, %xmm1
@@ -370,11 +363,9 @@ define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x,
; AVX1-NEXT: movslq %r9d, %rax
; AVX1-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm1, %xmm1
; AVX1-NEXT: movslq 16(%rbp), %rax
-; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm1, %xmm1
; AVX1-NEXT: movslq 24(%rbp), %rax
-; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: movq %rbp, %rsp
; AVX1-NEXT: popq %rbp
@@ -391,26 +382,19 @@ define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x,
; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: movslq 40(%rbp), %rax
-; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movslq 48(%rbp), %rax
-; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movslq 56(%rbp), %rax
-; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movslq 64(%rbp), %rax
-; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movslq 72(%rbp), %rax
-; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movslq 80(%rbp), %rax
-; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movslq 88(%rbp), %rax
-; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movslq %edi, %rax
; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX2-NEXT: vmovd %eax, %xmm1
@@ -425,11 +409,9 @@ define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x,
; AVX2-NEXT: movslq %r9d, %rax
; AVX2-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm1, %xmm1
; AVX2-NEXT: movslq 16(%rbp), %rax
-; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm1, %xmm1
; AVX2-NEXT: movslq 24(%rbp), %rax
-; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: movq %rbp, %rsp
; AVX2-NEXT: popq %rbp
@@ -477,26 +459,19 @@ define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i
; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $1, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $2, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $3, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $4, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $6, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $7, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX1-NEXT: movslq %edi, %rax
; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX1-NEXT: vmovd %eax, %xmm1
@@ -511,11 +486,9 @@ define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i
; AVX1-NEXT: movslq %r9d, %rax
; AVX1-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm1, %xmm1
; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpinsrw $6, -24(%rsp,%rax,2), %xmm1, %xmm1
; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpinsrw $7, -24(%rsp,%rax,2), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
@@ -526,26 +499,19 @@ define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i
; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $1, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $2, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $3, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $4, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $6, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $7, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX2-NEXT: movslq %edi, %rax
; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX2-NEXT: vmovd %eax, %xmm1
@@ -560,11 +526,9 @@ define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i
; AVX2-NEXT: movslq %r9d, %rax
; AVX2-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm1, %xmm1
; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrw $6, -24(%rsp,%rax,2), %xmm1, %xmm1
; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
-; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
-; AVX2-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrw $7, -24(%rsp,%rax,2), %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: retq
%x0 = extractelement <8 x i16> %x, i32 %i0
@@ -620,11 +584,11 @@ define <4 x i64> @mem_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64* %i) nounwi
; AVX1-NEXT: vmovaps %ymm0, (%rsp)
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: movq %rbp, %rsp
; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
@@ -642,11 +606,11 @@ define <4 x i64> @mem_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64* %i) nounwi
; AVX2-NEXT: vmovaps %ymm0, (%rsp)
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: movq %rbp, %rsp
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
@@ -679,11 +643,11 @@ define <4 x i64> @mem_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64* %i) nounwi
; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: mem_shuffle_v4i64_v2i64_xxxx_i64:
@@ -695,11 +659,11 @@ define <4 x i64> @mem_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64* %i) nounwi
; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: retq
%p0 = getelementptr inbounds i64, i64* %i, i32 0
%p1 = getelementptr inbounds i64, i64* %i, i32 1
diff --git a/llvm/test/CodeGen/X86/win32-eh.ll b/llvm/test/CodeGen/X86/win32-eh.ll
index 88403c68740..de8464e4f8b 100644
--- a/llvm/test/CodeGen/X86/win32-eh.ll
+++ b/llvm/test/CodeGen/X86/win32-eh.ll
@@ -27,23 +27,26 @@ catch:
; CHECK-LABEL: _use_except_handler3:
; CHECK: pushl %ebp
-; CHECK: movl %esp, %ebp
-; CHECK: pushl %ebx
-; CHECK: pushl %edi
-; CHECK: pushl %esi
-; CHECK: subl ${{[0-9]+}}, %esp
-; CHECK: movl $-1, -16(%ebp)
-; CHECK: movl $L__ehtable$use_except_handler3, -20(%ebp)
-; CHECK: leal -28(%ebp), %[[node:[^ ,]*]]
-; CHECK: movl $__except_handler3, -24(%ebp)
-; CHECK: movl %fs:0, %[[next:[^ ,]*]]
-; CHECK: movl %[[next]], -28(%ebp)
-; CHECK: movl %[[node]], %fs:0
-; CHECK: calll _may_throw_or_crash
+; CHECK-NEXT: movl %esp, %ebp
+; CHECK-NEXT: pushl %ebx
+; CHECK-NEXT: pushl %edi
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: subl ${{[0-9]+}}, %esp
+; CHECK-NEXT: movl %esp, -36(%ebp)
+; CHECK-NEXT: movl $-1, -16(%ebp)
+; CHECK-NEXT: movl $L__ehtable$use_except_handler3, -20(%ebp)
+; CHECK-NEXT: leal -28(%ebp), %[[node:[^ ,]*]]
+; CHECK-NEXT: movl $__except_handler3, -24(%ebp)
+; CHECK-NEXT: movl %fs:0, %[[next:[^ ,]*]]
+; CHECK-NEXT: movl %[[next]], -28(%ebp)
+; CHECK-NEXT: movl %[[node]], %fs:0
+; CHECK-NEXT: movl $0, -16(%ebp)
+; CHECK-NEXT: calll _may_throw_or_crash
+
; CHECK: movl -28(%ebp), %[[next:[^ ,]*]]
-; CHECK: movl %[[next]], %fs:0
+; CHECK-NEXT: movl %[[next]], %fs:0
; CHECK: retl
-; CHECK: LBB1_2: # %catch{{$}}
+; CHECK-NEXT: LBB1_2: # %catch{{$}}
; CHECK: .section .xdata,"dr"
; CHECK-LABEL: L__ehtable$use_except_handler3:
@@ -66,23 +69,37 @@ catch:
; CHECK-LABEL: _use_except_handler4:
; CHECK: pushl %ebp
-; CHECK: movl %esp, %ebp
-; CHECK: subl ${{[0-9]+}}, %esp
-; CHECK: movl %esp, -36(%ebp)
-; CHECK: movl $-2, -16(%ebp)
-; CHECK: movl $L__ehtable$use_except_handler4, %[[lsda:[^ ,]*]]
-; CHECK: xorl ___security_cookie, %[[lsda]]
-; CHECK: movl %[[lsda]], -20(%ebp)
-; CHECK: leal -28(%ebp), %[[node:[^ ,]*]]
-; CHECK: movl $__except_handler4, -24(%ebp)
-; CHECK: movl %fs:0, %[[next:[^ ,]*]]
-; CHECK: movl %[[next]], -28(%ebp)
-; CHECK: movl %[[node]], %fs:0
-; CHECK: calll _may_throw_or_crash
+; CHECK-NEXT: movl %esp, %ebp
+; CHECK-NEXT: pushl %ebx
+; CHECK-NEXT: pushl %edi
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: subl ${{[0-9]+}}, %esp
+; CHECK-NEXT: movl %ebp, %eax
+; CHECK-NEXT: movl %esp, -36(%ebp)
+; CHECK-NEXT: movl $-2, -16(%ebp)
+; CHECK-NEXT: movl $L__ehtable$use_except_handler4, %[[lsda:[^ ,]*]]
+; CHECK-NEXT: movl ___security_cookie, %[[seccookie:[^ ,]*]]
+; CHECK-NEXT: xorl %[[seccookie]], %[[lsda]]
+; CHECK-NEXT: movl %[[lsda]], -20(%ebp)
+; CHECK-NEXT: xorl %[[seccookie]], %[[tmp1:[^ ,]*]]
+; CHECK-NEXT: movl %[[tmp1]], -40(%ebp)
+; CHECK-NEXT: leal -28(%ebp), %[[node:[^ ,]*]]
+; CHECK-NEXT: movl $__except_handler4, -24(%ebp)
+; CHECK-NEXT: movl %fs:0, %[[next:[^ ,]*]]
+; CHECK-NEXT: movl %[[next]], -28(%ebp)
+; CHECK-NEXT: movl %[[node]], %fs:0
+; CHECK-NEXT: movl $0, -16(%ebp)
+; CHECK-NEXT: calll _may_throw_or_crash
+
; CHECK: movl -28(%ebp), %[[next:[^ ,]*]]
-; CHECK: movl %[[next]], %fs:0
-; CHECK: retl
-; CHECK: LBB2_2: # %catch{{$}}
+; CHECK-NEXT: movl %[[next]], %fs:0
+; CHECK-NEXT: addl $28, %esp
+; CHECK-NEXT: popl %esi
+; CHECK-NEXT: popl %edi
+; CHECK-NEXT: popl %ebx
+; CHECK-NEXT: popl %ebp
+; CHECK-NEXT: retl
+; CHECK-NEXT: LBB2_2: # %catch{{$}}
; CHECK: .section .xdata,"dr"
; CHECK-LABEL: L__ehtable$use_except_handler4:
@@ -109,26 +126,33 @@ catch:
; CHECK-LABEL: _use_except_handler4_ssp:
; CHECK: pushl %ebp
-; CHECK: movl %esp, %ebp
-; CHECK: subl ${{[0-9]+}}, %esp
-; CHECK: movl %ebp, %[[ehguard:[^ ,]*]]
-; CHECK: movl %esp, -36(%ebp)
-; CHECK: movl $-2, -16(%ebp)
-; CHECK: movl $L__ehtable$use_except_handler4_ssp, %[[lsda:[^ ,]*]]
-; CHECK: xorl ___security_cookie, %[[lsda]]
-; CHECK: movl %[[lsda]], -20(%ebp)
-; CHECK: xorl ___security_cookie, %[[ehguard]]
-; CHECK: movl %[[ehguard]], -40(%ebp)
-; CHECK: leal -28(%ebp), %[[node:[^ ,]*]]
-; CHECK: movl $__except_handler4, -24(%ebp)
-; CHECK: movl %fs:0, %[[next:[^ ,]*]]
-; CHECK: movl %[[next]], -28(%ebp)
-; CHECK: movl %[[node]], %fs:0
-; CHECK: calll _may_throw_or_crash
+; CHECK-NEXT: movl %esp, %ebp
+; CHECK-NEXT: pushl %ebx
+; CHECK-NEXT: pushl %edi
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: subl ${{[0-9]+}}, %esp
+; CHECK-NEXT: movl %ebp, %[[ehguard:[^ ,]*]]
+; CHECK-NEXT: movl %esp, -36(%ebp)
+; CHECK-NEXT: movl $-2, -16(%ebp)
+; CHECK-NEXT: movl $L__ehtable$use_except_handler4_ssp, %[[lsda:[^ ,]*]]
+; CHECK-NEXT: movl ___security_cookie, %[[seccookie:[^ ,]*]]
+; CHECK-NEXT: xorl %[[seccookie]], %[[lsda]]
+; CHECK-NEXT: movl %[[lsda]], -20(%ebp)
+; CHECK-NEXT: xorl %[[seccookie]], %[[ehguard]]
+; CHECK-NEXT: movl %[[ehguard]], -40(%ebp)
+; CHECK-NEXT: leal -28(%ebp), %[[node:[^ ,]*]]
+; CHECK-NEXT: movl $__except_handler4, -24(%ebp)
+; CHECK-NEXT: movl %fs:0, %[[next:[^ ,]*]]
+; CHECK-NEXT: movl %[[next]], -28(%ebp)
+; CHECK-NEXT: movl %[[node]], %fs:0
+; CHECK-NEXT: movl $0, -16(%ebp)
+; CHECK-NEXT: calll _may_throw_or_crash
; CHECK: movl -28(%ebp), %[[next:[^ ,]*]]
-; CHECK: movl %[[next]], %fs:0
+; CHECK-NEXT: movl %[[next]], %fs:0
; CHECK: retl
-; CHECK: [[catch:[^ ,]*]]: # %catch{{$}}
+; CHECK-NEXT: [[catch:[^ ,]*]]: # %catch{{$}}
+
+
; CHECK: .section .xdata,"dr"
; CHECK-LABEL: L__ehtable$use_except_handler4_ssp:
@@ -155,23 +179,26 @@ catch:
; CHECK-LABEL: _use_CxxFrameHandler3:
; CHECK: pushl %ebp
-; CHECK: movl %esp, %ebp
-; CHECK: subl ${{[0-9]+}}, %esp
-; CHECK: movl %esp, -28(%ebp)
-; CHECK: movl $-1, -16(%ebp)
-; CHECK: leal -24(%ebp), %[[node:[^ ,]*]]
-; CHECK: movl $___ehhandler$use_CxxFrameHandler3, -20(%ebp)
-; CHECK: movl %fs:0, %[[next:[^ ,]*]]
-; CHECK: movl %[[next]], -24(%ebp)
-; CHECK: movl %[[node]], %fs:0
-; CHECK: movl $0, -16(%ebp)
-; CHECK: calll _may_throw_or_crash
+; CHECK-NEXT: movl %esp, %ebp
+; CHECK-NEXT: pushl %ebx
+; CHECK-NEXT: pushl %edi
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: subl ${{[0-9]+}}, %esp
+; CHECK-NEXT: movl %esp, -28(%ebp)
+; CHECK-NEXT: movl $-1, -16(%ebp)
+; CHECK-NEXT: leal -24(%ebp), %[[node:[^ ,]*]]
+; CHECK-NEXT: movl $___ehhandler$use_CxxFrameHandler3, -20(%ebp)
+; CHECK-NEXT: movl %fs:0, %[[next:[^ ,]*]]
+; CHECK-NEXT: movl %[[next]], -24(%ebp)
+; CHECK-NEXT: movl %[[node]], %fs:0
+; CHECK-NEXT: movl $0, -16(%ebp)
+; CHECK-NEXT: calll
_may_throw_or_crash ; CHECK: movl -24(%ebp), %[[next:[^ ,]*]] -; CHECK: movl %[[next]], %fs:0 +; CHECK-NEXT: movl %[[next]], %fs:0 ; CHECK: retl ; CHECK: .section .xdata,"dr" -; CHECK: .p2align 2 +; CHECK-NEXT: .p2align 2 ; CHECK-LABEL: L__ehtable$use_CxxFrameHandler3: ; CHECK-NEXT: .long 429065506 ; CHECK-NEXT: .long 2 @@ -185,8 +212,8 @@ catch: ; CHECK-LABEL: ___ehhandler$use_CxxFrameHandler3: ; CHECK: movl $L__ehtable$use_CxxFrameHandler3, %eax -; CHECK: jmp ___CxxFrameHandler3 # TAILCALL +; CHECK-NEXT: jmp ___CxxFrameHandler3 # TAILCALL ; CHECK: .safeseh __except_handler3 -; CHECK: .safeseh __except_handler4 -; CHECK: .safeseh ___ehhandler$use_CxxFrameHandler3 +; CHECK-NEXT: .safeseh __except_handler4 +; CHECK-NEXT: .safeseh ___ehhandler$use_CxxFrameHandler3 |
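A note on the recurring pattern in the win32-eh.ll hunks above: most of the churn replaces bare CHECK: directives with CHECK-NEXT:. A plain CHECK: may skip any number of output lines after the previous match, while CHECK-NEXT: accepts only the line immediately following it, so the tightened test now asserts the exact prologue and epilogue sequences rather than the mere presence of each instruction. A minimal sketch of the two directives' semantics (illustrative only, not part of this commit; _other is a hypothetical symbol):

; Output A from llc:
;   pushl %ebp
;   movl %esp, %ebp
; CHECK:      pushl %ebp
; CHECK-NEXT: movl %esp, %ebp
; (CHECK-NEXT matches on output A: movl is the very next line)
;
; Output B from llc:
;   pushl %ebp
;   calll _other
;   movl %esp, %ebp
; CHECK:      pushl %ebp
; CHECK-NEXT: movl %esp, %ebp
; (CHECK-NEXT fails on output B because calll _other intervenes,
;  while a bare "CHECK: movl %esp, %ebp" would still match)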