| field | value | date |
|---|---|---|
| author | Sanjay Patel <spatel@rotateright.com> | 2019-05-28 17:37:58 +0000 |
| committer | Sanjay Patel <spatel@rotateright.com> | 2019-05-28 17:37:58 +0000 |
| commit | f7980e727f2c08660b8bb23f80e131306faceca8 (patch) | |
| tree | 036ca699db595266e03e8c2effe3b2cc5b961d3f /llvm/test/CodeGen/X86/vec_uaddo.ll | |
| parent | 04a087ace786b81711a1eaf8f7f092bb31d6dd25 (diff) | |
Revert "[x86] split 256-bit store of concatenated vectors"
This reverts commit d5a8637072f4c556b88156bd2f6237a2ead47d31.
Most likely suspect for this bot failure:
http://lab.llvm.org:8011/builders/clang-cmake-x86_64-avx2-linux/builds/9684
llvm-svn: 361850
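
For context, the reverted change, "[x86] split 256-bit store of concatenated vectors", altered AVX1 lowering of a 256-bit store whose value is built by concatenating two 128-bit halves: it emitted two 128-bit stores instead of vinsertf128 followed by one 256-bit store. A minimal LLVM IR sketch of that pattern follows; the function name and operands are illustrative only, not taken from this commit or its test file:

```llvm
; Hypothetical reduced example of the pattern the reverted transform targeted.
define void @store_concat_v8i32(<4 x i32> %lo, <4 x i32> %hi, <8 x i32>* %p) nounwind {
  ; Concatenate two 128-bit halves into one 256-bit vector.
  %cat = shufflevector <4 x i32> %lo, <4 x i32> %hi, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ; Store the 256-bit result. Under the reverted patch, AVX1 lowered this as two
  ; 128-bit vmovdqa stores; with this revert it is again vinsertf128 plus one
  ; 256-bit vmovaps, as the restored check lines in the diff below show.
  store <8 x i32> %cat, <8 x i32>* %p, align 32
  ret void
}
```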
Diffstat (limited to 'llvm/test/CodeGen/X86/vec_uaddo.ll')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/vec_uaddo.ll | 24 |
1 file changed, 12 insertions(+), 12 deletions(-)
```diff
diff --git a/llvm/test/CodeGen/X86/vec_uaddo.ll b/llvm/test/CodeGen/X86/vec_uaddo.ll
index 4e9cd2efb74..41a0e258e3d 100644
--- a/llvm/test/CodeGen/X86/vec_uaddo.ll
+++ b/llvm/test/CodeGen/X86/vec_uaddo.ll
@@ -501,8 +501,8 @@ define <8 x i32> @uaddo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) noun
 ; AVX1-NEXT: vpcmpeqd %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
-; AVX1-NEXT: vmovdqa %xmm2, 16(%rdi)
-; AVX1-NEXT: vmovdqa %xmm1, (%rdi)
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vmovaps %ymm1, (%rdi)
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: uaddo_v8i32:
@@ -633,19 +633,19 @@ define <16 x i32> @uaddo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2)
 ; AVX1-NEXT: vpxor %xmm6, %xmm0, %xmm0
 ; AVX1-NEXT: vpackssdw %xmm7, %xmm0, %xmm0
 ; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
 ; AVX1-NEXT: vpmovsxbd %xmm1, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[1,1,2,3]
-; AVX1-NEXT: vpmovsxbd %xmm6, %xmm6
-; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[2,3,0,1]
-; AVX1-NEXT: vpmovsxbd %xmm6, %xmm6
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[1,1,2,3]
+; AVX1-NEXT: vpmovsxbd %xmm4, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; AVX1-NEXT: vpmovsxbd %xmm4, %xmm4
 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,3,0,1]
 ; AVX1-NEXT: vpmovsxbd %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm6, %ymm1
-; AVX1-NEXT: vmovdqa %xmm4, 48(%rdi)
-; AVX1-NEXT: vmovdqa %xmm3, 32(%rdi)
-; AVX1-NEXT: vmovdqa %xmm5, 16(%rdi)
-; AVX1-NEXT: vmovdqa %xmm2, (%rdi)
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm4, %ymm1
+; AVX1-NEXT: vmovaps %ymm3, 32(%rdi)
+; AVX1-NEXT: vmovaps %ymm2, (%rdi)
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: uaddo_v16i32:
```

