Diffstat (limited to 'llvm/test/CodeGen/X86/subvector-broadcast.ll')
-rw-r--r--  llvm/test/CodeGen/X86/subvector-broadcast.ll  68
1 file changed, 34 insertions(+), 34 deletions(-)
diff --git a/llvm/test/CodeGen/X86/subvector-broadcast.ll b/llvm/test/CodeGen/X86/subvector-broadcast.ll
index 7ecfac5151f..3ce584eff2a 100644
--- a/llvm/test/CodeGen/X86/subvector-broadcast.ll
+++ b/llvm/test/CodeGen/X86/subvector-broadcast.ll
@@ -835,24 +835,24 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>*
define void @fallback_broadcast_v4i64_to_v8i64(<4 x i64> %a, <8 x i64> %b) {
; X32-AVX1-LABEL: fallback_broadcast_v4i64_to_v8i64:
; X32-AVX1: # %bb.0: # %entry
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,0,2,0]
-; X32-AVX1-NEXT: vpaddq %xmm3, %xmm0, %xmm4
-; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [3,0,4,0]
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [3,0,4,0]
+; X32-AVX1-NEXT: vpaddq %xmm4, %xmm3, %xmm3
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,0,2,0]
; X32-AVX1-NEXT: vpaddq %xmm5, %xmm0, %xmm0
-; X32-AVX1-NEXT: vmovaps {{.*#+}} ymm6 = [1,0,2,0,3,0,4,0]
-; X32-AVX1-NEXT: vextractf128 $1, %ymm2, %xmm7
-; X32-AVX1-NEXT: vpaddq %xmm5, %xmm7, %xmm7
-; X32-AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm2, %ymm2
-; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
-; X32-AVX1-NEXT: vpaddq %xmm5, %xmm7, %xmm5
-; X32-AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
-; X32-AVX1-NEXT: vandps %ymm6, %ymm1, %ymm1
-; X32-AVX1-NEXT: vandps %ymm6, %ymm2, %ymm2
-; X32-AVX1-NEXT: vmovdqu %xmm0, ga4+16
-; X32-AVX1-NEXT: vmovdqu %xmm4, ga4
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; X32-AVX1-NEXT: vmovaps {{.*#+}} ymm3 = [1,0,2,0,3,0,4,0]
+; X32-AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
+; X32-AVX1-NEXT: vpaddq %xmm4, %xmm6, %xmm6
+; X32-AVX1-NEXT: vpaddq %xmm5, %xmm2, %xmm2
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm2, %ymm2
+; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; X32-AVX1-NEXT: vpaddq %xmm4, %xmm6, %xmm4
+; X32-AVX1-NEXT: vpaddq %xmm5, %xmm1, %xmm1
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; X32-AVX1-NEXT: vandps %ymm3, %ymm1, %ymm1
+; X32-AVX1-NEXT: vandps %ymm3, %ymm2, %ymm2
+; X32-AVX1-NEXT: vmovups %ymm0, ga4
; X32-AVX1-NEXT: vmovups %ymm2, gb4+32
; X32-AVX1-NEXT: vmovups %ymm1, gb4
; X32-AVX1-NEXT: vzeroupper
@@ -886,24 +886,24 @@ define void @fallback_broadcast_v4i64_to_v8i64(<4 x i64> %a, <8 x i64> %b) {
;
; X64-AVX1-LABEL: fallback_broadcast_v4i64_to_v8i64:
; X64-AVX1: # %bb.0: # %entry
-; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,2]
-; X64-AVX1-NEXT: vpaddq %xmm3, %xmm0, %xmm4
-; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [3,4]
+; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [3,4]
+; X64-AVX1-NEXT: vpaddq %xmm4, %xmm3, %xmm3
+; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,2]
; X64-AVX1-NEXT: vpaddq %xmm5, %xmm0, %xmm0
-; X64-AVX1-NEXT: vmovaps {{.*#+}} ymm6 = [1,2,3,4]
-; X64-AVX1-NEXT: vextractf128 $1, %ymm2, %xmm7
-; X64-AVX1-NEXT: vpaddq %xmm5, %xmm7, %xmm7
-; X64-AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
-; X64-AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm2, %ymm2
-; X64-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
-; X64-AVX1-NEXT: vpaddq %xmm5, %xmm7, %xmm5
-; X64-AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
-; X64-AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
-; X64-AVX1-NEXT: vandps %ymm6, %ymm1, %ymm1
-; X64-AVX1-NEXT: vandps %ymm6, %ymm2, %ymm2
-; X64-AVX1-NEXT: vmovdqu %xmm0, ga4+{{.*}}(%rip)
-; X64-AVX1-NEXT: vmovdqu %xmm4, {{.*}}(%rip)
+; X64-AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; X64-AVX1-NEXT: vmovaps {{.*#+}} ymm3 = [1,2,3,4]
+; X64-AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
+; X64-AVX1-NEXT: vpaddq %xmm4, %xmm6, %xmm6
+; X64-AVX1-NEXT: vpaddq %xmm5, %xmm2, %xmm2
+; X64-AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm2, %ymm2
+; X64-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; X64-AVX1-NEXT: vpaddq %xmm4, %xmm6, %xmm4
+; X64-AVX1-NEXT: vpaddq %xmm5, %xmm1, %xmm1
+; X64-AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; X64-AVX1-NEXT: vandps %ymm3, %ymm1, %ymm1
+; X64-AVX1-NEXT: vandps %ymm3, %ymm2, %ymm2
+; X64-AVX1-NEXT: vmovups %ymm0, {{.*}}(%rip)
; X64-AVX1-NEXT: vmovups %ymm2, gb4+{{.*}}(%rip)
; X64-AVX1-NEXT: vmovups %ymm1, {{.*}}(%rip)
; X64-AVX1-NEXT: vzeroupper
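
The IR body being compiled here can be read back from the CHECK lines. A rough reconstruction (the zeroinitializer globals and align 8 are assumptions; the function signature, constants, operations, and the @ga4/@gb4 stores are taken from the diff itself):

@ga4 = global <4 x i64> zeroinitializer, align 8
@gb4 = global <8 x i64> zeroinitializer, align 8

define void @fallback_broadcast_v4i64_to_v8i64(<4 x i64> %a, <8 x i64> %b) {
entry:
  ; stored to @ga4: %a plus <1,2,3,4> (the xmm vpaddq pairs above;
  ; on X32 the i64 constants print as i32 pairs, e.g. [1,0,2,0])
  %1 = add <4 x i64> %a, <i64 1, i64 2, i64 3, i64 4>
  ; stored to @gb4: %b plus the same constant repeated across 8 lanes,
  ; then masked with that repeated constant (the vandps lines)
  %2 = add <8 x i64> %b, <i64 1, i64 2, i64 3, i64 4, i64 1, i64 2, i64 3, i64 4>
  %3 = and <8 x i64> %2, <i64 1, i64 2, i64 3, i64 4, i64 1, i64 2, i64 3, i64 4>
  store <4 x i64> %1, <4 x i64>* @ga4, align 8
  store <8 x i64> %3, <8 x i64>* @gb4, align 8
  ret void
}

The updated checks reflect a codegen change visible in both hunks: instead of storing the <4 x i64> result as two xmm halves (vmovdqu to ga4 and ga4+16), the new code reassembles the full ymm with vinsertf128 and issues a single vmovups ymm store, and the extra temporary xmm7 is no longer needed.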