Diffstat (limited to 'llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll')
-rw-r--r--  llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll | 64
 1 file changed, 32 insertions(+), 32 deletions(-)
diff --git a/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll b/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
index d276f2eaca8..90f65597810 100644
--- a/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
+++ b/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
@@ -1641,7 +1641,7 @@ define <8 x float> @f8xf32_f64(<8 x float> %a) {
define <8 x float> @f8xf32_f128(<8 x float> %a) {
; AVX-LABEL: f8xf32_f128:
; AVX: # %bb.0:
-; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [4,1,2,3,4,1,2,3]
; AVX-NEXT: # ymm1 = mem[0,1,0,1]
; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX-NEXT: vdivps %ymm0, %ymm1, %ymm0
@@ -1649,7 +1649,7 @@ define <8 x float> @f8xf32_f128(<8 x float> %a) {
;
; ALL32-LABEL: f8xf32_f128:
; ALL32: # %bb.0:
-; ALL32-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; ALL32-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [4,1,2,3,4,1,2,3]
; ALL32-NEXT: # ymm1 = mem[0,1,0,1]
; ALL32-NEXT: vaddps %ymm1, %ymm0, %ymm0
; ALL32-NEXT: vdivps %ymm0, %ymm1, %ymm0
@@ -1657,7 +1657,7 @@ define <8 x float> @f8xf32_f128(<8 x float> %a) {
;
; AVX-64-LABEL: f8xf32_f128:
; AVX-64: # %bb.0:
-; AVX-64-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX-64-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [4,1,2,3,4,1,2,3]
; AVX-64-NEXT: # ymm1 = mem[0,1,0,1]
; AVX-64-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX-64-NEXT: vdivps %ymm0, %ymm1, %ymm0
@@ -1665,7 +1665,7 @@ define <8 x float> @f8xf32_f128(<8 x float> %a) {
;
; ALL64-LABEL: f8xf32_f128:
; ALL64: # %bb.0:
-; ALL64-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; ALL64-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [4,1,2,3,4,1,2,3]
; ALL64-NEXT: # ymm1 = mem[0,1,0,1]
; ALL64-NEXT: vaddps %ymm1, %ymm0, %ymm0
; ALL64-NEXT: vdivps %ymm0, %ymm1, %ymm0
@@ -1735,7 +1735,7 @@ define <16 x float> @f16xf32_f64(<16 x float> %a) {
define <16 x float> @f16xf32_f128(<16 x float> %a) {
; AVX-LABEL: f16xf32_f128:
; AVX: # %bb.0:
-; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [4,1,2,3,4,1,2,3]
; AVX-NEXT: # ymm2 = mem[0,1,0,1]
; AVX-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX-NEXT: vaddps %ymm2, %ymm0, %ymm0
@@ -1745,7 +1745,7 @@ define <16 x float> @f16xf32_f128(<16 x float> %a) {
;
; AVX2-LABEL: f16xf32_f128:
; AVX2: # %bb.0:
-; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [4,1,2,3,4,1,2,3]
; AVX2-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vaddps %ymm2, %ymm0, %ymm0
@@ -1755,7 +1755,7 @@ define <16 x float> @f16xf32_f128(<16 x float> %a) {
;
; AVX512-LABEL: f16xf32_f128:
; AVX512: # %bb.0:
-; AVX512-NEXT: vbroadcastf32x4 {{.*#+}} zmm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX512-NEXT: vbroadcastf32x4 {{.*#+}} zmm1 = [4,1,2,3,4,1,2,3,4,1,2,3,4,1,2,3]
; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vdivps %zmm0, %zmm1, %zmm0
@@ -1763,7 +1763,7 @@ define <16 x float> @f16xf32_f128(<16 x float> %a) {
;
; AVX-64-LABEL: f16xf32_f128:
; AVX-64: # %bb.0:
-; AVX-64-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX-64-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [4,1,2,3,4,1,2,3]
; AVX-64-NEXT: # ymm2 = mem[0,1,0,1]
; AVX-64-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX-64-NEXT: vaddps %ymm2, %ymm0, %ymm0
@@ -1773,7 +1773,7 @@ define <16 x float> @f16xf32_f128(<16 x float> %a) {
;
; AVX2-64-LABEL: f16xf32_f128:
; AVX2-64: # %bb.0:
-; AVX2-64-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX2-64-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [4,1,2,3,4,1,2,3]
; AVX2-64-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-64-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX2-64-NEXT: vaddps %ymm2, %ymm0, %ymm0
@@ -1783,7 +1783,7 @@ define <16 x float> @f16xf32_f128(<16 x float> %a) {
;
; AVX512F-64-LABEL: f16xf32_f128:
; AVX512F-64: # %bb.0:
-; AVX512F-64-NEXT: vbroadcastf32x4 {{.*#+}} zmm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX512F-64-NEXT: vbroadcastf32x4 {{.*#+}} zmm1 = [4,1,2,3,4,1,2,3,4,1,2,3,4,1,2,3]
; AVX512F-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512F-64-NEXT: vaddps %zmm1, %zmm0, %zmm0
; AVX512F-64-NEXT: vdivps %zmm0, %zmm1, %zmm0
@@ -1797,7 +1797,7 @@ define <16 x float> @f16xf32_f128(<16 x float> %a) {
define <16 x float> @f16xf32_f256(<16 x float> %a) {
; AVX-LABEL: f16xf32_f256:
; AVX: # %bb.0:
-; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00]
+; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [8,1,2,3,4,5,6,7]
; AVX-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX-NEXT: vaddps %ymm2, %ymm0, %ymm0
; AVX-NEXT: vdivps %ymm0, %ymm2, %ymm0
@@ -1806,7 +1806,7 @@ define <16 x float> @f16xf32_f256(<16 x float> %a) {
;
; AVX2-LABEL: f16xf32_f256:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = [8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00]
+; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = [8,1,2,3,4,5,6,7]
; AVX2-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vaddps %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vdivps %ymm0, %ymm2, %ymm0
@@ -1815,7 +1815,7 @@ define <16 x float> @f16xf32_f256(<16 x float> %a) {
;
; AVX512-LABEL: f16xf32_f256:
; AVX512: # %bb.0:
-; AVX512-NEXT: vbroadcastf64x4 {{.*#+}} zmm1 = [8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00]
+; AVX512-NEXT: vbroadcastf64x4 {{.*#+}} zmm1 = [8,1,2,3,4,5,6,7,8,1,2,3,4,5,6,7]
; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vdivps %zmm0, %zmm1, %zmm0
@@ -1823,7 +1823,7 @@ define <16 x float> @f16xf32_f256(<16 x float> %a) {
;
; AVX-64-LABEL: f16xf32_f256:
; AVX-64: # %bb.0:
-; AVX-64-NEXT: vmovaps {{.*#+}} ymm2 = [8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00]
+; AVX-64-NEXT: vmovaps {{.*#+}} ymm2 = [8,1,2,3,4,5,6,7]
; AVX-64-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX-64-NEXT: vaddps %ymm2, %ymm0, %ymm0
; AVX-64-NEXT: vdivps %ymm0, %ymm2, %ymm0
@@ -1832,7 +1832,7 @@ define <16 x float> @f16xf32_f256(<16 x float> %a) {
;
; AVX2-64-LABEL: f16xf32_f256:
; AVX2-64: # %bb.0:
-; AVX2-64-NEXT: vmovaps {{.*#+}} ymm2 = [8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00]
+; AVX2-64-NEXT: vmovaps {{.*#+}} ymm2 = [8,1,2,3,4,5,6,7]
; AVX2-64-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX2-64-NEXT: vaddps %ymm2, %ymm0, %ymm0
; AVX2-64-NEXT: vdivps %ymm0, %ymm2, %ymm0
@@ -1841,7 +1841,7 @@ define <16 x float> @f16xf32_f256(<16 x float> %a) {
;
; AVX512F-64-LABEL: f16xf32_f256:
; AVX512F-64: # %bb.0:
-; AVX512F-64-NEXT: vbroadcastf64x4 {{.*#+}} zmm1 = [8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00]
+; AVX512F-64-NEXT: vbroadcastf64x4 {{.*#+}} zmm1 = [8,1,2,3,4,5,6,7,8,1,2,3,4,5,6,7]
; AVX512F-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512F-64-NEXT: vaddps %zmm1, %zmm0, %zmm0
; AVX512F-64-NEXT: vdivps %zmm0, %zmm1, %zmm0
@@ -1855,7 +1855,7 @@ define <16 x float> @f16xf32_f256(<16 x float> %a) {
define <4 x double> @f4xf64_f128(<4 x double> %a) {
; AVX-LABEL: f4xf64_f128:
; AVX: # %bb.0:
-; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [2,1,2,1]
; AVX-NEXT: # ymm1 = mem[0,1,0,1]
; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: vdivpd %ymm0, %ymm1, %ymm0
@@ -1863,7 +1863,7 @@ define <4 x double> @f4xf64_f128(<4 x double> %a) {
;
; ALL32-LABEL: f4xf64_f128:
; ALL32: # %bb.0:
-; ALL32-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; ALL32-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [2,1,2,1]
; ALL32-NEXT: # ymm1 = mem[0,1,0,1]
; ALL32-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; ALL32-NEXT: vdivpd %ymm0, %ymm1, %ymm0
@@ -1871,7 +1871,7 @@ define <4 x double> @f4xf64_f128(<4 x double> %a) {
;
; AVX-64-LABEL: f4xf64_f128:
; AVX-64: # %bb.0:
-; AVX-64-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; AVX-64-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [2,1,2,1]
; AVX-64-NEXT: # ymm1 = mem[0,1,0,1]
; AVX-64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX-64-NEXT: vdivpd %ymm0, %ymm1, %ymm0
@@ -1879,7 +1879,7 @@ define <4 x double> @f4xf64_f128(<4 x double> %a) {
;
; ALL64-LABEL: f4xf64_f128:
; ALL64: # %bb.0:
-; ALL64-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; ALL64-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [2,1,2,1]
; ALL64-NEXT: # ymm1 = mem[0,1,0,1]
; ALL64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; ALL64-NEXT: vdivpd %ymm0, %ymm1, %ymm0
@@ -1893,7 +1893,7 @@ define <4 x double> @f4xf64_f128(<4 x double> %a) {
define <8 x double> @f8xf64_f128(<8 x double> %a) {
; AVX-LABEL: f8xf64_f128:
; AVX: # %bb.0:
-; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [2,1,2,1]
; AVX-NEXT: # ymm2 = mem[0,1,0,1]
; AVX-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX-NEXT: vaddpd %ymm2, %ymm0, %ymm0
@@ -1903,7 +1903,7 @@ define <8 x double> @f8xf64_f128(<8 x double> %a) {
;
; AVX2-LABEL: f8xf64_f128:
; AVX2: # %bb.0:
-; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [2,1,2,1]
; AVX2-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vaddpd %ymm2, %ymm0, %ymm0
@@ -1913,7 +1913,7 @@ define <8 x double> @f8xf64_f128(<8 x double> %a) {
;
; AVX512-LABEL: f8xf64_f128:
; AVX512: # %bb.0:
-; AVX512-NEXT: vbroadcastf32x4 {{.*#+}} zmm1 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; AVX512-NEXT: vbroadcastf32x4 {{.*#+}} zmm1 = [2,1,2,1,2,1,2,1]
; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vdivpd %zmm0, %zmm1, %zmm0
@@ -1921,7 +1921,7 @@ define <8 x double> @f8xf64_f128(<8 x double> %a) {
;
; AVX-64-LABEL: f8xf64_f128:
; AVX-64: # %bb.0:
-; AVX-64-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; AVX-64-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [2,1,2,1]
; AVX-64-NEXT: # ymm2 = mem[0,1,0,1]
; AVX-64-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX-64-NEXT: vaddpd %ymm2, %ymm0, %ymm0
@@ -1931,7 +1931,7 @@ define <8 x double> @f8xf64_f128(<8 x double> %a) {
;
; AVX2-64-LABEL: f8xf64_f128:
; AVX2-64: # %bb.0:
-; AVX2-64-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; AVX2-64-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [2,1,2,1]
; AVX2-64-NEXT: # ymm2 = mem[0,1,0,1]
; AVX2-64-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX2-64-NEXT: vaddpd %ymm2, %ymm0, %ymm0
@@ -1941,7 +1941,7 @@ define <8 x double> @f8xf64_f128(<8 x double> %a) {
;
; AVX512F-64-LABEL: f8xf64_f128:
; AVX512F-64: # %bb.0:
-; AVX512F-64-NEXT: vbroadcastf32x4 {{.*#+}} zmm1 = [2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00,2.000000e+00,1.000000e+00]
+; AVX512F-64-NEXT: vbroadcastf32x4 {{.*#+}} zmm1 = [2,1,2,1,2,1,2,1]
; AVX512F-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512F-64-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512F-64-NEXT: vdivpd %zmm0, %zmm1, %zmm0
@@ -1962,7 +1962,7 @@ define <8 x double> @f8xf64_f128(<8 x double> %a) {
define <8 x double> @f8xf64_f256(<8 x double> %a) {
; AVX-LABEL: f8xf64_f256:
; AVX: # %bb.0:
-; AVX-NEXT: vmovapd {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX-NEXT: vmovapd {{.*#+}} ymm2 = [4,1,2,3]
; AVX-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVX-NEXT: vdivpd %ymm0, %ymm2, %ymm0
@@ -1971,7 +1971,7 @@ define <8 x double> @f8xf64_f256(<8 x double> %a) {
;
; AVX2-LABEL: f8xf64_f256:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovapd {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX2-NEXT: vmovapd {{.*#+}} ymm2 = [4,1,2,3]
; AVX2-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vdivpd %ymm0, %ymm2, %ymm0
@@ -1980,7 +1980,7 @@ define <8 x double> @f8xf64_f256(<8 x double> %a) {
;
; AVX512-LABEL: f8xf64_f256:
; AVX512: # %bb.0:
-; AVX512-NEXT: vbroadcastf64x4 {{.*#+}} zmm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX512-NEXT: vbroadcastf64x4 {{.*#+}} zmm1 = [4,1,2,3,4,1,2,3]
; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vdivpd %zmm0, %zmm1, %zmm0
@@ -1988,7 +1988,7 @@ define <8 x double> @f8xf64_f256(<8 x double> %a) {
;
; AVX-64-LABEL: f8xf64_f256:
; AVX-64: # %bb.0:
-; AVX-64-NEXT: vmovapd {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX-64-NEXT: vmovapd {{.*#+}} ymm2 = [4,1,2,3]
; AVX-64-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX-64-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVX-64-NEXT: vdivpd %ymm0, %ymm2, %ymm0
@@ -1997,7 +1997,7 @@ define <8 x double> @f8xf64_f256(<8 x double> %a) {
;
; AVX2-64-LABEL: f8xf64_f256:
; AVX2-64: # %bb.0:
-; AVX2-64-NEXT: vmovapd {{.*#+}} ymm2 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX2-64-NEXT: vmovapd {{.*#+}} ymm2 = [4,1,2,3]
; AVX2-64-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX2-64-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVX2-64-NEXT: vdivpd %ymm0, %ymm2, %ymm0
@@ -2006,7 +2006,7 @@ define <8 x double> @f8xf64_f256(<8 x double> %a) {
;
; AVX512F-64-LABEL: f8xf64_f256:
; AVX512F-64: # %bb.0:
-; AVX512F-64-NEXT: vbroadcastf64x4 {{.*#+}} zmm1 = [4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,1.000000e+00,2.000000e+00,3.000000e+00]
+; AVX512F-64-NEXT: vbroadcastf64x4 {{.*#+}} zmm1 = [4,1,2,3,4,1,2,3]
; AVX512F-64-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512F-64-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512F-64-NEXT: vdivpd %zmm0, %zmm1, %zmm0