Diffstat (limited to 'llvm/test/CodeGen/X86/avx-schedule.ll')
-rw-r--r--  llvm/test/CodeGen/X86/avx-schedule.ll | 272
1 file changed, 136 insertions, 136 deletions
diff --git a/llvm/test/CodeGen/X86/avx-schedule.ll b/llvm/test/CodeGen/X86/avx-schedule.ll index 3f0035dd9fb..dd71956da63 100644 --- a/llvm/test/CodeGen/X86/avx-schedule.ll +++ b/llvm/test/CodeGen/X86/avx-schedule.ll @@ -42,7 +42,7 @@ define <4 x double> @test_addpd(<4 x double> %a0, <4 x double> %a1, <4 x double> ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vaddpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fadd <4 x double> %a0, %a1 %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = fadd <4 x double> %1, %2 @@ -84,7 +84,7 @@ define <8 x float> @test_addps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vaddps (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fadd <8 x float> %a0, %a1 %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = fadd <8 x float> %1, %2 @@ -126,7 +126,7 @@ define <4 x double> @test_addsubpd(<4 x double> %a0, <4 x double> %a1, <4 x doub ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %1, <4 x double> %2) @@ -169,7 +169,7 @@ define <8 x float> @test_addsubps(<8 x float> %a0, <8 x float> %a1, <8 x float> ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vaddsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %1, <8 x float> %2) @@ -218,7 +218,7 @@ define <4 x double> @test_andnotpd(<4 x double> %a0, <4 x double> %a1, <4 x doub ; ZNVER1-NEXT: vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vandnpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <4 x double> %a0 to <4 x i64> %2 = bitcast <4 x double> %a1 to <4 x i64> %3 = xor <4 x i64> %1, <i64 -1, i64 -1, i64 -1, i64 -1> @@ -273,7 +273,7 @@ define <8 x float> @test_andnotps(<8 x float> %a0, <8 x float> %a1, <8 x float> ; ZNVER1-NEXT: vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vandnps (%rdi), %ymm0, %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <8 x float> %a0 to <4 x i64> %2 = bitcast <8 x float> %a1 to <4 x i64> %3 = xor <4 x i64> %1, <i64 -1, i64 -1, i64 -1, i64 -1> @@ -328,7 +328,7 @@ define <4 x double> @test_andpd(<4 x double> %a0, <4 x double> %a1, <4 x double> ; ZNVER1-NEXT: vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vandpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <4 x double> 
%a0 to <4 x i64> %2 = bitcast <4 x double> %a1 to <4 x i64> %3 = and <4 x i64> %1, %2 @@ -381,7 +381,7 @@ define <8 x float> @test_andps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a ; ZNVER1-NEXT: vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vandps (%rdi), %ymm0, %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <8 x float> %a0 to <4 x i64> %2 = bitcast <8 x float> %a1 to <4 x i64> %3 = and <4 x i64> %1, %2 @@ -434,7 +434,7 @@ define <4 x double> @test_blendpd(<4 x double> %a0, <4 x double> %a1, <4 x doubl ; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50] ; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 5, i32 6, i32 3> %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = fadd <4 x double> %a1, %1 @@ -477,7 +477,7 @@ define <8 x float> @test_blendps(<8 x float> %a0, <8 x float> %a1, <8 x float> * ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.50] ; ZNVER1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 7> %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 0, i32 1, i32 10, i32 3, i32 12, i32 13, i32 14, i32 7> @@ -519,7 +519,7 @@ define <4 x double> @test_blendvpd(<4 x double> %a0, <4 x double> %a1, <4 x doub ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:0.50] ; ZNVER1-NEXT: vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) %2 = load <4 x double>, <4 x double> *%a3, align 32 %3 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %1, <4 x double> %2, <4 x double> %a2) @@ -562,7 +562,7 @@ define <8 x float> @test_blendvps(<8 x float> %a0, <8 x float> %a1, <8 x float> ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:0.50] ; ZNVER1-NEXT: vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) %2 = load <8 x float>, <8 x float> *%a3, align 32 %3 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %1, <8 x float> %2, <8 x float> %a2) @@ -599,7 +599,7 @@ define <8 x float> @test_broadcastf128(<4 x float> *%a0) { ; ZNVER1-LABEL: test_broadcastf128: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <4 x float>, <4 x float> *%a0, align 32 %2 = shufflevector <4 x float> %1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3> ret <8 x float> %2 @@ -634,7 +634,7 @@ define <4 x double> @test_broadcastsd_ymm(double *%a0) { ; 
ZNVER1-LABEL: test_broadcastsd_ymm: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vbroadcastsd (%rdi), %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load double, double *%a0, align 8 %2 = insertelement <4 x double> undef, double %1, i32 0 %3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> zeroinitializer @@ -670,7 +670,7 @@ define <4 x float> @test_broadcastss(float *%a0) { ; ZNVER1-LABEL: test_broadcastss: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vbroadcastss (%rdi), %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load float, float *%a0, align 4 %2 = insertelement <4 x float> undef, float %1, i32 0 %3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> zeroinitializer @@ -706,7 +706,7 @@ define <8 x float> @test_broadcastss_ymm(float *%a0) { ; ZNVER1-LABEL: test_broadcastss_ymm: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vbroadcastss (%rdi), %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load float, float *%a0, align 4 %2 = insertelement <8 x float> undef, float %1, i32 0 %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> zeroinitializer @@ -754,7 +754,7 @@ define <4 x double> @test_cmppd(<4 x double> %a0, <4 x double> %a1, <4 x double> ; ZNVER1-NEXT: vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00] ; ZNVER1-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00] ; ZNVER1-NEXT: vorpd %ymm0, %ymm1, %ymm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fcmp oeq <4 x double> %a0, %a1 %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = fcmp oeq <4 x double> %a0, %2 @@ -806,7 +806,7 @@ define <8 x float> @test_cmpps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a ; ZNVER1-NEXT: vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00] ; ZNVER1-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [10:1.00] ; ZNVER1-NEXT: vorps %ymm0, %ymm1, %ymm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fcmp oeq <8 x float> %a0, %a1 %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = fcmp oeq <8 x float> %a0, %2 @@ -858,7 +858,7 @@ define <4 x double> @test_cvtdq2pd(<4 x i32> %a0, <4 x i32> *%a1) { ; ZNVER1-NEXT: vcvtdq2pd (%rdi), %ymm1 # sched: [12:1.00] ; ZNVER1-NEXT: vcvtdq2pd %xmm0, %ymm0 # sched: [5:1.00] ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = sitofp <4 x i32> %a0 to <4 x double> %2 = load <4 x i32>, <4 x i32> *%a1, align 16 %3 = sitofp <4 x i32> %2 to <4 x double> @@ -909,7 +909,7 @@ define <8 x float> @test_cvtdq2ps(<8 x i32> %a0, <8 x i32> *%a1) { ; ZNVER1-NEXT: vcvtdq2ps (%rdi), %ymm1 # sched: [12:1.00] ; ZNVER1-NEXT: vcvtdq2ps %ymm0, %ymm0 # sched: [5:1.00] ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = sitofp <8 x i32> %a0 to <8 x float> %2 = load <8 x i32>, <8 x i32> *%a1, align 16 %3 = sitofp <8 x i32> %2 to <8 x float> @@ -957,8 +957,8 @@ define <8 x i32> @test_cvtpd2dq(<4 x double> %a0, <4 x double> *%a1) { ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vcvttpd2dqy (%rdi), %xmm1 # sched: [12:1.00] ; ZNVER1-NEXT: vcvttpd2dq %ymm0, %xmm0 # sched: [5:1.00] -; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [2:0.67] +; 
ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fptosi <4 x double> %a0 to <4 x i32> %2 = load <4 x double>, <4 x double> *%a1, align 32 %3 = fptosi <4 x double> %2 to <4 x i32> @@ -1004,10 +1004,10 @@ define <8 x float> @test_cvtpd2ps(<4 x double> %a0, <4 x double> *%a1) { ; ; ZNVER1-LABEL: test_cvtpd2ps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [12:1.00] +; ZNVER1-NEXT: vcvtpd2psy (%rdi), %xmm1 # sched: [11:1.00] ; ZNVER1-NEXT: vcvtpd2ps %ymm0, %xmm0 # sched: [5:1.00] -; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [2:0.67] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fptrunc <4 x double> %a0 to <4 x float> %2 = load <4 x double>, <4 x double> *%a1, align 32 %3 = fptrunc <4 x double> %2 to <4 x float> @@ -1056,7 +1056,7 @@ define <8 x i32> @test_cvtps2dq(<8 x float> %a0, <8 x float> *%a1) { ; ZNVER1-NEXT: vcvttps2dq (%rdi), %ymm1 # sched: [12:1.00] ; ZNVER1-NEXT: vcvttps2dq %ymm0, %ymm0 # sched: [5:1.00] ; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fptosi <8 x float> %a0 to <8 x i32> %2 = load <8 x float>, <8 x float> *%a1, align 32 %3 = fptosi <8 x float> %2 to <8 x i32> @@ -1097,9 +1097,9 @@ define <4 x double> @test_divpd(<4 x double> %a0, <4 x double> %a1, <4 x double> ; ; ZNVER1-LABEL: test_divpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [15:1.00] -; ZNVER1-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [22:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: vdivpd %ymm1, %ymm0, %ymm0 # sched: [15:15.00] +; ZNVER1-NEXT: vdivpd (%rdi), %ymm0, %ymm0 # sched: [22:22.00] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fdiv <4 x double> %a0, %a1 %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = fdiv <4 x double> %1, %2 @@ -1139,9 +1139,9 @@ define <8 x float> @test_divps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a ; ; ZNVER1-LABEL: test_divps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [15:1.00] -; ZNVER1-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [22:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: vdivps %ymm1, %ymm0, %ymm0 # sched: [12:12.00] +; ZNVER1-NEXT: vdivps (%rdi), %ymm0, %ymm0 # sched: [19:19.00] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fdiv <8 x float> %a0, %a1 %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = fdiv <8 x float> %1, %2 @@ -1181,9 +1181,9 @@ define <8 x float> @test_dpps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2 ; ; ZNVER1-LABEL: test_dpps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [100:?] 
+; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7) %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %1, <8 x float> %2, i8 7) @@ -1228,10 +1228,10 @@ define <4 x float> @test_extractf128(<8 x float> %a0, <8 x float> %a1, <4 x floa ; ; ZNVER1-LABEL: test_extractf128: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.50] -; ZNVER1-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [1:0.50] -; ZNVER1-NEXT: vzeroupper -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.33] +; ZNVER1-NEXT: vextractf128 $1, %ymm1, (%rdi) # sched: [8:0.50] +; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x float> %a0, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> %2 = shufflevector <8 x float> %a1, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> store <4 x float> %2, <4 x float> *%a2 @@ -1271,9 +1271,9 @@ define <4 x double> @test_haddpd(<4 x double> %a0, <4 x double> %a1, <4 x double ; ; ZNVER1-LABEL: test_haddpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: vhaddpd %ymm1, %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vhaddpd (%rdi), %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %1, <4 x double> %2) @@ -1314,9 +1314,9 @@ define <8 x float> @test_haddps(<8 x float> %a0, <8 x float> %a1, <8 x float> *% ; ; ZNVER1-LABEL: test_haddps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: vhaddps %ymm1, %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vhaddps (%rdi), %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %1, <8 x float> %2) @@ -1357,9 +1357,9 @@ define <4 x double> @test_hsubpd(<4 x double> %a0, <4 x double> %a1, <4 x double ; ; ZNVER1-LABEL: test_hsubpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: vhsubpd %ymm1, %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vhsubpd (%rdi), %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %1, <4 x double> %2) @@ -1400,9 +1400,9 @@ define <8 x float> @test_hsubps(<8 x float> %a0, <8 x float> %a1, <8 x float> *% ; ; ZNVER1-LABEL: test_hsubps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: vhsubps %ymm1, %ymm0, %ymm0 # sched: [100:?] 
+; ZNVER1-NEXT: vhsubps (%rdi), %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %1, <8 x float> %2) @@ -1448,10 +1448,10 @@ define <8 x float> @test_insertf128(<8 x float> %a0, <4 x float> %a1, <4 x float ; ; ZNVER1-LABEL: test_insertf128: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:0.50] -; ZNVER1-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [2:0.67] +; ZNVER1-NEXT: vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [9:0.67] ; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x float> %a1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef> %2 = shufflevector <8 x float> %a0, <8 x float> %1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11> %3 = load <4 x float>, <4 x float> *%a2, align 16 @@ -1490,7 +1490,7 @@ define <32 x i8> @test_lddqu(i8* %a0) { ; ZNVER1-LABEL: test_lddqu: ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vlddqu (%rdi), %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <32 x i8> @llvm.x86.avx.ldu.dq.256(i8* %a0) ret <32 x i8> %1 } @@ -1534,10 +1534,10 @@ define <2 x double> @test_maskmovpd(i8* %a0, <2 x i64> %a1, <2 x double> %a2) { ; ; ZNVER1-LABEL: test_maskmovpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 -; ZNVER1-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) +; ZNVER1-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [8:0.50] +; ZNVER1-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [4:0.50] ; ZNVER1-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %a0, <2 x i64> %a1) call void @llvm.x86.avx.maskstore.pd(i8* %a0, <2 x i64> %a1, <2 x double> %a2) ret <2 x double> %1 @@ -1583,10 +1583,10 @@ define <4 x double> @test_maskmovpd_ymm(i8* %a0, <4 x i64> %a1, <4 x double> %a2 ; ; ZNVER1-LABEL: test_maskmovpd_ymm: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 -; ZNVER1-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) +; ZNVER1-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [8:1.00] +; ZNVER1-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [5:1.00] ; ZNVER1-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %a0, <4 x i64> %a1) call void @llvm.x86.avx.maskstore.pd.256(i8* %a0, <4 x i64> %a1, <4 x double> %a2) ret <4 x double> %1 @@ -1632,10 +1632,10 @@ define <4 x float> @test_maskmovps(i8* %a0, <4 x i32> %a1, <4 x float> %a2) { ; ; ZNVER1-LABEL: test_maskmovps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 -; ZNVER1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) +; ZNVER1-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [8:0.50] +; ZNVER1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [4:0.50] ; ZNVER1-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %a0, <4 x i32> %a1) call void @llvm.x86.avx.maskstore.ps(i8* %a0, <4 x i32> %a1, <4 x 
float> %a2) ret <4 x float> %1 @@ -1681,10 +1681,10 @@ define <8 x float> @test_maskmovps_ymm(i8* %a0, <8 x i32> %a1, <8 x float> %a2) ; ; ZNVER1-LABEL: test_maskmovps_ymm: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 -; ZNVER1-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) +; ZNVER1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [8:1.00] +; ZNVER1-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [5:1.00] ; ZNVER1-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %a0, <8 x i32> %a1) call void @llvm.x86.avx.maskstore.ps.256(i8* %a0, <8 x i32> %a1, <8 x float> %a2) ret <8 x float> %1 @@ -1727,7 +1727,7 @@ define <4 x double> @test_maxpd(<4 x double> %a0, <4 x double> %a1, <4 x double> ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmaxpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %1, <4 x double> %2) @@ -1770,7 +1770,7 @@ define <8 x float> @test_maxps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmaxps (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %1, <8 x float> %2) @@ -1813,7 +1813,7 @@ define <4 x double> @test_minpd(<4 x double> %a0, <4 x double> %a1, <4 x double> ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vminpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %1, <4 x double> %2) @@ -1856,7 +1856,7 @@ define <8 x float> @test_minps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vminps (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %1, <8 x float> %2) @@ -1905,7 +1905,7 @@ define <4 x double> @test_movapd(<4 x double> *%a0, <4 x double> *%a1) { ; ZNVER1-NEXT: vmovapd (%rdi), %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovapd %ymm0, (%rsi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <4 x double>, <4 x double> *%a0, align 32 %2 = fadd <4 x double> %1, %1 store <4 x double> %2, <4 x double> *%a1, align 32 @@ -1953,7 +1953,7 @@ define <8 x float> @test_movaps(<8 x float> *%a0, <8 x float> *%a1) { ; ZNVER1-NEXT: vmovaps (%rdi), %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00] ; 
ZNVER1-NEXT: vmovaps %ymm0, (%rsi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <8 x float>, <8 x float> *%a0, align 32 %2 = fadd <8 x float> %1, %1 store <8 x float> %2, <8 x float> *%a1, align 32 @@ -2001,7 +2001,7 @@ define <4 x double> @test_movddup(<4 x double> %a0, <4 x double> *%a1) { ; ZNVER1-NEXT: vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [8:0.50] ; ZNVER1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:0.50] ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2> %2 = load <4 x double>, <4 x double> *%a1, align 32 %3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2> @@ -2041,9 +2041,9 @@ define i32 @test_movmskpd(<4 x double> %a0) { ; ; ZNVER1-LABEL: test_movmskpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmovmskpd %ymm0, %eax # sched: [1:0.25] -; ZNVER1-NEXT: vzeroupper -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: vmovmskpd %ymm0, %eax # sched: [1:1.00] +; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0) ret i32 %1 } @@ -2081,9 +2081,9 @@ define i32 @test_movmskps(<8 x float> %a0) { ; ; ZNVER1-LABEL: test_movmskps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmovmskps %ymm0, %eax # sched: [1:0.25] -; ZNVER1-NEXT: vzeroupper -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: vmovmskps %ymm0, %eax # sched: [1:1.00] +; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0) ret i32 %1 } @@ -2124,7 +2124,7 @@ define <4 x double> @test_movntpd(<4 x double> %a0, <4 x double> *%a1) { ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovntpd %ymm0, (%rdi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fadd <4 x double> %a0, %a0 store <4 x double> %1, <4 x double> *%a1, align 32, !nontemporal !0 ret <4 x double> %1 @@ -2165,7 +2165,7 @@ define <8 x float> @test_movntps(<8 x float> %a0, <8 x float> *%a1) { ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovntps %ymm0, (%rdi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fadd <8 x float> %a0, %a0 store <8 x float> %1, <8 x float> *%a1, align 32, !nontemporal !0 ret <8 x float> %1 @@ -2212,7 +2212,7 @@ define <8 x float> @test_movshdup(<8 x float> %a0, <8 x float> *%a1) { ; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [8:0.50] ; ZNVER1-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:0.50] ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7> %2 = load <8 x float>, <8 x float> *%a1, align 32 %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7> @@ -2261,7 +2261,7 @@ define <8 x float> @test_movsldup(<8 x float> %a0, <8 x float> *%a1) { ; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [8:0.50] ; ZNVER1-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: 
[1:0.50] ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6> %2 = load <8 x float>, <8 x float> *%a1, align 32 %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6> @@ -2312,7 +2312,7 @@ define <4 x double> @test_movupd(<4 x double> *%a0, <4 x double> *%a1) { ; ZNVER1-NEXT: vmovupd (%rdi), %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovupd %ymm0, (%rsi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <4 x double>, <4 x double> *%a0, align 1 %2 = fadd <4 x double> %1, %1 store <4 x double> %2, <4 x double> *%a1, align 1 @@ -2362,7 +2362,7 @@ define <8 x float> @test_movups(<8 x float> *%a0, <8 x float> *%a1) { ; ZNVER1-NEXT: vmovups (%rdi), %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vmovups %ymm0, (%rsi) # sched: [1:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <8 x float>, <8 x float> *%a0, align 1 %2 = fadd <8 x float> %1, %1 store <8 x float> %2, <8 x float> *%a1, align 1 @@ -2402,9 +2402,9 @@ define <4 x double> @test_mulpd(<4 x double> %a0, <4 x double> %a1, <4 x double> ; ; ZNVER1-LABEL: test_mulpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [5:1.00] -; ZNVER1-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [12:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: vmulpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50] +; ZNVER1-NEXT: vmulpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fmul <4 x double> %a0, %a1 %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = fmul <4 x double> %1, %2 @@ -2444,9 +2444,9 @@ define <8 x float> @test_mulps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a ; ; ZNVER1-LABEL: test_mulps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00] -; ZNVER1-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [12:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: vmulps %ymm1, %ymm0, %ymm0 # sched: [4:0.50] +; ZNVER1-NEXT: vmulps (%rdi), %ymm0, %ymm0 # sched: [11:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fmul <8 x float> %a0, %a1 %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = fmul <8 x float> %1, %2 @@ -2494,7 +2494,7 @@ define <4 x double> @orpd(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a2) ; ZNVER1-NEXT: vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <4 x double> %a0 to <4 x i64> %2 = bitcast <4 x double> %a1 to <4 x i64> %3 = or <4 x i64> %1, %2 @@ -2547,7 +2547,7 @@ define <8 x float> @test_orps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2 ; ZNVER1-NEXT: vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <8 x float> %a0 to <4 x i64> %2 = bitcast <8 x float> %a1 to <4 x i64> %3 = or <4 x i64> %1, %2 @@ -2600,7 +2600,7 @@ define <2 x double> 
@test_permilpd(<2 x double> %a0, <2 x double> *%a1) { ; ZNVER1-NEXT: vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [8:0.50] ; ZNVER1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:0.50] ; ZNVER1-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> <i32 1, i32 0> %2 = load <2 x double>, <2 x double> *%a1, align 16 %3 = shufflevector <2 x double> %2, <2 x double> undef, <2 x i32> <i32 1, i32 0> @@ -2649,7 +2649,7 @@ define <4 x double> @test_permilpd_ymm(<4 x double> %a0, <4 x double> *%a1) { ; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [8:0.50] ; ZNVER1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:0.50] ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 2, i32 3> %2 = load <4 x double>, <4 x double> *%a1, align 32 %3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 2, i32 3> @@ -2698,7 +2698,7 @@ define <4 x float> @test_permilps(<4 x float> %a0, <4 x float> *%a1) { ; ZNVER1-NEXT: vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [8:0.50] ; ZNVER1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:0.50] ; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0> %2 = load <4 x float>, <4 x float> *%a1, align 16 %3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0> @@ -2747,7 +2747,7 @@ define <8 x float> @test_permilps_ymm(<8 x float> %a0, <8 x float> *%a1) { ; ZNVER1-NEXT: vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [8:0.50] ; ZNVER1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:0.50] ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4> %2 = load <8 x float>, <8 x float> *%a1, align 32 %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4> @@ -2790,7 +2790,7 @@ define <2 x double> @test_permilvarpd(<2 x double> %a0, <2 x i64> %a1, <2 x i64> ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] ; ZNVER1-NEXT: vpermilpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> %a1) %2 = load <2 x i64>, <2 x i64> *%a2, align 16 %3 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %1, <2 x i64> %2) @@ -2833,7 +2833,7 @@ define <4 x double> @test_permilvarpd_ymm(<4 x double> %a0, <4 x i64> %a1, <4 x ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] ; ZNVER1-NEXT: vpermilpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %a1) %2 = load <4 x i64>, <4 x i64> *%a2, align 32 %3 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %1, <4 x i64> %2) @@ -2876,7 
+2876,7 @@ define <4 x float> @test_permilvarps(<4 x float> %a0, <4 x i32> %a1, <4 x i32> * ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:0.50] ; ZNVER1-NEXT: vpermilps (%rdi), %xmm0, %xmm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %a1) %2 = load <4 x i32>, <4 x i32> *%a2, align 16 %3 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> %2) @@ -2919,7 +2919,7 @@ define <8 x float> @test_permilvarps_ymm(<8 x float> %a0, <8 x i32> %a1, <8 x i3 ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:0.50] ; ZNVER1-NEXT: vpermilps (%rdi), %ymm0, %ymm0 # sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %a1) %2 = load <8 x i32>, <8 x i32> *%a2, align 32 %3 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> %2) @@ -2968,7 +2968,7 @@ define <8 x float> @test_rcpps(<8 x float> %a0, <8 x float> *%a1) { ; ZNVER1-NEXT: vrcpps (%rdi), %ymm1 # sched: [12:0.50] ; ZNVER1-NEXT: vrcpps %ymm0, %ymm0 # sched: [5:0.50] ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %a0) %2 = load <8 x float>, <8 x float> *%a1, align 32 %3 = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %2) @@ -3015,10 +3015,10 @@ define <4 x double> @test_roundpd(<4 x double> %a0, <4 x double> *%a1) { ; ; ZNVER1-LABEL: test_roundpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [10:1.00] -; ZNVER1-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vroundpd $7, (%rdi), %ymm1 # sched: [11:1.00] +; ZNVER1-NEXT: vroundpd $7, %ymm0, %ymm0 # sched: [4:1.00] ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 7) %2 = load <4 x double>, <4 x double> *%a1, align 32 %3 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %2, i32 7) @@ -3065,10 +3065,10 @@ define <8 x float> @test_roundps(<8 x float> %a0, <8 x float> *%a1) { ; ; ZNVER1-LABEL: test_roundps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [10:1.00] -; ZNVER1-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [3:1.00] +; ZNVER1-NEXT: vroundps $7, (%rdi), %ymm1 # sched: [11:1.00] +; ZNVER1-NEXT: vroundps $7, %ymm0, %ymm0 # sched: [4:1.00] ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 7) %2 = load <8 x float>, <8 x float> *%a1, align 32 %3 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %2, i32 7) @@ -3118,7 +3118,7 @@ define <8 x float> @test_rsqrtps(<8 x float> %a0, <8 x float> *%a1) { ; ZNVER1-NEXT: vrsqrtps (%rdi), %ymm1 # sched: [12:0.50] ; ZNVER1-NEXT: vrsqrtps %ymm0, %ymm0 # sched: [5:0.50] ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %a0) %2 = load <8 x float>, <8 x float> *%a1, align 32 %3 = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %2) @@ 
-3168,7 +3168,7 @@ define <4 x double> @test_shufpd(<4 x double> %a0, <4 x double> %a1, <4 x double ; ZNVER1-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:0.50] ; ZNVER1-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 4, i32 2, i32 7> %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = shufflevector <4 x double> %a1, <4 x double> %2, <4 x i32> <i32 1, i32 4, i32 2, i32 7> @@ -3211,7 +3211,7 @@ define <8 x float> @test_shufps(<8 x float> %a0, <8 x float> %a1, <8 x float> *% ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:0.50] ; ZNVER1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 0, i32 8, i32 8, i32 4, i32 4, i32 12, i32 12> %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 0, i32 3, i32 8, i32 8, i32 4, i32 7, i32 12, i32 12> @@ -3256,10 +3256,10 @@ define <4 x double> @test_sqrtpd(<4 x double> %a0, <4 x double> *%a1) { ; ; ZNVER1-LABEL: test_sqrtpd: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [27:1.00] -; ZNVER1-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [20:1.00] +; ZNVER1-NEXT: vsqrtpd (%rdi), %ymm1 # sched: [47:47.00] +; ZNVER1-NEXT: vsqrtpd %ymm0, %ymm0 # sched: [40:40.00] ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %a0) %2 = load <4 x double>, <4 x double> *%a1, align 32 %3 = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %2) @@ -3306,10 +3306,10 @@ define <8 x float> @test_sqrtps(<8 x float> %a0, <8 x float> *%a1) { ; ; ZNVER1-LABEL: test_sqrtps: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vsqrtps (%rdi), %ymm1 # sched: [27:1.00] -; ZNVER1-NEXT: vsqrtps %ymm0, %ymm0 # sched: [20:1.00] +; ZNVER1-NEXT: vsqrtps (%rdi), %ymm1 # sched: [35:35.00] +; ZNVER1-NEXT: vsqrtps %ymm0, %ymm0 # sched: [28:28.00] ; ZNVER1-NEXT: vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %a0) %2 = load <8 x float>, <8 x float> *%a1, align 32 %3 = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %2) @@ -3353,7 +3353,7 @@ define <4 x double> @test_subpd(<4 x double> %a0, <4 x double> %a1, <4 x double> ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fsub <4 x double> %a0, %a1 %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = fsub <4 x double> %1, %2 @@ -3395,7 +3395,7 @@ define <8 x float> @test_subps(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00] ; ZNVER1-NEXT: vsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = fsub <8 x float> %a0, %a1 %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = 
fsub <8 x float> %1, %2 @@ -3455,7 +3455,7 @@ define i32 @test_testpd(<2 x double> %a0, <2 x double> %a1, <2 x double> *%a2) { ; ZNVER1-NEXT: setb %al # sched: [1:0.25] ; ZNVER1-NEXT: vtestpd (%rdi), %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %a1) %2 = load <2 x double>, <2 x double> *%a2, align 16 %3 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %2) @@ -3521,8 +3521,8 @@ define i32 @test_testpd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> *%a ; ZNVER1-NEXT: setb %al # sched: [1:0.25] ; ZNVER1-NEXT: vtestpd (%rdi), %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25] -; ZNVER1-NEXT: vzeroupper -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %a1) %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %2) @@ -3584,7 +3584,7 @@ define i32 @test_testps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) { ; ZNVER1-NEXT: setb %al # sched: [1:0.25] ; ZNVER1-NEXT: vtestps (%rdi), %xmm0 # sched: [8:0.50] ; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %a1) %2 = load <4 x float>, <4 x float> *%a2, align 16 %3 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %2) @@ -3650,8 +3650,8 @@ define i32 @test_testps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> *%a2) ; ZNVER1-NEXT: setb %al # sched: [1:0.25] ; ZNVER1-NEXT: vtestps (%rdi), %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: adcl $0, %eax # sched: [1:0.25] -; ZNVER1-NEXT: vzeroupper -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: vzeroupper # sched: [100:?] 
+; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %a1) %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %2) @@ -3701,7 +3701,7 @@ define <4 x double> @test_unpckhpd(<4 x double> %a0, <4 x double> %a1, <4 x doub ; ZNVER1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.50] ; ZNVER1-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 5, i32 3, i32 7> %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = shufflevector <4 x double> %a1, <4 x double> %2, <4 x i32> <i32 1, i32 5, i32 3, i32 7> @@ -3744,7 +3744,7 @@ define <8 x float> @test_unpckhps(<8 x float> %a0, <8 x float> %a1, <8 x float> ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:0.50] ; ZNVER1-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15> %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15> @@ -3792,7 +3792,7 @@ define <4 x double> @test_unpcklpd(<4 x double> %a0, <4 x double> %a1, <4 x doub ; ZNVER1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.50] ; ZNVER1-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6> %2 = load <4 x double>, <4 x double> *%a2, align 32 %3 = shufflevector <4 x double> %a1, <4 x double> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 6> @@ -3835,7 +3835,7 @@ define <8 x float> @test_unpcklps(<8 x float> %a0, <8 x float> %a1, <8 x float> ; ZNVER1: # BB#0: ; ZNVER1-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:0.50] ; ZNVER1-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:0.50] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13> %2 = load <8 x float>, <8 x float> *%a2, align 32 %3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13> @@ -3883,7 +3883,7 @@ define <4 x double> @test_xorpd(<4 x double> %a0, <4 x double> %a1, <4 x double> ; ZNVER1-NEXT: vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vxorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <4 x double> %a0 to <4 x i64> %2 = bitcast <4 x double> %a1 to <4 x i64> %3 = xor <4 x i64> %1, %2 @@ -3936,7 +3936,7 @@ define <8 x float> @test_xorps(<8 x float> 
%a0, <8 x float> %a1, <8 x float> *%a ; ZNVER1-NEXT: vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: vxorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50] ; ZNVER1-NEXT: vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00] -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = bitcast <8 x float> %a0 to <4 x i64> %2 = bitcast <8 x float> %a1 to <4 x i64> %3 = xor <4 x i64> %1, %2 @@ -3976,8 +3976,8 @@ define void @test_zeroall() { ; ; ZNVER1-LABEL: test_zeroall: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vzeroall -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: vzeroall # sched: [100:?] +; ZNVER1-NEXT: retq # sched: [1:0.50] call void @llvm.x86.avx.vzeroall() ret void } @@ -4011,8 +4011,8 @@ define void @test_zeroupper() { ; ; ZNVER1-LABEL: test_zeroupper: ; ZNVER1: # BB#0: -; ZNVER1-NEXT: vzeroupper -; ZNVER1-NEXT: retq # sched: [5:0.50] +; ZNVER1-NEXT: vzeroupper # sched: [100:?] +; ZNVER1-NEXT: retq # sched: [1:0.50] call void @llvm.x86.avx.vzeroupper() ret void } |