diff options
author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-03-15 14:45:30 +0000 |
---|---|---|
committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-03-15 14:45:30 +0000 |
commit | fb7aa57bf14268e229b4eab57e105b691f1c48b3 (patch) | |
tree | 1164f88eeb366330d5fc8a6e5bd3c2785aaf83cf /llvm/test | |
parent | 9a54397f81c9882d9cd25bde666f4b0c50bf0654 (diff) | |
download | bcm5719-llvm-fb7aa57bf14268e229b4eab57e105b691f1c48b3.tar.gz bcm5719-llvm-fb7aa57bf14268e229b4eab57e105b691f1c48b3.zip |
[X86][SSE] Introduce Float/Vector WriteMove, WriteLoad and WriteStore scheduler classes
As discussed on D44428 and PR36726, this patch splits off WriteFMove/WriteVecMove, WriteFLoad/WriteVecLoad and WriteFStore/WriteVecStore scheduler classes to permit vectors to be handled separately from gpr/scalar types.
I've minimised the diff here by only moving various basic SSE/AVX vector instructions across - we can fix the rest when called for. This does fix the MOVDQA vs MOVAPS/MOVAPD discrepancies mentioned on D44428.
Differential Revision: https://reviews.llvm.org/D44471
llvm-svn: 327630
Diffstat (limited to 'llvm/test')
-rw-r--r-- | llvm/test/CodeGen/X86/avx-schedule.ll | 8 | ||||
-rw-r--r-- | llvm/test/CodeGen/X86/avx2-schedule.ll | 10 | ||||
-rw-r--r-- | llvm/test/CodeGen/X86/sha-schedule.ll | 12 | ||||
-rw-r--r-- | llvm/test/CodeGen/X86/sse-schedule.ll | 6 | ||||
-rw-r--r-- | llvm/test/CodeGen/X86/sse2-schedule.ll | 8 | ||||
-rw-r--r-- | llvm/test/CodeGen/X86/sse3-schedule.ll | 6 | ||||
-rw-r--r-- | llvm/test/CodeGen/X86/sse41-schedule.ll | 22 |
7 files changed, 36 insertions, 36 deletions
diff --git a/llvm/test/CodeGen/X86/avx-schedule.ll b/llvm/test/CodeGen/X86/avx-schedule.ll index 6d25787c190..7f583a47efd 100644 --- a/llvm/test/CodeGen/X86/avx-schedule.ll +++ b/llvm/test/CodeGen/X86/avx-schedule.ll @@ -2103,7 +2103,7 @@ define <2 x double> @test_maskmovpd(i8* %a0, <2 x i64> %a1, <2 x double> %a2) { ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [8:0.50] ; ZNVER1-NEXT: vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [4:0.50] -; ZNVER1-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:0.50] +; ZNVER1-NEXT: vmovapd %xmm2, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %a0, <2 x i64> %a1) call void @llvm.x86.avx.maskstore.pd(i8* %a0, <2 x i64> %a1, <2 x double> %a2) @@ -2166,7 +2166,7 @@ define <4 x double> @test_maskmovpd_ymm(i8* %a0, <4 x i64> %a1, <4 x double> %a2 ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [8:1.00] ; ZNVER1-NEXT: vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [5:1.00] -; ZNVER1-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vmovapd %ymm2, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %a0, <4 x i64> %a1) call void @llvm.x86.avx.maskstore.pd.256(i8* %a0, <4 x i64> %a1, <4 x double> %a2) @@ -2229,7 +2229,7 @@ define <4 x float> @test_maskmovps(i8* %a0, <4 x i32> %a1, <4 x float> %a2) { ; ZNVER1: # %bb.0: ; ZNVER1-NEXT: vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [8:0.50] ; ZNVER1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [4:0.50] -; ZNVER1-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:0.50] +; ZNVER1-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %a0, <4 x i32> %a1) call void @llvm.x86.avx.maskstore.ps(i8* %a0, <4 x i32> %a1, <4 x float> %a2) @@ -2292,7 +2292,7 @@ define <8 x float> @test_maskmovps_ymm(i8* %a0, <8 x i32> %a1, <8 x float> %a2) ; ZNVER1: # %bb.0: ; 
ZNVER1-NEXT: vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [8:1.00] ; ZNVER1-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [5:1.00] -; ZNVER1-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vmovaps %ymm2, %ymm0 # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %a0, <8 x i32> %a1) call void @llvm.x86.avx.maskstore.ps.256(i8* %a0, <8 x i32> %a1, <8 x float> %a2) diff --git a/llvm/test/CodeGen/X86/avx2-schedule.ll b/llvm/test/CodeGen/X86/avx2-schedule.ll index d06b6a78a8c..e7152f867e8 100644 --- a/llvm/test/CodeGen/X86/avx2-schedule.ll +++ b/llvm/test/CodeGen/X86/avx2-schedule.ll @@ -573,7 +573,7 @@ define <8 x i32> @test_inserti128(<8 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) define <4 x i64> @test_movntdqa(i8* %a0) { ; GENERIC-LABEL: test_movntdqa: ; GENERIC: # %bb.0: -; GENERIC-NEXT: vmovntdqa (%rdi), %ymm0 # sched: [4:0.50] +; GENERIC-NEXT: vmovntdqa (%rdi), %ymm0 # sched: [6:0.50] ; GENERIC-NEXT: retq # sched: [1:1.00] ; ; HASWELL-LABEL: test_movntdqa: @@ -3380,7 +3380,7 @@ declare <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16>, <16 x i16>) nounwind readn define <4 x i32> @test_pmaskmovd(i8* %a0, <4 x i32> %a1, <4 x i32> %a2) { ; GENERIC-LABEL: test_pmaskmovd: ; GENERIC: # %bb.0: -; GENERIC-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2 # sched: [4:0.50] +; GENERIC-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2 # sched: [6:0.50] ; GENERIC-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) # sched: [1:1.00] ; GENERIC-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.50] ; GENERIC-NEXT: retq # sched: [1:1.00] @@ -3429,7 +3429,7 @@ declare void @llvm.x86.avx2.maskstore.d(i8*, <4 x i32>, <4 x i32>) nounwind define <8 x i32> @test_pmaskmovd_ymm(i8* %a0, <8 x i32> %a1, <8 x i32> %a2) { ; GENERIC-LABEL: test_pmaskmovd_ymm: ; GENERIC: # %bb.0: -; GENERIC-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2 # sched: [4:0.50] +; GENERIC-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2 # sched: [6:0.50] ; GENERIC-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) # sched: [1:1.00] ; 
GENERIC-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.50] ; GENERIC-NEXT: retq # sched: [1:1.00] @@ -3478,7 +3478,7 @@ declare void @llvm.x86.avx2.maskstore.d.256(i8*, <8 x i32>, <8 x i32>) nounwind define <2 x i64> @test_pmaskmovq(i8* %a0, <2 x i64> %a1, <2 x i64> %a2) { ; GENERIC-LABEL: test_pmaskmovq: ; GENERIC: # %bb.0: -; GENERIC-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm2 # sched: [4:0.50] +; GENERIC-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm2 # sched: [6:0.50] ; GENERIC-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) # sched: [1:1.00] ; GENERIC-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.50] ; GENERIC-NEXT: retq # sched: [1:1.00] @@ -3527,7 +3527,7 @@ declare void @llvm.x86.avx2.maskstore.q(i8*, <2 x i64>, <2 x i64>) nounwind define <4 x i64> @test_pmaskmovq_ymm(i8* %a0, <4 x i64> %a1, <4 x i64> %a2) { ; GENERIC-LABEL: test_pmaskmovq_ymm: ; GENERIC: # %bb.0: -; GENERIC-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm2 # sched: [4:0.50] +; GENERIC-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm2 # sched: [6:0.50] ; GENERIC-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) # sched: [1:1.00] ; GENERIC-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.50] ; GENERIC-NEXT: retq # sched: [1:1.00] diff --git a/llvm/test/CodeGen/X86/sha-schedule.ll b/llvm/test/CodeGen/X86/sha-schedule.ll index 138ff888b92..cecdc1c41b6 100644 --- a/llvm/test/CodeGen/X86/sha-schedule.ll +++ b/llvm/test/CodeGen/X86/sha-schedule.ll @@ -210,11 +210,11 @@ define <4 x i32> @test_sha256rnds2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, ; ; GOLDMONT-LABEL: test_sha256rnds2: ; GOLDMONT: # %bb.0: -; GOLDMONT-NEXT: movaps %xmm0, %xmm3 # sched: [1:1.00] -; GOLDMONT-NEXT: movaps %xmm2, %xmm0 # sched: [1:1.00] +; GOLDMONT-NEXT: movaps %xmm0, %xmm3 # sched: [1:0.50] +; GOLDMONT-NEXT: movaps %xmm2, %xmm0 # sched: [1:0.50] ; GOLDMONT-NEXT: sha256rnds2 %xmm0, %xmm1, %xmm3 # sched: [4:1.00] ; GOLDMONT-NEXT: sha256rnds2 %xmm0, (%rdi), %xmm3 # sched: [7:1.00] -; GOLDMONT-NEXT: movaps %xmm3, %xmm0 # sched: [1:1.00] +; GOLDMONT-NEXT: movaps %xmm3, %xmm0 # sched: [1:0.50] ; 
GOLDMONT-NEXT: retq # sched: [4:1.00] ; ; CANNONLAKE-LABEL: test_sha256rnds2: @@ -228,11 +228,11 @@ define <4 x i32> @test_sha256rnds2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, ; ; ZNVER1-LABEL: test_sha256rnds2: ; ZNVER1: # %bb.0: -; ZNVER1-NEXT: vmovaps %xmm0, %xmm3 # sched: [1:0.50] -; ZNVER1-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:0.50] +; ZNVER1-NEXT: vmovaps %xmm0, %xmm3 # sched: [1:0.25] +; ZNVER1-NEXT: vmovaps %xmm2, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: sha256rnds2 %xmm0, %xmm1, %xmm3 # sched: [4:1.00] ; ZNVER1-NEXT: sha256rnds2 %xmm0, (%rdi), %xmm3 # sched: [11:1.00] -; ZNVER1-NEXT: vmovaps %xmm3, %xmm0 # sched: [1:0.50] +; ZNVER1-NEXT: vmovaps %xmm3, %xmm0 # sched: [1:0.25] ; ZNVER1-NEXT: retq # sched: [1:0.50] %1 = load <4 x i32>, <4 x i32>* %a3 %2 = tail call <4 x i32> @llvm.x86.sha256rnds2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) diff --git a/llvm/test/CodeGen/X86/sse-schedule.ll b/llvm/test/CodeGen/X86/sse-schedule.ll index 8ebea98274d..3a123df699f 100644 --- a/llvm/test/CodeGen/X86/sse-schedule.ll +++ b/llvm/test/CodeGen/X86/sse-schedule.ll @@ -2557,7 +2557,7 @@ define <4 x float> @test_rcpps(<4 x float> %a0, <4 x float> *%a1) { ; SLM-NEXT: rcpps (%rdi), %xmm1 # sched: [8:1.00] ; SLM-NEXT: rcpps %xmm0, %xmm0 # sched: [5:1.00] ; SLM-NEXT: addps %xmm0, %xmm1 # sched: [3:1.00] -; SLM-NEXT: movaps %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: movaps %xmm1, %xmm0 # sched: [1:0.50] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_rcpps: @@ -2729,7 +2729,7 @@ define <4 x float> @test_rsqrtps(<4 x float> %a0, <4 x float> *%a1) { ; SLM-NEXT: rsqrtps (%rdi), %xmm1 # sched: [8:1.00] ; SLM-NEXT: rsqrtps %xmm0, %xmm0 # sched: [5:1.00] ; SLM-NEXT: addps %xmm0, %xmm1 # sched: [3:1.00] -; SLM-NEXT: movaps %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: movaps %xmm1, %xmm0 # sched: [1:0.50] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_rsqrtps: @@ -3038,7 +3038,7 @@ define <4 x float> @test_sqrtps(<4 x float> %a0, <4 x float> *%a1) { ; SLM-NEXT: 
sqrtps (%rdi), %xmm1 # sched: [18:1.00] ; SLM-NEXT: sqrtps %xmm0, %xmm0 # sched: [15:1.00] ; SLM-NEXT: addps %xmm0, %xmm1 # sched: [3:1.00] -; SLM-NEXT: movaps %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: movaps %xmm1, %xmm0 # sched: [1:0.50] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_sqrtps: diff --git a/llvm/test/CodeGen/X86/sse2-schedule.ll b/llvm/test/CodeGen/X86/sse2-schedule.ll index a7c1c75226e..cb554e7c931 100644 --- a/llvm/test/CodeGen/X86/sse2-schedule.ll +++ b/llvm/test/CodeGen/X86/sse2-schedule.ll @@ -3564,7 +3564,7 @@ define <2 x double> @test_movsd_reg(<2 x double> %a0, <2 x double> %a1) { ; SLM-LABEL: test_movsd_reg: ; SLM: # %bb.0: ; SLM-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] sched: [1:1.00] -; SLM-NEXT: movaps %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: movaps %xmm1, %xmm0 # sched: [1:0.50] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_movsd_reg: @@ -8756,7 +8756,7 @@ define <2 x double> @test_sqrtpd(<2 x double> %a0, <2 x double> *%a1) { ; SLM-NEXT: sqrtpd (%rdi), %xmm1 # sched: [18:1.00] ; SLM-NEXT: sqrtpd %xmm0, %xmm0 # sched: [15:1.00] ; SLM-NEXT: addpd %xmm0, %xmm1 # sched: [3:1.00] -; SLM-NEXT: movapd %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: movapd %xmm1, %xmm0 # sched: [1:0.50] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_sqrtpd: @@ -9284,10 +9284,10 @@ define <2 x double> @test_unpcklpd(<2 x double> %a0, <2 x double> %a1, <2 x doub ; SLM-LABEL: test_unpcklpd: ; SLM: # %bb.0: ; SLM-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:1.00] -; SLM-NEXT: movapd %xmm0, %xmm1 # sched: [1:1.00] +; SLM-NEXT: movapd %xmm0, %xmm1 # sched: [1:0.50] ; SLM-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [4:1.00] ; SLM-NEXT: addpd %xmm0, %xmm1 # sched: [3:1.00] -; SLM-NEXT: movapd %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: movapd %xmm1, %xmm0 # sched: [1:0.50] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_unpcklpd: diff --git a/llvm/test/CodeGen/X86/sse3-schedule.ll 
b/llvm/test/CodeGen/X86/sse3-schedule.ll index bb7694c1e4a..1b1e765ab27 100644 --- a/llvm/test/CodeGen/X86/sse3-schedule.ll +++ b/llvm/test/CodeGen/X86/sse3-schedule.ll @@ -566,7 +566,7 @@ define <2 x double> @test_movddup(<2 x double> %a0, <2 x double> *%a1) { ; SLM-NEXT: movddup {{.*#+}} xmm1 = mem[0,0] sched: [4:1.00] ; SLM-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:1.00] ; SLM-NEXT: subpd %xmm0, %xmm1 # sched: [3:1.00] -; SLM-NEXT: movapd %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: movapd %xmm1, %xmm0 # sched: [1:0.50] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_movddup: @@ -645,7 +645,7 @@ define <4 x float> @test_movshdup(<4 x float> %a0, <4 x float> *%a1) { ; SLM-NEXT: movshdup {{.*#+}} xmm1 = mem[1,1,3,3] sched: [4:1.00] ; SLM-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] sched: [1:1.00] ; SLM-NEXT: addps %xmm0, %xmm1 # sched: [3:1.00] -; SLM-NEXT: movaps %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: movaps %xmm1, %xmm0 # sched: [1:0.50] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_movshdup: @@ -724,7 +724,7 @@ define <4 x float> @test_movsldup(<4 x float> %a0, <4 x float> *%a1) { ; SLM-NEXT: movsldup {{.*#+}} xmm1 = mem[0,0,2,2] sched: [4:1.00] ; SLM-NEXT: movsldup {{.*#+}} xmm0 = xmm0[0,0,2,2] sched: [1:1.00] ; SLM-NEXT: addps %xmm0, %xmm1 # sched: [3:1.00] -; SLM-NEXT: movaps %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: movaps %xmm1, %xmm0 # sched: [1:0.50] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_movsldup: diff --git a/llvm/test/CodeGen/X86/sse41-schedule.ll b/llvm/test/CodeGen/X86/sse41-schedule.ll index 60fc0e8e514..17188884983 100644 --- a/llvm/test/CodeGen/X86/sse41-schedule.ll +++ b/llvm/test/CodeGen/X86/sse41-schedule.ll @@ -163,11 +163,11 @@ define <2 x double> @test_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x doub ; ; SLM-LABEL: test_blendvpd: ; SLM: # %bb.0: -; SLM-NEXT: movapd %xmm0, %xmm3 # sched: [1:1.00] -; SLM-NEXT: movaps %xmm2, %xmm0 # sched: [1:1.00] +; SLM-NEXT: movapd %xmm0, %xmm3 # 
sched: [1:0.50] +; SLM-NEXT: movaps %xmm2, %xmm0 # sched: [1:0.50] ; SLM-NEXT: blendvpd %xmm0, %xmm1, %xmm3 # sched: [1:1.00] ; SLM-NEXT: blendvpd %xmm0, (%rdi), %xmm3 # sched: [4:1.00] -; SLM-NEXT: movapd %xmm3, %xmm0 # sched: [1:1.00] +; SLM-NEXT: movapd %xmm3, %xmm0 # sched: [1:0.50] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_blendvpd: @@ -230,11 +230,11 @@ define <4 x float> @test_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> ; ; SLM-LABEL: test_blendvps: ; SLM: # %bb.0: -; SLM-NEXT: movaps %xmm0, %xmm3 # sched: [1:1.00] -; SLM-NEXT: movaps %xmm2, %xmm0 # sched: [1:1.00] +; SLM-NEXT: movaps %xmm0, %xmm3 # sched: [1:0.50] +; SLM-NEXT: movaps %xmm2, %xmm0 # sched: [1:0.50] ; SLM-NEXT: blendvps %xmm0, %xmm1, %xmm3 # sched: [1:1.00] ; SLM-NEXT: blendvps %xmm0, (%rdi), %xmm3 # sched: [4:1.00] -; SLM-NEXT: movaps %xmm3, %xmm0 # sched: [1:1.00] +; SLM-NEXT: movaps %xmm3, %xmm0 # sched: [1:0.50] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_blendvps: @@ -717,7 +717,7 @@ define <16 x i8> @test_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2, <16 ; SLM-LABEL: test_pblendvb: ; SLM: # %bb.0: ; SLM-NEXT: movdqa %xmm0, %xmm3 # sched: [1:0.50] -; SLM-NEXT: movaps %xmm2, %xmm0 # sched: [1:1.00] +; SLM-NEXT: movaps %xmm2, %xmm0 # sched: [1:0.50] ; SLM-NEXT: pblendvb %xmm0, %xmm1, %xmm3 # sched: [1:1.00] ; SLM-NEXT: pblendvb %xmm0, (%rdi), %xmm3 # sched: [4:1.00] ; SLM-NEXT: movdqa %xmm3, %xmm0 # sched: [1:0.50] @@ -2991,7 +2991,7 @@ define <2 x double> @test_roundpd(<2 x double> %a0, <2 x double> *%a1) { ; SLM-NEXT: roundpd $7, (%rdi), %xmm1 # sched: [6:1.00] ; SLM-NEXT: roundpd $7, %xmm0, %xmm0 # sched: [3:1.00] ; SLM-NEXT: addpd %xmm0, %xmm1 # sched: [3:1.00] -; SLM-NEXT: movapd %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: movapd %xmm1, %xmm0 # sched: [1:0.50] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_roundpd: @@ -3063,7 +3063,7 @@ define <4 x float> @test_roundps(<4 x float> %a0, <4 x float> *%a1) { ; SLM-NEXT: roundps $7, 
(%rdi), %xmm1 # sched: [6:1.00] ; SLM-NEXT: roundps $7, %xmm0, %xmm0 # sched: [3:1.00] ; SLM-NEXT: addps %xmm0, %xmm1 # sched: [3:1.00] -; SLM-NEXT: movaps %xmm1, %xmm0 # sched: [1:1.00] +; SLM-NEXT: movaps %xmm1, %xmm0 # sched: [1:0.50] ; SLM-NEXT: retq # sched: [4:1.00] ; ; SANDY-LABEL: test_roundps: @@ -3133,7 +3133,7 @@ define <2 x double> @test_roundsd(<2 x double> %a0, <2 x double> %a1, <2 x doubl ; ; SLM-LABEL: test_roundsd: ; SLM: # %bb.0: -; SLM-NEXT: movapd %xmm0, %xmm2 # sched: [1:1.00] +; SLM-NEXT: movapd %xmm0, %xmm2 # sched: [1:0.50] ; SLM-NEXT: roundsd $7, (%rdi), %xmm0 # sched: [6:1.00] ; SLM-NEXT: roundsd $7, %xmm1, %xmm2 # sched: [3:1.00] ; SLM-NEXT: addpd %xmm2, %xmm0 # sched: [3:1.00] @@ -3206,7 +3206,7 @@ define <4 x float> @test_roundss(<4 x float> %a0, <4 x float> %a1, <4 x float> * ; ; SLM-LABEL: test_roundss: ; SLM: # %bb.0: -; SLM-NEXT: movaps %xmm0, %xmm2 # sched: [1:1.00] +; SLM-NEXT: movaps %xmm0, %xmm2 # sched: [1:0.50] ; SLM-NEXT: roundss $7, (%rdi), %xmm0 # sched: [6:1.00] ; SLM-NEXT: roundss $7, %xmm1, %xmm2 # sched: [3:1.00] ; SLM-NEXT: addps %xmm2, %xmm0 # sched: [3:1.00] |