| author | Craig Topper <craig.topper@intel.com> | 2018-07-26 23:22:11 +0000 |
|---|---|---|
| committer | Craig Topper <craig.topper@intel.com> | 2018-07-26 23:22:11 +0000 |
| commit | 1a40a065499deab5b5bd2bdc9d4ca471dd1f85f0 | |
| tree | 5745decb5072017c2b0669434ee74d63df14fcc4 /llvm/test | |
| parent | 4d23f45a11325dfc03a9904cfc1e354c54f06081 | |
[SelectionDAGBuilder] Add masked loads to PendingLoads rather than calling DAG.setRoot.
Masked loads were calling DAG.getRoot rather than SelectionDAGBuilder::getRoot, which means PendingLoads was never flushed to update the root and create any needed TokenFactor. So it was incorrect to call DAG.setRoot for the masked load.
This patch instead adds the masked load to PendingLoads, so the root doesn't get updated until a store, scatter, or similar ordered operation occurs. Alternatively, we could call SelectionDAGBuilder::getRoot before the load, but that would create unnecessary serialization.
llvm-svn: 338085
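
For illustration, a minimal sketch of the shape of this change inside SelectionDAGBuilder::visitMaskedLoad. This is a simplified reconstruction, not the verbatim patch: operand setup is elided and the local variable names are illustrative.

```cpp
// Sketch: simplified from SelectionDAGBuilder::visitMaskedLoad.
// PendingLoads is the SelectionDAGBuilder member that collects the
// output chains of outstanding loads; SelectionDAGBuilder::getRoot()
// merges them into a TokenFactor and installs that as the DAG root.
SDValue Load = DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Mask, Src0, VT,
                                 MMO, ISD::NON_EXTLOAD);

// Before: set the root immediately. This bypassed the PendingLoads
// flush that SelectionDAGBuilder::getRoot() performs, and needlessly
// ordered the masked load against the current root.
//   if (AddToChain)
//     DAG.setRoot(Load.getValue(1));

// After: queue the output chain; the root is only rebuilt, with a
// TokenFactor over all pending load chains, the next time an ordered
// node (a store, a scatter, etc.) asks for it.
if (AddToChain)
  PendingLoads.push_back(Load.getValue(1));
```

Until that flush happens, independent masked loads remain unordered with respect to one another, which is the serialization the message refers to; the reordered blend/load sequences in the test diffs below fall out of that freedom.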
Diffstat (limited to 'llvm/test')
| -rw-r--r-- | llvm/test/CodeGen/X86/avx512-bugfix-26264.ll | 18 |
| -rw-r--r-- | llvm/test/CodeGen/X86/avx512-masked-memop-64-32.ll | 29 |
| -rw-r--r-- | llvm/test/CodeGen/X86/masked_memop.ll | 2 |
3 files changed, 23 insertions, 26 deletions
```diff
diff --git a/llvm/test/CodeGen/X86/avx512-bugfix-26264.ll b/llvm/test/CodeGen/X86/avx512-bugfix-26264.ll
index 4d54fb71523..e9d0161dd94 100644
--- a/llvm/test/CodeGen/X86/avx512-bugfix-26264.ll
+++ b/llvm/test/CodeGen/X86/avx512-bugfix-26264.ll
@@ -7,13 +7,12 @@ define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32
 ; AVX512BW-NEXT: vpsllw $7, %ymm0, %ymm0
 ; AVX512BW-NEXT: vpmovb2m %zmm0, %k1
 ; AVX512BW-NEXT: vblendmpd (%rdi), %zmm1, %zmm0 {%k1}
-; AVX512BW-NEXT: kshiftrd $16, %k1, %k2
-; AVX512BW-NEXT: vblendmpd 128(%rdi), %zmm3, %zmm5 {%k2}
+; AVX512BW-NEXT: kshiftrw $8, %k1, %k2
+; AVX512BW-NEXT: vblendmpd 64(%rdi), %zmm2, %zmm1 {%k2}
+; AVX512BW-NEXT: kshiftrd $16, %k1, %k1
+; AVX512BW-NEXT: vblendmpd 128(%rdi), %zmm3, %zmm2 {%k1}
 ; AVX512BW-NEXT: kshiftrw $8, %k1, %k1
-; AVX512BW-NEXT: vblendmpd 64(%rdi), %zmm2, %zmm1 {%k1}
-; AVX512BW-NEXT: kshiftrw $8, %k2, %k1
 ; AVX512BW-NEXT: vblendmpd 192(%rdi), %zmm4, %zmm3 {%k1}
-; AVX512BW-NEXT: vmovapd %zmm5, %zmm2
 ; AVX512BW-NEXT: retq
 %res = call <32 x double> @llvm.masked.load.v32f64.p0v32f64(<32 x double>* %ptrs, i32 4, <32 x i1> %mask, <32 x double> %src0)
 ret <32 x double> %res
@@ -25,13 +24,12 @@ define <32 x i64> @test_load_32i64(<32 x i64>* %ptrs, <32 x i1> %mask, <32 x i64
 ; AVX512BW-NEXT: vpsllw $7, %ymm0, %ymm0
 ; AVX512BW-NEXT: vpmovb2m %zmm0, %k1
 ; AVX512BW-NEXT: vpblendmq (%rdi), %zmm1, %zmm0 {%k1}
-; AVX512BW-NEXT: kshiftrd $16, %k1, %k2
-; AVX512BW-NEXT: vpblendmq 128(%rdi), %zmm3, %zmm5 {%k2}
+; AVX512BW-NEXT: kshiftrw $8, %k1, %k2
+; AVX512BW-NEXT: vpblendmq 64(%rdi), %zmm2, %zmm1 {%k2}
+; AVX512BW-NEXT: kshiftrd $16, %k1, %k1
+; AVX512BW-NEXT: vpblendmq 128(%rdi), %zmm3, %zmm2 {%k1}
 ; AVX512BW-NEXT: kshiftrw $8, %k1, %k1
-; AVX512BW-NEXT: vpblendmq 64(%rdi), %zmm2, %zmm1 {%k1}
-; AVX512BW-NEXT: kshiftrw $8, %k2, %k1
 ; AVX512BW-NEXT: vpblendmq 192(%rdi), %zmm4, %zmm3 {%k1}
-; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm2
 ; AVX512BW-NEXT: retq
 %res = call <32 x i64> @llvm.masked.load.v32i64.p0v32i64(<32 x i64>* %ptrs, i32 4, <32 x i1> %mask, <32 x i64> %src0)
 ret <32 x i64> %res
diff --git a/llvm/test/CodeGen/X86/avx512-masked-memop-64-32.ll b/llvm/test/CodeGen/X86/avx512-masked-memop-64-32.ll
index 275884c6de0..f199cb097aa 100644
--- a/llvm/test/CodeGen/X86/avx512-masked-memop-64-32.ll
+++ b/llvm/test/CodeGen/X86/avx512-masked-memop-64-32.ll
@@ -94,10 +94,10 @@ declare <16 x i32*> @llvm.masked.load.v16p0i32.p0v16p0i32(<16 x i32*>*, i32, <16
 define <16 x i32*> @test23(<16 x i32*> %trigger, <16 x i32*>* %addr) {
 ; AVX512-LABEL: test23:
 ; AVX512: ## %bb.0:
-; AVX512-NEXT: vptestnmq %zmm0, %zmm0, %k1
-; AVX512-NEXT: vptestnmq %zmm1, %zmm1, %k2
-; AVX512-NEXT: vmovdqu64 64(%rdi), %zmm1 {%k2} {z}
-; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0 {%k1} {z}
+; AVX512-NEXT: vptestnmq %zmm1, %zmm1, %k1
+; AVX512-NEXT: vptestnmq %zmm0, %zmm0, %k2
+; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0 {%k2} {z}
+; AVX512-NEXT: vmovdqu64 64(%rdi), %zmm1 {%k1} {z}
 ; AVX512-NEXT: retq
 %mask = icmp eq <16 x i32*> %trigger, zeroinitializer
 %res = call <16 x i32*> @llvm.masked.load.v16p0i32.p0v16p0i32(<16 x i32*>* %addr, i32 4, <16 x i1>%mask, <16 x i32*>zeroinitializer)
@@ -234,19 +234,19 @@ declare <16 x double> @llvm.masked.load.v16f64.p0v16f64(<16 x double>* %ptrs, i3
 define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32 x double> %src0) {
 ; AVX512F-LABEL: test_load_32f64:
 ; AVX512F: ## %bb.0:
-; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm5
+; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm5
+; AVX512F-NEXT: vpmovsxbd %xmm5, %zmm5
 ; AVX512F-NEXT: vpslld $31, %zmm5, %zmm5
 ; AVX512F-NEXT: vptestmd %zmm5, %zmm5, %k1
-; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
 ; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
 ; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
 ; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k2
-; AVX512F-NEXT: vblendmpd 128(%rdi), %zmm3, %zmm5 {%k2}
-; AVX512F-NEXT: vblendmpd (%rdi), %zmm1, %zmm0 {%k1}
+; AVX512F-NEXT: vblendmpd (%rdi), %zmm1, %zmm0 {%k2}
+; AVX512F-NEXT: vblendmpd 128(%rdi), %zmm3, %zmm5 {%k1}
 ; AVX512F-NEXT: kshiftrw $8, %k2, %k2
-; AVX512F-NEXT: vblendmpd 192(%rdi), %zmm4, %zmm3 {%k2}
+; AVX512F-NEXT: vblendmpd 64(%rdi), %zmm2, %zmm1 {%k2}
 ; AVX512F-NEXT: kshiftrw $8, %k1, %k1
-; AVX512F-NEXT: vblendmpd 64(%rdi), %zmm2, %zmm1 {%k1}
+; AVX512F-NEXT: vblendmpd 192(%rdi), %zmm4, %zmm3 {%k1}
 ; AVX512F-NEXT: vmovapd %zmm5, %zmm2
 ; AVX512F-NEXT: retq
 ;
@@ -255,13 +255,12 @@ define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32
 ; SKX-NEXT: vpsllw $7, %ymm0, %ymm0
 ; SKX-NEXT: vpmovb2m %ymm0, %k1
 ; SKX-NEXT: vblendmpd (%rdi), %zmm1, %zmm0 {%k1}
-; SKX-NEXT: kshiftrd $16, %k1, %k2
-; SKX-NEXT: vblendmpd 128(%rdi), %zmm3, %zmm5 {%k2}
+; SKX-NEXT: kshiftrw $8, %k1, %k2
+; SKX-NEXT: vblendmpd 64(%rdi), %zmm2, %zmm1 {%k2}
+; SKX-NEXT: kshiftrd $16, %k1, %k1
+; SKX-NEXT: vblendmpd 128(%rdi), %zmm3, %zmm2 {%k1}
 ; SKX-NEXT: kshiftrw $8, %k1, %k1
-; SKX-NEXT: vblendmpd 64(%rdi), %zmm2, %zmm1 {%k1}
-; SKX-NEXT: kshiftrw $8, %k2, %k1
 ; SKX-NEXT: vblendmpd 192(%rdi), %zmm4, %zmm3 {%k1}
-; SKX-NEXT: vmovapd %zmm5, %zmm2
 ; SKX-NEXT: retq
 %res = call <32 x double> @llvm.masked.load.v32f64.p0v32f64(<32 x double>* %ptrs, i32 4, <32 x i1> %mask, <32 x double> %src0)
 ret <32 x double> %res
diff --git a/llvm/test/CodeGen/X86/masked_memop.ll b/llvm/test/CodeGen/X86/masked_memop.ll
index aa6ae096445..812d9f50fe3 100644
--- a/llvm/test/CodeGen/X86/masked_memop.ll
+++ b/llvm/test/CodeGen/X86/masked_memop.ll
@@ -976,8 +976,8 @@ define <4 x i64> @mload_constmask_v4i64(<4 x i64>* %addr, <4 x i64> %dst) {
 define <8 x double> @mload_constmask_v8f64(<8 x double>* %addr, <8 x double> %dst) {
 ; AVX-LABEL: mload_constmask_v8f64:
 ; AVX: ## %bb.0:
-; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],mem[6,7]
 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
+; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],mem[6,7]
 ; AVX-NEXT: retq
 ;
 ; AVX512F-LABEL: mload_constmask_v8f64:
```