Diffstat (limited to 'llvm/test/CodeGen/X86/avx512-bugfix-26264.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/avx512-bugfix-26264.ll | 18 | ++++++++----------
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/llvm/test/CodeGen/X86/avx512-bugfix-26264.ll b/llvm/test/CodeGen/X86/avx512-bugfix-26264.ll
index 4d54fb71523..e9d0161dd94 100644
--- a/llvm/test/CodeGen/X86/avx512-bugfix-26264.ll
+++ b/llvm/test/CodeGen/X86/avx512-bugfix-26264.ll
@@ -7,13 +7,12 @@ define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32
 ; AVX512BW-NEXT:    vpsllw $7, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vpmovb2m %zmm0, %k1
 ; AVX512BW-NEXT:    vblendmpd (%rdi), %zmm1, %zmm0 {%k1}
-; AVX512BW-NEXT:    kshiftrd $16, %k1, %k2
-; AVX512BW-NEXT:    vblendmpd 128(%rdi), %zmm3, %zmm5 {%k2}
+; AVX512BW-NEXT:    kshiftrw $8, %k1, %k2
+; AVX512BW-NEXT:    vblendmpd 64(%rdi), %zmm2, %zmm1 {%k2}
+; AVX512BW-NEXT:    kshiftrd $16, %k1, %k1
+; AVX512BW-NEXT:    vblendmpd 128(%rdi), %zmm3, %zmm2 {%k1}
 ; AVX512BW-NEXT:    kshiftrw $8, %k1, %k1
-; AVX512BW-NEXT:    vblendmpd 64(%rdi), %zmm2, %zmm1 {%k1}
-; AVX512BW-NEXT:    kshiftrw $8, %k2, %k1
 ; AVX512BW-NEXT:    vblendmpd 192(%rdi), %zmm4, %zmm3 {%k1}
-; AVX512BW-NEXT:    vmovapd %zmm5, %zmm2
 ; AVX512BW-NEXT:    retq
   %res = call <32 x double> @llvm.masked.load.v32f64.p0v32f64(<32 x double>* %ptrs, i32 4, <32 x i1> %mask, <32 x double> %src0)
   ret <32 x double> %res
@@ -25,13 +24,12 @@ define <32 x i64> @test_load_32i64(<32 x i64>* %ptrs, <32 x i1> %mask, <32 x i64
 ; AVX512BW-NEXT:    vpsllw $7, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vpmovb2m %zmm0, %k1
 ; AVX512BW-NEXT:    vpblendmq (%rdi), %zmm1, %zmm0 {%k1}
-; AVX512BW-NEXT:    kshiftrd $16, %k1, %k2
-; AVX512BW-NEXT:    vpblendmq 128(%rdi), %zmm3, %zmm5 {%k2}
+; AVX512BW-NEXT:    kshiftrw $8, %k1, %k2
+; AVX512BW-NEXT:    vpblendmq 64(%rdi), %zmm2, %zmm1 {%k2}
+; AVX512BW-NEXT:    kshiftrd $16, %k1, %k1
+; AVX512BW-NEXT:    vpblendmq 128(%rdi), %zmm3, %zmm2 {%k1}
 ; AVX512BW-NEXT:    kshiftrw $8, %k1, %k1
-; AVX512BW-NEXT:    vpblendmq 64(%rdi), %zmm2, %zmm1 {%k1}
-; AVX512BW-NEXT:    kshiftrw $8, %k2, %k1
 ; AVX512BW-NEXT:    vpblendmq 192(%rdi), %zmm4, %zmm3 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm2
 ; AVX512BW-NEXT:    retq
   %res = call <32 x i64> @llvm.masked.load.v32i64.p0v32i64(<32 x i64>* %ptrs, i32 4, <32 x i1> %mask, <32 x i64> %src0)
   ret <32 x i64> %res
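
For reference, a minimal sketch of the IR pattern the first test exercises, reassembled from the hunk header and context lines above; the declare line and closing brace are standard boilerplate added here for completeness, not part of the diff, and the <32 x i64> test is analogous with @llvm.masked.load.v32i64.p0v32i64.

declare <32 x double> @llvm.masked.load.v32f64.p0v32f64(<32 x double>*, i32, <32 x i1>, <32 x double>)

define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32 x double> %src0) {
  ; Masked load of 32 doubles, 4-byte aligned; lanes whose mask bit is 0 take their value from %src0.
  %res = call <32 x double> @llvm.masked.load.v32f64.p0v32f64(<32 x double>* %ptrs, i32 4, <32 x i1> %mask, <32 x double> %src0)
  ret <32 x double> %res
}

The AVX512BW CHECK lines show this lowered to four 8-element vblendmpd loads, with kshiftrd/kshiftrw extracting successive 8-bit slices of the 32-bit mask in %k1. The updated sequence orders the shifts so each blend writes directly into its final result register (%zmm0-%zmm3), which removes the trailing vmovapd/vmovdqa64 copy present in the old output.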