diff options
Diffstat (limited to 'llvm/test/CodeGen/X86/avx512-mask-zext-bugfix.ll')
-rwxr-xr-x | llvm/test/CodeGen/X86/avx512-mask-zext-bugfix.ll | 41 |
1 files changed, 19 insertions, 22 deletions
diff --git a/llvm/test/CodeGen/X86/avx512-mask-zext-bugfix.ll b/llvm/test/CodeGen/X86/avx512-mask-zext-bugfix.ll index 11aba2fdf73..f501d9c9294 100755 --- a/llvm/test/CodeGen/X86/avx512-mask-zext-bugfix.ll +++ b/llvm/test/CodeGen/X86/avx512-mask-zext-bugfix.ll @@ -17,25 +17,21 @@ declare i32 @check_mask16(i16 zeroext %res_mask, i16 zeroext %exp_mask, i8* %fna define void @test_xmm(i32 %shift, i32 %mulp, <2 x i64> %a,i8* %arraydecay,i8* %fname){ ; CHECK-LABEL: test_xmm: ; CHECK: ## %bb.0: -; CHECK-NEXT: subq $72, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 80 -; CHECK-NEXT: movl $4, %eax +; CHECK-NEXT: subq $56, %rsp +; CHECK-NEXT: .cfi_def_cfa_offset 64 ; CHECK-NEXT: vpmovw2m %xmm0, %k0 ; CHECK-NEXT: movl $2, %esi -; CHECK-NEXT: movl $8, %edi -; CHECK-NEXT: movl %edi, {{[0-9]+}}(%rsp) ## 4-byte Spill +; CHECK-NEXT: movl $8, %eax ; CHECK-NEXT: movq %rdx, %rdi -; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %r8d ## 4-byte Reload ; CHECK-NEXT: movq %rdx, {{[0-9]+}}(%rsp) ## 8-byte Spill -; CHECK-NEXT: movl %r8d, %edx +; CHECK-NEXT: movl %eax, %edx +; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill ; CHECK-NEXT: movq %rcx, {{[0-9]+}}(%rsp) ## 8-byte Spill ; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill -; CHECK-NEXT: movl %eax, {{[0-9]+}}(%rsp) ## 4-byte Spill -; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill ; CHECK-NEXT: callq _calc_expected_mask_val ; CHECK-NEXT: movl %eax, %edx -; CHECK-NEXT: movw %dx, %r9w -; CHECK-NEXT: movzwl %r9w, %esi +; CHECK-NEXT: movw %dx, %r8w +; CHECK-NEXT: movzwl %r8w, %esi ; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k0 ## 2-byte Reload ; CHECK-NEXT: kmovb %k0, %edi ; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rdx ## 8-byte Reload @@ -45,25 +41,26 @@ define void @test_xmm(i32 %shift, i32 %mulp, <2 x i64> %a,i8* %arraydecay,i8* %f ; CHECK-NEXT: vpmovd2m %xmm0, %k0 ; CHECK-NEXT: kmovq %k0, %k1 ; CHECK-NEXT: kmovd %k0, %esi -; CHECK-NEXT: movb %sil, %r10b -; CHECK-NEXT: movzbl %r10b, %esi -; CHECK-NEXT: movw %si, %r9w +; CHECK-NEXT: movb %sil, %r9b +; CHECK-NEXT: movzbl %r9b, %esi +; CHECK-NEXT: movw %si, %r8w ; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rdi ## 8-byte Reload -; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %esi ## 4-byte Reload +; CHECK-NEXT: movl $4, %esi +; CHECK-NEXT: movl %esi, {{[0-9]+}}(%rsp) ## 4-byte Spill ; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %edx ## 4-byte Reload ; CHECK-NEXT: movl %eax, {{[0-9]+}}(%rsp) ## 4-byte Spill ; CHECK-NEXT: kmovw %k1, {{[0-9]+}}(%rsp) ## 2-byte Spill -; CHECK-NEXT: movw %r9w, {{[0-9]+}}(%rsp) ## 2-byte Spill +; CHECK-NEXT: movw %r8w, {{[0-9]+}}(%rsp) ## 2-byte Spill ; CHECK-NEXT: callq _calc_expected_mask_val -; CHECK-NEXT: movw %ax, %r9w -; CHECK-NEXT: movw {{[0-9]+}}(%rsp), %r11w ## 2-byte Reload -; CHECK-NEXT: movzwl %r11w, %edi -; CHECK-NEXT: movzwl %r9w, %esi +; CHECK-NEXT: movw %ax, %r8w +; CHECK-NEXT: movw {{[0-9]+}}(%rsp), %r10w ## 2-byte Reload +; CHECK-NEXT: movzwl %r10w, %edi +; CHECK-NEXT: movzwl %r8w, %esi ; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rdx ## 8-byte Reload ; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rcx ## 8-byte Reload ; CHECK-NEXT: callq _check_mask16 -; CHECK-NEXT: movl %eax, {{[0-9]+}}(%rsp) ## 4-byte Spill -; CHECK-NEXT: addq $72, %rsp +; CHECK-NEXT: movl %eax, (%rsp) ## 4-byte Spill +; CHECK-NEXT: addq $56, %rsp ; CHECK-NEXT: retq %d2 = bitcast <2 x i64> %a to <8 x i16> %m2 = call i8 @llvm.x86.avx512.cvtw2mask.128(<8 x i16> %d2) |