| field | value | |
|---|---|---|
| author | Sanjay Patel <spatel@rotateright.com> | 2016-05-25 16:39:47 +0000 |
| committer | Sanjay Patel <spatel@rotateright.com> | 2016-05-25 16:39:47 +0000 |
| commit | 3955360b24c7c70f2dcf301a3f59a2fc89e50238 | |
| tree | de0b019653f97f893d8ef75ae7a2aa186c8a0ed6 /llvm/test/CodeGen/X86/vzero-excess.ll | |
| parent | d884927463d772adbbbf1073b2a3187c2a82d5b0 | |
[x86, AVX] allow explicit calls to VZERO* to modify state in VZeroUpperInserter pass (PR27823)
As noted in the review, there are still problems, so this doesn't fix the bug completely.
Differential Revision: http://reviews.llvm.org/D20529
llvm-svn: 270718
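
For context, the IR shape these tests exercise looks like the sketch below. This is a minimal sketch reconstructed from the CHECK lines in the diff, not copied verbatim from the test file; the function name `@explicit_vzu` is hypothetical, while `@the_unknown` mirrors the callee used in the tests:

```llvm
; Minimal sketch (assumed shape, reconstructed from the test's CHECK lines).
declare void @llvm.x86.avx.vzeroupper()
declare void @the_unknown()

define <8 x float> @explicit_vzu(<8 x float> %x) nounwind {
  ; The explicit intrinsic call already leaves the upper YMM state clean ...
  call void @llvm.x86.avx.vzeroupper()
  ; ... so the VZeroUpperInserter pass no longer needs to insert a second
  ; vzeroupper before this call to an unknown function.
  call void @the_unknown()
  ret <8 x float> %x
}
```

Compiled with the test's RUN line (`llc -mtriple=x86_64-unknown-unknown -mattr=+avx`), this should now emit a single vzeroupper before the call rather than two.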
Diffstat (limited to 'llvm/test/CodeGen/X86/vzero-excess.ll')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/vzero-excess.ll | 7 |

1 file changed, 2 insertions, 5 deletions
```diff
diff --git a/llvm/test/CodeGen/X86/vzero-excess.ll b/llvm/test/CodeGen/X86/vzero-excess.ll
index 7537b8eefb9..0ed90741b61 100644
--- a/llvm/test/CodeGen/X86/vzero-excess.ll
+++ b/llvm/test/CodeGen/X86/vzero-excess.ll
@@ -1,7 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
-; FIXME: The vzeroupper added by the VZeroUpperInserter pass is unnecessary in these tests.
+; In the following 4 tests, the existing call to VZU/VZA ensures clean state before
+; the call to the unknown, so we don't need to insert a second VZU at that point.
 
 define <4 x float> @zeroupper_v4f32(<8 x float> *%x, <8 x float> %y) nounwind {
 ; CHECK-LABEL: zeroupper_v4f32:
@@ -11,7 +12,6 @@ define <4 x float> @zeroupper_v4f32(<8 x float> *%x, <8 x float> %y) nounwind {
 ; CHECK-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
 ; CHECK-NEXT: movq %rdi, %rbx
 ; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: callq the_unknown
 ; CHECK-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
 ; CHECK-NEXT: vaddps (%rbx), %ymm0, %ymm0
@@ -37,7 +37,6 @@ define <8 x float> @zeroupper_v8f32(<8 x float> %x) nounwind {
 ; CHECK-NEXT: subq $56, %rsp
 ; CHECK-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
 ; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: callq the_unknown
 ; CHECK-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
 ; CHECK-NEXT: addq $56, %rsp
@@ -55,7 +54,6 @@ define <4 x float> @zeroall_v4f32(<8 x float> *%x, <8 x float> %y) nounwind {
 ; CHECK-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
 ; CHECK-NEXT: movq %rdi, %rbx
 ; CHECK-NEXT: vzeroall
-; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: callq the_unknown
 ; CHECK-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
 ; CHECK-NEXT: vaddps (%rbx), %ymm0, %ymm0
@@ -81,7 +79,6 @@ define <8 x float> @zeroall_v8f32(<8 x float> %x) nounwind {
 ; CHECK-NEXT: subq $56, %rsp
 ; CHECK-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
 ; CHECK-NEXT: vzeroall
-; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: callq the_unknown
 ; CHECK-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
 ; CHECK-NEXT: addq $56, %rsp
```

