Diffstat (limited to 'llvm/test/CodeGen/X86/logical-load-fold.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/logical-load-fold.ll | 17 |
1 file changed, 9 insertions, 8 deletions
diff --git a/llvm/test/CodeGen/X86/logical-load-fold.ll b/llvm/test/CodeGen/X86/logical-load-fold.ll
index 5aac2d7910c..73930ca8bca 100644
--- a/llvm/test/CodeGen/X86/logical-load-fold.ll
+++ b/llvm/test/CodeGen/X86/logical-load-fold.ll
@@ -1,27 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2,sse-unaligned-mem | FileCheck %s --check-prefix=SSE2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX
 
-; Although we have the ability to fold an unaligned load with AVX 
+; Although we have the ability to fold an unaligned load with AVX
 ; and under special conditions with some SSE implementations, we
 ; can not fold the load under any circumstances in these test
 ; cases because they are not 16-byte loads. The load must be
 ; executed as a scalar ('movs*') with a zero extension to
-; 128-bits and then used in the packed logical ('andp*') op. 
+; 128-bits and then used in the packed logical ('andp*') op.
 ; PR22371 - http://llvm.org/bugs/show_bug.cgi?id=22371
 
 define double @load_double_no_fold(double %x, double %y) {
 ; SSE2-LABEL: load_double_no_fold:
-; SSE2:       BB#0:
+; SSE2:       # BB#0:
 ; SSE2-NEXT:    cmplesd %xmm0, %xmm1
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT:    andpd %xmm1, %xmm0
+; SSE2-NEXT:    andps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: load_double_no_fold:
-; AVX:       BB#0:
+; AVX:       # BB#0:
 ; AVX-NEXT:    vcmplesd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX-NEXT:    vandpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 
   %cmp = fcmp oge double %x, %y
@@ -32,14 +33,14 @@ define double @load_double_no_fold(double %x, double %y) {
 
 define float @load_float_no_fold(float %x, float %y) {
 ; SSE2-LABEL: load_float_no_fold:
-; SSE2:       BB#0:
+; SSE2:       # BB#0:
 ; SSE2-NEXT:    cmpless %xmm0, %xmm1
 ; SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    andps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: load_float_no_fold:
-; AVX:       BB#0:
+; AVX:       # BB#0:
 ; AVX-NEXT:    vcmpless %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
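For context on the distinction the test comment draws, here is a minimal hand-written sketch of folded vs. unfolded loads; it is illustrative only, not output from this commit, and the (%rdi) addressing and register choices are assumptions:

    # A full 16-byte load can be folded straight into the packed logical op
    # (given alignment, or unaligned memory support under AVX):
    andps   (%rdi), %xmm0          # m128 operand folded into the AND

    # An 8-byte scalar double cannot be folded; it must first be loaded
    # with zero extension of the upper lanes, then used in the packed op:
    movsd   (%rdi), %xmm1          # xmm1 = mem[0],zero
    andps   %xmm1, %xmm0

The commit itself only refreshes the CHECK lines via update_llc_test_checks.py to match current llc output ('# BB#0:' block labels, andps/vandps in place of andpd/vandpd); the scalar-load-then-packed-op pattern the test guards is unchanged.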