| author | Sanjay Patel <spatel@rotateright.com> | 2016-10-02 17:07:24 +0000 |
|---|---|---|
| committer | Sanjay Patel <spatel@rotateright.com> | 2016-10-02 17:07:24 +0000 |
| commit | 170d7eb3031e7b13e10a6e7e92eddc3630e51872 (patch) | |
| tree | 05d2c0e7afb5c7e3223d2a2660b7f9e17e69b7ac /llvm | |
| parent | dfbbbcd6628f0380707e73f97b6159d8c4878ac8 (diff) | |
| download | bcm5719-llvm-170d7eb3031e7b13e10a6e7e92eddc3630e51872.tar.gz bcm5719-llvm-170d7eb3031e7b13e10a6e7e92eddc3630e51872.zip | |
[x86] remove 'nan' strings from copysign assertions; NFC
Preemptively scrubbing these to avoid a bot failure like the one in PR30443:
https://llvm.org/bugs/show_bug.cgi?id=30443
I'm nearly done with a patch to fix these cases, so I'm not trying very
hard to do better for this temporary fix.
I plan to use better checks than what the script produces for the vectorized cases.
llvm-svn: 283072
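
The scrubbed constant is the magnitude mask: its per-lane bit pattern (0x7fffffff for floats) decodes as a NaN, so the assembly printer's constant-pool comment renders it as [nan,nan,nan,nan], which is the string implicated in the PR30443 bot failure. A minimal scalar sketch of the bitwise expansion these tests exercise, for illustration only (not code from this patch):

```llvm
; Illustrative scalar equivalent of the vector copysign lowering shown
; in the diff below: select the magnitude bits of %mag and the sign bit
; of %sign, then merge them.
define float @copysign_bits(float %mag, float %sign) {
  %m = bitcast float %mag to i32
  %s = bitcast float %sign to i32
  ; 0x7fffffff clears the sign bit; reinterpreted as a float this
  ; pattern is a NaN, which is why the checked constant prints as "nan".
  %magbits = and i32 %m, 2147483647
  ; 0x80000000 keeps only the sign bit; as a float this is -0.0,
  ; matching the [-0.000000e+00,...] constant in the checks.
  %signbits = and i32 %s, -2147483648
  %r = or i32 %magbits, %signbits
  %f = bitcast i32 %r to float
  ret float %f
}
```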
Diffstat (limited to 'llvm')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/vec-copysign.ll | 16 |
1 file changed, 8 insertions, 8 deletions
```diff
diff --git a/llvm/test/CodeGen/X86/vec-copysign.ll b/llvm/test/CodeGen/X86/vec-copysign.ll
index 6392ca990c3..2db06555ea2 100644
--- a/llvm/test/CodeGen/X86/vec-copysign.ll
+++ b/llvm/test/CodeGen/X86/vec-copysign.ll
@@ -13,7 +13,7 @@ define <4 x float> @v4f32(<4 x float> %a, <4 x float> %b) nounwind {
 ; SSE2-NEXT:    andps %xmm3, %xmm2
 ; SSE2-NEXT:    movaps %xmm0, %xmm4
 ; SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[3,1,2,3]
-; SSE2-NEXT:    movaps {{.*#+}} xmm5 = [nan,nan,nan,nan]
+; SSE2-NEXT:    movaps {{.*#+}} xmm5
 ; SSE2-NEXT:    andps %xmm5, %xmm4
 ; SSE2-NEXT:    orps %xmm2, %xmm4
 ; SSE2-NEXT:    movaps %xmm1, %xmm2
@@ -43,7 +43,7 @@ define <4 x float> @v4f32(<4 x float> %a, <4 x float> %b) nounwind {
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm2 = [-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00]
 ; AVX-NEXT:    vandps %xmm2, %xmm1, %xmm3
-; AVX-NEXT:    vmovaps {{.*#+}} xmm4 = [nan,nan,nan,nan]
+; AVX-NEXT:    vmovaps {{.*#+}} xmm4
 ; AVX-NEXT:    vandps %xmm4, %xmm0, %xmm5
 ; AVX-NEXT:    vorps %xmm3, %xmm5, %xmm3
 ; AVX-NEXT:    vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
@@ -80,7 +80,7 @@ define <8 x float> @v8f32(<8 x float> %a, <8 x float> %b) nounwind {
 ; SSE2-NEXT:    andps %xmm8, %xmm0
 ; SSE2-NEXT:    movaps %xmm5, %xmm7
 ; SSE2-NEXT:    shufps {{.*#+}} xmm7 = xmm7[3,1,2,3]
-; SSE2-NEXT:    movaps {{.*#+}} xmm6 = [nan,nan,nan,nan]
+; SSE2-NEXT:    movaps {{.*#+}} xmm6
 ; SSE2-NEXT:    andps %xmm6, %xmm7
 ; SSE2-NEXT:    orps %xmm0, %xmm7
 ; SSE2-NEXT:    movaps %xmm2, %xmm0
@@ -139,7 +139,7 @@ define <8 x float> @v8f32(<8 x float> %a, <8 x float> %b) nounwind {
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm2 = [-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00]
 ; AVX-NEXT:    vandps %xmm2, %xmm4, %xmm5
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm6
-; AVX-NEXT:    vmovaps {{.*#+}} xmm3 = [nan,nan,nan,nan]
+; AVX-NEXT:    vmovaps {{.*#+}} xmm3
 ; AVX-NEXT:    vandps %xmm3, %xmm6, %xmm7
 ; AVX-NEXT:    vorps %xmm5, %xmm7, %xmm8
 ; AVX-NEXT:    vmovshdup {{.*#+}} xmm7 = xmm4[1,1,3,3]
@@ -194,7 +194,7 @@ define <2 x double> @v2f64(<2 x double> %a, <2 x double> %b) nounwind {
 ; SSE2-NEXT:    movaps {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
 ; SSE2-NEXT:    movaps %xmm1, %xmm4
 ; SSE2-NEXT:    andps %xmm3, %xmm4
-; SSE2-NEXT:    movaps {{.*#+}} xmm5 = [nan,nan]
+; SSE2-NEXT:    movaps {{.*#+}} xmm5
 ; SSE2-NEXT:    movaps %xmm0, %xmm2
 ; SSE2-NEXT:    andps %xmm5, %xmm2
 ; SSE2-NEXT:    orps %xmm4, %xmm2
@@ -211,7 +211,7 @@ define <2 x double> @v2f64(<2 x double> %a, <2 x double> %b) nounwind {
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vmovapd {{.*#+}} xmm2 = [-0.000000e+00,-0.000000e+00]
 ; AVX-NEXT:    vandpd %xmm2, %xmm1, %xmm3
-; AVX-NEXT:    vmovapd {{.*#+}} xmm4 = [nan,nan]
+; AVX-NEXT:    vmovapd {{.*#+}} xmm4
 ; AVX-NEXT:    vandpd %xmm4, %xmm0, %xmm5
 ; AVX-NEXT:    vorpd %xmm3, %xmm5, %xmm3
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
@@ -233,7 +233,7 @@ define <4 x double> @v4f64(<4 x double> %a, <4 x double> %b) nounwind {
 ; SSE2-NEXT:    movaps {{.*#+}} xmm5 = [-0.000000e+00,-0.000000e+00]
 ; SSE2-NEXT:    movaps %xmm2, %xmm6
 ; SSE2-NEXT:    andps %xmm5, %xmm6
-; SSE2-NEXT:    movaps {{.*#+}} xmm7 = [nan,nan]
+; SSE2-NEXT:    movaps {{.*#+}} xmm7
 ; SSE2-NEXT:    andps %xmm7, %xmm0
 ; SSE2-NEXT:    orps %xmm6, %xmm0
 ; SSE2-NEXT:    movhlps {{.*#+}} xmm2 = xmm2[1,1]
@@ -262,7 +262,7 @@ define <4 x double> @v4f64(<4 x double> %a, <4 x double> %b) nounwind {
 ; AVX-NEXT:    vmovapd {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
 ; AVX-NEXT:    vandpd %xmm3, %xmm2, %xmm4
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX-NEXT:    vmovapd {{.*#+}} xmm6 = [nan,nan]
+; AVX-NEXT:    vmovapd {{.*#+}} xmm6
 ; AVX-NEXT:    vandpd %xmm6, %xmm5, %xmm7
 ; AVX-NEXT:    vorpd %xmm4, %xmm7, %xmm4
 ; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
```
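For reference, a condensed sketch of what one of the affected tests looks like after the scrub; the RUN line and the set of check lines here are illustrative rather than copied from vec-copysign.ll, but the scrubbed vmovaps check matches the form in the diff above:

```llvm
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX

declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>)

define <4 x float> @v4f32(<4 x float> %a, <4 x float> %b) nounwind {
; AVX-LABEL: v4f32:
; The load of the magnitude mask into xmm4 is still checked, but the
; host-dependent "= [nan,nan,nan,nan]" comment text is no longer pinned.
; AVX:       vandps
; AVX:       vmovaps {{.*#+}} xmm4
; AVX:       vorps
  %r = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b)
  ret <4 x float> %r
}
```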

