summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorCameron McInally <cameron.mcinally@nyu.edu>2019-06-06 15:29:11 +0000
committerCameron McInally <cameron.mcinally@nyu.edu>2019-06-06 15:29:11 +0000
commit1d85a7518c6b660a85caabd580b632f9abd5a8ab (patch)
treed1effe7dce7e366a66ad16cb4d2b6048e1cc2ebd
parent03e8369a72804d2aeebb2dc36ee17f32c7537275 (diff)
downloadbcm5719-llvm-1d85a7518c6b660a85caabd580b632f9abd5a8ab.tar.gz
bcm5719-llvm-1d85a7518c6b660a85caabd580b632f9abd5a8ab.zip
[NFC][CodeGen] Add unary fneg tests to fp-fast.ll fp-fold.ll fp-in-intregs.ll fp-stack-compare-cmov.ll fp-stack-compare.ll fsxor-alignment.ll
llvm-svn: 362712
-rw-r--r--llvm/test/CodeGen/X86/fp-fast.ll10
-rw-r--r--llvm/test/CodeGen/X86/fp-fold.ll21
-rw-r--r--llvm/test/CodeGen/X86/fp-in-intregs.ll7
-rw-r--r--llvm/test/CodeGen/X86/fp-stack-compare-cmov.ll10
-rw-r--r--llvm/test/CodeGen/X86/fp-stack-compare.ll12
-rw-r--r--llvm/test/CodeGen/X86/fsxor-alignment.ll26
6 files changed, 85 insertions, 1 deletion
diff --git a/llvm/test/CodeGen/X86/fp-fast.ll b/llvm/test/CodeGen/X86/fp-fast.ll
index 7abedfd2d17..9e8cdf0a530 100644
--- a/llvm/test/CodeGen/X86/fp-fast.ll
+++ b/llvm/test/CodeGen/X86/fp-fast.ll
@@ -104,3 +104,13 @@ define float @test10(float %a) {
%t2 = fadd float %a, %t1
ret float %t2
}
+
+define float @test11(float %a) {
+; CHECK-LABEL: test11:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: retq
+ %t1 = fneg float %a
+ %t2 = fadd float %a, %t1
+ ret float %t2
+}
diff --git a/llvm/test/CodeGen/X86/fp-fold.ll b/llvm/test/CodeGen/X86/fp-fold.ll
index aee45185700..45cfa58703a 100644
--- a/llvm/test/CodeGen/X86/fp-fold.ll
+++ b/llvm/test/CodeGen/X86/fp-fold.ll
@@ -34,6 +34,16 @@ define float @fadd_produce_zero(float %x) {
ret float %r
}
+define float @fadd_produce_zero_unary_fneg(float %x) {
+; ANY-LABEL: fadd_produce_zero_unary_fneg:
+; ANY: # %bb.0:
+; ANY-NEXT: xorps %xmm0, %xmm0
+; ANY-NEXT: retq
+ %neg = fneg nsz float %x
+ %r = fadd nnan float %neg, %x
+ ret float %r
+}
+
define float @fadd_reassociate(float %x) {
; ANY-LABEL: fadd_reassociate:
; ANY: # %bb.0:
@@ -88,6 +98,17 @@ define float @fsub_neg_x_y(float %x, float %y) {
ret float %r
}
+define float @unary_neg_x_y(float %x, float %y) {
+; ANY-LABEL: unary_neg_x_y:
+; ANY: # %bb.0:
+; ANY-NEXT: subss %xmm0, %xmm1
+; ANY-NEXT: movaps %xmm1, %xmm0
+; ANY-NEXT: retq
+ %neg = fneg nsz float %x
+ %r = fadd nsz float %neg, %y
+ ret float %r
+}
+
define float @fsub_neg_y(float %x, float %y) {
; ANY-LABEL: fsub_neg_y:
; ANY: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/fp-in-intregs.ll b/llvm/test/CodeGen/X86/fp-in-intregs.ll
index 1f5121d271c..89631a388cd 100644
--- a/llvm/test/CodeGen/X86/fp-in-intregs.ll
+++ b/llvm/test/CodeGen/X86/fp-in-intregs.ll
@@ -18,5 +18,12 @@ entry:
ret i32 %tmp210
}
+define i32 @test3(float %x) nounwind {
+entry:
+ %tmp2 = fneg float %x ; <float> [#uses=1]
+ %tmp210 = bitcast float %tmp2 to i32 ; <i32> [#uses=1]
+ ret i32 %tmp210
+}
+
declare float @copysignf(float, float) nounwind readnone
diff --git a/llvm/test/CodeGen/X86/fp-stack-compare-cmov.ll b/llvm/test/CodeGen/X86/fp-stack-compare-cmov.ll
index d0e816db3b6..72cd034fc91 100644
--- a/llvm/test/CodeGen/X86/fp-stack-compare-cmov.ll
+++ b/llvm/test/CodeGen/X86/fp-stack-compare-cmov.ll
@@ -10,3 +10,13 @@ define float @foo(float* %col.2.0) {
%iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp
ret float %iftmp.2.0
}
+
+define float @foo_unary_fneg(float* %col.2.0) {
+; CHECK: fucompi
+; CHECK: fcmov
+ %tmp = load float, float* %col.2.0
+ %tmp16 = fcmp olt float %tmp, 0.000000e+00
+ %tmp20 = fneg float %tmp
+ %iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp
+ ret float %iftmp.2.0
+}
diff --git a/llvm/test/CodeGen/X86/fp-stack-compare.ll b/llvm/test/CodeGen/X86/fp-stack-compare.ll
index 8ff0dd442f9..974e4d55b22 100644
--- a/llvm/test/CodeGen/X86/fp-stack-compare.ll
+++ b/llvm/test/CodeGen/X86/fp-stack-compare.ll
@@ -12,3 +12,15 @@ define float @foo(float* %col.2.0) {
%iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp
ret float %iftmp.2.0
}
+
+define float @foo_unary_fneg(float* %col.2.0) {
+; CHECK: fucomp
+; CHECK-NOT: fucompi
+; CHECK: j
+; CHECK-NOT: fcmov
+ %tmp = load float, float* %col.2.0
+ %tmp16 = fcmp olt float %tmp, 0.000000e+00
+ %tmp20 = fneg float %tmp
+ %iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp
+ ret float %iftmp.2.0
+}
diff --git a/llvm/test/CodeGen/X86/fsxor-alignment.ll b/llvm/test/CodeGen/X86/fsxor-alignment.ll
index 6f9738f50fb..f43ece087e6 100644
--- a/llvm/test/CodeGen/X86/fsxor-alignment.ll
+++ b/llvm/test/CodeGen/X86/fsxor-alignment.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 -enable-unsafe-fp-math | \
-; RUN: grep -v sp | grep xorps | count 2
+; RUN: grep -v sp | grep xorps | count 8
; Don't fold the incoming stack arguments into the xorps instructions used
; to do floating-point negations, because the arguments aren't vectors
@@ -12,3 +12,27 @@ define void @foo(float* %p, float* %q, float %s, float %y) {
store float %yy, float* %q
ret void
}
+
+define void @foo_unary_fneg_x_y(float* %p, float* %q, float %s, float %y) {
+ %ss = fneg float %s
+ %yy = fneg float %y
+ store float %ss, float* %p
+ store float %yy, float* %q
+ ret void
+}
+
+define void @foo_unary_fneg_x(float* %p, float* %q, float %s, float %y) {
+ %ss = fneg float %s
+ %yy = fsub float -0.0, %y
+ store float %ss, float* %p
+ store float %yy, float* %q
+ ret void
+}
+
+define void @foo_unary_fneg_y(float* %p, float* %q, float %s, float %y) {
+ %ss = fsub float -0.0, %s
+ %yy = fneg float %y
+ store float %ss, float* %p
+ store float %yy, float* %q
+ ret void
+}
OpenPOWER on IntegriCloud