author     Stephen Canon <scanon@apple.com>    2015-09-22 11:43:17 +0000
committer  Stephen Canon <scanon@apple.com>    2015-09-22 11:43:17 +0000
commit     8216d88511cae5fa9b62ac22ed874fa543cadea4 (patch)
tree       59dd85cc6f34c01e1fbcfe045b8f91dad1ee12b1 /llvm/test/CodeGen/X86
parent     10c80e79963f26b0c3d155506376a8c0ab4472bd (diff)
download   bcm5719-llvm-8216d88511cae5fa9b62ac22ed874fa543cadea4.tar.gz
           bcm5719-llvm-8216d88511cae5fa9b62ac22ed874fa543cadea4.zip
Don't raise inexact when lowering ceil, floor, round, trunc.
The C standard has historically not specified whether or not these functions should raise the inexact flag. Traditionally on Darwin, these functions *did* raise inexact, and the LLVM lowerings followed that convention. n1778 (C bindings for IEEE-754 (2008)) clarifies that these functions should not set inexact. This patch brings the lowerings for arm64 and x86 in line with the newly specified behavior. This also lets us fold some logic into TD patterns, which is nice.

Differential Revision: http://reviews.llvm.org/D12969

llvm-svn: 248266
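For context on the immediate changes in the tests below (this note is not part of the original commit message): in the SSE4.1 ROUNDSS/ROUNDSD immediate, bits 1:0 select the rounding mode (0 = to nearest, 1 = toward -inf, 2 = toward +inf, 3 = toward zero) and bit 3 suppresses the precision (inexact) exception. Suppressing inexact therefore moves floor/ceil/trunc from $1/$2/$3 to $9/$10/$11. A minimal C sketch, assuming an SSE4.1-capable x86-64 target and the standard <smmintrin.h> intrinsics:

/* Sketch only: shows how the rounding-control bits combine into the
 * immediates seen in the updated CHECK lines ($9, $10, $11). */
#include <smmintrin.h>  /* SSE4.1: _mm_round_ss and the _MM_FROUND_* bits */
#include <stdio.h>

int main(void) {
  const int floor_imm = _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC; /* 0x1 | 0x8 = 9  */
  const int ceil_imm  = _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC; /* 0x2 | 0x8 = 10 */
  const int trunc_imm = _MM_FROUND_TO_ZERO    | _MM_FROUND_NO_EXC; /* 0x3 | 0x8 = 11 */
  printf("floor=%d ceil=%d trunc=%d\n", floor_imm, ceil_imm, trunc_imm);

  /* floor(2.5f) without raising inexact; with optimization this typically
   * lowers to "roundss $9, %xmm0, %xmm0", matching the tests below. */
  __m128 x = _mm_set_ss(2.5f);
  __m128 r = _mm_round_ss(x, x, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
  printf("floor(2.5) = %g\n", _mm_cvtss_f32(r));
  return 0;
}

Building with something like "cc -O2 -msse4.1" should show the $9 immediate in the generated assembly; the exact output depends on the compiler.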
Diffstat (limited to 'llvm/test/CodeGen/X86')
-rw-r--r--  llvm/test/CodeGen/X86/avx-cvt.ll                   2
-rw-r--r--  llvm/test/CodeGen/X86/floor-soft-float.ll          2
-rw-r--r--  llvm/test/CodeGen/X86/rounding-ops.ll             24
-rw-r--r--  llvm/test/CodeGen/X86/stack-folding-fp-avx1.ll     4
-rw-r--r--  llvm/test/CodeGen/X86/stack-folding-fp-sse42.ll    4
5 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/llvm/test/CodeGen/X86/avx-cvt.ll b/llvm/test/CodeGen/X86/avx-cvt.ll
index 2f039862b36..84aee16a988 100644
--- a/llvm/test/CodeGen/X86/avx-cvt.ll
+++ b/llvm/test/CodeGen/X86/avx-cvt.ll
@@ -137,7 +137,7 @@ declare double @llvm.nearbyint.f64(double %p)
define float @floor_f32(float %a) {
; CHECK-LABEL: floor_f32:
; CHECK: # BB#0:
-; CHECK-NEXT: vroundss $1, %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vroundss $9, %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call float @llvm.floor.f32(float %a)
ret float %res
diff --git a/llvm/test/CodeGen/X86/floor-soft-float.ll b/llvm/test/CodeGen/X86/floor-soft-float.ll
index 7bb738513f5..3b28ecc6379 100644
--- a/llvm/test/CodeGen/X86/floor-soft-float.ll
+++ b/llvm/test/CodeGen/X86/floor-soft-float.ll
@@ -6,7 +6,7 @@ target triple = "x86_64-unknown-linux-gnu"
declare float @llvm.floor.f32(float)
; CHECK-SOFT-FLOAT: callq floorf
-; CHECK-HARD-FLOAT: roundss $1, %xmm0, %xmm0
+; CHECK-HARD-FLOAT: roundss $9, %xmm0, %xmm0
define float @myfloor(float %a) {
%val = tail call float @llvm.floor.f32(float %a)
ret float %val
diff --git a/llvm/test/CodeGen/X86/rounding-ops.ll b/llvm/test/CodeGen/X86/rounding-ops.ll
index 69f4bfb9f47..15a11d1d6a9 100644
--- a/llvm/test/CodeGen/X86/rounding-ops.ll
+++ b/llvm/test/CodeGen/X86/rounding-ops.ll
@@ -6,10 +6,10 @@ define float @test1(float %x) nounwind {
ret float %call
; CHECK-SSE-LABEL: test1:
-; CHECK-SSE: roundss $1
+; CHECK-SSE: roundss $9
; CHECK-AVX-LABEL: test1:
-; CHECK-AVX: vroundss $1
+; CHECK-AVX: vroundss $9
}
declare float @floorf(float) nounwind readnone
@@ -19,10 +19,10 @@ define double @test2(double %x) nounwind {
ret double %call
; CHECK-SSE-LABEL: test2:
-; CHECK-SSE: roundsd $1
+; CHECK-SSE: roundsd $9
; CHECK-AVX-LABEL: test2:
-; CHECK-AVX: vroundsd $1
+; CHECK-AVX: vroundsd $9
}
declare double @floor(double) nounwind readnone
@@ -58,10 +58,10 @@ define float @test5(float %x) nounwind {
ret float %call
; CHECK-SSE-LABEL: test5:
-; CHECK-SSE: roundss $2
+; CHECK-SSE: roundss $10
; CHECK-AVX-LABEL: test5:
-; CHECK-AVX: vroundss $2
+; CHECK-AVX: vroundss $10
}
declare float @ceilf(float) nounwind readnone
@@ -71,10 +71,10 @@ define double @test6(double %x) nounwind {
ret double %call
; CHECK-SSE-LABEL: test6:
-; CHECK-SSE: roundsd $2
+; CHECK-SSE: roundsd $10
; CHECK-AVX-LABEL: test6:
-; CHECK-AVX: vroundsd $2
+; CHECK-AVX: vroundsd $10
}
declare double @ceil(double) nounwind readnone
@@ -110,10 +110,10 @@ define float @test9(float %x) nounwind {
ret float %call
; CHECK-SSE-LABEL: test9:
-; CHECK-SSE: roundss $3
+; CHECK-SSE: roundss $11
; CHECK-AVX-LABEL: test9:
-; CHECK-AVX: vroundss $3
+; CHECK-AVX: vroundss $11
}
declare float @truncf(float) nounwind readnone
@@ -123,10 +123,10 @@ define double @test10(double %x) nounwind {
ret double %call
; CHECK-SSE-LABEL: test10:
-; CHECK-SSE: roundsd $3
+; CHECK-SSE: roundsd $11
; CHECK-AVX-LABEL: test10:
-; CHECK-AVX: vroundsd $3
+; CHECK-AVX: vroundsd $11
}
declare double @trunc(double) nounwind readnone
diff --git a/llvm/test/CodeGen/X86/stack-folding-fp-avx1.ll b/llvm/test/CodeGen/X86/stack-folding-fp-avx1.ll
index 63aa742bdf0..7ac17067a69 100644
--- a/llvm/test/CodeGen/X86/stack-folding-fp-avx1.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-fp-avx1.ll
@@ -1411,7 +1411,7 @@ declare <8 x float> @llvm.x86.avx.round.ps.256(<8 x float>, i32) nounwind readno
define double @stack_fold_roundsd(double %a0) optsize {
;CHECK-LABEL: stack_fold_roundsd
- ;CHECK: vroundsd $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ ;CHECK: vroundsd $9, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
%2 = call double @llvm.floor.f64(double %a0)
ret double %2
@@ -1423,7 +1423,7 @@ declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) n
define float @stack_fold_roundss(float %a0) optsize {
;CHECK-LABEL: stack_fold_roundss
- ;CHECK: vroundss $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ ;CHECK: vroundss $9, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
%2 = call float @llvm.floor.f32(float %a0)
ret float %2
diff --git a/llvm/test/CodeGen/X86/stack-folding-fp-sse42.ll b/llvm/test/CodeGen/X86/stack-folding-fp-sse42.ll
index 8ee23a1048f..8086f06bf1b 100644
--- a/llvm/test/CodeGen/X86/stack-folding-fp-sse42.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-fp-sse42.ll
@@ -886,7 +886,7 @@ declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone
define double @stack_fold_roundsd(double %a0) optsize {
;CHECK-LABEL: stack_fold_roundsd
- ;CHECK: roundsd $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
+ ;CHECK: roundsd $9, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
%2 = call double @llvm.floor.f64(double %a0)
ret double %2
@@ -898,7 +898,7 @@ declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) n
define float @stack_fold_roundss(float %a0) minsize {
;CHECK-LABEL: stack_fold_roundss
- ;CHECK: roundss $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
+ ;CHECK: roundss $9, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
%2 = call float @llvm.floor.f32(float %a0)
ret float %2