summary | refs | log | tree | commit | diff | stats
path: root/llvm/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
diff options
context:
space:
mode:
author: Sanjay Patel <spatel@rotateright.com> 2016-06-15 21:22:15 +0000
committer: Sanjay Patel <spatel@rotateright.com> 2016-06-15 21:22:15 +0000
commit: 74b40bdb5379ea53a5e84dba4f3f05fd2ad1975d (patch)
tree: ac370c4190c76224189cf74312bf0df2777a48fb /llvm/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
parent: 280cfd1a690468577fdffa8f15321be42eae201a (diff)
download: bcm5719-llvm-74b40bdb5379ea53a5e84dba4f3f05fd2ad1975d.tar.gz
download: bcm5719-llvm-74b40bdb5379ea53a5e84dba4f3f05fd2ad1975d.zip
[x86, SSE] update packed FP compare tests for direct translation from builtin to IR
The clang side of this was r272840: http://reviews.llvm.org/rL272840. A follow-up step would be to auto-upgrade and remove these LLVM intrinsics completely. Differential Revision: http://reviews.llvm.org/D21269 llvm-svn: 272841
Diffstat (limited to 'llvm/test/CodeGen/X86/sse-intrinsics-fast-isel.ll')
-rw-r--r-- llvm/test/CodeGen/X86/sse-intrinsics-fast-isel.ll | 49
1 file changed, 36 insertions, 13 deletions
diff --git a/llvm/test/CodeGen/X86/sse-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
index 8d7f6c33e6d..fe63b82f814 100644
--- a/llvm/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
@@ -196,10 +196,11 @@ define <4 x float> @test_mm_cmpeq_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64: # BB#0:
; X64-NEXT: cmpeqps %xmm1, %xmm0
; X64-NEXT: retq
- %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 0)
+ %cmp = fcmp oeq <4 x float> %a0, %a1
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}
-declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone
define <4 x float> @test_mm_cmpeq_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm_cmpeq_ss:
@@ -228,7 +229,9 @@ define <4 x float> @test_mm_cmpge_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64-NEXT: cmpleps %xmm0, %xmm1
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
- %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a1, <4 x float> %a0, i8 2)
+ %cmp = fcmp ole <4 x float> %a1, %a0
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}
@@ -261,7 +264,9 @@ define <4 x float> @test_mm_cmpgt_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64-NEXT: cmpltps %xmm0, %xmm1
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
- %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a1, <4 x float> %a0, i8 1)
+ %cmp = fcmp olt <4 x float> %a1, %a0
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}
@@ -292,7 +297,9 @@ define <4 x float> @test_mm_cmple_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64: # BB#0:
; X64-NEXT: cmpleps %xmm1, %xmm0
; X64-NEXT: retq
- %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 2)
+ %cmp = fcmp ole <4 x float> %a0, %a1
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}
@@ -320,7 +327,9 @@ define <4 x float> @test_mm_cmplt_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64: # BB#0:
; X64-NEXT: cmpltps %xmm1, %xmm0
; X64-NEXT: retq
- %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 1)
+ %cmp = fcmp olt <4 x float> %a0, %a1
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}
@@ -348,7 +357,9 @@ define <4 x float> @test_mm_cmpneq_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64: # BB#0:
; X64-NEXT: cmpneqps %xmm1, %xmm0
; X64-NEXT: retq
- %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 4)
+ %cmp = fcmp une <4 x float> %a0, %a1
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}
@@ -378,7 +389,9 @@ define <4 x float> @test_mm_cmpnge_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64-NEXT: cmpnleps %xmm0, %xmm1
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
- %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a1, <4 x float> %a0, i8 6)
+ %cmp = fcmp ugt <4 x float> %a1, %a0
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}
@@ -411,7 +424,9 @@ define <4 x float> @test_mm_cmpngt_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64-NEXT: cmpnltps %xmm0, %xmm1
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
- %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a1, <4 x float> %a0, i8 5)
+ %cmp = fcmp uge <4 x float> %a1, %a0
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}
@@ -442,7 +457,9 @@ define <4 x float> @test_mm_cmpnle_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64: # BB#0:
; X64-NEXT: cmpnleps %xmm1, %xmm0
; X64-NEXT: retq
- %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 6)
+ %cmp = fcmp ugt <4 x float> %a0, %a1
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}
@@ -470,7 +487,9 @@ define <4 x float> @test_mm_cmpnlt_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64: # BB#0:
; X64-NEXT: cmpnltps %xmm1, %xmm0
; X64-NEXT: retq
- %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 5)
+ %cmp = fcmp uge <4 x float> %a0, %a1
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}
@@ -498,7 +517,9 @@ define <4 x float> @test_mm_cmpord_ps(<4 x float> %a0, <4 x float> %a1) nounwind
; X64: # BB#0:
; X64-NEXT: cmpordps %xmm1, %xmm0
; X64-NEXT: retq
- %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 7)
+ %cmp = fcmp ord <4 x float> %a0, %a1
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}
@@ -526,7 +547,9 @@ define <4 x float> @test_mm_cmpunord_ps(<4 x float> %a0, <4 x float> %a1) nounwi
; X64: # BB#0:
; X64-NEXT: cmpunordps %xmm1, %xmm0
; X64-NEXT: retq
- %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 3)
+ %cmp = fcmp uno <4 x float> %a0, %a1
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %res = bitcast <4 x i32> %sext to <4 x float>
ret <4 x float> %res
}
OpenPOWER on IntegriCloud