Diffstat (limited to 'llvm/test/CodeGen/X86')
-rw-r--r--  llvm/test/CodeGen/X86/fp-intrinsics.ll                           |  24
-rw-r--r--  llvm/test/CodeGen/X86/fp128-cast-strict.ll                       |  30
-rw-r--r--  llvm/test/CodeGen/X86/fp128-libcalls-strict.ll                   | 144
-rw-r--r--  llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-flags.ll  |   4
-rw-r--r--  llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll        | 146
5 files changed, 174 insertions, 174 deletions
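
For orientation before the hunks: every change in this diff only reorders FileCheck lines, typically the four 32-bit movl stores (or the two SSE register copies) that write a 128-bit or two-register result back to the caller; the stored values themselves are unchanged. A reduced sketch of the kind of test function involved, assuming the constrained-intrinsic body that fp-intrinsics.ll uses for f20s128 (the intrinsic call and its metadata argument do not appear in this diff and are reproduced here as an assumption):

; Hypothetical reduced test: strict-FP conversion of double to i128.
; On i686 the i128 result is returned through a pointer, so codegen emits
; four movl stores -- the updated CHECK lines below only track their order.
define i128 @f20s128(double %x) nounwind strictfp {
entry:
  %result = call i128 @llvm.experimental.constrained.fptosi.i128.f64(double %x,
                          metadata !"fpexcept.strict") strictfp
  ret i128 %result
}

declare i128 @llvm.experimental.constrained.fptosi.i128.f64(double, metadata)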
diff --git a/llvm/test/CodeGen/X86/fp-intrinsics.ll b/llvm/test/CodeGen/X86/fp-intrinsics.ll
index 7883b9ba468..aca346a25f1 100644
--- a/llvm/test/CodeGen/X86/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/fp-intrinsics.ll
@@ -1104,10 +1104,10 @@ define i128 @f20s128(double %x) nounwind strictfp {
; X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X87-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X87-NEXT: movl %edi, 12(%esi)
-; X87-NEXT: movl %edx, 8(%esi)
-; X87-NEXT: movl %ecx, 4(%esi)
+; X87-NEXT: movl %edi, 8(%esi)
+; X87-NEXT: movl %edx, 12(%esi)
; X87-NEXT: movl %eax, (%esi)
+; X87-NEXT: movl %ecx, 4(%esi)
; X87-NEXT: movl %esi, %eax
; X87-NEXT: addl $36, %esp
; X87-NEXT: popl %esi
@@ -1130,10 +1130,10 @@ define i128 @f20s128(double %x) nounwind strictfp {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-SSE-NEXT: movl %edi, 12(%esi)
-; X86-SSE-NEXT: movl %edx, 8(%esi)
-; X86-SSE-NEXT: movl %ecx, 4(%esi)
+; X86-SSE-NEXT: movl %edi, 8(%esi)
+; X86-SSE-NEXT: movl %edx, 12(%esi)
; X86-SSE-NEXT: movl %eax, (%esi)
+; X86-SSE-NEXT: movl %ecx, 4(%esi)
; X86-SSE-NEXT: movl %esi, %eax
; X86-SSE-NEXT: addl $36, %esp
; X86-SSE-NEXT: popl %esi
@@ -1443,10 +1443,10 @@ define i128 @f20u128(double %x) nounwind strictfp {
; X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X87-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X87-NEXT: movl %edi, 12(%esi)
-; X87-NEXT: movl %edx, 8(%esi)
-; X87-NEXT: movl %ecx, 4(%esi)
+; X87-NEXT: movl %edi, 8(%esi)
+; X87-NEXT: movl %edx, 12(%esi)
; X87-NEXT: movl %eax, (%esi)
+; X87-NEXT: movl %ecx, 4(%esi)
; X87-NEXT: movl %esi, %eax
; X87-NEXT: addl $36, %esp
; X87-NEXT: popl %esi
@@ -1469,10 +1469,10 @@ define i128 @f20u128(double %x) nounwind strictfp {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-SSE-NEXT: movl %edi, 12(%esi)
-; X86-SSE-NEXT: movl %edx, 8(%esi)
-; X86-SSE-NEXT: movl %ecx, 4(%esi)
+; X86-SSE-NEXT: movl %edi, 8(%esi)
+; X86-SSE-NEXT: movl %edx, 12(%esi)
; X86-SSE-NEXT: movl %eax, (%esi)
+; X86-SSE-NEXT: movl %ecx, 4(%esi)
; X86-SSE-NEXT: movl %esi, %eax
; X86-SSE-NEXT: addl $36, %esp
; X86-SSE-NEXT: popl %esi
diff --git a/llvm/test/CodeGen/X86/fp128-cast-strict.ll b/llvm/test/CodeGen/X86/fp128-cast-strict.ll
index 5300ab6c965..868dfc41637 100644
--- a/llvm/test/CodeGen/X86/fp128-cast-strict.ll
+++ b/llvm/test/CodeGen/X86/fp128-cast-strict.ll
@@ -47,10 +47,10 @@ define void @TestFPExtF32_F128() nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: movl %esi, vf128+8
-; X86-NEXT: movl %edx, vf128+12
-; X86-NEXT: movl %eax, vf128
+; X86-NEXT: movl %esi, vf128+12
+; X86-NEXT: movl %edx, vf128+8
; X86-NEXT: movl %ecx, vf128+4
+; X86-NEXT: movl %eax, vf128
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
@@ -94,10 +94,10 @@ define void @TestFPExtF64_F128() nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: movl %esi, vf128+8
-; X86-NEXT: movl %edx, vf128+12
-; X86-NEXT: movl %eax, vf128
+; X86-NEXT: movl %esi, vf128+12
+; X86-NEXT: movl %edx, vf128+8
; X86-NEXT: movl %ecx, vf128+4
+; X86-NEXT: movl %eax, vf128
; X86-NEXT: addl $40, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
@@ -143,10 +143,10 @@ define void @TestFPExtF80_F128() nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: movl %esi, vf128+8
-; X86-NEXT: movl %edx, vf128+12
-; X86-NEXT: movl %eax, vf128
+; X86-NEXT: movl %esi, vf128+12
+; X86-NEXT: movl %edx, vf128+8
; X86-NEXT: movl %ecx, vf128+4
+; X86-NEXT: movl %eax, vf128
; X86-NEXT: addl $40, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
@@ -396,10 +396,10 @@ define i128 @fptosi_i128(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -535,10 +535,10 @@ define i128 @fptoui_i128(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
diff --git a/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll b/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
index 482ae36820a..b705c760287 100644
--- a/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
+++ b/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
@@ -42,10 +42,10 @@ define fp128 @add(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -87,10 +87,10 @@ define fp128 @sub(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -132,10 +132,10 @@ define fp128 @mul(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -177,10 +177,10 @@ define fp128 @div(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -226,10 +226,10 @@ define fp128 @fma(fp128 %x, fp128 %y, fp128 %z) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -271,10 +271,10 @@ define fp128 @frem(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -312,10 +312,10 @@ define fp128 @ceil(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -353,10 +353,10 @@ define fp128 @cos(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -394,10 +394,10 @@ define fp128 @exp(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -435,10 +435,10 @@ define fp128 @exp2(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -476,10 +476,10 @@ define fp128 @floor(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -517,10 +517,10 @@ define fp128 @log(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -558,10 +558,10 @@ define fp128 @log10(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -599,10 +599,10 @@ define fp128 @log2(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -644,10 +644,10 @@ define fp128 @maxnum(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -689,10 +689,10 @@ define fp128 @minnum(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -730,10 +730,10 @@ define fp128 @nearbyint(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -775,10 +775,10 @@ define fp128 @pow(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -817,10 +817,10 @@ define fp128 @powi(fp128 %x, i32 %y) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -858,10 +858,10 @@ define fp128 @rint(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -899,10 +899,10 @@ define fp128 @round(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -940,10 +940,10 @@ define fp128 @sin(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -981,10 +981,10 @@ define fp128 @sqrt(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@@ -1022,10 +1022,10 @@ define fp128 @trunc(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl %edi, 12(%esi)
-; X86-NEXT: movl %edx, 8(%esi)
-; X86-NEXT: movl %ecx, 4(%esi)
+; X86-NEXT: movl %edi, 8(%esi)
+; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
+; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
diff --git a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-flags.ll b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-flags.ll
index 6b9d84be407..b1ef02c855a 100644
--- a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-flags.ll
+++ b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-flags.ll
@@ -40,8 +40,8 @@ define <4 x double> @constrained_vector_fadd_v4f64() #0 {
; CHECK: [[MOVAPDrm:%[0-9]+]]:vr128 = MOVAPDrm $rip, 1, $noreg, %const.0, $noreg :: (load 16 from constant-pool)
; CHECK: [[ADDPDrm:%[0-9]+]]:vr128 = ADDPDrm [[MOVAPDrm]], $rip, 1, $noreg, %const.1, $noreg, implicit $mxcsr :: (load 16 from constant-pool)
; CHECK: [[ADDPDrm1:%[0-9]+]]:vr128 = ADDPDrm [[MOVAPDrm]], $rip, 1, $noreg, %const.2, $noreg, implicit $mxcsr :: (load 16 from constant-pool)
-; CHECK: $xmm0 = COPY [[ADDPDrm]]
-; CHECK: $xmm1 = COPY [[ADDPDrm1]]
+; CHECK: $xmm0 = COPY [[ADDPDrm1]]
+; CHECK: $xmm1 = COPY [[ADDPDrm]]
; CHECK: RET 0, $xmm0, $xmm1
entry:
%add = call <4 x double> @llvm.experimental.constrained.fadd.v4f64(
diff --git a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
index 434690c2359..6b460c64841 100644
--- a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
@@ -115,10 +115,10 @@ define <4 x double> @constrained_vector_fdiv_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fdiv_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm2 = [1.0E+1,1.0E+1]
-; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
-; CHECK-NEXT: divpd %xmm2, %xmm0
; CHECK-NEXT: movapd {{.*#+}} xmm1 = [3.0E+0,4.0E+0]
; CHECK-NEXT: divpd %xmm2, %xmm1
+; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
+; CHECK-NEXT: divpd %xmm2, %xmm0
; CHECK-NEXT: retq
;
; AVX1-LABEL: constrained_vector_fdiv_v4f64:
@@ -292,9 +292,9 @@ define <3 x double> @constrained_vector_frem_v3f64() #0 {
; CHECK-NEXT: callq fmod
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
-; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -507,10 +507,10 @@ entry:
define <4 x double> @constrained_vector_fmul_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fmul_v4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
-; CHECK-NEXT: movapd {{.*#+}} xmm0 = [2.0E+0,3.0E+0]
-; CHECK-NEXT: mulpd %xmm1, %xmm0
-; CHECK-NEXT: mulpd {{.*}}(%rip), %xmm1
+; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
+; CHECK-NEXT: movapd {{.*#+}} xmm1 = [4.0E+0,5.0E+0]
+; CHECK-NEXT: mulpd %xmm0, %xmm1
+; CHECK-NEXT: mulpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX1-LABEL: constrained_vector_fmul_v4f64:
@@ -644,10 +644,10 @@ entry:
define <4 x double> @constrained_vector_fadd_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fadd_v4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
-; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.0E+0,1.0000000000000001E-1]
-; CHECK-NEXT: addpd %xmm1, %xmm0
-; CHECK-NEXT: addpd {{.*}}(%rip), %xmm1
+; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
+; CHECK-NEXT: movapd {{.*#+}} xmm1 = [2.0E+0,2.0000000000000001E-1]
+; CHECK-NEXT: addpd %xmm0, %xmm1
+; CHECK-NEXT: addpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX1-LABEL: constrained_vector_fadd_v4f64:
@@ -784,10 +784,10 @@ entry:
define <4 x double> @constrained_vector_fsub_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fsub_v4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movapd {{.*#+}} xmm1 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
-; CHECK-NEXT: movapd %xmm1, %xmm0
-; CHECK-NEXT: subpd {{.*}}(%rip), %xmm0
+; CHECK-NEXT: movapd {{.*#+}} xmm0 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
+; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: subpd {{.*}}(%rip), %xmm1
+; CHECK-NEXT: subpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX1-LABEL: constrained_vector_fsub_v4f64:
@@ -912,8 +912,8 @@ entry:
define <4 x double> @constrained_vector_sqrt_v4f64() #0 {
; CHECK-LABEL: constrained_vector_sqrt_v4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: sqrtpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: sqrtpd {{.*}}(%rip), %xmm1
+; CHECK-NEXT: sqrtpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_sqrt_v4f64:
@@ -1077,9 +1077,9 @@ define <3 x double> @constrained_vector_pow_v3f64() #0 {
; CHECK-NEXT: callq pow
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
-; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -1333,9 +1333,9 @@ define <3 x double> @constrained_vector_powi_v3f64() #0 {
; CHECK-NEXT: callq __powidf2
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
-; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -1570,9 +1570,9 @@ define <3 x double> @constrained_vector_sin_v3f64() #0 {
; CHECK-NEXT: callq sin
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
-; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -1794,9 +1794,9 @@ define <3 x double> @constrained_vector_cos_v3f64() #0 {
; CHECK-NEXT: callq cos
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
-; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -2018,9 +2018,9 @@ define <3 x double> @constrained_vector_exp_v3f64() #0 {
; CHECK-NEXT: callq exp
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
-; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -2242,9 +2242,9 @@ define <3 x double> @constrained_vector_exp2_v3f64() #0 {
; CHECK-NEXT: callq exp2
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
-; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -2466,9 +2466,9 @@ define <3 x double> @constrained_vector_log_v3f64() #0 {
; CHECK-NEXT: callq log
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
-; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -2690,9 +2690,9 @@ define <3 x double> @constrained_vector_log10_v3f64() #0 {
; CHECK-NEXT: callq log10
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
-; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -2914,9 +2914,9 @@ define <3 x double> @constrained_vector_log2_v3f64() #0 {
; CHECK-NEXT: callq log2
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
-; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -3116,9 +3116,9 @@ define <3 x double> @constrained_vector_rint_v3f64() #0 {
; CHECK-NEXT: callq rint
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
-; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -3286,9 +3286,9 @@ define <3 x double> @constrained_vector_nearby_v3f64() #0 {
; CHECK-NEXT: callq nearbyint
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
-; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -3492,9 +3492,9 @@ define <3 x double> @constrained_vector_max_v3f64() #0 {
; CHECK-NEXT: callq fmax
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
-; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -3742,9 +3742,9 @@ define <3 x double> @constrained_vector_min_v3f64() #0 {
; CHECK-NEXT: callq fmin
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
-; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -3975,9 +3975,9 @@ entry:
define <3 x i64> @constrained_vector_fptosi_v3i64_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v3i64_v3f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
-; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rdx
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rcx
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rdx
+; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: retq
;
; AVX1-LABEL: constrained_vector_fptosi_v3i64_v3f32:
@@ -4217,9 +4217,9 @@ entry:
define <3 x i64> @constrained_vector_fptosi_v3i64_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v3i64_v3f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
-; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rdx
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rcx
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rdx
+; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: retq
;
; AVX1-LABEL: constrained_vector_fptosi_v3i64_v3f64:
@@ -5542,9 +5542,9 @@ define <3 x double> @constrained_vector_fpext_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fpext_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: cvtss2sd %xmm0, %xmm1
+; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: cvtss2sd %xmm0, %xmm0
-; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; CHECK-NEXT: cvtss2sd %xmm1, %xmm1
; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT: cvtss2sd %xmm2, %xmm2
; CHECK-NEXT: movsd %xmm2, -{{[0-9]+}}(%rsp)
@@ -5573,8 +5573,8 @@ entry:
define <4 x double> @constrained_vector_fpext_v4f32() #0 {
; CHECK-LABEL: constrained_vector_fpext_v4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: cvtps2pd {{.*}}(%rip), %xmm0
; CHECK-NEXT: cvtps2pd {{.*}}(%rip), %xmm1
+; CHECK-NEXT: cvtps2pd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fpext_v4f32:
@@ -5694,9 +5694,9 @@ define <3 x double> @constrained_vector_ceil_v3f64() #0 {
; CHECK-NEXT: callq ceil
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
-; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -5822,9 +5822,9 @@ define <3 x double> @constrained_vector_floor_v3f64() #0 {
; CHECK-NEXT: callq floor
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
-; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -5972,9 +5972,9 @@ define <3 x double> @constrained_vector_round_v3f64() #0 {
; CHECK-NEXT: callq round
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
-; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -6112,9 +6112,9 @@ define <3 x double> @constrained_vector_trunc_v3f64() #0 {
; CHECK-NEXT: callq trunc
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
-; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@@ -6396,8 +6396,8 @@ entry:
define <3 x double> @constrained_vector_sitofp_v3f64_v3i64(<3 x i64> %x) #0 {
; CHECK-LABEL: constrained_vector_sitofp_v3f64_v3i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: cvtsi2sd %rdi, %xmm0
; CHECK-NEXT: cvtsi2sd %rsi, %xmm1
+; CHECK-NEXT: cvtsi2sd %rdi, %xmm0
; CHECK-NEXT: cvtsi2sd %rdx, %xmm2
; CHECK-NEXT: movsd %xmm2, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl -{{[0-9]+}}(%rsp)
@@ -7255,15 +7255,15 @@ entry:
define <4 x double> @constrained_vector_uitofp_v4f64_v4i32(<4 x i32> %x) #0 {
; CHECK-LABEL: constrained_vector_uitofp_v4f64_v4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: xorpd %xmm2, %xmm2
-; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; CHECK-NEXT: movapd {{.*#+}} xmm3 = [4.503599627370496E+15,4.503599627370496E+15]
-; CHECK-NEXT: orpd %xmm3, %xmm0
-; CHECK-NEXT: subpd %xmm3, %xmm0
+; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; CHECK-NEXT: movapd {{.*#+}} xmm3 = [4.503599627370496E+15,4.503599627370496E+15]
; CHECK-NEXT: orpd %xmm3, %xmm1
; CHECK-NEXT: subpd %xmm3, %xmm1
+; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; CHECK-NEXT: orpd %xmm3, %xmm0
+; CHECK-NEXT: subpd %xmm3, %xmm0
; CHECK-NEXT: retq
;
; AVX1-LABEL: constrained_vector_uitofp_v4f64_v4i32:
@@ -7331,22 +7331,22 @@ define <4 x double> @constrained_vector_uitofp_v4f64_v4i64(<4 x i64> %x) #0 {
; CHECK-LABEL: constrained_vector_uitofp_v4f64_v4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
-; CHECK-NEXT: movdqa %xmm0, %xmm3
+; CHECK-NEXT: movdqa %xmm1, %xmm3
; CHECK-NEXT: pand %xmm2, %xmm3
; CHECK-NEXT: movdqa {{.*#+}} xmm4 = [4841369599423283200,4841369599423283200]
; CHECK-NEXT: por %xmm4, %xmm3
-; CHECK-NEXT: psrlq $32, %xmm0
-; CHECK-NEXT: movdqa {{.*#+}} xmm5 = [4985484787499139072,4985484787499139072]
-; CHECK-NEXT: por %xmm5, %xmm0
-; CHECK-NEXT: movapd {{.*#+}} xmm6 = [1.9342813118337666E+25,1.9342813118337666E+25]
-; CHECK-NEXT: subpd %xmm6, %xmm0
-; CHECK-NEXT: addpd %xmm3, %xmm0
-; CHECK-NEXT: pand %xmm1, %xmm2
-; CHECK-NEXT: por %xmm4, %xmm2
; CHECK-NEXT: psrlq $32, %xmm1
+; CHECK-NEXT: movdqa {{.*#+}} xmm5 = [4985484787499139072,4985484787499139072]
; CHECK-NEXT: por %xmm5, %xmm1
+; CHECK-NEXT: movapd {{.*#+}} xmm6 = [1.9342813118337666E+25,1.9342813118337666E+25]
; CHECK-NEXT: subpd %xmm6, %xmm1
-; CHECK-NEXT: addpd %xmm2, %xmm1
+; CHECK-NEXT: addpd %xmm3, %xmm1
+; CHECK-NEXT: pand %xmm0, %xmm2
+; CHECK-NEXT: por %xmm4, %xmm2
+; CHECK-NEXT: psrlq $32, %xmm0
+; CHECK-NEXT: por %xmm5, %xmm0
+; CHECK-NEXT: subpd %xmm6, %xmm0
+; CHECK-NEXT: addpd %xmm2, %xmm0
; CHECK-NEXT: retq
;
; AVX1-LABEL: constrained_vector_uitofp_v4f64_v4i64: