summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorCraig Topper <craig.topper@intel.com>2018-09-14 18:27:09 +0000
committerCraig Topper <craig.topper@intel.com>2018-09-14 18:27:09 +0000
commitac356cac0c8a870e8f213df05bbb79714e3e9ac7 (patch)
treef149d87e323e4f89f208b374401e65cce5d0cf59
parent0039792d7bb53c5000063872d91f5f6bd174be5a (diff)
downloadbcm5719-llvm-ac356cac0c8a870e8f213df05bbb79714e3e9ac7.tar.gz
bcm5719-llvm-ac356cac0c8a870e8f213df05bbb79714e3e9ac7.zip
[X86] Re-generate test checks using current version of the script. NFC
The regular expression used for stack accesses is different today.

llvm-svn: 342256
-rw-r--r--  llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll | 20
-rw-r--r--  llvm/test/CodeGen/X86/pmul.ll                  | 20
2 files changed, 20 insertions, 20 deletions
diff --git a/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll b/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll
index ea1780b7c5d..9bdb19a7b57 100644
--- a/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll
+++ b/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll
@@ -871,7 +871,7 @@ define x86_regcallcc <32 x float> @testf32_inp(<32 x float> %a, <32 x float> %b,
; X32-LABEL: testf32_inp:
; X32: # %bb.0:
; X32-NEXT: subl $44, %esp
-; X32-NEXT: vmovups %xmm7, {{[0-9]+}}(%esp) # 16-byte Spill
+; X32-NEXT: vmovups %xmm7, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X32-NEXT: vmovups %xmm6, (%esp) # 16-byte Spill
; X32-NEXT: vaddps %zmm2, %zmm0, %zmm6
; X32-NEXT: vaddps %zmm3, %zmm1, %zmm7
@@ -882,7 +882,7 @@ define x86_regcallcc <32 x float> @testf32_inp(<32 x float> %a, <32 x float> %b,
; X32-NEXT: vaddps %zmm4, %zmm0, %zmm0
; X32-NEXT: vaddps %zmm5, %zmm1, %zmm1
; X32-NEXT: vmovups (%esp), %xmm6 # 16-byte Reload
-; X32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm7 # 16-byte Reload
+; X32-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm7 # 16-byte Reload
; X32-NEXT: addl $44, %esp
; X32-NEXT: retl
;
@@ -923,14 +923,14 @@ define x86_regcallcc i32 @testi32_inp(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a
; X32-NEXT: pushl %ebp
; X32-NEXT: pushl %ebx
; X32-NEXT: subl $20, %esp
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edi, %esi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edx, %ebx
; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %eax, %edx
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: subl %ecx, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl %edi, %ebp
@@ -942,7 +942,7 @@ define x86_regcallcc i32 @testi32_inp(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a
; X32-NEXT: subl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: imull %ebx, %ecx
; X32-NEXT: addl %ecx, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: movl %ebx, %ebp
; X32-NEXT: subl {{[0-9]+}}(%esp), %ebp
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -950,10 +950,10 @@ define x86_regcallcc i32 @testi32_inp(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: imull %ebp, %eax
; X32-NEXT: addl %eax, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X32-NEXT: movl (%esp), %ebp # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ebp # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx
; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
; X32-NEXT: imull %eax, %edi
diff --git a/llvm/test/CodeGen/X86/pmul.ll b/llvm/test/CodeGen/X86/pmul.ll
index 0555f167119..36f06a64a65 100644
--- a/llvm/test/CodeGen/X86/pmul.ll
+++ b/llvm/test/CodeGen/X86/pmul.ll
@@ -283,12 +283,12 @@ define <4 x i32> @mul_v4i32spill(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE2-LABEL: mul_v4i32spill:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: subq $40, %rsp
-; SSE2-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE2-NEXT: callq foo
; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE2-NEXT: pmuludq %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
@@ -301,22 +301,22 @@ define <4 x i32> @mul_v4i32spill(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE41-LABEL: mul_v4i32spill:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: subq $40, %rsp
-; SSE41-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE41-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE41-NEXT: callq foo
; SSE41-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
-; SSE41-NEXT: pmulld {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
+; SSE41-NEXT: pmulld {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE41-NEXT: addq $40, %rsp
; SSE41-NEXT: retq
;
; AVX-LABEL: mul_v4i32spill:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
-; AVX-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: callq foo
; AVX-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
-; AVX-NEXT: vpmulld {{[0-9]+}}(%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: vpmulld {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: retq
entry:
@@ -330,13 +330,13 @@ define <2 x i64> @mul_v2i64spill(<2 x i64> %i, <2 x i64> %j) nounwind {
; SSE-LABEL: mul_v2i64spill:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $40, %rsp
-; SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: callq foo
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
-; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: pmuludq %xmm3, %xmm2
; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: psrlq $32, %xmm1
@@ -351,12 +351,12 @@ define <2 x i64> @mul_v2i64spill(<2 x i64> %i, <2 x i64> %j) nounwind {
; AVX-LABEL: mul_v2i64spill:
; AVX: # %bb.0: # %entry
; AVX-NEXT: subq $40, %rsp
-; AVX-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: callq foo
; AVX-NEXT: vmovdqa (%rsp), %xmm3 # 16-byte Reload
; AVX-NEXT: vpsrlq $32, %xmm3, %xmm0
-; AVX-NEXT: vmovdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; AVX-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpsrlq $32, %xmm2, %xmm1
; AVX-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
OpenPOWER on IntegriCloud