author    Igor Breger <igor.breger@intel.com>    2017-06-19 13:12:57 +0000
committer Igor Breger <igor.breger@intel.com>    2017-06-19 13:12:57 +0000
commit    bd2dedaa38b13325fe8b2b0e1536fcf5dbe37036 (patch)
tree      247225d5f70c46366b860fc4839000f4367473f3 /llvm/test
parent    5f746c8e2704d311cc01c8edb5460a0cb19c74e2 (diff)
[GlobalISel][X86] Fold FI/G_GEP into LDR/STR instruction addressing mode.
Summary: Implement some of the simplest addressing modes. It should help to test the ABI.

Reviewers: zvi, guyblank

Reviewed By: guyblank

Subscribers: rovka, llvm-commits, kristof.beyls

Differential Revision: https://reviews.llvm.org/D33888

llvm-svn: 305691
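For context, this change teaches the X86 GlobalISel instruction selector to fold a frame index, or a G_GEP with a constant offset, directly into the load/store addressing mode (base + displacement) instead of first materializing the address with an LEA; that is why the LEA/MOV pairs in the test diffs below collapse into single memory operations. What follows is a minimal, self-contained C++ sketch of that folding idea only; the struct layout and the selectAddr helper are illustrative assumptions, not LLVM's actual X86InstructionSelector API.

    // Sketch: fold an address-producing generic instruction into an x86
    // addressing mode instead of emitting a separate LEA.
    #include <cstdint>
    #include <optional>

    struct AddressMode {
      int BaseReg = 0;        // virtual register holding the base pointer
      int FrameIndex = -1;    // fixed-stack slot, if the base is a frame index
      int64_t Disp = 0;       // immediate displacement
    };

    // Hypothetical view of the generic instructions involved.
    struct GInstr {
      enum Kind { FrameIndex, Gep, Other } K;
      int BaseReg;            // for Gep: the pointer operand
      int64_t Offset;         // for Gep: the constant offset, already scaled
      int FI;                 // for FrameIndex: the slot number
    };

    // Try to fold the address computation into the memory operand. Offsets
    // that do not fit a signed 32-bit displacement are rejected, mirroring
    // the test_gep_folding_largeGepIndex test added in this commit.
    std::optional<AddressMode> selectAddr(const GInstr &Addr) {
      AddressMode AM;
      switch (Addr.K) {
      case GInstr::FrameIndex:
        AM.FrameIndex = Addr.FI;
        return AM;
      case GInstr::Gep:
        if (Addr.Offset < INT32_MIN || Addr.Offset > INT32_MAX)
          return std::nullopt;  // keep the LEA; displacement not encodable
        AM.BaseReg = Addr.BaseReg;
        AM.Disp = Addr.Offset;
        return AM;
      case GInstr::Other:
        return std::nullopt;    // fall back to using the register as-is
      }
      return std::nullopt;
    }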
Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll              | 34
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/callingconv.ll             | 92
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/ext.ll                     | 15
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll        | 36
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/memop-scalar.ll            | 48
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir| 60
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir    | 92
7 files changed, 206 insertions, 171 deletions
diff --git a/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll b/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll
index 55c82546403..a5dc7906363 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll
@@ -18,18 +18,10 @@ define i64 @test_add_i64(i64 %arg1, i64 %arg2) {
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: .Lcfi2:
; X32-NEXT: .cfi_def_cfa_register %ebp
-; X32-NEXT: pushl %esi
-; X32-NEXT: .Lcfi3:
-; X32-NEXT: .cfi_offset %esi, -12
-; X32-NEXT: leal 8(%ebp), %ecx
-; X32-NEXT: leal 12(%ebp), %esi
-; X32-NEXT: leal 16(%ebp), %eax
-; X32-NEXT: movl (%eax), %eax
-; X32-NEXT: leal 20(%ebp), %edx
-; X32-NEXT: movl (%edx), %edx
-; X32-NEXT: addl (%ecx), %eax
-; X32-NEXT: adcl (%esi), %edx
-; X32-NEXT: popl %esi
+; X32-NEXT: movl 16(%ebp), %eax
+; X32-NEXT: movl 20(%ebp), %edx
+; X32-NEXT: addl 8(%ebp), %eax
+; X32-NEXT: adcl 12(%ebp), %edx
; X32-NEXT: popl %ebp
; X32-NEXT: retl
%ret = add i64 %arg1, %arg2
@@ -46,10 +38,8 @@ define i32 @test_add_i32(i32 %arg1, i32 %arg2) {
;
; X32-LABEL: test_add_i32:
; X32: # BB#0:
-; X32-NEXT: leal 4(%esp), %ecx
-; X32-NEXT: leal 8(%esp), %eax
-; X32-NEXT: movl (%eax), %eax
-; X32-NEXT: addl (%ecx), %eax
+; X32-NEXT: movl 8(%esp), %eax
+; X32-NEXT: addl 4(%esp), %eax
; X32-NEXT: retl
%ret = add i32 %arg1, %arg2
ret i32 %ret
@@ -66,10 +56,8 @@ define i16 @test_add_i16(i16 %arg1, i16 %arg2) {
;
; X32-LABEL: test_add_i16:
; X32: # BB#0:
-; X32-NEXT: leal 4(%esp), %ecx
-; X32-NEXT: leal 8(%esp), %eax
-; X32-NEXT: movzwl (%eax), %eax
-; X32-NEXT: addw (%ecx), %ax
+; X32-NEXT: movzwl 8(%esp), %eax
+; X32-NEXT: addw 4(%esp), %ax
; X32-NEXT: retl
%ret = add i16 %arg1, %arg2
ret i16 %ret
@@ -84,10 +72,8 @@ define i8 @test_add_i8(i8 %arg1, i8 %arg2) {
;
; X32-LABEL: test_add_i8:
; X32: # BB#0:
-; X32-NEXT: leal 4(%esp), %ecx
-; X32-NEXT: leal 8(%esp), %eax
-; X32-NEXT: movb (%eax), %al
-; X32-NEXT: addb (%ecx), %al
+; X32-NEXT: movb 8(%esp), %al
+; X32-NEXT: addb 4(%esp), %al
; X32-NEXT: retl
%ret = add i8 %arg1, %arg2
ret i8 %ret
diff --git a/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll b/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll
index 997115d4d90..8a14436e29d 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll
@@ -38,16 +38,10 @@ define i64 @test_ret_i64() {
}
define i8 @test_arg_i8(i8 %a) {
-; X32_GISEL-LABEL: test_arg_i8:
-; X32_GISEL: # BB#0:
-; X32_GISEL-NEXT: leal 4(%esp), %eax
-; X32_GISEL-NEXT: movb (%eax), %al
-; X32_GISEL-NEXT: retl
-;
-; X32_ISEL-LABEL: test_arg_i8:
-; X32_ISEL: # BB#0:
-; X32_ISEL-NEXT: movb 4(%esp), %al
-; X32_ISEL-NEXT: retl
+; X32-LABEL: test_arg_i8:
+; X32: # BB#0:
+; X32-NEXT: movb 4(%esp), %al
+; X32-NEXT: retl
;
; X64-LABEL: test_arg_i8:
; X64: # BB#0:
@@ -57,16 +51,10 @@ define i8 @test_arg_i8(i8 %a) {
}
define i16 @test_arg_i16(i16 %a) {
-; X32_GISEL-LABEL: test_arg_i16:
-; X32_GISEL: # BB#0:
-; X32_GISEL-NEXT: leal 4(%esp), %eax
-; X32_GISEL-NEXT: movzwl (%eax), %eax
-; X32_GISEL-NEXT: retl
-;
-; X32_ISEL-LABEL: test_arg_i16:
-; X32_ISEL: # BB#0:
-; X32_ISEL-NEXT: movzwl 4(%esp), %eax
-; X32_ISEL-NEXT: retl
+; X32-LABEL: test_arg_i16:
+; X32: # BB#0:
+; X32-NEXT: movzwl 4(%esp), %eax
+; X32-NEXT: retl
;
; X64-LABEL: test_arg_i16:
; X64: # BB#0:
@@ -76,16 +64,10 @@ define i16 @test_arg_i16(i16 %a) {
}
define i32 @test_arg_i32(i32 %a) {
-; X32_GISEL-LABEL: test_arg_i32:
-; X32_GISEL: # BB#0:
-; X32_GISEL-NEXT: leal 4(%esp), %eax
-; X32_GISEL-NEXT: movl (%eax), %eax
-; X32_GISEL-NEXT: retl
-;
-; X32_ISEL-LABEL: test_arg_i32:
-; X32_ISEL: # BB#0:
-; X32_ISEL-NEXT: movl 4(%esp), %eax
-; X32_ISEL-NEXT: retl
+; X32-LABEL: test_arg_i32:
+; X32: # BB#0:
+; X32-NEXT: movl 4(%esp), %eax
+; X32-NEXT: retl
;
; X64-LABEL: test_arg_i32:
; X64: # BB#0:
@@ -95,19 +77,11 @@ define i32 @test_arg_i32(i32 %a) {
}
define i64 @test_arg_i64(i64 %a) {
-; X32_GISEL-LABEL: test_arg_i64:
-; X32_GISEL: # BB#0:
-; X32_GISEL-NEXT: leal 4(%esp), %eax
-; X32_GISEL-NEXT: movl (%eax), %eax
-; X32_GISEL-NEXT: leal 8(%esp), %ecx
-; X32_GISEL-NEXT: movl (%ecx), %edx
-; X32_GISEL-NEXT: retl
-;
-; X32_ISEL-LABEL: test_arg_i64:
-; X32_ISEL: # BB#0:
-; X32_ISEL-NEXT: movl 4(%esp), %eax
-; X32_ISEL-NEXT: movl 8(%esp), %edx
-; X32_ISEL-NEXT: retl
+; X32-LABEL: test_arg_i64:
+; X32: # BB#0:
+; X32-NEXT: movl 4(%esp), %eax
+; X32-NEXT: movl 8(%esp), %edx
+; X32-NEXT: retl
;
; X64-LABEL: test_arg_i64:
; X64: # BB#0:
@@ -117,30 +91,16 @@ define i64 @test_arg_i64(i64 %a) {
}
define i64 @test_i64_args_8(i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %arg5, i64 %arg6, i64 %arg7, i64 %arg8) {
-; X32_GISEL-LABEL: test_i64_args_8:
-; X32_GISEL: # BB#0:
-; X32_GISEL-NEXT: leal 60(%esp), %eax
-; X32_GISEL-NEXT: movl (%eax), %eax
-; X32_GISEL-NEXT: leal 64(%esp), %ecx
-; X32_GISEL-NEXT: movl (%ecx), %edx
-; X32_GISEL-NEXT: retl
-;
-; X32_ISEL-LABEL: test_i64_args_8:
-; X32_ISEL: # BB#0:
-; X32_ISEL-NEXT: movl 60(%esp), %eax
-; X32_ISEL-NEXT: movl 64(%esp), %edx
-; X32_ISEL-NEXT: retl
-;
-; X64_GISEL-LABEL: test_i64_args_8:
-; X64_GISEL: # BB#0:
-; X64_GISEL-NEXT: leaq 16(%rsp), %rax
-; X64_GISEL-NEXT: movq (%rax), %rax
-; X64_GISEL-NEXT: retq
+; X32-LABEL: test_i64_args_8:
+; X32: # BB#0:
+; X32-NEXT: movl 60(%esp), %eax
+; X32-NEXT: movl 64(%esp), %edx
+; X32-NEXT: retl
;
-; X64_ISEL-LABEL: test_i64_args_8:
-; X64_ISEL: # BB#0:
-; X64_ISEL-NEXT: movq 16(%rsp), %rax
-; X64_ISEL-NEXT: retq
+; X64-LABEL: test_i64_args_8:
+; X64: # BB#0:
+; X64-NEXT: movq 16(%rsp), %rax
+; X64-NEXT: retq
ret i64 %arg8
}
diff --git a/llvm/test/CodeGen/X86/GlobalISel/ext.ll b/llvm/test/CodeGen/X86/GlobalISel/ext.ll
index 27aecd118b3..392c973c120 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/ext.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/ext.ll
@@ -11,8 +11,7 @@ define i32 @test_zext_i1(i32 %a) {
;
; X32-LABEL: test_zext_i1:
; X32: # BB#0:
-; X32-NEXT: leal 4(%esp), %eax
-; X32-NEXT: movl (%eax), %eax
+; X32-NEXT: movl 4(%esp), %eax
; X32-NEXT: andl $1, %eax
; X32-NEXT: retl
%val = trunc i32 %a to i1
@@ -28,8 +27,7 @@ define i32 @test_zext_i8(i8 %val) {
;
; X32-LABEL: test_zext_i8:
; X32: # BB#0:
-; X32-NEXT: leal 4(%esp), %eax
-; X32-NEXT: movzbl (%eax), %eax
+; X32-NEXT: movzbl 4(%esp), %eax
; X32-NEXT: retl
%r = zext i8 %val to i32
ret i32 %r
@@ -43,8 +41,7 @@ define i32 @test_zext_i16(i16 %val) {
;
; X32-LABEL: test_zext_i16:
; X32: # BB#0:
-; X32-NEXT: leal 4(%esp), %eax
-; X32-NEXT: movzwl (%eax), %eax
+; X32-NEXT: movzwl 4(%esp), %eax
; X32-NEXT: retl
%r = zext i16 %val to i32
ret i32 %r
@@ -58,8 +55,7 @@ define i32 @test_sext_i8(i8 %val) {
;
; X32-LABEL: test_sext_i8:
; X32: # BB#0:
-; X32-NEXT: leal 4(%esp), %eax
-; X32-NEXT: movsbl (%eax), %eax
+; X32-NEXT: movsbl 4(%esp), %eax
; X32-NEXT: retl
%r = sext i8 %val to i32
ret i32 %r
@@ -73,8 +69,7 @@ define i32 @test_sext_i16(i16 %val) {
;
; X32-LABEL: test_sext_i16:
; X32: # BB#0:
-; X32-NEXT: leal 4(%esp), %eax
-; X32-NEXT: movswl (%eax), %eax
+; X32-NEXT: movswl 4(%esp), %eax
; X32-NEXT: retl
%r = sext i16 %val to i32
ret i32 %r
diff --git a/llvm/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll b/llvm/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
index 5df52c5a058..2757e649325 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
@@ -7,8 +7,7 @@
define i8 @test_load_i8(i8 * %p1) {
; ALL-LABEL: test_load_i8:
; ALL: # BB#0:
-; ALL-NEXT: leal 4(%esp), %eax
-; ALL-NEXT: movl (%eax), %eax
+; ALL-NEXT: movl 4(%esp), %eax
; ALL-NEXT: movb (%eax), %al
; ALL-NEXT: retl
%r = load i8, i8* %p1
@@ -18,8 +17,7 @@ define i8 @test_load_i8(i8 * %p1) {
define i16 @test_load_i16(i16 * %p1) {
; ALL-LABEL: test_load_i16:
; ALL: # BB#0:
-; ALL-NEXT: leal 4(%esp), %eax
-; ALL-NEXT: movl (%eax), %eax
+; ALL-NEXT: movl 4(%esp), %eax
; ALL-NEXT: movzwl (%eax), %eax
; ALL-NEXT: retl
%r = load i16, i16* %p1
@@ -29,8 +27,7 @@ define i16 @test_load_i16(i16 * %p1) {
define i32 @test_load_i32(i32 * %p1) {
; ALL-LABEL: test_load_i32:
; ALL: # BB#0:
-; ALL-NEXT: leal 4(%esp), %eax
-; ALL-NEXT: movl (%eax), %eax
+; ALL-NEXT: movl 4(%esp), %eax
; ALL-NEXT: movl (%eax), %eax
; ALL-NEXT: retl
%r = load i32, i32* %p1
@@ -40,10 +37,8 @@ define i32 @test_load_i32(i32 * %p1) {
define i8 * @test_store_i8(i8 %val, i8 * %p1) {
; ALL-LABEL: test_store_i8:
; ALL: # BB#0:
-; ALL-NEXT: leal 4(%esp), %eax
-; ALL-NEXT: movb (%eax), %cl
-; ALL-NEXT: leal 8(%esp), %eax
-; ALL-NEXT: movl (%eax), %eax
+; ALL-NEXT: movb 4(%esp), %cl
+; ALL-NEXT: movl 8(%esp), %eax
; ALL-NEXT: movb %cl, (%eax)
; ALL-NEXT: retl
store i8 %val, i8* %p1
@@ -53,10 +48,8 @@ define i8 * @test_store_i8(i8 %val, i8 * %p1) {
define i16 * @test_store_i16(i16 %val, i16 * %p1) {
; ALL-LABEL: test_store_i16:
; ALL: # BB#0:
-; ALL-NEXT: leal 4(%esp), %eax
-; ALL-NEXT: movzwl (%eax), %ecx
-; ALL-NEXT: leal 8(%esp), %eax
-; ALL-NEXT: movl (%eax), %eax
+; ALL-NEXT: movzwl 4(%esp), %ecx
+; ALL-NEXT: movl 8(%esp), %eax
; ALL-NEXT: movw %cx, (%eax)
; ALL-NEXT: retl
store i16 %val, i16* %p1
@@ -66,10 +59,8 @@ define i16 * @test_store_i16(i16 %val, i16 * %p1) {
define i32 * @test_store_i32(i32 %val, i32 * %p1) {
; ALL-LABEL: test_store_i32:
; ALL: # BB#0:
-; ALL-NEXT: leal 4(%esp), %eax
-; ALL-NEXT: movl (%eax), %ecx
-; ALL-NEXT: leal 8(%esp), %eax
-; ALL-NEXT: movl (%eax), %eax
+; ALL-NEXT: movl 4(%esp), %ecx
+; ALL-NEXT: movl 8(%esp), %eax
; ALL-NEXT: movl %ecx, (%eax)
; ALL-NEXT: retl
store i32 %val, i32* %p1
@@ -79,8 +70,7 @@ define i32 * @test_store_i32(i32 %val, i32 * %p1) {
define i32* @test_load_ptr(i32** %ptr1) {
; ALL-LABEL: test_load_ptr:
; ALL: # BB#0:
-; ALL-NEXT: leal 4(%esp), %eax
-; ALL-NEXT: movl (%eax), %eax
+; ALL-NEXT: movl 4(%esp), %eax
; ALL-NEXT: movl (%eax), %eax
; ALL-NEXT: retl
%p = load i32*, i32** %ptr1
@@ -90,10 +80,8 @@ define i32* @test_load_ptr(i32** %ptr1) {
define void @test_store_ptr(i32** %ptr1, i32* %a) {
; ALL-LABEL: test_store_ptr:
; ALL: # BB#0:
-; ALL-NEXT: leal 4(%esp), %eax
-; ALL-NEXT: movl (%eax), %eax
-; ALL-NEXT: leal 8(%esp), %ecx
-; ALL-NEXT: movl (%ecx), %ecx
+; ALL-NEXT: movl 4(%esp), %eax
+; ALL-NEXT: movl 8(%esp), %ecx
; ALL-NEXT: movl %ecx, (%eax)
; ALL-NEXT: retl
store i32* %a, i32** %ptr1
diff --git a/llvm/test/CodeGen/X86/GlobalISel/memop-scalar.ll b/llvm/test/CodeGen/X86/GlobalISel/memop-scalar.ll
index d3d4b297a80..2e04b3cf20b 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/memop-scalar.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/memop-scalar.ll
@@ -45,11 +45,11 @@ define float @test_load_float(float * %p1) {
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: retq
;
-; ALL_AVX-LABEL: test_load_float:
-; ALL_AVX: # BB#0:
-; ALL_AVX-NEXT: movl (%rdi), %eax
-; ALL_AVX-NEXT: vmovd %eax, %xmm0
-; ALL_AVX-NEXT: retq
+; ALL-LABEL: test_load_float:
+; ALL: # BB#0:
+; ALL-NEXT: movl (%rdi), %eax
+; ALL-NEXT: movd %eax, %xmm0
+; ALL-NEXT: retq
%r = load float, float* %p1
ret float %r
}
@@ -61,11 +61,11 @@ define double @test_load_double(double * %p1) {
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: retq
;
-; ALL_AVX-LABEL: test_load_double:
-; ALL_AVX: # BB#0:
-; ALL_AVX-NEXT: movq (%rdi), %rax
-; ALL_AVX-NEXT: vmovq %rax, %xmm0
-; ALL_AVX-NEXT: retq
+; ALL-LABEL: test_load_double:
+; ALL: # BB#0:
+; ALL-NEXT: movq (%rdi), %rax
+; ALL-NEXT: movq %rax, %xmm0
+; ALL-NEXT: retq
%r = load double, double* %p1
ret double %r
}
@@ -122,7 +122,6 @@ define double * @test_store_double(double %val, double * %p1) {
; SSE_GREEDY-NEXT: movsd %xmm0, (%rdi)
; SSE_GREEDY-NEXT: movq %rdi, %rax
; SSE_GREEDY-NEXT: retq
-;
store double %val, double* %p1
ret double * %p1;
}
@@ -144,3 +143,30 @@ define void @test_store_ptr(i32** %ptr1, i32* %a) {
store i32* %a, i32** %ptr1
ret void
}
+
+define i32 @test_gep_folding(i32* %arr, i32 %val) {
+; ALL-LABEL: test_gep_folding:
+; ALL: # BB#0:
+; ALL-NEXT: movl %esi, 20(%rdi)
+; ALL-NEXT: movl 20(%rdi), %eax
+; ALL-NEXT: retq
+ %arrayidx = getelementptr i32, i32* %arr, i32 5
+ store i32 %val, i32* %arrayidx
+ %r = load i32, i32* %arrayidx
+ ret i32 %r
+}
+
+; check that the gep index is not folded into the memory operand
+define i32 @test_gep_folding_largeGepIndex(i32* %arr, i32 %val) {
+; ALL-LABEL: test_gep_folding_largeGepIndex:
+; ALL: # BB#0:
+; ALL-NEXT: movabsq $228719476720, %rax # imm = 0x3540BE3FF0
+; ALL-NEXT: leaq (%rdi,%rax), %rax
+; ALL-NEXT: movl %esi, (%rax)
+; ALL-NEXT: movl (%rax), %eax
+; ALL-NEXT: retq
+ %arrayidx = getelementptr i32, i32* %arr, i64 57179869180
+ store i32 %val, i32* %arrayidx
+ %r = load i32, i32* %arrayidx
+ ret i32 %r
+}
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
index 09f414b48a8..af09ea04929 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
@@ -50,7 +50,7 @@ legalized: true
regBankSelected: true
# ALL: registers:
# ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' }
-# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gpr, preferred-register: '' }
# ALL-NEXT: - { id: 2, class: gr8, preferred-register: '' }
registers:
- { id: 0, class: gpr }
@@ -58,8 +58,7 @@ registers:
- { id: 2, class: gpr }
fixedStack:
- { id: 0, offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false }
-# ALL: %1 = LEA32r %fixed-stack.0, 1, _, 0, _
-# ALL-NEXT: %0 = MOV32rm %1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
+# ALL: %0 = MOV32rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
# ALL-NEXT: %2 = MOV8rm %0, 1, _, 0, _ :: (load 1 from %ir.p1)
# ALL-NEXT: %al = COPY %2
# ALL-NEXT: RET 0, implicit %al
@@ -80,7 +79,7 @@ legalized: true
regBankSelected: true
# ALL: registers:
# ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' }
-# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gpr, preferred-register: '' }
# ALL-NEXT: - { id: 2, class: gr16, preferred-register: '' }
registers:
- { id: 0, class: gpr }
@@ -88,8 +87,7 @@ registers:
- { id: 2, class: gpr }
fixedStack:
- { id: 0, offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false }
-# ALL: %1 = LEA32r %fixed-stack.0, 1, _, 0, _
-# ALL-NEXT: %0 = MOV32rm %1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
+# ALL: %0 = MOV32rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
# ALL-NEXT: %2 = MOV16rm %0, 1, _, 0, _ :: (load 2 from %ir.p1)
# ALL-NEXT: %ax = COPY %2
# ALL-NEXT: RET 0, implicit %ax
@@ -110,7 +108,7 @@ legalized: true
regBankSelected: true
# ALL: registers:
# ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' }
-# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gpr, preferred-register: '' }
# ALL-NEXT: - { id: 2, class: gr32, preferred-register: '' }
registers:
- { id: 0, class: gpr }
@@ -118,8 +116,7 @@ registers:
- { id: 2, class: gpr }
fixedStack:
- { id: 0, offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false }
-# ALL: %1 = LEA32r %fixed-stack.0, 1, _, 0, _
-# ALL-NEXT: %0 = MOV32rm %1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
+# ALL: %0 = MOV32rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
# ALL-NEXT: %2 = MOV32rm %0, 1, _, 0, _ :: (load 4 from %ir.p1)
# ALL-NEXT: %eax = COPY %2
# ALL-NEXT: RET 0, implicit %eax
@@ -141,8 +138,8 @@ regBankSelected: true
# ALL: registers:
# ALL-NEXT: - { id: 0, class: gr8, preferred-register: '' }
# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
-# ALL-NEXT: - { id: 2, class: gr32, preferred-register: '' }
-# ALL-NEXT: - { id: 3, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gpr, preferred-register: '' }
+# ALL-NEXT: - { id: 3, class: gpr, preferred-register: '' }
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
@@ -151,10 +148,8 @@ registers:
fixedStack:
- { id: 0, offset: 4, size: 4, alignment: 4, isImmutable: true, isAliased: false }
- { id: 1, offset: 0, size: 1, alignment: 16, isImmutable: true, isAliased: false }
-# ALL: %2 = LEA32r %fixed-stack.0, 1, _, 0, _
-# ALL-NEXT: %0 = MOV8rm %2, 1, _, 0, _ :: (invariant load 1 from %fixed-stack.0, align 0)
-# ALL-NEXT: %3 = LEA32r %fixed-stack.1, 1, _, 0, _
-# ALL-NEXT: %1 = MOV32rm %3, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
+# ALL: %0 = MOV8rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 1 from %fixed-stack.0, align 0)
+# ALL-NEXT: %1 = MOV32rm %fixed-stack.1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
# ALL-NEXT: MOV8mr %1, 1, _, 0, _, %0 :: (store 1 into %ir.p1)
# ALL-NEXT: %eax = COPY %1
# ALL-NEXT: RET 0, implicit %eax
@@ -178,8 +173,8 @@ regBankSelected: true
# ALL: registers:
# ALL-NEXT: - { id: 0, class: gr16, preferred-register: '' }
# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
-# ALL-NEXT: - { id: 2, class: gr32, preferred-register: '' }
-# ALL-NEXT: - { id: 3, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gpr, preferred-register: '' }
+# ALL-NEXT: - { id: 3, class: gpr, preferred-register: '' }
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
@@ -188,10 +183,8 @@ registers:
fixedStack:
- { id: 0, offset: 4, size: 4, alignment: 4, isImmutable: true, isAliased: false }
- { id: 1, offset: 0, size: 2, alignment: 16, isImmutable: true, isAliased: false }
-# ALL: %2 = LEA32r %fixed-stack.0, 1, _, 0, _
-# ALL-NEXT: %0 = MOV16rm %2, 1, _, 0, _ :: (invariant load 2 from %fixed-stack.0, align 0)
-# ALL-NEXT: %3 = LEA32r %fixed-stack.1, 1, _, 0, _
-# ALL-NEXT: %1 = MOV32rm %3, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
+# ALL: %0 = MOV16rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 2 from %fixed-stack.0, align 0)
+# ALL-NEXT: %1 = MOV32rm %fixed-stack.1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
# ALL-NEXT: MOV16mr %1, 1, _, 0, _, %0 :: (store 2 into %ir.p1)
# ALL-NEXT: %eax = COPY %1
# ALL-NEXT: RET 0, implicit %eax
@@ -215,8 +208,8 @@ regBankSelected: true
# ALL: registers:
# ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' }
# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
-# ALL-NEXT: - { id: 2, class: gr32, preferred-register: '' }
-# ALL-NEXT: - { id: 3, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gpr, preferred-register: '' }
+# ALL-NEXT: - { id: 3, class: gpr, preferred-register: '' }
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
@@ -225,10 +218,8 @@ registers:
fixedStack:
- { id: 0, offset: 4, size: 4, alignment: 4, isImmutable: true, isAliased: false }
- { id: 1, offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false }
-# ALL: %2 = LEA32r %fixed-stack.0, 1, _, 0, _
-# ALL-NEXT: %0 = MOV32rm %2, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
-# ALL-NEXT: %3 = LEA32r %fixed-stack.1, 1, _, 0, _
-# ALL-NEXT: %1 = MOV32rm %3, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
+# ALL: %0 = MOV32rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
+# ALL-NEXT: %1 = MOV32rm %fixed-stack.1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
# ALL-NEXT: MOV32mr %1, 1, _, 0, _, %0 :: (store 4 into %ir.p1)
# ALL-NEXT: %eax = COPY %1
# ALL-NEXT: RET 0, implicit %eax
@@ -251,7 +242,7 @@ legalized: true
regBankSelected: true
# ALL: registers:
# ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' }
-# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gpr, preferred-register: '' }
# ALL-NEXT: - { id: 2, class: gr32, preferred-register: '' }
registers:
- { id: 0, class: gpr }
@@ -259,8 +250,7 @@ registers:
- { id: 2, class: gpr }
fixedStack:
- { id: 0, offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false }
-# ALL: %1 = LEA32r %fixed-stack.0, 1, _, 0, _
-# ALL-NEXT: %0 = MOV32rm %1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
+# ALL: %0 = MOV32rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
# ALL-NEXT: %2 = MOV32rm %0, 1, _, 0, _ :: (load 4 from %ir.ptr1)
# ALL-NEXT: %eax = COPY %2
# ALL-NEXT: RET 0, implicit %eax
@@ -282,8 +272,8 @@ regBankSelected: true
# ALL: registers:
# ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' }
# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
-# ALL-NEXT: - { id: 2, class: gr32, preferred-register: '' }
-# ALL-NEXT: - { id: 3, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gpr, preferred-register: '' }
+# ALL-NEXT: - { id: 3, class: gpr, preferred-register: '' }
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
@@ -292,10 +282,8 @@ registers:
fixedStack:
- { id: 0, offset: 4, size: 4, alignment: 4, isImmutable: true, isAliased: false }
- { id: 1, offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false }
-# ALL: %2 = LEA32r %fixed-stack.0, 1, _, 0, _
-# ALL-NEXT: %0 = MOV32rm %2, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
-# ALL-NEXT: %3 = LEA32r %fixed-stack.1, 1, _, 0, _
-# ALL-NEXT: %1 = MOV32rm %3, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
+# ALL: %0 = MOV32rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
+# ALL-NEXT: %1 = MOV32rm %fixed-stack.1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
# ALL-NEXT: MOV32mr %0, 1, _, 0, _, %1 :: (store 4 into %ir.ptr1)
# ALL-NEXT: RET 0
body: |
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
index 6d03d7525d2..9aaeb09b120 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
@@ -83,6 +83,20 @@
store i32* %a, i32** %ptr1
ret void
}
+
+ define i32 @test_gep_folding(i32* %arr, i32 %val) {
+ %arrayidx = getelementptr i32, i32* %arr, i32 5
+ store i32 %val, i32* %arrayidx
+ %r = load i32, i32* %arrayidx
+ ret i32 %r
+ }
+
+ define i32 @test_gep_folding_largeGepIndex(i32* %arr, i32 %val) #0 {
+ %arrayidx = getelementptr i32, i32* %arr, i64 57179869180
+ store i32 %val, i32* %arrayidx
+ %r = load i32, i32* %arrayidx
+ ret i32 %r
+ }
...
---
# ALL-LABEL: name: test_load_i8
@@ -498,3 +512,81 @@ body: |
RET 0
...
+---
+name: test_gep_folding
+# ALL-LABEL: name: test_gep_folding
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr64, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gpr, preferred-register: '' }
+# ALL-NEXT: - { id: 3, class: gpr, preferred-register: '' }
+# ALL-NEXT: - { id: 4, class: gr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+ - { id: 4, class: gpr }
+# ALL: %0 = COPY %rdi
+# ALL-NEXT: %1 = COPY %esi
+# ALL-NEXT: MOV32mr %0, 1, _, 20, _, %1 :: (store 4 into %ir.arrayidx)
+# ALL-NEXT: %4 = MOV32rm %0, 1, _, 20, _ :: (load 4 from %ir.arrayidx)
+# ALL-NEXT: %eax = COPY %4
+# ALL-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %esi, %rdi
+
+ %0(p0) = COPY %rdi
+ %1(s32) = COPY %esi
+ %2(s64) = G_CONSTANT i64 20
+ %3(p0) = G_GEP %0, %2(s64)
+ G_STORE %1(s32), %3(p0) :: (store 4 into %ir.arrayidx)
+ %4(s32) = G_LOAD %3(p0) :: (load 4 from %ir.arrayidx)
+ %eax = COPY %4(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_gep_folding_largeGepIndex
+# ALL-LABEL: name: test_gep_folding_largeGepIndex
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr64, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr64_nosp, preferred-register: '' }
+# ALL-NEXT: - { id: 3, class: gr64, preferred-register: '' }
+# ALL-NEXT: - { id: 4, class: gr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+ - { id: 4, class: gpr }
+# ALL: %0 = COPY %rdi
+# ALL-NEXT: %1 = COPY %esi
+# ALL-NEXT: %2 = MOV64ri 228719476720
+# ALL-NEXT: %3 = LEA64r %0, 1, %2, 0, _
+# ALL-NEXT: MOV32mr %3, 1, _, 0, _, %1 :: (store 4 into %ir.arrayidx)
+# ALL-NEXT: %4 = MOV32rm %3, 1, _, 0, _ :: (load 4 from %ir.arrayidx)
+# ALL-NEXT: %eax = COPY %4
+# ALL-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %esi, %rdi
+
+ %0(p0) = COPY %rdi
+ %1(s32) = COPY %esi
+ %2(s64) = G_CONSTANT i64 228719476720
+ %3(p0) = G_GEP %0, %2(s64)
+ G_STORE %1(s32), %3(p0) :: (store 4 into %ir.arrayidx)
+ %4(s32) = G_LOAD %3(p0) :: (load 4 from %ir.arrayidx)
+ %eax = COPY %4(s32)
+ RET 0, implicit %eax
+
+...