author     Daniel Neilson <dneilson@azul.com>  2018-05-08 19:08:12 +0000
committer  Daniel Neilson <dneilson@azul.com>  2018-05-08 19:08:12 +0000
commit     65a7eb71f9a605b66b4f705a2a305d2e22e07903 (patch)
tree       7a3c69e899a204459c6433932366a824b00c9a1b /llvm
parent     d722d614028517591474c6bb629a14517ea2da11 (diff)
Changing constants in a test (NFC)
Summary: Change the lengths of the atomic memory intrinsics in a test to make sure that they don't get lowered into loads/stores if/when expansion of these intrinsics occurs in SelectionDAG.

llvm-svn: 331800
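As an illustrative sketch (not part of the patch): these intrinsics take the total byte count as the third argument and the element size as the fourth, so a small constant length such as 4 is a plausible candidate for inline expansion into atomic loads/stores, whereas a length of 1024 is expected to remain a call to the per-element-size runtime routine that the CHECK lines match. The %dst/%src names below are hypothetical placeholders.

; Hypothetical example mirroring the intrinsic signature used in the test.
; Small length: a future SelectionDAG expansion could plausibly lower this
; into a few unordered-atomic loads/stores rather than a libcall.
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dst, i8* align 4 %src, i32 4, i32 4)
; Large length: expected to stay a call to
; __llvm_memcpy_element_unordered_atomic_4, which is what the CHECK lines verify.
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dst, i8* align 4 %src, i32 1024, i32 4)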
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll  75
1 file changed, 39 insertions(+), 36 deletions(-)
diff --git a/llvm/test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll b/llvm/test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll
index 88778b317b9..0872da2a183 100644
--- a/llvm/test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll
@@ -2,46 +2,46 @@
define i8* @test_memcpy1(i8* %P, i8* %Q) {
; CHECK: test_memcpy
- call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1, i32 1)
+ call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1024, i32 1)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1, %edx
+ ; CHECK-DAG: movl $1024, %edx
; CHECK: __llvm_memcpy_element_unordered_atomic_1
}
define i8* @test_memcpy2(i8* %P, i8* %Q) {
; CHECK: test_memcpy2
- call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 2, i32 2)
+ call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1024, i32 2)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $2, %edx
+ ; CHECK-DAG: movl $1024, %edx
; CHECK: __llvm_memcpy_element_unordered_atomic_2
}
define i8* @test_memcpy4(i8* %P, i8* %Q) {
; CHECK: test_memcpy4
- call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 4, i32 4)
+ call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1024, i32 4)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $4, %edx
+ ; CHECK-DAG: movl $1024, %edx
; CHECK: __llvm_memcpy_element_unordered_atomic_4
}
define i8* @test_memcpy8(i8* %P, i8* %Q) {
; CHECK: test_memcpy8
- call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %P, i8* align 8 %Q, i32 8, i32 8)
+ call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %P, i8* align 8 %Q, i32 1024, i32 8)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $8, %edx
+ ; CHECK-DAG: movl $1024, %edx
; CHECK: __llvm_memcpy_element_unordered_atomic_8
}
define i8* @test_memcpy16(i8* %P, i8* %Q) {
; CHECK: test_memcpy16
- call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %P, i8* align 16 %Q, i32 16, i32 16)
+ call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %P, i8* align 16 %Q, i32 1024, i32 16)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $16, %edx
+ ; CHECK-DAG: movl $1024, %edx
; CHECK: __llvm_memcpy_element_unordered_atomic_16
}
@@ -57,53 +57,54 @@ define void @test_memcpy_args(i8** %Storage) {
; 2nd arg (%rsi)
; CHECK-DAG: movq 8(%rdi), %rsi
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $4, %edx
+ ; CHECK-DAG: movl $1024, %edx
; CHECK: __llvm_memcpy_element_unordered_atomic_4
- call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %Dst, i8* align 4 %Src, i32 4, i32 4) ret void
+ call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %Dst, i8* align 4 %Src, i32 1024, i32 4)
+ ret void
}
define i8* @test_memmove1(i8* %P, i8* %Q) {
; CHECK: test_memmove
- call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1, i32 1)
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1024, i32 1)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1, %edx
+ ; CHECK-DAG: movl $1024, %edx
; CHECK: __llvm_memmove_element_unordered_atomic_1
}
define i8* @test_memmove2(i8* %P, i8* %Q) {
; CHECK: test_memmove2
- call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 2, i32 2)
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1024, i32 2)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $2, %edx
+ ; CHECK-DAG: movl $1024, %edx
; CHECK: __llvm_memmove_element_unordered_atomic_2
}
define i8* @test_memmove4(i8* %P, i8* %Q) {
; CHECK: test_memmove4
- call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 4, i32 4)
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1024, i32 4)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $4, %edx
+ ; CHECK-DAG: movl $1024, %edx
; CHECK: __llvm_memmove_element_unordered_atomic_4
}
define i8* @test_memmove8(i8* %P, i8* %Q) {
; CHECK: test_memmove8
- call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %P, i8* align 8 %Q, i32 8, i32 8)
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %P, i8* align 8 %Q, i32 1024, i32 8)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $8, %edx
+ ; CHECK-DAG: movl $1024, %edx
; CHECK: __llvm_memmove_element_unordered_atomic_8
}
define i8* @test_memmove16(i8* %P, i8* %Q) {
; CHECK: test_memmove16
- call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %P, i8* align 16 %Q, i32 16, i32 16)
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %P, i8* align 16 %Q, i32 1024, i32 16)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $16, %edx
+ ; CHECK-DAG: movl $1024, %edx
; CHECK: __llvm_memmove_element_unordered_atomic_16
}
@@ -119,53 +120,54 @@ define void @test_memmove_args(i8** %Storage) {
; 2nd arg (%rsi)
; CHECK-DAG: movq 8(%rdi), %rsi
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $4, %edx
+ ; CHECK-DAG: movl $1024, %edx
; CHECK: __llvm_memmove_element_unordered_atomic_4
- call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %Dst, i8* align 4 %Src, i32 4, i32 4) ret void
+ call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %Dst, i8* align 4 %Src, i32 1024, i32 4)
+ ret void
}
define i8* @test_memset1(i8* %P, i8 %V) {
; CHECK: test_memset
- call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %P, i8 %V, i32 1, i32 1)
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %P, i8 %V, i32 1024, i32 1)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1, %edx
+ ; CHECK-DAG: movl $1024, %edx
; CHECK: __llvm_memset_element_unordered_atomic_1
}
define i8* @test_memset2(i8* %P, i8 %V) {
; CHECK: test_memset2
- call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %P, i8 %V, i32 2, i32 2)
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 %P, i8 %V, i32 1024, i32 2)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $2, %edx
+ ; CHECK-DAG: movl $1024, %edx
; CHECK: __llvm_memset_element_unordered_atomic_2
}
define i8* @test_memset4(i8* %P, i8 %V) {
; CHECK: test_memset4
- call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %P, i8 %V, i32 4, i32 4)
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %P, i8 %V, i32 1024, i32 4)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $4, %edx
+ ; CHECK-DAG: movl $1024, %edx
; CHECK: __llvm_memset_element_unordered_atomic_4
}
define i8* @test_memset8(i8* %P, i8 %V) {
; CHECK: test_memset8
- call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 %P, i8 %V, i32 8, i32 8)
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 %P, i8 %V, i32 1024, i32 8)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $8, %edx
+ ; CHECK-DAG: movl $1024, %edx
; CHECK: __llvm_memset_element_unordered_atomic_8
}
define i8* @test_memset16(i8* %P, i8 %V) {
; CHECK: test_memset16
- call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 %P, i8 %V, i32 16, i32 16)
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 %P, i8 %V, i32 1024, i32 16)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $16, %edx
+ ; CHECK-DAG: movl $1024, %edx
; CHECK: __llvm_memset_element_unordered_atomic_16
}
@@ -179,9 +181,10 @@ define void @test_memset_args(i8** %Storage, i8* %V) {
; 2nd arg (%rsi)
; CHECK-DAG: movzbl (%rsi), %esi
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $4, %edx
+ ; CHECK-DAG: movl $1024, %edx
; CHECK: __llvm_memset_element_unordered_atomic_4
- call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %Dst, i8 %Val, i32 4, i32 4) ret void
+ call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %Dst, i8 %Val, i32 1024, i32 4)
+ ret void
}
declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32) nounwind