Diffstat (limited to 'llvm/test/CodeGen/AArch64')
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll | 12
-rw-r--r--  llvm/test/CodeGen/AArch64/PBQP-csr.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll | 6
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-abi_align.ll | 18
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll | 24
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-memcpy-inline.ll | 20
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-memset-inline.ll | 8
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-memset-to-bzero.ll | 8
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-misaligned-memcpy-inline.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll | 6
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll | 6
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-stur.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-virtual_base.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/fast-isel-memcpy.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/func-argpassing.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/ldst-zero.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/machine-combiner-madd.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/memcpy-f128.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/merge-store-dependency.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/mergestores_noimplicitfloat.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/misched-stp.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/pr33172.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/tailcall-mem-intrinsics.ll | 12
-rw-r--r--  llvm/test/CodeGen/AArch64/tailcall-string-rvo.ll | 4
28 files changed, 96 insertions(+), 96 deletions(-)
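Every hunk below makes the same mechanical update: the explicit i32 alignment operand is dropped from the llvm.memcpy, llvm.memmove, and llvm.memset intrinsics, and alignment is instead expressed as an align attribute on the pointer arguments. A minimal sketch of the new shape, in the typed-pointer IR these tests use; the function and value names are illustrative, not taken from any of the tests:

  ; New form: no i32 alignment operand; alignment is an attribute on each pointer.
  ; The old form of the same call was:
  ;   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 32, i32 8, i1 false)
  declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)

  ; @copy32, %dst, and %src are hypothetical names used only for this sketch.
  define void @copy32(i8* %dst, i8* %src) {
    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %dst, i8* align 8 %src, i64 32, i1 false)
    ret void
  }

Calls that previously passed an alignment of 1 simply drop the i32 1 operand without gaining an attribute, since align 1 carries no information.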
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
index f710cd7551d..077c21c0557 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -1147,7 +1147,7 @@ define void()* @test_global_func() {
ret void()* @allocai64
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32 %align, i1 %volatile)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
define void @test_memcpy(i8* %dst, i8* %src, i64 %size) {
; CHECK-LABEL: name: test_memcpy
; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY %x0
@@ -1157,11 +1157,11 @@ define void @test_memcpy(i8* %dst, i8* %src, i64 %size) {
; CHECK: %x1 = COPY [[SRC]]
; CHECK: %x2 = COPY [[SIZE]]
; CHECK: BL &memcpy, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit %x1, implicit %x2
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i32 1, i1 0)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
ret void
}
-declare void @llvm.memmove.p0i8.p0i8.i64(i8*, i8*, i64, i32 %align, i1 %volatile)
+declare void @llvm.memmove.p0i8.p0i8.i64(i8*, i8*, i64, i1)
define void @test_memmove(i8* %dst, i8* %src, i64 %size) {
; CHECK-LABEL: name: test_memmove
; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY %x0
@@ -1171,11 +1171,11 @@ define void @test_memmove(i8* %dst, i8* %src, i64 %size) {
; CHECK: %x1 = COPY [[SRC]]
; CHECK: %x2 = COPY [[SIZE]]
; CHECK: BL &memmove, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit %x1, implicit %x2
- call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i32 1, i1 0)
+ call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
ret void
}
-declare void @llvm.memset.p0i8.i64(i8*, i8, i64, i32 %align, i1 %volatile)
+declare void @llvm.memset.p0i8.i64(i8*, i8, i64, i1)
define void @test_memset(i8* %dst, i8 %val, i64 %size) {
; CHECK-LABEL: name: test_memset
; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY %x0
@@ -1187,7 +1187,7 @@ define void @test_memset(i8* %dst, i8 %val, i64 %size) {
; CHECK: %w1 = COPY [[SRC_TMP]]
; CHECK: %x2 = COPY [[SIZE]]
; CHECK: BL &memset, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit %w1, implicit %x2
- call void @llvm.memset.p0i8.i64(i8* %dst, i8 %val, i64 %size, i32 1, i1 0)
+ call void @llvm.memset.p0i8.i64(i8* %dst, i8 %val, i64 %size, i1 0)
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/PBQP-csr.ll b/llvm/test/CodeGen/AArch64/PBQP-csr.ll
index 16d7f8cb7a5..e071eda17e3 100644
--- a/llvm/test/CodeGen/AArch64/PBQP-csr.ll
+++ b/llvm/test/CodeGen/AArch64/PBQP-csr.ll
@@ -22,7 +22,7 @@ entry:
%z.i60 = getelementptr inbounds %rs, %rs* %r, i64 0, i32 9, i32 2
%na = getelementptr inbounds %rs, %rs* %r, i64 0, i32 0
%0 = bitcast double* %x.i to i8*
- call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 72, i32 8, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 72, i1 false)
%1 = load i32, i32* %na, align 4
%cmp70 = icmp sgt i32 %1, 0
br i1 %cmp70, label %for.body.lr.ph, label %for.end
@@ -87,5 +87,5 @@ for.end: ; preds = %for.end.loopexit, %
}
; Function Attrs: nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
diff --git a/llvm/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll b/llvm/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll
index fb4df34df29..043ce0933a9 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll
@@ -6,13 +6,13 @@ target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
declare void @extern(i8*)
; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #0
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #0
; Function Attrs: nounwind
define void @func(float* noalias %arg, i32* noalias %arg1, i8* noalias %arg2, i8* noalias %arg3) #1 {
bb:
%tmp = getelementptr inbounds i8, i8* %arg2, i64 88
- tail call void @llvm.memset.p0i8.i64(i8* noalias %arg2, i8 0, i64 40, i32 8, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* align 8 noalias %arg2, i8 0, i64 40, i1 false)
store i8 0, i8* %arg3
store i8 2, i8* %arg2
store float 0.000000e+00, float* %arg
@@ -27,7 +27,7 @@ bb:
define void @func2(float* noalias %arg, i32* noalias %arg1, i8* noalias %arg2, i8* noalias %arg3) #1 {
bb:
%tmp = getelementptr inbounds i8, i8* %arg2, i64 88
- tail call void @llvm.memset.p0i8.i64(i8* noalias %arg2, i8 0, i64 40, i32 8, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* align 8 noalias %arg2, i8 0, i64 40, i1 false)
store i8 0, i8* %arg3
store i8 2, i8* %arg2
store float 0.000000e+00, float* %arg
diff --git a/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll b/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
index b38b4f2a2b2..2b6cd7c2d28 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
@@ -14,8 +14,8 @@
; CHECK-NEXT: str [[VAL2]], [x0]
define void @foo(i8* %a) {
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast ([3 x i32]* @b to i8*), i64 12, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a, i8* align 4 bitcast ([3 x i32]* @b to i8*), i64 12, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll b/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll
index d6a1686d566..e0fa5dbbaf9 100644
--- a/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll
@@ -164,10 +164,10 @@ entry:
%4 = bitcast i8* %ap.align to %struct.s41*
%5 = bitcast %struct.s41* %vs to i8*
%6 = bitcast %struct.s41* %4 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* %6, i64 16, i32 16, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %5, i8* align 16 %6, i64 16, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
define void @bar2(i32 %x, i128 %s41.coerce) nounwind {
entry:
diff --git a/llvm/test/CodeGen/AArch64/arm64-abi_align.ll b/llvm/test/CodeGen/AArch64/arm64-abi_align.ll
index 56a882a2a15..bfb74b598ff 100644
--- a/llvm/test/CodeGen/AArch64/arm64-abi_align.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-abi_align.ll
@@ -300,14 +300,14 @@ entry:
%tmp = alloca %struct.s42, align 4
%tmp1 = alloca %struct.s42, align 4
%0 = bitcast %struct.s42* %tmp to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.s42* @g42 to i8*), i64 24, i32 4, i1 false), !tbaa.struct !4
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 bitcast (%struct.s42* @g42 to i8*), i64 24, i1 false), !tbaa.struct !4
%1 = bitcast %struct.s42* %tmp1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s42* @g42_2 to i8*), i64 24, i32 4, i1 false), !tbaa.struct !4
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 bitcast (%struct.s42* @g42_2 to i8*), i64 24, i1 false), !tbaa.struct !4
%call = call i32 @f42(i32 3, %struct.s42* %tmp, %struct.s42* %tmp1) #5
ret i32 %call
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) #4
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) #4
declare i32 @f42_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6,
i32 %i7, i32 %i8, i32 %i9, %struct.s42* nocapture %s1,
@@ -346,9 +346,9 @@ entry:
%tmp = alloca %struct.s42, align 4
%tmp1 = alloca %struct.s42, align 4
%0 = bitcast %struct.s42* %tmp to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.s42* @g42 to i8*), i64 24, i32 4, i1 false), !tbaa.struct !4
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 bitcast (%struct.s42* @g42 to i8*), i64 24, i1 false), !tbaa.struct !4
%1 = bitcast %struct.s42* %tmp1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s42* @g42_2 to i8*), i64 24, i32 4, i1 false), !tbaa.struct !4
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 bitcast (%struct.s42* @g42_2 to i8*), i64 24, i1 false), !tbaa.struct !4
%call = call i32 @f42_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
i32 8, i32 9, %struct.s42* %tmp, %struct.s42* %tmp1) #5
ret i32 %call
@@ -414,9 +414,9 @@ entry:
%tmp = alloca %struct.s43, align 16
%tmp1 = alloca %struct.s43, align 16
%0 = bitcast %struct.s43* %tmp to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.s43* @g43 to i8*), i64 32, i32 16, i1 false), !tbaa.struct !4
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.s43* @g43 to i8*), i64 32, i1 false), !tbaa.struct !4
%1 = bitcast %struct.s43* %tmp1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s43* @g43_2 to i8*), i64 32, i32 16, i1 false), !tbaa.struct !4
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %1, i8* align 16 bitcast (%struct.s43* @g43_2 to i8*), i64 32, i1 false), !tbaa.struct !4
%call = call i32 @f43(i32 3, %struct.s43* %tmp, %struct.s43* %tmp1) #5
ret i32 %call
}
@@ -465,9 +465,9 @@ entry:
%tmp = alloca %struct.s43, align 16
%tmp1 = alloca %struct.s43, align 16
%0 = bitcast %struct.s43* %tmp to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.s43* @g43 to i8*), i64 32, i32 16, i1 false), !tbaa.struct !4
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.s43* @g43 to i8*), i64 32, i1 false), !tbaa.struct !4
%1 = bitcast %struct.s43* %tmp1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s43* @g43_2 to i8*), i64 32, i32 16, i1 false), !tbaa.struct !4
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %1, i8* align 16 bitcast (%struct.s43* @g43_2 to i8*), i64 32, i1 false), !tbaa.struct !4
%call = call i32 @f43_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
i32 8, i32 9, %struct.s43* %tmp, %struct.s43* %tmp1) #5
ret i32 %call
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll
index 1af960a12a1..e43160ab340 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll
@@ -11,11 +11,11 @@ define void @t1() {
; ARM64: mov x2, #80
; ARM64: uxtb w1, w9
; ARM64: bl _memset
- call void @llvm.memset.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i8 0, i64 80, i32 16, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i8 0, i64 80, i1 false)
ret void
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
define void @t2() {
; ARM64-LABEL: t2
@@ -25,11 +25,11 @@ define void @t2() {
; ARM64: add x1, x8, _message@PAGEOFF
; ARM64: mov x2, #80
; ARM64: bl _memcpy
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 80, i32 16, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 80, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
define void @t3() {
; ARM64-LABEL: t3
@@ -39,11 +39,11 @@ define void @t3() {
; ARM64: add x1, x8, _message@PAGEOFF
; ARM64: mov x2, #20
; ARM64: bl _memmove
- call void @llvm.memmove.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 20, i32 16, i1 false)
+ call void @llvm.memmove.p0i8.p0i8.i64(i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 20, i1 false)
ret void
}
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
+declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
define void @t4() {
; ARM64-LABEL: t4
@@ -58,7 +58,7 @@ define void @t4() {
; ARM64: ldrb w11, [x9, #16]
; ARM64: strb w11, [x8, #16]
; ARM64: ret
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 17, i32 16, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 17, i1 false)
ret void
}
@@ -75,7 +75,7 @@ define void @t5() {
; ARM64: ldrb w11, [x9, #16]
; ARM64: strb w11, [x8, #16]
; ARM64: ret
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 17, i32 8, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 8 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 17, i1 false)
ret void
}
@@ -92,7 +92,7 @@ define void @t6() {
; ARM64: ldrb w10, [x9, #8]
; ARM64: strb w10, [x8, #8]
; ARM64: ret
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 9, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 4 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 9, i1 false)
ret void
}
@@ -111,7 +111,7 @@ define void @t7() {
; ARM64: ldrb w10, [x9, #6]
; ARM64: strb w10, [x8, #6]
; ARM64: ret
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 7, i32 2, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 2 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 7, i1 false)
ret void
}
@@ -130,7 +130,7 @@ define void @t8() {
; ARM64: ldrb w10, [x9, #3]
; ARM64: strb w10, [x8, #3]
; ARM64: ret
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 4, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 1 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 4, i1 false)
ret void
}
@@ -143,6 +143,6 @@ define void @test_distant_memcpy(i8* %dst) {
; ARM64: strb [[BYTE]], [x0]
%array = alloca i8, i32 8192
%elem = getelementptr i8, i8* %array, i32 8000
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %elem, i64 1, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %elem, i64 1, i1 false)
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/arm64-memcpy-inline.ll b/llvm/test/CodeGen/AArch64/arm64-memcpy-inline.ll
index 0590031fbcd..4f8f3a227bb 100644
--- a/llvm/test/CodeGen/AArch64/arm64-memcpy-inline.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-memcpy-inline.ll
@@ -22,7 +22,7 @@ entry:
; CHECK: strh [[REG1]], [x[[BASEREG2]], #8]
; CHECK: ldr [[REG2:x[0-9]+]],
; CHECK: str [[REG2]],
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds (%struct.x, %struct.x* @dst, i32 0, i32 0), i8* getelementptr inbounds (%struct.x, %struct.x* @src, i32 0, i32 0), i32 11, i32 8, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 getelementptr inbounds (%struct.x, %struct.x* @dst, i32 0, i32 0), i8* align 8 getelementptr inbounds (%struct.x, %struct.x* @src, i32 0, i32 0), i32 11, i1 false)
ret i32 0
}
@@ -33,7 +33,7 @@ entry:
; CHECK: stur [[DEST]], [x0, #15]
; CHECK: ldr [[DEST:q[0-9]+]], [x[[BASEREG]]]
; CHECK: str [[DEST]], [x0]
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([31 x i8], [31 x i8]* @.str1, i64 0, i64 0), i64 31, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([31 x i8], [31 x i8]* @.str1, i64 0, i64 0), i64 31, i1 false)
ret void
}
@@ -45,7 +45,7 @@ entry:
; CHECK: str [[REG3]], [x0, #32]
; CHECK: ldp [[DEST1:q[0-9]+]], [[DEST2:q[0-9]+]], [x{{[0-9]+}}]
; CHECK: stp [[DEST1]], [[DEST2]], [x0]
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([36 x i8], [36 x i8]* @.str2, i64 0, i64 0), i64 36, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([36 x i8], [36 x i8]* @.str2, i64 0, i64 0), i64 36, i1 false)
ret void
}
@@ -56,7 +56,7 @@ entry:
; CHECK: str [[REG4]], [x0, #16]
; CHECK: ldr [[DEST:q[0-9]+]], [x[[BASEREG]]]
; CHECK: str [[DEST]], [x0]
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([24 x i8], [24 x i8]* @.str3, i64 0, i64 0), i64 24, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([24 x i8], [24 x i8]* @.str3, i64 0, i64 0), i64 24, i1 false)
ret void
}
@@ -67,7 +67,7 @@ entry:
; CHECK: strh [[REG5]], [x0, #16]
; CHECK: ldr [[REG6:q[0-9]+]], [x{{[0-9]+}}]
; CHECK: str [[REG6]], [x0]
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([18 x i8], [18 x i8]* @.str4, i64 0, i64 0), i64 18, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([18 x i8], [18 x i8]* @.str4, i64 0, i64 0), i64 18, i1 false)
ret void
}
@@ -80,7 +80,7 @@ entry:
; CHECK: mov [[REG8:w[0-9]+]],
; CHECK: movk [[REG8]],
; CHECK: str [[REG8]], [x0]
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str5, i64 0, i64 0), i64 7, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str5, i64 0, i64 0), i64 7, i1 false)
ret void
}
@@ -91,7 +91,7 @@ entry:
; CHECK: stur [[REG9]], [x{{[0-9]+}}, #6]
; CHECK: ldr
; CHECK: str
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([512 x i8], [512 x i8]* @spool.splbuf, i64 0, i64 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str6, i64 0, i64 0), i64 14, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([512 x i8], [512 x i8]* @spool.splbuf, i64 0, i64 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str6, i64 0, i64 0), i64 14, i1 false)
ret void
}
@@ -104,9 +104,9 @@ entry:
; CHECK: str [[REG10]], [x0]
%0 = bitcast %struct.Foo* %a to i8*
%1 = bitcast %struct.Foo* %b to i8*
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 16, i32 4, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 16, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll b/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
index 8c872cc6150..ecdfcc6673a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
@@ -5,7 +5,7 @@ entry:
; CHECK-LABEL: t1:
; CHECK: str wzr, [x0, #8]
; CHECK: str xzr, [x0]
- call void @llvm.memset.p0i8.i64(i8* %c, i8 0, i64 12, i32 8, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 12, i1 false)
ret void
}
@@ -17,11 +17,11 @@ entry:
; CHECK: str xzr, [sp, #8]
%buf = alloca [26 x i8], align 1
%0 = getelementptr inbounds [26 x i8], [26 x i8]* %buf, i32 0, i32 0
- call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 26, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 26, i1 false)
call void @something(i8* %0) nounwind
ret void
}
declare void @something(i8*) nounwind
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/AArch64/arm64-memset-to-bzero.ll b/llvm/test/CodeGen/AArch64/arm64-memset-to-bzero.ll
index 3466e1bace5..87a0232c734 100644
--- a/llvm/test/CodeGen/AArch64/arm64-memset-to-bzero.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-memset-to-bzero.ll
@@ -10,11 +10,11 @@
; CHECK-LINUX: {{b|bl}} memset
define void @fct1(i8* nocapture %ptr) {
entry:
- tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 256, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 256, i1 false)
ret void
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
; CHECK-LABEL: fct2:
; When the size is bigger than 256, change into bzero.
@@ -22,7 +22,7 @@ declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
; CHECK-LINUX: {{b|bl}} memset
define void @fct2(i8* nocapture %ptr) {
entry:
- tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 257, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 257, i1 false)
ret void
}
@@ -33,7 +33,7 @@ entry:
define void @fct3(i8* nocapture %ptr, i32 %unknown) {
entry:
%conv = sext i32 %unknown to i64
- tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 %conv, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 %conv, i1 false)
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/arm64-misaligned-memcpy-inline.ll b/llvm/test/CodeGen/AArch64/arm64-misaligned-memcpy-inline.ll
index 85572f2cf0f..7ecf214b4be 100644
--- a/llvm/test/CodeGen/AArch64/arm64-misaligned-memcpy-inline.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-misaligned-memcpy-inline.ll
@@ -7,8 +7,8 @@ define void @t0(i8* %out, i8* %in) {
; CHECK: orr w2, wzr, #0x10
; CHECK-NEXT: bl _memcpy
entry:
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %out, i8* %in, i64 16, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %out, i8* %in, i64 16, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
diff --git a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
index 07df9cb32db..f0b9ccc8b5d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
@@ -26,9 +26,9 @@ entry:
%yy = alloca i32, align 4
store i32 0, i32* %retval
%0 = bitcast [8 x i32]* %x to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast ([8 x i32]* @main.x to i8*), i64 32, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 bitcast ([8 x i32]* @main.x to i8*), i64 32, i1 false)
%1 = bitcast [8 x i32]* %y to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ([8 x i32]* @main.y to i8*), i64 32, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 bitcast ([8 x i32]* @main.y to i8*), i64 32, i1 false)
store i32 0, i32* %xx, align 4
store i32 0, i32* %yy, align 4
store i32 0, i32* %i, align 4
@@ -105,7 +105,7 @@ define <4 x float> @neon4xfloat(<4 x float> %A, <4 x float> %B) {
}
; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1
attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll
index 711d2f7397b..c2f53e88a95 100644
--- a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll
@@ -32,9 +32,9 @@ entry:
%yy = alloca i32, align 4
store i32 0, i32* %retval
%0 = bitcast [8 x i32]* %x to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast ([8 x i32]* @main.x to i8*), i64 32, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 bitcast ([8 x i32]* @main.x to i8*), i64 32, i1 false)
%1 = bitcast [8 x i32]* %y to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ([8 x i32]* @main.y to i8*), i64 32, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 bitcast ([8 x i32]* @main.y to i8*), i64 32, i1 false)
store i32 0, i32* %xx, align 4
store i32 0, i32* %yy, align 4
store i32 0, i32* %i, align 4
@@ -106,7 +106,7 @@ for.end: ; preds = %for.cond
; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1
attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AArch64/arm64-stur.ll b/llvm/test/CodeGen/AArch64/arm64-stur.ll
index 4a3229a39b5..8e0736c4fba 100644
--- a/llvm/test/CodeGen/AArch64/arm64-stur.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-stur.ll
@@ -55,11 +55,11 @@ define void @foo(%struct.X* nocapture %p) nounwind optsize ssp {
; CHECK-NEXT: ret
%B = getelementptr inbounds %struct.X, %struct.X* %p, i64 0, i32 1
%val = bitcast i64* %B to i8*
- call void @llvm.memset.p0i8.i64(i8* %val, i8 0, i64 16, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* %val, i8 0, i64 16, i1 false)
ret void
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
; Unaligned 16b stores are split into 8b stores for performance.
; radar://15424193
diff --git a/llvm/test/CodeGen/AArch64/arm64-virtual_base.ll b/llvm/test/CodeGen/AArch64/arm64-virtual_base.ll
index 4ecfde4f83e..4ce0d2f0007 100644
--- a/llvm/test/CodeGen/AArch64/arm64-virtual_base.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-virtual_base.ll
@@ -43,9 +43,9 @@ entry:
%tmp14 = bitcast double* %arraydecay5.3.1 to i8*
%arraydecay11.3.1 = getelementptr inbounds %struct.Bicubic_Patch_Struct, %struct.Bicubic_Patch_Struct* %Shape, i64 0, i32 12, i64 1, i64 3, i64 0
%tmp15 = bitcast double* %arraydecay11.3.1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp14, i8* %tmp15, i64 24, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp14, i8* %tmp15, i64 24, i1 false)
ret void
}
; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
diff --git a/llvm/test/CodeGen/AArch64/fast-isel-memcpy.ll b/llvm/test/CodeGen/AArch64/fast-isel-memcpy.ll
index 07595a954db..290e0c918ad 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-memcpy.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-memcpy.ll
@@ -8,8 +8,8 @@
define void @test(i64 %a, i8* %b) {
%1 = and i64 %a, 9223372036854775807
%2 = inttoptr i64 %1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %b, i64 8, i32 8, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %2, i8* align 8 %b, i64 8, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
diff --git a/llvm/test/CodeGen/AArch64/func-argpassing.ll b/llvm/test/CodeGen/AArch64/func-argpassing.ll
index cf6545dab38..824a1893940 100644
--- a/llvm/test/CodeGen/AArch64/func-argpassing.ll
+++ b/llvm/test/CodeGen/AArch64/func-argpassing.ll
@@ -186,11 +186,11 @@ define void @check_i128_stackalign(i32 %val0, i32 %val1, i32 %val2, i32 %val3,
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i1)
define i32 @test_extern() {
; CHECK-LABEL: test_extern:
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* undef, i32 undef, i32 4, i1 0)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 undef, i8* align 4 undef, i32 undef, i1 0)
; CHECK: bl memcpy
ret i32 0
}
diff --git a/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll b/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll
index 35117a147ee..951bd4ada3c 100644
--- a/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll
+++ b/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll
@@ -115,11 +115,11 @@ entry:
%C = getelementptr inbounds [12 x i8], [12 x i8]* %a2, i64 0, i64 4
%1 = bitcast i8* %C to i64*
store i64 0, i64* %1, align 4
- call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 8, i32 8, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 8, i1 false)
ret void
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll b/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll
index 9c698b5fdcc..0f8ffb50c8d 100644
--- a/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll
+++ b/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll
@@ -5,7 +5,7 @@ target triple = "aarch64--linux-gnu"
declare void @f(i8*, i8*)
declare void @f2(i8*, i8*)
declare void @_Z5setupv()
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #3
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #3
define i32 @main() local_unnamed_addr #1 {
; Make sure the stores happen in the correct order (the exact instructions could change).
@@ -24,7 +24,7 @@ for.body.lr.ph.i.i.i.i.i.i63:
tail call void @_Z5setupv()
%x2 = getelementptr inbounds [10 x i32], [10 x i32]* %b1, i64 0, i64 6
%x3 = bitcast i32* %x2 to i8*
- call void @llvm.memset.p0i8.i64(i8* %x3, i8 0, i64 16, i32 8, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 8 %x3, i8 0, i64 16, i1 false)
%arraydecay2 = getelementptr inbounds [10 x i32], [10 x i32]* %b1, i64 0, i64 0
%x4 = bitcast [10 x i32]* %b1 to <4 x i32>*
store <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32>* %x4, align 16
diff --git a/llvm/test/CodeGen/AArch64/ldst-zero.ll b/llvm/test/CodeGen/AArch64/ldst-zero.ll
index 7d443a631f9..0ada6fd84cb 100644
--- a/llvm/test/CodeGen/AArch64/ldst-zero.ll
+++ b/llvm/test/CodeGen/AArch64/ldst-zero.ll
@@ -3,7 +3,7 @@
; Tests to check that zero stores which are generated as STP xzr, xzr aren't
; scheduled incorrectly due to incorrect alias information
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
%struct.tree_common = type { i8*, i8*, i32 }
; Original test case which exhibited the bug
@@ -14,7 +14,7 @@ define void @test1(%struct.tree_common* %t, i32 %code, i8* %type) {
; CHECK-DAG: str xzr, [x0]
entry:
%0 = bitcast %struct.tree_common* %t to i8*
- tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 24, i32 8, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 24, i1 false)
%code1 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 2
store i32 %code, i32* %code1, align 8
%type2 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 1
diff --git a/llvm/test/CodeGen/AArch64/machine-combiner-madd.ll b/llvm/test/CodeGen/AArch64/machine-combiner-madd.ll
index 4efe4e9cfb0..5ace6e63136 100644
--- a/llvm/test/CodeGen/AArch64/machine-combiner-madd.ll
+++ b/llvm/test/CodeGen/AArch64/machine-combiner-madd.ll
@@ -19,7 +19,7 @@
%class.D = type { %class.basic_string.base, [4 x i8] }
%class.basic_string.base = type <{ i64, i64, i32 }>
@a = global %class.D* zeroinitializer, align 8
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)
define internal void @fun() section ".text.startup" {
entry:
%tmp.i.i = alloca %class.D, align 8
@@ -31,7 +31,7 @@ loop:
%x = load %class.D*, %class.D** getelementptr inbounds (%class.D*, %class.D** @a, i64 0), align 8
%arrayidx.i.i.i = getelementptr inbounds %class.D, %class.D* %x, i64 %conv11.i.i
%d = bitcast %class.D* %arrayidx.i.i.i to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull %y, i8* %d, i64 24, i32 8, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 nonnull %y, i8* align 8 %d, i64 24, i1 false)
%inc.i.i = add i64 %i, 1
%cmp.i.i = icmp slt i64 %inc.i.i, 0
br i1 %cmp.i.i, label %loop, label %exit
diff --git a/llvm/test/CodeGen/AArch64/memcpy-f128.ll b/llvm/test/CodeGen/AArch64/memcpy-f128.ll
index 7e6ec36104a..8b91b843108 100644
--- a/llvm/test/CodeGen/AArch64/memcpy-f128.ll
+++ b/llvm/test/CodeGen/AArch64/memcpy-f128.ll
@@ -12,8 +12,8 @@ define void @test1() {
; CHECK: str q0
; CHECK: ret
entry:
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* undef, i8* bitcast (%structA* @stubA to i8*), i64 48, i32 8, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 undef, i8* align 8 bitcast (%structA* @stubA to i8*), i64 48, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
diff --git a/llvm/test/CodeGen/AArch64/merge-store-dependency.ll b/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
index 4f2af9ed7e6..5bed63ef895 100644
--- a/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
+++ b/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
@@ -14,7 +14,7 @@ entry:
; A53: str [[DATA]], {{.*}}
%0 = bitcast %struct1* %fde to i8*
- tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 40, i32 8, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 40, i1 false)
%state = getelementptr inbounds %struct1, %struct1* %fde, i64 0, i32 4
store i16 256, i16* %state, align 8
%fd1 = getelementptr inbounds %struct1, %struct1* %fde, i64 0, i32 2
@@ -58,6 +58,6 @@ exit:
ret void
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
declare i32 @fcntl(i32, i32, ...)
declare noalias i8* @foo()
diff --git a/llvm/test/CodeGen/AArch64/mergestores_noimplicitfloat.ll b/llvm/test/CodeGen/AArch64/mergestores_noimplicitfloat.ll
index 74aeaf75d03..cd64ae11550 100644
--- a/llvm/test/CodeGen/AArch64/mergestores_noimplicitfloat.ll
+++ b/llvm/test/CodeGen/AArch64/mergestores_noimplicitfloat.ll
@@ -16,8 +16,8 @@ target triple = "arm64-apple-ios10.0.0"
; CHECK-DAG: str [[R3]], [x0, #24]
define void @pr33475(i8* %p0, i8* %p1) noimplicitfloat {
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p0, i8* %p1, i64 32, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %p0, i8* align 4 %p1, i64 32, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
diff --git a/llvm/test/CodeGen/AArch64/misched-stp.ll b/llvm/test/CodeGen/AArch64/misched-stp.ll
index 1c9ea68834c..1afec40f192 100644
--- a/llvm/test/CodeGen/AArch64/misched-stp.ll
+++ b/llvm/test/CodeGen/AArch64/misched-stp.ll
@@ -30,7 +30,7 @@ entry:
ret void
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
%struct.tree_common = type { i8*, i8*, i32 }
; CHECK-LABEL: test_zero
@@ -41,7 +41,7 @@ declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
define void @test_zero(%struct.tree_common* %t, i32 %code, i8* %type) {
entry:
%0 = bitcast %struct.tree_common* %t to i8*
- tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 24, i32 8, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 24, i1 false)
%code1 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 2
store i32 %code, i32* %code1, align 8
%type2 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 1
diff --git a/llvm/test/CodeGen/AArch64/pr33172.ll b/llvm/test/CodeGen/AArch64/pr33172.ll
index 1e1da78b28f..098d5358b02 100644
--- a/llvm/test/CodeGen/AArch64/pr33172.ll
+++ b/llvm/test/CodeGen/AArch64/pr33172.ll
@@ -21,12 +21,12 @@ entry:
%wide.load8291059.4 = load i64, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.b, i64 0, i64 18) to i64*), align 8
store i64 %wide.load8281058.4, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.x, i64 0, i64 16) to i64*), align 8
store i64 %wide.load8291059.4, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.x, i64 0, i64 18) to i64*), align 8
- tail call void @llvm.memset.p0i8.i64(i8* bitcast ([200 x float]* @main.b to i8*), i8 0, i64 undef, i32 8, i1 false) #2
+ tail call void @llvm.memset.p0i8.i64(i8* align 8 bitcast ([200 x float]* @main.b to i8*), i8 0, i64 undef, i1 false) #2
unreachable
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i32, i1) #1
+declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #1
attributes #1 = { argmemonly nounwind }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/AArch64/tailcall-mem-intrinsics.ll b/llvm/test/CodeGen/AArch64/tailcall-mem-intrinsics.ll
index b970fb12415..c780d15b58d 100644
--- a/llvm/test/CodeGen/AArch64/tailcall-mem-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/tailcall-mem-intrinsics.ll
@@ -4,7 +4,7 @@
; CHECK: b memcpy
define void @tail_memcpy(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 {
entry:
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false)
ret void
}
@@ -12,7 +12,7 @@ entry:
; CHECK: b memmove
define void @tail_memmove(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 {
entry:
- tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false)
ret void
}
@@ -20,12 +20,12 @@ entry:
; CHECK: b memset
define void @tail_memset(i8* nocapture %p, i8 %c, i32 %n) #0 {
entry:
- tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #0
-declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #0
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) #0
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0
+declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) #0
attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AArch64/tailcall-string-rvo.ll b/llvm/test/CodeGen/AArch64/tailcall-string-rvo.ll
index bdc09235afd..d9d2180b5ef 100644
--- a/llvm/test/CodeGen/AArch64/tailcall-string-rvo.ll
+++ b/llvm/test/CodeGen/AArch64/tailcall-string-rvo.ll
@@ -32,7 +32,7 @@ bb:
%tmp1 = bitcast %class.basic_string.11.42.73* %arg to %union.anon.8.39.70**
store %union.anon.8.39.70* %tmp, %union.anon.8.39.70** %tmp1, align 8
%tmp2 = bitcast %union.anon.8.39.70* %tmp to i8*
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp2, i8* nonnull undef, i64 13, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp2, i8* nonnull undef, i64 13, i1 false)
%tmp3 = getelementptr inbounds %class.basic_string.11.42.73, %class.basic_string.11.42.73* %arg, i64 0, i32 0, i32 0, i32 1
store i64 13, i64* %tmp3, align 8
%tmp4 = getelementptr inbounds %class.basic_string.11.42.73, %class.basic_string.11.42.73* %arg, i64 0, i32 0, i32 0, i32 2, i32 1, i64 5
@@ -42,6 +42,6 @@ bb:
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #0
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #0
attributes #0 = { argmemonly nounwind }