author     Daniel Neilson <dneilson@azul.com>    2018-01-19 17:13:12 +0000
committer  Daniel Neilson <dneilson@azul.com>    2018-01-19 17:13:12 +0000
commit     1e68724d24ba38de7c7cdb2e1939d78c8b37cc0d
tree       ef2e22d141b391e512da3c2df5c65f906eb7b98f /llvm/test/CodeGen
parent     6e938effaaf2016eb76e1b73aba2aa38a245cb70
Remove alignment argument from memcpy/memmove/memset in favour of alignment attributes (Step 1)
Summary:
This is a resurrection of work first proposed and discussed in Aug 2015:
http://lists.llvm.org/pipermail/llvm-dev/2015-August/089384.html
and initially landed (but then backed out) in Nov 2015:
http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
The @llvm.memcpy/memmove/memset intrinsics currently take an explicit alignment argument,
which is required to be a constant integer. It represents the alignment of both the
dest and the source, and so must be the minimum of the two actual alignments.
This change is the first in a series that allows source and dest to each
have their own alignments by using the alignment attribute on their arguments.
In this change we:
1) Remove the alignment argument.
2) Add alignment attributes to the source & dest arguments. For the moment, we
require that the alignments for source & dest be equal.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)
will now read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)
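The corresponding intrinsic declarations lose the i32 alignment parameter in the same way.
For instance, as seen throughout the updated tests below, a declaration such as
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
becomes
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)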
Downstream users may have to update their lit tests that check for
@llvm.memcpy/memmove/memset call/declaration patterns. The following extended sed script
may help with updating the majority of your tests, but it does not catch all possible
patterns, so some manual checking and updating will be required.
s~declare void @llvm\.mem(set|cpy|move)\.p([^(]*)\((.*), i32, i1\)~declare void @llvm.mem\1.p\2(\3, i1)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* \3, i8 \4, i8 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* \3, i8 \4, i16 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* \3, i8 \4, i32 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* \3, i8 \4, i64 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* \3, i8 \4, i128 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* align \6 \3, i8 \4, i8 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* align \6 \3, i8 \4, i16 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* align \6 \3, i8 \4, i32 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* align \6 \3, i8 \4, i64 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* align \6 \3, i8 \4, i128 \5, i1 \7)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* \4, i8\5* \6, i8 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* \4, i8\5* \6, i16 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* \4, i8\5* \6, i32 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* \4, i8\5* \6, i64 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* \4, i8\5* \6, i128 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* align \8 \4, i8\5* align \8 \6, i8 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* align \8 \4, i8\5* align \8 \6, i16 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* align \8 \4, i8\5* align \8 \6, i32 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* align \8 \4, i8\5* align \8 \6, i64 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* align \8 \4, i8\5* align \8 \6, i128 \7, i1 \9)~g
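As a rough usage sketch (assuming GNU sed; the script file name here is arbitrary), the lines
above can be saved to a file, e.g. update-mem-intrinsics.sed, and applied in place across a
test tree with:
find llvm/test -name '*.ll' -exec sed -r -i -f update-mem-intrinsics.sed {} +
The -r flag enables the extended regular expressions the script relies on; as noted above,
some patterns are not covered, so review the resulting diff before committing.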
The remaining changes in the series will:
Step 2) Expand the IRBuilder API to allow creation of memcpy/memmove with differing
source and dest alignments.
Step 3) Update Clang to use the new IRBuilder API.
Step 4) Update Polly to use the new IRBuilder API.
Step 5) Update LLVM passes that create memcpy/memmove calls to use the new IRBuilder API,
and those that use MemIntrinsicInst::[get|set]Alignment() to use
getDestAlignment() and getSourceAlignment() instead.
Step 6) Remove the single-alignment IRBuilder API for memcpy/memmove, and the
MemIntrinsicInst::[get|set]Alignment() methods.
Reviewers: pete, hfinkel, lhames, reames, bollu
Reviewed By: reames
Subscribers: niosHD, reames, jholewinski, qcolombet, jfb, sanjoy, arsenm, dschuff, dylanmckay, mehdi_amini, sdardis, nemanjai, david2050, nhaehnle, javed.absar, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, asb, rbar, johnrusso, simoncook, jordy.potman.lists, apazos, sabuasal, llvm-commits
Differential Revision: https://reviews.llvm.org/D41675
llvm-svn: 322965
Diffstat (limited to 'llvm/test/CodeGen')
196 files changed, 1070 insertions, 1091 deletions
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll index f710cd7551d..077c21c0557 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll @@ -1147,7 +1147,7 @@ define void()* @test_global_func() { ret void()* @allocai64 } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32 %align, i1 %volatile) +declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1) define void @test_memcpy(i8* %dst, i8* %src, i64 %size) { ; CHECK-LABEL: name: test_memcpy ; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY %x0 @@ -1157,11 +1157,11 @@ define void @test_memcpy(i8* %dst, i8* %src, i64 %size) { ; CHECK: %x1 = COPY [[SRC]] ; CHECK: %x2 = COPY [[SIZE]] ; CHECK: BL &memcpy, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit %x1, implicit %x2 - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i32 1, i1 0) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0) ret void } -declare void @llvm.memmove.p0i8.p0i8.i64(i8*, i8*, i64, i32 %align, i1 %volatile) +declare void @llvm.memmove.p0i8.p0i8.i64(i8*, i8*, i64, i1) define void @test_memmove(i8* %dst, i8* %src, i64 %size) { ; CHECK-LABEL: name: test_memmove ; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY %x0 @@ -1171,11 +1171,11 @@ define void @test_memmove(i8* %dst, i8* %src, i64 %size) { ; CHECK: %x1 = COPY [[SRC]] ; CHECK: %x2 = COPY [[SIZE]] ; CHECK: BL &memmove, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit %x1, implicit %x2 - call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i32 1, i1 0) + call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0) ret void } -declare void @llvm.memset.p0i8.i64(i8*, i8, i64, i32 %align, i1 %volatile) +declare void @llvm.memset.p0i8.i64(i8*, i8, i64, i1) define void @test_memset(i8* %dst, i8 %val, i64 %size) { ; CHECK-LABEL: name: test_memset ; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY %x0 @@ -1187,7 +1187,7 @@ define void @test_memset(i8* %dst, i8 %val, i64 %size) { ; CHECK: %w1 = COPY [[SRC_TMP]] ; CHECK: %x2 = COPY [[SIZE]] ; CHECK: BL &memset, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit %w1, implicit %x2 - call void @llvm.memset.p0i8.i64(i8* %dst, i8 %val, i64 %size, i32 1, i1 0) + call void @llvm.memset.p0i8.i64(i8* %dst, i8 %val, i64 %size, i1 0) ret void } diff --git a/llvm/test/CodeGen/AArch64/PBQP-csr.ll b/llvm/test/CodeGen/AArch64/PBQP-csr.ll index 16d7f8cb7a5..e071eda17e3 100644 --- a/llvm/test/CodeGen/AArch64/PBQP-csr.ll +++ b/llvm/test/CodeGen/AArch64/PBQP-csr.ll @@ -22,7 +22,7 @@ entry: %z.i60 = getelementptr inbounds %rs, %rs* %r, i64 0, i32 9, i32 2 %na = getelementptr inbounds %rs, %rs* %r, i64 0, i32 0 %0 = bitcast double* %x.i to i8* - call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 72, i32 8, i1 false) + call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 72, i1 false) %1 = load i32, i32* %na, align 4 %cmp70 = icmp sgt i32 %1, 0 br i1 %cmp70, label %for.body.lr.ph, label %for.end @@ -87,5 +87,5 @@ for.end: ; preds = %for.end.loopexit, % } ; Function Attrs: nounwind -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) diff --git a/llvm/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll b/llvm/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll index fb4df34df29..043ce0933a9 100644 --- 
a/llvm/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll @@ -6,13 +6,13 @@ target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128" declare void @extern(i8*) ; Function Attrs: argmemonly nounwind -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #0 +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #0 ; Function Attrs: nounwind define void @func(float* noalias %arg, i32* noalias %arg1, i8* noalias %arg2, i8* noalias %arg3) #1 { bb: %tmp = getelementptr inbounds i8, i8* %arg2, i64 88 - tail call void @llvm.memset.p0i8.i64(i8* noalias %arg2, i8 0, i64 40, i32 8, i1 false) + tail call void @llvm.memset.p0i8.i64(i8* align 8 noalias %arg2, i8 0, i64 40, i1 false) store i8 0, i8* %arg3 store i8 2, i8* %arg2 store float 0.000000e+00, float* %arg @@ -27,7 +27,7 @@ bb: define void @func2(float* noalias %arg, i32* noalias %arg1, i8* noalias %arg2, i8* noalias %arg3) #1 { bb: %tmp = getelementptr inbounds i8, i8* %arg2, i64 88 - tail call void @llvm.memset.p0i8.i64(i8* noalias %arg2, i8 0, i64 40, i32 8, i1 false) + tail call void @llvm.memset.p0i8.i64(i8* align 8 noalias %arg2, i8 0, i64 40, i1 false) store i8 0, i8* %arg3 store i8 2, i8* %arg2 store float 0.000000e+00, float* %arg diff --git a/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll b/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll index b38b4f2a2b2..2b6cd7c2d28 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll +++ b/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll @@ -14,8 +14,8 @@ ; CHECK-NEXT: str [[VAL2]], [x0] define void @foo(i8* %a) { - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast ([3 x i32]* @b to i8*), i64 12, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a, i8* align 4 bitcast ([3 x i32]* @b to i8*), i64 12, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind diff --git a/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll b/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll index d6a1686d566..e0fa5dbbaf9 100644 --- a/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll +++ b/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll @@ -164,10 +164,10 @@ entry: %4 = bitcast i8* %ap.align to %struct.s41* %5 = bitcast %struct.s41* %vs to i8* %6 = bitcast %struct.s41* %4 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* %6, i64 16, i32 16, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %5, i8* align 16 %6, i64 16, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind define void @bar2(i32 %x, i128 %s41.coerce) nounwind { entry: diff --git a/llvm/test/CodeGen/AArch64/arm64-abi_align.ll b/llvm/test/CodeGen/AArch64/arm64-abi_align.ll index 56a882a2a15..bfb74b598ff 100644 --- a/llvm/test/CodeGen/AArch64/arm64-abi_align.ll +++ b/llvm/test/CodeGen/AArch64/arm64-abi_align.ll @@ -300,14 +300,14 @@ entry: %tmp = alloca %struct.s42, align 4 %tmp1 = alloca %struct.s42, align 4 %0 = bitcast %struct.s42* %tmp to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.s42* @g42 to i8*), i64 24, i32 4, i1 false), !tbaa.struct !4 + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 bitcast 
(%struct.s42* @g42 to i8*), i64 24, i1 false), !tbaa.struct !4 %1 = bitcast %struct.s42* %tmp1 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s42* @g42_2 to i8*), i64 24, i32 4, i1 false), !tbaa.struct !4 + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 bitcast (%struct.s42* @g42_2 to i8*), i64 24, i1 false), !tbaa.struct !4 %call = call i32 @f42(i32 3, %struct.s42* %tmp, %struct.s42* %tmp1) #5 ret i32 %call } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) #4 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) #4 declare i32 @f42_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, %struct.s42* nocapture %s1, @@ -346,9 +346,9 @@ entry: %tmp = alloca %struct.s42, align 4 %tmp1 = alloca %struct.s42, align 4 %0 = bitcast %struct.s42* %tmp to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.s42* @g42 to i8*), i64 24, i32 4, i1 false), !tbaa.struct !4 + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 bitcast (%struct.s42* @g42 to i8*), i64 24, i1 false), !tbaa.struct !4 %1 = bitcast %struct.s42* %tmp1 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s42* @g42_2 to i8*), i64 24, i32 4, i1 false), !tbaa.struct !4 + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 bitcast (%struct.s42* @g42_2 to i8*), i64 24, i1 false), !tbaa.struct !4 %call = call i32 @f42_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, %struct.s42* %tmp, %struct.s42* %tmp1) #5 ret i32 %call @@ -414,9 +414,9 @@ entry: %tmp = alloca %struct.s43, align 16 %tmp1 = alloca %struct.s43, align 16 %0 = bitcast %struct.s43* %tmp to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.s43* @g43 to i8*), i64 32, i32 16, i1 false), !tbaa.struct !4 + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.s43* @g43 to i8*), i64 32, i1 false), !tbaa.struct !4 %1 = bitcast %struct.s43* %tmp1 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s43* @g43_2 to i8*), i64 32, i32 16, i1 false), !tbaa.struct !4 + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %1, i8* align 16 bitcast (%struct.s43* @g43_2 to i8*), i64 32, i1 false), !tbaa.struct !4 %call = call i32 @f43(i32 3, %struct.s43* %tmp, %struct.s43* %tmp1) #5 ret i32 %call } @@ -465,9 +465,9 @@ entry: %tmp = alloca %struct.s43, align 16 %tmp1 = alloca %struct.s43, align 16 %0 = bitcast %struct.s43* %tmp to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.s43* @g43 to i8*), i64 32, i32 16, i1 false), !tbaa.struct !4 + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.s43* @g43 to i8*), i64 32, i1 false), !tbaa.struct !4 %1 = bitcast %struct.s43* %tmp1 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s43* @g43_2 to i8*), i64 32, i32 16, i1 false), !tbaa.struct !4 + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %1, i8* align 16 bitcast (%struct.s43* @g43_2 to i8*), i64 32, i1 false), !tbaa.struct !4 %call = call i32 @f43_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, %struct.s43* %tmp, %struct.s43* %tmp1) #5 ret i32 %call diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll index 1af960a12a1..e43160ab340 100644 --- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll +++ 
b/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll @@ -11,11 +11,11 @@ define void @t1() { ; ARM64: mov x2, #80 ; ARM64: uxtb w1, w9 ; ARM64: bl _memset - call void @llvm.memset.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i8 0, i64 80, i32 16, i1 false) + call void @llvm.memset.p0i8.i64(i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i8 0, i64 80, i1 false) ret void } -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) define void @t2() { ; ARM64-LABEL: t2 @@ -25,11 +25,11 @@ define void @t2() { ; ARM64: add x1, x8, _message@PAGEOFF ; ARM64: mov x2, #80 ; ARM64: bl _memcpy - call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 80, i32 16, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 80, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) define void @t3() { ; ARM64-LABEL: t3 @@ -39,11 +39,11 @@ define void @t3() { ; ARM64: add x1, x8, _message@PAGEOFF ; ARM64: mov x2, #20 ; ARM64: bl _memmove - call void @llvm.memmove.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 20, i32 16, i1 false) + call void @llvm.memmove.p0i8.p0i8.i64(i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 20, i1 false) ret void } -declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) +declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) define void @t4() { ; ARM64-LABEL: t4 @@ -58,7 +58,7 @@ define void @t4() { ; ARM64: ldrb w11, [x9, #16] ; ARM64: strb w11, [x8, #16] ; ARM64: ret - call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 17, i32 16, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 17, i1 false) ret void } @@ -75,7 +75,7 @@ define void @t5() { ; ARM64: ldrb w11, [x9, #16] ; ARM64: strb w11, [x8, #16] ; ARM64: ret - call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 17, i32 8, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 8 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 17, i1 false) ret void } @@ -92,7 +92,7 @@ define void @t6() { ; ARM64: ldrb w10, [x9, #8] ; ARM64: strb w10, [x8, #8] ; ARM64: ret - call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, 
i32 0), i64 9, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 4 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 9, i1 false) ret void } @@ -111,7 +111,7 @@ define void @t7() { ; ARM64: ldrb w10, [x9, #6] ; ARM64: strb w10, [x8, #6] ; ARM64: ret - call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 7, i32 2, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 2 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 7, i1 false) ret void } @@ -130,7 +130,7 @@ define void @t8() { ; ARM64: ldrb w10, [x9, #3] ; ARM64: strb w10, [x8, #3] ; ARM64: ret - call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 4, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 1 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 4, i1 false) ret void } @@ -143,6 +143,6 @@ define void @test_distant_memcpy(i8* %dst) { ; ARM64: strb [[BYTE]], [x0] %array = alloca i8, i32 8192 %elem = getelementptr i8, i8* %array, i32 8000 - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %elem, i64 1, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %elem, i64 1, i1 false) ret void } diff --git a/llvm/test/CodeGen/AArch64/arm64-memcpy-inline.ll b/llvm/test/CodeGen/AArch64/arm64-memcpy-inline.ll index 0590031fbcd..4f8f3a227bb 100644 --- a/llvm/test/CodeGen/AArch64/arm64-memcpy-inline.ll +++ b/llvm/test/CodeGen/AArch64/arm64-memcpy-inline.ll @@ -22,7 +22,7 @@ entry: ; CHECK: strh [[REG1]], [x[[BASEREG2]], #8] ; CHECK: ldr [[REG2:x[0-9]+]], ; CHECK: str [[REG2]], - call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds (%struct.x, %struct.x* @dst, i32 0, i32 0), i8* getelementptr inbounds (%struct.x, %struct.x* @src, i32 0, i32 0), i32 11, i32 8, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 getelementptr inbounds (%struct.x, %struct.x* @dst, i32 0, i32 0), i8* align 8 getelementptr inbounds (%struct.x, %struct.x* @src, i32 0, i32 0), i32 11, i1 false) ret i32 0 } @@ -33,7 +33,7 @@ entry: ; CHECK: stur [[DEST]], [x0, #15] ; CHECK: ldr [[DEST:q[0-9]+]], [x[[BASEREG]]] ; CHECK: str [[DEST]], [x0] - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([31 x i8], [31 x i8]* @.str1, i64 0, i64 0), i64 31, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([31 x i8], [31 x i8]* @.str1, i64 0, i64 0), i64 31, i1 false) ret void } @@ -45,7 +45,7 @@ entry: ; CHECK: str [[REG3]], [x0, #32] ; CHECK: ldp [[DEST1:q[0-9]+]], [[DEST2:q[0-9]+]], [x{{[0-9]+}}] ; CHECK: stp [[DEST1]], [[DEST2]], [x0] - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([36 x i8], [36 x i8]* @.str2, i64 0, i64 0), i64 36, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([36 x i8], [36 x i8]* @.str2, i64 0, i64 0), i64 36, i1 false) ret void } @@ -56,7 +56,7 @@ entry: ; CHECK: str [[REG4]], [x0, #16] ; CHECK: ldr [[DEST:q[0-9]+]], [x[[BASEREG]]] ; CHECK: str [[DEST]], [x0] 
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([24 x i8], [24 x i8]* @.str3, i64 0, i64 0), i64 24, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([24 x i8], [24 x i8]* @.str3, i64 0, i64 0), i64 24, i1 false) ret void } @@ -67,7 +67,7 @@ entry: ; CHECK: strh [[REG5]], [x0, #16] ; CHECK: ldr [[REG6:q[0-9]+]], [x{{[0-9]+}}] ; CHECK: str [[REG6]], [x0] - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([18 x i8], [18 x i8]* @.str4, i64 0, i64 0), i64 18, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([18 x i8], [18 x i8]* @.str4, i64 0, i64 0), i64 18, i1 false) ret void } @@ -80,7 +80,7 @@ entry: ; CHECK: mov [[REG8:w[0-9]+]], ; CHECK: movk [[REG8]], ; CHECK: str [[REG8]], [x0] - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str5, i64 0, i64 0), i64 7, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str5, i64 0, i64 0), i64 7, i1 false) ret void } @@ -91,7 +91,7 @@ entry: ; CHECK: stur [[REG9]], [x{{[0-9]+}}, #6] ; CHECK: ldr ; CHECK: str - call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([512 x i8], [512 x i8]* @spool.splbuf, i64 0, i64 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str6, i64 0, i64 0), i64 14, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([512 x i8], [512 x i8]* @spool.splbuf, i64 0, i64 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str6, i64 0, i64 0), i64 14, i1 false) ret void } @@ -104,9 +104,9 @@ entry: ; CHECK: str [[REG10]], [x0] %0 = bitcast %struct.Foo* %a to i8* %1 = bitcast %struct.Foo* %b to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 16, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 16, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind diff --git a/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll b/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll index 8c872cc6150..ecdfcc6673a 100644 --- a/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll +++ b/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll @@ -5,7 +5,7 @@ entry: ; CHECK-LABEL: t1: ; CHECK: str wzr, [x0, #8] ; CHECK: str xzr, [x0] - call void @llvm.memset.p0i8.i64(i8* %c, i8 0, i64 12, i32 8, i1 false) + call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 12, i1 false) ret void } @@ -17,11 +17,11 @@ entry: ; CHECK: str xzr, [sp, #8] %buf = alloca [26 x i8], align 1 %0 = getelementptr inbounds [26 x i8], [26 x i8]* %buf, i32 0, i32 0 - call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 26, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 26, i1 false) call void @something(i8* %0) nounwind ret void } declare void @something(i8*) nounwind -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) 
nounwind diff --git a/llvm/test/CodeGen/AArch64/arm64-memset-to-bzero.ll b/llvm/test/CodeGen/AArch64/arm64-memset-to-bzero.ll index 3466e1bace5..87a0232c734 100644 --- a/llvm/test/CodeGen/AArch64/arm64-memset-to-bzero.ll +++ b/llvm/test/CodeGen/AArch64/arm64-memset-to-bzero.ll @@ -10,11 +10,11 @@ ; CHECK-LINUX: {{b|bl}} memset define void @fct1(i8* nocapture %ptr) { entry: - tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 256, i32 1, i1 false) + tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 256, i1 false) ret void } -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) ; CHECK-LABEL: fct2: ; When the size is bigger than 256, change into bzero. @@ -22,7 +22,7 @@ declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) ; CHECK-LINUX: {{b|bl}} memset define void @fct2(i8* nocapture %ptr) { entry: - tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 257, i32 1, i1 false) + tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 257, i1 false) ret void } @@ -33,7 +33,7 @@ entry: define void @fct3(i8* nocapture %ptr, i32 %unknown) { entry: %conv = sext i32 %unknown to i64 - tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 %conv, i32 1, i1 false) + tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 %conv, i1 false) ret void } diff --git a/llvm/test/CodeGen/AArch64/arm64-misaligned-memcpy-inline.ll b/llvm/test/CodeGen/AArch64/arm64-misaligned-memcpy-inline.ll index 85572f2cf0f..7ecf214b4be 100644 --- a/llvm/test/CodeGen/AArch64/arm64-misaligned-memcpy-inline.ll +++ b/llvm/test/CodeGen/AArch64/arm64-misaligned-memcpy-inline.ll @@ -7,8 +7,8 @@ define void @t0(i8* %out, i8* %in) { ; CHECK: orr w2, wzr, #0x10 ; CHECK-NEXT: bl _memcpy entry: - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %out, i8* %in, i64 16, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %out, i8* %in, i64 16, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) diff --git a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll index 07df9cb32db..f0b9ccc8b5d 100644 --- a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll +++ b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll @@ -26,9 +26,9 @@ entry: %yy = alloca i32, align 4 store i32 0, i32* %retval %0 = bitcast [8 x i32]* %x to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast ([8 x i32]* @main.x to i8*), i64 32, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 bitcast ([8 x i32]* @main.x to i8*), i64 32, i1 false) %1 = bitcast [8 x i32]* %y to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ([8 x i32]* @main.y to i8*), i64 32, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 bitcast ([8 x i32]* @main.y to i8*), i64 32, i1 false) store i32 0, i32* %xx, align 4 store i32 0, i32* %yy, align 4 store i32 0, i32* %i, align 4 @@ -105,7 +105,7 @@ define <4 x float> @neon4xfloat(<4 x float> %A, <4 x float> %B) { } ; Function Attrs: nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1 attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" 
"no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { nounwind } diff --git a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll index 711d2f7397b..c2f53e88a95 100644 --- a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll +++ b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll @@ -32,9 +32,9 @@ entry: %yy = alloca i32, align 4 store i32 0, i32* %retval %0 = bitcast [8 x i32]* %x to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast ([8 x i32]* @main.x to i8*), i64 32, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 bitcast ([8 x i32]* @main.x to i8*), i64 32, i1 false) %1 = bitcast [8 x i32]* %y to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ([8 x i32]* @main.y to i8*), i64 32, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 bitcast ([8 x i32]* @main.y to i8*), i64 32, i1 false) store i32 0, i32* %xx, align 4 store i32 0, i32* %yy, align 4 store i32 0, i32* %i, align 4 @@ -106,7 +106,7 @@ for.end: ; preds = %for.cond ; Function Attrs: nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1 attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { nounwind } diff --git a/llvm/test/CodeGen/AArch64/arm64-stur.ll b/llvm/test/CodeGen/AArch64/arm64-stur.ll index 4a3229a39b5..8e0736c4fba 100644 --- a/llvm/test/CodeGen/AArch64/arm64-stur.ll +++ b/llvm/test/CodeGen/AArch64/arm64-stur.ll @@ -55,11 +55,11 @@ define void @foo(%struct.X* nocapture %p) nounwind optsize ssp { ; CHECK-NEXT: ret %B = getelementptr inbounds %struct.X, %struct.X* %p, i64 0, i32 1 %val = bitcast i64* %B to i8* - call void @llvm.memset.p0i8.i64(i8* %val, i8 0, i64 16, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %val, i8 0, i64 16, i1 false) ret void } -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind ; Unaligned 16b stores are split into 8b stores for performance. 
; radar://15424193 diff --git a/llvm/test/CodeGen/AArch64/arm64-virtual_base.ll b/llvm/test/CodeGen/AArch64/arm64-virtual_base.ll index 4ecfde4f83e..4ce0d2f0007 100644 --- a/llvm/test/CodeGen/AArch64/arm64-virtual_base.ll +++ b/llvm/test/CodeGen/AArch64/arm64-virtual_base.ll @@ -43,9 +43,9 @@ entry: %tmp14 = bitcast double* %arraydecay5.3.1 to i8* %arraydecay11.3.1 = getelementptr inbounds %struct.Bicubic_Patch_Struct, %struct.Bicubic_Patch_Struct* %Shape, i64 0, i32 12, i64 1, i64 3, i64 0 %tmp15 = bitcast double* %arraydecay11.3.1 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp14, i8* %tmp15, i64 24, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp14, i8* %tmp15, i64 24, i1 false) ret void } ; Function Attrs: nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) diff --git a/llvm/test/CodeGen/AArch64/fast-isel-memcpy.ll b/llvm/test/CodeGen/AArch64/fast-isel-memcpy.ll index 07595a954db..290e0c918ad 100644 --- a/llvm/test/CodeGen/AArch64/fast-isel-memcpy.ll +++ b/llvm/test/CodeGen/AArch64/fast-isel-memcpy.ll @@ -8,8 +8,8 @@ define void @test(i64 %a, i8* %b) { %1 = and i64 %a, 9223372036854775807 %2 = inttoptr i64 %1 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %b, i64 8, i32 8, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %2, i8* align 8 %b, i64 8, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1) diff --git a/llvm/test/CodeGen/AArch64/func-argpassing.ll b/llvm/test/CodeGen/AArch64/func-argpassing.ll index cf6545dab38..824a1893940 100644 --- a/llvm/test/CodeGen/AArch64/func-argpassing.ll +++ b/llvm/test/CodeGen/AArch64/func-argpassing.ll @@ -186,11 +186,11 @@ define void @check_i128_stackalign(i32 %val0, i32 %val1, i32 %val2, i32 %val3, ret void } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i1) define i32 @test_extern() { ; CHECK-LABEL: test_extern: - call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* undef, i32 undef, i32 4, i1 0) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 undef, i8* align 4 undef, i32 undef, i1 0) ; CHECK: bl memcpy ret i32 0 } diff --git a/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll b/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll index 35117a147ee..951bd4ada3c 100644 --- a/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll +++ b/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll @@ -115,11 +115,11 @@ entry: %C = getelementptr inbounds [12 x i8], [12 x i8]* %a2, i64 0, i64 4 %1 = bitcast i8* %C to i64* store i64 0, i64* %1, align 4 - call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 8, i32 8, i1 false) + call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 8, i1 false) ret void } -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll b/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll index 9c698b5fdcc..0f8ffb50c8d 100644 --- a/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll +++ b/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll @@ -5,7 +5,7 @@ target triple = "aarch64--linux-gnu" declare void @f(i8*, i8*) declare void @f2(i8*, i8*) declare void @_Z5setupv() -declare void @llvm.memset.p0i8.i64(i8* 
nocapture, i8, i64, i32, i1) #3 +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #3 define i32 @main() local_unnamed_addr #1 { ; Make sure the stores happen in the correct order (the exact instructions could change). @@ -24,7 +24,7 @@ for.body.lr.ph.i.i.i.i.i.i63: tail call void @_Z5setupv() %x2 = getelementptr inbounds [10 x i32], [10 x i32]* %b1, i64 0, i64 6 %x3 = bitcast i32* %x2 to i8* - call void @llvm.memset.p0i8.i64(i8* %x3, i8 0, i64 16, i32 8, i1 false) + call void @llvm.memset.p0i8.i64(i8* align 8 %x3, i8 0, i64 16, i1 false) %arraydecay2 = getelementptr inbounds [10 x i32], [10 x i32]* %b1, i64 0, i64 0 %x4 = bitcast [10 x i32]* %b1 to <4 x i32>* store <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32>* %x4, align 16 diff --git a/llvm/test/CodeGen/AArch64/ldst-zero.ll b/llvm/test/CodeGen/AArch64/ldst-zero.ll index 7d443a631f9..0ada6fd84cb 100644 --- a/llvm/test/CodeGen/AArch64/ldst-zero.ll +++ b/llvm/test/CodeGen/AArch64/ldst-zero.ll @@ -3,7 +3,7 @@ ; Tests to check that zero stores which are generated as STP xzr, xzr aren't ; scheduled incorrectly due to incorrect alias information -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) %struct.tree_common = type { i8*, i8*, i32 } ; Original test case which exhibited the bug @@ -14,7 +14,7 @@ define void @test1(%struct.tree_common* %t, i32 %code, i8* %type) { ; CHECK-DAG: str xzr, [x0] entry: %0 = bitcast %struct.tree_common* %t to i8* - tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 24, i32 8, i1 false) + tail call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 24, i1 false) %code1 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 2 store i32 %code, i32* %code1, align 8 %type2 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 1 diff --git a/llvm/test/CodeGen/AArch64/machine-combiner-madd.ll b/llvm/test/CodeGen/AArch64/machine-combiner-madd.ll index 4efe4e9cfb0..5ace6e63136 100644 --- a/llvm/test/CodeGen/AArch64/machine-combiner-madd.ll +++ b/llvm/test/CodeGen/AArch64/machine-combiner-madd.ll @@ -19,7 +19,7 @@ %class.D = type { %class.basic_string.base, [4 x i8] } %class.basic_string.base = type <{ i64, i64, i32 }> @a = global %class.D* zeroinitializer, align 8 -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) define internal void @fun() section ".text.startup" { entry: %tmp.i.i = alloca %class.D, align 8 @@ -31,7 +31,7 @@ loop: %x = load %class.D*, %class.D** getelementptr inbounds (%class.D*, %class.D** @a, i64 0), align 8 %arrayidx.i.i.i = getelementptr inbounds %class.D, %class.D* %x, i64 %conv11.i.i %d = bitcast %class.D* %arrayidx.i.i.i to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull %y, i8* %d, i64 24, i32 8, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 nonnull %y, i8* align 8 %d, i64 24, i1 false) %inc.i.i = add i64 %i, 1 %cmp.i.i = icmp slt i64 %inc.i.i, 0 br i1 %cmp.i.i, label %loop, label %exit diff --git a/llvm/test/CodeGen/AArch64/memcpy-f128.ll b/llvm/test/CodeGen/AArch64/memcpy-f128.ll index 7e6ec36104a..8b91b843108 100644 --- a/llvm/test/CodeGen/AArch64/memcpy-f128.ll +++ b/llvm/test/CodeGen/AArch64/memcpy-f128.ll @@ -12,8 +12,8 @@ define void @test1() { ; CHECK: str q0 ; CHECK: ret entry: - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* undef, i8* bitcast (%structA* 
@stubA to i8*), i64 48, i32 8, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 undef, i8* align 8 bitcast (%structA* @stubA to i8*), i64 48, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) diff --git a/llvm/test/CodeGen/AArch64/merge-store-dependency.ll b/llvm/test/CodeGen/AArch64/merge-store-dependency.ll index 4f2af9ed7e6..5bed63ef895 100644 --- a/llvm/test/CodeGen/AArch64/merge-store-dependency.ll +++ b/llvm/test/CodeGen/AArch64/merge-store-dependency.ll @@ -14,7 +14,7 @@ entry: ; A53: str [[DATA]], {{.*}} %0 = bitcast %struct1* %fde to i8* - tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 40, i32 8, i1 false) + tail call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 40, i1 false) %state = getelementptr inbounds %struct1, %struct1* %fde, i64 0, i32 4 store i16 256, i16* %state, align 8 %fd1 = getelementptr inbounds %struct1, %struct1* %fde, i64 0, i32 2 @@ -58,6 +58,6 @@ exit: ret void } -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) declare i32 @fcntl(i32, i32, ...) declare noalias i8* @foo() diff --git a/llvm/test/CodeGen/AArch64/mergestores_noimplicitfloat.ll b/llvm/test/CodeGen/AArch64/mergestores_noimplicitfloat.ll index 74aeaf75d03..cd64ae11550 100644 --- a/llvm/test/CodeGen/AArch64/mergestores_noimplicitfloat.ll +++ b/llvm/test/CodeGen/AArch64/mergestores_noimplicitfloat.ll @@ -16,8 +16,8 @@ target triple = "arm64-apple-ios10.0.0" ; CHECK-DAG: str [[R3]], [x0, #24] define void @pr33475(i8* %p0, i8* %p1) noimplicitfloat { - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p0, i8* %p1, i64 32, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %p0, i8* align 4 %p1, i64 32, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1) diff --git a/llvm/test/CodeGen/AArch64/misched-stp.ll b/llvm/test/CodeGen/AArch64/misched-stp.ll index 1c9ea68834c..1afec40f192 100644 --- a/llvm/test/CodeGen/AArch64/misched-stp.ll +++ b/llvm/test/CodeGen/AArch64/misched-stp.ll @@ -30,7 +30,7 @@ entry: ret void } -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) %struct.tree_common = type { i8*, i8*, i32 } ; CHECK-LABEL: test_zero @@ -41,7 +41,7 @@ declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) define void @test_zero(%struct.tree_common* %t, i32 %code, i8* %type) { entry: %0 = bitcast %struct.tree_common* %t to i8* - tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 24, i32 8, i1 false) + tail call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 24, i1 false) %code1 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 2 store i32 %code, i32* %code1, align 8 %type2 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 1 diff --git a/llvm/test/CodeGen/AArch64/pr33172.ll b/llvm/test/CodeGen/AArch64/pr33172.ll index 1e1da78b28f..098d5358b02 100644 --- a/llvm/test/CodeGen/AArch64/pr33172.ll +++ b/llvm/test/CodeGen/AArch64/pr33172.ll @@ -21,12 +21,12 @@ entry: %wide.load8291059.4 = load i64, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.b, i64 0, i64 18) to i64*), align 8 store i64 %wide.load8281058.4, i64* bitcast (float* getelementptr 
inbounds ([200 x float], [200 x float]* @main.x, i64 0, i64 16) to i64*), align 8 store i64 %wide.load8291059.4, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.x, i64 0, i64 18) to i64*), align 8 - tail call void @llvm.memset.p0i8.i64(i8* bitcast ([200 x float]* @main.b to i8*), i8 0, i64 undef, i32 8, i1 false) #2 + tail call void @llvm.memset.p0i8.i64(i8* align 8 bitcast ([200 x float]* @main.b to i8*), i8 0, i64 undef, i1 false) #2 unreachable } ; Function Attrs: argmemonly nounwind -declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i32, i1) #1 +declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #1 attributes #1 = { argmemonly nounwind } attributes #2 = { nounwind } diff --git a/llvm/test/CodeGen/AArch64/tailcall-mem-intrinsics.ll b/llvm/test/CodeGen/AArch64/tailcall-mem-intrinsics.ll index b970fb12415..c780d15b58d 100644 --- a/llvm/test/CodeGen/AArch64/tailcall-mem-intrinsics.ll +++ b/llvm/test/CodeGen/AArch64/tailcall-mem-intrinsics.ll @@ -4,7 +4,7 @@ ; CHECK: b memcpy define void @tail_memcpy(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 { entry: - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false) ret void } @@ -12,7 +12,7 @@ entry: ; CHECK: b memmove define void @tail_memmove(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 { entry: - tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false) + tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false) ret void } @@ -20,12 +20,12 @@ entry: ; CHECK: b memset define void @tail_memset(i8* nocapture %p, i8 %c, i32 %n) #0 { entry: - tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i32 1, i1 false) + tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #0 -declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #0 -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) #0 +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0 +declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0 +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) #0 attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/AArch64/tailcall-string-rvo.ll b/llvm/test/CodeGen/AArch64/tailcall-string-rvo.ll index bdc09235afd..d9d2180b5ef 100644 --- a/llvm/test/CodeGen/AArch64/tailcall-string-rvo.ll +++ b/llvm/test/CodeGen/AArch64/tailcall-string-rvo.ll @@ -32,7 +32,7 @@ bb: %tmp1 = bitcast %class.basic_string.11.42.73* %arg to %union.anon.8.39.70** store %union.anon.8.39.70* %tmp, %union.anon.8.39.70** %tmp1, align 8 %tmp2 = bitcast %union.anon.8.39.70* %tmp to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp2, i8* nonnull undef, i64 13, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp2, i8* nonnull undef, i64 13, i1 false) %tmp3 = getelementptr inbounds %class.basic_string.11.42.73, %class.basic_string.11.42.73* %arg, i64 0, i32 0, i32 0, i32 1 store i64 13, i64* %tmp3, align 8 %tmp4 = getelementptr inbounds %class.basic_string.11.42.73, %class.basic_string.11.42.73* %arg, i64 0, i32 0, i32 0, i32 2, i32 1, i64 5 @@ -42,6 +42,6 @@ bb: } ; Function Attrs: argmemonly nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* 
nocapture readonly, i64, i32, i1) #0 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #0 attributes #0 = { argmemonly nounwind } diff --git a/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll b/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll index 8cabc7dae13..b40fcb3e492 100644 --- a/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll +++ b/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll @@ -1,6 +1,6 @@ ; RUN: opt -mtriple=amdgcn-unknown-amdhsa -S -amdgpu-annotate-kernel-features < %s | FileCheck -check-prefix=HSA %s -declare void @llvm.memcpy.p1i32.p4i32.i32(i32 addrspace(1)* nocapture, i32 addrspace(4)* nocapture, i32, i32, i1) #0 +declare void @llvm.memcpy.p1i32.p4i32.i32(i32 addrspace(1)* nocapture, i32 addrspace(4)* nocapture, i32, i1) #0 @lds.i32 = unnamed_addr addrspace(3) global i32 undef, align 4 @lds.arr = unnamed_addr addrspace(3) global [256 x i32] undef, align 4 @@ -68,7 +68,7 @@ define amdgpu_kernel void @cmpxchg_constant_cast_group_gv_gep_to_flat(i32 addrsp ; HSA: @memcpy_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #2 define amdgpu_kernel void @memcpy_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 { - call void @llvm.memcpy.p1i32.p4i32.i32(i32 addrspace(1)* %out, i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8), i32 32, i32 4, i1 false) + call void @llvm.memcpy.p1i32.p4i32.i32(i32 addrspace(1)* align 4 %out, i32 addrspace(4)* align 4 getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8), i32 32, i1 false) ret void } diff --git a/llvm/test/CodeGen/AMDGPU/lds-alignment.ll b/llvm/test/CodeGen/AMDGPU/lds-alignment.ll index c23dea2b6b7..84c8d9b778c 100644 --- a/llvm/test/CodeGen/AMDGPU/lds-alignment.ll +++ b/llvm/test/CodeGen/AMDGPU/lds-alignment.ll @@ -9,16 +9,16 @@ @lds.missing.align.0 = internal unnamed_addr addrspace(3) global [39 x i32] undef @lds.missing.align.1 = internal unnamed_addr addrspace(3) global [7 x i64] undef -declare void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* nocapture, i8 addrspace(1)* nocapture readonly, i32, i32, i1) #0 -declare void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* nocapture, i8 addrspace(3)* nocapture readonly, i32, i32, i1) #0 +declare void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* nocapture, i8 addrspace(1)* nocapture readonly, i32, i1) #0 +declare void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* nocapture, i8 addrspace(3)* nocapture readonly, i32, i1) #0 ; HSA-LABEL: {{^}}test_no_round_size_1: ; HSA: workgroup_group_segment_byte_size = 38 define amdgpu_kernel void @test_no_round_size_1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 { %lds.align16.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align16.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align16.0.bc, i8 addrspace(1)* %in, i32 38, i32 4, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align16.0.bc, i32 38, i32 4, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 4 %lds.align16.0.bc, i8 addrspace(1)* align 4 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 4 %out, i8 addrspace(3)* align 4 %lds.align16.0.bc, i32 38, i1 false) ret void } @@ -36,12 +36,12 @@ define amdgpu_kernel void @test_no_round_size_1(i8 addrspace(1)* %out, i8 addrsp ; HSA: 
group_segment_alignment = 4 define amdgpu_kernel void @test_round_size_2(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 { %lds.align16.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align16.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align16.0.bc, i8 addrspace(1)* %in, i32 38, i32 4, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align16.0.bc, i32 38, i32 4, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 4 %lds.align16.0.bc, i8 addrspace(1)* align 4 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 4 %out, i8 addrspace(3)* align 4 %lds.align16.0.bc, i32 38, i1 false) %lds.align16.1.bc = bitcast [38 x i8] addrspace(3)* @lds.align16.1 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align16.1.bc, i8 addrspace(1)* %in, i32 38, i32 4, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align16.1.bc, i32 38, i32 4, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 4 %lds.align16.1.bc, i8 addrspace(1)* align 4 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 4 %out, i8 addrspace(3)* align 4 %lds.align16.1.bc, i32 38, i1 false) ret void } @@ -52,12 +52,12 @@ define amdgpu_kernel void @test_round_size_2(i8 addrspace(1)* %out, i8 addrspace ; HSA: group_segment_alignment = 4 define amdgpu_kernel void @test_round_size_2_align_8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 { %lds.align16.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align16.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align16.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align16.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align16.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.align16.0.bc, i32 38, i1 false) %lds.align8.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align8.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align8.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align8.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align8.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.align8.0.bc, i32 38, i1 false) ret void } @@ -67,11 +67,11 @@ define amdgpu_kernel void @test_round_size_2_align_8(i8 addrspace(1)* %out, i8 a ; HSA: group_segment_alignment = 4 define amdgpu_kernel void @test_round_local_lds_and_arg(i8 addrspace(1)* %out, i8 addrspace(1)* %in, i8 addrspace(3)* %lds.arg) #1 { %lds.align16.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align16.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align16.0.bc, i8 addrspace(1)* %in, i32 38, i32 4, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 4 %lds.align16.0.bc, i8 addrspace(1)* align 4 %in, i32 38, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align16.0.bc, i32 38, i32 4, i1 false) - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* 
%lds.arg, i8 addrspace(1)* %in, i32 38, i32 4, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.arg, i32 38, i32 4, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 4 %out, i8 addrspace(3)* align 4 %lds.align16.0.bc, i32 38, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 4 %lds.arg, i8 addrspace(1)* align 4 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 4 %out, i8 addrspace(3)* align 4 %lds.arg, i32 38, i1 false) ret void } @@ -79,8 +79,8 @@ define amdgpu_kernel void @test_round_local_lds_and_arg(i8 addrspace(1)* %out, i ; HSA: workgroup_group_segment_byte_size = 0 ; HSA: group_segment_alignment = 4 define amdgpu_kernel void @test_round_lds_arg(i8 addrspace(1)* %out, i8 addrspace(1)* %in, i8 addrspace(3)* %lds.arg) #1 { - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.arg, i8 addrspace(1)* %in, i32 38, i32 4, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.arg, i32 38, i32 4, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 4 %lds.arg, i8 addrspace(1)* align 4 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 4 %out, i8 addrspace(3)* align 4 %lds.arg, i32 38, i1 false) ret void } @@ -89,8 +89,8 @@ define amdgpu_kernel void @test_round_lds_arg(i8 addrspace(1)* %out, i8 addrspac ; HSA: workgroup_group_segment_byte_size = 0 ; HSA: group_segment_alignment = 4 define amdgpu_kernel void @test_high_align_lds_arg(i8 addrspace(1)* %out, i8 addrspace(1)* %in, i8 addrspace(3)* align 64 %lds.arg) #1 { - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.arg, i8 addrspace(1)* %in, i32 38, i32 64, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.arg, i32 38, i32 64, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 64 %lds.arg, i8 addrspace(1)* align 64 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 64 %out, i8 addrspace(3)* align 64 %lds.arg, i32 38, i1 false) ret void } @@ -100,12 +100,12 @@ define amdgpu_kernel void @test_high_align_lds_arg(i8 addrspace(1)* %out, i8 add ; HSA: group_segment_alignment = 4 define amdgpu_kernel void @test_missing_alignment_size_2_order0(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 { %lds.missing.align.0.bc = bitcast [39 x i32] addrspace(3)* @lds.missing.align.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.missing.align.0.bc, i8 addrspace(1)* %in, i32 160, i32 4, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.missing.align.0.bc, i32 160, i32 4, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 4 %lds.missing.align.0.bc, i8 addrspace(1)* align 4 %in, i32 160, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 4 %out, i8 addrspace(3)* align 4 %lds.missing.align.0.bc, i32 160, i1 false) %lds.missing.align.1.bc = bitcast [7 x i64] addrspace(3)* @lds.missing.align.1 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.missing.align.1.bc, i8 addrspace(1)* %in, i32 56, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.missing.align.1.bc, i32 56, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.missing.align.1.bc, i8 addrspace(1)* align 8 %in, i32 56, i1 false) + call void 
@llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.missing.align.1.bc, i32 56, i1 false) ret void } @@ -116,12 +116,12 @@ define amdgpu_kernel void @test_missing_alignment_size_2_order0(i8 addrspace(1)* ; HSA: group_segment_alignment = 4 define amdgpu_kernel void @test_missing_alignment_size_2_order1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 { %lds.missing.align.1.bc = bitcast [7 x i64] addrspace(3)* @lds.missing.align.1 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.missing.align.1.bc, i8 addrspace(1)* %in, i32 56, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.missing.align.1.bc, i32 56, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.missing.align.1.bc, i8 addrspace(1)* align 8 %in, i32 56, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.missing.align.1.bc, i32 56, i1 false) %lds.missing.align.0.bc = bitcast [39 x i32] addrspace(3)* @lds.missing.align.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.missing.align.0.bc, i8 addrspace(1)* %in, i32 160, i32 4, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.missing.align.0.bc, i32 160, i32 4, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 4 %lds.missing.align.0.bc, i8 addrspace(1)* align 4 %in, i32 160, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 4 %out, i8 addrspace(3)* align 4 %lds.missing.align.0.bc, i32 160, i1 false) ret void } @@ -144,16 +144,16 @@ define amdgpu_kernel void @test_missing_alignment_size_2_order1(i8 addrspace(1)* ; HSA: group_segment_alignment = 4 define amdgpu_kernel void @test_round_size_3_order0(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 { %lds.align32.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align32.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align32.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align32.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align32.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.align32.0.bc, i32 38, i1 false) %lds.align16.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align16.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align16.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align16.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align16.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.align16.0.bc, i32 38, i1 false) %lds.align8.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align8.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align8.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align8.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align8.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) 
+ call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.align8.0.bc, i32 38, i1 false) ret void } @@ -165,16 +165,16 @@ define amdgpu_kernel void @test_round_size_3_order0(i8 addrspace(1)* %out, i8 ad ; HSA: group_segment_alignment = 4 define amdgpu_kernel void @test_round_size_3_order1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 { %lds.align32.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align32.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align32.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align32.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align32.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.align32.0.bc, i32 38, i1 false) %lds.align8.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align8.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align8.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align8.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align8.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.align8.0.bc, i32 38, i1 false) %lds.align16.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align16.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align16.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align16.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align16.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.align16.0.bc, i32 38, i1 false) ret void } @@ -186,16 +186,16 @@ define amdgpu_kernel void @test_round_size_3_order1(i8 addrspace(1)* %out, i8 ad ; HSA: group_segment_alignment = 4 define amdgpu_kernel void @test_round_size_3_order2(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 { %lds.align16.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align16.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align16.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align16.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align16.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.align16.0.bc, i32 38, i1 false) %lds.align32.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align32.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align32.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align32.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align32.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 
addrspace(3)* align 8 %lds.align32.0.bc, i32 38, i1 false) %lds.align8.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align8.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align8.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align8.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align8.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.align8.0.bc, i32 38, i1 false) ret void } @@ -207,16 +207,16 @@ define amdgpu_kernel void @test_round_size_3_order2(i8 addrspace(1)* %out, i8 ad ; HSA: group_segment_alignment = 4 define amdgpu_kernel void @test_round_size_3_order3(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 { %lds.align16.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align16.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align16.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align16.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align16.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.align16.0.bc, i32 38, i1 false) %lds.align8.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align8.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align8.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align8.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align8.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.align8.0.bc, i32 38, i1 false) %lds.align32.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align32.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align32.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align32.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align32.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.align32.0.bc, i32 38, i1 false) ret void } @@ -228,16 +228,16 @@ define amdgpu_kernel void @test_round_size_3_order3(i8 addrspace(1)* %out, i8 ad ; HSA: group_segment_alignment = 4 define amdgpu_kernel void @test_round_size_3_order4(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 { %lds.align8.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align8.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align8.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align8.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align8.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.align8.0.bc, i32 38, i1 false) %lds.align32.0.bc = bitcast [38 x 
i8] addrspace(3)* @lds.align32.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align32.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align32.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align32.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.align32.0.bc, i32 38, i1 false) %lds.align16.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align16.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align16.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align16.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align16.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.align16.0.bc, i32 38, i1 false) ret void } @@ -249,16 +249,16 @@ define amdgpu_kernel void @test_round_size_3_order4(i8 addrspace(1)* %out, i8 ad ; HSA: group_segment_alignment = 4 define amdgpu_kernel void @test_round_size_3_order5(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 { %lds.align8.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align8.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align8.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align8.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align8.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.align8.0.bc, i32 38, i1 false) %lds.align16.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align16.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align16.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align16.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align16.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.align16.0.bc, i32 38, i1 false) %lds.align32.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align32.0 to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %lds.align32.0.bc, i8 addrspace(1)* %in, i32 38, i32 8, i1 false) - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out, i8 addrspace(3)* %lds.align32.0.bc, i32 38, i32 8, i1 false) + call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 8 %lds.align32.0.bc, i8 addrspace(1)* align 8 %in, i32 38, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 8 %out, i8 addrspace(3)* align 8 %lds.align32.0.bc, i32 38, i1 false) ret void } diff --git a/llvm/test/CodeGen/AMDGPU/llvm.memcpy.ll b/llvm/test/CodeGen/AMDGPU/llvm.memcpy.ll index 4068c020e70..77eb4900ea5 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.memcpy.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.memcpy.ll @@ -1,9 +1,9 @@ ; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s 
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s -declare void @llvm.memcpy.p3i8.p3i8.i32(i8 addrspace(3)* nocapture, i8 addrspace(3)* nocapture, i32, i32, i1) nounwind -declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture, i64, i32, i1) nounwind -declare void @llvm.memcpy.p1i8.p2i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(2)* nocapture, i64, i32, i1) nounwind +declare void @llvm.memcpy.p3i8.p3i8.i32(i8 addrspace(3)* nocapture, i8 addrspace(3)* nocapture, i32, i1) nounwind +declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture, i64, i1) nounwind +declare void @llvm.memcpy.p1i8.p2i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(2)* nocapture, i64, i1) nounwind ; FUNC-LABEL: {{^}}test_small_memcpy_i64_lds_to_lds_align1: @@ -83,7 +83,7 @@ declare void @llvm.memcpy.p1i8.p2i8.i64(i8 addrspace(1)* nocapture, i8 addrspace define amdgpu_kernel void @test_small_memcpy_i64_lds_to_lds_align1(i64 addrspace(3)* noalias %out, i64 addrspace(3)* noalias %in) nounwind { %bcin = bitcast i64 addrspace(3)* %in to i8 addrspace(3)* %bcout = bitcast i64 addrspace(3)* %out to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p3i8.i32(i8 addrspace(3)* %bcout, i8 addrspace(3)* %bcin, i32 32, i32 1, i1 false) nounwind + call void @llvm.memcpy.p3i8.p3i8.i32(i8 addrspace(3)* %bcout, i8 addrspace(3)* %bcin, i32 32, i1 false) nounwind ret void } @@ -128,7 +128,7 @@ define amdgpu_kernel void @test_small_memcpy_i64_lds_to_lds_align1(i64 addrspace define amdgpu_kernel void @test_small_memcpy_i64_lds_to_lds_align2(i64 addrspace(3)* noalias %out, i64 addrspace(3)* noalias %in) nounwind { %bcin = bitcast i64 addrspace(3)* %in to i8 addrspace(3)* %bcout = bitcast i64 addrspace(3)* %out to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p3i8.i32(i8 addrspace(3)* %bcout, i8 addrspace(3)* %bcin, i32 32, i32 2, i1 false) nounwind + call void @llvm.memcpy.p3i8.p3i8.i32(i8 addrspace(3)* align 2 %bcout, i8 addrspace(3)* align 2 %bcin, i32 32, i1 false) nounwind ret void } @@ -147,7 +147,7 @@ define amdgpu_kernel void @test_small_memcpy_i64_lds_to_lds_align2(i64 addrspace define amdgpu_kernel void @test_small_memcpy_i64_lds_to_lds_align4(i64 addrspace(3)* noalias %out, i64 addrspace(3)* noalias %in) nounwind { %bcin = bitcast i64 addrspace(3)* %in to i8 addrspace(3)* %bcout = bitcast i64 addrspace(3)* %out to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p3i8.i32(i8 addrspace(3)* %bcout, i8 addrspace(3)* %bcin, i32 32, i32 4, i1 false) nounwind + call void @llvm.memcpy.p3i8.p3i8.i32(i8 addrspace(3)* align 4 %bcout, i8 addrspace(3)* align 4 %bcin, i32 32, i1 false) nounwind ret void } @@ -164,7 +164,7 @@ define amdgpu_kernel void @test_small_memcpy_i64_lds_to_lds_align4(i64 addrspace define amdgpu_kernel void @test_small_memcpy_i64_lds_to_lds_align8(i64 addrspace(3)* noalias %out, i64 addrspace(3)* noalias %in) nounwind { %bcin = bitcast i64 addrspace(3)* %in to i8 addrspace(3)* %bcout = bitcast i64 addrspace(3)* %out to i8 addrspace(3)* - call void @llvm.memcpy.p3i8.p3i8.i32(i8 addrspace(3)* %bcout, i8 addrspace(3)* %bcin, i32 32, i32 8, i1 false) nounwind + call void @llvm.memcpy.p3i8.p3i8.i32(i8 addrspace(3)* align 8 %bcout, i8 addrspace(3)* align 8 %bcin, i32 32, i1 false) nounwind ret void } @@ -241,7 +241,7 @@ define amdgpu_kernel void @test_small_memcpy_i64_lds_to_lds_align8(i64 addrspace define amdgpu_kernel void 
@test_small_memcpy_i64_global_to_global_align1(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind { %bcin = bitcast i64 addrspace(1)* %in to i8 addrspace(1)* %bcout = bitcast i64 addrspace(1)* %out to i8 addrspace(1)* - call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %bcout, i8 addrspace(1)* %bcin, i64 32, i32 1, i1 false) nounwind + call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %bcout, i8 addrspace(1)* %bcin, i64 32, i1 false) nounwind ret void } @@ -284,7 +284,7 @@ define amdgpu_kernel void @test_small_memcpy_i64_global_to_global_align1(i64 add define amdgpu_kernel void @test_small_memcpy_i64_global_to_global_align2(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind { %bcin = bitcast i64 addrspace(1)* %in to i8 addrspace(1)* %bcout = bitcast i64 addrspace(1)* %out to i8 addrspace(1)* - call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %bcout, i8 addrspace(1)* %bcin, i64 32, i32 2, i1 false) nounwind + call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* align 2 %bcout, i8 addrspace(1)* align 2 %bcin, i64 32, i1 false) nounwind ret void } @@ -297,7 +297,7 @@ define amdgpu_kernel void @test_small_memcpy_i64_global_to_global_align2(i64 add define amdgpu_kernel void @test_small_memcpy_i64_global_to_global_align4(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind { %bcin = bitcast i64 addrspace(1)* %in to i8 addrspace(1)* %bcout = bitcast i64 addrspace(1)* %out to i8 addrspace(1)* - call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %bcout, i8 addrspace(1)* %bcin, i64 32, i32 4, i1 false) nounwind + call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* align 4 %bcout, i8 addrspace(1)* align 4 %bcin, i64 32, i1 false) nounwind ret void } @@ -310,7 +310,7 @@ define amdgpu_kernel void @test_small_memcpy_i64_global_to_global_align4(i64 add define amdgpu_kernel void @test_small_memcpy_i64_global_to_global_align8(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind { %bcin = bitcast i64 addrspace(1)* %in to i8 addrspace(1)* %bcout = bitcast i64 addrspace(1)* %out to i8 addrspace(1)* - call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %bcout, i8 addrspace(1)* %bcin, i64 32, i32 8, i1 false) nounwind + call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* align 8 %bcout, i8 addrspace(1)* align 8 %bcin, i64 32, i1 false) nounwind ret void } @@ -323,7 +323,7 @@ define amdgpu_kernel void @test_small_memcpy_i64_global_to_global_align8(i64 add define amdgpu_kernel void @test_small_memcpy_i64_global_to_global_align16(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind { %bcin = bitcast i64 addrspace(1)* %in to i8 addrspace(1)* %bcout = bitcast i64 addrspace(1)* %out to i8 addrspace(1)* - call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %bcout, i8 addrspace(1)* %bcin, i64 32, i32 16, i1 false) nounwind + call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* align 16 %bcout, i8 addrspace(1)* align 16 %bcin, i64 32, i1 false) nounwind ret void } @@ -342,7 +342,7 @@ define amdgpu_kernel void @test_small_memcpy_i64_global_to_global_align16(i64 ad ; SI-DAG: buffer_store_dwordx4 define amdgpu_kernel void @test_memcpy_const_string_align4(i8 addrspace(1)* noalias %out) nounwind { %str = bitcast [16 x i8] addrspace(2)* @hello.align4 to i8 addrspace(2)* - call void @llvm.memcpy.p1i8.p2i8.i64(i8 addrspace(1)* %out, i8 addrspace(2)* %str, i64 32, i32 4, i1 false) + call void @llvm.memcpy.p1i8.p2i8.i64(i8 addrspace(1)* align 4 %out, i8 addrspace(2)* align 4 %str, i64 32, i1 false) ret void 
} @@ -367,6 +367,6 @@ define amdgpu_kernel void @test_memcpy_const_string_align4(i8 addrspace(1)* noal ; SI: buffer_store_byte define amdgpu_kernel void @test_memcpy_const_string_align1(i8 addrspace(1)* noalias %out) nounwind { %str = bitcast [16 x i8] addrspace(2)* @hello.align1 to i8 addrspace(2)* - call void @llvm.memcpy.p1i8.p2i8.i64(i8 addrspace(1)* %out, i8 addrspace(2)* %str, i64 32, i32 1, i1 false) + call void @llvm.memcpy.p1i8.p2i8.i64(i8 addrspace(1)* %out, i8 addrspace(2)* %str, i64 32, i1 false) ret void } diff --git a/llvm/test/CodeGen/AMDGPU/lower-mem-intrinsics.ll b/llvm/test/CodeGen/AMDGPU/lower-mem-intrinsics.ll index 778467207a0..498a65dc0a6 100644 --- a/llvm/test/CodeGen/AMDGPU/lower-mem-intrinsics.ll +++ b/llvm/test/CodeGen/AMDGPU/lower-mem-intrinsics.ll @@ -1,16 +1,16 @@ ; RUN: opt -S -amdgpu-lower-intrinsics %s | FileCheck -check-prefix=OPT %s -declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture readonly, i64, i32, i1) #1 -declare void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* nocapture, i8 addrspace(3)* nocapture readonly, i32, i32, i1) #1 +declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture readonly, i64, i1) #1 +declare void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* nocapture, i8 addrspace(3)* nocapture readonly, i32, i1) #1 -declare void @llvm.memmove.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture readonly, i64, i32, i1) #1 -declare void @llvm.memset.p1i8.i64(i8 addrspace(1)* nocapture, i8, i64, i32, i1) #1 +declare void @llvm.memmove.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture readonly, i64, i1) #1 +declare void @llvm.memset.p1i8.i64(i8 addrspace(1)* nocapture, i8, i64, i1) #1 ; Test the upper bound for sizes to leave ; OPT-LABEL: @max_size_small_static_memcpy_caller0( -; OPT: call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 1024, i32 1, i1 false) +; OPT: call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 1024, i1 false) define amdgpu_kernel void @max_size_small_static_memcpy_caller0(i8 addrspace(1)* %dst, i8 addrspace(1)* %src) #0 { - call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 1024, i32 1, i1 false) + call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 1024, i1 false) ret void } @@ -26,14 +26,14 @@ define amdgpu_kernel void @max_size_small_static_memcpy_caller0(i8 addrspace(1)* ; OPT-NEXT: [[T5:%[0-9]+]] = icmp ult i64 [[T4]], 1025 ; OPT-NEXT: br i1 [[T5]], label %load-store-loop, label %memcpy-split define amdgpu_kernel void @min_size_large_static_memcpy_caller0(i8 addrspace(1)* %dst, i8 addrspace(1)* %src) #0 { - call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 1025, i32 1, i1 false) + call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 1025, i1 false) ret void } ; OPT-LABEL: @max_size_small_static_memmove_caller0( -; OPT: call void @llvm.memmove.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 1024, i32 1, i1 false) +; OPT: call void @llvm.memmove.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 1024, i1 false) define amdgpu_kernel void @max_size_small_static_memmove_caller0(i8 addrspace(1)* %dst, i8 addrspace(1)* %src) #0 { - call void @llvm.memmove.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 1024, i32 1, i1 false) + call void 
@llvm.memmove.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 1024, i1 false) ret void } @@ -44,14 +44,14 @@ define amdgpu_kernel void @max_size_small_static_memmove_caller0(i8 addrspace(1) ; OPT: getelementptr ; OPT-NEXT: store i8 define amdgpu_kernel void @min_size_large_static_memmove_caller0(i8 addrspace(1)* %dst, i8 addrspace(1)* %src) #0 { - call void @llvm.memmove.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 1025, i32 1, i1 false) + call void @llvm.memmove.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 1025, i1 false) ret void } ; OPT-LABEL: @max_size_small_static_memset_caller0( -; OPT: call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %dst, i8 %val, i64 1024, i32 1, i1 false) +; OPT: call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %dst, i8 %val, i64 1024, i1 false) define amdgpu_kernel void @max_size_small_static_memset_caller0(i8 addrspace(1)* %dst, i8 %val) #0 { - call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %dst, i8 %val, i64 1024, i32 1, i1 false) + call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %dst, i8 %val, i64 1024, i1 false) ret void } @@ -60,7 +60,7 @@ define amdgpu_kernel void @max_size_small_static_memset_caller0(i8 addrspace(1)* ; OPT: getelementptr ; OPT: store i8 define amdgpu_kernel void @min_size_large_static_memset_caller0(i8 addrspace(1)* %dst, i8 %val) #0 { - call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %dst, i8 %val, i64 1025, i32 1, i1 false) + call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %dst, i8 %val, i64 1025, i1 false) ret void } @@ -68,7 +68,7 @@ define amdgpu_kernel void @min_size_large_static_memset_caller0(i8 addrspace(1)* ; OPT-NOT: call ; OPT: phi define amdgpu_kernel void @variable_memcpy_caller0(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 %n) #0 { - call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 %n, i32 1, i1 false) + call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 %n, i1 false) ret void } @@ -76,7 +76,7 @@ define amdgpu_kernel void @variable_memcpy_caller0(i8 addrspace(1)* %dst, i8 add ; OPT-NOT: call ; OPT: phi define amdgpu_kernel void @variable_memcpy_caller1(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 %n) #0 { - call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 %n, i32 1, i1 false) + call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 %n, i1 false) ret void } @@ -87,8 +87,8 @@ define amdgpu_kernel void @variable_memcpy_caller1(i8 addrspace(1)* %dst, i8 add ; OPT: phi ; OPT-NOT: call define amdgpu_kernel void @memcpy_multi_use_one_function(i8 addrspace(1)* %dst0, i8 addrspace(1)* %dst1, i8 addrspace(1)* %src, i64 %n, i64 %m) #0 { - call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst0, i8 addrspace(1)* %src, i64 %n, i32 1, i1 false) - call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst1, i8 addrspace(1)* %src, i64 %m, i32 1, i1 false) + call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst0, i8 addrspace(1)* %src, i64 %n, i1 false) + call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst1, i8 addrspace(1)* %src, i64 %m, i1 false) ret void } @@ -99,7 +99,7 @@ define amdgpu_kernel void @memcpy_multi_use_one_function(i8 addrspace(1)* %dst0, ; OPT: getelementptr inbounds i8, i8 addrspace(1)* ; OPT: store i8 define amdgpu_kernel void @memcpy_alt_type(i8 addrspace(1)* %dst, i8 addrspace(3)* %src, i32 %n) #0 { - call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %dst, i8 addrspace(3)* %src, i32 %n, 
i32 1, i1 false) + call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %dst, i8 addrspace(3)* %src, i32 %n, i1 false) ret void } @@ -110,10 +110,10 @@ define amdgpu_kernel void @memcpy_alt_type(i8 addrspace(1)* %dst, i8 addrspace(3 ; OPT: getelementptr inbounds i8, i8 addrspace(1)* ; OPT: store i8 -; OPT: call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst1, i8 addrspace(1)* %src, i64 102, i32 1, i1 false) +; OPT: call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst1, i8 addrspace(1)* %src, i64 102, i1 false) define amdgpu_kernel void @memcpy_multi_use_one_function_keep_small(i8 addrspace(1)* %dst0, i8 addrspace(1)* %dst1, i8 addrspace(1)* %src, i64 %n) #0 { - call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst0, i8 addrspace(1)* %src, i64 %n, i32 1, i1 false) - call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst1, i8 addrspace(1)* %src, i64 102, i32 1, i1 false) + call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst0, i8 addrspace(1)* %src, i64 %n, i1 false) + call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst1, i8 addrspace(1)* %src, i64 102, i1 false) ret void } diff --git a/llvm/test/CodeGen/AMDGPU/promote-alloca-mem-intrinsics.ll b/llvm/test/CodeGen/AMDGPU/promote-alloca-mem-intrinsics.ll index 7343dd6bbda..fcf64ce8016 100644 --- a/llvm/test/CodeGen/AMDGPU/promote-alloca-mem-intrinsics.ll +++ b/llvm/test/CodeGen/AMDGPU/promote-alloca-mem-intrinsics.ll @@ -1,52 +1,52 @@ ; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -amdgpu-promote-alloca < %s | FileCheck %s -declare void @llvm.memcpy.p0i8.p1i8.i32(i8* nocapture, i8 addrspace(1)* nocapture, i32, i32, i1) #0 -declare void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* nocapture, i8* nocapture, i32, i32, i1) #0 +declare void @llvm.memcpy.p0i8.p1i8.i32(i8* nocapture, i8 addrspace(1)* nocapture, i32, i1) #0 +declare void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* nocapture, i8* nocapture, i32, i1) #0 -declare void @llvm.memmove.p0i8.p1i8.i32(i8* nocapture, i8 addrspace(1)* nocapture, i32, i32, i1) #0 -declare void @llvm.memmove.p1i8.p0i8.i32(i8 addrspace(1)* nocapture, i8* nocapture, i32, i32, i1) #0 +declare void @llvm.memmove.p0i8.p1i8.i32(i8* nocapture, i8 addrspace(1)* nocapture, i32, i1) #0 +declare void @llvm.memmove.p1i8.p0i8.i32(i8 addrspace(1)* nocapture, i8* nocapture, i32, i1) #0 -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) #0 +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) #0 declare i32 @llvm.objectsize.i32.p0i8(i8*, i1, i1) #1 ; CHECK-LABEL: @promote_with_memcpy( ; CHECK: getelementptr inbounds [64 x [17 x i32]], [64 x [17 x i32]] addrspace(3)* @promote_with_memcpy.alloca, i32 0, i32 %{{[0-9]+}} -; CHECK: call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %alloca.bc, i8 addrspace(1)* %in.bc, i32 68, i32 4, i1 false) -; CHECK: call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %out.bc, i8 addrspace(3)* %alloca.bc, i32 68, i32 4, i1 false) +; CHECK: call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* align 4 %alloca.bc, i8 addrspace(1)* align 4 %in.bc, i32 68, i1 false) +; CHECK: call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 4 %out.bc, i8 addrspace(3)* align 4 %alloca.bc, i32 68, i1 false) define amdgpu_kernel void @promote_with_memcpy(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 { %alloca = alloca [17 x i32], align 4 %alloca.bc = bitcast [17 x i32]* %alloca to i8* %in.bc = bitcast i32 addrspace(1)* %in to i8 addrspace(1)* %out.bc = bitcast i32 addrspace(1)* %out to i8 addrspace(1)* - call void 
@llvm.memcpy.p0i8.p1i8.i32(i8* %alloca.bc, i8 addrspace(1)* %in.bc, i32 68, i32 4, i1 false) - call void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* %out.bc, i8* %alloca.bc, i32 68, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p1i8.i32(i8* align 4 %alloca.bc, i8 addrspace(1)* align 4 %in.bc, i32 68, i1 false) + call void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* align 4 %out.bc, i8* align 4 %alloca.bc, i32 68, i1 false) ret void } ; CHECK-LABEL: @promote_with_memmove( ; CHECK: getelementptr inbounds [64 x [17 x i32]], [64 x [17 x i32]] addrspace(3)* @promote_with_memmove.alloca, i32 0, i32 %{{[0-9]+}} -; CHECK: call void @llvm.memmove.p3i8.p1i8.i32(i8 addrspace(3)* %alloca.bc, i8 addrspace(1)* %in.bc, i32 68, i32 4, i1 false) -; CHECK: call void @llvm.memmove.p1i8.p3i8.i32(i8 addrspace(1)* %out.bc, i8 addrspace(3)* %alloca.bc, i32 68, i32 4, i1 false) +; CHECK: call void @llvm.memmove.p3i8.p1i8.i32(i8 addrspace(3)* align 4 %alloca.bc, i8 addrspace(1)* align 4 %in.bc, i32 68, i1 false) +; CHECK: call void @llvm.memmove.p1i8.p3i8.i32(i8 addrspace(1)* align 4 %out.bc, i8 addrspace(3)* align 4 %alloca.bc, i32 68, i1 false) define amdgpu_kernel void @promote_with_memmove(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 { %alloca = alloca [17 x i32], align 4 %alloca.bc = bitcast [17 x i32]* %alloca to i8* %in.bc = bitcast i32 addrspace(1)* %in to i8 addrspace(1)* %out.bc = bitcast i32 addrspace(1)* %out to i8 addrspace(1)* - call void @llvm.memmove.p0i8.p1i8.i32(i8* %alloca.bc, i8 addrspace(1)* %in.bc, i32 68, i32 4, i1 false) - call void @llvm.memmove.p1i8.p0i8.i32(i8 addrspace(1)* %out.bc, i8* %alloca.bc, i32 68, i32 4, i1 false) + call void @llvm.memmove.p0i8.p1i8.i32(i8* align 4 %alloca.bc, i8 addrspace(1)* align 4 %in.bc, i32 68, i1 false) + call void @llvm.memmove.p1i8.p0i8.i32(i8 addrspace(1)* align 4 %out.bc, i8* align 4 %alloca.bc, i32 68, i1 false) ret void } ; CHECK-LABEL: @promote_with_memset( ; CHECK: getelementptr inbounds [64 x [17 x i32]], [64 x [17 x i32]] addrspace(3)* @promote_with_memset.alloca, i32 0, i32 %{{[0-9]+}} -; CHECK: call void @llvm.memset.p3i8.i32(i8 addrspace(3)* %alloca.bc, i8 7, i32 68, i32 4, i1 false) +; CHECK: call void @llvm.memset.p3i8.i32(i8 addrspace(3)* align 4 %alloca.bc, i8 7, i32 68, i1 false) define amdgpu_kernel void @promote_with_memset(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 { %alloca = alloca [17 x i32], align 4 %alloca.bc = bitcast [17 x i32]* %alloca to i8* %in.bc = bitcast i32 addrspace(1)* %in to i8 addrspace(1)* %out.bc = bitcast i32 addrspace(1)* %out to i8 addrspace(1)* - call void @llvm.memset.p0i8.i32(i8* %alloca.bc, i8 7, i32 68, i32 4, i1 false) + call void @llvm.memset.p0i8.i32(i8* align 4 %alloca.bc, i8 7, i32 68, i1 false) ret void } diff --git a/llvm/test/CodeGen/AMDGPU/stack-size-overflow.ll b/llvm/test/CodeGen/AMDGPU/stack-size-overflow.ll index 45a399b058c..322e5ca6219 100644 --- a/llvm/test/CodeGen/AMDGPU/stack-size-overflow.ll +++ b/llvm/test/CodeGen/AMDGPU/stack-size-overflow.ll @@ -1,7 +1,7 @@ ; RUN: not llc -march=amdgcn < %s 2>&1 | FileCheck -check-prefix=ERROR %s ; RUN: not llc -march=amdgcn < %s | FileCheck -check-prefix=GCN %s -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) #1 +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) #1 ; ERROR: error: stack size limit exceeded (4294967296) in stack_size_limit ; GCN: ; ScratchSize: 4294967296 @@ -9,6 +9,6 @@ define amdgpu_kernel void @stack_size_limit() #0 { entry: %alloca = alloca [1073741823 x i32], align 4 %bc = bitcast 
[1073741823 x i32]* %alloca to i8* - call void @llvm.memset.p0i8.i32(i8* %bc, i8 9, i32 1073741823, i32 1, i1 true) + call void @llvm.memset.p0i8.i32(i8* %bc, i8 9, i32 1073741823, i1 true) ret void } diff --git a/llvm/test/CodeGen/ARM/2009-03-07-SpillerBug.ll b/llvm/test/CodeGen/ARM/2009-03-07-SpillerBug.ll index 567400318ee..62a9aa23f29 100644 --- a/llvm/test/CodeGen/ARM/2009-03-07-SpillerBug.ll +++ b/llvm/test/CodeGen/ARM/2009-03-07-SpillerBug.ll @@ -59,7 +59,7 @@ bb3: ; preds = %entry %34 = fadd double %31, 0.000000e+00 %35 = fadd double %32, 0.000000e+00 %36 = bitcast %struct.ggPoint3* %x to i8* - call void @llvm.memcpy.p0i8.p0i8.i32(i8* null, i8* %36, i32 24, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 null, i8* align 4 %36, i32 24, i1 false) store double %33, double* null, align 8 br i1 false, label %_Z20ggRaySphereIntersectRK6ggRay3RK8ggSphereddRd.exit, label %bb5.i.i.i @@ -76,4 +76,4 @@ bb7: ; preds = %entry ret i32 0 } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind diff --git a/llvm/test/CodeGen/ARM/2011-03-10-DAGCombineCrash.ll b/llvm/test/CodeGen/ARM/2011-03-10-DAGCombineCrash.ll index c447a1f25b6..30a388bb587 100644 --- a/llvm/test/CodeGen/ARM/2011-03-10-DAGCombineCrash.ll +++ b/llvm/test/CodeGen/ARM/2011-03-10-DAGCombineCrash.ll @@ -16,7 +16,7 @@ bb: ; preds = %entry bb1: ; preds = %entry %0 = call %struct.ui* @vn_pp_to_ui(i32* undef) nounwind - call void @llvm.memset.p0i8.i32(i8* undef, i8 0, i32 40, i32 4, i1 false) + call void @llvm.memset.p0i8.i32(i8* align 4 undef, i8 0, i32 40, i1 false) %1 = getelementptr inbounds %struct.ui, %struct.ui* %0, i32 0, i32 0 store %struct.mo* undef, %struct.mo** %1, align 4 %2 = getelementptr inbounds %struct.ui, %struct.ui* %0, i32 0, i32 5 @@ -40,7 +40,7 @@ bb6: ; preds = %bb3 declare %struct.ui* @vn_pp_to_ui(i32*) -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind declare i32 @mo_create_nnm(%struct.mo*, i64, i32**) diff --git a/llvm/test/CodeGen/ARM/2011-10-26-memset-inline.ll b/llvm/test/CodeGen/ARM/2011-10-26-memset-inline.ll index c3b7c4ea86c..8d6ce34c26d 100644 --- a/llvm/test/CodeGen/ARM/2011-10-26-memset-inline.ll +++ b/llvm/test/CodeGen/ARM/2011-10-26-memset-inline.ll @@ -14,8 +14,8 @@ target triple = "thumbv7-apple-ios5.0.0" ; CHECK-UNALIGNED: str define void @foo(i8* nocapture %c) nounwind optsize { entry: - call void @llvm.memset.p0i8.i64(i8* %c, i8 -1, i64 5, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %c, i8 -1, i64 5, i1 false) ret void } -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind diff --git a/llvm/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll b/llvm/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll index c8e08c22ab1..7024a653b6c 100644 --- a/llvm/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll +++ b/llvm/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll @@ -5,8 +5,8 @@ ; CHECK: vst1.64 define void @f_0_40(i8* nocapture %c) nounwind optsize { entry: - call void @llvm.memset.p0i8.i64(i8* %c, i8 0, i64 40, i32 16, i1 false) + call void @llvm.memset.p0i8.i64(i8* align 16 %c, i8 0, i64 40, i1 false) ret void } -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind 
diff --git a/llvm/test/CodeGen/ARM/2012-04-24-SplitEHCriticalEdge.ll b/llvm/test/CodeGen/ARM/2012-04-24-SplitEHCriticalEdge.ll index ce0dcc70952..ef33b2f5018 100644 --- a/llvm/test/CodeGen/ARM/2012-04-24-SplitEHCriticalEdge.ll +++ b/llvm/test/CodeGen/ARM/2012-04-24-SplitEHCriticalEdge.ll @@ -19,7 +19,7 @@ declare i32 @llvm.eh.typeid.for(i8*) nounwind readnone declare i8* @__cxa_begin_catch(i8*) -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind declare void @__cxa_end_catch() diff --git a/llvm/test/CodeGen/ARM/Windows/memset.ll b/llvm/test/CodeGen/ARM/Windows/memset.ll index 500e25e259c..c9b22f47a15 100644 --- a/llvm/test/CodeGen/ARM/Windows/memset.ll +++ b/llvm/test/CodeGen/ARM/Windows/memset.ll @@ -2,11 +2,11 @@ @source = common global [512 x i8] zeroinitializer, align 4 -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind define void @function() { entry: - call void @llvm.memset.p0i8.i32(i8* bitcast ([512 x i8]* @source to i8*), i8 0, i32 512, i32 0, i1 false) + call void @llvm.memset.p0i8.i32(i8* bitcast ([512 x i8]* @source to i8*), i8 0, i32 512, i1 false) unreachable } diff --git a/llvm/test/CodeGen/ARM/Windows/no-aeabi.ll b/llvm/test/CodeGen/ARM/Windows/no-aeabi.ll index a4103b0a676..a5f7fc8daf6 100644 --- a/llvm/test/CodeGen/ARM/Windows/no-aeabi.ll +++ b/llvm/test/CodeGen/ARM/Windows/no-aeabi.ll @@ -1,14 +1,14 @@ ; RUN: llc -mtriple=thumbv7-windows-itanium -mcpu=cortex-a9 -verify-machineinstrs -o - %s | FileCheck %s -declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind @source = common global [512 x i8] zeroinitializer, align 4 @target = common global [512 x i8] zeroinitializer, align 4 define void @move() nounwind { entry: - call void @llvm.memmove.p0i8.p0i8.i32(i8* bitcast ([512 x i8]* @target to i8*), i8* bitcast ([512 x i8]* @source to i8*), i32 512, i32 0, i1 false) + call void @llvm.memmove.p0i8.p0i8.i32(i8* bitcast ([512 x i8]* @target to i8*), i8* bitcast ([512 x i8]* @source to i8*), i32 512, i1 false) unreachable } @@ -16,7 +16,7 @@ entry: define void @copy() nounwind { entry: - call void @llvm.memcpy.p0i8.p0i8.i32(i8* bitcast ([512 x i8]* @target to i8*), i8* bitcast ([512 x i8]* @source to i8*), i32 512, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* bitcast ([512 x i8]* @target to i8*), i8* bitcast ([512 x i8]* @source to i8*), i32 512, i1 false) unreachable } diff --git a/llvm/test/CodeGen/ARM/arm-eabi.ll b/llvm/test/CodeGen/ARM/arm-eabi.ll index 898055dd109..c2f364ab92b 100644 --- a/llvm/test/CodeGen/ARM/arm-eabi.ll +++ b/llvm/test/CodeGen/ARM/arm-eabi.ll @@ -39,7 +39,7 @@ define void @foo(i32* %t) { %4 = bitcast %struct.my_s* %3 to i8* ; CHECK-EABI: bl __aeabi_memcpy ; CHECK-GNUEABI: bl memcpy - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %4, i8* inttoptr (i32 1 to i8*), i32 72, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %4, i8* align 4 inttoptr (i32 1 to i8*), i32 72, i1 false) ret void } @@ -50,22 +50,22 @@ entry: ; memmove ; CHECK-EABI: bl __aeabi_memmove ; CHECK-GNUEABI: bl memmove - call void 
@llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 0, i1 false) + call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i1 false) ; memcpy ; CHECK-EABI: bl __aeabi_memcpy ; CHECK-GNUEABI: bl memcpy - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i1 false) ; memset ; CHECK-EABI: mov r2, #1 ; CHECK-EABI: bl __aeabi_memset ; CHECK-GNUEABI: mov r1, #1 ; CHECK-GNUEABI: bl memset - call void @llvm.memset.p0i8.i32(i8* %dest, i8 1, i32 500, i32 0, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 1, i32 500, i1 false) ret void } -declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind +declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind diff --git a/llvm/test/CodeGen/ARM/constantpool-promote-ldrh.ll b/llvm/test/CodeGen/ARM/constantpool-promote-ldrh.ll index 59970495874..0767d729a0a 100644 --- a/llvm/test/CodeGen/ARM/constantpool-promote-ldrh.ll +++ b/llvm/test/CodeGen/ARM/constantpool-promote-ldrh.ll @@ -12,10 +12,10 @@ target triple = "thumbv6m-arm-linux-gnueabi" ; CHECK: ldrh r{{[0-9]+}}, {{\[}}[[base]]] define hidden i32 @fn1() #0 { entry: - call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* bitcast ([4 x i16]* @fn1.a to i8*), i32 8, i32 2, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 undef, i8* align 2 bitcast ([4 x i16]* @fn1.a to i8*), i32 8, i1 false) ret i32 undef } ; Function Attrs: argmemonly nounwind -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) attributes #0 = { "target-features"="+strict-align" } diff --git a/llvm/test/CodeGen/ARM/constantpool-promote.ll b/llvm/test/CodeGen/ARM/constantpool-promote.ll index d5361f33a98..ccd86257dd3 100644 --- a/llvm/test/CodeGen/ARM/constantpool-promote.ll +++ b/llvm/test/CodeGen/ARM/constantpool-promote.ll @@ -120,7 +120,7 @@ define void @fn1() "target-features"="+strict-align" { entry: %a = alloca [4 x i16], align 2 %0 = bitcast [4 x i16]* %a to i8* - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* bitcast ([4 x i16]* @fn1.a to i8*), i32 8, i32 2, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 %0, i8* align 2 bitcast ([4 x i16]* @fn1.a to i8*), i32 8, i1 false) ret void } @@ -128,7 +128,7 @@ define void @fn2() "target-features"="+strict-align" { entry: %a = alloca [8 x i8], align 2 %0 = bitcast [8 x i8]* %a to i8* - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* bitcast ([8 x i8]* @fn2.a to i8*), i32 16, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* bitcast ([8 x i8]* @fn2.a to i8*), i32 16, i1 false) ret void } @@ -156,7 +156,7 @@ define void @pr32130() #0 { ; CHECK-V7: [[x]]: ; CHECK-V7: .asciz "s\000\000" define void @test10(i8* %a) local_unnamed_addr #0 { - call void @llvm.memmove.p0i8.p0i8.i32(i8* %a, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str, i32 0, i32 0), i32 1, i32 1, i1 false) + call void @llvm.memmove.p0i8.p0i8.i32(i8* align 1 %a, i8* align 1 getelementptr inbounds ([2 x i8], [2 
x i8]* @.str, i32 0, i32 0), i32 1, i1 false) ret void } @@ -174,16 +174,16 @@ define void @test10(i8* %a) local_unnamed_addr #0 { ; CHECK-V7ARM: .short 3 ; CHECK-V7ARM: .short 4 define void @test11(i16* %a) local_unnamed_addr #0 { - call void @llvm.memmove.p0i16.p0i16.i32(i16* %a, i16* getelementptr inbounds ([2 x i16], [2 x i16]* @.arr1, i32 0, i32 0), i32 2, i32 2, i1 false) + call void @llvm.memmove.p0i16.p0i16.i32(i16* align 2 %a, i16* align 2 getelementptr inbounds ([2 x i16], [2 x i16]* @.arr1, i32 0, i32 0), i32 2, i1 false) ret void } declare void @b(i8*) #1 declare void @c(i16*) #1 -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i32, i1) -declare void @llvm.memmove.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1) local_unnamed_addr -declare void @llvm.memmove.p0i16.p0i16.i32(i16*, i16*, i32, i32, i1) local_unnamed_addr +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) +declare void @llvm.memmove.p0i8.p0i8.i32(i8*, i8*, i32, i1) local_unnamed_addr +declare void @llvm.memmove.p0i16.p0i16.i32(i16*, i16*, i32, i1) local_unnamed_addr attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } diff --git a/llvm/test/CodeGen/ARM/crash-O0.ll b/llvm/test/CodeGen/ARM/crash-O0.ll index f92af999be5..bfbab8a9933 100644 --- a/llvm/test/CodeGen/ARM/crash-O0.ll +++ b/llvm/test/CodeGen/ARM/crash-O0.ll @@ -12,7 +12,7 @@ entry: } @.str523 = private constant [256 x i8] c"<Unknown>\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00", align 4 ; <[256 x i8]*> [#uses=1] -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind ; This function uses the scavenger for an ADDri instruction. ; ARMBaseRegisterInfo::estimateRSStackSizeLimit must return a 255 limit. 
@@ -21,8 +21,8 @@ entry: %letter = alloca i8 ; <i8*> [#uses=0] %prodvers = alloca [256 x i8] ; <[256 x i8]*> [#uses=1] %buildver = alloca [256 x i8] ; <[256 x i8]*> [#uses=0] - call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* getelementptr inbounds ([256 x i8], [256 x i8]* @.str523, i32 0, i32 0), i32 256, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 undef, i8* align 1 getelementptr inbounds ([256 x i8], [256 x i8]* @.str523, i32 0, i32 0), i32 256, i1 false) %prodvers2 = bitcast [256 x i8]* %prodvers to i8* ; <i8*> [#uses=1] - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %prodvers2, i8* getelementptr inbounds ([256 x i8], [256 x i8]* @.str523, i32 0, i32 0), i32 256, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %prodvers2, i8* align 1 getelementptr inbounds ([256 x i8], [256 x i8]* @.str523, i32 0, i32 0), i32 256, i1 false) unreachable } diff --git a/llvm/test/CodeGen/ARM/debug-info-blocks.ll b/llvm/test/CodeGen/ARM/debug-info-blocks.ll index ba265cc3547..cc1a45f23da 100644 --- a/llvm/test/CodeGen/ARM/debug-info-blocks.ll +++ b/llvm/test/CodeGen/ARM/debug-info-blocks.ll @@ -35,7 +35,7 @@ declare i8* @objc_msgSend(i8*, i8*, ...) declare void @llvm.dbg.value(metadata, metadata, metadata) nounwind readnone -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind define hidden void @foobar_func_block_invoke_0(i8* %.block_descriptor, %0* %loadedMydata, [4 x i32] %bounds.coerce0, [4 x i32] %data.coerce0) ssp !dbg !23 { %1 = alloca %0*, align 4 @@ -77,7 +77,7 @@ define hidden void @foobar_func_block_invoke_0(i8* %.block_descriptor, %0* %load %24 = bitcast i8* %23 to %struct.CR*, !dbg !143 %25 = bitcast %struct.CR* %24 to i8*, !dbg !143 %26 = bitcast %struct.CR* %data to i8*, !dbg !143 - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %25, i8* %26, i32 16, i32 4, i1 false), !dbg !143 + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %25, i8* align 4 %26, i32 16, i1 false), !dbg !143 %27 = getelementptr inbounds %2, %2* %6, i32 0, i32 6, !dbg !144 %28 = load %3*, %3** %27, align 4, !dbg !144 %29 = load i32, i32* @"OBJC_IVAR_$_MyWork._bounds", !dbg !144 @@ -86,7 +86,7 @@ define hidden void @foobar_func_block_invoke_0(i8* %.block_descriptor, %0* %load %32 = bitcast i8* %31 to %struct.CR*, !dbg !144 %33 = bitcast %struct.CR* %32 to i8*, !dbg !144 %34 = bitcast %struct.CR* %bounds to i8*, !dbg !144 - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %33, i8* %34, i32 16, i32 4, i1 false), !dbg !144 + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %33, i8* align 4 %34, i32 16, i1 false), !dbg !144 %35 = getelementptr inbounds %2, %2* %6, i32 0, i32 6, !dbg !145 %36 = load %3*, %3** %35, align 4, !dbg !145 %37 = getelementptr inbounds %2, %2* %6, i32 0, i32 5, !dbg !145 diff --git a/llvm/test/CodeGen/ARM/dyn-stackalloc.ll b/llvm/test/CodeGen/ARM/dyn-stackalloc.ll index 5b963fd64de..b653acbd6a7 100644 --- a/llvm/test/CodeGen/ARM/dyn-stackalloc.ll +++ b/llvm/test/CodeGen/ARM/dyn-stackalloc.ll @@ -51,7 +51,7 @@ define void @t2(%struct.comment* %vc, i8* %tag, i8* %contents) { %tmp9 = call i8* @strcpy(i8* %tmp6, i8* %tag) %tmp6.len = call i32 @strlen(i8* %tmp6) %tmp6.indexed = getelementptr i8, i8* %tmp6, i32 %tmp6.len - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp6.indexed, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str215, i32 0, i32 0), i32 2, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %tmp6.indexed, i8* align 1 
getelementptr inbounds ([2 x i8], [2 x i8]* @str215, i32 0, i32 0), i32 2, i1 false) %tmp15 = call i8* @strcat(i8* %tmp6, i8* %contents) call fastcc void @comment_add(%struct.comment* %vc, i8* %tmp6) ret void @@ -65,4 +65,4 @@ declare fastcc void @comment_add(%struct.comment*, i8*) declare i8* @strcpy(i8*, i8*) -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind diff --git a/llvm/test/CodeGen/ARM/fast-isel-intrinsic.ll b/llvm/test/CodeGen/ARM/fast-isel-intrinsic.ll index 277461aa566..8d9c27b6f22 100644 --- a/llvm/test/CodeGen/ARM/fast-isel-intrinsic.ll +++ b/llvm/test/CodeGen/ARM/fast-isel-intrinsic.ll @@ -44,11 +44,11 @@ define void @t1() nounwind ssp { ; THUMB-LONG: movt r3, :upper16:L_memset$non_lazy_ptr ; THUMB-LONG: ldr r3, [r3] ; THUMB-LONG: blx r3 - call void @llvm.memset.p0i8.i32(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @message1, i32 0, i32 5), i8 64, i32 10, i32 4, i1 false) + call void @llvm.memset.p0i8.i32(i8* align 4 getelementptr inbounds ([60 x i8], [60 x i8]* @message1, i32 0, i32 5), i8 64, i32 10, i1 false) ret void } -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind define void @t2() nounwind ssp { ; ARM-LABEL: t2: @@ -93,11 +93,11 @@ define void @t2() nounwind ssp { ; THUMB-LONG: movt r3, :upper16:L_memcpy$non_lazy_ptr ; THUMB-LONG: ldr r3, [r3] ; THUMB-LONG: blx r3 - call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 4), i8* getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 16), i32 17, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 4), i8* align 4 getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 16), i32 17, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind define void @t3() nounwind ssp { ; ARM-LABEL: t3: @@ -141,7 +141,7 @@ define void @t3() nounwind ssp { ; THUMB-LONG: movt r3, :upper16:L_memmove$non_lazy_ptr ; THUMB-LONG: ldr r3, [r3] ; THUMB-LONG: blx r3 - call void @llvm.memmove.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 4), i8* getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 16), i32 10, i32 1, i1 false) + call void @llvm.memmove.p0i8.p0i8.i32(i8* align 1 getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 4), i8* align 1 getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 16), i32 10, i1 false) ret void } @@ -173,11 +173,11 @@ define void @t4() nounwind ssp { ; THUMB: ldrh r1, [r0, #24] ; THUMB: strh r1, [r0, #12] ; THUMB: bx lr - call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 4), i8* getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 16), i32 10, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 4), i8* align 4 getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 16), i32 10, i1 false) ret void } -declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) 
nounwind define void @t5() nounwind ssp { ; ARM-LABEL: t5: @@ -215,7 +215,7 @@ define void @t5() nounwind ssp { ; THUMB: ldrh r1, [r0, #24] ; THUMB: strh r1, [r0, #12] ; THUMB: bx lr - call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 4), i8* getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 16), i32 10, i32 2, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 4), i8* align 2 getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 16), i32 10, i1 false) ret void } @@ -275,14 +275,14 @@ define void @t6() nounwind ssp { ; THUMB: ldrb r1, [r0, #25] ; THUMB: strb r1, [r0, #13] ; THUMB: bx lr - call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 4), i8* getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 16), i32 10, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 4), i8* align 1 getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 16), i32 10, i1 false) ret void } ; rdar://13202135 define void @t7() nounwind ssp { ; Just make sure this doesn't assert when we have an odd length and an alignment of 2. - call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 4), i8* getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 16), i32 3, i32 2, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 4), i8* align 2 getelementptr inbounds ([60 x i8], [60 x i8]* @temp, i32 0, i32 16), i32 3, i1 false) ret void } diff --git a/llvm/test/CodeGen/ARM/interval-update-remat.ll b/llvm/test/CodeGen/ARM/interval-update-remat.ll index 524e8a0aa49..216f7e915a8 100644 --- a/llvm/test/CodeGen/ARM/interval-update-remat.ll +++ b/llvm/test/CodeGen/ARM/interval-update-remat.ll @@ -85,7 +85,7 @@ _ZN7MessageD1Ev.exit33: ; preds = %delete.notnull.i.i. 
if.end: ; preds = %_ZN7MessageD1Ev.exit33, %entry %message_.i.i = getelementptr inbounds %class.AssertionResult.24.249.299.1324.2349, %class.AssertionResult.24.249.299.1324.2349* %gtest_ar, i32 0, i32 1 %call.i.i.i = call %class.scoped_ptr.23.248.298.1323.2348* @_ZN10scoped_ptrI25Trans_NS___1_basic_stringIciiEED2Ev(%class.scoped_ptr.23.248.298.1323.2348* %message_.i.i) - call void @llvm.memset.p0i8.i32(i8* null, i8 0, i32 12, i32 4, i1 false) + call void @llvm.memset.p0i8.i32(i8* align 4 null, i8 0, i32 12, i1 false) call void @_ZN25Trans_NS___1_basic_stringIciiE5m_fn2Ev(%class.Trans_NS___1_basic_string.18.243.293.1318.2343* nonnull %ref.tmp) call void @_Z19CreateSOCKSv5Paramsv(%class.scoped_refptr.19.244.294.1319.2344* nonnull sret %agg.tmp16) %callback_.i = getelementptr inbounds %class.TestCompletionCallback.9.234.284.1309.2334, %class.TestCompletionCallback.9.234.284.1309.2334* %callback, i32 0, i32 1 @@ -137,7 +137,7 @@ declare void @_ZN18ClientSocketHandle5m_fn3IPiEEvRK25Trans_NS___1_basic_stringIc declare void @_Z19CreateSOCKSv5Paramsv(%class.scoped_refptr.19.244.294.1319.2344* sret) ; Function Attrs: argmemonly nounwind -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) #0 +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) #0 declare %class.BoundNetLog.20.245.295.1320.2345* @_ZN11BoundNetLogD1Ev(%class.BoundNetLog.20.245.295.1320.2345* returned) unnamed_addr diff --git a/llvm/test/CodeGen/ARM/ldm-stm-base-materialization.ll b/llvm/test/CodeGen/ARM/ldm-stm-base-materialization.ll index a3231f95f47..755619e8b3e 100644 --- a/llvm/test/CodeGen/ARM/ldm-stm-base-materialization.ll +++ b/llvm/test/CodeGen/ARM/ldm-stm-base-materialization.ll @@ -22,7 +22,7 @@ entry: %2 = load i32*, i32** @b, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %2, i32 1 %3 = bitcast i32* %arrayidx1 to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %3, i32 24, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* align 4 %3, i32 24, i1 false) ret void } @@ -43,7 +43,7 @@ entry: %2 = load i32*, i32** @b, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %2, i32 1 %3 = bitcast i32* %arrayidx1 to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %3, i32 28, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* align 4 %3, i32 28, i1 false) ret void } @@ -64,7 +64,7 @@ entry: %2 = load i32*, i32** @b, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %2, i32 1 %3 = bitcast i32* %arrayidx1 to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %3, i32 32, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* align 4 %3, i32 32, i1 false) ret void } @@ -85,9 +85,9 @@ entry: %2 = load i32*, i32** @b, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %2, i32 1 %3 = bitcast i32* %arrayidx1 to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %3, i32 36, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* align 4 %3, i32 36, i1 false) ret void } ; Function Attrs: nounwind -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #1 diff --git a/llvm/test/CodeGen/ARM/machine-cse-cmp.ll b/llvm/test/CodeGen/ARM/machine-cse-cmp.ll index 611cba6ed1f..10e56a346a2 100644 --- a/llvm/test/CodeGen/ARM/machine-cse-cmp.ll +++ b/llvm/test/CodeGen/ARM/machine-cse-cmp.ll @@ -37,14 +37,14 @@ entry: for.body.lr.ph: ; 
preds = %entry %1 = icmp sgt i32 %0, 1 %smax = select i1 %1, i32 %0, i32 1 - call void @llvm.memset.p0i8.i32(i8* getelementptr inbounds ([250 x i8], [250 x i8]* @bar, i32 0, i32 0), i8 0, i32 %smax, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* getelementptr inbounds ([250 x i8], [250 x i8]* @bar, i32 0, i32 0), i8 0, i32 %smax, i1 false) unreachable for.cond1.preheader: ; preds = %entry ret void } -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind ; rdar://12462006 define i8* @f3(i8* %base, i32* nocapture %offset, i32 %size) nounwind { diff --git a/llvm/test/CodeGen/ARM/memcpy-inline.ll b/llvm/test/CodeGen/ARM/memcpy-inline.ll index b447497b270..1dccf0b9905 100644 --- a/llvm/test/CodeGen/ARM/memcpy-inline.ll +++ b/llvm/test/CodeGen/ARM/memcpy-inline.ll @@ -23,7 +23,7 @@ entry: ; CHECK-T1: strb [[TREG1]], ; CHECK-T1: ldrh [[TREG2:r[0-9]]], ; CHECK-T1: strh [[TREG2]] - call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds (%struct.x, %struct.x* @dst, i32 0, i32 0), i8* getelementptr inbounds (%struct.x, %struct.x* @src, i32 0, i32 0), i32 11, i32 8, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 getelementptr inbounds (%struct.x, %struct.x* @dst, i32 0, i32 0), i8* align 8 getelementptr inbounds (%struct.x, %struct.x* @src, i32 0, i32 0), i32 11, i1 false) ret i32 0 } @@ -37,7 +37,7 @@ entry: ; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0] ; CHECK-T1-LABEL: t1: ; CHECK-T1: bl _memcpy - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([31 x i8], [31 x i8]* @.str1, i64 0, i64 0), i64 31, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([31 x i8], [31 x i8]* @.str1, i64 0, i64 0), i64 31, i1 false) ret void } @@ -55,7 +55,7 @@ entry: ; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r3] ; CHECK-T1-LABEL: t2: ; CHECK-T1: bl _memcpy - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([36 x i8], [36 x i8]* @.str2, i64 0, i64 0), i64 36, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([36 x i8], [36 x i8]* @.str2, i64 0, i64 0), i64 36, i1 false) ret void } @@ -68,7 +68,7 @@ entry: ; CHECK: vst1.8 {d{{[0-9]+}}}, [r0] ; CHECK-T1-LABEL: t3: ; CHECK-T1: bl _memcpy - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([24 x i8], [24 x i8]* @.str3, i64 0, i64 0), i64 24, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([24 x i8], [24 x i8]* @.str3, i64 0, i64 0), i64 24, i1 false) ret void } @@ -80,7 +80,7 @@ entry: ; CHECK: strh [[REG5:r[0-9]+]], [r0] ; CHECK-T1-LABEL: t4: ; CHECK-T1: bl _memcpy - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([18 x i8], [18 x i8]* @.str4, i64 0, i64 0), i64 18, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([18 x i8], [18 x i8]* @.str4, i64 0, i64 0), i64 18, i1 false) ret void } @@ -96,7 +96,7 @@ entry: ; CHECK: str [[REG7]] ; CHECK-T1-LABEL: t5: ; CHECK-T1: bl _memcpy - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str5, i64 0, i64 0), i64 7, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str5, i64 0, i64 0), i64 7, i1 false) ret void } @@ -114,7 +114,7 @@ entry: ; CHECK-T1: strh [[TREG5]], ; CHECK-T1: ldr 
[[TREG6:r[0-9]]], ; CHECK-T1: str [[TREG6]] - call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([512 x i8], [512 x i8]* @spool.splbuf, i64 0, i64 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str6, i64 0, i64 0), i64 14, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([512 x i8], [512 x i8]* @spool.splbuf, i64 0, i64 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str6, i64 0, i64 0), i64 14, i1 false) ret void } @@ -130,9 +130,9 @@ entry: ; CHECK-T1: str %0 = bitcast %struct.Foo* %a to i8* %1 = bitcast %struct.Foo* %b to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 16, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 16, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind diff --git a/llvm/test/CodeGen/ARM/memcpy-ldm-stm.ll b/llvm/test/CodeGen/ARM/memcpy-ldm-stm.ll index 2ebe7ed5b14..314f559e357 100644 --- a/llvm/test/CodeGen/ARM/memcpy-ldm-stm.ll +++ b/llvm/test/CodeGen/ARM/memcpy-ldm-stm.ll @@ -24,7 +24,7 @@ entry: ; Think of the monstrosity '{{\[}}[[LB]]]' as '[ [[LB]] ]' without the spaces. ; CHECK-NEXT: ldrb{{(\.w)?}} {{.*}}, {{\[}}[[LB]]] ; CHECK-NEXT: strb{{(\.w)?}} {{.*}}, {{\[}}[[SB]]] - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* bitcast ([64 x i32]* @s to i8*), i8* bitcast ([64 x i32]* @d to i8*), i32 17, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 bitcast ([64 x i32]* @s to i8*), i8* align 4 bitcast ([64 x i32]* @d to i8*), i32 17, i1 false) ret void } @@ -42,7 +42,7 @@ entry: ; CHECK-NEXT: ldrb{{(\.w)?}} {{.*}}, {{\[}}[[LB]], #2] ; CHECK-NEXT: strb{{(\.w)?}} {{.*}}, {{\[}}[[SB]], #2] ; CHECK-NEXT: strh{{(\.w)?}} {{.*}}, {{\[}}[[SB]]] - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* bitcast ([64 x i32]* @s to i8*), i8* bitcast ([64 x i32]* @d to i8*), i32 15, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 bitcast ([64 x i32]* @s to i8*), i8* align 4 bitcast ([64 x i32]* @d to i8*), i32 15, i1 false) ret void } @@ -54,13 +54,13 @@ entry: define void @t3() { call void @llvm.memcpy.p0i8.p0i8.i32( - i8* getelementptr inbounds (%struct.T, %struct.T* @copy, i32 0, i32 0), - i8* getelementptr inbounds (%struct.T, %struct.T* @etest, i32 0, i32 0), - i32 24, i32 8, i1 false) + i8* align 8 getelementptr inbounds (%struct.T, %struct.T* @copy, i32 0, i32 0), + i8* align 8 getelementptr inbounds (%struct.T, %struct.T* @etest, i32 0, i32 0), + i32 24, i1 false) call void @llvm.memcpy.p0i8.p0i8.i32( - i8* getelementptr inbounds (%struct.T, %struct.T* @copy, i32 0, i32 0), - i8* getelementptr inbounds (%struct.T, %struct.T* @etest, i32 0, i32 0), - i32 24, i32 8, i1 false) + i8* align 8 getelementptr inbounds (%struct.T, %struct.T* @copy, i32 0, i32 0), + i8* align 8 getelementptr inbounds (%struct.T, %struct.T* @etest, i32 0, i32 0), + i32 24, i1 false) ret void } @@ -70,7 +70,7 @@ define void @t3() { define void @test3(%struct.S* %d, %struct.S* %s) #0 { %1 = bitcast %struct.S* %d to i8* %2 = bitcast %struct.S* %s to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %2, i32 48, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* 
align 4 %2, i32 48, i1 false) ; 3 ldm/stm pairs in v6; 2 in v7 ; CHECK: ldm{{(\.w)?}} {{[rl0-9]+!?}}, [[REGLIST1:{.*}]] ; CHECK: stm{{(\.w)?}} {{[rl0-9]+!?}}, [[REGLIST1]] @@ -91,4 +91,4 @@ declare void @g(i32*) attributes #0 = { "no-frame-pointer-elim"="true" } ; Function Attrs: nounwind -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #1 diff --git a/llvm/test/CodeGen/ARM/memcpy-no-inline.ll b/llvm/test/CodeGen/ARM/memcpy-no-inline.ll index 126546095e1..7aaac19eee3 100644 --- a/llvm/test/CodeGen/ARM/memcpy-no-inline.ll +++ b/llvm/test/CodeGen/ARM/memcpy-no-inline.ll @@ -14,7 +14,7 @@ entry: ; CHECK-NOT: ldm %mystring = alloca [31 x i8], align 1 %0 = getelementptr inbounds [31 x i8], [31 x i8]* %mystring, i32 0, i32 0 - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* getelementptr inbounds ([31 x i8], [31 x i8]* @.str, i32 0, i32 0), i32 31, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %0, i8* align 1 getelementptr inbounds ([31 x i8], [31 x i8]* @.str, i32 0, i32 0), i32 31, i1 false) ret void } @@ -24,10 +24,10 @@ entry: ; CHECK-NOT: __aeabi_memcpy %mystring = alloca [31 x i8], align 1 %0 = getelementptr inbounds [31 x i8], [31 x i8]* %mystring, i32 0, i32 0 - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* getelementptr inbounds ([21 x i8], [21 x i8]* @.str.1, i32 0, i32 0), i32 21, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %0, i8* align 1 getelementptr inbounds ([21 x i8], [21 x i8]* @.str.1, i32 0, i32 0), i32 21, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #1 attributes #0 = { minsize noinline nounwind optsize } diff --git a/llvm/test/CodeGen/ARM/memfunc.ll b/llvm/test/CodeGen/ARM/memfunc.ll index ed6746290b7..882091b67f0 100644 --- a/llvm/test/CodeGen/ARM/memfunc.ll +++ b/llvm/test/CodeGen/ARM/memfunc.ll @@ -16,13 +16,13 @@ entry: ; CHECK-DARWIN: bl _memmove ; CHECK-EABI: bl __aeabi_memmove ; CHECK-GNUEABI: bl memmove - call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 0, i1 false) + call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i1 false) ; CHECK-IOS: bl _memcpy ; CHECK-DARWIN: bl _memcpy ; CHECK-EABI: bl __aeabi_memcpy ; CHECK-GNUEABI: bl memcpy - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i1 false) ; EABI memset swaps arguments ; CHECK-IOS: mov r1, #1 @@ -33,7 +33,7 @@ entry: ; CHECK-EABI: bl __aeabi_memset ; CHECK-GNUEABI: mov r1, #1 ; CHECK-GNUEABI: bl memset - call void @llvm.memset.p0i8.i32(i8* %dest, i8 1, i32 500, i32 0, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 1, i32 500, i1 false) ; EABI uses memclr if value set to 0 ; CHECK-IOS: mov r1, #0 @@ -42,7 +42,7 @@ entry: ; CHECK-DARWIN: bl _memset ; CHECK-EABI: bl __aeabi_memclr ; CHECK-GNUEABI: bl memset - call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 500, i32 0, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 500, i1 false) ; EABI uses aligned function variants if possible @@ -50,49 +50,49 @@ entry: ; CHECK-DARWIN: bl _memmove ; CHECK-EABI: bl __aeabi_memmove4 ; CHECK-GNUEABI: bl memmove - call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 4, i1 false) + call void 
@llvm.memmove.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 500, i1 false) ; CHECK-IOS: bl _memcpy ; CHECK-DARWIN: bl _memcpy ; CHECK-EABI: bl __aeabi_memcpy4 ; CHECK-GNUEABI: bl memcpy - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 500, i1 false) ; CHECK-IOS: bl _memset ; CHECK-DARWIN: bl _memset ; CHECK-EABI: bl __aeabi_memset4 ; CHECK-GNUEABI: bl memset - call void @llvm.memset.p0i8.i32(i8* %dest, i8 1, i32 500, i32 4, i1 false) + call void @llvm.memset.p0i8.i32(i8* align 4 %dest, i8 1, i32 500, i1 false) ; CHECK-IOS: bl _memset ; CHECK-DARWIN: bl _memset ; CHECK-EABI: bl __aeabi_memclr4 ; CHECK-GNUEABI: bl memset - call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 500, i32 4, i1 false) + call void @llvm.memset.p0i8.i32(i8* align 4 %dest, i8 0, i32 500, i1 false) ; CHECK-IOS: bl _memmove ; CHECK-DARWIN: bl _memmove ; CHECK-EABI: bl __aeabi_memmove8 ; CHECK-GNUEABI: bl memmove - call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 8, i1 false) + call void @llvm.memmove.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 500, i1 false) ; CHECK-IOS: bl _memcpy ; CHECK-DARWIN: bl _memcpy ; CHECK-EABI: bl __aeabi_memcpy8 ; CHECK-GNUEABI: bl memcpy - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 8, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 500, i1 false) ; CHECK-IOS: bl _memset ; CHECK-DARWIN: bl _memset ; CHECK-EABI: bl __aeabi_memset8 ; CHECK-GNUEABI: bl memset - call void @llvm.memset.p0i8.i32(i8* %dest, i8 1, i32 500, i32 8, i1 false) + call void @llvm.memset.p0i8.i32(i8* align 8 %dest, i8 1, i32 500, i1 false) ; CHECK-IOS: bl _memset ; CHECK-DARWIN: bl _memset ; CHECK-EABI: bl __aeabi_memclr8 ; CHECK-GNUEABI: bl memset - call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 500, i32 8, i1 false) + call void @llvm.memset.p0i8.i32(i8* align 8 %dest, i8 0, i32 500, i1 false) unreachable } @@ -113,7 +113,7 @@ entry: ; CHECK-GNUEABI: bl memmove %arr0 = alloca [9 x i8], align 1 %0 = bitcast [9 x i8]* %arr0 to i8* - call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i32 0, i1 false) + call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i1 false) ; CHECK: add r1, sp, #16 ; CHECK-IOS: bl _memcpy @@ -122,7 +122,7 @@ entry: ; CHECK-GNUEABI: bl memcpy %arr1 = alloca [9 x i8], align 1 %1 = bitcast [9 x i8]* %arr1 to i8* - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i1 false) ; CHECK-IOS: mov r0, sp ; CHECK-IOS: mov r1, #1 @@ -138,7 +138,7 @@ entry: ; CHECK-GNUEABI: bl memset %arr2 = alloca [9 x i8], align 1 %2 = bitcast [9 x i8]* %arr2 to i8* - call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i32 0, i1 false) + call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false) unreachable } @@ -155,7 +155,7 @@ entry: ; CHECK-GNUEABI: bl memmove %arr0 = alloca [7 x i8], align 1 %0 = bitcast [7 x i8]* %arr0 to i8* - call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i32 0, i1 false) + call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i1 false) ; CHECK: {{add(.w)? r1, sp, #10|sub(.w)? 
r1, r(7|11), #22}} ; CHECK-IOS: bl _memcpy @@ -164,7 +164,7 @@ entry: ; CHECK-GNUEABI: bl memcpy %arr1 = alloca [7 x i8], align 1 %1 = bitcast [7 x i8]* %arr1 to i8* - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i1 false) ; CHECK: {{add(.w)? r0, sp, #3|sub(.w)? r0, r(7|11), #29}} ; CHECK-IOS: mov r1, #1 @@ -177,7 +177,7 @@ entry: ; CHECK-GNUEABI: bl memset %arr2 = alloca [7 x i8], align 1 %2 = bitcast [7 x i8]* %arr2 to i8* - call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i32 0, i1 false) + call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false) unreachable } @@ -194,7 +194,7 @@ entry: ; CHECK-GNUEABI: bl memmove %arr0 = alloca [9 x i8], align 1 %0 = getelementptr inbounds [9 x i8], [9 x i8]* %arr0, i32 0, i32 4 - call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i32 0, i1 false) + call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i1 false) ; CHECK: {{add(.w)? r., sp, #(10|14)|sub(.w) r., r(7|11), #26}} ; CHECK-IOS: bl _memcpy @@ -203,7 +203,7 @@ entry: ; CHECK-GNUEABI: bl memcpy %arr1 = alloca [9 x i8], align 1 %1 = getelementptr inbounds [9 x i8], [9 x i8]* %arr1, i32 0, i32 4 - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i1 false) ; CHECK: {{add(.w)? r., sp, #(1|5)|sub(.w) r., r(7|11), #35}} ; CHECK-IOS: mov r1, #1 @@ -216,7 +216,7 @@ entry: ; CHECK-GNUEABI: bl memset %arr2 = alloca [9 x i8], align 1 %2 = getelementptr inbounds [9 x i8], [9 x i8]* %arr2, i32 0, i32 4 - call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i32 0, i1 false) + call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false) unreachable } @@ -233,7 +233,7 @@ entry: ; CHECK-GNUEABI: bl memmove %arr0 = alloca [13 x i8], align 1 %0 = getelementptr inbounds [13 x i8], [13 x i8]* %arr0, i32 0, i32 1 - call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i32 0, i1 false) + call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i1 false) ; CHECK: {{add(.w)? r., sp, #(10|14)|sub(.w)? r., r(7|11), #34}} ; CHECK-IOS: bl _memcpy @@ -242,7 +242,7 @@ entry: ; CHECK-GNUEABI: bl memcpy %arr1 = alloca [13 x i8], align 1 %1 = getelementptr inbounds [13 x i8], [13 x i8]* %arr1, i32 0, i32 1 - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i1 false) ; CHECK: {{add(.w)? r., sp, #(1|5)|sub(.w)? r., r(7|11), #47}} ; CHECK-IOS: mov r1, #1 @@ -255,7 +255,7 @@ entry: ; CHECK-GNUEABI: bl memset %arr2 = alloca [13 x i8], align 1 %2 = getelementptr inbounds [13 x i8], [13 x i8]* %arr2, i32 0, i32 1 - call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i32 0, i1 false) + call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false) unreachable } @@ -272,7 +272,7 @@ entry: ; CHECK-GNUEABI: bl memmove %arr0 = alloca [13 x i8], align 1 %0 = getelementptr inbounds [13 x i8], [13 x i8]* %arr0, i32 0, i32 %i - call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i32 0, i1 false) + call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i1 false) ; CHECK: {{add(.w)? r., sp, #(10|14)|sub(.w)? 
r., r(7|11), #42}} ; CHECK-IOS: bl _memcpy @@ -281,7 +281,7 @@ entry: ; CHECK-GNUEABI: bl memcpy %arr1 = alloca [13 x i8], align 1 %1 = getelementptr inbounds [13 x i8], [13 x i8]* %arr1, i32 0, i32 %i - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i1 false) ; CHECK: {{add(.w)? r., sp, #(1|5)|sub(.w)? r., r(7|11), #55}} ; CHECK-IOS: mov r1, #1 @@ -294,7 +294,7 @@ entry: ; CHECK-GNUEABI: bl memset %arr2 = alloca [13 x i8], align 1 %2 = getelementptr inbounds [13 x i8], [13 x i8]* %arr2, i32 0, i32 %i - call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i32 0, i1 false) + call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false) unreachable } @@ -311,7 +311,7 @@ entry: ; CHECK-GNUEABI: bl memmove %arr0 = alloca [13 x i8], align 1 %0 = getelementptr [13 x i8], [13 x i8]* %arr0, i32 0, i32 4 - call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i32 0, i1 false) + call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i1 false) ; CHECK: {{add(.w)? r., sp, #(10|14)|sub(.w)? r., r(7|11), #34}} ; CHECK-IOS: bl _memcpy @@ -320,7 +320,7 @@ entry: ; CHECK-GNUEABI: bl memcpy %arr1 = alloca [13 x i8], align 1 %1 = getelementptr [13 x i8], [13 x i8]* %arr1, i32 0, i32 4 - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i1 false) ; CHECK: {{add(.w)? r., sp, #(1|5)|sub(.w)? r., r(7|11), #47}} ; CHECK-IOS: mov r1, #1 @@ -333,7 +333,7 @@ entry: ; CHECK-GNUEABI: bl memset %arr2 = alloca [13 x i8], align 1 %2 = getelementptr [13 x i8], [13 x i8]* %arr2, i32 0, i32 4 - call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i32 0, i1 false) + call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false) unreachable } @@ -350,7 +350,7 @@ entry: ; CHECK-GNUEABI: bl memmove %arr0 = alloca [13 x i8], align 1 %0 = getelementptr inbounds [13 x i8], [13 x i8]* %arr0, i32 0, i32 16 - call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i32 0, i1 false) + call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i1 false) ; CHECK: {{add(.w)? r., sp, #(10|14)|sub(.w)? r., r(7|11), #34}} ; CHECK-IOS: bl _memcpy @@ -359,7 +359,7 @@ entry: ; CHECK-GNUEABI: bl memcpy %arr1 = alloca [13 x i8], align 1 %1 = getelementptr inbounds [13 x i8], [13 x i8]* %arr1, i32 0, i32 16 - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i1 false) ; CHECK: {{add(.w)? r., sp, #(1|5)|sub(.w)? 
r., r(7|11), #47}} ; CHECK-IOS: mov r1, #1 @@ -372,7 +372,7 @@ entry: ; CHECK-GNUEABI: bl memset %arr2 = alloca [13 x i8], align 1 %2 = getelementptr inbounds [13 x i8], [13 x i8]* %arr2, i32 0, i32 16 - call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i32 0, i1 false) + call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false) unreachable } @@ -390,15 +390,15 @@ entry: @arr9 = weak_odr global [128 x i8] undef define void @f9(i8* %dest, i32 %n) "no-frame-pointer-elim"="true" { entry: - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @arr1, i32 0, i32 0), i32 %n, i32 1, i1 false) - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([8 x i8], [8 x i8]* @arr2, i32 0, i32 0), i32 %n, i32 1, i1 false) - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @arr3, i32 0, i32 0), i32 %n, i32 1, i1 false) - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([8 x i8], [8 x i8]* @arr4, i32 0, i32 0), i32 %n, i32 1, i1 false) - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @arr5, i32 0, i32 0), i32 %n, i32 1, i1 false) - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @arr6, i32 0, i32 0), i32 %n, i32 1, i1 false) - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @arr7, i32 0, i32 0), i32 %n, i32 1, i1 false) - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([128 x i8], [128 x i8]* @arr8, i32 0, i32 0), i32 %n, i32 1, i1 false) - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([128 x i8], [128 x i8]* @arr9, i32 0, i32 0), i32 %n, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @arr1, i32 0, i32 0), i32 %n, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([8 x i8], [8 x i8]* @arr2, i32 0, i32 0), i32 %n, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @arr3, i32 0, i32 0), i32 %n, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([8 x i8], [8 x i8]* @arr4, i32 0, i32 0), i32 %n, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @arr5, i32 0, i32 0), i32 %n, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @arr6, i32 0, i32 0), i32 %n, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @arr7, i32 0, i32 0), i32 %n, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([128 x i8], [128 x i8]* @arr8, i32 0, i32 0), i32 %n, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([128 x i8], [128 x i8]* @arr9, i32 0, i32 0), i32 %n, i1 false) unreachable } @@ -428,6 +428,6 @@ entry: ; CHECK-NOT: arr7: -declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind +declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, 
i32, i1) nounwind +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind diff --git a/llvm/test/CodeGen/ARM/memset-inline.ll b/llvm/test/CodeGen/ARM/memset-inline.ll index b2bd257701d..01b21e9d387 100644 --- a/llvm/test/CodeGen/ARM/memset-inline.ll +++ b/llvm/test/CodeGen/ARM/memset-inline.ll @@ -12,7 +12,7 @@ entry: ; CHECK-6M: str r1, [r0] ; CHECK-6M: str r1, [r0, #4] ; CHECK-6M: str r1, [r0, #8] - call void @llvm.memset.p0i8.i64(i8* %c, i8 0, i64 12, i32 8, i1 false) + call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 12, i1 false) ret void } @@ -33,7 +33,7 @@ entry: ; CHECK-6M: str [[REG]], [sp] %buf = alloca [26 x i8], align 1 %0 = getelementptr inbounds [26 x i8], [26 x i8]* %buf, i32 0, i32 0 - call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 26, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 26, i1 false) call void @something(i8* %0) nounwind ret void } @@ -54,7 +54,7 @@ entry: for.body: %i = phi i32 [ 0, %entry ], [ %inc, %for.body ] %0 = trunc i32 %i to i8 - call void @llvm.memset.p0i8.i32(i8* %p, i8 %0, i32 4, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %p, i8 %0, i32 4, i1 false) call void @something(i8* %p) %inc = add nuw nsw i32 %i, 1 %exitcond = icmp eq i32 %inc, 255 @@ -78,7 +78,7 @@ entry: for.body: %i = phi i32 [ 0, %entry ], [ %inc, %for.body ] %0 = trunc i32 %i to i8 - call void @llvm.memset.p0i8.i32(i8* %p, i8 %0, i32 4, i32 2, i1 false) + call void @llvm.memset.p0i8.i32(i8* align 2 %p, i8 %0, i32 4, i1 false) call void @something(i8* %p) %inc = add nuw nsw i32 %i, 1 %exitcond = icmp eq i32 %inc, 255 @@ -89,5 +89,5 @@ for.end: } declare void @something(i8*) nounwind -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind diff --git a/llvm/test/CodeGen/ARM/stack-protector-bmovpcb_call.ll b/llvm/test/CodeGen/ARM/stack-protector-bmovpcb_call.ll index 2a7a82da8f6..84bf7ac826e 100644 --- a/llvm/test/CodeGen/ARM/stack-protector-bmovpcb_call.ll +++ b/llvm/test/CodeGen/ARM/stack-protector-bmovpcb_call.ll @@ -15,13 +15,13 @@ define i32 @main() #0 { entry: %title = alloca [15 x i8], align 1 %0 = getelementptr inbounds [15 x i8], [15 x i8]* %title, i32 0, i32 0 - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* getelementptr inbounds ([15 x i8], [15 x i8]* @main.title, i32 0, i32 0), i32 15, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %0, i8* align 1 getelementptr inbounds ([15 x i8], [15 x i8]* @main.title, i32 0, i32 0), i32 15, i1 false) %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i8* %0) #3 ret i32 0 } ; Function Attrs: nounwind -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #1 ; Function Attrs: nounwind optsize declare i32 @printf(i8* nocapture readonly, ...) 
#2 diff --git a/llvm/test/CodeGen/ARM/struct-byval-frame-index.ll b/llvm/test/CodeGen/ARM/struct-byval-frame-index.ll index b3ed5de857b..c6509cfe9cf 100644 --- a/llvm/test/CodeGen/ARM/struct-byval-frame-index.ll +++ b/llvm/test/CodeGen/ARM/struct-byval-frame-index.ll @@ -60,10 +60,10 @@ target triple = "armv7l-unknown-linux-gnueabihf" @brefframe = external global [4 x [4 x i8]], align 1 ; Function Attrs: nounwind -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) #0 +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) #0 ; Function Attrs: nounwind -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) #0 +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) #0 ; Function Attrs: nounwind declare void @SetMotionVectorsMB(%structK* nocapture, i32) #1 @@ -122,10 +122,10 @@ for.cond210.preheader: ; preds = %if.then169 unreachable if.end230: ; preds = %if.end164 - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* bitcast ([4 x i32]* @b8mode to i8*), i32 16, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 undef, i8* align 4 bitcast ([4 x i32]* @b8mode to i8*), i32 16, i1 false) %b8pdir = getelementptr inbounds %structK, %structK* %2, i32 %1, i32 15 %3 = bitcast [4 x i32]* %b8pdir to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %3, i8* bitcast ([4 x i32]* @b8pdir to i8*), i32 16, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %3, i8* align 4 bitcast ([4 x i32]* @b8pdir to i8*), i32 16, i1 false) br i1 undef, label %if.end236, label %if.then233 if.then233: ; preds = %if.end230 diff --git a/llvm/test/CodeGen/ARM/tailcall-mem-intrinsics.ll b/llvm/test/CodeGen/ARM/tailcall-mem-intrinsics.ll index 6744efa8ab8..08370f2bf12 100644 --- a/llvm/test/CodeGen/ARM/tailcall-mem-intrinsics.ll +++ b/llvm/test/CodeGen/ARM/tailcall-mem-intrinsics.ll @@ -4,7 +4,7 @@ ; CHECK: bl __aeabi_memcpy define i8* @tail_memcpy_ret(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 { entry: - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false) ret i8* %p } @@ -12,7 +12,7 @@ entry: ; CHECK: bl __aeabi_memmove define i8* @tail_memmove_ret(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 { entry: - tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false) + tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false) ret i8* %p } @@ -20,12 +20,12 @@ entry: ; CHECK: bl __aeabi_memset define i8* @tail_memset_ret(i8* nocapture %p, i8 %c, i32 %n) #0 { entry: - tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i32 1, i1 false) + tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i1 false) ret i8* %p } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #0 -declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #0 -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) #0 +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0 +declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0 +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) #0 attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/AVR/std-ldd-immediate-overflow.ll b/llvm/test/CodeGen/AVR/std-ldd-immediate-overflow.ll index 290e349c534..5580e3ae973 100644 --- 
a/llvm/test/CodeGen/AVR/std-ldd-immediate-overflow.ll +++ b/llvm/test/CodeGen/AVR/std-ldd-immediate-overflow.ll @@ -8,11 +8,11 @@ define i32 @std_ldd_overflow() { store i32 0, i32 *%1 %2 = bitcast [4 x i8]* %dst to i8* %3 = bitcast [4 x i8]* %src to i8* - call void @llvm.memcpy.p0i8.p0i8.i16(i8* %2, i8* %3, i16 4, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i16(i8* %2, i8* %3, i16 4, i1 false) ; CHECK-NOT: std {{[XYZ]}}+64, {{r[0-9]+}} ; CHECK-NOT: ldd {{r[0-9]+}}, {{[XYZ]}}+64 ret i32 0 } -declare void @llvm.memcpy.p0i8.p0i8.i16(i8* nocapture writeonly, i8* nocapture readonly, i16, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i16(i8* nocapture writeonly, i8* nocapture readonly, i16, i1) diff --git a/llvm/test/CodeGen/BPF/byval.ll b/llvm/test/CodeGen/BPF/byval.ll index 25ba909d9cd..2d2e8d289d6 100644 --- a/llvm/test/CodeGen/BPF/byval.ll +++ b/llvm/test/CodeGen/BPF/byval.ll @@ -16,7 +16,7 @@ entry: store i32 3, i32* %arrayinit.element2, align 8 %arrayinit.start = getelementptr inbounds %struct.S, %struct.S* %.compoundliteral, i64 0, i32 0, i64 3 %scevgep4 = bitcast i32* %arrayinit.start to i8* - call void @llvm.memset.p0i8.i64(i8* %scevgep4, i8 0, i64 28, i32 4, i1 false) + call void @llvm.memset.p0i8.i64(i8* align 4 %scevgep4, i8 0, i64 28, i1 false) call void @foo(i32 %a, %struct.S* byval align 8 %.compoundliteral) #3 ret void } @@ -24,4 +24,4 @@ entry: declare void @foo(i32, %struct.S* byval align 8) #1 ; Function Attrs: nounwind -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #3 +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #3 diff --git a/llvm/test/CodeGen/BPF/ex1.ll b/llvm/test/CodeGen/BPF/ex1.ll index 97cc7e07ab9..de9599b54d2 100644 --- a/llvm/test/CodeGen/BPF/ex1.ll +++ b/llvm/test/CodeGen/BPF/ex1.ll @@ -12,7 +12,7 @@ define i32 @bpf_prog1(%struct.bpf_context* nocapture %ctx) #0 section "events/ne %devname = alloca [3 x i8], align 1 %fmt = alloca [15 x i8], align 1 %1 = getelementptr inbounds [3 x i8], [3 x i8]* %devname, i64 0, i64 0 - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @bpf_prog1.devname, i64 0, i64 0), i64 3, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @bpf_prog1.devname, i64 0, i64 0), i64 3, i1 false) %2 = getelementptr inbounds %struct.bpf_context, %struct.bpf_context* %ctx, i64 0, i32 0 %3 = load i64, i64* %2, align 8 %4 = inttoptr i64 %3 to %struct.sk_buff* @@ -25,7 +25,7 @@ define i32 @bpf_prog1(%struct.bpf_context* nocapture %ctx) #0 section "events/ne ; <label>:10 ; preds = %0 %11 = getelementptr inbounds [15 x i8], [15 x i8]* %fmt, i64 0, i64 0 - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %11, i8* getelementptr inbounds ([15 x i8], [15 x i8]* @bpf_prog1.fmt, i64 0, i64 0), i64 15, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %11, i8* getelementptr inbounds ([15 x i8], [15 x i8]* @bpf_prog1.fmt, i64 0, i64 0), i64 15, i1 false) %12 = call i32 (i8*, i32, ...) 
inttoptr (i64 11 to i32 (i8*, i32, ...)*)(i8* %11, i32 15, %struct.sk_buff* %4, i8* %7) #1 ; CHECK-LABEL: bpf_prog1: ; CHECK: call 4 @@ -43,4 +43,4 @@ define i32 @bpf_prog1(%struct.bpf_context* nocapture %ctx) #0 section "events/ne } ; Function Attrs: nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) #1 diff --git a/llvm/test/CodeGen/BPF/fi_ri.ll b/llvm/test/CodeGen/BPF/fi_ri.ll index 12452988e8a..b59f3f6f283 100644 --- a/llvm/test/CodeGen/BPF/fi_ri.ll +++ b/llvm/test/CodeGen/BPF/fi_ri.ll @@ -10,7 +10,7 @@ define i32 @test() #0 { ; CHECK: *(u32 *)(r10 - 8) = r1 ; CHECK: *(u64 *)(r10 - 16) = r1 ; CHECK: *(u64 *)(r10 - 24) = r1 - call void @llvm.memset.p0i8.i64(i8* %1, i8 0, i64 20, i32 4, i1 false) + call void @llvm.memset.p0i8.i64(i8* align 4 %1, i8 0, i64 20, i1 false) ; CHECK: r1 = r10 ; CHECK: r1 += -20 %2 = getelementptr inbounds %struct.key_t, %struct.key_t* %key, i64 0, i32 1, i64 0 @@ -20,6 +20,6 @@ define i32 @test() #0 { } ; Function Attrs: nounwind argmemonly -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #1 +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #1 declare void @test1(i8*) #2 diff --git a/llvm/test/CodeGen/BPF/reloc.ll b/llvm/test/CodeGen/BPF/reloc.ll index 75dbebf311e..53a7a1f4eab 100644 --- a/llvm/test/CodeGen/BPF/reloc.ll +++ b/llvm/test/CodeGen/BPF/reloc.ll @@ -12,7 +12,7 @@ define i32 @bpf_prog1(%struct.bpf_context* nocapture %ctx) #0 section "events/ne %devname = alloca [3 x i8], align 1 %fmt = alloca [15 x i8], align 1 %1 = getelementptr inbounds [3 x i8], [3 x i8]* %devname, i64 0, i64 0 - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @bpf_prog1.devname, i64 0, i64 0), i64 3, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @bpf_prog1.devname, i64 0, i64 0), i64 3, i1 false) %2 = getelementptr inbounds %struct.bpf_context, %struct.bpf_context* %ctx, i64 0, i32 0 %3 = load i64, i64* %2, align 8 %4 = inttoptr i64 %3 to %struct.sk_buff* @@ -25,7 +25,7 @@ define i32 @bpf_prog1(%struct.bpf_context* nocapture %ctx) #0 section "events/ne ; <label>:10 ; preds = %0 %11 = getelementptr inbounds [15 x i8], [15 x i8]* %fmt, i64 0, i64 0 - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %11, i8* getelementptr inbounds ([15 x i8], [15 x i8]* @bpf_prog1.fmt, i64 0, i64 0), i64 15, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %11, i8* getelementptr inbounds ([15 x i8], [15 x i8]* @bpf_prog1.fmt, i64 0, i64 0), i64 15, i1 false) %12 = call i32 (i8*, i32, ...) 
inttoptr (i64 11 to i32 (i8*, i32, ...)*)(i8* %11, i32 15, %struct.sk_buff* %4, i8* %7) #1 br label %13 @@ -38,6 +38,6 @@ define i32 @bpf_prog1(%struct.bpf_context* nocapture %ctx) #0 section "events/ne } ; Function Attrs: nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) #1 attributes #0 = { norecurse } diff --git a/llvm/test/CodeGen/BPF/rodata_1.ll b/llvm/test/CodeGen/BPF/rodata_1.ll index 9aa9e8c5780..687a1531d59 100644 --- a/llvm/test/CodeGen/BPF/rodata_1.ll +++ b/llvm/test/CodeGen/BPF/rodata_1.ll @@ -33,8 +33,8 @@ define i32 @test() local_unnamed_addr #0 { ; CHECK-LABEL: test: entry: - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds (%struct.test_t1, %struct.test_t1* @g1, i64 0, i32 0), i8* getelementptr inbounds (%struct.test_t1, %struct.test_t1* @test.t1, i64 0, i32 0), i64 3, i32 1, i1 false) - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* bitcast (%struct.test_t2* @g2 to i8*), i8* bitcast (%struct.test_t2* @test.t2 to i8*), i64 20, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds (%struct.test_t1, %struct.test_t1* @g1, i64 0, i32 0), i8* getelementptr inbounds (%struct.test_t1, %struct.test_t1* @test.t1, i64 0, i32 0), i64 3, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 bitcast (%struct.test_t2* @g2 to i8*), i8* align 4 bitcast (%struct.test_t2* @test.t2 to i8*), i64 20, i1 false) ; CHECK: r1 = g1 ; CHECK: r2 = 0 ; CHECK: *(u8 *)(r1 + 1) = r2 @@ -46,7 +46,7 @@ entry: } ; CHECK: .section .rodata,"a",@progbits -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1 attributes #0 = { nounwind } attributes #1 = { argmemonly nounwind } diff --git a/llvm/test/CodeGen/BPF/rodata_2.ll b/llvm/test/CodeGen/BPF/rodata_2.ll index 86b80118bc1..a7231f9635d 100644 --- a/llvm/test/CodeGen/BPF/rodata_2.ll +++ b/llvm/test/CodeGen/BPF/rodata_2.ll @@ -31,7 +31,7 @@ define i32 @test() local_unnamed_addr #0 { ; CHECK-LABEL: test: entry: - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds (%struct.test_t2, %struct.test_t2* @g, i64 0, i32 0), i8* getelementptr inbounds (%struct.test_t2, %struct.test_t2* @test.t2, i64 0, i32 0), i64 32, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 getelementptr inbounds (%struct.test_t2, %struct.test_t2* @g, i64 0, i32 0), i8* align 4 getelementptr inbounds (%struct.test_t2, %struct.test_t2* @test.t2, i64 0, i32 0), i64 32, i1 false) ; CHECK: r1 = g ; CHECK: r2 = 0 ; CHECK: *(u32 *)(r1 + 28) = r2 @@ -45,7 +45,7 @@ entry: } ; CHECK: .section .rodata.cst32,"aM",@progbits,32 -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1 attributes #0 = { nounwind } attributes #1 = { argmemonly nounwind } diff --git a/llvm/test/CodeGen/BPF/rodata_3.ll b/llvm/test/CodeGen/BPF/rodata_3.ll index 814ce764546..df8296c8a2f 100644 --- a/llvm/test/CodeGen/BPF/rodata_3.ll +++ b/llvm/test/CodeGen/BPF/rodata_3.ll @@ -25,7 +25,7 @@ ; Function Attrs: nounwind define i32 @test() local_unnamed_addr #0 { entry: - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds (%struct.test_t1, %struct.test_t1* @g, i64 0, i32 0), i8* 
getelementptr inbounds (%struct.test_t1, %struct.test_t1* @test.t1, i64 0, i32 0), i64 16, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 getelementptr inbounds (%struct.test_t1, %struct.test_t1* @g, i64 0, i32 0), i8* align 4 getelementptr inbounds (%struct.test_t1, %struct.test_t1* @test.t1, i64 0, i32 0), i64 16, i1 false) ; CHECK-EL: r2 = 1 ; CHECK-EL: *(u32 *)(r1 + 0) = r2 ; CHECK-EB: r2 = 16777216 @@ -35,7 +35,7 @@ entry: ; CHECK-EL: .section .rodata.cst16,"aM",@progbits,16 ; CHECK-EB: .section .rodata.cst16,"aM",@progbits,16 -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1 attributes #0 = { nounwind } attributes #1 = { argmemonly nounwind } diff --git a/llvm/test/CodeGen/BPF/rodata_4.ll b/llvm/test/CodeGen/BPF/rodata_4.ll index d6b9fba5be0..6e7b289d005 100644 --- a/llvm/test/CodeGen/BPF/rodata_4.ll +++ b/llvm/test/CodeGen/BPF/rodata_4.ll @@ -27,7 +27,7 @@ define i32 @test() local_unnamed_addr #0 { ; CHECK-LABEL: test: entry: - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* bitcast (%struct.test_t1* @g to i8*), i8* bitcast (%struct.test_t1* getelementptr inbounds ([4 x %struct.test_t1], [4 x %struct.test_t1]* @test.t1, i64 0, i64 1) to i8*), i64 6, i32 2, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 bitcast (%struct.test_t1* @g to i8*), i8* align 2 bitcast (%struct.test_t1* getelementptr inbounds ([4 x %struct.test_t1], [4 x %struct.test_t1]* @test.t1, i64 0, i64 1) to i8*), i64 6, i1 false) ; CHECK: r2 = 600 ; CHECK: *(u16 *)(r1 + 2) = r2 ; CHECK: r2 = 60 @@ -37,7 +37,7 @@ entry: ; CHECK .section .rodata,"a",@progbits ; Function Attrs: argmemonly nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1 attributes #0 = { nounwind } attributes #1 = { argmemonly nounwind } diff --git a/llvm/test/CodeGen/BPF/sanity.ll b/llvm/test/CodeGen/BPF/sanity.ll index 33cfc2fb030..8729f932391 100644 --- a/llvm/test/CodeGen/BPF/sanity.ll +++ b/llvm/test/CodeGen/BPF/sanity.ll @@ -103,7 +103,7 @@ declare i32 @manyarg(i32, i32, i32, i32, i32) #2 define void @foo_printf() #1 { %fmt = alloca [9 x i8], align 1 %1 = getelementptr inbounds [9 x i8], [9 x i8]* %fmt, i64 0, i64 0 - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* getelementptr inbounds ([9 x i8], [9 x i8]* @foo_printf.fmt, i64 0, i64 0), i64 9, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* getelementptr inbounds ([9 x i8], [9 x i8]* @foo_printf.fmt, i64 0, i64 0), i64 9, i1 false) ; CHECK-LABEL: foo_printf: ; CHECK: r1 = 729618802566522216 ll %2 = call i32 (i8*, ...) @printf(i8* %1) #3 @@ -111,7 +111,7 @@ define void @foo_printf() #1 { } ; Function Attrs: nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) #3 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) #3 ; Function Attrs: nounwind declare i32 @printf(i8* nocapture, ...) 
#4 diff --git a/llvm/test/CodeGen/BPF/undef.ll b/llvm/test/CodeGen/BPF/undef.ll index 586a24d1816..3736cb7a61d 100644 --- a/llvm/test/CodeGen/BPF/undef.ll +++ b/llvm/test/CodeGen/BPF/undef.ll @@ -54,12 +54,12 @@ define i32 @ebpf_filter(%struct.__sk_buff* nocapture readnone %ebpf_packet) #0 s %6 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 5 store i8 10, i8* %6, align 1 %7 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 1, i32 0, i64 0 - call void @llvm.memset.p0i8.i64(i8* %7, i8 0, i64 30, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %7, i8 0, i64 30, i1 false) %8 = call i32 (%struct.bpf_map_def*, %struct.routing_key_2*, ...) bitcast (i32 (...)* @bpf_map_lookup_elem to i32 (%struct.bpf_map_def*, %struct.routing_key_2*, ...)*)(%struct.bpf_map_def* nonnull @routing, %struct.routing_key_2* nonnull %key) #3 ret i32 undef } ; Function Attrs: nounwind argmemonly -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #1 +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #1 declare i32 @bpf_map_lookup_elem(...) #2 diff --git a/llvm/test/CodeGen/BPF/warn-call.ll b/llvm/test/CodeGen/BPF/warn-call.ll index 6dadb359cf6..f7ff83ade26 100644 --- a/llvm/test/CodeGen/BPF/warn-call.ll +++ b/llvm/test/CodeGen/BPF/warn-call.ll @@ -6,14 +6,14 @@ define i8* @warn(i8* returned, i8*, i64) local_unnamed_addr #0 !dbg !6 { tail call void @llvm.dbg.value(metadata i8* %0, i64 0, metadata !14, metadata !17), !dbg !18 tail call void @llvm.dbg.value(metadata i8* %1, i64 0, metadata !15, metadata !17), !dbg !19 tail call void @llvm.dbg.value(metadata i64 %2, i64 0, metadata !16, metadata !17), !dbg !20 - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 %2, i32 1, i1 false), !dbg !21 + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 %2, i1 false), !dbg !21 %4 = tail call i8* @foo(i8* %0, i8* %1, i64 %2) #5, !dbg !22 %5 = tail call fastcc i8* @bar(i8* %0), !dbg !23 ret i8* %5, !dbg !24 } ; Function Attrs: argmemonly nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1 declare i8* @foo(i8*, i8*, i64) local_unnamed_addr #2 diff --git a/llvm/test/CodeGen/Generic/ForceStackAlign.ll b/llvm/test/CodeGen/Generic/ForceStackAlign.ll index 57ccb2c41d7..7eed8321308 100644 --- a/llvm/test/CodeGen/Generic/ForceStackAlign.ll +++ b/llvm/test/CodeGen/Generic/ForceStackAlign.ll @@ -18,10 +18,10 @@ entry: if.then: %0 = alloca i8, i32 %i - call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 %i, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 %i, i1 false) %call = call i32 @f(i8* %0) %conv = sext i32 %call to i64 ret i64 %conv } -declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i32, i1) nounwind +declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i1) nounwind diff --git a/llvm/test/CodeGen/Generic/invalid-memcpy.ll b/llvm/test/CodeGen/Generic/invalid-memcpy.ll index d4252bc9d98..51a580678e0 100644 --- a/llvm/test/CodeGen/Generic/invalid-memcpy.ll +++ b/llvm/test/CodeGen/Generic/invalid-memcpy.ll @@ -10,8 +10,8 @@ define void @Bork() { entry: %Qux = alloca [33 x i8] %Qux1 = bitcast [33 x i8]* %Qux to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %Qux1, i8* getelementptr inbounds ([33 x i8], [33 x i8]* @C.0.1173, i32 0, i32 0), i64 33, i32 8, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %Qux1, i8* align 8 
getelementptr inbounds ([33 x i8], [33 x i8]* @C.0.1173, i32 0, i32 0), i64 33, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind diff --git a/llvm/test/CodeGen/Hexagon/adjust-latency-stackST.ll b/llvm/test/CodeGen/Hexagon/adjust-latency-stackST.ll index 915db91635f..e8d8364f4ed 100644 --- a/llvm/test/CodeGen/Hexagon/adjust-latency-stackST.ll +++ b/llvm/test/CodeGen/Hexagon/adjust-latency-stackST.ll @@ -26,7 +26,7 @@ b1: store %struct.0* %v5, %struct.0** %v2, align 4 %v6 = bitcast %struct.0* %v5 to i8* %v7 = load i8*, i8** bitcast (%struct.0** @G to i8**), align 4 - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %v6, i8* %v7, i32 48, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %v6, i8* align 4 %v7, i32 48, i1 false) %v8 = getelementptr inbounds %struct.0, %struct.0* %a0, i32 0, i32 2, i32 0, i32 1 store i32 5, i32* %v8, align 4 %v9 = getelementptr inbounds %struct.0, %struct.0* %v5, i32 0, i32 2, i32 0, i32 1 @@ -64,14 +64,14 @@ b32: ; preds = %b1 %v33 = bitcast %struct.0* %a0 to i8** %v34 = load i8*, i8** %v33, align 4 %v35 = bitcast %struct.0* %a0 to i8* - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %v35, i8* %v34, i32 48, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %v35, i8* align 4 %v34, i32 48, i1 false) br label %b36 b36: ; preds = %b32, %b18 ret i32 undef } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1 declare i32 @f0(...) #0 declare i32 @f1(...) #0 diff --git a/llvm/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll b/llvm/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll index e09f7986621..541d9d51142 100644 --- a/llvm/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll +++ b/llvm/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll @@ -18,11 +18,11 @@ b1: ; preds = %b0 b2: ; preds = %b1, %b0 %t1 = phi i8* [ %t0, %b1 ], [ undef, %b0 ] %t2 = getelementptr inbounds i8, i8* %t1, i32 %p0 - tail call void @llvm.memmove.p0i8.p0i8.i32(i8* undef, i8* %t2, i32 undef, i32 1, i1 false) #1 + tail call void @llvm.memmove.p0i8.p0i8.i32(i8* undef, i8* %t2, i32 undef, i1 false) #1 unreachable } -declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #0 +declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0 attributes #0 = { argmemonly nounwind } attributes #1 = { nounwind } diff --git a/llvm/test/CodeGen/Hexagon/early-if-conversion-bug1.ll b/llvm/test/CodeGen/Hexagon/early-if-conversion-bug1.ll index 6739b03985d..804b083e84b 100644 --- a/llvm/test/CodeGen/Hexagon/early-if-conversion-bug1.ll +++ b/llvm/test/CodeGen/Hexagon/early-if-conversion-bug1.ll @@ -23,7 +23,7 @@ target triple = "hexagon" %union.anon.0 = type { i8 } ; Function Attrs: nounwind -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #0 +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0 declare i32 @__gxx_personality_v0(...) 
@@ -98,7 +98,7 @@ entry: if.then: ; preds = %entry %1 = bitcast %"class.std::__1::ostreambuf_iterator"* %retval to i8* %2 = bitcast %"class.std::__1::ostreambuf_iterator"* %__s to i8* - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %2, i32 4, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* align 4 %2, i32 4, i1 false) br label %return if.end: ; preds = %entry @@ -166,7 +166,7 @@ if.then12: ; preds = %if.then8 store %"class.std::__1::basic_streambuf"* null, %"class.std::__1::basic_streambuf"** %__sbuf_13, align 4 %22 = bitcast %"class.std::__1::ostreambuf_iterator"* %retval to i8* %23 = bitcast %"class.std::__1::ostreambuf_iterator"* %__s to i8* - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %22, i8* %23, i32 4, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %22, i8* align 4 %23, i32 4, i1 false) br label %return if.end14: ; preds = %if.then8 @@ -296,7 +296,7 @@ if.then22: ; preds = %invoke.cont store %"class.std::__1::basic_streambuf"* null, %"class.std::__1::basic_streambuf"** %__sbuf_23, align 4 %53 = bitcast %"class.std::__1::ostreambuf_iterator"* %retval to i8* %54 = bitcast %"class.std::__1::ostreambuf_iterator"* %__s to i8* - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %53, i8* %54, i32 4, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %53, i8* align 4 %54, i32 4, i1 false) store i32 1, i32* %cleanup.dest.slot br label %cleanup @@ -361,7 +361,7 @@ if.then34: ; preds = %if.then30 store %"class.std::__1::basic_streambuf"* null, %"class.std::__1::basic_streambuf"** %__sbuf_35, align 4 %69 = bitcast %"class.std::__1::ostreambuf_iterator"* %retval to i8* %70 = bitcast %"class.std::__1::ostreambuf_iterator"* %__s to i8* - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %69, i8* %70, i32 4, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %69, i8* align 4 %70, i32 4, i1 false) br label %return if.end36: ; preds = %if.then30 @@ -381,7 +381,7 @@ if.end37: ; preds = %if.end36, %if.end25 %74 = load i32, i32* %__r.i, align 4 %75 = bitcast %"class.std::__1::ostreambuf_iterator"* %retval to i8* %76 = bitcast %"class.std::__1::ostreambuf_iterator"* %__s to i8* - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %75, i8* %76, i32 4, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %75, i8* align 4 %76, i32 4, i1 false) br label %return return: ; preds = %if.end37, %if.then34, %cleanup, %if.then12, %if.then diff --git a/llvm/test/CodeGen/Hexagon/mem-fi-add.ll b/llvm/test/CodeGen/Hexagon/mem-fi-add.ll index a46029fdb5e..4ec62c58170 100644 --- a/llvm/test/CodeGen/Hexagon/mem-fi-add.ll +++ b/llvm/test/CodeGen/Hexagon/mem-fi-add.ll @@ -13,14 +13,14 @@ define void @foo() #0 { entry: %t = alloca [4 x [2 x i32]], align 8 %0 = bitcast [4 x [2 x i32]]* %t to i8* - call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 32, i32 8, i1 false) + call void @llvm.memset.p0i8.i32(i8* align 8 %0, i8 0, i32 32, i1 false) %arraydecay = getelementptr inbounds [4 x [2 x i32]], [4 x [2 x i32]]* %t, i32 0, i32 0 call void @bar([2 x i32]* %arraydecay) #1 ret void } ; Function Attrs: nounwind -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) #1 +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) #1 declare void @bar([2 x i32]*) #2 diff --git a/llvm/test/CodeGen/Hexagon/memcpy-likely-aligned.ll b/llvm/test/CodeGen/Hexagon/memcpy-likely-aligned.ll index f2677efc304..2cce3f12c1d 100644 --- a/llvm/test/CodeGen/Hexagon/memcpy-likely-aligned.ll +++ b/llvm/test/CodeGen/Hexagon/memcpy-likely-aligned.ll @@ -25,8 +25,8 @@ 
entry:
%m = getelementptr inbounds %struct.n, %struct.n* %p, i32 0, i32 0
%arraydecay = getelementptr inbounds [2 x %struct.l], [2 x %struct.l]* %m, i32 0, i32 0
%3 = bitcast %struct.l* %arraydecay to i8*
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %3, i8* getelementptr inbounds ({ <{ { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e }, { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e } }> }, { <{ { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e }, { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e } }> }* @y, i32 0, i32 0, i32 0, i32 0, i32 0), i32 32, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %3, i8* align 4 getelementptr inbounds ({ <{ { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e }, { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e } }> }, { <{ { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e }, { %struct.e, { i8, i8, i8, [5 x i8] }, %struct.e } }> }* @y, i32 0, i32 0, i32 0, i32 0, i32 0), i32 32, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
diff --git a/llvm/test/CodeGen/Hexagon/rdf-filter-defs.ll b/llvm/test/CodeGen/Hexagon/rdf-filter-defs.ll
index 735b20e697f..5c55c500634 100644
--- a/llvm/test/CodeGen/Hexagon/rdf-filter-defs.ll
+++ b/llvm/test/CodeGen/Hexagon/rdf-filter-defs.ll
@@ -120,7 +120,7 @@ b18:
%t45 = getelementptr inbounds i8, i8* %t42, i32 %p1
%t46 = load i32, i32* %t0, align 4
%t47 = sub i32 %t46, %p1
- tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %t44, i8* %t45, i32 %t47, i32 1, i1 false) #1
+ tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %t44, i8* %t45, i32 %t47, i1 false) #1
%t48 = icmp eq %type.0* %p0, %p2
%t49 = load i32, i32* %t22, align 4
%t50 = icmp ugt i32 %t49, 15
@@ -158,7 +158,7 @@ b25:
%t61 = select i1 %t60, i32 %t13, i32 0
%t62 = add i32 %t61, %p3
%t63 = getelementptr inbounds i8, i8* %t59, i32 %t62
- tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %t55, i8* %t63, i32 %t13, i32 1, i1 false) #1
+ tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %t55, i8* %t63, i32 %t13, i1 false) #1
br label %b27
b26:
@@ -171,7 +171,7 @@ b26:
%t70 = bitcast %type.3* %t67 to i8*
%t71 = select i1 %t66, i8* %t69, i8* %t70
%t72 = getelementptr inbounds i8, i8* %t71, i32 %p3
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %t55, i8* %t72, i32 %t13, i32 1, i1 false) #1
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %t55, i8* %t72, i32 %t13, i1 false) #1
br label %b27
b27:
@@ -203,8 +203,8 @@ b33:
ret %type.0* %p0
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i32, i1) #0
-declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #0
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #0
+declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0
declare void @blah(%type.4*) local_unnamed_addr
declare void @danny(%type.4*) local_unnamed_addr
diff --git a/llvm/test/CodeGen/Hexagon/store-imm-stack-object.ll b/llvm/test/CodeGen/Hexagon/store-imm-stack-object.ll
index c0eaea26cc2..5566bda7683 100644
--- a/llvm/test/CodeGen/Hexagon/store-imm-stack-object.ll
+++ b/llvm/test/CodeGen/Hexagon/store-imm-stack-object.ll
@@ -59,11 +59,11 @@ b0:
store i32 875770417, i32* %v4, align 4
%v11 = getelementptr inbounds [100 x i8], [100 x i8]* %v5, i32 0, i32 0
call void @llvm.lifetime.start(i64 100, i8* %v11)
- call void @llvm.memset.p0i8.i32(i8* %v11, i8 0, i32 100, i32 8, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* align 8 %v11, i8 0, i32 100, i1 false)
store i8 50, i8* %v11, align 8
%v12 = getelementptr inbounds [101 x i8], [101 x i8]* %v6, i32 0, i32 0
call void @llvm.lifetime.start(i64 101, i8* %v12)
- call void @llvm.memset.p0i8.i32(i8* %v12, i8 0, i32 101, i32 8, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* align 8 %v12, i8 0, i32 101, i1 false)
store i8 49, i8* %v12, align 8
call void @test3(i8* %v7, i8* %v8, i8* %v9, i8* %v10, i8* %v11, i8* %v12)
call void @llvm.lifetime.end(i64 101, i8* %v12)
@@ -77,7 +77,7 @@ b0:
declare void @llvm.lifetime.start(i64, i8* nocapture) #0
declare void @llvm.lifetime.end(i64, i8* nocapture) #0
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i32, i1) #0
+declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) #0
declare void @test3(i8*, i8*, i8*, i8*, i8*, i8*)
declare void @test4(i8*, i8*, i8*, i8*)
diff --git a/llvm/test/CodeGen/Hexagon/tail-call-mem-intrinsics.ll b/llvm/test/CodeGen/Hexagon/tail-call-mem-intrinsics.ll
index 90fb75e5be0..7f0fb6281ff 100644
--- a/llvm/test/CodeGen/Hexagon/tail-call-mem-intrinsics.ll
+++ b/llvm/test/CodeGen/Hexagon/tail-call-mem-intrinsics.ll
@@ -4,7 +4,7 @@
; CHECK: jump memcpy
define void @tail_memcpy(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 {
entry:
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false)
ret void
}
@@ -12,7 +12,7 @@ entry:
; CHECK: jump memmove
define void @tail_memmove(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 {
entry:
- tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false)
ret void
}
@@ -20,12 +20,12 @@ entry:
; CHECK: jump memset
define void @tail_memset(i8* nocapture %p, i8 %c, i32 %n) #0 {
entry:
- tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #0
-declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #0
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) #0
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0
+declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) #0
attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/MSP430/memset.ll b/llvm/test/CodeGen/MSP430/memset.ll
index a24bfafc200..10b506c60d9 100644
--- a/llvm/test/CodeGen/MSP430/memset.ll
+++ b/llvm/test/CodeGen/MSP430/memset.ll
@@ -13,10 +13,10 @@ entry:
; CHECK-NEXT: mov.w #5, r13
; CHECK-NEXT: mov.w #128, r14
; CHECK-NEXT: call #memset
- call void @llvm.memset.p0i8.i16(i8* %0, i8 5, i16 128, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i16(i8* %0, i8 5, i16 128, i1 false)
ret void
}
; Function Attrs: nounwind
-declare void @llvm.memset.p0i8.i16(i8* nocapture, i8, i16, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i16(i8* nocapture, i8, i16, i1) nounwind
diff --git a/llvm/test/CodeGen/Mips/2012-12-12-ExpandMemcpy.ll b/llvm/test/CodeGen/Mips/2012-12-12-ExpandMemcpy.ll
index 24bcfaee8ba..2964b19c1d3 100644
--- a/llvm/test/CodeGen/Mips/2012-12-12-ExpandMemcpy.ll
+++
b/llvm/test/CodeGen/Mips/2012-12-12-ExpandMemcpy.ll @@ -4,8 +4,8 @@ define void @t(i8* %ptr) { entry: - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %ptr, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i64 0, i64 0), i64 7, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %ptr, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i64 0, i64 0), i64 7, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/memtest1.ll b/llvm/test/CodeGen/Mips/Fast-ISel/memtest1.ll index aca6aa569ba..3e30f75d6f4 100644 --- a/llvm/test/CodeGen/Mips/Fast-ISel/memtest1.ll +++ b/llvm/test/CodeGen/Mips/Fast-ISel/memtest1.ll @@ -10,9 +10,9 @@ @i = global i32 12, align 4 @dest = common global [50 x i8] zeroinitializer, align 1 -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) -declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) +declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) define void @cpy(i8* %src, i32 %i) { ; ALL-LABEL: cpy: @@ -28,8 +28,7 @@ define void @cpy(i8* %src, i32 %i) { ; ALL: jalr $[[T2]] ; ALL-NEXT: nop ; ALL-NOT: {{.*}}$2{{.*}} - call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([50 x i8], [50 x i8]* @dest, i32 0, i32 0), - i8* %src, i32 %i, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([50 x i8], [50 x i8]* @dest, i32 0, i32 0), i8* %src, i32 %i, i1 false) ret void } @@ -48,8 +47,7 @@ define void @mov(i8* %src, i32 %i) { ; ALL: jalr $[[T2]] ; ALL-NEXT: nop ; ALL-NOT: {{.*}}$2{{.*}} - call void @llvm.memmove.p0i8.p0i8.i32(i8* getelementptr inbounds ([50 x i8], [50 x i8]* @dest, i32 0, i32 0), - i8* %src, i32 %i, i32 1, i1 false) + call void @llvm.memmove.p0i8.p0i8.i32(i8* getelementptr inbounds ([50 x i8], [50 x i8]* @dest, i32 0, i32 0), i8* %src, i32 %i, i1 false) ret void } @@ -68,7 +66,6 @@ define void @clear(i32 %i) { ; ALL: jalr $[[T2]] ; ALL-NEXT: nop ; ALL-NOT: {{.*}}$2{{.*}} - call void @llvm.memset.p0i8.i32(i8* getelementptr inbounds ([50 x i8], [50 x i8]* @dest, i32 0, i32 0), - i8 42, i32 %i, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* getelementptr inbounds ([50 x i8], [50 x i8]* @dest, i32 0, i32 0), i8 42, i32 %i, i1 false) ret void } diff --git a/llvm/test/CodeGen/Mips/biggot.ll b/llvm/test/CodeGen/Mips/biggot.ll index b266b5e05e2..305dcf85572 100644 --- a/llvm/test/CodeGen/Mips/biggot.ll +++ b/llvm/test/CodeGen/Mips/biggot.ll @@ -48,8 +48,8 @@ entry: %0 = bitcast i32* %d to i8* %1 = bitcast i32* %s to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 %n, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 %n, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind diff --git a/llvm/test/CodeGen/Mips/cconv/arguments-small-structures-bigger-than-32bits.ll b/llvm/test/CodeGen/Mips/cconv/arguments-small-structures-bigger-than-32bits.ll index 
56f9a64908b..33d1a4fe1b7 100644 --- a/llvm/test/CodeGen/Mips/cconv/arguments-small-structures-bigger-than-32bits.ll +++ b/llvm/test/CodeGen/Mips/cconv/arguments-small-structures-bigger-than-32bits.ll @@ -43,7 +43,7 @@ declare void @fS1(i48 inreg) #1 declare void @fS2(i40 inreg) #1 -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #2 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #2 define void @f1() #0 { entry: @@ -51,7 +51,7 @@ entry: %s1_1.coerce = alloca { i48 } %0 = bitcast { i48 }* %s1_1.coerce to i8* %1 = bitcast %struct.S1* %s1_1 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 6, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 6, i1 false) %2 = getelementptr { i48 }, { i48 }* %s1_1.coerce, i32 0, i32 0 %3 = load i48, i48* %2, align 1 call void @fS1(i48 inreg %3) @@ -68,7 +68,7 @@ entry: %s2_1.coerce = alloca { i40 } %0 = bitcast { i40 }* %s2_1.coerce to i8* %1 = bitcast %struct.S2* %s2_1 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 5, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 5, i1 false) %2 = getelementptr { i40 }, { i40 }* %s2_1.coerce, i32 0, i32 0 %3 = load i40, i40* %2, align 1 call void @fS2(i40 inreg %3) diff --git a/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll b/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll index b41b5b7597c..5009c9efb43 100644 --- a/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll +++ b/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll @@ -172,7 +172,7 @@ entry: %0 = load %struct.SmallStruct_3b*, %struct.SmallStruct_3b** %ss.addr, align 8 %1 = bitcast { i24 }* %.coerce to i8* %2 = bitcast %struct.SmallStruct_3b* %0 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 3, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 3, i1 false) %3 = getelementptr { i24 }, { i24 }* %.coerce, i32 0, i32 0 %4 = load i24, i24* %3, align 1 call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i24 inreg %4) @@ -181,7 +181,7 @@ entry: ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 40 } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1 define void @smallStruct_4b(%struct.SmallStruct_4b* %ss) #0 { entry: @@ -205,7 +205,7 @@ entry: %0 = load %struct.SmallStruct_5b*, %struct.SmallStruct_5b** %ss.addr, align 8 %1 = bitcast { i40 }* %.coerce to i8* %2 = bitcast %struct.SmallStruct_5b* %0 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 5, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 5, i1 false) %3 = getelementptr { i40 }, { i40 }* %.coerce, i32 0, i32 0 %4 = load i40, i40* %3, align 1 call void (i8*, ...) 
@varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i40 inreg %4) @@ -222,7 +222,7 @@ entry: %0 = load %struct.SmallStruct_6b*, %struct.SmallStruct_6b** %ss.addr, align 8 %1 = bitcast { i48 }* %.coerce to i8* %2 = bitcast %struct.SmallStruct_6b* %0 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i1 false) %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0 %4 = load i48, i48* %3, align 1 call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i48 inreg %4) @@ -239,7 +239,7 @@ entry: %0 = load %struct.SmallStruct_7b*, %struct.SmallStruct_7b** %ss.addr, align 8 %1 = bitcast { i56 }* %.coerce to i8* %2 = bitcast %struct.SmallStruct_7b* %0 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 7, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 7, i1 false) %3 = getelementptr { i56 }, { i56 }* %.coerce, i32 0, i32 0 %4 = load i56, i56* %3, align 1 call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i56 inreg %4) @@ -272,7 +272,7 @@ entry: %0 = load %struct.SmallStruct_9b*, %struct.SmallStruct_9b** %ss.addr, align 8 %1 = bitcast { i64, i8 }* %.coerce to i8* %2 = bitcast %struct.SmallStruct_9b* %0 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 9, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 9, i1 false) %3 = getelementptr { i64, i8 }, { i64, i8 }* %.coerce, i32 0, i32 0 %4 = load i64, i64* %3, align 1 %5 = getelementptr { i64, i8 }, { i64, i8 }* %.coerce, i32 0, i32 1 diff --git a/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll b/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll index 8a20f5e43f1..d3c8f280c59 100644 --- a/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll +++ b/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll @@ -107,7 +107,7 @@ entry: %0 = load %struct.SmallStruct_1b1s1b*, %struct.SmallStruct_1b1s1b** %ss.addr, align 8 %1 = bitcast { i48 }* %.coerce to i8* %2 = bitcast %struct.SmallStruct_1b1s1b* %0 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i1 false) %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0 %4 = load i48, i48* %3, align 1 call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i48 inreg %4) @@ -116,7 +116,7 @@ entry: ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 16 } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1 define void @smallStruct_1s1i(%struct.SmallStruct_1s1i* %ss) #0 { entry: @@ -141,7 +141,7 @@ entry: %0 = load %struct.SmallStruct_3b1s*, %struct.SmallStruct_3b1s** %ss.addr, align 8 %1 = bitcast { i48 }* %.coerce to i8* %2 = bitcast %struct.SmallStruct_3b1s* %0 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i1 false) %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0 %4 = load i48, i48* %3, align 1 call void (i8*, ...) 
@varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i48 inreg %4) diff --git a/llvm/test/CodeGen/Mips/cconv/return-struct.ll b/llvm/test/CodeGen/Mips/cconv/return-struct.ll index 0997cfbd98a..3ccef2631cc 100644 --- a/llvm/test/CodeGen/Mips/cconv/return-struct.ll +++ b/llvm/test/CodeGen/Mips/cconv/return-struct.ll @@ -18,7 +18,7 @@ @struct_6xi32 = global {[6 x i32]} zeroinitializer @struct_128xi16 = global {[128 x i16]} zeroinitializer -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) define inreg {i8} @ret_struct_i8() nounwind { entry: @@ -50,7 +50,7 @@ define inreg {i16} @ret_struct_i16() nounwind { entry: %retval = alloca {i8,i8}, align 1 %0 = bitcast {i8,i8}* %retval to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds ({i8,i8}, {i8,i8}* @struct_2byte, i32 0, i32 0), i64 2, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds ({i8,i8}, {i8,i8}* @struct_2byte, i32 0, i32 0), i64 2, i1 false) %1 = bitcast {i8,i8}* %retval to {i16}* %2 = load volatile {i16}, {i16}* %1 ret {i16} %2 @@ -144,7 +144,7 @@ entry: define void @ret_struct_128xi16({[128 x i16]}* sret %returnval) { entry: %0 = bitcast {[128 x i16]}* %returnval to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast ({[128 x i16]}* @struct_128xi16 to i8*), i64 256, i32 2, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 %0, i8* align 2 bitcast ({[128 x i16]}* @struct_128xi16 to i8*), i64 256, i1 false) ret void } diff --git a/llvm/test/CodeGen/Mips/largeimmprinting.ll b/llvm/test/CodeGen/Mips/largeimmprinting.ll index f27e11425b9..6460260f67b 100644 --- a/llvm/test/CodeGen/Mips/largeimmprinting.ll +++ b/llvm/test/CodeGen/Mips/largeimmprinting.ll @@ -26,11 +26,11 @@ entry: %agg.tmp = alloca %struct.S1, align 1 %tmp = getelementptr inbounds %struct.S1, %struct.S1* %agg.tmp, i32 0, i32 0, i32 0 - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* getelementptr inbounds (%struct.S1, %struct.S1* @s1, i32 0, i32 0, i32 0), i32 65536, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %tmp, i8* align 1 getelementptr inbounds (%struct.S1, %struct.S1* @s1, i32 0, i32 0, i32 0), i32 65536, i1 false) call void @f2(%struct.S1* byval %agg.tmp) nounwind ret void } declare void @f2(%struct.S1* byval) -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind diff --git a/llvm/test/CodeGen/Mips/long-calls.ll b/llvm/test/CodeGen/Mips/long-calls.ll index 8a95e9b9307..d4652a54635 100644 --- a/llvm/test/CodeGen/Mips/long-calls.ll +++ b/llvm/test/CodeGen/Mips/long-calls.ll @@ -17,7 +17,7 @@ ; RUN: | FileCheck -check-prefix=ON64 %s declare void @callee() -declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i32, i1) +declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) @val = internal unnamed_addr global [20 x i32] zeroinitializer, align 4 @@ -52,6 +52,6 @@ define void @caller() { ; ON64: jalr $25 call void @callee() - call void @llvm.memset.p0i8.i32(i8* bitcast ([20 x i32]* @val to i8*), i8 0, i32 80, i32 4, i1 false) + call void @llvm.memset.p0i8.i32(i8* align 4 bitcast ([20 x i32]* @val to i8*), i8 0, i32 80, i1 false) ret void } diff --git a/llvm/test/CodeGen/Mips/memcpy.ll b/llvm/test/CodeGen/Mips/memcpy.ll index 
5c4ebb27dde..0feb1fc5862 100644 --- a/llvm/test/CodeGen/Mips/memcpy.ll +++ b/llvm/test/CodeGen/Mips/memcpy.ll @@ -9,11 +9,11 @@ entry: ; CHECK-NOT: call16(memcpy %arraydecay = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 1, i32 0 - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %arraydecay, i8* getelementptr inbounds ([31 x i8], [31 x i8]* @.str, i32 0, i32 0), i32 31, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %arraydecay, i8* align 1 getelementptr inbounds ([31 x i8], [31 x i8]* @.str, i32 0, i32 0), i32 31, i1 false) %arrayidx = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 1, i32 40 store i8 %n, i8* %arrayidx, align 1 ret void } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind diff --git a/llvm/test/CodeGen/Mips/pr33978.ll b/llvm/test/CodeGen/Mips/pr33978.ll index 19fa1715baa..c3d6ee51c6e 100644 --- a/llvm/test/CodeGen/Mips/pr33978.ll +++ b/llvm/test/CodeGen/Mips/pr33978.ll @@ -11,10 +11,10 @@ start: %b = alloca [22 x i8] %c = bitcast [22 x i8]* %a to i8* %d = getelementptr inbounds [22 x i8], [22 x i8]* %b, i32 0, i32 2 - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %c, i8* %d, i32 20, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %c, i8* %d, i32 20, i1 false) %e = getelementptr inbounds [22 x i8], [22 x i8]* %b, i32 0, i32 6 - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %e, i32 12, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %e, i32 12, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i1) diff --git a/llvm/test/CodeGen/Mips/tailcall/tailcall.ll b/llvm/test/CodeGen/Mips/tailcall/tailcall.ll index 02556fbfd3d..b17f9efd7a0 100644 --- a/llvm/test/CodeGen/Mips/tailcall/tailcall.ll +++ b/llvm/test/CodeGen/Mips/tailcall/tailcall.ll @@ -267,7 +267,7 @@ entry: declare i32 @callee12() -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind define i32 @caller12(%struct.S* nocapture byval %a0) nounwind { entry: @@ -283,7 +283,7 @@ entry: ; PIC16: jalrc %0 = bitcast %struct.S* %a0 to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* bitcast (%struct.S* @gs1 to i8*), i8* %0, i32 8, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 bitcast (%struct.S* @gs1 to i8*), i8* align 4 %0, i32 8, i1 false) %call = tail call i32 @callee12() nounwind ret i32 %call } diff --git a/llvm/test/CodeGen/NVPTX/lower-aggr-copies.ll b/llvm/test/CodeGen/NVPTX/lower-aggr-copies.ll index 1da1af65947..80f9107472f 100644 --- a/llvm/test/CodeGen/NVPTX/lower-aggr-copies.ll +++ b/llvm/test/CodeGen/NVPTX/lower-aggr-copies.ll @@ -7,13 +7,13 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" target triple = "nvptx64-unknown-unknown" -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1 -declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1 -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1 +declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1 +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #1 
define i8* @memcpy_caller(i8* %dst, i8* %src, i64 %n) #0 { entry: - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i1 false) ret i8* %dst ; IR-LABEL: @memcpy_caller @@ -46,7 +46,7 @@ entry: define i8* @memcpy_volatile_caller(i8* %dst, i8* %src, i64 %n) #0 { entry: - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i32 1, i1 true) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i1 true) ret i8* %dst ; IR-LABEL: @memcpy_volatile_caller @@ -81,7 +81,7 @@ define i8* @memcpy_casting_caller(i32* %dst, i32* %src, i64 %n) #0 { entry: %0 = bitcast i32* %dst to i8* %1 = bitcast i32* %src to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 %n, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 %n, i1 false) ret i8* %0 ; Check that casts in calls to memcpy are handled properly @@ -94,7 +94,7 @@ entry: define i8* @memcpy_known_size(i8* %dst, i8* %src) { entry: - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 144, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 144, i1 false) ret i8* %dst ; Check that calls with compile-time constant size are handled correctly @@ -115,7 +115,7 @@ entry: define i8* @memset_caller(i8* %dst, i32 %c, i64 %n) #0 { entry: %0 = trunc i32 %c to i8 - tail call void @llvm.memset.p0i8.i64(i8* %dst, i8 %0, i64 %n, i32 1, i1 false) + tail call void @llvm.memset.p0i8.i64(i8* %dst, i8 %0, i64 %n, i1 false) ret i8* %dst ; IR-LABEL: @memset_caller @@ -139,7 +139,7 @@ entry: define i8* @volatile_memset_caller(i8* %dst, i32 %c, i64 %n) #0 { entry: %0 = trunc i32 %c to i8 - tail call void @llvm.memset.p0i8.i64(i8* %dst, i8 %0, i64 %n, i32 1, i1 true) + tail call void @llvm.memset.p0i8.i64(i8* %dst, i8 %0, i64 %n, i1 true) ret i8* %dst ; IR-LABEL: @volatile_memset_caller @@ -151,7 +151,7 @@ entry: define i8* @memmove_caller(i8* %dst, i8* %src, i64 %n) #0 { entry: - tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i32 1, i1 false) + tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i1 false) ret i8* %dst ; IR-LABEL: @memmove_caller diff --git a/llvm/test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll b/llvm/test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll index e87fca07410..14a89b03495 100644 --- a/llvm/test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll +++ b/llvm/test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll @@ -179,7 +179,7 @@ for.end.7: ; preds = %entry, %for.end.7 br i1 %exitcond.7, label %for.end12, label %for.end.7 } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind declare i32 @puts(i8* nocapture) nounwind diff --git a/llvm/test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll b/llvm/test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll index ca752f568e0..02da82d4029 100644 --- a/llvm/test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll +++ b/llvm/test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll @@ -213,7 +213,7 @@ for.end23: ; preds = %for.end17 ret i32 0 } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind declare i32 @puts(i8* nocapture) nounwind diff --git a/llvm/test/CodeGen/PowerPC/MMO-flags-assertion.ll 
b/llvm/test/CodeGen/PowerPC/MMO-flags-assertion.ll index ab9f76f4609..64fa85a8195 100644 --- a/llvm/test/CodeGen/PowerPC/MMO-flags-assertion.ll +++ b/llvm/test/CodeGen/PowerPC/MMO-flags-assertion.ll @@ -4,7 +4,7 @@ ; Assertion `MMO->getFlags() == getFlags() && "Flags mismatch !"' failed. declare void @_Z3fn11F(%class.F* byval align 8) local_unnamed_addr -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) declare signext i32 @_ZN1F11isGlobalRegEv(%class.F*) local_unnamed_addr declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) declare void @_Z10EmitLValuev(%class.F* sret) local_unnamed_addr @@ -28,7 +28,7 @@ entry: call void @_Z10EmitLValuev(%class.F* nonnull sret %XLValue) %1 = bitcast %class.F* %agg.tmp1 to i8* call void @llvm.lifetime.start.p0i8(i64 96, i8* nonnull %1) - call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull %1, i8* nonnull %0, i64 96, i32 8, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 nonnull %1, i8* align 8 nonnull %0, i64 96, i1 false) call void @_Z3fn11F(%class.F* byval nonnull align 8 %XLValue) %call.i = call signext i32 @_ZN1F11isGlobalRegEv(%class.F* nonnull %agg.tmp1) call void @llvm.lifetime.end.p0i8(i64 96, i8* nonnull %1) diff --git a/llvm/test/CodeGen/PowerPC/aantidep-inline-asm-use.ll b/llvm/test/CodeGen/PowerPC/aantidep-inline-asm-use.ll index f0c0deacf4d..d31a5553bf9 100644 --- a/llvm/test/CodeGen/PowerPC/aantidep-inline-asm-use.ll +++ b/llvm/test/CodeGen/PowerPC/aantidep-inline-asm-use.ll @@ -112,7 +112,7 @@ _ZN10SubProcess12SafeSyscalls5closeEi.exit22: ; preds = %_ZN10SubProcess12Sa br label %.thread .thread: ; preds = %45, %.thread.outer - call void @llvm.memset.p0i8.i64(i8* undef, i8 0, i64 56, i32 8, i1 false) + call void @llvm.memset.p0i8.i64(i8* align 8 undef, i8 0, i64 56, i1 false) store i8* %21, i8** undef, align 8 store i32 1073741824, i32* undef, align 8 %22 = call { i64, i64, i64, i64, i64, i64, i64 } asm sideeffect "sc\0A\09mfcr $0", "=&{r0},=&{r3},=&{r4},=&{r5},=&{r6},=&{r7},=&{r8},{r0},{r3},{r4},{r5},~{cr0},~{ctr},~{memory},~{r11},~{r12}"(i64 342, i64 80871424, i64 undef, i64 0) #2, !srcloc !1 @@ -296,7 +296,7 @@ _ZN10SubProcess12SafeSyscalls5fcntlEiil.exit: ; preds = %_ZN10SubProcess12Sa } ; Function Attrs: nounwind argmemonly -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #1 +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #1 attributes #0 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="pwr8" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+power8-vector,+vsx,-qpx" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { nounwind argmemonly } diff --git a/llvm/test/CodeGen/PowerPC/ctrloop-reg.ll b/llvm/test/CodeGen/PowerPC/ctrloop-reg.ll index 477d2aacdf4..7a7d81537bf 100644 --- a/llvm/test/CodeGen/PowerPC/ctrloop-reg.ll +++ b/llvm/test/CodeGen/PowerPC/ctrloop-reg.ll @@ -74,7 +74,7 @@ declare i32 @interp(%struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.18 declare i32 @dict_lookup(%struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*, %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*, %struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211*, 
%struct.ref_s.1.49.91.115.121.139.145.151.157.163.169.175.181.211**) -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind declare i32 @obj_compare(...) diff --git a/llvm/test/CodeGen/PowerPC/emptystruct.ll b/llvm/test/CodeGen/PowerPC/emptystruct.ll index bd8a974ab12..b0e41ec29e4 100644 --- a/llvm/test/CodeGen/PowerPC/emptystruct.ll +++ b/llvm/test/CodeGen/PowerPC/emptystruct.ll @@ -21,7 +21,7 @@ entry: %0 = load %struct.empty*, %struct.empty** %a2.addr, align 8 %1 = bitcast %struct.empty* %agg.result to i8* %2 = bitcast %struct.empty* %0 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 0, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 0, i1 false) ret void } @@ -31,7 +31,7 @@ entry: ; CHECK-NOT: std 6, ; CHECK: blr -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind define void @caller(%struct.empty* noalias sret %agg.result) nounwind { entry: diff --git a/llvm/test/CodeGen/PowerPC/fsl-e500mc.ll b/llvm/test/CodeGen/PowerPC/fsl-e500mc.ll index fe3e19b72d1..b1bb09da3e1 100644 --- a/llvm/test/CodeGen/PowerPC/fsl-e500mc.ll +++ b/llvm/test/CodeGen/PowerPC/fsl-e500mc.ll @@ -15,8 +15,8 @@ entry: ; CHECK-NOT: bl memcpy %0 = bitcast %struct.teststruct* %agg.result to i8* %1 = bitcast %struct.teststruct* %in to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 52, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 52, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind diff --git a/llvm/test/CodeGen/PowerPC/fsl-e5500.ll b/llvm/test/CodeGen/PowerPC/fsl-e5500.ll index dae47fb037f..595d91ad620 100644 --- a/llvm/test/CodeGen/PowerPC/fsl-e5500.ll +++ b/llvm/test/CodeGen/PowerPC/fsl-e5500.ll @@ -15,8 +15,8 @@ entry: ; CHECK-NOT: bl memcpy %0 = bitcast %struct.teststruct* %agg.result to i8* %1 = bitcast %struct.teststruct* %in to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 100, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 100, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind diff --git a/llvm/test/CodeGen/PowerPC/glob-comp-aa-crash.ll b/llvm/test/CodeGen/PowerPC/glob-comp-aa-crash.ll index 51275f3cdae..9f0e706da32 100644 --- a/llvm/test/CodeGen/PowerPC/glob-comp-aa-crash.ll +++ b/llvm/test/CodeGen/PowerPC/glob-comp-aa-crash.ll @@ -35,7 +35,7 @@ entry: invoke.cont: ; preds = %entry %__exception_ = getelementptr inbounds %"class.std::__1::__assoc_sub_state", %"class.std::__1::__assoc_sub_state"* %this, i64 0, i32 1 %0 = bitcast { i64, i64 }* %tmp to i8* - call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 16, i32 8, i1 false) + call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 16, i1 false) call void @_ZNSt15__exception_ptr13exception_ptrC1EMS0_FvvE(%"class.std::__exception_ptr::exception_ptr"* %ref.tmp, { i64, i64 }* byval %tmp) #5 %call = call zeroext i1 @_ZNSt15__exception_ptrneERKNS_13exception_ptrES2_(%"class.std::__exception_ptr::exception_ptr"* 
%__exception_, %"class.std::__exception_ptr::exception_ptr"* %ref.tmp) #5 call void @_ZNSt15__exception_ptr13exception_ptrD1Ev(%"class.std::__exception_ptr::exception_ptr"* %ref.tmp) #5 @@ -120,7 +120,7 @@ declare void @_ZNSt3__15mutex6unlockEv(%"class.std::__1::mutex"*) #1 declare void @_ZNSt3__15mutex4lockEv(%"class.std::__1::mutex"*) #0 ; Function Attrs: nounwind -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #3 +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #3 attributes #0 = { optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } diff --git a/llvm/test/CodeGen/PowerPC/isel-rc-nox0.ll b/llvm/test/CodeGen/PowerPC/isel-rc-nox0.ll index e3479f8586f..582778f01dd 100644 --- a/llvm/test/CodeGen/PowerPC/isel-rc-nox0.ll +++ b/llvm/test/CodeGen/PowerPC/isel-rc-nox0.ll @@ -21,7 +21,7 @@ crc32_gentab.exit: ; preds = %for.cond1.preheader br label %for.cond1.preheader.i2961.i for.cond1.preheader.i2961.i: ; preds = %for.inc44.i2977.i, %crc32_gentab.exit - call void @llvm.memset.p0i8.i64(i8* bitcast ([1 x [9 x i32]]* @g_62 to i8*), i8 -1, i64 36, i32 4, i1 false) #1 + call void @llvm.memset.p0i8.i64(i8* align 4 bitcast ([1 x [9 x i32]]* @g_62 to i8*), i8 -1, i64 36, i1 false) #1 %0 = load i32, i32* %retval.0.i.i.i, align 4 %tobool.i2967.i = icmp eq i32 %0, 0 br label %for.body21.i2968.i @@ -40,7 +40,7 @@ func_80.exit2978.i: ; preds = %for.inc44.i2977.i } ; Function Attrs: nounwind -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #1 +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #1 attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "ssp-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { nounwind } diff --git a/llvm/test/CodeGen/PowerPC/licm-remat.ll b/llvm/test/CodeGen/PowerPC/licm-remat.ll index f9c14052452..e72a8b0cd3e 100644 --- a/llvm/test/CodeGen/PowerPC/licm-remat.ll +++ b/llvm/test/CodeGen/PowerPC/licm-remat.ll @@ -13,9 +13,9 @@ @_ZN6snappy8internalL8wordmaskE = internal unnamed_addr constant [5 x i32] [i32 0, i32 255, i32 65535, i32 16777215, i32 -1], align 4 ; Function Attrs: argmemonly nounwind -declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #2 +declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #2 ; Function Attrs: argmemonly nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) #2 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #2 define linkonce_odr void @ZN6snappyDecompressor_(%"class.snappy::SnappyDecompressor"* %this, %"class.snappy::SnappyIOVecWriter"* %writer) { ; CHECK-LABEL: ZN6snappyDecompressor_: @@ -126,7 +126,7 @@ if.end18.i207: ; preds = %if.then10.i193, %co %iov_base.i.i200 = getelementptr inbounds %"struct.snappy::iovec", %"struct.snappy::iovec"* %12, i64 %17, i32 0 %18 = load i8*, i8** %iov_base.i.i200, align 8 
%add.ptr.i.i201 = getelementptr inbounds i8, i8* %18, i64 %15
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %add.ptr.i.i201, i8* %add.ptr24, i64 %.sroa.speculated.i199, i32 1, i1 false) #12
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %add.ptr.i.i201, i8* %add.ptr24, i64 %.sroa.speculated.i199, i1 false) #12
%add30.i203 = add i64 0, %.sroa.speculated.i199
store i64 %add30.i203, i64* null, align 8
%.pre245 = load i64, i64* %0, align 8
@@ -164,7 +164,7 @@ cleanup102: ; preds = %land.lhs.true5.i
%iov_base.i.i = getelementptr inbounds %"struct.snappy::iovec", %"struct.snappy::iovec"* %7, i64 %8, i32 0
%23 = load i8*, i8** %iov_base.i.i, align 8
%add.ptr.i.i = getelementptr inbounds i8, i8* %23, i64 %9
- call void @llvm.memmove.p0i8.p0i8.i64(i8* %add.ptr.i.i, i8* %incdec.ptr, i64 16, i32 1, i1 false) #12
+ call void @llvm.memmove.p0i8.p0i8.i64(i8* %add.ptr.i.i, i8* %incdec.ptr, i64 16, i1 false) #12
%24 = load <2 x i64>, <2 x i64>* %1, align 8
%25 = insertelement <2 x i64> undef, i64 %conv9, i32 0
%26 = shufflevector <2 x i64> %25, <2 x i64> undef, <2 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/PowerPC/lxv-aligned-stack-slots.ll b/llvm/test/CodeGen/PowerPC/lxv-aligned-stack-slots.ll
index 9c5432578b4..e8b65a38075 100644
--- a/llvm/test/CodeGen/PowerPC/lxv-aligned-stack-slots.ll
+++ b/llvm/test/CodeGen/PowerPC/lxv-aligned-stack-slots.ll
@@ -24,7 +24,7 @@ define void @unaligned_slot() #0 {
%1 = alloca %class2, align 8
%2 = getelementptr inbounds %class2, %class2* %1, i64 0, i32 0, i32 0, i32 2
%3 = bitcast %union.anon* %2 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull getelementptr inbounds (%class1, %class1* @ext, i64 0, i32 0, i32 1, i64 8), i8* nonnull %3, i64 16, i32 8, i1 false) #2
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 nonnull getelementptr inbounds (%class1, %class1* @ext, i64 0, i32 0, i32 1, i64 8), i8* align 8 nonnull %3, i64 16, i1 false) #2
ret void
}
; CHECK-LABEL: aligned_slot:
@@ -34,12 +34,12 @@ define void @aligned_slot() #0 {
%1 = alloca %class2, align 16
%2 = getelementptr inbounds %class2, %class2* %1, i64 0, i32 0, i32 0, i32 2
%3 = bitcast %union.anon* %2 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull getelementptr inbounds (%class1, %class1* @ext, i64 0, i32 0, i32 1, i64 8), i8* nonnull %3, i64 16, i32 8, i1 false) #2
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 nonnull getelementptr inbounds (%class1, %class1* @ext, i64 0, i32 0, i32 1, i64 8), i8* align 8 nonnull %3, i64 16, i1 false) #2
ret void
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) #1
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
attributes #0 = { nounwind "target-cpu"="pwr9" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+htm,+power8-vector,+power9-vector,+vsx,-qpx" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind }
diff --git a/llvm/test/CodeGen/PowerPC/memcpy-vec.ll b/llvm/test/CodeGen/PowerPC/memcpy-vec.ll
index 3046b26e76c..d97d604128e 100644
--- a/llvm/test/CodeGen/PowerPC/memcpy-vec.ll
+++ b/llvm/test/CodeGen/PowerPC/memcpy-vec.ll
@@ -9,7 +9,7 @@ define void @foo1(double* nocapture %x, double* nocapture readonly %y) #0 {
entry:
%0 = bitcast double* %x to i8*
%1 = bitcast double* %y to i8*
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 32, i32 8, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %0, i8* align 8 %1, i64 32, i1 false)
ret void
; PWR7-LABEL: @foo1
@@ -34,14 +34,14 @@ entry:
}
; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #0
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #0
; Function Attrs: nounwind
define void @foo2(double* nocapture %x, double* nocapture readonly %y) #0 {
entry:
%0 = bitcast double* %x to i8*
%1 = bitcast double* %y to i8*
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 128, i32 8, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %0, i8* align 8 %1, i64 128, i1 false)
ret void
; PWR7-LABEL: @foo2
@@ -64,7 +64,7 @@ entry:
define void @bar1(double* nocapture %x) #0 {
entry:
%0 = bitcast double* %x to i8*
- tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 128, i32 8, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 128, i1 false)
ret void
; PWR7-LABEL: @bar1
@@ -87,7 +87,7 @@ entry:
define void @bar2(double* nocapture %x) #0 {
entry:
%0 = bitcast double* %x to i8*
- tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 128, i32 32, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* align 32 %0, i8 0, i64 128, i1 false)
ret void
; PWR7-LABEL: @bar2
@@ -107,7 +107,7 @@ entry:
}
; Function Attrs: nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #0
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #0
attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/PowerPC/memcpy_dereferenceable.ll b/llvm/test/CodeGen/PowerPC/memcpy_dereferenceable.ll
index ed821849f09..36d37f294ee 100644
--- a/llvm/test/CodeGen/PowerPC/memcpy_dereferenceable.ll
+++ b/llvm/test/CodeGen/PowerPC/memcpy_dereferenceable.ll
@@ -22,7 +22,7 @@ entry:
end:
; copy third element into first element by memcpy
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull %dst, i8* %src, i64 16, i32 8, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 nonnull %dst, i8* align 8 %src, i64 16, i1 false)
; copy third element into second element by LD/ST
%vec2 = load <2 x i64>, <2 x i64>* %pvec2, align 8
store <2 x i64> %vec2, <2 x i64>* %pvec1, align 8
@@ -30,7 +30,7 @@ end:
dummy:
; to make use of %src in another BB
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %src, i8* %src, i64 0, i32 0, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %src, i8* %src, i64 0, i1 false)
br label %end
}
@@ -55,7 +55,7 @@ entry:
end:
; copy third element into first element by memcpy
- call void @llvm.memmove.p0i8.p0i8.i64(i8* nonnull %dst, i8* %src, i64 16, i32 8, i1 false)
+ call void @llvm.memmove.p0i8.p0i8.i64(i8* align 8 nonnull %dst, i8* align 8 %src, i64 16, i1 false)
; copy third element into second element by LD/ST
%vec2 = load <2 x i64>, <2 x i64>* %pvec2, align 8
store <2 x i64> %vec2, <2 x i64>* %pvec1, align 8
@@ -63,12 +63,12 @@ end:
dummy:
; to make use of %src in another BB
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %src, i8* %src, i64 0, i32 0, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %src, i8* %src, i64 0, i1 false)
br label %end
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) #1
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) #1
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
+declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
attributes #1 = { argmemonly nounwind } diff --git a/llvm/test/CodeGen/PowerPC/memset-nc-le.ll b/llvm/test/CodeGen/PowerPC/memset-nc-le.ll index cd6253f50e5..2924ff3d44b 100644 --- a/llvm/test/CodeGen/PowerPC/memset-nc-le.ll +++ b/llvm/test/CodeGen/PowerPC/memset-nc-le.ll @@ -7,7 +7,7 @@ define void @test_vsx() unnamed_addr #0 align 2 { entry: %0 = load i32, i32* undef, align 4 %1 = trunc i32 %0 to i8 - call void @llvm.memset.p0i8.i64(i8* null, i8 %1, i64 32, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* null, i8 %1, i64 32, i1 false) ret void ; CHECK-LABEL: @test_vsx @@ -17,7 +17,7 @@ entry: } ; Function Attrs: nounwind -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #1 +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #1 attributes #0 = { nounwind "target-cpu"="pwr8" } attributes #1 = { nounwind } diff --git a/llvm/test/CodeGen/PowerPC/memset-nc.ll b/llvm/test/CodeGen/PowerPC/memset-nc.ll index fd4327ef4d6..663d0cb1d67 100644 --- a/llvm/test/CodeGen/PowerPC/memset-nc.ll +++ b/llvm/test/CodeGen/PowerPC/memset-nc.ll @@ -8,7 +8,7 @@ define void @test_qpx() unnamed_addr #0 align 2 { entry: %0 = load i32, i32* undef, align 4 %1 = trunc i32 %0 to i8 - call void @llvm.memset.p0i8.i64(i8* null, i8 %1, i64 64, i32 32, i1 false) + call void @llvm.memset.p0i8.i64(i8* align 32 null, i8 %1, i64 64, i1 false) ret void ; CHECK-LABEL: @test_qpx @@ -22,14 +22,14 @@ entry: } ; Function Attrs: nounwind -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #1 +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #1 ; Function Attrs: nounwind define void @test_vsx() unnamed_addr #2 align 2 { entry: %0 = load i32, i32* undef, align 4 %1 = trunc i32 %0 to i8 - call void @llvm.memset.p0i8.i64(i8* null, i8 %1, i64 32, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* null, i8 %1, i64 32, i1 false) ret void ; CHECK-LABEL: @test_vsx diff --git a/llvm/test/CodeGen/PowerPC/merge-st-chain-op.ll b/llvm/test/CodeGen/PowerPC/merge-st-chain-op.ll index bfb911c0115..4d5b9170ece 100644 --- a/llvm/test/CodeGen/PowerPC/merge-st-chain-op.ll +++ b/llvm/test/CodeGen/PowerPC/merge-st-chain-op.ll @@ -17,7 +17,7 @@ _ZN4llvm18IntrusiveRefCntPtrIN5clang13DiagnosticIDsEEC2EPS2_.exit: ; preds = %en store <2 x i8*> <i8* bitcast (i64* getelementptr inbounds ([0 x i64], [0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8* bitcast (i64* getelementptr inbounds ([0 x i64], [0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*)>, <2 x i8*>* undef, align 8 %IgnoreWarnings.i = getelementptr inbounds i8, i8* %call2, i64 4 %0 = bitcast i8* %IgnoreWarnings.i to i32* - call void @llvm.memset.p0i8.i64(i8* null, i8 0, i64 48, i32 8, i1 false) #4 + call void @llvm.memset.p0i8.i64(i8* align 8 null, i8 0, i64 48, i1 false) #4 store i32 251658240, i32* %0, align 4 store i256 37662610426935100959726589394453639584271499769928088551424, i256* null, align 8 store i32 1, i32* %ref_cnt.i.i, align 4 @@ -31,7 +31,7 @@ return: ; preds = %entry declare noalias i8* @_Znwm() #1 ; Function Attrs: nounwind argmemonly -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #2 +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #2 attributes #0 = { nounwind "target-cpu"="pwr7" } attributes #1 = { nobuiltin "target-cpu"="pwr7" } diff --git a/llvm/test/CodeGen/PowerPC/ppc-empty-fs.ll b/llvm/test/CodeGen/PowerPC/ppc-empty-fs.ll index c89ca2ea2ab..8b600893225 100644 --- a/llvm/test/CodeGen/PowerPC/ppc-empty-fs.ll +++ 
b/llvm/test/CodeGen/PowerPC/ppc-empty-fs.ll @@ -15,14 +15,14 @@ entry: store i64 %x.coerce, i64* %0, align 1 %1 = bitcast %struct.fab* %agg.result to i8* %2 = bitcast %struct.fab* %x to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 8, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 %2, i64 8, i1 false) ret void } ; CHECK: func_fab ; Function Attrs: nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1 attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "target-features"="" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { nounwind } diff --git a/llvm/test/CodeGen/PowerPC/pr27350.ll b/llvm/test/CodeGen/PowerPC/pr27350.ll index f6ad38e2f29..7dbd5110700 100644 --- a/llvm/test/CodeGen/PowerPC/pr27350.ll +++ b/llvm/test/CodeGen/PowerPC/pr27350.ll @@ -1,12 +1,12 @@ ; RUN: llc -verify-machineinstrs -mcpu=ppc64le -mtriple=powerpc64le-unknown-linux-gnu < %s ; Function Attrs: argmemonly nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #0 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #0 ; Function Attrs: nounwind define internal fastcc void @foo() unnamed_addr #1 align 2 { entry: - call void @llvm.memcpy.p0i8.p0i8.i64(i8* undef, i8* null, i64 16, i32 8, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 undef, i8* align 8 null, i64 16, i1 false) %0 = load <2 x i64>, <2 x i64>* null, align 8 %1 = extractelement <2 x i64> %0, i32 1 %.fca.1.insert159.i = insertvalue [2 x i64] undef, i64 %1, 1 diff --git a/llvm/test/CodeGen/PowerPC/resolvefi-basereg.ll b/llvm/test/CodeGen/PowerPC/resolvefi-basereg.ll index 9e83f0979fe..731f37d2770 100644 --- a/llvm/test/CodeGen/PowerPC/resolvefi-basereg.ll +++ b/llvm/test/CodeGen/PowerPC/resolvefi-basereg.ll @@ -32,9 +32,9 @@ entry: %agg.tmp117 = alloca %struct.S1998, align 16 %agg.tmp118 = alloca %struct.S1998, align 16 %agg.tmp119 = alloca %struct.S1998, align 16 - call void @llvm.memset.p0i8.i64(i8* bitcast (%struct.S1998* @s1998 to i8*), i8 0, i64 5168, i32 16, i1 false) - call void @llvm.memset.p0i8.i64(i8* bitcast ([5 x %struct.S1998]* @a1998 to i8*), i8 0, i64 25840, i32 16, i1 false) - call void @llvm.memset.p0i8.i64(i8* bitcast (%struct.Info* @info to i8*), i8 0, i64 832, i32 8, i1 false) + call void @llvm.memset.p0i8.i64(i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i8 0, i64 5168, i1 false) + call void @llvm.memset.p0i8.i64(i8* align 16 bitcast ([5 x %struct.S1998]* @a1998 to i8*), i8 0, i64 25840, i1 false) + call void @llvm.memset.p0i8.i64(i8* align 8 bitcast (%struct.Info* @info to i8*), i8 0, i64 832, i1 false) store i8* bitcast (%struct.S1998* @s1998 to i8*), i8** getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 2), align 8 store i8* bitcast ([5 x %struct.S1998]* @a1998 to i8*), i8** getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 3), align 8 store i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 3) to i8*), i8** getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 4), align 8 @@ -329,32 +329,32 @@ if.end: ; preds = %if.then, %entry %61 = load i32, i32* %j, align 4 store 
i32 %61, i32* getelementptr inbounds (%struct.Info, %struct.Info* @info, i32 0, i32 1), align 4 %62 = bitcast %struct.S1998* %agg.tmp111 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %62, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %62, i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false) %63 = bitcast %struct.S1998* %agg.tmp112 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %63, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %63, i8* align 16 bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false) call void @check1998(%struct.S1998* sret %agg.tmp, %struct.S1998* byval align 16 %agg.tmp111, %struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 1), %struct.S1998* byval align 16 %agg.tmp112) call void @checkx1998(%struct.S1998* byval align 16 %agg.tmp) %64 = bitcast %struct.S1998* %agg.tmp113 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %64, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %64, i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false) %65 = bitcast %struct.S1998* %agg.tmp114 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %65, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %65, i8* align 16 bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false) %66 = bitcast %struct.S1998* %agg.tmp115 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %66, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %66, i8* align 16 bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false) call void (i32, ...) 
@check1998va(i32 signext 1, double 1.000000e+00, %struct.S1998* byval align 16 %agg.tmp113, i64 2, %struct.S1998* byval align 16 %agg.tmp114, %struct.S1998* byval align 16 %agg.tmp115) %67 = bitcast %struct.S1998* %agg.tmp116 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %67, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %67, i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false) %68 = bitcast %struct.S1998* %agg.tmp117 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %68, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %68, i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false) %69 = bitcast %struct.S1998* %agg.tmp118 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %69, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %69, i8* align 16 bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i1 false) %70 = bitcast %struct.S1998* %agg.tmp119 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %70, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %70, i8* align 16 bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i1 false) call void (i32, ...) @check1998va(i32 signext 2, %struct.S1998* byval align 16 %agg.tmp116, %struct.S1998* byval align 16 %agg.tmp117, ppc_fp128 0xM40000000000000000000000000000000, %struct.S1998* byval align 16 %agg.tmp118, %struct.S1998* byval align 16 %agg.tmp119) ret void } -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) declare void @check1998(%struct.S1998* sret, %struct.S1998* byval align 16, %struct.S1998*, %struct.S1998* byval align 16) declare void @check1998va(i32 signext, ...) 
diff --git a/llvm/test/CodeGen/PowerPC/resolvefi-disp.ll b/llvm/test/CodeGen/PowerPC/resolvefi-disp.ll
index 72755df0eb9..68a31278e77 100644
--- a/llvm/test/CodeGen/PowerPC/resolvefi-disp.ll
+++ b/llvm/test/CodeGen/PowerPC/resolvefi-disp.ll
@@ -30,17 +30,17 @@ entry:
%b2 = alloca %struct.S2760, align 32
%2 = bitcast %struct.S2760* %arg0 to i8*
%3 = bitcast %struct.S2760* %0 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %3, i64 11104, i32 16, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %2, i8* align 16 %3, i64 11104, i1 false)
%4 = bitcast %struct.S2760* %arg2 to i8*
%5 = bitcast %struct.S2760* %1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* %5, i64 11104, i32 16, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %4, i8* align 16 %5, i64 11104, i1 false)
store %struct.S2760* %arg1, %struct.S2760** %arg1.addr, align 8
%6 = bitcast %struct.S2760* %ret to i8*
- call void @llvm.memset.p0i8.i64(i8* %6, i8 0, i64 11104, i32 32, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 32 %6, i8 0, i64 11104, i1 false)
%7 = bitcast %struct.S2760* %b1 to i8*
- call void @llvm.memset.p0i8.i64(i8* %7, i8 0, i64 11104, i32 32, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 32 %7, i8 0, i64 11104, i1 false)
%8 = bitcast %struct.S2760* %b2 to i8*
- call void @llvm.memset.p0i8.i64(i8* %8, i8 0, i64 11104, i32 32, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 32 %8, i8 0, i64 11104, i1 false)
%b = getelementptr inbounds %struct.S2760, %struct.S2760* %arg0, i32 0, i32 1
%g = getelementptr inbounds %struct.anon, %struct.anon* %b, i32 0, i32 1
%9 = load i64, i64* %g, align 8
@@ -61,11 +61,11 @@ if.end: ; preds = %if.then, %entry
store i64 %12, i64* %g4, align 8
%13 = bitcast %struct.S2760* %agg.result to i8*
%14 = bitcast %struct.S2760* %ret to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %13, i8* %14, i64 11104, i32 32, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 %13, i8* align 32 %14, i64 11104, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
diff --git a/llvm/test/CodeGen/PowerPC/structsinmem.ll b/llvm/test/CodeGen/PowerPC/structsinmem.ll
index 01b0848e707..c8ea3be7cce 100644
--- a/llvm/test/CodeGen/PowerPC/structsinmem.ll
+++ b/llvm/test/CodeGen/PowerPC/structsinmem.ll
@@ -43,19 +43,19 @@ entry:
%p6 = alloca %struct.s6, align 4
%p7 = alloca %struct.s7, align 4
%0 = bitcast %struct.s1* %p1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1, %struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1, %struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i1 false)
%1 = bitcast %struct.s2* %p2 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s2* @caller1.p2 to i8*), i64 2, i32 2, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 %1, i8* align 2 bitcast (%struct.s2* @caller1.p2 to i8*), i64 2, i1 false)
%2 = bitcast %struct.s3* %p3 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast ({ i16, i8, i8 }* @caller1.p3 to i8*), i64 4, i32 2, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 %2, i8* align 2 bitcast ({ i16, i8, i8 }* @caller1.p3 to i8*), i64 4, i1 false)
%3 = bitcast %struct.s4* %p4 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast (%struct.s4* @caller1.p4 to i8*), i64 4, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %3, i8* align 4 bitcast (%struct.s4* @caller1.p4 to i8*), i64 4, i1 false)
%4 = bitcast %struct.s5* %p5 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast ({ i32, i8, [3 x i8] }* @caller1.p5 to i8*), i64 8, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %4, i8* align 4 bitcast ({ i32, i8, [3 x i8] }* @caller1.p5 to i8*), i64 8, i1 false)
%5 = bitcast %struct.s6* %p6 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast ({ i32, i16, [2 x i8] }* @caller1.p6 to i8*), i64 8, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %5, i8* align 4 bitcast ({ i32, i16, [2 x i8] }* @caller1.p6 to i8*), i64 8, i1 false)
%6 = bitcast %struct.s7* %p7 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast ({ i32, i16, i8, i8 }* @caller1.p7 to i8*), i64 8, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %6, i8* align 4 bitcast ({ i32, i16, i8, i8 }* @caller1.p7 to i8*), i64 8, i1 false)
%call = call i32 @callee1(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, %struct.s1* byval %p1, %struct.s2* byval %p2, %struct.s3* byval %p3, %struct.s4* byval %p4, %struct.s5* byval %p5, %struct.s6* byval %p6, %struct.s7* byval %p7)
ret i32 %call
@@ -68,7 +68,7 @@ entry:
; CHECK: std {{[0-9]+}}, 160(1)
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
define internal i32 @callee1(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, %struct.s1* byval %v1, %struct.s2* byval %v2, %struct.s3* byval %v3, %struct.s4* byval %v4, %struct.s5* byval %v5, %struct.s6* byval %v6, %struct.s7* byval %v7) nounwind {
entry:
@@ -132,19 +132,19 @@ entry:
%p6 = alloca %struct.t6, align 1
%p7 = alloca %struct.t7, align 1
%0 = bitcast %struct.t1* %p1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1, %struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1, %struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i1 false)
%1 = bitcast %struct.t2* %p2 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ({ i16 }* @caller2.p2 to i8*), i64 2, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ({ i16 }* @caller2.p2 to i8*), i64 2, i1 false)
%2 = bitcast %struct.t3* %p3 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast (%struct.t3* @caller2.p3 to i8*), i64 3, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast (%struct.t3* @caller2.p3 to i8*), i64 3, i1 false)
%3 = bitcast %struct.t4* %p4 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast ({ i32 }* @caller2.p4 to i8*), i64 4, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast ({ i32 }* @caller2.p4 to i8*), i64 4, i1 false)
%4 = bitcast %struct.t5* %p5 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast (%struct.t5* @caller2.p5 to i8*), i64 5, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast (%struct.t5* @caller2.p5 to i8*), i64 5, i1 false)
%5 = bitcast %struct.t6* %p6 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast (%struct.t6* @caller2.p6
to i8*), i64 6, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast (%struct.t6* @caller2.p6 to i8*), i64 6, i1 false) %6 = bitcast %struct.t7* %p7 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast (%struct.t7* @caller2.p7 to i8*), i64 7, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast (%struct.t7* @caller2.p7 to i8*), i64 7, i1 false) %call = call i32 @callee2(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, %struct.t1* byval %p1, %struct.t2* byval %p2, %struct.t3* byval %p3, %struct.t4* byval %p4, %struct.t5* byval %p5, %struct.t6* byval %p6, %struct.t7* byval %p7) ret i32 %call diff --git a/llvm/test/CodeGen/PowerPC/structsinregs.ll b/llvm/test/CodeGen/PowerPC/structsinregs.ll index 54679f259e9..d8afc8f8559 100644 --- a/llvm/test/CodeGen/PowerPC/structsinregs.ll +++ b/llvm/test/CodeGen/PowerPC/structsinregs.ll @@ -43,19 +43,19 @@ entry: %p6 = alloca %struct.s6, align 4 %p7 = alloca %struct.s7, align 4 %0 = bitcast %struct.s1* %p1 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1, %struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1, %struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i1 false) %1 = bitcast %struct.s2* %p2 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s2* @caller1.p2 to i8*), i64 2, i32 2, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 %1, i8* align 2 bitcast (%struct.s2* @caller1.p2 to i8*), i64 2, i1 false) %2 = bitcast %struct.s3* %p3 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast ({ i16, i8, i8 }* @caller1.p3 to i8*), i64 4, i32 2, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 %2, i8* align 2 bitcast ({ i16, i8, i8 }* @caller1.p3 to i8*), i64 4, i1 false) %3 = bitcast %struct.s4* %p4 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast (%struct.s4* @caller1.p4 to i8*), i64 4, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %3, i8* align 4 bitcast (%struct.s4* @caller1.p4 to i8*), i64 4, i1 false) %4 = bitcast %struct.s5* %p5 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast ({ i32, i8, [3 x i8] }* @caller1.p5 to i8*), i64 8, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %4, i8* align 4 bitcast ({ i32, i8, [3 x i8] }* @caller1.p5 to i8*), i64 8, i1 false) %5 = bitcast %struct.s6* %p6 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast ({ i32, i16, [2 x i8] }* @caller1.p6 to i8*), i64 8, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %5, i8* align 4 bitcast ({ i32, i16, [2 x i8] }* @caller1.p6 to i8*), i64 8, i1 false) %6 = bitcast %struct.s7* %p7 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast ({ i32, i16, i8, i8 }* @caller1.p7 to i8*), i64 8, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %6, i8* align 4 bitcast ({ i32, i16, i8, i8 }* @caller1.p7 to i8*), i64 8, i1 false) %call = call i32 @callee1(%struct.s1* byval %p1, %struct.s2* byval %p2, %struct.s3* byval %p3, %struct.s4* byval %p4, %struct.s5* byval %p5, %struct.s6* byval %p6, %struct.s7* byval %p7) ret i32 %call @@ -69,7 +69,7 @@ entry: ; CHECK: lbz 3, 160(31) } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind define internal i32 @callee1(%struct.s1* 
byval %v1, %struct.s2* byval %v2, %struct.s3* byval %v3, %struct.s4* byval %v4, %struct.s5* byval %v5, %struct.s6* byval %v6, %struct.s7* byval %v7) nounwind { entry: @@ -125,19 +125,19 @@ entry: %p6 = alloca %struct.t6, align 1 %p7 = alloca %struct.t7, align 1 %0 = bitcast %struct.t1* %p1 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1, %struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1, %struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i1 false) %1 = bitcast %struct.t2* %p2 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ({ i16 }* @caller2.p2 to i8*), i64 2, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ({ i16 }* @caller2.p2 to i8*), i64 2, i1 false) %2 = bitcast %struct.t3* %p3 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast (%struct.t3* @caller2.p3 to i8*), i64 3, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast (%struct.t3* @caller2.p3 to i8*), i64 3, i1 false) %3 = bitcast %struct.t4* %p4 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast ({ i32 }* @caller2.p4 to i8*), i64 4, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast ({ i32 }* @caller2.p4 to i8*), i64 4, i1 false) %4 = bitcast %struct.t5* %p5 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast (%struct.t5* @caller2.p5 to i8*), i64 5, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast (%struct.t5* @caller2.p5 to i8*), i64 5, i1 false) %5 = bitcast %struct.t6* %p6 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast (%struct.t6* @caller2.p6 to i8*), i64 6, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast (%struct.t6* @caller2.p6 to i8*), i64 6, i1 false) %6 = bitcast %struct.t7* %p7 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast (%struct.t7* @caller2.p7 to i8*), i64 7, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast (%struct.t7* @caller2.p7 to i8*), i64 7, i1 false) %call = call i32 @callee2(%struct.t1* byval %p1, %struct.t2* byval %p2, %struct.t3* byval %p3, %struct.t4* byval %p4, %struct.t5* byval %p5, %struct.t6* byval %p6, %struct.t7* byval %p7) ret i32 %call diff --git a/llvm/test/CodeGen/PowerPC/stwu8.ll b/llvm/test/CodeGen/PowerPC/stwu8.ll index 4dfef4b0323..f6d7ec9334c 100644 --- a/llvm/test/CodeGen/PowerPC/stwu8.ll +++ b/llvm/test/CodeGen/PowerPC/stwu8.ll @@ -13,7 +13,7 @@ define void @test1(%class.spell_checker.21.103.513.538* %this) unnamed_addr alig entry: %_M_header.i.i.i.i.i.i = getelementptr inbounds %class.spell_checker.21.103.513.538, %class.spell_checker.21.103.513.538* %this, i64 0, i32 0, i32 0, i32 0, i32 1 %0 = bitcast %"struct.std::_Rb_tree_node_base.17.99.509.534"* %_M_header.i.i.i.i.i.i to i8* - call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 40, i32 4, i1 false) nounwind + call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 40, i1 false) nounwind store %"struct.std::_Rb_tree_node_base.17.99.509.534"* %_M_header.i.i.i.i.i.i, %"struct.std::_Rb_tree_node_base.17.99.509.534"** undef, align 8 unreachable } @@ -21,4 +21,4 @@ entry: ; CHECK: @test1 ; CHECK: stwu -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind diff --git a/llvm/test/CodeGen/PowerPC/tailcall-string-rvo.ll 
b/llvm/test/CodeGen/PowerPC/tailcall-string-rvo.ll index cf365dfa2a6..c48ee467031 100644 --- a/llvm/test/CodeGen/PowerPC/tailcall-string-rvo.ll +++ b/llvm/test/CodeGen/PowerPC/tailcall-string-rvo.ll @@ -32,7 +32,7 @@ bb: %tmp1 = bitcast %class.basic_string.11.42.73* %arg to %union.anon.8.39.70** store %union.anon.8.39.70* %tmp, %union.anon.8.39.70** %tmp1, align 8 %tmp2 = bitcast %union.anon.8.39.70* %tmp to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp2, i8* nonnull undef, i64 13, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp2, i8* nonnull undef, i64 13, i1 false) %tmp3 = getelementptr inbounds %class.basic_string.11.42.73, %class.basic_string.11.42.73* %arg, i64 0, i32 0, i32 0, i32 1 store i64 13, i64* %tmp3, align 8 %tmp4 = getelementptr inbounds %class.basic_string.11.42.73, %class.basic_string.11.42.73* %arg, i64 0, i32 0, i32 0, i32 2, i32 1, i64 5 @@ -42,6 +42,6 @@ bb: } ; Function Attrs: argmemonly nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #0 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #0 attributes #0 = { argmemonly nounwind } diff --git a/llvm/test/CodeGen/PowerPC/toc-load-sched-bug.ll b/llvm/test/CodeGen/PowerPC/toc-load-sched-bug.ll index 21ccbf6f1ea..8e2aadf87fa 100644 --- a/llvm/test/CodeGen/PowerPC/toc-load-sched-bug.ll +++ b/llvm/test/CodeGen/PowerPC/toc-load-sched-bug.ll @@ -222,7 +222,7 @@ if.then: ; preds = %_ZNK4llvm7ErrorOrIS %Filename.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 2 %10 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i2.i, i64 0, i32 0 %11 = bitcast %"class.llvm::SMDiagnostic"* %ref.tmp to i8* - call void @llvm.memset.p0i8.i64(i8* %11, i8 0, i64 16, i32 8, i1 false) #3 + call void @llvm.memset.p0i8.i64(i8* align 8 %11, i8 0, i64 16, i1 false) #3 call void @llvm.lifetime.start.p0i8(i64 1, i8* %10) #3 %tobool.i.i4.i = icmp eq i8* %4, null br i1 %tobool.i.i4.i, label %if.then.i.i6.i, label %if.end.i.i8.i @@ -265,7 +265,7 @@ _ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit: ; preds store i8* bitcast (i64* getelementptr inbounds ([0 x i64], [0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p.i.i.i.i.i, align 8, !tbaa !13 %Ranges.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8 %13 = bitcast %"class.std::vector.79"* %Ranges.i to i8* - call void @llvm.memset.p0i8.i64(i8* %13, i8 0, i64 24, i32 8, i1 false) #3 + call void @llvm.memset.p0i8.i64(i8* align 8 %13, i8 0, i64 24, i1 false) #3 %14 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0, i64 0 %BeginX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 0 store i8* %14, i8** %BeginX.i.i.i.i.i.i, align 8, !tbaa !23 @@ -275,13 +275,13 @@ _ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit: ; preds %add.ptr.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0, i64 96 store i8* %add.ptr.i.i.i.i.i.i, i8** %CapacityX.i.i.i.i.i.i, align 8, !tbaa !26 %15 = bitcast %"class.llvm::SMDiagnostic"* %Err to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %15, i8* 
%11, i64 16, i32 8, i1 false) #3 + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %15, i8* align 8 %11, i64 16, i1 false) #3 %Filename.i38 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 2 call void @_ZNSs4swapERSs(%"class.std::basic_string"* %Filename.i38, %"class.std::basic_string"* dereferenceable(8) %Filename.i) #3 %LineNo.i39 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 3 %16 = bitcast i32* %LineNo.i39 to i8* %17 = bitcast i32* %LineNo.i to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %16, i8* %17, i64 12, i32 4, i1 false) #3 + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %16, i8* align 4 %17, i64 12, i1 false) #3 %Message.i40 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 6 call void @_ZNSs4swapERSs(%"class.std::basic_string"* %Message.i40, %"class.std::basic_string"* dereferenceable(8) %Message.i) #3 %LineContents.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 7 @@ -294,7 +294,7 @@ _ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit: ; preds %_M_end_of_storage.i11.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8, i32 0, i32 0, i32 2 %_M_start2.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 0 %19 = bitcast %"class.std::vector.79"* %Ranges.i41 to i8* - call void @llvm.memset.p0i8.i64(i8* %19, i8 0, i64 16, i32 8, i1 false) #3 + call void @llvm.memset.p0i8.i64(i8* align 8 %19, i8 0, i64 16, i1 false) #3 %20 = load %"struct.std::pair"*, %"struct.std::pair"** %_M_start2.i.i.i.i, align 8, !tbaa !27 store %"struct.std::pair"* %20, %"struct.std::pair"** %_M_start.i7.i.i.i, align 8, !tbaa !27 store %"struct.std::pair"* null, %"struct.std::pair"** %_M_start2.i.i.i.i, align 8, !tbaa !27 @@ -449,7 +449,7 @@ declare void @__assert_fail(i8*, i8*, i32 zeroext, i8*) #4 declare dereferenceable(8) %"class.std::basic_string"* @_ZNSs6insertEmPKcm(%"class.std::basic_string"*, i64, i8*, i64) #1 ; Function Attrs: nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #3 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #3 ; Function Attrs: nounwind declare void @_ZNSs4_Rep10_M_destroyERKSaIcE(%"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep"*, %"class.std::allocator"* dereferenceable(1)) #0 @@ -471,7 +471,7 @@ declare %"class.llvm::Module"* @_ZN4llvm7ParseIREPNS_12MemoryBufferERNS_12SMDiag declare void @_ZNSs4swapERSs(%"class.std::basic_string"*, %"class.std::basic_string"* dereferenceable(8)) #1 ; Function Attrs: nounwind -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #3 +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #3 attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } diff --git a/llvm/test/CodeGen/RISCV/frame.ll b/llvm/test/CodeGen/RISCV/frame.ll index fc12d246526..11dc784426e 
100644 --- a/llvm/test/CodeGen/RISCV/frame.ll +++ b/llvm/test/CodeGen/RISCV/frame.ll @@ -47,12 +47,12 @@ define i32 @test() nounwind { ; RV32I-WITHFP-NEXT: ret %key = alloca %struct.key_t, align 4 %1 = bitcast %struct.key_t* %key to i8* - call void @llvm.memset.p0i8.i64(i8* %1, i8 0, i64 20, i32 4, i1 false) + call void @llvm.memset.p0i8.i64(i8* align 4 %1, i8 0, i64 20, i1 false) %2 = getelementptr inbounds %struct.key_t, %struct.key_t* %key, i64 0, i32 1, i64 0 call void @test1(i8* %2) #3 ret i32 0 } -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) declare void @test1(i8*) diff --git a/llvm/test/CodeGen/SystemZ/dag-combine-02.ll b/llvm/test/CodeGen/SystemZ/dag-combine-02.ll index 2d96aafb938..c73b49029e8 100644 --- a/llvm/test/CodeGen/SystemZ/dag-combine-02.ll +++ b/llvm/test/CodeGen/SystemZ/dag-combine-02.ll @@ -18,7 +18,7 @@ declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0 ; Function Attrs: argmemonly nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) #0 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #0 ; Function Attrs: argmemonly nounwind declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0 @@ -100,7 +100,7 @@ define signext i32 @main(i32 signext, i8** nocapture readonly) local_unnamed_add store i64 0, i64* @g_56, align 8 %62 = bitcast [4 x [7 x i16*]]* %3 to i8* call void @llvm.lifetime.start.p0i8(i64 224, i8* nonnull %62) #5 - call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull %62, i8* bitcast ([4 x [7 x i16*]]* @func_22.l_91 to i8*), i64 224, i32 8, i1 false) #5 + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 nonnull %62, i8* align 8 bitcast ([4 x [7 x i16*]]* @func_22.l_91 to i8*), i64 224, i1 false) #5 %63 = getelementptr inbounds [4 x [7 x i16*]], [4 x [7 x i16*]]* %3, i64 0, i64 0, i64 2 store i16** %63, i16*** @g_102, align 8 %64 = load i64, i64* @g_56, align 8 diff --git a/llvm/test/CodeGen/SystemZ/loop-01.ll b/llvm/test/CodeGen/SystemZ/loop-01.ll index 79afc7f4198..262cda9f6d1 100644 --- a/llvm/test/CodeGen/SystemZ/loop-01.ll +++ b/llvm/test/CodeGen/SystemZ/loop-01.ll @@ -246,7 +246,7 @@ for.body: ; preds = %for.body.preheader, %for.body %2 = type <{ %3, i32, [4 x i8] }> %3 = type { i16*, i16*, i16* } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) #0 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #0 define void @f8() { ; CHECK-Z13-LABEL: f8: @@ -274,22 +274,22 @@ bb5: ; preds = %bb5, %bb2 %tmp9 = getelementptr inbounds %0, %0* %tmp6, i64 -1 %tmp10 = bitcast %0* %tmp9 to i8* %tmp11 = bitcast %0* %tmp8 to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp10, i8* %tmp11, i64 24, i32 8, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %tmp10, i8* align 8 %tmp11, i64 24, i1 false) %tmp12 = getelementptr inbounds %0, %0* %tmp7, i64 -2 %tmp13 = getelementptr inbounds %0, %0* %tmp6, i64 -2 %tmp14 = bitcast %0* %tmp13 to i8* %tmp15 = bitcast %0* %tmp12 to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp14, i8* %tmp15, i64 24, i32 8, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %tmp14, i8* align 8 %tmp15, i64 24, i1 false) %tmp16 = getelementptr inbounds %0, %0* %tmp7, i64 -3 %tmp17 = getelementptr inbounds %0, %0* %tmp6, i64 -3 %tmp18 = bitcast %0* %tmp17 to i8* %tmp19 = bitcast %0* %tmp16 to i8* - tail call 
void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp18, i8* %tmp19, i64 24, i32 8, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %tmp18, i8* align 8 %tmp19, i64 24, i1 false) %tmp20 = getelementptr inbounds %0, %0* %tmp7, i64 -4 %tmp21 = getelementptr inbounds %0, %0* %tmp6, i64 -4 %tmp22 = bitcast %0* %tmp21 to i8* %tmp23 = bitcast %0* %tmp20 to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp22, i8* %tmp23, i64 24, i32 8, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %tmp22, i8* align 8 %tmp23, i64 24, i1 false) br label %bb5 } diff --git a/llvm/test/CodeGen/SystemZ/loop-03.ll b/llvm/test/CodeGen/SystemZ/loop-03.ll index 79bd23e6274..7ba7165cdff 100644 --- a/llvm/test/CodeGen/SystemZ/loop-03.ll +++ b/llvm/test/CodeGen/SystemZ/loop-03.ll @@ -15,7 +15,7 @@ %7 = type { i64, i64, %8** } %8 = type { i64, i64*, i64*, %4*, i64, i32*, %5, i32, i64, i64 } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) define void @fun0(%0*) { ; CHECK-LABEL: .LBB0_4 @@ -72,7 +72,7 @@ define void @fun0(%0*) { ; <label>:24: ; preds = %24, %14 %25 = phi i64 [ %23, %14 ], [ %27, %24 ] - call void @llvm.memcpy.p0i8.p0i8.i64(i8* undef, i8* nonnull undef, i64 %4, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* undef, i8* nonnull undef, i64 %4, i1 false) %26 = getelementptr inbounds i8, i8* null, i64 %4 store i8* %26, i8** undef, align 8 %27 = add i64 %25, -4 @@ -83,7 +83,7 @@ define void @fun0(%0*) { br i1 undef, label %31, label %30 ; <label>:30: ; preds = %29 - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %26, i8* nonnull undef, i64 %4, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %26, i8* nonnull undef, i64 %4, i1 false) br label %31 ; <label>:31: ; preds = %30, %29 diff --git a/llvm/test/CodeGen/SystemZ/memcpy-01.ll b/llvm/test/CodeGen/SystemZ/memcpy-01.ll index 1d7b28e940b..ee4e71b53ce 100644 --- a/llvm/test/CodeGen/SystemZ/memcpy-01.ll +++ b/llvm/test/CodeGen/SystemZ/memcpy-01.ll @@ -2,107 +2,98 @@ ; ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -declare void @llvm.memcpy.p0i8.p0i8.i32(i8 *nocapture, i8 *nocapture, i32, i32, i1) nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8 *nocapture, i8 *nocapture, i64, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8 *nocapture, i8 *nocapture, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8 *nocapture, i8 *nocapture, i64, i1) nounwind declare void @foo(i8 *, i8 *) ; Test a no-op move, i32 version. -define void @f1(i8 *%dest, i8 *%src) { +define void @f1(i8* %dest, i8* %src) { ; CHECK-LABEL: f1: ; CHECK-NOT: %r2 ; CHECK-NOT: %r3 ; CHECK: br %r14 - call void @llvm.memcpy.p0i8.p0i8.i32(i8 *%dest, i8 *%src, i32 0, i32 1, - i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 0, i1 false) ret void } ; Test a no-op move, i64 version. -define void @f2(i8 *%dest, i8 *%src) { +define void @f2(i8* %dest, i8* %src) { ; CHECK-LABEL: f2: ; CHECK-NOT: %r2 ; CHECK-NOT: %r3 ; CHECK: br %r14 - call void @llvm.memcpy.p0i8.p0i8.i64(i8 *%dest, i8 *%src, i64 0, i32 1, - i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dest, i8* %src, i64 0, i1 false) ret void } ; Test a 1-byte move, i32 version. 
-define void @f3(i8 *%dest, i8 *%src) { +define void @f3(i8* %dest, i8* %src) { ; CHECK-LABEL: f3: ; CHECK: mvc 0(1,%r2), 0(%r3) ; CHECK: br %r14 - call void @llvm.memcpy.p0i8.p0i8.i32(i8 *%dest, i8 *%src, i32 1, i32 1, - i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 1, i1 false) ret void } ; Test a 1-byte move, i64 version. -define void @f4(i8 *%dest, i8 *%src) { +define void @f4(i8* %dest, i8* %src) { ; CHECK-LABEL: f4: ; CHECK: mvc 0(1,%r2), 0(%r3) ; CHECK: br %r14 - call void @llvm.memcpy.p0i8.p0i8.i64(i8 *%dest, i8 *%src, i64 1, i32 1, - i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dest, i8* %src, i64 1, i1 false) ret void } ; Test the upper range of a single MVC, i32 version. -define void @f5(i8 *%dest, i8 *%src) { +define void @f5(i8* %dest, i8* %src) { ; CHECK-LABEL: f5: ; CHECK: mvc 0(256,%r2), 0(%r3) ; CHECK: br %r14 - call void @llvm.memcpy.p0i8.p0i8.i32(i8 *%dest, i8 *%src, i32 256, i32 1, - i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 256, i1 false) ret void } ; Test the upper range of a single MVC, i64 version. -define void @f6(i8 *%dest, i8 *%src) { +define void @f6(i8* %dest, i8* %src) { ; CHECK-LABEL: f6: ; CHECK: mvc 0(256,%r2), 0(%r3) ; CHECK: br %r14 - call void @llvm.memcpy.p0i8.p0i8.i64(i8 *%dest, i8 *%src, i64 256, i32 1, - i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dest, i8* %src, i64 256, i1 false) ret void } ; Test the first case that needs two MVCs. -define void @f7(i8 *%dest, i8 *%src) { +define void @f7(i8* %dest, i8* %src) { ; CHECK-LABEL: f7: ; CHECK: mvc 0(256,%r2), 0(%r3) ; CHECK: mvc 256(1,%r2), 256(%r3) ; CHECK: br %r14 - call void @llvm.memcpy.p0i8.p0i8.i32(i8 *%dest, i8 *%src, i32 257, i32 1, - i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 257, i1 false) ret void } ; Test the last-but-one case that needs two MVCs. -define void @f8(i8 *%dest, i8 *%src) { +define void @f8(i8* %dest, i8* %src) { ; CHECK-LABEL: f8: ; CHECK: mvc 0(256,%r2), 0(%r3) ; CHECK: mvc 256(255,%r2), 256(%r3) ; CHECK: br %r14 - call void @llvm.memcpy.p0i8.p0i8.i64(i8 *%dest, i8 *%src, i64 511, i32 1, - i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dest, i8* %src, i64 511, i1 false) ret void } ; Test the last case that needs two MVCs. -define void @f9(i8 *%dest, i8 *%src) { +define void @f9(i8* %dest, i8* %src) { ; CHECK-LABEL: f9: ; CHECK: mvc 0(256,%r2), 0(%r3) ; CHECK: mvc 256(256,%r2), 256(%r3) ; CHECK: br %r14 - call void @llvm.memcpy.p0i8.p0i8.i64(i8 *%dest, i8 *%src, i64 512, i32 1, - i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dest, i8* %src, i64 512, i1 false) ret void } ; Test an arbitrary value that uses straight-line code. -define void @f10(i8 *%dest, i8 *%src) { +define void @f10(i8* %dest, i8* %src) { ; CHECK-LABEL: f10: ; CHECK: mvc 0(256,%r2), 0(%r3) ; CHECK: mvc 256(256,%r2), 256(%r3) @@ -110,13 +101,12 @@ define void @f10(i8 *%dest, i8 *%src) { ; CHECK: mvc 768(256,%r2), 768(%r3) ; CHECK: mvc 1024(255,%r2), 1024(%r3) ; CHECK: br %r14 - call void @llvm.memcpy.p0i8.p0i8.i64(i8 *%dest, i8 *%src, i64 1279, i32 1, - i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dest, i8* %src, i64 1279, i1 false) ret void } ; ...and again in cases where not all parts are in range of MVC. 
-define void @f11(i8 *%srcbase, i8 *%destbase) { +define void @f11(i8* %srcbase, i8* %destbase) { ; CHECK-LABEL: f11: ; CHECK: mvc 4000(256,%r2), 3500(%r3) ; CHECK: lay [[NEWDEST:%r[1-5]]], 4256(%r2) @@ -126,10 +116,9 @@ define void @f11(i8 *%srcbase, i8 *%destbase) { ; CHECK: mvc 512(256,[[NEWDEST]]), 0([[NEWSRC]]) ; CHECK: mvc 768(255,[[NEWDEST]]), 256([[NEWSRC]]) ; CHECK: br %r14 - %dest = getelementptr i8, i8 *%srcbase, i64 4000 + %dest = getelementptr i8, i8* %srcbase, i64 4000 %src = getelementptr i8, i8* %destbase, i64 3500 - call void @llvm.memcpy.p0i8.p0i8.i64(i8 *%dest, i8 *%src, i64 1279, i32 1, - i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dest, i8* %src, i64 1279, i1 false) ret void } @@ -148,10 +137,9 @@ define void @f12() { %arr = alloca [6000 x i8] %dest = getelementptr [6000 x i8], [6000 x i8] *%arr, i64 0, i64 3900 %src = getelementptr [6000 x i8], [6000 x i8] *%arr, i64 0, i64 1924 - call void @foo(i8 *%dest, i8 *%src) - call void @llvm.memcpy.p0i8.p0i8.i64(i8 *%dest, i8 *%src, i64 1279, i32 1, - i1 false) - call void @foo(i8 *%dest, i8 *%src) + call void @foo(i8* %dest, i8* %src) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dest, i8* %src, i64 1279, i1 false) + call void @foo(i8* %dest, i8* %src) ret void } @@ -170,15 +158,14 @@ define void @f13() { %arr = alloca [6000 x i8] %dest = getelementptr [6000 x i8], [6000 x i8] *%arr, i64 0, i64 24 %src = getelementptr [6000 x i8], [6000 x i8] *%arr, i64 0, i64 3650 - call void @foo(i8 *%dest, i8 *%src) - call void @llvm.memcpy.p0i8.p0i8.i64(i8 *%dest, i8 *%src, i64 1279, i32 1, - i1 false) - call void @foo(i8 *%dest, i8 *%src) + call void @foo(i8* %dest, i8* %src) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dest, i8* %src, i64 1279, i1 false) + call void @foo(i8* %dest, i8* %src) ret void } ; Test the last case that is done using straight-line code. -define void @f14(i8 *%dest, i8 *%src) { +define void @f14(i8* %dest, i8* %src) { ; CHECK-LABEL: f14: ; CHECK: mvc 0(256,%r2), 0(%r3) ; CHECK: mvc 256(256,%r2), 256(%r3) @@ -187,13 +174,12 @@ define void @f14(i8 *%dest, i8 *%src) { ; CHECK: mvc 1024(256,%r2), 1024(%r3) ; CHECK: mvc 1280(256,%r2), 1280(%r3) ; CHECK: br %r14 - call void @llvm.memcpy.p0i8.p0i8.i64(i8 *%dest, i8 *%src, i64 1536, i32 1, - i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dest, i8* %src, i64 1536, i1 false) ret void } ; Test the first case that is done using a loop. 
-define void @f15(i8 *%dest, i8 *%src) { +define void @f15(i8* %dest, i8* %src) { ; CHECK-LABEL: f15: ; CHECK: lghi [[COUNT:%r[0-5]]], 6 ; CHECK: [[LABEL:\.L[^:]*]]: @@ -204,8 +190,7 @@ define void @f15(i8 *%dest, i8 *%src) { ; CHECK: brctg [[COUNT]], [[LABEL]] ; CHECK: mvc 0(1,%r2), 0(%r3) ; CHECK: br %r14 - call void @llvm.memcpy.p0i8.p0i8.i64(i8 *%dest, i8 *%src, i64 1537, i32 1, - i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dest, i8* %src, i64 1537, i1 false) ret void } @@ -227,9 +212,8 @@ define void @f16() { %arr = alloca [3200 x i8] %dest = getelementptr [3200 x i8], [3200 x i8] *%arr, i64 0, i64 1600 %src = getelementptr [3200 x i8], [3200 x i8] *%arr, i64 0, i64 0 - call void @foo(i8 *%dest, i8 *%src) - call void @llvm.memcpy.p0i8.p0i8.i64(i8 *%dest, i8 *%src, i64 1537, i32 1, - i1 false) - call void @foo(i8 *%dest, i8 *%src) + call void @foo(i8* %dest, i8* %src) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dest, i8* %src, i64 1537, i1 false) + call void @foo(i8* %dest, i8* %src) ret void } diff --git a/llvm/test/CodeGen/SystemZ/memset-01.ll b/llvm/test/CodeGen/SystemZ/memset-01.ll index f17901cc73a..73b3ffa5b4a 100644 --- a/llvm/test/CodeGen/SystemZ/memset-01.ll +++ b/llvm/test/CodeGen/SystemZ/memset-01.ll @@ -2,131 +2,131 @@ ; ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -declare void @llvm.memset.p0i8.i32(i8 *nocapture, i8, i32, i32, i1) nounwind -declare void @llvm.memset.p0i8.i64(i8 *nocapture, i8, i64, i32, i1) nounwind +declare void @llvm.memset.p0i8.i32(i8 *nocapture, i8, i32, i1) nounwind +declare void @llvm.memset.p0i8.i64(i8 *nocapture, i8, i64, i1) nounwind ; No bytes, i32 version. -define void @f1(i8 *%dest, i8 %val) { +define void @f1(i8* %dest, i8 %val) { ; CHECK-LABEL: f1: ; CHECK-NOT: %r2 ; CHECK-NOT: %r3 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 %val, i32 0, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 %val, i32 0, i1 false) ret void } ; No bytes, i64 version. -define void @f2(i8 *%dest, i8 %val) { +define void @f2(i8* %dest, i8 %val) { ; CHECK-LABEL: f2: ; CHECK-NOT: %r2 ; CHECK-NOT: %r3 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 %val, i64 0, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 %val, i64 0, i1 false) ret void } ; 1 byte, i32 version. -define void @f3(i8 *%dest, i8 %val) { +define void @f3(i8* %dest, i8 %val) { ; CHECK-LABEL: f3: ; CHECK: stc %r3, 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 %val, i32 1, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 %val, i32 1, i1 false) ret void } ; 1 byte, i64 version. -define void @f4(i8 *%dest, i8 %val) { +define void @f4(i8* %dest, i8 %val) { ; CHECK-LABEL: f4: ; CHECK: stc %r3, 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 %val, i64 1, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 %val, i64 1, i1 false) ret void } ; 2 bytes, i32 version. -define void @f5(i8 *%dest, i8 %val) { +define void @f5(i8* %dest, i8 %val) { ; CHECK-LABEL: f5: ; CHECK-DAG: stc %r3, 0(%r2) ; CHECK-DAG: stc %r3, 1(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 %val, i32 2, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 %val, i32 2, i1 false) ret void } ; 2 bytes, i64 version. 
-define void @f6(i8 *%dest, i8 %val) { +define void @f6(i8* %dest, i8 %val) { ; CHECK-LABEL: f6: ; CHECK-DAG: stc %r3, 0(%r2) ; CHECK-DAG: stc %r3, 1(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 %val, i64 2, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 %val, i64 2, i1 false) ret void } ; 3 bytes, i32 version. -define void @f7(i8 *%dest, i8 %val) { +define void @f7(i8* %dest, i8 %val) { ; CHECK-LABEL: f7: ; CHECK: stc %r3, 0(%r2) ; CHECK: mvc 1(2,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 %val, i32 3, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 %val, i32 3, i1 false) ret void } ; 3 bytes, i64 version. -define void @f8(i8 *%dest, i8 %val) { +define void @f8(i8* %dest, i8 %val) { ; CHECK-LABEL: f8: ; CHECK: stc %r3, 0(%r2) ; CHECK: mvc 1(2,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 %val, i64 3, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 %val, i64 3, i1 false) ret void } ; 257 bytes, i32 version. -define void @f9(i8 *%dest, i8 %val) { +define void @f9(i8* %dest, i8 %val) { ; CHECK-LABEL: f9: ; CHECK: stc %r3, 0(%r2) ; CHECK: mvc 1(256,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 %val, i32 257, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 %val, i32 257, i1 false) ret void } ; 257 bytes, i64 version. -define void @f10(i8 *%dest, i8 %val) { +define void @f10(i8* %dest, i8 %val) { ; CHECK-LABEL: f10: ; CHECK: stc %r3, 0(%r2) ; CHECK: mvc 1(256,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 %val, i64 257, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 %val, i64 257, i1 false) ret void } ; 258 bytes, i32 version. We need two MVCs. -define void @f11(i8 *%dest, i8 %val) { +define void @f11(i8* %dest, i8 %val) { ; CHECK-LABEL: f11: ; CHECK: stc %r3, 0(%r2) ; CHECK: mvc 1(256,%r2), 0(%r2) ; CHECK: mvc 257(1,%r2), 256(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 %val, i32 258, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 %val, i32 258, i1 false) ret void } ; 258 bytes, i64 version. -define void @f12(i8 *%dest, i8 %val) { +define void @f12(i8* %dest, i8 %val) { ; CHECK-LABEL: f12: ; CHECK: stc %r3, 0(%r2) ; CHECK: mvc 1(256,%r2), 0(%r2) ; CHECK: mvc 257(1,%r2), 256(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 %val, i64 258, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 %val, i64 258, i1 false) ret void } ; Test the largest case for which straight-line code is used. -define void @f13(i8 *%dest, i8 %val) { +define void @f13(i8* %dest, i8 %val) { ; CHECK-LABEL: f13: ; CHECK: stc %r3, 0(%r2) ; CHECK: mvc 1(256,%r2), 0(%r2) @@ -136,14 +136,13 @@ define void @f13(i8 *%dest, i8 %val) { ; CHECK: mvc 1025(256,%r2), 1024(%r2) ; CHECK: mvc 1281(256,%r2), 1280(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 %val, i64 1537, i32 1, - i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 %val, i64 1537, i1 false) ret void } ; Test the next size up, which uses a loop. We leave the other corner ; cases to memcpy-01.ll. 
-define void @f14(i8 *%dest, i8 %val) { +define void @f14(i8* %dest, i8 %val) { ; CHECK-LABEL: f14: ; CHECK: stc %r3, 0(%r2) ; CHECK: lghi [[COUNT:%r[0-5]]], 6 @@ -154,7 +153,6 @@ define void @f14(i8 *%dest, i8 %val) { ; CHECK: brctg [[COUNT]], [[LABEL]] ; CHECK: mvc 1(1,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 %val, i64 1538, i32 1, - i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 %val, i64 1538, i1 false) ret void } diff --git a/llvm/test/CodeGen/SystemZ/memset-02.ll b/llvm/test/CodeGen/SystemZ/memset-02.ll index b4724c0b574..3f5ffca3398 100644 --- a/llvm/test/CodeGen/SystemZ/memset-02.ll +++ b/llvm/test/CodeGen/SystemZ/memset-02.ll @@ -2,161 +2,161 @@ ; ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -declare void @llvm.memset.p0i8.i32(i8 *nocapture, i8, i32, i32, i1) nounwind -declare void @llvm.memset.p0i8.i64(i8 *nocapture, i8, i64, i32, i1) nounwind +declare void @llvm.memset.p0i8.i32(i8 *nocapture, i8, i32, i1) nounwind +declare void @llvm.memset.p0i8.i64(i8 *nocapture, i8, i64, i1) nounwind ; No bytes, i32 version. -define void @f1(i8 *%dest) { +define void @f1(i8* %dest) { ; CHECK-LABEL: f1: ; CHECK-NOT: %r2 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 128, i32 0, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 128, i32 0, i1 false) ret void } ; No bytes, i64 version. -define void @f2(i8 *%dest) { +define void @f2(i8* %dest) { ; CHECK-LABEL: f2: ; CHECK-NOT: %r2 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 128, i64 0, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 128, i64 0, i1 false) ret void } ; 1 byte, i32 version. -define void @f3(i8 *%dest) { +define void @f3(i8* %dest) { ; CHECK-LABEL: f3: ; CHECK: mvi 0(%r2), 128 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 128, i32 1, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 128, i32 1, i1 false) ret void } ; 1 byte, i64 version. -define void @f4(i8 *%dest) { +define void @f4(i8* %dest) { ; CHECK-LABEL: f4: ; CHECK: mvi 0(%r2), 128 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 128, i64 1, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 128, i64 1, i1 false) ret void } ; 2 bytes, i32 version. -define void @f5(i8 *%dest) { +define void @f5(i8* %dest) { ; CHECK-LABEL: f5: ; CHECK: mvhhi 0(%r2), -32640 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 128, i32 2, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 128, i32 2, i1 false) ret void } ; 2 bytes, i64 version. -define void @f6(i8 *%dest) { +define void @f6(i8* %dest) { ; CHECK-LABEL: f6: ; CHECK: mvhhi 0(%r2), -32640 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 128, i64 2, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 128, i64 2, i1 false) ret void } ; 3 bytes, i32 version. -define void @f7(i8 *%dest) { +define void @f7(i8* %dest) { ; CHECK-LABEL: f7: ; CHECK-DAG: mvhhi 0(%r2), -32640 ; CHECK-DAG: mvi 2(%r2), 128 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 128, i32 3, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 128, i32 3, i1 false) ret void } ; 3 bytes, i64 version. 
-define void @f8(i8 *%dest) { +define void @f8(i8* %dest) { ; CHECK-LABEL: f8: ; CHECK-DAG: mvhhi 0(%r2), -32640 ; CHECK-DAG: mvi 2(%r2), 128 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 128, i64 3, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 128, i64 3, i1 false) ret void } ; 4 bytes, i32 version. -define void @f9(i8 *%dest) { +define void @f9(i8* %dest) { ; CHECK-LABEL: f9: ; CHECK: iilf [[REG:%r[0-5]]], 2155905152 ; CHECK: st [[REG]], 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 128, i32 4, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 128, i32 4, i1 false) ret void } ; 4 bytes, i64 version. -define void @f10(i8 *%dest) { +define void @f10(i8* %dest) { ; CHECK-LABEL: f10: ; CHECK: iilf [[REG:%r[0-5]]], 2155905152 ; CHECK: st [[REG]], 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 128, i64 4, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 128, i64 4, i1 false) ret void } ; 5 bytes, i32 version. -define void @f11(i8 *%dest) { +define void @f11(i8* %dest) { ; CHECK-LABEL: f11: ; CHECK: mvi 0(%r2), 128 ; CHECK: mvc 1(4,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 128, i32 5, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 128, i32 5, i1 false) ret void } ; 5 bytes, i64 version. -define void @f12(i8 *%dest) { +define void @f12(i8* %dest) { ; CHECK-LABEL: f12: ; CHECK: mvi 0(%r2), 128 ; CHECK: mvc 1(4,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 128, i64 5, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 128, i64 5, i1 false) ret void } ; 257 bytes, i32 version. -define void @f13(i8 *%dest) { +define void @f13(i8* %dest) { ; CHECK-LABEL: f13: ; CHECK: mvi 0(%r2), 128 ; CHECK: mvc 1(256,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 128, i32 257, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 128, i32 257, i1 false) ret void } ; 257 bytes, i64 version. -define void @f14(i8 *%dest) { +define void @f14(i8* %dest) { ; CHECK-LABEL: f14: ; CHECK: mvi 0(%r2), 128 ; CHECK: mvc 1(256,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 128, i64 257, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 128, i64 257, i1 false) ret void } ; 258 bytes, i32 version. We need two MVCs. -define void @f15(i8 *%dest) { +define void @f15(i8* %dest) { ; CHECK-LABEL: f15: ; CHECK: mvi 0(%r2), 128 ; CHECK: mvc 1(256,%r2), 0(%r2) ; CHECK: mvc 257(1,%r2), 256(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 128, i32 258, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 128, i32 258, i1 false) ret void } ; 258 bytes, i64 version. 
-define void @f16(i8 *%dest) { +define void @f16(i8* %dest) { ; CHECK-LABEL: f16: ; CHECK: mvi 0(%r2), 128 ; CHECK: mvc 1(256,%r2), 0(%r2) ; CHECK: mvc 257(1,%r2), 256(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 128, i64 258, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 128, i64 258, i1 false) ret void } diff --git a/llvm/test/CodeGen/SystemZ/memset-03.ll b/llvm/test/CodeGen/SystemZ/memset-03.ll index a95f89fc7c0..a6370f4ab10 100644 --- a/llvm/test/CodeGen/SystemZ/memset-03.ll +++ b/llvm/test/CodeGen/SystemZ/memset-03.ll @@ -2,381 +2,381 @@ ; ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -declare void @llvm.memset.p0i8.i32(i8 *nocapture, i8, i32, i32, i1) nounwind -declare void @llvm.memset.p0i8.i64(i8 *nocapture, i8, i64, i32, i1) nounwind +declare void @llvm.memset.p0i8.i32(i8 *nocapture, i8, i32, i1) nounwind +declare void @llvm.memset.p0i8.i64(i8 *nocapture, i8, i64, i1) nounwind ; No bytes, i32 version. -define void @f1(i8 *%dest) { +define void @f1(i8* %dest) { ; CHECK-LABEL: f1: ; CHECK-NOT: %r2 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 0, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 0, i1 false) ret void } ; No bytes, i64 version. -define void @f2(i8 *%dest) { +define void @f2(i8* %dest) { ; CHECK-LABEL: f2: ; CHECK-NOT: %r2 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 0, i64 0, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 0, i64 0, i1 false) ret void } ; 1 byte, i32 version. -define void @f3(i8 *%dest) { +define void @f3(i8* %dest) { ; CHECK-LABEL: f3: ; CHECK: mvi 0(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 1, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 1, i1 false) ret void } ; 1 byte, i64 version. -define void @f4(i8 *%dest) { +define void @f4(i8* %dest) { ; CHECK-LABEL: f4: ; CHECK: mvi 0(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 0, i64 1, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 0, i64 1, i1 false) ret void } ; 2 bytes, i32 version. -define void @f5(i8 *%dest) { +define void @f5(i8* %dest) { ; CHECK-LABEL: f5: ; CHECK: mvhhi 0(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 2, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 2, i1 false) ret void } ; 2 bytes, i64 version. -define void @f6(i8 *%dest) { +define void @f6(i8* %dest) { ; CHECK-LABEL: f6: ; CHECK: mvhhi 0(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 0, i64 2, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 0, i64 2, i1 false) ret void } ; 3 bytes, i32 version. -define void @f7(i8 *%dest) { +define void @f7(i8* %dest) { ; CHECK-LABEL: f7: ; CHECK-DAG: mvhhi 0(%r2), 0 ; CHECK-DAG: mvi 2(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 3, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 3, i1 false) ret void } ; 3 bytes, i64 version. -define void @f8(i8 *%dest) { +define void @f8(i8* %dest) { ; CHECK-LABEL: f8: ; CHECK-DAG: mvhhi 0(%r2), 0 ; CHECK-DAG: mvi 2(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 0, i64 3, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 0, i64 3, i1 false) ret void } ; 4 bytes, i32 version. 
-define void @f9(i8 *%dest) { +define void @f9(i8* %dest) { ; CHECK-LABEL: f9: ; CHECK: mvhi 0(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 4, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 4, i1 false) ret void } ; 4 bytes, i64 version. -define void @f10(i8 *%dest) { +define void @f10(i8* %dest) { ; CHECK-LABEL: f10: ; CHECK: mvhi 0(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 0, i64 4, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 0, i64 4, i1 false) ret void } ; 5 bytes, i32 version. -define void @f11(i8 *%dest) { +define void @f11(i8* %dest) { ; CHECK-LABEL: f11: ; CHECK-DAG: mvhi 0(%r2), 0 ; CHECK-DAG: mvi 4(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 5, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 5, i1 false) ret void } ; 5 bytes, i64 version. -define void @f12(i8 *%dest) { +define void @f12(i8* %dest) { ; CHECK-LABEL: f12: ; CHECK-DAG: mvhi 0(%r2), 0 ; CHECK-DAG: mvi 4(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 0, i64 5, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 0, i64 5, i1 false) ret void } ; 6 bytes, i32 version. -define void @f13(i8 *%dest) { +define void @f13(i8* %dest) { ; CHECK-LABEL: f13: ; CHECK-DAG: mvhi 0(%r2), 0 ; CHECK-DAG: mvhhi 4(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 6, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 6, i1 false) ret void } ; 6 bytes, i64 version. -define void @f14(i8 *%dest) { +define void @f14(i8* %dest) { ; CHECK-LABEL: f14: ; CHECK-DAG: mvhi 0(%r2), 0 ; CHECK-DAG: mvhhi 4(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 0, i64 6, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 0, i64 6, i1 false) ret void } ; 7 bytes, i32 version. -define void @f15(i8 *%dest) { +define void @f15(i8* %dest) { ; CHECK-LABEL: f15: ; CHECK: xc 0(7,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 7, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 7, i1 false) ret void } ; 7 bytes, i64 version. -define void @f16(i8 *%dest) { +define void @f16(i8* %dest) { ; CHECK-LABEL: f16: ; CHECK: xc 0(7,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 0, i64 7, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 0, i64 7, i1 false) ret void } ; 8 bytes, i32 version. -define void @f17(i8 *%dest) { +define void @f17(i8* %dest) { ; CHECK-LABEL: f17: ; CHECK: mvghi 0(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 8, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 8, i1 false) ret void } ; 8 bytes, i64 version. -define void @f18(i8 *%dest) { +define void @f18(i8* %dest) { ; CHECK-LABEL: f18: ; CHECK: mvghi 0(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 0, i64 8, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 0, i64 8, i1 false) ret void } ; 9 bytes, i32 version. -define void @f19(i8 *%dest) { +define void @f19(i8* %dest) { ; CHECK-LABEL: f19: ; CHECK-DAG: mvghi 0(%r2), 0 ; CHECK-DAG: mvi 8(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 9, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 9, i1 false) ret void } ; 9 bytes, i64 version. 
-define void @f20(i8 *%dest) { +define void @f20(i8* %dest) { ; CHECK-LABEL: f20: ; CHECK-DAG: mvghi 0(%r2), 0 ; CHECK-DAG: mvi 8(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 0, i64 9, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 0, i64 9, i1 false) ret void } ; 10 bytes, i32 version. -define void @f21(i8 *%dest) { +define void @f21(i8* %dest) { ; CHECK-LABEL: f21: ; CHECK-DAG: mvghi 0(%r2), 0 ; CHECK-DAG: mvhhi 8(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 10, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 10, i1 false) ret void } ; 10 bytes, i64 version. -define void @f22(i8 *%dest) { +define void @f22(i8* %dest) { ; CHECK-LABEL: f22: ; CHECK-DAG: mvghi 0(%r2), 0 ; CHECK-DAG: mvhhi 8(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 0, i64 10, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 0, i64 10, i1 false) ret void } ; 11 bytes, i32 version. -define void @f23(i8 *%dest) { +define void @f23(i8* %dest) { ; CHECK-LABEL: f23: ; CHECK: xc 0(11,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 11, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 11, i1 false) ret void } ; 11 bytes, i64 version. -define void @f24(i8 *%dest) { +define void @f24(i8* %dest) { ; CHECK-LABEL: f24: ; CHECK: xc 0(11,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 0, i64 11, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 0, i64 11, i1 false) ret void } ; 12 bytes, i32 version. -define void @f25(i8 *%dest) { +define void @f25(i8* %dest) { ; CHECK-LABEL: f25: ; CHECK-DAG: mvghi 0(%r2), 0 ; CHECK-DAG: mvhi 8(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 12, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 12, i1 false) ret void } ; 12 bytes, i64 version. -define void @f26(i8 *%dest) { +define void @f26(i8* %dest) { ; CHECK-LABEL: f26: ; CHECK-DAG: mvghi 0(%r2), 0 ; CHECK-DAG: mvhi 8(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 12, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 12, i1 false) ret void } ; 13 bytes, i32 version. -define void @f27(i8 *%dest) { +define void @f27(i8* %dest) { ; CHECK-LABEL: f27: ; CHECK: xc 0(13,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 13, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 13, i1 false) ret void } ; 13 bytes, i64 version. -define void @f28(i8 *%dest) { +define void @f28(i8* %dest) { ; CHECK-LABEL: f28: ; CHECK: xc 0(13,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 0, i64 13, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 0, i64 13, i1 false) ret void } ; 14 bytes, i32 version. -define void @f29(i8 *%dest) { +define void @f29(i8* %dest) { ; CHECK-LABEL: f29: ; CHECK: xc 0(14,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 14, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 14, i1 false) ret void } ; 14 bytes, i64 version. -define void @f30(i8 *%dest) { +define void @f30(i8* %dest) { ; CHECK-LABEL: f30: ; CHECK: xc 0(14,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 0, i64 14, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 0, i64 14, i1 false) ret void } ; 15 bytes, i32 version. 
-define void @f31(i8 *%dest) { +define void @f31(i8* %dest) { ; CHECK-LABEL: f31: ; CHECK: xc 0(15,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 15, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 15, i1 false) ret void } ; 15 bytes, i64 version. -define void @f32(i8 *%dest) { +define void @f32(i8* %dest) { ; CHECK-LABEL: f32: ; CHECK: xc 0(15,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 0, i64 15, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 0, i64 15, i1 false) ret void } ; 16 bytes, i32 version. -define void @f33(i8 *%dest) { +define void @f33(i8* %dest) { ; CHECK-LABEL: f33: ; CHECK-DAG: mvghi 0(%r2), 0 ; CHECK-DAG: mvghi 8(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 16, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 16, i1 false) ret void } ; 16 bytes, i64 version. -define void @f34(i8 *%dest) { +define void @f34(i8* %dest) { ; CHECK-LABEL: f34: ; CHECK-DAG: mvghi 0(%r2), 0 ; CHECK-DAG: mvghi 8(%r2), 0 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 0, i64 16, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 0, i64 16, i1 false) ret void } ; 17 bytes, i32 version. -define void @f35(i8 *%dest) { +define void @f35(i8* %dest) { ; CHECK-LABEL: f35: ; CHECK: xc 0(17,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 17, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 17, i1 false) ret void } ; 17 bytes, i64 version. -define void @f36(i8 *%dest) { +define void @f36(i8* %dest) { ; CHECK-LABEL: f36: ; CHECK: xc 0(17,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 0, i64 17, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 0, i64 17, i1 false) ret void } ; 256 bytes, i32 version. -define void @f37(i8 *%dest) { +define void @f37(i8* %dest) { ; CHECK-LABEL: f37: ; CHECK: xc 0(256,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 256, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 256, i1 false) ret void } ; 256 bytes, i64 version. -define void @f38(i8 *%dest) { +define void @f38(i8* %dest) { ; CHECK-LABEL: f38: ; CHECK: xc 0(256,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 0, i64 256, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 0, i64 256, i1 false) ret void } ; 257 bytes, i32 version. We need two MVCs. -define void @f39(i8 *%dest) { +define void @f39(i8* %dest) { ; CHECK-LABEL: f39: ; CHECK: xc 0(256,%r2), 0(%r2) ; CHECK: xc 256(1,%r2), 256(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 0, i32 257, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 257, i1 false) ret void } ; 257 bytes, i64 version. 
-define void @f40(i8 *%dest) { +define void @f40(i8* %dest) { ; CHECK-LABEL: f40: ; CHECK: xc 0(256,%r2), 0(%r2) ; CHECK: xc 256(1,%r2), 256(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 0, i64 257, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 0, i64 257, i1 false) ret void } diff --git a/llvm/test/CodeGen/SystemZ/memset-04.ll b/llvm/test/CodeGen/SystemZ/memset-04.ll index 7906e8d10a1..dcb8b6bad81 100644 --- a/llvm/test/CodeGen/SystemZ/memset-04.ll +++ b/llvm/test/CodeGen/SystemZ/memset-04.ll @@ -2,397 +2,397 @@ ; ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -declare void @llvm.memset.p0i8.i32(i8 *nocapture, i8, i32, i32, i1) nounwind -declare void @llvm.memset.p0i8.i64(i8 *nocapture, i8, i64, i32, i1) nounwind +declare void @llvm.memset.p0i8.i32(i8 *nocapture, i8, i32, i1) nounwind +declare void @llvm.memset.p0i8.i64(i8 *nocapture, i8, i64, i1) nounwind ; No bytes, i32 version. -define void @f1(i8 *%dest) { +define void @f1(i8* %dest) { ; CHECK-LABEL: f1: ; CHECK-NOT: %r2 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 0, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 0, i1 false) ret void } ; No bytes, i64 version. -define void @f2(i8 *%dest) { +define void @f2(i8* %dest) { ; CHECK-LABEL: f2: ; CHECK-NOT: %r2 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 0, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 0, i1 false) ret void } ; 1 byte, i32 version. -define void @f3(i8 *%dest) { +define void @f3(i8* %dest) { ; CHECK-LABEL: f3: ; CHECK: mvi 0(%r2), 255 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 1, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 1, i1 false) ret void } ; 1 byte, i64 version. -define void @f4(i8 *%dest) { +define void @f4(i8* %dest) { ; CHECK-LABEL: f4: ; CHECK: mvi 0(%r2), 255 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 1, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 1, i1 false) ret void } ; 2 bytes, i32 version. -define void @f5(i8 *%dest) { +define void @f5(i8* %dest) { ; CHECK-LABEL: f5: ; CHECK: mvhhi 0(%r2), -1 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 2, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 2, i1 false) ret void } ; 2 bytes, i64 version. -define void @f6(i8 *%dest) { +define void @f6(i8* %dest) { ; CHECK-LABEL: f6: ; CHECK: mvhhi 0(%r2), -1 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 2, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 2, i1 false) ret void } ; 3 bytes, i32 version. -define void @f7(i8 *%dest) { +define void @f7(i8* %dest) { ; CHECK-LABEL: f7: ; CHECK-DAG: mvhhi 0(%r2), -1 ; CHECK-DAG: mvi 2(%r2), 255 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 3, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 3, i1 false) ret void } ; 3 bytes, i64 version. -define void @f8(i8 *%dest) { +define void @f8(i8* %dest) { ; CHECK-LABEL: f8: ; CHECK-DAG: mvhhi 0(%r2), -1 ; CHECK-DAG: mvi 2(%r2), 255 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 3, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 3, i1 false) ret void } ; 4 bytes, i32 version. 
-define void @f9(i8 *%dest) { +define void @f9(i8* %dest) { ; CHECK-LABEL: f9: ; CHECK: mvhi 0(%r2), -1 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 4, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 4, i1 false) ret void } ; 4 bytes, i64 version. -define void @f10(i8 *%dest) { +define void @f10(i8* %dest) { ; CHECK-LABEL: f10: ; CHECK: mvhi 0(%r2), -1 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 4, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 4, i1 false) ret void } ; 5 bytes, i32 version. -define void @f11(i8 *%dest) { +define void @f11(i8* %dest) { ; CHECK-LABEL: f11: ; CHECK-DAG: mvhi 0(%r2), -1 ; CHECK-DAG: mvi 4(%r2), 255 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 5, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 5, i1 false) ret void } ; 5 bytes, i64 version. -define void @f12(i8 *%dest) { +define void @f12(i8* %dest) { ; CHECK-LABEL: f12: ; CHECK-DAG: mvhi 0(%r2), -1 ; CHECK-DAG: mvi 4(%r2), 255 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 5, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 5, i1 false) ret void } ; 6 bytes, i32 version. -define void @f13(i8 *%dest) { +define void @f13(i8* %dest) { ; CHECK-LABEL: f13: ; CHECK-DAG: mvhi 0(%r2), -1 ; CHECK-DAG: mvhhi 4(%r2), -1 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 6, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 6, i1 false) ret void } ; 6 bytes, i64 version. -define void @f14(i8 *%dest) { +define void @f14(i8* %dest) { ; CHECK-LABEL: f14: ; CHECK-DAG: mvhi 0(%r2), -1 ; CHECK-DAG: mvhhi 4(%r2), -1 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 6, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 6, i1 false) ret void } ; 7 bytes, i32 version. -define void @f15(i8 *%dest) { +define void @f15(i8* %dest) { ; CHECK-LABEL: f15: ; CHECK: mvi 0(%r2), 255 ; CHECK: mvc 1(6,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 7, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 7, i1 false) ret void } ; 7 bytes, i64 version. -define void @f16(i8 *%dest) { +define void @f16(i8* %dest) { ; CHECK-LABEL: f16: ; CHECK: mvi 0(%r2), 255 ; CHECK: mvc 1(6,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 7, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 7, i1 false) ret void } ; 8 bytes, i32 version. -define void @f17(i8 *%dest) { +define void @f17(i8* %dest) { ; CHECK-LABEL: f17: ; CHECK: mvghi 0(%r2), -1 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 8, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 8, i1 false) ret void } ; 8 bytes, i64 version. -define void @f18(i8 *%dest) { +define void @f18(i8* %dest) { ; CHECK-LABEL: f18: ; CHECK: mvghi 0(%r2), -1 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 8, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 8, i1 false) ret void } ; 9 bytes, i32 version. 
-define void @f19(i8 *%dest) { +define void @f19(i8* %dest) { ; CHECK-LABEL: f19: ; CHECK-DAG: mvghi 0(%r2), -1 ; CHECK-DAG: mvi 8(%r2), 255 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 9, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 9, i1 false) ret void } ; 9 bytes, i64 version. -define void @f20(i8 *%dest) { +define void @f20(i8* %dest) { ; CHECK-LABEL: f20: ; CHECK-DAG: mvghi 0(%r2), -1 ; CHECK-DAG: mvi 8(%r2), 255 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 9, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 9, i1 false) ret void } ; 10 bytes, i32 version. -define void @f21(i8 *%dest) { +define void @f21(i8* %dest) { ; CHECK-LABEL: f21: ; CHECK-DAG: mvghi 0(%r2), -1 ; CHECK-DAG: mvhhi 8(%r2), -1 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 10, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 10, i1 false) ret void } ; 10 bytes, i64 version. -define void @f22(i8 *%dest) { +define void @f22(i8* %dest) { ; CHECK-LABEL: f22: ; CHECK-DAG: mvghi 0(%r2), -1 ; CHECK-DAG: mvhhi 8(%r2), -1 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 10, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 10, i1 false) ret void } ; 11 bytes, i32 version. -define void @f23(i8 *%dest) { +define void @f23(i8* %dest) { ; CHECK-LABEL: f23: ; CHECK: mvi 0(%r2), 255 ; CHECK: mvc 1(10,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 11, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 11, i1 false) ret void } ; 11 bytes, i64 version. -define void @f24(i8 *%dest) { +define void @f24(i8* %dest) { ; CHECK-LABEL: f24: ; CHECK: mvi 0(%r2), 255 ; CHECK: mvc 1(10,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 11, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 11, i1 false) ret void } ; 12 bytes, i32 version. -define void @f25(i8 *%dest) { +define void @f25(i8* %dest) { ; CHECK-LABEL: f25: ; CHECK-DAG: mvghi 0(%r2), -1 ; CHECK-DAG: mvhi 8(%r2), -1 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 12, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 12, i1 false) ret void } ; 12 bytes, i64 version. -define void @f26(i8 *%dest) { +define void @f26(i8* %dest) { ; CHECK-LABEL: f26: ; CHECK-DAG: mvghi 0(%r2), -1 ; CHECK-DAG: mvhi 8(%r2), -1 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 12, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 12, i1 false) ret void } ; 13 bytes, i32 version. -define void @f27(i8 *%dest) { +define void @f27(i8* %dest) { ; CHECK-LABEL: f27: ; CHECK: mvi 0(%r2), 255 ; CHECK: mvc 1(12,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 13, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 13, i1 false) ret void } ; 13 bytes, i64 version. -define void @f28(i8 *%dest) { +define void @f28(i8* %dest) { ; CHECK-LABEL: f28: ; CHECK: mvi 0(%r2), 255 ; CHECK: mvc 1(12,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 13, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 13, i1 false) ret void } ; 14 bytes, i32 version. 
-define void @f29(i8 *%dest) { +define void @f29(i8* %dest) { ; CHECK-LABEL: f29: ; CHECK: mvi 0(%r2), 255 ; CHECK: mvc 1(13,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 14, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 14, i1 false) ret void } ; 14 bytes, i64 version. -define void @f30(i8 *%dest) { +define void @f30(i8* %dest) { ; CHECK-LABEL: f30: ; CHECK: mvi 0(%r2), 255 ; CHECK: mvc 1(13,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 14, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 14, i1 false) ret void } ; 15 bytes, i32 version. -define void @f31(i8 *%dest) { +define void @f31(i8* %dest) { ; CHECK-LABEL: f31: ; CHECK: mvi 0(%r2), 255 ; CHECK: mvc 1(14,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 15, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 15, i1 false) ret void } ; 15 bytes, i64 version. -define void @f32(i8 *%dest) { +define void @f32(i8* %dest) { ; CHECK-LABEL: f32: ; CHECK: mvi 0(%r2), 255 ; CHECK: mvc 1(14,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 15, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 15, i1 false) ret void } ; 16 bytes, i32 version. -define void @f33(i8 *%dest) { +define void @f33(i8* %dest) { ; CHECK-LABEL: f33: ; CHECK-DAG: mvghi 0(%r2), -1 ; CHECK-DAG: mvghi 8(%r2), -1 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 16, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 16, i1 false) ret void } ; 16 bytes, i64 version. -define void @f34(i8 *%dest) { +define void @f34(i8* %dest) { ; CHECK-LABEL: f34: ; CHECK-DAG: mvghi 0(%r2), -1 ; CHECK-DAG: mvghi 8(%r2), -1 ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 16, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 16, i1 false) ret void } ; 17 bytes, i32 version. -define void @f35(i8 *%dest) { +define void @f35(i8* %dest) { ; CHECK-LABEL: f35: ; CHECK: mvi 0(%r2), 255 ; CHECK: mvc 1(16,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 17, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 17, i1 false) ret void } ; 17 bytes, i64 version. -define void @f36(i8 *%dest) { +define void @f36(i8* %dest) { ; CHECK-LABEL: f36: ; CHECK: mvi 0(%r2), 255 ; CHECK: mvc 1(16,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 17, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 17, i1 false) ret void } ; 257 bytes, i32 version. -define void @f37(i8 *%dest) { +define void @f37(i8* %dest) { ; CHECK-LABEL: f37: ; CHECK: mvi 0(%r2), 255 ; CHECK: mvc 1(256,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 257, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 257, i1 false) ret void } ; 257 bytes, i64 version. -define void @f38(i8 *%dest) { +define void @f38(i8* %dest) { ; CHECK-LABEL: f38: ; CHECK: mvi 0(%r2), 255 ; CHECK: mvc 1(256,%r2), 0(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 257, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 257, i1 false) ret void } ; 258 bytes, i32 version. We need two MVCs. 
-define void @f39(i8 *%dest) { +define void @f39(i8* %dest) { ; CHECK-LABEL: f39: ; CHECK: mvi 0(%r2), 255 ; CHECK: mvc 1(256,%r2), 0(%r2) ; CHECK: mvc 257(1,%r2), 256(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i32(i8 *%dest, i8 -1, i32 258, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dest, i8 -1, i32 258, i1 false) ret void } ; 258 bytes, i64 version. -define void @f40(i8 *%dest) { +define void @f40(i8* %dest) { ; CHECK-LABEL: f40: ; CHECK: mvi 0(%r2), 255 ; CHECK: mvc 1(256,%r2), 0(%r2) ; CHECK: mvc 257(1,%r2), 256(%r2) ; CHECK: br %r14 - call void @llvm.memset.p0i8.i64(i8 *%dest, i8 -1, i64 258, i32 1, i1 false) + call void @llvm.memset.p0i8.i64(i8* %dest, i8 -1, i64 258, i1 false) ret void } diff --git a/llvm/test/CodeGen/SystemZ/tail-call-mem-intrinsics.ll b/llvm/test/CodeGen/SystemZ/tail-call-mem-intrinsics.ll index 65cc394f8a9..0290d425e57 100644 --- a/llvm/test/CodeGen/SystemZ/tail-call-mem-intrinsics.ll +++ b/llvm/test/CodeGen/SystemZ/tail-call-mem-intrinsics.ll @@ -4,7 +4,7 @@ ; CHECK: jg memcpy define void @tail_memcpy(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 { entry: - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false) ret void } @@ -12,7 +12,7 @@ entry: ; CHECK: jg memmove define void @tail_memmove(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 { entry: - tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false) + tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false) ret void } @@ -20,12 +20,12 @@ entry: ; CHECK: jg memset define void @tail_memset(i8* nocapture %p, i8 %c, i32 %n) #0 { entry: - tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i32 1, i1 false) + tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #0 -declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #0 -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) #0 +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0 +declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0 +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) #0 attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/Thumb/2011-05-11-DAGLegalizer.ll b/llvm/test/CodeGen/Thumb/2011-05-11-DAGLegalizer.ll index d8e165145bd..1ba08591668 100644 --- a/llvm/test/CodeGen/Thumb/2011-05-11-DAGLegalizer.ll +++ b/llvm/test/CodeGen/Thumb/2011-05-11-DAGLegalizer.ll @@ -29,25 +29,25 @@ do.body: ; preds = %entry %arrayidx = getelementptr inbounds [4 x %struct.RRRRRRRR], [4 x %struct.RRRRRRRR]* %eph, i32 0, i32 0 %tmp2 = bitcast %struct.RRRRRRRR* %agg.tmp to i8* %tmp3 = bitcast %struct.RRRRRRRR* %arrayidx to i8* - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp2, i8* %tmp3, i32 312, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %tmp2, i8* align 4 %tmp3, i32 312, i1 false) %tmp5 = load %struct.MMMMMMMMMMMM*, %struct.MMMMMMMMMMMM** %aidData.addr %eph6 = getelementptr inbounds %struct.MMMMMMMMMMMM, %struct.MMMMMMMMMMMM* %tmp5, i32 0, i32 0 %arrayidx7 = getelementptr inbounds [4 x %struct.RRRRRRRR], [4 x %struct.RRRRRRRR]* %eph6, i32 0, i32 1 %tmp8 = bitcast %struct.RRRRRRRR* %agg.tmp4 to i8* %tmp9 = bitcast %struct.RRRRRRRR* %arrayidx7 to i8* - call void 
@llvm.memcpy.p0i8.p0i8.i32(i8* %tmp8, i8* %tmp9, i32 312, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %tmp8, i8* align 4 %tmp9, i32 312, i1 false) %tmp11 = load %struct.MMMMMMMMMMMM*, %struct.MMMMMMMMMMMM** %aidData.addr %eph12 = getelementptr inbounds %struct.MMMMMMMMMMMM, %struct.MMMMMMMMMMMM* %tmp11, i32 0, i32 0 %arrayidx13 = getelementptr inbounds [4 x %struct.RRRRRRRR], [4 x %struct.RRRRRRRR]* %eph12, i32 0, i32 2 %tmp14 = bitcast %struct.RRRRRRRR* %agg.tmp10 to i8* %tmp15 = bitcast %struct.RRRRRRRR* %arrayidx13 to i8* - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp14, i8* %tmp15, i32 312, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %tmp14, i8* align 4 %tmp15, i32 312, i1 false) %tmp17 = load %struct.MMMMMMMMMMMM*, %struct.MMMMMMMMMMMM** %aidData.addr %eph18 = getelementptr inbounds %struct.MMMMMMMMMMMM, %struct.MMMMMMMMMMMM* %tmp17, i32 0, i32 0 %arrayidx19 = getelementptr inbounds [4 x %struct.RRRRRRRR], [4 x %struct.RRRRRRRR]* %eph18, i32 0, i32 3 %tmp20 = bitcast %struct.RRRRRRRR* %agg.tmp16 to i8* %tmp21 = bitcast %struct.RRRRRRRR* %arrayidx19 to i8* - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp20, i8* %tmp21, i32 312, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %tmp20, i8* align 4 %tmp21, i32 312, i1 false) call void (i8*, i32, i8*, i8*, ...) @CLLoggingLog(i8* %tmp, i32 2, i8* getelementptr inbounds ([62 x i8], [62 x i8]* @__PRETTY_FUNCTION__._ZN12CLGll, i32 0, i32 0), i8* getelementptr inbounds ([75 x i8], [75 x i8]* @.str, i32 0, i32 0), %struct.RRRRRRRR* byval %agg.tmp, %struct.RRRRRRRR* byval %agg.tmp4, %struct.RRRRRRRR* byval %agg.tmp10, %struct.RRRRRRRR* byval %agg.tmp16) br label %do.end @@ -57,4 +57,4 @@ do.end: ; preds = %do.body declare void @CLLoggingLog(i8*, i32, i8*, i8*, ...) 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind diff --git a/llvm/test/CodeGen/Thumb/dyn-stackalloc.ll b/llvm/test/CodeGen/Thumb/dyn-stackalloc.ll index c94c904e4cd..c6b5c7b3513 100644 --- a/llvm/test/CodeGen/Thumb/dyn-stackalloc.ll +++ b/llvm/test/CodeGen/Thumb/dyn-stackalloc.ll @@ -61,7 +61,7 @@ define void @t2(%struct.comment* %vc, i8* %tag, i8* %contents) { %tmp9 = call i8* @strcpy( i8* %tmp6, i8* %tag ) %tmp6.len = call i32 @strlen( i8* %tmp6 ) %tmp6.indexed = getelementptr i8, i8* %tmp6, i32 %tmp6.len - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp6.indexed, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str215, i32 0, i32 0), i32 2, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %tmp6.indexed, i8* align 1 getelementptr inbounds ([2 x i8], [2 x i8]* @str215, i32 0, i32 0), i32 2, i1 false) %tmp15 = call i8* @strcat( i8* %tmp6, i8* %contents ) call fastcc void @comment_add( %struct.comment* %vc, i8* %tmp6 ) ret void @@ -73,6 +73,6 @@ declare i8* @strcat(i8*, i8*) declare fastcc void @comment_add(%struct.comment*, i8*) -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind declare i8* @strcpy(i8*, i8*) diff --git a/llvm/test/CodeGen/Thumb/ldm-stm-base-materialization-thumb2.ll b/llvm/test/CodeGen/Thumb/ldm-stm-base-materialization-thumb2.ll index 7901a158a95..17073941878 100644 --- a/llvm/test/CodeGen/Thumb/ldm-stm-base-materialization-thumb2.ll +++ b/llvm/test/CodeGen/Thumb/ldm-stm-base-materialization-thumb2.ll @@ -22,7 +22,7 @@ entry: %2 = load i32*, i32** @b, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %2, i32 1 %3 = bitcast i32* %arrayidx1 to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %3, i32 24, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* align 4 %3, i32 24, i1 false) ret void } @@ -43,7 +43,7 @@ entry: %2 = load i32*, i32** @b, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %2, i32 1 %3 = bitcast i32* %arrayidx1 to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %3, i32 28, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* align 4 %3, i32 28, i1 false) ret void } @@ -64,7 +64,7 @@ entry: %2 = load i32*, i32** @b, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %2, i32 1 %3 = bitcast i32* %arrayidx1 to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %3, i32 32, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* align 4 %3, i32 32, i1 false) ret void } @@ -85,9 +85,9 @@ entry: %2 = load i32*, i32** @b, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %2, i32 1 %3 = bitcast i32* %arrayidx1 to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %3, i32 36, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* align 4 %3, i32 36, i1 false) ret void } ; Function Attrs: nounwind -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #1 diff --git a/llvm/test/CodeGen/Thumb/ldm-stm-base-materialization.ll b/llvm/test/CodeGen/Thumb/ldm-stm-base-materialization.ll index 0be796eb8f8..355fe804ebc 100644 --- a/llvm/test/CodeGen/Thumb/ldm-stm-base-materialization.ll +++ 
b/llvm/test/CodeGen/Thumb/ldm-stm-base-materialization.ll @@ -23,7 +23,7 @@ entry: %2 = load i32*, i32** @b, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %2, i32 1 %3 = bitcast i32* %arrayidx1 to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %3, i32 24, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* align 4 %3, i32 24, i1 false) ret void } @@ -44,7 +44,7 @@ entry: %2 = load i32*, i32** @b, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %2, i32 1 %3 = bitcast i32* %arrayidx1 to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %3, i32 28, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* align 4 %3, i32 28, i1 false) ret void } @@ -65,7 +65,7 @@ entry: %2 = load i32*, i32** @b, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %2, i32 1 %3 = bitcast i32* %arrayidx1 to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %3, i32 32, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* align 4 %3, i32 32, i1 false) ret void } @@ -88,9 +88,9 @@ entry: %2 = load i32*, i32** @b, align 4 %arrayidx1 = getelementptr inbounds i32, i32* %2, i32 1 %3 = bitcast i32* %arrayidx1 to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %3, i32 36, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* align 4 %3, i32 36, i1 false) ret void } ; Function Attrs: nounwind -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #1 diff --git a/llvm/test/CodeGen/Thumb/stack-coloring-without-frame-ptr.ll b/llvm/test/CodeGen/Thumb/stack-coloring-without-frame-ptr.ll index 6678f68c4e8..60a83bb0d66 100644 --- a/llvm/test/CodeGen/Thumb/stack-coloring-without-frame-ptr.ll +++ b/llvm/test/CodeGen/Thumb/stack-coloring-without-frame-ptr.ll @@ -13,7 +13,7 @@ entry: %0 = bitcast %deque* %var3 to i8* %1 = bitcast %iterator* %var1 to i8* call void @llvm.lifetime.start.p0i8(i64 16, i8* %1) nounwind - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %0, i32 16, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* align 4 %0, i32 16, i1 false) call void @llvm.lifetime.end.p0i8(i64 16, i8* %1) nounwind %2 = bitcast %insert_iterator* %var2 to i8* @@ -22,7 +22,7 @@ entry: ret i32 0 } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind diff --git a/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug.ll b/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug.ll index 779e100d419..cf88de6c7cd 100644 --- a/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug.ll +++ b/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug.ll @@ -12,7 +12,7 @@ entry: br i1 undef, label %bb, label %bb6.preheader bb6.preheader: ; preds = %entry - call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* undef, i32 12, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 undef, i8* align 4 undef, i32 12, i1 false) br i1 undef, label %bb15, label %bb13 bb: ; preds = %entry @@ -30,4 +30,4 @@ bb15: ; preds = %bb13, %bb6.preheader ret void } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind diff 
--git a/llvm/test/CodeGen/Thumb2/2012-01-13-CBNZBug.ll b/llvm/test/CodeGen/Thumb2/2012-01-13-CBNZBug.ll index 9121044be4f..e5be8df0863 100644 --- a/llvm/test/CodeGen/Thumb2/2012-01-13-CBNZBug.ll +++ b/llvm/test/CodeGen/Thumb2/2012-01-13-CBNZBug.ll @@ -9,7 +9,7 @@ @lookup_list = external hidden unnamed_addr global %struct.Dict_node_struct*, align 4 -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind define hidden fastcc void @rdictionary_lookup(%struct.Dict_node_struct* %dn, i8* nocapture %s) nounwind ssp { ; CHECK-LABEL: rdictionary_lookup: @@ -78,7 +78,7 @@ if.then5: ; preds = %if.end3 %call6 = tail call fastcc i8* @xalloc(i32 20) %5 = bitcast i8* %call6 to %struct.Dict_node_struct* %6 = bitcast %struct.Dict_node_struct* %dn.tr to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %call6, i8* %6, i32 16, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %call6, i8* align 4 %6, i32 16, i1 false) %7 = load %struct.Dict_node_struct*, %struct.Dict_node_struct** @lookup_list, align 4 %right7 = getelementptr inbounds i8, i8* %call6, i32 16 %8 = bitcast i8* %right7 to %struct.Dict_node_struct** diff --git a/llvm/test/CodeGen/WebAssembly/global.ll b/llvm/test/CodeGen/WebAssembly/global.ll index bb942bee560..e42ddf449c5 100644 --- a/llvm/test/CodeGen/WebAssembly/global.ll +++ b/llvm/test/CodeGen/WebAssembly/global.ll @@ -23,9 +23,9 @@ define i32 @foo() { ; CHECK-NEXT: .result i32{{$}} ; CHECK-NEXT: i32.call $push0=, memcpy@FUNCTION, $0, $1, $2{{$}} ; CHECK-NEXT: return $pop0{{$}} -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) define i8* @call_memcpy(i8* %p, i8* nocapture readonly %q, i32 %n) { - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false) ret i8* %p } diff --git a/llvm/test/CodeGen/WebAssembly/mem-intrinsics.ll b/llvm/test/CodeGen/WebAssembly/mem-intrinsics.ll index 32a7117a1ea..1e28ef3d76a 100644 --- a/llvm/test/CodeGen/WebAssembly/mem-intrinsics.ll +++ b/llvm/test/CodeGen/WebAssembly/mem-intrinsics.ll @@ -5,9 +5,9 @@ target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" target triple = "wasm32-unknown-unknown-wasm" -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) -declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) +declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) ; Test that return values are optimized. 
@@ -15,7 +15,7 @@ declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) ; CHECK: i32.call $push0=, memcpy@FUNCTION, $0, $1, $2{{$}} ; CHECK-NEXT: return $pop0{{$}} define i8* @copy_yes(i8* %dst, i8* %src, i32 %len) { - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %len, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %len, i1 false) ret i8* %dst } @@ -23,7 +23,7 @@ define i8* @copy_yes(i8* %dst, i8* %src, i32 %len) { ; CHECK: i32.call $drop=, memcpy@FUNCTION, $0, $1, $2{{$}} ; CHECK-NEXT: return{{$}} define void @copy_no(i8* %dst, i8* %src, i32 %len) { - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %len, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %len, i1 false) ret void } @@ -31,7 +31,7 @@ define void @copy_no(i8* %dst, i8* %src, i32 %len) { ; CHECK: i32.call $push0=, memmove@FUNCTION, $0, $1, $2{{$}} ; CHECK-NEXT: return $pop0{{$}} define i8* @move_yes(i8* %dst, i8* %src, i32 %len) { - call void @llvm.memmove.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %len, i32 1, i1 false) + call void @llvm.memmove.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %len, i1 false) ret i8* %dst } @@ -39,7 +39,7 @@ define i8* @move_yes(i8* %dst, i8* %src, i32 %len) { ; CHECK: i32.call $drop=, memmove@FUNCTION, $0, $1, $2{{$}} ; CHECK-NEXT: return{{$}} define void @move_no(i8* %dst, i8* %src, i32 %len) { - call void @llvm.memmove.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %len, i32 1, i1 false) + call void @llvm.memmove.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %len, i1 false) ret void } @@ -47,7 +47,7 @@ define void @move_no(i8* %dst, i8* %src, i32 %len) { ; CHECK: i32.call $push0=, memset@FUNCTION, $0, $1, $2{{$}} ; CHECK-NEXT: return $pop0{{$}} define i8* @set_yes(i8* %dst, i8 %src, i32 %len) { - call void @llvm.memset.p0i8.i32(i8* %dst, i8 %src, i32 %len, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dst, i8 %src, i32 %len, i1 false) ret i8* %dst } @@ -55,7 +55,7 @@ define i8* @set_yes(i8* %dst, i8 %src, i32 %len) { ; CHECK: i32.call $drop=, memset@FUNCTION, $0, $1, $2{{$}} ; CHECK-NEXT: return{{$}} define void @set_no(i8* %dst, i8 %src, i32 %len) { - call void @llvm.memset.p0i8.i32(i8* %dst, i8 %src, i32 %len, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %dst, i8 %src, i32 %len, i1 false) ret void } @@ -70,8 +70,8 @@ entry: %b = alloca [2048 x i8], align 16 %0 = getelementptr inbounds [2048 x i8], [2048 x i8]* %a, i32 0, i32 0 %1 = getelementptr inbounds [2048 x i8], [2048 x i8]* %b, i32 0, i32 0 - call void @llvm.memset.p0i8.i32(i8* %0, i8 256, i32 1024, i32 16, i1 false) - call void @llvm.memset.p0i8.i32(i8* %1, i8 256, i32 1024, i32 16, i1 false) + call void @llvm.memset.p0i8.i32(i8* align 16 %0, i8 256, i32 1024, i1 false) + call void @llvm.memset.p0i8.i32(i8* align 16 %1, i8 256, i32 1024, i1 false) ret void } @@ -93,7 +93,7 @@ bb5: br i1 %tmp6, label %bb7, label %bb8 bb7: - call void @llvm.memset.p0i8.i32(i8* %arg, i8 %arg1, i32 %arg2, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %arg, i8 %arg1, i32 %arg2, i1 false) br label %bb11 bb8: @@ -124,7 +124,7 @@ bb5: br i1 %tmp6, label %bb7, label %bb8 bb7: - call void @llvm.memset.p0i8.i32(i8* %arg, i8 %arg1, i32 %arg2, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %arg, i8 %arg1, i32 %arg2, i1 false) br label %bb11 bb8: diff --git a/llvm/test/CodeGen/X86/2007-10-15-CoalescerCrash.ll b/llvm/test/CodeGen/X86/2007-10-15-CoalescerCrash.ll index 65e5ed76213..5cdf4dec3c5 100644 --- 
a/llvm/test/CodeGen/X86/2007-10-15-CoalescerCrash.ll +++ b/llvm/test/CodeGen/X86/2007-10-15-CoalescerCrash.ll @@ -224,7 +224,7 @@ declare void @fancy_abort(i8*, i32, i8*) declare i8* @pool_alloc(%struct.alloc_pool_def*) -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) declare void @link_block(%struct.basic_block_def*, %struct.basic_block_def*) diff --git a/llvm/test/CodeGen/X86/2009-01-25-NoSSE.ll b/llvm/test/CodeGen/X86/2009-01-25-NoSSE.ll index 58bce75fc73..6d390b71114 100644 --- a/llvm/test/CodeGen/X86/2009-01-25-NoSSE.ll +++ b/llvm/test/CodeGen/X86/2009-01-25-NoSSE.ll @@ -12,11 +12,11 @@ define void @foo() nounwind { entry: %termios = alloca %struct.ktermios, align 8 %termios1 = bitcast %struct.ktermios* %termios to i8* - call void @llvm.memset.p0i8.i64(i8* %termios1, i8 0, i64 44, i32 8, i1 false) + call void @llvm.memset.p0i8.i64(i8* align 8 %termios1, i8 0, i64 44, i1 false) call void @bar(%struct.ktermios* %termios) nounwind ret void } declare void @bar(%struct.ktermios*) -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind diff --git a/llvm/test/CodeGen/X86/2009-11-16-UnfoldMemOpBug.ll b/llvm/test/CodeGen/X86/2009-11-16-UnfoldMemOpBug.ll index 1cfd108db65..b58ee5be82e 100644 --- a/llvm/test/CodeGen/X86/2009-11-16-UnfoldMemOpBug.ll +++ b/llvm/test/CodeGen/X86/2009-11-16-UnfoldMemOpBug.ll @@ -17,7 +17,7 @@ bb1: ; CHECK: movups %xmm0, 12(%rsp) ; CHECK: movaps %xmm1, (%rsp) %tmp2 = phi i32 [ %tmp3, %bb1 ], [ 0, %entry ] - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* getelementptr inbounds ([28 x i8], [28 x i8]* @str, i64 0, i64 0), i64 28, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* getelementptr inbounds ([28 x i8], [28 x i8]* @str, i64 0, i64 0), i64 28, i1 false) %tmp3 = add i32 %tmp2, 1 %tmp4 = icmp eq i32 %tmp3, %count br i1 %tmp4, label %bb2, label %bb1 @@ -26,4 +26,4 @@ bb2: ret void } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind diff --git a/llvm/test/CodeGen/X86/2010-04-08-CoalescerBug.ll b/llvm/test/CodeGen/X86/2010-04-08-CoalescerBug.ll index c3dfbfc15ec..6c9c743eed7 100644 --- a/llvm/test/CodeGen/X86/2010-04-08-CoalescerBug.ll +++ b/llvm/test/CodeGen/X86/2010-04-08-CoalescerBug.ll @@ -19,8 +19,8 @@ entry: %tmp4 = getelementptr inbounds %struct.FC, %struct.FC* %tmp3, i64 0, i32 1, i64 0 %tmp5 = bitcast [32 x i32]* %BitValueArray to i8* %tmp6 = bitcast i32* %tmp4 to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp5, i8* %tmp6, i64 128, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %tmp5, i8* align 4 %tmp6, i64 128, i1 false) unreachable } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind diff --git a/llvm/test/CodeGen/X86/2010-04-21-CoalescerBug.ll b/llvm/test/CodeGen/X86/2010-04-21-CoalescerBug.ll index d5987645cfc..46dedb48ff1 100644 --- a/llvm/test/CodeGen/X86/2010-04-21-CoalescerBug.ll +++ b/llvm/test/CodeGen/X86/2010-04-21-CoalescerBug.ll @@ -8,8 +8,8 @@ define void @t(%struct.CMTimeMapping* noalias nocapture sret %agg.result) nounwind optsize ssp { entry: %agg.result1 = bitcast %struct.CMTimeMapping* %agg.result to i8* ; <i8*> [#uses=1] - tail call void 
@llvm.memcpy.p0i8.p0i8.i64(i8* %agg.result1, i8* null, i64 96, i32 4, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %agg.result1, i8* align 4 null, i64 96, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind diff --git a/llvm/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll b/llvm/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll index ffb51572a30..dd7c3fa571c 100644 --- a/llvm/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll +++ b/llvm/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll @@ -26,7 +26,7 @@ bb: ; CHECK: rep;stosl %tmp5 = bitcast i32* %tmp4 to i8* - call void @llvm.memset.p0i8.i64(i8* %tmp5, i8 0, i64 84, i32 4, i1 false) + call void @llvm.memset.p0i8.i64(i8* align 4 %tmp5, i8 0, i64 84, i1 false) %tmp6 = getelementptr inbounds %struct.type, %struct.type* %s, i32 0, i32 62 store i32* null, i32** %tmp6, align 8 br label %bb1 @@ -36,4 +36,4 @@ bb1: ret i32 42 } -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind diff --git a/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll b/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll index 9e33d2bf6ac..3a5942513e8 100644 --- a/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll +++ b/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll @@ -2,7 +2,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" target triple = "x86_64-apple-darwin10.4" -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind define fastcc i32 @cli_magic_scandesc(i8* %in) nounwind ssp { entry: @@ -12,7 +12,7 @@ entry: %d = load i8, i8* %b, align 8 %e = load i8, i8* %c, align 8 %f = bitcast [64 x i8]* %a to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %f, i8* %in, i64 64, i32 8, i1 false) nounwind + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %f, i8* align 8 %in, i64 64, i1 false) nounwind store i8 %d, i8* %b, align 8 store i8 %e, i8* %c, align 8 ret i32 0 diff --git a/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll b/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll index 20615afdfa1..97a33893fa0 100644 --- a/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll +++ b/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll @@ -14,7 +14,7 @@ target triple = "i386-apple-macosx10.7" @Exception = external unnamed_addr constant { i8*, i8* } -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind define void @f(i32* nocapture %arg, i32* nocapture %arg1, i32* nocapture %arg2, i32* nocapture %arg3, i32 %arg4, i32 %arg5) optsize ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { bb: @@ -85,7 +85,7 @@ bb41: ; preds = %bb38 to label %bb42 unwind label %bb20 bb42: ; preds = %bb41 - tail call void @llvm.memset.p0i8.i32(i8* %tmp32, i8 0, i32 %tmp9, i32 1, i1 false) nounwind + tail call void @llvm.memset.p0i8.i32(i8* %tmp32, i8 0, i32 %tmp9, i1 false) nounwind br i1 %tmp35, label %bb43, label %bb45 bb43: ; preds = %bb42 @@ -101,7 +101,7 @@ bb45: ; preds = %bb57, %bb42 br i1 %tmp47, label %bb48, label %bb59 bb48: ; 
preds = %bb45 - tail call void @llvm.memset.p0i8.i32(i8* %tmp32, i8 0, i32 %tmp9, i32 1, i1 false) nounwind + tail call void @llvm.memset.p0i8.i32(i8* %tmp32, i8 0, i32 %tmp9, i1 false) nounwind br i1 %tmp36, label %bb49, label %bb57 bb49: ; preds = %bb49, %bb48 @@ -120,7 +120,7 @@ bb57: ; preds = %bb49, %bb48 bb59: ; preds = %bb45 %tmp60 = ashr i32 %tmp46, 31 - tail call void @llvm.memset.p0i8.i32(i8* null, i8 0, i32 %tmp37, i32 1, i1 false) nounwind + tail call void @llvm.memset.p0i8.i32(i8* null, i8 0, i32 %tmp37, i1 false) nounwind br i1 %tmp36, label %bb61, label %bb67 bb61: ; preds = %bb61, %bb59 diff --git a/llvm/test/CodeGen/X86/alignment-2.ll b/llvm/test/CodeGen/X86/alignment-2.ll index a38a3626702..b191b986232 100644 --- a/llvm/test/CodeGen/X86/alignment-2.ll +++ b/llvm/test/CodeGen/X86/alignment-2.ll @@ -23,8 +23,8 @@ bb: ; CHECK-NOT: movaps {{[0-9]*}}(%{{[a-z]*}}), {{%xmm[0-9]}} %myopt = alloca %struct.printQueryOpt, align 4 %tmp = bitcast %struct.printQueryOpt* %myopt to i8* - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* bitcast (%struct.printQueryOpt* getelementptr inbounds (%struct._psqlSettings, %struct._psqlSettings* @pset, i32 0, i32 4) to i8*), i32 76, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %tmp, i8* align 4 bitcast (%struct.printQueryOpt* getelementptr inbounds (%struct._psqlSettings, %struct._psqlSettings* @pset, i32 0, i32 4) to i8*), i32 76, i1 false) ret i8 0 } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind diff --git a/llvm/test/CodeGen/X86/bug26810.ll b/llvm/test/CodeGen/X86/bug26810.ll index 816bc8224d8..263008131e7 100644 --- a/llvm/test/CodeGen/X86/bug26810.ll +++ b/llvm/test/CodeGen/X86/bug26810.ll @@ -114,7 +114,7 @@ loop.exit: ; preds = %for.body.i define void @init() local_unnamed_addr #1 { entry: - call void @llvm.memset.p0i8.i32(i8* bitcast ([8 x <2 x double>]* @"\01?v@@3PAU__m128d@@A" to i8*), i8 0, i32 128, i32 16, i1 false) + call void @llvm.memset.p0i8.i32(i8* align 16 bitcast ([8 x <2 x double>]* @"\01?v@@3PAU__m128d@@A" to i8*), i8 0, i32 128, i1 false) %call.i = tail call i64 @_time64(i64* null) %conv = trunc i64 %call.i to i32 tail call void @srand(i32 %conv) @@ -284,7 +284,7 @@ declare i32 @fclose(%struct._iobuf* nocapture) local_unnamed_addr #5 declare i64 @_time64(i64*) local_unnamed_addr #4 ; Function Attrs: argmemonly nounwind -declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i32, i1) #6 +declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) #6 attributes #0 = { norecurse "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="pentium4" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="pentium4" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" 
} diff --git a/llvm/test/CodeGen/X86/darwin-bzero.ll b/llvm/test/CodeGen/X86/darwin-bzero.ll index 3d03ec677a0..60032665255 100644 --- a/llvm/test/CodeGen/X86/darwin-bzero.ll +++ b/llvm/test/CodeGen/X86/darwin-bzero.ll @@ -3,12 +3,12 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck -check-prefixes=CHECK,NOBZERO %s ; RUN: llc < %s -mtriple=x86_64-apple-ios10.0-simulator | FileCheck -check-prefixes=CHECK,NOBZERO %s -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind ; CHECK-LABEL: foo: ; BZERO: {{calll|callq}} ___bzero ; NOBZERO-NOT: bzero define void @foo(i8* %p, i32 %len) { - call void @llvm.memset.p0i8.i32(i8* %p, i8 0, i32 %len, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %p, i8 0, i32 %len, i1 false) ret void } diff --git a/llvm/test/CodeGen/X86/fast-isel-call.ll b/llvm/test/CodeGen/X86/fast-isel-call.ll index 3f394514e2c..2f3f4151424 100644 --- a/llvm/test/CodeGen/X86/fast-isel-call.ll +++ b/llvm/test/CodeGen/X86/fast-isel-call.ll @@ -31,10 +31,10 @@ define void @test2(%struct.s* %d) nounwind { ; CHECK: movl %eax, 8(%esp) } -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind define void @test3(i8* %a) { - call void @llvm.memset.p0i8.i32(i8* %a, i8 0, i32 100, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %a, i8 0, i32 100, i1 false) ret void ; CHECK-LABEL: test3: ; CHECK: movl {{.*}}, (%esp) @@ -43,10 +43,10 @@ define void @test3(i8* %a) { ; CHECK: calll {{.*}}memset } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind define void @test4(i8* %a, i8* %b) { - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %b, i32 100, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %b, i32 100, i1 false) ret void ; CHECK-LABEL: test4: ; CHECK: movl {{.*}}, (%esp) diff --git a/llvm/test/CodeGen/X86/fast-isel-deadcode.ll b/llvm/test/CodeGen/X86/fast-isel-deadcode.ll index 5381dc4858a..b7eab1a45c6 100644 --- a/llvm/test/CodeGen/X86/fast-isel-deadcode.ll +++ b/llvm/test/CodeGen/X86/fast-isel-deadcode.ll @@ -131,10 +131,10 @@ func.exit: ; preds = %if.then.i, %if.else.i, %if.end.5.i store { <2 x float>, float } %.fca.1.insert.i, { <2 x float>, float }* %tmp, align 8 %2 = bitcast { <2 x float>, float }* %tmp to i8* %3 = bitcast %struct.FVector* %ref.tmp to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* %2, i64 12, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %3, i8* align 4 %2, i64 12, i1 false) %4 = bitcast %struct.FVector* %v to i8* %5 = bitcast %struct.FVector* %ref.tmp to i8* - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* %5, i64 12, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %4, i8* align 4 %5, i64 12, i1 false) %6 = bitcast %struct.FVector* %v to i8* call void @llvm.lifetime.end.p0i8(i64 12, i8* %6) nounwind ret i32 0 @@ -142,6 +142,6 @@ func.exit: ; preds = %if.then.i, %if.else.i, %if.end.5.i declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) argmemonly nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) argmemonly nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) argmemonly nounwind declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) argmemonly nounwind 
diff --git a/llvm/test/CodeGen/X86/fast-isel-x86-64.ll b/llvm/test/CodeGen/X86/fast-isel-x86-64.ll index c87353ed1f5..7fb2670e6d1 100644 --- a/llvm/test/CodeGen/X86/fast-isel-x86-64.ll +++ b/llvm/test/CodeGen/X86/fast-isel-x86-64.ll @@ -172,11 +172,11 @@ entry: ; CHECK: callq } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1) ; rdar://9289488 - fast-isel shouldn't bail out on llvm.memcpy define void @test15(i8* %a, i8* %b) nounwind { - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 4, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a, i8* align 4 %b, i64 4, i1 false) ret void ; CHECK-LABEL: test15: ; CHECK-NEXT: movl (%rsi), %eax diff --git a/llvm/test/CodeGen/X86/force-align-stack-alloca.ll b/llvm/test/CodeGen/X86/force-align-stack-alloca.ll index 8d42680e199..e9f38e9af62 100644 --- a/llvm/test/CodeGen/X86/force-align-stack-alloca.ll +++ b/llvm/test/CodeGen/X86/force-align-stack-alloca.ll @@ -67,10 +67,10 @@ entry: if.then: %0 = alloca i8, i32 %i - call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 %i, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 %i, i1 false) %call = call i32 @f(i8* %0) %conv = sext i32 %call to i64 ret i64 %conv } -declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i32, i1) nounwind +declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i1) nounwind diff --git a/llvm/test/CodeGen/X86/immediate_merging.ll b/llvm/test/CodeGen/X86/immediate_merging.ll index e1c29191498..a6e36c73467 100644 --- a/llvm/test/CodeGen/X86/immediate_merging.ll +++ b/llvm/test/CodeGen/X86/immediate_merging.ll @@ -94,7 +94,7 @@ entry: ret i32 0 } -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) #1 +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) #1 @AA = common global [100 x i8] zeroinitializer, align 1 @@ -121,6 +121,6 @@ define void @foomemset() optsize { ; X64-NEXT: movq %rax, {{.*}}(%rip) ; X64-NEXT: retq entry: - call void @llvm.memset.p0i8.i32(i8* getelementptr inbounds ([100 x i8], [100 x i8]* @AA, i32 0, i32 0), i8 33, i32 24, i32 1, i1 false) + call void @llvm.memset.p0i8.i32(i8* getelementptr inbounds ([100 x i8], [100 x i8]* @AA, i32 0, i32 0), i8 33, i32 24, i1 false) ret void } diff --git a/llvm/test/CodeGen/X86/immediate_merging64.ll b/llvm/test/CodeGen/X86/immediate_merging64.ll index 57f5b3b79d9..12be8bdff83 100644 --- a/llvm/test/CodeGen/X86/immediate_merging64.ll +++ b/llvm/test/CodeGen/X86/immediate_merging64.ll @@ -19,7 +19,7 @@ define i1 @imm_multiple_users(i64 %a, i64* %b) optsize { ret i1 %cmp } -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) ; Inlined memsets requiring multiple same-sized stores should be lowered using ; the register, rather than immediate, form of stores when optimizing for @@ -31,6 +31,6 @@ define void @memset_zero(i8* noalias nocapture %D) optsize { ; CHECK-NEXT: movq %rax, 7(%rdi) ; CHECK-NEXT: movq %rax, (%rdi) ; CHECK-NEXT: retq - tail call void @llvm.memset.p0i8.i64(i8* %D, i8 0, i64 15, i32 1, i1 false) + tail call void @llvm.memset.p0i8.i64(i8* %D, i8 0, i64 15, i1 false) ret void } diff --git a/llvm/test/CodeGen/X86/lea-opt-memop-check-1.ll b/llvm/test/CodeGen/X86/lea-opt-memop-check-1.ll index 6ad55d42868..00d47fae25a 100644 --- a/llvm/test/CodeGen/X86/lea-opt-memop-check-1.ll +++ b/llvm/test/CodeGen/X86/lea-opt-memop-check-1.ll @@ -6,7 +6,7 @@ target datalayout = 
"e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32" target triple = "i686-pc-windows-msvc" -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) argmemonly nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) argmemonly nounwind declare <2 x i64> @_mm_xor_si128(<2 x i64>, <2 x i64>) optsize declare <2 x i64> @llvm.x86.pclmulqdq(<2 x i64>, <2 x i64>, i8) nounwind readnone declare <4 x float> @_mm_castsi128_ps(<2 x i64>) optsize @@ -15,7 +15,7 @@ declare <4 x float> @_mm_castsi128_ps(<2 x i64>) optsize define void @test1(i8* nocapture readonly %src, i32 %len) #0 { %parts = alloca [4 x i32], align 4 %part0 = bitcast [4 x i32]* %parts to i8* - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %part0, i8* %src, i32 %len, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %part0, i8* %src, i32 %len, i1 false) %call0 = tail call <2 x i64> @_mm_xor_si128(<2 x i64> undef, <2 x i64> <i64 -9187201950435737472, i64 -9187201950435737472>) %tmp0 = tail call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> undef, <2 x i64> <i64 7631803798, i64 5708721108>, i8 16) %call1 = tail call <4 x float> @_mm_castsi128_ps(<2 x i64> %tmp0) diff --git a/llvm/test/CodeGen/X86/load-slice.ll b/llvm/test/CodeGen/X86/load-slice.ll index 8803512eec0..3cbb70bd70d 100644 --- a/llvm/test/CodeGen/X86/load-slice.ll +++ b/llvm/test/CodeGen/X86/load-slice.ll @@ -70,7 +70,7 @@ entry: } ; Function Attrs: nounwind -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1 +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1 ; Function Attrs: nounwind declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) diff --git a/llvm/test/CodeGen/X86/lsr-normalization.ll b/llvm/test/CodeGen/X86/lsr-normalization.ll index a8e3ab1ae99..f56256aa468 100644 --- a/llvm/test/CodeGen/X86/lsr-normalization.ll +++ b/llvm/test/CodeGen/X86/lsr-normalization.ll @@ -21,7 +21,7 @@ define i32 @main(i32 %arg, i8** nocapture %arg1) nounwind { bb: %tmp = alloca %0, align 8 ; <%0*> [#uses=11] %tmp2 = bitcast %0* %tmp to i8* ; <i8*> [#uses=1] - call void @llvm.memset.p0i8.i64(i8* %tmp2, i8 0, i64 16, i32 8, i1 false) nounwind + call void @llvm.memset.p0i8.i64(i8* align 8 %tmp2, i8 0, i64 16, i1 false) nounwind %tmp3 = getelementptr inbounds %0, %0* %tmp, i64 0, i32 0 ; <%0**> [#uses=3] store %0* %tmp, %0** %tmp3 %tmp4 = getelementptr inbounds %0, %0* %tmp, i64 0, i32 1 ; <%0**> [#uses=1] @@ -98,7 +98,7 @@ declare void @_ZNSt15_List_node_base4hookEPS_(%0*, %0*) declare noalias i8* @_Znwm(i64) -declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind +declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind declare void @_ZdlPv(i8*) nounwind diff --git a/llvm/test/CodeGen/X86/mcu-abi.ll b/llvm/test/CodeGen/X86/mcu-abi.ll index 1cc277c863f..baafea26c22 100644 --- a/llvm/test/CodeGen/X86/mcu-abi.ll +++ b/llvm/test/CodeGen/X86/mcu-abi.ll @@ -70,7 +70,7 @@ define void @ret_large_struct(%struct.st12_t* noalias nocapture sret %agg.result entry: %0 = bitcast %struct.st12_t* %agg.result to i8* %1 = bitcast %struct.st12_t* %r to i8* - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 48, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 48, i1 false) ret void } @@ -104,7 +104,7 @@ define i32 @test_fp128(fp128* %ptr) #0 { ret i32 %ret } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #1 +declare void 
@llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #1 ; CHECK-LABEL: test_alignment_d: ; CHECK-NOT: andl {{.+}}, %esp diff --git a/llvm/test/CodeGen/X86/mem-intrin-base-reg.ll b/llvm/test/CodeGen/X86/mem-intrin-base-reg.ll index 59a2207b470..1a45ed57b26 100644 --- a/llvm/test/CodeGen/X86/mem-intrin-base-reg.ll +++ b/llvm/test/CodeGen/X86/mem-intrin-base-reg.ll @@ -9,12 +9,12 @@ target triple = "i686-pc-windows-msvc" ; which all of the X86 string instructions use. declare void @escape_vla_and_icmp(i8*, i1 zeroext) -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) -declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) +declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) define i32 @memcpy_novla_vector(<4 x i32>* %vp0, i8* %a, i8* %b, i32 %n, i1 zeroext %cond) { %foo = alloca <4 x i32>, align 16 - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %b, i32 128, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %a, i8* align 4 %b, i32 128, i1 false) br i1 %cond, label %spill_vectors, label %no_vectors no_vectors: @@ -40,7 +40,7 @@ spill_vectors: define i32 @memcpy_vla_vector(<4 x i32>* %vp0, i8* %a, i8* %b, i32 %n, i1 zeroext %cond) { %foo = alloca <4 x i32>, align 16 - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %b, i32 128, i32 4, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %a, i8* align 4 %b, i32 128, i1 false) br i1 %cond, label %spill_vectors, label %no_vectors no_vectors: @@ -69,7 +69,7 @@ spill_vectors: define i32 @memset_vla_vector(<4 x i32>* %vp0, i8* %a, i32 %n, i1 zeroext %cond) { %foo = alloca <4 x i32>, align 16 - call void @llvm.memset.p0i8.i32(i8* %a, i8 42, i32 128, i32 4, i1 false) + call void @llvm.memset.p0i8.i32(i8* align 4 %a, i8 42, i32 128, i1 false) br i1 %cond, label %spill_vectors, label %no_vectors no_vectors: diff --git a/llvm/test/CodeGen/X86/memcpy-2.ll b/llvm/test/CodeGen/X86/memcpy-2.ll index 040dd153d64..6deeaa69853 100644 --- a/llvm/test/CodeGen/X86/memcpy-2.ll +++ b/llvm/test/CodeGen/X86/memcpy-2.ll @@ -48,7 +48,7 @@ entry: ; X86-64: movq $0 %tmp1 = alloca [25 x i8] %tmp2 = bitcast [25 x i8]* %tmp1 to i8* - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp2, i8* getelementptr inbounds ([25 x i8], [25 x i8]* @.str, i32 0, i32 0), i32 25, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %tmp2, i8* align 1 getelementptr inbounds ([25 x i8], [25 x i8]* @.str, i32 0, i32 0), i32 25, i1 false) unreachable } @@ -86,7 +86,7 @@ entry: ; X86-64: movaps %xmm0, (%rdi) %tmp2 = bitcast %struct.s0* %a to i8* ; <i8*> [#uses=1] %tmp3 = bitcast %struct.s0* %b to i8* ; <i8*> [#uses=1] - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp2, i8* %tmp3, i32 16, i32 16, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %tmp2, i8* align 16 %tmp3, i32 16, i1 false) ret void } @@ -135,7 +135,7 @@ entry: ; X86-64: movq %rax, (%rdi) %tmp2 = bitcast %struct.s0* %a to i8* ; <i8*> [#uses=1] %tmp3 = bitcast %struct.s0* %b to i8* ; <i8*> [#uses=1] - tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp2, i8* %tmp3, i32 16, i32 8, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %tmp2, i8* align 8 %tmp3, i32 16, i1 false) ret void } @@ -202,8 +202,8 @@ entry: %tmp1 = alloca [30 x i8] %tmp2 = bitcast [30 x i8]* %tmp1 to i8* - call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp2, i8* getelementptr inbounds ([30 x i8], [30 x i8]* @.str2, i32 
0, i32 0), i32 30, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %tmp2, i8* align 1 getelementptr inbounds ([30 x i8], [30 x i8]* @.str2, i32 0, i32 0), i32 30, i1 false) unreachable } -declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind diff --git a/llvm/test/CodeGen/X86/memcpy-from-string.ll b/llvm/test/CodeGen/X86/memcpy-from-string.ll index d62d9e20254..8e2444ebe0e 100644 --- a/llvm/test/CodeGen/X86/memcpy-from-string.ll +++ b/llvm/test/CodeGen/X86/memcpy-from-string.ll @@ -17,8 +17,8 @@ target triple = "x86_64-unknown-linux-gnu" ; CHECK: movw $15212, 4(%rdi) ; CHECK: movl $1802117222, (%rdi) define void @foo(i8* %tmp2) { - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp2, i8* getelementptr inbounds ([10 x i8], [10 x i8]* @0, i64 0, i64 3), i64 7, i32 1, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp2, i8* getelementptr inbounds ([10 x i8], [10 x i8]* @0, i64 0, i64 3), i64 7, i1 false) ret void } -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) diff --git a/llvm/test/CodeGen/X86/memcpy.ll b/llvm/test/CodeGen/X86/memcpy.ll index 4351014192b..87e350a9039 100644 --- a/llvm/test/CodeGen/X86/memcpy.ll +++ b/llvm/test/CodeGen/X86/memcpy.ll @@ -1,14 +1,14 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core2 | FileCheck %s -check-prefix=LINUX ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core2 | FileCheck %s -check-prefix=DARWIN -declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind -declare void @llvm.memcpy.p256i8.p256i8.i64(i8 addrspace(256)* nocapture, i8 addrspace(256)* nocapture, i64, i32, i1) nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind +declare void @llvm.memcpy.p256i8.p256i8.i64(i8 addrspace(256)* nocapture, i8 addrspace(256)* nocapture, i64, i1) nounwind ; Variable memcpy's should lower to calls. 
define i8* @test1(i8* %a, i8* %b, i64 %n) nounwind { entry: - tail call void @llvm.memcpy.p0i8.p0i8.i64( i8* %a, i8* %b, i64 %n, i32 1, i1 0 ) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 %n, i1 0 ) ret i8* %a ; LINUX-LABEL: test1: @@ -20,7 +20,7 @@ define i8* @test2(i64* %a, i64* %b, i64 %n) nounwind { entry: %tmp14 = bitcast i64* %a to i8* %tmp25 = bitcast i64* %b to i8* - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp14, i8* %tmp25, i64 %n, i32 8, i1 0 ) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %tmp14, i8* align 8 %tmp25, i64 %n, i1 0 ) ret i8* %tmp14 ; LINUX-LABEL: test2: @@ -35,7 +35,7 @@ entry: ; rdar://8821501 define void @test3(i8* nocapture %A, i8* nocapture %B) nounwind optsize noredzone { entry: - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i1 false) ret void ; LINUX-LABEL: test3: ; LINUX: memcpy @@ -61,7 +61,7 @@ entry: } define void @test3_minsize(i8* nocapture %A, i8* nocapture %B) nounwind minsize noredzone { - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i1 false) ret void ; LINUX-LABEL: test3_minsize: ; LINUX: memcpy @@ -71,7 +71,7 @@ define void @test3_minsize(i8* nocapture %A, i8* nocapture %B) nounwind minsize } define void @test3_minsize_optsize(i8* nocapture %A, i8* nocapture %B) nounwind optsize minsize noredzone { - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i1 false) ret void ; LINUX-LABEL: test3_minsize_optsize: ; LINUX: memcpy @@ -83,7 +83,7 @@ define void @test3_minsize_optsize(i8* nocapture %A, i8* nocapture %B) nounwind ; Large constant memcpy's should be inlined when not optimizing for size. define void @test4(i8* nocapture %A, i8* nocapture %B) nounwind noredzone { entry: - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i1 false) ret void ; LINUX-LABEL: test4: ; LINUX: movq @@ -105,7 +105,7 @@ entry: define void @test5(i8* nocapture %C) nounwind uwtable ssp { entry: - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([30 x i8], [30 x i8]* @.str, i64 0, i64 0), i64 16, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([30 x i8], [30 x i8]* @.str, i64 0, i64 0), i64 16, i1 false) ret void ; DARWIN-LABEL: test5: @@ -122,7 +122,7 @@ entry: ; DARWIN: test6 ; DARWIN: movw $0, 8 ; DARWIN: movq $120, 0 - tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* null, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str2, i64 0, i64 0), i64 10, i32 1, i1 false) + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* null, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str2, i64 0, i64 0), i64 10, i1 false) ret void } @@ -136,14 +136,14 @@ define void @PR15348(i8* %a, i8* %b) { ; LINUX: movq ; LINUX: movq ; LINUX: movq - call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 17, i32 0, i1 false) + call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 17, i1 false) ret void } ; Memcpys from / to address space 256 should be lowered to appropriate loads / ; stores if small enough. 
define void @addrspace256(i8 addrspace(256)* %a, i8 addrspace(256)* %b) nounwind {
- tail call void @llvm.memcpy.p256i8.p256i8.i64(i8 addrspace(256)* %a, i8 addrspace(256)* %b, i64 16, i32 8, i1 false)
+ tail call void @llvm.memcpy.p256i8.p256i8.i64(i8 addrspace(256)* align 8 %a, i8 addrspace(256)* align 8 %b, i64 16, i1 false)
ret void
; LINUX-LABEL: addrspace256:
; LINUX: movq %gs:
diff --git a/llvm/test/CodeGen/X86/memset-2.ll b/llvm/test/CodeGen/X86/memset-2.ll
index e94432884b1..a0511f2804a 100644
--- a/llvm/test/CodeGen/X86/memset-2.ll
+++ b/llvm/test/CodeGen/X86/memset-2.ll
@@ -11,7 +11,7 @@ define fastcc void @t1() nounwind {
; CHECK-NEXT: calll _memset
; CHECK-NEXT: addl $16, %esp
entry:
- call void @llvm.memset.p0i8.i32(i8* null, i8 0, i32 188, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* null, i8 0, i32 188, i1 false)
unreachable
}
@@ -23,11 +23,11 @@ define fastcc void @t2(i8 signext %c) nounwind {
; CHECK-NEXT: movl $76, {{[0-9]+}}(%esp)
; CHECK-NEXT: calll _memset
entry:
- call void @llvm.memset.p0i8.i32(i8* undef, i8 %c, i32 76, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* undef, i8 %c, i32 76, i1 false)
unreachable
}
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
define void @t3(i8* nocapture %s, i8 %a) nounwind {
; CHECK-LABEL: t3:
@@ -39,7 +39,7 @@ define void @t3(i8* nocapture %s, i8 %a) nounwind {
; CHECK-NEXT: movl %ecx, (%eax)
; CHECK-NEXT: retl
entry:
- tail call void @llvm.memset.p0i8.i32(i8* %s, i8 %a, i32 8, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %s, i8 %a, i32 8, i1 false)
ret void
}
@@ -56,6 +56,6 @@ define void @t4(i8* nocapture %s, i8 %a) nounwind {
; CHECK-NEXT: movb %cl, 14(%eax)
; CHECK-NEXT: retl
entry:
- tail call void @llvm.memset.p0i8.i32(i8* %s, i8 %a, i32 15, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %s, i8 %a, i32 15, i1 false)
ret void
}
diff --git a/llvm/test/CodeGen/X86/memset-3.ll b/llvm/test/CodeGen/X86/memset-3.ll
index 455e6756013..47c7ab99d29 100644
--- a/llvm/test/CodeGen/X86/memset-3.ll
+++ b/llvm/test/CodeGen/X86/memset-3.ll
@@ -5,8 +5,8 @@ define void @t() nounwind ssp {
entry:
%buf = alloca [512 x i8], align 1
%ptr = getelementptr inbounds [512 x i8], [512 x i8]* %buf, i32 0, i32 0
- call void @llvm.memset.p0i8.i32(i8* %ptr, i8 undef, i32 512, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* %ptr, i8 undef, i32 512, i1 false)
unreachable
}
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/memset-nonzero.ll b/llvm/test/CodeGen/X86/memset-nonzero.ll
index cc434bf18ab..37b98b40192 100644
--- a/llvm/test/CodeGen/X86/memset-nonzero.ll
+++ b/llvm/test/CodeGen/X86/memset-nonzero.ll
@@ -225,7 +225,7 @@ define void @memset_16_nonconst_bytes(i8* %x, i8 %c) {
; AVX2-NEXT: vpbroadcastb %xmm0, %xmm0
; AVX2-NEXT: vmovdqu %xmm0, (%rdi)
; AVX2-NEXT: retq
- tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 16, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 16, i1 false)
ret void
}
@@ -268,7 +268,7 @@ define void @memset_32_nonconst_bytes(i8* %x, i8 %c) {
; AVX2-NEXT: vmovdqu %ymm0, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
- tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 32, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 32, i1 false)
ret void
}
@@ -319,7 +319,7 @@ define void @memset_64_nonconst_bytes(i8* %x, i8 %c) {
; AVX2-NEXT: vmovdqu %ymm0, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
- tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 64, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 64, i1 false)
ret void
}
@@ -386,7 +386,7 @@ define void @memset_128_nonconst_bytes(i8* %x, i8 %c) {
; AVX2-NEXT: vmovdqu %ymm0, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
- tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 128, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 128, i1 false)
ret void
}
@@ -451,9 +451,9 @@ define void @memset_256_nonconst_bytes(i8* %x, i8 %c) {
; AVX2-NEXT: vmovdqu %ymm0, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
- tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 256, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 256, i1 false)
ret void
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #1
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #1
diff --git a/llvm/test/CodeGen/X86/memset-sse-stack-realignment.ll b/llvm/test/CodeGen/X86/memset-sse-stack-realignment.ll
index d77a7ed3816..68fa15e3398 100644
--- a/llvm/test/CodeGen/X86/memset-sse-stack-realignment.ll
+++ b/llvm/test/CodeGen/X86/memset-sse-stack-realignment.ll
@@ -9,7 +9,7 @@ define void @test1(i32 %t) nounwind {
%tmp1210 = alloca i8, i32 32, align 4
- call void @llvm.memset.p0i8.i64(i8* %tmp1210, i8 0, i64 32, i32 4, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 4 %tmp1210, i8 0, i64 32, i1 false)
%x = alloca i8, i32 %t
call void @dummy(i8* %x)
ret void
@@ -42,7 +42,7 @@ define void @test1(i32 %t) nounwind {
define void @test2(i32 %t) nounwind {
%tmp1210 = alloca i8, i32 16, align 4
- call void @llvm.memset.p0i8.i64(i8* %tmp1210, i8 0, i64 16, i32 4, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 4 %tmp1210, i8 0, i64 16, i1 false)
%x = alloca i8, i32 %t
call void @dummy(i8* %x)
ret void
@@ -74,4 +74,4 @@ define void @test2(i32 %t) nounwind {
declare void @dummy(i8*)
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/memset.ll b/llvm/test/CodeGen/X86/memset.ll
index c9d8fbd58aa..6d5c4cd0f8a 100644
--- a/llvm/test/CodeGen/X86/memset.ll
+++ b/llvm/test/CodeGen/X86/memset.ll
@@ -58,14 +58,14 @@ entry:
%up_mvd116 = getelementptr [8 x %struct.x], [8 x %struct.x]* %up_mvd, i32 0, i32 0 ; <%struct.x*> [#uses=1]
%tmp110117 = bitcast [8 x %struct.x]* %up_mvd to i8* ; <i8*> [#uses=1]
- call void @llvm.memset.p0i8.i64(i8* %tmp110117, i8 0, i64 32, i32 8, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 8 %tmp110117, i8 0, i64 32, i1 false)
call void @foo( %struct.x* %up_mvd116 ) nounwind
ret void
}
declare void @foo(%struct.x*)
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
; Ensure that alignment of '0' in an @llvm.memset intrinsic results in
; unaligned loads and stores.
@@ -97,6 +97,6 @@ define void @PR15348(i8* %a) {
; YMM-NEXT: vmovups %xmm0, (%eax)
; YMM-NEXT: movb $0, 16(%eax)
; YMM-NEXT: retl
- call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 17, i32 0, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 17, i1 false)
ret void
}
diff --git a/llvm/test/CodeGen/X86/memset64-on-x86-32.ll b/llvm/test/CodeGen/X86/memset64-on-x86-32.ll
index 0fc21920409..f9707c66863 100644
--- a/llvm/test/CodeGen/X86/memset64-on-x86-32.ll
+++ b/llvm/test/CodeGen/X86/memset64-on-x86-32.ll
@@ -51,9 +51,9 @@ define void @bork() nounwind {
; SLOW_64-NEXT: movq $0, 8
; SLOW_64-NEXT: movq $0, 0
; SLOW_64-NEXT: retq
- call void @llvm.memset.p0i8.i64(i8* null, i8 0, i64 80, i32 4, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 4 null, i8 0, i64 80, i1 false)
ret void
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/misaligned-memset.ll b/llvm/test/CodeGen/X86/misaligned-memset.ll
index ef8e0e81ad7..f7a6d577c08 100644
--- a/llvm/test/CodeGen/X86/misaligned-memset.ll
+++ b/llvm/test/CodeGen/X86/misaligned-memset.ll
@@ -7,9 +7,9 @@ define i32 @main() nounwind ssp {
entry:
%retval = alloca i32, align 4
store i32 0, i32* %retval
- call void @llvm.memset.p0i8.i64(i8* bitcast (i64* getelementptr inbounds ([3 x i64], [3 x i64]* @a, i32 0, i64 1) to i8*), i8 0, i64 16, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* bitcast (i64* getelementptr inbounds ([3 x i64], [3 x i64]* @a, i32 0, i64 1) to i8*), i8 0, i64 16, i1 false)
%0 = load i32, i32* %retval
ret i32 %0
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/misched-new.ll b/llvm/test/CodeGen/X86/misched-new.ll
index 4e42c931454..5a93577a214 100644
--- a/llvm/test/CodeGen/X86/misched-new.ll
+++ b/llvm/test/CodeGen/X86/misched-new.ll
@@ -11,7 +11,7 @@
; FIXME: There should be an assert in the coalescer that we're not rematting
; "not-quite-dead" copies, but that breaks a lot of tests <rdar://problem/11148682>.
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
; From oggenc.
; After coalescing, we have a dead superreg (RAX) definition.
@@ -24,7 +24,7 @@ entry:
br i1 undef, label %for.cond.preheader, label %if.end
for.cond.preheader: ; preds = %entry
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* undef, i8* null, i64 128, i32 4, i1 false) nounwind
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 undef, i8* align 4 null, i64 128, i1 false) nounwind
unreachable
if.end: ; preds = %entry
diff --git a/llvm/test/CodeGen/X86/negate-add-zero.ll b/llvm/test/CodeGen/X86/negate-add-zero.ll
index 64f20a6f81b..beb87e3e903 100644
--- a/llvm/test/CodeGen/X86/negate-add-zero.ll
+++ b/llvm/test/CodeGen/X86/negate-add-zero.ll
@@ -1133,4 +1133,4 @@ declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIdLi5EL
declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIdLi5ELi6EERSoS0_RK15FixedMatrixBaseIT_XT0_EXT1_EE(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %"struct.FixedMatrixBase<double,5,6>"*)
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/optimize-max-0.ll b/llvm/test/CodeGen/X86/optimize-max-0.ll
index 2dde95738d1..b5e8627a88b 100644
--- a/llvm/test/CodeGen/X86/optimize-max-0.ll
+++ b/llvm/test/CodeGen/X86/optimize-max-0.ll
@@ -173,7 +173,7 @@ bb23: ; preds = %bb24, %bb.nph
%47 = mul i32 %y.21, %w
%.sum5 = add i32 %47, %.sum3
%48 = getelementptr i8, i8* %j, i32 %.sum5
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %48, i8* %46, i32 %w, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %48, i8* %46, i32 %w, i1 false)
br label %bb24
bb24: ; preds = %bb23
@@ -190,7 +190,7 @@ bb26: ; preds = %bb24.bb26_crit_edge
%50 = getelementptr i8, i8* %j, i32 %.sum4
%51 = mul i32 %x, %w
%52 = sdiv i32 %51, 2
- tail call void @llvm.memset.p0i8.i32(i8* %50, i8 -128, i32 %52, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %50, i8 -128, i32 %52, i1 false)
ret void
bb29: ; preds = %bb20, %entry
@@ -208,7 +208,7 @@ bb30: ; preds = %bb31, %bb.nph11
%57 = getelementptr i8, i8* %r, i32 %56
%58 = mul i32 %y.310, %w
%59 = getelementptr i8, i8* %j, i32 %58
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %59, i8* %57, i32 %w, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %59, i8* %57, i32 %w, i1 false)
br label %bb31
bb31: ; preds = %bb30
@@ -224,7 +224,7 @@ bb33: ; preds = %bb31.bb33_crit_edge
%61 = getelementptr i8, i8* %j, i32 %60
%62 = mul i32 %x, %w
%63 = sdiv i32 %62, 2
- tail call void @llvm.memset.p0i8.i32(i8* %61, i8 -128, i32 %63, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %61, i8 -128, i32 %63, i1 false)
ret void
return: ; preds = %bb20
@@ -398,7 +398,7 @@ bb23: ; preds = %bb24, %bb.nph
%47 = mul i32 %y.21, %w
%.sum5 = add i32 %47, %.sum3
%48 = getelementptr i8, i8* %j, i32 %.sum5
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %48, i8* %46, i32 %w, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %48, i8* %46, i32 %w, i1 false)
br label %bb24
bb24: ; preds = %bb23
@@ -415,7 +415,7 @@ bb26: ; preds = %bb24.bb26_crit_edge
%50 = getelementptr i8, i8* %j, i32 %.sum4
%51 = mul i32 %x, %w
%52 = udiv i32 %51, 2
- tail call void @llvm.memset.p0i8.i32(i8* %50, i8 -128, i32 %52, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %50, i8 -128, i32 %52, i1 false)
ret void
bb29: ; preds = %bb20, %entry
@@ -433,7 +433,7 @@ bb30: ; preds = %bb31, %bb.nph11
%57 = getelementptr i8, i8* %r, i32 %56
%58 = mul i32 %y.310, %w
%59 = getelementptr i8, i8* %j, i32 %58
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %59, i8* %57, i32 %w, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %59, i8* %57, i32 %w, i1 false)
br label %bb31
bb31: ; preds = %bb30
@@ -449,13 +449,13 @@ bb33: ; preds = %bb31.bb33_crit_edge
%61 = getelementptr i8, i8* %j, i32 %60
%62 = mul i32 %x, %w
%63 = udiv i32 %62, 2
- tail call void @llvm.memset.p0i8.i32(i8* %61, i8 -128, i32 %63, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %61, i8 -128, i32 %63, i1 false)
ret void
return: ; preds = %bb20
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/pr11985.ll b/llvm/test/CodeGen/X86/pr11985.ll
index 94b37215f63..99084d61140 100644
--- a/llvm/test/CodeGen/X86/pr11985.ll
+++ b/llvm/test/CodeGen/X86/pr11985.ll
@@ -24,7 +24,7 @@ define float @foo(i8* nocapture %buf, float %a, float %b) nounwind uwtable {
; NEHALEM-NEXT: movups %xmm2, (%rdi)
entry:
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %buf, i8* blockaddress(@foo, %out), i64 22, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %buf, i8* blockaddress(@foo, %out), i64 22, i1 false)
br label %out
out: ; preds = %entry
@@ -32,4 +32,4 @@ out: ; preds = %entry
ret float %add
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/pr14333.ll b/llvm/test/CodeGen/X86/pr14333.ll
index 89779302d7f..8298ca5a7d6 100644
--- a/llvm/test/CodeGen/X86/pr14333.ll
+++ b/llvm/test/CodeGen/X86/pr14333.ll
@@ -6,7 +6,7 @@ define void @bar(%foo* %zed) {
%tmp2 = getelementptr inbounds %foo, %foo* %zed, i64 0, i32 1
store i64 0, i64* %tmp2, align 8
%tmp3 = bitcast %foo* %zed to i8*
- call void @llvm.memset.p0i8.i64(i8* %tmp3, i8 0, i64 16, i32 8, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 8 %tmp3, i8 0, i64 16, i1 false)
ret void
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/pr34088.ll b/llvm/test/CodeGen/X86/pr34088.ll
index 4fa24a50648..2fb000f3538 100644
--- a/llvm/test/CodeGen/X86/pr34088.ll
+++ b/llvm/test/CodeGen/X86/pr34088.ll
@@ -31,13 +31,13 @@ define i32 @pr34088() local_unnamed_addr {
entry:
%foo = alloca %struct.Foo, align 4
%0 = bitcast %struct.Foo* %foo to i8*
- call void @llvm.memset.p0i8.i32(i8* nonnull %0, i8 0, i32 20, i32 4, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* align 4 nonnull %0, i8 0, i32 20, i1 false)
%buffer1 = getelementptr inbounds %struct.Foo, %struct.Foo* %foo, i32 0, i32 1, i32 1
%1 = bitcast %struct.Buffer* %buffer1 to i64*
%2 = load i64, i64* %1, align 4
- call void @llvm.memset.p0i8.i32(i8* nonnull %0, i8 -51, i32 20, i32 4, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* align 4 nonnull %0, i8 -51, i32 20, i1 false)
store i64 %2, i64* %1, align 4
ret i32 0
}
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i32, i1)
+declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1)
diff --git a/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll b/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
index 0178c9ec1c9..a6d4c6e97bc 100644
--- a/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
+++ b/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
@@ -63,7 +63,7 @@ SyTime.exit2720:
br i1 %cmp293427, label %for.body.lr.ph, label %while.body.preheader
for.body.lr.ph:
- call void @llvm.memset.p0i8.i64(i8* undef, i8 32, i64 512, i32 16, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 16 undef, i8 32, i64 512, i1 false)
br label %while.body.preheader
while.body.preheader:
@@ -377,7 +377,7 @@ cleanup:
declare i32 @fileno(%struct.TMP.2* nocapture)
declare i64 @"\01_write"(i32, i8*, i64)
declare i32 @__maskrune(i32, i64)
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
!llvm.ident = !{!0}
diff --git a/llvm/test/CodeGen/X86/regparm.ll b/llvm/test/CodeGen/X86/regparm.ll
index f427010edc5..01a734f9f47 100644
--- a/llvm/test/CodeGen/X86/regparm.ll
+++ b/llvm/test/CodeGen/X86/regparm.ll
@@ -9,7 +9,7 @@ target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
target triple = "i386-unknown-linux-gnu"
; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i32, i1) #1
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
define void @use_memset(i8* inreg nocapture %dest, i8 inreg %c, i32 inreg %n) local_unnamed_addr #0 {
entry:
@@ -30,12 +30,12 @@ entry:
;FASTWIN: movzbl %dl, %edx
;FASTWIN-NEXT: calll _memset
;FASTWIN-NEXT: retl
- tail call void @llvm.memset.p0i8.i32(i8* %dest, i8 %c, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %dest, i8 %c, i32 %n, i1 false)
ret void
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i32, i1) #1
+declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) #1
attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="pentium4" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/CodeGen/X86/remat-fold-load.ll b/llvm/test/CodeGen/X86/remat-fold-load.ll
index 3478033bfbf..e640974bdd2 100644
--- a/llvm/test/CodeGen/X86/remat-fold-load.ll
+++ b/llvm/test/CodeGen/X86/remat-fold-load.ll
@@ -16,7 +16,7 @@ target triple = "i386-unknown-linux-gnu"
%type_d = type { i64 }
%type_e = type { %type_c, i64 }
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
define linkonce_odr void @test() nounwind {
entry:
@@ -41,7 +41,7 @@ if.then.i.i.i.i71: ; preds = %while.body12
%tmp1 = getelementptr inbounds %type_a, %type_a* %tmp, i32 0, i32 1, i32 0, i32 1
%buf_6.i.i.i.i70 = bitcast %type_d* %tmp1 to i8**
%tmp2 = load i8*, i8** %buf_6.i.i.i.i70, align 4
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* %tmp2, i32 undef, i32 1, i1 false) nounwind
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* %tmp2, i32 undef, i1 false) nounwind
unreachable
if.else.i.i.i.i74: ; preds = %while.body12
@@ -69,7 +69,7 @@ if.then.i.i.i.i92: ; preds = %if.else.i.i.i.i74
%tmp12 = getelementptr inbounds %type_e, %type_e* %tmp9, i32 0, i32 0, i32 1
%buf_6.i.i.i.i91 = bitcast %type_d* %tmp12 to i8**
%tmp13 = load i8*, i8** %buf_6.i.i.i.i91, align 4
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %call4.i.i.i.i89, i8* %tmp13, i32 %tmp10, i32 1, i1 false) nounwind
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %call4.i.i.i.i89, i8* %tmp13, i32 %tmp10, i1 false) nounwind
br label %A
if.else.i.i.i.i95: ; preds = %if.else.i.i.i.i74
diff --git a/llvm/test/CodeGen/X86/slow-unaligned-mem.ll b/llvm/test/CodeGen/X86/slow-unaligned-mem.ll
index a3a21892339..54c248f3b04 100644
--- a/llvm/test/CodeGen/X86/slow-unaligned-mem.ll
+++ b/llvm/test/CodeGen/X86/slow-unaligned-mem.ll
@@ -88,9 +88,9 @@ define void @store_zeros(i8* %a) {
; FAST: # %bb.0:
; FAST-NEXT: movl {{[0-9]+}}(%esp), %eax
; FAST-NOT: movl
- call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 64, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 64, i1 false)
ret void
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
diff --git a/llvm/test/CodeGen/X86/small-byval-memcpy.ll b/llvm/test/CodeGen/X86/small-byval-memcpy.ll
index 3c03750199c..c5c9a3d8416 100644
--- a/llvm/test/CodeGen/X86/small-byval-memcpy.ll
+++ b/llvm/test/CodeGen/X86/small-byval-memcpy.ll
@@ -2,10 +2,10 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=nehalem | FileCheck %s --check-prefix=NEHALEM
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=btver2 | FileCheck %s --check-prefix=BTVER2
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
define void @copy16bytes(i8* nocapture %a, i8* nocapture readonly %b) {
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 16, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 16, i1 false)
ret void
; CHECK-LABEL: copy16bytes
diff --git a/llvm/test/CodeGen/X86/stack-align.ll b/llvm/test/CodeGen/X86/stack-align.ll
index 192306462d1..338ced0ebf1 100644
--- a/llvm/test/CodeGen/X86/stack-align.ll
+++ b/llvm/test/CodeGen/X86/stack-align.ll
@@ -71,7 +71,7 @@ define x86_stdcallcc void @test5(%struct.sixteen* byval nocapture readonly align
%1 = getelementptr inbounds [16 x i8], [16 x i8]* %d.sroa.0, i32 0, i32 0
call void @llvm.lifetime.start.p0i8(i64 16, i8* %1)
%2 = getelementptr inbounds %struct.sixteen, %struct.sixteen* %s, i32 0, i32 0, i32 0
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %2, i32 16, i32 1, i1 true)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %2, i32 16, i1 true)
call void @llvm.lifetime.end.p0i8(i64 16, i8* %1)
ret void
; CHECK-LABEL: test5:
@@ -84,7 +84,7 @@ define x86_stdcallcc void @test5(%struct.sixteen* byval nocapture readonly align
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) argmemonly nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) argmemonly nounwind
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) argmemonly nounwind
diff --git a/llvm/test/CodeGen/X86/stack-protector.ll b/llvm/test/CodeGen/X86/stack-protector.ll
index 5166ed5b02a..d4eee18244f 100644
--- a/llvm/test/CodeGen/X86/stack-protector.ll
+++ b/llvm/test/CodeGen/X86/stack-protector.ll
@@ -3768,7 +3768,7 @@ entry:
%test.coerce = alloca { i64, i8 }
%0 = bitcast { i64, i8 }* %test.coerce to i8*
%1 = bitcast %struct.small_char* %test to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 12, i32 0, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 12, i1 false)
%2 = getelementptr { i64, i8 }, { i64, i8 }* %test.coerce, i32 0, i32 0
%3 = load i64, i64* %2, align 1
%4 = getelementptr { i64, i8 }, { i64, i8 }* %test.coerce, i32 0, i32 1
@@ -3806,7 +3806,7 @@ entry:
%test.coerce = alloca { i64, i8 }
%0 = bitcast { i64, i8 }* %test.coerce to i8*
%1 = bitcast %struct.small_char* %test to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 12, i32 0, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 12, i1 false)
%2 = getelementptr { i64, i8 }, { i64, i8 }* %test.coerce, i32 0, i32 0
%3 = load i64, i64* %2, align 1
%4 = getelementptr { i64, i8 }, { i64, i8 }* %test.coerce, i32 0, i32 1
@@ -3922,7 +3922,7 @@ declare void @_Z3exceptPi(i32*)
declare i32 @__gxx_personality_v0(...)
declare i32* @getp()
declare i32 @dummy(...)
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
attributes #0 = { ssp }
attributes #1 = { sspstrong }
diff --git a/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll b/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
index 197fd72586a..736a6d8500d 100644
--- a/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
+++ b/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
@@ -97,7 +97,7 @@ if.end19: ; preds = %entry
br i1 %or.cond203, label %cleanup, label %if.end50
if.end50: ; preds = %if.end19
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull %call, i8* undef, i64 %conv, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull %call, i8* undef, i64 %conv, i1 false)
%cmp1.i.i = icmp ugt i32 %mul, 3
br i1 %cmp1.i.i, label %shared_preheader, label %wunpsect.exit.thread.loopexit391
@@ -185,6 +185,6 @@ declare void @cli_dbgmsg(i8*, ...) local_unnamed_addr #0
declare i8* @cli_calloc(i64, i64) local_unnamed_addr #0
; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) #1
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
attributes #0 = { nounwind }
attributes #1 = { argmemonly nounwind }
diff --git a/llvm/test/CodeGen/X86/tailcall-mem-intrinsics.ll b/llvm/test/CodeGen/X86/tailcall-mem-intrinsics.ll
index 7491ea659ba..ee3489701df 100644
--- a/llvm/test/CodeGen/X86/tailcall-mem-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/tailcall-mem-intrinsics.ll
@@ -4,7 +4,7 @@
; CHECK: jmp memcpy
define void @tail_memcpy(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 {
entry:
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false)
ret void
}
@@ -12,7 +12,7 @@ entry:
; CHECK: jmp memmove
define void @tail_memmove(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 {
entry:
- tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false)
ret void
}
@@ -20,7 +20,7 @@ entry:
; CHECK: jmp memset
define void @tail_memset(i8* nocapture %p, i8 %c, i32 %n) #0 {
entry:
- tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i1 false)
ret void
}
@@ -28,7 +28,7 @@ entry:
; CHECK: jmp memcpy
define i8* @tail_memcpy_ret(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 {
entry:
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false)
ret i8* %p
}
@@ -36,7 +36,7 @@ entry:
; CHECK: jmp memmove
define i8* @tail_memmove_ret(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 {
entry:
- tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false)
ret i8* %p
}
@@ -44,12 +44,12 @@ entry:
; CHECK: jmp memset
define i8* @tail_memset_ret(i8* nocapture %p, i8 %c, i32 %n) #0 {
entry:
- tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i1 false)
ret i8* %p
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #0
-declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #0
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) #0
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0
+declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) #0
attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/X86/tlv-1.ll b/llvm/test/CodeGen/X86/tlv-1.ll
index 5f017d31dbb..0dbd00c55eb 100644
--- a/llvm/test/CodeGen/X86/tlv-1.ll
+++ b/llvm/test/CodeGen/X86/tlv-1.ll
@@ -7,7 +7,7 @@ define void @main() nounwind ssp {
; CHECK-LABEL: main:
entry:
- call void @llvm.memset.p0i8.i64(i8* getelementptr inbounds (%struct.A, %struct.A* @c, i32 0, i32 0, i32 0), i8 0, i64 60, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* getelementptr inbounds (%struct.A, %struct.A* @c, i32 0, i32 0, i32 0), i8 0, i64 60, i1 false)
unreachable
; CHECK: movq _c@TLVP(%rip), %rdi
; CHECK-NEXT: callq *(%rdi)
@@ -31,7 +31,7 @@ entry:
ret i32 %sub
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
@a = thread_local global i32 0 ; <i32*> [#uses=0]
@b = thread_local global i32 0 ; <i32*> [#uses=0]
diff --git a/llvm/test/CodeGen/X86/unaligned-load.ll b/llvm/test/CodeGen/X86/unaligned-load.ll
index 644a3644730..1ceca2cd06d 100644
--- a/llvm/test/CodeGen/X86/unaligned-load.ll
+++ b/llvm/test/CodeGen/X86/unaligned-load.ll
@@ -12,14 +12,14 @@ entry:
bb: ; preds = %bb, %entry
%String2Loc9 = getelementptr inbounds [31 x i8], [31 x i8]* %String2Loc, i64 0, i64 0
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %String2Loc9, i8* getelementptr inbounds ([31 x i8], [31 x i8]* @.str3, i64 0, i64 0), i64 31, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %String2Loc9, i8* getelementptr inbounds ([31 x i8], [31 x i8]* @.str3, i64 0, i64 0), i64 31, i1 false)
br label %bb
return: ; No predecessors!
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
; I386: calll {{_?}}memcpy
diff --git a/llvm/test/CodeGen/X86/unused_stackslots.ll b/llvm/test/CodeGen/X86/unused_stackslots.ll
index 82fd3db1ccb..dca01275ca7 100644
--- a/llvm/test/CodeGen/X86/unused_stackslots.ll
+++ b/llvm/test/CodeGen/X86/unused_stackslots.ll
@@ -202,14 +202,14 @@ land.lhs.true54: ; preds = %for.end50
br i1 %tobool56, label %for.inc73, label %for.body61.preheader
for.body61.preheader: ; preds = %land.lhs.true54
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 4, i64 0) to i8*), i8* %tmp1, i64 32, i32 16, i1 false)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 6, i64 0) to i8*), i8* %tmp2, i64 32, i32 16, i1 false)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 8, i64 0) to i8*), i8* %tmp3, i64 32, i32 16, i1 false)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 10, i64 0) to i8*), i8* %tmp4, i64 32, i32 16, i1 false)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 12, i64 0) to i8*), i8* %tmp5, i64 32, i32 16, i1 false)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 14, i64 0) to i8*), i8* %tmp6, i64 32, i32 16, i1 false)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 16, i64 0) to i8*), i8* %tmp7, i64 32, i32 16, i1 false)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 18, i64 0) to i8*), i8* %tmp8, i64 32, i32 16, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 4, i64 0) to i8*), i8* align 16 %tmp1, i64 32, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 6, i64 0) to i8*), i8* align 16 %tmp2, i64 32, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 8, i64 0) to i8*), i8* align 16 %tmp3, i64 32, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 10, i64 0) to i8*), i8* align 16 %tmp4, i64 32, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 12, i64 0) to i8*), i8* align 16 %tmp5, i64 32, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 14, i64 0) to i8*), i8* align 16 %tmp6, i64 32, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 16, i64 0) to i8*), i8* align 16 %tmp7, i64 32, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 18, i64 0) to i8*), i8* align 16 %tmp8, i64 32, i1 false)
%call70 = tail call i32 @distortion4x4(i32* nonnull getelementptr inbounds ([4 x i32], [4 x i32]* @e, i64 0, i64 0)) #3
%add71 = add nsw i32 %call70, %m.3.lcssa.lcssa
br label %for.inc73
@@ -234,7 +234,7 @@ declare void @LumaPrediction4x4(i32, i32, i32, i32, i32, i16 signext, i16 signex
declare i32 @distortion4x4(i32*) #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1
; Function Attrs: argmemonly nounwind
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
diff --git a/llvm/test/CodeGen/X86/unwindraise.ll b/llvm/test/CodeGen/X86/unwindraise.ll
index db39f4ed455..2da07fb1753 100644
--- a/llvm/test/CodeGen/X86/unwindraise.ll
+++ b/llvm/test/CodeGen/X86/unwindraise.ll
@@ -34,7 +34,7 @@ entry:
call fastcc void @uw_init_context_1(%struct._Unwind_Context* %this_context, i8* %0, i8* %1)
%2 = bitcast %struct._Unwind_Context* %cur_context to i8*
%3 = bitcast %struct._Unwind_Context* %this_context to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %3, i64 240, i32 8, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %2, i8* align 8 %3, i64 240, i1 false)
%personality = getelementptr inbounds %struct._Unwind_FrameState, %struct._Unwind_FrameState* %fs, i64 0, i32 6
%retaddr_column.i = getelementptr inbounds %struct._Unwind_FrameState, %struct._Unwind_FrameState* %fs, i64 0, i32 9
%flags.i.i.i.i = getelementptr inbounds %struct._Unwind_Context, %struct._Unwind_Context* %cur_context, i64 0, i32 5
@@ -121,7 +121,7 @@ while.end: ; preds = %if.then4
%16 = ptrtoint i8* %15 to i64
%private_2 = getelementptr inbounds %struct._Unwind_Exception, %struct._Unwind_Exception* %exc, i64 0, i32 3
store i64 %16, i64* %private_2, align 8
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %3, i64 240, i32 8, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %2, i8* align 8 %3, i64 240, i1 false)
%17 = bitcast %struct._Unwind_FrameState* %fs.i to i8*
call void @llvm.lifetime.start.p0i8(i64 -1, i8* %17)
%personality.i = getelementptr inbounds %struct._Unwind_FrameState, %struct._Unwind_FrameState* %fs.i, i64 0, i32 6
@@ -234,7 +234,7 @@ declare i8* @llvm.eh.dwarf.cfa(i32) nounwind
declare i8* @llvm.returnaddress(i32) nounwind readnone
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
declare fastcc i64 @uw_install_context_1(%struct._Unwind_Context*, %struct._Unwind_Context*) uwtable
diff --git a/llvm/test/CodeGen/X86/variable-sized-darwin-bzero.ll b/llvm/test/CodeGen/X86/variable-sized-darwin-bzero.ll
index 3971190f02c..55c5dd39d12 100644
--- a/llvm/test/CodeGen/X86/variable-sized-darwin-bzero.ll
+++ b/llvm/test/CodeGen/X86/variable-sized-darwin-bzero.ll
@@ -1,8 +1,8 @@
; RUN: llc < %s -mtriple=i686-apple-darwin10 | grep __bzero
define void @foo(i8* %p, i64 %n) {
- call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 %n, i32 4, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 4 %p, i8 0, i64 %n, i1 false)
ret void
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/vectorcall.ll b/llvm/test/CodeGen/X86/vectorcall.ll
index 598a339ee2f..9914780e04c 100644
--- a/llvm/test/CodeGen/X86/vectorcall.ll
+++ b/llvm/test/CodeGen/X86/vectorcall.ll
@@ -157,7 +157,7 @@ entry:
%retval = alloca %struct.HVA4, align 16
%0 = bitcast %struct.HVA4* %retval to i8*
%1 = bitcast %struct.HVA4* %b to i8*
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 64, i32 16, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %0, i8* align 16 %1, i32 64, i1 false)
%2 = load %struct.HVA4, %struct.HVA4* %retval, align 16
ret %struct.HVA4 %2
}
@@ -168,18 +168,18 @@ entry:
; CHECK: movaps 48(%{{[re]}}sp), %xmm3
; CHECK: ret{{[ql]}}
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i32, i1)
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1)
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i32, i1)
+declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1)
define x86_vectorcallcc void @test_mixed_7(%struct.HVA5* noalias sret %agg.result) {
entry:
%a = alloca %struct.HVA5, align 16
%0 = bitcast %struct.HVA5* %a to i8*
- call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 80, i32 16, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 16 %0, i8 0, i64 80, i1 false)
%1 = bitcast %struct.HVA5* %agg.result to i8*
%2 = bitcast %struct.HVA5* %a to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 80, i32 16, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %1, i8* align 16 %2, i64 80, i1 false)
ret void
}
; CHECK-LABEL: test_mixed_7
diff --git a/llvm/test/CodeGen/X86/x86-64-static-relo-movl.ll b/llvm/test/CodeGen/X86/x86-64-static-relo-movl.ll
index 5da3a470503..658187e2204 100644
--- a/llvm/test/CodeGen/X86/x86-64-static-relo-movl.ll
+++ b/llvm/test/CodeGen/X86/x86-64-static-relo-movl.ll
@@ -10,7 +10,7 @@ define void @setup() {
%pending = alloca %struct.MatchInfo, align 8
%t = bitcast %struct.MatchInfo* %pending to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %t, i8* bitcast (%struct.MatchInfo* @NO_MATCH to i8*), i64 512, i32 8, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %t, i8* align 8 bitcast (%struct.MatchInfo* @NO_MATCH to i8*), i64 512, i1 false)
%u = getelementptr inbounds %struct.MatchInfo, %struct.MatchInfo* %pending, i32 0, i32 2
%v = load i64, i64* %u, align 8
br label %done
@@ -21,4 +21,4 @@ done:
}
; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
diff --git a/llvm/test/CodeGen/X86/x86-repmov-copy-eflags.ll b/llvm/test/CodeGen/X86/x86-repmov-copy-eflags.ll
index ad398885728..49afb39b6d4 100644
--- a/llvm/test/CodeGen/X86/x86-repmov-copy-eflags.ll
+++ b/llvm/test/CodeGen/X86/x86-repmov-copy-eflags.ll
@@ -10,7 +10,7 @@ entry:
%g = alloca %struct.T, align 8
%r = alloca i32, align 8
store i32 0, i32* %r, align 4
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 24, i32 8, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %p, i8* align 8 %q, i32 24, i1 false)
br label %while.body
while.body: ; preds = %while.body, %entry
@@ -26,7 +26,7 @@ while.end: ; preds = %while.body
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #1
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #1
declare void @g(%struct.T*)
diff --git a/llvm/test/CodeGen/XCore/memcpy.ll b/llvm/test/CodeGen/XCore/memcpy.ll
index fe424c50cb2..c747374ed51 100644
--- a/llvm/test/CodeGen/XCore/memcpy.ll
+++ b/llvm/test/CodeGen/XCore/memcpy.ll
@@ -6,7 +6,7 @@ define void @f1(i8* %dst, i8* %src, i32 %n) nounwind {
; CHECK: bl __memcpy_4
entry:
%0 = shl i32 %n, 2
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %0, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dst, i8* align 4 %src, i32 %0, i1 false)
ret void
}
@@ -15,7 +15,7 @@ define void @f2(i8* %dst, i8* %src, i32 %n) nounwind {
; CHECK-LABEL: f2:
; CHECK: bl memcpy
entry:
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %n, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dst, i8* align 4 %src, i32 %n, i1 false)
ret void
}
@@ -25,8 +25,8 @@ define void @f3(i8* %dst, i8* %src, i32 %n) nounwind {
; CHECK: bl memcpy
entry:
%0 = shl i32 %n, 2
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %0, i32 2, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 %dst, i8* align 2 %src, i32 %0, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind