Diffstat (limited to 'llvm/test/CodeGen/X86')
-rw-r--r--  llvm/test/CodeGen/X86/2007-10-15-CoalescerCrash.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/2009-01-25-NoSSE.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/2009-11-16-UnfoldMemOpBug.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/2010-04-08-CoalescerBug.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/2010-04-21-CoalescerBug.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll | 8
-rw-r--r--  llvm/test/CodeGen/X86/alignment-2.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/bug26810.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/darwin-bzero.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/fast-isel-call.ll | 8
-rw-r--r--  llvm/test/CodeGen/X86/fast-isel-deadcode.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/fast-isel-x86-64.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/force-align-stack-alloca.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/immediate_merging.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/immediate_merging64.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/lea-opt-memop-check-1.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/load-slice.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/lsr-normalization.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/mcu-abi.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/mem-intrin-base-reg.ll | 10
-rw-r--r--  llvm/test/CodeGen/X86/memcpy-2.ll | 10
-rw-r--r--  llvm/test/CodeGen/X86/memcpy-from-string.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/memcpy.ll | 24
-rw-r--r--  llvm/test/CodeGen/X86/memset-2.ll | 10
-rw-r--r--  llvm/test/CodeGen/X86/memset-3.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/memset-nonzero.ll | 12
-rw-r--r--  llvm/test/CodeGen/X86/memset-sse-stack-realignment.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/memset.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/memset64-on-x86-32.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/misaligned-memset.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/misched-new.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/negate-add-zero.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/optimize-max-0.ll | 20
-rw-r--r--  llvm/test/CodeGen/X86/pr11985.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/pr14333.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/pr34088.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/regparm.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/remat-fold-load.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/slow-unaligned-mem.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/small-byval-memcpy.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/stack-align.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/stack-protector.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/tailcall-mem-intrinsics.ll | 18
-rw-r--r--  llvm/test/CodeGen/X86/tlv-1.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/unaligned-load.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/unused_stackslots.ll | 18
-rw-r--r--  llvm/test/CodeGen/X86/unwindraise.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/variable-sized-darwin-bzero.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/vectorcall.ll | 12
-rw-r--r--  llvm/test/CodeGen/X86/x86-64-static-relo-movl.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/x86-repmov-copy-eflags.ll | 4
55 files changed, 168 insertions(+), 168 deletions(-)
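Note: the test updates below all apply one mechanical rewrite — the explicit alignment argument (the i32 before the final i1) is dropped from the @llvm.memcpy/@llvm.memmove/@llvm.memset intrinsic signatures, and any non-trivial alignment is instead expressed as an `align` attribute on the pointer operands. A minimal before/after sketch of the pattern, taken from the same transformation shown throughout this diff:

    ; old form: alignment carried as a dedicated i32 argument
    call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 16, i32 8, i1 false)
    ; new form: alignment attached to the pointer operand; an old alignment of 0 or 1 simply disappears
    call void @llvm.memset.p0i8.i64(i8* align 8 %p, i8 0, i64 16, i1 false)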
diff --git a/llvm/test/CodeGen/X86/2007-10-15-CoalescerCrash.ll b/llvm/test/CodeGen/X86/2007-10-15-CoalescerCrash.ll
index 65e5ed76213..5cdf4dec3c5 100644
--- a/llvm/test/CodeGen/X86/2007-10-15-CoalescerCrash.ll
+++ b/llvm/test/CodeGen/X86/2007-10-15-CoalescerCrash.ll
@@ -224,7 +224,7 @@ declare void @fancy_abort(i8*, i32, i8*)
declare i8* @pool_alloc(%struct.alloc_pool_def*)
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
declare void @link_block(%struct.basic_block_def*, %struct.basic_block_def*)
diff --git a/llvm/test/CodeGen/X86/2009-01-25-NoSSE.ll b/llvm/test/CodeGen/X86/2009-01-25-NoSSE.ll
index 58bce75fc73..6d390b71114 100644
--- a/llvm/test/CodeGen/X86/2009-01-25-NoSSE.ll
+++ b/llvm/test/CodeGen/X86/2009-01-25-NoSSE.ll
@@ -12,11 +12,11 @@ define void @foo() nounwind {
entry:
%termios = alloca %struct.ktermios, align 8
%termios1 = bitcast %struct.ktermios* %termios to i8*
- call void @llvm.memset.p0i8.i64(i8* %termios1, i8 0, i64 44, i32 8, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 8 %termios1, i8 0, i64 44, i1 false)
call void @bar(%struct.ktermios* %termios) nounwind
ret void
}
declare void @bar(%struct.ktermios*)
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/2009-11-16-UnfoldMemOpBug.ll b/llvm/test/CodeGen/X86/2009-11-16-UnfoldMemOpBug.ll
index 1cfd108db65..b58ee5be82e 100644
--- a/llvm/test/CodeGen/X86/2009-11-16-UnfoldMemOpBug.ll
+++ b/llvm/test/CodeGen/X86/2009-11-16-UnfoldMemOpBug.ll
@@ -17,7 +17,7 @@ bb1:
; CHECK: movups %xmm0, 12(%rsp)
; CHECK: movaps %xmm1, (%rsp)
%tmp2 = phi i32 [ %tmp3, %bb1 ], [ 0, %entry ]
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* getelementptr inbounds ([28 x i8], [28 x i8]* @str, i64 0, i64 0), i64 28, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* getelementptr inbounds ([28 x i8], [28 x i8]* @str, i64 0, i64 0), i64 28, i1 false)
%tmp3 = add i32 %tmp2, 1
%tmp4 = icmp eq i32 %tmp3, %count
br i1 %tmp4, label %bb2, label %bb1
@@ -26,4 +26,4 @@ bb2:
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/2010-04-08-CoalescerBug.ll b/llvm/test/CodeGen/X86/2010-04-08-CoalescerBug.ll
index c3dfbfc15ec..6c9c743eed7 100644
--- a/llvm/test/CodeGen/X86/2010-04-08-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2010-04-08-CoalescerBug.ll
@@ -19,8 +19,8 @@ entry:
%tmp4 = getelementptr inbounds %struct.FC, %struct.FC* %tmp3, i64 0, i32 1, i64 0
%tmp5 = bitcast [32 x i32]* %BitValueArray to i8*
%tmp6 = bitcast i32* %tmp4 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp5, i8* %tmp6, i64 128, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %tmp5, i8* align 4 %tmp6, i64 128, i1 false)
unreachable
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/2010-04-21-CoalescerBug.ll b/llvm/test/CodeGen/X86/2010-04-21-CoalescerBug.ll
index d5987645cfc..46dedb48ff1 100644
--- a/llvm/test/CodeGen/X86/2010-04-21-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2010-04-21-CoalescerBug.ll
@@ -8,8 +8,8 @@
define void @t(%struct.CMTimeMapping* noalias nocapture sret %agg.result) nounwind optsize ssp {
entry:
%agg.result1 = bitcast %struct.CMTimeMapping* %agg.result to i8* ; <i8*> [#uses=1]
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %agg.result1, i8* null, i64 96, i32 4, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %agg.result1, i8* align 4 null, i64 96, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll b/llvm/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
index ffb51572a30..dd7c3fa571c 100644
--- a/llvm/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
+++ b/llvm/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
@@ -26,7 +26,7 @@ bb:
; CHECK: rep;stosl
%tmp5 = bitcast i32* %tmp4 to i8*
- call void @llvm.memset.p0i8.i64(i8* %tmp5, i8 0, i64 84, i32 4, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 4 %tmp5, i8 0, i64 84, i1 false)
%tmp6 = getelementptr inbounds %struct.type, %struct.type* %s, i32 0, i32 62
store i32* null, i32** %tmp6, align 8
br label %bb1
@@ -36,4 +36,4 @@ bb1:
ret i32 42
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll b/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll
index 9e33d2bf6ac..3a5942513e8 100644
--- a/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll
+++ b/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll
@@ -2,7 +2,7 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.4"
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
define fastcc i32 @cli_magic_scandesc(i8* %in) nounwind ssp {
entry:
@@ -12,7 +12,7 @@ entry:
%d = load i8, i8* %b, align 8
%e = load i8, i8* %c, align 8
%f = bitcast [64 x i8]* %a to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %f, i8* %in, i64 64, i32 8, i1 false) nounwind
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %f, i8* align 8 %in, i64 64, i1 false) nounwind
store i8 %d, i8* %b, align 8
store i8 %e, i8* %c, align 8
ret i32 0
diff --git a/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll b/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
index 20615afdfa1..97a33893fa0 100644
--- a/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
+++ b/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
@@ -14,7 +14,7 @@ target triple = "i386-apple-macosx10.7"
@Exception = external unnamed_addr constant { i8*, i8* }
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
define void @f(i32* nocapture %arg, i32* nocapture %arg1, i32* nocapture %arg2, i32* nocapture %arg3, i32 %arg4, i32 %arg5) optsize ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
bb:
@@ -85,7 +85,7 @@ bb41: ; preds = %bb38
to label %bb42 unwind label %bb20
bb42: ; preds = %bb41
- tail call void @llvm.memset.p0i8.i32(i8* %tmp32, i8 0, i32 %tmp9, i32 1, i1 false) nounwind
+ tail call void @llvm.memset.p0i8.i32(i8* %tmp32, i8 0, i32 %tmp9, i1 false) nounwind
br i1 %tmp35, label %bb43, label %bb45
bb43: ; preds = %bb42
@@ -101,7 +101,7 @@ bb45: ; preds = %bb57, %bb42
br i1 %tmp47, label %bb48, label %bb59
bb48: ; preds = %bb45
- tail call void @llvm.memset.p0i8.i32(i8* %tmp32, i8 0, i32 %tmp9, i32 1, i1 false) nounwind
+ tail call void @llvm.memset.p0i8.i32(i8* %tmp32, i8 0, i32 %tmp9, i1 false) nounwind
br i1 %tmp36, label %bb49, label %bb57
bb49: ; preds = %bb49, %bb48
@@ -120,7 +120,7 @@ bb57: ; preds = %bb49, %bb48
bb59: ; preds = %bb45
%tmp60 = ashr i32 %tmp46, 31
- tail call void @llvm.memset.p0i8.i32(i8* null, i8 0, i32 %tmp37, i32 1, i1 false) nounwind
+ tail call void @llvm.memset.p0i8.i32(i8* null, i8 0, i32 %tmp37, i1 false) nounwind
br i1 %tmp36, label %bb61, label %bb67
bb61: ; preds = %bb61, %bb59
diff --git a/llvm/test/CodeGen/X86/alignment-2.ll b/llvm/test/CodeGen/X86/alignment-2.ll
index a38a3626702..b191b986232 100644
--- a/llvm/test/CodeGen/X86/alignment-2.ll
+++ b/llvm/test/CodeGen/X86/alignment-2.ll
@@ -23,8 +23,8 @@ bb:
; CHECK-NOT: movaps {{[0-9]*}}(%{{[a-z]*}}), {{%xmm[0-9]}}
%myopt = alloca %struct.printQueryOpt, align 4
%tmp = bitcast %struct.printQueryOpt* %myopt to i8*
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* bitcast (%struct.printQueryOpt* getelementptr inbounds (%struct._psqlSettings, %struct._psqlSettings* @pset, i32 0, i32 4) to i8*), i32 76, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %tmp, i8* align 4 bitcast (%struct.printQueryOpt* getelementptr inbounds (%struct._psqlSettings, %struct._psqlSettings* @pset, i32 0, i32 4) to i8*), i32 76, i1 false)
ret i8 0
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/bug26810.ll b/llvm/test/CodeGen/X86/bug26810.ll
index 816bc8224d8..263008131e7 100644
--- a/llvm/test/CodeGen/X86/bug26810.ll
+++ b/llvm/test/CodeGen/X86/bug26810.ll
@@ -114,7 +114,7 @@ loop.exit: ; preds = %for.body.i
define void @init() local_unnamed_addr #1 {
entry:
- call void @llvm.memset.p0i8.i32(i8* bitcast ([8 x <2 x double>]* @"\01?v@@3PAU__m128d@@A" to i8*), i8 0, i32 128, i32 16, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* align 16 bitcast ([8 x <2 x double>]* @"\01?v@@3PAU__m128d@@A" to i8*), i8 0, i32 128, i1 false)
%call.i = tail call i64 @_time64(i64* null)
%conv = trunc i64 %call.i to i32
tail call void @srand(i32 %conv)
@@ -284,7 +284,7 @@ declare i32 @fclose(%struct._iobuf* nocapture) local_unnamed_addr #5
declare i64 @_time64(i64*) local_unnamed_addr #4
; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i32, i1) #6
+declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) #6
attributes #0 = { norecurse "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="pentium4" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="pentium4" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/CodeGen/X86/darwin-bzero.ll b/llvm/test/CodeGen/X86/darwin-bzero.ll
index 3d03ec677a0..60032665255 100644
--- a/llvm/test/CodeGen/X86/darwin-bzero.ll
+++ b/llvm/test/CodeGen/X86/darwin-bzero.ll
@@ -3,12 +3,12 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck -check-prefixes=CHECK,NOBZERO %s
; RUN: llc < %s -mtriple=x86_64-apple-ios10.0-simulator | FileCheck -check-prefixes=CHECK,NOBZERO %s
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
; CHECK-LABEL: foo:
; BZERO: {{calll|callq}} ___bzero
; NOBZERO-NOT: bzero
define void @foo(i8* %p, i32 %len) {
- call void @llvm.memset.p0i8.i32(i8* %p, i8 0, i32 %len, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* %p, i8 0, i32 %len, i1 false)
ret void
}
diff --git a/llvm/test/CodeGen/X86/fast-isel-call.ll b/llvm/test/CodeGen/X86/fast-isel-call.ll
index 3f394514e2c..2f3f4151424 100644
--- a/llvm/test/CodeGen/X86/fast-isel-call.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-call.ll
@@ -31,10 +31,10 @@ define void @test2(%struct.s* %d) nounwind {
; CHECK: movl %eax, 8(%esp)
}
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
define void @test3(i8* %a) {
- call void @llvm.memset.p0i8.i32(i8* %a, i8 0, i32 100, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* %a, i8 0, i32 100, i1 false)
ret void
; CHECK-LABEL: test3:
; CHECK: movl {{.*}}, (%esp)
@@ -43,10 +43,10 @@ define void @test3(i8* %a) {
; CHECK: calll {{.*}}memset
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
define void @test4(i8* %a, i8* %b) {
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %b, i32 100, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %b, i32 100, i1 false)
ret void
; CHECK-LABEL: test4:
; CHECK: movl {{.*}}, (%esp)
diff --git a/llvm/test/CodeGen/X86/fast-isel-deadcode.ll b/llvm/test/CodeGen/X86/fast-isel-deadcode.ll
index 5381dc4858a..b7eab1a45c6 100644
--- a/llvm/test/CodeGen/X86/fast-isel-deadcode.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-deadcode.ll
@@ -131,10 +131,10 @@ func.exit: ; preds = %if.then.i, %if.else.i, %if.end.5.i
store { <2 x float>, float } %.fca.1.insert.i, { <2 x float>, float }* %tmp, align 8
%2 = bitcast { <2 x float>, float }* %tmp to i8*
%3 = bitcast %struct.FVector* %ref.tmp to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* %2, i64 12, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %3, i8* align 4 %2, i64 12, i1 false)
%4 = bitcast %struct.FVector* %v to i8*
%5 = bitcast %struct.FVector* %ref.tmp to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* %5, i64 12, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %4, i8* align 4 %5, i64 12, i1 false)
%6 = bitcast %struct.FVector* %v to i8*
call void @llvm.lifetime.end.p0i8(i64 12, i8* %6) nounwind
ret i32 0
@@ -142,6 +142,6 @@ func.exit: ; preds = %if.then.i, %if.else.i, %if.end.5.i
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) argmemonly nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) argmemonly nounwind
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) argmemonly nounwind
diff --git a/llvm/test/CodeGen/X86/fast-isel-x86-64.ll b/llvm/test/CodeGen/X86/fast-isel-x86-64.ll
index c87353ed1f5..7fb2670e6d1 100644
--- a/llvm/test/CodeGen/X86/fast-isel-x86-64.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-x86-64.ll
@@ -172,11 +172,11 @@ entry:
; CHECK: callq
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
; rdar://9289488 - fast-isel shouldn't bail out on llvm.memcpy
define void @test15(i8* %a, i8* %b) nounwind {
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 4, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a, i8* align 4 %b, i64 4, i1 false)
ret void
; CHECK-LABEL: test15:
; CHECK-NEXT: movl (%rsi), %eax
diff --git a/llvm/test/CodeGen/X86/force-align-stack-alloca.ll b/llvm/test/CodeGen/X86/force-align-stack-alloca.ll
index 8d42680e199..e9f38e9af62 100644
--- a/llvm/test/CodeGen/X86/force-align-stack-alloca.ll
+++ b/llvm/test/CodeGen/X86/force-align-stack-alloca.ll
@@ -67,10 +67,10 @@ entry:
if.then:
%0 = alloca i8, i32 %i
- call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 %i, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 %i, i1 false)
%call = call i32 @f(i8* %0)
%conv = sext i32 %call to i64
ret i64 %conv
}
-declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/immediate_merging.ll b/llvm/test/CodeGen/X86/immediate_merging.ll
index e1c29191498..a6e36c73467 100644
--- a/llvm/test/CodeGen/X86/immediate_merging.ll
+++ b/llvm/test/CodeGen/X86/immediate_merging.ll
@@ -94,7 +94,7 @@ entry:
ret i32 0
}
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) #1
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) #1
@AA = common global [100 x i8] zeroinitializer, align 1
@@ -121,6 +121,6 @@ define void @foomemset() optsize {
; X64-NEXT: movq %rax, {{.*}}(%rip)
; X64-NEXT: retq
entry:
- call void @llvm.memset.p0i8.i32(i8* getelementptr inbounds ([100 x i8], [100 x i8]* @AA, i32 0, i32 0), i8 33, i32 24, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* getelementptr inbounds ([100 x i8], [100 x i8]* @AA, i32 0, i32 0), i8 33, i32 24, i1 false)
ret void
}
diff --git a/llvm/test/CodeGen/X86/immediate_merging64.ll b/llvm/test/CodeGen/X86/immediate_merging64.ll
index 57f5b3b79d9..12be8bdff83 100644
--- a/llvm/test/CodeGen/X86/immediate_merging64.ll
+++ b/llvm/test/CodeGen/X86/immediate_merging64.ll
@@ -19,7 +19,7 @@ define i1 @imm_multiple_users(i64 %a, i64* %b) optsize {
ret i1 %cmp
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
; Inlined memsets requiring multiple same-sized stores should be lowered using
; the register, rather than immediate, form of stores when optimizing for
@@ -31,6 +31,6 @@ define void @memset_zero(i8* noalias nocapture %D) optsize {
; CHECK-NEXT: movq %rax, 7(%rdi)
; CHECK-NEXT: movq %rax, (%rdi)
; CHECK-NEXT: retq
- tail call void @llvm.memset.p0i8.i64(i8* %D, i8 0, i64 15, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* %D, i8 0, i64 15, i1 false)
ret void
}
diff --git a/llvm/test/CodeGen/X86/lea-opt-memop-check-1.ll b/llvm/test/CodeGen/X86/lea-opt-memop-check-1.ll
index 6ad55d42868..00d47fae25a 100644
--- a/llvm/test/CodeGen/X86/lea-opt-memop-check-1.ll
+++ b/llvm/test/CodeGen/X86/lea-opt-memop-check-1.ll
@@ -6,7 +6,7 @@
target datalayout = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"
target triple = "i686-pc-windows-msvc"
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) argmemonly nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) argmemonly nounwind
declare <2 x i64> @_mm_xor_si128(<2 x i64>, <2 x i64>) optsize
declare <2 x i64> @llvm.x86.pclmulqdq(<2 x i64>, <2 x i64>, i8) nounwind readnone
declare <4 x float> @_mm_castsi128_ps(<2 x i64>) optsize
@@ -15,7 +15,7 @@ declare <4 x float> @_mm_castsi128_ps(<2 x i64>) optsize
define void @test1(i8* nocapture readonly %src, i32 %len) #0 {
%parts = alloca [4 x i32], align 4
%part0 = bitcast [4 x i32]* %parts to i8*
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %part0, i8* %src, i32 %len, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %part0, i8* %src, i32 %len, i1 false)
%call0 = tail call <2 x i64> @_mm_xor_si128(<2 x i64> undef, <2 x i64> <i64 -9187201950435737472, i64 -9187201950435737472>)
%tmp0 = tail call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> undef, <2 x i64> <i64 7631803798, i64 5708721108>, i8 16)
%call1 = tail call <4 x float> @_mm_castsi128_ps(<2 x i64> %tmp0)
diff --git a/llvm/test/CodeGen/X86/load-slice.ll b/llvm/test/CodeGen/X86/load-slice.ll
index 8803512eec0..3cbb70bd70d 100644
--- a/llvm/test/CodeGen/X86/load-slice.ll
+++ b/llvm/test/CodeGen/X86/load-slice.ll
@@ -70,7 +70,7 @@ entry:
}
; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1
; Function Attrs: nounwind
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
diff --git a/llvm/test/CodeGen/X86/lsr-normalization.ll b/llvm/test/CodeGen/X86/lsr-normalization.ll
index a8e3ab1ae99..f56256aa468 100644
--- a/llvm/test/CodeGen/X86/lsr-normalization.ll
+++ b/llvm/test/CodeGen/X86/lsr-normalization.ll
@@ -21,7 +21,7 @@ define i32 @main(i32 %arg, i8** nocapture %arg1) nounwind {
bb:
%tmp = alloca %0, align 8 ; <%0*> [#uses=11]
%tmp2 = bitcast %0* %tmp to i8* ; <i8*> [#uses=1]
- call void @llvm.memset.p0i8.i64(i8* %tmp2, i8 0, i64 16, i32 8, i1 false) nounwind
+ call void @llvm.memset.p0i8.i64(i8* align 8 %tmp2, i8 0, i64 16, i1 false) nounwind
%tmp3 = getelementptr inbounds %0, %0* %tmp, i64 0, i32 0 ; <%0**> [#uses=3]
store %0* %tmp, %0** %tmp3
%tmp4 = getelementptr inbounds %0, %0* %tmp, i64 0, i32 1 ; <%0**> [#uses=1]
@@ -98,7 +98,7 @@ declare void @_ZNSt15_List_node_base4hookEPS_(%0*, %0*)
declare noalias i8* @_Znwm(i64)
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
declare void @_ZdlPv(i8*) nounwind
diff --git a/llvm/test/CodeGen/X86/mcu-abi.ll b/llvm/test/CodeGen/X86/mcu-abi.ll
index 1cc277c863f..baafea26c22 100644
--- a/llvm/test/CodeGen/X86/mcu-abi.ll
+++ b/llvm/test/CodeGen/X86/mcu-abi.ll
@@ -70,7 +70,7 @@ define void @ret_large_struct(%struct.st12_t* noalias nocapture sret %agg.result
entry:
%0 = bitcast %struct.st12_t* %agg.result to i8*
%1 = bitcast %struct.st12_t* %r to i8*
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 48, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 48, i1 false)
ret void
}
@@ -104,7 +104,7 @@ define i32 @test_fp128(fp128* %ptr) #0 {
ret i32 %ret
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #1
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #1
; CHECK-LABEL: test_alignment_d:
; CHECK-NOT: andl {{.+}}, %esp
diff --git a/llvm/test/CodeGen/X86/mem-intrin-base-reg.ll b/llvm/test/CodeGen/X86/mem-intrin-base-reg.ll
index 59a2207b470..1a45ed57b26 100644
--- a/llvm/test/CodeGen/X86/mem-intrin-base-reg.ll
+++ b/llvm/test/CodeGen/X86/mem-intrin-base-reg.ll
@@ -9,12 +9,12 @@ target triple = "i686-pc-windows-msvc"
; which all of the X86 string instructions use.
declare void @escape_vla_and_icmp(i8*, i1 zeroext)
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1)
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1)
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1)
define i32 @memcpy_novla_vector(<4 x i32>* %vp0, i8* %a, i8* %b, i32 %n, i1 zeroext %cond) {
%foo = alloca <4 x i32>, align 16
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %b, i32 128, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %a, i8* align 4 %b, i32 128, i1 false)
br i1 %cond, label %spill_vectors, label %no_vectors
no_vectors:
@@ -40,7 +40,7 @@ spill_vectors:
define i32 @memcpy_vla_vector(<4 x i32>* %vp0, i8* %a, i8* %b, i32 %n, i1 zeroext %cond) {
%foo = alloca <4 x i32>, align 16
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %b, i32 128, i32 4, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %a, i8* align 4 %b, i32 128, i1 false)
br i1 %cond, label %spill_vectors, label %no_vectors
no_vectors:
@@ -69,7 +69,7 @@ spill_vectors:
define i32 @memset_vla_vector(<4 x i32>* %vp0, i8* %a, i32 %n, i1 zeroext %cond) {
%foo = alloca <4 x i32>, align 16
- call void @llvm.memset.p0i8.i32(i8* %a, i8 42, i32 128, i32 4, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* align 4 %a, i8 42, i32 128, i1 false)
br i1 %cond, label %spill_vectors, label %no_vectors
no_vectors:
diff --git a/llvm/test/CodeGen/X86/memcpy-2.ll b/llvm/test/CodeGen/X86/memcpy-2.ll
index 040dd153d64..6deeaa69853 100644
--- a/llvm/test/CodeGen/X86/memcpy-2.ll
+++ b/llvm/test/CodeGen/X86/memcpy-2.ll
@@ -48,7 +48,7 @@ entry:
; X86-64: movq $0
%tmp1 = alloca [25 x i8]
%tmp2 = bitcast [25 x i8]* %tmp1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp2, i8* getelementptr inbounds ([25 x i8], [25 x i8]* @.str, i32 0, i32 0), i32 25, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %tmp2, i8* align 1 getelementptr inbounds ([25 x i8], [25 x i8]* @.str, i32 0, i32 0), i32 25, i1 false)
unreachable
}
@@ -86,7 +86,7 @@ entry:
; X86-64: movaps %xmm0, (%rdi)
%tmp2 = bitcast %struct.s0* %a to i8* ; <i8*> [#uses=1]
%tmp3 = bitcast %struct.s0* %b to i8* ; <i8*> [#uses=1]
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp2, i8* %tmp3, i32 16, i32 16, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %tmp2, i8* align 16 %tmp3, i32 16, i1 false)
ret void
}
@@ -135,7 +135,7 @@ entry:
; X86-64: movq %rax, (%rdi)
%tmp2 = bitcast %struct.s0* %a to i8* ; <i8*> [#uses=1]
%tmp3 = bitcast %struct.s0* %b to i8* ; <i8*> [#uses=1]
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp2, i8* %tmp3, i32 16, i32 8, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %tmp2, i8* align 8 %tmp3, i32 16, i1 false)
ret void
}
@@ -202,8 +202,8 @@ entry:
%tmp1 = alloca [30 x i8]
%tmp2 = bitcast [30 x i8]* %tmp1 to i8*
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp2, i8* getelementptr inbounds ([30 x i8], [30 x i8]* @.str2, i32 0, i32 0), i32 30, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %tmp2, i8* align 1 getelementptr inbounds ([30 x i8], [30 x i8]* @.str2, i32 0, i32 0), i32 30, i1 false)
unreachable
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/memcpy-from-string.ll b/llvm/test/CodeGen/X86/memcpy-from-string.ll
index d62d9e20254..8e2444ebe0e 100644
--- a/llvm/test/CodeGen/X86/memcpy-from-string.ll
+++ b/llvm/test/CodeGen/X86/memcpy-from-string.ll
@@ -17,8 +17,8 @@ target triple = "x86_64-unknown-linux-gnu"
; CHECK: movw $15212, 4(%rdi)
; CHECK: movl $1802117222, (%rdi)
define void @foo(i8* %tmp2) {
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp2, i8* getelementptr inbounds ([10 x i8], [10 x i8]* @0, i64 0, i64 3), i64 7, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp2, i8* getelementptr inbounds ([10 x i8], [10 x i8]* @0, i64 0, i64 3), i64 7, i1 false)
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
diff --git a/llvm/test/CodeGen/X86/memcpy.ll b/llvm/test/CodeGen/X86/memcpy.ll
index 4351014192b..87e350a9039 100644
--- a/llvm/test/CodeGen/X86/memcpy.ll
+++ b/llvm/test/CodeGen/X86/memcpy.ll
@@ -1,14 +1,14 @@
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core2 | FileCheck %s -check-prefix=LINUX
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core2 | FileCheck %s -check-prefix=DARWIN
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
-declare void @llvm.memcpy.p256i8.p256i8.i64(i8 addrspace(256)* nocapture, i8 addrspace(256)* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p256i8.p256i8.i64(i8 addrspace(256)* nocapture, i8 addrspace(256)* nocapture, i64, i1) nounwind
; Variable memcpy's should lower to calls.
define i8* @test1(i8* %a, i8* %b, i64 %n) nounwind {
entry:
- tail call void @llvm.memcpy.p0i8.p0i8.i64( i8* %a, i8* %b, i64 %n, i32 1, i1 0 )
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 %n, i1 0 )
ret i8* %a
; LINUX-LABEL: test1:
@@ -20,7 +20,7 @@ define i8* @test2(i64* %a, i64* %b, i64 %n) nounwind {
entry:
%tmp14 = bitcast i64* %a to i8*
%tmp25 = bitcast i64* %b to i8*
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp14, i8* %tmp25, i64 %n, i32 8, i1 0 )
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %tmp14, i8* align 8 %tmp25, i64 %n, i1 0 )
ret i8* %tmp14
; LINUX-LABEL: test2:
@@ -35,7 +35,7 @@ entry:
; rdar://8821501
define void @test3(i8* nocapture %A, i8* nocapture %B) nounwind optsize noredzone {
entry:
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i1 false)
ret void
; LINUX-LABEL: test3:
; LINUX: memcpy
@@ -61,7 +61,7 @@ entry:
}
define void @test3_minsize(i8* nocapture %A, i8* nocapture %B) nounwind minsize noredzone {
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i1 false)
ret void
; LINUX-LABEL: test3_minsize:
; LINUX: memcpy
@@ -71,7 +71,7 @@ define void @test3_minsize(i8* nocapture %A, i8* nocapture %B) nounwind minsize
}
define void @test3_minsize_optsize(i8* nocapture %A, i8* nocapture %B) nounwind optsize minsize noredzone {
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i1 false)
ret void
; LINUX-LABEL: test3_minsize_optsize:
; LINUX: memcpy
@@ -83,7 +83,7 @@ define void @test3_minsize_optsize(i8* nocapture %A, i8* nocapture %B) nounwind
; Large constant memcpy's should be inlined when not optimizing for size.
define void @test4(i8* nocapture %A, i8* nocapture %B) nounwind noredzone {
entry:
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i1 false)
ret void
; LINUX-LABEL: test4:
; LINUX: movq
@@ -105,7 +105,7 @@ entry:
define void @test5(i8* nocapture %C) nounwind uwtable ssp {
entry:
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([30 x i8], [30 x i8]* @.str, i64 0, i64 0), i64 16, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([30 x i8], [30 x i8]* @.str, i64 0, i64 0), i64 16, i1 false)
ret void
; DARWIN-LABEL: test5:
@@ -122,7 +122,7 @@ entry:
; DARWIN: test6
; DARWIN: movw $0, 8
; DARWIN: movq $120, 0
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* null, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str2, i64 0, i64 0), i64 10, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* null, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str2, i64 0, i64 0), i64 10, i1 false)
ret void
}
@@ -136,14 +136,14 @@ define void @PR15348(i8* %a, i8* %b) {
; LINUX: movq
; LINUX: movq
; LINUX: movq
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 17, i32 0, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 17, i1 false)
ret void
}
; Memcpys from / to address space 256 should be lowered to appropriate loads /
; stores if small enough.
define void @addrspace256(i8 addrspace(256)* %a, i8 addrspace(256)* %b) nounwind {
- tail call void @llvm.memcpy.p256i8.p256i8.i64(i8 addrspace(256)* %a, i8 addrspace(256)* %b, i64 16, i32 8, i1 false)
+ tail call void @llvm.memcpy.p256i8.p256i8.i64(i8 addrspace(256)* align 8 %a, i8 addrspace(256)* align 8 %b, i64 16, i1 false)
ret void
; LINUX-LABEL: addrspace256:
; LINUX: movq %gs:
diff --git a/llvm/test/CodeGen/X86/memset-2.ll b/llvm/test/CodeGen/X86/memset-2.ll
index e94432884b1..a0511f2804a 100644
--- a/llvm/test/CodeGen/X86/memset-2.ll
+++ b/llvm/test/CodeGen/X86/memset-2.ll
@@ -11,7 +11,7 @@ define fastcc void @t1() nounwind {
; CHECK-NEXT: calll _memset
; CHECK-NEXT: addl $16, %esp
entry:
- call void @llvm.memset.p0i8.i32(i8* null, i8 0, i32 188, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* null, i8 0, i32 188, i1 false)
unreachable
}
@@ -23,11 +23,11 @@ define fastcc void @t2(i8 signext %c) nounwind {
; CHECK-NEXT: movl $76, {{[0-9]+}}(%esp)
; CHECK-NEXT: calll _memset
entry:
- call void @llvm.memset.p0i8.i32(i8* undef, i8 %c, i32 76, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* undef, i8 %c, i32 76, i1 false)
unreachable
}
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
define void @t3(i8* nocapture %s, i8 %a) nounwind {
; CHECK-LABEL: t3:
@@ -39,7 +39,7 @@ define void @t3(i8* nocapture %s, i8 %a) nounwind {
; CHECK-NEXT: movl %ecx, (%eax)
; CHECK-NEXT: retl
entry:
- tail call void @llvm.memset.p0i8.i32(i8* %s, i8 %a, i32 8, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %s, i8 %a, i32 8, i1 false)
ret void
}
@@ -56,6 +56,6 @@ define void @t4(i8* nocapture %s, i8 %a) nounwind {
; CHECK-NEXT: movb %cl, 14(%eax)
; CHECK-NEXT: retl
entry:
- tail call void @llvm.memset.p0i8.i32(i8* %s, i8 %a, i32 15, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %s, i8 %a, i32 15, i1 false)
ret void
}
diff --git a/llvm/test/CodeGen/X86/memset-3.ll b/llvm/test/CodeGen/X86/memset-3.ll
index 455e6756013..47c7ab99d29 100644
--- a/llvm/test/CodeGen/X86/memset-3.ll
+++ b/llvm/test/CodeGen/X86/memset-3.ll
@@ -5,8 +5,8 @@ define void @t() nounwind ssp {
entry:
%buf = alloca [512 x i8], align 1
%ptr = getelementptr inbounds [512 x i8], [512 x i8]* %buf, i32 0, i32 0
- call void @llvm.memset.p0i8.i32(i8* %ptr, i8 undef, i32 512, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* %ptr, i8 undef, i32 512, i1 false)
unreachable
}
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/memset-nonzero.ll b/llvm/test/CodeGen/X86/memset-nonzero.ll
index cc434bf18ab..37b98b40192 100644
--- a/llvm/test/CodeGen/X86/memset-nonzero.ll
+++ b/llvm/test/CodeGen/X86/memset-nonzero.ll
@@ -225,7 +225,7 @@ define void @memset_16_nonconst_bytes(i8* %x, i8 %c) {
; AVX2-NEXT: vpbroadcastb %xmm0, %xmm0
; AVX2-NEXT: vmovdqu %xmm0, (%rdi)
; AVX2-NEXT: retq
- tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 16, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 16, i1 false)
ret void
}
@@ -268,7 +268,7 @@ define void @memset_32_nonconst_bytes(i8* %x, i8 %c) {
; AVX2-NEXT: vmovdqu %ymm0, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
- tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 32, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 32, i1 false)
ret void
}
@@ -319,7 +319,7 @@ define void @memset_64_nonconst_bytes(i8* %x, i8 %c) {
; AVX2-NEXT: vmovdqu %ymm0, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
- tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 64, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 64, i1 false)
ret void
}
@@ -386,7 +386,7 @@ define void @memset_128_nonconst_bytes(i8* %x, i8 %c) {
; AVX2-NEXT: vmovdqu %ymm0, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
- tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 128, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 128, i1 false)
ret void
}
@@ -451,9 +451,9 @@ define void @memset_256_nonconst_bytes(i8* %x, i8 %c) {
; AVX2-NEXT: vmovdqu %ymm0, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
- tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 256, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* %x, i8 %c, i64 256, i1 false)
ret void
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #1
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #1
diff --git a/llvm/test/CodeGen/X86/memset-sse-stack-realignment.ll b/llvm/test/CodeGen/X86/memset-sse-stack-realignment.ll
index d77a7ed3816..68fa15e3398 100644
--- a/llvm/test/CodeGen/X86/memset-sse-stack-realignment.ll
+++ b/llvm/test/CodeGen/X86/memset-sse-stack-realignment.ll
@@ -9,7 +9,7 @@
define void @test1(i32 %t) nounwind {
%tmp1210 = alloca i8, i32 32, align 4
- call void @llvm.memset.p0i8.i64(i8* %tmp1210, i8 0, i64 32, i32 4, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 4 %tmp1210, i8 0, i64 32, i1 false)
%x = alloca i8, i32 %t
call void @dummy(i8* %x)
ret void
@@ -42,7 +42,7 @@ define void @test1(i32 %t) nounwind {
define void @test2(i32 %t) nounwind {
%tmp1210 = alloca i8, i32 16, align 4
- call void @llvm.memset.p0i8.i64(i8* %tmp1210, i8 0, i64 16, i32 4, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 4 %tmp1210, i8 0, i64 16, i1 false)
%x = alloca i8, i32 %t
call void @dummy(i8* %x)
ret void
@@ -74,4 +74,4 @@ define void @test2(i32 %t) nounwind {
declare void @dummy(i8*)
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/memset.ll b/llvm/test/CodeGen/X86/memset.ll
index c9d8fbd58aa..6d5c4cd0f8a 100644
--- a/llvm/test/CodeGen/X86/memset.ll
+++ b/llvm/test/CodeGen/X86/memset.ll
@@ -58,14 +58,14 @@ entry:
%up_mvd116 = getelementptr [8 x %struct.x], [8 x %struct.x]* %up_mvd, i32 0, i32 0 ; <%struct.x*> [#uses=1]
%tmp110117 = bitcast [8 x %struct.x]* %up_mvd to i8* ; <i8*> [#uses=1]
- call void @llvm.memset.p0i8.i64(i8* %tmp110117, i8 0, i64 32, i32 8, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 8 %tmp110117, i8 0, i64 32, i1 false)
call void @foo( %struct.x* %up_mvd116 ) nounwind
ret void
}
declare void @foo(%struct.x*)
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
; Ensure that alignment of '0' in an @llvm.memset intrinsic results in
; unaligned loads and stores.
@@ -97,6 +97,6 @@ define void @PR15348(i8* %a) {
; YMM-NEXT: vmovups %xmm0, (%eax)
; YMM-NEXT: movb $0, 16(%eax)
; YMM-NEXT: retl
- call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 17, i32 0, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 17, i1 false)
ret void
}
diff --git a/llvm/test/CodeGen/X86/memset64-on-x86-32.ll b/llvm/test/CodeGen/X86/memset64-on-x86-32.ll
index 0fc21920409..f9707c66863 100644
--- a/llvm/test/CodeGen/X86/memset64-on-x86-32.ll
+++ b/llvm/test/CodeGen/X86/memset64-on-x86-32.ll
@@ -51,9 +51,9 @@ define void @bork() nounwind {
; SLOW_64-NEXT: movq $0, 8
; SLOW_64-NEXT: movq $0, 0
; SLOW_64-NEXT: retq
- call void @llvm.memset.p0i8.i64(i8* null, i8 0, i64 80, i32 4, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 4 null, i8 0, i64 80, i1 false)
ret void
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/misaligned-memset.ll b/llvm/test/CodeGen/X86/misaligned-memset.ll
index ef8e0e81ad7..f7a6d577c08 100644
--- a/llvm/test/CodeGen/X86/misaligned-memset.ll
+++ b/llvm/test/CodeGen/X86/misaligned-memset.ll
@@ -7,9 +7,9 @@ define i32 @main() nounwind ssp {
entry:
%retval = alloca i32, align 4
store i32 0, i32* %retval
- call void @llvm.memset.p0i8.i64(i8* bitcast (i64* getelementptr inbounds ([3 x i64], [3 x i64]* @a, i32 0, i64 1) to i8*), i8 0, i64 16, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* bitcast (i64* getelementptr inbounds ([3 x i64], [3 x i64]* @a, i32 0, i64 1) to i8*), i8 0, i64 16, i1 false)
%0 = load i32, i32* %retval
ret i32 %0
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/misched-new.ll b/llvm/test/CodeGen/X86/misched-new.ll
index 4e42c931454..5a93577a214 100644
--- a/llvm/test/CodeGen/X86/misched-new.ll
+++ b/llvm/test/CodeGen/X86/misched-new.ll
@@ -11,7 +11,7 @@
; FIXME: There should be an assert in the coalescer that we're not rematting
; "not-quite-dead" copies, but that breaks a lot of tests <rdar://problem/11148682>.
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
; From oggenc.
; After coalescing, we have a dead superreg (RAX) definition.
@@ -24,7 +24,7 @@ entry:
br i1 undef, label %for.cond.preheader, label %if.end
for.cond.preheader: ; preds = %entry
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* undef, i8* null, i64 128, i32 4, i1 false) nounwind
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 undef, i8* align 4 null, i64 128, i1 false) nounwind
unreachable
if.end: ; preds = %entry
diff --git a/llvm/test/CodeGen/X86/negate-add-zero.ll b/llvm/test/CodeGen/X86/negate-add-zero.ll
index 64f20a6f81b..beb87e3e903 100644
--- a/llvm/test/CodeGen/X86/negate-add-zero.ll
+++ b/llvm/test/CodeGen/X86/negate-add-zero.ll
@@ -1133,4 +1133,4 @@ declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIdLi5EL
declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIdLi5ELi6EERSoS0_RK15FixedMatrixBaseIT_XT0_EXT1_EE(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %"struct.FixedMatrixBase<double,5,6>"*)
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/optimize-max-0.ll b/llvm/test/CodeGen/X86/optimize-max-0.ll
index 2dde95738d1..b5e8627a88b 100644
--- a/llvm/test/CodeGen/X86/optimize-max-0.ll
+++ b/llvm/test/CodeGen/X86/optimize-max-0.ll
@@ -173,7 +173,7 @@ bb23: ; preds = %bb24, %bb.nph
%47 = mul i32 %y.21, %w
%.sum5 = add i32 %47, %.sum3
%48 = getelementptr i8, i8* %j, i32 %.sum5
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %48, i8* %46, i32 %w, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %48, i8* %46, i32 %w, i1 false)
br label %bb24
bb24: ; preds = %bb23
@@ -190,7 +190,7 @@ bb26: ; preds = %bb24.bb26_crit_edge
%50 = getelementptr i8, i8* %j, i32 %.sum4
%51 = mul i32 %x, %w
%52 = sdiv i32 %51, 2
- tail call void @llvm.memset.p0i8.i32(i8* %50, i8 -128, i32 %52, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %50, i8 -128, i32 %52, i1 false)
ret void
bb29: ; preds = %bb20, %entry
@@ -208,7 +208,7 @@ bb30: ; preds = %bb31, %bb.nph11
%57 = getelementptr i8, i8* %r, i32 %56
%58 = mul i32 %y.310, %w
%59 = getelementptr i8, i8* %j, i32 %58
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %59, i8* %57, i32 %w, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %59, i8* %57, i32 %w, i1 false)
br label %bb31
bb31: ; preds = %bb30
@@ -224,7 +224,7 @@ bb33: ; preds = %bb31.bb33_crit_edge
%61 = getelementptr i8, i8* %j, i32 %60
%62 = mul i32 %x, %w
%63 = sdiv i32 %62, 2
- tail call void @llvm.memset.p0i8.i32(i8* %61, i8 -128, i32 %63, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %61, i8 -128, i32 %63, i1 false)
ret void
return: ; preds = %bb20
@@ -398,7 +398,7 @@ bb23: ; preds = %bb24, %bb.nph
%47 = mul i32 %y.21, %w
%.sum5 = add i32 %47, %.sum3
%48 = getelementptr i8, i8* %j, i32 %.sum5
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %48, i8* %46, i32 %w, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %48, i8* %46, i32 %w, i1 false)
br label %bb24
bb24: ; preds = %bb23
@@ -415,7 +415,7 @@ bb26: ; preds = %bb24.bb26_crit_edge
%50 = getelementptr i8, i8* %j, i32 %.sum4
%51 = mul i32 %x, %w
%52 = udiv i32 %51, 2
- tail call void @llvm.memset.p0i8.i32(i8* %50, i8 -128, i32 %52, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %50, i8 -128, i32 %52, i1 false)
ret void
bb29: ; preds = %bb20, %entry
@@ -433,7 +433,7 @@ bb30: ; preds = %bb31, %bb.nph11
%57 = getelementptr i8, i8* %r, i32 %56
%58 = mul i32 %y.310, %w
%59 = getelementptr i8, i8* %j, i32 %58
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %59, i8* %57, i32 %w, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %59, i8* %57, i32 %w, i1 false)
br label %bb31
bb31: ; preds = %bb30
@@ -449,13 +449,13 @@ bb33: ; preds = %bb31.bb33_crit_edge
%61 = getelementptr i8, i8* %j, i32 %60
%62 = mul i32 %x, %w
%63 = udiv i32 %62, 2
- tail call void @llvm.memset.p0i8.i32(i8* %61, i8 -128, i32 %63, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %61, i8 -128, i32 %63, i1 false)
ret void
return: ; preds = %bb20
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/pr11985.ll b/llvm/test/CodeGen/X86/pr11985.ll
index 94b37215f63..99084d61140 100644
--- a/llvm/test/CodeGen/X86/pr11985.ll
+++ b/llvm/test/CodeGen/X86/pr11985.ll
@@ -24,7 +24,7 @@ define float @foo(i8* nocapture %buf, float %a, float %b) nounwind uwtable {
; NEHALEM-NEXT: movups %xmm2, (%rdi)
entry:
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %buf, i8* blockaddress(@foo, %out), i64 22, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %buf, i8* blockaddress(@foo, %out), i64 22, i1 false)
br label %out
out: ; preds = %entry
@@ -32,4 +32,4 @@ out: ; preds = %entry
ret float %add
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/pr14333.ll b/llvm/test/CodeGen/X86/pr14333.ll
index 89779302d7f..8298ca5a7d6 100644
--- a/llvm/test/CodeGen/X86/pr14333.ll
+++ b/llvm/test/CodeGen/X86/pr14333.ll
@@ -6,7 +6,7 @@ define void @bar(%foo* %zed) {
%tmp2 = getelementptr inbounds %foo, %foo* %zed, i64 0, i32 1
store i64 0, i64* %tmp2, align 8
%tmp3 = bitcast %foo* %zed to i8*
- call void @llvm.memset.p0i8.i64(i8* %tmp3, i8 0, i64 16, i32 8, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 8 %tmp3, i8 0, i64 16, i1 false)
ret void
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/pr34088.ll b/llvm/test/CodeGen/X86/pr34088.ll
index 4fa24a50648..2fb000f3538 100644
--- a/llvm/test/CodeGen/X86/pr34088.ll
+++ b/llvm/test/CodeGen/X86/pr34088.ll
@@ -31,13 +31,13 @@ define i32 @pr34088() local_unnamed_addr {
entry:
%foo = alloca %struct.Foo, align 4
%0 = bitcast %struct.Foo* %foo to i8*
- call void @llvm.memset.p0i8.i32(i8* nonnull %0, i8 0, i32 20, i32 4, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* align 4 nonnull %0, i8 0, i32 20, i1 false)
%buffer1 = getelementptr inbounds %struct.Foo, %struct.Foo* %foo, i32 0, i32 1, i32 1
%1 = bitcast %struct.Buffer* %buffer1 to i64*
%2 = load i64, i64* %1, align 4
- call void @llvm.memset.p0i8.i32(i8* nonnull %0, i8 -51, i32 20, i32 4, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* align 4 nonnull %0, i8 -51, i32 20, i1 false)
store i64 %2, i64* %1, align 4
ret i32 0
}
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i32, i1)
+declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1)
diff --git a/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll b/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
index 0178c9ec1c9..a6d4c6e97bc 100644
--- a/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
+++ b/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
@@ -63,7 +63,7 @@ SyTime.exit2720:
br i1 %cmp293427, label %for.body.lr.ph, label %while.body.preheader
for.body.lr.ph:
- call void @llvm.memset.p0i8.i64(i8* undef, i8 32, i64 512, i32 16, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 16 undef, i8 32, i64 512, i1 false)
br label %while.body.preheader
while.body.preheader:
@@ -377,7 +377,7 @@ cleanup:
declare i32 @fileno(%struct.TMP.2* nocapture)
declare i64 @"\01_write"(i32, i8*, i64)
declare i32 @__maskrune(i32, i64)
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
!llvm.ident = !{!0}
diff --git a/llvm/test/CodeGen/X86/regparm.ll b/llvm/test/CodeGen/X86/regparm.ll
index f427010edc5..01a734f9f47 100644
--- a/llvm/test/CodeGen/X86/regparm.ll
+++ b/llvm/test/CodeGen/X86/regparm.ll
@@ -9,7 +9,7 @@ target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
target triple = "i386-unknown-linux-gnu"
; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i32, i1) #1
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
define void @use_memset(i8* inreg nocapture %dest, i8 inreg %c, i32 inreg %n) local_unnamed_addr #0 {
entry:
@@ -30,12 +30,12 @@ entry:
;FASTWIN: movzbl %dl, %edx
;FASTWIN-NEXT: calll _memset
;FASTWIN-NEXT: retl
- tail call void @llvm.memset.p0i8.i32(i8* %dest, i8 %c, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %dest, i8 %c, i32 %n, i1 false)
ret void
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i32, i1) #1
+declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) #1
attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="pentium4" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/CodeGen/X86/remat-fold-load.ll b/llvm/test/CodeGen/X86/remat-fold-load.ll
index 3478033bfbf..e640974bdd2 100644
--- a/llvm/test/CodeGen/X86/remat-fold-load.ll
+++ b/llvm/test/CodeGen/X86/remat-fold-load.ll
@@ -16,7 +16,7 @@ target triple = "i386-unknown-linux-gnu"
%type_d = type { i64 }
%type_e = type { %type_c, i64 }
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
define linkonce_odr void @test() nounwind {
entry:
@@ -41,7 +41,7 @@ if.then.i.i.i.i71: ; preds = %while.body12
%tmp1 = getelementptr inbounds %type_a, %type_a* %tmp, i32 0, i32 1, i32 0, i32 1
%buf_6.i.i.i.i70 = bitcast %type_d* %tmp1 to i8**
%tmp2 = load i8*, i8** %buf_6.i.i.i.i70, align 4
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* %tmp2, i32 undef, i32 1, i1 false) nounwind
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* %tmp2, i32 undef, i1 false) nounwind
unreachable
if.else.i.i.i.i74: ; preds = %while.body12
@@ -69,7 +69,7 @@ if.then.i.i.i.i92: ; preds = %if.else.i.i.i.i74
%tmp12 = getelementptr inbounds %type_e, %type_e* %tmp9, i32 0, i32 0, i32 1
%buf_6.i.i.i.i91 = bitcast %type_d* %tmp12 to i8**
%tmp13 = load i8*, i8** %buf_6.i.i.i.i91, align 4
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %call4.i.i.i.i89, i8* %tmp13, i32 %tmp10, i32 1, i1 false) nounwind
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %call4.i.i.i.i89, i8* %tmp13, i32 %tmp10, i1 false) nounwind
br label %A
if.else.i.i.i.i95: ; preds = %if.else.i.i.i.i74
diff --git a/llvm/test/CodeGen/X86/slow-unaligned-mem.ll b/llvm/test/CodeGen/X86/slow-unaligned-mem.ll
index a3a21892339..54c248f3b04 100644
--- a/llvm/test/CodeGen/X86/slow-unaligned-mem.ll
+++ b/llvm/test/CodeGen/X86/slow-unaligned-mem.ll
@@ -88,9 +88,9 @@ define void @store_zeros(i8* %a) {
; FAST: # %bb.0:
; FAST-NEXT: movl {{[0-9]+}}(%esp), %eax
; FAST-NOT: movl
- call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 64, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 64, i1 false)
ret void
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
diff --git a/llvm/test/CodeGen/X86/small-byval-memcpy.ll b/llvm/test/CodeGen/X86/small-byval-memcpy.ll
index 3c03750199c..c5c9a3d8416 100644
--- a/llvm/test/CodeGen/X86/small-byval-memcpy.ll
+++ b/llvm/test/CodeGen/X86/small-byval-memcpy.ll
@@ -2,10 +2,10 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=nehalem | FileCheck %s --check-prefix=NEHALEM
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=btver2 | FileCheck %s --check-prefix=BTVER2
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
define void @copy16bytes(i8* nocapture %a, i8* nocapture readonly %b) {
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 16, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 16, i1 false)
ret void
; CHECK-LABEL: copy16bytes
diff --git a/llvm/test/CodeGen/X86/stack-align.ll b/llvm/test/CodeGen/X86/stack-align.ll
index 192306462d1..338ced0ebf1 100644
--- a/llvm/test/CodeGen/X86/stack-align.ll
+++ b/llvm/test/CodeGen/X86/stack-align.ll
@@ -71,7 +71,7 @@ define x86_stdcallcc void @test5(%struct.sixteen* byval nocapture readonly align
%1 = getelementptr inbounds [16 x i8], [16 x i8]* %d.sroa.0, i32 0, i32 0
call void @llvm.lifetime.start.p0i8(i64 16, i8* %1)
%2 = getelementptr inbounds %struct.sixteen, %struct.sixteen* %s, i32 0, i32 0, i32 0
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %2, i32 16, i32 1, i1 true)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %2, i32 16, i1 true)
call void @llvm.lifetime.end.p0i8(i64 16, i8* %1)
ret void
; CHECK-LABEL: test5:
@@ -84,7 +84,7 @@ define x86_stdcallcc void @test5(%struct.sixteen* byval nocapture readonly align
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) argmemonly nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) argmemonly nounwind
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) argmemonly nounwind
diff --git a/llvm/test/CodeGen/X86/stack-protector.ll b/llvm/test/CodeGen/X86/stack-protector.ll
index 5166ed5b02a..d4eee18244f 100644
--- a/llvm/test/CodeGen/X86/stack-protector.ll
+++ b/llvm/test/CodeGen/X86/stack-protector.ll
@@ -3768,7 +3768,7 @@ entry:
%test.coerce = alloca { i64, i8 }
%0 = bitcast { i64, i8 }* %test.coerce to i8*
%1 = bitcast %struct.small_char* %test to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 12, i32 0, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 12, i1 false)
%2 = getelementptr { i64, i8 }, { i64, i8 }* %test.coerce, i32 0, i32 0
%3 = load i64, i64* %2, align 1
%4 = getelementptr { i64, i8 }, { i64, i8 }* %test.coerce, i32 0, i32 1
@@ -3806,7 +3806,7 @@ entry:
%test.coerce = alloca { i64, i8 }
%0 = bitcast { i64, i8 }* %test.coerce to i8*
%1 = bitcast %struct.small_char* %test to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 12, i32 0, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 12, i1 false)
%2 = getelementptr { i64, i8 }, { i64, i8 }* %test.coerce, i32 0, i32 0
%3 = load i64, i64* %2, align 1
%4 = getelementptr { i64, i8 }, { i64, i8 }* %test.coerce, i32 0, i32 1
@@ -3922,7 +3922,7 @@ declare void @_Z3exceptPi(i32*)
declare i32 @__gxx_personality_v0(...)
declare i32* @getp()
declare i32 @dummy(...)
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
attributes #0 = { ssp }
attributes #1 = { sspstrong }
diff --git a/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll b/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
index 197fd72586a..736a6d8500d 100644
--- a/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
+++ b/llvm/test/CodeGen/X86/tail-dup-merge-loop-headers.ll
@@ -97,7 +97,7 @@ if.end19: ; preds = %entry
br i1 %or.cond203, label %cleanup, label %if.end50
if.end50: ; preds = %if.end19
- tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull %call, i8* undef, i64 %conv, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull %call, i8* undef, i64 %conv, i1 false)
%cmp1.i.i = icmp ugt i32 %mul, 3
br i1 %cmp1.i.i, label %shared_preheader, label %wunpsect.exit.thread.loopexit391
@@ -185,6 +185,6 @@ declare void @cli_dbgmsg(i8*, ...) local_unnamed_addr #0
declare i8* @cli_calloc(i64, i64) local_unnamed_addr #0
; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1) #1
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
attributes #0 = { nounwind }
attributes #1 = { argmemonly nounwind }
diff --git a/llvm/test/CodeGen/X86/tailcall-mem-intrinsics.ll b/llvm/test/CodeGen/X86/tailcall-mem-intrinsics.ll
index 7491ea659ba..ee3489701df 100644
--- a/llvm/test/CodeGen/X86/tailcall-mem-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/tailcall-mem-intrinsics.ll
@@ -4,7 +4,7 @@
; CHECK: jmp memcpy
define void @tail_memcpy(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 {
entry:
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false)
ret void
}
@@ -12,7 +12,7 @@ entry:
; CHECK: jmp memmove
define void @tail_memmove(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 {
entry:
- tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false)
ret void
}
@@ -20,7 +20,7 @@ entry:
; CHECK: jmp memset
define void @tail_memset(i8* nocapture %p, i8 %c, i32 %n) #0 {
entry:
- tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i1 false)
ret void
}
@@ -28,7 +28,7 @@ entry:
; CHECK: jmp memcpy
define i8* @tail_memcpy_ret(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 {
entry:
- tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false)
ret i8* %p
}
@@ -36,7 +36,7 @@ entry:
; CHECK: jmp memmove
define i8* @tail_memmove_ret(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 {
entry:
- tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false)
ret i8* %p
}
@@ -44,12 +44,12 @@ entry:
; CHECK: jmp memset
define i8* @tail_memset_ret(i8* nocapture %p, i8 %c, i32 %n) #0 {
entry:
- tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i32 1, i1 false)
+ tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i1 false)
ret i8* %p
}
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #0
-declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #0
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) #0
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0
+declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) #0
attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/X86/tlv-1.ll b/llvm/test/CodeGen/X86/tlv-1.ll
index 5f017d31dbb..0dbd00c55eb 100644
--- a/llvm/test/CodeGen/X86/tlv-1.ll
+++ b/llvm/test/CodeGen/X86/tlv-1.ll
@@ -7,7 +7,7 @@
define void @main() nounwind ssp {
; CHECK-LABEL: main:
entry:
- call void @llvm.memset.p0i8.i64(i8* getelementptr inbounds (%struct.A, %struct.A* @c, i32 0, i32 0, i32 0), i8 0, i64 60, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* getelementptr inbounds (%struct.A, %struct.A* @c, i32 0, i32 0, i32 0), i8 0, i64 60, i1 false)
unreachable
; CHECK: movq _c@TLVP(%rip), %rdi
; CHECK-NEXT: callq *(%rdi)
@@ -31,7 +31,7 @@ entry:
ret i32 %sub
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
@a = thread_local global i32 0 ; <i32*> [#uses=0]
@b = thread_local global i32 0 ; <i32*> [#uses=0]
diff --git a/llvm/test/CodeGen/X86/unaligned-load.ll b/llvm/test/CodeGen/X86/unaligned-load.ll
index 644a3644730..1ceca2cd06d 100644
--- a/llvm/test/CodeGen/X86/unaligned-load.ll
+++ b/llvm/test/CodeGen/X86/unaligned-load.ll
@@ -12,14 +12,14 @@ entry:
bb: ; preds = %bb, %entry
%String2Loc9 = getelementptr inbounds [31 x i8], [31 x i8]* %String2Loc, i64 0, i64 0
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %String2Loc9, i8* getelementptr inbounds ([31 x i8], [31 x i8]* @.str3, i64 0, i64 0), i64 31, i32 1, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %String2Loc9, i8* getelementptr inbounds ([31 x i8], [31 x i8]* @.str3, i64 0, i64 0), i64 31, i1 false)
br label %bb
return: ; No predecessors!
ret void
}
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
; I386: calll {{_?}}memcpy
diff --git a/llvm/test/CodeGen/X86/unused_stackslots.ll b/llvm/test/CodeGen/X86/unused_stackslots.ll
index 82fd3db1ccb..dca01275ca7 100644
--- a/llvm/test/CodeGen/X86/unused_stackslots.ll
+++ b/llvm/test/CodeGen/X86/unused_stackslots.ll
@@ -202,14 +202,14 @@ land.lhs.true54: ; preds = %for.end50
br i1 %tobool56, label %for.inc73, label %for.body61.preheader
for.body61.preheader: ; preds = %land.lhs.true54
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 4, i64 0) to i8*), i8* %tmp1, i64 32, i32 16, i1 false)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 6, i64 0) to i8*), i8* %tmp2, i64 32, i32 16, i1 false)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 8, i64 0) to i8*), i8* %tmp3, i64 32, i32 16, i1 false)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 10, i64 0) to i8*), i8* %tmp4, i64 32, i32 16, i1 false)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 12, i64 0) to i8*), i8* %tmp5, i64 32, i32 16, i1 false)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 14, i64 0) to i8*), i8* %tmp6, i64 32, i32 16, i1 false)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 16, i64 0) to i8*), i8* %tmp7, i64 32, i32 16, i1 false)
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 18, i64 0) to i8*), i8* %tmp8, i64 32, i32 16, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 4, i64 0) to i8*), i8* align 16 %tmp1, i64 32, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 6, i64 0) to i8*), i8* align 16 %tmp2, i64 32, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 8, i64 0) to i8*), i8* align 16 %tmp3, i64 32, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 10, i64 0) to i8*), i8* align 16 %tmp4, i64 32, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 12, i64 0) to i8*), i8* align 16 %tmp5, i64 32, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 14, i64 0) to i8*), i8* align 16 %tmp6, i64 32, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 16, i64 0) to i8*), i8* align 16 %tmp7, i64 32, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 nonnull bitcast (i32* getelementptr ([4 x i32], [4 x i32]* @e, i64 18, i64 0) to i8*), i8* align 16 %tmp8, i64 32, i1 false)
%call70 = tail call i32 @distortion4x4(i32* nonnull getelementptr inbounds ([4 x i32], [4 x i32]* @e, i64 0, i64 0)) #3
%add71 = add nsw i32 %call70, %m.3.lcssa.lcssa
br label %for.inc73
@@ -234,7 +234,7 @@ declare void @LumaPrediction4x4(i32, i32, i32, i32, i32, i16 signext, i16 signex
declare i32 @distortion4x4(i32*) #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1
; Function Attrs: argmemonly nounwind
declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
diff --git a/llvm/test/CodeGen/X86/unwindraise.ll b/llvm/test/CodeGen/X86/unwindraise.ll
index db39f4ed455..2da07fb1753 100644
--- a/llvm/test/CodeGen/X86/unwindraise.ll
+++ b/llvm/test/CodeGen/X86/unwindraise.ll
@@ -34,7 +34,7 @@ entry:
call fastcc void @uw_init_context_1(%struct._Unwind_Context* %this_context, i8* %0, i8* %1)
%2 = bitcast %struct._Unwind_Context* %cur_context to i8*
%3 = bitcast %struct._Unwind_Context* %this_context to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %3, i64 240, i32 8, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %2, i8* align 8 %3, i64 240, i1 false)
%personality = getelementptr inbounds %struct._Unwind_FrameState, %struct._Unwind_FrameState* %fs, i64 0, i32 6
%retaddr_column.i = getelementptr inbounds %struct._Unwind_FrameState, %struct._Unwind_FrameState* %fs, i64 0, i32 9
%flags.i.i.i.i = getelementptr inbounds %struct._Unwind_Context, %struct._Unwind_Context* %cur_context, i64 0, i32 5
@@ -121,7 +121,7 @@ while.end: ; preds = %if.then4
%16 = ptrtoint i8* %15 to i64
%private_2 = getelementptr inbounds %struct._Unwind_Exception, %struct._Unwind_Exception* %exc, i64 0, i32 3
store i64 %16, i64* %private_2, align 8
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %3, i64 240, i32 8, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %2, i8* align 8 %3, i64 240, i1 false)
%17 = bitcast %struct._Unwind_FrameState* %fs.i to i8*
call void @llvm.lifetime.start.p0i8(i64 -1, i8* %17)
%personality.i = getelementptr inbounds %struct._Unwind_FrameState, %struct._Unwind_FrameState* %fs.i, i64 0, i32 6
@@ -234,7 +234,7 @@ declare i8* @llvm.eh.dwarf.cfa(i32) nounwind
declare i8* @llvm.returnaddress(i32) nounwind readnone
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
declare fastcc i64 @uw_install_context_1(%struct._Unwind_Context*, %struct._Unwind_Context*) uwtable
diff --git a/llvm/test/CodeGen/X86/variable-sized-darwin-bzero.ll b/llvm/test/CodeGen/X86/variable-sized-darwin-bzero.ll
index 3971190f02c..55c5dd39d12 100644
--- a/llvm/test/CodeGen/X86/variable-sized-darwin-bzero.ll
+++ b/llvm/test/CodeGen/X86/variable-sized-darwin-bzero.ll
@@ -1,8 +1,8 @@
; RUN: llc < %s -mtriple=i686-apple-darwin10 | grep __bzero
define void @foo(i8* %p, i64 %n) {
- call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 %n, i32 4, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 4 %p, i8 0, i64 %n, i1 false)
ret void
}
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/CodeGen/X86/vectorcall.ll b/llvm/test/CodeGen/X86/vectorcall.ll
index 598a339ee2f..9914780e04c 100644
--- a/llvm/test/CodeGen/X86/vectorcall.ll
+++ b/llvm/test/CodeGen/X86/vectorcall.ll
@@ -157,7 +157,7 @@ entry:
%retval = alloca %struct.HVA4, align 16
%0 = bitcast %struct.HVA4* %retval to i8*
%1 = bitcast %struct.HVA4* %b to i8*
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 64, i32 16, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %0, i8* align 16 %1, i32 64, i1 false)
%2 = load %struct.HVA4, %struct.HVA4* %retval, align 16
ret %struct.HVA4 %2
}
@@ -168,18 +168,18 @@ entry:
; CHECK: movaps 48(%{{[re]}}sp), %xmm3
; CHECK: ret{{[ql]}}
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i32, i1)
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32, i1)
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i32, i1)
+declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1)
define x86_vectorcallcc void @test_mixed_7(%struct.HVA5* noalias sret %agg.result) {
entry:
%a = alloca %struct.HVA5, align 16
%0 = bitcast %struct.HVA5* %a to i8*
- call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 80, i32 16, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* align 16 %0, i8 0, i64 80, i1 false)
%1 = bitcast %struct.HVA5* %agg.result to i8*
%2 = bitcast %struct.HVA5* %a to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 80, i32 16, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %1, i8* align 16 %2, i64 80, i1 false)
ret void
}
; CHECK-LABEL: test_mixed_7
diff --git a/llvm/test/CodeGen/X86/x86-64-static-relo-movl.ll b/llvm/test/CodeGen/X86/x86-64-static-relo-movl.ll
index 5da3a470503..658187e2204 100644
--- a/llvm/test/CodeGen/X86/x86-64-static-relo-movl.ll
+++ b/llvm/test/CodeGen/X86/x86-64-static-relo-movl.ll
@@ -10,7 +10,7 @@
define void @setup() {
%pending = alloca %struct.MatchInfo, align 8
%t = bitcast %struct.MatchInfo* %pending to i8*
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %t, i8* bitcast (%struct.MatchInfo* @NO_MATCH to i8*), i64 512, i32 8, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %t, i8* align 8 bitcast (%struct.MatchInfo* @NO_MATCH to i8*), i64 512, i1 false)
%u = getelementptr inbounds %struct.MatchInfo, %struct.MatchInfo* %pending, i32 0, i32 2
%v = load i64, i64* %u, align 8
br label %done
@@ -21,4 +21,4 @@ done:
}
; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
diff --git a/llvm/test/CodeGen/X86/x86-repmov-copy-eflags.ll b/llvm/test/CodeGen/X86/x86-repmov-copy-eflags.ll
index ad398885728..49afb39b6d4 100644
--- a/llvm/test/CodeGen/X86/x86-repmov-copy-eflags.ll
+++ b/llvm/test/CodeGen/X86/x86-repmov-copy-eflags.ll
@@ -10,7 +10,7 @@ entry:
%g = alloca %struct.T, align 8
%r = alloca i32, align 8
store i32 0, i32* %r, align 4
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 24, i32 8, i1 false)
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %p, i8* align 8 %q, i32 24, i1 false)
br label %while.body
while.body: ; preds = %while.body, %entry
@@ -26,7 +26,7 @@ while.end: ; preds = %while.body
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #1
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #1
declare void @g(%struct.T*)