author    | David Blaikie <dblaikie@gmail.com> | 2015-02-27 21:17:42 +0000
committer | David Blaikie <dblaikie@gmail.com> | 2015-02-27 21:17:42 +0000
commit    | a79ac14fa68297f9888bc70a10df5ed9b8864e38 (patch)
tree      | 8d8217a8928e3ee599bdde405e2e178b3a55b645 /llvm/test/CodeGen/Hexagon
parent    | 83687fb9e654c9d0086e7f6b728c26fa0b729e71 (diff)
[opaque pointer type] Add textual IR support for explicit type parameter to load instruction
Essentially the same as the GEP change in r230786.
A similar migration script can be used to update test cases, though a few more
test case improvements/changes were required this time around (r229269-r229278):
import sys
import re

# Group 1 runs from the '='/':'/line start through "load [atomic] [volatile]"
# and the pointee type; group 2 is the pointee type alone; group 3 is an
# optional "addrspace(N)" qualifier; group 4 is the pointer operand and the
# rest of the line.
pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

# Repeat the pointee type as the new explicit result type, then rebuild the
# pointer type after the comma.
for line in sys.stdin:
    sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))
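For example, fed the pre-change test line

    %0 = load i32* @j, align 4

on stdin, the script emits

    %0 = load i32, i32* @j, align 4

with the loaded type spelled out before the pointer operand; an addrspace
qualifier, when present, stays attached to the rebuilt pointer type.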
Reviewers: rafael, dexonsmith, grosser
Differential Revision: http://reviews.llvm.org/D7649
llvm-svn: 230794
Diffstat (limited to 'llvm/test/CodeGen/Hexagon')
64 files changed, 344 insertions, 344 deletions
diff --git a/llvm/test/CodeGen/Hexagon/BranchPredict.ll b/llvm/test/CodeGen/Hexagon/BranchPredict.ll index 5d564493e50..0cd616b31a3 100644 --- a/llvm/test/CodeGen/Hexagon/BranchPredict.ll +++ b/llvm/test/CodeGen/Hexagon/BranchPredict.ll @@ -53,7 +53,7 @@ return: ; preds = %if.else, %if.then define i32 @foo_bar(i32 %a, i16 signext %b) nounwind { ; CHECK: if{{ *}}(!cmp.eq(r{{[0-9]*}}.new, #0)) jump:nt entry: - %0 = load i32* @j, align 4 + %0 = load i32, i32* @j, align 4 %tobool = icmp eq i32 %0, 0 br i1 %tobool, label %if.else, label %if.then, !prof !0 diff --git a/llvm/test/CodeGen/Hexagon/absaddr-store.ll b/llvm/test/CodeGen/Hexagon/absaddr-store.ll index 5c2554df8ae..3be4b1cc261 100644 --- a/llvm/test/CodeGen/Hexagon/absaddr-store.ll +++ b/llvm/test/CodeGen/Hexagon/absaddr-store.ll @@ -9,7 +9,7 @@ define zeroext i8 @absStoreByte() nounwind { ; CHECK: memb(##b){{ *}}={{ *}}r{{[0-9]+}} entry: - %0 = load i8* @b, align 1 + %0 = load i8, i8* @b, align 1 %conv = zext i8 %0 to i32 %mul = mul nsw i32 100, %conv %conv1 = trunc i32 %mul to i8 @@ -20,7 +20,7 @@ entry: define signext i16 @absStoreHalf() nounwind { ; CHECK: memh(##c){{ *}}={{ *}}r{{[0-9]+}} entry: - %0 = load i16* @c, align 2 + %0 = load i16, i16* @c, align 2 %conv = sext i16 %0 to i32 %mul = mul nsw i32 100, %conv %conv1 = trunc i32 %mul to i16 @@ -31,7 +31,7 @@ entry: define i32 @absStoreWord() nounwind { ; CHECK: memw(##a){{ *}}={{ *}}r{{[0-9]+}} entry: - %0 = load i32* @a, align 4 + %0 = load i32, i32* @a, align 4 %mul = mul nsw i32 100, %0 store i32 %mul, i32* @a, align 4 ret i32 %mul diff --git a/llvm/test/CodeGen/Hexagon/absimm.ll b/llvm/test/CodeGen/Hexagon/absimm.ll index b8f5edc2647..07adb3fe49d 100644 --- a/llvm/test/CodeGen/Hexagon/absimm.ll +++ b/llvm/test/CodeGen/Hexagon/absimm.ll @@ -12,7 +12,7 @@ entry: define i32* @f2(i32* nocapture %i) nounwind { entry: ; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(##786432) - %0 = load volatile i32* inttoptr (i32 786432 to i32*), align 262144 + %0 = load volatile i32, i32* inttoptr (i32 786432 to i32*), align 262144 %1 = inttoptr i32 %0 to i32* ret i32* %1 } diff --git a/llvm/test/CodeGen/Hexagon/always-ext.ll b/llvm/test/CodeGen/Hexagon/always-ext.ll index e164e9a3516..8b4b2f5bf4f 100644 --- a/llvm/test/CodeGen/Hexagon/always-ext.ll +++ b/llvm/test/CodeGen/Hexagon/always-ext.ll @@ -24,8 +24,8 @@ entry: br i1 undef, label %for.body.us, label %for.end for.body.us: ; preds = %entry - %0 = load %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111** null, align 4 - %1 = load i32* undef, align 4 + %0 = load %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111*, %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111** null, align 4 + %1 = load i32, i32* undef, align 4 %cmp.i.us = icmp slt i32 %1, 1024 br i1 %cmp.i.us, label %CuSuiteAdd.exit.us, label %cond.false6.i.us diff --git a/llvm/test/CodeGen/Hexagon/block-addr.ll b/llvm/test/CodeGen/Hexagon/block-addr.ll index dc0d6e60fd2..902765e42ef 100644 --- a/llvm/test/CodeGen/Hexagon/block-addr.ll +++ b/llvm/test/CodeGen/Hexagon/block-addr.ll @@ -10,7 +10,7 @@ entry: br label %while.body while.body: - %ret.0.load17 = load volatile i32* %ret, align 4 + %ret.0.load17 = load volatile i32, i32* %ret, align 4 switch i32 %ret.0.load17, label %label6 [ i32 0, label %label0 i32 1, label %label1 @@ -21,37 +21,37 @@ while.body: ] label0: - %ret.0.load18 = load volatile i32* %ret, align 4 + %ret.0.load18 = load volatile i32, i32* %ret, align 4 %inc = add nsw i32 %ret.0.load18, 1 store volatile i32 %inc, i32* %ret, align 4 br label %while.body label1: - %ret.0.load19 = load 
volatile i32* %ret, align 4 + %ret.0.load19 = load volatile i32, i32* %ret, align 4 %inc2 = add nsw i32 %ret.0.load19, 1 store volatile i32 %inc2, i32* %ret, align 4 br label %while.body label2: - %ret.0.load20 = load volatile i32* %ret, align 4 + %ret.0.load20 = load volatile i32, i32* %ret, align 4 %inc4 = add nsw i32 %ret.0.load20, 1 store volatile i32 %inc4, i32* %ret, align 4 br label %while.body label3: - %ret.0.load21 = load volatile i32* %ret, align 4 + %ret.0.load21 = load volatile i32, i32* %ret, align 4 %inc6 = add nsw i32 %ret.0.load21, 1 store volatile i32 %inc6, i32* %ret, align 4 br label %while.body label4: - %ret.0.load22 = load volatile i32* %ret, align 4 + %ret.0.load22 = load volatile i32, i32* %ret, align 4 %inc8 = add nsw i32 %ret.0.load22, 1 store volatile i32 %inc8, i32* %ret, align 4 br label %while.body label5: - %ret.0.load23 = load volatile i32* %ret, align 4 + %ret.0.load23 = load volatile i32, i32* %ret, align 4 %inc10 = add nsw i32 %ret.0.load23, 1 store volatile i32 %inc10, i32* %ret, align 4 br label %while.body diff --git a/llvm/test/CodeGen/Hexagon/cext-check.ll b/llvm/test/CodeGen/Hexagon/cext-check.ll index 9fc3d22174e..19b91c5245b 100644 --- a/llvm/test/CodeGen/Hexagon/cext-check.ll +++ b/llvm/test/CodeGen/Hexagon/cext-check.ll @@ -7,19 +7,19 @@ define i32 @cext_test1(i32* %a) nounwind { ; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}{{ *}}+{{ *}}##4092) ; CHECK-NOT: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}##300) entry: - %0 = load i32* %a, align 4 + %0 = load i32, i32* %a, align 4 %tobool = icmp ne i32 %0, 0 br i1 %tobool, label %if.then, label %if.end if.then: %arrayidx1 = getelementptr inbounds i32, i32* %a, i32 2000 - %1 = load i32* %arrayidx1, align 4 + %1 = load i32, i32* %arrayidx1, align 4 %add = add nsw i32 %1, 300000 br label %return if.end: %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1023 - %2 = load i32* %arrayidx2, align 4 + %2 = load i32, i32* %arrayidx2, align 4 %add3 = add nsw i32 %2, 300 br label %return @@ -39,14 +39,14 @@ entry: if.then: %arrayidx = getelementptr inbounds i8, i8* %a, i32 1023 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 300000 br label %return if.end: %arrayidx1 = getelementptr inbounds i8, i8* %a, i32 1024 - %1 = load i8* %arrayidx1, align 1 + %1 = load i8, i8* %arrayidx1, align 1 %conv2 = zext i8 %1 to i32 %add3 = add nsw i32 %conv2, 6000 br label %return diff --git a/llvm/test/CodeGen/Hexagon/cext-valid-packet2.ll b/llvm/test/CodeGen/Hexagon/cext-valid-packet2.ll index 03904d99511..2eba7432996 100644 --- a/llvm/test/CodeGen/Hexagon/cext-valid-packet2.ll +++ b/llvm/test/CodeGen/Hexagon/cext-valid-packet2.ll @@ -10,24 +10,24 @@ define i32 @test(i32* nocapture %a, i32* nocapture %b, i32 %c) nounwind { entry: %add = add nsw i32 %c, 200002 - %0 = load i32* %a, align 4 + %0 = load i32, i32* %a, align 4 %add1 = add nsw i32 %0, 200000 %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 3000 store i32 %add1, i32* %arrayidx2, align 4 - %1 = load i32* %b, align 4 + %1 = load i32, i32* %b, align 4 %add4 = add nsw i32 %1, 200001 %arrayidx5 = getelementptr inbounds i32, i32* %a, i32 1 store i32 %add4, i32* %arrayidx5, align 4 %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 1 - %2 = load i32* %arrayidx7, align 4 + %2 = load i32, i32* %arrayidx7, align 4 %cmp = icmp sgt i32 %add4, %2 br i1 %cmp, label %if.then, label %if.else if.then: ; preds = %entry %arrayidx8 = getelementptr inbounds i32, i32* %a, i32 2 - %3 = 
load i32* %arrayidx8, align 4 + %3 = load i32, i32* %arrayidx8, align 4 %arrayidx9 = getelementptr inbounds i32, i32* %b, i32 2000 - %4 = load i32* %arrayidx9, align 4 + %4 = load i32, i32* %arrayidx9, align 4 %sub = sub nsw i32 %3, %4 %arrayidx10 = getelementptr inbounds i32, i32* %a, i32 4000 store i32 %sub, i32* %arrayidx10, align 4 diff --git a/llvm/test/CodeGen/Hexagon/cmp_pred2.ll b/llvm/test/CodeGen/Hexagon/cmp_pred2.ll index a20b9f09b6e..28f3e1bac8d 100644 --- a/llvm/test/CodeGen/Hexagon/cmp_pred2.ll +++ b/llvm/test/CodeGen/Hexagon/cmp_pred2.ll @@ -11,7 +11,7 @@ entry: br i1 %cmp, label %if.then, label %entry.if.end_crit_edge entry.if.end_crit_edge: - %.pre = load i32* @c, align 4 + %.pre = load i32, i32* @c, align 4 br label %if.end if.then: @@ -32,7 +32,7 @@ entry: br i1 %cmp, label %entry.if.end_crit_edge, label %if.then entry.if.end_crit_edge: - %.pre = load i32* @c, align 4 + %.pre = load i32, i32* @c, align 4 br label %if.end if.then: @@ -53,7 +53,7 @@ entry: br i1 %cmp, label %entry.if.end_crit_edge, label %if.then entry.if.end_crit_edge: - %.pre = load i32* @c, align 4 + %.pre = load i32, i32* @c, align 4 br label %if.end if.then: @@ -73,7 +73,7 @@ entry: br i1 %cmp, label %if.then, label %entry.if.end_crit_edge entry.if.end_crit_edge: - %.pre = load i32* @c, align 4 + %.pre = load i32, i32* @c, align 4 br label %if.end if.then: diff --git a/llvm/test/CodeGen/Hexagon/cmpb_pred.ll b/llvm/test/CodeGen/Hexagon/cmpb_pred.ll index 0960da1fa06..cf0c5a825ba 100644 --- a/llvm/test/CodeGen/Hexagon/cmpb_pred.ll +++ b/llvm/test/CodeGen/Hexagon/cmpb_pred.ll @@ -16,7 +16,7 @@ entry: define i32 @Func_3b(i32) nounwind readonly { entry: ; CHECK-NOT: mux - %1 = load i8* @Enum_global, align 1 + %1 = load i8, i8* @Enum_global, align 1 %2 = trunc i32 %0 to i8 %cmp = icmp ne i8 %1, %2 %selv = zext i1 %cmp to i32 @@ -35,7 +35,7 @@ entry: define i32 @Func_3d(i32) nounwind readonly { entry: ; CHECK-NOT: mux - %1 = load i8* @Enum_global, align 1 + %1 = load i8, i8* @Enum_global, align 1 %2 = trunc i32 %0 to i8 %cmp = icmp eq i8 %1, %2 %selv = zext i1 %cmp to i32 @@ -45,7 +45,7 @@ entry: define i32 @Func_3e(i32) nounwind readonly { entry: ; CHECK-NOT: mux - %1 = load i8* @Enum_global, align 1 + %1 = load i8, i8* @Enum_global, align 1 %2 = trunc i32 %0 to i8 %cmp = icmp eq i8 %1, %2 %selv = zext i1 %cmp to i32 diff --git a/llvm/test/CodeGen/Hexagon/combine.ll b/llvm/test/CodeGen/Hexagon/combine.ll index 721998596c8..2e320d977d6 100644 --- a/llvm/test/CodeGen/Hexagon/combine.ll +++ b/llvm/test/CodeGen/Hexagon/combine.ll @@ -6,8 +6,8 @@ define void @foo() nounwind { entry: - %0 = load i32* @j, align 4 - %1 = load i64* @k, align 8 + %0 = load i32, i32* @j, align 4 + %1 = load i64, i64* @k, align 8 %conv = trunc i64 %1 to i32 %2 = call i64 @llvm.hexagon.A2.combinew(i32 %0, i32 %conv) store i64 %2, i64* @k, align 8 diff --git a/llvm/test/CodeGen/Hexagon/combine_ir.ll b/llvm/test/CodeGen/Hexagon/combine_ir.ll index 35e997bbc79..634a5c82a91 100644 --- a/llvm/test/CodeGen/Hexagon/combine_ir.ll +++ b/llvm/test/CodeGen/Hexagon/combine_ir.ll @@ -4,7 +4,7 @@ define void @word(i32* nocapture %a) nounwind { entry: - %0 = load i32* %a, align 4 + %0 = load i32, i32* %a, align 4 %1 = zext i32 %0 to i64 tail call void @bar(i64 %1) nounwind ret void @@ -17,10 +17,10 @@ declare void @bar(i64) define void @halfword(i16* nocapture %a) nounwind { entry: - %0 = load i16* %a, align 2 + %0 = load i16, i16* %a, align 2 %1 = zext i16 %0 to i64 %add.ptr = getelementptr inbounds i16, i16* %a, i32 1 - %2 = load i16* %add.ptr, 
align 2 + %2 = load i16, i16* %add.ptr, align 2 %3 = zext i16 %2 to i64 %4 = shl nuw nsw i64 %3, 16 %ins = or i64 %4, %1 @@ -33,10 +33,10 @@ entry: define void @byte(i8* nocapture %a) nounwind { entry: - %0 = load i8* %a, align 1 + %0 = load i8, i8* %a, align 1 %1 = zext i8 %0 to i64 %add.ptr = getelementptr inbounds i8, i8* %a, i32 1 - %2 = load i8* %add.ptr, align 1 + %2 = load i8, i8* %add.ptr, align 1 %3 = zext i8 %2 to i64 %4 = shl nuw nsw i64 %3, 8 %ins = or i64 %4, %1 diff --git a/llvm/test/CodeGen/Hexagon/convertdptoint.ll b/llvm/test/CodeGen/Hexagon/convertdptoint.ll index fa068c4c8a5..a09c2fd14b1 100644 --- a/llvm/test/CodeGen/Hexagon/convertdptoint.ll +++ b/llvm/test/CodeGen/Hexagon/convertdptoint.ll @@ -14,13 +14,13 @@ entry: store i32 0, i32* %retval store double 1.540000e+01, double* %a, align 8 store double 9.100000e+00, double* %b, align 8 - %0 = load double* %a, align 8 - %1 = load double* %b, align 8 + %0 = load double, double* %a, align 8 + %1 = load double, double* %b, align 8 %add = fadd double %0, %1 store double %add, double* %c, align 8 - %2 = load double* %c, align 8 + %2 = load double, double* %c, align 8 %conv = fptosi double %2 to i32 store i32 %conv, i32* %i, align 4 - %3 = load i32* %i, align 4 + %3 = load i32, i32* %i, align 4 ret i32 %3 } diff --git a/llvm/test/CodeGen/Hexagon/convertdptoll.ll b/llvm/test/CodeGen/Hexagon/convertdptoll.ll index 1b4dd86bd01..f46d46cf76b 100644 --- a/llvm/test/CodeGen/Hexagon/convertdptoll.ll +++ b/llvm/test/CodeGen/Hexagon/convertdptoll.ll @@ -14,14 +14,14 @@ entry: store i32 0, i32* %retval store double 1.540000e+01, double* %a, align 8 store double 9.100000e+00, double* %b, align 8 - %0 = load double* %a, align 8 - %1 = load double* %b, align 8 + %0 = load double, double* %a, align 8 + %1 = load double, double* %b, align 8 %add = fadd double %0, %1 store double %add, double* %c, align 8 - %2 = load double* %c, align 8 + %2 = load double, double* %c, align 8 %conv = fptosi double %2 to i64 store i64 %conv, i64* %i, align 8 - %3 = load i64* %i, align 8 + %3 = load i64, i64* %i, align 8 %conv1 = trunc i64 %3 to i32 ret i32 %conv1 } diff --git a/llvm/test/CodeGen/Hexagon/convertsptoint.ll b/llvm/test/CodeGen/Hexagon/convertsptoint.ll index b8a9d6c8083..7593e57d852 100644 --- a/llvm/test/CodeGen/Hexagon/convertsptoint.ll +++ b/llvm/test/CodeGen/Hexagon/convertsptoint.ll @@ -14,13 +14,13 @@ entry: store i32 0, i32* %retval store float 0x402ECCCCC0000000, float* %a, align 4 store float 0x4022333340000000, float* %b, align 4 - %0 = load float* %a, align 4 - %1 = load float* %b, align 4 + %0 = load float, float* %a, align 4 + %1 = load float, float* %b, align 4 %add = fadd float %0, %1 store float %add, float* %c, align 4 - %2 = load float* %c, align 4 + %2 = load float, float* %c, align 4 %conv = fptosi float %2 to i32 store i32 %conv, i32* %i, align 4 - %3 = load i32* %i, align 4 + %3 = load i32, i32* %i, align 4 ret i32 %3 } diff --git a/llvm/test/CodeGen/Hexagon/convertsptoll.ll b/llvm/test/CodeGen/Hexagon/convertsptoll.ll index 1c4df94784a..d8432cbc812 100644 --- a/llvm/test/CodeGen/Hexagon/convertsptoll.ll +++ b/llvm/test/CodeGen/Hexagon/convertsptoll.ll @@ -14,14 +14,14 @@ entry: store i32 0, i32* %retval store float 0x402ECCCCC0000000, float* %a, align 4 store float 0x4022333340000000, float* %b, align 4 - %0 = load float* %a, align 4 - %1 = load float* %b, align 4 + %0 = load float, float* %a, align 4 + %1 = load float, float* %b, align 4 %add = fadd float %0, %1 store float %add, float* %c, align 4 - %2 = load float* %c, 
align 4 + %2 = load float, float* %c, align 4 %conv = fptosi float %2 to i64 store i64 %conv, i64* %i, align 8 - %3 = load i64* %i, align 8 + %3 = load i64, i64* %i, align 8 %conv1 = trunc i64 %3 to i32 ret i32 %conv1 } diff --git a/llvm/test/CodeGen/Hexagon/dadd.ll b/llvm/test/CodeGen/Hexagon/dadd.ll index a86a90c89a1..5fcd705bab2 100644 --- a/llvm/test/CodeGen/Hexagon/dadd.ll +++ b/llvm/test/CodeGen/Hexagon/dadd.ll @@ -11,8 +11,8 @@ entry: %c = alloca double, align 8 store double 1.540000e+01, double* %a, align 8 store double 9.100000e+00, double* %b, align 8 - %0 = load double* %a, align 8 - %1 = load double* %b, align 8 + %0 = load double, double* %a, align 8 + %1 = load double, double* %b, align 8 %add = fadd double %0, %1 store double %add, double* %c, align 8 ret i32 0 diff --git a/llvm/test/CodeGen/Hexagon/dmul.ll b/llvm/test/CodeGen/Hexagon/dmul.ll index cbe0d7f3289..1b79e0aa7d7 100644 --- a/llvm/test/CodeGen/Hexagon/dmul.ll +++ b/llvm/test/CodeGen/Hexagon/dmul.ll @@ -10,8 +10,8 @@ entry: %c = alloca double, align 8 store double 1.540000e+01, double* %a, align 8 store double 9.100000e+00, double* %b, align 8 - %0 = load double* %b, align 8 - %1 = load double* %a, align 8 + %0 = load double, double* %b, align 8 + %1 = load double, double* %a, align 8 %mul = fmul double %0, %1 store double %mul, double* %c, align 8 ret i32 0 diff --git a/llvm/test/CodeGen/Hexagon/double.ll b/llvm/test/CodeGen/Hexagon/double.ll index c3b6f378ec8..b4d025cd7fd 100644 --- a/llvm/test/CodeGen/Hexagon/double.ll +++ b/llvm/test/CodeGen/Hexagon/double.ll @@ -10,13 +10,13 @@ entry: store double* %acc, double** %acc.addr, align 4 store double %num, double* %num.addr, align 8 store double %num2, double* %num2.addr, align 8 - %0 = load double** %acc.addr, align 4 - %1 = load double* %0 - %2 = load double* %num.addr, align 8 + %0 = load double*, double** %acc.addr, align 4 + %1 = load double, double* %0 + %2 = load double, double* %num.addr, align 8 %add = fadd double %1, %2 - %3 = load double* %num2.addr, align 8 + %3 = load double, double* %num2.addr, align 8 %sub = fsub double %add, %3 - %4 = load double** %acc.addr, align 4 + %4 = load double*, double** %acc.addr, align 4 store double %sub, double* %4 ret void } diff --git a/llvm/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll b/llvm/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll index 54e7ce3bcdd..6bf8224904e 100644 --- a/llvm/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll +++ b/llvm/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll @@ -14,13 +14,13 @@ entry: store i32 0, i32* %retval store double 1.540000e+01, double* %a, align 8 store double 9.100000e+00, double* %b, align 8 - %0 = load double* %a, align 8 - %1 = load double* %b, align 8 + %0 = load double, double* %a, align 8 + %1 = load double, double* %b, align 8 %add = fadd double %0, %1 store double %add, double* %c, align 8 - %2 = load double* %c, align 8 + %2 = load double, double* %c, align 8 %conv = fptosi double %2 to i32 store i32 %conv, i32* %i, align 4 - %3 = load i32* %i, align 4 + %3 = load i32, i32* %i, align 4 ret i32 %3 } diff --git a/llvm/test/CodeGen/Hexagon/dsub.ll b/llvm/test/CodeGen/Hexagon/dsub.ll index f271492d057..8b37301d84f 100644 --- a/llvm/test/CodeGen/Hexagon/dsub.ll +++ b/llvm/test/CodeGen/Hexagon/dsub.ll @@ -10,8 +10,8 @@ entry: %c = alloca double, align 8 store double 1.540000e+01, double* %a, align 8 store double 9.100000e+00, double* %b, align 8 - %0 = load double* %b, align 8 - %1 = load double* %a, align 8 + %0 = load double, double* %b, align 8 + %1 = 
load double, double* %a, align 8 %sub = fsub double %0, %1 store double %sub, double* %c, align 8 ret i32 0 diff --git a/llvm/test/CodeGen/Hexagon/extload-combine.ll b/llvm/test/CodeGen/Hexagon/extload-combine.ll index b3b8bf07032..519177fc75f 100644 --- a/llvm/test/CodeGen/Hexagon/extload-combine.ll +++ b/llvm/test/CodeGen/Hexagon/extload-combine.ll @@ -19,7 +19,7 @@ define i64 @short_test1() #0 { ; CHECK: combine(#0, [[VAR]]) entry: store i16 0, i16* @a, align 2 - %0 = load i16* @b, align 2 + %0 = load i16, i16* @b, align 2 %conv2 = zext i16 %0 to i64 ret i64 %conv2 } @@ -30,7 +30,7 @@ define i64 @short_test2() #0 { ; CHECK: sxtw([[VAR1]]) entry: store i16 0, i16* @a, align 2 - %0 = load i16* @c, align 2 + %0 = load i16, i16* @c, align 2 %conv2 = sext i16 %0 to i64 ret i64 %conv2 } @@ -41,7 +41,7 @@ define i64 @char_test1() #0 { ; CHECK: combine(#0, [[VAR2]]) entry: store i8 0, i8* @char_a, align 1 - %0 = load i8* @char_b, align 1 + %0 = load i8, i8* @char_b, align 1 %conv2 = zext i8 %0 to i64 ret i64 %conv2 } @@ -52,7 +52,7 @@ define i64 @char_test2() #0 { ; CHECK: sxtw([[VAR3]]) entry: store i8 0, i8* @char_a, align 1 - %0 = load i8* @char_c, align 1 + %0 = load i8, i8* @char_c, align 1 %conv2 = sext i8 %0 to i64 ret i64 %conv2 } @@ -63,7 +63,7 @@ define i64 @int_test1() #0 { ; CHECK: combine(#0, [[VAR4]]) entry: store i32 0, i32* @int_a, align 4 - %0 = load i32* @int_b, align 4 + %0 = load i32, i32* @int_b, align 4 %conv = zext i32 %0 to i64 ret i64 %conv } @@ -74,7 +74,7 @@ define i64 @int_test2() #0 { ; CHECK: sxtw([[VAR5]]) entry: store i32 0, i32* @int_a, align 4 - %0 = load i32* @int_c, align 4 + %0 = load i32, i32* @int_c, align 4 %conv = sext i32 %0 to i64 ret i64 %conv } diff --git a/llvm/test/CodeGen/Hexagon/fadd.ll b/llvm/test/CodeGen/Hexagon/fadd.ll index b95e1475ff7..6cf0fbbccf7 100644 --- a/llvm/test/CodeGen/Hexagon/fadd.ll +++ b/llvm/test/CodeGen/Hexagon/fadd.ll @@ -10,8 +10,8 @@ entry: %c = alloca float, align 4 store float 0x402ECCCCC0000000, float* %a, align 4 store float 0x4022333340000000, float* %b, align 4 - %0 = load float* %a, align 4 - %1 = load float* %b, align 4 + %0 = load float, float* %a, align 4 + %1 = load float, float* %b, align 4 %add = fadd float %0, %1 store float %add, float* %c, align 4 ret i32 0 diff --git a/llvm/test/CodeGen/Hexagon/fcmp.ll b/llvm/test/CodeGen/Hexagon/fcmp.ll index e7b649e2b8c..5cf3c57b5e9 100644 --- a/llvm/test/CodeGen/Hexagon/fcmp.ll +++ b/llvm/test/CodeGen/Hexagon/fcmp.ll @@ -8,7 +8,7 @@ entry: %retval = alloca i32, align 4 %y.addr = alloca float, align 4 store float %y, float* %y.addr, align 4 - %0 = load float* %y.addr, align 4 + %0 = load float, float* %y.addr, align 4 %cmp = fcmp ogt float %0, 0x406AD7EFA0000000 br i1 %cmp, label %if.then, label %if.else @@ -21,7 +21,7 @@ if.else: ; preds = %entry br label %return return: ; preds = %if.else, %if.then - %1 = load i32* %retval + %1 = load i32, i32* %retval ret i32 %1 } @@ -31,7 +31,7 @@ entry: %a = alloca float, align 4 store i32 0, i32* %retval store float 0x40012E0A00000000, float* %a, align 4 - %0 = load float* %a, align 4 + %0 = load float, float* %a, align 4 %call = call i32 @foo(float %0) ret i32 %call } diff --git a/llvm/test/CodeGen/Hexagon/float.ll b/llvm/test/CodeGen/Hexagon/float.ll index bec9f5852e3..03d1fbf44cb 100644 --- a/llvm/test/CodeGen/Hexagon/float.ll +++ b/llvm/test/CodeGen/Hexagon/float.ll @@ -10,13 +10,13 @@ entry: store float* %acc, float** %acc.addr, align 4 store float %num, float* %num.addr, align 4 store float %num2, float* %num2.addr, align 4 - 
%0 = load float** %acc.addr, align 4 - %1 = load float* %0 - %2 = load float* %num.addr, align 4 + %0 = load float*, float** %acc.addr, align 4 + %1 = load float, float* %0 + %2 = load float, float* %num.addr, align 4 %add = fadd float %1, %2 - %3 = load float* %num2.addr, align 4 + %3 = load float, float* %num2.addr, align 4 %sub = fsub float %add, %3 - %4 = load float** %acc.addr, align 4 + %4 = load float*, float** %acc.addr, align 4 store float %sub, float* %4 ret void } diff --git a/llvm/test/CodeGen/Hexagon/floatconvert-ieee-rnd-near.ll b/llvm/test/CodeGen/Hexagon/floatconvert-ieee-rnd-near.ll index bec9f5852e3..03d1fbf44cb 100644 --- a/llvm/test/CodeGen/Hexagon/floatconvert-ieee-rnd-near.ll +++ b/llvm/test/CodeGen/Hexagon/floatconvert-ieee-rnd-near.ll @@ -10,13 +10,13 @@ entry: store float* %acc, float** %acc.addr, align 4 store float %num, float* %num.addr, align 4 store float %num2, float* %num2.addr, align 4 - %0 = load float** %acc.addr, align 4 - %1 = load float* %0 - %2 = load float* %num.addr, align 4 + %0 = load float*, float** %acc.addr, align 4 + %1 = load float, float* %0 + %2 = load float, float* %num.addr, align 4 %add = fadd float %1, %2 - %3 = load float* %num2.addr, align 4 + %3 = load float, float* %num2.addr, align 4 %sub = fsub float %add, %3 - %4 = load float** %acc.addr, align 4 + %4 = load float*, float** %acc.addr, align 4 store float %sub, float* %4 ret void } diff --git a/llvm/test/CodeGen/Hexagon/fmul.ll b/llvm/test/CodeGen/Hexagon/fmul.ll index 4766845b114..4f55d0bec47 100644 --- a/llvm/test/CodeGen/Hexagon/fmul.ll +++ b/llvm/test/CodeGen/Hexagon/fmul.ll @@ -11,8 +11,8 @@ entry: %c = alloca float, align 4 store float 0x402ECCCCC0000000, float* %a, align 4 store float 0x4022333340000000, float* %b, align 4 - %0 = load float* %b, align 4 - %1 = load float* %a, align 4 + %0 = load float, float* %b, align 4 + %1 = load float, float* %a, align 4 %mul = fmul float %0, %1 store float %mul, float* %c, align 4 ret i32 0 diff --git a/llvm/test/CodeGen/Hexagon/frame.ll b/llvm/test/CodeGen/Hexagon/frame.ll index dc87c732d6f..e87acb8cd79 100644 --- a/llvm/test/CodeGen/Hexagon/frame.ll +++ b/llvm/test/CodeGen/Hexagon/frame.ll @@ -10,14 +10,14 @@ define i32 @foo() nounwind { entry: %i = alloca i32, align 4 - %0 = load i32* @num, align 4 + %0 = load i32, i32* @num, align 4 store i32 %0, i32* %i, align 4 - %1 = load i32* %i, align 4 - %2 = load i32* @acc, align 4 + %1 = load i32, i32* %i, align 4 + %2 = load i32, i32* @acc, align 4 %mul = mul nsw i32 %1, %2 - %3 = load i32* @num2, align 4 + %3 = load i32, i32* @num2, align 4 %add = add nsw i32 %mul, %3 store i32 %add, i32* %i, align 4 - %4 = load i32* %i, align 4 + %4 = load i32, i32* %i, align 4 ret i32 %4 } diff --git a/llvm/test/CodeGen/Hexagon/fsub.ll b/llvm/test/CodeGen/Hexagon/fsub.ll index 07c866f4c2e..ca7bdc4d0b3 100644 --- a/llvm/test/CodeGen/Hexagon/fsub.ll +++ b/llvm/test/CodeGen/Hexagon/fsub.ll @@ -10,8 +10,8 @@ entry: %c = alloca float, align 4 store float 0x402ECCCCC0000000, float* %a, align 4 store float 0x4022333340000000, float* %b, align 4 - %0 = load float* %b, align 4 - %1 = load float* %a, align 4 + %0 = load float, float* %b, align 4 + %1 = load float, float* %a, align 4 %sub = fsub float %0, %1 store float %sub, float* %c, align 4 ret i32 0 diff --git a/llvm/test/CodeGen/Hexagon/fusedandshift.ll b/llvm/test/CodeGen/Hexagon/fusedandshift.ll index 022b3c67345..59a1e1d84fc 100644 --- a/llvm/test/CodeGen/Hexagon/fusedandshift.ll +++ b/llvm/test/CodeGen/Hexagon/fusedandshift.ll @@ -5,7 +5,7 @@ define i32 
@main(i16* %a, i16* %b) nounwind { entry: - %0 = load i16* %a, align 2 + %0 = load i16, i16* %a, align 2 %conv1 = sext i16 %0 to i32 %shr1 = ashr i32 %conv1, 3 %and1 = and i32 %shr1, 15 diff --git a/llvm/test/CodeGen/Hexagon/gp-plus-offset-load.ll b/llvm/test/CodeGen/Hexagon/gp-plus-offset-load.ll index a1b80a65f82..583f67aa050 100644 --- a/llvm/test/CodeGen/Hexagon/gp-plus-offset-load.ll +++ b/llvm/test/CodeGen/Hexagon/gp-plus-offset-load.ll @@ -12,7 +12,7 @@ entry: br i1 %cmp, label %if.then, label %if.end if.then: ; preds = %entry - %0 = load i32* getelementptr inbounds (%struct.struc* @foo, i32 0, i32 3), align 4 + %0 = load i32, i32* getelementptr inbounds (%struct.struc* @foo, i32 0, i32 3), align 4 store i32 %0, i32* %ival, align 4 br label %if.end @@ -27,7 +27,7 @@ entry: br i1 %cmp, label %if.then, label %if.end if.then: ; preds = %entry - %0 = load i8* getelementptr inbounds (%struct.struc* @foo, i32 0, i32 1), align 1 + %0 = load i8, i8* getelementptr inbounds (%struct.struc* @foo, i32 0, i32 1), align 1 store i8 %0, i8* %ival, align 1 br label %if.end @@ -42,7 +42,7 @@ entry: br i1 %cmp, label %if.then, label %if.end if.then: ; preds = %entry - %0 = load i16* getelementptr inbounds (%struct.struc* @foo, i32 0, i32 2), align 2 + %0 = load i16, i16* getelementptr inbounds (%struct.struc* @foo, i32 0, i32 2), align 2 store i16 %0, i16* %ival, align 2 br label %if.end diff --git a/llvm/test/CodeGen/Hexagon/gp-rel.ll b/llvm/test/CodeGen/Hexagon/gp-rel.ll index 561869e8ef3..bb7cb182bf1 100644 --- a/llvm/test/CodeGen/Hexagon/gp-rel.ll +++ b/llvm/test/CodeGen/Hexagon/gp-rel.ll @@ -10,14 +10,14 @@ entry: ; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(#a) ; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(#b) ; CHECK: if{{ *}}(p{{[0-3]}}) memw(##c){{ *}}={{ *}}r{{[0-9]+}} - %0 = load i32* @a, align 4 - %1 = load i32* @b, align 4 + %0 = load i32, i32* @a, align 4 + %1 = load i32, i32* @b, align 4 %add = add nsw i32 %1, %0 %cmp = icmp eq i32 %0, %1 br i1 %cmp, label %if.then, label %entry.if.end_crit_edge entry.if.end_crit_edge: - %.pre = load i32* @c, align 4 + %.pre = load i32, i32* @c, align 4 br label %if.end if.then: diff --git a/llvm/test/CodeGen/Hexagon/hwloop-cleanup.ll b/llvm/test/CodeGen/Hexagon/hwloop-cleanup.ll index 81124072487..643fe11f2cb 100644 --- a/llvm/test/CodeGen/Hexagon/hwloop-cleanup.ll +++ b/llvm/test/CodeGen/Hexagon/hwloop-cleanup.ll @@ -20,7 +20,7 @@ for.body: ; preds = %for.body.preheader, %sum.03 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ] %arrayidx.phi = phi i32* [ %arrayidx.inc, %for.body ], [ %b, %for.body.preheader ] %i.02 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ] - %0 = load i32* %arrayidx.phi, align 4 + %0 = load i32, i32* %arrayidx.phi, align 4 %add = add nsw i32 %0, %sum.03 %inc = add nsw i32 %i.02, 1 %exitcond = icmp eq i32 %inc, %n @@ -50,7 +50,7 @@ for.body: %sum.02 = phi i32 [ 0, %entry ], [ %add, %for.body ] %arrayidx.phi = phi i32* [ %b, %entry ], [ %arrayidx.inc, %for.body ] %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ] - %0 = load i32* %arrayidx.phi, align 4 + %0 = load i32, i32* %arrayidx.phi, align 4 %add = add nsw i32 %0, %sum.02 %inc = add nsw i32 %i.01, 1 %exitcond = icmp eq i32 %inc, 40 diff --git a/llvm/test/CodeGen/Hexagon/hwloop-dbg.ll b/llvm/test/CodeGen/Hexagon/hwloop-dbg.ll index 7dfea5d68f6..ca7da9ecdb0 100644 --- a/llvm/test/CodeGen/Hexagon/hwloop-dbg.ll +++ b/llvm/test/CodeGen/Hexagon/hwloop-dbg.ll @@ -19,7 +19,7 @@ for.body: ; preds = %for.body, %entry %b.addr.01 = phi i32* [ %b, %entry ], [ %incdec.ptr, %for.body ] 
%incdec.ptr = getelementptr inbounds i32, i32* %b.addr.01, i32 1, !dbg !21 tail call void @llvm.dbg.value(metadata i32* %incdec.ptr, i64 0, metadata !14, metadata !{!"0x102"}), !dbg !21 - %0 = load i32* %b.addr.01, align 4, !dbg !21 + %0 = load i32, i32* %b.addr.01, align 4, !dbg !21 store i32 %0, i32* %arrayidx.phi, align 4, !dbg !21 %inc = add nsw i32 %i.02, 1, !dbg !26 tail call void @llvm.dbg.value(metadata i32 %inc, i64 0, metadata !15, metadata !{!"0x102"}), !dbg !26 diff --git a/llvm/test/CodeGen/Hexagon/hwloop-le.ll b/llvm/test/CodeGen/Hexagon/hwloop-le.ll index 984a26387a9..85a1b3db673 100644 --- a/llvm/test/CodeGen/Hexagon/hwloop-le.ll +++ b/llvm/test/CodeGen/Hexagon/hwloop-le.ll @@ -15,7 +15,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 28395, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -44,7 +44,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 9073, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -73,7 +73,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 21956, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -102,7 +102,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 16782, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -131,7 +131,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 19097, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -160,7 +160,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -189,7 +189,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -218,7 +218,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 
%i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -247,7 +247,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -276,7 +276,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -305,7 +305,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -334,7 +334,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -363,7 +363,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -392,7 +392,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -421,7 +421,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 diff --git a/llvm/test/CodeGen/Hexagon/hwloop-lt.ll b/llvm/test/CodeGen/Hexagon/hwloop-lt.ll index 23be6fe37ba..804f76456e2 100644 --- a/llvm/test/CodeGen/Hexagon/hwloop-lt.ll +++ b/llvm/test/CodeGen/Hexagon/hwloop-lt.ll @@ -15,7 +15,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 8531, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -44,7 +44,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 9152, %for.body.lr.ph ], [ %inc, %for.body ] 
%arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -73,7 +73,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 18851, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -102,7 +102,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 25466, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -131,7 +131,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 9295, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -160,7 +160,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -189,7 +189,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -218,7 +218,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -247,7 +247,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -276,7 +276,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -305,7 +305,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = 
add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -334,7 +334,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -363,7 +363,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -392,7 +392,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -421,7 +421,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 diff --git a/llvm/test/CodeGen/Hexagon/hwloop-ne.ll b/llvm/test/CodeGen/Hexagon/hwloop-ne.ll index 8f512a23681..12ef3b5dd0b 100644 --- a/llvm/test/CodeGen/Hexagon/hwloop-ne.ll +++ b/llvm/test/CodeGen/Hexagon/hwloop-ne.ll @@ -15,7 +15,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 32623, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -44,7 +44,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 29554, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -73,7 +73,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 15692, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -102,7 +102,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 10449, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -131,7 +131,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ 32087, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load 
i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -160,7 +160,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -189,7 +189,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -218,7 +218,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -247,7 +247,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -276,7 +276,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -305,7 +305,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -334,7 +334,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -363,7 +363,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -392,7 +392,7 @@ for.body.lr.ph: ; preds = %entry for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 @@ -421,7 +421,7 @@ for.body.lr.ph: ; preds = %entry 
for.body: ; preds = %for.body.lr.ph, %for.body %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ] %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04 - %0 = load i8* %arrayidx, align 1 + %0 = load i8, i8* %arrayidx, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 1 %conv1 = trunc i32 %add to i8 diff --git a/llvm/test/CodeGen/Hexagon/i16_VarArg.ll b/llvm/test/CodeGen/Hexagon/i16_VarArg.ll index 9914e01de01..41cecec07c0 100644 --- a/llvm/test/CodeGen/Hexagon/i16_VarArg.ll +++ b/llvm/test/CodeGen/Hexagon/i16_VarArg.ll @@ -20,8 +20,8 @@ declare i32 @printf(i8*, ...) define i32 @main() { - %a = load double* @A - %b = load double* @B + %a = load double, double* @A + %b = load double, double* @B %lt_r = fcmp olt double %a, %b %le_r = fcmp ole double %a, %b %gt_r = fcmp ogt double %a, %b diff --git a/llvm/test/CodeGen/Hexagon/i1_VarArg.ll b/llvm/test/CodeGen/Hexagon/i1_VarArg.ll index 408943da166..8b5625c99a0 100644 --- a/llvm/test/CodeGen/Hexagon/i1_VarArg.ll +++ b/llvm/test/CodeGen/Hexagon/i1_VarArg.ll @@ -20,8 +20,8 @@ declare i32 @printf(i8*, ...) define i32 @main() { - %a = load double* @A - %b = load double* @B + %a = load double, double* @A + %b = load double, double* @B %lt_r = fcmp olt double %a, %b %le_r = fcmp ole double %a, %b %gt_r = fcmp ogt double %a, %b diff --git a/llvm/test/CodeGen/Hexagon/i8_VarArg.ll b/llvm/test/CodeGen/Hexagon/i8_VarArg.ll index f3dec92eaff..7283ba461d0 100644 --- a/llvm/test/CodeGen/Hexagon/i8_VarArg.ll +++ b/llvm/test/CodeGen/Hexagon/i8_VarArg.ll @@ -20,8 +20,8 @@ declare i32 @printf(i8*, ...) define i32 @main() { - %a = load double* @A - %b = load double* @B + %a = load double, double* @A + %b = load double, double* @B %lt_r = fcmp olt double %a, %b %le_r = fcmp ole double %a, %b %gt_r = fcmp ogt double %a, %b diff --git a/llvm/test/CodeGen/Hexagon/idxload-with-zero-offset.ll b/llvm/test/CodeGen/Hexagon/idxload-with-zero-offset.ll index aa834ce3c99..f1a9d38f1b1 100644 --- a/llvm/test/CodeGen/Hexagon/idxload-with-zero-offset.ll +++ b/llvm/test/CodeGen/Hexagon/idxload-with-zero-offset.ll @@ -8,7 +8,7 @@ define i32 @load_w(i32* nocapture %a, i32 %n, i32 %m) nounwind { entry: %tmp = add i32 %n, %m %scevgep9 = getelementptr i32, i32* %a, i32 %tmp - %val = load i32* %scevgep9, align 4 + %val = load i32, i32* %scevgep9, align 4 ret i32 %val } @@ -19,7 +19,7 @@ define i16 @load_uh(i16* nocapture %a, i32 %n, i32 %m) nounwind { entry: %tmp = add i32 %n, %m %scevgep9 = getelementptr i16, i16* %a, i32 %tmp - %val = load i16* %scevgep9, align 2 + %val = load i16, i16* %scevgep9, align 2 ret i16 %val } @@ -30,7 +30,7 @@ define i32 @load_h(i16* nocapture %a, i32 %n, i32 %m) nounwind { entry: %tmp = add i32 %n, %m %scevgep9 = getelementptr i16, i16* %a, i32 %tmp - %val = load i16* %scevgep9, align 2 + %val = load i16, i16* %scevgep9, align 2 %conv = sext i16 %val to i32 ret i32 %conv } @@ -42,7 +42,7 @@ define i8 @load_ub(i8* nocapture %a, i32 %n, i32 %m) nounwind { entry: %tmp = add i32 %n, %m %scevgep9 = getelementptr i8, i8* %a, i32 %tmp - %val = load i8* %scevgep9, align 1 + %val = load i8, i8* %scevgep9, align 1 ret i8 %val } @@ -53,7 +53,7 @@ define i32 @foo_2(i8* nocapture %a, i32 %n, i32 %m) nounwind { entry: %tmp = add i32 %n, %m %scevgep9 = getelementptr i8, i8* %a, i32 %tmp - %val = load i8* %scevgep9, align 1 + %val = load i8, i8* %scevgep9, align 1 %conv = sext i8 %val to i32 ret i32 %conv } @@ -65,6 +65,6 @@ define i64 @load_d(i64* nocapture %a, i32 %n, i32 %m) nounwind { entry: %tmp = add i32 %n, %m %scevgep9 = getelementptr i64, 
i64* %a, i32 %tmp - %val = load i64* %scevgep9, align 8 + %val = load i64, i64* %scevgep9, align 8 ret i64 %val } diff --git a/llvm/test/CodeGen/Hexagon/macint.ll b/llvm/test/CodeGen/Hexagon/macint.ll index b3b9d0ee7a0..458a537467c 100644 --- a/llvm/test/CodeGen/Hexagon/macint.ll +++ b/llvm/test/CodeGen/Hexagon/macint.ll @@ -5,7 +5,7 @@ define i32 @main(i32* %a, i32* %b) nounwind { entry: - %0 = load i32* %a, align 4 + %0 = load i32, i32* %a, align 4 %div = udiv i32 %0, 10000 %rem = urem i32 %div, 10 store i32 %rem, i32* %b, align 4 diff --git a/llvm/test/CodeGen/Hexagon/memops.ll b/llvm/test/CodeGen/Hexagon/memops.ll index 6a02028778b..e4a8bf7c95e 100644 --- a/llvm/test/CodeGen/Hexagon/memops.ll +++ b/llvm/test/CodeGen/Hexagon/memops.ll @@ -4,7 +4,7 @@ define void @memop_unsigned_char_add5(i8* nocapture %p) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 5 %conv1 = trunc i32 %add to i8 @@ -16,7 +16,7 @@ define void @memop_unsigned_char_add(i8* nocapture %p, i8 zeroext %x) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} %conv = zext i8 %x to i32 - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %conv1 = zext i8 %0 to i32 %add = add nsw i32 %conv1, %conv %conv2 = trunc i32 %add to i8 @@ -28,7 +28,7 @@ define void @memop_unsigned_char_sub(i8* nocapture %p, i8 zeroext %x) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} %conv = zext i8 %x to i32 - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %conv1 = zext i8 %0 to i32 %sub = sub nsw i32 %conv1, %conv %conv2 = trunc i32 %sub to i8 @@ -39,7 +39,7 @@ entry: define void @memop_unsigned_char_or(i8* nocapture %p, i8 zeroext %x) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %or3 = or i8 %0, %x store i8 %or3, i8* %p, align 1 ret void @@ -48,7 +48,7 @@ entry: define void @memop_unsigned_char_and(i8* nocapture %p, i8 zeroext %x) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %and3 = and i8 %0, %x store i8 %and3, i8* %p, align 1 ret void @@ -57,7 +57,7 @@ entry: define void @memop_unsigned_char_clrbit(i8* nocapture %p) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %conv = zext i8 %0 to i32 %and = and i32 %conv, 223 %conv1 = trunc i32 %and to i8 @@ -68,7 +68,7 @@ entry: define void @memop_unsigned_char_setbit(i8* nocapture %p) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) - %0 = load i8* %p, align 1 + %0 = load i8, i8* %p, align 1 %conv = zext i8 %0 to i32 %or = or i32 %conv, 128 %conv1 = trunc i32 %or to i8 @@ -80,7 +80,7 @@ define void @memop_unsigned_char_add5_index(i8* nocapture %p, i32 %i) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5 %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 + %0 = load i8, i8* %add.ptr, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 5 %conv1 = trunc i32 %add to i8 @@ -93,7 +93,7 @@ entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}} %conv = zext i8 %x to i32 %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 
+ %0 = load i8, i8* %add.ptr, align 1 %conv1 = zext i8 %0 to i32 %add = add nsw i32 %conv1, %conv %conv2 = trunc i32 %add to i8 @@ -106,7 +106,7 @@ entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}} %conv = zext i8 %x to i32 %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 + %0 = load i8, i8* %add.ptr, align 1 %conv1 = zext i8 %0 to i32 %sub = sub nsw i32 %conv1, %conv %conv2 = trunc i32 %sub to i8 @@ -118,7 +118,7 @@ define void @memop_unsigned_char_or_index(i8* nocapture %p, i32 %i, i8 zeroext % entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}} %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 + %0 = load i8, i8* %add.ptr, align 1 %or3 = or i8 %0, %x store i8 %or3, i8* %add.ptr, align 1 ret void @@ -128,7 +128,7 @@ define void @memop_unsigned_char_and_index(i8* nocapture %p, i32 %i, i8 zeroext entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}} %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 + %0 = load i8, i8* %add.ptr, align 1 %and3 = and i8 %0, %x store i8 %and3, i8* %add.ptr, align 1 ret void @@ -138,7 +138,7 @@ define void @memop_unsigned_char_clrbit_index(i8* nocapture %p, i32 %i) nounwind entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}}) %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 + %0 = load i8, i8* %add.ptr, align 1 %conv = zext i8 %0 to i32 %and = and i32 %conv, 223 %conv1 = trunc i32 %and to i8 @@ -150,7 +150,7 @@ define void @memop_unsigned_char_setbit_index(i8* nocapture %p, i32 %i) nounwind entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}}) %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i - %0 = load i8* %add.ptr, align 1 + %0 = load i8, i8* %add.ptr, align 1 %conv = zext i8 %0 to i32 %or = or i32 %conv, 128 %conv1 = trunc i32 %or to i8 @@ -162,7 +162,7 @@ define void @memop_unsigned_char_add5_index5(i8* nocapture %p) nounwind { entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}#5 %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 - %0 = load i8* %add.ptr, align 1 + %0 = load i8, i8* %add.ptr, align 1 %conv = zext i8 %0 to i32 %add = add nsw i32 %conv, 5 %conv1 = trunc i32 %add to i8 @@ -175,7 +175,7 @@ entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}r{{[0-9]+}} %conv = zext i8 %x to i32 %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 - %0 = load i8* %add.ptr, align 1 + %0 = load i8, i8* %add.ptr, align 1 %conv1 = zext i8 %0 to i32 %add = add nsw i32 %conv1, %conv %conv2 = trunc i32 %add to i8 @@ -188,7 +188,7 @@ entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}-={{ *}}r{{[0-9]+}} %conv = zext i8 %x to i32 %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 - %0 = load i8* %add.ptr, align 1 + %0 = load i8, i8* %add.ptr, align 1 %conv1 = zext i8 %0 to i32 %sub = sub nsw i32 %conv1, %conv %conv2 = trunc i32 %sub to i8 @@ -200,7 +200,7 @@ define void @memop_unsigned_char_or_index5(i8* nocapture %p, i8 zeroext %x) noun entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}|={{ *}}r{{[0-9]+}} %add.ptr = getelementptr inbounds i8, i8* %p, i32 5 - %0 = load i8* %add.ptr, align 1 + %0 = load i8, i8* %add.ptr, align 1 %or3 = or i8 %0, %x store i8 %or3, i8* %add.ptr, align 1 ret void @@ -210,7 +210,7 @@ define void @memop_unsigned_char_and_index5(i8* nocapture %p, i8 zeroext %x) nou entry: ; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}&={{ *}}r{{[0-9]+}} 
%add.ptr = getelementptr inbounds i8, i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1
+ %0 = load i8, i8* %add.ptr, align 1
%and3 = and i8 %0, %x
store i8 %and3, i8* %add.ptr, align 1
ret void
@@ -220,7 +220,7 @@ define void @memop_unsigned_char_clrbit_index5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i8, i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1
+ %0 = load i8, i8* %add.ptr, align 1
%conv = zext i8 %0 to i32
%and = and i32 %conv, 223
%conv1 = trunc i32 %and to i8
@@ -232,7 +232,7 @@ define void @memop_unsigned_char_setbit_index5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i8, i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1
+ %0 = load i8, i8* %add.ptr, align 1
%conv = zext i8 %0 to i32
%or = or i32 %conv, 128
%conv1 = trunc i32 %or to i8
@@ -243,7 +243,7 @@ entry:
define void @memop_signed_char_add5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
- %0 = load i8* %p, align 1
+ %0 = load i8, i8* %p, align 1
%conv2 = zext i8 %0 to i32
%add = add nsw i32 %conv2, 5
%conv1 = trunc i32 %add to i8
@@ -255,7 +255,7 @@ define void @memop_signed_char_add(i8* nocapture %p, i8 signext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%conv4 = zext i8 %x to i32
- %0 = load i8* %p, align 1
+ %0 = load i8, i8* %p, align 1
%conv13 = zext i8 %0 to i32
%add = add nsw i32 %conv13, %conv4
%conv2 = trunc i32 %add to i8
@@ -267,7 +267,7 @@ define void @memop_signed_char_sub(i8* nocapture %p, i8 signext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%conv4 = zext i8 %x to i32
- %0 = load i8* %p, align 1
+ %0 = load i8, i8* %p, align 1
%conv13 = zext i8 %0 to i32
%sub = sub nsw i32 %conv13, %conv4
%conv2 = trunc i32 %sub to i8
@@ -278,7 +278,7 @@ entry:
define void @memop_signed_char_or(i8* nocapture %p, i8 signext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
- %0 = load i8* %p, align 1
+ %0 = load i8, i8* %p, align 1
%or3 = or i8 %0, %x
store i8 %or3, i8* %p, align 1
ret void
@@ -287,7 +287,7 @@ entry:
define void @memop_signed_char_and(i8* nocapture %p, i8 signext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
- %0 = load i8* %p, align 1
+ %0 = load i8, i8* %p, align 1
%and3 = and i8 %0, %x
store i8 %and3, i8* %p, align 1
ret void
@@ -296,7 +296,7 @@ entry:
define void @memop_signed_char_clrbit(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %0 = load i8* %p, align 1
+ %0 = load i8, i8* %p, align 1
%conv2 = zext i8 %0 to i32
%and = and i32 %conv2, 223
%conv1 = trunc i32 %and to i8
@@ -307,7 +307,7 @@ entry:
define void @memop_signed_char_setbit(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %0 = load i8* %p, align 1
+ %0 = load i8, i8* %p, align 1
%conv2 = zext i8 %0 to i32
%or = or i32 %conv2, 128
%conv1 = trunc i32 %or to i8
@@ -319,7 +319,7 @@ define void @memop_signed_char_add5_index(i8* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1
+ %0 = load i8, i8* %add.ptr, align 1
%conv2 = zext i8 %0 to i32
%add = add nsw i32 %conv2, 5
%conv1 = trunc i32 %add to i8
@@ -332,7 +332,7 @@ entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%conv4 = zext i8 %x to i32
%add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1
+ %0 = load i8, i8* %add.ptr, align 1
%conv13 = zext i8 %0 to i32
%add = add nsw i32 %conv13, %conv4
%conv2 = trunc i32 %add to i8
@@ -345,7 +345,7 @@ entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%conv4 = zext i8 %x to i32
%add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1
+ %0 = load i8, i8* %add.ptr, align 1
%conv13 = zext i8 %0 to i32
%sub = sub nsw i32 %conv13, %conv4
%conv2 = trunc i32 %sub to i8
@@ -357,7 +357,7 @@ define void @memop_signed_char_or_index(i8* nocapture %p, i32 %i, i8 signext %x)
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1
+ %0 = load i8, i8* %add.ptr, align 1
%or3 = or i8 %0, %x
store i8 %or3, i8* %add.ptr, align 1
ret void
@@ -367,7 +367,7 @@ define void @memop_signed_char_and_index(i8* nocapture %p, i32 %i, i8 signext %x
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1
+ %0 = load i8, i8* %add.ptr, align 1
%and3 = and i8 %0, %x
store i8 %and3, i8* %add.ptr, align 1
ret void
@@ -377,7 +377,7 @@ define void @memop_signed_char_clrbit_index(i8* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1
+ %0 = load i8, i8* %add.ptr, align 1
%conv2 = zext i8 %0 to i32
%and = and i32 %conv2, 223
%conv1 = trunc i32 %and to i8
@@ -389,7 +389,7 @@ define void @memop_signed_char_setbit_index(i8* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
- %0 = load i8* %add.ptr, align 1
+ %0 = load i8, i8* %add.ptr, align 1
%conv2 = zext i8 %0 to i32
%or = or i32 %conv2, 128
%conv1 = trunc i32 %or to i8
@@ -401,7 +401,7 @@ define void @memop_signed_char_add5_index5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i8, i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1
+ %0 = load i8, i8* %add.ptr, align 1
%conv2 = zext i8 %0 to i32
%add = add nsw i32 %conv2, 5
%conv1 = trunc i32 %add to i8
@@ -414,7 +414,7 @@ entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}r{{[0-9]+}}
%conv4 = zext i8 %x to i32
%add.ptr = getelementptr inbounds i8, i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1
+ %0 = load i8, i8* %add.ptr, align 1
%conv13 = zext i8 %0 to i32
%add = add nsw i32 %conv13, %conv4
%conv2 = trunc i32 %add to i8
@@ -427,7 +427,7 @@ entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}-={{ *}}r{{[0-9]+}}
%conv4 = zext i8 %x to i32
%add.ptr = getelementptr inbounds i8, i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1
+ %0 = load i8, i8* %add.ptr, align 1
%conv13 = zext i8 %0 to i32
%sub = sub nsw i32 %conv13, %conv4
%conv2 = trunc i32 %sub to i8
@@ -439,7 +439,7 @@ define void @memop_signed_char_or_index5(i8* nocapture %p, i8 signext %x) nounwi
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i8, i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1
+ %0 = load i8, i8* %add.ptr, align 1
%or3 = or i8 %0, %x
store i8 %or3, i8* %add.ptr, align 1
ret void
@@ -449,7 +449,7 @@ define void @memop_signed_char_and_index5(i8* nocapture %p, i8 signext %x) nounw
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i8, i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1
+ %0 = load i8, i8* %add.ptr, align 1
%and3 = and i8 %0, %x
store i8 %and3, i8* %add.ptr, align 1
ret void
@@ -459,7 +459,7 @@ define void @memop_signed_char_clrbit_index5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i8, i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1
+ %0 = load i8, i8* %add.ptr, align 1
%conv2 = zext i8 %0 to i32
%and = and i32 %conv2, 223
%conv1 = trunc i32 %and to i8
@@ -471,7 +471,7 @@ define void @memop_signed_char_setbit_index5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i8, i8* %p, i32 5
- %0 = load i8* %add.ptr, align 1
+ %0 = load i8, i8* %add.ptr, align 1
%conv2 = zext i8 %0 to i32
%or = or i32 %conv2, 128
%conv1 = trunc i32 %or to i8
@@ -482,7 +482,7 @@ entry:
define void @memop_unsigned_short_add5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
- %0 = load i16* %p, align 2
+ %0 = load i16, i16* %p, align 2
%conv = zext i16 %0 to i32
%add = add nsw i32 %conv, 5
%conv1 = trunc i32 %add to i16
@@ -494,7 +494,7 @@ define void @memop_unsigned_short_add(i16* nocapture %p, i16 zeroext %x) nounwin
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%conv = zext i16 %x to i32
- %0 = load i16* %p, align 2
+ %0 = load i16, i16* %p, align 2
%conv1 = zext i16 %0 to i32
%add = add nsw i32 %conv1, %conv
%conv2 = trunc i32 %add to i16
@@ -506,7 +506,7 @@ define void @memop_unsigned_short_sub(i16* nocapture %p, i16 zeroext %x) nounwin
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%conv = zext i16 %x to i32
- %0 = load i16* %p, align 2
+ %0 = load i16, i16* %p, align 2
%conv1 = zext i16 %0 to i32
%sub = sub nsw i32 %conv1, %conv
%conv2 = trunc i32 %sub to i16
@@ -517,7 +517,7 @@ entry:
define void @memop_unsigned_short_or(i16* nocapture %p, i16 zeroext %x) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
- %0 = load i16* %p, align 2
+ %0 = load i16, i16* %p, align 2
%or3 = or i16 %0, %x
store i16 %or3, i16* %p, align 2
ret void
@@ -526,7 +526,7 @@ entry:
define void @memop_unsigned_short_and(i16* nocapture %p, i16 zeroext %x) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
- %0 = load i16* %p, align 2
+ %0 = load i16, i16* %p, align 2
%and3 = and i16 %0, %x
store i16 %and3, i16* %p, align 2
ret void
@@ -535,7 +535,7 @@ entry:
define void @memop_unsigned_short_clrbit(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %0 = load i16* %p, align 2
+ %0 = load i16, i16* %p, align 2
%conv = zext i16 %0 to i32
%and = and i32 %conv, 65503
%conv1 = trunc i32 %and to i16
@@ -546,7 +546,7 @@ entry:
define void @memop_unsigned_short_setbit(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %0 = load i16* %p, align 2
+ %0 = load i16, i16* %p, align 2
%conv = zext i16 %0 to i32
%or = or i32 %conv, 128
%conv1 = trunc i32 %or to i16
@@ -558,7 +558,7 @@ define void @memop_unsigned_short_add5_index(i16* nocapture %p, i32 %i) nounwind
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv = zext i16 %0 to i32
%add = add nsw i32 %conv, 5
%conv1 = trunc i32 %add to i16
@@ -571,7 +571,7 @@ entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%conv = zext i16 %x to i32
%add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv1 = zext i16 %0 to i32
%add = add nsw i32 %conv1, %conv
%conv2 = trunc i32 %add to i16
@@ -584,7 +584,7 @@ entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%conv = zext i16 %x to i32
%add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv1 = zext i16 %0 to i32
%sub = sub nsw i32 %conv1, %conv
%conv2 = trunc i32 %sub to i16
@@ -596,7 +596,7 @@ define void @memop_unsigned_short_or_index(i16* nocapture %p, i32 %i, i16 zeroex
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%or3 = or i16 %0, %x
store i16 %or3, i16* %add.ptr, align 2
ret void
@@ -606,7 +606,7 @@ define void @memop_unsigned_short_and_index(i16* nocapture %p, i32 %i, i16 zeroe
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%and3 = and i16 %0, %x
store i16 %and3, i16* %add.ptr, align 2
ret void
@@ -616,7 +616,7 @@ define void @memop_unsigned_short_clrbit_index(i16* nocapture %p, i32 %i) nounwi
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv = zext i16 %0 to i32
%and = and i32 %conv, 65503
%conv1 = trunc i32 %and to i16
@@ -628,7 +628,7 @@ define void @memop_unsigned_short_setbit_index(i16* nocapture %p, i32 %i) nounwi
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv = zext i16 %0 to i32
%or = or i32 %conv, 128
%conv1 = trunc i32 %or to i16
@@ -640,7 +640,7 @@ define void @memop_unsigned_short_add5_index5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i16, i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv = zext i16 %0 to i32
%add = add nsw i32 %conv, 5
%conv1 = trunc i32 %add to i16
@@ -653,7 +653,7 @@ entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}r{{[0-9]+}}
%conv = zext i16 %x to i32
%add.ptr = getelementptr inbounds i16, i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv1 = zext i16 %0 to i32
%add = add nsw i32 %conv1, %conv
%conv2 = trunc i32 %add to i16
@@ -666,7 +666,7 @@ entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}r{{[0-9]+}}
%conv = zext i16 %x to i32
%add.ptr = getelementptr inbounds i16, i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv1 = zext i16 %0 to i32
%sub = sub nsw i32 %conv1, %conv
%conv2 = trunc i32 %sub to i16
@@ -678,7 +678,7 @@ define void @memop_unsigned_short_or_index5(i16* nocapture %p, i16 zeroext %x) n
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i16, i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%or3 = or i16 %0, %x
store i16 %or3, i16* %add.ptr, align 2
ret void
@@ -688,7 +688,7 @@ define void @memop_unsigned_short_and_index5(i16* nocapture %p, i16 zeroext %x)
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i16, i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%and3 = and i16 %0, %x
store i16 %and3, i16* %add.ptr, align 2
ret void
@@ -698,7 +698,7 @@ define void @memop_unsigned_short_clrbit_index5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i16, i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv = zext i16 %0 to i32
%and = and i32 %conv, 65503
%conv1 = trunc i32 %and to i16
@@ -710,7 +710,7 @@ define void @memop_unsigned_short_setbit_index5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i16, i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv = zext i16 %0 to i32
%or = or i32 %conv, 128
%conv1 = trunc i32 %or to i16
@@ -721,7 +721,7 @@ entry:
define void @memop_signed_short_add5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
- %0 = load i16* %p, align 2
+ %0 = load i16, i16* %p, align 2
%conv2 = zext i16 %0 to i32
%add = add nsw i32 %conv2, 5
%conv1 = trunc i32 %add to i16
@@ -733,7 +733,7 @@ define void @memop_signed_short_add(i16* nocapture %p, i16 signext %x) nounwind
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%conv4 = zext i16 %x to i32
- %0 = load i16* %p, align 2
+ %0 = load i16, i16* %p, align 2
%conv13 = zext i16 %0 to i32
%add = add nsw i32 %conv13, %conv4
%conv2 = trunc i32 %add to i16
@@ -745,7 +745,7 @@ define void @memop_signed_short_sub(i16* nocapture %p, i16 signext %x) nounwind
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%conv4 = zext i16 %x to i32
- %0 = load i16* %p, align 2
+ %0 = load i16, i16* %p, align 2
%conv13 = zext i16 %0 to i32
%sub = sub nsw i32 %conv13, %conv4
%conv2 = trunc i32 %sub to i16
@@ -756,7 +756,7 @@ entry:
define void @memop_signed_short_or(i16* nocapture %p, i16 signext %x) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
- %0 = load i16* %p, align 2
+ %0 = load i16, i16* %p, align 2
%or3 = or i16 %0, %x
store i16 %or3, i16* %p, align 2
ret void
@@ -765,7 +765,7 @@ entry:
define void @memop_signed_short_and(i16* nocapture %p, i16 signext %x) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
- %0 = load i16* %p, align 2
+ %0 = load i16, i16* %p, align 2
%and3 = and i16 %0, %x
store i16 %and3, i16* %p, align 2
ret void
@@ -774,7 +774,7 @@ entry:
define void @memop_signed_short_clrbit(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %0 = load i16* %p, align 2
+ %0 = load i16, i16* %p, align 2
%conv2 = zext i16 %0 to i32
%and = and i32 %conv2, 65503
%conv1 = trunc i32 %and to i16
@@ -785,7 +785,7 @@ entry:
define void @memop_signed_short_setbit(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %0 = load i16* %p, align 2
+ %0 = load i16, i16* %p, align 2
%conv2 = zext i16 %0 to i32
%or = or i32 %conv2, 128
%conv1 = trunc i32 %or to i16
@@ -797,7 +797,7 @@ define void @memop_signed_short_add5_index(i16* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%add = add nsw i32 %conv2, 5
%conv1 = trunc i32 %add to i16
@@ -810,7 +810,7 @@ entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%conv4 = zext i16 %x to i32
%add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv13 = zext i16 %0 to i32
%add = add nsw i32 %conv13, %conv4
%conv2 = trunc i32 %add to i16
@@ -823,7 +823,7 @@ entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%conv4 = zext i16 %x to i32
%add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv13 = zext i16 %0 to i32
%sub = sub nsw i32 %conv13, %conv4
%conv2 = trunc i32 %sub to i16
@@ -835,7 +835,7 @@ define void @memop_signed_short_or_index(i16* nocapture %p, i32 %i, i16 signext
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%or3 = or i16 %0, %x
store i16 %or3, i16* %add.ptr, align 2
ret void
@@ -845,7 +845,7 @@ define void @memop_signed_short_and_index(i16* nocapture %p, i32 %i, i16 signext
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%and3 = and i16 %0, %x
store i16 %and3, i16* %add.ptr, align 2
ret void
@@ -855,7 +855,7 @@ define void @memop_signed_short_clrbit_index(i16* nocapture %p, i32 %i) nounwind
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%and = and i32 %conv2, 65503
%conv1 = trunc i32 %and to i16
@@ -867,7 +867,7 @@ define void @memop_signed_short_setbit_index(i16* nocapture %p, i32 %i) nounwind
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%or = or i32 %conv2, 128
%conv1 = trunc i32 %or to i16
@@ -879,7 +879,7 @@ define void @memop_signed_short_add5_index5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i16, i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%add = add nsw i32 %conv2, 5
%conv1 = trunc i32 %add to i16
@@ -892,7 +892,7 @@ entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}r{{[0-9]+}}
%conv4 = zext i16 %x to i32
%add.ptr = getelementptr inbounds i16, i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv13 = zext i16 %0 to i32
%add = add nsw i32 %conv13, %conv4
%conv2 = trunc i32 %add to i16
@@ -905,7 +905,7 @@ entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}r{{[0-9]+}}
%conv4 = zext i16 %x to i32
%add.ptr = getelementptr inbounds i16, i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv13 = zext i16 %0 to i32
%sub = sub nsw i32 %conv13, %conv4
%conv2 = trunc i32 %sub to i16
@@ -917,7 +917,7 @@ define void @memop_signed_short_or_index5(i16* nocapture %p, i16 signext %x) nou
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i16, i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%or3 = or i16 %0, %x
store i16 %or3, i16* %add.ptr, align 2
ret void
@@ -927,7 +927,7 @@ define void @memop_signed_short_and_index5(i16* nocapture %p, i16 signext %x) no
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i16, i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%and3 = and i16 %0, %x
store i16 %and3, i16* %add.ptr, align 2
ret void
@@ -937,7 +937,7 @@ define void @memop_signed_short_clrbit_index5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i16, i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%and = and i32 %conv2, 65503
%conv1 = trunc i32 %and to i16
@@ -949,7 +949,7 @@ define void @memop_signed_short_setbit_index5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i16, i16* %p, i32 5
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%or = or i32 %conv2, 128
%conv1 = trunc i32 %or to i16
@@ -960,7 +960,7 @@ entry:
define void @memop_signed_int_add5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
- %0 = load i32* %p, align 4
+ %0 = load i32, i32* %p, align 4
%add = add i32 %0, 5
store i32 %add, i32* %p, align 4
ret void
@@ -969,7 +969,7 @@ entry:
define void @memop_signed_int_add(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
- %0 = load i32* %p, align 4
+ %0 = load i32, i32* %p, align 4
%add = add i32 %0, %x
store i32 %add, i32* %p, align 4
ret void
@@ -978,7 +978,7 @@ entry:
define void @memop_signed_int_sub(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
- %0 = load i32* %p, align 4
+ %0 = load i32, i32* %p, align 4
%sub = sub i32 %0, %x
store i32 %sub, i32* %p, align 4
ret void
@@ -987,7 +987,7 @@ entry:
define void @memop_signed_int_or(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
- %0 = load i32* %p, align 4
+ %0 = load i32, i32* %p, align 4
%or = or i32 %0, %x
store i32 %or, i32* %p, align 4
ret void
@@ -996,7 +996,7 @@ entry:
define void @memop_signed_int_and(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
- %0 = load i32* %p, align 4
+ %0 = load i32, i32* %p, align 4
%and = and i32 %0, %x
store i32 %and, i32* %p, align 4
ret void
@@ -1005,7 +1005,7 @@ entry:
define void @memop_signed_int_clrbit(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %0 = load i32* %p, align 4
+ %0 = load i32, i32* %p, align 4
%and = and i32 %0, -33
store i32 %and, i32* %p, align 4
ret void
@@ -1014,7 +1014,7 @@ entry:
define void @memop_signed_int_setbit(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %0 = load i32* %p, align 4
+ %0 = load i32, i32* %p, align 4
%or = or i32 %0, 128
store i32 %or, i32* %p, align 4
ret void
@@ -1024,7 +1024,7 @@ define void @memop_signed_int_add5_index(i32* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%add = add i32 %0, 5
store i32 %add, i32* %add.ptr, align 4
ret void
@@ -1034,7 +1034,7 @@ define void @memop_signed_int_add_index(i32* nocapture %p, i32 %i, i32 %x) nounw
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%add = add i32 %0, %x
store i32 %add, i32* %add.ptr, align 4
ret void
@@ -1044,7 +1044,7 @@ define void @memop_signed_int_sub_index(i32* nocapture %p, i32 %i, i32 %x) nounw
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%sub = sub i32 %0, %x
store i32 %sub, i32* %add.ptr, align 4
ret void
@@ -1054,7 +1054,7 @@ define void @memop_signed_int_or_index(i32* nocapture %p, i32 %i, i32 %x) nounwi
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%or = or i32 %0, %x
store i32 %or, i32* %add.ptr, align 4
ret void
@@ -1064,7 +1064,7 @@ define void @memop_signed_int_and_index(i32* nocapture %p, i32 %i, i32 %x) nounw
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%and = and i32 %0, %x
store i32 %and, i32* %add.ptr, align 4
ret void
@@ -1074,7 +1074,7 @@ define void @memop_signed_int_clrbit_index(i32* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%and = and i32 %0, -33
store i32 %and, i32* %add.ptr, align 4
ret void
@@ -1084,7 +1084,7 @@ define void @memop_signed_int_setbit_index(i32* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%or = or i32 %0, 128
store i32 %or, i32* %add.ptr, align 4
ret void
@@ -1094,7 +1094,7 @@ define void @memop_signed_int_add5_index5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i32, i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%add = add i32 %0, 5
store i32 %add, i32* %add.ptr, align 4
ret void
@@ -1104,7 +1104,7 @@ define void @memop_signed_int_add_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32, i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%add = add i32 %0, %x
store i32 %add, i32* %add.ptr, align 4
ret void
@@ -1114,7 +1114,7 @@ define void @memop_signed_int_sub_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32, i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%sub = sub i32 %0, %x
store i32 %sub, i32* %add.ptr, align 4
ret void
@@ -1124,7 +1124,7 @@ define void @memop_signed_int_or_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32, i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%or = or i32 %0, %x
store i32 %or, i32* %add.ptr, align 4
ret void
@@ -1134,7 +1134,7 @@ define void @memop_signed_int_and_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32, i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%and = and i32 %0, %x
store i32 %and, i32* %add.ptr, align 4
ret void
@@ -1144,7 +1144,7 @@ define void @memop_signed_int_clrbit_index5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i32, i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%and = and i32 %0, -33
store i32 %and, i32* %add.ptr, align 4
ret void
@@ -1154,7 +1154,7 @@ define void @memop_signed_int_setbit_index5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i32, i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%or = or i32 %0, 128
store i32 %or, i32* %add.ptr, align 4
ret void
@@ -1163,7 +1163,7 @@ entry:
define void @memop_unsigned_int_add5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
- %0 = load i32* %p, align 4
+ %0 = load i32, i32* %p, align 4
%add = add nsw i32 %0, 5
store i32 %add, i32* %p, align 4
ret void
@@ -1172,7 +1172,7 @@ entry:
define void @memop_unsigned_int_add(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
- %0 = load i32* %p, align 4
+ %0 = load i32, i32* %p, align 4
%add = add nsw i32 %0, %x
store i32 %add, i32* %p, align 4
ret void
@@ -1181,7 +1181,7 @@ entry:
define void @memop_unsigned_int_sub(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
- %0 = load i32* %p, align 4
+ %0 = load i32, i32* %p, align 4
%sub = sub nsw i32 %0, %x
store i32 %sub, i32* %p, align 4
ret void
@@ -1190,7 +1190,7 @@ entry:
define void @memop_unsigned_int_or(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
- %0 = load i32* %p, align 4
+ %0 = load i32, i32* %p, align 4
%or = or i32 %0, %x
store i32 %or, i32* %p, align 4
ret void
@@ -1199,7 +1199,7 @@ entry:
define void @memop_unsigned_int_and(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
- %0 = load i32* %p, align 4
+ %0 = load i32, i32* %p, align 4
%and = and i32 %0, %x
store i32 %and, i32* %p, align 4
ret void
@@ -1208,7 +1208,7 @@ entry:
define void @memop_unsigned_int_clrbit(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %0 = load i32* %p, align 4
+ %0 = load i32, i32* %p, align 4
%and = and i32 %0, -33
store i32 %and, i32* %p, align 4
ret void
@@ -1217,7 +1217,7 @@ entry:
define void @memop_unsigned_int_setbit(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %0 = load i32* %p, align 4
+ %0 = load i32, i32* %p, align 4
%or = or i32 %0, 128
store i32 %or, i32* %p, align 4
ret void
@@ -1227,7 +1227,7 @@ define void @memop_unsigned_int_add5_index(i32* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%add = add nsw i32 %0, 5
store i32 %add, i32* %add.ptr, align 4
ret void
@@ -1237,7 +1237,7 @@ define void @memop_unsigned_int_add_index(i32* nocapture %p, i32 %i, i32 %x) nou
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%add = add nsw i32 %0, %x
store i32 %add, i32* %add.ptr, align 4
ret void
@@ -1247,7 +1247,7 @@ define void @memop_unsigned_int_sub_index(i32* nocapture %p, i32 %i, i32 %x) nou
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%sub = sub nsw i32 %0, %x
store i32 %sub, i32* %add.ptr, align 4
ret void
@@ -1257,7 +1257,7 @@ define void @memop_unsigned_int_or_index(i32* nocapture %p, i32 %i, i32 %x) noun
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%or = or i32 %0, %x
store i32 %or, i32* %add.ptr, align 4
ret void
@@ -1267,7 +1267,7 @@ define void @memop_unsigned_int_and_index(i32* nocapture %p, i32 %i, i32 %x) nou
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%and = and i32 %0, %x
store i32 %and, i32* %add.ptr, align 4
ret void
@@ -1277,7 +1277,7 @@ define void @memop_unsigned_int_clrbit_index(i32* nocapture %p, i32 %i) nounwind
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%and = and i32 %0, -33
store i32 %and, i32* %add.ptr, align 4
ret void
@@ -1287,7 +1287,7 @@ define void @memop_unsigned_int_setbit_index(i32* nocapture %p, i32 %i) nounwind
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%or = or i32 %0, 128
store i32 %or, i32* %add.ptr, align 4
ret void
@@ -1297,7 +1297,7 @@ define void @memop_unsigned_int_add5_index5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}#5
%add.ptr = getelementptr inbounds i32, i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%add = add nsw i32 %0, 5
store i32 %add, i32* %add.ptr, align 4
ret void
@@ -1307,7 +1307,7 @@ define void @memop_unsigned_int_add_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32, i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%add = add nsw i32 %0, %x
store i32 %add, i32* %add.ptr, align 4
ret void
@@ -1317,7 +1317,7 @@ define void @memop_unsigned_int_sub_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32, i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%sub = sub nsw i32 %0, %x
store i32 %sub, i32* %add.ptr, align 4
ret void
@@ -1327,7 +1327,7 @@ define void @memop_unsigned_int_or_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}|={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32, i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%or = or i32 %0, %x
store i32 %or, i32* %add.ptr, align 4
ret void
@@ -1337,7 +1337,7 @@ define void @memop_unsigned_int_and_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}&={{ *}}r{{[0-9]+}}
%add.ptr = getelementptr inbounds i32, i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%and = and i32 %0, %x
store i32 %and, i32* %add.ptr, align 4
ret void
@@ -1347,7 +1347,7 @@ define void @memop_unsigned_int_clrbit_index5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
%add.ptr = getelementptr inbounds i32, i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%and = and i32 %0, -33
store i32 %and, i32* %add.ptr, align 4
ret void
@@ -1357,7 +1357,7 @@ define void @memop_unsigned_int_setbit_index5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
%add.ptr = getelementptr inbounds i32, i32* %p, i32 5
- %0 = load i32* %add.ptr, align 4
+ %0 = load i32, i32* %add.ptr, align 4
%or = or i32 %0, 128
store i32 %or, i32* %add.ptr, align 4
ret void
diff --git a/llvm/test/CodeGen/Hexagon/memops1.ll b/llvm/test/CodeGen/Hexagon/memops1.ll
index 3ba1a3e049b..37e885b6e0c 100644
--- a/llvm/test/CodeGen/Hexagon/memops1.ll
+++ b/llvm/test/CodeGen/Hexagon/memops1.ll
@@ -7,9 +7,9 @@ entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#40){{ *}}-={{ *}}#1
%p.addr = alloca i32*, align 4
store i32* %p, i32** %p.addr, align 4
- %0 = load i32** %p.addr, align 4
+ %0 = load i32*, i32** %p.addr, align 4
%add.ptr = getelementptr inbounds i32, i32* %0, i32 10
- %1 = load i32* %add.ptr, align 4
+ %1 = load i32, i32* %add.ptr, align 4
%sub = sub nsw i32 %1, 1
store i32 %sub, i32* %add.ptr, align 4
ret void
@@ -22,11 +22,11 @@ entry:
%i.addr = alloca i32, align 4
store i32* %p, i32** %p.addr, align 4
store i32 %i, i32* %i.addr, align 4
- %0 = load i32** %p.addr, align 4
- %1 = load i32* %i.addr, align 4
+ %0 = load i32*, i32** %p.addr, align 4
+ %1 = load i32, i32* %i.addr, align 4
%add.ptr = getelementptr inbounds i32, i32* %0, i32 %1
%add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 10
- %2 = load i32* %add.ptr1, align 4
+ %2 = load i32, i32* %add.ptr1, align 4
%sub = sub nsw i32 %2, 1
store i32 %sub, i32* %add.ptr1, align 4
ret void
diff --git a/llvm/test/CodeGen/Hexagon/memops2.ll b/llvm/test/CodeGen/Hexagon/memops2.ll
index 5a0532f84fc..f9f8a247811 100644
--- a/llvm/test/CodeGen/Hexagon/memops2.ll
+++ b/llvm/test/CodeGen/Hexagon/memops2.ll
@@ -6,7 +6,7 @@ define void @f(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}#1
%add.ptr = getelementptr inbounds i16, i16* %p, i32 10
- %0 = load i16* %add.ptr, align 2
+ %0 = load i16, i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%sub = add nsw i32 %conv2, 65535
%conv1 = trunc i32 %sub to i16
@@ -19,7 +19,7 @@ entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}#1
%add.ptr.sum = add i32 %i, 10
%add.ptr1 = getelementptr inbounds i16, i16* %p, i32 %add.ptr.sum
- %0 = load i16* %add.ptr1, align 2
+ %0 = load i16, i16* %add.ptr1, align 2
%conv3 = zext i16 %0 to i32
%sub = add nsw i32 %conv3, 65535
%conv2 = trunc i32 %sub to i16
diff --git a/llvm/test/CodeGen/Hexagon/memops3.ll b/llvm/test/CodeGen/Hexagon/memops3.ll
index 1e80baf362a..6cd7fdc4861 100644
--- a/llvm/test/CodeGen/Hexagon/memops3.ll
+++ b/llvm/test/CodeGen/Hexagon/memops3.ll
@@ -6,7 +6,7 @@ define void @f(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}#1
%add.ptr = getelementptr inbounds i8, i8* %p, i32 10
- %0 = load i8* %add.ptr, align 1
+ %0 = load i8, i8* %add.ptr, align 1
%conv = zext i8 %0 to i32
%sub = add nsw i32 %conv, 255
%conv1 = trunc i32 %sub to i8
@@ -19,7 +19,7 @@ entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}#1
%add.ptr.sum = add i32 %i, 10
%add.ptr1 = getelementptr inbounds i8, i8* %p, i32 %add.ptr.sum
- %0 = load i8* %add.ptr1, align 1
+ %0 = load i8, i8* %add.ptr1, align 1
%conv = zext i8 %0 to i32
%sub = add nsw i32 %conv, 255
%conv2 = trunc i32 %sub to i8
diff --git a/llvm/test/CodeGen/Hexagon/misaligned-access.ll b/llvm/test/CodeGen/Hexagon/misaligned-access.ll
index 4dafb44cc3e..f4b0cb9cb1e 100644
--- a/llvm/test/CodeGen/Hexagon/misaligned-access.ll
+++ b/llvm/test/CodeGen/Hexagon/misaligned-access.ll
@@ -7,10 +7,10 @@ declare i32 @_hi(i64) #1
define i32 @CSDRSEARCH_executeSearchManager() #0 {
entry:
%temp = alloca i32, align 4
- %0 = load i32* @temp1, align 4
+ %0 = load i32, i32* @temp1, align 4
store i32 %0, i32* %temp, align 4
%1 = bitcast i32* %temp to i64*
- %2 = load i64* %1, align 8
+ %2 = load i64, i64* %1, align 8
%call = call i32 @_hi(i64 %2)
ret i32 %call
}
diff --git a/llvm/test/CodeGen/Hexagon/mpy.ll b/llvm/test/CodeGen/Hexagon/mpy.ll
index d5c5ae34535..3ecf7d46ccb 100644
--- a/llvm/test/CodeGen/Hexagon/mpy.ll
+++ b/llvm/test/CodeGen/Hexagon/mpy.ll
@@ -9,10 +9,10 @@ entry:
store i32 %acc, i32* %acc.addr, align 4
store i32 %num, i32* %num.addr, align 4
store i32 %num2, i32* %num2.addr, align 4
- %0 = load i32* %num.addr, align 4
- %1 = load i32* %acc.addr, align 4
+ %0 = load i32, i32* %num.addr, align 4
+ %1 = load i32, i32* %acc.addr, align 4
%mul = mul nsw i32 %0, %1
- %2 = load i32* %num2.addr, align 4
+ %2 = load i32, i32* %num2.addr, align 4
%add = add nsw i32 %mul, %2
store i32 %add, i32* %num.addr, align 4
ret void
diff --git a/llvm/test/CodeGen/Hexagon/newvaluejump.ll b/llvm/test/CodeGen/Hexagon/newvaluejump.ll
index 9c7ca55cb8f..3e1ee179573 100644
--- a/llvm/test/CodeGen/Hexagon/newvaluejump.ll
+++ b/llvm/test/CodeGen/Hexagon/newvaluejump.ll
@@ -9,10 +9,10 @@ entry:
; CHECK: if (cmp.eq(r{{[0-9]+}}.new, #0)) jump{{.}}
%addr1 = alloca i32, align 4
%addr2 = alloca i32, align 4
- %0 = load i32* @i, align 4
+ %0 = load i32, i32* @i, align 4
store i32 %0, i32* %addr1, align 4
call void @bar(i32 1, i32 2)
- %1 = load i32* @j, align 4
+ %1 = load i32, i32* @j, align 4
%tobool = icmp ne i32 %1, 0
br i1 %tobool, label %if.then, label %if.else
diff --git a/llvm/test/CodeGen/Hexagon/newvaluejump2.ll b/llvm/test/CodeGen/Hexagon/newvaluejump2.ll
index 3d50ea5422c..36a0db16cfc 100644
--- a/llvm/test/CodeGen/Hexagon/newvaluejump2.ll
+++ b/llvm/test/CodeGen/Hexagon/newvaluejump2.ll
@@ -7,9 +7,9 @@ define i32 @main() nounwind {
entry:
; CHECK: if (cmp.gt(r{{[0-9]+}}.new, r{{[0-9]+}})) jump:{{[t|nt]}} .LBB{{[0-9]+}}_{{[0-9]+}}
%Reg2 = alloca i8, align 1
- %0 = load i8* %Reg2, align 1
+ %0 = load i8, i8* %Reg2, align 1
%conv0 = zext i8 %0 to i32
- %1 = load i8* @Reg, align 1
+ %1 = load i8, i8* @Reg, align 1
%conv1 = zext i8 %1 to i32
%tobool = icmp sle i32 %conv0, %conv1
br i1 %tobool, label %if.then, label %if.else
diff --git a/llvm/test/CodeGen/Hexagon/newvaluestore.ll b/llvm/test/CodeGen/Hexagon/newvaluestore.ll
index 93cf3479ab5..13cbba2d08e 100644
--- a/llvm/test/CodeGen/Hexagon/newvaluestore.ll
+++ b/llvm/test/CodeGen/Hexagon/newvaluestore.ll
@@ -11,11 +11,11 @@ entry:
%number1 = alloca i32, align 4
%number2 = alloca i32, align 4
%number3 = alloca i32, align 4
- %0 = load i32 * @i, align 4
+ %0 = load i32 , i32 * @i, align 4
store i32 %0, i32* %number1, align 4
- %1 = load i32 * @j, align 4
+ %1 = load i32 , i32 * @j, align 4
store i32 %1, i32* %number2, align 4
- %2 = load i32 * @k, align 4
+ %2 = load i32 , i32 * @k, align 4
store i32 %2, i32* %number3, align 4
ret i32 %0
}
diff --git a/llvm/test/CodeGen/Hexagon/opt-fabs.ll b/llvm/test/CodeGen/Hexagon/opt-fabs.ll
index 31b56fd6e98..da657e4b1b8 100644
--- a/llvm/test/CodeGen/Hexagon/opt-fabs.ll
+++ b/llvm/test/CodeGen/Hexagon/opt-fabs.ll
@@ -7,7 +7,7 @@ define float @my_fabsf(float %x) nounwind {
entry:
%x.addr = alloca float, align 4
store float %x, float* %x.addr, align 4
- %0 = load float* %x.addr, align 4
+ %0 = load float, float* %x.addr, align 4
%call = call float @fabsf(float %0) readnone
ret float %call
}
diff --git a/llvm/test/CodeGen/Hexagon/opt-fneg.ll b/llvm/test/CodeGen/Hexagon/opt-fneg.ll
index 479b4b64069..97895786586 100644
--- a/llvm/test/CodeGen/Hexagon/opt-fneg.ll
+++ b/llvm/test/CodeGen/Hexagon/opt-fneg.ll
@@ -6,7 +6,7 @@ entry:
; CHECK: r{{[0-9]+}} = togglebit(r{{[0-9]+}}, #31)
%x.addr = alloca float, align 4
store float %x, float* %x.addr, align 4
- %0 = load float* %x.addr, align 4
+ %0 = load float, float* %x.addr, align 4
%sub = fsub float -0.000000e+00, %0
ret float %sub
}
diff --git a/llvm/test/CodeGen/Hexagon/postinc-load.ll b/llvm/test/CodeGen/Hexagon/postinc-load.ll
index 547c7c69010..a9d987981d6 100644
--- a/llvm/test/CodeGen/Hexagon/postinc-load.ll
+++ b/llvm/test/CodeGen/Hexagon/postinc-load.ll
@@ -12,8 +12,8 @@ for.body:
%arrayidx.phi = phi i32* [ %a, %entry ], [ %arrayidx.inc, %for.body ]
%arrayidx1.phi = phi i16* [ %b, %entry ], [ %arrayidx1.inc, %for.body ]
%sum.03 = phi i32 [ 0, %entry ], [ %add2, %for.body ]
- %0 = load i32* %arrayidx.phi, align 4
- %1 = load i16* %arrayidx1.phi, align 2
+ %0 = load i32, i32* %arrayidx.phi, align 4
+ %1 = load i16, i16* %arrayidx1.phi, align 2
%conv = sext i16 %1 to i32
%add = add i32 %0, %sum.03
%add2 = add i32 %add, %conv
diff --git a/llvm/test/CodeGen/Hexagon/postinc-store.ll b/llvm/test/CodeGen/Hexagon/postinc-store.ll
index b836f715bfc..6315ca14a95 100644
--- a/llvm/test/CodeGen/Hexagon/postinc-store.ll
+++ b/llvm/test/CodeGen/Hexagon/postinc-store.ll
@@ -11,8 +11,8 @@ for.body: ; preds = %for.body, %entry
%lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ 10, %entry ]
%arrayidx.phi = phi i32* [ %a, %entry ], [ %arrayidx.inc, %for.body ]
%arrayidx1.phi = phi i16* [ %b, %entry ], [ %arrayidx1.inc, %for.body ]
- %0 = load i32* %arrayidx.phi, align 4
- %1 = load i16* %arrayidx1.phi, align 2
+ %0 = load i32, i32* %arrayidx.phi, align 4
+ %1 = load i16, i16* %arrayidx1.phi, align 2
%conv = sext i16 %1 to i32
%factor = mul i32 %0, 2
%add3 = add i32 %factor, %conv
diff --git a/llvm/test/CodeGen/Hexagon/pred-gp.ll b/llvm/test/CodeGen/Hexagon/pred-gp.ll
index 299bd8679da..3868e098007 100644
--- a/llvm/test/CodeGen/Hexagon/pred-gp.ll
+++ b/llvm/test/CodeGen/Hexagon/pred-gp.ll
@@ -14,11 +14,11 @@ entry:
br i1 %cmp, label %if.then, label %entry.if.end_crit_edge
entry.if.end_crit_edge:
- %.pre = load i32* @c, align 4
+ %.pre = load i32, i32* @c, align 4
br label %if.end
if.then:
- %0 = load i32* @d, align 4
+ %0 = load i32, i32* @d, align 4
store i32 %0, i32* @c, align 4
br label %if.end
diff --git a/llvm/test/CodeGen/Hexagon/pred-instrs.ll b/llvm/test/CodeGen/Hexagon/pred-instrs.ll
index 800073e49b0..e0a75f13dfa 100644
--- a/llvm/test/CodeGen/Hexagon/pred-instrs.ll
+++ b/llvm/test/CodeGen/Hexagon/pred-instrs.ll
@@ -25,6 +25,6 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%storemerge = phi i32 [ %and, %if.else ], [ %shl, %if.then ]
store i32 %storemerge, i32* @a, align 4
- %0 = load i32* @d, align 4
+ %0 = load i32, i32* @d, align 4
ret i32 %0
}
diff --git a/llvm/test/CodeGen/Hexagon/remove_lsr.ll b/llvm/test/CodeGen/Hexagon/remove_lsr.ll
index 640fdb5f02f..3b85c486348 100644
--- a/llvm/test/CodeGen/Hexagon/remove_lsr.ll
+++ b/llvm/test/CodeGen/Hexagon/remove_lsr.ll
@@ -54,9 +54,9 @@ for.body: ; preds = %for.body, %entry
%7 = trunc i64 %6 to i32
%8 = tail call i32 @llvm.hexagon.C2.mux(i32 %conv8, i32 %5, i32 %7)
store i32 %8, i32* %lsr.iv2931, align 4
- %srcval = load i64* %lsr.iv27, align 8
- %9 = load i8* %lsr.iv40, align 1
- %10 = load i8* %lsr.iv37, align 1
+ %srcval = load i64, i64* %lsr.iv27, align 8
+ %9 = load i8, i8* %lsr.iv40, align 1
+ %10 = load i8, i8* %lsr.iv37, align 1
%lftr.wideiv = trunc i32 %lsr.iv42 to i8
%exitcond = icmp eq i8 %lftr.wideiv, 32
%scevgep26 = getelementptr %union.vect64, %union.vect64* %lsr.iv, i32 1
diff --git a/llvm/test/CodeGen/Hexagon/static.ll b/llvm/test/CodeGen/Hexagon/static.ll
index 683a4c21bcb..760b8b55972 100644
--- a/llvm/test/CodeGen/Hexagon/static.ll
+++ b/llvm/test/CodeGen/Hexagon/static.ll
@@ -10,10 +10,10 @@ define void @foo() nounwind {
entry:
- %0 = load i32* @num, align 4
- %1 = load i32* @acc, align 4
+ %0 = load i32, i32* @num, align 4
+ %1 = load i32, i32* @acc, align 4
%mul = mul nsw i32 %0, %1
- %2 = load i32* @val, align 4
+ %2 = load i32, i32* @val, align 4
%add = add nsw i32 %mul, %2
store i32 %add, i32* @num, align 4
ret void
diff --git a/llvm/test/CodeGen/Hexagon/struct_args.ll b/llvm/test/CodeGen/Hexagon/struct_args.ll
index f91300b5067..95b76c7999d 100644
--- a/llvm/test/CodeGen/Hexagon/struct_args.ll
+++ b/llvm/test/CodeGen/Hexagon/struct_args.ll
@@ -8,7 +8,7 @@ define void @foo() nounwind {
entry:
- %0 = load i64* bitcast (%struct.small* @s1 to i64*), align 1
+ %0 = load i64, i64* bitcast (%struct.small* @s1 to i64*), align 1
call void @bar(i64 %0)
ret void
}
diff --git a/llvm/test/CodeGen/Hexagon/tfr-to-combine.ll b/llvm/test/CodeGen/Hexagon/tfr-to-combine.ll
index e3057cd1611..d22d685f7d6 100644
--- a/llvm/test/CodeGen/Hexagon/tfr-to-combine.ll
+++ b/llvm/test/CodeGen/Hexagon/tfr-to-combine.ll
@@ -20,7 +20,7 @@ define i64 @test2() #0 {
; CHECK: combine(#0, r{{[0-9]+}})
entry:
store i16 0, i16* @a, align 2
- %0 = load i16* @c, align 2
+ %0 = load i16, i16* @c, align 2
%conv2 = zext i16 %0 to i64
ret i64 %conv2
}
diff --git a/llvm/test/CodeGen/Hexagon/union-1.ll b/llvm/test/CodeGen/Hexagon/union-1.ll
index 00565868b12..8f2ff28b381 100644
--- a/llvm/test/CodeGen/Hexagon/union-1.ll
+++ b/llvm/test/CodeGen/Hexagon/union-1.ll
@@ -5,10 +5,10 @@ define void @word(i32* nocapture %a) nounwind {
entry:
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%1 = zext i32 %0 to i64
%add.ptr = getelementptr inbounds i32, i32* %a, i32 1
- %2 = load i32* %add.ptr, align 4
+ %2 = load i32, i32* %add.ptr, align 4
%3 = zext i32 %2 to i64
%4 = shl nuw i64 %3, 32
%ins = or i64 %4, %1
diff --git a/llvm/test/CodeGen/Hexagon/vaddh.ll b/llvm/test/CodeGen/Hexagon/vaddh.ll
index 01d20410978..88194b750ad 100644
--- a/llvm/test/CodeGen/Hexagon/vaddh.ll
+++ b/llvm/test/CodeGen/Hexagon/vaddh.ll
@@ -6,8 +6,8 @@ define void @foo() nounwind {
entry:
- %0 = load i32* @j, align 4
- %1 = load i32* @k, align 4
+ %0 = load i32, i32* @j, align 4
+ %1 = load i32, i32* @k, align 4
%2 = call i32 @llvm.hexagon.A2.svaddh(i32 %0, i32 %1)
store i32 %2, i32* @k, align 4
ret void
diff --git a/llvm/test/CodeGen/Hexagon/validate-offset.ll b/llvm/test/CodeGen/Hexagon/validate-offset.ll
index 9e7d0aa0783..8de006c80b1 100644
--- a/llvm/test/CodeGen/Hexagon/validate-offset.ll
+++ b/llvm/test/CodeGen/Hexagon/validate-offset.ll
@@ -11,26 +11,26 @@ entry:
%b.addr = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 %b, i32* %b.addr, align 4
- %0 = load i32* %a.addr, align 4
- %1 = load i32* %b.addr, align 4
+ %0 = load i32, i32* %a.addr, align 4
+ %1 = load i32, i32* %b.addr, align 4
%cmp = icmp sgt i32 %0, %1
br i1 %cmp, label %if.then, label %if.else
if.then:
- %2 = load i32* %a.addr, align 4
- %3 = load i32* %b.addr, align 4
+ %2 = load i32, i32* %a.addr, align 4
+ %3 = load i32, i32* %b.addr, align 4
%add = add nsw i32 %2, %3
store i32 %add, i32* %retval
br label %return
if.else:
- %4 = load i32* %a.addr, align 4
- %5 = load i32* %b.addr, align 4
+ %4 = load i32, i32* %a.addr, align 4
+ %5 = load i32, i32* %b.addr, align 4
%sub = sub nsw i32 %4, %5
store i32 %sub, i32* %retval
br label %return
return:
- %6 = load i32* %retval
+ %6 = load i32, i32* %retval
ret i32 %6
}
diff --git a/llvm/test/CodeGen/Hexagon/zextloadi1.ll b/llvm/test/CodeGen/Hexagon/zextloadi1.ll
index b58d9332695..9ce7bea9fce 100644
--- a/llvm/test/CodeGen/Hexagon/zextloadi1.ll
+++ b/llvm/test/CodeGen/Hexagon/zextloadi1.ll
@@ -13,13 +13,13 @@ @i129_s = external global i129
define void @i129_ls() nounwind {
- %tmp = load i129* @i129_l
+ %tmp = load i129, i129* @i129_l
store i129 %tmp, i129* @i129_s
ret void
}
define void @i65_ls() nounwind {
- %tmp = load i65* @i65_l
+ %tmp = load i65, i65* @i65_l
store i65 %tmp, i65* @i65_s
ret void
}