Diffstat (limited to 'llvm/test/Bitcode')
-rw-r--r--  llvm/test/Bitcode/arm32_neon_vcnt_upgrade.ll         |   4
-rw-r--r--  llvm/test/Bitcode/case-ranges-3.3.ll                 |   4
-rw-r--r--  llvm/test/Bitcode/function-encoding-rel-operands.ll  |   2
-rw-r--r--  llvm/test/Bitcode/memInstructions.3.2.ll             | 128
-rw-r--r--  llvm/test/Bitcode/metadata-2.ll                      |   2
-rw-r--r--  llvm/test/Bitcode/upgrade-loop-metadata.ll           |   4
-rw-r--r--  llvm/test/Bitcode/use-list-order.ll                  |   6
7 files changed, 75 insertions, 75 deletions
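
Every hunk below makes the same mechanical change: the load instruction now
spells out its result type explicitly instead of leaving it to be inferred
from the pointee type of the pointer operand. As a minimal sketch (this
function is illustrative only, not part of the patch), the old and new
spellings of the same load are:

    define i32 @example(i32* %p) {
    entry:
      ; old syntax (result type inferred):  %v = load i32* %p
      ; new syntax (result type explicit):
      %v = load i32, i32* %p
      ret i32 %v
    }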
diff --git a/llvm/test/Bitcode/arm32_neon_vcnt_upgrade.ll b/llvm/test/Bitcode/arm32_neon_vcnt_upgrade.ll
index ed3981b465a..0032c4a9b7a 100644
--- a/llvm/test/Bitcode/arm32_neon_vcnt_upgrade.ll
+++ b/llvm/test/Bitcode/arm32_neon_vcnt_upgrade.ll
@@ -4,7 +4,7 @@
 
 define <4 x i16> @vclz16(<4 x i16>* %A) nounwind {
 ;CHECK: @vclz16
-  %tmp1 = load <4 x i16>* %A
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
   %tmp2 = call <4 x i16> @llvm.arm.neon.vclz.v4i16(<4 x i16> %tmp1)
 ;CHECK: {{call.*@llvm.ctlz.v4i16\(<4 x i16>.*, i1 false}}
   ret <4 x i16> %tmp2
@@ -12,7 +12,7 @@ define <4 x i16> @vclz16(<4 x i16>* %A) nounwind {
 
 define <8 x i8> @vcnt8(<8 x i8>* %A) nounwind {
 ;CHECK: @vcnt8
-  %tmp1 = load <8 x i8>* %A
+  %tmp1 = load <8 x i8>, <8 x i8>* %A
   %tmp2 = call <8 x i8> @llvm.arm.neon.vcnt.v8i8(<8 x i8> %tmp1)
 ;CHECK: call <8 x i8> @llvm.ctpop.v8i8(<8 x i8>
   ret <8 x i8> %tmp2
diff --git a/llvm/test/Bitcode/case-ranges-3.3.ll b/llvm/test/Bitcode/case-ranges-3.3.ll
index 020b37f49db..eb55ef1ad21 100644
--- a/llvm/test/Bitcode/case-ranges-3.3.ll
+++ b/llvm/test/Bitcode/case-ranges-3.3.ll
@@ -10,7 +10,7 @@ define i32 @foo(i32 %x) nounwind ssp uwtable {
   %1 = alloca i32, align 4
   %2 = alloca i32, align 4
   store i32 %x, i32* %2, align 4
-  %3 = load i32* %2, align 4
+  %3 = load i32, i32* %2, align 4
   switch i32 %3, label %9 [
 ; CHECK: switch i32 %3, label %9
     i32 -3, label %4
@@ -63,6 +63,6 @@ define i32 @foo(i32 %x) nounwind ssp uwtable {
   br label %11
 
 ; <label>:11
-  %12 = load i32* %1
+  %12 = load i32, i32* %1
   ret i32 %12
 }
diff --git a/llvm/test/Bitcode/function-encoding-rel-operands.ll b/llvm/test/Bitcode/function-encoding-rel-operands.ll
index d7a751664c3..1307dd48337 100644
--- a/llvm/test/Bitcode/function-encoding-rel-operands.ll
+++ b/llvm/test/Bitcode/function-encoding-rel-operands.ll
@@ -44,7 +44,7 @@ define double @test_float_binops(i32 %a) nounwind {
 define i1 @test_load(i32 %a, {i32, i32}* %ptr) nounwind {
 entry:
   %0 = getelementptr inbounds {i32, i32}, {i32, i32}* %ptr, i32 %a, i32 0
-  %1 = load i32* %0
+  %1 = load i32, i32* %0
   %2 = icmp eq i32 %1, %a
   ret i1 %2
 }
diff --git a/llvm/test/Bitcode/memInstructions.3.2.ll b/llvm/test/Bitcode/memInstructions.3.2.ll
index 356ecf77fd3..f430086e96a 100644
--- a/llvm/test/Bitcode/memInstructions.3.2.ll
+++ b/llvm/test/Bitcode/memInstructions.3.2.ll
@@ -27,53 +27,53 @@ entry:
   %ptr1 = alloca i8
   store i8 2, i8* %ptr1
 
-; CHECK: %res1 = load i8* %ptr1
-  %res1 = load i8* %ptr1
+; CHECK: %res1 = load i8, i8* %ptr1
+  %res1 = load i8, i8* %ptr1
 
-; CHECK-NEXT: %res2 = load volatile i8* %ptr1
-  %res2 = load volatile i8* %ptr1
+; CHECK-NEXT: %res2 = load volatile i8, i8* %ptr1
+  %res2 = load volatile i8, i8* %ptr1
 
-; CHECK-NEXT: %res3 = load i8* %ptr1, align 1
-  %res3 = load i8* %ptr1, align 1
+; CHECK-NEXT: %res3 = load i8, i8* %ptr1, align 1
+  %res3 = load i8, i8* %ptr1, align 1
 
-; CHECK-NEXT: %res4 = load volatile i8* %ptr1, align 1
-  %res4 = load volatile i8* %ptr1, align 1
+; CHECK-NEXT: %res4 = load volatile i8, i8* %ptr1, align 1
+  %res4 = load volatile i8, i8* %ptr1, align 1
 
-; CHECK-NEXT: %res5 = load i8* %ptr1, !nontemporal !0
-  %res5 = load i8* %ptr1, !nontemporal !0
+; CHECK-NEXT: %res5 = load i8, i8* %ptr1, !nontemporal !0
+  %res5 = load i8, i8* %ptr1, !nontemporal !0
 
-; CHECK-NEXT: %res6 = load volatile i8* %ptr1, !nontemporal !0
-  %res6 = load volatile i8* %ptr1, !nontemporal !0
+; CHECK-NEXT: %res6 = load volatile i8, i8* %ptr1, !nontemporal !0
+  %res6 = load volatile i8, i8* %ptr1, !nontemporal !0
 
-; CHECK-NEXT: %res7 = load i8* %ptr1, align 1, !nontemporal !0
-  %res7 = load i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: %res7 = load i8, i8* %ptr1, align 1, !nontemporal !0
+  %res7 = load i8, i8* %ptr1, align 1, !nontemporal !0
 
-; CHECK-NEXT: %res8 = load volatile i8* %ptr1, align 1, !nontemporal !0
-  %res8 = load volatile i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: %res8 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
+  %res8 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
 
-; CHECK-NEXT: %res9 = load i8* %ptr1, !invariant.load !1
-  %res9 = load i8* %ptr1, !invariant.load !1
+; CHECK-NEXT: %res9 = load i8, i8* %ptr1, !invariant.load !1
+  %res9 = load i8, i8* %ptr1, !invariant.load !1
 
-; CHECK-NEXT: %res10 = load volatile i8* %ptr1, !invariant.load !1
-  %res10 = load volatile i8* %ptr1, !invariant.load !1
+; CHECK-NEXT: %res10 = load volatile i8, i8* %ptr1, !invariant.load !1
+  %res10 = load volatile i8, i8* %ptr1, !invariant.load !1
 
-; CHECK-NEXT: %res11 = load i8* %ptr1, align 1, !invariant.load !1
-  %res11 = load i8* %ptr1, align 1, !invariant.load !1
+; CHECK-NEXT: %res11 = load i8, i8* %ptr1, align 1, !invariant.load !1
+  %res11 = load i8, i8* %ptr1, align 1, !invariant.load !1
 
-; CHECK-NEXT: %res12 = load volatile i8* %ptr1, align 1, !invariant.load !1
-  %res12 = load volatile i8* %ptr1, align 1, !invariant.load !1
+; CHECK-NEXT: %res12 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1
+  %res12 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1
 
-; CHECK-NEXT: %res13 = load i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
-  %res13 = load i8* %ptr1, !nontemporal !0, !invariant.load !1
+; CHECK-NEXT: %res13 = load i8, i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+  %res13 = load i8, i8* %ptr1, !nontemporal !0, !invariant.load !1
 
-; CHECK-NEXT: %res14 = load volatile i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
-  %res14 = load volatile i8* %ptr1, !nontemporal !0, !invariant.load !1
+; CHECK-NEXT: %res14 = load volatile i8, i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+  %res14 = load volatile i8, i8* %ptr1, !nontemporal !0, !invariant.load !1
 
-; CHECK-NEXT: %res15 = load i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
-  %res15 = load i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
+; CHECK-NEXT: %res15 = load i8, i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+  %res15 = load i8, i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
 
-; CHECK-NEXT: %res16 = load volatile i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
-  %res16 = load volatile i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
+; CHECK-NEXT: %res16 = load volatile i8, i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+  %res16 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
 
   ret void
 }
@@ -83,53 +83,53 @@ entry:
   %ptr1 = alloca i8
   store i8 2, i8* %ptr1
 
-; CHECK: %res1 = load atomic i8* %ptr1 unordered, align 1
-  %res1 = load atomic i8* %ptr1 unordered, align 1
+; CHECK: %res1 = load atomic i8, i8* %ptr1 unordered, align 1
+  %res1 = load atomic i8, i8* %ptr1 unordered, align 1
 
-; CHECK-NEXT: %res2 = load atomic i8* %ptr1 monotonic, align 1
-  %res2 = load atomic i8* %ptr1 monotonic, align 1
+; CHECK-NEXT: %res2 = load atomic i8, i8* %ptr1 monotonic, align 1
+  %res2 = load atomic i8, i8* %ptr1 monotonic, align 1
 
-; CHECK-NEXT: %res3 = load atomic i8* %ptr1 acquire, align 1
-  %res3 = load atomic i8* %ptr1 acquire, align 1
+; CHECK-NEXT: %res3 = load atomic i8, i8* %ptr1 acquire, align 1
+  %res3 = load atomic i8, i8* %ptr1 acquire, align 1
 
-; CHECK-NEXT: %res4 = load atomic i8* %ptr1 seq_cst, align 1
-  %res4 = load atomic i8* %ptr1 seq_cst, align 1
+; CHECK-NEXT: %res4 = load atomic i8, i8* %ptr1 seq_cst, align 1
+  %res4 = load atomic i8, i8* %ptr1 seq_cst, align 1
 
-; CHECK-NEXT: %res5 = load atomic volatile i8* %ptr1 unordered, align 1
-  %res5 = load atomic volatile i8* %ptr1 unordered, align 1
+; CHECK-NEXT: %res5 = load atomic volatile i8, i8* %ptr1 unordered, align 1
+  %res5 = load atomic volatile i8, i8* %ptr1 unordered, align 1
 
-; CHECK-NEXT: %res6 = load atomic volatile i8* %ptr1 monotonic, align 1
-  %res6 = load atomic volatile i8* %ptr1 monotonic, align 1
+; CHECK-NEXT: %res6 = load atomic volatile i8, i8* %ptr1 monotonic, align 1
+  %res6 = load atomic volatile i8, i8* %ptr1 monotonic, align 1
 
-; CHECK-NEXT: %res7 = load atomic volatile i8* %ptr1 acquire, align 1
-  %res7 = load atomic volatile i8* %ptr1 acquire, align 1
+; CHECK-NEXT: %res7 = load atomic volatile i8, i8* %ptr1 acquire, align 1
+  %res7 = load atomic volatile i8, i8* %ptr1 acquire, align 1
 
-; CHECK-NEXT: %res8 = load atomic volatile i8* %ptr1 seq_cst, align 1
-  %res8 = load atomic volatile i8* %ptr1 seq_cst, align 1
+; CHECK-NEXT: %res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
+  %res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
 
-; CHECK-NEXT: %res9 = load atomic i8* %ptr1 singlethread unordered, align 1
-  %res9 = load atomic i8* %ptr1 singlethread unordered, align 1
+; CHECK-NEXT: %res9 = load atomic i8, i8* %ptr1 singlethread unordered, align 1
+  %res9 = load atomic i8, i8* %ptr1 singlethread unordered, align 1
 
-; CHECK-NEXT: %res10 = load atomic i8* %ptr1 singlethread monotonic, align 1
-  %res10 = load atomic i8* %ptr1 singlethread monotonic, align 1
+; CHECK-NEXT: %res10 = load atomic i8, i8* %ptr1 singlethread monotonic, align 1
+  %res10 = load atomic i8, i8* %ptr1 singlethread monotonic, align 1
 
-; CHECK-NEXT: %res11 = load atomic i8* %ptr1 singlethread acquire, align 1
-  %res11 = load atomic i8* %ptr1 singlethread acquire, align 1
+; CHECK-NEXT: %res11 = load atomic i8, i8* %ptr1 singlethread acquire, align 1
+  %res11 = load atomic i8, i8* %ptr1 singlethread acquire, align 1
 
-; CHECK-NEXT: %res12 = load atomic i8* %ptr1 singlethread seq_cst, align 1
-  %res12 = load atomic i8* %ptr1 singlethread seq_cst, align 1
+; CHECK-NEXT: %res12 = load atomic i8, i8* %ptr1 singlethread seq_cst, align 1
+  %res12 = load atomic i8, i8* %ptr1 singlethread seq_cst, align 1
 
-; CHECK-NEXT: %res13 = load atomic volatile i8* %ptr1 singlethread unordered, align 1
-  %res13 = load atomic volatile i8* %ptr1 singlethread unordered, align 1
+; CHECK-NEXT: %res13 = load atomic volatile i8, i8* %ptr1 singlethread unordered, align 1
+  %res13 = load atomic volatile i8, i8* %ptr1 singlethread unordered, align 1
 
-; CHECK-NEXT: %res14 = load atomic volatile i8* %ptr1 singlethread monotonic, align 1
-  %res14 = load atomic volatile i8* %ptr1 singlethread monotonic, align 1
+; CHECK-NEXT: %res14 = load atomic volatile i8, i8* %ptr1 singlethread monotonic, align 1
+  %res14 = load atomic volatile i8, i8* %ptr1 singlethread monotonic, align 1
 
-; CHECK-NEXT: %res15 = load atomic volatile i8* %ptr1 singlethread acquire, align 1
-  %res15 = load atomic volatile i8* %ptr1 singlethread acquire, align 1
+; CHECK-NEXT: %res15 = load atomic volatile i8, i8* %ptr1 singlethread acquire, align 1
+  %res15 = load atomic volatile i8, i8* %ptr1 singlethread acquire, align 1
 
-; CHECK-NEXT: %res16 = load atomic volatile i8* %ptr1 singlethread seq_cst, align 1
-  %res16 = load atomic volatile i8* %ptr1 singlethread seq_cst, align 1
+; CHECK-NEXT: %res16 = load atomic volatile i8, i8* %ptr1 singlethread seq_cst, align 1
+  %res16 = load atomic volatile i8, i8* %ptr1 singlethread seq_cst, align 1
 
   ret void
 }
diff --git a/llvm/test/Bitcode/metadata-2.ll b/llvm/test/Bitcode/metadata-2.ll
index 07371a30d27..a5367da4af3 100644
--- a/llvm/test/Bitcode/metadata-2.ll
+++ b/llvm/test/Bitcode/metadata-2.ll
@@ -77,7 +77,7 @@ entry:
 
 define internal void @_D5tango4core8BitManip16__moduleinfoCtorZ() nounwind {
 moduleinfoCtorEntry:
-  %current = load %ModuleReference** @_Dmodule_ref    ; <%ModuleReference*> [#uses=1]
+  %current = load %ModuleReference*, %ModuleReference** @_Dmodule_ref    ; <%ModuleReference*> [#uses=1]
   store %ModuleReference* %current, %ModuleReference** getelementptr (%ModuleReference* @_D5tango4core8BitManip11__moduleRefZ, i32 0, i32 0)
   store %ModuleReference* @_D5tango4core8BitManip11__moduleRefZ, %ModuleReference** @_Dmodule_ref
   ret void
diff --git a/llvm/test/Bitcode/upgrade-loop-metadata.ll b/llvm/test/Bitcode/upgrade-loop-metadata.ll
index be2a99a47af..8dee9075787 100644
--- a/llvm/test/Bitcode/upgrade-loop-metadata.ll
+++ b/llvm/test/Bitcode/upgrade-loop-metadata.ll
@@ -10,7 +10,7 @@ entry:
   br label %for.cond
 
 for.cond:                                         ; preds = %for.inc, %entry
-  %0 = load i32* %i, align 4
+  %0 = load i32, i32* %i, align 4
   %cmp = icmp slt i32 %0, 16
   br i1 %cmp, label %for.body, label %for.end, !llvm.loop !1
 
@@ -18,7 +18,7 @@ for.body:                                         ; preds = %for.cond
   br label %for.inc
 
 for.inc:                                          ; preds = %for.body
-  %1 = load i32* %i, align 4
+  %1 = load i32, i32* %i, align 4
   %inc = add nsw i32 %1, 1
   store i32 %inc, i32* %i, align 4
   br label %for.cond
diff --git a/llvm/test/Bitcode/use-list-order.ll b/llvm/test/Bitcode/use-list-order.ll
index 6617b9c5edf..f57b4a67e36 100644
--- a/llvm/test/Bitcode/use-list-order.ll
+++ b/llvm/test/Bitcode/use-list-order.ll
@@ -79,13 +79,13 @@ entry:
 
 define i1 @loadb() {
 entry:
-  %b = load i1* @b
+  %b = load i1, i1* @b
   ret i1 %b
 }
 
 define i1 @loada() {
 entry:
-  %a = load i1* getelementptr ([4 x i1]* @a, i64 0, i64 2)
+  %a = load i1, i1* getelementptr ([4 x i1]* @a, i64 0, i64 2)
   ret i1 %a
 }
 
@@ -115,7 +115,7 @@ first:
 
 define i4 @globalAndFunctionFunctionUser() {
 entry:
-  %local = load i4* @globalAndFunction
+  %local = load i4, i4* @globalAndFunction
   ret i4 %local
 }