author    David Blaikie <dblaikie@gmail.com>  2015-02-27 21:17:42 +0000
committer David Blaikie <dblaikie@gmail.com>  2015-02-27 21:17:42 +0000
commit    a79ac14fa68297f9888bc70a10df5ed9b8864e38 (patch)
tree      8d8217a8928e3ee599bdde405e2e178b3a55b645 /llvm/test/CodeGen/SPARC
parent    83687fb9e654c9d0086e7f6b728c26fa0b729e71 (diff)
[opaque pointer type] Add textual IR support for explicit type parameter to load instruction
Essentially the same as the GEP change in r230786.

A similar migration script can be used to update test cases, though a few more
test case improvements/changes were required this time around: (r229269-r229278)

import fileinput
import sys
import re

pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

for line in sys.stdin:
  sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))

Reviewers: rafael, dexonsmith, grosser

Differential Revision: http://reviews.llvm.org/D7649

llvm-svn: 230794
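
As a quick illustration (not part of the original commit message), here is a minimal,
self-contained Python sketch of the same rewrite applied to one pre-change load line
taken from the first hunk below; the regex and replacement string are copied verbatim
from the migration script above:

import re

# Regex and replacement copied from the migration script in the commit message.
pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

old = "  %0 = load i32* %fsr, align 4"   # old-style typed-pointer load from the first hunk
new = re.sub(pat, r"\1, \2\3*\4", old)   # insert the explicit value type before the pointer
print(new)                               # "  %0 = load i32, i32* %fsr, align 4"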
Diffstat (limited to 'llvm/test/CodeGen/SPARC')
-rw-r--r--  llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll |  2
-rw-r--r--  llvm/test/CodeGen/SPARC/2009-08-28-PIC.ll                    |  4
-rw-r--r--  llvm/test/CodeGen/SPARC/2011-01-11-CC.ll                     |  6
-rw-r--r--  llvm/test/CodeGen/SPARC/2011-01-22-SRet.ll                   |  6
-rw-r--r--  llvm/test/CodeGen/SPARC/64abi.ll                             | 16
-rw-r--r--  llvm/test/CodeGen/SPARC/64bit.ll                             | 16
-rw-r--r--  llvm/test/CodeGen/SPARC/atomics.ll                           |  8
-rw-r--r--  llvm/test/CodeGen/SPARC/fp128.ll                             | 34
-rw-r--r--  llvm/test/CodeGen/SPARC/globals.ll                           |  2
-rw-r--r--  llvm/test/CodeGen/SPARC/leafproc.ll                          |  2
-rw-r--r--  llvm/test/CodeGen/SPARC/mult-alt-generic-sparc.ll            | 38
-rw-r--r--  llvm/test/CodeGen/SPARC/obj-relocs.ll                        |  2
-rw-r--r--  llvm/test/CodeGen/SPARC/private.ll                           |  2
-rw-r--r--  llvm/test/CodeGen/SPARC/setjmp.ll                            |  4
-rw-r--r--  llvm/test/CodeGen/SPARC/spillsize.ll                         |  4
-rw-r--r--  llvm/test/CodeGen/SPARC/tls.ll                               |  4
-rw-r--r--  llvm/test/CodeGen/SPARC/varargs.ll                           |  2
17 files changed, 76 insertions, 76 deletions
diff --git a/llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll b/llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll
index 373a1967307..07e250b3c98 100644
--- a/llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll
+++ b/llvm/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll
@@ -8,7 +8,7 @@ define internal void @set_fast_math() nounwind {
entry:
%fsr = alloca i32 ; <i32*> [#uses=4]
call void asm "st %fsr, $0", "=*m"(i32* %fsr) nounwind
- %0 = load i32* %fsr, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %fsr, align 4 ; <i32> [#uses=1]
%1 = or i32 %0, 4194304 ; <i32> [#uses=1]
store i32 %1, i32* %fsr, align 4
call void asm sideeffect "ld $0, %fsr", "*m"(i32* %fsr) nounwind
diff --git a/llvm/test/CodeGen/SPARC/2009-08-28-PIC.ll b/llvm/test/CodeGen/SPARC/2009-08-28-PIC.ll
index b004b11b853..baad2ae507d 100644
--- a/llvm/test/CodeGen/SPARC/2009-08-28-PIC.ll
+++ b/llvm/test/CodeGen/SPARC/2009-08-28-PIC.ll
@@ -14,7 +14,7 @@
define i32 @func(i32 %a) nounwind readonly {
entry:
- %0 = load i32* @foo, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* @foo, align 4 ; <i32> [#uses=1]
ret i32 %0
}
@@ -36,7 +36,7 @@ entry:
br i1 %cmp, label %if.then, label %if.end
if.then:
- %ret = load i32* @foo, align 4
+ %ret = load i32, i32* @foo, align 4
ret i32 %ret
if.end:
diff --git a/llvm/test/CodeGen/SPARC/2011-01-11-CC.ll b/llvm/test/CodeGen/SPARC/2011-01-11-CC.ll
index 50f3a65ff9a..6ea78dd7e16 100644
--- a/llvm/test/CodeGen/SPARC/2011-01-11-CC.ll
+++ b/llvm/test/CodeGen/SPARC/2011-01-11-CC.ll
@@ -177,13 +177,13 @@ define void @test_adde_sube(i8* %a, i8* %b, i8* %sum, i8* %diff) {
entry:
%0 = bitcast i8* %a to i128*
%1 = bitcast i8* %b to i128*
- %2 = load i128* %0
- %3 = load i128* %1
+ %2 = load i128, i128* %0
+ %3 = load i128, i128* %1
%4 = add i128 %2, %3
%5 = bitcast i8* %sum to i128*
store i128 %4, i128* %5
tail call void asm sideeffect "", "=*m,*m"(i128 *%0, i128* %5) nounwind
- %6 = load i128* %0
+ %6 = load i128, i128* %0
%7 = sub i128 %2, %6
%8 = bitcast i8* %diff to i128*
store i128 %7, i128* %8
diff --git a/llvm/test/CodeGen/SPARC/2011-01-22-SRet.ll b/llvm/test/CodeGen/SPARC/2011-01-22-SRet.ll
index d9ebf3a521b..ae9764e8208 100644
--- a/llvm/test/CodeGen/SPARC/2011-01-22-SRet.ll
+++ b/llvm/test/CodeGen/SPARC/2011-01-22-SRet.ll
@@ -25,11 +25,11 @@ entry:
%f = alloca %struct.foo_t, align 8
call void @make_foo(%struct.foo_t* noalias sret %f, i32 10, i32 20, i32 30) nounwind
%0 = getelementptr inbounds %struct.foo_t, %struct.foo_t* %f, i32 0, i32 0
- %1 = load i32* %0, align 8
+ %1 = load i32, i32* %0, align 8
%2 = getelementptr inbounds %struct.foo_t, %struct.foo_t* %f, i32 0, i32 1
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr inbounds %struct.foo_t, %struct.foo_t* %f, i32 0, i32 2
- %5 = load i32* %4, align 8
+ %5 = load i32, i32* %4, align 8
%6 = add nsw i32 %3, %1
%7 = add nsw i32 %6, %5
ret i32 %7
diff --git a/llvm/test/CodeGen/SPARC/64abi.ll b/llvm/test/CodeGen/SPARC/64abi.ll
index d31fc4f15ac..a7e482c898c 100644
--- a/llvm/test/CodeGen/SPARC/64abi.ll
+++ b/llvm/test/CodeGen/SPARC/64abi.ll
@@ -240,10 +240,10 @@ define void @call_inreg_ii(i32* %p, i32 %i1, i32 %i2) {
; CHECK: ldx [%i2], %i0
; CHECK: ldx [%i3], %i1
define { i64, i64 } @ret_i64_pair(i32 %a0, i32 %a1, i64* %p, i64* %q) {
- %r1 = load i64* %p
+ %r1 = load i64, i64* %p
%rv1 = insertvalue { i64, i64 } undef, i64 %r1, 0
store i64 0, i64* %p
- %r2 = load i64* %q
+ %r2 = load i64, i64* %q
%rv2 = insertvalue { i64, i64 } %rv1, i64 %r2, 1
ret { i64, i64 } %rv2
}
@@ -268,10 +268,10 @@ define void @call_ret_i64_pair(i64* %i0) {
; CHECK: ld [%i3], %f2
define { i32, float } @ret_i32_float_pair(i32 %a0, i32 %a1,
i32* %p, float* %q) {
- %r1 = load i32* %p
+ %r1 = load i32, i32* %p
%rv1 = insertvalue { i32, float } undef, i32 %r1, 0
store i32 0, i32* %p
- %r2 = load float* %q
+ %r2 = load float, float* %q
%rv2 = insertvalue { i32, float } %rv1, float %r2, 1
ret { i32, float } %rv2
}
@@ -297,10 +297,10 @@ define void @call_ret_i32_float_pair(i32* %i0, float* %i1) {
; CHECK: ld [%i3], %f1
define inreg { i32, float } @ret_i32_float_packed(i32 %a0, i32 %a1,
i32* %p, float* %q) {
- %r1 = load i32* %p
+ %r1 = load i32, i32* %p
%rv1 = insertvalue { i32, float } undef, i32 %r1, 0
store i32 0, i32* %p
- %r2 = load float* %q
+ %r2 = load float, float* %q
%rv2 = insertvalue { i32, float } %rv1, float %r2, 1
ret { i32, float } %rv2
}
@@ -329,10 +329,10 @@ define void @call_ret_i32_float_packed(i32* %i0, float* %i1) {
; CHECK: or [[R3]], [[R1]], %i0
define inreg { i32, i32 } @ret_i32_packed(i32 %a0, i32 %a1,
i32* %p, i32* %q) {
- %r1 = load i32* %p
+ %r1 = load i32, i32* %p
%rv1 = insertvalue { i32, i32 } undef, i32 %r1, 1
store i32 0, i32* %p
- %r2 = load i32* %q
+ %r2 = load i32, i32* %q
%rv2 = insertvalue { i32, i32 } %rv1, i32 %r2, 0
ret { i32, i32 } %rv2
}
diff --git a/llvm/test/CodeGen/SPARC/64bit.ll b/llvm/test/CodeGen/SPARC/64bit.ll
index 57e1fd72c13..274fa32fad3 100644
--- a/llvm/test/CodeGen/SPARC/64bit.ll
+++ b/llvm/test/CodeGen/SPARC/64bit.ll
@@ -140,17 +140,17 @@ define i64 @reg_imm_alu(i64 %x, i64 %y, i64 %z) {
; CHECK: ldsh [%i3]
; CHECK: sth %
define i64 @loads(i64* %p, i32* %q, i32* %r, i16* %s) {
- %a = load i64* %p
+ %a = load i64, i64* %p
%ai = add i64 1, %a
store i64 %ai, i64* %p
- %b = load i32* %q
+ %b = load i32, i32* %q
%b2 = zext i32 %b to i64
%bi = trunc i64 %ai to i32
store i32 %bi, i32* %q
- %c = load i32* %r
+ %c = load i32, i32* %r
%c2 = sext i32 %c to i64
store i64 %ai, i64* %p
- %d = load i16* %s
+ %d = load i16, i16* %s
%d2 = sext i16 %d to i64
%di = trunc i64 %ai to i16
store i16 %di, i16* %s
@@ -164,7 +164,7 @@ define i64 @loads(i64* %p, i32* %q, i32* %r, i16* %s) {
; CHECK: load_bool
; CHECK: ldub [%i0], %i0
define i64 @load_bool(i1* %p) {
- %a = load i1* %p
+ %a = load i1, i1* %p
%b = zext i1 %a to i64
ret i64 %b
}
@@ -178,7 +178,7 @@ define i64 @load_bool(i1* %p) {
define void @stores(i64* %p, i32* %q, i16* %r, i8* %s) {
%p1 = getelementptr i64, i64* %p, i64 1
%p2 = getelementptr i64, i64* %p, i64 2
- %pv = load i64* %p1
+ %pv = load i64, i64* %p1
store i64 %pv, i64* %p2
%q2 = getelementptr i32, i32* %q, i32 -2
@@ -200,8 +200,8 @@ define void @stores(i64* %p, i32* %q, i16* %r, i8* %s) {
; CHECK: ldub [%i0], [[R:%[goli][0-7]]]
; CHECK: sll [[R]], [[R]], %i0
define i8 @promote_shifts(i8* %p) {
- %L24 = load i8* %p
- %L32 = load i8* %p
+ %L24 = load i8, i8* %p
+ %L32 = load i8, i8* %p
%B36 = shl i8 %L24, %L32
ret i8 %B36
}
diff --git a/llvm/test/CodeGen/SPARC/atomics.ll b/llvm/test/CodeGen/SPARC/atomics.ll
index ee6c1f8999b..bea9a337469 100644
--- a/llvm/test/CodeGen/SPARC/atomics.ll
+++ b/llvm/test/CodeGen/SPARC/atomics.ll
@@ -9,8 +9,8 @@
; CHECK: st {{.+}}, [%o2]
define i32 @test_atomic_i32(i32* %ptr1, i32* %ptr2, i32* %ptr3) {
entry:
- %0 = load atomic i32* %ptr1 acquire, align 8
- %1 = load atomic i32* %ptr2 acquire, align 8
+ %0 = load atomic i32, i32* %ptr1 acquire, align 8
+ %1 = load atomic i32, i32* %ptr2 acquire, align 8
%2 = add i32 %0, %1
store atomic i32 %2, i32* %ptr3 release, align 8
ret i32 %2
@@ -25,8 +25,8 @@ entry:
; CHECK: stx {{.+}}, [%o2]
define i64 @test_atomic_i64(i64* %ptr1, i64* %ptr2, i64* %ptr3) {
entry:
- %0 = load atomic i64* %ptr1 acquire, align 8
- %1 = load atomic i64* %ptr2 acquire, align 8
+ %0 = load atomic i64, i64* %ptr1 acquire, align 8
+ %1 = load atomic i64, i64* %ptr2 acquire, align 8
%2 = add i64 %0, %1
store atomic i64 %2, i64* %ptr3 release, align 8
ret i64 %2
diff --git a/llvm/test/CodeGen/SPARC/fp128.ll b/llvm/test/CodeGen/SPARC/fp128.ll
index a06112a56ba..c864cb7d599 100644
--- a/llvm/test/CodeGen/SPARC/fp128.ll
+++ b/llvm/test/CodeGen/SPARC/fp128.ll
@@ -28,10 +28,10 @@
define void @f128_ops(fp128* noalias sret %scalar.result, fp128* byval %a, fp128* byval %b, fp128* byval %c, fp128* byval %d) {
entry:
- %0 = load fp128* %a, align 8
- %1 = load fp128* %b, align 8
- %2 = load fp128* %c, align 8
- %3 = load fp128* %d, align 8
+ %0 = load fp128, fp128* %a, align 8
+ %1 = load fp128, fp128* %b, align 8
+ %2 = load fp128, fp128* %c, align 8
+ %3 = load fp128, fp128* %d, align 8
%4 = fadd fp128 %0, %1
%5 = fsub fp128 %4, %2
%6 = fmul fp128 %5, %3
@@ -56,7 +56,7 @@ entry:
define void @f128_spill(fp128* noalias sret %scalar.result, fp128* byval %a) {
entry:
- %0 = load fp128* %a, align 8
+ %0 = load fp128, fp128* %a, align 8
call void asm sideeffect "", "~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31}"()
store fp128 %0, fp128* %scalar.result, align 8
ret void
@@ -71,8 +71,8 @@ entry:
define i32 @f128_compare(fp128* byval %f0, fp128* byval %f1, i32 %a, i32 %b) {
entry:
- %0 = load fp128* %f0, align 8
- %1 = load fp128* %f1, align 8
+ %0 = load fp128, fp128* %f0, align 8
+ %1 = load fp128, fp128* %f1, align 8
%cond = fcmp ult fp128 %0, %1
%ret = select i1 %cond, i32 %a, i32 %b
ret i32 %ret
@@ -107,7 +107,7 @@ entry:
define void @f128_abs(fp128* noalias sret %scalar.result, fp128* byval %a) {
entry:
- %0 = load fp128* %a, align 8
+ %0 = load fp128, fp128* %a, align 8
%1 = tail call fp128 @llvm.fabs.f128(fp128 %0)
store fp128 %1, fp128* %scalar.result, align 8
ret void
@@ -142,8 +142,8 @@ entry:
define void @fp128_unaligned(fp128* %a, fp128* %b, fp128* %c) {
entry:
- %0 = load fp128* %a, align 1
- %1 = load fp128* %b, align 1
+ %0 = load fp128, fp128* %a, align 1
+ %1 = load fp128, fp128* %b, align 1
%2 = fadd fp128 %0, %1
store fp128 %2, fp128* %c, align 1
ret void
@@ -173,8 +173,8 @@ entry:
define i32 @f128_to_i32(fp128* %a, fp128* %b) {
entry:
- %0 = load fp128* %a, align 8
- %1 = load fp128* %b, align 8
+ %0 = load fp128, fp128* %a, align 8
+ %1 = load fp128, fp128* %b, align 8
%2 = fptoui fp128 %0 to i32
%3 = fptosi fp128 %1 to i32
%4 = add i32 %2, %3
@@ -197,12 +197,12 @@ define void @test_itoq_qtoi(i64 %a, i32 %b, fp128* %c, fp128* %d, i64* %ptr0, fp
entry:
%0 = sitofp i64 %a to fp128
store fp128 %0, fp128* %ptr1, align 8
- %cval = load fp128* %c, align 8
+ %cval = load fp128, fp128* %c, align 8
%1 = fptosi fp128 %cval to i64
store i64 %1, i64* %ptr0, align 8
%2 = sitofp i32 %b to fp128
store fp128 %2, fp128* %ptr1, align 8
- %dval = load fp128* %d, align 8
+ %dval = load fp128, fp128* %d, align 8
%3 = fptosi fp128 %dval to i32
%4 = bitcast i64* %ptr0 to i32*
store i32 %3, i32* %4, align 8
@@ -225,12 +225,12 @@ define void @test_utoq_qtou(i64 %a, i32 %b, fp128* %c, fp128* %d, i64* %ptr0, fp
entry:
%0 = uitofp i64 %a to fp128
store fp128 %0, fp128* %ptr1, align 8
- %cval = load fp128* %c, align 8
+ %cval = load fp128, fp128* %c, align 8
%1 = fptoui fp128 %cval to i64
store i64 %1, i64* %ptr0, align 8
%2 = uitofp i32 %b to fp128
store fp128 %2, fp128* %ptr1, align 8
- %dval = load fp128* %d, align 8
+ %dval = load fp128, fp128* %d, align 8
%3 = fptoui fp128 %dval to i32
%4 = bitcast i64* %ptr0 to i32*
store i32 %3, i32* %4, align 8
@@ -242,7 +242,7 @@ entry:
define void @f128_neg(fp128* noalias sret %scalar.result, fp128* byval %a) {
entry:
- %0 = load fp128* %a, align 8
+ %0 = load fp128, fp128* %a, align 8
%1 = fsub fp128 0xL00000000000000008000000000000000, %0
store fp128 %1, fp128* %scalar.result, align 8
ret void
diff --git a/llvm/test/CodeGen/SPARC/globals.ll b/llvm/test/CodeGen/SPARC/globals.ll
index 3d3eba28af6..3ef135f6d70 100644
--- a/llvm/test/CodeGen/SPARC/globals.ll
+++ b/llvm/test/CodeGen/SPARC/globals.ll
@@ -8,7 +8,7 @@
@G = external global i8
define zeroext i8 @loadG() {
- %tmp = load i8* @G
+ %tmp = load i8, i8* @G
ret i8 %tmp
}
diff --git a/llvm/test/CodeGen/SPARC/leafproc.ll b/llvm/test/CodeGen/SPARC/leafproc.ll
index e6a77dcf90c..fd74e5ca723 100644
--- a/llvm/test/CodeGen/SPARC/leafproc.ll
+++ b/llvm/test/CodeGen/SPARC/leafproc.ll
@@ -75,6 +75,6 @@ entry:
%2 = getelementptr inbounds [2 x i32], [2 x i32]* %array, i32 0, i32 1
store i32 2, i32* %2, align 4
%3 = getelementptr inbounds [2 x i32], [2 x i32]* %array, i32 0, i32 %a
- %4 = load i32* %3, align 4
+ %4 = load i32, i32* %3, align 4
ret i32 %4
}
diff --git a/llvm/test/CodeGen/SPARC/mult-alt-generic-sparc.ll b/llvm/test/CodeGen/SPARC/mult-alt-generic-sparc.ll
index 6a67616d53b..57864c2df16 100644
--- a/llvm/test/CodeGen/SPARC/mult-alt-generic-sparc.ll
+++ b/llvm/test/CodeGen/SPARC/mult-alt-generic-sparc.ll
@@ -33,10 +33,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,<r"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r,r<"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -48,10 +48,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,>r"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r,r>"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -63,7 +63,7 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,r"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
ret void
@@ -120,10 +120,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r,imr"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r,imr"(i32 1) nounwind
@@ -137,10 +137,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r,X"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r,X"(i32 1) nounwind
@@ -166,7 +166,7 @@ entry:
define void @multi_m() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*m|r,m|r"(i32* @mout0, i32 %tmp) nounwind
ret void
}
@@ -191,10 +191,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|<r"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|r<"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -206,10 +206,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|>r"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|r>"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -221,7 +221,7 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|m"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
ret void
@@ -278,10 +278,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r|r,r|imr"(i32 1) nounwind
@@ -295,10 +295,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r|r,r|X"(i32 1) nounwind
diff --git a/llvm/test/CodeGen/SPARC/obj-relocs.ll b/llvm/test/CodeGen/SPARC/obj-relocs.ll
index 6d57598795d..115263ac5d4 100644
--- a/llvm/test/CodeGen/SPARC/obj-relocs.ll
+++ b/llvm/test/CodeGen/SPARC/obj-relocs.ll
@@ -21,7 +21,7 @@
define i64 @foo(i64 %a) {
entry:
- %0 = load i64* @AGlobalVar, align 4
+ %0 = load i64, i64* @AGlobalVar, align 4
%1 = add i64 %a, %0
%2 = call i64 @bar(i64 %1)
ret i64 %2
diff --git a/llvm/test/CodeGen/SPARC/private.ll b/llvm/test/CodeGen/SPARC/private.ll
index 38cea4ca6e4..400d907e150 100644
--- a/llvm/test/CodeGen/SPARC/private.ll
+++ b/llvm/test/CodeGen/SPARC/private.ll
@@ -11,7 +11,7 @@ define private void @foo() {
define i32 @bar() {
call void @foo()
- %1 = load i32* @baz, align 4
+ %1 = load i32, i32* @baz, align 4
ret i32 %1
}
diff --git a/llvm/test/CodeGen/SPARC/setjmp.ll b/llvm/test/CodeGen/SPARC/setjmp.ll
index 0f9e546242a..b43b880598d 100644
--- a/llvm/test/CodeGen/SPARC/setjmp.ll
+++ b/llvm/test/CodeGen/SPARC/setjmp.ll
@@ -35,7 +35,7 @@ entry:
%4 = getelementptr inbounds %struct.jmpbuf_env, %struct.jmpbuf_env* %inbuf, i32 0, i32 3
store i32 %3, i32* %4, align 4, !tbaa !4
store %struct.jmpbuf_env* %inbuf, %struct.jmpbuf_env** @jenv, align 4, !tbaa !3
- %5 = load i32* %1, align 4, !tbaa !4
+ %5 = load i32, i32* %1, align 4, !tbaa !4
%6 = icmp eq i32 %5, 1
%7 = icmp eq i32 %3, 0
%or.cond = and i1 %6, %7
@@ -46,7 +46,7 @@ entry:
unreachable
bar.exit: ; preds = %entry
- %8 = load i32* %0, align 4, !tbaa !4
+ %8 = load i32, i32* %0, align 4, !tbaa !4
%9 = call i32 (i8*, ...)* @printf(i8* noalias getelementptr inbounds ([30 x i8]* @.cst, i32 0, i32 0), i32 %8) #0
ret i32 0
}
diff --git a/llvm/test/CodeGen/SPARC/spillsize.ll b/llvm/test/CodeGen/SPARC/spillsize.ll
index 2fcab5470fe..a82e5098ffd 100644
--- a/llvm/test/CodeGen/SPARC/spillsize.ll
+++ b/llvm/test/CodeGen/SPARC/spillsize.ll
@@ -11,13 +11,13 @@ target triple = "sparcv9"
; CHECK: ldx [%fp+
define void @spill4(i64* nocapture %p) {
entry:
- %val0 = load i64* %p
+ %val0 = load i64, i64* %p
%cmp0 = icmp ult i64 %val0, 385672958347594845
%cm80 = zext i1 %cmp0 to i64
store i64 %cm80, i64* %p, align 8
tail call void asm sideeffect "", "~{i0},~{i1},~{i2},~{i3},~{i4},~{i5},~{g2},~{g3},~{g4},~{g5},~{l0},~{l1},~{l2},~{l3},~{l4},~{l5},~{l6},~{l7},~{o0},~{o1},~{o2},~{o3},~{o4},~{o5},~{o7}"()
%arrayidx1 = getelementptr inbounds i64, i64* %p, i64 1
- %val = load i64* %arrayidx1
+ %val = load i64, i64* %arrayidx1
%cmp = icmp ult i64 %val, 385672958347594845
%cm8 = select i1 %cmp, i64 10, i64 20
store i64 %cm8, i64* %arrayidx1, align 8
diff --git a/llvm/test/CodeGen/SPARC/tls.ll b/llvm/test/CodeGen/SPARC/tls.ll
index ce3e0053984..d54cf60a32f 100644
--- a/llvm/test/CodeGen/SPARC/tls.ll
+++ b/llvm/test/CodeGen/SPARC/tls.ll
@@ -34,7 +34,7 @@
define i32 @test_tls_local() {
entry:
- %0 = load i32* @local_symbol, align 4
+ %0 = load i32, i32* @local_symbol, align 4
%1 = add i32 %0, 1
store i32 %1, i32* @local_symbol, align 4
ret i32 %1
@@ -68,7 +68,7 @@ entry:
define i32 @test_tls_extern() {
entry:
- %0 = load i32* @extern_symbol, align 4
+ %0 = load i32, i32* @extern_symbol, align 4
%1 = add i32 %0, 1
store i32 %1, i32* @extern_symbol, align 4
ret i32 %1
diff --git a/llvm/test/CodeGen/SPARC/varargs.ll b/llvm/test/CodeGen/SPARC/varargs.ll
index dea512a9f9d..9f1864471ef 100644
--- a/llvm/test/CodeGen/SPARC/varargs.ll
+++ b/llvm/test/CodeGen/SPARC/varargs.ll
@@ -25,7 +25,7 @@ for.cond:
%fmt.addr.0 = phi i8* [ %fmt, %entry ], [ %incdec.ptr, %for.cond.backedge ]
%sum.addr.0 = phi double [ %sum, %entry ], [ %sum.addr.0.be, %for.cond.backedge ]
%incdec.ptr = getelementptr inbounds i8, i8* %fmt.addr.0, i64 1
- %0 = load i8* %fmt.addr.0, align 1
+ %0 = load i8, i8* %fmt.addr.0, align 1
%conv = sext i8 %0 to i32
switch i32 %conv, label %sw.default [
i32 105, label %sw.bb