diff options
| author | David Blaikie <dblaikie@gmail.com> | 2015-02-27 21:17:42 +0000 |
|---|---|---|
| committer | David Blaikie <dblaikie@gmail.com> | 2015-02-27 21:17:42 +0000 |
| commit | a79ac14fa68297f9888bc70a10df5ed9b8864e38 (patch) | |
| tree | 8d8217a8928e3ee599bdde405e2e178b3a55b645 /llvm/test/CodeGen/SystemZ/frame-06.ll | |
| parent | 83687fb9e654c9d0086e7f6b728c26fa0b729e71 (diff) | |
| download | bcm5719-llvm-a79ac14fa68297f9888bc70a10df5ed9b8864e38.tar.gz bcm5719-llvm-a79ac14fa68297f9888bc70a10df5ed9b8864e38.zip | |
[opaque pointer type] Add textual IR support for explicit type parameter to load instruction
Essentially the same as the GEP change in r230786.
A similar migration script can be used to update test cases, though a few more
test case improvements/changes were required this time around: (r229269-r229278)
import re
import sys

# One LLVM IR "load" whose pointee type is only implied by the pointer
# operand's type, e.g. "%l0 = load volatile i64 *%ptr".  Capture groups:
#   1: everything through the pointee type, incl. "load"/"atomic"/"volatile"
#   2: the pointee type itself (e.g. "i64 ")
#   3: an optional " addrspace(N)" qualifier on the pointer type
#   4: the pointer operand following the "*" (register, global, constant
#      expression, or a FileCheck [[VAR]]/{{...}} pattern)
LOAD_PAT = re.compile(
    r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))"
    r"(| addrspace\(\d+\) *)"
    r"\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|"
    r"bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)"
)


def convert_line(line):
    """Rewrite one line of IR/test text to the explicit-type load syntax.

    "load volatile i64 *%ptr" becomes "load volatile i64 , i64 *%ptr";
    lines without an old-style load are returned unchanged.
    """
    # \2 repeats the pointee type after a comma, keeping the original
    # pointer-typed operand (\3*\4) intact.
    return LOAD_PAT.sub(r"\1, \2\3*\4", line)


def main():
    """Stream stdin to stdout, migrating every old-style load."""
    for line in sys.stdin:
        sys.stdout.write(convert_line(line))


if __name__ == "__main__":
    # Guarded so the module is importable (and testable) without
    # immediately consuming stdin.  NOTE: the original script also
    # imported fileinput but never used it; the dead import is dropped.
    main()
Reviewers: rafael, dexonsmith, grosser
Differential Revision: http://reviews.llvm.org/D7649
llvm-svn: 230794
Diffstat (limited to 'llvm/test/CodeGen/SystemZ/frame-06.ll')
| -rw-r--r-- | llvm/test/CodeGen/SystemZ/frame-06.ll | 76 |
1 files changed, 38 insertions, 38 deletions
diff --git a/llvm/test/CodeGen/SystemZ/frame-06.ll b/llvm/test/CodeGen/SystemZ/frame-06.ll index f649286db2f..c2aa8af336a 100644 --- a/llvm/test/CodeGen/SystemZ/frame-06.ll +++ b/llvm/test/CodeGen/SystemZ/frame-06.ll @@ -29,20 +29,20 @@ define void @f1(i64 *%ptr) { ; CHECK: stg {{.*}}, 8(%r2) ; CHECK: lmg %r6, %r15, 48(%r15) ; CHECK: br %r14 - %l0 = load volatile i64 *%ptr - %l1 = load volatile i64 *%ptr - %l3 = load volatile i64 *%ptr - %l4 = load volatile i64 *%ptr - %l5 = load volatile i64 *%ptr - %l6 = load volatile i64 *%ptr - %l7 = load volatile i64 *%ptr - %l8 = load volatile i64 *%ptr - %l9 = load volatile i64 *%ptr - %l10 = load volatile i64 *%ptr - %l11 = load volatile i64 *%ptr - %l12 = load volatile i64 *%ptr - %l13 = load volatile i64 *%ptr - %l14 = load volatile i64 *%ptr + %l0 = load volatile i64 , i64 *%ptr + %l1 = load volatile i64 , i64 *%ptr + %l3 = load volatile i64 , i64 *%ptr + %l4 = load volatile i64 , i64 *%ptr + %l5 = load volatile i64 , i64 *%ptr + %l6 = load volatile i64 , i64 *%ptr + %l7 = load volatile i64 , i64 *%ptr + %l8 = load volatile i64 , i64 *%ptr + %l9 = load volatile i64 , i64 *%ptr + %l10 = load volatile i64 , i64 *%ptr + %l11 = load volatile i64 , i64 *%ptr + %l12 = load volatile i64 , i64 *%ptr + %l13 = load volatile i64 , i64 *%ptr + %l14 = load volatile i64 , i64 *%ptr %add0 = add i64 %l0, %l0 %add1 = add i64 %l1, %add0 %add3 = add i64 %l3, %add1 @@ -97,19 +97,19 @@ define void @f2(i64 *%ptr) { ; CHECK: stg {{.*}}, 8(%r2) ; CHECK: lmg %r7, %r15, 56(%r15) ; CHECK: br %r14 - %l0 = load volatile i64 *%ptr - %l1 = load volatile i64 *%ptr - %l3 = load volatile i64 *%ptr - %l4 = load volatile i64 *%ptr - %l5 = load volatile i64 *%ptr - %l7 = load volatile i64 *%ptr - %l8 = load volatile i64 *%ptr - %l9 = load volatile i64 *%ptr - %l10 = load volatile i64 *%ptr - %l11 = load volatile i64 *%ptr - %l12 = load volatile i64 *%ptr - %l13 = load volatile i64 *%ptr - %l14 = load volatile i64 *%ptr + %l0 = load volatile i64 , i64 *%ptr 
+ %l1 = load volatile i64 , i64 *%ptr + %l3 = load volatile i64 , i64 *%ptr + %l4 = load volatile i64 , i64 *%ptr + %l5 = load volatile i64 , i64 *%ptr + %l7 = load volatile i64 , i64 *%ptr + %l8 = load volatile i64 , i64 *%ptr + %l9 = load volatile i64 , i64 *%ptr + %l10 = load volatile i64 , i64 *%ptr + %l11 = load volatile i64 , i64 *%ptr + %l12 = load volatile i64 , i64 *%ptr + %l13 = load volatile i64 , i64 *%ptr + %l14 = load volatile i64 , i64 *%ptr %add0 = add i64 %l0, %l0 %add1 = add i64 %l1, %add0 %add3 = add i64 %l3, %add1 @@ -160,12 +160,12 @@ define void @f3(i64 *%ptr) { ; CHECK: stg {{.*}}, 8(%r2) ; CHECK: lmg %r14, %r15, 112(%r15) ; CHECK: br %r14 - %l0 = load volatile i64 *%ptr - %l1 = load volatile i64 *%ptr - %l3 = load volatile i64 *%ptr - %l4 = load volatile i64 *%ptr - %l5 = load volatile i64 *%ptr - %l14 = load volatile i64 *%ptr + %l0 = load volatile i64 , i64 *%ptr + %l1 = load volatile i64 , i64 *%ptr + %l3 = load volatile i64 , i64 *%ptr + %l4 = load volatile i64 , i64 *%ptr + %l5 = load volatile i64 , i64 *%ptr + %l14 = load volatile i64 , i64 *%ptr %add0 = add i64 %l0, %l0 %add1 = add i64 %l1, %add0 %add3 = add i64 %l3, %add1 @@ -196,11 +196,11 @@ define void @f4(i64 *%ptr) { ; CHECK-NOT: %r12 ; CHECK-NOT: %r13 ; CHECK: br %r14 - %l0 = load volatile i64 *%ptr - %l1 = load volatile i64 *%ptr - %l3 = load volatile i64 *%ptr - %l4 = load volatile i64 *%ptr - %l5 = load volatile i64 *%ptr + %l0 = load volatile i64 , i64 *%ptr + %l1 = load volatile i64 , i64 *%ptr + %l3 = load volatile i64 , i64 *%ptr + %l4 = load volatile i64 , i64 *%ptr + %l5 = load volatile i64 , i64 *%ptr %add0 = add i64 %l0, %l0 %add1 = add i64 %l1, %add0 %add3 = add i64 %l3, %add1 |

