author     David Blaikie <dblaikie@gmail.com>  2015-02-27 21:17:42 +0000
committer  David Blaikie <dblaikie@gmail.com>  2015-02-27 21:17:42 +0000
commit     a79ac14fa68297f9888bc70a10df5ed9b8864e38 (patch)
tree       8d8217a8928e3ee599bdde405e2e178b3a55b645 /llvm/test/CodeGen/R600/fma-combine.ll
parent     83687fb9e654c9d0086e7f6b728c26fa0b729e71 (diff)
[opaque pointer type] Add textual IR support for explicit type parameter to load instruction
Essentially the same as the GEP change in r230786.

A similar migration script can be used to update test cases, though a few more test case improvements/changes were required this time around: (r229269-r229278)

import fileinput
import sys
import re

pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

for line in sys.stdin:
  sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))

Reviewers: rafael, dexonsmith, grosser

Differential Revision: http://reviews.llvm.org/D7649

llvm-svn: 230794
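For illustration only, here is a minimal, runnable sketch of what the migration regex above does to one of the load lines changed in this test. The standalone framing and the single sample input line are assumptions for demonstration; the original script streams whole test files through stdin instead.

import re

# Pattern from the commit message: capture everything up to and including the
# loaded value type, then re-emit that type a second time as the new explicit
# type operand, keeping the addrspace-qualified pointer type and operand intact.
pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

# Sample line taken from this test file, in the old (pre-commit) syntax.
old = "  %a = load double addrspace(1)* %gep.0"
new = re.sub(pat, r"\1, \2\3*\4", old)
print(new)  # "  %a = load double, double addrspace(1)* %gep.0"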
Diffstat (limited to 'llvm/test/CodeGen/R600/fma-combine.ll')
-rw-r--r--  llvm/test/CodeGen/R600/fma-combine.ll | 90
1 file changed, 45 insertions(+), 45 deletions(-)
diff --git a/llvm/test/CodeGen/R600/fma-combine.ll b/llvm/test/CodeGen/R600/fma-combine.ll
index 9c77c15a02b..bd574b87711 100644
--- a/llvm/test/CodeGen/R600/fma-combine.ll
+++ b/llvm/test/CodeGen/R600/fma-combine.ll
@@ -20,9 +20,9 @@ define void @combine_to_fma_f64_0(double addrspace(1)* noalias %out, double addr
%gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
%gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
- %a = load double addrspace(1)* %gep.0
- %b = load double addrspace(1)* %gep.1
- %c = load double addrspace(1)* %gep.2
+ %a = load double, double addrspace(1)* %gep.0
+ %b = load double, double addrspace(1)* %gep.1
+ %c = load double, double addrspace(1)* %gep.2
%mul = fmul double %a, %b
%fma = fadd double %mul, %c
@@ -50,10 +50,10 @@ define void @combine_to_fma_f64_0_2use(double addrspace(1)* noalias %out, double
%gep.out.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
%gep.out.1 = getelementptr double, double addrspace(1)* %gep.out.0, i32 1
- %a = load double addrspace(1)* %gep.0
- %b = load double addrspace(1)* %gep.1
- %c = load double addrspace(1)* %gep.2
- %d = load double addrspace(1)* %gep.3
+ %a = load double, double addrspace(1)* %gep.0
+ %b = load double, double addrspace(1)* %gep.1
+ %c = load double, double addrspace(1)* %gep.2
+ %d = load double, double addrspace(1)* %gep.3
%mul = fmul double %a, %b
%fma0 = fadd double %mul, %c
@@ -77,9 +77,9 @@ define void @combine_to_fma_f64_1(double addrspace(1)* noalias %out, double addr
%gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
%gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
- %a = load double addrspace(1)* %gep.0
- %b = load double addrspace(1)* %gep.1
- %c = load double addrspace(1)* %gep.2
+ %a = load double, double addrspace(1)* %gep.0
+ %b = load double, double addrspace(1)* %gep.1
+ %c = load double, double addrspace(1)* %gep.2
%mul = fmul double %a, %b
%fma = fadd double %c, %mul
@@ -101,9 +101,9 @@ define void @combine_to_fma_fsub_0_f64(double addrspace(1)* noalias %out, double
%gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
%gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
- %a = load double addrspace(1)* %gep.0
- %b = load double addrspace(1)* %gep.1
- %c = load double addrspace(1)* %gep.2
+ %a = load double, double addrspace(1)* %gep.0
+ %b = load double, double addrspace(1)* %gep.1
+ %c = load double, double addrspace(1)* %gep.2
%mul = fmul double %a, %b
%fma = fsub double %mul, %c
@@ -131,10 +131,10 @@ define void @combine_to_fma_fsub_f64_0_2use(double addrspace(1)* noalias %out, d
%gep.out.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
%gep.out.1 = getelementptr double, double addrspace(1)* %gep.out.0, i32 1
- %a = load double addrspace(1)* %gep.0
- %b = load double addrspace(1)* %gep.1
- %c = load double addrspace(1)* %gep.2
- %d = load double addrspace(1)* %gep.3
+ %a = load double, double addrspace(1)* %gep.0
+ %b = load double, double addrspace(1)* %gep.1
+ %c = load double, double addrspace(1)* %gep.2
+ %d = load double, double addrspace(1)* %gep.3
%mul = fmul double %a, %b
%fma0 = fsub double %mul, %c
@@ -158,9 +158,9 @@ define void @combine_to_fma_fsub_1_f64(double addrspace(1)* noalias %out, double
%gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
%gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
- %a = load double addrspace(1)* %gep.0
- %b = load double addrspace(1)* %gep.1
- %c = load double addrspace(1)* %gep.2
+ %a = load double, double addrspace(1)* %gep.0
+ %b = load double, double addrspace(1)* %gep.1
+ %c = load double, double addrspace(1)* %gep.2
%mul = fmul double %a, %b
%fma = fsub double %c, %mul
@@ -188,10 +188,10 @@ define void @combine_to_fma_fsub_1_f64_2use(double addrspace(1)* noalias %out, d
%gep.out.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
%gep.out.1 = getelementptr double, double addrspace(1)* %gep.out.0, i32 1
- %a = load double addrspace(1)* %gep.0
- %b = load double addrspace(1)* %gep.1
- %c = load double addrspace(1)* %gep.2
- %d = load double addrspace(1)* %gep.3
+ %a = load double, double addrspace(1)* %gep.0
+ %b = load double, double addrspace(1)* %gep.1
+ %c = load double, double addrspace(1)* %gep.2
+ %d = load double, double addrspace(1)* %gep.3
%mul = fmul double %a, %b
%fma0 = fsub double %c, %mul
@@ -215,9 +215,9 @@ define void @combine_to_fma_fsub_2_f64(double addrspace(1)* noalias %out, double
%gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
%gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
- %a = load double addrspace(1)* %gep.0
- %b = load double addrspace(1)* %gep.1
- %c = load double addrspace(1)* %gep.2
+ %a = load double, double addrspace(1)* %gep.0
+ %b = load double, double addrspace(1)* %gep.1
+ %c = load double, double addrspace(1)* %gep.2
%mul = fmul double %a, %b
%mul.neg = fsub double -0.0, %mul
@@ -246,10 +246,10 @@ define void @combine_to_fma_fsub_2_f64_2uses_neg(double addrspace(1)* noalias %o
%gep.out.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
%gep.out.1 = getelementptr double, double addrspace(1)* %gep.out.0, i32 1
- %a = load double addrspace(1)* %gep.0
- %b = load double addrspace(1)* %gep.1
- %c = load double addrspace(1)* %gep.2
- %d = load double addrspace(1)* %gep.3
+ %a = load double, double addrspace(1)* %gep.0
+ %b = load double, double addrspace(1)* %gep.1
+ %c = load double, double addrspace(1)* %gep.2
+ %d = load double, double addrspace(1)* %gep.3
%mul = fmul double %a, %b
%mul.neg = fsub double -0.0, %mul
@@ -280,10 +280,10 @@ define void @combine_to_fma_fsub_2_f64_2uses_mul(double addrspace(1)* noalias %o
%gep.out.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
%gep.out.1 = getelementptr double, double addrspace(1)* %gep.out.0, i32 1
- %a = load double addrspace(1)* %gep.0
- %b = load double addrspace(1)* %gep.1
- %c = load double addrspace(1)* %gep.2
- %d = load double addrspace(1)* %gep.3
+ %a = load double, double addrspace(1)* %gep.0
+ %b = load double, double addrspace(1)* %gep.1
+ %c = load double, double addrspace(1)* %gep.2
+ %d = load double, double addrspace(1)* %gep.3
%mul = fmul double %a, %b
%mul.neg = fsub double -0.0, %mul
@@ -315,11 +315,11 @@ define void @aggressive_combine_to_fma_fsub_0_f64(double addrspace(1)* noalias %
%gep.4 = getelementptr double, double addrspace(1)* %gep.0, i32 4
%gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
- %x = load double addrspace(1)* %gep.0
- %y = load double addrspace(1)* %gep.1
- %z = load double addrspace(1)* %gep.2
- %u = load double addrspace(1)* %gep.3
- %v = load double addrspace(1)* %gep.4
+ %x = load double, double addrspace(1)* %gep.0
+ %y = load double, double addrspace(1)* %gep.1
+ %z = load double, double addrspace(1)* %gep.2
+ %u = load double, double addrspace(1)* %gep.3
+ %v = load double, double addrspace(1)* %gep.4
%tmp0 = fmul double %u, %v
%tmp1 = call double @llvm.fma.f64(double %x, double %y, double %tmp0) #0
@@ -350,11 +350,11 @@ define void @aggressive_combine_to_fma_fsub_1_f64(double addrspace(1)* noalias %
%gep.4 = getelementptr double, double addrspace(1)* %gep.0, i32 4
%gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
- %x = load double addrspace(1)* %gep.0
- %y = load double addrspace(1)* %gep.1
- %z = load double addrspace(1)* %gep.2
- %u = load double addrspace(1)* %gep.3
- %v = load double addrspace(1)* %gep.4
+ %x = load double, double addrspace(1)* %gep.0
+ %y = load double, double addrspace(1)* %gep.1
+ %z = load double, double addrspace(1)* %gep.2
+ %u = load double, double addrspace(1)* %gep.3
+ %v = load double, double addrspace(1)* %gep.4
%tmp0 = fmul double %u, %v
%tmp1 = call double @llvm.fma.f64(double %y, double %z, double %tmp0) #0