path: root/llvm/test/CodeGen/X86/fp_load_fold.ll
author    Dan Gohman <gohman@apple.com>    2009-06-04 22:49:04 +0000
committer Dan Gohman <gohman@apple.com>    2009-06-04 22:49:04 +0000
commit a5b9645c4b7a1d8be5e41081b99f27b49b8aa8cf (patch)
tree   7f5a9f6633be0c4e77a1bb00e5bfcfcca14e219b /llvm/test/CodeGen/X86/fp_load_fold.ll
parent 72a4d2fec138ad6d2becbc69c6d034246a056d09 (diff)
Split the Add, Sub, and Mul instruction opcodes into separate
integer and floating-point opcodes, introducing FAdd, FSub, and FMul.

For now, the AsmParser, BitcodeReader, and IRBuilder all preserve backwards compatibility, and the Core LLVM APIs preserve backwards compatibility for IR producers. Most front-ends won't need to change immediately.

This implements the first step of the plan outlined here:
http://nondot.org/sabre/LLVMNotes/IntegerOverflow.txt

llvm-svn: 72897
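For illustration (not part of this commit; the values %a, %b, %x, and %y are hypothetical), a minimal sketch of the resulting distinction in LLVM IR:

    %i = add i32 %a, %b        ; 'add' now applies to integer types only
    %f = fadd double %x, %y    ; floating-point addition uses the new 'fadd'

'sub' and 'mul' are split the same way into 'fsub' and 'fmul', which is exactly what the test update below exercises.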
Diffstat (limited to 'llvm/test/CodeGen/X86/fp_load_fold.ll')
-rw-r--r-- llvm/test/CodeGen/X86/fp_load_fold.ll | 8
1 file changed, 4 insertions, 4 deletions
diff --git a/llvm/test/CodeGen/X86/fp_load_fold.ll b/llvm/test/CodeGen/X86/fp_load_fold.ll
index 7c33cb32bc6..655ad3df323 100644
--- a/llvm/test/CodeGen/X86/fp_load_fold.ll
+++ b/llvm/test/CodeGen/X86/fp_load_fold.ll
@@ -5,25 +5,25 @@
define double @test_add(double %X, double* %P) {
%Y = load double* %P ; <double> [#uses=1]
- %R = add double %X, %Y ; <double> [#uses=1]
+ %R = fadd double %X, %Y ; <double> [#uses=1]
ret double %R
}

define double @test_mul(double %X, double* %P) {
%Y = load double* %P ; <double> [#uses=1]
- %R = mul double %X, %Y ; <double> [#uses=1]
+ %R = fmul double %X, %Y ; <double> [#uses=1]
ret double %R
}

define double @test_sub(double %X, double* %P) {
%Y = load double* %P ; <double> [#uses=1]
- %R = sub double %X, %Y ; <double> [#uses=1]
+ %R = fsub double %X, %Y ; <double> [#uses=1]
ret double %R
}

define double @test_subr(double %X, double* %P) {
%Y = load double* %P ; <double> [#uses=1]
- %R = sub double %Y, %X ; <double> [#uses=1]
+ %R = fsub double %Y, %X ; <double> [#uses=1]
ret double %R
}