| author | Dan Gohman <gohman@apple.com> | 2009-06-04 22:49:04 +0000 |
|---|---|---|
| committer | Dan Gohman <gohman@apple.com> | 2009-06-04 22:49:04 +0000 |
| commit | a5b9645c4b7a1d8be5e41081b99f27b49b8aa8cf | |
| tree | 7f5a9f6633be0c4e77a1bb00e5bfcfcca14e219b /llvm/test/CodeGen/ARM | |
| parent | 72a4d2fec138ad6d2becbc69c6d034246a056d09 | |
Split the Add, Sub, and Mul instruction opcodes into separate
integer and floating-point opcodes, introducing
FAdd, FSub, and FMul.
For now, the AsmParser, BitcodeReader, and IRBuilder all preserve
backwards compatibility, and the Core LLVM APIs preserve backwards
compatibility for IR producers. Most front-ends won't need to change
immediately.
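As a concrete illustration (a minimal sketch; the function and value names here are made up, but the opcode change is exactly what the test updates below show), floating-point arithmetic that was previously written with the shared opcodes now uses the dedicated ones:

```llvm
; Before this change: integer and FP arithmetic shared add/sub/mul.
; The old spelling is still accepted for now, per the compatibility note above.
define double @axpy_old(double %a, double %x, double %y) {
entry:
  %t = mul double %a, %x          ; FP multiply, old shared opcode
  %r = add double %t, %y          ; FP add, old shared opcode
  ret double %r
}

; After this change: dedicated floating-point opcodes.
define double @axpy_new(double %a, double %x, double %y) {
entry:
  %t = fmul double %a, %x         ; FP multiply, new opcode
  %r = fadd double %t, %y         ; FP add, new opcode
  ret double %r
}
```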
This implements the first step of the plan outlined here:
http://nondot.org/sabre/LLVMNotes/IntegerOverflow.txt
llvm-svn: 72897
Diffstat (limited to 'llvm/test/CodeGen/ARM')
| -rw-r--r-- | llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll | 8 |
| -rw-r--r-- | llvm/test/CodeGen/ARM/2009-02-27-SpillerBug.ll | 280 |
| -rw-r--r-- | llvm/test/CodeGen/ARM/2009-03-07-SpillerBug.ll | 56 |
| -rw-r--r-- | llvm/test/CodeGen/ARM/2009-04-08-FloatUndef.ll | 4 |
| -rw-r--r-- | llvm/test/CodeGen/ARM/cse-libcalls.ll | 2 |
| -rw-r--r-- | llvm/test/CodeGen/ARM/fixunsdfdi.ll | 2 |
| -rw-r--r-- | llvm/test/CodeGen/ARM/fnmul.ll | 4 |
| -rw-r--r-- | llvm/test/CodeGen/ARM/fparith.ll | 16 |
| -rw-r--r-- | llvm/test/CodeGen/ARM/fpmem.ll | 4 |
| -rw-r--r-- | llvm/test/CodeGen/ARM/illegal-vector-bitcast.ll | 2 |
| -rw-r--r-- | llvm/test/CodeGen/ARM/vfp.ll | 8 |
11 files changed, 193 insertions, 193 deletions
diff --git a/llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll b/llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
index 3661c4c06d6..6e11b169101 100644
--- a/llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
+++ b/llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
@@ -35,8 +35,8 @@ cond_next589: ; preds = %cond_next489
 %tmp612 = load i32* null ; <i32> [#uses=1]
 %tmp629 = load i32* null ; <i32> [#uses=1]
 %tmp629a = sitofp i32 %tmp629 to double ; <double> [#uses=1]
- %tmp631 = mul double %tmp629a, 0.000000e+00 ; <double> [#uses=1]
- %tmp632 = add double 0.000000e+00, %tmp631 ; <double> [#uses=1]
+ %tmp631 = fmul double %tmp629a, 0.000000e+00 ; <double> [#uses=1]
+ %tmp632 = fadd double 0.000000e+00, %tmp631 ; <double> [#uses=1]
 %tmp642 = call fastcc i32 @sign( i32 %tmp576, i32 %tmp561 ) ; <i32> [#uses=1]
 %tmp650 = mul i32 %tmp606, %tmp642 ; <i32> [#uses=1]
 %tmp656 = mul i32 %tmp650, %tmp612 ; <i32> [#uses=1]
@@ -46,8 +46,8 @@ cond_next589: ; preds = %cond_next489
 %tmp666 = sub i32 %tmp660, %tmp496 ; <i32> [#uses=1]
 %tmp667 = sitofp i32 %tmp666 to double ; <double> [#uses=2]
 call void @levrun_linfo_inter( i32 %tmp576, i32 0, i32* null, i32* null )
- %tmp671 = mul double %tmp667, %tmp667 ; <double> [#uses=1]
- %tmp675 = add double %tmp671, 0.000000e+00 ; <double> [#uses=1]
+ %tmp671 = fmul double %tmp667, %tmp667 ; <double> [#uses=1]
+ %tmp675 = fadd double %tmp671, 0.000000e+00 ; <double> [#uses=1]
 %tmp678 = fcmp oeq double %tmp632, %tmp675 ; <i1> [#uses=1]
 br i1 %tmp678, label %cond_true679, label %cond_false693
diff --git a/llvm/test/CodeGen/ARM/2009-02-27-SpillerBug.ll b/llvm/test/CodeGen/ARM/2009-02-27-SpillerBug.ll
index 56e949f832c..bd5b7195944 100644
--- a/llvm/test/CodeGen/ARM/2009-02-27-SpillerBug.ll
+++ b/llvm/test/CodeGen/ARM/2009-02-27-SpillerBug.ll
@@ -11,7 +11,7 @@ bb.thread:
 br label %bb52

 bb32: ; preds = %bb52
- %0 = add double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
+ %0 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
 %1 = add i32 %j.1, 1 ; <i32> [#uses=1]
 br label %bb52

@@ -29,14 +29,14 @@ bb53: ; preds = %bb52

 bb55: ; preds = %bb53
 %4 = load double* @a, align 4 ; <double> [#uses=10]
- %5 = add double %4, 0.000000e+00 ; <double> [#uses=16]
+ %5 = fadd double %4, 0.000000e+00 ; <double> [#uses=16]
 %6 = fcmp ogt double %k.4, 0.000000e+00 ; <i1> [#uses=1]
- %.pn404 = mul double %4, %4 ; <double> [#uses=4]
- %.pn402 = mul double %5, %5 ; <double> [#uses=5]
+ %.pn404 = fmul double %4, %4 ; <double> [#uses=4]
+ %.pn402 = fmul double %5, %5 ; <double> [#uses=5]
 %.pn165.in = load double* @N ; <double> [#uses=5]
- %.pn198 = mul double 0.000000e+00, %5 ; <double> [#uses=1]
- %.pn185 = sub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %.pn147 = sub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
+ %.pn198 = fmul double 0.000000e+00, %5 ; <double> [#uses=1]
+ %.pn185 = fsub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
+ %.pn147 = fsub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
 %.pn141 = fdiv double 0.000000e+00, %4 ; <double> [#uses=1]
 %.pn142 = fdiv double 0.000000e+00, %5 ; <double> [#uses=1]
 %.pn136 = fdiv double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
@@ -47,178 +47,178 @@ bb55: ; preds = %bb53
 %.pn117 = fdiv double 0.000000e+00, %4 ; <double> [#uses=1]
 %.pn118 = fdiv double %.pn185, %5 ; <double> [#uses=1]
 %.pn88 = fdiv double %.pn147, %5 ; <double> [#uses=1]
- %.pn81 = sub double %.pn141, %.pn142 ; <double> [#uses=1]
- %.pn77 = sub double 0.000000e+00, %.pn136 ; <double> [#uses=1]
- %.pn75 = sub double 0.000000e+00, %.pn132 ; <double> [#uses=1]
- %.pn69 = sub double %.pn123, %.pn124 ; <double> [#uses=1]
- %.pn67 = sub double 0.000000e+00, %.pn120 ; <double> [#uses=1]
- %.pn56 = sub double %.pn117, %.pn118 ; <double> [#uses=1]
- %.pn42 = sub double 0.000000e+00, %.pn88 ; <double> [#uses=1]
- %.pn60 = mul double %.pn81, 0.000000e+00 ; <double> [#uses=1]
- %.pn57 = add double %.pn77, 0.000000e+00 ; <double> [#uses=1]
- %.pn58 = mul double %.pn75, %.pn165.in ; <double> [#uses=1]
- %.pn32 = add double %.pn69, 0.000000e+00 ; <double> [#uses=1]
- %.pn33 = mul double %.pn67, %.pn165.in ; <double> [#uses=1]
- %.pn17 = sub double 0.000000e+00, %.pn60 ; <double> [#uses=1]
- %.pn9 = add double %.pn57, %.pn58 ; <double> [#uses=1]
- %.pn30 = mul double 0.000000e+00, %.pn56 ; <double> [#uses=1]
- %.pn24 = mul double 0.000000e+00, %.pn42 ; <double> [#uses=1]
- %.pn1 = add double %.pn32, %.pn33 ; <double> [#uses=1]
- %.pn28 = sub double %.pn30, 0.000000e+00 ; <double> [#uses=1]
- %.pn26 = add double %.pn28, 0.000000e+00 ; <double> [#uses=1]
- %.pn22 = sub double %.pn26, 0.000000e+00 ; <double> [#uses=1]
- %.pn20 = sub double %.pn24, 0.000000e+00 ; <double> [#uses=1]
- %.pn18 = add double %.pn22, 0.000000e+00 ; <double> [#uses=1]
- %.pn16 = add double %.pn20, 0.000000e+00 ; <double> [#uses=1]
- %.pn14 = sub double %.pn18, 0.000000e+00 ; <double> [#uses=1]
- %.pn12 = sub double %.pn16, %.pn17 ; <double> [#uses=1]
- %.pn10 = add double %.pn14, 0.000000e+00 ; <double> [#uses=1]
- %.pn8 = add double %.pn12, 0.000000e+00 ; <double> [#uses=1]
- %.pn6 = sub double %.pn10, 0.000000e+00 ; <double> [#uses=1]
- %.pn4 = sub double %.pn8, %.pn9 ; <double> [#uses=1]
- %.pn2 = add double %.pn6, 0.000000e+00 ; <double> [#uses=1]
- %.pn = add double %.pn4, 0.000000e+00 ; <double> [#uses=1]
- %N1.0 = sub double %.pn2, 0.000000e+00 ; <double> [#uses=2]
- %D1.0 = sub double %.pn, %.pn1 ; <double> [#uses=2]
+ %.pn81 = fsub double %.pn141, %.pn142 ; <double> [#uses=1]
+ %.pn77 = fsub double 0.000000e+00, %.pn136 ; <double> [#uses=1]
+ %.pn75 = fsub double 0.000000e+00, %.pn132 ; <double> [#uses=1]
+ %.pn69 = fsub double %.pn123, %.pn124 ; <double> [#uses=1]
+ %.pn67 = fsub double 0.000000e+00, %.pn120 ; <double> [#uses=1]
+ %.pn56 = fsub double %.pn117, %.pn118 ; <double> [#uses=1]
+ %.pn42 = fsub double 0.000000e+00, %.pn88 ; <double> [#uses=1]
+ %.pn60 = fmul double %.pn81, 0.000000e+00 ; <double> [#uses=1]
+ %.pn57 = fadd double %.pn77, 0.000000e+00 ; <double> [#uses=1]
+ %.pn58 = fmul double %.pn75, %.pn165.in ; <double> [#uses=1]
+ %.pn32 = fadd double %.pn69, 0.000000e+00 ; <double> [#uses=1]
+ %.pn33 = fmul double %.pn67, %.pn165.in ; <double> [#uses=1]
+ %.pn17 = fsub double 0.000000e+00, %.pn60 ; <double> [#uses=1]
+ %.pn9 = fadd double %.pn57, %.pn58 ; <double> [#uses=1]
+ %.pn30 = fmul double 0.000000e+00, %.pn56 ; <double> [#uses=1]
+ %.pn24 = fmul double 0.000000e+00, %.pn42 ; <double> [#uses=1]
+ %.pn1 = fadd double %.pn32, %.pn33 ; <double> [#uses=1]
+ %.pn28 = fsub double %.pn30, 0.000000e+00 ; <double> [#uses=1]
+ %.pn26 = fadd double %.pn28, 0.000000e+00 ; <double> [#uses=1]
+ %.pn22 = fsub double %.pn26, 0.000000e+00 ; <double> [#uses=1]
+ %.pn20 = fsub double %.pn24, 0.000000e+00 ; <double> [#uses=1]
+ %.pn18 = fadd double %.pn22, 0.000000e+00 ; <double> [#uses=1]
+ %.pn16 = fadd double %.pn20, 0.000000e+00 ; <double> [#uses=1]
+ %.pn14 = fsub double %.pn18, 0.000000e+00 ; <double> [#uses=1]
+ %.pn12 = fsub double %.pn16, %.pn17 ; <double> [#uses=1]
+ %.pn10 = fadd double %.pn14, 0.000000e+00 ; <double> [#uses=1]
+ %.pn8 = fadd double %.pn12, 0.000000e+00 ; <double> [#uses=1]
+ %.pn6 = fsub double %.pn10, 0.000000e+00 ; <double> [#uses=1]
+ %.pn4 = fsub double %.pn8, %.pn9 ; <double> [#uses=1]
+ %.pn2 = fadd double %.pn6, 0.000000e+00 ; <double> [#uses=1]
+ %.pn = fadd double %.pn4, 0.000000e+00 ; <double> [#uses=1]
+ %N1.0 = fsub double %.pn2, 0.000000e+00 ; <double> [#uses=2]
+ %D1.0 = fsub double %.pn, %.pn1 ; <double> [#uses=2]
 br i1 %6, label %bb62, label %bb64

 bb62: ; preds = %bb55
- %7 = mul double 0.000000e+00, %4 ; <double> [#uses=1]
- %8 = sub double -0.000000e+00, %7 ; <double> [#uses=3]
- %9 = mul double 0.000000e+00, %5 ; <double> [#uses=1]
- %10 = sub double -0.000000e+00, %9 ; <double> [#uses=3]
- %11 = mul double %.pn404, %4 ; <double> [#uses=5]
- %12 = mul double %.pn402, %5 ; <double> [#uses=5]
- %13 = mul double 0.000000e+00, -2.000000e+00 ; <double> [#uses=1]
+ %7 = fmul double 0.000000e+00, %4 ; <double> [#uses=1]
+ %8 = fsub double -0.000000e+00, %7 ; <double> [#uses=3]
+ %9 = fmul double 0.000000e+00, %5 ; <double> [#uses=1]
+ %10 = fsub double -0.000000e+00, %9 ; <double> [#uses=3]
+ %11 = fmul double %.pn404, %4 ; <double> [#uses=5]
+ %12 = fmul double %.pn402, %5 ; <double> [#uses=5]
+ %13 = fmul double 0.000000e+00, -2.000000e+00 ; <double> [#uses=1]
 %14 = fdiv double 0.000000e+00, %.pn402 ; <double> [#uses=1]
- %15 = sub double 0.000000e+00, %14 ; <double> [#uses=1]
- %16 = mul double 0.000000e+00, %15 ; <double> [#uses=1]
- %17 = add double %13, %16 ; <double> [#uses=1]
- %18 = mul double %.pn165.in, -2.000000e+00 ; <double> [#uses=5]
- %19 = mul double %18, 0.000000e+00 ; <double> [#uses=1]
- %20 = add double %17, %19 ; <double> [#uses=1]
- %21 = mul double 0.000000e+00, %20 ; <double> [#uses=1]
- %22 = add double 0.000000e+00, %21 ; <double> [#uses=1]
+ %15 = fsub double 0.000000e+00, %14 ; <double> [#uses=1]
+ %16 = fmul double 0.000000e+00, %15 ; <double> [#uses=1]
+ %17 = fadd double %13, %16 ; <double> [#uses=1]
+ %18 = fmul double %.pn165.in, -2.000000e+00 ; <double> [#uses=5]
+ %19 = fmul double %18, 0.000000e+00 ; <double> [#uses=1]
+ %20 = fadd double %17, %19 ; <double> [#uses=1]
+ %21 = fmul double 0.000000e+00, %20 ; <double> [#uses=1]
+ %22 = fadd double 0.000000e+00, %21 ; <double> [#uses=1]
 %23 = fdiv double 0.000000e+00, %12 ; <double> [#uses=1]
- %24 = sub double 0.000000e+00, %23 ; <double> [#uses=0]
- %25 = mul double %18, 0.000000e+00 ; <double> [#uses=1]
- %26 = add double 0.000000e+00, %25 ; <double> [#uses=1]
- %27 = mul double 0.000000e+00, %26 ; <double> [#uses=1]
- %28 = sub double %22, %27 ; <double> [#uses=1]
- %29 = mul double %11, %4 ; <double> [#uses=1]
- %30 = mul double %12, %5 ; <double> [#uses=3]
- %31 = mul double %.pn165.in, -4.000000e+00 ; <double> [#uses=1]
- %32 = mul double %.pn165.in, 0x3FF5555555555555 ; <double> [#uses=1]
- %33 = mul double %32, 0.000000e+00 ; <double> [#uses=2]
- %34 = add double %28, 0.000000e+00 ; <double> [#uses=1]
- %35 = sub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
+ %24 = fsub double 0.000000e+00, %23 ; <double> [#uses=0]
+ %25 = fmul double %18, 0.000000e+00 ; <double> [#uses=1]
+ %26 = fadd double 0.000000e+00, %25 ; <double> [#uses=1]
+ %27 = fmul double 0.000000e+00, %26 ; <double> [#uses=1]
+ %28 = fsub double %22, %27 ; <double> [#uses=1]
+ %29 = fmul double %11, %4 ; <double> [#uses=1]
+ %30 = fmul double %12, %5 ; <double> [#uses=3]
+ %31 = fmul double %.pn165.in, -4.000000e+00 ; <double> [#uses=1]
+ %32 = fmul double %.pn165.in, 0x3FF5555555555555 ; <double> [#uses=1]
+ %33 = fmul double %32, 0.000000e+00 ; <double> [#uses=2]
+ %34 = fadd double %28, 0.000000e+00 ; <double> [#uses=1]
+ %35 = fsub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
 %36 = fdiv double %35, %11 ; <double> [#uses=1]
 %37 = fdiv double 0.000000e+00, %12 ; <double> [#uses=1]
- %38 = sub double %36, %37 ; <double> [#uses=1]
- %39 = mul double 0.000000e+00, %38 ; <double> [#uses=1]
- %40 = add double 0.000000e+00, %39 ; <double> [#uses=1]
- %41 = add double %40, 0.000000e+00 ; <double> [#uses=1]
- %42 = add double %41, 0.000000e+00 ; <double> [#uses=1]
- %43 = mul double %42, 0.000000e+00 ; <double> [#uses=1]
- %44 = sub double %34, %43 ; <double> [#uses=1]
+ %38 = fsub double %36, %37 ; <double> [#uses=1]
+ %39 = fmul double 0.000000e+00, %38 ; <double> [#uses=1]
+ %40 = fadd double 0.000000e+00, %39 ; <double> [#uses=1]
+ %41 = fadd double %40, 0.000000e+00 ; <double> [#uses=1]
+ %42 = fadd double %41, 0.000000e+00 ; <double> [#uses=1]
+ %43 = fmul double %42, 0.000000e+00 ; <double> [#uses=1]
+ %44 = fsub double %34, %43 ; <double> [#uses=1]
 %45 = tail call double @llvm.exp.f64(double %8) nounwind ; <double> [#uses=1]
- %46 = sub double -0.000000e+00, %45 ; <double> [#uses=2]
+ %46 = fsub double -0.000000e+00, %45 ; <double> [#uses=2]
 %47 = fdiv double %46, 0.000000e+00 ; <double> [#uses=1]
- %48 = mul double %30, %5 ; <double> [#uses=1]
+ %48 = fmul double %30, %5 ; <double> [#uses=1]
 %49 = fdiv double 0.000000e+00, %48 ; <double> [#uses=1]
- %50 = sub double %47, %49 ; <double> [#uses=1]
- %51 = mul double %50, -4.000000e+00 ; <double> [#uses=1]
- %52 = add double %51, 0.000000e+00 ; <double> [#uses=1]
+ %50 = fsub double %47, %49 ; <double> [#uses=1]
+ %51 = fmul double %50, -4.000000e+00 ; <double> [#uses=1]
+ %52 = fadd double %51, 0.000000e+00 ; <double> [#uses=1]
 %53 = fdiv double %46, %11 ; <double> [#uses=1]
- %54 = sub double %53, 0.000000e+00 ; <double> [#uses=1]
- %55 = mul double %31, %54 ; <double> [#uses=1]
- %56 = add double %52, %55 ; <double> [#uses=1]
- %57 = add double %56, 0.000000e+00 ; <double> [#uses=1]
- %58 = add double %44, %57 ; <double> [#uses=1]
- %59 = sub double %58, 0.000000e+00 ; <double> [#uses=1]
+ %54 = fsub double %53, 0.000000e+00 ; <double> [#uses=1]
+ %55 = fmul double %31, %54 ; <double> [#uses=1]
+ %56 = fadd double %52, %55 ; <double> [#uses=1]
+ %57 = fadd double %56, 0.000000e+00 ; <double> [#uses=1]
+ %58 = fadd double %44, %57 ; <double> [#uses=1]
+ %59 = fsub double %58, 0.000000e+00 ; <double> [#uses=1]
 %60 = tail call double @llvm.exp.f64(double 0.000000e+00) nounwind ; <double> [#uses=1]
- %61 = sub double -0.000000e+00, %60 ; <double> [#uses=1]
+ %61 = fsub double -0.000000e+00, %60 ; <double> [#uses=1]
 %62 = fdiv double 0.000000e+00, -6.000000e+00 ; <double> [#uses=1]
 %63 = fdiv double %61, %5 ; <double> [#uses=1]
- %64 = sub double 0.000000e+00, %63 ; <double> [#uses=1]
- %65 = mul double %62, %64 ; <double> [#uses=1]
- %66 = sub double 0.000000e+00, %65 ; <double> [#uses=1]
- %67 = sub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=2]
+ %64 = fsub double 0.000000e+00, %63 ; <double> [#uses=1]
+ %65 = fmul double %62, %64 ; <double> [#uses=1]
+ %66 = fsub double 0.000000e+00, %65 ; <double> [#uses=1]
+ %67 = fsub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=2]
 %68 = tail call double @llvm.exp.f64(double %10) nounwind ; <double> [#uses=1]
- %69 = sub double -0.000000e+00, %68 ; <double> [#uses=2]
+ %69 = fsub double -0.000000e+00, %68 ; <double> [#uses=2]
 %70 = fdiv double %67, %.pn404 ; <double> [#uses=1]
 %71 = fdiv double %69, %.pn402 ; <double> [#uses=1]
- %72 = sub double %70, %71 ; <double> [#uses=1]
- %73 = mul double %72, -5.000000e-01 ; <double> [#uses=1]
+ %72 = fsub double %70, %71 ; <double> [#uses=1]
+ %73 = fmul double %72, -5.000000e-01 ; <double> [#uses=1]
 %74 = fdiv double %67, %4 ; <double> [#uses=1]
 %75 = fdiv double %69, %5 ; <double> [#uses=1]
- %76 = sub double %74, %75 ; <double> [#uses=1]
- %77 = mul double %76, 0.000000e+00 ; <double> [#uses=1]
- %78 = add double %73, %77 ; <double> [#uses=1]
- %79 = mul double 0.000000e+00, %78 ; <double> [#uses=1]
- %80 = add double %66, %79 ; <double> [#uses=1]
+ %76 = fsub double %74, %75 ; <double> [#uses=1]
+ %77 = fmul double %76, 0.000000e+00 ; <double> [#uses=1]
+ %78 = fadd double %73, %77 ; <double> [#uses=1]
+ %79 = fmul double 0.000000e+00, %78 ; <double> [#uses=1]
+ %80 = fadd double %66, %79 ; <double> [#uses=1]
 %81 = fdiv double 0.000000e+00, %.pn404 ; <double> [#uses=1]
 %82 = fdiv double 0.000000e+00, %.pn402 ; <double> [#uses=1]
- %83 = sub double %81, %82 ; <double> [#uses=1]
- %84 = mul double %83, -5.000000e-01 ; <double> [#uses=1]
+ %83 = fsub double %81, %82 ; <double> [#uses=1]
+ %84 = fmul double %83, -5.000000e-01 ; <double> [#uses=1]
 %85 = fdiv double 0.000000e+00, %4 ; <double> [#uses=1]
 %86 = fdiv double 0.000000e+00, %5 ; <double> [#uses=1]
- %87 = sub double %85, %86 ; <double> [#uses=1]
- %88 = mul double %87, 0.000000e+00 ; <double> [#uses=1]
- %89 = add double %84, %88 ; <double> [#uses=1]
- %90 = mul double 0.000000e+00, %89 ; <double> [#uses=1]
- %91 = sub double %80, %90 ; <double> [#uses=1]
+ %87 = fsub double %85, %86 ; <double> [#uses=1]
+ %88 = fmul double %87, 0.000000e+00 ; <double> [#uses=1]
+ %89 = fadd double %84, %88 ; <double> [#uses=1]
+ %90 = fmul double 0.000000e+00, %89 ; <double> [#uses=1]
+ %91 = fsub double %80, %90 ; <double> [#uses=1]
 %92 = tail call double @llvm.exp.f64(double %8) nounwind ; <double> [#uses=1]
- %93 = sub double -0.000000e+00, %92 ; <double> [#uses=1]
+ %93 = fsub double -0.000000e+00, %92 ; <double> [#uses=1]
 %94 = tail call double @llvm.exp.f64(double %10) nounwind ; <double> [#uses=1]
- %95 = sub double -0.000000e+00, %94 ; <double> [#uses=3]
+ %95 = fsub double -0.000000e+00, %94 ; <double> [#uses=3]
 %96 = fdiv double %95, %.pn402 ; <double> [#uses=1]
- %97 = sub double 0.000000e+00, %96 ; <double> [#uses=1]
- %98 = mul double 0.000000e+00, %97 ; <double> [#uses=1]
+ %97 = fsub double 0.000000e+00, %96 ; <double> [#uses=1]
+ %98 = fmul double 0.000000e+00, %97 ; <double> [#uses=1]
 %99 = fdiv double %93, %11 ; <double> [#uses=1]
 %100 = fdiv double %95, %12 ; <double> [#uses=1]
- %101 = sub double %99, %100 ; <double> [#uses=1]
- %102 = sub double %98, %101 ; <double> [#uses=1]
+ %101 = fsub double %99, %100 ; <double> [#uses=1]
+ %102 = fsub double %98, %101 ; <double> [#uses=1]
 %103 = fdiv double %95, %5 ; <double> [#uses=1]
- %104 = sub double 0.000000e+00, %103 ; <double> [#uses=1]
- %105 = mul double %18, %104 ; <double> [#uses=1]
- %106 = add double %102, %105 ; <double> [#uses=1]
- %107 = mul double %106, %k.4 ; <double> [#uses=1]
- %108 = add double %91, %107 ; <double> [#uses=1]
- %109 = sub double %108, 0.000000e+00 ; <double> [#uses=1]
+ %104 = fsub double 0.000000e+00, %103 ; <double> [#uses=1]
+ %105 = fmul double %18, %104 ; <double> [#uses=1]
+ %106 = fadd double %102, %105 ; <double> [#uses=1]
+ %107 = fmul double %106, %k.4 ; <double> [#uses=1]
+ %108 = fadd double %91, %107 ; <double> [#uses=1]
+ %109 = fsub double %108, 0.000000e+00 ; <double> [#uses=1]
 %110 = tail call double @llvm.exp.f64(double %8) nounwind ; <double> [#uses=1]
- %111 = sub double -0.000000e+00, %110 ; <double> [#uses=2]
+ %111 = fsub double -0.000000e+00, %110 ; <double> [#uses=2]
 %112 = tail call double @llvm.exp.f64(double %10) nounwind ; <double> [#uses=1]
- %113 = sub double -0.000000e+00, %112 ; <double> [#uses=2]
+ %113 = fsub double -0.000000e+00, %112 ; <double> [#uses=2]
 %114 = fdiv double %111, %11 ; <double> [#uses=1]
 %115 = fdiv double %113, %12 ; <double> [#uses=1]
- %116 = sub double %114, %115 ; <double> [#uses=1]
- %117 = mul double 0.000000e+00, %116 ; <double> [#uses=1]
+ %116 = fsub double %114, %115 ; <double> [#uses=1]
+ %117 = fmul double 0.000000e+00, %116 ; <double> [#uses=1]
 %118 = fdiv double %111, %29 ; <double> [#uses=1]
 %119 = fdiv double %113, %30 ; <double> [#uses=1]
- %120 = sub double %118, %119 ; <double> [#uses=1]
- %121 = sub double %117, %120 ; <double> [#uses=1]
- %122 = mul double %18, 0.000000e+00 ; <double> [#uses=1]
- %123 = add double %121, %122 ; <double> [#uses=1]
- %124 = mul double %33, 0.000000e+00 ; <double> [#uses=1]
- %125 = add double %123, %124 ; <double> [#uses=1]
- %126 = add double %109, %125 ; <double> [#uses=1]
+ %120 = fsub double %118, %119 ; <double> [#uses=1]
+ %121 = fsub double %117, %120 ; <double> [#uses=1]
+ %122 = fmul double %18, 0.000000e+00 ; <double> [#uses=1]
+ %123 = fadd double %121, %122 ; <double> [#uses=1]
+ %124 = fmul double %33, 0.000000e+00 ; <double> [#uses=1]
+ %125 = fadd double %123, %124 ; <double> [#uses=1]
+ %126 = fadd double %109, %125 ; <double> [#uses=1]
 %127 = tail call double @llvm.exp.f64(double 0.000000e+00) nounwind ; <double> [#uses=1]
- %128 = sub double -0.000000e+00, %127 ; <double> [#uses=2]
+ %128 = fsub double -0.000000e+00, %127 ; <double> [#uses=2]
 %129 = fdiv double %128, %30 ; <double> [#uses=1]
- %130 = sub double 0.000000e+00, %129 ; <double> [#uses=1]
- %131 = sub double 0.000000e+00, %130 ; <double> [#uses=1]
+ %130 = fsub double 0.000000e+00, %129 ; <double> [#uses=1]
+ %131 = fsub double 0.000000e+00, %130 ; <double> [#uses=1]
 %132 = fdiv double 0.000000e+00, %.pn404 ; <double> [#uses=1]
- %133 = sub double %132, 0.000000e+00 ; <double> [#uses=1]
- %134 = mul double %18, %133 ; <double> [#uses=1]
- %135 = add double %131, %134 ; <double> [#uses=1]
+ %133 = fsub double %132, 0.000000e+00 ; <double> [#uses=1]
+ %134 = fmul double %18, %133 ; <double> [#uses=1]
+ %135 = fadd double %131, %134 ; <double> [#uses=1]
 %136 = fdiv double %128, %5 ; <double> [#uses=1]
- %137 = sub double 0.000000e+00, %136 ; <double> [#uses=1]
- %138 = mul double %33, %137 ; <double> [#uses=1]
- %139 = add double %135, %138 ; <double> [#uses=1]
- %140 = sub double %126, %139 ; <double> [#uses=1]
- %141 = add double %N1.0, %59 ; <double> [#uses=1]
- %142 = add double %D1.0, %140 ; <double> [#uses=1]
+ %137 = fsub double 0.000000e+00, %136 ; <double> [#uses=1]
+ %138 = fmul double %33, %137 ; <double> [#uses=1]
+ %139 = fadd double %135, %138 ; <double> [#uses=1]
+ %140 = fsub double %126, %139 ; <double> [#uses=1]
+ %141 = fadd double %N1.0, %59 ; <double> [#uses=1]
+ %142 = fadd double %D1.0, %140 ; <double> [#uses=1]
 br label %bb64

 bb64: ; preds = %bb62, %bb55
diff --git a/llvm/test/CodeGen/ARM/2009-03-07-SpillerBug.ll b/llvm/test/CodeGen/ARM/2009-03-07-SpillerBug.ll
index 7556616f995..399ed3081f2 100644
--- a/llvm/test/CodeGen/ARM/2009-03-07-SpillerBug.ll
+++ b/llvm/test/CodeGen/ARM/2009-03-07-SpillerBug.ll
@@ -26,39 +26,39 @@ entry:

 bb3: ; preds = %entry
 %2 = fdiv double 1.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %3 = mul double 0.000000e+00, %2 ; <double> [#uses=2]
+ %3 = fmul double 0.000000e+00, %2 ; <double> [#uses=2]
 %4 = call double @llvm.sqrt.f64(double 0.000000e+00) nounwind ; <double> [#uses=1]
 %5 = fdiv double 1.000000e+00, %4 ; <double> [#uses=2]
- %6 = mul double %3, %5 ; <double> [#uses=2]
- %7 = mul double 0.000000e+00, %5 ; <double> [#uses=2]
- %8 = mul double %3, %7 ; <double> [#uses=1]
- %9 = sub double %8, 0.000000e+00 ; <double> [#uses=1]
- %10 = mul double 0.000000e+00, %6 ; <double> [#uses=1]
- %11 = sub double 0.000000e+00, %10 ; <double> [#uses=1]
- %12 = sub double -0.000000e+00, %11 ; <double> [#uses=1]
- %13 = mul double %0, %0 ; <double> [#uses=2]
- %14 = sub double %13, 0.000000e+00 ; <double> [#uses=1]
+ %6 = fmul double %3, %5 ; <double> [#uses=2]
+ %7 = fmul double 0.000000e+00, %5 ; <double> [#uses=2]
+ %8 = fmul double %3, %7 ; <double> [#uses=1]
+ %9 = fsub double %8, 0.000000e+00 ; <double> [#uses=1]
+ %10 = fmul double 0.000000e+00, %6 ; <double> [#uses=1]
+ %11 = fsub double 0.000000e+00, %10 ; <double> [#uses=1]
+ %12 = fsub double -0.000000e+00, %11 ; <double> [#uses=1]
+ %13 = fmul double %0, %0 ; <double> [#uses=2]
+ %14 = fsub double %13, 0.000000e+00 ; <double> [#uses=1]
 %15 = call double @llvm.sqrt.f64(double %14) ; <double> [#uses=1]
- %16 = mul double 0.000000e+00, %15 ; <double> [#uses=1]
+ %16 = fmul double 0.000000e+00, %15 ; <double> [#uses=1]
 %17 = fdiv double %16, %0 ; <double> [#uses=1]
- %18 = add double 0.000000e+00, %17 ; <double> [#uses=1]
+ %18 = fadd double 0.000000e+00, %17 ; <double> [#uses=1]
 %19 = call double @acos(double %18) nounwind readonly ; <double> [#uses=1]
 %20 = load double* null, align 4 ; <double> [#uses=1]
- %21 = mul double %20, 0x401921FB54442D18 ; <double> [#uses=1]
+ %21 = fmul double %20, 0x401921FB54442D18 ; <double> [#uses=1]
 %22 = call double @sin(double %19) nounwind readonly ; <double> [#uses=2]
- %23 = mul double %22, 0.000000e+00 ; <double> [#uses=2]
- %24 = mul double %6, %23 ; <double> [#uses=1]
- %25 = mul double %7, %23 ; <double> [#uses=1]
+ %23 = fmul double %22, 0.000000e+00 ; <double> [#uses=2]
+ %24 = fmul double %6, %23 ; <double> [#uses=1]
+ %25 = fmul double %7, %23 ; <double> [#uses=1]
 %26 = call double @sin(double %21) nounwind readonly ; <double> [#uses=1]
- %27 = mul double %22, %26 ; <double> [#uses=2]
- %28 = mul double %9, %27 ; <double> [#uses=1]
- %29 = mul double %27, %12 ; <double> [#uses=1]
- %30 = add double %24, %28 ; <double> [#uses=1]
- %31 = add double 0.000000e+00, %29 ; <double> [#uses=1]
- %32 = add double %25, 0.000000e+00 ; <double> [#uses=1]
- %33 = add double %30, 0.000000e+00 ; <double> [#uses=1]
- %34 = add double %31, 0.000000e+00 ; <double> [#uses=1]
- %35 = add double %32, 0.000000e+00 ; <double> [#uses=1]
+ %27 = fmul double %22, %26 ; <double> [#uses=2]
+ %28 = fmul double %9, %27 ; <double> [#uses=1]
+ %29 = fmul double %27, %12 ; <double> [#uses=1]
+ %30 = fadd double %24, %28 ; <double> [#uses=1]
+ %31 = fadd double 0.000000e+00, %29 ; <double> [#uses=1]
+ %32 = fadd double %25, 0.000000e+00 ; <double> [#uses=1]
+ %33 = fadd double %30, 0.000000e+00 ; <double> [#uses=1]
+ %34 = fadd double %31, 0.000000e+00 ; <double> [#uses=1]
+ %35 = fadd double %32, 0.000000e+00 ; <double> [#uses=1]
 %36 = bitcast %struct.ggPoint3* %x to i8* ; <i8*> [#uses=1]
 call void @llvm.memcpy.i32(i8* null, i8* %36, i32 24, i32 4) nounwind
 store double %33, double* null, align 8
@@ -68,9 +68,9 @@ bb5.i.i.i: ; preds = %bb3
 unreachable

 _Z20ggRaySphereIntersectRK6ggRay3RK8ggSphereddRd.exit: ; preds = %bb3
- %37 = sub double %13, 0.000000e+00 ; <double> [#uses=0]
- %38 = sub double -0.000000e+00, %34 ; <double> [#uses=0]
- %39 = sub double -0.000000e+00, %35 ; <double> [#uses=0]
+ %37 = fsub double %13, 0.000000e+00 ; <double> [#uses=0]
+ %38 = fsub double -0.000000e+00, %34 ; <double> [#uses=0]
+ %39 = fsub double -0.000000e+00, %35 ; <double> [#uses=0]
 ret i32 1

 bb7: ; preds = %entry
diff --git a/llvm/test/CodeGen/ARM/2009-04-08-FloatUndef.ll b/llvm/test/CodeGen/ARM/2009-04-08-FloatUndef.ll
index 9dc3b3485ec..f394847362f 100644
--- a/llvm/test/CodeGen/ARM/2009-04-08-FloatUndef.ll
+++ b/llvm/test/CodeGen/ARM/2009-04-08-FloatUndef.ll
@@ -4,8 +4,8 @@ define void @execute_shader(<4 x float>* %OUT, <4 x float>* %IN, <4 x float>* %C
 entry:
 %input2 = load <4 x float>* null, align 16 ; <<4 x float>> [#uses=2]
 %shuffle7 = shufflevector <4 x float> %input2, <4 x float> <float 0.000000e+00, float 1.000000e+00, float 0.000000e+00, float 1.000000e+00>, <4 x i32> <i32 2, i32 2, i32 2, i32 2> ; <<4 x float>> [#uses=1]
- %mul1 = mul <4 x float> %shuffle7, zeroinitializer ; <<4 x float>> [#uses=1]
- %add2 = add <4 x float> %mul1, %input2 ; <<4 x float>> [#uses=1]
+ %mul1 = fmul <4 x float> %shuffle7, zeroinitializer ; <<4 x float>> [#uses=1]
+ %add2 = fadd <4 x float> %mul1, %input2 ; <<4 x float>> [#uses=1]
 store <4 x float> %add2, <4 x float>* null, align 16
 ret void
 }
diff --git a/llvm/test/CodeGen/ARM/cse-libcalls.ll b/llvm/test/CodeGen/ARM/cse-libcalls.ll
index 3b499a4021c..4f4091af483 100644
--- a/llvm/test/CodeGen/ARM/cse-libcalls.ll
+++ b/llvm/test/CodeGen/ARM/cse-libcalls.ll
@@ -16,7 +16,7 @@ bb28.i: ; preds = %bb28.i, %entry
 br i1 false, label %bb502.loopexit.i, label %bb28.i

 bb.nph53.i: ; preds = %bb502.loopexit.i
- %tmp354.i = sub double -0.000000e+00, %tmp10.i4 ; <double> [#uses=0]
+ %tmp354.i = fsub double -0.000000e+00, %tmp10.i4 ; <double> [#uses=0]
 br label %bb244.i

 bb244.i: ; preds = %bb244.i, %bb.nph53.i
diff --git a/llvm/test/CodeGen/ARM/fixunsdfdi.ll b/llvm/test/CodeGen/ARM/fixunsdfdi.ll
index d3038b9af76..777a3d69a19 100644
--- a/llvm/test/CodeGen/ARM/fixunsdfdi.ll
+++ b/llvm/test/CodeGen/ARM/fixunsdfdi.ll
@@ -13,7 +13,7 @@ bb5: ; preds = %bb3
 %u.in.mask = and i64 %x14, -4294967296 ; <i64> [#uses=1]
 %.ins = or i64 0, %u.in.mask ; <i64> [#uses=1]
 %0 = bitcast i64 %.ins to double ; <double> [#uses=1]
- %1 = sub double %x, %0 ; <double> [#uses=1]
+ %1 = fsub double %x, %0 ; <double> [#uses=1]
 %2 = fptosi double %1 to i32 ; <i32> [#uses=1]
 %3 = add i32 %2, 0 ; <i32> [#uses=1]
 %4 = zext i32 %3 to i64 ; <i64> [#uses=1]
diff --git a/llvm/test/CodeGen/ARM/fnmul.ll b/llvm/test/CodeGen/ARM/fnmul.ll
index 87a30c99e28..7bbda2d76d5 100644
--- a/llvm/test/CodeGen/ARM/fnmul.ll
+++ b/llvm/test/CodeGen/ARM/fnmul.ll
@@ -4,8 +4,8 @@
 define double @t1(double %a, double %b) {
 entry:
- %tmp2 = sub double -0.000000e+00, %a ; <double> [#uses=1]
- %tmp4 = mul double %tmp2, %b ; <double> [#uses=1]
+ %tmp2 = fsub double -0.000000e+00, %a ; <double> [#uses=1]
+ %tmp4 = fmul double %tmp2, %b ; <double> [#uses=1]
 ret double %tmp4
 }
diff --git a/llvm/test/CodeGen/ARM/fparith.ll b/llvm/test/CodeGen/ARM/fparith.ll
index 11933d5f70c..568a6c41a0d 100644
--- a/llvm/test/CodeGen/ARM/fparith.ll
+++ b/llvm/test/CodeGen/ARM/fparith.ll
@@ -10,49 +10,49 @@
 define float @f1(float %a, float %b) {
 entry:
- %tmp = add float %a, %b ; <float> [#uses=1]
+ %tmp = fadd float %a, %b ; <float> [#uses=1]
 ret float %tmp
 }

 define double @f2(double %a, double %b) {
 entry:
- %tmp = add double %a, %b ; <double> [#uses=1]
+ %tmp = fadd double %a, %b ; <double> [#uses=1]
 ret double %tmp
 }

 define float @f3(float %a, float %b) {
 entry:
- %tmp = mul float %a, %b ; <float> [#uses=1]
+ %tmp = fmul float %a, %b ; <float> [#uses=1]
 ret float %tmp
 }

 define double @f4(double %a, double %b) {
 entry:
- %tmp = mul double %a, %b ; <double> [#uses=1]
+ %tmp = fmul double %a, %b ; <double> [#uses=1]
 ret double %tmp
 }

 define float @f5(float %a, float %b) {
 entry:
- %tmp = sub float %a, %b ; <float> [#uses=1]
+ %tmp = fsub float %a, %b ; <float> [#uses=1]
 ret float %tmp
 }

 define double @f6(double %a, double %b) {
 entry:
- %tmp = sub double %a, %b ; <double> [#uses=1]
+ %tmp = fsub double %a, %b ; <double> [#uses=1]
 ret double %tmp
 }

 define float @f7(float %a) {
 entry:
- %tmp1 = sub float -0.000000e+00, %a ; <float> [#uses=1]
+ %tmp1 = fsub float -0.000000e+00, %a ; <float> [#uses=1]
 ret float %tmp1
 }

 define double @f8(double %a) {
 entry:
- %tmp1 = sub double -0.000000e+00, %a ; <double> [#uses=1]
+ %tmp1 = fsub double -0.000000e+00, %a ; <double> [#uses=1]
 ret double %tmp1
 }
diff --git a/llvm/test/CodeGen/ARM/fpmem.ll b/llvm/test/CodeGen/ARM/fpmem.ll
index 48204ecdebf..13653bbe6aa 100644
--- a/llvm/test/CodeGen/ARM/fpmem.ll
+++ b/llvm/test/CodeGen/ARM/fpmem.ll
@@ -11,12 +11,12 @@ define float @f1(float %a) {

 define float @f2(float* %v, float %u) {
 %tmp = load float* %v ; <float> [#uses=1]
- %tmp1 = add float %tmp, %u ; <float> [#uses=1]
+ %tmp1 = fadd float %tmp, %u ; <float> [#uses=1]
 ret float %tmp1
 }

 define void @f3(float %a, float %b, float* %v) {
- %tmp = add float %a, %b ; <float> [#uses=1]
+ %tmp = fadd float %a, %b ; <float> [#uses=1]
 store float %tmp, float* %v
 ret void
 }
diff --git a/llvm/test/CodeGen/ARM/illegal-vector-bitcast.ll b/llvm/test/CodeGen/ARM/illegal-vector-bitcast.ll
index 79f9929b7df..ad24eb5dad7 100644
--- a/llvm/test/CodeGen/ARM/illegal-vector-bitcast.ll
+++ b/llvm/test/CodeGen/ARM/illegal-vector-bitcast.ll
@@ -3,7 +3,7 @@
 define void @foo(<8 x float>* %f, <8 x float>* %g, <4 x i64>* %y) {
 %h = load <8 x float>* %f
- %i = mul <8 x float> %h, <float 0x3FF19999A0000000, float 0x400A666660000000, float 0x40119999A0000000, float 0x40159999A0000000, float 0.5, float 0x3FE3333340000000, float 0x3FE6666660000000, float 0x3FE99999A0000000>
+ %i = fmul <8 x float> %h, <float 0x3FF19999A0000000, float 0x400A666660000000, float 0x40119999A0000000, float 0x40159999A0000000, float 0.5, float 0x3FE3333340000000, float 0x3FE6666660000000, float 0x3FE99999A0000000>
 %m = bitcast <8 x float> %i to <4 x i64>
 %z = load <4 x i64>* %y
 %n = mul <4 x i64> %z, %m
diff --git a/llvm/test/CodeGen/ARM/vfp.ll b/llvm/test/CodeGen/ARM/vfp.ll
index 2acb33f9aeb..f58da440935 100644
--- a/llvm/test/CodeGen/ARM/vfp.ll
+++ b/llvm/test/CodeGen/ARM/vfp.ll
@@ -39,10 +39,10 @@ define void @test_abs(float* %P, double* %D) {

 define void @test_add(float* %P, double* %D) {
 %a = load float* %P ; <float> [#uses=2]
- %b = add float %a, %a ; <float> [#uses=1]
+ %b = fadd float %a, %a ; <float> [#uses=1]
 store float %b, float* %P
 %A = load double* %D ; <double> [#uses=2]
- %B = add double %A, %A ; <double> [#uses=1]
+ %B = fadd double %A, %A ; <double> [#uses=1]
 store double %B, double* %D
 ret void
 }
@@ -61,8 +61,8 @@ define void @test_fma(float* %P1, float* %P2, float* %P3) {
 %a1 = load float* %P1 ; <float> [#uses=1]
 %a2 = load float* %P2 ; <float> [#uses=1]
 %a3 = load float* %P3 ; <float> [#uses=1]
- %X = mul float %a1, %a2 ; <float> [#uses=1]
- %Y = sub float %X, %a3 ; <float> [#uses=1]
+ %X = fmul float %a1, %a2 ; <float> [#uses=1]
+ %Y = fsub float %X, %a3 ; <float> [#uses=1]
 store float %Y, float* %P1
 ret void
 }
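One pattern worth calling out from the illegal-vector-bitcast.ll hunk above: the split is driven by the element type, so floating-point vectors move to fmul/fadd/fsub while integer vectors keep mul/add/sub. A minimal sketch in the typed-pointer IR syntax of this era (the function and value names are illustrative):

```llvm
define void @vec_split(<8 x float> %h, <4 x i64> %z, <8 x float>* %pf, <4 x i64>* %pi) {
entry:
  %i = fmul <8 x float> %h, %h    ; FP vector: was "mul", now "fmul"
  %n = mul <4 x i64> %z, %z       ; integer vector: keeps "mul"
  store <8 x float> %i, <8 x float>* %pf
  store <4 x i64> %n, <4 x i64>* %pi
  ret void
}
```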

