path: root/llvm/test/Transforms/InstCombine/fmul-sqrt.ll
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -instcombine < %s | FileCheck %s

declare double @llvm.sqrt.f64(double) nounwind readnone speculatable
declare void @use(double)

; sqrt(a) * sqrt(b) no math flags

define double @sqrt_a_sqrt_b(double %a, double %b) {
; CHECK-LABEL: @sqrt_a_sqrt_b(
; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.sqrt.f64(double [[A:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.sqrt.f64(double [[B:%.*]])
; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[TMP1]], [[TMP2]]
; CHECK-NEXT:    ret double [[MUL]]
;
  %1 = call double @llvm.sqrt.f64(double %a)
  %2 = call double @llvm.sqrt.f64(double %b)
  %mul = fmul double %1, %2
  ret double %mul
}

; sqrt(a) * sqrt(b) fast-math, multiple uses

define double @sqrt_a_sqrt_b_multiple_uses(double %a, double %b) {
; CHECK-LABEL: @sqrt_a_sqrt_b_multiple_uses(
; CHECK-NEXT:    [[TMP1:%.*]] = call fast double @llvm.sqrt.f64(double [[A:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = call fast double @llvm.sqrt.f64(double [[B:%.*]])
; CHECK-NEXT:    [[MUL:%.*]] = fmul fast double [[TMP1]], [[TMP2]]
; CHECK-NEXT:    call void @use(double [[TMP2]])
; CHECK-NEXT:    ret double [[MUL]]
;
  %1 = call fast double @llvm.sqrt.f64(double %a)
  %2 = call fast double @llvm.sqrt.f64(double %b)
  %mul = fmul fast double %1, %2
  call void @use(double %2)
  ret double %mul
}

; sqrt(a) * sqrt(b) => sqrt(a*b) with fast-math

define double @sqrt_a_sqrt_b_reassoc_nnan(double %a, double %b) {
; CHECK-LABEL: @sqrt_a_sqrt_b_reassoc_nnan(
; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc nnan double [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call reassoc nnan double @llvm.sqrt.f64(double [[TMP1]])
; CHECK-NEXT:    ret double [[TMP2]]
;
  %1 = call double @llvm.sqrt.f64(double %a)
  %2 = call double @llvm.sqrt.f64(double %b)
  %mul = fmul reassoc nnan double %1, %2
  ret double %mul
}

; Negative test: 'reassoc' alone is not enough for the fold. 'nnan' is also required
; because it disallows the possibility that both operands are negative; without it,
; the transform could return a number when the answer should be NaN.
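; For example (values chosen here only to illustrate the hazard): with a = -4.0 and
; b = -9.0, sqrt(-4.0) * sqrt(-9.0) = NaN * NaN = NaN, but
; sqrt(-4.0 * -9.0) = sqrt(36.0) = 6.0.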

define double @sqrt_a_sqrt_b_reassoc(double %a, double %b) {
; CHECK-LABEL: @sqrt_a_sqrt_b_reassoc(
; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.sqrt.f64(double [[A:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.sqrt.f64(double [[B:%.*]])
; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[TMP1]], [[TMP2]]
; CHECK-NEXT:    ret double [[MUL]]
;
  %1 = call double @llvm.sqrt.f64(double %a)
  %2 = call double @llvm.sqrt.f64(double %b)
  %mul = fmul reassoc double %1, %2
  ret double %mul
}

; sqrt(a) * sqrt(b) * sqrt(c) * sqrt(d) => sqrt(a*b*c*d) with fast-math
; 'reassoc nnan' on the fmuls is all that is required, but check propagation of other FMF.
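; As the CHECK lines below suggest, the flags on each original fmul are expected to carry
; over to the corresponding new fmul, and the sqrt call takes the flags of the final fmul.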

define double @sqrt_a_sqrt_b_sqrt_c_sqrt_d_reassoc(double %a, double %b, double %c, double %d) {
; CHECK-LABEL: @sqrt_a_sqrt_b_sqrt_c_sqrt_d_reassoc(
; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc nnan arcp double [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = fmul reassoc nnan double [[TMP1]], [[C:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = fmul reassoc nnan ninf double [[TMP2]], [[D:%.*]]
; CHECK-NEXT:    [[TMP4:%.*]] = call reassoc nnan ninf double @llvm.sqrt.f64(double [[TMP3]])
; CHECK-NEXT:    ret double [[TMP4]]
;
  %1 = call double @llvm.sqrt.f64(double %a)
  %2 = call double @llvm.sqrt.f64(double %b)
  %3 = call double @llvm.sqrt.f64(double %c)
  %4 = call double @llvm.sqrt.f64(double %d)
  %mul = fmul reassoc nnan arcp double %1, %2
  %mul1 = fmul reassoc nnan double %mul, %3
  %mul2 = fmul reassoc nnan ninf double %mul1, %4
  ret double %mul2
}
