Diffstat (limited to 'llvm/test/CodeGen/SystemZ/fp-conv-02.ll')
| -rw-r--r-- | llvm/test/CodeGen/SystemZ/fp-conv-02.ll | 81 |
1 file changed, 81 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/SystemZ/fp-conv-02.ll b/llvm/test/CodeGen/SystemZ/fp-conv-02.ll
index f284e1dc2ae..eb405da2dae 100644
--- a/llvm/test/CodeGen/SystemZ/fp-conv-02.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-conv-02.ll
@@ -69,3 +69,84 @@ define double @f6(float *%base, i64 %index) {
   %res = fpext float %val to double
   ret double %res
 }
+
+; Test a case where we spill the source of at least one LDEBR. We want
+; to use LDEB if possible.
+define void @f7(double *%ptr1, float *%ptr2) {
+; CHECK: f7:
+; CHECK: ldeb {{%f[0-9]+}}, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %val0 = load volatile float *%ptr2
+  %val1 = load volatile float *%ptr2
+  %val2 = load volatile float *%ptr2
+  %val3 = load volatile float *%ptr2
+  %val4 = load volatile float *%ptr2
+  %val5 = load volatile float *%ptr2
+  %val6 = load volatile float *%ptr2
+  %val7 = load volatile float *%ptr2
+  %val8 = load volatile float *%ptr2
+  %val9 = load volatile float *%ptr2
+  %val10 = load volatile float *%ptr2
+  %val11 = load volatile float *%ptr2
+  %val12 = load volatile float *%ptr2
+  %val13 = load volatile float *%ptr2
+  %val14 = load volatile float *%ptr2
+  %val15 = load volatile float *%ptr2
+  %val16 = load volatile float *%ptr2
+
+  %ext0 = fpext float %val0 to double
+  %ext1 = fpext float %val1 to double
+  %ext2 = fpext float %val2 to double
+  %ext3 = fpext float %val3 to double
+  %ext4 = fpext float %val4 to double
+  %ext5 = fpext float %val5 to double
+  %ext6 = fpext float %val6 to double
+  %ext7 = fpext float %val7 to double
+  %ext8 = fpext float %val8 to double
+  %ext9 = fpext float %val9 to double
+  %ext10 = fpext float %val10 to double
+  %ext11 = fpext float %val11 to double
+  %ext12 = fpext float %val12 to double
+  %ext13 = fpext float %val13 to double
+  %ext14 = fpext float %val14 to double
+  %ext15 = fpext float %val15 to double
+  %ext16 = fpext float %val16 to double
+
+  store volatile float %val0, float *%ptr2
+  store volatile float %val1, float *%ptr2
+  store volatile float %val2, float *%ptr2
+  store volatile float %val3, float *%ptr2
+  store volatile float %val4, float *%ptr2
+  store volatile float %val5, float *%ptr2
+  store volatile float %val6, float *%ptr2
+  store volatile float %val7, float *%ptr2
+  store volatile float %val8, float *%ptr2
+  store volatile float %val9, float *%ptr2
+  store volatile float %val10, float *%ptr2
+  store volatile float %val11, float *%ptr2
+  store volatile float %val12, float *%ptr2
+  store volatile float %val13, float *%ptr2
+  store volatile float %val14, float *%ptr2
+  store volatile float %val15, float *%ptr2
+  store volatile float %val16, float *%ptr2
+
+  store volatile double %ext0, double *%ptr1
+  store volatile double %ext1, double *%ptr1
+  store volatile double %ext2, double *%ptr1
+  store volatile double %ext3, double *%ptr1
+  store volatile double %ext4, double *%ptr1
+  store volatile double %ext5, double *%ptr1
+  store volatile double %ext6, double *%ptr1
+  store volatile double %ext7, double *%ptr1
+  store volatile double %ext8, double *%ptr1
+  store volatile double %ext9, double *%ptr1
+  store volatile double %ext10, double *%ptr1
+  store volatile double %ext11, double *%ptr1
+  store volatile double %ext12, double *%ptr1
+  store volatile double %ext13, double *%ptr1
+  store volatile double %ext14, double *%ptr1
+  store volatile double %ext15, double *%ptr1
+  store volatile double %ext16, double *%ptr1
+
+  ret void
+}
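For background: LDEBR is the register-to-register form of the SystemZ float-to-double extension, while LDEB extends directly from memory. f7 keeps seventeen float values live at once, more than the sixteen FP registers (%f0-%f15) can hold, so at least one value must be spilled to the stack. The CHECK line verifies that the reload from the spill slot is folded into an LDEB (the 16{{[04]}}(%r15) pattern matches offset 160 or 164 off the stack pointer, i.e. just past the 160-byte register save area of the SystemZ ELF ABI frame) rather than being reloaded with LE and then extended with LDEBR. The fold itself is the same load-plus-fpext pattern exercised earlier in this file; a minimal standalone sketch, with a hypothetical function name and no spill pressure:

; When the load feeds the fpext directly, the backend can select a
; single "ldeb %f0, 0(%r2)" instead of "le %f0, 0(%r2)" + "ldebr %f0, %f0".
define double @sketch(float *%ptr) {
  %val = load float *%ptr
  %ext = fpext float %val to double
  ret double %ext
}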

