From c99d0e9186ed592a722c6711cea1eda4da4aebe5 Mon Sep 17 00:00:00 2001
From: Benjamin Kramer
Date: Tue, 7 Aug 2012 11:13:19 +0000
Subject: PR13095: Give an inline cost bonus to functions using byval arguments.

We give a bonus for every argument because the argument setup is not needed
anymore when the function is inlined. With this patch we interpret byval
arguments as a compact representation of many arguments. The byval argument
setup is implemented in the backend as an inline memcpy, so to model the cost
as accurately as possible we take the number of pointer-sized elements in the
byval argument and give a bonus of 2 instructions for every one of them. The
bonus is capped at 8 elements, which is the number of stores at which the x86
backend switches from an expanded inline memcpy to a real memcpy. It would be
better to use the real memcpy threshold from the backend, but it's not
available via TargetData.

This change brings the performance of c-ray in line with gcc 4.7. The included
test case tries to reproduce the c-ray problem in order to catch regressions
for this benchmark early; its performance is dominated by the inline decision
for one specific call.

This has only a small impact on most code, and a larger one on x86 and ARM
than on x86_64 because of the way those ABIs handle byval arguments. When
building LLVM for x86 it gives a small inline cost boost to virtually any
function using StringRef or STL allocators, yet increases the overall binary
size by only 0.01%. The size of gcc compiled by clang actually shrank by a
couple of bytes with this patch applied, but not significantly.

llvm-svn: 161413
---
 llvm/test/Transforms/Inline/inline-byval-bonus.ll | 193 ++++++++++++++++++++++
 1 file changed, 193 insertions(+)
 create mode 100644 llvm/test/Transforms/Inline/inline-byval-bonus.ll
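As a rough model of what the heuristic computes, here is a standalone C++
sketch. It is not the code in this commit; the function name, the constant
names, and the per-instruction cost unit of 5 are assumptions used only for
illustration:

  // Sketch of the byval bonus described above (illustrative only; the
  // constants and names are assumed, not taken from this commit).
  #include <algorithm>
  #include <cstdint>

  constexpr int64_t InstrCost = 5;       // assumed cost unit for one IR instruction
  constexpr int64_t MaxElements = 8;     // x86 expands at most 8 stores inline
  constexpr int64_t BonusPerElement = 2; // 2 instructions saved per element

  // Bonus for a single byval argument. PointerSize comes from the target
  // data layout (8 bytes on x86_64).
  int64_t byvalBonus(int64_t ByvalBytes, int64_t PointerSize) {
    // Number of pointer-sized elements the argument copy needs, capped at
    // the point where the backend would emit a real memcpy call instead.
    int64_t Elements = std::min((ByvalBytes + PointerSize - 1) / PointerSize,
                                MaxElements);
    return Elements * BonusPerElement * InstrCost;
  }

For the %struct.ray argument in the test below (two %struct.vec3, i.e. six
doubles, 48 bytes under the x86_64 data layout) this model gives
min(6, 8) * 2 = 12 instructions of bonus.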
diff --git a/llvm/test/Transforms/Inline/inline-byval-bonus.ll b/llvm/test/Transforms/Inline/inline-byval-bonus.ll
new file mode 100644
index 00000000000..f3ed819a7f3
--- /dev/null
+++ b/llvm/test/Transforms/Inline/inline-byval-bonus.ll
@@ -0,0 +1,193 @@
+; RUN: opt -S -inline -inline-threshold=275 < %s | FileCheck %s
+; PR13095
+
+; The performance of the c-ray benchmark largely depends on the inlining of a
+; specific call to @ray_sphere. This test case is designed to verify that it's
+; inlined at -O3.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+%struct.sphere = type { %struct.vec3, double, %struct.material, %struct.sphere* }
+%struct.vec3 = type { double, double, double }
+%struct.material = type { %struct.vec3, double, double }
+%struct.ray = type { %struct.vec3, %struct.vec3 }
+%struct.spoint = type { %struct.vec3, %struct.vec3, %struct.vec3, double }
+
+define i32 @caller(%struct.sphere* %i) {
+  %shadow_ray = alloca %struct.ray, align 8
+  call void @fix(%struct.ray* %shadow_ray)
+
+  %call = call i32 @ray_sphere(%struct.sphere* %i, %struct.ray* byval align 8 %shadow_ray, %struct.spoint* null)
+  ret i32 %call
+
+; CHECK: @caller
+; CHECK-NOT: call i32 @ray_sphere
+; CHECK: ret i32
+}
+
+declare void @fix(%struct.ray*)
+
+define i32 @ray_sphere(%struct.sphere* nocapture %sph, %struct.ray* nocapture byval align 8 %ray, %struct.spoint* %sp) nounwind uwtable ssp {
+  %1 = getelementptr inbounds %struct.ray* %ray, i64 0, i32 1, i32 0
+  %2 = load double* %1, align 8
+  %3 = fmul double %2, %2
+  %4 = getelementptr inbounds %struct.ray* %ray, i64 0, i32 1, i32 1
+  %5 = load double* %4, align 8
+  %6 = fmul double %5, %5
+  %7 = fadd double %3, %6
+  %8 = getelementptr inbounds %struct.ray* %ray, i64 0, i32 1, i32 2
+  %9 = load double* %8, align 8
+  %10 = fmul double %9, %9
+  %11 = fadd double %7, %10
+  %12 = fmul double %2, 2.000000e+00
+  %13 = getelementptr inbounds %struct.ray* %ray, i64 0, i32 0, i32 0
+  %14 = load double* %13, align 8
+  %15 = getelementptr inbounds %struct.sphere* %sph, i64 0, i32 0, i32 0
+  %16 = load double* %15, align 8
+  %17 = fsub double %14, %16
+  %18 = fmul double %12, %17
+  %19 = fmul double %5, 2.000000e+00
+  %20 = getelementptr inbounds %struct.ray* %ray, i64 0, i32 0, i32 1
+  %21 = load double* %20, align 8
+  %22 = getelementptr inbounds %struct.sphere* %sph, i64 0, i32 0, i32 1
+  %23 = load double* %22, align 8
+  %24 = fsub double %21, %23
+  %25 = fmul double %19, %24
+  %26 = fadd double %18, %25
+  %27 = fmul double %9, 2.000000e+00
+  %28 = getelementptr inbounds %struct.ray* %ray, i64 0, i32 0, i32 2
+  %29 = load double* %28, align 8
+  %30 = getelementptr inbounds %struct.sphere* %sph, i64 0, i32 0, i32 2
+  %31 = load double* %30, align 8
+  %32 = fsub double %29, %31
+  %33 = fmul double %27, %32
+  %34 = fadd double %26, %33
+  %35 = fmul double %16, %16
+  %36 = fmul double %23, %23
+  %37 = fadd double %35, %36
+  %38 = fmul double %31, %31
+  %39 = fadd double %37, %38
+  %40 = fmul double %14, %14
+  %41 = fadd double %40, %39
+  %42 = fmul double %21, %21
+  %43 = fadd double %42, %41
+  %44 = fmul double %29, %29
+  %45 = fadd double %44, %43
+  %46 = fsub double -0.000000e+00, %16
+  %47 = fmul double %14, %46
+  %48 = fmul double %21, %23
+  %49 = fsub double %47, %48
+  %50 = fmul double %29, %31
+  %51 = fsub double %49, %50
+  %52 = fmul double %51, 2.000000e+00
+  %53 = fadd double %52, %45
+  %54 = getelementptr inbounds %struct.sphere* %sph, i64 0, i32 1
+  %55 = load double* %54, align 8
+  %56 = fmul double %55, %55
+  %57 = fsub double %53, %56
+  %58 = fmul double %34, %34
+  %59 = fmul double %11, 4.000000e+00
+  %60 = fmul double %59, %57
+  %61 = fsub double %58, %60
+  %62 = fcmp olt double %61, 0.000000e+00
+  br i1 %62, label %130, label %63
+
+;
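As an aside for readers tracing the IR: the visible part of @ray_sphere
computes the discriminant of the standard ray-sphere intersection equation.
With o the ray origin, d the ray direction, c the sphere center and r its
radius, the values correspond to %11 = a, %34 = b, %57 = C and %61 = the
discriminant (this mapping is an annotation, not part of the test):

  a = d \cdot d, \qquad
  b = 2\, d \cdot (o - c), \qquad
  C = \lVert o - c \rVert^2 - r^2, \qquad
  \Delta = b^2 - 4aC

The branch on %62 takes the miss path (label %130) when \Delta < 0.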