author    | Paul Robinson <paul_robinson@playstation.sony.com> | 2014-11-03 18:19:26 +0000
committer | Paul Robinson <paul_robinson@playstation.sony.com> | 2014-11-03 18:19:26 +0000
commit    | ad06e430ce0f63ba197f78b58a67267d993e574f (patch)
tree      | e2eac6d81ced48405cd68607f81f07ccc883bf2b /llvm/test/Transforms/FunctionAttrs/optnone-simple.ll
parent    | 3d5a02f677262841d951257b928e49d18c460410 (diff)
Normally an 'optnone' function goes through fast-isel, which does not
call DAGCombiner. But we ran into a case (on Windows) where the
calling convention causes argument lowering to bail out of fast-isel,
and we end up in CodeGenAndEmitDAG() which does run DAGCombiner.
So, we need to make DAGCombiner check for 'optnone' after all.
Commit includes the test that found this, plus another one that got
missed in the original optnone work.
llvm-svn: 221168
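
For context only: the behavioral change described above is not part of the diff shown below, which is limited to the new test file. The underlying idea is that DAG combining should be skipped for functions carrying the 'optnone' attribute. A minimal sketch of such a check against the public LLVM C++ API follows; the helper name shouldSkipDAGCombine is hypothetical and is not the code this commit touched.

    // Hypothetical illustration only -- not the code changed by this commit.
    // In IR, 'optnone' corresponds to the OptimizeNone function attribute,
    // which a DAG-combine entry point could test before rewriting anything.
    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/Function.h"

    static bool shouldSkipDAGCombine(const llvm::Function &F) {
      // Bail out of combining when the function was compiled with 'optnone'.
      return F.hasFnAttribute(llvm::Attribute::OptimizeNone);
    }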
Diffstat (limited to 'llvm/test/Transforms/FunctionAttrs/optnone-simple.ll')
-rw-r--r-- | llvm/test/Transforms/FunctionAttrs/optnone-simple.ll | 135
1 file changed, 135 insertions, 0 deletions
diff --git a/llvm/test/Transforms/FunctionAttrs/optnone-simple.ll b/llvm/test/Transforms/FunctionAttrs/optnone-simple.ll
new file mode 100644
index 00000000000..9d0f8e3710a
--- /dev/null
+++ b/llvm/test/Transforms/FunctionAttrs/optnone-simple.ll
@@ -0,0 +1,135 @@
+; RUN: opt -O3 -S < %s | FileCheck %s
+; Show 'optnone' suppresses optimizations.
+
+; Two attribute groups that differ only by 'optnone'.
+; 'optnone' requires 'noinline' so #0 is 'noinline' by itself,
+; even though it would otherwise be irrelevant to this example.
+attributes #0 = { noinline }
+attributes #1 = { noinline optnone }
+
+; int iadd(int a, int b){ return a + b; }
+
+define i32 @iadd_optimize(i32 %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca i32, align 4
+  %b.addr = alloca i32, align 4
+  store i32 %a, i32* %a.addr, align 4
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load i32* %a.addr, align 4
+  %1 = load i32* %b.addr, align 4
+  %add = add nsw i32 %0, %1
+  ret i32 %add
+}
+
+; CHECK-LABEL: @iadd_optimize
+; CHECK-NOT: alloca
+; CHECK-NOT: store
+; CHECK-NOT: load
+; CHECK: ret
+
+define i32 @iadd_optnone(i32 %a, i32 %b) #1 {
+entry:
+  %a.addr = alloca i32, align 4
+  %b.addr = alloca i32, align 4
+  store i32 %a, i32* %a.addr, align 4
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load i32* %a.addr, align 4
+  %1 = load i32* %b.addr, align 4
+  %add = add nsw i32 %0, %1
+  ret i32 %add
+}
+
+; CHECK-LABEL: @iadd_optnone
+; CHECK: alloca i32
+; CHECK: alloca i32
+; CHECK: store i32
+; CHECK: store i32
+; CHECK: load i32
+; CHECK: load i32
+; CHECK: add nsw i32
+; CHECK: ret i32
+
+; float fsub(float a, float b){ return a - b; }
+
+define float @fsub_optimize(float %a, float %b) #0 {
+entry:
+  %a.addr = alloca float, align 4
+  %b.addr = alloca float, align 4
+  store float %a, float* %a.addr, align 4
+  store float %b, float* %b.addr, align 4
+  %0 = load float* %a.addr, align 4
+  %1 = load float* %b.addr, align 4
+  %sub = fsub float %0, %1
+  ret float %sub
+}
+
+; CHECK-LABEL: @fsub_optimize
+; CHECK-NOT: alloca
+; CHECK-NOT: store
+; CHECK-NOT: load
+; CHECK: ret
+
+define float @fsub_optnone(float %a, float %b) #1 {
+entry:
+  %a.addr = alloca float, align 4
+  %b.addr = alloca float, align 4
+  store float %a, float* %a.addr, align 4
+  store float %b, float* %b.addr, align 4
+  %0 = load float* %a.addr, align 4
+  %1 = load float* %b.addr, align 4
+  %sub = fsub float %0, %1
+  ret float %sub
+}
+
+; CHECK-LABEL: @fsub_optnone
+; CHECK: alloca float
+; CHECK: alloca float
+; CHECK: store float
+; CHECK: store float
+; CHECK: load float
+; CHECK: load float
+; CHECK: fsub float
+; CHECK: ret float
+
+; typedef float __attribute__((ext_vector_type(4))) float4;
+; float4 vmul(float4 a, float4 b){ return a * b; }
+
+define <4 x float> @vmul_optimize(<4 x float> %a, <4 x float> %b) #0 {
+entry:
+  %a.addr = alloca <4 x float>, align 16
+  %b.addr = alloca <4 x float>, align 16
+  store <4 x float> %a, <4 x float>* %a.addr, align 16
+  store <4 x float> %b, <4 x float>* %b.addr, align 16
+  %0 = load <4 x float>* %a.addr, align 16
+  %1 = load <4 x float>* %b.addr, align 16
+  %mul = fmul <4 x float> %0, %1
+  ret <4 x float> %mul
+}
+
+; CHECK-LABEL: @vmul_optimize
+; CHECK-NOT: alloca
+; CHECK-NOT: store
+; CHECK-NOT: load
+; CHECK: ret
+
+define <4 x float> @vmul_optnone(<4 x float> %a, <4 x float> %b) #1 {
+entry:
+  %a.addr = alloca <4 x float>, align 16
+  %b.addr = alloca <4 x float>, align 16
+  store <4 x float> %a, <4 x float>* %a.addr, align 16
+  store <4 x float> %b, <4 x float>* %b.addr, align 16
+  %0 = load <4 x float>* %a.addr, align 16
+  %1 = load <4 x float>* %b.addr, align 16
+  %mul = fmul <4 x float> %0, %1
+  ret <4 x float> %mul
+}
+
+; CHECK-LABEL: @vmul_optnone
+; CHECK: alloca <4 x float>
+; CHECK: alloca <4 x float>
+; CHECK: store <4 x float>
+; CHECK: store <4 x float>
+; CHECK: load <4 x float>
+; CHECK: load <4 x float>
+; CHECK: fmul <4 x float>
+; CHECK: ret