| author | Gor Nishanov <GorNishanov@gmail.com> | 2017-05-16 14:11:39 +0000 |
|---|---|---|
| committer | Gor Nishanov <GorNishanov@gmail.com> | 2017-05-16 14:11:39 +0000 |
| commit | 23453c11ff50632364fd617c9c9f2a1693621b8d (patch) | |
| tree | 6162034164c3f2fba426a1d50c2d6c088d225a06 /llvm/test/Transforms/Coroutines | |
| parent | 41e656768d2b6c7f3d27119d862b7eaf9055f3c7 (diff) | |
[coroutines] Handle unwind edge splitting
Summary:
The RewritePHIs algorithm used in building the CoroFrame inserts a placeholder
```
%placeholder = phi [%val]
```
on every edge leading to a block that starts with a PHI node with multiple incoming edges,
so that if one of the incoming values was spilled and needs to be reloaded, we have a
place to insert the reload.
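For illustration, a minimal sketch of that placeholder insertion on ordinary branch
edges (the function, block, and value names here are made up for the example and are
not taken from the patch or the test below):
```
; Before: %merge begins with a PHI that has two incoming edges.
define i32 @example(i1 %c, i32 %a, i32 %b) {
entry:
  br i1 %c, label %bb1, label %bb2
bb1:
  br label %merge
bb2:
  br label %merge
merge:
  %v = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
  ret i32 %v
}

; After RewritePHIs: each incoming edge is split into its own block holding a
; single-incoming placeholder phi, so a reload from the coroutine frame can be
; inserted there if %a or %b ends up spilled.
define i32 @example.split(i1 %c, i32 %a, i32 %b) {
entry:
  br i1 %c, label %bb1, label %bb2
bb1:
  br label %merge.from.bb1
bb2:
  br label %merge.from.bb2
merge.from.bb1:
  %a.placeholder = phi i32 [ %a, %bb1 ]
  br label %merge
merge.from.bb2:
  %b.placeholder = phi i32 [ %b, %bb2 ]
  br label %merge
merge:
  %v = phi i32 [ %a.placeholder, %merge.from.bb1 ], [ %b.placeholder, %merge.from.bb2 ]
  ret i32 %v
}
```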
We use the SplitEdge helper function to split the incoming edge, but SplitEdge does not deal with unwind edges coming into a block with an EHPad.
This patch adds an ehAwareSplitEdge function that can correctly split the unwind edge.
For landing pads, we clone the landing pad into every edge block and replace the original
landing pad with a PHI collecting the values from all incoming landing pads.
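Condensed into a standalone (non-coroutine) sketch, the landing-pad shape mirrors what
the @f test added below checks; the frame reloads are omitted and the function name is
made up:
```
define void @lp_shape_sketch(i1 %c) personality i32 0 {
entry:
  br i1 %c, label %invoke1, label %invoke2
invoke1:
  invoke void @may_throw1() to label %exit unwind label %pad.from.invoke1
invoke2:
  invoke void @may_throw2() to label %exit unwind label %pad.from.invoke2

; Each unwind edge gets its own block containing a clone of the landing pad.
pad.from.invoke1:
  %lp1 = landingpad { i8*, i32 }
          catch i8* null
  br label %pad
pad.from.invoke2:
  %lp2 = landingpad { i8*, i32 }
          catch i8* null
  br label %pad

; The original landing pad is replaced by a PHI collecting the cloned values;
; %lp would feed __cxa_begin_catch and friends in the real test.
pad:
  %val = phi i32 [ 0, %pad.from.invoke1 ], [ 1, %pad.from.invoke2 ]
  %lp = phi { i8*, i32 } [ %lp1, %pad.from.invoke1 ], [ %lp2, %pad.from.invoke2 ]
  call void @use_val(i32 %val)
  br label %exit

exit:
  ret void
}

declare void @may_throw1()
declare void @may_throw2()
declare void @use_val(i32)
```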
For WinEH pads, we keep the original EHPad in place and insert a cleanuppad/cleanupret pair
in the edge blocks.
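A similarly condensed sketch of the WinEH shape (mirroring the @g test added below, with
the frame reloads omitted and a made-up function name): the original cleanup pad and its
PHI stay put, and each edge block only funnels into it.
```
define void @wineh_shape_sketch(i1 %c, i32 %x, i32 %y) personality i32 0 {
entry:
  br i1 %c, label %invoke1, label %invoke2
invoke1:
  invoke void @may_throw1() to label %exit unwind label %pad.from.invoke1
invoke2:
  invoke void @may_throw2() to label %exit unwind label %pad.from.invoke2

; Each unwind edge gets a cleanuppad/cleanupret pair; a reload of a spilled
; value would be inserted between them.
pad.from.invoke1:
  %cp1 = cleanuppad within none []
  cleanupret from %cp1 unwind label %pad
pad.from.invoke2:
  %cp2 = cleanuppad within none []
  cleanupret from %cp2 unwind label %pad

; The original EH pad and its PHI stay in place.
pad:
  %val = phi i32 [ %x, %pad.from.invoke1 ], [ %y, %pad.from.invoke2 ]
  %tok = cleanuppad within none []
  call void @use_val(i32 %val)
  cleanupret from %tok unwind to caller

exit:
  ret void
}

declare void @may_throw1()
declare void @may_throw2()
declare void @use_val(i32)
```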
Reviewers: majnemer, rnk
Reviewed By: majnemer
Subscribers: EricWF, llvm-commits
Differential Revision: https://reviews.llvm.org/D31845
llvm-svn: 303172
Diffstat (limited to 'llvm/test/Transforms/Coroutines')
| -rw-r--r-- | llvm/test/Transforms/Coroutines/coro-eh-aware-edge-split.ll | 218 |
1 file changed, 218 insertions, 0 deletions
diff --git a/llvm/test/Transforms/Coroutines/coro-eh-aware-edge-split.ll b/llvm/test/Transforms/Coroutines/coro-eh-aware-edge-split.ll
new file mode 100644
index 00000000000..5da0e3c199d
--- /dev/null
+++ b/llvm/test/Transforms/Coroutines/coro-eh-aware-edge-split.ll
@@ -0,0 +1,218 @@
+; Check that we can handle edge splits leading into a landingpad
+; RUN: opt < %s -coro-split -S | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; CHECK-LABEL: define internal fastcc void @f.resume(
+define void @f(i1 %cond) "coroutine.presplit"="1" personality i32 0 {
+entry:
+  %id = call token @llvm.coro.id(i32 16, i8* null, i8* null, i8* null)
+  %size = tail call i64 @llvm.coro.size.i64()
+  %alloc = call i8* @malloc(i64 %size)
+  %hdl = call i8* @llvm.coro.begin(token %id, i8* %alloc)
+  %sp = call i8 @llvm.coro.suspend(token none, i1 false)
+  switch i8 %sp, label %coro.ret [
+    i8 0, label %resume
+    i8 1, label %cleanup
+  ]
+
+resume:
+  br i1 %cond, label %invoke1, label %invoke2
+
+invoke1:
+  invoke void @may_throw1()
+    to label %unreach unwind label %pad.with.phi
+invoke2:
+  invoke void @may_throw2()
+    to label %unreach unwind label %pad.with.phi
+
+; Verify that we cloned landing pad on every edge and inserted a reload of the spilled value
+
+; CHECK: pad.with.phi.from.invoke2:
+; CHECK: %0 = landingpad { i8*, i32 }
+; CHECK: catch i8* null
+; CHECK: br label %pad.with.phi
+
+; CHECK: pad.with.phi.from.invoke1:
+; CHECK: %1 = landingpad { i8*, i32 }
+; CHECK: catch i8* null
+; CHECK: br label %pad.with.phi
+
+; CHECK: pad.with.phi:
+; CHECK: %val = phi i32 [ 0, %pad.with.phi.from.invoke1 ], [ 1, %pad.with.phi.from.invoke2 ]
+; CHECK: %lp = phi { i8*, i32 } [ %0, %pad.with.phi.from.invoke2 ], [ %1, %pad.with.phi.from.invoke1 ]
+; CHECK: %exn = extractvalue { i8*, i32 } %lp, 0
+; CHECK: call i8* @__cxa_begin_catch(i8* %exn)
+; CHECK: call void @use_val(i32 %val)
+; CHECK: call void @__cxa_end_catch()
+; CHECK: call void @free(i8* %vFrame)
+; CHECK: ret void
+
+pad.with.phi:
+  %val = phi i32 [ 0, %invoke1 ], [ 1, %invoke2 ]
+  %lp = landingpad { i8*, i32 }
+          catch i8* null
+  %exn = extractvalue { i8*, i32 } %lp, 0
+  call i8* @__cxa_begin_catch(i8* %exn)
+  call void @use_val(i32 %val)
+  call void @__cxa_end_catch()
+  br label %cleanup
+
+cleanup: ; preds = %invoke.cont15, %if.else, %if.then, %ehcleanup21, %init.suspend
+  %mem = call i8* @llvm.coro.free(token %id, i8* %hdl)
+  call void @free(i8* %mem)
+  br label %coro.ret
+
+coro.ret:
+  call i1 @llvm.coro.end(i8* null, i1 false)
+  ret void
+
+unreach:
+  unreachable
+}
+
+; CHECK-LABEL: define internal fastcc void @g.resume(
+define void @g(i1 %cond, i32 %x, i32 %y) "coroutine.presplit"="1" personality i32 0 {
+entry:
+  %id = call token @llvm.coro.id(i32 16, i8* null, i8* null, i8* null)
+  %size = tail call i64 @llvm.coro.size.i64()
+  %alloc = call i8* @malloc(i64 %size)
+  %hdl = call i8* @llvm.coro.begin(token %id, i8* %alloc)
+  %sp = call i8 @llvm.coro.suspend(token none, i1 false)
+  switch i8 %sp, label %coro.ret [
+    i8 0, label %resume
+    i8 1, label %cleanup
+  ]
+
+resume:
+  br i1 %cond, label %invoke1, label %invoke2
+
+invoke1:
+  invoke void @may_throw1()
+    to label %unreach unwind label %pad.with.phi
+invoke2:
+  invoke void @may_throw2()
+    to label %unreach unwind label %pad.with.phi
+
+; Verify that we created cleanuppads on every edge and inserted a reload of the spilled value
+
+; CHECK: pad.with.phi.from.invoke2:
+; CHECK: %0 = cleanuppad within none []
+; CHECK: %y.reload.addr = getelementptr inbounds %g.Frame, %g.Frame* %FramePtr, i32 0, i32 6
+; CHECK: %y.reload = load i32, i32* %y.reload.addr
+; CHECK: cleanupret from %0 unwind label %pad.with.phi
+
+; CHECK: pad.with.phi.from.invoke1:
+; CHECK: %1 = cleanuppad within none []
+; CHECK: %x.reload.addr = getelementptr inbounds %g.Frame, %g.Frame* %FramePtr, i32 0, i32 5
+; CHECK: %x.reload = load i32, i32* %x.reload.addr
+; CHECK: cleanupret from %1 unwind label %pad.with.phi
+
+; CHECK: pad.with.phi:
+; CHECK: %val = phi i32 [ %x.reload, %pad.with.phi.from.invoke1 ], [ %y.reload, %pad.with.phi.from.invoke2 ]
+; CHECK: %tok = cleanuppad within none []
+; CHECK: call void @use_val(i32 %val)
+; CHECK: cleanupret from %tok unwind to caller
+
+pad.with.phi:
+  %val = phi i32 [ %x, %invoke1 ], [ %y, %invoke2 ]
+  %tok = cleanuppad within none []
+  call void @use_val(i32 %val)
+  cleanupret from %tok unwind to caller
+
+cleanup: ; preds = %invoke.cont15, %if.else, %if.then, %ehcleanup21, %init.suspend
+  %mem = call i8* @llvm.coro.free(token %id, i8* %hdl)
+  call void @free(i8* %mem)
+  br label %coro.ret
+
+coro.ret:
+  call i1 @llvm.coro.end(i8* null, i1 false)
+  ret void
+
+unreach:
+  unreachable
+}
+
+; CHECK-LABEL: define internal fastcc void @h.resume(
+define void @h(i1 %cond, i32 %x, i32 %y) "coroutine.presplit"="1" personality i32 0 {
+entry:
+  %id = call token @llvm.coro.id(i32 16, i8* null, i8* null, i8* null)
+  %size = tail call i64 @llvm.coro.size.i64()
+  %alloc = call i8* @malloc(i64 %size)
+  %hdl = call i8* @llvm.coro.begin(token %id, i8* %alloc)
+  %sp = call i8 @llvm.coro.suspend(token none, i1 false)
+  switch i8 %sp, label %coro.ret [
+    i8 0, label %resume
+    i8 1, label %cleanup
+  ]
+
+resume:
+  br i1 %cond, label %invoke1, label %invoke2
+
+invoke1:
+  invoke void @may_throw1()
+    to label %coro.ret unwind label %pad.with.phi
+invoke2:
+  invoke void @may_throw2()
+    to label %coro.ret unwind label %pad.with.phi
+
+; Verify that we created cleanuppads on every edge and inserted a reload of the spilled value
+
+; CHECK: pad.with.phi.from.invoke2:
+; CHECK: %0 = cleanuppad within none []
+; CHECK: %y.reload.addr = getelementptr inbounds %h.Frame, %h.Frame* %FramePtr, i32 0, i32 6
+; CHECK: %y.reload = load i32, i32* %y.reload.addr
+; CHECK: cleanupret from %0 unwind label %pad.with.phi
+
+; CHECK: pad.with.phi.from.invoke1:
+; CHECK: %1 = cleanuppad within none []
+; CHECK: %x.reload.addr = getelementptr inbounds %h.Frame, %h.Frame* %FramePtr, i32 0, i32 5
+; CHECK: %x.reload = load i32, i32* %x.reload.addr
+; CHECK: cleanupret from %1 unwind label %pad.with.phi
+
+; CHECK: pad.with.phi:
+; CHECK: %val = phi i32 [ %x.reload, %pad.with.phi.from.invoke1 ], [ %y.reload, %pad.with.phi.from.invoke2 ]
+; CHECK: %switch = catchswitch within none [label %catch] unwind to caller
+pad.with.phi:
+  %val = phi i32 [ %x, %invoke1 ], [ %y, %invoke2 ]
+  %switch = catchswitch within none [label %catch] unwind to caller
+
+catch: ; preds = %catch.dispatch
+  %pad = catchpad within %switch [i8* null, i32 64, i8* null]
+  call void @use_val(i32 %val)
+  catchret from %pad to label %coro.ret
+
+cleanup: ; preds = %invoke.cont15, %if.else, %if.then, %ehcleanup21, %init.suspend
+  %mem = call i8* @llvm.coro.free(token %id, i8* %hdl)
+  call void @free(i8* %mem)
+  br label %coro.ret
+
+coro.ret:
+  call i1 @llvm.coro.end(i8* null, i1 false)
+  ret void
+}
+
+; Function Attrs: argmemonly nounwind readonly
+declare token @llvm.coro.id(i32, i8* readnone, i8* nocapture readonly, i8*)
+declare noalias i8* @malloc(i64)
+declare i64 @llvm.coro.size.i64()
+declare i8* @llvm.coro.begin(token, i8* writeonly)
+
+; Function Attrs: nounwind
+declare token @llvm.coro.save(i8*)
+declare i8 @llvm.coro.suspend(token, i1)
+
+; Function Attrs: argmemonly nounwind
+declare void @may_throw1()
+declare void @may_throw2()
+
+declare i8* @__cxa_begin_catch(i8*)
+
+declare void @use_val(i32)
+declare void @__cxa_end_catch()
+
+; Function Attrs: nounwind
+declare i1 @llvm.coro.end(i8*, i1)
+declare void @free(i8*)
+declare i8* @llvm.coro.free(token, i8* nocapture readonly)

