author     Rusty Russell <rusty@rustcorp.com.au>    2007-10-22 11:03:30 +1000
committer  Rusty Russell <rusty@rustcorp.com.au>    2007-10-23 15:49:52 +1000
commit     cc6d4fbcef328acdc9fa7023e69f39f753f72fe1
tree       860672e7da1a3516e36dd40f962552451ef0bcf2
parent     4614a3a3b638dfd7a67d0237944f6a76331af61d
Introduce "hcall" pointer to indicate pending hypercall.
Currently we look at the "trapnum" to see if the Guest wants a hypercall. But once the hypercall is done we have to reset trapnum to a bogus value, otherwise if we exit to userspace and return, we'd run the same hypercall twice (that was a nasty bug to find!).

This has two main effects:

1) When Jes's patch changes the hypercall args to be a generic "struct hcall_args" we simply change the type of "lg->hcall". It's set by arch code, so if it has to copy args or something it can do so, and point "hcall" into lg->arch somewhere.

2) Async hypercalls only get run when an actual hypercall is pending. This simplifies the code a little and is a more logical semantic.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
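To make the flow concrete, here is a small standalone C sketch of the pattern this patch introduces. It is not the kernel code: names such as guest_state, run_guest_once and do_hypercall, and the exact fields, are simplified stand-ins. The trap path records where the hypercall arguments live, the run loop only dispatches when that pointer is set, and the pointer is cleared once the call has run, so leaving and re-entering the loop cannot replay it:

/* Standalone sketch of the "pending hypercall pointer" idea; names and
 * fields are illustrative, not the lguest implementation. */
#include <stdio.h>
#include <stddef.h>

struct guest_regs { unsigned long eax; };

struct guest_state {
	struct guest_regs regs;
	struct guest_regs *hcall;	/* non-NULL => a hypercall is pending */
	int dma_is_pending;		/* forces an early return to the "Launcher" */
};

static void do_hypercall(struct guest_state *g, struct guest_regs *args)
{
	printf("guest %p: hypercall %lu executed\n", (void *)g, args->eax);
}

/* Stand-in for the LGUEST_TRAP_ENTRY case: remember where the args are. */
static void handle_trap(struct guest_state *g, unsigned long hcall_nr)
{
	g->regs.eax = hcall_nr;
	g->hcall = &g->regs;
}

static void run_guest_once(struct guest_state *g)
{
	/* First run any hypercall the Guest asked for. */
	if (g->hcall) {
		do_hypercall(g, g->hcall);
		/* Mark it done; without this, returning to userspace and
		 * re-entering the loop would run the same hypercall twice. */
		g->hcall = NULL;
	}
	if (g->dma_is_pending)
		return;			/* back to the Launcher's read() */
	/* ... otherwise run the Guest and handle its next trap ... */
}

int main(void)
{
	struct guest_state g = { 0 };

	handle_trap(&g, 7);		/* Guest issues hypercall 7 */
	g.dma_is_pending = 1;		/* loop exits early, as after SEND_DMA */
	run_guest_once(&g);		/* runs hypercall 7 exactly once */
	g.dma_is_pending = 0;
	run_guest_once(&g);		/* nothing pending: no replay */
	return 0;
}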
Diffstat (limited to 'drivers/lguest')
-rw-r--r--  drivers/lguest/core.c        |  8
-rw-r--r--  drivers/lguest/hypercalls.c  | 48
-rw-r--r--  drivers/lguest/lg.h          |  3
-rw-r--r--  drivers/lguest/x86/core.c    | 13
4 files changed, 34 insertions, 38 deletions
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 06869a2d3b40..02556bae9e9f 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -198,10 +198,10 @@ int run_guest(struct lguest *lg, unsigned long __user *user)
{
/* We stop running once the Guest is dead. */
while (!lg->dead) {
- /* First we run any hypercalls the Guest wants done: either in
- * the hypercall ring in "struct lguest_data", or directly by
- * using int 31 (LGUEST_TRAP_ENTRY). */
- do_hypercalls(lg);
+ /* First we run any hypercalls the Guest wants done. */
+ if (lg->hcall)
+ do_hypercalls(lg);
+
/* It's possible the Guest did a SEND_DMA hypercall to the
* Launcher, in which case we return from the read() now. */
if (lg->dma_is_pending) {
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index 8bde20934f91..0175a9f03347 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -241,19 +241,6 @@ static void initialize(struct lguest *lg)
* is one other way we can do things for the Guest, as we see in
* emulate_insn(). */
-/*H:110 Tricky point: we mark the hypercall as "done" once we've done it.
- * Normally we don't need to do this: the Guest will run again and update the
- * trap number before we come back around the run_guest() loop to
- * do_hypercalls().
- *
- * However, if we are signalled or the Guest sends DMA to the Launcher, that
- * loop will exit without running the Guest. When it comes back it would try
- * to re-run the hypercall. */
-static void clear_hcall(struct lguest *lg)
-{
- lg->regs->trapnum = 255;
-}
-
/*H:100
* Hypercalls
*
@@ -262,16 +249,12 @@ static void clear_hcall(struct lguest *lg)
*/
void do_hypercalls(struct lguest *lg)
{
- /* Not initialized yet? */
+ /* Not initialized yet? This hypercall must do it. */
if (unlikely(!lg->lguest_data)) {
- /* Did the Guest make a hypercall? We might have come back for
- * some other reason (an interrupt, a different trap). */
- if (lg->regs->trapnum == LGUEST_TRAP_ENTRY) {
- /* Set up the "struct lguest_data" */
- initialize(lg);
- /* The hypercall is done. */
- clear_hcall(lg);
- }
+ /* Set up the "struct lguest_data" */
+ initialize(lg);
+ /* Hcall is done. */
+ lg->hcall = NULL;
return;
}
@@ -281,12 +264,21 @@ void do_hypercalls(struct lguest *lg)
do_async_hcalls(lg);
/* If we stopped reading the hypercall ring because the Guest did a
- * SEND_DMA to the Launcher, we want to return now. Otherwise if the
- * Guest asked us to do a hypercall, we do it. */
- if (!lg->dma_is_pending && lg->regs->trapnum == LGUEST_TRAP_ENTRY) {
- do_hcall(lg, lg->regs);
- /* The hypercall is done. */
- clear_hcall(lg);
+ * SEND_DMA to the Launcher, we want to return now. Otherwise we do
+ * the hypercall. */
+ if (!lg->dma_is_pending) {
+ do_hcall(lg, lg->hcall);
+ /* Tricky point: we reset the hcall pointer to mark the
+ * hypercall as "done". We use the hcall pointer rather than
+ * the trap number to indicate a hypercall is pending.
+ * Normally it doesn't matter: the Guest will run again and
+ * update the trap number before we come back here.
+ *
+ * However, if we are signalled or the Guest sends DMA to the
+ * Launcher, the run_guest() loop will exit without running the
+ * Guest. When it comes back it would try to re-run the
+ * hypercall. */
+ lg->hcall = NULL;
}
}
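The resulting do_hypercalls() can also be read as a compressed, standalone sketch (not the kernel source: initialize(), do_async_hcalls() and do_hcall() are stubs here, and the fields are simplified). The uninitialized case treats the first hypercall as the one that sets up "struct lguest_data"; afterwards the async ring is drained, and the pending hypercall is run and then cleared:

/* Illustrative rendering of the do_hypercalls() flow after this patch;
 * helpers are stubs and field names are simplified assumptions. */
#include <stdio.h>
#include <stddef.h>

struct regs { unsigned long eax; };
struct guest {
	int lguest_data;		/* stands in for the lguest_data mapping */
	int dma_is_pending;
	struct regs regs;
	struct regs *hcall;		/* non-NULL while a hypercall is pending */
};

static void initialize(struct guest *g)      { g->lguest_data = 1; }
static void do_async_hcalls(struct guest *g) { (void)g; /* drain the ring */ }
static void do_hcall(struct guest *g, struct regs *args)
{
	(void)g;
	printf("run hypercall %lu\n", args->eax);
}

/* Only called when g->hcall is set, mirroring the run_guest() check. */
static void do_hypercalls(struct guest *g)
{
	/* Not initialized yet?  This first hypercall must do it. */
	if (!g->lguest_data) {
		initialize(g);
		g->hcall = NULL;	/* hcall is done */
		return;
	}

	/* The Guest is up: handle the async hypercall ring first. */
	do_async_hcalls(g);

	/* If SEND_DMA stopped us, return to the Launcher now; otherwise run
	 * the pending hypercall and clear the pointer so it cannot replay. */
	if (!g->dma_is_pending) {
		do_hcall(g, g->hcall);
		g->hcall = NULL;
	}
}

int main(void)
{
	struct guest g = { .regs = { .eax = 1 } };

	g.hcall = &g.regs;
	do_hypercalls(&g);		/* first call: initialize() */
	g.regs.eax = 5;
	g.hcall = &g.regs;
	do_hypercalls(&g);		/* later call: runs hypercall 5 once */
	return 0;
}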
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 203d3100c3b4..662994b776cc 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -106,6 +106,9 @@ struct lguest
u32 esp1;
u8 ss1;
+ /* If a hypercall was asked for, this points to the arguments. */
+ struct lguest_regs *hcall;
+
/* Do we need to stop what we're doing and return to userspace? */
int break_out;
wait_queue_head_t break_wq;
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index e2f46b16ce31..0cc251cbc72a 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -316,13 +316,14 @@ void lguest_arch_handle_trap(struct lguest *lg)
return;
break;
case 32 ... 255:
- /* These values mean a real interrupt occurred, in
- * which case the Host handler has already been run.
- * We just do a friendly check if another process
- * should now be run, then fall through to loop
- * around: */
+ /* These values mean a real interrupt occurred, in which case
+ * the Host handler has already been run. We just do a
+ * friendly check if another process should now be run, then
+ * return to run the Guest again */
cond_resched();
- case LGUEST_TRAP_ENTRY: /* Handled before re-entering Guest */
+ return;
+ case LGUEST_TRAP_ENTRY:
+ lg->hcall = lg->regs;
return;
}
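For the producer side, a similarly hedged sketch of the dispatch in the x86 hunk above: real interrupts only get a scheduler check, while LGUEST_TRAP_ENTRY (int 31, per the comment removed from core.c) records the pending hypercall by pointing "hcall" at the register frame for run_guest() to consume. sched_yield() stands in for cond_resched(), and the struct layout is an assumption for the example:

/* Standalone sketch of the arch-side trap dispatch; not the kernel code. */
#include <stdio.h>
#include <sched.h>

#define LGUEST_TRAP_ENTRY 0x1f		/* int 31 */

struct regs { unsigned long trapnum, eax; };
struct guest { struct regs regs; struct regs *hcall; };

static void handle_trap(struct guest *g)
{
	switch (g->regs.trapnum) {
	case 32 ... 255:		/* real interrupt: Host already ran its handler */
		sched_yield();		/* stand-in for cond_resched() */
		return;
	case LGUEST_TRAP_ENTRY:		/* Guest wants a hypercall */
		g->hcall = &g->regs;	/* run_guest() will notice and run it */
		return;
	default:
		printf("unhandled trap %lu\n", g->regs.trapnum);
	}
}

int main(void)
{
	struct guest g = { .regs = { .trapnum = LGUEST_TRAP_ENTRY, .eax = 3 } };

	handle_trap(&g);
	printf("hypercall pending: %s\n", g.hcall ? "yes" : "no");
	return 0;
}

Because the pointer itself doubles as the "pending" flag, there is no bogus sentinel value (the old trapnum = 255) left to forget to reset.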