From af585b921e5d1e919947c4b1164b59507fe7cd7b Mon Sep 17 00:00:00 2001
From: Gleb Natapov
Date: Thu, 14 Oct 2010 11:22:46 +0200
Subject: KVM: Halt vcpu if page it tries to access is swapped out

If a guest accesses swapped-out memory, do not swap it in from vcpu
thread context. Schedule work to do the swapping and put the vcpu into
halted state instead.

Interrupts will still be delivered to the guest, and if an interrupt
causes a reschedule, the guest will continue to run another task.

[avi: remove call to get_user_pages_noio(), nacked by Linus; this
 makes everything synchronous again]

Acked-by: Rik van Riel
Signed-off-by: Gleb Natapov
Signed-off-by: Marcelo Tosatti
---
 include/trace/events/kvm.h | 90 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 90 insertions(+)

(limited to 'include/trace')

diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 6dd3a51ab1cb..a78a5e574632 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -185,6 +185,96 @@ TRACE_EVENT(kvm_age_page,
 		__entry->referenced ? "YOUNG" : "OLD")
 );
 
+#ifdef CONFIG_KVM_ASYNC_PF
+TRACE_EVENT(
+	kvm_try_async_get_page,
+	TP_PROTO(bool async, u64 pfn),
+	TP_ARGS(async, pfn),
+
+	TP_STRUCT__entry(
+		__field(__u64, pfn)
+		),
+
+	TP_fast_assign(
+		__entry->pfn = (!async) ? pfn : (u64)-1;
+		),
+
+	TP_printk("pfn %#llx", __entry->pfn)
+);
+
+TRACE_EVENT(
+	kvm_async_pf_not_present,
+	TP_PROTO(u64 gva),
+	TP_ARGS(gva),
+
+	TP_STRUCT__entry(
+		__field(__u64, gva)
+		),
+
+	TP_fast_assign(
+		__entry->gva = gva;
+		),
+
+	TP_printk("gva %#llx not present", __entry->gva)
+);
+
+TRACE_EVENT(
+	kvm_async_pf_ready,
+	TP_PROTO(u64 gva),
+	TP_ARGS(gva),
+
+	TP_STRUCT__entry(
+		__field(__u64, gva)
+		),
+
+	TP_fast_assign(
+		__entry->gva = gva;
+		),
+
+	TP_printk("gva %#llx ready", __entry->gva)
+);
+
+TRACE_EVENT(
+	kvm_async_pf_completed,
+	TP_PROTO(unsigned long address, struct page *page, u64 gva),
+	TP_ARGS(address, page, gva),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, address)
+		__field(pfn_t, pfn)
+		__field(u64, gva)
+		),
+
+	TP_fast_assign(
+		__entry->address = address;
+		__entry->pfn = page ? page_to_pfn(page) : 0;
+		__entry->gva = gva;
+		),
+
+	TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
+		  __entry->address, __entry->pfn)
+);
+
+TRACE_EVENT(
+	kvm_async_pf_doublefault,
+	TP_PROTO(u64 gva, u64 gfn),
+	TP_ARGS(gva, gfn),
+
+	TP_STRUCT__entry(
+		__field(u64, gva)
+		__field(u64, gfn)
+		),
+
+	TP_fast_assign(
+		__entry->gva = gva;
+		__entry->gfn = gfn;
+		),
+
+	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
+);
+
+#endif
+
 #endif /* _TRACE_KVM_MAIN_H */
 
 /* This part must be outside protection */
--
cgit v1.2.1
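Each TRACE_EVENT() above expands into a trace_kvm_*() inline helper once a single translation unit defines CREATE_TRACE_POINTS before including the header; every other file just includes the header and calls the helper. A minimal sketch of that wiring, assuming a hypothetical caller (the real KVM call sites are outside this include/trace diff):

#include <linux/kvm_host.h>

#define CREATE_TRACE_POINTS	/* emit the tracepoint bodies exactly once */
#include <trace/events/kvm.h>

/* Hypothetical helper: report that the guest touched a swapped-out page. */
static void example_report_not_present(u64 gva)
{
	/* Matches TP_PROTO(u64 gva) as defined in this patch. */
	trace_kvm_async_pf_not_present(gva);
}
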
From 7c90705bf2a373aa238661bdb6446f27299ef489 Mon Sep 17 00:00:00 2001
From: Gleb Natapov
Date: Thu, 14 Oct 2010 11:22:53 +0200
Subject: KVM: Inject asynchronous page fault into a PV guest if page is swapped out.

Send an async page fault to a PV guest if it accesses swapped-out
memory. The guest will choose another task to run upon receiving the
fault.

Allow async page fault injection only when the guest is in user mode,
since otherwise the guest may be in a non-sleepable context and will
not be able to reschedule.

The vcpu will be halted if the guest faults on the same page again or
if the vcpu executes kernel code.

Acked-by: Rik van Riel
Signed-off-by: Gleb Natapov
Signed-off-by: Marcelo Tosatti
---
 include/trace/events/kvm.h | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

(limited to 'include/trace')

diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index a78a5e574632..9c2cc6a96e82 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -204,34 +204,39 @@ TRACE_EVENT(
 	kvm_async_pf_not_present,
-	TP_PROTO(u64 gva),
-	TP_ARGS(gva),
+	TP_PROTO(u64 token, u64 gva),
+	TP_ARGS(token, gva),
 
 	TP_STRUCT__entry(
+		__field(__u64, token)
 		__field(__u64, gva)
 		),
 
 	TP_fast_assign(
+		__entry->token = token;
 		__entry->gva = gva;
 		),
 
-	TP_printk("gva %#llx not present", __entry->gva)
+	TP_printk("token %#llx gva %#llx not present", __entry->token,
+		  __entry->gva)
 );
 
 TRACE_EVENT(
 	kvm_async_pf_ready,
-	TP_PROTO(u64 gva),
-	TP_ARGS(gva),
+	TP_PROTO(u64 token, u64 gva),
+	TP_ARGS(token, gva),
 
 	TP_STRUCT__entry(
+		__field(__u64, token)
 		__field(__u64, gva)
 		),
 
 	TP_fast_assign(
+		__entry->token = token;
 		__entry->gva = gva;
 		),
 
-	TP_printk("gva %#llx ready", __entry->gva)
+	TP_printk("token %#llx gva %#llx ready", __entry->token, __entry->gva)
 );
 
 TRACE_EVENT(
--
cgit v1.2.1

From 64be5007066173d11a4635eedd57d41a3b3a7027 Mon Sep 17 00:00:00 2001
From: Gleb Natapov
Date: Sun, 24 Oct 2010 16:49:08 +0200
Subject: KVM: x86: trace "exit to userspace" event

Add a tracepoint for userspace exit.

Signed-off-by: Gleb Natapov
Signed-off-by: Marcelo Tosatti
---
 include/trace/events/kvm.h | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)

(limited to 'include/trace')

diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 9c2cc6a96e82..c86f4e8e0bc9 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -6,6 +6,36 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm
 
+#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
+
+#define kvm_trace_exit_reason \
+	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \
+	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
+	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
+	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
+	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
+
+TRACE_EVENT(kvm_userspace_exit,
+	    TP_PROTO(__u32 reason, int errno),
+	    TP_ARGS(reason, errno),
+
+	TP_STRUCT__entry(
+		__field( __u32, reason )
+		__field( int,   errno  )
+	),
+
+	TP_fast_assign(
+		__entry->reason = reason;
+		__entry->errno  = errno;
+	),
+
+	TP_printk("reason %s (%d)",
+		  __entry->errno < 0 ?
+		  (__entry->errno == -EINTR ? "restart" : "error") :
+		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
+		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
+);
+
 #if defined(__KVM_HAVE_IOAPIC)
 TRACE_EVENT(kvm_set_irq,
 	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
--
cgit v1.2.1
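kvm_userspace_exit records why KVM returned to userspace: __print_symbolic() and the kvm_trace_exit_reason list turn the numeric reason into its KVM_EXIT_* name, while a negative errno is reported as "error" (or "restart" for -EINTR) together with the absolute error value. A hedged sketch of how such a tracepoint would be fired at the end of the vcpu-run ioctl path; the wrapper below is illustrative, and the real call site is outside the include/trace diff shown here:

#include <linux/kvm_host.h>
#include <trace/events/kvm.h>

/* Hypothetical wrapper: trace the reason for returning to userspace. */
static int example_finish_vcpu_run(struct kvm_vcpu *vcpu, int r)
{
	/*
	 * r < 0 prints "error"/"restart" plus -r; otherwise the exit
	 * reason is symbolized via kvm_trace_exit_reason.
	 */
	trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
	return r;
}
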
From c9b263d2be9c535b410f6617710534f798bf0ff0 Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Mon, 1 Nov 2010 16:58:43 +0800
Subject: KVM: fix tracing kvm_try_async_get_page

Tracing 'async' and *pfn is useless, since 'async' is always true and
'*pfn' is always 'fault_pfn'.

Trace 'gva' and 'gfn' instead; that lets us see the life cycle of an
async_pf.

Signed-off-by: Xiao Guangrong
Signed-off-by: Marcelo Tosatti
---
 include/trace/events/kvm.h | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

(limited to 'include/trace')

diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index c86f4e8e0bc9..d94d6c312ca1 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -218,18 +218,20 @@ TRACE_EVENT(kvm_age_page,
 #ifdef CONFIG_KVM_ASYNC_PF
 TRACE_EVENT(
 	kvm_try_async_get_page,
-	TP_PROTO(bool async, u64 pfn),
-	TP_ARGS(async, pfn),
+	TP_PROTO(u64 gva, u64 gfn),
+	TP_ARGS(gva, gfn),
 
 	TP_STRUCT__entry(
-		__field(__u64, pfn)
+		__field(u64, gva)
+		__field(u64, gfn)
 		),
 
 	TP_fast_assign(
-		__entry->pfn = (!async) ? pfn : (u64)-1;
+		__entry->gva = gva;
+		__entry->gfn = gfn;
 		),
 
-	TP_printk("pfn %#llx", __entry->pfn)
+	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
 );
 
 TRACE_EVENT(
--
cgit v1.2.1
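With 'gva' and 'gfn' in kvm_try_async_get_page, the same pair also appears in kvm_async_pf_doublefault and, via the token, lines up with the not_present/ready events, so one async_pf can be followed end to end in a trace. A hedged sketch of what an updated caller would look like; the helper and its arguments are illustrative rather than the real MMU code:

#include <linux/kvm_host.h>
#include <trace/events/kvm.h>

/* Hypothetical call site using the reworked TP_PROTO(u64 gva, u64 gfn). */
static void example_trace_try_async_get_page(gva_t gva, gfn_t gfn)
{
	trace_kvm_try_async_get_page(gva, gfn);
}
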
From 0730388b97d20cc568c25b42b9a23b28959b481f Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Mon, 1 Nov 2010 16:59:39 +0800
Subject: KVM: cleanup async_pf tracepoints

Use 'DECLARE_EVENT_CLASS' to clean up the async_pf tracepoints.

Acked-by: Gleb Natapov
Signed-off-by: Xiao Guangrong
Signed-off-by: Marcelo Tosatti
---
 include/trace/events/kvm.h | 76 +++++++++++++++++++++------------------------
 1 file changed, 35 insertions(+), 41 deletions(-)

(limited to 'include/trace')

diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index d94d6c312ca1..46e3cd8e197a 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -216,59 +216,71 @@ TRACE_EVENT(kvm_age_page,
 );
 
 #ifdef CONFIG_KVM_ASYNC_PF
-TRACE_EVENT(
-	kvm_try_async_get_page,
+DECLARE_EVENT_CLASS(kvm_async_get_page_class,
+
 	TP_PROTO(u64 gva, u64 gfn),
+
 	TP_ARGS(gva, gfn),
 
 	TP_STRUCT__entry(
-		__field(u64, gva)
+		__field(__u64, gva)
 		__field(u64, gfn)
-		),
+	),
 
 	TP_fast_assign(
 		__entry->gva = gva;
 		__entry->gfn = gfn;
-		),
+	),
 
 	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
 );
 
-TRACE_EVENT(
-	kvm_async_pf_not_present,
+DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
+
+	TP_PROTO(u64 gva, u64 gfn),
+
+	TP_ARGS(gva, gfn)
+);
+
+DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
+
+	TP_PROTO(u64 gva, u64 gfn),
+
+	TP_ARGS(gva, gfn)
+);
+
+DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
+
 	TP_PROTO(u64 token, u64 gva),
+
 	TP_ARGS(token, gva),
 
 	TP_STRUCT__entry(
 		__field(__u64, token)
 		__field(__u64, gva)
-		),
+	),
 
 	TP_fast_assign(
 		__entry->token = token;
 		__entry->gva = gva;
-		),
+	),
+
+	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
 
-	TP_printk("token %#llx gva %#llx not present", __entry->token,
-		  __entry->gva)
 );
 
-TRACE_EVENT(
-	kvm_async_pf_ready,
+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
+
 	TP_PROTO(u64 token, u64 gva),
-	TP_ARGS(token, gva),
 
-	TP_STRUCT__entry(
-		__field(__u64, token)
-		__field(__u64, gva)
-		),
+	TP_ARGS(token, gva)
+);
 
-	TP_fast_assign(
-		__entry->token = token;
-		__entry->gva = gva;
-		),
+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
+
+	TP_PROTO(u64 token, u64 gva),
 
-	TP_printk("token %#llx gva %#llx ready", __entry->token, __entry->gva)
+	TP_ARGS(token, gva)
 );
 
 TRACE_EVENT(
@@ -292,24 +304,6 @@ TRACE_EVENT(
 		  __entry->address, __entry->pfn)
 );
 
-TRACE_EVENT(
-	kvm_async_pf_doublefault,
-	TP_PROTO(u64 gva, u64 gfn),
-	TP_ARGS(gva, gfn),
-
-	TP_STRUCT__entry(
-		__field(u64, gva)
-		__field(u64, gfn)
-		),
-
-	TP_fast_assign(
-		__entry->gva = gva;
-		__entry->gfn = gfn;
-		),
-
-	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
-);
-
 #endif
 
 #endif /* _TRACE_KVM_MAIN_H */
--
cgit v1.2.1
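The DECLARE_EVENT_CLASS()/DEFINE_EVENT() split emits the TP_STRUCT__entry/TP_fast_assign/TP_printk machinery once per class, so each event sharing the class's prototype costs only a few lines and shares the class's print format (which is why the "not present"/"ready" suffixes disappear from the shared TP_printk above). Purely as an illustration of the pattern, a further event reusing the same class would look like this; kvm_async_pf_example is hypothetical and not part of the patch:

/* Hypothetical extra event reusing the kvm_async_pf_nopresent_ready class. */
DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_example,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);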