author     Zachary Amsden <zach@vmware.com>          2007-02-13 13:26:21 +0100
committer  Andi Kleen <andi@basil.nowhere.org>       2007-02-13 13:26:21 +0100
commit     bbab4f3bb7f528d2b8ccb5de9ae5f6ff3fb29684 (patch)
tree       141d035b9d79711e6679fadc31c9583f908dfedb  /arch/i386/kernel/vmi.c
parent     7ce0bcfd1667736f1293cff845139bbee53186de (diff)
download   blackbird-obmc-linux-bbab4f3bb7f528d2b8ccb5de9ae5f6ff3fb29684.tar.gz
           blackbird-obmc-linux-bbab4f3bb7f528d2b8ccb5de9ae5f6ff3fb29684.zip
[PATCH] i386: VMI timer patches
VMI timer code.  It works by taking over the local APIC clock when the APIC is
configured, which requires a couple of hooks into the APIC code.  The backend
timer code could be commonized into the timer infrastructure, but some pieces
are still missing (stolen time, in particular), and the exact semantics of when
to do accounting for NO_IDLE need to be shared between different hypervisors as
well.  So for now, the VMI timer is a separate module.

[Adrian Bunk: cleanups]

Signed-off-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Andi Kleen <ak@suse.de>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
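A note on the NO_IDLE_HZ halt path before the diff: the sequence the patch
installs is small, but its ordering matters.  The annotated sketch below
restates the vmi_safe_halt() hunk from the diff; the comments are editorial
glosses on the assumed semantics of the vmi_* helpers (declared in the new
asm/vmi_time.h), not text from the patch itself.

    /* Sketch: the NO_IDLE_HZ-aware halt installed as paravirt_ops.safe_halt */
    static fastcall void vmi_safe_halt(void)
    {
            /* Stop the periodic (hz) alarm if it is safe to skip ticks;
             * nonzero return means the tick was actually stopped. */
            int idle = vmi_stop_hz_timer();

            /* Halt through the hypervisor's ROM call rather than a raw
             * hlt instruction, so the virtual CPU can be descheduled. */
            vmi_ops.halt();

            if (idle) {
                    /* After wakeup, rearm the tick and account the time
                     * that elapsed while halted (the place where stolen
                     * time handling would eventually hook in), with
                     * interrupts masked around the accounting. */
                    local_irq_disable();
                    vmi_account_time_restart_hz_timer();
                    local_irq_enable();
            }
    }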
Diffstat (limited to 'arch/i386/kernel/vmi.c')
-rw-r--r--  arch/i386/kernel/vmi.c | 45
1 file changed, 45 insertions(+), 0 deletions(-)
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index a94d64b10f75..bb5a7abf949c 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -34,6 +34,7 @@
 #include <asm/apic.h>
 #include <asm/processor.h>
 #include <asm/timer.h>
+#include <asm/vmi_time.h>
 
 /* Convenient for calling VMI functions indirectly in the ROM */
 typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
@@ -67,6 +68,7 @@ struct {
         void (*set_linear_mapping)(int, u32, u32, u32);
         void (*flush_tlb)(int);
         void (*set_initial_ap_state)(int, int);
+        void (*halt)(void);
 } vmi_ops;
 
 /* XXX move this to alternative.h */
@@ -252,6 +254,19 @@ static void vmi_nop(void)
 {
 }
 
+/* For NO_IDLE_HZ, we stop the clock when halting the kernel */
+#ifdef CONFIG_NO_IDLE_HZ
+static fastcall void vmi_safe_halt(void)
+{
+        int idle = vmi_stop_hz_timer();
+        vmi_ops.halt();
+        if (idle) {
+                local_irq_disable();
+                vmi_account_time_restart_hz_timer();
+                local_irq_enable();
+        }
+}
+#endif
 #ifdef CONFIG_DEBUG_PAGE_TYPE
@@ -727,7 +742,12 @@ static inline int __init activate_vmi(void)
                         (char *)paravirt_ops.save_fl);
         patch_offset(&irq_save_disable_callout[IRQ_PATCH_DISABLE],
                         (char *)paravirt_ops.irq_disable);
+#ifndef CONFIG_NO_IDLE_HZ
         para_fill(safe_halt, Halt);
+#else
+        vmi_ops.halt = vmi_get_function(VMI_CALL_Halt);
+        paravirt_ops.safe_halt = vmi_safe_halt;
+#endif
         para_fill(wbinvd, WBINVD);
         /* paravirt_ops.read_msr = vmi_rdmsr */
         /* paravirt_ops.write_msr = vmi_wrmsr */
@@ -838,6 +858,31 @@ static inline int __init activate_vmi(void)
 #endif
 
         /*
+         * Check for VMI timer functionality by probing for a cycle frequency method
+         */
+        reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
+        if (rel->type != VMI_RELOCATION_NONE) {
+                vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
+                vmi_timer_ops.get_cycle_counter =
+                        vmi_get_function(VMI_CALL_GetCycleCounter);
+                vmi_timer_ops.get_wallclock =
+                        vmi_get_function(VMI_CALL_GetWallclockTime);
+                vmi_timer_ops.wallclock_updated =
+                        vmi_get_function(VMI_CALL_WallclockUpdated);
+                vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
+                vmi_timer_ops.cancel_alarm =
+                        vmi_get_function(VMI_CALL_CancelAlarm);
+                paravirt_ops.time_init = vmi_time_init;
+                paravirt_ops.get_wallclock = vmi_get_wallclock;
+                paravirt_ops.set_wallclock = vmi_set_wallclock;
+#ifdef CONFIG_X86_LOCAL_APIC
+                paravirt_ops.setup_boot_clock = vmi_timer_setup_boot_alarm;
+                paravirt_ops.setup_secondary_clock = vmi_timer_setup_secondary_alarm;
+#endif
+                custom_sched_clock = vmi_sched_clock;
+        }
+
+        /*
          * Alternative instruction rewriting doesn't happen soon enough
          * to convert VMI_IRET to a call instead of a jump; so we have
          * to do this before IRQs get reenabled.  Fortunately, it is
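
One detail the final hunk leaves implicit: the added code tests rel->type right
after assigning reloc, yet never assigns rel in these lines.  That works
because, earlier in activate_vmi() (outside this hunk's context), rel is set up
as a typed alias of the 64-bit value the ROM call returns.  Roughly, as a
reconstruction from the surrounding file rather than part of this patch:

    /* The ROM returns a packed vmi_relocation_info in a u64; rel gives
     * typed field access (type, eip) to that same storage. */
    u64 reloc;
    const struct vmi_relocation_info *rel =
                            (struct vmi_relocation_info *)&reloc;

    reloc = call_vrom_long_func(vmi_rom, get_reloc,
                                VMI_CALL_GetCycleFrequency);
    if (rel->type != VMI_RELOCATION_NONE)
            vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;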