summary | refs | log | tree | commit | diff | stats
path: root/arch/x86
diff options
context:
space:
mode:
author    Jeremy Fitzhardinge <jeremy@goop.org>   2008-04-02 10:54:11 -0700
committer Ingo Molnar <mingo@elte.hu>             2008-04-24 23:57:33 +0200
commit  b77797fb2bf31bf076e6b69736119bc6a077525b (patch)
tree    4d0bfcb5bcc96988ef421c807837d7236fdb0e07 /arch/x86
parent  2bd50036b5dfc929390ddc48be7f6314447b2be3 (diff)
download  talos-op-linux-b77797fb2bf31bf076e6b69736119bc6a077525b.tar.gz
          talos-op-linux-b77797fb2bf31bf076e6b69736119bc6a077525b.zip
xen: fold xen_sysexit into xen_iret
xen_sysexit and xen_iret were doing essentially the same thing. Rather
than having a separate implementation for xen_sysexit, we can just strip
the stack back to an iret frame and jump into xen_iret. This removes a
lot of code and complexity - specifically, another critical region.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/entry_32.S |  9
-rw-r--r--  arch/x86/xen/xen-asm.S     | 70
2 files changed, 15 insertions(+), 64 deletions(-)
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 209c334bb920..2a609dc3271c 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1044,15 +1044,8 @@ ENTRY(xen_hypervisor_callback)
jmp xen_iret_crit_fixup
-1: cmpl $xen_sysexit_start_crit,%eax
- jb 2f
- cmpl $xen_sysexit_end_crit,%eax
- jae 2f
-
- jmp xen_sysexit_crit_fixup
-
ENTRY(xen_do_upcall)
-2: mov %esp, %eax
+1: mov %esp, %eax
call xen_evtchn_do_upcall
jmp ret_from_intr
CFI_ENDPROC
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 53cae923e148..2497a30f41de 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -108,6 +108,20 @@ ENDPATCH(xen_restore_fl_direct)
RELOC(xen_restore_fl_direct, 2b+1)
/*
+ We can't use sysexit directly, because we're not running in ring0.
+ But we can easily fake it up using iret. Assuming xen_sysexit
+ is jumped to with a standard stack frame, we can just strip it
+ back to a standard iret frame and use iret.
+ */
+ENTRY(xen_sysexit)
+ movl PT_EAX(%esp), %eax /* Shouldn't be necessary? */
+ orl $X86_EFLAGS_IF, PT_EFLAGS(%esp)
+ lea PT_EIP(%esp), %esp
+
+ jmp xen_iret
+ENDPROC(xen_sysexit)
+
+/*
This is run where a normal iret would be run, with the same stack setup:
8: eflags
4: cs
@@ -276,62 +290,6 @@ ENTRY(xen_iret_crit_fixup)
2: jmp xen_do_upcall
-ENTRY(xen_sysexit)
- /* Store vcpu_info pointer for easy access. Do it this
- way to avoid having to reload %fs */
-#ifdef CONFIG_SMP
- GET_THREAD_INFO(%eax)
- movl TI_cpu(%eax),%eax
- movl __per_cpu_offset(,%eax,4),%eax
- mov per_cpu__xen_vcpu(%eax),%eax
-#else
- movl per_cpu__xen_vcpu, %eax
-#endif
-
- /* We can't actually use sysexit in a pv guest,
- so fake it up with iret */
- pushl $__USER_DS /* user stack segment */
- pushl %ecx /* user esp */
- pushl PT_EFLAGS+2*4(%esp) /* user eflags */
- pushl $__USER_CS /* user code segment */
- pushl %edx /* user eip */
-
-xen_sysexit_start_crit:
- /* Unmask events... */
- movb $0, XEN_vcpu_info_mask(%eax)
- /* ...and test for pending.
- There's a preempt window here, but it doesn't
- matter because we're within the critical section. */
- testb $0xff, XEN_vcpu_info_pending(%eax)
-
- /* If there's something pending, mask events again so we
- can directly inject it back into the kernel. */
- jnz 1f
-
- movl PT_EAX+5*4(%esp),%eax
-2: iret
-1: movb $1, XEN_vcpu_info_mask(%eax)
-xen_sysexit_end_crit:
- addl $5*4, %esp /* remove iret frame */
- /* no need to re-save regs, but need to restore kernel %fs */
- mov $__KERNEL_PERCPU, %eax
- mov %eax, %fs
- jmp xen_do_upcall
-.section __ex_table,"a"
- .align 4
- .long 2b,iret_exc
-.previous
-
- .globl xen_sysexit_start_crit, xen_sysexit_end_crit
-/*
- sysexit fixup is easy, since the old frame is still sitting there
- on the stack. We just need to remove the new recursive
- interrupt and return.
- */
-ENTRY(xen_sysexit_crit_fixup)
- addl $PT_OLDESP+5*4, %esp /* remove frame+iret */
- jmp xen_do_upcall
-
/*
Force an event check by making a hypercall,
but preserve regs before making the call.
OpenPOWER on IntegriCloud