Diffstat (limited to 'arch/x86/entry/entry_32.S')
-rw-r--r--	arch/x86/entry/entry_32.S	160
1 file changed, 80 insertions, 80 deletions
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index f07baf0388bc..09fe5606a118 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -730,7 +730,7 @@
  * %eax: prev task
  * %edx: next task
  */
-ENTRY(__switch_to_asm)
+SYM_CODE_START(__switch_to_asm)
 	/*
 	 * Save callee-saved registers
 	 * This must match the order in struct inactive_task_frame
@@ -769,7 +769,7 @@ ENTRY(__switch_to_asm)
 	popl	%ebp
 
 	jmp	__switch_to
-END(__switch_to_asm)
+SYM_CODE_END(__switch_to_asm)
 
 /*
  * The unwinder expects the last frame on the stack to always be at the same
@@ -778,7 +778,7 @@ END(__switch_to_asm)
  * asmlinkage function so its argument has to be pushed on the stack. This
  * wrapper creates a proper "end of stack" frame header before the call.
  */
-ENTRY(schedule_tail_wrapper)
+SYM_FUNC_START(schedule_tail_wrapper)
 	FRAME_BEGIN
 
 	pushl	%eax
@@ -787,7 +787,7 @@ ENTRY(schedule_tail_wrapper)
 
 	FRAME_END
 	ret
-ENDPROC(schedule_tail_wrapper)
+SYM_FUNC_END(schedule_tail_wrapper)
 /*
  * A newly forked process directly context switches into this address.
  *
@@ -795,7 +795,7 @@ ENDPROC(schedule_tail_wrapper)
  * ebx: kernel thread func (NULL for user thread)
  * edi: kernel thread arg
  */
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
 	call	schedule_tail_wrapper
 
 	testl	%ebx, %ebx
@@ -818,7 +818,7 @@ ENTRY(ret_from_fork)
 	 */
 	movl	$0, PT_EAX(%esp)
 	jmp	2b
-END(ret_from_fork)
+SYM_CODE_END(ret_from_fork)
 
 /*
  * Return to user mode is not as complex as all this looks,
@@ -828,8 +828,7 @@ END(ret_from_fork)
  */
 
 	# userspace resumption stub bypassing syscall exit tracing
-	ALIGN
-ret_from_exception:
+SYM_CODE_START_LOCAL(ret_from_exception)
 	preempt_stop(CLBR_ANY)
 ret_from_intr:
 #ifdef CONFIG_VM86
@@ -846,15 +845,14 @@ ret_from_intr:
 	cmpl	$USER_RPL, %eax
 	jb	restore_all_kernel		# not returning to v8086 or userspace
 
-ENTRY(resume_userspace)
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	movl	%esp, %eax
 	call	prepare_exit_to_usermode
 	jmp	restore_all
-END(ret_from_exception)
+SYM_CODE_END(ret_from_exception)
 
-GLOBAL(__begin_SYSENTER_singlestep_region)
+SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
 /*
  * All code from here through __end_SYSENTER_singlestep_region is subject
  * to being single-stepped if a user program sets TF and executes SYSENTER.
@@ -869,9 +867,10 @@ GLOBAL(__begin_SYSENTER_singlestep_region)
  * Xen doesn't set %esp to be precisely what the normal SYSENTER
  * entry point expects, so fix it up before using the normal path.
  */
-ENTRY(xen_sysenter_target)
+SYM_CODE_START(xen_sysenter_target)
 	addl	$5*4, %esp			/* remove xen-provided frame */
 	jmp	.Lsysenter_past_esp
+SYM_CODE_END(xen_sysenter_target)
 #endif
 
 /*
@@ -906,7 +905,7 @@ ENTRY(xen_sysenter_target)
  * ebp	user stack
  * 0(%ebp) arg6
  */
-ENTRY(entry_SYSENTER_32)
+SYM_FUNC_START(entry_SYSENTER_32)
 	/*
 	 * On entry-stack with all userspace-regs live - save and
 	 * restore eflags and %eax to use it as scratch-reg for the cr3
@@ -1033,8 +1032,8 @@ ENTRY(entry_SYSENTER_32)
 	pushl	$X86_EFLAGS_FIXED
 	popfl
 	jmp	.Lsysenter_flags_fixed
-GLOBAL(__end_SYSENTER_singlestep_region)
-ENDPROC(entry_SYSENTER_32)
+SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_FUNC_END(entry_SYSENTER_32)
 
 /*
  * 32-bit legacy system call entry.
@@ -1064,7 +1063,7 @@ ENDPROC(entry_SYSENTER_32)
  * edi	arg5
  * ebp	arg6
  */
-ENTRY(entry_INT80_32)
+SYM_FUNC_START(entry_INT80_32)
 	ASM_CLAC
 	pushl	%eax			/* pt_regs->orig_ax */
 
@@ -1120,7 +1119,7 @@ restore_all_kernel:
 	jmp	.Lirq_return
 
 .section .fixup, "ax"
-ENTRY(iret_exc )
+SYM_CODE_START(iret_exc)
 	pushl	$0				# no error code
 	pushl	$do_iret_error
 
@@ -1137,9 +1136,10 @@ ENTRY(iret_exc )
 #endif
 
 	jmp	common_exception
+SYM_CODE_END(iret_exc)
 .previous
 	_ASM_EXTABLE(.Lirq_return, iret_exc)
-ENDPROC(entry_INT80_32)
+SYM_FUNC_END(entry_INT80_32)
 
 .macro FIXUP_ESPFIX_STACK
 /*
@@ -1193,7 +1193,7 @@ ENDPROC(entry_INT80_32)
  * We pack 1 stub into every 8-byte block.
  */
 	.align 8
-ENTRY(irq_entries_start)
+SYM_CODE_START(irq_entries_start)
     vector=FIRST_EXTERNAL_VECTOR
     .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
 	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
@@ -1201,11 +1201,11 @@ ENTRY(irq_entries_start)
 	jmp	common_interrupt
 	.align	8
     .endr
-END(irq_entries_start)
+SYM_CODE_END(irq_entries_start)
 
 #ifdef CONFIG_X86_LOCAL_APIC
 	.align	8
-ENTRY(spurious_entries_start)
+SYM_CODE_START(spurious_entries_start)
     vector=FIRST_SYSTEM_VECTOR
     .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
 	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
@@ -1213,9 +1213,9 @@ ENTRY(spurious_entries_start)
 	jmp	common_spurious
 	.align	8
     .endr
-END(spurious_entries_start)
+SYM_CODE_END(spurious_entries_start)
 
-common_spurious:
+SYM_CODE_START_LOCAL(common_spurious)
 	ASM_CLAC
 	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
 	SAVE_ALL switch_stacks=1
@@ -1224,7 +1224,7 @@ common_spurious:
 	movl	%esp, %eax
 	call	smp_spurious_interrupt
 	jmp	ret_from_intr
-ENDPROC(common_spurious)
+SYM_CODE_END(common_spurious)
 #endif
 
 /*
@@ -1232,7 +1232,7 @@ ENDPROC(common_spurious)
  * so IRQ-flags tracing has to follow that:
  */
 	.p2align CONFIG_X86_L1_CACHE_SHIFT
-common_interrupt:
+SYM_CODE_START_LOCAL(common_interrupt)
 	ASM_CLAC
 	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
 
@@ -1242,10 +1242,10 @@ common_interrupt:
 	movl	%esp, %eax
 	call	do_IRQ
 	jmp	ret_from_intr
-ENDPROC(common_interrupt)
+SYM_CODE_END(common_interrupt)
 
 #define BUILD_INTERRUPT3(name, nr, fn)			\
-ENTRY(name)						\
+SYM_FUNC_START(name)					\
 	ASM_CLAC;					\
 	pushl	$~(nr);					\
 	SAVE_ALL switch_stacks=1;			\
@@ -1254,7 +1254,7 @@ ENTRY(name)						\
 	movl	%esp, %eax;				\
 	call	fn;					\
 	jmp	ret_from_intr;				\
-ENDPROC(name)
+SYM_FUNC_END(name)
 
 #define BUILD_INTERRUPT(name, nr)		\
 	BUILD_INTERRUPT3(name, nr, smp_##name);	\
@@ -1262,14 +1262,14 @@ ENDPROC(name)
 /* The include is where all of the SMP etc. interrupts come from */
 #include <asm/entry_arch.h>
 
-ENTRY(coprocessor_error)
+SYM_CODE_START(coprocessor_error)
 	ASM_CLAC
 	pushl	$0
 	pushl	$do_coprocessor_error
 	jmp	common_exception
-END(coprocessor_error)
+SYM_CODE_END(coprocessor_error)
 
-ENTRY(simd_coprocessor_error)
+SYM_CODE_START(simd_coprocessor_error)
 	ASM_CLAC
 	pushl	$0
 #ifdef CONFIG_X86_INVD_BUG
@@ -1281,99 +1281,99 @@ ENTRY(simd_coprocessor_error)
 	pushl	$do_simd_coprocessor_error
 #endif
 	jmp	common_exception
-END(simd_coprocessor_error)
+SYM_CODE_END(simd_coprocessor_error)
 
-ENTRY(device_not_available)
+SYM_CODE_START(device_not_available)
 	ASM_CLAC
 	pushl	$-1				# mark this as an int
 	pushl	$do_device_not_available
 	jmp	common_exception
-END(device_not_available)
+SYM_CODE_END(device_not_available)
 
 #ifdef CONFIG_PARAVIRT
-ENTRY(native_iret)
+SYM_CODE_START(native_iret)
 	iret
 	_ASM_EXTABLE(native_iret, iret_exc)
-END(native_iret)
+SYM_CODE_END(native_iret)
 #endif
 
-ENTRY(overflow)
+SYM_CODE_START(overflow)
 	ASM_CLAC
 	pushl	$0
 	pushl	$do_overflow
 	jmp	common_exception
-END(overflow)
+SYM_CODE_END(overflow)
 
-ENTRY(bounds)
+SYM_CODE_START(bounds)
 	ASM_CLAC
 	pushl	$0
 	pushl	$do_bounds
 	jmp	common_exception
-END(bounds)
+SYM_CODE_END(bounds)
 
-ENTRY(invalid_op)
+SYM_CODE_START(invalid_op)
 	ASM_CLAC
 	pushl	$0
 	pushl	$do_invalid_op
 	jmp	common_exception
-END(invalid_op)
+SYM_CODE_END(invalid_op)
 
-ENTRY(coprocessor_segment_overrun)
+SYM_CODE_START(coprocessor_segment_overrun)
 	ASM_CLAC
 	pushl	$0
 	pushl	$do_coprocessor_segment_overrun
 	jmp	common_exception
-END(coprocessor_segment_overrun)
+SYM_CODE_END(coprocessor_segment_overrun)
 
-ENTRY(invalid_TSS)
+SYM_CODE_START(invalid_TSS)
 	ASM_CLAC
 	pushl	$do_invalid_TSS
 	jmp	common_exception
-END(invalid_TSS)
+SYM_CODE_END(invalid_TSS)
 
-ENTRY(segment_not_present)
+SYM_CODE_START(segment_not_present)
 	ASM_CLAC
 	pushl	$do_segment_not_present
 	jmp	common_exception
-END(segment_not_present)
+SYM_CODE_END(segment_not_present)
 
-ENTRY(stack_segment)
+SYM_CODE_START(stack_segment)
 	ASM_CLAC
 	pushl	$do_stack_segment
 	jmp	common_exception
-END(stack_segment)
+SYM_CODE_END(stack_segment)
 
-ENTRY(alignment_check)
+SYM_CODE_START(alignment_check)
 	ASM_CLAC
 	pushl	$do_alignment_check
 	jmp	common_exception
-END(alignment_check)
+SYM_CODE_END(alignment_check)
 
-ENTRY(divide_error)
+SYM_CODE_START(divide_error)
 	ASM_CLAC
 	pushl	$0				# no error code
 	pushl	$do_divide_error
 	jmp	common_exception
-END(divide_error)
+SYM_CODE_END(divide_error)
 
 #ifdef CONFIG_X86_MCE
-ENTRY(machine_check)
+SYM_CODE_START(machine_check)
 	ASM_CLAC
 	pushl	$0
 	pushl	machine_check_vector
 	jmp	common_exception
-END(machine_check)
+SYM_CODE_END(machine_check)
 #endif
 
-ENTRY(spurious_interrupt_bug)
+SYM_CODE_START(spurious_interrupt_bug)
 	ASM_CLAC
 	pushl	$0
 	pushl	$do_spurious_interrupt_bug
 	jmp	common_exception
-END(spurious_interrupt_bug)
+SYM_CODE_END(spurious_interrupt_bug)
 
 #ifdef CONFIG_XEN_PV
-ENTRY(xen_hypervisor_callback)
+SYM_FUNC_START(xen_hypervisor_callback)
 	/*
 	 * Check to see if we got the event in the critical
 	 * region in xen_iret_direct, after we've reenabled
@@ -1397,7 +1397,7 @@ ENTRY(xen_hypervisor_callback)
 	call	xen_maybe_preempt_hcall
 #endif
 	jmp	ret_from_intr
-ENDPROC(xen_hypervisor_callback)
+SYM_FUNC_END(xen_hypervisor_callback)
 
 /*
  * Hypervisor uses this for application faults while it executes.
@@ -1411,7 +1411,7 @@ ENDPROC(xen_hypervisor_callback)
  * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
  * We distinguish between categories by maintaining a status value in EAX.
  */
-ENTRY(xen_failsafe_callback)
+SYM_FUNC_START(xen_failsafe_callback)
 	pushl	%eax
 	movl	$1, %eax
 1:	mov	4(%esp), %ds
@@ -1448,7 +1448,7 @@ ENTRY(xen_failsafe_callback)
 	_ASM_EXTABLE(2b, 7b)
 	_ASM_EXTABLE(3b, 8b)
 	_ASM_EXTABLE(4b, 9b)
-ENDPROC(xen_failsafe_callback)
+SYM_FUNC_END(xen_failsafe_callback)
 #endif /* CONFIG_XEN_PV */
 
 #ifdef CONFIG_XEN_PVHVM
@@ -1470,13 +1470,13 @@ BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,
 
 #endif /* CONFIG_HYPERV */
 
-ENTRY(page_fault)
+SYM_CODE_START(page_fault)
 	ASM_CLAC
 	pushl	$do_page_fault
 	jmp	common_exception_read_cr2
-END(page_fault)
+SYM_CODE_END(page_fault)
 
-common_exception_read_cr2:
+SYM_CODE_START_LOCAL_NOALIGN(common_exception_read_cr2)
 	/* the function address is in %gs's slot on the stack */
 	SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
 
@@ -1498,9 +1498,9 @@ common_exception_read_cr2:
 	movl	%esp, %eax			# pt_regs pointer
 	CALL_NOSPEC %edi
 	jmp	ret_from_exception
-END(common_exception_read_cr2)
+SYM_CODE_END(common_exception_read_cr2)
 
-common_exception:
+SYM_CODE_START_LOCAL_NOALIGN(common_exception)
 	/* the function address is in %gs's slot on the stack */
 	SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
 	ENCODE_FRAME_POINTER
@@ -1519,9 +1519,9 @@ common_exception:
 	movl	%esp, %eax			# pt_regs pointer
 	CALL_NOSPEC %edi
 	jmp	ret_from_exception
-END(common_exception)
+SYM_CODE_END(common_exception)
 
-ENTRY(debug)
+SYM_CODE_START(debug)
 	/*
 	 * Entry from sysenter is now handled in common_exception
 	 */
@@ -1529,7 +1529,7 @@ ENTRY(debug)
 	pushl	$-1				# mark this as an int
 	pushl	$do_debug
 	jmp	common_exception
-END(debug)
+SYM_CODE_END(debug)
 
 /*
  * NMI is doubly nasty. It can happen on the first instruction of
@@ -1538,7 +1538,7 @@ END(debug)
 * switched stacks.  We handle both conditions by simply checking whether we
 * interrupted kernel code running on the SYSENTER stack.
 */
-ENTRY(nmi)
+SYM_CODE_START(nmi)
 	ASM_CLAC
 
 #ifdef CONFIG_X86_ESPFIX32
@@ -1631,9 +1631,9 @@ ENTRY(nmi)
 	lss	(1+5+6)*4(%esp), %esp		# back to espfix stack
 	jmp	.Lirq_return
 #endif
-END(nmi)
+SYM_CODE_END(nmi)
 
-ENTRY(int3)
+SYM_CODE_START(int3)
 	ASM_CLAC
 	pushl	$-1				# mark this as an int
 
@@ -1644,22 +1644,22 @@ ENTRY(int3)
 	movl	%esp, %eax			# pt_regs pointer
 	call	do_int3
 	jmp	ret_from_exception
-END(int3)
+SYM_CODE_END(int3)
 
-ENTRY(general_protection)
+SYM_CODE_START(general_protection)
 	pushl	$do_general_protection
 	jmp	common_exception
-END(general_protection)
+SYM_CODE_END(general_protection)
 
 #ifdef CONFIG_KVM_GUEST
-ENTRY(async_page_fault)
+SYM_CODE_START(async_page_fault)
 	ASM_CLAC
 	pushl	$do_async_page_fault
 	jmp	common_exception_read_cr2
-END(async_page_fault)
+SYM_CODE_END(async_page_fault)
 #endif
 
-ENTRY(rewind_stack_do_exit)
+SYM_CODE_START(rewind_stack_do_exit)
 	/* Prevent any naive code from trying to unwind to our caller. */
 	xorl	%ebp, %ebp
 
@@ -1668,4 +1668,4 @@ ENTRY(rewind_stack_do_exit)
 
 	call	do_exit
 1:	jmp 1b
-END(rewind_stack_do_exit)
+SYM_CODE_END(rewind_stack_do_exit)
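
For reference, the SYM_* annotations used throughout this diff come from the unified macro family in include/linux/linkage.h (described in Documentation/asm-annotations.rst). Below is a simplified sketch of the relevant definitions, trimmed for illustration; the in-tree versions wrap each definition in #ifndef override guards so architectures can redefine them:

	/* Sketch, simplified from include/linux/linkage.h (v5.4-era).
	 * SYM_L_GLOBAL emits .globl, SYM_L_LOCAL keeps the symbol local;
	 * SYM_A_ALIGN emits an alignment directive, SYM_A_NONE emits nothing;
	 * ASM_NL is the assembler statement separator. */

	/* emit linkage + alignment directives, then the label itself */
	#define SYM_ENTRY(name, linkage, align...)		\
		linkage(name) ASM_NL				\
		align ASM_NL					\
		name:

	#define SYM_START(name, linkage, align...)	SYM_ENTRY(name, linkage, align)

	/* close a symbol: record its ELF type and size for tooling */
	#define SYM_END(name, sym_type)				\
		.type name sym_type ASM_NL			\
		.size name, .-name

	/* C-callable functions: global, aligned, typed STT_FUNC */
	#define SYM_FUNC_START(name)	SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
	#define SYM_FUNC_END(name)	SYM_END(name, SYM_T_FUNC)

	/* non-C entry code with special stack layouts: typed STT_NOTYPE */
	#define SYM_CODE_START(name)			SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
	#define SYM_CODE_START_LOCAL(name)		SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
	#define SYM_CODE_START_LOCAL_NOALIGN(name)	SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
	#define SYM_CODE_END(name)			SYM_END(name, SYM_T_NONE)

This split carries the intent of the conversion: SYM_FUNC_* marks entries that behave like C functions, SYM_CODE_* marks entry code with non-standard stack frames, and the *_LOCAL variants replace bare local labels such as common_interrupt:. Every start now has a matching end, so each symbol gets a correct ELF .type and .size.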