Diffstat (limited to 'arch')
-rw-r--r--	arch/parisc/kernel/head.S	|  15
-rw-r--r--	arch/parisc/kernel/hpmc.S	|  16
-rw-r--r--	arch/parisc/kernel/pacache.S	|  80
-rw-r--r--	arch/parisc/kernel/perf_asm.S	|  26
-rw-r--r--	arch/parisc/kernel/real2.S	|  16
-rw-r--r--	arch/parisc/kernel/syscall.S	|  71
6 files changed, 99 insertions, 125 deletions
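Every hunk below follows the same pattern: a hand-written ".export symbol[,code]" directive plus "symbol:" label is replaced by the generic ENTRY()/END()/ENDPROC() annotations pulled in via <linux/linkage.h>. As a rough reference for reading the patch, the generic fallback definitions of these macros look approximately like this (a simplified sketch of the linkage.h defaults of that era, not the parisc-specific overrides; ALIGN comes from the per-architecture <asm/linkage.h>):

	#define ENTRY(name)	\
		.globl name;	\
		ALIGN;		\
		name:

	#define END(name)	\
		.size name, .-name

	#define ENDPROC(name)	\
		.type name, @function;	\
		END(name)

So ENTRY(stext)/ENDPROC(stext) still emits the global label that ".export stext" plus "stext:" used to, and in addition records ELF symbol type and size information that debugging and profiling tools can make use of.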
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index eaad2328fea1..9676c486bb63 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -2,7 +2,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1999 by Helge Deller
+ * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
  * Copyright 1999 SuSE GmbH (Philipp Rumpf)
  * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
  * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
@@ -19,16 +19,17 @@
 #include <asm/assembly.h>
 #include <asm/pgtable.h>
 
+#include <linux/linkage.h>
+
 	.level	LEVEL
 
 	.data
-
-	.export boot_args
-boot_args:
+ENTRY(boot_args)
 	.word 0 /* arg0 */
 	.word 0 /* arg1 */
 	.word 0 /* arg2 */
 	.word 0 /* arg3 */
+END(boot_args)
 
 	.text
 	.align	4
@@ -38,10 +39,9 @@ boot_args:
 	.import fault_vector_11,code	/* IVA parisc 1.1 32 bit */
 	.import	$global$		/* forward declaration */
 #endif /*!CONFIG_64BIT*/
-	.export stext
 	.export _stext,data		/* Kernel want it this way! */
 _stext:
-stext:
+ENTRY(stext)
 	.proc
 	.callinfo
@@ -343,6 +343,9 @@ smp_slave_stext:
 	.procend
 #endif /* CONFIG_SMP */
 
+
+ENDPROC(stext)
+
 #ifndef CONFIG_64BIT
 	.data
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index c412c0adc4a9..d8baa158d8a0 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -46,6 +46,8 @@
 #include <asm/assembly.h>
 #include <asm/pdc.h>
 
+#include <linux/linkage.h>
+
 /*
  * stack for os_hpmc, the HPMC handler.
  * buffer for IODC procedures (for the HPMC handler).
@@ -69,17 +71,15 @@ hpmc_raddr:
 
 #define HPMC_PIM_DATA_SIZE 896	/* Enough to hold all architected 2.0 state */
 
-	.export hpmc_pim_data, data
 	.align 8
-hpmc_pim_data:
+ENTRY(hpmc_pim_data)
 	.block HPMC_PIM_DATA_SIZE
+END(hpmc_pim_data)
 
 	.text
 
-	.export os_hpmc, code
 	.import intr_save, code
-
-os_hpmc:
+ENTRY(os_hpmc)
 
 	/*
 	 * registers modified:
@@ -294,11 +294,9 @@ os_hpmc_6:
 	b .
 	nop
+ENDPROC(os_hpmc)
 
 /* this label used to compute os_hpmc checksum */
-
-	.export os_hpmc_end, code
-
-os_hpmc_end:
+ENTRY(os_hpmc_end)
 	nop
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index e81c9937d10a..75d522e2d058 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -45,13 +45,12 @@
 #include <asm/assembly.h>
 #include <asm/pgtable.h>
 #include <asm/cache.h>
+#include <linux/linkage.h>
 
 	.text
 	.align	128
 
-	.export flush_tlb_all_local,code
-
-flush_tlb_all_local:
+ENTRY(flush_tlb_all_local)
 	.proc
 	.callinfo NO_CALLS
 	.entry
@@ -200,11 +199,11 @@ fdtdone:
 
 	.exit
 	.procend
+ENDPROC(flush_tlb_all_local)
 
-	.export flush_instruction_cache_local,code
 	.import cache_info,data
 
-flush_instruction_cache_local:
+ENTRY(flush_instruction_cache_local)
 	.proc
 	.callinfo NO_CALLS
 	.entry
@@ -241,11 +240,11 @@ fisync:
 
 	.exit
 	.procend
+ENDPROC(flush_instruction_cache_local)
 
-	.export flush_data_cache_local, code
-	.import cache_info, data
-flush_data_cache_local:
+	.import cache_info, data
+ENTRY(flush_data_cache_local)
 	.proc
 	.callinfo NO_CALLS
 	.entry
@@ -283,11 +282,11 @@ fdsync:
 
 	.exit
 	.procend
+ENDPROC(flush_data_cache_local)
 
-	.export copy_user_page_asm,code
 	.align 16
 
-copy_user_page_asm:
+ENTRY(copy_user_page_asm)
 	.proc
 	.callinfo NO_CALLS
 	.entry
@@ -409,6 +408,7 @@ copy_user_page_asm:
 
 	.exit
 	.procend
+ENDPROC(copy_user_page_asm)
 
 /*
  * NOTE: Code in clear_user_page has a hard coded dependency on the
@@ -446,9 +446,7 @@ copy_user_page_asm:
  * lobby for such a change.
  */
 
-	.export copy_user_page_asm,code
-
-copy_user_page_asm:
+ENTRY(copy_user_page_asm)
 	.proc
 	.callinfo NO_CALLS
 	.entry
@@ -534,11 +532,10 @@ copy_user_page_asm:
 
 	.exit
 	.procend
+ENDPROC(copy_user_page_asm)
 #endif
 
-	.export __clear_user_page_asm,code
-
-__clear_user_page_asm:
+ENTRY(__clear_user_page_asm)
 	.proc
 	.callinfo NO_CALLS
 	.entry
@@ -618,10 +615,9 @@ __clear_user_page_asm:
 
 	.exit
 	.procend
+ENDPROC(__clear_user_page_asm)
 
-	.export flush_kernel_dcache_page_asm
-
-flush_kernel_dcache_page_asm:
+ENTRY(flush_kernel_dcache_page_asm)
 	.proc
 	.callinfo NO_CALLS
 	.entry
@@ -662,10 +658,9 @@ flush_kernel_dcache_page_asm:
 
 	.exit
 	.procend
+ENDPROC(flush_kernel_dcache_page_asm)
 
-	.export flush_user_dcache_page
-
-flush_user_dcache_page:
+ENTRY(flush_user_dcache_page)
 	.proc
 	.callinfo NO_CALLS
 	.entry
@@ -706,10 +701,9 @@ flush_user_dcache_page:
 
 	.exit
 	.procend
+ENDPROC(flush_user_dcache_page)
 
-	.export flush_user_icache_page
-
-flush_user_icache_page:
+ENTRY(flush_user_icache_page)
 	.proc
 	.callinfo NO_CALLS
 	.entry
@@ -750,11 +744,10 @@ flush_user_icache_page:
 
 	.exit
 	.procend
+ENDPROC(flush_user_icache_page)
 
-	.export purge_kernel_dcache_page
-
-purge_kernel_dcache_page:
+ENTRY(purge_kernel_dcache_page)
 	.proc
 	.callinfo NO_CALLS
 	.entry
@@ -794,15 +787,14 @@ purge_kernel_dcache_page:
 
 	.exit
 	.procend
+ENDPROC(purge_kernel_dcache_page)
 
 #if 0
 	/* Currently not used, but it still is a possible alternate
	 * solution.
	 */
 
-	.export flush_alias_page
-
-flush_alias_page:
+ENTRY(flush_alias_page)
 	.proc
 	.callinfo NO_CALLS
 	.entry
@@ -882,10 +874,9 @@ flush_user_dcache_range_asm:
 
 	.exit
 	.procend
+ENDPROC(flush_alias_page)
 
-	.export flush_kernel_dcache_range_asm
-
-flush_kernel_dcache_range_asm:
+ENTRY(flush_kernel_dcache_range_asm)
 	.proc
 	.callinfo NO_CALLS
 	.entry
@@ -905,10 +896,9 @@ flush_kernel_dcache_range_asm:
 
 	.exit
 	.procend
+ENDPROC(flush_kernel_dcache_range_asm)
 
-	.export flush_user_icache_range_asm
-
-flush_user_icache_range_asm:
+ENTRY(flush_user_icache_range_asm)
 	.proc
 	.callinfo NO_CALLS
 	.entry
@@ -927,10 +917,9 @@ flush_user_icache_range_asm:
 
 	.exit
 	.procend
+ENDPROC(flush_user_icache_range_asm)
 
-	.export flush_kernel_icache_page
-
-flush_kernel_icache_page:
+ENTRY(flush_kernel_icache_page)
 	.proc
 	.callinfo NO_CALLS
 	.entry
@@ -971,10 +960,9 @@ flush_kernel_icache_page:
 
 	.exit
 	.procend
+ENDPROC(flush_kernel_icache_page)
 
-	.export flush_kernel_icache_range_asm
-
-flush_kernel_icache_range_asm:
+ENTRY(flush_kernel_icache_range_asm)
 	.proc
 	.callinfo NO_CALLS
 	.entry
@@ -992,14 +980,13 @@ flush_kernel_icache_range_asm:
 	nop
 	.exit
 	.procend
+ENDPROC(flush_kernel_icache_range_asm)
 
	/* align should cover use of rfi in disable_sr_hashing_asm and
	 * srdis_done.
	 */
 	.align 256
-	.export disable_sr_hashing_asm,code
-
-disable_sr_hashing_asm:
+ENTRY(disable_sr_hashing_asm)
 	.proc
 	.callinfo NO_CALLS
 	.entry
@@ -1088,5 +1075,6 @@ srdis_done:
 
 	.exit
 	.procend
+ENDPROC(disable_sr_hashing_asm)
 
 	.end
diff --git a/arch/parisc/kernel/perf_asm.S b/arch/parisc/kernel/perf_asm.S
index 5e7bb90e7e08..43874ca3ed67 100644
--- a/arch/parisc/kernel/perf_asm.S
+++ b/arch/parisc/kernel/perf_asm.S
@@ -20,6 +20,7 @@
  */
 
 #include <asm/assembly.h>
+#include <linux/linkage.h>
 
 #ifdef CONFIG_64BIT
 	.level	2.0w
@@ -41,10 +42,8 @@
 ; starting/stopping the coprocessor with the pmenb/pmdis.
 ;
 	.text
 
-	.align 32
-	.export perf_intrigue_enable_perf_counters,code
-perf_intrigue_enable_perf_counters:
+ENTRY(perf_intrigue_enable_perf_counters)
 	.proc
 	.callinfo frame=0,NO_CALLS
 	.entry
@@ -69,9 +68,9 @@ perf_intrigue_enable_perf_counters:
 	nop
 	.exit
 	.procend
+ENDPROC(perf_intrigue_enable_perf_counters)
 
-	.export perf_intrigue_disable_perf_counters,code
-perf_intrigue_disable_perf_counters:
+ENTRY(perf_intrigue_disable_perf_counters)
 	.proc
 	.callinfo frame=0,NO_CALLS
 	.entry
@@ -86,6 +85,7 @@ perf_intrigue_disable_perf_counters:
 	mtctl	%r26,ccr	; turn off performance coprocessor
 	.exit
 	.procend
+ENDPROC(perf_intrigue_disable_perf_counters)
 
 ;***********************************************************************
 ;*
@@ -117,8 +117,7 @@ perf_intrigue_disable_perf_counters:
 ;*
 ;***********************************************************************
 
-	.export perf_rdr_shift_in_W,code
-perf_rdr_shift_in_W:
+ENTRY(perf_rdr_shift_in_W)
 	.proc
 	.callinfo frame=0,NO_CALLS
 	.entry
@@ -550,6 +549,7 @@ perf_rdr_shift_in_W_leave:
 	.exit
 	MTDIAG_2	(24)		; restore DR2
 	.procend
+ENDPROC(perf_rdr_shift_in_W)
 
 ;***********************************************************************
@@ -575,8 +575,7 @@ perf_rdr_shift_in_W_leave:
 ;*
 ;***********************************************************************
 
-	.export perf_rdr_shift_out_W,code
-perf_rdr_shift_out_W:
+ENTRY(perf_rdr_shift_out_W)
 	.proc
 	.callinfo frame=0,NO_CALLS
 	.entry
@@ -983,6 +982,7 @@ perf_rdr_shift_out_W_leave:
 	.exit
 	MTDIAG_2	(23)		; restore DR2
 	.procend
+ENDPROC(perf_rdr_shift_out_W)
 
 ;***********************************************************************
@@ -1012,8 +1012,7 @@ perf_rdr_shift_out_W_leave:
 ;*
 ;***********************************************************************
 
-	.export perf_rdr_shift_in_U,code
-perf_rdr_shift_in_U:
+ENTRY(perf_rdr_shift_in_U)
 	.proc
 	.callinfo frame=0,NO_CALLS
 	.entry
@@ -1343,6 +1342,7 @@ perf_rdr_shift_in_U_leave:
 	.exit
 	MTDIAG_2	(24)		; restore DR2
 	.procend
+ENDPROC(perf_rdr_shift_in_U)
 
 ;***********************************************************************
 ;*
@@ -1369,8 +1369,7 @@ perf_rdr_shift_in_U_leave:
 ;*
 ;***********************************************************************
 
-	.export perf_rdr_shift_out_U,code
-perf_rdr_shift_out_U:
+ENTRY(perf_rdr_shift_out_U)
 	.proc
 	.callinfo frame=0,NO_CALLS
 	.entry
@@ -1687,4 +1686,5 @@ perf_rdr_shift_out_U_leave:
 	.exit
 	MTDIAG_2	(23)		; restore DR2
 	.procend
+ENDPROC(perf_rdr_shift_out_U)
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
index 789061f6ceb4..7a92695d95a6 100644
--- a/arch/parisc/kernel/real2.S
+++ b/arch/parisc/kernel/real2.S
@@ -11,6 +11,8 @@
 #include <asm/psw.h>
 #include <asm/assembly.h>
 
+#include <linux/linkage.h>
+
 	.section	.bss
 	.export real_stack
 	.export real32_stack
@@ -39,8 +41,6 @@ save_cr_end:
 
 	.text
 
-	.export real32_call_asm
-
	/* unsigned long real32_call_asm(unsigned int *sp,
	 *	unsigned int *arg0p,
	 *	unsigned int iodc_fn)
	 *	sp is value of stack pointer to adopt before calling PDC (virt)
	 *	arg0p points to where saved arg values may be found
	 *	iodc_fn is the IODC function to call
	 */
 
-real32_call_asm:
+ENTRY(real32_call_asm)
 	STREG	%rp, -RP_OFFSET(%sp)	/* save RP */
 #ifdef CONFIG_64BIT
 	callee_save
@@ -107,6 +107,7 @@ ric_ret:
 	LDREG	-RP_OFFSET(%sp), %rp	/* restore RP */
 	bv	0(%rp)
 	nop
+ENDPROC(real32_call_asm)
 
 # define PUSH_CR(r, where) mfctl	r, %r1 !
 	STREG,ma %r1, REG_SZ(where)
@@ -218,7 +219,6 @@ rfi_r2v_1:
 /************************ 64-bit real-mode calls ***********************/
 /* This is only usable in wide kernels right now and will probably stay so */
 	.text
-	.export real64_call_asm
	/* unsigned long real64_call_asm(unsigned long *sp,
	 *	unsigned long *arg0p,
	 *	unsigned long fn)
	 *	arg0p points to where saved arg values may be found
	 *	iodc_fn is the IODC function to call
	 */
-real64_call_asm:
+ENTRY(real64_call_asm)
 	std	%rp, -0x10(%sp)		/* save RP */
 	std	%sp, -8(%arg0)		/* save SP on real-mode stack */
 	copy	%arg0, %sp		/* adopt the real-mode SP */
@@ -272,19 +272,21 @@ r64_ret:
 	ldd	-0x10(%sp), %rp		/* restore RP */
 	bv	0(%rp)
 	nop
+ENDPROC(real64_call_asm)
 
 #endif
 
-	.export __canonicalize_funcptr_for_compare
 	.text
	/* http://lists.parisc-linux.org/hypermail/parisc-linux/10916.html
	** GCC 3.3 and later has a new function in libgcc.a for
	** comparing function pointers.
	*/
-__canonicalize_funcptr_for_compare:
+ENTRY(__canonicalize_funcptr_for_compare)
 #ifdef CONFIG_64BIT
 	bve (%r2)
 #else
 	bv %r0(%r2)
 #endif
 	copy %r26,%r28
+ENDPROC(__canonicalize_funcptr_for_compare)
+
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index a05800429304..de1812de5183 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -12,10 +12,11 @@
 #include <asm/errno.h>
 #include <asm/psw.h>
 #include <asm/thread_info.h>
-
 #include <asm/assembly.h>
 #include <asm/processor.h>
 
+#include <linux/linkage.h>
+
 /* We fill the empty parts of the gateway page with
  * something that will kill the kernel or a
  * userspace application.
@@ -28,11 +29,18 @@
 	.level	1.1
 #endif
 
+/* on 64bit pad to 64bit values */
+#ifdef CONFIG_64BIT
+#define ULONG_WORD(x)	.word 0, x
+#else
+#define ULONG_WORD(x)	.word x
+#endif
+
+
 	.text
 
 	.import syscall_exit,code
 	.import syscall_exit_rfi,code
-	.export linux_gateway_page
 
 /* Linux gateway page is aliased to virtual page 0 in the kernel
  * address space. Since it is a gateway page it cannot be
  * dereferenced, so null pointers will still fault.
  */
 
 	.align ASM_PAGE_SIZE
-linux_gateway_page:
+ENTRY(linux_gateway_page)
 
	/* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
 	.rept 44
@@ -595,73 +603,49 @@ cas_action:
	   the other for the store. Either return -EFAULT.
	   Each of the entries must be relocated. */
 	.section __ex_table,"aw"
-#ifdef CONFIG_64BIT
-	/* Pad the address calculation */
-	.word	0,(2b - linux_gateway_page)
-	.word	0,(3b - linux_gateway_page)
-#else
-	.word	(2b - linux_gateway_page)
-	.word	(3b - linux_gateway_page)
-#endif
+	ULONG_WORD(2b - linux_gateway_page)
+	ULONG_WORD(3b - linux_gateway_page)
 	.previous
 
 	.section __ex_table,"aw"
-#ifdef CONFIG_64BIT
-	/* Pad the address calculation */
-	.word	0,(1b - linux_gateway_page)
-	.word	0,(3b - linux_gateway_page)
-#else
-	.word	(1b - linux_gateway_page)
-	.word	(3b - linux_gateway_page)
-#endif
+	ULONG_WORD(1b - linux_gateway_page)
+	ULONG_WORD(3b - linux_gateway_page)
 	.previous
 
 end_compare_and_swap:
 
	/* Make sure nothing else is placed on this page */
 	.align ASM_PAGE_SIZE
-	.export end_linux_gateway_page
-end_linux_gateway_page:
+END(linux_gateway_page)
+ENTRY(end_linux_gateway_page)
 
	/* Relocate symbols assuming linux_gateway_page is mapped
	   to virtual address 0x0 */
-#ifdef CONFIG_64BIT
-	/* FIXME: The code will always be on the gateay page
-	   and thus it will be on the first 4k, the
-	   assembler seems to think that the final
-	   subtraction result is only a word in
-	   length, so we pad the value.
-	 */
-#define LWS_ENTRY(_name_)	.word 0,(lws_##_name_ - linux_gateway_page)
-#else
-#define LWS_ENTRY(_name_)	.word (lws_##_name_ - linux_gateway_page)
-#endif
+
+#define LWS_ENTRY(_name_)	ULONG_WORD(lws_##_name_ - linux_gateway_page)
 
 	.section .rodata,"a"
 
 	.align ASM_PAGE_SIZE
	/* Light-weight-syscall table */
	/* Start of lws table. */
-	.export lws_table
-.Llws_table:
-lws_table:
+ENTRY(lws_table)
 	LWS_ENTRY(compare_and_swap32)	/* 0 - ELF32 Atomic compare and swap */
 	LWS_ENTRY(compare_and_swap64)	/* 1 - ELF64 Atomic compare and swap */
+END(lws_table)
	/* End of lws table */
 
 	.align ASM_PAGE_SIZE
-	.export sys_call_table
-.Lsys_call_table:
-sys_call_table:
+ENTRY(sys_call_table)
 #include "syscall_table.S"
+END(sys_call_table)
 
 #ifdef CONFIG_64BIT
 	.align ASM_PAGE_SIZE
-	.export sys_call_table64
-.Lsys_call_table64:
-sys_call_table64:
+ENTRY(sys_call_table64)
 #define SYSCALL_TABLE_64BIT
 #include "syscall_table.S"
+END(sys_call_table64)
 #endif
 
 #ifdef CONFIG_SMP
@@ -671,9 +655,7 @@ sys_call_table64:
	*/
 	.section .data
 	.align 4096
-	.export lws_lock_start
-.Llws_lock_start:
-lws_lock_start:
+ENTRY(lws_lock_start)
	/* lws locks */
 	.align 16
 	.rept 16
 	.word 0
 	.word 0
 	.endr
+END(lws_lock_start)
 	.previous
 
 #endif /* CONFIG_SMP for lws_lock_start */
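A closing note on the syscall.S hunks: the repeated per-site "#ifdef CONFIG_64BIT" padding of the 32-bit offsets stored in the __ex_table and lws table entries is consolidated into the single ULONG_WORD() helper defined at the top of the file. Below is a minimal sketch of how one exception-table entry now reads (the macro and label arithmetic are taken from the patch; the comments are explanatory additions). Since PA-RISC is big-endian, emitting the zero word first places the padding in the high half of the 64-bit entry:

	/* on 64bit pad to 64bit values */
	#ifdef CONFIG_64BIT
	#define ULONG_WORD(x)	.word 0, x	/* 64-bit kernel: zero high word, offset in low word */
	#else
	#define ULONG_WORD(x)	.word x		/* 32-bit kernel: plain 32-bit offset */
	#endif

		/* one exception-table pair, as in the patch */
		.section __ex_table,"aw"
		ULONG_WORD(2b - linux_gateway_page)	/* faulting insn, relative to the gateway page */
		ULONG_WORD(3b - linux_gateway_page)	/* fixup target, relative to the gateway page */
		.previous

LWS_ENTRY() is now defined in terms of the same helper, so the light-weight-syscall table entries come out pointer-sized on both 32-bit and 64-bit kernels without repeating the #ifdef at every use site.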