| author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-01-29 22:46:14 +1100 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-01-29 22:46:14 +1100 |
| commit | 5ea293a9048d3a58cb0c840fa719d85ad14cba47 (patch) | |
| tree | 88d1dd1eece2cfcbd858ff2c00fb0240dcfab3c7 | /include/asm-generic |
| parent | 03bc26cfefd6db756e6bc7fcda11dc17ada7be16 (diff) | |
| parent | d3883ecebbf9e095b9e379dabbbe8b2c1ee7a41c (diff) | |
| download | blackbird-op-linux-5ea293a9048d3a58cb0c840fa719d85ad14cba47.tar.gz, blackbird-op-linux-5ea293a9048d3a58cb0c840fa719d85ad14cba47.zip | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/sam/kbuild
* git://git.kernel.org/pub/scm/linux/kernel/git/sam/kbuild: (79 commits)
Remove references to "make dep"
kconfig: document use of HAVE_*
Introduce new section reference annotations tags: __ref, __refdata, __refconst
kbuild: warn about ld added unique sections
kbuild: add verbose option to Section mismatch reporting in modpost
kconfig: tristate choices with mixed tristate and boolean values
asm-generic/vmlix.lds.h: simplify __mem{init,exit}* dependencies
remove __attribute_used__
kbuild: support ARCH=x86 in buildtar
kconfig: remove "enable"
kbuild: simplified warning report in modpost
kbuild: introduce a few helpers in modpost
kbuild: use simpler section mismatch warnings in modpost
kbuild: link vmlinux.o before kallsyms passes
kbuild: introduce new option to enhance section mismatch analysis
Use separate sections for __dev/__cpu/__mem code/data
compiler.h: introduce __section()
all archs: consolidate init and exit sections in vmlinux.lds.h
kbuild: check section names consistently in modpost
kbuild: introduce blacklisting in modpost
...
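
One of the items pulled in above ("Introduce new section reference annotations tags: __ref, __refdata, __refconst") pairs with the `.ref.text`/`.ref.data`/`.ref.rodata` input sections that the diff below adds to `vmlinux.lds.h`: code or data carrying these tags may legitimately reference init/exit sections without triggering a modpost section mismatch warning. A minimal usage sketch follows; the function names are hypothetical, and the pattern assumes the caller only runs while the `__init` code is still mapped.

```c
#include <linux/init.h>

/* Illustrative only: names are hypothetical. Placed in .init.text and
 * freed once the kernel has finished booting.
 */
static int __init example_early_setup(void)
{
	return 0;
}

/* This caller cannot itself be __init, but it is only invoked before
 * init memory is freed. Marking it __ref puts it in .ref.text, so
 * modpost does not warn about the reference into .init.text; without
 * the tag this would be reported as a section mismatch.
 */
static int __ref example_bringup(void)
{
	return example_early_setup();
}
```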
Diffstat (limited to 'include/asm-generic')
| -rw-r--r-- | include/asm-generic/vmlinux.lds.h | 89 |
1 files changed, 87 insertions, 2 deletions
```diff
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 9f584cc5c5fb..76df771be585 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -9,10 +9,46 @@
 /* Align . to a 8 byte boundary equals to maximum function alignment. */
 #define ALIGN_FUNCTION()  . = ALIGN(8)
 
+/* The actual configuration determine if the init/exit sections
+ * are handled as text/data or they can be discarded (which
+ * often happens at runtime)
+ */
+#ifdef CONFIG_HOTPLUG
+#define DEV_KEEP(sec)    *(.dev##sec)
+#define DEV_DISCARD(sec)
+#else
+#define DEV_KEEP(sec)
+#define DEV_DISCARD(sec) *(.dev##sec)
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define CPU_KEEP(sec)    *(.cpu##sec)
+#define CPU_DISCARD(sec)
+#else
+#define CPU_KEEP(sec)
+#define CPU_DISCARD(sec) *(.cpu##sec)
+#endif
+
+#if defined(CONFIG_MEMORY_HOTPLUG)
+#define MEM_KEEP(sec)    *(.mem##sec)
+#define MEM_DISCARD(sec)
+#else
+#define MEM_KEEP(sec)
+#define MEM_DISCARD(sec) *(.mem##sec)
+#endif
+
+
 /* .data section */
 #define DATA_DATA							\
 	*(.data)							\
 	*(.data.init.refok)						\
+	*(.ref.data)							\
+	DEV_KEEP(init.data)						\
+	DEV_KEEP(exit.data)						\
+	CPU_KEEP(init.data)						\
+	CPU_KEEP(exit.data)						\
+	MEM_KEEP(init.data)						\
+	MEM_KEEP(exit.data)						\
 	. = ALIGN(8);							\
 	VMLINUX_SYMBOL(__start___markers) = .;				\
 	*(__markers)							\
@@ -132,6 +168,17 @@
 		*(__ksymtab_strings)					\
 	}								\
 									\
+	/* __*init sections */						\
+	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
+		*(.ref.rodata)						\
+		DEV_KEEP(init.rodata)					\
+		DEV_KEEP(exit.rodata)					\
+		CPU_KEEP(init.rodata)					\
+		CPU_KEEP(exit.rodata)					\
+		MEM_KEEP(init.rodata)					\
+		MEM_KEEP(exit.rodata)					\
+	}								\
+									\
 	/* Built-in module parameters. */				\
 	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__start___param) = .;			\
@@ -139,7 +186,6 @@
 		VMLINUX_SYMBOL(__stop___param) = .;			\
 		VMLINUX_SYMBOL(__end_rodata) = .;			\
 	}								\
-									\
 	. = ALIGN((align));
 
 /* RODATA provided for backward compatibility.
@@ -158,8 +204,16 @@
 #define TEXT_TEXT							\
 		ALIGN_FUNCTION();					\
 		*(.text)						\
+		*(.ref.text)						\
 		*(.text.init.refok)					\
-		*(.exit.text.refok)
+		*(.exit.text.refok)					\
+	DEV_KEEP(init.text)						\
+	DEV_KEEP(exit.text)						\
+	CPU_KEEP(init.text)						\
+	CPU_KEEP(exit.text)						\
+	MEM_KEEP(init.text)						\
+	MEM_KEEP(exit.text)
+
 
 /* sched.text is aling to function alignment to secure we have same
  * address even at second ld pass when generating System.map */
@@ -183,6 +237,37 @@
 		*(.kprobes.text)					\
 		VMLINUX_SYMBOL(__kprobes_text_end) = .;
 
+/* init and exit section handling */
+#define INIT_DATA							\
+	*(.init.data)							\
+	DEV_DISCARD(init.data)						\
+	DEV_DISCARD(init.rodata)					\
+	CPU_DISCARD(init.data)						\
+	CPU_DISCARD(init.rodata)					\
+	MEM_DISCARD(init.data)						\
+	MEM_DISCARD(init.rodata)
+
+#define INIT_TEXT							\
+	*(.init.text)							\
+	DEV_DISCARD(init.text)						\
+	CPU_DISCARD(init.text)						\
+	MEM_DISCARD(init.text)
+
+#define EXIT_DATA							\
+	*(.exit.data)							\
+	DEV_DISCARD(exit.data)						\
+	DEV_DISCARD(exit.rodata)					\
+	CPU_DISCARD(exit.data)						\
+	CPU_DISCARD(exit.rodata)					\
+	MEM_DISCARD(exit.data)						\
+	MEM_DISCARD(exit.rodata)
+
+#define EXIT_TEXT							\
+	*(.exit.text)							\
+	DEV_DISCARD(exit.text)						\
+	CPU_DISCARD(exit.text)						\
+	MEM_DISCARD(exit.text)
+
 /* DWARF debug sections.
    Symbols in the DWARF debugging sections are relative to
    the beginning of the section so we begin them at 0.  */
```
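
The KEEP/DISCARD macros build the matching input-section names by token pasting, e.g. `DEV_KEEP(init.text)` expands to `*(.devinit.text)`, so `CONFIG_HOTPLUG`, `CONFIG_HOTPLUG_CPU`, and `CONFIG_MEMORY_HOTPLUG` decide whether those sections are retained as ordinary text/data or folded into the discarded init/exit output sections. Below is a sketch of the driver-side view, assuming the `__devinit`/`__devexit` annotations that this series routes into `.devinit.text`/`.devexit.text`; the function names are made up for illustration.

```c
#include <linux/init.h>

/* Hypothetical driver hooks, for illustration only.
 *
 * __devinit places example_probe() in .devinit.text and __devexit
 * places example_remove() in .devexit.text.  With CONFIG_HOTPLUG=y the
 * DEV_KEEP() rules in vmlinux.lds.h keep both around at runtime;
 * without it the DEV_DISCARD() rules fold them into the init/exit
 * sections, so the probe code is freed after boot and the remove code
 * is dropped entirely.
 */
static int __devinit example_probe(void)
{
	return 0;
}

static void __devexit example_remove(void)
{
}
```

The same pattern applies to the `__cpuinit`/`__cpuexit`-style annotations (`.cpuinit.text`/`.cpuexit.text`, governed by `CONFIG_HOTPLUG_CPU`) and the `__meminit`/`__memexit`-style annotations (`.meminit.text`/`.memexit.text`, governed by `CONFIG_MEMORY_HOTPLUG`).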