Diffstat (limited to 'init')
-rw-r--r--  init/Kconfig             |  242
-rw-r--r--  init/Makefile            |    8
-rw-r--r--  init/calibrate.c         |   24
-rw-r--r--  init/do_mounts.c         |    1
-rw-r--r--  init/do_mounts_initrd.c  |    4
-rw-r--r--  init/do_mounts_rd.c      |    1
-rw-r--r--  init/initramfs.c         |   25
-rw-r--r--  init/main.c              |  134
-rw-r--r--  init/version.c           |    4
9 files changed, 261 insertions(+), 182 deletions(-)
diff --git a/init/Kconfig b/init/Kconfig
index c7bac39d6c61..cb64c5889e02 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -76,6 +76,14 @@ config INIT_ENV_ARG_LIMIT
variables passed to init from the kernel command line.
+config CROSS_COMPILE
+ string "Cross-compiler tool prefix"
+ help
+ Same as running 'make CROSS_COMPILE=prefix-' but stored for
+ default make runs in this kernel build directory. You don't
+ need to set this unless you want the configured kernel build
+ directory to select the cross-compiler automatically.
+
config LOCALVERSION
string "Local version - append to kernel release"
help
@@ -115,10 +123,13 @@ config HAVE_KERNEL_BZIP2
config HAVE_KERNEL_LZMA
bool
+config HAVE_KERNEL_LZO
+ bool
+
choice
prompt "Kernel compression mode"
default KERNEL_GZIP
- depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA
+ depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_LZO
help
The linux kernel is a kind of self-extracting executable.
Several compression algorithms are available, which differ
@@ -141,9 +152,8 @@ config KERNEL_GZIP
bool "Gzip"
depends on HAVE_KERNEL_GZIP
help
- The old and tried gzip compression. Its compression ratio is
- the poorest among the 3 choices; however its speed (both
- compression and decompression) is the fastest.
+ The old and tried gzip compression. It provides a good balance
+ between compression ratio and decompression speed.
config KERNEL_BZIP2
bool "Bzip2"
@@ -164,6 +174,14 @@ config KERNEL_LZMA
two. Compression is slowest. The kernel size is about 33%
smaller with LZMA in comparison to gzip.
+config KERNEL_LZO
+ bool "LZO"
+ depends on HAVE_KERNEL_LZO
+ help
+ Its compression ratio is the poorest among the 4. The kernel
+	  size is about 10% bigger than gzip; however its speed
+ (both compression and decompression) is the fastest.
+
endchoice
config SWAP
@@ -297,7 +315,7 @@ config AUDIT
config AUDITSYSCALL
bool "Enable system-call auditing support"
- depends on AUDIT && (X86 || PPC || PPC64 || S390 || IA64 || UML || SPARC64|| SUPERH)
+ depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH)
default y if SECURITY_SELINUX
help
Enable low-overhead system-call auditing infrastructure that
@@ -334,6 +352,15 @@ config TREE_PREEMPT_RCU
is also required. It also scales down nicely to
smaller systems.
+config TINY_RCU
+ bool "UP-only small-memory-footprint RCU"
+ depends on !SMP
+ help
+ This option selects the RCU implementation that is
+ designed for UP systems from which real-time response
+ is not required. This option greatly reduces the
+ memory footprint of RCU.
+
endchoice
config RCU_TRACE
@@ -377,6 +404,22 @@ config RCU_FANOUT_EXACT
Say N if unsure.
+config RCU_FAST_NO_HZ
+ bool "Accelerate last non-dyntick-idle CPU's grace periods"
+ depends on TREE_RCU && NO_HZ && SMP
+ default n
+ help
+ This option causes RCU to attempt to accelerate grace periods
+ in order to allow the final CPU to enter dynticks-idle state
+ more quickly. On the other hand, this option increases the
+ overhead of the dynticks-idle checking, particularly on systems
+ with large numbers of CPUs.
+
+ Say Y if energy efficiency is critically important, particularly
+ if you have relatively few CPUs.
+
+ Say N if you are unsure.
+
config TREE_RCU_TRACE
def_bool RCU_TRACE && ( TREE_RCU || TREE_PREEMPT_RCU )
select DEBUG_FS
@@ -426,59 +469,9 @@ config LOG_BUF_SHIFT
config HAVE_UNSTABLE_SCHED_CLOCK
bool
-config GROUP_SCHED
- bool "Group CPU scheduler"
- depends on EXPERIMENTAL
- default n
- help
- This feature lets CPU scheduler recognize task groups and control CPU
- bandwidth allocation to such task groups.
- In order to create a group from arbitrary set of processes, use
- CONFIG_CGROUPS. (See Control Group support.)
-
-config FAIR_GROUP_SCHED
- bool "Group scheduling for SCHED_OTHER"
- depends on GROUP_SCHED
- default GROUP_SCHED
-
-config RT_GROUP_SCHED
- bool "Group scheduling for SCHED_RR/FIFO"
- depends on EXPERIMENTAL
- depends on GROUP_SCHED
- default n
- help
- This feature lets you explicitly allocate real CPU bandwidth
- to users or control groups (depending on the "Basis for grouping tasks"
- setting below. If enabled, it will also make it impossible to
- schedule realtime tasks for non-root users until you allocate
- realtime bandwidth for them.
- See Documentation/scheduler/sched-rt-group.txt for more information.
-
-choice
- depends on GROUP_SCHED
- prompt "Basis for grouping tasks"
- default USER_SCHED
-
-config USER_SCHED
- bool "user id"
- help
- This option will choose userid as the basis for grouping
- tasks, thus providing equal CPU bandwidth to each user.
-
-config CGROUP_SCHED
- bool "Control groups"
- depends on CGROUPS
- help
- This option allows you to create arbitrary task groups
- using the "cgroup" pseudo filesystem and control
- the cpu bandwidth allocated to each such task group.
- Refer to Documentation/cgroups/cgroups.txt for more
- information on "cgroup" pseudo filesystem.
-
-endchoice
-
menuconfig CGROUPS
boolean "Control Group support"
+ depends on EVENTFD
help
This option adds support for grouping sets of processes together, for
use with process control subsystems such as Cpusets, CFS, memory
@@ -597,6 +590,62 @@ config CGROUP_MEM_RES_CTLR_SWAP
Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page
size is 4096bytes, 512k per 1Gbytes of swap.
+menuconfig CGROUP_SCHED
+ bool "Group CPU scheduler"
+ depends on EXPERIMENTAL && CGROUPS
+ default n
+ help
+ This feature lets CPU scheduler recognize task groups and control CPU
+ bandwidth allocation to such task groups. It uses cgroups to group
+ tasks.
+
+if CGROUP_SCHED
+config FAIR_GROUP_SCHED
+ bool "Group scheduling for SCHED_OTHER"
+ depends on CGROUP_SCHED
+ default CGROUP_SCHED
+
+config RT_GROUP_SCHED
+ bool "Group scheduling for SCHED_RR/FIFO"
+ depends on EXPERIMENTAL
+ depends on CGROUP_SCHED
+ default n
+ help
+ This feature lets you explicitly allocate real CPU bandwidth
+ to task groups. If enabled, it will also make it impossible to
+ schedule realtime tasks for non-root users until you allocate
+ realtime bandwidth for them.
+ See Documentation/scheduler/sched-rt-group.txt for more information.
+
+endif #CGROUP_SCHED
+
+config BLK_CGROUP
+ tristate "Block IO controller"
+ depends on CGROUPS && BLOCK
+ default n
+ ---help---
+ Generic block IO controller cgroup interface. This is the common
+ cgroup interface which should be used by various IO controlling
+ policies.
+
+ Currently, CFQ IO scheduler uses it to recognize task groups and
+ control disk bandwidth allocation (proportional time slice allocation)
+ to such task groups.
+
+ This option only enables generic Block IO controller infrastructure.
+ One needs to also enable actual IO controlling logic in CFQ for it
+ to take effect. (CONFIG_CFQ_GROUP_IOSCHED=y).
+
+ See Documentation/cgroups/blkio-controller.txt for more information.
+
+config DEBUG_BLK_CGROUP
+ bool "Enable Block IO controller debugging"
+ depends on BLK_CGROUP
+ default n
+ ---help---
+ Enable some debugging help. Currently it exports additional stat
+ files in a cgroup which can be useful for debugging.
+
endif # CGROUPS
config MM_OWNER
@@ -606,7 +655,7 @@ config SYSFS_DEPRECATED
bool
config SYSFS_DEPRECATED_V2
- bool "remove sysfs features which may confuse old userspace tools"
+ bool "enable deprecated sysfs features to support old userspace tools"
depends on SYSFS
default n
select SYSFS_DEPRECATED
@@ -754,6 +803,7 @@ config UID16
config SYSCTL_SYSCALL
bool "Sysctl syscall support" if EMBEDDED
+ depends on PROC_SYSCTL
default y
select SYSCTL
---help---
@@ -921,6 +971,11 @@ config HAVE_PERF_EVENTS
help
See tools/perf/design.txt for details.
+config PERF_USE_VMALLOC
+ bool
+ help
+ See tools/perf/design.txt for details
+
menu "Kernel Performance Events And Counters"
config PERF_EVENTS
@@ -932,7 +987,7 @@ config PERF_EVENTS
Enable kernel support for various performance events provided
by software and hardware.
- Software events are supported either build-in or via the
+ Software events are supported either built-in or via the
use of generic tracepoints.
Most modern CPUs support performance events via performance
@@ -944,26 +999,13 @@ config PERF_EVENTS
used to profile the code that runs on that CPU.
The Linux Performance Event subsystem provides an abstraction of
- these software and hardware cevent apabilities, available via a
+ these software and hardware event capabilities, available via a
system call and used by the "perf" utility in tools/perf/. It
provides per task and per CPU counters, and it provides event
capabilities on top of those.
Say Y if unsure.
-config EVENT_PROFILE
- bool "Tracepoint profiling sources"
- depends on PERF_EVENTS && EVENT_TRACING
- default y
- help
- Allow the use of tracepoints as software performance events.
-
- When this is enabled, you can create perf events based on
- tracepoints using PERF_TYPE_TRACEPOINT and the tracepoint ID
- found in debugfs://tracing/events/*/*/id. (The -e/--events
- option to the perf tool can parse and interpret symbolic
- tracepoints, in the subsystem:tracepoint_name format.)
-
config PERF_COUNTERS
bool "Kernel performance counters (old config option)"
depends on HAVE_PERF_EVENTS
@@ -976,6 +1018,19 @@ config PERF_COUNTERS
Say N if unsure.
+config DEBUG_PERF_USE_VMALLOC
+ default n
+ bool "Debug: use vmalloc to back perf mmap() buffers"
+ depends on PERF_EVENTS && DEBUG_KERNEL
+ select PERF_USE_VMALLOC
+ help
+ Use vmalloc memory to back perf mmap() buffers.
+
+ Mostly useful for debugging the vmalloc code on platforms
+ that don't require it.
+
+ Say N if unsure.
+
endmenu
config VM_EVENT_COUNTERS
@@ -1051,8 +1106,30 @@ config SLOB
endchoice
+config MMAP_ALLOW_UNINITIALIZED
+ bool "Allow mmapped anonymous memory to be uninitialized"
+ depends on EMBEDDED && !MMU
+ default n
+ help
+ Normally, and according to the Linux spec, anonymous memory obtained
+ from mmap() has it's contents cleared before it is passed to
+ userspace. Enabling this config option allows you to request that
+ mmap() skip that if it is given an MAP_UNINITIALIZED flag, thus
+ providing a huge performance boost. If this option is not enabled,
+ then the flag will be ignored.
+
+ This is taken advantage of by uClibc's malloc(), and also by
+ ELF-FDPIC binfmt's brk and stack allocator.
+
+ Because of the obvious security issues, this option should only be
+ enabled on embedded devices where you control what is run in
+ userspace. Since that isn't generally a problem on no-MMU systems,
+ it is normally safe to say Y here.
+
+ See Documentation/nommu-mmap.txt for more information.
+
config PROFILING
- bool "Profiling support (EXPERIMENTAL)"
+ bool "Profiling support"
help
Say Y here to enable the extended profiling support mechanisms used
by profilers such as OProfile.
@@ -1066,20 +1143,6 @@ config TRACEPOINTS
source "arch/Kconfig"
-config SLOW_WORK
- default n
- bool
- help
- The slow work thread pool provides a number of dynamically allocated
- threads that can be used by the kernel to perform operations that
- take a relatively long time.
-
- An example of this would be CacheFiles doing a path lookup followed
- by a series of mkdirs and a create call, all of which have to touch
- disk.
-
- See Documentation/slow-work.txt.
-
endmenu # General setup
config HAVE_GENERIC_DMA_COHERENT
@@ -1192,3 +1255,8 @@ source "block/Kconfig"
config PREEMPT_NOTIFIERS
bool
+config PADATA
+ depends on SMP
+ bool
+
+source "kernel/Kconfig.locks"
diff --git a/init/Makefile b/init/Makefile
index 4a243df426f7..0bf677aa0872 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -15,12 +15,8 @@ mounts-$(CONFIG_BLK_DEV_RAM) += do_mounts_rd.o
mounts-$(CONFIG_BLK_DEV_INITRD) += do_mounts_initrd.o
mounts-$(CONFIG_BLK_DEV_MD) += do_mounts_md.o
-# files to be removed upon make clean
-clean-files := ../include/linux/compile.h
-
# dependencies on generated files need to be listed explicitly
-
-$(obj)/version.o: include/linux/compile.h
+$(obj)/version.o: include/generated/compile.h
# compile.h changes depending on hostname, generation number, etc,
# so we regenerate it always.
@@ -30,7 +26,7 @@ $(obj)/version.o: include/linux/compile.h
chk_compile.h = :
quiet_chk_compile.h = echo ' CHK $@'
silent_chk_compile.h = :
-include/linux/compile.h: FORCE
+include/generated/compile.h: FORCE
@$($(quiet)chk_compile.h)
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
"$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
diff --git a/init/calibrate.c b/init/calibrate.c
index a379c9061199..6eb48e53d61c 100644
--- a/init/calibrate.c
+++ b/init/calibrate.c
@@ -123,23 +123,26 @@ void __cpuinit calibrate_delay(void)
{
unsigned long ticks, loopbit;
int lps_precision = LPS_PREC;
+ static bool printed;
if (preset_lpj) {
loops_per_jiffy = preset_lpj;
- printk(KERN_INFO
- "Calibrating delay loop (skipped) preset value.. ");
- } else if ((smp_processor_id() == 0) && lpj_fine) {
+ if (!printed)
+ pr_info("Calibrating delay loop (skipped) "
+ "preset value.. ");
+ } else if ((!printed) && lpj_fine) {
loops_per_jiffy = lpj_fine;
- printk(KERN_INFO
- "Calibrating delay loop (skipped), "
+ pr_info("Calibrating delay loop (skipped), "
"value calculated using timer frequency.. ");
} else if ((loops_per_jiffy = calibrate_delay_direct()) != 0) {
- printk(KERN_INFO
- "Calibrating delay using timer specific routine.. ");
+ if (!printed)
+ pr_info("Calibrating delay using timer "
+ "specific routine.. ");
} else {
loops_per_jiffy = (1<<12);
- printk(KERN_INFO "Calibrating delay loop... ");
+ if (!printed)
+ pr_info("Calibrating delay loop... ");
while ((loops_per_jiffy <<= 1) != 0) {
/* wait for "start of" clock tick */
ticks = jiffies;
@@ -170,7 +173,10 @@ void __cpuinit calibrate_delay(void)
loops_per_jiffy &= ~loopbit;
}
}
- printk(KERN_CONT "%lu.%02lu BogoMIPS (lpj=%lu)\n",
+ if (!printed)
+ pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy);
+
+ printed = true;
}
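The calibrate.c change keeps a function-local static flag so the calibration banner and BogoMIPS line are printed only once, even though every CPU runs the calibration. A stripped-down sketch of that print-once pattern (plain C with invented names, not the kernel function itself):

	#include <stdbool.h>
	#include <stdio.h>

	static void calibrate_cpu(unsigned long lpj)
	{
		static bool printed;		/* zero-initialised, shared across calls */

		if (!printed)
			printf("Calibrating delay loop... ");

		/* ... per-CPU calibration work would happen here ... */

		if (!printed)
			printf("done (lpj=%lu)\n", lpj);

		printed = true;			/* later CPUs calibrate silently */
	}

	int main(void)
	{
		for (int cpu = 0; cpu < 4; cpu++)
			calibrate_cpu(4000000UL);
		return 0;
	}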
diff --git a/init/do_mounts.c b/init/do_mounts.c
index bb008d064c1a..02e3ca4fc527 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -15,6 +15,7 @@
#include <linux/initrd.h>
#include <linux/async.h>
#include <linux/fs_struct.h>
+#include <linux/slab.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index 614241b5200c..2b108538d0d9 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -30,11 +30,7 @@ static int __init do_linuxrc(void * shell)
extern char * envp_init[];
sys_close(old_fd);sys_close(root_fd);
- sys_close(0);sys_close(1);sys_close(2);
sys_setsid();
- (void) sys_open("/dev/console",O_RDWR,0);
- (void) sys_dup(0);
- (void) sys_dup(0);
return kernel_execve(shell, argv, envp_init);
}
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index 027a402708de..bf3ef667bf36 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -7,6 +7,7 @@
#include <linux/cramfs_fs.h>
#include <linux/initrd.h>
#include <linux/string.h>
+#include <linux/slab.h>
#include "do_mounts.h"
#include "../fs/squashfs/squashfs_fs.h"
diff --git a/init/initramfs.c b/init/initramfs.c
index 4c00edc59689..4b9c20205092 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -413,7 +413,7 @@ static unsigned my_inptr; /* index of next byte to be processed in inbuf */
static char * __init unpack_to_rootfs(char *buf, unsigned len)
{
- int written;
+ int written, res;
decompress_fn decompress;
const char *compress_name;
static __initdata char msg_buf[64];
@@ -445,17 +445,20 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len)
}
this_header = 0;
decompress = decompress_method(buf, len, &compress_name);
- if (decompress)
- decompress(buf, len, NULL, flush_buffer, NULL,
+ if (decompress) {
+ res = decompress(buf, len, NULL, flush_buffer, NULL,
&my_inptr, error);
- else if (compress_name) {
+ if (res)
+ error("decompressor failed");
+ } else if (compress_name) {
if (!message) {
snprintf(msg_buf, sizeof msg_buf,
"compression method %s not configured",
compress_name);
message = msg_buf;
}
- }
+ } else
+ error("junk in compressed archive");
if (state != Reset)
error("junk in compressed archive");
this_header = saved_offset + my_inptr;
@@ -523,7 +526,7 @@ static void __init clean_rootfs(void)
int fd;
void *buf;
struct linux_dirent64 *dirp;
- int count;
+ int num;
fd = sys_open("/", O_RDONLY, 0);
WARN_ON(fd < 0);
@@ -537,9 +540,9 @@ static void __init clean_rootfs(void)
}
dirp = buf;
- count = sys_getdents64(fd, dirp, BUF_SIZE);
- while (count > 0) {
- while (count > 0) {
+ num = sys_getdents64(fd, dirp, BUF_SIZE);
+ while (num > 0) {
+ while (num > 0) {
struct stat st;
int ret;
@@ -552,12 +555,12 @@ static void __init clean_rootfs(void)
sys_unlink(dirp->d_name);
}
- count -= dirp->d_reclen;
+ num -= dirp->d_reclen;
dirp = (void *)dirp + dirp->d_reclen;
}
dirp = buf;
memset(buf, 0, BUF_SIZE);
- count = sys_getdents64(fd, dirp, BUF_SIZE);
+ num = sys_getdents64(fd, dirp, BUF_SIZE);
}
sys_close(fd);
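clean_rootfs() above reads directory entries with sys_getdents64() and steps through the returned buffer by d_reclen. A userspace analogue of that walk, with the struct layout taken from the getdents64(2) man page and the raw syscall used so it also builds against older C libraries that lack a wrapper:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	struct linux_dirent64 {
		unsigned long long d_ino;
		long long          d_off;
		unsigned short     d_reclen;
		unsigned char      d_type;
		char               d_name[];
	};

	int main(void)
	{
		char buf[4096];
		int fd = open("/", O_RDONLY | O_DIRECTORY);
		long n;

		if (fd < 0)
			return 1;

		while ((n = syscall(SYS_getdents64, fd, buf, sizeof(buf))) > 0) {
			long off = 0;
			while (off < n) {
				struct linux_dirent64 *d = (void *)(buf + off);
				printf("%s\n", d->d_name);
				off += d->d_reclen;	/* same stride the kernel loop uses */
			}
		}
		close(fd);
		return 0;
	}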
diff --git a/init/main.c b/init/main.c
index 7449819a4805..b8b6effe9ff4 100644
--- a/init/main.c
+++ b/init/main.c
@@ -25,7 +25,6 @@
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/tty.h>
-#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
@@ -33,7 +32,6 @@
#include <linux/start_kernel.h>
#include <linux/security.h>
#include <linux/smp.h>
-#include <linux/workqueue.h>
#include <linux/profile.h>
#include <linux/rcupdate.h>
#include <linux/moduleparam.h>
@@ -63,13 +61,13 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/idr.h>
+#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/kmemcheck.h>
-#include <linux/kmemtrace.h>
#include <linux/sfi.h>
#include <linux/shmem_fs.h>
-#include <trace/boot.h>
+#include <linux/slab.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -124,7 +122,9 @@ static char *ramdisk_execute_command;
#ifdef CONFIG_SMP
/* Setup configured maximum number of CPUs to activate */
-unsigned int __initdata setup_max_cpus = NR_CPUS;
+unsigned int setup_max_cpus = NR_CPUS;
+EXPORT_SYMBOL(setup_max_cpus);
+
/*
* Setup routine for controlling SMP activation
@@ -149,6 +149,20 @@ static int __init nosmp(char *str)
early_param("nosmp", nosmp);
+/* this is hard limit */
+static int __init nrcpus(char *str)
+{
+ int nr_cpus;
+
+ get_option(&str, &nr_cpus);
+ if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
+ nr_cpu_ids = nr_cpus;
+
+ return 0;
+}
+
+early_param("nr_cpus", nrcpus);
+
static int __init maxcpus(char *str)
{
get_option(&str, &setup_max_cpus);
@@ -160,7 +174,7 @@ static int __init maxcpus(char *str)
early_param("maxcpus", maxcpus);
#else
-const unsigned int setup_max_cpus = NR_CPUS;
+static const unsigned int setup_max_cpus = NR_CPUS;
#endif
/*
@@ -251,7 +265,7 @@ early_param("loglevel", loglevel);
/*
* Unknown boot options get handed to init, unless they look like
- * failed parameters
+ * unused parameters (modprobe will find them in /proc/cmdline).
*/
static int __init unknown_bootoption(char *param, char *val)
{
@@ -272,14 +286,9 @@ static int __init unknown_bootoption(char *param, char *val)
if (obsolete_checksetup(param))
return 0;
- /*
- * Preemptive maintenance for "why didn't my misspelled command
- * line work?"
- */
- if (strchr(param, '.') && (!val || strchr(param, '.') < val)) {
- printk(KERN_ERR "Unknown boot option `%s': ignoring\n", param);
+ /* Unused module parameter. */
+ if (strchr(param, '.') && (!val || strchr(param, '.') < val))
return 0;
- }
if (panic_later)
return 0;
@@ -374,12 +383,6 @@ static void __init smp_init(void)
{
unsigned int cpu;
- /*
- * Set up the current CPU as possible to migrate to.
- * The other ones will be done by cpu_up/cpu_down()
- */
- set_cpu_active(smp_processor_id(), true);
-
/* FIXME: This should be done in userspace --RR */
for_each_present_cpu(cpu) {
if (num_online_cpus() >= setup_max_cpus)
@@ -418,17 +421,26 @@ static void __init setup_command_line(char *command_line)
* gcc-3.4 accidentally inlines this function, so use noinline.
*/
+static __initdata DECLARE_COMPLETION(kthreadd_done);
+
static noinline void __init_refok rest_init(void)
__releases(kernel_lock)
{
int pid;
rcu_scheduler_starting();
+ /*
+ * We need to spawn init first so that it obtains pid 1, however
+ * the init task will end up wanting to create kthreads, which, if
+ * we schedule it before we create kthreadd, will OOPS.
+ */
kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
numa_default_policy();
pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
+ rcu_read_lock();
kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
- unlock_kernel();
+ rcu_read_unlock();
+ complete(&kthreadd_done);
/*
* The boot idle thread must execute schedule()
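The rest_init() hunk above adds the kthreadd_done completion so that kernel_init, spawned first to obtain PID 1, blocks until kthreadd actually exists before it tries to create kernel threads. A userspace analogue of that handshake, using a POSIX semaphore in place of the kernel completion (illustrative only, names invented for the example):

	#include <pthread.h>
	#include <semaphore.h>
	#include <stdio.h>

	static sem_t kthreadd_done;		/* plays the role of the completion */

	static void *kernel_init_thread(void *unused)
	{
		(void)unused;
		sem_wait(&kthreadd_done);	/* wait_for_completion(&kthreadd_done) */
		printf("init: kthreadd is ready, safe to spawn kthreads\n");
		return NULL;
	}

	int main(void)
	{
		pthread_t init_thr;

		sem_init(&kthreadd_done, 0, 0);
		pthread_create(&init_thr, NULL, kernel_init_thread, NULL);

		/* ... the "kthreadd" side gets set up here ... */
		printf("kthreadd created\n");
		sem_post(&kthreadd_done);	/* complete(&kthreadd_done) */

		pthread_join(init_thr, NULL);
		return 0;
	}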
@@ -491,6 +503,7 @@ static void __init boot_cpu_init(void)
int cpu = smp_processor_id();
/* Mark the boot cpu "present", "online" etc for SMP and UP case */
set_cpu_online(cpu, true);
+ set_cpu_active(cpu, true);
set_cpu_present(cpu, true);
set_cpu_possible(cpu, true);
}
@@ -515,6 +528,7 @@ static void __init mm_init(void)
page_cgroup_init_flatmem();
mem_init();
kmem_cache_init();
+ percpu_init_late();
pgtable_cache_init();
vmalloc_init();
}
@@ -548,7 +562,6 @@ asmlinkage void __init start_kernel(void)
* Interrupts are still disabled. Do necessary setups, then
* enable them
*/
- lock_kernel();
tick_init();
boot_cpu_init();
page_address_init();
@@ -560,7 +573,7 @@ asmlinkage void __init start_kernel(void)
setup_per_cpu_areas();
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
- build_all_zonelists();
+ build_all_zonelists(NULL);
page_alloc_init();
printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
@@ -594,6 +607,7 @@ asmlinkage void __init start_kernel(void)
local_irq_disable();
}
rcu_init();
+ radix_tree_init();
/* init some links before init_ISA_irqs() */
early_irq_init();
init_IRQ();
@@ -611,7 +625,7 @@ asmlinkage void __init start_kernel(void)
local_irq_enable();
/* Interrupts are enabled now so all GFP allocations are safe. */
- set_gfp_allowed_mask(__GFP_BITS_MASK);
+ gfp_allowed_mask = __GFP_BITS_MASK;
kmem_cache_init_late();
@@ -645,7 +659,6 @@ asmlinkage void __init start_kernel(void)
#endif
page_cgroup_init();
enable_debug_pagealloc();
- kmemtrace_init();
kmemleak_init();
debug_objects_mem_init();
idr_init_cache();
@@ -668,8 +681,8 @@ asmlinkage void __init start_kernel(void)
buffer_init();
key_init();
security_init();
+ dbg_late_init();
vfs_caches_init(totalram_pages);
- radix_tree_init();
signals_init();
/* rootfs populating might need page-writeback */
page_writeback_init();
@@ -696,10 +709,10 @@ asmlinkage void __init start_kernel(void)
static void __init do_ctors(void)
{
#ifdef CONFIG_CONSTRUCTORS
- ctor_fn_t *call = (ctor_fn_t *) __ctors_start;
+ ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;
- for (; call < (ctor_fn_t *) __ctors_end; call++)
- (*call)();
+ for (; fn < (ctor_fn_t *) __ctors_end; fn++)
+ (*fn)();
#endif
}
@@ -707,38 +720,33 @@ int initcall_debug;
core_param(initcall_debug, initcall_debug, bool, 0644);
static char msgbuf[64];
-static struct boot_trace_call call;
-static struct boot_trace_ret ret;
int do_one_initcall(initcall_t fn)
{
int count = preempt_count();
ktime_t calltime, delta, rettime;
+ unsigned long long duration;
+ int ret;
if (initcall_debug) {
- call.caller = task_pid_nr(current);
- printk("calling %pF @ %i\n", fn, call.caller);
+ printk("calling %pF @ %i\n", fn, task_pid_nr(current));
calltime = ktime_get();
- trace_boot_call(&call, fn);
- enable_boot_trace();
}
- ret.result = fn();
+ ret = fn();
if (initcall_debug) {
- disable_boot_trace();
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
- trace_boot_ret(&ret, fn);
- printk("initcall %pF returned %d after %Ld usecs\n", fn,
- ret.result, ret.duration);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+ printk("initcall %pF returned %d after %lld usecs\n", fn,
+ ret, duration);
}
msgbuf[0] = 0;
- if (ret.result && ret.result != -ENODEV && initcall_debug)
- sprintf(msgbuf, "error code %d ", ret.result);
+ if (ret && ret != -ENODEV && initcall_debug)
+ sprintf(msgbuf, "error code %d ", ret);
if (preempt_count() != count) {
strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
@@ -752,7 +760,7 @@ int do_one_initcall(initcall_t fn)
printk("initcall %pF returned with %s\n", fn, msgbuf);
}
- return ret.result;
+ return ret;
}
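With the boot tracer structures gone, do_one_initcall() above times each call directly with ktime_get() and reports an approximate microsecond figure by shifting nanoseconds right by 10. A small userspace sketch of the same measurement, with clock_gettime() standing in for ktime_get() and all names invented for the example:

	#include <stdio.h>
	#include <time.h>

	typedef int (*initcall_t)(void);

	static int dummy_initcall(void)
	{
		struct timespec ts = { 0, 2000000 };	/* ~2 ms of pretend work */
		nanosleep(&ts, NULL);
		return 0;
	}

	static int do_one_initcall_timed(initcall_t fn)
	{
		struct timespec a, b;
		long long ns;

		clock_gettime(CLOCK_MONOTONIC, &a);
		int ret = fn();
		clock_gettime(CLOCK_MONOTONIC, &b);

		ns = (b.tv_sec - a.tv_sec) * 1000000000LL + (b.tv_nsec - a.tv_nsec);
		/* same cheap ns -> "usecs" approximation as the kernel: >> 10 */
		printf("initcall returned %d after %lld usecs\n", ret, ns >> 10);
		return ret;
	}

	int main(void)
	{
		return do_one_initcall_timed(dummy_initcall);
	}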
@@ -760,10 +768,10 @@ extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[];
static void __init do_initcalls(void)
{
- initcall_t *call;
+ initcall_t *fn;
- for (call = __early_initcall_end; call < __initcall_end; call++)
- do_one_initcall(*call);
+ for (fn = __early_initcall_end; fn < __initcall_end; fn++)
+ do_one_initcall(*fn);
/* Make sure there is no pending stuff from the initcall sequence */
flush_scheduled_work();
@@ -778,8 +786,6 @@ static void __init do_initcalls(void)
*/
static void __init do_basic_setup(void)
{
- rcu_init_sched(); /* needed by module_init stage. */
- init_workqueues();
cpuset_init_smp();
usermodehelper_init();
init_tmpfs();
@@ -791,10 +797,10 @@ static void __init do_basic_setup(void)
static void __init do_pre_smp_initcalls(void)
{
- initcall_t *call;
+ initcall_t *fn;
- for (call = __initcall_start; call < __early_initcall_end; call++)
- do_one_initcall(*call);
+ for (fn = __initcall_start; fn < __early_initcall_end; fn++)
+ do_one_initcall(*fn);
}
static void run_init_process(char *init_filename)
@@ -812,16 +818,10 @@ static noinline int init_post(void)
/* need to finish all async __init code before freeing the memory */
async_synchronize_full();
free_initmem();
- unlock_kernel();
mark_rodata_ro();
system_state = SYSTEM_RUNNING;
numa_default_policy();
- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
- printk(KERN_WARNING "Warning: unable to open an initial console.\n");
-
- (void) sys_dup(0);
- (void) sys_dup(0);
current->signal->flags |= SIGNAL_UNKILLABLE;
@@ -847,17 +847,20 @@ static noinline int init_post(void)
run_init_process("/bin/init");
run_init_process("/bin/sh");
- panic("No init found. Try passing init= option to kernel.");
+ panic("No init found. Try passing init= option to kernel. "
+ "See Linux Documentation/init.txt for guidance.");
}
static int __init kernel_init(void * unused)
{
- lock_kernel();
-
+ /*
+ * Wait until kthreadd is all set-up.
+ */
+ wait_for_completion(&kthreadd_done);
/*
* init can allocate pages on any node
*/
- set_mems_allowed(node_possible_map);
+ set_mems_allowed(node_states[N_HIGH_MEMORY]);
/*
* init can run on any cpu.
*/
@@ -877,13 +880,18 @@ static int __init kernel_init(void * unused)
smp_prepare_cpus(setup_max_cpus);
do_pre_smp_initcalls();
- start_boot_trace();
smp_init();
sched_init_smp();
do_basic_setup();
+ /* Open the /dev/console on the rootfs, this should never fail */
+ if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
+ printk(KERN_WARNING "Warning: unable to open an initial console.\n");
+
+ (void) sys_dup(0);
+ (void) sys_dup(0);
/*
* check if there is an early userspace init. If yes, let it do all
* the work
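The last main.c hunk shown moves the /dev/console setup from init_post() into kernel_init() after do_basic_setup(): open() returns the lowest free descriptor (0 for a task with nothing open), and the two dup(0) calls fill descriptors 1 and 2, so init starts with working stdin/stdout/stderr. The same idiom in ordinary userspace, sketched against /dev/tty rather than /dev/console so it runs without privileges:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* Simulate a process that starts with no open descriptors. */
		close(0); close(1); close(2);

		if (open("/dev/tty", O_RDWR) < 0)	/* lands on fd 0 */
			return 1;
		(void) dup(0);				/* becomes fd 1 */
		(void) dup(0);				/* becomes fd 2 */

		write(1, "stdout is wired up again\n", 25);
		return 0;
	}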
diff --git a/init/version.c b/init/version.c
index 52a8b98642b8..adff586401a5 100644
--- a/init/version.c
+++ b/init/version.c
@@ -6,11 +6,11 @@
* May be freely distributed as part of Linux.
*/
-#include <linux/compile.h>
+#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
#include <linux/version.h>
#ifndef CONFIG_KALLSYMS