author    Ingo Molnar <mingo@elte.hu>    2008-10-15 13:46:29 +0200
committer Ingo Molnar <mingo@elte.hu>    2008-10-15 13:46:29 +0200
commit    b2aaf8f74cdc84a9182f6cabf198b7763bcb9d40 (patch)
tree      53ccb1c2c14751fe69cf93102e76e97021f6df07 /kernel/fork.c
parent    4f962d4d65923d7b722192e729840cfb79af0a5a (diff)
parent    278429cff8809958d25415ba0ed32b59866ab1a8 (diff)
Merge branch 'linus' into stackprotector
Conflicts:
	arch/x86/kernel/Makefile
	include/asm-x86/pda.h
Diffstat (limited to 'kernel/fork.c')
-rw-r--r--    kernel/fork.c    143
1 file changed, 77 insertions, 66 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index d428336e7aa1..99c5c655b098 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -23,18 +23,22 @@
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
+#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
+#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
+#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
+#include <linux/tracehook.h>
#include <linux/futex.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
@@ -92,6 +96,23 @@ int nr_processes(void)
static struct kmem_cache *task_struct_cachep;
#endif
+#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
+static inline struct thread_info *alloc_thread_info(struct task_struct *tsk)
+{
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ gfp_t mask = GFP_KERNEL | __GFP_ZERO;
+#else
+ gfp_t mask = GFP_KERNEL;
+#endif
+ return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
+}
+
+static inline void free_thread_info(struct thread_info *ti)
+{
+ free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
+}
+#endif
+
/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;
@@ -311,6 +332,14 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
}
/*
+ * Clear hugetlb-related page reserves for children. This only
+ * affects MAP_PRIVATE mappings. Faults generated by the child
+ * are not guaranteed to succeed, even if read-only
+ */
+ if (is_vm_hugetlb_page(tmp))
+ reset_vma_resv_huge_pages(tmp);
+
+ /*
* Link in the new vma and copy the page table entries.
*/
*pprev = tmp;
@@ -378,7 +407,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
INIT_LIST_HEAD(&mm->mmlist);
mm->flags = (current->mm) ? current->mm->flags
: MMF_DUMP_FILTER_DEFAULT;
- mm->core_waiters = 0;
+ mm->core_state = NULL;
mm->nr_ptes = 0;
set_mm_counter(mm, file_rss, 0);
set_mm_counter(mm, anon_rss, 0);
@@ -391,6 +420,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
+ mmu_notifier_mm_init(mm);
return mm;
}
@@ -423,6 +453,7 @@ void __mmdrop(struct mm_struct *mm)
BUG_ON(mm == &init_mm);
mm_free_pgd(mm);
destroy_context(mm);
+ mmu_notifier_mm_destroy(mm);
free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);
@@ -452,7 +483,7 @@ EXPORT_SYMBOL_GPL(mmput);
/**
* get_task_mm - acquire a reference to the task's mm
*
- * Returns %NULL if the task has no mm. Checks PF_BORROWED_MM (meaning
+ * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning
* this kernel workthread has transiently adopted a user mm with use_mm,
* to do its AIO) is not set and if so returns a reference to it, after
* bumping up the use count. User must release the mm via mmput()
@@ -465,7 +496,7 @@ struct mm_struct *get_task_mm(struct task_struct *task)
task_lock(task);
mm = task->mm;
if (mm) {
- if (task->flags & PF_BORROWED_MM)
+ if (task->flags & PF_KTHREAD)
mm = NULL;
else
atomic_inc(&mm->mm_users);
@@ -634,13 +665,6 @@ static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
path_get(&old->root);
fs->pwd = old->pwd;
path_get(&old->pwd);
- if (old->altroot.dentry) {
- fs->altroot = old->altroot;
- path_get(&old->altroot);
- } else {
- fs->altroot.mnt = NULL;
- fs->altroot.dentry = NULL;
- }
read_unlock(&old->lock);
}
return fs;
@@ -783,6 +807,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
sig->leader = 0; /* session leadership doesn't inherit */
sig->tty_old_pgrp = NULL;
+ sig->tty = NULL;
sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
sig->gtime = cputime_zero;
@@ -790,6 +815,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
+ task_io_accounting_init(&sig->ioac);
sig->sum_sched_runtime = 0;
INIT_LIST_HEAD(&sig->cpu_timers[0]);
INIT_LIST_HEAD(&sig->cpu_timers[1]);
@@ -818,6 +844,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
void __cleanup_signal(struct signal_struct *sig)
{
exit_thread_group_keys(sig);
+ tty_kref_put(sig->tty);
kmem_cache_free(signal_cachep, sig);
}
@@ -837,8 +864,7 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
new_flags &= ~PF_SUPERPRIV;
new_flags |= PF_FORKNOEXEC;
- if (!(clone_flags & CLONE_PTRACE))
- p->ptrace = 0;
+ new_flags |= PF_STARTING;
p->flags = new_flags;
clear_freeze_flag(p);
}
@@ -879,7 +905,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
struct pt_regs *regs,
unsigned long stack_size,
int __user *child_tidptr,
- struct pid *pid)
+ struct pid *pid,
+ int trace)
{
int retval;
struct task_struct *p;
@@ -914,7 +941,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
rt_mutex_init_task(p);
-#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_PROVE_LOCKING
DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
@@ -972,13 +999,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->last_switch_timestamp = 0;
#endif
-#ifdef CONFIG_TASK_XACCT
- p->rchar = 0; /* I/O counter: bytes read */
- p->wchar = 0; /* I/O counter: bytes written */
- p->syscr = 0; /* I/O counter: read syscalls */
- p->syscw = 0; /* I/O counter: write syscalls */
-#endif
- task_io_accounting_init(p);
+ task_io_accounting_init(&p->ioac);
acct_clear_integrals(p);
p->it_virt_expires = cputime_zero;
@@ -1085,6 +1106,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
if (clone_flags & CLONE_THREAD)
p->tgid = current->tgid;
+ if (current->nsproxy != p->nsproxy) {
+ retval = ns_cgroup_clone(p, pid);
+ if (retval)
+ goto bad_fork_free_pid;
+ }
+
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
/*
* Clear TID on mm_release()?
@@ -1129,8 +1156,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
*/
p->group_leader = p;
INIT_LIST_HEAD(&p->thread_group);
- INIT_LIST_HEAD(&p->ptrace_children);
- INIT_LIST_HEAD(&p->ptrace_list);
/* Now that the task is set up, run cgroup callbacks if
* necessary. We need to run them before the task is visible
@@ -1161,7 +1186,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->real_parent = current->real_parent;
else
p->real_parent = current;
- p->parent = p->real_parent;
spin_lock(&current->sighand->siglock);
@@ -1202,16 +1226,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
}
if (likely(p->pid)) {
- add_parent(p);
- if (unlikely(p->ptrace & PT_PTRACED))
- __ptrace_link(p, current->parent);
+ list_add_tail(&p->sibling, &p->real_parent->children);
+ tracehook_finish_clone(p, clone_flags, trace);
if (thread_group_leader(p)) {
if (clone_flags & CLONE_NEWPID)
p->nsproxy->pid_ns->child_reaper = p;
p->signal->leader_pid = pid;
- p->signal->tty = current->signal->tty;
+ tty_kref_put(p->signal->tty);
+ p->signal->tty = tty_kref_get(current->signal->tty);
set_task_pgrp(p, task_pgrp_nr(current));
set_task_session(p, task_session_nr(current));
attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
@@ -1289,29 +1313,13 @@ struct task_struct * __cpuinit fork_idle(int cpu)
struct pt_regs regs;
task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
- &init_struct_pid);
+ &init_struct_pid, 0);
if (!IS_ERR(task))
init_idle(task, cpu);
return task;
}
-static int fork_traceflag(unsigned clone_flags)
-{
- if (clone_flags & CLONE_UNTRACED)
- return 0;
- else if (clone_flags & CLONE_VFORK) {
- if (current->ptrace & PT_TRACE_VFORK)
- return PTRACE_EVENT_VFORK;
- } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
- if (current->ptrace & PT_TRACE_CLONE)
- return PTRACE_EVENT_CLONE;
- } else if (current->ptrace & PT_TRACE_FORK)
- return PTRACE_EVENT_FORK;
-
- return 0;
-}
-
/*
* Ok, this is the main fork-routine.
*
@@ -1346,14 +1354,14 @@ long do_fork(unsigned long clone_flags,
}
}
- if (unlikely(current->ptrace)) {
- trace = fork_traceflag (clone_flags);
- if (trace)
- clone_flags |= CLONE_PTRACE;
- }
+ /*
+ * When called from kernel_thread, don't do user tracing stuff.
+ */
+ if (likely(user_mode(regs)))
+ trace = tracehook_prepare_clone(clone_flags);
p = copy_process(clone_flags, stack_start, regs, stack_size,
- child_tidptr, NULL);
+ child_tidptr, NULL, trace);
/*
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
@@ -1371,32 +1379,35 @@ long do_fork(unsigned long clone_flags,
init_completion(&vfork);
}
- if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
+ tracehook_report_clone(trace, regs, clone_flags, nr, p);
+
+ /*
+ * We set PF_STARTING at creation in case tracing wants to
+ * use this to distinguish a fully live task from one that
+ * hasn't gotten to tracehook_report_clone() yet. Now we
+ * clear it and set the child going.
+ */
+ p->flags &= ~PF_STARTING;
+
+ if (unlikely(clone_flags & CLONE_STOPPED)) {
/*
* We'll start up with an immediate SIGSTOP.
*/
sigaddset(&p->pending.signal, SIGSTOP);
set_tsk_thread_flag(p, TIF_SIGPENDING);
- }
-
- if (!(clone_flags & CLONE_STOPPED))
- wake_up_new_task(p, clone_flags);
- else
__set_task_state(p, TASK_STOPPED);
-
- if (unlikely (trace)) {
- current->ptrace_message = nr;
- ptrace_notify ((trace << 8) | SIGTRAP);
+ } else {
+ wake_up_new_task(p, clone_flags);
}
+ tracehook_report_clone_complete(trace, regs,
+ clone_flags, nr, p);
+
if (clone_flags & CLONE_VFORK) {
freezer_do_not_count();
wait_for_completion(&vfork);
freezer_count();
- if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) {
- current->ptrace_message = nr;
- ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
- }
+ tracehook_report_vfork_done(p, nr);
}
} else {
nr = PTR_ERR(p);
@@ -1408,7 +1419,7 @@ long do_fork(unsigned long clone_flags,
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
-static void sighand_ctor(struct kmem_cache *cachep, void *data)
+static void sighand_ctor(void *data)
{
struct sighand_struct *sighand = data;