author    | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-12 12:22:13 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-12 12:22:13 -0800
commit    | 9977d9b379cb77e0f67bd6f4563618106e58e11d (patch)
tree      | 0191accfddf578edb52c69c933d64521e3dce297 /arch/ia64/kernel/process.c
parent    | cf4af01221579a4e895f43dbfc47598fbfc5a731 (diff)
parent    | 541880d9a2c7871f6370071d55aa6662d329c51e (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal
Pull big execve/kernel_thread/fork unification series from Al Viro:
"All architectures are converted to new model. Quite a bit of that
stuff is actually shared with architecture trees; in such cases it's
literally shared branch pulled by both, not a cherry-pick.
A lot of ugliness and black magic is gone (-3KLoC total in this one):
- kernel_thread()/kernel_execve()/sys_execve() redesign.
We don't do syscalls from kernel anymore for either kernel_thread()
or kernel_execve():
kernel_thread() is essentially clone(2) with callback run before we
return to userland, the callbacks either never return or do
successful do_execve() before returning.
kernel_execve() is a wrapper for do_execve() - it doesn't need to
do transition to user mode anymore.
As a result kernel_thread() and kernel_execve() are
arch-independent now - they live in kernel/fork.c and fs/exec.c
resp. sys_execve() is also in fs/exec.c and it's completely
architecture-independent.
- daemonize() is gone, along with its parts in fs/*.c
- struct pt_regs * is no longer passed to do_fork/copy_process/
copy_thread/do_execve/search_binary_handler/->load_binary/do_coredump.
- sys_fork()/sys_vfork()/sys_clone() unified; some architectures
still need wrappers (ones with callee-saved registers not saved in
pt_regs on syscall entry), but the main part of those suckers is in
kernel/fork.c now."
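As a rough illustration of the model described in the pull message above, the arch-independent entry points after this series boil down to something like the following. This is a simplified sketch in the spirit of the generic kernel/fork.c and fs/exec.c code of that era, not a verbatim quote of either file:

pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	/* the stack_start/stack_size slots carry fn and arg down to copy_thread() */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
		       (unsigned long) fn, (unsigned long) arg, NULL, NULL);
}

SYSCALL_DEFINE3(execve,
		const char __user *, filename,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp)
{
	/* no pt_regs argument anywhere; do_execve() works against current_pt_regs() */
	struct filename *path = getname(filename);
	int error = PTR_ERR(path);

	if (!IS_ERR(path)) {
		error = do_execve(path->name, argv, envp);
		putname(path);
	}
	return error;
}

Each architecture then only has to make copy_thread() recognize PF_KTHREAD children and park fn/arg where its ret-from-fork path can find them, which is what the ia64 diff below does.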
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal: (113 commits)
do_coredump(): get rid of pt_regs argument
print_fatal_signal(): get rid of pt_regs argument
ptrace_signal(): get rid of unused arguments
get rid of ptrace_signal_deliver() arguments
new helper: signal_pt_regs()
unify default ptrace_signal_deliver
flagday: kill pt_regs argument of do_fork()
death to idle_regs()
don't pass regs to copy_process()
flagday: don't pass regs to copy_thread()
bfin: switch to generic vfork, get rid of pointless wrappers
xtensa: switch to generic clone()
openrisc: switch to use of generic fork and clone
unicore32: switch to generic clone(2)
score: switch to generic fork/vfork/clone
c6x: sanitize copy_thread(), get rid of clone(2) wrapper, switch to generic clone()
take sys_fork/sys_vfork/sys_clone prototypes to linux/syscalls.h
mn10300: switch to generic fork/vfork/clone
h8300: switch to generic fork/vfork/clone
tile: switch to generic clone()
...
Conflicts:
arch/microblaze/include/asm/Kbuild
Diffstat (limited to 'arch/ia64/kernel/process.c')
-rw-r--r-- | arch/ia64/kernel/process.c | 161
1 file changed, 60 insertions(+), 101 deletions(-)
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 35e106f2ed13..31360cbbd5f8 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -393,72 +393,24 @@ ia64_load_extra (struct task_struct *task)
 int
 copy_thread(unsigned long clone_flags,
	     unsigned long user_stack_base, unsigned long user_stack_size,
-	     struct task_struct *p, struct pt_regs *regs)
+	     struct task_struct *p)
 {
 	extern char ia64_ret_from_clone;
 	struct switch_stack *child_stack, *stack;
 	unsigned long rbs, child_rbs, rbs_size;
 	struct pt_regs *child_ptregs;
+	struct pt_regs *regs = current_pt_regs();
 	int retval = 0;
 
-#ifdef CONFIG_SMP
-	/*
-	 * For SMP idle threads, fork_by_hand() calls do_fork with
-	 * NULL regs.
-	 */
-	if (!regs)
-		return 0;
-#endif
-
-	stack = ((struct switch_stack *) regs) - 1;
-
 	child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1;
 	child_stack = (struct switch_stack *) child_ptregs - 1;
 
-	/* copy parent's switch_stack & pt_regs to child: */
-	memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));
-
 	rbs = (unsigned long) current + IA64_RBS_OFFSET;
 	child_rbs = (unsigned long) p + IA64_RBS_OFFSET;
-	rbs_size = stack->ar_bspstore - rbs;
-
-	/* copy the parent's register backing store to the child: */
-	memcpy((void *) child_rbs, (void *) rbs, rbs_size);
-
-	if (likely(user_mode(child_ptregs))) {
-		if (clone_flags & CLONE_SETTLS)
-			child_ptregs->r13 = regs->r16;	/* see sys_clone2() in entry.S */
-		if (user_stack_base) {
-			child_ptregs->r12 = user_stack_base + user_stack_size - 16;
-			child_ptregs->ar_bspstore = user_stack_base;
-			child_ptregs->ar_rnat = 0;
-			child_ptregs->loadrs = 0;
-		}
-	} else {
-		/*
-		 * Note: we simply preserve the relative position of
-		 * the stack pointer here.  There is no need to
-		 * allocate a scratch area here, since that will have
-		 * been taken care of by the caller of sys_clone()
-		 * already.
-		 */
-		child_ptregs->r12 = (unsigned long) child_ptregs - 16; /* kernel sp */
-		child_ptregs->r13 = (unsigned long) p;		/* set `current' pointer */
-	}
-	child_stack->ar_bspstore = child_rbs + rbs_size;
-	child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
 
 	/* copy parts of thread_struct: */
 	p->thread.ksp = (unsigned long) child_stack - 16;
 
-	/* stop some PSR bits from being inherited.
-	 * the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
-	 * therefore we must specify them explicitly here and not include them in
-	 * IA64_PSR_BITS_TO_CLEAR.
-	 */
-	child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
-				 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
-
 	/*
 	 * NOTE: The calling convention considers all floating point
 	 * registers in the high partition (fph) to be scratch.  Since
@@ -480,8 +432,66 @@ copy_thread(unsigned long clone_flags,
 #	define THREAD_FLAGS_TO_SET	0
 	p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
 			   | THREAD_FLAGS_TO_SET);
+
 	ia64_drop_fpu(p);	/* don't pick up stale state from a CPU's fph */
 
+	if (unlikely(p->flags & PF_KTHREAD)) {
+		if (unlikely(!user_stack_base)) {
+			/* fork_idle() called us */
+			return 0;
+		}
+		memset(child_stack, 0, sizeof(*child_ptregs) + sizeof(*child_stack));
+		child_stack->r4 = user_stack_base;	/* payload */
+		child_stack->r5 = user_stack_size;	/* argument */
+		/*
+		 * Preserve PSR bits, except for bits 32-34 and 37-45,
+		 * which we can't read.
+		 */
+		child_ptregs->cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
+		/* mark as valid, empty frame */
+		child_ptregs->cr_ifs = 1UL << 63;
+		child_stack->ar_fpsr = child_ptregs->ar_fpsr
+			= ia64_getreg(_IA64_REG_AR_FPSR);
+		child_stack->pr = (1 << PRED_KERNEL_STACK);
+		child_stack->ar_bspstore = child_rbs;
+		child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
+
+		/* stop some PSR bits from being inherited.
+		 * the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
+		 * therefore we must specify them explicitly here and not include them in
+		 * IA64_PSR_BITS_TO_CLEAR.
+		 */
+		child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
+					 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
+
+		return 0;
+	}
+	stack = ((struct switch_stack *) regs) - 1;
+	/* copy parent's switch_stack & pt_regs to child: */
+	memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));
+
+	/* copy the parent's register backing store to the child: */
+	rbs_size = stack->ar_bspstore - rbs;
+	memcpy((void *) child_rbs, (void *) rbs, rbs_size);
+	if (clone_flags & CLONE_SETTLS)
+		child_ptregs->r13 = regs->r16;	/* see sys_clone2() in entry.S */
+	if (user_stack_base) {
+		child_ptregs->r12 = user_stack_base + user_stack_size - 16;
+		child_ptregs->ar_bspstore = user_stack_base;
+		child_ptregs->ar_rnat = 0;
+		child_ptregs->loadrs = 0;
+	}
+	child_stack->ar_bspstore = child_rbs + rbs_size;
+	child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
+
+	/* stop some PSR bits from being inherited.
+	 * the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
+	 * therefore we must specify them explicitly here and not include them in
+	 * IA64_PSR_BITS_TO_CLEAR.
+	 */
+	child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
+				 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
+
 #ifdef CONFIG_PERFMON
 	if (current->thread.pfm_context)
 		pfm_inherit(p, child_ptregs);
@@ -608,57 +618,6 @@ dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
 	return 1;	/* f0-f31 are always valid so we always return 1 */
 }
 
-long
-sys_execve (const char __user *filename,
-	    const char __user *const __user *argv,
-	    const char __user *const __user *envp,
-	    struct pt_regs *regs)
-{
-	struct filename *fname;
-	int error;
-
-	fname = getname(filename);
-	error = PTR_ERR(fname);
-	if (IS_ERR(fname))
-		goto out;
-	error = do_execve(fname->name, argv, envp, regs);
-	putname(fname);
-out:
-	return error;
-}
-
-pid_t
-kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
-{
-	extern void start_kernel_thread (void);
-	unsigned long *helper_fptr = (unsigned long *) &start_kernel_thread;
-	struct {
-		struct switch_stack sw;
-		struct pt_regs pt;
-	} regs;
-
-	memset(&regs, 0, sizeof(regs));
-	regs.pt.cr_iip = helper_fptr[0];	/* set entry point (IP) */
-	regs.pt.r1 = helper_fptr[1];	/* set GP */
-	regs.pt.r9 = (unsigned long) fn;	/* 1st argument */
-	regs.pt.r11 = (unsigned long) arg;	/* 2nd argument */
-	/* Preserve PSR bits, except for bits 32-34 and 37-45, which we can't read. */
-	regs.pt.cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
-	regs.pt.cr_ifs = 1UL << 63;		/* mark as valid, empty frame */
-	regs.sw.ar_fpsr = regs.pt.ar_fpsr = ia64_getreg(_IA64_REG_AR_FPSR);
-	regs.sw.ar_bspstore = (unsigned long) current + IA64_RBS_OFFSET;
-	regs.sw.pr = (1 << PRED_KERNEL_STACK);
-	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs.pt, 0, NULL, NULL);
-}
-EXPORT_SYMBOL(kernel_thread);
-
-/* This gets called from kernel_thread() via ia64_invoke_thread_helper(). */
-int
-kernel_thread_helper (int (*fn)(void *), void *arg)
-{
-	return (*fn)(arg);
-}
-
 /*
  * Flush thread state.  This is called when a thread does an execve().
  */
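The PF_KTHREAD branch added above is the ia64 side of that contract: for kernel threads, do_fork() carries the callback and its argument in the stack_start/stack_size slots, so copy_thread() receives them as user_stack_base/user_stack_size and parks them in the child's switch_stack (r4 as the payload, r5 as the argument), with b0 pointing at ia64_ret_from_clone. The actual dispatch is assembly in arch/ia64/kernel/entry.S; the following is only a conceptual C sketch of what that return path does for a kernel-thread child, using an illustrative function name rather than a real kernel symbol:

/*
 * Conceptual sketch only: the real ia64_ret_from_clone is assembly in
 * arch/ia64/kernel/entry.S, and "run_kthread_payload" is an illustrative
 * name, not an actual kernel symbol.
 */
static void run_kthread_payload(struct switch_stack *child_stack)
{
	int (*fn)(void *) = (int (*)(void *)) child_stack->r4;	/* "payload" set by copy_thread() */
	void *arg = (void *) child_stack->r5;			/* "argument" set by copy_thread() */

	fn(arg);
	/*
	 * If fn() returns at all, it has done a successful do_execve(),
	 * so control falls through to the normal return-to-user path,
	 * exactly the "never return or do successful do_execve()" rule
	 * from the pull message.
	 */
}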