Diffstat (limited to 'arch/parisc/kernel')
-rw-r--r--	arch/parisc/kernel/Makefile	|   4
-rw-r--r--	arch/parisc/kernel/entry.S	| 106
-rw-r--r--	arch/parisc/kernel/ftrace.c	| 147
-rw-r--r--	arch/parisc/kernel/head.S	|   9
-rw-r--r--	arch/parisc/kernel/process.c	|   7
-rw-r--r--	arch/parisc/kernel/ptrace.c	| 368
-rw-r--r--	arch/parisc/kernel/syscall.S	|   3
-rw-r--r--	arch/parisc/kernel/time.c	|  63
8 files changed, 549 insertions, 158 deletions
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index ff87b4603e3d..69a11183d48d 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -15,11 +15,7 @@ ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_cache.o = -pg
-CFLAGS_REMOVE_irq.o = -pg
-CFLAGS_REMOVE_pacache.o = -pg
 CFLAGS_REMOVE_perf.o = -pg
-CFLAGS_REMOVE_traps.o = -pg
-CFLAGS_REMOVE_unaligned.o = -pg
 CFLAGS_REMOVE_unwind.o = -pg
 endif
 
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 623496c11756..baa3d9d6e971 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -667,7 +667,7 @@
 	 * boundary
 	 */
 
-	.text
+	.section .text.hot
 	.align 2048
 
 ENTRY(fault_vector_20)
@@ -1970,43 +1970,107 @@ pt_regs_ok:
 	b	intr_restore
 	copy	%r25,%r16
 
-	.import schedule,code
 syscall_do_resched:
-	BL	schedule,%r2
+	load32	syscall_check_resched,%r2 /* if resched, we start over again */
+	load32	schedule,%r19
+	bv	%r0(%r19)		/* jumps to schedule() */
 #ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29		/* Reference param save area */
 #else
 	nop
 #endif
-	b	syscall_check_resched	/* if resched, we start over again */
-	nop
 ENDPROC(syscall_exit)
 
 
 #ifdef CONFIG_FUNCTION_TRACER
+
 	.import ftrace_function_trampoline,code
-ENTRY(_mcount)
-	copy	%r3, %arg2
+	.align L1_CACHE_BYTES
+	.globl mcount
+	.type  mcount, @function
+ENTRY(mcount)
+_mcount:
+	.export _mcount,data
+	.proc
+	.callinfo caller,frame=0
+	.entry
+	/*
+	 * The 64bit mcount() function pointer needs 4 dwords, of which the
+	 * first two are free.  We optimize it here and put 2 instructions for
+	 * calling mcount(), and 2 instructions for ftrace_stub().  That way we
+	 * have all on one L1 cacheline.
+	 */
 	b	ftrace_function_trampoline
+	copy	%r3, %arg2	/* caller original %sp */
+ftrace_stub:
+	.globl ftrace_stub
+	.type  ftrace_stub, @function
+#ifdef CONFIG_64BIT
+	bve	(%rp)
+#else
+	bv	%r0(%rp)
+#endif
 	nop
-ENDPROC(_mcount)
-
+#ifdef CONFIG_64BIT
+	.dword mcount
+	.dword 0 /* code in head.S puts value of global gp here */
+#endif
+	.exit
+	.procend
+ENDPROC(mcount)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.align 8
+	.globl return_to_handler
+	.type  return_to_handler, @function
 ENTRY(return_to_handler)
-	load32	return_trampoline, %rp
-	copy	%ret0, %arg0
-	copy	%ret1, %arg1
-	b	ftrace_return_to_handler
-	nop
-return_trampoline:
-	copy	%ret0, %rp
-	copy	%r23, %ret0
-	copy	%r24, %ret1
+	.proc
+	.callinfo caller,frame=FRAME_SIZE
+	.entry
+	.export parisc_return_to_handler,data
+parisc_return_to_handler:
+	copy %r3,%r1
+	STREG %r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
+	copy %sp,%r3
+	STREGM %r1,FRAME_SIZE(%sp)
+	STREG %ret0,8(%r3)
+	STREG %ret1,16(%r3)
 
-.globl ftrace_stub
-ftrace_stub:
+#ifdef CONFIG_64BIT
+	loadgp
+#endif
+
+	/* call ftrace_return_to_handler(0) */
+	.import ftrace_return_to_handler,code
+	load32 ftrace_return_to_handler,%ret0
+	load32 .Lftrace_ret,%r2
+#ifdef CONFIG_64BIT
+	ldo -16(%sp),%ret1		/* Reference param save area */
+	bve	(%ret0)
+#else
+	bv	%r0(%ret0)
+#endif
+	ldi 0,%r26
+.Lftrace_ret:
+	copy %ret0,%rp
+
+	/* restore original return values */
+	LDREG 8(%r3),%ret0
+	LDREG 16(%r3),%ret1
+
+	/* return from function */
+#ifdef CONFIG_64BIT
+	bve	(%rp)
+#else
 	bv	%r0(%rp)
-	nop
+#endif
+	LDREGM -FRAME_SIZE(%sp),%r3
+	.exit
+	.procend
 ENDPROC(return_to_handler)
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 #endif	/* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_IRQSTACKS
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index 559d400f9385..a828a0adf52c 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -1,6 +1,6 @@
 /*
  * Code for tracing calls in Linux kernel.
- * Copyright (C) 2009 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de>
  *
  * based on code for x86 which is:
  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
@@ -13,104 +13,24 @@
 #include <linux/init.h>
 #include <linux/ftrace.h>
 
+#include <asm/assembly.h>
 #include <asm/sections.h>
 #include <asm/ftrace.h>
 
+#define __hot __attribute__ ((__section__ (".text.hot")))
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-				unsigned long func, int *depth)
-{
-	int index;
-
-	if (!current->ret_stack)
-		return -EBUSY;
-
-	/* The return trace stack is full */
-	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-		atomic_inc(&current->trace_overrun);
-		return -EBUSY;
-	}
-
-	index = ++current->curr_ret_stack;
-	barrier();
-	current->ret_stack[index].ret = ret;
-	current->ret_stack[index].func = func;
-	current->ret_stack[index].calltime = time;
-	*depth = index;
-
-	return 0;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
-{
-	int index;
-
-	index = current->curr_ret_stack;
-
-	if (unlikely(index < 0)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic, otherwise we have no where to go */
-		*ret = (unsigned long)
-			dereference_function_descriptor(&panic);
-		return;
-	}
-
-	*ret = current->ret_stack[index].ret;
-	trace->func = current->ret_stack[index].func;
-	trace->calltime = current->ret_stack[index].calltime;
-	trace->overrun = atomic_read(&current->trace_overrun);
-	trace->depth = index;
-	barrier();
-	current->curr_ret_stack--;
-
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(unsigned long retval0,
-				       unsigned long retval1)
-{
-	struct ftrace_graph_ret trace;
-	unsigned long ret;
-
-	pop_return_trace(&trace, &ret);
-	trace.rettime = local_clock();
-	ftrace_graph_return(&trace);
-
-	if (unlikely(!ret)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic. What else to do? */
-		ret = (unsigned long)
-			dereference_function_descriptor(&panic);
-	}
-
-	/* HACK: we hand over the old functions' return values
-	   in %r23 and %r24.  Assembly in entry.S will take care
-	   and move those to their final registers %ret0 and %ret1 */
-	asm( "copy %0, %%r23 \n\t"
-	     "copy %1, %%r24 \n" : : "r" (retval0), "r" (retval1) );
-
-	return ret;
-}
-
 /*
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+static void __hot prepare_ftrace_return(unsigned long *parent,
+					unsigned long self_addr)
 {
 	unsigned long old;
-	unsigned long long calltime;
 	struct ftrace_graph_ent trace;
+	extern int parisc_return_to_handler;
 
 	if (unlikely(ftrace_graph_is_dead()))
 		return;
 
@@ -119,64 +39,47 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 		return;
 
 	old = *parent;
-	*parent = (unsigned long)
-		  dereference_function_descriptor(&return_to_handler);
 
-	if (unlikely(!__kernel_text_address(old))) {
-		ftrace_graph_stop();
-		*parent = old;
-		WARN_ON(1);
-		return;
-	}
-
-	calltime = local_clock();
+	trace.func = self_addr;
+	trace.depth = current->curr_ret_stack + 1;
 
-	if (push_return_trace(old, calltime,
-				self_addr, &trace.depth) == -EBUSY) {
-		*parent = old;
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace))
 		return;
-	}
 
-	trace.func = self_addr;
+	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
+			0 ) == -EBUSY)
+		return;
 
-	/* Only trace if the calling function expects to */
-	if (!ftrace_graph_entry(&trace)) {
-		current->curr_ret_stack--;
-		*parent = old;
-	}
+	/* activate parisc_return_to_handler() as return point */
+	*parent = (unsigned long) &parisc_return_to_handler;
 }
-
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
-
-void ftrace_function_trampoline(unsigned long parent,
+void notrace __hot ftrace_function_trampoline(unsigned long parent,
 				unsigned long self_addr,
 				unsigned long org_sp_gr3)
 {
-	extern ftrace_func_t ftrace_trace_function;
+	extern ftrace_func_t ftrace_trace_function;  /* depends on CONFIG_DYNAMIC_FTRACE */
+	extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
 
 	if (ftrace_trace_function != ftrace_stub) {
-		ftrace_trace_function(parent, self_addr);
+		/* struct ftrace_ops *op, struct pt_regs *regs); */
+		ftrace_trace_function(parent, self_addr, NULL, NULL);
 		return;
 	}
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	if (ftrace_graph_entry && ftrace_graph_return) {
-		unsigned long sp;
+	if (ftrace_graph_return != (trace_func_graph_ret_t) ftrace_stub ||
+		ftrace_graph_entry != ftrace_graph_entry_stub) {
 		unsigned long *parent_rp;
 
-		asm volatile ("copy %%r30, %0" : "=r"(sp));
-		/* sanity check: is stack pointer which we got from
-		   assembler function in entry.S in a reasonable
-		   range compared to current stack pointer? */
-		if ((sp - org_sp_gr3) > 0x400)
-			return;
-
 		/* calculate pointer to %rp in stack */
-		parent_rp = (unsigned long *) org_sp_gr3 - 0x10;
+		parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);
 		/* sanity check: parent_rp should hold parent */
 		if (*parent_rp != parent)
 			return;
-	
+
 		prepare_ftrace_return(parent_rp, self_addr);
 		return;
 	}
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 75aa0db9f69e..bbbe360b458f 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -129,6 +129,15 @@ $pgt_fill_loop:
 	/* And the stack pointer too */
 	ldo		THREAD_SZ_ALGN(%r6),%sp
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER)
+	.import _mcount,data
+	/* initialize mcount FPTR */
+	/* Get the global data pointer */
+	loadgp
+	load32	PA(_mcount), %r10
+	std	%dp,0x18(%r10)
+#endif
+
 #ifdef CONFIG_SMP
 	/* Set the smp rendezvous address into page zero.
 	** It would be safer to do this in init_smp_config() but
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 809905a811ed..40639439d8b3 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -144,13 +144,6 @@ void machine_power_off(void)
 void (*pm_power_off)(void) = machine_power_off;
 EXPORT_SYMBOL(pm_power_off);
 
-/*
- * Free current thread data structures etc..
- */
-void exit_thread(void)
-{
-}
-
 void flush_thread(void)
 {
 	/* Only needs to handle fpu stuff or perf monitors.
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 8fb81a391599..b5458b37fc5b 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -4,18 +4,20 @@
  * Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc.
  * Copyright (C) 2000 Matthew Wilcox <matthew@wil.cx>
  * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
- * Copyright (C) 2008 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2008-2016 Helge Deller <deller@gmx.de>
  */
 
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
+#include <linux/elf.h>
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <linux/tracehook.h>
 #include <linux/user.h>
 #include <linux/personality.h>
+#include <linux/regset.h>
 #include <linux/security.h>
 #include <linux/seccomp.h>
 #include <linux/compat.h>
@@ -30,6 +32,17 @@
 /* PSW bits we allow the debugger to modify */
 #define USER_PSW_BITS	(PSW_N | PSW_B | PSW_V | PSW_CB)
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
+/*
+ * These are our native regset flavors.
+ */
+enum parisc_regset {
+	REGSET_GENERAL,
+	REGSET_FP
+};
+
 /*
  * Called by kernel/ptrace.c when detaching..
  *
@@ -114,6 +127,7 @@ void user_enable_block_step(struct task_struct *task)
 long arch_ptrace(struct task_struct *child, long request,
 		 unsigned long addr, unsigned long data)
 {
+	unsigned long __user *datap = (unsigned long __user *)data;
 	unsigned long tmp;
 	long ret = -EIO;
 
@@ -126,7 +140,7 @@ long arch_ptrace(struct task_struct *child, long request,
 		     addr >= sizeof(struct pt_regs))
 			break;
 		tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
-		ret = put_user(tmp, (unsigned long __user *) data);
+		ret = put_user(tmp, datap);
 		break;
 
 	/* Write the word at location addr in the USER area.  This will need
@@ -165,6 +179,34 @@ long arch_ptrace(struct task_struct *child, long request,
 		}
 		break;
 
+	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
+		return copy_regset_to_user(child,
+					   task_user_regset_view(current),
+					   REGSET_GENERAL,
+					   0, sizeof(struct user_regs_struct),
+					   datap);
+
+	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
+		return copy_regset_from_user(child,
+					     task_user_regset_view(current),
+					     REGSET_GENERAL,
+					     0, sizeof(struct user_regs_struct),
+					     datap);
+
+	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
+		return copy_regset_to_user(child,
+					   task_user_regset_view(current),
+					   REGSET_FP,
+					   0, sizeof(struct user_fp_struct),
+					   datap);
+
+	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
+		return copy_regset_from_user(child,
+					     task_user_regset_view(current),
+					     REGSET_FP,
+					     0, sizeof(struct user_fp_struct),
+					     datap);
+
 	default:
 		ret = ptrace_request(child, request, addr, data);
 		break;
@@ -283,6 +325,10 @@ long do_syscall_trace_enter(struct pt_regs *regs)
 			regs->gr[20] = -1UL;
 		goto out;
 	}
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+		trace_sys_enter(regs, regs->gr[20]);
+#endif
 
 #ifdef CONFIG_64BIT
 	if (!is_compat_task())
@@ -311,6 +357,324 @@ void do_syscall_trace_exit(struct pt_regs *regs)
 
 	audit_syscall_exit(regs);
 
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+		trace_sys_exit(regs, regs->gr[20]);
+#endif
+
 	if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
 		tracehook_report_syscall_exit(regs, stepping);
 }
+
+
+/*
+ * regset functions.
+ */
+
+static int fpr_get(struct task_struct *target,
+		     const struct user_regset *regset,
+		     unsigned int pos, unsigned int count,
+		     void *kbuf, void __user *ubuf)
+{
+	struct pt_regs *regs = task_regs(target);
+	__u64 *k = kbuf;
+	__u64 __user *u = ubuf;
+	__u64 reg;
+
+	pos /= sizeof(reg);
+	count /= sizeof(reg);
+
+	if (kbuf)
+		for (; count > 0 && pos < ELF_NFPREG; --count)
+			*k++ = regs->fr[pos++];
+	else
+		for (; count > 0 && pos < ELF_NFPREG; --count)
+			if (__put_user(regs->fr[pos++], u++))
+				return -EFAULT;
+
+	kbuf = k;
+	ubuf = u;
+	pos *= sizeof(reg);
+	count *= sizeof(reg);
+	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+					ELF_NFPREG * sizeof(reg), -1);
+}
+
+static int fpr_set(struct task_struct *target,
+		     const struct user_regset *regset,
+		     unsigned int pos, unsigned int count,
+		     const void *kbuf, const void __user *ubuf)
+{
+	struct pt_regs *regs = task_regs(target);
+	const __u64 *k = kbuf;
+	const __u64 __user *u = ubuf;
+	__u64 reg;
+
+	pos /= sizeof(reg);
+	count /= sizeof(reg);
+
+	if (kbuf)
+		for (; count > 0 && pos < ELF_NFPREG; --count)
+			regs->fr[pos++] = *k++;
+	else
+		for (; count > 0 && pos < ELF_NFPREG; --count) {
+			if (__get_user(reg, u++))
+				return -EFAULT;
+			regs->fr[pos++] = reg;
+		}
+
+	kbuf = k;
+	ubuf = u;
+	pos *= sizeof(reg);
+	count *= sizeof(reg);
+	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+					 ELF_NFPREG * sizeof(reg), -1);
+}
+
+#define RI(reg) (offsetof(struct user_regs_struct,reg) / sizeof(long))
+
+static unsigned long get_reg(struct pt_regs *regs, int num)
+{
+	switch (num) {
+	case RI(gr[0]) ... RI(gr[31]):	return regs->gr[num - RI(gr[0])];
+	case RI(sr[0]) ... RI(sr[7]):	return regs->sr[num - RI(sr[0])];
+	case RI(iasq[0]):		return regs->iasq[0];
+	case RI(iasq[1]):		return regs->iasq[1];
+	case RI(iaoq[0]):		return regs->iaoq[0];
+	case RI(iaoq[1]):		return regs->iaoq[1];
+	case RI(sar):			return regs->sar;
+	case RI(iir):			return regs->iir;
+	case RI(isr):			return regs->isr;
+	case RI(ior):			return regs->ior;
+	case RI(ipsw):			return regs->ipsw;
+	case RI(cr27):			return regs->cr27;
+	case RI(cr0):			return mfctl(0);
+	case RI(cr24):			return mfctl(24);
+	case RI(cr25):			return mfctl(25);
+	case RI(cr26):			return mfctl(26);
+	case RI(cr28):			return mfctl(28);
+	case RI(cr29):			return mfctl(29);
+	case RI(cr30):			return mfctl(30);
+	case RI(cr31):			return mfctl(31);
+	case RI(cr8):			return mfctl(8);
+	case RI(cr9):			return mfctl(9);
+	case RI(cr12):			return mfctl(12);
+	case RI(cr13):			return mfctl(13);
+	case RI(cr10):			return mfctl(10);
+	case RI(cr15):			return mfctl(15);
+	default:			return 0;
+	}
+}
+
+static void set_reg(struct pt_regs *regs, int num, unsigned long val)
+{
+	switch (num) {
+	case RI(gr[0]): /*
+			 * PSW is in gr[0].
+			 * Allow writing to Nullify, Divide-step-correction,
+			 * and carry/borrow bits.
+			 * BEWARE, if you set N, and then single step, it won't
+			 * stop on the nullified instruction.
+			 */
+			val &= USER_PSW_BITS;
+			regs->gr[0] &= ~USER_PSW_BITS;
+			regs->gr[0] |= val;
+			return;
+	case RI(gr[1]) ... RI(gr[31]):
+			regs->gr[num - RI(gr[0])] = val;
+			return;
+	case RI(iaoq[0]):
+	case RI(iaoq[1]):
+			regs->iaoq[num - RI(iaoq[0])] = val;
+			return;
+	case RI(sar):	regs->sar = val;
+			return;
+	default:	return;
+#if 0
+	/* do not allow to change any of the following registers (yet) */
+	case RI(sr[0]) ... RI(sr[7]):	return regs->sr[num - RI(sr[0])];
+	case RI(iasq[0]):		return regs->iasq[0];
+	case RI(iasq[1]):		return regs->iasq[1];
+	case RI(iir):			return regs->iir;
+	case RI(isr):			return regs->isr;
+	case RI(ior):			return regs->ior;
+	case RI(ipsw):			return regs->ipsw;
+	case RI(cr27):			return regs->cr27;
+	case cr0, cr24, cr25, cr26, cr27, cr28, cr29, cr30, cr31;
+	case cr8, cr9, cr12, cr13, cr10, cr15;
+#endif
+	}
+}
+
+static int gpr_get(struct task_struct *target,
+		     const struct user_regset *regset,
+		     unsigned int pos, unsigned int count,
+		     void *kbuf, void __user *ubuf)
+{
+	struct pt_regs *regs = task_regs(target);
+	unsigned long *k = kbuf;
+	unsigned long __user *u = ubuf;
+	unsigned long reg;
+
+	pos /= sizeof(reg);
+	count /= sizeof(reg);
+
+	if (kbuf)
+		for (; count > 0 && pos < ELF_NGREG; --count)
+			*k++ = get_reg(regs, pos++);
+	else
+		for (; count > 0 && pos < ELF_NGREG; --count)
+			if (__put_user(get_reg(regs, pos++), u++))
+				return -EFAULT;
+	kbuf = k;
+	ubuf = u;
+	pos *= sizeof(reg);
+	count *= sizeof(reg);
+	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+					ELF_NGREG * sizeof(reg), -1);
+}
+
+static int gpr_set(struct task_struct *target,
+		     const struct user_regset *regset,
+		     unsigned int pos, unsigned int count,
+		     const void *kbuf, const void __user *ubuf)
+{
+	struct pt_regs *regs = task_regs(target);
+	const unsigned long *k = kbuf;
+	const unsigned long __user *u = ubuf;
+	unsigned long reg;
+
+	pos /= sizeof(reg);
+	count /= sizeof(reg);
+
+	if (kbuf)
+		for (; count > 0 && pos < ELF_NGREG; --count)
+			set_reg(regs, pos++, *k++);
+	else
+		for (; count > 0 && pos < ELF_NGREG; --count) {
+			if (__get_user(reg, u++))
+				return -EFAULT;
+			set_reg(regs, pos++, reg);
+		}
+
+	kbuf = k;
+	ubuf = u;
+	pos *= sizeof(reg);
+	count *= sizeof(reg);
+	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+					 ELF_NGREG * sizeof(reg), -1);
+}
+
+static const struct user_regset native_regsets[] = {
+	[REGSET_GENERAL] = {
+		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
+		.size = sizeof(long), .align = sizeof(long),
+		.get = gpr_get, .set = gpr_set
+	},
+	[REGSET_FP] = {
+		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
+		.size = sizeof(__u64), .align = sizeof(__u64),
+		.get = fpr_get, .set = fpr_set
+	}
+};
+
+static const struct user_regset_view user_parisc_native_view = {
+	.name = "parisc", .e_machine = ELF_ARCH, .ei_osabi = ELFOSABI_LINUX,
+	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
+};
+
+#ifdef CONFIG_64BIT
+#include <linux/compat.h>
+
+static int gpr32_get(struct task_struct *target,
+		     const struct user_regset *regset,
+		     unsigned int pos, unsigned int count,
+		     void *kbuf, void __user *ubuf)
+{
+	struct pt_regs *regs = task_regs(target);
+	compat_ulong_t *k = kbuf;
+	compat_ulong_t __user *u = ubuf;
+	compat_ulong_t reg;
+
+	pos /= sizeof(reg);
+	count /= sizeof(reg);
+
+	if (kbuf)
+		for (; count > 0 && pos < ELF_NGREG; --count)
+			*k++ = get_reg(regs, pos++);
+	else
+		for (; count > 0 && pos < ELF_NGREG; --count)
+			if (__put_user((compat_ulong_t) get_reg(regs, pos++), u++))
+				return -EFAULT;
+
+	kbuf = k;
+	ubuf = u;
+	pos *= sizeof(reg);
+	count *= sizeof(reg);
+	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+					ELF_NGREG * sizeof(reg), -1);
+}
+
+static int gpr32_set(struct task_struct *target,
+		     const struct user_regset *regset,
+		     unsigned int pos, unsigned int count,
+		     const void *kbuf, const void __user *ubuf)
+{
+	struct pt_regs *regs = task_regs(target);
+	const compat_ulong_t *k = kbuf;
+	const compat_ulong_t __user *u = ubuf;
+	compat_ulong_t reg;
+
+	pos /= sizeof(reg);
+	count /= sizeof(reg);
+
+	if (kbuf)
+		for (; count > 0 && pos < ELF_NGREG; --count)
+			set_reg(regs, pos++, *k++);
+	else
+		for (; count > 0 && pos < ELF_NGREG; --count) {
+			if (__get_user(reg, u++))
+				return -EFAULT;
+			set_reg(regs, pos++, reg);
+		}
+
+	kbuf = k;
+	ubuf = u;
+	pos *= sizeof(reg);
+	count *= sizeof(reg);
+	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+					 ELF_NGREG * sizeof(reg), -1);
+}
+
+/*
+ * These are the regset flavors matching the 32bit native set.
+ */
+static const struct user_regset compat_regsets[] = {
+	[REGSET_GENERAL] = {
+		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
+		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
+		.get = gpr32_get, .set = gpr32_set
+	},
+	[REGSET_FP] = {
+		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
+		.size = sizeof(__u64), .align = sizeof(__u64),
+		.get = fpr_get, .set = fpr_set
+	}
+};
+
+static const struct user_regset_view user_parisc_compat_view = {
+	.name = "parisc", .e_machine = EM_PARISC, .ei_osabi = ELFOSABI_LINUX,
+	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
+};
+#endif	/* CONFIG_64BIT */
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+	BUILD_BUG_ON(sizeof(struct user_regs_struct)/sizeof(long) != ELF_NGREG);
+	BUILD_BUG_ON(sizeof(struct user_fp_struct)/sizeof(__u64) != ELF_NFPREG);
+#ifdef CONFIG_64BIT
+	if (is_compat_task())
+		return &user_parisc_compat_view;
+#endif
+	return &user_parisc_native_view;
+}
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index c976ebfe2269..d03422e5f188 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -344,7 +344,7 @@ tracesys_next:
 #endif
 
 	cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */
-	comiclr,>>=	__NR_Linux_syscalls, %r20, %r0
+	comiclr,>>	__NR_Linux_syscalls, %r20, %r0
 	b,n	.Ltracesys_nosys
 
 	LDREGX  %r20(%r19), %r19
@@ -912,6 +912,7 @@ END(lws_table)
 
 	.align 8
 ENTRY(sys_call_table)
+	.export sys_call_table,data
 #include "syscall_table.S"
 END(sys_call_table)
 
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 400acac0a304..58dd6801f5be 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -38,6 +38,18 @@
 
 static unsigned long clocktick __read_mostly;	/* timer cycles per tick */
 
+#ifndef CONFIG_64BIT
+/*
+ * The processor-internal cycle counter (Control Register 16) is used as time
+ * source for the sched_clock() function.  This register is 64bit wide on a
+ * 64-bit kernel and 32bit on a 32-bit kernel. Since sched_clock() always
+ * requires a 64bit counter we emulate on the 32-bit kernel the higher 32bits
+ * with a per-cpu variable which we increase every time the counter
+ * wraps-around (which happens every ~4 secounds).
+ */
+static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits);
+#endif
+
 /*
  * We keep time on PA-RISC Linux by using the Interval Timer which is
  * a pair of registers; one is read-only and one is write-only; both
@@ -108,6 +120,12 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 	 */
 	mtctl(next_tick, 16);
 
+#if !defined(CONFIG_64BIT)
+	/* check for overflow on a 32bit kernel (every ~4 seconds). */
+	if (unlikely(next_tick < now))
+		this_cpu_inc(cr16_high_32_bits);
+#endif
+
 	/* Skip one clocktick on purpose if we missed next_tick.
 	 * The new CR16 must be "later" than current CR16 otherwise
 	 * itimer would not fire until CR16 wrapped - e.g 4 seconds
@@ -219,6 +237,12 @@ void __init start_cpu_itimer(void)
 	unsigned int cpu = smp_processor_id();
 	unsigned long next_tick = mfctl(16) + clocktick;
 
+#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
+	/* With multiple 64bit CPUs online, the cr16's are not syncronized. */
+	if (cpu != 0)
+		clear_sched_clock_stable();
+#endif
+
 	mtctl(next_tick, 16);		/* kick off Interval Timer (CR16) */
 
 	per_cpu(cpu_data, cpu).it_value = next_tick;
@@ -246,15 +270,52 @@ void read_persistent_clock(struct timespec *ts)
 	}
 }
 
+
+/*
+ * sched_clock() framework
+ */
+
+static u32 cyc2ns_mul __read_mostly;
+static u32 cyc2ns_shift __read_mostly;
+
+u64 sched_clock(void)
+{
+	u64 now;
+
+	/* Get current cycle counter (Control Register 16). */
+#ifdef CONFIG_64BIT
+	now = mfctl(16);
+#else
+	now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32);
+#endif
+
+	/* return the value in ns (cycles_2_ns) */
+	return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift);
+}
+
+
+/*
+ * timer interrupt and sched_clock() initialization
+ */
+
 void __init time_init(void)
 {
 	unsigned long current_cr16_khz;
 
+	current_cr16_khz = PAGE0->mem_10msec/10;	/* kHz */
 	clocktick = (100 * PAGE0->mem_10msec) / HZ;
 
+	/* calculate mult/shift values for cr16 */
+	clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
+				NSEC_PER_MSEC, 0);
+
+#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
+	/* At bootup only one 64bit CPU is online and cr16 is "stable" */
+	set_sched_clock_stable();
+#endif
+
 	start_cpu_itimer();	/* get CPU 0 started */
 
 	/* register at clocksource framework */
-	current_cr16_khz = PAGE0->mem_10msec/10;	/* kHz */
 	clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
 }
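
A note on the function-graph tracer flow implemented by the entry.S and ftrace.c changes above: prepare_ftrace_return() pushes the real return address onto the task's ftrace return stack and patches the caller's saved %rp slot, so the traced function "returns" into parisc_return_to_handler, which reports the exit and then branches back to the real caller. The stand-alone C sketch below only illustrates that swap-and-restore idea; the names (hook_return, return_trampoline, shadow_stack) and the plain function-pointer slot are made up for the illustration and are not the kernel's implementation.

#include <stdio.h>

/* A fake "saved return slot" plus a tiny shadow stack of real targets,
 * standing in for the patched %rp slot and the per-task ret_stack. */
typedef void (*ret_target)(void);

static ret_target shadow_stack[16];
static int shadow_top = -1;

static void real_caller(void)
{
	puts("back in the real caller");
}

/* Stand-in for parisc_return_to_handler: report the exit, then divert to
 * the original return target stashed on the shadow stack. */
static void return_trampoline(void)
{
	puts("function exit traced");
	shadow_stack[shadow_top--]();	/* "return" to the real caller */
}

/* Stand-in for prepare_ftrace_return(): remember the real target and make
 * the saved slot point at the trampoline instead. */
static void hook_return(ret_target *saved_slot)
{
	shadow_stack[++shadow_top] = *saved_slot;
	*saved_slot = return_trampoline;
}

int main(void)
{
	ret_target slot = real_caller;	/* plays the role of a saved %rp */

	hook_return(&slot);
	puts("function entry traced");
	slot();				/* the callee "returns" through the hook */
	return 0;
}

The real code has to perform the swap on the caller's stack frame and preserve %ret0/%ret1 around the call into ftrace_return_to_handler(), which is what the register save/restore sequence in the new return_to_handler assembly above is for.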
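
The sched_clock() added to time.c converts CR16 cycles to nanoseconds with a multiplier/shift pair that time_init() computes once via clocks_calc_mult_shift(), so the hot path needs no division. Below is a minimal stand-alone sketch of that cycles-to-nanoseconds arithmetic; the 250 MHz clock rate, the cycles_to_ns() helper and the 128-bit intermediate are illustrative assumptions, not the kernel's clocks_calc_mult_shift()/mul_u64_u32_shr() code.

#include <stdint.h>
#include <stdio.h>

/* Pick mult/shift so that ns = (cycles * mult) >> shift.
 * For an assumed 250 MHz CR16 (4 ns per cycle) and shift = 26:
 * mult = 4 << 26 = 268435456. */
static const uint32_t cyc2ns_shift = 26;
static const uint32_t cyc2ns_mul = 4u << 26;

/* Widen the product before shifting, as a mul_u64_u32_shr()-style helper
 * must, so the 64x32-bit multiply cannot overflow 64 bits. */
static uint64_t cycles_to_ns(uint64_t cycles)
{
	return (uint64_t)(((unsigned __int128)cycles * cyc2ns_mul) >> cyc2ns_shift);
}

int main(void)
{
	/* 250,000,000 cycles at an assumed 250 MHz is exactly one second. */
	printf("%llu ns\n", (unsigned long long)cycles_to_ns(250000000ULL));
	return 0;
}

On a 32-bit kernel the patch additionally widens the 32-bit CR16 reading by counting wrap-arounds in the per-cpu cr16_high_32_bits counter from timer_interrupt() before the value is fed into this conversion.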