Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--   arch/arm/kernel/asm-offsets.c        |  2
-rw-r--r--   arch/arm/kernel/entry-armv.S         |  4
-rw-r--r--   arch/arm/kernel/entry-common.S       | 25
-rw-r--r--   arch/arm/kernel/perf_event_v6.c      |  4
-rw-r--r--   arch/arm/kernel/perf_event_v7.c      |  3
-rw-r--r--   arch/arm/kernel/perf_event_xscale.c  |  6
-rw-r--r--   arch/arm/kernel/process.c            |  2
-rw-r--r--   arch/arm/kernel/signal.c             | 14
-rw-r--r--   arch/arm/kernel/sys_oabi-compat.c    |  4
9 files changed, 43 insertions, 21 deletions
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 27c5381518d8..974d8d7d1bcd 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -61,7 +61,7 @@ int main(void)
 {
   DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
   DEFINE(TSK_STACK_CANARY,	offsetof(struct task_struct, stack_canary));
 #endif
   BLANK();
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 1752033b0070..179a9f6bd1e3 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -791,7 +791,7 @@ ENTRY(__switch_to)
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
 	switch_tls r1, r4, r5, r3, r7
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	ldr	r7, [r2, #TI_TASK]
 	ldr	r8, =__stack_chk_guard
 	.if (TSK_STACK_CANARY > IMM12_MASK)
@@ -807,7 +807,7 @@ ENTRY(__switch_to)
 	ldr	r0, =thread_notify_head
 	mov	r1, #THREAD_NOTIFY_SWITCH
 	bl	atomic_notifier_call_chain
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	str	r7, [r8]
 #endif
 THUMB(	mov	ip, r4 )
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 20df608bf343..106a1466518d 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -39,12 +39,13 @@ saved_pc	.req	lr
 	.section .entry.text,"ax",%progbits
 	.align	5
-#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
+#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING) || \
+	IS_ENABLED(CONFIG_DEBUG_RSEQ))
 /*
  * This is the fast syscall return path.  We do as little as possible here,
  * such as avoiding writing r0 to the stack.  We only use this path if we
- * have tracing and context tracking disabled - the overheads from those
- * features make this path too inefficient.
+ * have tracing, context tracking and rseq debug disabled - the overheads
+ * from those features make this path too inefficient.
  */
 ret_fast_syscall:
 UNWIND(.fnstart	)
@@ -71,14 +72,20 @@ fast_work_pending:
 /* fall through to work_pending */
 #else
 /*
- * The "replacement" ret_fast_syscall for when tracing or context tracking
- * is enabled.  As we will need to call out to some C functions, we save
- * r0 first to avoid needing to save registers around each C function call.
+ * The "replacement" ret_fast_syscall for when tracing, context tracking,
+ * or rseq debug is enabled.  As we will need to call out to some C functions,
+ * we save r0 first to avoid needing to save registers around each C function
+ * call.
  */
 ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
 	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
+#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
+	/* do_rseq_syscall needs interrupts enabled. */
+	mov	r0, sp				@ 'regs'
+	bl	do_rseq_syscall
+#endif
 	disable_irq_notrace			@ disable interrupts
 	ldr	r2, [tsk, #TI_ADDR_LIMIT]
 	cmp	r2, #TASK_SIZE
@@ -113,6 +120,12 @@ ENDPROC(ret_fast_syscall)
  */
 ENTRY(ret_to_user)
 ret_slow_syscall:
+#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
+	/* do_rseq_syscall needs interrupts enabled. */
+	enable_irq_notrace			@ enable interrupts
+	mov	r0, sp				@ 'regs'
+	bl	do_rseq_syscall
+#endif
 	disable_irq_notrace			@ disable interrupts
 ENTRY(ret_to_user_from_irq)
 	ldr	r2, [tsk, #TI_ADDR_LIMIT]
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 1d7061a38922..be42c4f66a40 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -303,12 +303,10 @@ static void armv6pmu_enable_event(struct perf_event *event)
 }
 
 static irqreturn_t
-armv6pmu_handle_irq(int irq_num,
-		    void *dev)
+armv6pmu_handle_irq(struct arm_pmu *cpu_pmu)
 {
 	unsigned long pmcr = armv6_pmcr_read();
 	struct perf_sample_data data;
-	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
 	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
 	struct pt_regs *regs;
 	int idx;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 870b66c1e4ef..57f01e059f39 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -946,11 +946,10 @@ static void armv7pmu_disable_event(struct perf_event *event)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
+static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
 {
 	u32 pmnc;
 	struct perf_sample_data data;
-	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
 	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
 	struct pt_regs *regs;
 	int idx;
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index fcf218da660e..88d1a76f5367 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -142,11 +142,10 @@ xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
 }
 
 static irqreturn_t
-xscale1pmu_handle_irq(int irq_num, void *dev)
+xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu)
 {
 	unsigned long pmnc;
 	struct perf_sample_data data;
-	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
 	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
 	struct pt_regs *regs;
 	int idx;
@@ -489,11 +488,10 @@ xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
 }
 
 static irqreturn_t
-xscale2pmu_handle_irq(int irq_num, void *dev)
+xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu)
 {
 	unsigned long pmnc, of_flags;
 	struct perf_sample_data data;
-	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
 	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
 	struct pt_regs *regs;
 	int idx;
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 1523cb18b109..225d1c58d2de 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -39,7 +39,7 @@
 #include <asm/tls.h>
 #include <asm/vdso.h>
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 #include <linux/stackprotector.h>
 unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index bd8810d4acb3..dec130e7078c 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -541,6 +541,12 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 	int ret;
 
 	/*
+	 * Increment event counter and perform fixup for the pre-signal
+	 * frame.
+	 */
+	rseq_signal_deliver(ksig, regs);
+
+	/*
 	 * Set up the stack frame
 	 */
 	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
@@ -660,6 +666,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 		} else {
 			clear_thread_flag(TIF_NOTIFY_RESUME);
 			tracehook_notify_resume(regs);
+			rseq_handle_notify_resume(NULL, regs);
 		}
 	}
 	local_irq_disable();
@@ -703,3 +710,10 @@ asmlinkage void addr_limit_check_failed(void)
 {
 	addr_limit_user_check();
 }
+
+#ifdef CONFIG_DEBUG_RSEQ
+asmlinkage void do_rseq_syscall(struct pt_regs *regs)
+{
+	rseq_syscall(regs);
+}
+#endif
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index b9786f491873..1df21a61e379 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -286,7 +286,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
 		return -EINVAL;
 	if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
 		return -EFAULT;
-	kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
+	kbuf = kmalloc_array(maxevents, sizeof(*kbuf), GFP_KERNEL);
 	if (!kbuf)
 		return -ENOMEM;
 	fs = get_fs();
@@ -324,7 +324,7 @@ asmlinkage long sys_oabi_semtimedop(int semid,
 		return -EINVAL;
 	if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
 		return -EFAULT;
-	sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
+	sops = kmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
 	if (!sops)
 		return -ENOMEM;
 	err = 0;
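
The entry-common.S and signal.c hunks above are the ARM wiring for restartable sequences (rseq): signal delivery fixes up the pre-signal frame through rseq_signal_deliver(), TIF_NOTIFY_RESUME processing refreshes the userspace rseq area through rseq_handle_notify_resume(), and, under CONFIG_DEBUG_RSEQ, both syscall return paths call the new do_rseq_syscall() wrapper so rseq_syscall() can catch tasks that enter the kernel from inside an rseq critical section. As a minimal userspace sketch of what the feature provides — assuming a 4.18+ kernel with the uapi header <linux/rseq.h> and a libc whose <sys/syscall.h> defines __NR_rseq; RSEQ_SIG and rseq_area are arbitrary names chosen here — a thread registers an rseq area and the kernel then keeps its cpu_id field current across migrations:

#define _GNU_SOURCE
#include <linux/rseq.h>		/* uapi struct rseq (kernel >= 4.18) */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

#define RSEQ_SIG 0x53053053	/* user-chosen signature, checked on unregister */

/* The rseq ABI requires an aligned, per-thread struct rseq. */
static __thread struct rseq rseq_area __attribute__((aligned(32)));

int main(void)
{
	/* rseq(rseq, rseq_len, flags, sig): register this thread's area. */
	if (syscall(__NR_rseq, &rseq_area, sizeof(rseq_area), 0, RSEQ_SIG))
		return 1;
	/* The kernel now updates rseq_area.cpu_id whenever the thread
	 * migrates, so reading the current CPU is a plain load. */
	printf("running on cpu %u\n", rseq_area.cpu_id);
	return 0;
}

With CONFIG_DEBUG_RSEQ enabled, the do_rseq_syscall() hook added above would kill such a thread with SIGSEGV if it issued a system call while its rseq_cs field pointed at an active critical section.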
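
The three perf_event_*.c hunks change the handle_irq hook from the generic irqaction shape (int irq_num, void *dev) to taking the struct arm_pmu directly, which is why the struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; cast disappears from every handler: the cast now lives in one place, the shared dispatcher in drivers/perf/arm_pmu.c. A sketch of that dispatch side, paraphrased from the same kernel series rather than quoted verbatim:

/* The genirq layer still requires the (int, void *) signature, so the
 * shared dispatcher keeps it, recovers the PMU from the device cookie,
 * and hands the typed pointer to the implementation hook. */
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	/* The IRQ was requested with a (possibly percpu) struct arm_pmu **,
	 * so a single dereference yields the PMU itself. */
	struct arm_pmu *armpmu = *(struct arm_pmu **)dev;

	return armpmu->handle_irq(armpmu);	/* e.g. armv7pmu_handle_irq */
}

This keeps the void-pointer juggling out of the per-implementation handlers and lets the compiler type-check the hook.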
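
The sys_oabi-compat.c hunks replace open-coded kmalloc(sizeof(*buf) * n, ...) with kmalloc_array(n, sizeof(*buf), ...). Both call sites already reject bad counts just above (the -EINVAL returns visible in the context lines), so the conversion is about making the size computation overflow-safe by construction instead of by nearby checks. Roughly what the helper adds — a paraphrase of the classic include/linux/slab.h definition, with kmalloc_array_sketch a hypothetical name used here for illustration:

/* Fail the allocation rather than let n * size wrap and return a
 * short buffer that later writes would overrun. */
static inline void *kmalloc_array_sketch(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;			/* n * size would overflow */
	return kmalloc(n * size, flags);
}

Call sites keep the NULL check they already have, e.g. kbuf = kmalloc_array(maxevents, sizeof(*kbuf), GFP_KERNEL);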