Diffstat (limited to 'arch/arc/kernel')
-rw-r--r-- | arch/arc/kernel/ctx_sw.c     | 13
-rw-r--r-- | arch/arc/kernel/ctx_sw_asm.S | 11
-rw-r--r-- | arch/arc/kernel/entry.S      | 24
-rw-r--r-- | arch/arc/kernel/head.S       |  2
-rw-r--r-- | arch/arc/kernel/irq.c        | 12
-rw-r--r-- | arch/arc/kernel/kgdb.c       | 12
-rw-r--r-- | arch/arc/kernel/kprobes.c    |  6
-rw-r--r-- | arch/arc/kernel/reset.c      |  1
-rw-r--r-- | arch/arc/kernel/setup.c      | 11
-rw-r--r-- | arch/arc/kernel/smp.c        | 10
-rw-r--r-- | arch/arc/kernel/stacktrace.c |  5
-rw-r--r-- | arch/arc/kernel/time.c       | 11
-rw-r--r-- | arch/arc/kernel/traps.c      |  3
13 files changed, 80 insertions, 41 deletions
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
index 34410eb1a308..c14a5bea0c76 100644
--- a/arch/arc/kernel/ctx_sw.c
+++ b/arch/arc/kernel/ctx_sw.c
@@ -17,6 +17,8 @@
 #include <asm/asm-offsets.h>
 #include <linux/sched.h>
 
+#define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4)
+
 struct task_struct *__sched
 __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
 {
@@ -45,7 +47,16 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
 #endif
 
 		/* set ksp of outgoing task in tsk->thread.ksp */
+#if KSP_WORD_OFF <= 255
 		"st.as   sp, [%3, %1]    \n\t"
+#else
+		/*
+		 * Workaround for NR_CPUS=4k
+		 * %1 is bigger than 255 (S9 offset for st.as)
+		 */
+		"add2    r24, %3, %1     \n\t"
+		"st      sp, [r24]       \n\t"
+#endif
 
 		"sync   \n\t"
 
@@ -97,7 +108,7 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
 		/* FP/BLINK restore generated by gcc (standard func epilogue */
 
 		: "=r"(tmp)
-		: "n"((TASK_THREAD + THREAD_KSP) / 4), "r"(next), "r"(prev)
+		: "n"(KSP_WORD_OFF), "r"(next), "r"(prev)
 		: "blink"
 	);
 
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
index d8972345e4c2..65690e7fcc8c 100644
--- a/arch/arc/kernel/ctx_sw_asm.S
+++ b/arch/arc/kernel/ctx_sw_asm.S
@@ -14,6 +14,8 @@
 #include <asm/asm-offsets.h>
 #include <asm/linkage.h>
 
+#define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4)
+
 ;################### Low Level Context Switch ##########################
 
 	.section .sched.text,"ax",@progbits
@@ -28,8 +30,13 @@ __switch_to:
 	SAVE_CALLEE_SAVED_KERNEL
 
 	/* Save the now KSP in task->thread.ksp */
-	st.as  sp, [r0, (TASK_THREAD + THREAD_KSP)/4]
-
+#if KSP_WORD_OFF <= 255
+	st.as  sp, [r0, KSP_WORD_OFF]
+#else
+	/* Workaround for NR_CPUS=4k as ST.as can only take s9 offset */
+	add2   r24, r0, KSP_WORD_OFF
+	st     sp, [r24]
+#endif
 	/*
 	* Return last task in r0 (return reg)
 	* On ARC, Return reg = First Arg reg = r0.
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index b908dde8a331..47d09d07f093 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -250,6 +250,14 @@ ARC_ENTRY handle_interrupt_level1
 	lr  r0, [icause1]
 	and r0, r0, 0x1f
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+	; icause1 needs to be read early, before calling tracing, which
+	; can clobber scratch regs, hence use of stack to stash it
+	push r0
+	TRACE_ASM_IRQ_DISABLE
+	pop  r0
+#endif
+
 	bl.d  @arch_do_IRQ
 	mov r1, sp
 
@@ -337,9 +345,9 @@ ARC_ENTRY EV_TLBProtV
 	; vineetg: Mar 6th: Random Seg Fault issue #1
 	;  ecr and efa were not saved in case an Intr sneaks in
 	;  after fake rtie
-	;
+
 	lr  r2, [ecr]
-	lr  r1, [efa]	; Faulting Data address
+	lr  r0, [efa]	; Faulting Data address
 
 	; --------(4) Return from CPU Exception Mode ---------
 	;  Fake a rtie, but rtie to next label
@@ -348,6 +356,8 @@ ARC_ENTRY EV_TLBProtV
 
 	FAKE_RET_FROM_EXCPN r9
 
+	mov   r1, sp
+
 	;------ (5) Type of Protection Violation? ----------
 	;
 	; ProtV Hardware Exception is triggered for Access Faults of 2 types
@@ -358,16 +368,12 @@
 	bbit1 r2, ECR_C_BIT_PROTV_MISALIG_DATA, 4f
 
 	;========= (6a) Access Violation Processing ========
-	mov r0, sp		; pt_regs
 	bl  do_page_fault
 	b   ret_from_exception
 
 	;========== (6b) Non aligned access ============
 4:
-	mov r0, r1
-	mov r1, sp		; pt_regs
 
-#ifdef  CONFIG_ARC_MISALIGN_ACCESS
 	SAVE_CALLEE_SAVED_USER
 	mov r2, sp	; callee_regs
 
@@ -376,9 +382,6 @@ ARC_ENTRY EV_TLBProtV
 	; TBD: optimize - do this only if a callee reg was involved
 	; either a dst of emulated LD/ST or src with address-writeback
 	RESTORE_CALLEE_SAVED_USER
-#else
-	bl  do_misaligned_error
-#endif
 
 	b   ret_from_exception
 
@@ -575,6 +578,7 @@ resume_user_mode_begin:
 	; --- (Slow Path #2) pending signal  ---
 	mov r0, sp	; pt_regs for arg to do_signal()/do_notify_resume()
 
+	GET_CURR_THR_INFO_FLAGS   r9
 	bbit0  r9, TIF_SIGPENDING, .Lchk_notify_resume
 
 	; Normal Trap/IRQ entry only saves Scratch (caller-saved) regs
@@ -640,6 +644,8 @@ resume_kernel_mode:
 
 restore_regs :
 
+	TRACE_ASM_IRQ_ENABLE
+
 	lr	r10, [status32]
 
 	; Restore REG File. In case multiple Events outstanding,
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 0f944f024513..2c878e964a64 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -95,7 +95,7 @@ stext:
 ;----------------------------------------------------------------
 ;     First lines of code run by secondary before jumping to 'C'
 ;----------------------------------------------------------------
-	.section .init.text, "ax",@progbits
+	.section .text, "ax",@progbits
 	.type first_lines_of_secondary, @function
 	.globl first_lines_of_secondary
 
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 5fc92455da36..a4b141ee9a6a 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -39,10 +39,14 @@ void arc_init_IRQ(void)
 	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
 	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
 
-	if (level_mask) {
+	/*
+	 * Write to register, even if no LV2 IRQs configured to reset it
+	 * in case bootloader had mucked with it
+	 */
+	write_aux_reg(AUX_IRQ_LEV, level_mask);
+
+	if (level_mask)
 		pr_info("Level-2 interrupts bitset %x\n", level_mask);
-		write_aux_reg(AUX_IRQ_LEV, level_mask);
-	}
 }
 
 /*
@@ -146,7 +150,7 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
-int __init get_hw_config_num_irq(void)
+int get_hw_config_num_irq(void)
 {
 	uint32_t val = read_aux_reg(ARC_REG_VECBASE_BCR);
 
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
index a7698fb14818..a2ff5c5d1450 100644
--- a/arch/arc/kernel/kgdb.c
+++ b/arch/arc/kernel/kgdb.c
@@ -196,6 +196,18 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 	instruction_pointer(regs) = ip;
 }
 
+static void kgdb_call_nmi_hook(void *ignored)
+{
+	kgdb_nmicallback(raw_smp_processor_id(), NULL);
+}
+
+void kgdb_roundup_cpus(unsigned long flags)
+{
+	local_irq_enable();
+	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
+	local_irq_disable();
+}
+
 struct kgdb_arch arch_kgdb_ops = {
 	/* breakpoint instruction: TRAP_S 0x3 */
 #ifdef CONFIG_CPU_BIG_ENDIAN
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
index 72f97822784a..eb1c2ee5eaf0 100644
--- a/arch/arc/kernel/kprobes.c
+++ b/arch/arc/kernel/kprobes.c
@@ -87,13 +87,13 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 }
 
 static inline void __kprobes set_current_kprobe(struct kprobe *p)
 {
-	__get_cpu_var(current_kprobe) = p;
+	__this_cpu_write(current_kprobe, p);
 }
 
 static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
@@ -237,7 +237,7 @@ int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
 
 		return 1;
 	} else if (kprobe_running()) {
-		p = __get_cpu_var(current_kprobe);
+		p = __this_cpu_read(current_kprobe);
 		if (p->break_handler && p->break_handler(p, regs)) {
 			setup_singlestep(p, regs);
 			kcb->kprobe_status = KPROBE_HIT_SS;
diff --git a/arch/arc/kernel/reset.c b/arch/arc/kernel/reset.c
index e227a2b1c943..2768fa1e39b9 100644
--- a/arch/arc/kernel/reset.c
+++ b/arch/arc/kernel/reset.c
@@ -31,3 +31,4 @@ void machine_power_off(void)
 }
 
 void (*pm_power_off) (void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index f7d4a41d0629..643eae4436e0 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -36,8 +36,7 @@ struct task_struct *_current_task[NR_CPUS];	/* For stack switching */
 
 struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
 
-
-void read_arc_build_cfg_regs(void)
+static void read_arc_build_cfg_regs(void)
 {
 	struct bcr_perip uncached_space;
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
@@ -105,7 +104,7 @@ static const struct cpuinfo_data arc_cpu_tbl[] = {
 	{ {0x00, NULL		} }
 };
 
-char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
+static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 {
 	int n = 0;
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
@@ -170,7 +169,7 @@ static const struct id_to_str mac_mul_nm[] = {
 	{0x6, "Dual 16x16 and 32x16"}
 };
 
-char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
+static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
 {
 	int n = 0;
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
@@ -233,7 +232,7 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
 	return buf;
 }
 
-void arc_chk_ccms(void)
+static void arc_chk_ccms(void)
 {
 #if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM)
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
@@ -268,7 +267,7 @@ void arc_chk_ccms(void)
  * hardware has dedicated regs which need to be saved/restored on ctx-sw
  * (Single Precision uses core regs), thus kernel is kind of oblivious to it
  */
-void arc_chk_fpu(void)
+static void arc_chk_fpu(void)
 {
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
 
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index bca3052c956d..c2f9ebbc38f6 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -95,7 +95,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
  * If it turns out to be elaborate, it's better to code it in assembly
  *
  */
-void __attribute__((weak)) arc_platform_smp_wait_to_boot(int cpu)
+void __weak arc_platform_smp_wait_to_boot(int cpu)
 {
 	/*
 	 * As a hack for debugging - since debugger will single-step over the
@@ -128,6 +128,7 @@ void start_kernel_secondary(void)
 	atomic_inc(&mm->mm_users);
 	atomic_inc(&mm->mm_count);
 	current->active_mm = mm;
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
 
 	notify_cpu_starting(cpu);
 	set_cpu_online(cpu, true);
@@ -210,7 +211,6 @@ enum ipi_msg_type {
 	IPI_NOP = 0,
 	IPI_RESCHEDULE = 1,
 	IPI_CALL_FUNC,
-	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP
 };
 
@@ -254,7 +254,7 @@ void smp_send_stop(void)
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	ipi_send_msg(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+	ipi_send_msg(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -286,10 +286,6 @@ static inline void __do_IPI(unsigned long *ops, struct ipi_data *ipi, int cpu)
 		generic_smp_call_function_interrupt();
 		break;
 
-	case IPI_CALL_FUNC_SINGLE:
-		generic_smp_call_function_single_interrupt();
-		break;
-
 	case IPI_CPU_STOP:
 		ipi_cpu_stop(cpu);
 		break;
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index f8b7d880304d..9ce47cfe2303 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -237,11 +237,14 @@ unsigned int get_wchan(struct task_struct *tsk)
  */
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
+	/* Assumes @tsk is sleeping so unwinds from __switch_to */
 	arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
 }
 
 void save_stack_trace(struct stack_trace *trace)
 {
-	arc_unwind_core(current, NULL, __collect_all, trace);
+	/* Pass NULL for task so it unwinds the current call frame */
+	arc_unwind_core(NULL, NULL, __collect_all, trace);
 }
+EXPORT_SYMBOL_GPL(save_stack_trace);
 #endif
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 3fde7de3ea67..e5f3a837fb35 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -63,9 +63,10 @@
 
 int arc_counter_setup(void)
 {
-	/* RTSC insn taps into cpu clk, needs no setup */
-
-	/* For SMP, only allowed if cross-core-sync, hence usable as cs */
+	/*
+	 * For SMP this needs to be 0. However Kconfig glue doesn't
+	 * enable this option for SMP configs
+	 */
 	return 1;
 }
 
@@ -206,7 +207,7 @@ static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
 
 static irqreturn_t timer_irq_handler(int irq, void *dev_id)
 {
-	struct clock_event_device *clk = &__get_cpu_var(arc_clockevent_device);
+	struct clock_event_device *clk = this_cpu_ptr(&arc_clockevent_device);
 
 	arc_timer_event_ack(clk->mode == CLOCK_EVT_MODE_PERIODIC);
 	clk->event_handler(clk);
@@ -223,7 +224,7 @@ static struct irqaction arc_timer_irq = {
  * Setup the local event timer for @cpu
  * N.B. weak so that some exotic ARC SoCs can completely override it
  */
-void __attribute__((weak)) arc_local_timer_setup(unsigned int cpu)
+void __weak arc_local_timer_setup(unsigned int cpu)
 {
 	struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
 
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
index e21692d2fdab..3eadfdabc322 100644
--- a/arch/arc/kernel/traps.c
+++ b/arch/arc/kernel/traps.c
@@ -84,19 +84,18 @@ DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", do_memory_error, BUS_ADRERR)
 DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT)
 DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN)
 
-#ifdef CONFIG_ARC_MISALIGN_ACCESS
 /*
  * Entry Point for Misaligned Data access Exception, for emulating in software
  */
 int do_misaligned_access(unsigned long address, struct pt_regs *regs,
			 struct callee_regs *cregs)
 {
+	/* If emulation not enabled, or failed, kill the task */
 	if (misaligned_fixup(address, regs, cregs) != 0)
 		return do_misaligned_error(address, regs);
 
 	return 0;
 }
-#endif
 
 /*
  * Entry point for miscll errors such as Nested Exceptions
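
Note on the KSP_WORD_OFF change in ctx_sw.c / ctx_sw_asm.S above: ST.as scales its immediate by the access size, but the immediate itself is a signed 9-bit (s9) field, so only word offsets up to 255 are encodable; with NR_CPUS=4k the cpumasks embedded in task_struct push thread.ksp past that limit, hence the add2 + st fallback. Below is a minimal user-space sketch of the same range check; the offsets are invented for illustration (the kernel takes the real values from asm-offsets.h).

/*
 * Illustrative only: models the "#if KSP_WORD_OFF <= 255" decision from the
 * patch. TASK_THREAD_GUESS / THREAD_KSP_GUESS are made-up numbers, not the
 * values generated into asm-offsets.h.
 */
#include <stdio.h>

#define TASK_THREAD_GUESS	1600	/* hypothetical offsetof(task_struct, thread) */
#define THREAD_KSP_GUESS	0	/* hypothetical offsetof(thread_struct, ksp)  */
#define KSP_WORD_OFF	((TASK_THREAD_GUESS + THREAD_KSP_GUESS) / 4)

int main(void)
{
	if (KSP_WORD_OFF <= 255)
		printf("word offset %d fits s9: st.as sp, [tsk, %d]\n",
		       KSP_WORD_OFF, KSP_WORD_OFF);
	else
		printf("word offset %d exceeds s9: add2 r24, tsk, %d; st sp, [r24]\n",
		       KSP_WORD_OFF, KSP_WORD_OFF);
	return 0;
}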
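The kprobes.c and time.c hunks also convert the old __get_cpu_var() lvalue accessor to the __this_cpu_read()/__this_cpu_write() and this_cpu_ptr() operations. A rough sketch of the before/after pattern follows, using a plain array as a stand-in for per-CPU storage (the my_* helpers are invented for the example and are not kernel APIs).

/*
 * Illustrative only: the variable and accessors below are user-space
 * stand-ins, not the kernel's per-CPU machinery.
 */
#include <stdio.h>

#define NR_CPUS_SIM	4
static int current_kprobe_sim[NR_CPUS_SIM];	/* plays the per-CPU variable */
static int cur_cpu = 1;				/* plays smp_processor_id()   */

/* old style: an lvalue expression assigned to directly (__get_cpu_var) */
#define my_get_cpu_var(v)	((v)[cur_cpu])
/* new style: explicit read/write ops (__this_cpu_read / __this_cpu_write) */
#define my_this_cpu_write(v, x)	((v)[cur_cpu] = (x))
#define my_this_cpu_read(v)	((v)[cur_cpu])
/* new style: pointer to this CPU's slot (this_cpu_ptr) */
#define my_this_cpu_ptr(v)	(&(v)[cur_cpu])

int main(void)
{
	my_get_cpu_var(current_kprobe_sim) = 41;	/* old pattern */
	my_this_cpu_write(current_kprobe_sim, 42);	/* new pattern */
	printf("%d %d\n", my_this_cpu_read(current_kprobe_sim),
	       *my_this_cpu_ptr(current_kprobe_sim));
	return 0;
}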