Diffstat (limited to 'arch/mips/kernel')
35 files changed, 659 insertions, 604 deletions
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 58ad63d7eb42..9c7f3e136d50 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -1,5 +1,6 @@
 /*
  * Support for n32 Linux/MIPS ELF binaries.
+ * Author: Ralf Baechle (ralf@linux-mips.org)
  *
  * Copyright (C) 1999, 2001 Ralf Baechle
  * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
@@ -37,7 +38,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 #define ELF_ET_DYN_BASE		(TASK32_SIZE / 3 * 2)
 
 #include <asm/processor.h>
-#include <linux/module.h>
 #include <linux/elfcore.h>
 #include <linux/compat.h>
 #include <linux/math64.h>
@@ -96,12 +96,6 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
 
 #define ELF_CORE_EFLAGS EF_MIPS_ABI2
 
-MODULE_DESCRIPTION("Binary format loader for compatibility with n32 Linux/MIPS binaries");
-MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
-
-#undef MODULE_DESCRIPTION
-#undef MODULE_AUTHOR
-
 #undef TASK_SIZE
 #define TASK_SIZE TASK_SIZE32
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 49fb881481f7..1ab34322dd97 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -1,5 +1,6 @@
 /*
  * Support for o32 Linux/MIPS ELF binaries.
+ * Author: Ralf Baechle (ralf@linux-mips.org)
  *
  * Copyright (C) 1999, 2001 Ralf Baechle
  * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
@@ -42,7 +43,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 
 #include <asm/processor.h>
-#include <linux/module.h>
 #include <linux/elfcore.h>
 #include <linux/compat.h>
 #include <linux/math64.h>
@@ -99,12 +99,6 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
 	value->tv_usec = rem / NSEC_PER_USEC;
 }
 
-MODULE_DESCRIPTION("Binary format loader for compatibility with o32 Linux/MIPS binaries");
-MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
-
-#undef MODULE_DESCRIPTION
-#undef MODULE_AUTHOR
-
 #undef TASK_SIZE
 #define TASK_SIZE TASK_SIZE32
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 46c227fc98f5..12c718181e5e 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -9,7 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/signal.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <asm/branch.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
@@ -866,3 +866,37 @@ unaligned:
 	force_sig(SIGBUS, current);
 	return -EFAULT;
 }
+
+#if (defined CONFIG_KPROBES) || (defined CONFIG_UPROBES)
+
+int __insn_is_compact_branch(union mips_instruction insn)
+{
+	if (!cpu_has_mips_r6)
+		return 0;
+
+	switch (insn.i_format.opcode) {
+	case blezl_op:
+	case bgtzl_op:
+	case blez_op:
+	case bgtz_op:
+		/*
+		 * blez[l] and bgtz[l] opcodes with non-zero rt
+		 * are MIPS R6 compact branches
+		 */
+		if (insn.i_format.rt)
+			return 1;
+		break;
+	case bc6_op:
+	case balc6_op:
+	case pop10_op:
+	case pop30_op:
+	case pop66_op:
+	case pop76_op:
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__insn_is_compact_branch);
+
+#endif /* CONFIG_KPROBES || CONFIG_UPROBES */
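The new __insn_is_compact_branch() helper treats the blez/bgtz opcode families as compact branches only when their rt field is non-zero, since rt == 0 encodes the classic delay-slot forms. A minimal user-space sketch of that decode rule, assuming the standard MIPS32 opcode numbers (the kernel decodes via union mips_instruction from asm/inst.h):

#include <stdio.h>
#include <stdint.h>

/* Major opcodes of the blez/bgtz families (MIPS32 encoding). */
enum { BLEZ = 0x06, BGTZ = 0x07, BLEZL = 0x16, BGTZL = 0x17 };

/* 1 if `word` is an R6 compact branch reusing a blez/bgtz opcode. */
static int is_r6_compact_blez_family(uint32_t word)
{
	uint32_t op = word >> 26;
	uint32_t rt = (word >> 16) & 0x1f;

	switch (op) {
	case BLEZ: case BGTZ: case BLEZL: case BGTZL:
		return rt != 0;	/* rt == 0: ordinary delay-slot branch */
	default:
		return 0;
	}
}

int main(void)
{
	printf("blez $2, off:    %d\n", is_r6_compact_blez_family(0x18400004));
	printf("blezalc $3, off: %d\n", is_r6_compact_blez_family(0x18030004));
	return 0;
}

The sample words are hand-assembled for illustration only; blezalc reuses the blez major opcode with rs = 0 and rt != 0.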
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index a88d44247cc8..dd3175442c9e 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -352,7 +352,12 @@ __setup("nohtw", htw_disable);
 static int mips_ftlb_disabled;
 static int mips_has_ftlb_configured;
 
-static int set_ftlb_enable(struct cpuinfo_mips *c, int enable);
+enum ftlb_flags {
+	FTLB_EN		= 1 << 0,
+	FTLB_SET_PROB	= 1 << 1,
+};
+
+static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags);
 
 static int __init ftlb_disable(char *s)
 {
@@ -371,8 +376,6 @@ static int __init ftlb_disable(char *s)
 		return 1;
 	}
 
-	back_to_back_c0_hazard();
-
 	config4 = read_c0_config4();
 
 	/* Check that FTLB has been disabled */
@@ -531,7 +534,7 @@ static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c)
 		return 3;
 }
 
-static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
+static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags)
 {
 	unsigned int config;
 
@@ -542,33 +545,33 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
 	case CPU_P6600:
 		/* proAptiv & related cores use Config6 to enable the FTLB */
 		config = read_c0_config6();
-		/* Clear the old probability value */
-		config &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
-		if (enable)
-			/* Enable FTLB */
-			write_c0_config6(config |
-					 (calculate_ftlb_probability(c)
-					  << MIPS_CONF6_FTLBP_SHIFT)
-					 | MIPS_CONF6_FTLBEN);
+
+		if (flags & FTLB_EN)
+			config |= MIPS_CONF6_FTLBEN;
 		else
-			/* Disable FTLB */
-			write_c0_config6(config &  ~MIPS_CONF6_FTLBEN);
+			config &= ~MIPS_CONF6_FTLBEN;
+
+		if (flags & FTLB_SET_PROB) {
+			config &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
+			config |= calculate_ftlb_probability(c)
+				  << MIPS_CONF6_FTLBP_SHIFT;
+		}
+
+		write_c0_config6(config);
+		back_to_back_c0_hazard();
 		break;
 	case CPU_I6400:
-		/* I6400 & related cores use Config7 to configure FTLB */
-		config = read_c0_config7();
-		/* Clear the old probability value */
-		config &= ~(3 << MIPS_CONF7_FTLBP_SHIFT);
-		write_c0_config7(config | (calculate_ftlb_probability(c)
-					   << MIPS_CONF7_FTLBP_SHIFT));
-		break;
+		/* There's no way to disable the FTLB */
+		if (!(flags & FTLB_EN))
+			return 1;
+		return 0;
 	case CPU_LOONGSON3:
 		/* Flush ITLB, DTLB, VTLB and FTLB */
 		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB |
			      LOONGSON_DIAG_VTLB | LOONGSON_DIAG_FTLB);
 		/* Loongson-3 cores use Config6 to enable the FTLB */
 		config = read_c0_config6();
-		if (enable)
+		if (flags & FTLB_EN)
 			/* Enable FTLB */
 			write_c0_config6(config & ~MIPS_CONF6_FTLBDIS);
 		else
@@ -788,6 +791,7 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
 			PAGE_SIZE, config4);
 			/* Switch FTLB off */
 			set_ftlb_enable(c, 0);
+			mips_ftlb_disabled = 1;
 			break;
 		}
 		c->tlbsizeftlbsets = 1 <<
@@ -852,7 +856,7 @@ static void decode_configs(struct cpuinfo_mips *c)
 	c->scache.flags = MIPS_CACHE_NOT_PRESENT;
 
 	/* Enable FTLB if present and not disabled */
-	set_ftlb_enable(c, !mips_ftlb_disabled);
+	set_ftlb_enable(c, mips_ftlb_disabled ? 0 : FTLB_EN);
 
 	ok = decode_config0(c);			/* Read Config registers.  */
 	BUG_ON(!ok);				/* Arch spec violation!  */
@@ -902,6 +906,9 @@ static void decode_configs(struct cpuinfo_mips *c)
 		}
 	}
 
+	/* configure the FTLB write probability */
+	set_ftlb_enable(c, (mips_ftlb_disabled ? 0 : FTLB_EN) | FTLB_SET_PROB);
+
 	mips_probe_watch_registers(c);
 
 #ifndef CONFIG_MIPS_CPS
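set_ftlb_enable() now takes an or-able flag mask instead of a bare boolean, so callers can enable the FTLB and program the write probability independently. A stand-alone sketch of the same pattern (the register constants below are placeholders, not the real Config6 layout):

#include <stdio.h>

enum ftlb_flags {
	FTLB_EN		= 1 << 0,
	FTLB_SET_PROB	= 1 << 1,
};

#define CONF_FTLBEN		(1u << 15)	/* placeholder enable bit */
#define CONF_FTLBP_SHIFT	16		/* placeholder 2-bit probability field */

static unsigned apply_ftlb_flags(unsigned config, enum ftlb_flags flags,
				 unsigned prob)
{
	if (flags & FTLB_EN)
		config |= CONF_FTLBEN;
	else
		config &= ~CONF_FTLBEN;

	if (flags & FTLB_SET_PROB) {
		config &= ~(3u << CONF_FTLBP_SHIFT);
		config |= prob << CONF_FTLBP_SHIFT;
	}
	return config;
}

int main(void)
{
	unsigned config = apply_ftlb_flags(0, FTLB_EN | FTLB_SET_PROB, 3);

	printf("config = %#x\n", config);
	return 0;
}

The design choice mirrors the patch: decode_configs() can first enable the FTLB, then come back after probing cache geometry and set the probability in a second call.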
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index 610f0f3bdb34..1723b1762297 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -47,9 +47,14 @@ static void crash_shutdown_secondary(void *passed_regs)
 
 static void crash_kexec_prepare_cpus(void)
 {
+	static int cpus_stopped;
 	unsigned int msecs;
+	unsigned int ncpus;
 
-	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
+	if (cpus_stopped)
+		return;
+
+	ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
 
 	dump_send_ipi(crash_shutdown_secondary);
 	smp_wmb();
@@ -64,6 +69,17 @@ static void crash_kexec_prepare_cpus(void)
 		cpu_relax();
 		mdelay(1);
 	}
+
+	cpus_stopped = 1;
+}
+
+/* Override the weak function in kernel/panic.c */
+void crash_smp_send_stop(void)
+{
+	if (_crash_smp_send_stop)
+		_crash_smp_send_stop();
+
+	crash_kexec_prepare_cpus();
+}
 
 #else /* !defined(CONFIG_SMP) */
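crash_kexec_prepare_cpus() can now be reached twice, once via the new crash_smp_send_stop() override and again from the crash shutdown path, so a static flag makes the second call a no-op. The shape of that guard, reduced to plain C (single-threaded sketch; the kernel relies on panic context, not locking, for safety):

#include <stdio.h>

static void stop_secondary_cpus(void)
{
	static int cpus_stopped;

	if (cpus_stopped)
		return;			/* already done on the first call */

	puts("sending stop IPIs");	/* stands in for dump_send_ipi() */
	cpus_stopped = 1;
}

int main(void)
{
	stop_secondary_cpus();	/* does the work */
	stop_secondary_cpus();	/* silently returns */
	return 0;
}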
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 937c54bc8ccc..30a3b75e88eb 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -382,8 +382,8 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
 	if (unlikely(faulted))
 		goto out;
 
-	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
-	    == -EBUSY) {
+	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp,
+				     NULL) == -EBUSY) {
 		*parent_ra_addr = old_parent_ra;
 		return;
 	}
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 17326a90d53c..dc0b29612891 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -142,9 +142,8 @@ LEAF(__r4k_wait)
 	PTR_LA	k1, __r4k_wait
 	ori	k0, 0x1f	/* 32 byte rollback region */
 	xori	k0, 0x1f
-	bne	k0, k1, 9f
+	bne	k0, k1, \handler
 	MTC0	k0, CP0_EPC
-9:
 	.set pop
 	.endm
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 212f46f2014e..f5c8bce70db2 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -32,7 +32,8 @@
 #include <asm/ptrace.h>
 #include <asm/branch.h>
 #include <asm/break.h>
-#include <asm/inst.h>
+
+#include "probes-common.h"
 
 static const union mips_instruction breakpoint_insn = {
 	.b_format = {
@@ -55,63 +56,7 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 static int __kprobes insn_has_delayslot(union mips_instruction insn)
 {
-	switch (insn.i_format.opcode) {
-
-		/*
-		 * This group contains:
-		 * jr and jalr are in r_format format.
-		 */
-	case spec_op:
-		switch (insn.r_format.func) {
-		case jr_op:
-		case jalr_op:
-			break;
-		default:
-			goto insn_ok;
-		}
-
-		/*
-		 * This group contains:
-		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
-		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
-		 */
-	case bcond_op:
-
-		/*
-		 * These are unconditional and in j_format.
-		 */
-	case jal_op:
-	case j_op:
-
-		/*
-		 * These are conditional and in i_format.
-		 */
-	case beq_op:
-	case beql_op:
-	case bne_op:
-	case bnel_op:
-	case blez_op:
-	case blezl_op:
-	case bgtz_op:
-	case bgtzl_op:
-
-		/*
-		 * These are the FPA/cp1 branch instructions.
-		 */
-	case cop1_op:
-
-#ifdef CONFIG_CPU_CAVIUM_OCTEON
-	case lwc2_op: /* This is bbit0 on Octeon */
-	case ldc2_op: /* This is bbit032 on Octeon */
-	case swc2_op: /* This is bbit1 on Octeon */
-	case sdc2_op: /* This is bbit132 on Octeon */
-#endif
-		return 1;
-	default:
-		break;
-	}
-insn_ok:
-	return 0;
+	return __insn_has_delay_slot(insn);
 }
 
 /*
@@ -161,6 +106,12 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 		goto out;
 	}
 
+	if (__insn_is_compact_branch(insn)) {
+		pr_notice("Kprobes for compact branches are not supported\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
 	/* insn: must be on special executable page on mips. */
 	p->ainsn.insn = get_insn_slot();
 	if (!p->ainsn.insn) {
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 0b29646bcee7..50fb62544df7 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -26,7 +26,6 @@
 #include <linux/utsname.h>
 #include <linux/personality.h>
 #include <linux/dnotify.h>
-#include <linux/module.h>
 #include <linux/binfmts.h>
 #include <linux/security.h>
 #include <linux/compat.h>
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
index 50980bf3983e..59725204105c 100644
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -25,6 +25,7 @@ void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
 #ifdef CONFIG_SMP
 void (*relocated_kexec_smp_wait) (void *);
 atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
+void (*_crash_smp_send_stop)(void) = NULL;
 #endif
 
 int
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 566b8d2c092c..a4964c334cab 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -21,6 +21,11 @@ static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
 
 static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
 
+phys_addr_t __weak mips_cpc_default_phys_base(void)
+{
+	return 0;
+}
+
 /**
  * mips_cpc_phys_base - retrieve the physical base address of the CPC
  *
@@ -43,8 +48,12 @@ static phys_addr_t mips_cpc_phys_base(void)
 	if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
 		return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;
 
-	/* Otherwise, give it the default address & enable it */
+	/* Otherwise, use the default address */
 	cpc_base = mips_cpc_default_phys_base();
+	if (!cpc_base)
+		return cpc_base;
+
+	/* Enable the CPC, mapped at the default address */
 	write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
 	return cpc_base;
 }
@@ -52,7 +61,7 @@ static phys_addr_t mips_cpc_phys_base(void)
 int mips_cpc_probe(void)
 {
 	phys_addr_t addr;
-	unsigned cpu;
+	unsigned int cpu;
 
 	for_each_possible_cpu(cpu)
 		spin_lock_init(&per_cpu(cpc_core_lock, cpu));
@@ -70,7 +79,12 @@ int mips_cpc_probe(void)
 
 void mips_cpc_lock_other(unsigned int core)
 {
-	unsigned curr_core;
+	unsigned int curr_core;
+
+	if (mips_cm_revision() >= CM_REV_CM3)
+		/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
+		return;
+
 	preempt_disable();
 	curr_core = current_cpu_data.core;
 	spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
@@ -86,7 +100,13 @@ void mips_cpc_lock_other(unsigned int core)
 
 void mips_cpc_unlock_other(void)
 {
-	unsigned curr_core = current_cpu_data.core;
+	unsigned int curr_core;
+
+	if (mips_cm_revision() >= CM_REV_CM3)
+		/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
+		return;
+
+	curr_core = current_cpu_data.core;
 	spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
 			       per_cpu(cpc_core_lock_flags, curr_core));
 	preempt_enable();
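mips_cpc_default_phys_base() gains a weak stub returning 0, so platforms without a CPC need not define it and the probe path can treat 0 as "no CPC" instead of enabling a bogus base. A sketch of the weak-default pattern with the GCC/Clang attribute (function name here is hypothetical):

#include <stdio.h>

/* Weak default: a platform file with a strong definition overrides it. */
__attribute__((weak)) unsigned long cpc_default_base(void)
{
	return 0;	/* no CPC on this platform */
}

int main(void)
{
	unsigned long base = cpc_default_base();

	if (!base)
		puts("no CPC: skipping enable");
	else
		printf("enabling CPC at %#lx\n", base);
	return 0;
}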
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index c3372cac6db2..bd09853aecdf 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -15,7 +15,6 @@
 #include <linux/debugfs.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/ptrace.h>
 #include <linux/seq_file.h>
@@ -900,7 +899,7 @@ static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
  * mipsr2_decoder: Decode and emulate a MIPS R2 instruction
  * @regs:	Process register set
  * @inst:	Instruction to decode and emulate
- * @fcr31:	Floating Point Control and Status Register returned
+ * @fcr31:	Floating Point Control and Status Register Cause bits returned
  */
 int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
 {
@@ -1164,20 +1163,22 @@ fpu_emul:
 		regs->regs[31] = r31;
 		regs->cp0_epc = epc;
 		if (!used_math()) {	/* First time FPU user.	*/
+			preempt_disable();
 			err = init_fpu();
+			preempt_enable();
 			set_used_math();
 		}
 		lose_fpu(1);	/* Save FPU state for the emulator. */
 
 		err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
					       &fault_addr);
-		*fcr31 = current->thread.fpu.fcr31;
 
 		/*
-		 * We can't allow the emulated instruction to leave any of
-		 * the cause bits set in $fcr31.
+		 * We can't allow the emulated instruction to leave any
+		 * enabled Cause bits set in $fcr31.
		 */
-		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+		*fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
+		current->thread.fpu.fcr31 &= ~res;
 
 		/*
		 * this is a tricky issue - lose_fpu() uses LL/SC atomics
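The emulator paths stop clearing every FCSR Cause bit and instead clear (and report) only those whose matching Enable bit is set, via mask_fcr31_x(). With the MIPS FCSR layout (Enables at bits 11:7, the corresponding Causes at bits 16:12), that masking is a shift-and-AND; a runnable sketch assuming those bit positions:

#include <stdio.h>
#include <stdint.h>

#define FCSR_ENABLES	0x00000f80u	/* bits 11:7 */
#define FCSR_SHIFT	5		/* Cause bit = matching Enable bit << 5 */

/* Keep only the Cause bits whose Enable bit is also set. */
static uint32_t mask_fcsr_cause(uint32_t fcr31)
{
	return fcr31 & ((fcr31 & FCSR_ENABLES) << FCSR_SHIFT);
}

int main(void)
{
	/* Inexact and Overflow Causes raised, only Overflow enabled. */
	uint32_t fcr31 = (1u << 12) | (1u << 14) | (1u << 9);

	printf("raised=%#x signalled=%#x\n", fcr31, mask_fcsr_cause(fcr31));
	return 0;
}

This keeps non-enabled Cause bits visible to user space while still preventing the emulated instruction from leaving an enabled Cause pending.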
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 79850e376ef6..94627a3a6a0d 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -20,6 +20,7 @@
 
 #undef DEBUG
 
+#include <linux/extable.h>
 #include <linux/moduleloader.h>
 #include <linux/elf.h>
 #include <linux/mm.h>
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 5b31a9405ebc..7cf653e21423 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -8,6 +8,7 @@
  * option) any later version.
  */
 
+#include <linux/cpuhotplug.h>
 #include <linux/init.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
@@ -70,13 +71,8 @@ static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);
 DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);
 
 /* A somewhat arbitrary number of labels & relocs for uasm */
-static struct uasm_label labels[32] __initdata;
-static struct uasm_reloc relocs[32] __initdata;
-
-/* CPU dependant sync types */
-static unsigned stype_intervention;
-static unsigned stype_memory;
-static unsigned stype_ordering;
+static struct uasm_label labels[32];
+static struct uasm_reloc relocs[32];
 
 enum mips_reg {
 	zero, at, v0, v1, a0, a1, a2, a3,
@@ -134,7 +130,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
 		return -EINVAL;
 
 	/* Calculate which coupled CPUs (VPEs) are online */
-#ifdef CONFIG_MIPS_MT
+#if defined(CONFIG_MIPS_MT) || defined(CONFIG_CPU_MIPSR6)
 	if (cpu_online(cpu)) {
 		cpumask_and(coupled_mask, cpu_online_mask,
			    &cpu_sibling_map[cpu]);
@@ -198,10 +194,10 @@ int cps_pm_enter_state(enum cps_pm_state state)
 	return 0;
 }
 
-static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
-					 struct uasm_reloc **pr,
-					 const struct cache_desc *cache,
-					 unsigned op, int lbl)
+static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
+				  struct uasm_reloc **pr,
+				  const struct cache_desc *cache,
+				  unsigned op, int lbl)
 {
 	unsigned cache_size = cache->ways << cache->waybit;
 	unsigned i;
@@ -242,10 +238,10 @@ static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
 	uasm_i_nop(pp);
 }
 
-static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
-				    struct uasm_reloc **pr,
-				    const struct cpuinfo_mips *cpu_info,
-				    int lbl)
+static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
+			     struct uasm_reloc **pr,
+			     const struct cpuinfo_mips *cpu_info,
+			     int lbl)
 {
 	unsigned i, fsb_size = 8;
 	unsigned num_loads = (fsb_size * 3) / 2;
@@ -272,14 +268,9 @@ static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
 		/* On older ones it's unavailable */
 		return -1;
 
-	/* CPUs which do not require the workaround */
-	case CPU_P5600:
-	case CPU_I6400:
-		return 0;
-
 	default:
-		WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n");
-		return -1;
+		/* Assume that the CPU does not need this workaround */
+		return 0;
 	}
 
 	/*
@@ -320,8 +311,8 @@ static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
 			      i * line_size * line_stride, t0);
 	}
 
-	/* Completion barrier */
-	uasm_i_sync(pp, stype_memory);
+	/* Barrier ensuring previous cache invalidates are complete */
+	uasm_i_sync(pp, STYPE_SYNC);
 	uasm_i_ehb(pp);
 
 	/* Check whether the pipeline stalled due to the FSB being full */
@@ -340,9 +331,9 @@ static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
 	return 0;
 }
 
-static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
-				       struct uasm_reloc **pr,
-				       unsigned r_addr, int lbl)
+static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
+				struct uasm_reloc **pr,
+				unsigned r_addr, int lbl)
 {
 	uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
 	uasm_build_label(pl, *pp, lbl);
@@ -353,7 +344,7 @@ static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
 	uasm_i_nop(pp);
 }
 
-static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
+static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
 {
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
@@ -411,7 +402,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
 	if (coupled_coherence) {
 		/* Increment ready_count */
-		uasm_i_sync(&p, stype_ordering);
+		uasm_i_sync(&p, STYPE_SYNC_MB);
 		uasm_build_label(&l, p, lbl_incready);
 		uasm_i_ll(&p, t1, 0, r_nc_count);
 		uasm_i_addiu(&p, t2, t1, 1);
 		uasm_i_sc(&p, t2, 0, r_nc_count);
 		uasm_il_beqz(&p, &r, t2, lbl_incready);
 		uasm_i_addiu(&p, t1, t1, 1);
 
-		/* Ordering barrier */
-		uasm_i_sync(&p, stype_ordering);
+		/* Barrier ensuring all CPUs see the updated r_nc_count value */
+		uasm_i_sync(&p, STYPE_SYNC_MB);
 
 		/*
		 * If this is the last VPE to become ready for non-coherence
@@ -441,7 +432,8 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
 			uasm_i_lw(&p, t0, 0, r_nc_count);
 			uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
 			uasm_i_ehb(&p);
-			uasm_i_yield(&p, zero, t1);
+			if (cpu_has_mipsmt)
+				uasm_i_yield(&p, zero, t1);
 			uasm_il_b(&p, &r, lbl_poll_cont);
 			uasm_i_nop(&p);
 		} else {
@@ -449,8 +441,21 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
			 * The core will lose power & this VPE will not continue
			 * so it can simply halt here.
			 */
-			uasm_i_addiu(&p, t0, zero, TCHALT_H);
-			uasm_i_mtc0(&p, t0, 2, 4);
+			if (cpu_has_mipsmt) {
+				/* Halt the VPE via C0 tchalt register */
+				uasm_i_addiu(&p, t0, zero, TCHALT_H);
+				uasm_i_mtc0(&p, t0, 2, 4);
+			} else if (cpu_has_vp) {
+				/* Halt the VP via the CPC VP_STOP register */
+				unsigned int vpe_id;
+
+				vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+				uasm_i_addiu(&p, t0, zero, 1 << vpe_id);
+				UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop());
+				uasm_i_sw(&p, t0, 0, t1);
+			} else {
+				BUG();
+			}
 			uasm_build_label(&l, p, lbl_secondary_hang);
 			uasm_il_b(&p, &r, lbl_secondary_hang);
 			uasm_i_nop(&p);
@@ -472,22 +477,24 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
 	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
			      Index_Writeback_Inv_D, lbl_flushdcache);
 
-	/* Completion barrier */
-	uasm_i_sync(&p, stype_memory);
+	/* Barrier ensuring previous cache invalidates are complete */
+	uasm_i_sync(&p, STYPE_SYNC);
 	uasm_i_ehb(&p);
 
-	/*
-	 * Disable all but self interventions. The load from COHCTL is defined
-	 * by the interAptiv & proAptiv SUMs as ensuring that the operation
-	 * resulting from the preceding store is complete.
-	 */
-	uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
-	uasm_i_sw(&p, t0, 0, r_pcohctl);
-	uasm_i_lw(&p, t0, 0, r_pcohctl);
-
-	/* Sync to ensure previous interventions are complete */
-	uasm_i_sync(&p, stype_intervention);
-	uasm_i_ehb(&p);
+	if (mips_cm_revision() < CM_REV_CM3) {
+		/*
+		 * Disable all but self interventions. The load from COHCTL is
+		 * defined by the interAptiv & proAptiv SUMs as ensuring that the
+		 * operation resulting from the preceding store is complete.
+		 */
+		uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
+		uasm_i_sw(&p, t0, 0, r_pcohctl);
+		uasm_i_lw(&p, t0, 0, r_pcohctl);
+
+		/* Barrier to ensure write to coherence control is complete */
+		uasm_i_sync(&p, STYPE_SYNC);
+		uasm_i_ehb(&p);
+	}
 
 	/* Disable coherence */
 	uasm_i_sw(&p, zero, 0, r_pcohctl);
@@ -531,8 +538,8 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
 			goto gen_done;
 		}
 
-		/* Completion barrier */
-		uasm_i_sync(&p, stype_memory);
+		/* Barrier to ensure write to CPC command is complete */
+		uasm_i_sync(&p, STYPE_SYNC);
 		uasm_i_ehb(&p);
 	}
@@ -562,26 +569,29 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
	 * will run this. The first will actually re-enable coherence & the
	 * rest will just be performing a rather unusual nop.
	 */
-	uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK);
+	uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3
+				? CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK
+				: CM3_GCR_Cx_COHERENCE_COHEN_MSK);
+
 	uasm_i_sw(&p, t0, 0, r_pcohctl);
 	uasm_i_lw(&p, t0, 0, r_pcohctl);
 
-	/* Completion barrier */
-	uasm_i_sync(&p, stype_memory);
+	/* Barrier to ensure write to coherence control is complete */
+	uasm_i_sync(&p, STYPE_SYNC);
 	uasm_i_ehb(&p);
 
 	if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
 		/* Decrement ready_count */
 		uasm_build_label(&l, p, lbl_decready);
-		uasm_i_sync(&p, stype_ordering);
+		uasm_i_sync(&p, STYPE_SYNC_MB);
 		uasm_i_ll(&p, t1, 0, r_nc_count);
 		uasm_i_addiu(&p, t2, t1, -1);
 		uasm_i_sc(&p, t2, 0, r_nc_count);
 		uasm_il_beqz(&p, &r, t2, lbl_decready);
 		uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);
 
-		/* Ordering barrier */
-		uasm_i_sync(&p, stype_ordering);
+		/* Barrier ensuring all CPUs see the updated r_nc_count value */
+		uasm_i_sync(&p, STYPE_SYNC_MB);
 	}
 
 	if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
@@ -602,8 +612,8 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
		 */
 		uasm_build_label(&l, p, lbl_secondary_cont);
 
-		/* Ordering barrier */
-		uasm_i_sync(&p, stype_ordering);
+		/* Barrier ensuring all CPUs see the updated r_nc_count value */
+		uasm_i_sync(&p, STYPE_SYNC_MB);
 	}
 
 	/* The core is coherent, time to return to C code */
@@ -628,7 +638,7 @@ out_err:
 	return NULL;
 }
 
-static int __init cps_gen_core_entries(unsigned cpu)
+static int cps_pm_online_cpu(unsigned int cpu)
 {
 	enum cps_pm_state state;
 	unsigned core = cpu_data[cpu].core;
@@ -670,29 +680,10 @@ static int __init cps_gen_core_entries(unsigned cpu)
 
 static int __init cps_pm_init(void)
 {
-	unsigned cpu;
-	int err;
-
-	/* Detect appropriate sync types for the system */
-	switch (current_cpu_data.cputype) {
-	case CPU_INTERAPTIV:
-	case CPU_PROAPTIV:
-	case CPU_M5150:
-	case CPU_P5600:
-	case CPU_I6400:
-		stype_intervention = 0x2;
-		stype_memory = 0x3;
-		stype_ordering = 0x10;
-		break;
-
-	default:
-		pr_warn("Power management is using heavyweight sync 0\n");
-	}
-
 	/* A CM is required for all non-coherent states */
 	if (!mips_cm_present()) {
 		pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
-		goto out;
+		return 0;
 	}
 
 	/*
@@ -722,12 +713,7 @@ static int __init cps_pm_init(void)
 		pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
 	}
 
-	for_each_present_cpu(cpu) {
-		err = cps_gen_core_entries(cpu);
-		if (err)
-			return err;
-	}
-out:
-	return 0;
+	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PM_CPS_CPU_ONLINE",
+				 cps_pm_online_cpu, NULL);
 }
 arch_initcall(cps_pm_init);
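The generated low-power entry code brackets its LL/SC updates of ready_count with SYNC barriers so every sibling VPE observes the count before acting on it. A loose portable analogue of that discipline in C11 atomics (the kernel emits raw MIPS SYNC stype encodings via uasm, which has no direct C equivalent, so this is only an illustration of the ordering intent):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint ready_count;

/* Returns nonzero when the caller is the last sibling to arrive. */
static int arrive(unsigned nsiblings)
{
	/* Full fence before the update, like the leading SYNC_MB. */
	atomic_thread_fence(memory_order_seq_cst);

	unsigned prev = atomic_fetch_add_explicit(&ready_count, 1,
						  memory_order_acq_rel);

	/* ...and after it, so all CPUs see the new count. */
	atomic_thread_fence(memory_order_seq_cst);

	return prev + 1 == nsiblings;
}

int main(void)
{
	printf("last? %d\n", arrive(2));
	printf("last? %d\n", arrive(2));
	return 0;
}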
diff --git a/arch/mips/kernel/probes-common.h b/arch/mips/kernel/probes-common.h
new file mode 100644
index 000000000000..dd08e41134b6
--- /dev/null
+++ b/arch/mips/kernel/probes-common.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Marcin Nowakowski <marcin.nowakowski@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __PROBES_COMMON_H
+#define __PROBES_COMMON_H
+
+#include <asm/inst.h>
+
+int __insn_is_compact_branch(union mips_instruction insn);
+
+static inline int __insn_has_delay_slot(const union mips_instruction insn)
+{
+	switch (insn.i_format.opcode) {
+	/*
+	 * jr and jalr are in r_format format.
+	 */
+	case spec_op:
+		switch (insn.r_format.func) {
+		case jalr_op:
+		case jr_op:
+			return 1;
+		}
+		break;
+
+	/*
+	 * This group contains:
+	 * bltz_op, bgez_op, bltzl_op, bgezl_op,
+	 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+	 */
+	case bcond_op:
+		switch (insn.i_format.rt) {
+		case bltz_op:
+		case bltzl_op:
+		case bgez_op:
+		case bgezl_op:
+		case bltzal_op:
+		case bltzall_op:
+		case bgezal_op:
+		case bgezall_op:
+		case bposge32_op:
+			return 1;
+		}
+		break;
+
+	/*
+	 * These are unconditional and in j_format.
+	 */
+	case jal_op:
+	case j_op:
+	case beq_op:
+	case beql_op:
+	case bne_op:
+	case bnel_op:
+	case blez_op: /* not really i_format */
+	case blezl_op:
+	case bgtz_op:
+	case bgtzl_op:
+		return 1;
+
+	/*
+	 * And now the FPA/cp1 branch instructions.
+	 */
+	case cop1_op:
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	case lwc2_op: /* This is bbit0 on Octeon */
+	case ldc2_op: /* This is bbit032 on Octeon */
+	case swc2_op: /* This is bbit1 on Octeon */
+	case sdc2_op: /* This is bbit132 on Octeon */
+#endif
+		return 1;
+	}
+
+	return 0;
+}
+
+#endif /* __PROBES_COMMON_H */
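probes-common.h centralises the delay-slot classifier that kprobes and uprobes previously duplicated. Its top-level dispatch is on the 6-bit major opcode; a compact user-space check using the classic MIPS32 opcode numbers (j=2, jal=3, beq=4, bne=5, blez=6, bgtz=7, plus the likely variants 0x14-0x17):

#include <stdio.h>
#include <stdint.h>

/* 1 where the major opcode alone marks a delay-slot branch. */
static int opcode_has_delay_slot(uint32_t word)
{
	switch (word >> 26) {
	case 0x02: case 0x03:				/* j, jal */
	case 0x04: case 0x05: case 0x06: case 0x07:	/* beq, bne, blez, bgtz */
	case 0x14: case 0x15: case 0x16: case 0x17:	/* beql, bnel, blezl, bgtzl */
		return 1;
	default:
		return 0;	/* spec/bcond/cop1 need a second-level decode */
	}
}

int main(void)
{
	printf("jal:  %d\n", opcode_has_delay_slot(0x0c100000)); /* jal */
	printf("addu: %d\n", opcode_has_delay_slot(0x00851021)); /* addu v0,a0,a1 */
	return 0;
}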
"%u" : "not available"); seq_printf(m, fmt, 'D', vced_count); diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 7429ad09fbe3..9514e5f2209f 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -569,9 +569,16 @@ static void arch_dump_stack(void *info) dump_stack(); } -void arch_trigger_all_cpu_backtrace(bool include_self) +void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) { - smp_call_function(arch_dump_stack, NULL, 1); + long this_cpu = get_cpu(); + + if (cpumask_test_cpu(this_cpu, mask) && !exclude_self) + dump_stack(); + + smp_call_function_many(mask, arch_dump_stack, NULL, 1); + + put_cpu(); } int mips_get_process_fp_mode(struct task_struct *task) @@ -605,14 +612,14 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) return -EOPNOTSUPP; /* Avoid inadvertently triggering emulation */ - if ((value & PR_FP_MODE_FR) && cpu_has_fpu && - !(current_cpu_data.fpu_id & MIPS_FPIR_F64)) + if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu && + !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64)) return -EOPNOTSUPP; - if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre) + if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre) return -EOPNOTSUPP; /* FR = 0 not supported in MIPS R6 */ - if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6) + if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6) return -EOPNOTSUPP; /* Proceed with the mode switch */ diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 6103b24d1bfc..a92994d60e91 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -79,16 +79,15 @@ void ptrace_disable(struct task_struct *child) } /* - * Poke at FCSR according to its mask. Don't set the cause bits as - * this is currently not handled correctly in FP context restoration - * and will cause an oops if a corresponding enable bit is set. + * Poke at FCSR according to its mask. Set the Cause bits even + * if a corresponding Enable bit is set. This will be noticed at + * the time the thread is switched to and SIGFPE thrown accordingly. */ static void ptrace_setfcr31(struct task_struct *child, u32 value) { u32 fcr31; u32 mask; - value &= ~FPU_CSR_ALL_X; fcr31 = child->thread.fpu.fcr31; mask = boot_cpu_data.fpu_msk31; child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask); @@ -817,6 +816,7 @@ long arch_ptrace(struct task_struct *child, long request, break; #endif case FPC_CSR: + init_fp_ctx(child); ptrace_setfcr31(child, data); break; case DSP_BASE ... 
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 283b5a1967d1..7e71a4e0281b 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -70,7 +70,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			break;
 
 		copied = access_process_vm(child, (u64)addrOthers, &tmp,
-				sizeof(tmp), 0);
+				sizeof(tmp), FOLL_FORCE);
 		if (copied != sizeof(tmp))
 			break;
 		ret = put_user(tmp, (u32 __user *) (unsigned long) data);
@@ -179,7 +179,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			break;
 		ret = 0;
 		if (access_process_vm(child, (u64)addrOthers, &data,
-					sizeof(data), 1) == sizeof(data))
+					sizeof(data),
+					FOLL_FORCE | FOLL_WRITE) == sizeof(data))
 			break;
 		ret = -EIO;
 		break;
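The ptrace32 peek/poke paths now pass explicit FOLL_FORCE / FOLL_WRITE gup flags to access_process_vm() instead of a magic 0/1 write argument. From user space, the closest analogue to that kernel helper is process_vm_readv(2); a self-inspection example on Linux:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char src[] = "hello", dst[8] = "";
	struct iovec local  = { dst, sizeof(src) };
	struct iovec remote = { src, sizeof(src) };

	/* Read our own memory "remotely" through the same kernel machinery. */
	ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);

	printf("copied %zd bytes: %s\n", n, dst);
	return 0;
}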
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index b4ac6374a38f..918f2f6d3861 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -21,106 +21,84 @@
 #define EX(a,b)						\
9:	a,##b;						\
	.section __ex_table,"a";			\
+	PTR	9b,fault;				\
+	.previous
+
+#define EX2(a,b)					\
+9:	a,##b;						\
+	.section __ex_table,"a";			\
	PTR	9b,bad_stack;				\
+	PTR	9b+4,bad_stack;				\
	.previous
 
	.set	noreorder
	.set	mips1
-	/* Save floating point context */
+
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
LEAF(_save_fp_context)
	.set	push
	SET_HARDFLOAT
	li	v0, 0					# assume success
-	cfc1	t1,fcr31
-	EX(swc1 $f0,(SC_FPREGS+0)(a0))
-	EX(swc1 $f1,(SC_FPREGS+8)(a0))
-	EX(swc1 $f2,(SC_FPREGS+16)(a0))
-	EX(swc1 $f3,(SC_FPREGS+24)(a0))
-	EX(swc1 $f4,(SC_FPREGS+32)(a0))
-	EX(swc1 $f5,(SC_FPREGS+40)(a0))
-	EX(swc1 $f6,(SC_FPREGS+48)(a0))
-	EX(swc1 $f7,(SC_FPREGS+56)(a0))
-	EX(swc1 $f8,(SC_FPREGS+64)(a0))
-	EX(swc1 $f9,(SC_FPREGS+72)(a0))
-	EX(swc1 $f10,(SC_FPREGS+80)(a0))
-	EX(swc1 $f11,(SC_FPREGS+88)(a0))
-	EX(swc1 $f12,(SC_FPREGS+96)(a0))
-	EX(swc1 $f13,(SC_FPREGS+104)(a0))
-	EX(swc1 $f14,(SC_FPREGS+112)(a0))
-	EX(swc1 $f15,(SC_FPREGS+120)(a0))
-	EX(swc1 $f16,(SC_FPREGS+128)(a0))
-	EX(swc1 $f17,(SC_FPREGS+136)(a0))
-	EX(swc1 $f18,(SC_FPREGS+144)(a0))
-	EX(swc1 $f19,(SC_FPREGS+152)(a0))
-	EX(swc1 $f20,(SC_FPREGS+160)(a0))
-	EX(swc1 $f21,(SC_FPREGS+168)(a0))
-	EX(swc1 $f22,(SC_FPREGS+176)(a0))
-	EX(swc1 $f23,(SC_FPREGS+184)(a0))
-	EX(swc1 $f24,(SC_FPREGS+192)(a0))
-	EX(swc1 $f25,(SC_FPREGS+200)(a0))
-	EX(swc1 $f26,(SC_FPREGS+208)(a0))
-	EX(swc1 $f27,(SC_FPREGS+216)(a0))
-	EX(swc1 $f28,(SC_FPREGS+224)(a0))
-	EX(swc1 $f29,(SC_FPREGS+232)(a0))
-	EX(swc1 $f30,(SC_FPREGS+240)(a0))
-	EX(swc1 $f31,(SC_FPREGS+248)(a0))
-	EX(sw t1,(SC_FPC_CSR)(a0))
-	cfc1	t0,$0				# implementation/version
+	cfc1	t1, fcr31
+	EX2(s.d $f0, 0(a0))
+	EX2(s.d $f2, 16(a0))
+	EX2(s.d $f4, 32(a0))
+	EX2(s.d $f6, 48(a0))
+	EX2(s.d $f8, 64(a0))
+	EX2(s.d $f10, 80(a0))
+	EX2(s.d $f12, 96(a0))
+	EX2(s.d $f14, 112(a0))
+	EX2(s.d $f16, 128(a0))
+	EX2(s.d $f18, 144(a0))
+	EX2(s.d $f20, 160(a0))
+	EX2(s.d $f22, 176(a0))
+	EX2(s.d $f24, 192(a0))
+	EX2(s.d $f26, 208(a0))
+	EX2(s.d $f28, 224(a0))
+	EX2(s.d $f30, 240(a0))
	jr	ra
+	EX(sw t1, (a1))
	.set	pop
-	.set	nomacro
-	EX(sw t0,(SC_FPC_EIR)(a0))
-	.set	macro
	END(_save_fp_context)
 
-/*
- * Restore FPU state:
- *  - fp gp registers
- *  - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
 *
- * We base the decision which registers to restore from the signal stack
- * frame on the current content of c0_status, not on the content of the
- * stack frame which might have been changed by the user.
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
 */
LEAF(_restore_fp_context)
	.set	push
	SET_HARDFLOAT
	li	v0, 0					# assume success
-	EX(lw t0,(SC_FPC_CSR)(a0))
-	EX(lwc1 $f0,(SC_FPREGS+0)(a0))
-	EX(lwc1 $f1,(SC_FPREGS+8)(a0))
-	EX(lwc1 $f2,(SC_FPREGS+16)(a0))
-	EX(lwc1 $f3,(SC_FPREGS+24)(a0))
-	EX(lwc1 $f4,(SC_FPREGS+32)(a0))
-	EX(lwc1 $f5,(SC_FPREGS+40)(a0))
-	EX(lwc1 $f6,(SC_FPREGS+48)(a0))
-	EX(lwc1 $f7,(SC_FPREGS+56)(a0))
-	EX(lwc1 $f8,(SC_FPREGS+64)(a0))
-	EX(lwc1 $f9,(SC_FPREGS+72)(a0))
-	EX(lwc1 $f10,(SC_FPREGS+80)(a0))
-	EX(lwc1 $f11,(SC_FPREGS+88)(a0))
-	EX(lwc1 $f12,(SC_FPREGS+96)(a0))
-	EX(lwc1 $f13,(SC_FPREGS+104)(a0))
-	EX(lwc1 $f14,(SC_FPREGS+112)(a0))
-	EX(lwc1 $f15,(SC_FPREGS+120)(a0))
-	EX(lwc1 $f16,(SC_FPREGS+128)(a0))
-	EX(lwc1 $f17,(SC_FPREGS+136)(a0))
-	EX(lwc1 $f18,(SC_FPREGS+144)(a0))
-	EX(lwc1 $f19,(SC_FPREGS+152)(a0))
-	EX(lwc1 $f20,(SC_FPREGS+160)(a0))
-	EX(lwc1 $f21,(SC_FPREGS+168)(a0))
-	EX(lwc1 $f22,(SC_FPREGS+176)(a0))
-	EX(lwc1 $f23,(SC_FPREGS+184)(a0))
-	EX(lwc1 $f24,(SC_FPREGS+192)(a0))
-	EX(lwc1 $f25,(SC_FPREGS+200)(a0))
-	EX(lwc1 $f26,(SC_FPREGS+208)(a0))
-	EX(lwc1 $f27,(SC_FPREGS+216)(a0))
-	EX(lwc1 $f28,(SC_FPREGS+224)(a0))
-	EX(lwc1 $f29,(SC_FPREGS+232)(a0))
-	EX(lwc1 $f30,(SC_FPREGS+240)(a0))
-	EX(lwc1 $f31,(SC_FPREGS+248)(a0))
+	EX(lw t0, (a1))
+	EX2(l.d $f0, 0(a0))
+	EX2(l.d $f2, 16(a0))
+	EX2(l.d $f4, 32(a0))
+	EX2(l.d $f6, 48(a0))
+	EX2(l.d $f8, 64(a0))
+	EX2(l.d $f10, 80(a0))
+	EX2(l.d $f12, 96(a0))
+	EX2(l.d $f14, 112(a0))
+	EX2(l.d $f16, 128(a0))
+	EX2(l.d $f18, 144(a0))
+	EX2(l.d $f20, 160(a0))
+	EX2(l.d $f22, 176(a0))
+	EX2(l.d $f24, 192(a0))
+	EX2(l.d $f26, 208(a0))
+	EX2(l.d $f28, 224(a0))
+	EX2(l.d $f30, 240(a0))
	jr	ra
-	ctc1	t0,fcr31
+	ctc1	t0, fcr31
	.set	pop
	END(_restore_fp_context)
	.set	reorder
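The rewritten save path stores the 16 even-numbered double registers with s.d at 16-byte strides, because the fpregs field is an array of 32 64-bit slots and double $f(2i) occupies slot 2i; a1 now points directly at fpc_csr, replacing the SC_FPREGS/SC_FPC_CSR offsets. The arithmetic behind the 0, 16, ..., 240 constants:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sc_fpregs[32];	/* as in the MIPS struct sigcontext */

	for (int reg = 0; reg < 32; reg += 2)
		printf("$f%-2d -> byte offset %3zu\n",
		       reg, reg * sizeof(sc_fpregs[0]));
	return 0;
}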
diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S
index 47077380c15c..9cc7bfab3419 100644
--- a/arch/mips/kernel/r6000_fpu.S
+++ b/arch/mips/kernel/r6000_fpu.S
@@ -21,7 +21,14 @@
	.set	push
	SET_HARDFLOAT
 
-	/* Save floating point context */
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
	LEAF(_save_fp_context)
	mfc0	t0,CP0_STATUS
	sll	t0,t0,2
@@ -30,59 +37,59 @@
	cfc1	t1,fcr31
	/* Store the 16 double precision registers */
-	sdc1	$f0,(SC_FPREGS+0)(a0)
-	sdc1	$f2,(SC_FPREGS+16)(a0)
-	sdc1	$f4,(SC_FPREGS+32)(a0)
-	sdc1	$f6,(SC_FPREGS+48)(a0)
-	sdc1	$f8,(SC_FPREGS+64)(a0)
-	sdc1	$f10,(SC_FPREGS+80)(a0)
-	sdc1	$f12,(SC_FPREGS+96)(a0)
-	sdc1	$f14,(SC_FPREGS+112)(a0)
-	sdc1	$f16,(SC_FPREGS+128)(a0)
-	sdc1	$f18,(SC_FPREGS+144)(a0)
-	sdc1	$f20,(SC_FPREGS+160)(a0)
-	sdc1	$f22,(SC_FPREGS+176)(a0)
-	sdc1	$f24,(SC_FPREGS+192)(a0)
-	sdc1	$f26,(SC_FPREGS+208)(a0)
-	sdc1	$f28,(SC_FPREGS+224)(a0)
-	sdc1	$f30,(SC_FPREGS+240)(a0)
+	sdc1	$f0,0(a0)
+	sdc1	$f2,16(a0)
+	sdc1	$f4,32(a0)
+	sdc1	$f6,48(a0)
+	sdc1	$f8,64(a0)
+	sdc1	$f10,80(a0)
+	sdc1	$f12,96(a0)
+	sdc1	$f14,112(a0)
+	sdc1	$f16,128(a0)
+	sdc1	$f18,144(a0)
+	sdc1	$f20,160(a0)
+	sdc1	$f22,176(a0)
+	sdc1	$f24,192(a0)
+	sdc1	$f26,208(a0)
+	sdc1	$f28,224(a0)
+	sdc1	$f30,240(a0)
	jr	ra
-	sw	t0,SC_FPC_CSR(a0)
+	sw	t0,(a1)
1:	jr	ra
	nop
	END(_save_fp_context)
 
-/* Restore FPU state:
- *  - fp gp registers
- *  - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
 *
- * We base the decision which registers to restore from the signal stack
- * frame on the current content of c0_status, not on the content of the
- * stack frame which might have been changed by the user.
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
 */
	LEAF(_restore_fp_context)
	mfc0	t0,CP0_STATUS
	sll	t0,t0,2
	bgez	t0,1f
-	lw	t0,SC_FPC_CSR(a0)
+	lw	t0,(a1)
	/* Restore the 16 double precision registers */
-	ldc1	$f0,(SC_FPREGS+0)(a0)
-	ldc1	$f2,(SC_FPREGS+16)(a0)
-	ldc1	$f4,(SC_FPREGS+32)(a0)
-	ldc1	$f6,(SC_FPREGS+48)(a0)
-	ldc1	$f8,(SC_FPREGS+64)(a0)
-	ldc1	$f10,(SC_FPREGS+80)(a0)
-	ldc1	$f12,(SC_FPREGS+96)(a0)
-	ldc1	$f14,(SC_FPREGS+112)(a0)
-	ldc1	$f16,(SC_FPREGS+128)(a0)
-	ldc1	$f18,(SC_FPREGS+144)(a0)
-	ldc1	$f20,(SC_FPREGS+160)(a0)
-	ldc1	$f22,(SC_FPREGS+176)(a0)
-	ldc1	$f24,(SC_FPREGS+192)(a0)
-	ldc1	$f26,(SC_FPREGS+208)(a0)
-	ldc1	$f28,(SC_FPREGS+224)(a0)
-	ldc1	$f30,(SC_FPREGS+240)(a0)
+	ldc1	$f0,0(a0)
+	ldc1	$f2,16(a0)
+	ldc1	$f4,32(a0)
+	ldc1	$f6,48(a0)
+	ldc1	$f8,64(a0)
+	ldc1	$f10,80(a0)
+	ldc1	$f12,96(a0)
+	ldc1	$f14,112(a0)
+	ldc1	$f16,128(a0)
+	ldc1	$f18,144(a0)
+	ldc1	$f20,160(a0)
+	ldc1	$f22,176(a0)
+	ldc1	$f24,192(a0)
+	ldc1	$f26,208(a0)
+	ldc1	$f28,224(a0)
+	ldc1	$f30,240(a0)
	jr	ra
	ctc1	t0,fcr31
1:	jr	ra
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index ca1cc30c0891..1958910b75c0 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -200,7 +200,7 @@ static inline __init unsigned long get_random_boot(void)
 
#if defined(CONFIG_USE_OF)
 	/* Get any additional entropy passed in device tree */
-	{
+	if (initial_boot_params) {
 		int node, len;
 		u64 *prop;
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index c8e43e0c4066..c29d397eee86 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -597,3 +597,6 @@ EXPORT(sys_call_table)
 	PTR	sys_copy_file_range		/* 4360 */
 	PTR	sys_preadv2
 	PTR	sys_pwritev2
+	PTR	sys_pkey_mprotect
+	PTR	sys_pkey_alloc
+	PTR	sys_pkey_free			/* 4365 */
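The same three pkey entries are appended to the three 64-bit tables below, keeping every existing syscall number stable (4363-4365 on o32, per the table comments). Probing for them from user space, with a graceful fallback on kernels or libc headers without pkey support:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
#ifdef __NR_pkey_alloc
	long pkey = syscall(__NR_pkey_alloc, 0UL, 0UL);

	if (pkey < 0)
		perror("pkey_alloc");	/* ENOSYS/EINVAL where unsupported */
	else
		printf("got pkey %ld\n", pkey);
#else
	puts("libc headers predate the pkey syscalls");
#endif
	return 0;
}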
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index e6ede125059f..0687f96ee912 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -435,4 +435,7 @@ EXPORT(sys_call_table)
 	PTR	sys_copy_file_range		/* 5320 */
 	PTR	sys_preadv2
 	PTR	sys_pwritev2
+	PTR	sys_pkey_mprotect
+	PTR	sys_pkey_alloc
+	PTR	sys_pkey_free			/* 5325 */
 	.size	sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 51d3988933f8..0331ba39a065 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -430,4 +430,7 @@ EXPORT(sysn32_call_table)
 	PTR	sys_copy_file_range
 	PTR	compat_sys_preadv2		/* 6325 */
 	PTR	compat_sys_pwritev2
+	PTR	sys_pkey_mprotect
+	PTR	sys_pkey_alloc
+	PTR	sys_pkey_free
 	.size	sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 6efa7136748f..5a47042dd25f 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -585,4 +585,7 @@ EXPORT(sys32_call_table)
 	PTR	sys_copy_file_range		/* 4360 */
 	PTR	compat_sys_preadv2
 	PTR	compat_sys_pwritev2
+	PTR	sys_pkey_mprotect
+	PTR	sys_pkey_alloc
+	PTR	sys_pkey_free			/* 4365 */
 	.size	sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 36cf8d65c47d..f66e5ce505b2 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -87,6 +87,13 @@ void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
 	int x = boot_mem_map.nr_map;
 	int i;
 
+	/*
+	 * If the region reaches the top of the physical address space, adjust
+	 * the size slightly so that (start + size) doesn't overflow
+	 */
+	if (start + size - 1 == (phys_addr_t)ULLONG_MAX)
+		--size;
+
 	/* Sanity check */
 	if (start + size < start) {
 		pr_warn("Trying to add an invalid memory region, skipped\n");
@@ -361,6 +368,19 @@ static void __init bootmem_init(void)
 		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);
 
+#ifndef CONFIG_HIGHMEM
+		/*
+		 * Skip highmem here so we get an accurate max_low_pfn if low
+		 * memory stops short of high memory.
+		 * If the region overlaps HIGHMEM_START, end is clipped so
+		 * max_pfn excludes the highmem portion.
+		 */
+		if (start >= PFN_DOWN(HIGHMEM_START))
+			continue;
+		if (end > PFN_DOWN(HIGHMEM_START))
+			end = PFN_DOWN(HIGHMEM_START);
+#endif
+
 		if (end > max_low_pfn)
 			max_low_pfn = end;
 		if (start < min_low_pfn)
@@ -757,7 +777,6 @@ static void __init arch_mem_init(char **cmdline_p)
 	device_tree_init();
 	sparse_init();
 	plat_swiotlb_setup();
-	paging_init();
 
 	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
 	/* Tell bootmem about cma reserved memblock section */
@@ -870,6 +889,7 @@ void __init setup_arch(char **cmdline_p)
 
 	prefill_possible_map();
 	cpu_cache_init();
+	paging_init();
 }
 
 unsigned long kernelsp[NR_CPUS];
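add_memory_region() now shaves one byte off a region that ends exactly at the top of the physical address space, because start + size would otherwise wrap to 0 and trip the existing start + size < start sanity check. The wraparound and the clamp, demonstrated with 32-bit arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t start = 0xfff00000u, size = 0x00100000u;	/* ends at 2^32 */

	if (start + size - 1 == UINT32_MAX)
		--size;		/* keep start + size representable */

	if (start + size < start)
		puts("invalid region, skipped");
	else
		printf("region [%#x, %#x)\n", start, start + size);
	return 0;
}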
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index e9d9fc6c754c..6183ad84cc73 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -513,7 +513,7 @@ static void cps_cpu_die(unsigned int cpu)
		 * in which case the CPC will refuse to power down the core.
		 */
		do {
-			mips_cm_lock_other(core, vpe_id);
+			mips_cm_lock_other(core, 0);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
diff --git a/arch/mips/kernel/smp-gic.c b/arch/mips/kernel/smp-gic.c
deleted file mode 100644
index 9b63829cf929..000000000000
--- a/arch/mips/kernel/smp-gic.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2013 Imagination Technologies
- * Author: Paul Burton <paul.burton@imgtec.com>
- *
- * Based on smp-cmp.c:
- *  Copyright (C) 2007 MIPS Technologies, Inc.
- *    Author: Chris Dearman (chris@mips.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/irqchip/mips-gic.h>
-#include <linux/printk.h>
-
-#include <asm/mips-cpc.h>
-#include <asm/smp-ops.h>
-
-void gic_send_ipi_single(int cpu, unsigned int action)
-{
-	unsigned long flags;
-	unsigned int intr;
-	unsigned int core = cpu_data[cpu].core;
-
-	pr_debug("CPU%d: %s cpu %d action %u status %08x\n",
-		 smp_processor_id(), __func__, cpu, action, read_c0_status());
-
-	local_irq_save(flags);
-
-	switch (action) {
-	case SMP_CALL_FUNCTION:
-		intr = plat_ipi_call_int_xlate(cpu);
-		break;
-
-	case SMP_RESCHEDULE_YOURSELF:
-		intr = plat_ipi_resched_int_xlate(cpu);
-		break;
-
-	default:
-		BUG();
-	}
-
-	gic_send_ipi(intr);
-
-	if (mips_cpc_present() && (core != current_cpu_data.core)) {
-		while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
-			mips_cm_lock_other(core, 0);
-			mips_cpc_lock_other(core);
-			write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
-			mips_cpc_unlock_other();
-			mips_cm_unlock_other();
-		}
-	}
-
-	local_irq_restore(flags);
-}
-
-void gic_send_ipi_mask(const struct cpumask *mask, unsigned int action)
-{
-	unsigned int i;
-
-	for_each_cpu(i, mask)
-		gic_send_ipi_single(i, action);
-}
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 4f9570a57e8d..e077ea3e11fb 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -289,26 +289,3 @@ struct plat_smp_ops vsmp_smp_ops = {
 	.prepare_cpus		= vsmp_prepare_cpus,
 };
 
-#ifdef CONFIG_PROC_FS
-static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
-	unsigned long action_unused, void *data)
-{
-	struct proc_cpuinfo_notifier_args *pcn = data;
-	struct seq_file *m = pcn->m;
-	unsigned long n = pcn->n;
-
-	if (!cpu_has_mipsmt)
-		return NOTIFY_OK;
-
-	seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
-
-	return NOTIFY_OK;
-}
-
-static int __init proc_cpuinfo_notifier_init(void)
-{
-	return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0);
-}
-
-subsys_initcall(proc_cpuinfo_notifier_init);
-#endif
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index f95f094f36e4..7ebb1918e2ac 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -25,7 +25,7 @@
 #include <linux/smp.h>
 #include <linux/spinlock.h>
 #include <linux/threads.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/time.h>
 #include <linux/timex.h>
 #include <linux/sched.h>
@@ -192,9 +192,11 @@ void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 			continue;
 
 		while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
+			mips_cm_lock_other(core, 0);
 			mips_cpc_lock_other(core);
 			write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
 			mips_cpc_unlock_other();
+			mips_cm_unlock_other();
 		}
 	}
 }
@@ -229,7 +231,7 @@ static struct irqaction irq_call = {
 	.name		= "IPI call"
 };
 
-static __init void smp_ipi_init_one(unsigned int virq,
+static void smp_ipi_init_one(unsigned int virq,
				    struct irqaction *action)
 {
 	int ret;
@@ -239,9 +241,11 @@ static __init void smp_ipi_init_one(unsigned int virq,
 	BUG_ON(ret);
 }
 
-static int __init mips_smp_ipi_init(void)
+static unsigned int call_virq, sched_virq;
+
+int mips_smp_ipi_allocate(const struct cpumask *mask)
 {
-	unsigned int call_virq, sched_virq;
+	int virq;
 	struct irq_domain *ipidomain;
 	struct device_node *node;
 
@@ -268,16 +272,20 @@ static int __init mips_smp_ipi_init(void)
 	if (!ipidomain)
 		return 0;
 
-	call_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
-	BUG_ON(!call_virq);
+	virq = irq_reserve_ipi(ipidomain, mask);
+	BUG_ON(!virq);
+	if (!call_virq)
+		call_virq = virq;
 
-	sched_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
-	BUG_ON(!sched_virq);
+	virq = irq_reserve_ipi(ipidomain, mask);
+	BUG_ON(!virq);
+	if (!sched_virq)
+		sched_virq = virq;
 
 	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
 		int cpu;
 
-		for_each_cpu(cpu, cpu_possible_mask) {
+		for_each_cpu(cpu, mask) {
 			smp_ipi_init_one(call_virq + cpu, &irq_call);
 			smp_ipi_init_one(sched_virq + cpu, &irq_resched);
 		}
@@ -286,6 +294,45 @@ static int __init mips_smp_ipi_init(void)
 		smp_ipi_init_one(sched_virq, &irq_resched);
 	}
 
+	return 0;
+}
+
+int mips_smp_ipi_free(const struct cpumask *mask)
+{
+	struct irq_domain *ipidomain;
+	struct device_node *node;
+
+	node = of_irq_find_parent(of_root);
+	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
+
+	/*
+	 * Some platforms have half DT setup. So if we found irq node but
+	 * didn't find an ipidomain, try to search for one that is not in the
+	 * DT.
+	 */
+	if (node && !ipidomain)
+		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
+
+	BUG_ON(!ipidomain);
+
+	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
+		int cpu;
+
+		for_each_cpu(cpu, mask) {
+			remove_irq(call_virq + cpu, &irq_call);
+			remove_irq(sched_virq + cpu, &irq_resched);
+		}
+	}
+	irq_destroy_ipi(call_virq, mask);
+	irq_destroy_ipi(sched_virq, mask);
+	return 0;
+}
+
+
+static int __init mips_smp_ipi_init(void)
+{
+	mips_smp_ipi_allocate(cpu_possible_mask);
+
 	call_desc = irq_to_desc(call_virq);
 	sched_desc = irq_to_desc(sched_virq);
 
@@ -322,6 +369,9 @@ asmlinkage void start_secondary(void)
 	cpumask_set_cpu(cpu, &cpu_coherent_mask);
 	notify_cpu_starting(cpu);
 
+	cpumask_set_cpu(cpu, &cpu_callin_map);
+	synchronise_count_slave(cpu);
+
 	set_cpu_online(cpu, true);
 
 	set_cpu_sibling_map(cpu);
@@ -329,10 +379,6 @@ asmlinkage void start_secondary(void)
 
 	calculate_cpu_foreign_map();
 
-	cpumask_set_cpu(cpu, &cpu_callin_map);
-
-	synchronise_count_slave(cpu);
-
 	/*
	 * irq will be enabled in ->smp_finish(), enabling it too early
	 * is dangerous.
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 3de85be2486a..3905003dfe2b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
@@ -48,6 +49,7 @@
 #include <asm/fpu.h>
 #include <asm/fpu_emulator.h>
 #include <asm/idle.h>
+#include <asm/mips-cm.h>
 #include <asm/mips-r2-to-r6-emul.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
@@ -154,7 +156,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
 		print_ip_sym(pc);
 		pc = unwind_stack(task, &sp, pc, &ra);
 	} while (pc);
-	printk("\n");
+	pr_cont("\n");
 }
 
 /*
@@ -172,22 +174,24 @@ static void show_stacktrace(struct task_struct *task,
 	printk("Stack :");
 	i = 0;
 	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
-		if (i && ((i % (64 / field)) == 0))
-			printk("\n	  ");
+		if (i && ((i % (64 / field)) == 0)) {
+			pr_cont("\n");
+			printk("	  ");
+		}
 		if (i > 39) {
-			printk(" ...");
+			pr_cont(" ...");
 			break;
 		}
 
 		if (__get_user(stackdata, sp++)) {
-			printk(" (Bad stack address)");
+			pr_cont(" (Bad stack address)");
 			break;
 		}
 
-		printk(" %0*lx", field, stackdata);
+		pr_cont(" %0*lx", field, stackdata);
 		i++;
 	}
-	printk("\n");
+	pr_cont("\n");
 	show_backtrace(task, regs);
 }
 
@@ -227,18 +231,19 @@ static void show_code(unsigned int __user *pc)
 	long i;
 	unsigned short __user *pc16 = NULL;
 
-	printk("\nCode:");
+	printk("Code:");
 
 	if ((unsigned long)pc & 1)
 		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
 	for(i = -3 ; i < 6 ; i++) {
 		unsigned int insn;
 		if (pc16 ? __get_user(insn, pc16 + i) :
 		    __get_user(insn, pc + i)) {
-			printk(" (Bad address in epc)\n");
+			pr_cont(" (Bad address in epc)\n");
 			break;
 		}
-		printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
+		pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
 	}
+	pr_cont("\n");
 }
 static void __show_regs(const struct pt_regs *regs)
@@ -257,15 +262,15 @@ static void __show_regs(const struct pt_regs *regs)
 		if ((i % 4) == 0)
 			printk("$%2d   :", i);
 		if (i == 0)
-			printk(" %0*lx", field, 0UL);
+			pr_cont(" %0*lx", field, 0UL);
 		else if (i == 26 || i == 27)
-			printk(" %*s", field, "");
+			pr_cont(" %*s", field, "");
 		else
-			printk(" %0*lx", field, regs->regs[i]);
+			pr_cont(" %0*lx", field, regs->regs[i]);
 
 		i++;
 		if ((i % 4) == 0)
-			printk("\n");
+			pr_cont("\n");
 	}
 
 #ifdef CONFIG_CPU_HAS_SMARTMIPS
@@ -286,46 +291,46 @@ static void __show_regs(const struct pt_regs *regs)
 
 	if (cpu_has_3kex) {
 		if (regs->cp0_status & ST0_KUO)
-			printk("KUo ");
+			pr_cont("KUo ");
 		if (regs->cp0_status & ST0_IEO)
-			printk("IEo ");
+			pr_cont("IEo ");
 		if (regs->cp0_status & ST0_KUP)
-			printk("KUp ");
+			pr_cont("KUp ");
 		if (regs->cp0_status & ST0_IEP)
-			printk("IEp ");
+			pr_cont("IEp ");
 		if (regs->cp0_status & ST0_KUC)
-			printk("KUc ");
+			pr_cont("KUc ");
 		if (regs->cp0_status & ST0_IEC)
-			printk("IEc ");
+			pr_cont("IEc ");
 	} else if (cpu_has_4kex) {
 		if (regs->cp0_status & ST0_KX)
-			printk("KX ");
+			pr_cont("KX ");
 		if (regs->cp0_status & ST0_SX)
-			printk("SX ");
+			pr_cont("SX ");
 		if (regs->cp0_status & ST0_UX)
-			printk("UX ");
+			pr_cont("UX ");
 		switch (regs->cp0_status & ST0_KSU) {
 		case KSU_USER:
-			printk("USER ");
+			pr_cont("USER ");
 			break;
 		case KSU_SUPERVISOR:
-			printk("SUPERVISOR ");
+			pr_cont("SUPERVISOR ");
 			break;
 		case KSU_KERNEL:
-			printk("KERNEL ");
+			pr_cont("KERNEL ");
 			break;
 		default:
-			printk("BAD_MODE ");
+			pr_cont("BAD_MODE ");
 			break;
 		}
 		if (regs->cp0_status & ST0_ERL)
-			printk("ERL ");
+			pr_cont("ERL ");
 		if (regs->cp0_status & ST0_EXL)
-			printk("EXL ");
+			pr_cont("EXL ");
 		if (regs->cp0_status & ST0_IE)
-			printk("IE ");
+			pr_cont("IE ");
 	}
-	printk("\n");
+	pr_cont("\n");
 
 	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
 	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
@@ -444,6 +449,8 @@ asmlinkage void do_be(struct pt_regs *regs)
 
 	if (board_be_handler)
 		action = board_be_handler(regs, fixup != NULL);
+	else
+		mips_cm_error_report();
 
 	switch (action) {
 	case MIPS_BE_DISCARD:
@@ -701,6 +708,32 @@ asmlinkage void do_ov(struct pt_regs *regs)
 	exception_exit(prev_state);
 }
 
+/*
+ * Send SIGFPE according to FCSR Cause bits, which must have already
+ * been masked against Enable bits.  This is important as Inexact can
+ * happen together with Overflow or Underflow, and `ptrace' can set
+ * any bits.
+ */
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+		     struct task_struct *tsk)
+{
+	struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };
+
+	if (fcr31 & FPU_CSR_INV_X)
+		si.si_code = FPE_FLTINV;
+	else if (fcr31 & FPU_CSR_DIV_X)
+		si.si_code = FPE_FLTDIV;
+	else if (fcr31 & FPU_CSR_OVF_X)
+		si.si_code = FPE_FLTOVF;
+	else if (fcr31 & FPU_CSR_UDF_X)
+		si.si_code = FPE_FLTUND;
+	else if (fcr31 & FPU_CSR_INE_X)
+		si.si_code = FPE_FLTRES;
+	else
+		si.si_code = __SI_FAULT;
+	force_sig_info(SIGFPE, &si, tsk);
+}
+
 int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
 {
 	struct siginfo si = { 0 };
@@ -711,27 +744,7 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
 		return 0;
 
 	case SIGFPE:
-		si.si_addr = fault_addr;
-		si.si_signo = sig;
-		/*
-		 * Inexact can happen together with Overflow or Underflow.
-		 * Respect the mask to deliver the correct exception.
-		 */
-		fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
-			 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
-		if (fcr31 & FPU_CSR_INV_X)
-			si.si_code = FPE_FLTINV;
-		else if (fcr31 & FPU_CSR_DIV_X)
-			si.si_code = FPE_FLTDIV;
-		else if (fcr31 & FPU_CSR_OVF_X)
-			si.si_code = FPE_FLTOVF;
-		else if (fcr31 & FPU_CSR_UDF_X)
-			si.si_code = FPE_FLTUND;
-		else if (fcr31 & FPU_CSR_INE_X)
-			si.si_code = FPE_FLTRES;
-		else
-			si.si_code = __SI_FAULT;
-		force_sig_info(sig, &si, current);
+		force_fcr31_sig(fcr31, fault_addr, current);
 		return 1;
 
 	case SIGBUS:
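force_fcr31_sig() turns the already-masked Cause bits into a SIGFPE si_code, testing Invalid first and Inexact last, since Inexact may be raised alongside Overflow or Underflow. The same priority chain, extracted into a runnable sketch (bit positions per the MIPS FCSR layout, Causes at bits 16:12):

#include <stdio.h>
#include <stdint.h>

#define CSR_INE (1u << 12)
#define CSR_UDF (1u << 13)
#define CSR_OVF (1u << 14)
#define CSR_DIV (1u << 15)
#define CSR_INV (1u << 16)

static const char *fpe_code(uint32_t cause)
{
	if (cause & CSR_INV) return "FPE_FLTINV";
	if (cause & CSR_DIV) return "FPE_FLTDIV";
	if (cause & CSR_OVF) return "FPE_FLTOVF";
	if (cause & CSR_UDF) return "FPE_FLTUND";
	if (cause & CSR_INE) return "FPE_FLTRES";
	return "default fault code";
}

int main(void)
{
	/* Overflow and Inexact together resolve to the overflow code. */
	printf("%s\n", fpe_code(CSR_OVF | CSR_INE));
	return 0;
}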
@@ -795,13 +808,13 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
 	/* Run the emulator */
 	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
				       &fault_addr);
-	fcr31 = current->thread.fpu.fcr31;
 
 	/*
-	 * We can't allow the emulated instruction to leave any of
-	 * the cause bits set in $fcr31.
+	 * We can't allow the emulated instruction to leave any
+	 * enabled Cause bits set in $fcr31.
	 */
-	current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+	current->thread.fpu.fcr31 &= ~fcr31;
 
 	/* Restore the hardware register state */
 	own_fpu(1);
@@ -827,7 +840,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 		goto out;
 
 	/* Clear FCSR.Cause before enabling interrupts */
-	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
+	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
 	local_irq_enable();
 
 	die_if_kernel("FP exception in kernel code", regs);
@@ -849,13 +862,13 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 		/* Run the emulator */
 		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
-		fcr31 = current->thread.fpu.fcr31;
 
 		/*
-		 * We can't allow the emulated instruction to leave any of
-		 * the cause bits set in $fcr31.
+		 * We can't allow the emulated instruction to leave any
+		 * enabled Cause bits set in $fcr31.
		 */
-		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+		current->thread.fpu.fcr31 &= ~fcr31;
 
 		/* Restore the hardware register state */
 		own_fpu(1);	/* Using the FPU again.	*/
@@ -1420,13 +1433,13 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 
 		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
					       &fault_addr);
-		fcr31 = current->thread.fpu.fcr31;
 
 		/*
		 * We can't allow the emulated instruction to leave
-		 * any of the cause bits set in $fcr31.
+		 * any enabled Cause bits set in $fcr31.
		 */
-		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+		current->thread.fpu.fcr31 &= ~fcr31;
 
 		/* Send a signal if required.  */
 		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
@@ -2091,6 +2104,14 @@ static void configure_exception_vector(void)
 {
 	if (cpu_has_veic || cpu_has_vint) {
 		unsigned long sr = set_c0_status(ST0_BEV);
+		/* If available, use WG to set top bits of EBASE */
+		if (cpu_has_ebase_wg) {
+#ifdef CONFIG_64BIT
+			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
+#else
+			write_c0_ebase(ebase | MIPS_EBASE_WG);
+#endif
+		}
 		write_c0_ebase(ebase);
 		write_c0_status(sr);
 		/* Setting vector spacing enables EI/VI mode  */
@@ -2127,8 +2148,17 @@ void per_cpu_trap_init(bool is_boot_cpu)
	 * We shouldn't trust a secondary core has a sane EBASE register
	 * so use the one calculated by the boot CPU.
	 */
-	if (!is_boot_cpu)
+	if (!is_boot_cpu) {
+		/* If available, use WG to set top bits of EBASE */
+		if (cpu_has_ebase_wg) {
+#ifdef CONFIG_64BIT
+			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
+#else
+			write_c0_ebase(ebase | MIPS_EBASE_WG);
+#endif
+		}
 		write_c0_ebase(ebase);
+	}
 
 	cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
 	cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
@@ -2209,13 +2239,39 @@ void __init trap_init(void)
 
 	if (cpu_has_veic || cpu_has_vint) {
 		unsigned long size = 0x200 + VECTORSPACING*64;
+		phys_addr_t ebase_pa;
+
 		ebase = (unsigned long)
			__alloc_bootmem(size, 1 << fls(size), 0);
+
+		/*
+		 * Try to ensure ebase resides in KSeg0 if possible.
+		 *
+		 * It shouldn't generally be in XKPhys on MIPS64 to avoid
+		 * hitting a poorly defined exception base for Cache Errors.
+		 * The allocation is likely to be in the low 512MB of physical,
+		 * in which case we should be able to convert to KSeg0.
+		 *
+		 * EVA is special though as it allows segments to be rearranged
+		 * and to become uncached during cache error handling.
+		 */
+		ebase_pa = __pa(ebase);
+		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
+			ebase = CKSEG0ADDR(ebase_pa);
 	} else {
 		ebase = CAC_BASE;
 
-		if (cpu_has_mips_r2_r6)
-			ebase += (read_c0_ebase() & 0x3ffff000);
+		if (cpu_has_mips_r2_r6) {
+			if (cpu_has_ebase_wg) {
+#ifdef CONFIG_64BIT
+				ebase = (read_c0_ebase_64() & ~0xfff);
+#else
+				ebase = (read_c0_ebase() & ~0xfff);
+#endif
+			} else {
+				ebase += (read_c0_ebase() & 0x3ffff000);
+			}
+		}
 	}
 
 	if (cpu_has_mmips) {
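trap_init() converts the bootmem allocation to a KSeg0 address when it falls in the low 512MB, since KSeg0 maps physical 0x00000000-0x1fffffff cached at 0x80000000. The conversion behind CKSEG0ADDR() is a plain OR; sketched for a 32-bit physical address:

#include <stdio.h>
#include <stdint.h>

#define CKSEG0 0x80000000u	/* cached, unmapped kernel segment */

int main(void)
{
	uint32_t pa = 0x01234000;	/* must be below 0x20000000 */

	if (pa < 0x20000000)
		printf("ebase = %#x\n", CKSEG0 | pa);
	else
		puts("not reachable through KSeg0");
	return 0;
}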
@@ -2127,8 +2148,17 @@ void per_cpu_trap_init(bool is_boot_cpu)
 	 * We shouldn't trust a secondary core has a sane EBASE register
 	 * so use the one calculated by the boot CPU.
 	 */
-	if (!is_boot_cpu)
+	if (!is_boot_cpu) {
+		/* If available, use WG to set top bits of EBASE */
+		if (cpu_has_ebase_wg) {
+#ifdef CONFIG_64BIT
+			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
+#else
+			write_c0_ebase(ebase | MIPS_EBASE_WG);
+#endif
+		}
 		write_c0_ebase(ebase);
+	}

 	cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
 	cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
@@ -2209,13 +2239,39 @@ void __init trap_init(void)

 	if (cpu_has_veic || cpu_has_vint) {
 		unsigned long size = 0x200 + VECTORSPACING*64;
+		phys_addr_t ebase_pa;
+
 		ebase = (unsigned long)
 			__alloc_bootmem(size, 1 << fls(size), 0);
+
+		/*
+		 * Try to ensure ebase resides in KSeg0 if possible.
+		 *
+		 * It shouldn't generally be in XKPhys on MIPS64 to avoid
+		 * hitting a poorly defined exception base for Cache Errors.
+		 * The allocation is likely to be in the low 512MB of physical,
+		 * in which case we should be able to convert to KSeg0.
+		 *
+		 * EVA is special though as it allows segments to be rearranged
+		 * and to become uncached during cache error handling.
+		 */
+		ebase_pa = __pa(ebase);
+		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
+			ebase = CKSEG0ADDR(ebase_pa);
 	} else {
 		ebase = CAC_BASE;

-		if (cpu_has_mips_r2_r6)
-			ebase += (read_c0_ebase() & 0x3ffff000);
+		if (cpu_has_mips_r2_r6) {
+			if (cpu_has_ebase_wg) {
+#ifdef CONFIG_64BIT
+				ebase = (read_c0_ebase_64() & ~0xfff);
+#else
+				ebase = (read_c0_ebase() & ~0xfff);
+#endif
+			} else {
+				ebase += (read_c0_ebase() & 0x3ffff000);
+			}
+		}
 	}

 	if (cpu_has_mmips) {
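The KSeg0 conversion in the trap_init() hunk above works because KSeg0 is a fixed, cached, unmapped window onto the low 512MB of physical memory, which is why the WARN_ON() checks against 0x20000000. Illustratively (the helper name is made up, and the numeric result assumes a 32-bit kernel):

#include <asm/addrspace.h>	/* CKSEG0ADDR() */

/*
 * KSeg0 aliases physical 0..512MB at a constant offset, so converting
 * a low bootmem allocation is plain arithmetic, and the resulting
 * address never takes a TLB miss, which is what an exception base
 * needs.
 */
static unsigned long exception_base_in_kseg0(unsigned long ebase_pa)
{
	/* e.g. ebase_pa == 0x01234000 yields 0x81234000 */
	return CKSEG0ADDR(ebase_pa);
}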
diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c
index 8452d933a645..dbb917403131 100644
--- a/arch/mips/kernel/uprobes.c
+++ b/arch/mips/kernel/uprobes.c
@@ -8,71 +8,12 @@
 #include <asm/branch.h>
 #include <asm/cpu-features.h>
 #include <asm/ptrace.h>
-#include <asm/inst.h>
+
+#include "probes-common.h"

 static inline int insn_has_delay_slot(const union mips_instruction insn)
 {
-	switch (insn.i_format.opcode) {
-	/*
-	 * jr and jalr are in r_format format.
-	 */
-	case spec_op:
-		switch (insn.r_format.func) {
-		case jalr_op:
-		case jr_op:
-			return 1;
-		}
-		break;
-
-	/*
-	 * This group contains:
-	 * bltz_op, bgez_op, bltzl_op, bgezl_op,
-	 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
-	 */
-	case bcond_op:
-		switch (insn.i_format.rt) {
-		case bltz_op:
-		case bltzl_op:
-		case bgez_op:
-		case bgezl_op:
-		case bltzal_op:
-		case bltzall_op:
-		case bgezal_op:
-		case bgezall_op:
-		case bposge32_op:
-			return 1;
-		}
-		break;
-
-	/*
-	 * These are unconditional and in j_format.
-	 */
-	case jal_op:
-	case j_op:
-	case beq_op:
-	case beql_op:
-	case bne_op:
-	case bnel_op:
-	case blez_op: /* not really i_format */
-	case blezl_op:
-	case bgtz_op:
-	case bgtzl_op:
-		return 1;
-
-	/*
-	 * And now the FPA/cp1 branch instructions.
-	 */
-	case cop1_op:
-#ifdef CONFIG_CPU_CAVIUM_OCTEON
-	case lwc2_op: /* This is bbit0 on Octeon */
-	case ldc2_op: /* This is bbit032 on Octeon */
-	case swc2_op: /* This is bbit1 on Octeon */
-	case sdc2_op: /* This is bbit132 on Octeon */
-#endif
-		return 1;
-	}
-
-	return 0;
+	return __insn_has_delay_slot(insn);
 }

 /**
@@ -95,6 +36,12 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
 		return -EINVAL;

 	inst.word = aup->insn[0];
+
+	if (__insn_is_compact_branch(inst)) {
+		pr_notice("Uprobes for compact branches are not supported\n");
+		return -EINVAL;
+	}
+
 	aup->ixol[0] = aup->insn[insn_has_delay_slot(inst)];
 	aup->ixol[1] = UPROBE_BRK_UPROBE_XOL;		/* NOP  */

@@ -157,7 +104,6 @@ bool is_trap_insn(uprobe_opcode_t *insn)
 int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
 {
 	struct uprobe_task *utask = current->utask;
-	union mips_instruction insn;

 	/*
 	 * Now find the EPC where to resume after the breakpoint has been
@@ -168,10 +114,10 @@ int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
 		unsigned long epc;

 		epc = regs->cp0_epc;
-		__compute_return_epc_for_insn(regs, insn);
+		__compute_return_epc_for_insn(regs,
+			(union mips_instruction) aup->insn[0]);
 		aup->resume_epc = regs->cp0_epc;
 	}
-
 	utask->autask.saved_trap_nr = current->thread.trap_nr;
 	current->thread.trap_nr = UPROBE_TRAP_NR;
 	regs->cp0_epc = current->utask->xol_vaddr;
@@ -222,7 +168,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self,
 		return NOTIFY_DONE;

 	switch (val) {
-	case DIE_BREAK:
+	case DIE_UPROBE:
 		if (uprobe_pre_sstep_notifier(regs))
 			return NOTIFY_STOP;
 		break;
@@ -257,7 +203,7 @@ unsigned long arch_uretprobe_hijack_return_addr(
 	ra = regs->regs[31];

 	/* Replace the return address with the trampoline address */
-	regs->regs[31] = ra;
+	regs->regs[31] = trampoline_vaddr;

 	return ra;
 }
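The one-line change to arch_uretprobe_hijack_return_addr() above is the substantive fix here: the old code assigned ra back to itself, so returns were never diverted through the trampoline. A minimal model of the corrected flow, using only names from the hunk:

#include <asm/ptrace.h>		/* struct pt_regs */

/*
 * Model of the fixed hijack: the original return address is handed
 * back to generic uprobes code (which restores it when the trampoline
 * is hit), while $ra itself is redirected to the trampoline.
 */
static unsigned long hijack_model(struct pt_regs *regs,
				  unsigned long trampoline_vaddr)
{
	unsigned long ra = regs->regs[31];	/* caller's return address */

	regs->regs[31] = trampoline_vaddr;	/* divert the return */
	return ra;				/* saved for later restore */
}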
@@ -280,40 +226,17 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
 	return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
 }

-/**
- * set_orig_insn - Restore the original instruction.
- * @mm: the probed process address space.
- * @auprobe: arch specific probepoint information.
- * @vaddr: the virtual address to insert the opcode.
- *
- * For mm @mm, restore the original opcode (opcode) at @vaddr.
- * Return 0 (success) or a negative errno.
- *
- * This overrides the weak version in kernel/events/uprobes.c.
- */
-int set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
-		  unsigned long vaddr)
-{
-	return uprobe_write_opcode(mm, vaddr,
-			*(uprobe_opcode_t *)&auprobe->orig_inst[0].word);
-}
-
 void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
 				  void *src, unsigned long len)
 {
-	void *kaddr;
+	unsigned long kaddr, kstart;

 	/* Initialize the slot */
-	kaddr = kmap_atomic(page);
-	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
-	kunmap_atomic(kaddr);
-
-	/*
-	 * The MIPS version of flush_icache_range will operate safely on
-	 * user space addresses and more importantly, it doesn't require a
-	 * VMA argument.
-	 */
-	flush_icache_range(vaddr, vaddr + len);
+	kaddr = (unsigned long)kmap_atomic(page);
+	kstart = kaddr + (vaddr & ~PAGE_MASK);
+	memcpy((void *)kstart, src, len);
+	flush_icache_range(kstart, kstart + len);
+	kunmap_atomic((void *)kaddr);
 }

 /**
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 9abe447a4b48..f9dbfb14af33 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -39,16 +39,16 @@ static struct vm_special_mapping vdso_vvar_mapping = {
 static void __init init_vdso_image(struct mips_vdso_image *image)
 {
 	unsigned long num_pages, i;
+	unsigned long data_pfn;

 	BUG_ON(!PAGE_ALIGNED(image->data));
 	BUG_ON(!PAGE_ALIGNED(image->size));

 	num_pages = image->size / PAGE_SIZE;

-	for (i = 0; i < num_pages; i++) {
-		image->mapping.pages[i] =
-			virt_to_page(image->data + (i * PAGE_SIZE));
-	}
+	data_pfn = __phys_to_pfn(__pa_symbol(image->data));
+	for (i = 0; i < num_pages; i++)
+		image->mapping.pages[i] = pfn_to_page(data_pfn + i);
 }

 static int __init init_vdso(void)
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index a82c178d0bb9..d5de67591735 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -55,6 +55,7 @@ SECTIONS
 	.text : {
 		TEXT_TEXT
 		SCHED_TEXT
+		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
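CPUIDLE_TEXT, added to the linker script above, collects functions marked __cpuidle into one contiguous stretch of .text so that generic code can classify a program counter with a simple range check. A sketch of such a consumer follows; the bounds symbol names are recalled from the generic cpuidle support of this era rather than taken from this diff, so treat them as an assumption:

#include <linux/types.h>	/* bool */

/* Section bounds emitted for CPUIDLE_TEXT (names assumed) */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/* "Is this PC inside idle code?" becomes a two-comparison check */
static bool pc_in_cpuidle_text(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
	       pc < (unsigned long)__cpuidle_text_end;
}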