Diffstat (limited to 'arch/mips/kernel')
28 files changed, 1921 insertions, 1217 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 1c1b71752c84..26c6175e1379 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -30,6 +30,7 @@ obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o obj-$(CONFIG_SYNC_R4K) += sync-r4k.o +obj-$(CONFIG_DEBUG_FS) += segment.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_MODULES) += mips_ksyms.o module.o obj-$(CONFIG_MODULES_USE_ELF_RELA) += module-rela.o @@ -55,7 +56,11 @@ obj-$(CONFIG_MIPS_CMP) += smp-cmp.o obj-$(CONFIG_CPU_MIPSR2) += spram.o obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o +obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o +obj-$(CONFIG_MIPS_VPE_LOADER_MT) += vpe-mt.o obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o +obj-$(CONFIG_MIPS_VPE_APSP_API_CMP) += rtlx-cmp.o +obj-$(CONFIG_MIPS_VPE_APSP_API_MT) += rtlx-mt.o obj-$(CONFIG_I8259) += i8259.o obj-$(CONFIG_IRQ_CPU) += irq_cpu.o diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c index 202e581e6096..7faf5f2bee25 100644 --- a/arch/mips/kernel/binfmt_elfo32.c +++ b/arch/mips/kernel/binfmt_elfo32.c @@ -28,6 +28,18 @@ typedef double elf_fpreg_t; typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; /* + * In order to be sure that we don't attempt to execute an O32 binary which + * requires 64 bit FP (FR=1) on a system which does not support it we refuse + * to execute any binary which has bits specified by the following macro set + * in its ELF header flags. + */ +#ifdef CONFIG_MIPS_O32_FP64_SUPPORT +# define __MIPS_O32_FP64_MUST_BE_ZERO 0 +#else +# define __MIPS_O32_FP64_MUST_BE_ZERO EF_MIPS_FP64 +#endif + +/* * This is used to ensure we don't load something for the wrong architecture. */ #define elf_check_arch(hdr) \ @@ -44,6 +56,8 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; if (((__h->e_flags & EF_MIPS_ABI) != 0) && \ ((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32)) \ __res = 0; \ + if (__h->e_flags & __MIPS_O32_FP64_MUST_BE_ZERO) \ + __res = 0; \ \ __res; \ }) diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S index bd79c4f9bff4..a5bf73d22fcc 100644 --- a/arch/mips/kernel/bmips_vec.S +++ b/arch/mips/kernel/bmips_vec.S @@ -8,11 +8,11 @@ * Reset/NMI/re-entry vectors for BMIPS processors */ -#include <linux/init.h> #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/cacheops.h> +#include <asm/cpu.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> @@ -91,12 +91,18 @@ NESTED(bmips_reset_nmi_vec, PT_SIZE, sp) beqz k0, bmips_smp_entry #if defined(CONFIG_CPU_BMIPS5000) + mfc0 k0, CP0_PRID + li k1, PRID_IMP_BMIPS5000 + andi k0, 0xff00 + bne k0, k1, 1f + /* if we're not on core 0, this must be the SMP boot signal */ li k1, (3 << 25) mfc0 k0, $22 and k0, k1 bnez k0, bmips_smp_entry -#endif +1: +#endif /* CONFIG_CPU_BMIPS5000 */ #endif /* CONFIG_SMP */ /* nope, it's just a regular NMI */ @@ -139,7 +145,12 @@ bmips_smp_entry: xori k0, 0x04 mtc0 k0, CP0_CONFIG + mfc0 k0, CP0_PRID + andi k0, 0xff00 #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) + li k1, PRID_IMP_BMIPS43XX + bne k0, k1, 2f + /* initialize CPU1's local I-cache */ li k0, 0x80000000 li k1, 0x80010000 @@ -150,14 +161,21 @@ bmips_smp_entry: 1: cache Index_Store_Tag_I, 0(k0) addiu k0, 16 bne k0, k1, 1b -#elif defined(CONFIG_CPU_BMIPS5000) + + b 3f +2: +#endif /* CONFIG_CPU_BMIPS4350 || CONFIG_CPU_BMIPS4380 */ +#if defined(CONFIG_CPU_BMIPS5000) /* set exception vector base */ + li k1, PRID_IMP_BMIPS5000 + bne k0, k1, 3f + la k0, ebase lw k0, 0(k0) mtc0 k0, $15, 
1 BARRIER -#endif - +#endif /* CONFIG_CPU_BMIPS5000 */ +3: /* jump back to kseg0 in case we need to remap the kseg1 area */ la k0, 1f jr k0 @@ -221,8 +239,18 @@ END(bmips_smp_int_vec) LEAF(bmips_enable_xks01) #if defined(CONFIG_XKS01) - + mfc0 t0, CP0_PRID + andi t2, t0, 0xff00 #if defined(CONFIG_CPU_BMIPS4380) + li t1, PRID_IMP_BMIPS43XX + bne t2, t1, 1f + + andi t0, 0xff + addiu t1, t0, -PRID_REV_BMIPS4380_HI + bgtz t1, 2f + addiu t0, -PRID_REV_BMIPS4380_LO + bltz t0, 2f + mfc0 t0, $22, 3 li t1, 0x1ff0 li t2, (1 << 12) | (1 << 9) @@ -231,7 +259,13 @@ LEAF(bmips_enable_xks01) or t0, t2 mtc0 t0, $22, 3 BARRIER -#elif defined(CONFIG_CPU_BMIPS5000) + b 2f +1: +#endif /* CONFIG_CPU_BMIPS4380 */ +#if defined(CONFIG_CPU_BMIPS5000) + li t1, PRID_IMP_BMIPS5000 + bne t2, t1, 2f + mfc0 t0, $22, 5 li t1, 0x01ff li t2, (1 << 8) | (1 << 5) @@ -240,12 +274,8 @@ LEAF(bmips_enable_xks01) or t0, t2 mtc0 t0, $22, 5 BARRIER -#else - -#error Missing XKS01 setup - -#endif - +#endif /* CONFIG_CPU_BMIPS5000 */ +2: #endif /* defined(CONFIG_XKS01) */ jr ra diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index c814287bdf5d..530f832de02c 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -112,7 +112,7 @@ static inline unsigned long cpu_get_fpu_id(void) unsigned long tmp, fpu_id; tmp = read_c0_status(); - __enable_fpu(); + __enable_fpu(FPU_AS_IS); fpu_id = read_32bit_cp1_register(CP1_REVISION); write_c0_status(tmp); return fpu_id; @@ -163,6 +163,25 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa) static char unknown_isa[] = KERN_ERR \ "Unsupported ISA type, c0.config0: %d."; +static void set_ftlb_enable(struct cpuinfo_mips *c, int enable) +{ + unsigned int config6; + /* + * Config6 is implementation dependent and it's currently only + * used by proAptiv + */ + if (c->cputype == CPU_PROAPTIV) { + config6 = read_c0_config6(); + if (enable) + /* Enable FTLB */ + write_c0_config6(config6 | MIPS_CONF6_FTLBEN); + else + /* Disable FTLB */ + write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN); + back_to_back_c0_hazard(); + } +} + static inline unsigned int decode_config0(struct cpuinfo_mips *c) { unsigned int config0; @@ -170,8 +189,13 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c) config0 = read_c0_config(); - if (((config0 & MIPS_CONF_MT) >> 7) == 1) + /* + * Look for Standard TLB or Dual VTLB and FTLB + */ + if ((((config0 & MIPS_CONF_MT) >> 7) == 1) || + (((config0 & MIPS_CONF_MT) >> 7) == 4)) c->options |= MIPS_CPU_TLB; + isa = (config0 & MIPS_CONF_AT) >> 13; switch (isa) { case 0: @@ -226,8 +250,11 @@ static inline unsigned int decode_config1(struct cpuinfo_mips *c) c->options |= MIPS_CPU_FPU; c->options |= MIPS_CPU_32FPR; } - if (cpu_has_tlb) + if (cpu_has_tlb) { c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1; + c->tlbsizevtlb = c->tlbsize; + c->tlbsizeftlbsets = 0; + } return config1 & MIPS_CONF_M; } @@ -272,6 +299,8 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c) c->options |= MIPS_CPU_MICROMIPS; if (config3 & MIPS_CONF3_VZ) c->ases |= MIPS_ASE_VZ; + if (config3 & MIPS_CONF3_SC) + c->options |= MIPS_CPU_SEGMENTS; return config3 & MIPS_CONF_M; } @@ -279,12 +308,51 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c) static inline unsigned int decode_config4(struct cpuinfo_mips *c) { unsigned int config4; + unsigned int newcf4; + unsigned int mmuextdef; + unsigned int ftlb_page = MIPS_CONF4_FTLBPAGESIZE; config4 = read_c0_config4(); - if ((config4 & MIPS_CONF4_MMUEXTDEF) == 
MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT - && cpu_has_tlb) - c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40; + if (cpu_has_tlb) { + if (((config4 & MIPS_CONF4_IE) >> 29) == 2) + c->options |= MIPS_CPU_TLBINV; + mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; + switch (mmuextdef) { + case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT: + c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40; + c->tlbsizevtlb = c->tlbsize; + break; + case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT: + c->tlbsizevtlb += + ((config4 & MIPS_CONF4_VTLBSIZEEXT) >> + MIPS_CONF4_VTLBSIZEEXT_SHIFT) * 0x40; + c->tlbsize = c->tlbsizevtlb; + ftlb_page = MIPS_CONF4_VFTLBPAGESIZE; + /* fall through */ + case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT: + newcf4 = (config4 & ~ftlb_page) | + (page_size_ftlb(mmuextdef) << + MIPS_CONF4_FTLBPAGESIZE_SHIFT); + write_c0_config4(newcf4); + back_to_back_c0_hazard(); + config4 = read_c0_config4(); + if (config4 != newcf4) { + pr_err("PAGE_SIZE 0x%lx is not supported by FTLB (config4=0x%x)\n", + PAGE_SIZE, config4); + /* Switch FTLB off */ + set_ftlb_enable(c, 0); + break; + } + c->tlbsizeftlbsets = 1 << + ((config4 & MIPS_CONF4_FTLBSETS) >> + MIPS_CONF4_FTLBSETS_SHIFT); + c->tlbsizeftlbways = ((config4 & MIPS_CONF4_FTLBWAYS) >> + MIPS_CONF4_FTLBWAYS_SHIFT) + 2; + c->tlbsize += c->tlbsizeftlbways * c->tlbsizeftlbsets; + break; + } + } c->kscratch_mask = (config4 >> 16) & 0xff; @@ -312,6 +380,9 @@ static void decode_configs(struct cpuinfo_mips *c) c->scache.flags = MIPS_CACHE_NOT_PRESENT; + /* Enable FTLB if present */ + set_ftlb_enable(c, 1); + ok = decode_config0(c); /* Read Config registers. */ BUG_ON(!ok); /* Arch spec violation! */ if (ok) @@ -675,7 +746,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) { - decode_configs(c); switch (c->processor_id & PRID_IMP_MASK) { case PRID_IMP_4KC: c->cputype = CPU_4KC; @@ -739,8 +809,26 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_74K; __cpu_name[cpu] = "MIPS 1074Kc"; break; + case PRID_IMP_INTERAPTIV_UP: + c->cputype = CPU_INTERAPTIV; + __cpu_name[cpu] = "MIPS interAptiv"; + break; + case PRID_IMP_INTERAPTIV_MP: + c->cputype = CPU_INTERAPTIV; + __cpu_name[cpu] = "MIPS interAptiv (multi)"; + break; + case PRID_IMP_PROAPTIV_UP: + c->cputype = CPU_PROAPTIV; + __cpu_name[cpu] = "MIPS proAptiv"; + break; + case PRID_IMP_PROAPTIV_MP: + c->cputype = CPU_PROAPTIV; + __cpu_name[cpu] = "MIPS proAptiv (multi)"; + break; } + decode_configs(c); + spram_config(); } @@ -943,6 +1031,7 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) switch (c->processor_id & PRID_IMP_MASK) { case PRID_IMP_NETLOGIC_XLP2XX: + case PRID_IMP_NETLOGIC_XLP9XX: c->cputype = CPU_XLP; __cpu_name[cpu] = "Broadcom XLPII"; break; diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c index 93aa302948d7..d21264681e97 100644 --- a/arch/mips/kernel/crash.c +++ b/arch/mips/kernel/crash.c @@ -5,7 +5,6 @@ #include <linux/bootmem.h> #include <linux/crash_dump.h> #include <linux/delay.h> -#include <linux/init.h> #include <linux/irq.h> #include <linux/types.h> #include <linux/sched.h> diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index 47d7583cd67f..d84f6a509502 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -476,6 +476,7 @@ NESTED(nmi_handler, PT_SIZE, sp) BUILD_HANDLER ov ov sti silent /* #12 */ BUILD_HANDLER tr tr sti silent /* #13 */ BUILD_HANDLER fpe fpe fpe silent /* #15 */ + BUILD_HANDLER 
ftlb ftlb none silent /* #16 */ BUILD_HANDLER mdmx mdmx sti silent /* #22 */ #ifdef CONFIG_HARDWARE_WATCHPOINTS /* diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index f7991d95bff9..3553243bf9d6 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c @@ -184,6 +184,8 @@ void __init check_wait(void) case CPU_24K: case CPU_34K: case CPU_1004K: + case CPU_INTERAPTIV: + case CPU_PROAPTIV: cpu_wait = r4k_wait; if (read_c0_config7() & MIPS_CONF7_WII) cpu_wait = r4k_wait_irqoff; diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 8c58d8a84bf3..00d20974b3e7 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -65,26 +65,25 @@ static int show_cpuinfo(struct seq_file *m, void *v) cpu_data[n].watch_reg_masks[i]); seq_printf(m, "]\n"); } - if (cpu_has_mips_r) { - seq_printf(m, "isa\t\t\t: mips1"); - if (cpu_has_mips_2) - seq_printf(m, "%s", " mips2"); - if (cpu_has_mips_3) - seq_printf(m, "%s", " mips3"); - if (cpu_has_mips_4) - seq_printf(m, "%s", " mips4"); - if (cpu_has_mips_5) - seq_printf(m, "%s", " mips5"); - if (cpu_has_mips32r1) - seq_printf(m, "%s", " mips32r1"); - if (cpu_has_mips32r2) - seq_printf(m, "%s", " mips32r2"); - if (cpu_has_mips64r1) - seq_printf(m, "%s", " mips64r1"); - if (cpu_has_mips64r2) - seq_printf(m, "%s", " mips64r2"); - seq_printf(m, "\n"); - } + + seq_printf(m, "isa\t\t\t: mips1"); + if (cpu_has_mips_2) + seq_printf(m, "%s", " mips2"); + if (cpu_has_mips_3) + seq_printf(m, "%s", " mips3"); + if (cpu_has_mips_4) + seq_printf(m, "%s", " mips4"); + if (cpu_has_mips_5) + seq_printf(m, "%s", " mips5"); + if (cpu_has_mips32r1) + seq_printf(m, "%s", " mips32r1"); + if (cpu_has_mips32r2) + seq_printf(m, "%s", " mips32r2"); + if (cpu_has_mips64r1) + seq_printf(m, "%s", " mips64r1"); + if (cpu_has_mips64r2) + seq_printf(m, "%s", " mips64r2"); + seq_printf(m, "\n"); seq_printf(m, "ASEs implemented\t:"); if (cpu_has_mips16) seq_printf(m, "%s", " mips16"); @@ -107,7 +106,14 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "kscratch registers\t: %d\n", hweight8(cpu_data[n].kscratch_mask)); seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core); - +#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) + if (cpu_has_mipsmt) { + seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id); +#if defined(CONFIG_MIPS_MT_SMTC) + seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id); +#endif + } +#endif sprintf(fmt, "VCE%%c exceptions\t\t: %s\n", cpu_has_vce ? "%u" : "not available"); seq_printf(m, fmt, 'D', vced_count); diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index ddc76103e78c..6ae540e133b2 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -60,15 +60,11 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) /* New thread loses kernel privileges. */ status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK); -#ifdef CONFIG_64BIT - status |= test_thread_flag(TIF_32BIT_REGS) ? 
0 : ST0_FR; -#endif status |= KU_USER; regs->cp0_status = status; clear_used_math(); clear_fpu_owner(); - if (cpu_has_dsp) - __init_dsp(); + init_dsp(); regs->cp0_epc = pc; regs->regs[29] = sp; } diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index b52e1d2b33e0..7da9b76db4d9 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -137,13 +137,13 @@ int ptrace_getfpregs(struct task_struct *child, __u32 __user *data) if (cpu_has_mipsmt) { unsigned int vpflags = dvpe(); flags = read_c0_status(); - __enable_fpu(); + __enable_fpu(FPU_AS_IS); __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp)); write_c0_status(flags); evpe(vpflags); } else { flags = read_c0_status(); - __enable_fpu(); + __enable_fpu(FPU_AS_IS); __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp)); write_c0_status(flags); } @@ -408,6 +408,7 @@ long arch_ptrace(struct task_struct *child, long request, /* Read the word at location addr in the USER area. */ case PTRACE_PEEKUSR: { struct pt_regs *regs; + fpureg_t *fregs; unsigned long tmp = 0; regs = task_pt_regs(child); @@ -418,26 +419,28 @@ long arch_ptrace(struct task_struct *child, long request, tmp = regs->regs[addr]; break; case FPR_BASE ... FPR_BASE + 31: - if (tsk_used_math(child)) { - fpureg_t *fregs = get_fpu_regs(child); + if (!tsk_used_math(child)) { + /* FP not yet used */ + tmp = -1; + break; + } + fregs = get_fpu_regs(child); #ifdef CONFIG_32BIT + if (test_thread_flag(TIF_32BIT_FPREGS)) { /* * The odd registers are actually the high * order bits of the values stored in the even * registers - unless we're using r2k_switch.S. */ if (addr & 1) - tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32); + tmp = fregs[(addr & ~1) - 32] >> 32; else - tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff); -#endif -#ifdef CONFIG_64BIT - tmp = fregs[addr - FPR_BASE]; -#endif - } else { - tmp = -1; /* FP not yet used */ + tmp = fregs[addr - 32]; + break; } +#endif + tmp = fregs[addr - FPR_BASE]; break; case PC: tmp = regs->cp0_epc; @@ -483,13 +486,13 @@ long arch_ptrace(struct task_struct *child, long request, if (cpu_has_mipsmt) { unsigned int vpflags = dvpe(); flags = read_c0_status(); - __enable_fpu(); + __enable_fpu(FPU_AS_IS); __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); write_c0_status(flags); evpe(vpflags); } else { flags = read_c0_status(); - __enable_fpu(); + __enable_fpu(FPU_AS_IS); __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); write_c0_status(flags); } @@ -554,22 +557,25 @@ long arch_ptrace(struct task_struct *child, long request, child->thread.fpu.fcr31 = 0; } #ifdef CONFIG_32BIT - /* - * The odd registers are actually the high order bits - * of the values stored in the even registers - unless - * we're using r2k_switch.S. - */ - if (addr & 1) { - fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff; - fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32; - } else { - fregs[addr - FPR_BASE] &= ~0xffffffffLL; - fregs[addr - FPR_BASE] |= data; + if (test_thread_flag(TIF_32BIT_FPREGS)) { + /* + * The odd registers are actually the high + * order bits of the values stored in the even + * registers - unless we're using r2k_switch.S. 
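The PTRACE_PEEKUSR and PTRACE_POKEUSR hunks above make the odd/even pairing explicit: when a task runs with TIF_32BIT_FPREGS, an odd-numbered 32-bit FP register is stored as the high half of the preceding even register's 64-bit slot. A standalone C model of that indexing (plain arrays stand in for the task's FPU state; peek_fpr32/poke_fpr32 are illustrative names, not kernel API):

#include <stdint.h>
#include <stdio.h>

/* Model of the 32-bit-FPR view used by the ptrace hunks: slot 2n is
 * the low half of 64-bit storage element 2n, slot 2n+1 its high half. */
static uint32_t peek_fpr32(const uint64_t *fregs, unsigned int slot)
{
	if (slot & 1)				/* odd: high 32 bits */
		return fregs[slot & ~1u] >> 32;
	return (uint32_t)fregs[slot];		/* even: low 32 bits */
}

static void poke_fpr32(uint64_t *fregs, unsigned int slot, uint32_t data)
{
	if (slot & 1) {
		fregs[slot & ~1u] &= 0xffffffffULL;
		fregs[slot & ~1u] |= (uint64_t)data << 32;
	} else {
		fregs[slot] &= ~0xffffffffULL;
		fregs[slot] |= data;
	}
}

int main(void)
{
	uint64_t fregs[32] = { 0 };

	poke_fpr32(fregs, 0, 0x55667788);
	poke_fpr32(fregs, 1, 0x11223344);
	/* the pair re-assembles one double: 1122334455667788 */
	printf("%016llx\n", (unsigned long long)fregs[0]);
	return 0;
}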
+ */ + if (addr & 1) { + fregs[(addr & ~1) - FPR_BASE] &= + 0xffffffff; + fregs[(addr & ~1) - FPR_BASE] |= + ((u64)data) << 32; + } else { + fregs[addr - FPR_BASE] &= ~0xffffffffLL; + fregs[addr - FPR_BASE] |= data; + } + break; } #endif -#ifdef CONFIG_64BIT fregs[addr - FPR_BASE] = data; -#endif break; } case PC: diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c index 9486055ba660..b8aa2dd5b00b 100644 --- a/arch/mips/kernel/ptrace32.c +++ b/arch/mips/kernel/ptrace32.c @@ -80,6 +80,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, /* Read the word at location addr in the USER area. */ case PTRACE_PEEKUSR: { struct pt_regs *regs; + fpureg_t *fregs; unsigned int tmp; regs = task_pt_regs(child); @@ -90,21 +91,25 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, tmp = regs->regs[addr]; break; case FPR_BASE ... FPR_BASE + 31: - if (tsk_used_math(child)) { - fpureg_t *fregs = get_fpu_regs(child); - + if (!tsk_used_math(child)) { + /* FP not yet used */ + tmp = -1; + break; + } + fregs = get_fpu_regs(child); + if (test_thread_flag(TIF_32BIT_FPREGS)) { /* * The odd registers are actually the high * order bits of the values stored in the even * registers - unless we're using r2k_switch.S. */ if (addr & 1) - tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32); + tmp = fregs[(addr & ~1) - 32] >> 32; else - tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff); - } else { - tmp = -1; /* FP not yet used */ + tmp = fregs[addr - 32]; + break; } + tmp = fregs[addr - FPR_BASE]; break; case PC: tmp = regs->cp0_epc; @@ -147,13 +152,13 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, if (cpu_has_mipsmt) { unsigned int vpflags = dvpe(); flags = read_c0_status(); - __enable_fpu(); + __enable_fpu(FPU_AS_IS); __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); write_c0_status(flags); evpe(vpflags); } else { flags = read_c0_status(); - __enable_fpu(); + __enable_fpu(FPU_AS_IS); __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); write_c0_status(flags); } @@ -236,20 +241,24 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, sizeof(child->thread.fpu)); child->thread.fpu.fcr31 = 0; } - /* - * The odd registers are actually the high order bits - * of the values stored in the even registers - unless - * we're using r2k_switch.S. - */ - if (addr & 1) { - fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff; - fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32; - } else { - fregs[addr - FPR_BASE] &= ~0xffffffffLL; - /* Must cast, lest sign extension fill upper - bits! */ - fregs[addr - FPR_BASE] |= (unsigned int)data; + if (test_thread_flag(TIF_32BIT_FPREGS)) { + /* + * The odd registers are actually the high + * order bits of the values stored in the even + * registers - unless we're using r2k_switch.S. 
+ */ + if (addr & 1) { + fregs[(addr & ~1) - FPR_BASE] &= + 0xffffffff; + fregs[(addr & ~1) - FPR_BASE] |= + ((u64)data) << 32; + } else { + fregs[addr - FPR_BASE] &= ~0xffffffffLL; + fregs[addr - FPR_BASE] |= data; + } + break; } + fregs[addr - FPR_BASE] = data; break; } case PC: diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 55ffe149dae9..253b2fb52026 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -35,7 +35,15 @@ LEAF(_save_fp_context) cfc1 t1, fcr31 -#ifdef CONFIG_64BIT +#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2) + .set push +#ifdef CONFIG_MIPS32_R2 + .set mips64r2 + mfc0 t0, CP0_STATUS + sll t0, t0, 5 + bgez t0, 1f # skip storing odd if FR=0 + nop +#endif /* Store the 16 odd double precision registers */ EX sdc1 $f1, SC_FPREGS+8(a0) EX sdc1 $f3, SC_FPREGS+24(a0) @@ -53,6 +61,7 @@ LEAF(_save_fp_context) EX sdc1 $f27, SC_FPREGS+216(a0) EX sdc1 $f29, SC_FPREGS+232(a0) EX sdc1 $f31, SC_FPREGS+248(a0) +1: .set pop #endif /* Store the 16 even double precision registers */ @@ -82,7 +91,31 @@ LEAF(_save_fp_context) LEAF(_save_fp_context32) cfc1 t1, fcr31 - EX sdc1 $f0, SC32_FPREGS+0(a0) + mfc0 t0, CP0_STATUS + sll t0, t0, 5 + bgez t0, 1f # skip storing odd if FR=0 + nop + + /* Store the 16 odd double precision registers */ + EX sdc1 $f1, SC32_FPREGS+8(a0) + EX sdc1 $f3, SC32_FPREGS+24(a0) + EX sdc1 $f5, SC32_FPREGS+40(a0) + EX sdc1 $f7, SC32_FPREGS+56(a0) + EX sdc1 $f9, SC32_FPREGS+72(a0) + EX sdc1 $f11, SC32_FPREGS+88(a0) + EX sdc1 $f13, SC32_FPREGS+104(a0) + EX sdc1 $f15, SC32_FPREGS+120(a0) + EX sdc1 $f17, SC32_FPREGS+136(a0) + EX sdc1 $f19, SC32_FPREGS+152(a0) + EX sdc1 $f21, SC32_FPREGS+168(a0) + EX sdc1 $f23, SC32_FPREGS+184(a0) + EX sdc1 $f25, SC32_FPREGS+200(a0) + EX sdc1 $f27, SC32_FPREGS+216(a0) + EX sdc1 $f29, SC32_FPREGS+232(a0) + EX sdc1 $f31, SC32_FPREGS+248(a0) + + /* Store the 16 even double precision registers */ +1: EX sdc1 $f0, SC32_FPREGS+0(a0) EX sdc1 $f2, SC32_FPREGS+16(a0) EX sdc1 $f4, SC32_FPREGS+32(a0) EX sdc1 $f6, SC32_FPREGS+48(a0) @@ -114,7 +147,16 @@ LEAF(_save_fp_context32) */ LEAF(_restore_fp_context) EX lw t0, SC_FPC_CSR(a0) -#ifdef CONFIG_64BIT + +#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2) + .set push +#ifdef CONFIG_MIPS32_R2 + .set mips64r2 + mfc0 t0, CP0_STATUS + sll t0, t0, 5 + bgez t0, 1f # skip loading odd if FR=0 + nop +#endif EX ldc1 $f1, SC_FPREGS+8(a0) EX ldc1 $f3, SC_FPREGS+24(a0) EX ldc1 $f5, SC_FPREGS+40(a0) @@ -131,6 +173,7 @@ LEAF(_restore_fp_context) EX ldc1 $f27, SC_FPREGS+216(a0) EX ldc1 $f29, SC_FPREGS+232(a0) EX ldc1 $f31, SC_FPREGS+248(a0) +1: .set pop #endif EX ldc1 $f0, SC_FPREGS+0(a0) EX ldc1 $f2, SC_FPREGS+16(a0) @@ -157,7 +200,30 @@ LEAF(_restore_fp_context) LEAF(_restore_fp_context32) /* Restore an o32 sigcontext. 
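The comment deleted from the old ptrace32.c path ("Must cast, lest sign extension fill upper bits!") still explains why the new code is safe: compat_arch_ptrace() receives data as compat_ulong_t, so OR-ing it into the low half cannot sign-extend. A minimal demonstration of what that cast prevents:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t data = -4;			/* value poked by a debugger */
	uint64_t reg = 0x1111111100000000ULL;

	/* signed source: sign-extends and destroys the high half */
	uint64_t bad = (reg & ~0xffffffffULL) | (int64_t)data;
	/* unsigned 32-bit source: only the low half changes */
	uint64_t good = (reg & ~0xffffffffULL) | (uint32_t)data;

	printf("bad  %016llx\ngood %016llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}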
*/ EX lw t0, SC32_FPC_CSR(a0) - EX ldc1 $f0, SC32_FPREGS+0(a0) + + mfc0 t0, CP0_STATUS + sll t0, t0, 5 + bgez t0, 1f # skip loading odd if FR=0 + nop + + EX ldc1 $f1, SC32_FPREGS+8(a0) + EX ldc1 $f3, SC32_FPREGS+24(a0) + EX ldc1 $f5, SC32_FPREGS+40(a0) + EX ldc1 $f7, SC32_FPREGS+56(a0) + EX ldc1 $f9, SC32_FPREGS+72(a0) + EX ldc1 $f11, SC32_FPREGS+88(a0) + EX ldc1 $f13, SC32_FPREGS+104(a0) + EX ldc1 $f15, SC32_FPREGS+120(a0) + EX ldc1 $f17, SC32_FPREGS+136(a0) + EX ldc1 $f19, SC32_FPREGS+152(a0) + EX ldc1 $f21, SC32_FPREGS+168(a0) + EX ldc1 $f23, SC32_FPREGS+184(a0) + EX ldc1 $f25, SC32_FPREGS+200(a0) + EX ldc1 $f27, SC32_FPREGS+216(a0) + EX ldc1 $f29, SC32_FPREGS+232(a0) + EX ldc1 $f31, SC32_FPREGS+248(a0) + +1: EX ldc1 $f0, SC32_FPREGS+0(a0) EX ldc1 $f2, SC32_FPREGS+16(a0) EX ldc1 $f4, SC32_FPREGS+32(a0) EX ldc1 $f6, SC32_FPREGS+48(a0) diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index 078de5eaca8f..cc78dd9a17c7 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S @@ -123,7 +123,7 @@ * Save a thread's fp context. */ LEAF(_save_fp) -#ifdef CONFIG_64BIT +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) mfc0 t0, CP0_STATUS #endif fpu_save_double a0 t0 t1 # clobbers t1 @@ -134,7 +134,7 @@ LEAF(_save_fp) * Restore a thread's fp context. */ LEAF(_restore_fp) -#ifdef CONFIG_64BIT +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) mfc0 t0, CP0_STATUS #endif fpu_restore_double a0 t0 t1 # clobbers t1 @@ -228,6 +228,47 @@ LEAF(_init_fpu) mtc1 t1, $f29 mtc1 t1, $f30 mtc1 t1, $f31 + +#ifdef CONFIG_CPU_MIPS32_R2 + .set push + .set mips64r2 + sll t0, t0, 5 # is Status.FR set? + bgez t0, 1f # no: skip setting upper 32b + + mthc1 t1, $f0 + mthc1 t1, $f1 + mthc1 t1, $f2 + mthc1 t1, $f3 + mthc1 t1, $f4 + mthc1 t1, $f5 + mthc1 t1, $f6 + mthc1 t1, $f7 + mthc1 t1, $f8 + mthc1 t1, $f9 + mthc1 t1, $f10 + mthc1 t1, $f11 + mthc1 t1, $f12 + mthc1 t1, $f13 + mthc1 t1, $f14 + mthc1 t1, $f15 + mthc1 t1, $f16 + mthc1 t1, $f17 + mthc1 t1, $f18 + mthc1 t1, $f19 + mthc1 t1, $f20 + mthc1 t1, $f21 + mthc1 t1, $f22 + mthc1 t1, $f23 + mthc1 t1, $f24 + mthc1 t1, $f25 + mthc1 t1, $f26 + mthc1 t1, $f27 + mthc1 t1, $f28 + mthc1 t1, $f29 + mthc1 t1, $f30 + mthc1 t1, $f31 +1: .set pop +#endif /* CONFIG_CPU_MIPS32_R2 */ #else .set mips3 dmtc1 t1, $f0 diff --git a/arch/mips/kernel/rtlx-cmp.c b/arch/mips/kernel/rtlx-cmp.c new file mode 100644 index 000000000000..56dc69635153 --- /dev/null +++ b/arch/mips/kernel/rtlx-cmp.c @@ -0,0 +1,116 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. + * Copyright (C) 2013 Imagination Technologies Ltd. 
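The r4k_fpu.S and r4k_switch.S hunks all gate on the same predicate: CP0 Status.FR (bit 26) decides whether the odd-numbered doubles, and the upper halves written by mthc1, exist as separate state. The assembly idiom "sll t0, t0, 5; bgez t0, 1f" shifts bit 26 into the sign bit and branches when FR is clear. A C restatement of that test, with ST0_FR as the kernel defines it:

#include <stdint.h>
#include <stdio.h>

#define ST0_FR (1u << 26)	/* CP0 Status.FR, bit 26 */

/* Equivalent of "sll t0, t0, 5; bgez t0, 1f": after a left shift by 5,
 * bit 26 sits in bit 31, so "negative" means FR=1 (64-bit FPRs). */
static int status_fr(uint32_t c0_status)
{
	return (int32_t)(c0_status << 5) < 0;
}

int main(void)
{
	printf("FR=0 -> %d, FR=1 -> %d\n", status_fr(0), status_fr(ST0_FR));
	return 0;
}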
+ */ +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/err.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <linux/smp.h> + +#include <asm/mips_mt.h> +#include <asm/vpe.h> +#include <asm/rtlx.h> + +static int major; + +static void rtlx_interrupt(void) +{ + int i; + struct rtlx_info *info; + struct rtlx_info **p = vpe_get_shared(aprp_cpu_index()); + + if (p == NULL || *p == NULL) + return; + + info = *p; + + if (info->ap_int_pending == 1 && smp_processor_id() == 0) { + for (i = 0; i < RTLX_CHANNELS; i++) { + wake_up(&channel_wqs[i].lx_queue); + wake_up(&channel_wqs[i].rt_queue); + } + info->ap_int_pending = 0; + } +} + +void _interrupt_sp(void) +{ + smp_send_reschedule(aprp_cpu_index()); +} + +int __init rtlx_module_init(void) +{ + struct device *dev; + int i, err; + + if (!cpu_has_mipsmt) { + pr_warn("VPE loader: not a MIPS MT capable processor\n"); + return -ENODEV; + } + + if (num_possible_cpus() - aprp_cpu_index() < 1) { + pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n" + "Pass maxcpus=<n> argument as kernel argument\n"); + + return -ENODEV; + } + + major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops); + if (major < 0) { + pr_err("rtlx_module_init: unable to register device\n"); + return major; + } + + /* initialise the wait queues */ + for (i = 0; i < RTLX_CHANNELS; i++) { + init_waitqueue_head(&channel_wqs[i].rt_queue); + init_waitqueue_head(&channel_wqs[i].lx_queue); + atomic_set(&channel_wqs[i].in_open, 0); + mutex_init(&channel_wqs[i].mutex); + + dev = device_create(mt_class, NULL, MKDEV(major, i), NULL, + "%s%d", RTLX_MODULE_NAME, i); + if (IS_ERR(dev)) { + err = PTR_ERR(dev); + goto out_chrdev; + } + } + + /* set up notifiers */ + rtlx_notify.start = rtlx_starting; + rtlx_notify.stop = rtlx_stopping; + vpe_notify(aprp_cpu_index(), &rtlx_notify); + + if (cpu_has_vint) { + aprp_hook = rtlx_interrupt; + } else { + pr_err("APRP RTLX init on non-vectored-interrupt processor\n"); + err = -ENODEV; + goto out_class; + } + + return 0; + +out_class: + for (i = 0; i < RTLX_CHANNELS; i++) + device_destroy(mt_class, MKDEV(major, i)); +out_chrdev: + unregister_chrdev(major, RTLX_MODULE_NAME); + + return err; +} + +void __exit rtlx_module_exit(void) +{ + int i; + + for (i = 0; i < RTLX_CHANNELS; i++) + device_destroy(mt_class, MKDEV(major, i)); + unregister_chrdev(major, RTLX_MODULE_NAME); +} diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c new file mode 100644 index 000000000000..91d61ba422b4 --- /dev/null +++ b/arch/mips/kernel/rtlx-mt.c @@ -0,0 +1,148 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. + * Copyright (C) 2013 Imagination Technologies Ltd. + */ +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/err.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/irq.h> + +#include <asm/mips_mt.h> +#include <asm/vpe.h> +#include <asm/rtlx.h> + +static int major; + +static void rtlx_dispatch(void) +{ + if (read_c0_cause() & read_c0_status() & C_SW0) + do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ); +} + +/* + * Interrupt handler may be called before rtlx_init has otherwise had + * a chance to run. 
+ */ +static irqreturn_t rtlx_interrupt(int irq, void *dev_id) +{ + unsigned int vpeflags; + unsigned long flags; + int i; + + /* Ought not to be strictly necessary for SMTC builds */ + local_irq_save(flags); + vpeflags = dvpe(); + set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ); + irq_enable_hazard(); + evpe(vpeflags); + local_irq_restore(flags); + + for (i = 0; i < RTLX_CHANNELS; i++) { + wake_up(&channel_wqs[i].lx_queue); + wake_up(&channel_wqs[i].rt_queue); + } + + return IRQ_HANDLED; +} + +static struct irqaction rtlx_irq = { + .handler = rtlx_interrupt, + .name = "RTLX", +}; + +static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ; + +void _interrupt_sp(void) +{ + unsigned long flags; + + local_irq_save(flags); + dvpe(); + settc(1); + write_vpe_c0_cause(read_vpe_c0_cause() | C_SW0); + evpe(EVPE_ENABLE); + local_irq_restore(flags); +} + +int __init rtlx_module_init(void) +{ + struct device *dev; + int i, err; + + if (!cpu_has_mipsmt) { + pr_warn("VPE loader: not a MIPS MT capable processor\n"); + return -ENODEV; + } + + if (aprp_cpu_index() == 0) { + pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n" + "Pass maxtcs=<n> argument as kernel argument\n"); + + return -ENODEV; + } + + major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops); + if (major < 0) { + pr_err("rtlx_module_init: unable to register device\n"); + return major; + } + + /* initialise the wait queues */ + for (i = 0; i < RTLX_CHANNELS; i++) { + init_waitqueue_head(&channel_wqs[i].rt_queue); + init_waitqueue_head(&channel_wqs[i].lx_queue); + atomic_set(&channel_wqs[i].in_open, 0); + mutex_init(&channel_wqs[i].mutex); + + dev = device_create(mt_class, NULL, MKDEV(major, i), NULL, + "%s%d", RTLX_MODULE_NAME, i); + if (IS_ERR(dev)) { + err = PTR_ERR(dev); + goto out_chrdev; + } + } + + /* set up notifiers */ + rtlx_notify.start = rtlx_starting; + rtlx_notify.stop = rtlx_stopping; + vpe_notify(aprp_cpu_index(), &rtlx_notify); + + if (cpu_has_vint) { + aprp_hook = rtlx_dispatch; + } else { + pr_err("APRP RTLX init on non-vectored-interrupt processor\n"); + err = -ENODEV; + goto out_class; + } + + rtlx_irq.dev_id = rtlx; + err = setup_irq(rtlx_irq_num, &rtlx_irq); + if (err) + goto out_class; + + return 0; + +out_class: + for (i = 0; i < RTLX_CHANNELS; i++) + device_destroy(mt_class, MKDEV(major, i)); +out_chrdev: + unregister_chrdev(major, RTLX_MODULE_NAME); + + return err; +} + +void __exit rtlx_module_exit(void) +{ + int i; + + for (i = 0; i < RTLX_CHANNELS; i++) + device_destroy(mt_class, MKDEV(major, i)); + unregister_chrdev(major, RTLX_MODULE_NAME); +} diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index 2c12ea1668d1..31b1b763cb29 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c @@ -1,114 +1,51 @@ /* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. * Copyright (C) 2005, 06 Ralf Baechle (ralf@linux-mips.org) - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
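rtlx_dispatch() in rtlx-mt.c above raises the RTLX IRQ only when software interrupt 0 is both pending in Cause and unmasked in Status, the usual MIPS "Cause & Status" test; C_SW0 is bit 8 in both registers. A sketch of the predicate:

#include <stdint.h>
#include <stdio.h>

#define C_SW0 (1u << 8)		/* software interrupt 0 in Cause/Status */

static int sw0_pending_and_enabled(uint32_t cause, uint32_t status)
{
	return (cause & status & C_SW0) != 0;
}

int main(void)
{
	/* pending but masked: no dispatch; pending and unmasked: dispatch */
	printf("%d %d\n",
	       sw0_pending_and_enabled(C_SW0, 0),
	       sw0_pending_and_enabled(C_SW0, C_SW0));
	return 0;
}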
- * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. - * + * Copyright (C) 2013 Imagination Technologies Ltd. */ - -#include <linux/device.h> #include <linux/kernel.h> #include <linux/fs.h> -#include <linux/init.h> -#include <asm/uaccess.h> -#include <linux/list.h> -#include <linux/vmalloc.h> -#include <linux/elf.h> -#include <linux/seq_file.h> #include <linux/syscalls.h> #include <linux/moduleloader.h> -#include <linux/interrupt.h> -#include <linux/poll.h> -#include <linux/sched.h> -#include <linux/wait.h> +#include <linux/atomic.h> #include <asm/mipsmtregs.h> #include <asm/mips_mt.h> -#include <asm/cacheflush.h> -#include <linux/atomic.h> -#include <asm/cpu.h> #include <asm/processor.h> -#include <asm/vpe.h> #include <asm/rtlx.h> #include <asm/setup.h> +#include <asm/vpe.h> -static struct rtlx_info *rtlx; -static int major; -static char module_name[] = "rtlx"; - -static struct chan_waitqueues { - wait_queue_head_t rt_queue; - wait_queue_head_t lx_queue; - atomic_t in_open; - struct mutex mutex; -} channel_wqs[RTLX_CHANNELS]; - -static struct vpe_notifications notify; static int sp_stopping; - -extern void *vpe_get_shared(int index); - -static void rtlx_dispatch(void) -{ - do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ); -} - - -/* Interrupt handler may be called before rtlx_init has otherwise had - a chance to run. -*/ -static irqreturn_t rtlx_interrupt(int irq, void *dev_id) -{ - unsigned int vpeflags; - unsigned long flags; - int i; - - /* Ought not to be strictly necessary for SMTC builds */ - local_irq_save(flags); - vpeflags = dvpe(); - set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ); - irq_enable_hazard(); - evpe(vpeflags); - local_irq_restore(flags); - - for (i = 0; i < RTLX_CHANNELS; i++) { - wake_up(&channel_wqs[i].lx_queue); - wake_up(&channel_wqs[i].rt_queue); - } - - return IRQ_HANDLED; -} +struct rtlx_info *rtlx; +struct chan_waitqueues channel_wqs[RTLX_CHANNELS]; +struct vpe_notifications rtlx_notify; +void (*aprp_hook)(void) = NULL; +EXPORT_SYMBOL(aprp_hook); static void __used dump_rtlx(void) { int i; - printk("id 0x%lx state %d\n", rtlx->id, rtlx->state); + pr_info("id 0x%lx state %d\n", rtlx->id, rtlx->state); for (i = 0; i < RTLX_CHANNELS; i++) { struct rtlx_channel *chan = &rtlx->channel[i]; - printk(" rt_state %d lx_state %d buffer_size %d\n", - chan->rt_state, chan->lx_state, chan->buffer_size); + pr_info(" rt_state %d lx_state %d buffer_size %d\n", + chan->rt_state, chan->lx_state, chan->buffer_size); - printk(" rt_read %d rt_write %d\n", - chan->rt_read, chan->rt_write); + pr_info(" rt_read %d rt_write %d\n", + chan->rt_read, chan->rt_write); - printk(" lx_read %d lx_write %d\n", - chan->lx_read, chan->lx_write); + pr_info(" lx_read %d lx_write %d\n", + chan->lx_read, chan->lx_write); - printk(" rt_buffer <%s>\n", chan->rt_buffer); - printk(" lx_buffer <%s>\n", chan->lx_buffer); + pr_info(" rt_buffer <%s>\n", chan->rt_buffer); + pr_info(" lx_buffer <%s>\n", chan->lx_buffer); } } @@ -116,8 +53,7 @@ static void __used dump_rtlx(void) static int rtlx_init(struct rtlx_info *rtlxi) { if (rtlxi->id != RTLX_ID) { - printk(KERN_ERR "no valid RTLX id at 0x%p 0x%lx\n", - rtlxi, rtlxi->id); + pr_err("no valid RTLX id at 0x%p 0x%lx\n", rtlxi, rtlxi->id); return -ENOEXEC; } @@ -127,20 +63,20 @@ static int rtlx_init(struct rtlx_info *rtlxi) } /* notifications */ -static void starting(int vpe) +void rtlx_starting(int vpe) { 
int i; sp_stopping = 0; /* force a reload of rtlx */ - rtlx=NULL; + rtlx = NULL; /* wake up any sleeping rtlx_open's */ for (i = 0; i < RTLX_CHANNELS; i++) wake_up_interruptible(&channel_wqs[i].lx_queue); } -static void stopping(int vpe) +void rtlx_stopping(int vpe) { int i; @@ -158,31 +94,30 @@ int rtlx_open(int index, int can_sleep) int ret = 0; if (index >= RTLX_CHANNELS) { - printk(KERN_DEBUG "rtlx_open index out of range\n"); + pr_debug(KERN_DEBUG "rtlx_open index out of range\n"); return -ENOSYS; } if (atomic_inc_return(&channel_wqs[index].in_open) > 1) { - printk(KERN_DEBUG "rtlx_open channel %d already opened\n", - index); + pr_debug(KERN_DEBUG "rtlx_open channel %d already opened\n", index); ret = -EBUSY; goto out_fail; } if (rtlx == NULL) { - if( (p = vpe_get_shared(tclimit)) == NULL) { - if (can_sleep) { - ret = __wait_event_interruptible( + p = vpe_get_shared(aprp_cpu_index()); + if (p == NULL) { + if (can_sleep) { + ret = __wait_event_interruptible( channel_wqs[index].lx_queue, - (p = vpe_get_shared(tclimit))); - if (ret) + (p = vpe_get_shared(aprp_cpu_index()))); + if (ret) + goto out_fail; + } else { + pr_debug("No SP program loaded, and device opened with O_NONBLOCK\n"); + ret = -ENOSYS; goto out_fail; - } else { - printk(KERN_DEBUG "No SP program loaded, and device " - "opened with O_NONBLOCK\n"); - ret = -ENOSYS; - goto out_fail; - } + } } smp_rmb(); @@ -204,24 +139,24 @@ int rtlx_open(int index, int can_sleep) ret = -ERESTARTSYS; goto out_fail; } - finish_wait(&channel_wqs[index].lx_queue, &wait); + finish_wait(&channel_wqs[index].lx_queue, + &wait); } else { - pr_err(" *vpe_get_shared is NULL. " - "Has an SP program been loaded?\n"); + pr_err(" *vpe_get_shared is NULL. Has an SP program been loaded?\n"); ret = -ENOSYS; goto out_fail; } } if ((unsigned int)*p < KSEG0) { - printk(KERN_WARNING "vpe_get_shared returned an " - "invalid pointer maybe an error code %d\n", - (int)*p); + pr_warn("vpe_get_shared returned an invalid pointer maybe an error code %d\n", + (int)*p); ret = -ENOSYS; goto out_fail; } - if ((ret = rtlx_init(*p)) < 0) + ret = rtlx_init(*p); + if (ret < 0) goto out_ret; } @@ -352,7 +287,7 @@ ssize_t rtlx_write(int index, const void __user *buffer, size_t count) size_t fl; if (rtlx == NULL) - return(-ENOSYS); + return -ENOSYS; rt = &rtlx->channel[index]; @@ -361,8 +296,8 @@ ssize_t rtlx_write(int index, const void __user *buffer, size_t count) rt_read = rt->rt_read; /* total number of bytes to copy */ - count = min(count, (size_t)write_spacefree(rt_read, rt->rt_write, - rt->buffer_size)); + count = min_t(size_t, count, write_spacefree(rt_read, rt->rt_write, + rt->buffer_size)); /* first bit from write pointer to the end of the buffer, or count */ fl = min(count, (size_t) rt->buffer_size - rt->rt_write); @@ -372,9 +307,8 @@ ssize_t rtlx_write(int index, const void __user *buffer, size_t count) goto out; /* if there's any left copy to the beginning of the buffer */ - if (count - fl) { + if (count - fl) failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl); - } out: count -= failed; @@ -384,6 +318,8 @@ out: smp_wmb(); mutex_unlock(&channel_wqs[index].mutex); + _interrupt_sp(); + return count; } @@ -398,7 +334,7 @@ static int file_release(struct inode *inode, struct file *filp) return rtlx_release(iminor(inode)); } -static unsigned int file_poll(struct file *file, poll_table * wait) +static unsigned int file_poll(struct file *file, poll_table *wait) { int minor = iminor(file_inode(file)); unsigned int mask = 0; @@ -420,21 +356,20 @@ static unsigned int 
file_poll(struct file *file, poll_table * wait) return mask; } -static ssize_t file_read(struct file *file, char __user * buffer, size_t count, - loff_t * ppos) +static ssize_t file_read(struct file *file, char __user *buffer, size_t count, + loff_t *ppos) { int minor = iminor(file_inode(file)); /* data available? */ - if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) { - return 0; // -EAGAIN makes cat whinge - } + if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) + return 0; /* -EAGAIN makes 'cat' whine */ return rtlx_read(minor, buffer, count); } -static ssize_t file_write(struct file *file, const char __user * buffer, - size_t count, loff_t * ppos) +static ssize_t file_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) { int minor = iminor(file_inode(file)); @@ -454,100 +389,16 @@ static ssize_t file_write(struct file *file, const char __user * buffer, return rtlx_write(minor, buffer, count); } -static const struct file_operations rtlx_fops = { +const struct file_operations rtlx_fops = { .owner = THIS_MODULE, - .open = file_open, + .open = file_open, .release = file_release, .write = file_write, - .read = file_read, - .poll = file_poll, + .read = file_read, + .poll = file_poll, .llseek = noop_llseek, }; -static struct irqaction rtlx_irq = { - .handler = rtlx_interrupt, - .name = "RTLX", -}; - -static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ; - -static char register_chrdev_failed[] __initdata = - KERN_ERR "rtlx_module_init: unable to register device\n"; - -static int __init rtlx_module_init(void) -{ - struct device *dev; - int i, err; - - if (!cpu_has_mipsmt) { - printk("VPE loader: not a MIPS MT capable processor\n"); - return -ENODEV; - } - - if (tclimit == 0) { - printk(KERN_WARNING "No TCs reserved for AP/SP, not " - "initializing RTLX.\nPass maxtcs=<n> argument as kernel " - "argument\n"); - - return -ENODEV; - } - - major = register_chrdev(0, module_name, &rtlx_fops); - if (major < 0) { - printk(register_chrdev_failed); - return major; - } - - /* initialise the wait queues */ - for (i = 0; i < RTLX_CHANNELS; i++) { - init_waitqueue_head(&channel_wqs[i].rt_queue); - init_waitqueue_head(&channel_wqs[i].lx_queue); - atomic_set(&channel_wqs[i].in_open, 0); - mutex_init(&channel_wqs[i].mutex); - - dev = device_create(mt_class, NULL, MKDEV(major, i), NULL, - "%s%d", module_name, i); - if (IS_ERR(dev)) { - err = PTR_ERR(dev); - goto out_chrdev; - } - } - - /* set up notifiers */ - notify.start = starting; - notify.stop = stopping; - vpe_notify(tclimit, &notify); - - if (cpu_has_vint) - set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch); - else { - pr_err("APRP RTLX init on non-vectored-interrupt processor\n"); - err = -ENODEV; - goto out_chrdev; - } - - rtlx_irq.dev_id = rtlx; - setup_irq(rtlx_irq_num, &rtlx_irq); - - return 0; - -out_chrdev: - for (i = 0; i < RTLX_CHANNELS; i++) - device_destroy(mt_class, MKDEV(major, i)); - - return err; -} - -static void __exit rtlx_module_exit(void) -{ - int i; - - for (i = 0; i < RTLX_CHANNELS; i++) - device_destroy(mt_class, MKDEV(major, i)); - - unregister_chrdev(major, module_name); -} - module_init(rtlx_module_init); module_exit(rtlx_module_exit); diff --git a/arch/mips/kernel/segment.c b/arch/mips/kernel/segment.c new file mode 100644 index 000000000000..076ead2a9859 --- /dev/null +++ b/arch/mips/kernel/segment.c @@ -0,0 +1,110 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License.
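The rtlx_write() hunk above clamps the transfer to write_spacefree() and then copies in at most two runs: from the write pointer to the end of the ring, then a wrapped remainder at the start. A self-contained model of that split; write_spacefree() itself is not shown in the diff, so the one-slot-open free-space formula below is an assumption:

#include <stdio.h>
#include <string.h>

/* Assumed free-space rule: keep one byte open so read==write means empty. */
static size_t space_free(size_t rd, size_t wr, size_t size)
{
	return (size + rd - wr - 1) % size;
}

static size_t ring_write(char *buf, size_t size, size_t *wr, size_t rd,
			 const char *src, size_t count)
{
	size_t fl;

	if (count > space_free(rd, *wr, size))
		count = space_free(rd, *wr, size);

	/* first run: from the write pointer to the end of the buffer */
	fl = size - *wr;
	if (fl > count)
		fl = count;
	memcpy(buf + *wr, src, fl);

	/* if there's any left, copy to the beginning of the buffer */
	memcpy(buf, src + fl, count - fl);

	*wr = (*wr + count) % size;
	return count;
}

int main(void)
{
	char buf[8];
	size_t wr = 6;

	/* 5 bytes from offset 6 of an 8-byte ring: runs of 2 and 3 */
	printf("wrote %zu\n", ring_write(buf, sizeof(buf), &wr, 6, "hello", 5));
	return 0;
}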
See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2013 Imagination Technologies Ltd. + */ + +#include <linux/kernel.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <asm/cpu.h> +#include <asm/mipsregs.h> + +static void build_segment_config(char *str, unsigned int cfg) +{ + unsigned int am; + static const char * const am_str[] = { + "UK", "MK", "MSK", "MUSK", "MUSUK", "USK", + "RSRVD", "UUSK"}; + + /* Segment access mode. */ + am = (cfg & MIPS_SEGCFG_AM) >> MIPS_SEGCFG_AM_SHIFT; + str += sprintf(str, "%-5s", am_str[am]); + + /* + * Access modes MK, MSK and MUSK are mapped segments. Therefore + * there is no direct physical address mapping. + */ + if ((am == 0) || (am > 3)) { + str += sprintf(str, " %03lx", + ((cfg & MIPS_SEGCFG_PA) >> MIPS_SEGCFG_PA_SHIFT)); + str += sprintf(str, " %01ld", + ((cfg & MIPS_SEGCFG_C) >> MIPS_SEGCFG_C_SHIFT)); + } else { + str += sprintf(str, " UND"); + str += sprintf(str, " U"); + } + + /* Exception configuration. */ + str += sprintf(str, " %01ld\n", + ((cfg & MIPS_SEGCFG_EU) >> MIPS_SEGCFG_EU_SHIFT)); +} + +static int show_segments(struct seq_file *m, void *v) +{ + unsigned int segcfg; + char str[42]; + + seq_puts(m, "Segment Virtual Size Access Mode Physical Caching EU\n"); + seq_puts(m, "------- ------- ---- ----------- -------- ------- --\n"); + + segcfg = read_c0_segctl0(); + build_segment_config(str, segcfg); + seq_printf(m, " 0 e0000000 512M %s", str); + + segcfg >>= 16; + build_segment_config(str, segcfg); + seq_printf(m, " 1 c0000000 512M %s", str); + + segcfg = read_c0_segctl1(); + build_segment_config(str, segcfg); + seq_printf(m, " 2 a0000000 512M %s", str); + + segcfg >>= 16; + build_segment_config(str, segcfg); + seq_printf(m, " 3 80000000 512M %s", str); + + segcfg = read_c0_segctl2(); + build_segment_config(str, segcfg); + seq_printf(m, " 4 40000000 1G %s", str); + + segcfg >>= 16; + build_segment_config(str, segcfg); + seq_printf(m, " 5 00000000 1G %s\n", str); + + return 0; +} + +static int segments_open(struct inode *inode, struct file *file) +{ + return single_open(file, show_segments, NULL); +} + +static const struct file_operations segments_fops = { + .open = segments_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init segments_info(void) +{ + extern struct dentry *mips_debugfs_dir; + struct dentry *segments; + + if (cpu_has_segments) { + if (!mips_debugfs_dir) + return -ENODEV; + + segments = debugfs_create_file("segments", S_IRUGO, + mips_debugfs_dir, NULL, + &segments_fops); + if (!segments) + return -ENOMEM; + } + return 0; +} + +device_initcall(segments_info); diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 2f285abc76d5..5199563c4403 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -71,8 +71,9 @@ static int protected_save_fp_context(struct sigcontext __user *sc) int err; while (1) { lock_fpu_owner(); - own_fpu_inatomic(1); - err = save_fp_context(sc); /* this might fail */ + err = own_fpu_inatomic(1); + if (!err) + err = save_fp_context(sc); /* this might fail */ unlock_fpu_owner(); if (likely(!err)) break; @@ -91,8 +92,9 @@ static int protected_restore_fp_context(struct sigcontext __user *sc) int err, tmp __maybe_unused; while (1) { lock_fpu_owner(); - own_fpu_inatomic(0); - err = restore_fp_context(sc); /* this might fail */ + err = own_fpu_inatomic(0); + if (!err) + err = restore_fp_context(sc); /* this might fail */ unlock_fpu_owner(); if (likely(!err)) 
break; diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 1905a419aa46..3d60f7750fa8 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -85,8 +85,9 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc) int err; while (1) { lock_fpu_owner(); - own_fpu_inatomic(1); - err = save_fp_context32(sc); /* this might fail */ + err = own_fpu_inatomic(1); + if (!err) + err = save_fp_context32(sc); /* this might fail */ unlock_fpu_owner(); if (likely(!err)) break; @@ -105,8 +106,9 @@ static int protected_restore_fp_context32(struct sigcontext32 __user *sc) int err, tmp __maybe_unused; while (1) { lock_fpu_owner(); - own_fpu_inatomic(0); - err = restore_fp_context32(sc); /* this might fail */ + err = own_fpu_inatomic(0); + if (!err) + err = restore_fp_context32(sc); /* this might fail */ unlock_fpu_owner(); if (likely(!err)) break; diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 2362665ba496..ea4c2dc31692 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -49,8 +49,10 @@ cpumask_t bmips_booted_mask; unsigned long bmips_smp_boot_sp; unsigned long bmips_smp_boot_gp; -static void bmips_send_ipi_single(int cpu, unsigned int action); -static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id); +static void bmips43xx_send_ipi_single(int cpu, unsigned int action); +static void bmips5000_send_ipi_single(int cpu, unsigned int action); +static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id); +static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id); /* SW interrupts 0,1 are used for interprocessor signaling */ #define IPI0_IRQ (MIPS_CPU_IRQ_BASE + 0) @@ -64,49 +66,58 @@ static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id); static void __init bmips_smp_setup(void) { int i, cpu = 1, boot_cpu = 0; - -#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) int cpu_hw_intr; - /* arbitration priority */ - clear_c0_brcm_cmt_ctrl(0x30); - - /* NBK and weak order flags */ - set_c0_brcm_config_0(0x30000); - - /* Find out if we are running on TP0 or TP1 */ - boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31)); - - /* - * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread - * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output - * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output - */ - if (boot_cpu == 0) - cpu_hw_intr = 0x02; - else - cpu_hw_intr = 0x1d; - - change_c0_brcm_cmt_intr(0xf8018000, (cpu_hw_intr << 27) | (0x03 << 15)); - - /* single core, 2 threads (2 pipelines) */ - max_cpus = 2; -#elif defined(CONFIG_CPU_BMIPS5000) - /* enable raceless SW interrupts */ - set_c0_brcm_config(0x03 << 22); - - /* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */ - change_c0_brcm_mode(0x1f << 27, 0x02 << 27); - - /* N cores, 2 threads per core */ - max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1; + switch (current_cpu_type()) { + case CPU_BMIPS4350: + case CPU_BMIPS4380: + /* arbitration priority */ + clear_c0_brcm_cmt_ctrl(0x30); + + /* NBK and weak order flags */ + set_c0_brcm_config_0(0x30000); + + /* Find out if we are running on TP0 or TP1 */ + boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31)); + + /* + * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other + * thread + * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output + * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output + */ + if (boot_cpu == 0) + cpu_hw_intr = 0x02; + else + cpu_hw_intr = 0x1d; + + 
change_c0_brcm_cmt_intr(0xf8018000, + (cpu_hw_intr << 27) | (0x03 << 15)); + + /* single core, 2 threads (2 pipelines) */ + max_cpus = 2; + + break; + case CPU_BMIPS5000: + /* enable raceless SW interrupts */ + set_c0_brcm_config(0x03 << 22); + + /* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */ + change_c0_brcm_mode(0x1f << 27, 0x02 << 27); + + /* N cores, 2 threads per core */ + max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1; + + /* clear any pending SW interrupts */ + for (i = 0; i < max_cpus; i++) { + write_c0_brcm_action(ACTION_CLR_IPI(i, 0)); + write_c0_brcm_action(ACTION_CLR_IPI(i, 1)); + } - /* clear any pending SW interrupts */ - for (i = 0; i < max_cpus; i++) { - write_c0_brcm_action(ACTION_CLR_IPI(i, 0)); - write_c0_brcm_action(ACTION_CLR_IPI(i, 1)); + break; + default: + max_cpus = 1; } -#endif if (!bmips_smp_enabled) max_cpus = 1; @@ -134,6 +145,20 @@ static void __init bmips_smp_setup(void) */ static void bmips_prepare_cpus(unsigned int max_cpus) { + irqreturn_t (*bmips_ipi_interrupt)(int irq, void *dev_id); + + switch (current_cpu_type()) { + case CPU_BMIPS4350: + case CPU_BMIPS4380: + bmips_ipi_interrupt = bmips43xx_ipi_interrupt; + break; + case CPU_BMIPS5000: + bmips_ipi_interrupt = bmips5000_ipi_interrupt; + break; + default: + return; + } + if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, "smp_ipi0", NULL)) panic("Can't request IPI0 interrupt"); @@ -168,26 +193,39 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle) pr_info("SMP: Booting CPU%d...\n", cpu); - if (cpumask_test_cpu(cpu, &bmips_booted_mask)) - bmips_send_ipi_single(cpu, 0); + if (cpumask_test_cpu(cpu, &bmips_booted_mask)) { + switch (current_cpu_type()) { + case CPU_BMIPS4350: + case CPU_BMIPS4380: + bmips43xx_send_ipi_single(cpu, 0); + break; + case CPU_BMIPS5000: + bmips5000_send_ipi_single(cpu, 0); + break; + } + } else { -#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) - /* Reset slave TP1 if booting from TP0 */ - if (cpu_logical_map(cpu) == 1) - set_c0_brcm_cmt_ctrl(0x01); -#elif defined(CONFIG_CPU_BMIPS5000) - if (cpu & 0x01) - write_c0_brcm_action(ACTION_BOOT_THREAD(cpu)); - else { - /* - * core N thread 0 was already booted; just - * pulse the NMI line - */ - bmips_write_zscm_reg(0x210, 0xc0000000); - udelay(10); - bmips_write_zscm_reg(0x210, 0x00); + switch (current_cpu_type()) { + case CPU_BMIPS4350: + case CPU_BMIPS4380: + /* Reset slave TP1 if booting from TP0 */ + if (cpu_logical_map(cpu) == 1) + set_c0_brcm_cmt_ctrl(0x01); + break; + case CPU_BMIPS5000: + if (cpu & 0x01) + write_c0_brcm_action(ACTION_BOOT_THREAD(cpu)); + else { + /* + * core N thread 0 was already booted; just + * pulse the NMI line + */ + bmips_write_zscm_reg(0x210, 0xc0000000); + udelay(10); + bmips_write_zscm_reg(0x210, 0x00); + } + break; } -#endif cpumask_set_cpu(cpu, &bmips_booted_mask); } } @@ -199,26 +237,32 @@ static void bmips_init_secondary(void) { /* move NMI vector to kseg0, in case XKS01 is enabled */ -#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) - void __iomem *cbr = BMIPS_GET_CBR(); + void __iomem *cbr; unsigned long old_vec; unsigned long relo_vector; int boot_cpu; - boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31)); - relo_vector = boot_cpu ? 
BMIPS_RELO_VECTOR_CONTROL_0 : - BMIPS_RELO_VECTOR_CONTROL_1; + switch (current_cpu_type()) { + case CPU_BMIPS4350: + case CPU_BMIPS4380: + cbr = BMIPS_GET_CBR(); - old_vec = __raw_readl(cbr + relo_vector); - __raw_writel(old_vec & ~0x20000000, cbr + relo_vector); + boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31)); + relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 : + BMIPS_RELO_VECTOR_CONTROL_1; - clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0); -#elif defined(CONFIG_CPU_BMIPS5000) - write_c0_brcm_bootvec(read_c0_brcm_bootvec() & - (smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000)); + old_vec = __raw_readl(cbr + relo_vector); + __raw_writel(old_vec & ~0x20000000, cbr + relo_vector); - write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0)); -#endif + clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0); + break; + case CPU_BMIPS5000: + write_c0_brcm_bootvec(read_c0_brcm_bootvec() & + (smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000)); + + write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0)); + break; + } } /* @@ -243,8 +287,6 @@ static void bmips_cpus_done(void) { } -#if defined(CONFIG_CPU_BMIPS5000) - /* * BMIPS5000 raceless IPIs * @@ -253,12 +295,12 @@ static void bmips_cpus_done(void) * IPI1 is used for SMP_CALL_FUNCTION */ -static void bmips_send_ipi_single(int cpu, unsigned int action) +static void bmips5000_send_ipi_single(int cpu, unsigned int action) { write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION)); } -static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id) +static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id) { int action = irq - IPI0_IRQ; @@ -272,7 +314,14 @@ static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -#else +static void bmips5000_send_ipi_mask(const struct cpumask *mask, + unsigned int action) +{ + unsigned int i; + + for_each_cpu(i, mask) + bmips5000_send_ipi_single(i, action); +} /* * BMIPS43xx racey IPIs @@ -287,7 +336,7 @@ static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id) static DEFINE_SPINLOCK(ipi_lock); static DEFINE_PER_CPU(int, ipi_action_mask); -static void bmips_send_ipi_single(int cpu, unsigned int action) +static void bmips43xx_send_ipi_single(int cpu, unsigned int action) { unsigned long flags; @@ -298,7 +347,7 @@ static void bmips_send_ipi_single(int cpu, unsigned int action) spin_unlock_irqrestore(&ipi_lock, flags); } -static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id) +static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id) { unsigned long flags; int action, cpu = irq - IPI0_IRQ; @@ -317,15 +366,13 @@ static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -#endif /* BMIPS type */ - -static void bmips_send_ipi_mask(const struct cpumask *mask, +static void bmips43xx_send_ipi_mask(const struct cpumask *mask, unsigned int action) { unsigned int i; for_each_cpu(i, mask) - bmips_send_ipi_single(i, action); + bmips43xx_send_ipi_single(i, action); } #ifdef CONFIG_HOTPLUG_CPU @@ -381,15 +428,30 @@ void __ref play_dead(void) #endif /* CONFIG_HOTPLUG_CPU */ -struct plat_smp_ops bmips_smp_ops = { +struct plat_smp_ops bmips43xx_smp_ops = { + .smp_setup = bmips_smp_setup, + .prepare_cpus = bmips_prepare_cpus, + .boot_secondary = bmips_boot_secondary, + .smp_finish = bmips_smp_finish, + .init_secondary = bmips_init_secondary, + .cpus_done = bmips_cpus_done, + .send_ipi_single = bmips43xx_send_ipi_single, + .send_ipi_mask = bmips43xx_send_ipi_mask, +#ifdef CONFIG_HOTPLUG_CPU + .cpu_disable = bmips_cpu_disable, + 
.cpu_die = bmips_cpu_die, +#endif +}; + +struct plat_smp_ops bmips5000_smp_ops = { .smp_setup = bmips_smp_setup, .prepare_cpus = bmips_prepare_cpus, .boot_secondary = bmips_boot_secondary, .smp_finish = bmips_smp_finish, .init_secondary = bmips_init_secondary, .cpus_done = bmips_cpus_done, - .send_ipi_single = bmips_send_ipi_single, - .send_ipi_mask = bmips_send_ipi_mask, + .send_ipi_single = bmips5000_send_ipi_single, + .send_ipi_mask = bmips5000_send_ipi_mask, #ifdef CONFIG_HOTPLUG_CPU .cpu_disable = bmips_cpu_disable, .cpu_die = bmips_cpu_die, @@ -427,43 +489,47 @@ void bmips_ebase_setup(void) BUG_ON(ebase != CKSEG0); -#if defined(CONFIG_CPU_BMIPS4350) - /* - * BMIPS4350 cannot relocate the normal vectors, but it - * can relocate the BEV=1 vectors. So CPU1 starts up at - * the relocated BEV=1, IV=0 general exception vector @ - * 0xa000_0380. - * - * set_uncached_handler() is used here because: - * - CPU1 will run this from uncached space - * - None of the cacheflush functions are set up yet - */ - set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0, - &bmips_smp_int_vec, 0x80); - __sync(); - return; -#elif defined(CONFIG_CPU_BMIPS4380) - /* - * 0x8000_0000: reset/NMI (initially in kseg1) - * 0x8000_0400: normal vectors - */ - new_ebase = 0x80000400; - cbr = BMIPS_GET_CBR(); - __raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0); - __raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1); -#elif defined(CONFIG_CPU_BMIPS5000) - /* - * 0x8000_0000: reset/NMI (initially in kseg1) - * 0x8000_1000: normal vectors - */ - new_ebase = 0x80001000; - write_c0_brcm_bootvec(0xa0088008); - write_c0_ebase(new_ebase); - if (max_cpus > 2) - bmips_write_zscm_reg(0xa0, 0xa008a008); -#else - return; -#endif + switch (current_cpu_type()) { + case CPU_BMIPS4350: + /* + * BMIPS4350 cannot relocate the normal vectors, but it + * can relocate the BEV=1 vectors. So CPU1 starts up at + * the relocated BEV=1, IV=0 general exception vector @ + * 0xa000_0380. 
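The smp-bmips.c rework above is one pattern applied throughout this series: compile-time #ifdef branches on CPU_BMIPS4350/4380/5000 become runtime switches on current_cpu_type(), and the single bmips_smp_ops splits into bmips43xx_smp_ops and bmips5000_smp_ops so one kernel image can boot both families. A minimal sketch of that shape (the enum, ops struct, and function names below are stand-ins, not the kernel's types):

#include <stdio.h>

enum cpu_type { CPU_43XX, CPU_5000 };

struct smp_ops {
	void (*send_ipi)(int cpu);
};

static void ipi_43xx(int cpu) { printf("racey SW IRQ to %d\n", cpu); }
static void ipi_5000(int cpu) { printf("raceless ACTION_SET_IPI to %d\n", cpu); }

static const struct smp_ops ops_43xx = { .send_ipi = ipi_43xx };
static const struct smp_ops ops_5000 = { .send_ipi = ipi_5000 };

/* One binary, both cores: pick the ops table once, at probe time. */
static const struct smp_ops *pick_ops(enum cpu_type t)
{
	return t == CPU_5000 ? &ops_5000 : &ops_43xx;
}

int main(void)
{
	pick_ops(CPU_5000)->send_ipi(1);
	return 0;
}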
+ * + * set_uncached_handler() is used here because: + * - CPU1 will run this from uncached space + * - None of the cacheflush functions are set up yet + */ + set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0, + &bmips_smp_int_vec, 0x80); + __sync(); + return; + case CPU_BMIPS4380: + /* + * 0x8000_0000: reset/NMI (initially in kseg1) + * 0x8000_0400: normal vectors + */ + new_ebase = 0x80000400; + cbr = BMIPS_GET_CBR(); + __raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0); + __raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1); + break; + case CPU_BMIPS5000: + /* + * 0x8000_0000: reset/NMI (initially in kseg1) + * 0x8000_1000: normal vectors + */ + new_ebase = 0x80001000; + write_c0_brcm_bootvec(0xa0088008); + write_c0_ebase(new_ebase); + if (max_cpus > 2) + bmips_write_zscm_reg(0xa0, 0xa008a008); + break; + default: + return; + } + board_nmi_handler_setup = &bmips_nmi_handler_setup; ebase = new_ebase; } diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c index 5969f1e9b62a..1b925d8a610c 100644 --- a/arch/mips/kernel/smp-cmp.c +++ b/arch/mips/kernel/smp-cmp.c @@ -199,11 +199,14 @@ void __init cmp_prepare_cpus(unsigned int max_cpus) pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n", smp_processor_id(), __func__, max_cpus); +#ifdef CONFIG_MIPS_MT /* * FIXME: some of these options are per-system, some per-core and * some per-cpu */ mips_mt_set_cpuoptions(); +#endif + } struct plat_smp_ops cmp_smp_ops = { diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index 57a3f7a2b370..0fb8cefc9114 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -71,6 +71,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0, /* Record this as available CPU */ set_cpu_possible(tc, true); + set_cpu_present(tc, true); __cpu_number_map[tc] = ++ncpu; __cpu_logical_map[ncpu] = tc; } @@ -112,12 +113,39 @@ static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0) write_tc_c0_tchalt(TCHALT_H); } +#ifdef CONFIG_IRQ_GIC +static void mp_send_ipi_single(int cpu, unsigned int action) +{ + unsigned long flags; + + local_irq_save(flags); + + switch (action) { + case SMP_CALL_FUNCTION: + gic_send_ipi(plat_ipi_call_int_xlate(cpu)); + break; + + case SMP_RESCHEDULE_YOURSELF: + gic_send_ipi(plat_ipi_resched_int_xlate(cpu)); + break; + } + + local_irq_restore(flags); +} +#endif + static void vsmp_send_ipi_single(int cpu, unsigned int action) { int i; unsigned long flags; int vpflags; +#ifdef CONFIG_IRQ_GIC + if (gic_present) { + mp_send_ipi_single(cpu, action); + return; + } +#endif local_irq_save(flags); vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */ diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c index 93f86817f20a..b242e2c10ea0 100644 --- a/arch/mips/kernel/spram.c +++ b/arch/mips/kernel/spram.c @@ -8,7 +8,6 @@ * * Copyright (C) 2007, 2008 MIPS Technologies, Inc. 
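/*
 * Illustrative sketch (not part of the patch): the smp-bmips.c and
 * smp-mt.c hunks above share one theme -- compile-time #ifdef dispatch is
 * replaced by a run-time probe (current_cpu_type(), gic_present), so one
 * kernel image can drive several cores.  A minimal stand-alone example of
 * the same pattern; cpu_probe(), CPU_A and CPU_B are hypothetical
 * stand-ins for the real current_cpu_type()/CPU_BMIPS* symbols:
 */
#include <stdio.h>

enum cpu_type { CPU_A, CPU_B, CPU_UNKNOWN };

static enum cpu_type cpu_probe(void)
{
	return CPU_A;			/* pretend the PRID decoded to CPU_A */
}

static void send_ipi(int cpu)
{
	switch (cpu_probe()) {		/* decided at run time, not build time */
	case CPU_A:
		printf("raceless IPI to cpu %d\n", cpu);
		break;
	case CPU_B:
		printf("spinlock-protected IPI to cpu %d\n", cpu);
		break;
	default:
		break;			/* unknown core: nothing to do */
	}
}

int main(void)
{
	send_ipi(1);
	return 0;
}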
 */
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/ptrace.h>
 #include <linux/stddef.h>
@@ -206,6 +205,8 @@ void spram_config(void)
 	case CPU_34K:
 	case CPU_74K:
 	case CPU_1004K:
+	case CPU_INTERAPTIV:
+	case CPU_PROAPTIV:
 		config0 = read_c0_config();
 		/* FIXME: addresses are Malta specific */
 		if (config0 & (1<<24)) {
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 84536bf4a154..c24ad5f4b324 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -11,7 +11,6 @@
 */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/irqflags.h>
 #include <linux/cpumask.h>
 
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index f9c8746be8d6..e0b499694d18 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -78,6 +78,7 @@ extern asmlinkage void handle_cpu(void);
 extern asmlinkage void handle_ov(void);
 extern asmlinkage void handle_tr(void);
 extern asmlinkage void handle_fpe(void);
+extern asmlinkage void handle_ftlb(void);
 extern asmlinkage void handle_mdmx(void);
 extern asmlinkage void handle_watch(void);
 extern asmlinkage void handle_mt(void);
@@ -1080,7 +1081,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 	unsigned long old_epc, old31;
 	unsigned int opcode;
 	unsigned int cpid;
-	int status;
+	int status, err;
 	unsigned long __maybe_unused flags;
 
 	prev_state = exception_enter();
@@ -1153,19 +1154,19 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 	case 1:
 		if (used_math())	/* Using the FPU again.	 */
-			own_fpu(1);
+			err = own_fpu(1);
 		else {			/* First time FPU user.	 */
-			init_fpu();
+			err = init_fpu();
 			set_used_math();
 		}
 
-		if (!raw_cpu_has_fpu) {
+		if (!raw_cpu_has_fpu || err) {
 			int sig;
 			void __user *fault_addr = NULL;
 			sig = fpu_emulator_cop1Handler(regs,
						       &current->thread.fpu,
						       0, &fault_addr);
-			if (!process_fpemu_return(sig, fault_addr))
+			if (!process_fpemu_return(sig, fault_addr) && !err)
 				mt_ase_fp_affinity();
 		}
 
@@ -1336,6 +1337,8 @@ static inline void parity_protection_init(void)
 	case CPU_34K:
 	case CPU_74K:
 	case CPU_1004K:
+	case CPU_INTERAPTIV:
+	case CPU_PROAPTIV:
 		{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000
@@ -1425,14 +1428,27 @@ asmlinkage void cache_parity_error(void)
 	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
 	       reg_val & (1<<30) ? "secondary" : "primary",
 	       reg_val & (1<<31) ? "data" : "insn");
-	printk("Error bits: %s%s%s%s%s%s%s\n",
-	       reg_val & (1<<29) ? "ED " : "",
-	       reg_val & (1<<28) ? "ET " : "",
-	       reg_val & (1<<26) ? "EE " : "",
-	       reg_val & (1<<25) ? "EB " : "",
-	       reg_val & (1<<24) ? "EI " : "",
-	       reg_val & (1<<23) ? "E1 " : "",
-	       reg_val & (1<<22) ? "E0 " : "");
+	if (cpu_has_mips_r2 &&
+	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
+		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
+			reg_val & (1<<29) ? "ED " : "",
+			reg_val & (1<<28) ? "ET " : "",
+			reg_val & (1<<27) ? "ES " : "",
+			reg_val & (1<<26) ? "EE " : "",
+			reg_val & (1<<25) ? "EB " : "",
+			reg_val & (1<<24) ? "EI " : "",
+			reg_val & (1<<23) ? "E1 " : "",
+			reg_val & (1<<22) ? "E0 " : "");
+	} else {
+		pr_err("Error bits: %s%s%s%s%s%s%s\n",
+			reg_val & (1<<29) ? "ED " : "",
+			reg_val & (1<<28) ? "ET " : "",
+			reg_val & (1<<26) ? "EE " : "",
+			reg_val & (1<<25) ? "EB " : "",
+			reg_val & (1<<24) ? "EI " : "",
+			reg_val & (1<<23) ? "E1 " : "",
+			reg_val & (1<<22) ? "E0 " : "");
+	}
 	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
 
 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
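/*
 * Illustrative sketch (not part of the patch): the PRID company-ID tests
 * in the two traps.c hunks only work with the bitwise mask
 * "processor_id & 0xff0000"; a logical "&&" collapses to 0 or 1 and can
 * never equal PRID_COMP_MIPS (0x010000).  A stand-alone check of the two
 * operators -- PRID_COMP_MIPS as in arch/mips/include/asm/cpu.h, the PRID
 * value itself is just an example with company field 0x01:
 */
#include <assert.h>

#define PRID_COMP_MIPS 0x010000

int main(void)
{
	unsigned int prid = 0x0001a120;		/* example processor_id */

	assert((prid & 0xff0000) == PRID_COMP_MIPS);	/* bitwise: matches */
	assert((prid && 0xff0000) == 1);	/* logical: only ever 0 or 1 */
	return 0;
}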
"E0 " : ""); + } printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1)); #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) @@ -1446,6 +1462,34 @@ asmlinkage void cache_parity_error(void) panic("Can't handle the cache error!"); } +asmlinkage void do_ftlb(void) +{ + const int field = 2 * sizeof(unsigned long); + unsigned int reg_val; + + /* For the moment, report the problem and hang. */ + if (cpu_has_mips_r2 && + ((current_cpu_data.processor_id && 0xff0000) == PRID_COMP_MIPS)) { + pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", + read_c0_ecc()); + pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); + reg_val = read_c0_cacheerr(); + pr_err("c0_cacheerr == %08x\n", reg_val); + + if ((reg_val & 0xc0000000) == 0xc0000000) { + pr_err("Decoded c0_cacheerr: FTLB parity error\n"); + } else { + pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n", + reg_val & (1<<30) ? "secondary" : "primary", + reg_val & (1<<31) ? "data" : "insn"); + } + } else { + pr_err("FTLB error exception\n"); + } + /* Just print the cacheerr bits for now */ + cache_parity_error(); +} + /* * SDBBP EJTAG debug exception handler. * We skip the instruction and return to the next instruction. @@ -1995,6 +2039,7 @@ void __init trap_init(void) if (cpu_has_fpu && !cpu_has_nofpuex) set_except_vector(15, handle_fpe); + set_except_vector(16, handle_ftlb); set_except_vector(22, handle_mdmx); if (cpu_has_mcheck) diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c new file mode 100644 index 000000000000..9268ebc0f61e --- /dev/null +++ b/arch/mips/kernel/vpe-cmp.c @@ -0,0 +1,180 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. + * Copyright (C) 2013 Imagination Technologies Ltd. + */ +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/export.h> + +#include <asm/vpe.h> + +static int major; + +void cleanup_tc(struct tc *tc) +{ + +} + +static ssize_t store_kill(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) +{ + struct vpe *vpe = get_vpe(aprp_cpu_index()); + struct vpe_notifications *notifier; + + list_for_each_entry(notifier, &vpe->notify, list) + notifier->stop(aprp_cpu_index()); + + release_progmem(vpe->load_addr); + vpe->state = VPE_STATE_UNUSED; + + return len; +} +static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill); + +static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr, + char *buf) +{ + struct vpe *vpe = get_vpe(aprp_cpu_index()); + + return sprintf(buf, "%d\n", vpe->ntcs); +} + +static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) +{ + struct vpe *vpe = get_vpe(aprp_cpu_index()); + unsigned long new; + int ret; + + ret = kstrtoul(buf, 0, &new); + if (ret < 0) + return ret; + + /* APRP can only reserve one TC in a VPE and no more. 
+	if (new != 1)
+		return -EINVAL;
+
+	vpe->ntcs = new;
+
+	return len;
+}
+static DEVICE_ATTR_RW(ntcs);
+
+static struct attribute *vpe_attrs[] = {
+	&dev_attr_kill.attr,
+	&dev_attr_ntcs.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(vpe);
+
+static void vpe_device_release(struct device *cd)
+{
+	kfree(cd);
+}
+
+static struct class vpe_class = {
+	.name = "vpe",
+	.owner = THIS_MODULE,
+	.dev_release = vpe_device_release,
+	.dev_groups = vpe_groups,
+};
+
+static struct device vpe_device;
+
+int __init vpe_module_init(void)
+{
+	struct vpe *v = NULL;
+	struct tc *t;
+	int err;
+
+	if (!cpu_has_mipsmt) {
+		pr_warn("VPE loader: not a MIPS MT capable processor\n");
+		return -ENODEV;
+	}
+
+	if (num_possible_cpus() - aprp_cpu_index() < 1) {
+		pr_warn("No VPEs reserved for AP/SP, not initializing VPE loader\n"
+			"Pass maxcpus=<n> argument as kernel argument\n");
+		return -ENODEV;
+	}
+
+	major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops);
+	if (major < 0) {
+		pr_warn("VPE loader: unable to register character device\n");
+		return major;
+	}
+
+	err = class_register(&vpe_class);
+	if (err) {
+		pr_err("vpe_class registration failed\n");
+		goto out_chrdev;
+	}
+
+	device_initialize(&vpe_device);
+	vpe_device.class	= &vpe_class,
+	vpe_device.parent	= NULL,
+	dev_set_name(&vpe_device, "vpe_sp");
+	vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
+	err = device_add(&vpe_device);
+	if (err) {
+		pr_err("Adding vpe_device failed\n");
+		goto out_class;
+	}
+
+	t = alloc_tc(aprp_cpu_index());
+	if (!t) {
+		pr_warn("VPE: unable to allocate TC\n");
+		err = -ENOMEM;
+		goto out_dev;
+	}
+
+	/* VPE */
+	v = alloc_vpe(aprp_cpu_index());
+	if (v == NULL) {
+		pr_warn("VPE: unable to allocate VPE\n");
+		kfree(t);
+		err = -ENOMEM;
+		goto out_dev;
+	}
+
+	v->ntcs = 1;
+
+	/* add the tc to the list of this vpe's tc's. */
+	list_add(&t->tc, &v->tc);
+
+	/* TC */
+	t->pvpe = v;	/* set the parent vpe */
+
+	return 0;
+
+out_dev:
+	device_del(&vpe_device);
+
+out_class:
+	class_unregister(&vpe_class);
+
+out_chrdev:
+	unregister_chrdev(major, VPE_MODULE_NAME);
+
+	return err;
+}
+
+void __exit vpe_module_exit(void)
+{
+	struct vpe *v, *n;
+
+	device_del(&vpe_device);
+	class_unregister(&vpe_class);
+	unregister_chrdev(major, VPE_MODULE_NAME);
+
+	/* No locking needed here */
+	list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list)
+		if (v->state != VPE_STATE_UNUSED)
+			release_vpe(v);
+}
diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c
new file mode 100644
index 000000000000..949ae0e17018
--- /dev/null
+++ b/arch/mips/kernel/vpe-mt.c
@@ -0,0 +1,523 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
+#include <asm/vpe.h>
+
+static int major;
+
+/* The number of TCs and VPEs physically available on the core */
+static int hw_tcs, hw_vpes;
+
+/* We are prepared so configure and start the VPE... */
+int vpe_run(struct vpe *v)
+{
+	unsigned long flags, val, dmt_flag;
+	struct vpe_notifications *notifier;
+	unsigned int vpeflags;
+	struct tc *t;
+
+	/* check we are the Master VPE */
+	local_irq_save(flags);
+	val = read_c0_vpeconf0();
+	if (!(val & VPECONF0_MVP)) {
+		pr_warn("VPE loader: only Master VPEs are able to configure MT\n");
+		local_irq_restore(flags);
+
+		return -1;
+	}
+
+	dmt_flag = dmt();
+	vpeflags = dvpe();
+
+	if (list_empty(&v->tc)) {
+		evpe(vpeflags);
+		emt(dmt_flag);
+		local_irq_restore(flags);
+
+		pr_warn("VPE loader: No TCs associated with VPE %d\n",
+			v->minor);
+
+		return -ENOEXEC;
+	}
+
+	t = list_first_entry(&v->tc, struct tc, tc);
+
+	/* Put MVPE's into 'configuration state' */
+	set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	settc(t->index);
+
+	/* should check it is halted, and not activated */
+	if ((read_tc_c0_tcstatus() & TCSTATUS_A) ||
+	    !(read_tc_c0_tchalt() & TCHALT_H)) {
+		evpe(vpeflags);
+		emt(dmt_flag);
+		local_irq_restore(flags);
+
+		pr_warn("VPE loader: TC %d is already active!\n",
+			t->index);
+
+		return -ENOEXEC;
+	}
+
+	/*
+	 * Write the address we want it to start running from in the TCPC
+	 * register.
+	 */
+	write_tc_c0_tcrestart((unsigned long)v->__start);
+	write_tc_c0_tccontext((unsigned long)0);
+
+	/*
+	 * Mark the TC as activated, not interrupt exempt and not dynamically
+	 * allocatable
+	 */
+	val = read_tc_c0_tcstatus();
+	val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
+	write_tc_c0_tcstatus(val);
+
+	write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
+
+	/*
+	 * The sde-kit passes 'memsize' to __start in $a3, so set something
+	 * here...  Or set $a3 to zero and define DFLT_STACK_SIZE and
+	 * DFLT_HEAP_SIZE when you compile your program
+	 */
+	mttgpr(6, v->ntcs);
+	mttgpr(7, physical_memsize);
+
+	/* set up VPE1 */
+	/*
+	 * bind the TC to VPE 1 as late as possible so we only have the final
+	 * VPE registers to set up, and so an EJTAG probe can trigger on it
+	 */
+	write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
+
+	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
+
+	back_to_back_c0_hazard();
+
+	/* Set up the XTC bit in vpeconf0 to point at our tc */
+	write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
+			      | (t->index << VPECONF0_XTC_SHIFT));
+
+	back_to_back_c0_hazard();
+
+	/* enable this VPE */
+	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
+
+	/* clear out any left overs from a previous program */
+	write_vpe_c0_status(0);
+	write_vpe_c0_cause(0);
+
+	/* take system out of configuration state */
+	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	/*
+	 * SMTC/SMVP kernels manage VPE enable independently,
+	 * but uniprocessor kernels need to turn it on, even
+	 * if that wasn't the pre-dvpe() state.
+	 */
+#ifdef CONFIG_SMP
+	evpe(vpeflags);
+#else
+	evpe(EVPE_ENABLE);
+#endif
+	emt(dmt_flag);
+	local_irq_restore(flags);
+
+	list_for_each_entry(notifier, &v->notify, list)
+		notifier->start(VPE_MODULE_MINOR);
+
+	return 0;
+}
+
+void cleanup_tc(struct tc *tc)
+{
+	unsigned long flags;
+	unsigned int mtflags, vpflags;
+	int tmp;
+
+	local_irq_save(flags);
+	mtflags = dmt();
+	vpflags = dvpe();
+	/* Put MVPE's into 'configuration state' */
+	set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	settc(tc->index);
+	tmp = read_tc_c0_tcstatus();
+
+	/* mark not allocated and not dynamically allocatable */
+	tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+	tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
+	write_tc_c0_tcstatus(tmp);
+
+	write_tc_c0_tchalt(TCHALT_H);
+	mips_ihb();
+
+	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+	evpe(vpflags);
+	emt(mtflags);
+	local_irq_restore(flags);
+}
+
+/* module wrapper entry points */
+/* give me a vpe */
+void *vpe_alloc(void)
+{
+	int i;
+	struct vpe *v;
+
+	/* find a vpe */
+	for (i = 1; i < MAX_VPES; i++) {
+		v = get_vpe(i);
+		if (v != NULL) {
+			v->state = VPE_STATE_INUSE;
+			return v;
+		}
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(vpe_alloc);
+
+/* start running from here */
+int vpe_start(void *vpe, unsigned long start)
+{
+	struct vpe *v = vpe;
+
+	v->__start = start;
+	return vpe_run(v);
+}
+EXPORT_SYMBOL(vpe_start);
+
+/* halt it for now */
+int vpe_stop(void *vpe)
+{
+	struct vpe *v = vpe;
+	struct tc *t;
+	unsigned int evpe_flags;
+
+	evpe_flags = dvpe();
+
+	t = list_entry(v->tc.next, struct tc, tc);
+	if (t != NULL) {
+		settc(t->index);
+		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
+	}
+
+	evpe(evpe_flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(vpe_stop);
+
+/* I've done with it thank you */
+int vpe_free(void *vpe)
+{
+	struct vpe *v = vpe;
+	struct tc *t;
+	unsigned int evpe_flags;
+
+	t = list_entry(v->tc.next, struct tc, tc);
+	if (t == NULL)
+		return -ENOEXEC;
+
+	evpe_flags = dvpe();
+
+	/* Put MVPE's into 'configuration state' */
+	set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	settc(t->index);
+	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
+
+	/* halt the TC */
+	write_tc_c0_tchalt(TCHALT_H);
+	mips_ihb();
+
+	/* mark the TC unallocated */
+	write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
+
+	v->state = VPE_STATE_UNUSED;
+
+	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+	evpe(evpe_flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(vpe_free);
+
+static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t len)
+{
+	struct vpe *vpe = get_vpe(aprp_cpu_index());
+	struct vpe_notifications *notifier;
+
+	list_for_each_entry(notifier, &vpe->notify, list)
+		notifier->stop(aprp_cpu_index());
+
+	release_progmem(vpe->load_addr);
+	cleanup_tc(get_tc(aprp_cpu_index()));
+	vpe_stop(vpe);
+	vpe_free(vpe);
+
+	return len;
+}
+static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);
+
+static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
+			 char *buf)
+{
+	struct vpe *vpe = get_vpe(aprp_cpu_index());
+
+	return sprintf(buf, "%d\n", vpe->ntcs);
+}
+
+static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t len)
+{
+	struct vpe *vpe = get_vpe(aprp_cpu_index());
+	unsigned long new;
+	int ret;
+
+	ret = kstrtoul(buf, 0, &new);
+	if (ret < 0)
+		return ret;
+
+	if (new == 0 || new > (hw_tcs - aprp_cpu_index()))
+		return -EINVAL;
+
+	vpe->ntcs = new;
+
+	return len;
+}
+static DEVICE_ATTR_RW(ntcs);
+
+static struct attribute *vpe_attrs[] = {
+	&dev_attr_kill.attr,
+	&dev_attr_ntcs.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(vpe);
+
+static void vpe_device_release(struct device *cd)
+{
+	kfree(cd);
+}
+
+static struct class vpe_class = {
+	.name = "vpe",
+	.owner = THIS_MODULE,
+	.dev_release = vpe_device_release,
+	.dev_groups = vpe_groups,
+};
+
+static struct device vpe_device;
+
+int __init vpe_module_init(void)
+{
+	unsigned int mtflags, vpflags;
+	unsigned long flags, val;
+	struct vpe *v = NULL;
+	struct tc *t;
+	int tc, err;
+
+	if (!cpu_has_mipsmt) {
+		pr_warn("VPE loader: not a MIPS MT capable processor\n");
+		return -ENODEV;
+	}
+
+	if (vpelimit == 0) {
+		pr_warn("No VPEs reserved for AP/SP, not initializing VPE loader\n"
+			"Pass maxvpes=<n> argument as kernel argument\n");
+
+		return -ENODEV;
+	}
+
+	if (aprp_cpu_index() == 0) {
+		pr_warn("No TCs reserved for AP/SP, not initializing VPE loader\n"
+			"Pass maxtcs=<n> argument as kernel argument\n");
+
+		return -ENODEV;
+	}
+
+	major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops);
+	if (major < 0) {
+		pr_warn("VPE loader: unable to register character device\n");
+		return major;
+	}
+
+	err = class_register(&vpe_class);
+	if (err) {
+		pr_err("vpe_class registration failed\n");
+		goto out_chrdev;
+	}
+
+	device_initialize(&vpe_device);
+	vpe_device.class	= &vpe_class,
+	vpe_device.parent	= NULL,
+	dev_set_name(&vpe_device, "vpe1");
+	vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
+	err = device_add(&vpe_device);
+	if (err) {
+		pr_err("Adding vpe_device failed\n");
+		goto out_class;
+	}
+
+	local_irq_save(flags);
+	mtflags = dmt();
+	vpflags = dvpe();
+
+	/* Put MVPE's into 'configuration state' */
+	set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	val = read_c0_mvpconf0();
+	hw_tcs = (val & MVPCONF0_PTC) + 1;
+	hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+
+	for (tc = aprp_cpu_index(); tc < hw_tcs; tc++) {
+		/*
+		 * Must re-enable multithreading temporarily or in case we
+		 * reschedule send IPIs or similar we might hang.
+		 */
+		clear_c0_mvpcontrol(MVPCONTROL_VPC);
+		evpe(vpflags);
+		emt(mtflags);
+		local_irq_restore(flags);
+		t = alloc_tc(tc);
+		if (!t) {
+			err = -ENOMEM;
+			goto out_dev;
+		}
+
+		local_irq_save(flags);
+		mtflags = dmt();
+		vpflags = dvpe();
+		set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+		/* VPE's */
+		if (tc < hw_tcs) {
+			settc(tc);
+
+			v = alloc_vpe(tc);
+			if (v == NULL) {
+				pr_warn("VPE: unable to allocate VPE\n");
+				goto out_reenable;
+			}
+
+			v->ntcs = hw_tcs - aprp_cpu_index();
+
+			/* add the tc to the list of this vpe's tc's. */
+			list_add(&t->tc, &v->tc);
+
+			/* deactivate all but vpe0 */
+			if (tc >= aprp_cpu_index()) {
+				unsigned long tmp = read_vpe_c0_vpeconf0();
+
+				tmp &= ~VPECONF0_VPA;
+
+				/* master VPE */
+				tmp |= VPECONF0_MVP;
+				write_vpe_c0_vpeconf0(tmp);
+			}
+
+			/* disable multi-threading with TC's */
+			write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() &
+						~VPECONTROL_TE);
+
+			if (tc >= vpelimit) {
+				/*
+				 * Set config to be the same as vpe0,
+				 * particularly kseg0 coherency alg
+				 */
+				write_vpe_c0_config(read_c0_config());
+			}
+		}
+
+		/* TC's */
+		t->pvpe = v;	/* set the parent vpe */
+
+		if (tc >= aprp_cpu_index()) {
+			unsigned long tmp;
+
+			settc(tc);
+
+			/* Any TC that is bound to VPE0 gets left as is - in
+			 * case we are running SMTC on VPE0. A TC that is bound
+			 * to any other VPE gets bound to VPE0, ideally I'd like
+			 * to make it homeless but it doesn't appear to let me
+			 * bind a TC to a non-existent VPE. Which is perfectly
+			 * reasonable.
+			 *
+			 * The (un)bound state is visible to an EJTAG probe so
+			 * may notify GDB...
+			 */
+			tmp = read_tc_c0_tcbind();
+			if (tmp & TCBIND_CURVPE) {
+				/* tc is bound >vpe0 */
+				write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);
+
+				t->pvpe = get_vpe(0);	/* set the parent vpe */
+			}
+
+			/* halt the TC */
+			write_tc_c0_tchalt(TCHALT_H);
+			mips_ihb();
+
+			tmp = read_tc_c0_tcstatus();
+
+			/* mark not activated and not dynamically allocatable */
+			tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+			tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
+			write_tc_c0_tcstatus(tmp);
+		}
+	}
+
+out_reenable:
+	/* release config state */
+	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	evpe(vpflags);
+	emt(mtflags);
+	local_irq_restore(flags);
+
+	return 0;
+
+out_dev:
+	device_del(&vpe_device);
+
+out_class:
+	class_unregister(&vpe_class);
+
+out_chrdev:
+	unregister_chrdev(major, VPE_MODULE_NAME);
+
+	return err;
+}
+
+void __exit vpe_module_exit(void)
+{
+	struct vpe *v, *n;
+
+	device_del(&vpe_device);
+	class_unregister(&vpe_class);
+	unregister_chrdev(major, VPE_MODULE_NAME);
+
+	/* No locking needed here */
+	list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
+		if (v->state != VPE_STATE_UNUSED)
+			release_vpe(v);
+	}
+}
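/*
 * Illustrative sketch (not part of the patch): vpe-mt.c above repeats one
 * critical-section pattern around every MVPControl update -- disable local
 * interrupts, halt the other threads (dmt) and VPEs (dvpe), set the VPC
 * configuration bit, do the register writes, then unwind in reverse
 * order.  Condensed into a single helper; kernel context is assumed and
 * the called helpers are the real ones from asm/mipsmtregs.h:
 */
#if 0	/* editor's sketch only, never compiled */
static void mt_config_section(void (*work)(void))
{
	unsigned long flags;
	unsigned int mtflags, vpflags;

	local_irq_save(flags);
	mtflags = dmt();			/* stop other TCs on this VPE */
	vpflags = dvpe();			/* stop the other VPEs */
	set_c0_mvpcontrol(MVPCONTROL_VPC);	/* enter configuration state */

	work();			/* settc()/write_tc_c0_*()/write_vpe_c0_*() */

	clear_c0_mvpcontrol(MVPCONTROL_VPC);	/* leave configuration state */
	evpe(vpflags);				/* restore VPE state */
	emt(mtflags);				/* restore MT state */
	local_irq_restore(flags);
}
#endif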
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 2d5c142bad67..11da314565cc 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -1,37 +1,22 @@
 /*
- * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-
-/*
- * VPE support module
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
 *
- * Provides support for loading a MIPS SP program on VPE1.
- * The SP environment is rather simple, no tlb's.  It needs to be relocatable
- * (or partially linked). You should initialise your stack in the startup
- * code. This loader looks for the symbol __start and sets up
- * execution to resume from there. The MIPS SDE kit contains suitable examples.
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
 *
- * To load and run, simply cat a SP 'program file' to /dev/vpe1.
- * i.e cat spapp >/dev/vpe1.
+ * VPE support module for loading a MIPS SP program into VPE1. The SP
+ * environment is rather simple since there are no TLBs. It needs
+ * to be relocatable (or partially linked). Initialize your stack in
+ * the startup code. The loader looks for the symbol __start and sets
+ * up the execution to resume from there. To load and run, simply
+ * cat an SP 'binary' to the /dev/vpe1 device.
 */
 #include <linux/kernel.h>
 #include <linux/device.h>
 #include <linux/fs.h>
 #include <linux/init.h>
-#include <asm/uaccess.h>
 #include <linux/slab.h>
 #include <linux/list.h>
 #include <linux/vmalloc.h>
@@ -46,13 +31,10 @@
 #include <asm/mipsmtregs.h>
 #include <asm/cacheflush.h>
 #include <linux/atomic.h>
-#include <asm/cpu.h>
 #include <asm/mips_mt.h>
 #include <asm/processor.h>
 #include <asm/vpe.h>
 
-typedef void *vpe_handle;
-
 #ifndef ARCH_SHF_SMALL
 #define ARCH_SHF_SMALL 0
 #endif
 
@@ -60,95 +42,15 @@ typedef void *vpe_handle;
 /* If this is set, the section belongs in the init part of the module */
 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
 
-/*
- * The number of TCs and VPEs physically available on the core
- */
-static int hw_tcs, hw_vpes;
-static char module_name[] = "vpe";
-static int major;
-static const int minor = 1;	/* fixed for now */
-
-/* grab the likely amount of memory we will need. */
-#ifdef CONFIG_MIPS_VPE_LOADER_TOM
-#define P_SIZE (2 * 1024 * 1024)
-#else
-/* add an overhead to the max kmalloc size for non-striped symbols/etc */
-#define P_SIZE (256 * 1024)
-#endif
-
-extern unsigned long physical_memsize;
-
-#define MAX_VPES 16
-#define VPE_PATH_MAX 256
-
-enum vpe_state {
-	VPE_STATE_UNUSED = 0,
-	VPE_STATE_INUSE,
-	VPE_STATE_RUNNING
-};
-
-enum tc_state {
-	TC_STATE_UNUSED = 0,
-	TC_STATE_INUSE,
-	TC_STATE_RUNNING,
-	TC_STATE_DYNAMIC
-};
-
-struct vpe {
-	enum vpe_state state;
-
-	/* (device) minor associated with this vpe */
-	int minor;
-
-	/* elfloader stuff */
-	void *load_addr;
-	unsigned long len;
-	char *pbuffer;
-	unsigned long plen;
-	char cwd[VPE_PATH_MAX];
-
-	unsigned long __start;
-
-	/* tc's associated with this vpe */
-	struct list_head tc;
-
-	/* The list of vpe's */
-	struct list_head list;
-
-	/* shared symbol address */
-	void *shared_ptr;
-
-	/* the list of who wants to know when something major happens */
-	struct list_head notify;
-
-	unsigned int ntcs;
-};
-
-struct tc {
-	enum tc_state state;
-	int index;
-
-	struct vpe *pvpe;	/* parent VPE */
-	struct list_head tc;	/* The list of TC's with this VPE */
-	struct list_head list;	/* The global list of tc's */
-};
-
-struct {
-	spinlock_t vpe_list_lock;
-	struct list_head vpe_list;	/* Virtual processing elements */
-	spinlock_t tc_list_lock;
-	struct list_head tc_list;	/* Thread contexts */
-} vpecontrol = {
+struct vpe_control vpecontrol = {
 	.vpe_list_lock	= __SPIN_LOCK_UNLOCKED(vpe_list_lock),
 	.vpe_list	= LIST_HEAD_INIT(vpecontrol.vpe_list),
 	.tc_list_lock	= __SPIN_LOCK_UNLOCKED(tc_list_lock),
 	.tc_list	= LIST_HEAD_INIT(vpecontrol.tc_list)
 };
 
-static void release_progmem(void *ptr);
-
 /* get the vpe associated with this minor */
-static struct vpe *get_vpe(int minor)
+struct vpe *get_vpe(int minor)
 {
 	struct vpe *res, *v;
 
@@ -158,7 +60,7 @@ static struct vpe *get_vpe(int minor)
 	res = NULL;
 	spin_lock(&vpecontrol.vpe_list_lock);
 	list_for_each_entry(v, &vpecontrol.vpe_list, list) {
-		if (v->minor == minor) {
+		if (v->minor == VPE_MODULE_MINOR) {
 			res = v;
 			break;
 		}
@@ -169,7 +71,7 @@ static struct vpe *get_vpe(int minor)
 }
 
 /* get the vpe associated with this minor */
-static struct tc *get_tc(int index)
+struct tc *get_tc(int index)
 {
 	struct tc *res, *t;
 
@@ -187,12 +89,13 @@ static struct tc *get_tc(int index)
 }
 
 /* allocate a vpe and associate it with this minor (or index) */
-static struct vpe *alloc_vpe(int minor)
+struct vpe *alloc_vpe(int minor)
 {
 	struct vpe *v;
 
-	if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL)
-		return NULL;
+	v = kzalloc(sizeof(struct vpe), GFP_KERNEL);
+	if (v == NULL)
+		goto out;
 
 	INIT_LIST_HEAD(&v->tc);
 	spin_lock(&vpecontrol.vpe_list_lock);
@@ -200,17 +103,19 @@ static struct vpe *alloc_vpe(int minor)
 	spin_unlock(&vpecontrol.vpe_list_lock);
 
 	INIT_LIST_HEAD(&v->notify);
-	v->minor = minor;
+	v->minor = VPE_MODULE_MINOR;
 
+out:
 	return v;
 }
 
 /* allocate a tc. At startup only tc0 is running, all other can be halted. */
-static struct tc *alloc_tc(int index)
+struct tc *alloc_tc(int index)
 {
 	struct tc *tc;
 
-	if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL)
+	tc = kzalloc(sizeof(struct tc), GFP_KERNEL);
+	if (tc == NULL)
 		goto out;
 
 	INIT_LIST_HEAD(&tc->tc);
@@ -225,7 +130,7 @@ out:
 }
 
 /* clean up and free everything */
-static void release_vpe(struct vpe *v)
+void release_vpe(struct vpe *v)
 {
 	list_del(&v->list);
 	if (v->load_addr)
@@ -233,28 +138,8 @@ static void release_vpe(struct vpe *v)
 	kfree(v);
 }
 
-static void __maybe_unused dump_mtregs(void)
-{
-	unsigned long val;
-
-	val = read_c0_config3();
-	printk("config3 0x%lx MT %ld\n", val,
-	       (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT);
-
-	val = read_c0_mvpcontrol();
-	printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val,
-	       (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT,
-	       (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT,
-	       (val & MVPCONTROL_EVP));
-
-	val = read_c0_mvpconf0();
-	printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val,
-	       (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT,
-	       val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
-}
-
-/* Find some VPE program space */
-static void *alloc_progmem(unsigned long len)
+/* Find some VPE program space */
+void *alloc_progmem(unsigned long len)
 {
 	void *addr;
 
@@ -273,7 +158,7 @@ static void *alloc_progmem(unsigned long len)
 	return addr;
 }
 
-static void release_progmem(void *ptr)
+void release_progmem(void *ptr)
 {
 #ifndef CONFIG_MIPS_VPE_LOADER_TOM
 	kfree(ptr);
@@ -281,7 +166,7 @@ static void release_progmem(void *ptr)
 }
 
 /* Update size with this section: return offset. */
-static long get_offset(unsigned long *size, Elf_Shdr * sechdr)
+static long get_offset(unsigned long *size, Elf_Shdr *sechdr)
 {
 	long ret;
 
@@ -294,8 +179,8 @@ static long get_offset(unsigned long *size, Elf_Shdr * sechdr)
   might -- code, read-only data, read-write data, small data.  Tally
   sizes, and place the offsets into sh_entsize fields: high bit means it
   belongs in init. */
-static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
-			    Elf_Shdr * sechdrs, const char *secstrings)
+static void layout_sections(struct module *mod, const Elf_Ehdr *hdr,
+			    Elf_Shdr *sechdrs, const char *secstrings)
 {
 	static unsigned long const masks[][2] = {
 		/* NOTE: all executable code must be the first section
@@ -315,7 +200,6 @@ static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
 		for (i = 0; i < hdr->e_shnum; ++i) {
 			Elf_Shdr *s = &sechdrs[i];
 
-			//  || strncmp(secstrings + s->sh_name, ".init", 5) == 0)
 			if ((s->sh_flags & masks[m][0]) != masks[m][0]
 			    || (s->sh_flags & masks[m][1])
 			    || s->sh_entsize != ~0UL)
@@ -330,7 +214,6 @@ static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
 	}
 }
 
-
 /* from module-elf32.c, but subverted a little */
 
 struct mips_hi16 {
@@ -353,20 +236,18 @@ static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
 {
 	int rel;
 
-	if( !(*location & 0xffff) ) {
+	if (!(*location & 0xffff)) {
 		rel = (int)v - gp_addr;
-	}
-	else {
+	} else {
 		/* .sbss + gp(relative) + offset */
 		/* kludge! */
 		rel = (int)(short)((int)v + gp_offs +
 				   (int)(short)(*location & 0xffff) - gp_addr);
 	}
 
-	if( (rel > 32768) || (rel < -32768) ) {
-		printk(KERN_DEBUG "VPE loader: apply_r_mips_gprel16: "
-		       "relative address 0x%x out of range of gp register\n",
-		       rel);
+	if ((rel > 32768) || (rel < -32768)) {
+		pr_debug("VPE loader: apply_r_mips_gprel16: relative address 0x%x out of range of gp register\n",
+			 rel);
 		return -ENOEXEC;
 	}
 
@@ -380,12 +261,12 @@ static int apply_r_mips_pc16(struct module *me, uint32_t *location,
 {
 	int rel;
 	rel = (((unsigned int)v - (unsigned int)location));
-	rel >>= 2;	// because the offset is in _instructions_ not bytes.
-	rel -= 1;	// and one instruction less due to the branch delay slot.
+	rel >>= 2;	/* because the offset is in _instructions_ not bytes. */
+	rel -= 1;	/* and one instruction less due to the branch delay slot. */
 
-	if( (rel > 32768) || (rel < -32768) ) {
-		printk(KERN_DEBUG "VPE loader: "
-		       "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
+	if ((rel > 32768) || (rel < -32768)) {
+		pr_debug("VPE loader: apply_r_mips_pc16: relative address out of range 0x%x\n",
+			 rel);
 		return -ENOEXEC;
 	}
 
@@ -406,8 +287,7 @@ static int apply_r_mips_26(struct module *me, uint32_t *location,
 	Elf32_Addr v)
 {
 	if (v % 4) {
-		printk(KERN_DEBUG "VPE loader: apply_r_mips_26 "
-		       " unaligned relocation\n");
+		pr_debug("VPE loader: apply_r_mips_26: unaligned relocation\n");
 		return -ENOEXEC;
 	}
 
@@ -438,7 +318,7 @@ static int apply_r_mips_hi16(struct module *me, uint32_t *location,
 	 * the carry we need to add.  Save the information, and let LO16 do the
 	 * actual relocation.
 	 */
-	n = kmalloc(sizeof *n, GFP_KERNEL);
+	n = kmalloc(sizeof(*n), GFP_KERNEL);
 	if (!n)
 		return -ENOMEM;
 
@@ -470,9 +350,7 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location,
 		 * The value for the HI16 had best be the same.
 		 */
 		if (v != l->value) {
-			printk(KERN_DEBUG "VPE loader: "
-			       "apply_r_mips_lo16/hi16: \t"
-			       "inconsistent value information\n");
+			pr_debug("VPE loader: apply_r_mips_lo16/hi16: inconsistent value information\n");
 			goto out_free;
 		}
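/*
 * Illustrative sketch (not part of the patch): the HI16/LO16 handling
 * above queues every R_MIPS_HI16 until its partner R_MIPS_LO16 arrives,
 * because the lo half is sign-extended by the CPU.  For a target address
 * of 0x00018000, lui/addiu must encode hi = 0x0002 and lo = 0x8000, since
 * (0x0002 << 16) + (short)0x8000 = 0x20000 - 0x8000 = 0x00018000.  The
 * standard carry computation, checked stand-alone:
 */
#include <assert.h>

int main(void)
{
	unsigned int v = 0x00018000;		/* relocation target */
	/* adding 0x8000 first pre-compensates the sign extension of lo */
	unsigned short hi = (v + 0x8000) >> 16;	/* 0x0002, not 0x0001 */
	short lo = (short)(v & 0xffff);		/* (short)0x8000 == -32768 */

	assert(hi == 0x0002);
	assert((unsigned int)((hi << 16) + lo) == v);
	return 0;
}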
@@ -568,20 +446,19 @@ static int apply_relocations(Elf32_Shdr *sechdrs,
 			+ ELF32_R_SYM(r_info);
 
 		if (!sym->st_value) {
-			printk(KERN_DEBUG "%s: undefined weak symbol %s\n",
-			       me->name, strtab + sym->st_name);
+			pr_debug("%s: undefined weak symbol %s\n",
+				 me->name, strtab + sym->st_name);
 			/* just print the warning, dont barf */
 		}
 
 		v = sym->st_value;
 
 		res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
-		if( res ) {
+		if (res) {
 			char *r = rstrs[ELF32_R_TYPE(r_info)];
-			printk(KERN_WARNING "VPE loader: .text+0x%x "
-			       "relocation type %s for symbol \"%s\" failed\n",
-			       rel[i].r_offset, r ? r : "UNKNOWN",
-			       strtab + sym->st_name);
+			pr_warn("VPE loader: .text+0x%x relocation type %s for symbol \"%s\" failed\n",
+				rel[i].r_offset, r ? r : "UNKNOWN",
+				strtab + sym->st_name);
 			return res;
 		}
 	}
@@ -596,10 +473,8 @@ static inline void save_gp_address(unsigned int secbase, unsigned int rel)
 }
 /* end module-elf32.c */
 
-
-
 /* Change all symbols so that sh_value encodes the pointer directly. */
-static void simplify_symbols(Elf_Shdr * sechdrs,
+static void simplify_symbols(Elf_Shdr *sechdrs,
 			     unsigned int symindex,
 			     const char *strtab,
 			     const char *secstrings,
@@ -640,18 +515,16 @@ static void simplify_symbols(Elf_Shdr * sechdrs,
 			break;
 
 		case SHN_MIPS_SCOMMON:
-			printk(KERN_DEBUG "simplify_symbols: ignoring SHN_MIPS_SCOMMON "
-			       "symbol <%s> st_shndx %d\n", strtab + sym[i].st_name,
-			       sym[i].st_shndx);
-			// .sbss section
+			pr_debug("simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n",
+				 strtab + sym[i].st_name, sym[i].st_shndx);
+			/* .sbss section */
 			break;
 
 		default:
 			secbase = sechdrs[sym[i].st_shndx].sh_addr;
 
-			if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) {
+			if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0)
 				save_gp_address(secbase, sym[i].st_value);
-			}
 
 			sym[i].st_value += secbase;
 			break;
@@ -660,142 +533,21 @@ static void simplify_symbols(Elf_Shdr * sechdrs,
 }
 
 #ifdef DEBUG_ELFLOADER
-static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex,
+static void dump_elfsymbols(Elf_Shdr *sechdrs, unsigned int symindex,
 			    const char *strtab, struct module *mod)
 {
 	Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
 	unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
 
-	printk(KERN_DEBUG "dump_elfsymbols: n %d\n", n);
+	pr_debug("dump_elfsymbols: n %d\n", n);
 	for (i = 1; i < n; i++) {
-		printk(KERN_DEBUG " i %d name <%s> 0x%x\n", i,
-		       strtab + sym[i].st_name, sym[i].st_value);
+		pr_debug(" i %d name <%s> 0x%x\n", i, strtab + sym[i].st_name,
+			 sym[i].st_value);
 	}
 }
 #endif
 
-/* We are prepared so configure and start the VPE... */
-static int vpe_run(struct vpe * v)
-{
-	unsigned long flags, val, dmt_flag;
-	struct vpe_notifications *n;
-	unsigned int vpeflags;
-	struct tc *t;
-
-	/* check we are the Master VPE */
-	local_irq_save(flags);
-	val = read_c0_vpeconf0();
-	if (!(val & VPECONF0_MVP)) {
-		printk(KERN_WARNING
-		       "VPE loader: only Master VPE's are allowed to configure MT\n");
-		local_irq_restore(flags);
-
-		return -1;
-	}
-
-	dmt_flag = dmt();
-	vpeflags = dvpe();
-
-	if (list_empty(&v->tc)) {
-		evpe(vpeflags);
-		emt(dmt_flag);
-		local_irq_restore(flags);
-
-		printk(KERN_WARNING
-		       "VPE loader: No TC's associated with VPE %d\n",
-		       v->minor);
-
-		return -ENOEXEC;
-	}
-
-	t = list_first_entry(&v->tc, struct tc, tc);
-
-	/* Put MVPE's into 'configuration state' */
-	set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	settc(t->index);
-
-	/* should check it is halted, and not activated */
-	if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) {
-		evpe(vpeflags);
-		emt(dmt_flag);
-		local_irq_restore(flags);
-
-		printk(KERN_WARNING "VPE loader: TC %d is already active!\n",
-		       t->index);
-
-		return -ENOEXEC;
-	}
-
-	/* Write the address we want it to start running from in the TCPC register. */
-	write_tc_c0_tcrestart((unsigned long)v->__start);
-	write_tc_c0_tccontext((unsigned long)0);
-
-	/*
-	 * Mark the TC as activated, not interrupt exempt and not dynamically
-	 * allocatable
-	 */
-	val = read_tc_c0_tcstatus();
-	val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
-	write_tc_c0_tcstatus(val);
-
-	write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
-
-	/*
-	 * The sde-kit passes 'memsize' to __start in $a3, so set something
-	 * here...  Or set $a3 to zero and define DFLT_STACK_SIZE and
-	 * DFLT_HEAP_SIZE when you compile your program
-	 */
-	mttgpr(6, v->ntcs);
-	mttgpr(7, physical_memsize);
-
-	/* set up VPE1 */
-	/*
-	 * bind the TC to VPE 1 as late as possible so we only have the final
-	 * VPE registers to set up, and so an EJTAG probe can trigger on it
-	 */
-	write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
-
-	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
-
-	back_to_back_c0_hazard();
-
-	/* Set up the XTC bit in vpeconf0 to point at our tc */
-	write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
-			       | (t->index << VPECONF0_XTC_SHIFT));
-
-	back_to_back_c0_hazard();
-
-	/* enable this VPE */
-	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
-
-	/* clear out any left overs from a previous program */
-	write_vpe_c0_status(0);
-	write_vpe_c0_cause(0);
-
-	/* take system out of configuration state */
-	clear_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	/*
-	 * SMTC/SMVP kernels manage VPE enable independently,
-	 * but uniprocessor kernels need to turn it on, even
-	 * if that wasn't the pre-dvpe() state.
-	 */
-#ifdef CONFIG_SMP
-	evpe(vpeflags);
-#else
-	evpe(EVPE_ENABLE);
-#endif
-	emt(dmt_flag);
-	local_irq_restore(flags);
-
-	list_for_each_entry(n, &v->notify, list)
-		n->start(minor);
-
-	return 0;
-}
-
-static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
+static int find_vpe_symbols(struct vpe *v, Elf_Shdr *sechdrs,
 				      unsigned int symindex, const char *strtab,
 				      struct module *mod)
 {
@@ -803,16 +555,14 @@ static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
 	unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
 
 	for (i = 1; i < n; i++) {
-		if (strcmp(strtab + sym[i].st_name, "__start") == 0) {
+		if (strcmp(strtab + sym[i].st_name, "__start") == 0)
 			v->__start = sym[i].st_value;
-		}
 
-		if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0) {
+		if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0)
 			v->shared_ptr = (void *)sym[i].st_value;
-		}
 	}
 
-	if ( (v->__start == 0) || (v->shared_ptr == NULL))
+	if ((v->__start == 0) || (v->shared_ptr == NULL))
 		return -1;
 
 	return 0;
@@ -823,14 +573,14 @@ static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
 * contents of the program (p)buffer performing relocatations/etc, free's it
 * when finished.
 */
-static int vpe_elfload(struct vpe * v)
+static int vpe_elfload(struct vpe *v)
 {
 	Elf_Ehdr *hdr;
 	Elf_Shdr *sechdrs;
 	long err = 0;
 	char *secstrings, *strtab = NULL;
 	unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
-	struct module mod;	// so we can re-use the relocations code
+	struct module mod;	/* so we can re-use the relocations code */
 
 	memset(&mod, 0, sizeof(struct module));
 	strcpy(mod.name, "VPE loader");
@@ -844,8 +594,7 @@ static int vpe_elfload(struct vpe * v)
 	    || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
 	    || !elf_check_arch(hdr)
 	    || hdr->e_shentsize != sizeof(*sechdrs)) {
-		printk(KERN_WARNING
-		       "VPE loader: program wrong arch or weird elf version\n");
+		pr_warn("VPE loader: program wrong arch or weird elf version\n");
 		return -ENOEXEC;
 	}
 
@@ -854,8 +603,7 @@ static int vpe_elfload(struct vpe * v)
 		relocate = 1;
 
 	if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
-		printk(KERN_ERR "VPE loader: program length %u truncated\n",
-		       len);
+		pr_err("VPE loader: program length %u truncated\n", len);
 		return -ENOEXEC;
 	}
 
@@ -870,22 +618,24 @@ static int vpe_elfload(struct vpe * v)
 	if (relocate) {
 		for (i = 1; i < hdr->e_shnum; i++) {
-			if (sechdrs[i].sh_type != SHT_NOBITS
-			    && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) {
-				printk(KERN_ERR "VPE program length %u truncated\n",
+			if ((sechdrs[i].sh_type != SHT_NOBITS) &&
+			    (len < sechdrs[i].sh_offset + sechdrs[i].sh_size)) {
+				pr_err("VPE program length %u truncated\n",
 				       len);
 				return -ENOEXEC;
 			}
 
 			/* Mark all sections sh_addr with their address in the
 			   temporary image. */
-			sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
+			sechdrs[i].sh_addr = (size_t) hdr +
+				sechdrs[i].sh_offset;
 
 			/* Internal symbols and strings. */
 			if (sechdrs[i].sh_type == SHT_SYMTAB) {
 				symindex = i;
 				strindex = sechdrs[i].sh_link;
-				strtab = (char *)hdr + sechdrs[strindex].sh_offset;
+				strtab = (char *)hdr +
+					sechdrs[strindex].sh_offset;
 			}
 		}
 		layout_sections(&mod, hdr, sechdrs, secstrings);
@@ -912,8 +662,9 @@ static int vpe_elfload(struct vpe * v)
 			/* Update sh_addr to point to copy in image. */
 			sechdrs[i].sh_addr = (unsigned long)dest;
 
-			printk(KERN_DEBUG " section sh_name %s sh_addr 0x%x\n",
-			       secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr);
+			pr_debug(" section sh_name %s sh_addr 0x%x\n",
+				 secstrings + sechdrs[i].sh_name,
+				 sechdrs[i].sh_addr);
 		}
 
 		/* Fix up syms, so that st_value is a pointer to location. */
@@ -934,17 +685,18 @@ static int vpe_elfload(struct vpe * v)
 				continue;
 
 			if (sechdrs[i].sh_type == SHT_REL)
-				err = apply_relocations(sechdrs, strtab, symindex, i,
-							&mod);
+				err = apply_relocations(sechdrs, strtab,
+							symindex, i, &mod);
 			else if (sechdrs[i].sh_type == SHT_RELA)
-				err = apply_relocate_add(sechdrs, strtab, symindex, i,
-							 &mod);
+				err = apply_relocate_add(sechdrs, strtab,
+							 symindex, i, &mod);
 			if (err < 0)
 				return err;
 		}
 	} else {
-		struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff);
+		struct elf_phdr *phdr = (struct elf_phdr *)
+			((char *)hdr + hdr->e_phoff);
 
 		for (i = 0; i < hdr->e_phnum; i++) {
 			if (phdr->p_type == PT_LOAD) {
@@ -962,11 +714,15 @@ static int vpe_elfload(struct vpe * v)
 			if (sechdrs[i].sh_type == SHT_SYMTAB) {
 				symindex = i;
 				strindex = sechdrs[i].sh_link;
-				strtab = (char *)hdr + sechdrs[strindex].sh_offset;
+				strtab = (char *)hdr +
+					sechdrs[strindex].sh_offset;
 
-				/* mark the symtab's address for when we try to find the
-				   magic symbols */
-				sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
+				/*
+				 * mark symtab's address for when we try
+				 * to find the magic symbols
+				 */
+				sechdrs[i].sh_addr = (size_t) hdr +
+					sechdrs[i].sh_offset;
 			}
 		}
 	}
@@ -977,53 +733,19 @@ static int vpe_elfload(struct vpe * v)
 
 	if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
 		if (v->__start == 0) {
-			printk(KERN_WARNING "VPE loader: program does not contain "
-			       "a __start symbol\n");
+			pr_warn("VPE loader: program does not contain a __start symbol\n");
 			return -ENOEXEC;
 		}
 
 		if (v->shared_ptr == NULL)
-			printk(KERN_WARNING "VPE loader: "
-			       "program does not contain vpe_shared symbol.\n"
-			       " Unable to use AMVP (AP/SP) facilities.\n");
+			pr_warn("VPE loader: program does not contain vpe_shared symbol.\n"
+				" Unable to use AMVP (AP/SP) facilities.\n");
 	}
 
-	printk(" elf loaded\n");
+	pr_info(" elf loaded\n");
 
 	return 0;
 }
 
-static void cleanup_tc(struct tc *tc)
-{
-	unsigned long flags;
-	unsigned int mtflags, vpflags;
-	int tmp;
-
-	local_irq_save(flags);
-	mtflags = dmt();
-	vpflags = dvpe();
-	/* Put MVPE's into 'configuration state' */
-	set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	settc(tc->index);
-	tmp = read_tc_c0_tcstatus();
-
-	/* mark not allocated and not dynamically allocatable */
-	tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
-	tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
-	write_tc_c0_tcstatus(tmp);
-
-	write_tc_c0_tchalt(TCHALT_H);
-	mips_ihb();
-
-	/* bind it to anything other than VPE1 */
-//	write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE
-
-	clear_c0_mvpcontrol(MVPCONTROL_VPC);
-	evpe(vpflags);
-	emt(mtflags);
-	local_irq_restore(flags);
-}
-
 static int getcwd(char *buff, int size)
 {
 	mm_segment_t old_fs;
@@ -1043,39 +765,39 @@ static int vpe_open(struct inode *inode, struct file *filp)
 static int vpe_open(struct inode *inode, struct file *filp)
 {
 	enum vpe_state state;
-	struct vpe_notifications *not;
+	struct vpe_notifications *notifier;
 	struct vpe *v;
 	int ret;
 
-	if (minor != iminor(inode)) {
+	if (VPE_MODULE_MINOR != iminor(inode)) {
 		/* assume only 1 device at the moment. */
-		pr_warning("VPE loader: only vpe1 is supported\n");
+		pr_warn("VPE loader: only vpe1 is supported\n");
 
 		return -ENODEV;
 	}
 
-	if ((v = get_vpe(tclimit)) == NULL) {
-		pr_warning("VPE loader: unable to get vpe\n");
+	v = get_vpe(aprp_cpu_index());
+	if (v == NULL) {
+		pr_warn("VPE loader: unable to get vpe\n");
 
		return -ENODEV;
 	}
 
 	state = xchg(&v->state, VPE_STATE_INUSE);
 	if (state != VPE_STATE_UNUSED) {
-		printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n");
+		pr_debug("VPE loader: tc in use dumping regs\n");
 
-		list_for_each_entry(not, &v->notify, list) {
-			not->stop(tclimit);
-		}
+		list_for_each_entry(notifier, &v->notify, list)
+			notifier->stop(aprp_cpu_index());
 
 		release_progmem(v->load_addr);
-		cleanup_tc(get_tc(tclimit));
+		cleanup_tc(get_tc(aprp_cpu_index()));
 	}
 
 	/* this of-course trashes what was there before... */
 	v->pbuffer = vmalloc(P_SIZE);
 	if (!v->pbuffer) {
-		pr_warning("VPE loader: unable to allocate memory\n");
+		pr_warn("VPE loader: unable to allocate memory\n");
 		return -ENOMEM;
 	}
 	v->plen = P_SIZE;
@@ -1085,7 +807,7 @@ static int vpe_open(struct inode *inode, struct file *filp)
 	v->cwd[0] = 0;
 	ret = getcwd(v->cwd, VPE_PATH_MAX);
 	if (ret < 0)
-		printk(KERN_WARNING "VPE loader: open, getcwd returned %d\n", ret);
+		pr_warn("VPE loader: open, getcwd returned %d\n", ret);
 
 	v->shared_ptr = NULL;
 	v->__start = 0;
@@ -1099,20 +821,20 @@ static int vpe_release(struct inode *inode, struct file *filp)
 	Elf_Ehdr *hdr;
 	int ret = 0;
 
-	v = get_vpe(tclimit);
+	v = get_vpe(aprp_cpu_index());
 	if (v == NULL)
 		return -ENODEV;
 
 	hdr = (Elf_Ehdr *) v->pbuffer;
 	if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
-		if (vpe_elfload(v) >= 0) {
+		if ((vpe_elfload(v) >= 0) && vpe_run) {
 			vpe_run(v);
 		} else {
-			printk(KERN_WARNING "VPE loader: ELF load failed.\n");
+			pr_warn("VPE loader: ELF load failed.\n");
 			ret = -ENOEXEC;
 		}
 	} else {
-		printk(KERN_WARNING "VPE loader: only elf files are supported\n");
+		pr_warn("VPE loader: only elf files are supported\n");
 		ret = -ENOEXEC;
 	}
 
@@ -1130,22 +852,22 @@ static int vpe_release(struct inode *inode, struct file *filp)
 	return ret;
 }
 
-static ssize_t vpe_write(struct file *file, const char __user * buffer,
-			 size_t count, loff_t * ppos)
+static ssize_t vpe_write(struct file *file, const char __user *buffer,
+			 size_t count, loff_t *ppos)
 {
 	size_t ret = count;
 	struct vpe *v;
 
-	if (iminor(file_inode(file)) != minor)
+	if (iminor(file_inode(file)) != VPE_MODULE_MINOR)
 		return -ENODEV;
 
-	v = get_vpe(tclimit);
+	v = get_vpe(aprp_cpu_index());
+
 	if (v == NULL)
 		return -ENODEV;
 
 	if ((count + v->len) > v->plen) {
-		printk(KERN_WARNING
-		       "VPE loader: elf size too big. Perhaps strip uneeded symbols\n");
+		pr_warn("VPE loader: elf size too big. Perhaps strip unneeded symbols\n");
 		return -ENOMEM;
 	}
 
@@ -1157,7 +879,7 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer,
 	return ret;
 }
 
-static const struct file_operations vpe_fops = {
+const struct file_operations vpe_fops = {
 	.owner = THIS_MODULE,
 	.open = vpe_open,
 	.release = vpe_release,
@@ -1165,396 +887,40 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer,
 	.llseek = noop_llseek,
 };
 
-/* module wrapper entry points */
-/* give me a vpe */
-vpe_handle vpe_alloc(void)
-{
-	int i;
-	struct vpe *v;
-
-	/* find a vpe */
-	for (i = 1; i < MAX_VPES; i++) {
-		if ((v = get_vpe(i)) != NULL) {
-			v->state = VPE_STATE_INUSE;
-			return v;
-		}
-	}
-	return NULL;
-}
-
-EXPORT_SYMBOL(vpe_alloc);
-
-/* start running from here */
-int vpe_start(vpe_handle vpe, unsigned long start)
-{
-	struct vpe *v = vpe;
-
-	v->__start = start;
-	return vpe_run(v);
-}
-
-EXPORT_SYMBOL(vpe_start);
-
-/* halt it for now */
-int vpe_stop(vpe_handle vpe)
-{
-	struct vpe *v = vpe;
-	struct tc *t;
-	unsigned int evpe_flags;
-
-	evpe_flags = dvpe();
-
-	if ((t = list_entry(v->tc.next, struct tc, tc)) != NULL) {
-
-		settc(t->index);
-		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
-	}
-
-	evpe(evpe_flags);
-
-	return 0;
-}
-
-EXPORT_SYMBOL(vpe_stop);
-
-/* I've done with it thank you */
-int vpe_free(vpe_handle vpe)
-{
-	struct vpe *v = vpe;
-	struct tc *t;
-	unsigned int evpe_flags;
-
-	if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
-		return -ENOEXEC;
-	}
-
-	evpe_flags = dvpe();
-
-	/* Put MVPE's into 'configuration state' */
-	set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	settc(t->index);
-	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
-
-	/* halt the TC */
-	write_tc_c0_tchalt(TCHALT_H);
-	mips_ihb();
-
-	/* mark the TC unallocated */
-	write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
-
-	v->state = VPE_STATE_UNUSED;
-
-	clear_c0_mvpcontrol(MVPCONTROL_VPC);
-	evpe(evpe_flags);
-
-	return 0;
-}
-
-EXPORT_SYMBOL(vpe_free);
-
 void *vpe_get_shared(int index)
 {
-	struct vpe *v;
+	struct vpe *v = get_vpe(index);
 
-	if ((v = get_vpe(index)) == NULL)
+	if (v == NULL)
 		return NULL;
 
 	return v->shared_ptr;
 }
-
 EXPORT_SYMBOL(vpe_get_shared);
 
 int vpe_notify(int index, struct vpe_notifications *notify)
 {
-	struct vpe *v;
+	struct vpe *v = get_vpe(index);
 
-	if ((v = get_vpe(index)) == NULL)
+	if (v == NULL)
 		return -1;
 
 	list_add(&notify->list, &v->notify);
 	return 0;
 }
-
 EXPORT_SYMBOL(vpe_notify);
 
 char *vpe_getcwd(int index)
 {
-	struct vpe *v;
+	struct vpe *v = get_vpe(index);
 
-	if ((v = get_vpe(index)) == NULL)
+	if (v == NULL)
 		return NULL;
 
 	return v->cwd;
 }
-
 EXPORT_SYMBOL(vpe_getcwd);
 
-static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
-			  const char *buf, size_t len)
-{
-	struct vpe *vpe = get_vpe(tclimit);
-	struct vpe_notifications *not;
-
-	list_for_each_entry(not, &vpe->notify, list) {
-		not->stop(tclimit);
-	}
-
-	release_progmem(vpe->load_addr);
-	cleanup_tc(get_tc(tclimit));
-	vpe_stop(vpe);
-	vpe_free(vpe);
-
-	return len;
-}
-static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);
-
-static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
-			 char *buf)
-{
-	struct vpe *vpe = get_vpe(tclimit);
-
-	return sprintf(buf, "%d\n", vpe->ntcs);
-}
-
-static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
-			  const char *buf, size_t len)
-{
-	struct vpe *vpe = get_vpe(tclimit);
-	unsigned long new;
-	char *endp;
-
-	new = simple_strtoul(buf, &endp, 0);
-	if (endp == buf)
-		goto out_einval;
-
-	if (new == 0 || new > (hw_tcs - tclimit))
-		goto out_einval;
-
-	vpe->ntcs = new;
-
-	return len;
-
-out_einval:
-	return -EINVAL;
-}
-static DEVICE_ATTR_RW(ntcs);
-
-static struct attribute *vpe_attrs[] = {
-	&dev_attr_kill.attr,
-	&dev_attr_ntcs.attr,
-	NULL,
-};
-ATTRIBUTE_GROUPS(vpe);
-
-static void vpe_device_release(struct device *cd)
-{
-	kfree(cd);
-}
-
-struct class vpe_class = {
-	.name = "vpe",
-	.owner = THIS_MODULE,
-	.dev_release = vpe_device_release,
-	.dev_groups = vpe_groups,
-};
-
-struct device vpe_device;
-
-static int __init vpe_module_init(void)
-{
-	unsigned int mtflags, vpflags;
-	unsigned long flags, val;
-	struct vpe *v = NULL;
-	struct tc *t;
-	int tc, err;
-
-	if (!cpu_has_mipsmt) {
-		printk("VPE loader: not a MIPS MT capable processor\n");
-		return -ENODEV;
-	}
-
-	if (vpelimit == 0) {
-		printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
-		       "initializing VPE loader.\nPass maxvpes=<n> argument as "
-		       "kernel argument\n");
-
-		return -ENODEV;
-	}
-
-	if (tclimit == 0) {
-		printk(KERN_WARNING "No TCs reserved for AP/SP, not "
-		       "initializing VPE loader.\nPass maxtcs=<n> argument as "
-		       "kernel argument\n");
-
-		return -ENODEV;
-	}
-
-	major = register_chrdev(0, module_name, &vpe_fops);
-	if (major < 0) {
-		printk("VPE loader: unable to register character device\n");
-		return major;
-	}
-
-	err = class_register(&vpe_class);
-	if (err) {
-		printk(KERN_ERR "vpe_class registration failed\n");
-		goto out_chrdev;
-	}
-
-	device_initialize(&vpe_device);
-	vpe_device.class	= &vpe_class,
-	vpe_device.parent	= NULL,
-	dev_set_name(&vpe_device, "vpe1");
-	vpe_device.devt = MKDEV(major, minor);
-	err = device_add(&vpe_device);
-	if (err) {
-		printk(KERN_ERR "Adding vpe_device failed\n");
-		goto out_class;
-	}
-
-	local_irq_save(flags);
-	mtflags = dmt();
-	vpflags = dvpe();
-
-	/* Put MVPE's into 'configuration state' */
-	set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	/* dump_mtregs(); */
-
-	val = read_c0_mvpconf0();
-	hw_tcs = (val & MVPCONF0_PTC) + 1;
-	hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
-
-	for (tc = tclimit; tc < hw_tcs; tc++) {
-		/*
-		 * Must re-enable multithreading temporarily or in case we
-		 * reschedule send IPIs or similar we might hang.
-		 */
-		clear_c0_mvpcontrol(MVPCONTROL_VPC);
-		evpe(vpflags);
-		emt(mtflags);
-		local_irq_restore(flags);
-		t = alloc_tc(tc);
-		if (!t) {
-			err = -ENOMEM;
-			goto out;
-		}
-
-		local_irq_save(flags);
-		mtflags = dmt();
-		vpflags = dvpe();
-		set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-		/* VPE's */
-		if (tc < hw_tcs) {
-			settc(tc);
-
-			if ((v = alloc_vpe(tc)) == NULL) {
-				printk(KERN_WARNING "VPE: unable to allocate VPE\n");
-
-				goto out_reenable;
-			}
-
-			v->ntcs = hw_tcs - tclimit;
-
-			/* add the tc to the list of this vpe's tc's. */
-			list_add(&t->tc, &v->tc);
-
-			/* deactivate all but vpe0 */
-			if (tc >= tclimit) {
-				unsigned long tmp = read_vpe_c0_vpeconf0();
-
-				tmp &= ~VPECONF0_VPA;
-
-				/* master VPE */
-				tmp |= VPECONF0_MVP;
-				write_vpe_c0_vpeconf0(tmp);
-			}
-
-			/* disable multi-threading with TC's */
-			write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
-
-			if (tc >= vpelimit) {
-				/*
-				 * Set config to be the same as vpe0,
-				 * particularly kseg0 coherency alg
-				 */
-				write_vpe_c0_config(read_c0_config());
-			}
-		}
-
-		/* TC's */
-		t->pvpe = v;	/* set the parent vpe */
-
-		if (tc >= tclimit) {
-			unsigned long tmp;
-
-			settc(tc);
-
-			/* Any TC that is bound to VPE0 gets left as is - in case
-			   we are running SMTC on VPE0. A TC that is bound to any
-			   other VPE gets bound to VPE0, ideally I'd like to make
-			   it homeless but it doesn't appear to let me bind a TC
-			   to a non-existent VPE. Which is perfectly reasonable.
-
-			   The (un)bound state is visible to an EJTAG probe so may
-			   notify GDB...
-			 */
-
-			if (((tmp = read_tc_c0_tcbind()) & TCBIND_CURVPE)) {
-				/* tc is bound >vpe0 */
-				write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);
-
-				t->pvpe = get_vpe(0);	/* set the parent vpe */
-			}
-
-			/* halt the TC */
-			write_tc_c0_tchalt(TCHALT_H);
-			mips_ihb();
-
-			tmp = read_tc_c0_tcstatus();
-
-			/* mark not activated and not dynamically allocatable */
-			tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
-			tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
-			write_tc_c0_tcstatus(tmp);
-		}
-	}
-
-out_reenable:
-	/* release config state */
-	clear_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	evpe(vpflags);
-	emt(mtflags);
-	local_irq_restore(flags);
-
-	return 0;
-
-out_class:
-	class_unregister(&vpe_class);
-out_chrdev:
-	unregister_chrdev(major, module_name);
-
-out:
-	return err;
-}
-
-static void __exit vpe_module_exit(void)
-{
-	struct vpe *v, *n;
-
-	device_del(&vpe_device);
-	unregister_chrdev(major, module_name);
-
-	/* No locking needed here */
-	list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
-		if (v->state != VPE_STATE_UNUSED)
-			release_vpe(v);
-	}
-}
-
 module_init(vpe_module_init);
 module_exit(vpe_module_exit);
 MODULE_DESCRIPTION("MIPS VPE Loader");
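/*
 * Illustrative sketch (not part of the patch): as the vpe.c header above
 * describes, the AP/SP loader is driven entirely through /dev/vpe1 --
 * writing an ELF image and closing the descriptor is what triggers
 * vpe_release() -> vpe_elfload() -> vpe_run().  A minimal user-space
 * equivalent of "cat sp.elf > /dev/vpe1"; "sp.elf" is a hypothetical SP
 * program built as the header describes:
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *in = fopen("sp.elf", "rb");
	FILE *out = fopen("/dev/vpe1", "wb");
	char buf[4096];
	size_t n;

	if (!in || !out) {
		perror("fopen");
		return EXIT_FAILURE;
	}

	while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
		fwrite(buf, 1, n, out);

	fclose(in);
	fclose(out);	/* the release hook loads and starts the program */
	return 0;
}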