Diffstat (limited to 'arch/mips/kernel')
32 files changed, 447 insertions, 499 deletions
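Among the changes below, cpu-probe.c begins exporting CPU feature bits to userspace through elf_hwcap (HWCAP_MIPS_MIPS16 through HWCAP_LOONGSON_EXT2). As a minimal userspace sketch of how such bits are consumed — illustrative only, not part of this series, and assuming a MIPS build environment whose <asm/hwcap.h> carries the new HWCAP_MIPS_* and HWCAP_LOONGSON_* definitions:

/*
 * Hypothetical consumer of the new MIPS hwcap bits; a minimal sketch,
 * assuming <asm/hwcap.h> from a kernel containing this series.
 */
#include <stdio.h>
#include <sys/auxv.h>	/* getauxval(), AT_HWCAP */
#include <asm/hwcap.h>	/* HWCAP_MIPS_*, HWCAP_LOONGSON_* (MIPS only) */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	/* Each test mirrors an elf_hwcap |= ... line added in cpu-probe.c. */
	if (hwcap & HWCAP_MIPS_MIPS16)
		puts("mips16");
	if (hwcap & HWCAP_MIPS_DSP)
		puts("dsp");
	if (hwcap & HWCAP_LOONGSON_MMI)
		puts("loongson-mmi");
	if (hwcap & HWCAP_LOONGSON_EXT2)
		puts("loongson-ext2");

	return 0;
}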
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 89b07ea8d249..d6e97df51cfb 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -80,7 +80,7 @@ obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o -obj-$(CONFIG_64BIT) += cpu-bugs64.o +obj-$(CONFIG_CPU_R4X00_BUGS64) += r4k-bugs64.o obj-$(CONFIG_I8253) += i8253.o diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c index 7a12763d553a..6ee3f7218c67 100644 --- a/arch/mips/kernel/binfmt_elfn32.c +++ b/arch/mips/kernel/binfmt_elfn32.c @@ -100,7 +100,7 @@ jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value) #undef TASK_SIZE #define TASK_SIZE TASK_SIZE32 -#undef ns_to_timeval -#define ns_to_timeval ns_to_old_timeval32 +#undef ns_to_kernel_old_timeval +#define ns_to_kernel_old_timeval ns_to_old_timeval32 #include "../../../fs/binfmt_elf.c" diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c index e6db06a1d31a..6dd103d3cebb 100644 --- a/arch/mips/kernel/binfmt_elfo32.c +++ b/arch/mips/kernel/binfmt_elfo32.c @@ -103,7 +103,7 @@ jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value) #undef TASK_SIZE #define TASK_SIZE TASK_SIZE32 -#undef ns_to_timeval -#define ns_to_timeval ns_to_old_timeval32 +#undef ns_to_kernel_old_timeval +#define ns_to_kernel_old_timeval ns_to_old_timeval32 #include "../../../fs/binfmt_elf.c" diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 1db29957a931..2c38f75d87ff 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -58,6 +58,7 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, unsigned long *contpc) { union mips_instruction insn = (union mips_instruction)dec_insn.insn; + int __maybe_unused bc_false = 0; if (!cpu_has_mmips) return 0; @@ -139,7 +140,6 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, #ifdef CONFIG_MIPS_FP_SUPPORT case mm_bc2f_op: case mm_bc1f_op: { - int bc_false = 0; unsigned int fcr31; unsigned int bit; diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c index f777e44653d5..47312c529410 100644 --- a/arch/mips/kernel/cacheinfo.c +++ b/arch/mips/kernel/cacheinfo.c @@ -50,6 +50,25 @@ static int __init_cache_level(unsigned int cpu) return 0; } +static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map) +{ + int cpu1; + + for_each_possible_cpu(cpu1) + if (cpus_are_siblings(cpu, cpu1)) + cpumask_set_cpu(cpu1, cpu_map); +} + +static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map) +{ + int cpu1; + int cluster = cpu_cluster(&cpu_data[cpu]); + + for_each_possible_cpu(cpu1) + if (cpu_cluster(&cpu_data[cpu1]) == cluster) + cpumask_set_cpu(cpu1, cpu_map); +} + static int __populate_cache_leaves(unsigned int cpu) { struct cpuinfo_mips *c = ¤t_cpu_data; @@ -57,14 +76,20 @@ static int __populate_cache_leaves(unsigned int cpu) struct cacheinfo *this_leaf = this_cpu_ci->info_list; if (c->icache.waysize) { + /* L1 caches are per core */ + fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA); + fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST); } else { populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED); } - if (c->scache.waysize) + if (c->scache.waysize) { + /* L2 cache is per cluster */ + fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map); populate_cache(scache, this_leaf, 2, 
CACHE_TYPE_UNIFIED); + } if (c->tcache.waysize) populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED); diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 9635c1db3ae6..6ab6b03d35ba 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -102,7 +102,12 @@ static void cpu_set_fpu_2008(struct cpuinfo_mips *c) if (fir & MIPS_FPIR_HAS2008) { fcsr = read_32bit_cp1_register(CP1_STATUS); - fcsr0 = fcsr & ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008); + /* + * The MAC2008 toolchain never landed in the real world, so we only + * test whether it can be disabled and don't try to enable + * it. + */ + fcsr0 = fcsr & ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008 | FPU_CSR_MAC2008); write_32bit_cp1_register(CP1_STATUS, fcsr0); fcsr0 = read_32bit_cp1_register(CP1_STATUS); @@ -112,6 +117,15 @@ static void cpu_set_fpu_2008(struct cpuinfo_mips *c) write_32bit_cp1_register(CP1_STATUS, fcsr); + if (c->isa_level & (MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2)) { + /* + * The bit for MAC2008 might be reused by R6 in the future, + * so we only test for R2-R5. + */ + if (fcsr0 & FPU_CSR_MAC2008) + c->options |= MIPS_CPU_MAC_2008_ONLY; + } + if (!(fcsr0 & FPU_CSR_NAN2008)) c->options |= MIPS_CPU_NAN_LEGACY; if (fcsr1 & FPU_CSR_NAN2008) @@ -608,7 +622,7 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags) if (!(flags & FTLB_EN)) return 1; return 0; - case CPU_LOONGSON3: + case CPU_LOONGSON64: /* Flush ITLB, DTLB, VTLB and FTLB */ write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB | LOONGSON_DIAG_VTLB | LOONGSON_DIAG_FTLB); @@ -1384,15 +1398,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) break; } break; - case PRID_IMP_R4300: - c->cputype = CPU_R4300; - __cpu_name[cpu] = "R4300"; - set_isa(c, MIPS_CPU_ISA_III); - c->fpu_msk31 |= FPU_CSR_CONDX; - c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | - MIPS_CPU_LLSC; - c->tlbsize = 32; - break; case PRID_IMP_R4600: c->cputype = CPU_R4600; __cpu_name[cpu] = "R4600"; @@ -1468,14 +1473,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) MIPS_CPU_LLSC; c->tlbsize = 48; break; - case PRID_IMP_R5432: - c->cputype = CPU_R5432; - __cpu_name[cpu] = "R5432"; - set_isa(c, MIPS_CPU_ISA_IV); - c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | - MIPS_CPU_WATCH | MIPS_CPU_LLSC; - c->tlbsize = 48; - break; case PRID_IMP_R5500: c->cputype = CPU_R5500; __cpu_name[cpu] = "R5500"; @@ -1508,15 +1505,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) */ c->tlbsize = (read_c0_info() & (1 << 29)) ? 
64 : 48; break; - case PRID_IMP_R8000: - c->cputype = CPU_R8000; - __cpu_name[cpu] = "RM8000"; - set_isa(c, MIPS_CPU_ISA_IV); - c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX | - MIPS_CPU_FPU | MIPS_CPU_32FPR | - MIPS_CPU_LLSC; - c->tlbsize = 384; /* has weird TLB: 3-way x 128 */ - break; case PRID_IMP_R10000: c->cputype = CPU_R10000; __cpu_name[cpu] = "R10000"; @@ -1552,34 +1540,38 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) MIPS_CPU_LLSC | MIPS_CPU_BP_GHIST; c->tlbsize = 64; break; - case PRID_IMP_LOONGSON_64: /* Loongson-2/3 */ + case PRID_IMP_LOONGSON_64C: /* Loongson-2/3 */ switch (c->processor_id & PRID_REV_MASK) { case PRID_REV_LOONGSON2E: - c->cputype = CPU_LOONGSON2; + c->cputype = CPU_LOONGSON2EF; __cpu_name[cpu] = "ICT Loongson-2"; set_elf_platform(cpu, "loongson2e"); set_isa(c, MIPS_CPU_ISA_III); c->fpu_msk31 |= FPU_CSR_CONDX; break; case PRID_REV_LOONGSON2F: - c->cputype = CPU_LOONGSON2; + c->cputype = CPU_LOONGSON2EF; __cpu_name[cpu] = "ICT Loongson-2"; set_elf_platform(cpu, "loongson2f"); set_isa(c, MIPS_CPU_ISA_III); c->fpu_msk31 |= FPU_CSR_CONDX; break; case PRID_REV_LOONGSON3A_R1: - c->cputype = CPU_LOONGSON3; + c->cputype = CPU_LOONGSON64; __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3a"); set_isa(c, MIPS_CPU_ISA_M64R1); + c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | + MIPS_ASE_LOONGSON_EXT); break; case PRID_REV_LOONGSON3B_R1: case PRID_REV_LOONGSON3B_R2: - c->cputype = CPU_LOONGSON3; + c->cputype = CPU_LOONGSON64; __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3b"); set_isa(c, MIPS_CPU_ISA_M64R1); + c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | + MIPS_ASE_LOONGSON_EXT); break; } @@ -1587,12 +1579,13 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) MIPS_CPU_FPU | MIPS_CPU_LLSC | MIPS_CPU_32FPR; c->tlbsize = 64; + set_cpu_asid_mask(c, MIPS_ENTRYHI_ASID); c->writecombine = _CACHE_UNCACHED_ACCELERATED; break; case PRID_IMP_LOONGSON_32: /* Loongson-1 */ decode_configs(c); - c->cputype = CPU_LOONGSON1; + c->cputype = CPU_LOONGSON32; switch (c->processor_id & PRID_REV_MASK) { case PRID_REV_LOONGSON1B: @@ -1925,18 +1918,18 @@ platform: static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { switch (c->processor_id & PRID_IMP_MASK) { - case PRID_IMP_LOONGSON_64: /* Loongson-2/3 */ + case PRID_IMP_LOONGSON_64C: /* Loongson-2/3 */ switch (c->processor_id & PRID_REV_MASK) { case PRID_REV_LOONGSON3A_R2_0: case PRID_REV_LOONGSON3A_R2_1: - c->cputype = CPU_LOONGSON3; + c->cputype = CPU_LOONGSON64; __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3a"); set_isa(c, MIPS_CPU_ISA_M64R2); break; case PRID_REV_LOONGSON3A_R3_0: case PRID_REV_LOONGSON3A_R3_1: - c->cputype = CPU_LOONGSON3; + c->cputype = CPU_LOONGSON64; __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3a"); set_isa(c, MIPS_CPU_ISA_M64R2); @@ -1946,6 +1939,19 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) decode_configs(c); c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE; c->writecombine = _CACHE_UNCACHED_ACCELERATED; + c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | + MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2); + break; + case PRID_IMP_LOONGSON_64G: + c->cputype = CPU_LOONGSON64; + __cpu_name[cpu] = "ICT Loongson-3"; + set_elf_platform(cpu, "loongson3a"); + set_isa(c, MIPS_CPU_ISA_M64R2); + decode_configs(c); + c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | 
MIPS_CPU_LDPTE; + c->writecombine = _CACHE_UNCACHED_ACCELERATED; + c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | + MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2); break; default: panic("Unknown Loongson Processor ID!"); @@ -1956,27 +1962,66 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu) { decode_configs(c); - /* JZRISC does not implement the CP0 counter. */ + + /* + * XBurst lacks a config2 register, so the config3 decode was skipped in + * decode_configs(). + */ + decode_config3(c); + + /* XBurst does not implement the CP0 counter. */ c->options &= ~MIPS_CPU_COUNTER; BUG_ON(!__builtin_constant_p(cpu_has_counter) || cpu_has_counter); + switch (c->processor_id & PRID_IMP_MASK) { - case PRID_IMP_JZRISC: - c->cputype = CPU_JZRISC; + case PRID_IMP_XBURST_REV1: + + /* + * The XBurst core by default attempts to avoid branch target + * buffer lookups by detecting & special casing loops. This + * feature causes BogoMIPS and lpj to be calculated incorrectly. + * Set cp0 config7 bit 4 to disable this feature. + */ + set_c0_config7(MIPS_CONF7_BTB_LOOP_EN); + + switch (c->processor_id & PRID_COMP_MASK) { + + /* + * XBurst CPUs with a processor ID of PRID_COMP_INGENIC_D0 report + * themselves in config0 as MIPS32r2 compatible, but they don't + * actually support this ISA. + */ + case PRID_COMP_INGENIC_D0: + c->isa_level &= ~MIPS_CPU_ISA_M32R2; + break; + + /* + * XBurst CPUs with a processor ID of PRID_COMP_INGENIC_D1 have an + * abandoned huge page TLB mode. This mode is not compatible with + * the MIPS standard and causes a TLB miss and an infinite loop + * (line 21 in tlb-funcs.S) when starting the init process. After + * chip reset the default is HPTLB mode, so write 0xa9000000 to + * cp0 register 5 sel 4 to switch back to VTLB mode and avoid + * getting stuck. + */ + case PRID_COMP_INGENIC_D1: + write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS); + break; + + default: + break; + } + /* fall-through */ + case PRID_IMP_XBURST_REV2: + c->cputype = CPU_XBURST; c->writecombine = _CACHE_UNCACHED_ACCELERATED; - __cpu_name[cpu] = "Ingenic JZRISC"; + __cpu_name[cpu] = "Ingenic XBurst"; break; + default: panic("Unknown Ingenic Processor ID!"); break; } - - /* - * The config0 register in the Xburst CPUs with a processor ID of - * PRID_COMP_INGENIC_D0 report themselves as MIPS32r2 compatible, - * but they don't actually support this ISA. 
- */ - if ((c->processor_id & PRID_COMP_MASK) == PRID_COMP_INGENIC_D0) - c->isa_level &= ~MIPS_CPU_ISA_M32R2; } static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) @@ -2185,6 +2230,39 @@ void cpu_probe(void) elf_hwcap |= HWCAP_MIPS_MSA; } + if (cpu_has_mips16) + elf_hwcap |= HWCAP_MIPS_MIPS16; + + if (cpu_has_mdmx) + elf_hwcap |= HWCAP_MIPS_MDMX; + + if (cpu_has_mips3d) + elf_hwcap |= HWCAP_MIPS_MIPS3D; + + if (cpu_has_smartmips) + elf_hwcap |= HWCAP_MIPS_SMARTMIPS; + + if (cpu_has_dsp) + elf_hwcap |= HWCAP_MIPS_DSP; + + if (cpu_has_dsp2) + elf_hwcap |= HWCAP_MIPS_DSP2; + + if (cpu_has_dsp3) + elf_hwcap |= HWCAP_MIPS_DSP3; + + if (cpu_has_mips16e2) + elf_hwcap |= HWCAP_MIPS_MIPS16E2; + + if (cpu_has_loongson_mmi) + elf_hwcap |= HWCAP_LOONGSON_MMI; + + if (cpu_has_loongson_ext) + elf_hwcap |= HWCAP_LOONGSON_EXT; + + if (cpu_has_loongson_ext2) + elf_hwcap |= HWCAP_LOONGSON_EXT2; + if (cpu_has_vz) cpu_probe_vz(c); diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index 5469d43b6966..4849a48afc0f 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -19,7 +19,7 @@ #include <asm/thread_info.h> #include <asm/war.h> -#ifndef CONFIG_PREEMPT +#ifndef CONFIG_PREEMPTION #define resume_kernel restore_all #else #define __ret_from_irq ret_from_exception @@ -27,7 +27,7 @@ .text .align 5 -#ifndef CONFIG_PREEMPT +#ifndef CONFIG_PREEMPTION FEXPORT(ret_from_exception) local_irq_disable # preempt stop b __ret_from_irq @@ -53,7 +53,7 @@ resume_userspace: bnez t0, work_pending j restore_all -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION resume_kernel: local_irq_disable lw t0, TI_PRE_COUNT($28) diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index 398b905b027d..0a43c9125267 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -18,6 +18,7 @@ #include <asm/fpregdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> +#include <asm/sync.h> #include <asm/war.h> #include <asm/thread_info.h> @@ -32,9 +33,6 @@ NESTED(except_vec3_generic, 0, sp) .set push .set noat -#if R5432_CP0_INTERRUPT_WAR - mfc0 k0, CP0_INDEX -#endif mfc0 k1, CP0_CAUSE andi k1, k1, 0x7c #ifdef CONFIG_64BIT @@ -356,8 +354,9 @@ NESTED(ejtag_debug_handler, PT_SIZE, sp) #ifdef CONFIG_SMP 1: PTR_LA k0, ejtag_debug_buffer_spinlock - ll k0, 0(k0) - bnez k0, 1b + __SYNC(full, loongson3_war) +2: ll k0, 0(k0) + bnez k0, 2b PTR_LA k0, ejtag_debug_buffer_spinlock sc k0, 0(k0) beqz k0, 1b @@ -660,7 +659,7 @@ isrdhwr: .set pop END(handle_ri_rdhwr) -#ifdef CONFIG_64BIT +#ifdef CONFIG_CPU_R4X00_BUGS64 /* A temporary overflow handler used by check_daddi(). 
*/ __INIT diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 7388f1374d5f..37f8e78e2869 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c @@ -151,7 +151,6 @@ void __init check_wait(void) cpu_wait = r39xx_wait; break; case CPU_R4200: -/* case CPU_R4300: */ case CPU_R4600: case CPU_R4640: case CPU_R4650: @@ -173,14 +172,15 @@ void __init check_wait(void) case CPU_CAVIUM_OCTEON_PLUS: case CPU_CAVIUM_OCTEON2: case CPU_CAVIUM_OCTEON3: - case CPU_JZRISC: - case CPU_LOONGSON1: + case CPU_XBURST: + case CPU_LOONGSON32: case CPU_XLR: case CPU_XLP: cpu_wait = r4k_wait; break; - case CPU_LOONGSON3: - if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0) + case CPU_LOONGSON64: + if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >= + (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0)) cpu_wait = r4k_wait; break; diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index e5ea3db23d6b..cdb93ed91cde 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -194,7 +194,7 @@ static void mips_cm_probe_l2sync(void) write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN); /* Map the region */ - mips_cm_l2sync_base = ioremap_nocache(addr, MIPS_CM_L2SYNC_SIZE); + mips_cm_l2sync_base = ioremap(addr, MIPS_CM_L2SYNC_SIZE); } int mips_cm_probe(void) @@ -215,7 +215,7 @@ int mips_cm_probe(void) if (!addr) return -ENODEV; - mips_gcr_base = ioremap_nocache(addr, MIPS_CM_GCR_SIZE); + mips_gcr_base = ioremap(addr, MIPS_CM_GCR_SIZE); if (!mips_gcr_base) return -ENXIO; diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c index 69e3e0b556bf..8d2535123f11 100644 --- a/arch/mips/kernel/mips-cpc.c +++ b/arch/mips/kernel/mips-cpc.c @@ -78,7 +78,7 @@ int mips_cpc_probe(void) if (!addr) return -ENODEV; - mips_cpc_base = ioremap_nocache(addr, 0x8000); + mips_cpc_base = ioremap(addr, 0x8000); if (!mips_cpc_base) return -ENXIO; diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index a3e2da8391ea..128fc9999c56 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -1623,7 +1623,7 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) raw_event.cntr_mask = raw_id > 127 ? CNTR_ODD : CNTR_EVEN; break; - case CPU_LOONGSON3: + case CPU_LOONGSON64: raw_event.cntr_mask = raw_id > 127 ? 
CNTR_ODD : CNTR_EVEN; break; } @@ -1764,12 +1764,12 @@ init_hw_perf_events(void) mipspmu.general_event_map = &mipsxxcore_event_map; mipspmu.cache_event_map = &mipsxxcore_cache_map; break; - case CPU_LOONGSON1: + case CPU_LOONGSON32: mipspmu.name = "mips/loongson1"; mipspmu.general_event_map = &mipsxxcore_event_map; mipspmu.cache_event_map = &mipsxxcore_cache_map; break; - case CPU_LOONGSON3: + case CPU_LOONGSON64: mipspmu.name = "mips/loongson3"; mipspmu.general_event_map = &loongson3_event_map; mipspmu.cache_event_map = &loongson3_cache_map; diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index a26f40db15d0..9bf60d7d44d3 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c @@ -307,7 +307,7 @@ static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl, } /* Barrier ensuring previous cache invalidates are complete */ - uasm_i_sync(pp, STYPE_SYNC); + uasm_i_sync(pp, __SYNC_full); uasm_i_ehb(pp); /* Check whether the pipeline stalled due to the FSB being full */ @@ -397,7 +397,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) if (coupled_coherence) { /* Increment ready_count */ - uasm_i_sync(&p, STYPE_SYNC_MB); + uasm_i_sync(&p, __SYNC_mb); uasm_build_label(&l, p, lbl_incready); uasm_i_ll(&p, t1, 0, r_nc_count); uasm_i_addiu(&p, t2, t1, 1); @@ -406,7 +406,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) uasm_i_addiu(&p, t1, t1, 1); /* Barrier ensuring all CPUs see the updated r_nc_count value */ - uasm_i_sync(&p, STYPE_SYNC_MB); + uasm_i_sync(&p, __SYNC_mb); /* * If this is the last VPE to become ready for non-coherence @@ -473,7 +473,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) Index_Writeback_Inv_D, lbl_flushdcache); /* Barrier ensuring previous cache invalidates are complete */ - uasm_i_sync(&p, STYPE_SYNC); + uasm_i_sync(&p, __SYNC_full); uasm_i_ehb(&p); if (mips_cm_revision() < CM_REV_CM3) { @@ -487,7 +487,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) uasm_i_lw(&p, t0, 0, r_pcohctl); /* Barrier to ensure write to coherence control is complete */ - uasm_i_sync(&p, STYPE_SYNC); + uasm_i_sync(&p, __SYNC_full); uasm_i_ehb(&p); } @@ -534,7 +534,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) } /* Barrier to ensure write to CPC command is complete */ - uasm_i_sync(&p, STYPE_SYNC); + uasm_i_sync(&p, __SYNC_full); uasm_i_ehb(&p); } @@ -572,13 +572,13 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) uasm_i_lw(&p, t0, 0, r_pcohctl); /* Barrier to ensure write to coherence control is complete */ - uasm_i_sync(&p, STYPE_SYNC); + uasm_i_sync(&p, __SYNC_full); uasm_i_ehb(&p); if (coupled_coherence && (state == CPS_PM_NC_WAIT)) { /* Decrement ready_count */ uasm_build_label(&l, p, lbl_decready); - uasm_i_sync(&p, STYPE_SYNC_MB); + uasm_i_sync(&p, __SYNC_mb); uasm_i_ll(&p, t1, 0, r_nc_count); uasm_i_addiu(&p, t2, t1, -1); uasm_i_sc(&p, t2, 0, r_nc_count); @@ -586,7 +586,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1); /* Barrier ensuring all CPUs see the updated r_nc_count value */ - uasm_i_sync(&p, STYPE_SYNC_MB); + uasm_i_sync(&p, __SYNC_mb); } if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) { @@ -608,7 +608,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) uasm_build_label(&l, p, lbl_secondary_cont); /* Barrier ensuring all CPUs see the updated r_nc_count value */ - 
uasm_i_sync(&p, STYPE_SYNC_MB); + uasm_i_sync(&p, __SYNC_mb); } /* The core is coherent, time to return to C code */ diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index b2de408a259e..f8d36710cd58 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -124,6 +124,10 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_eva) seq_printf(m, "%s", " eva"); if (cpu_has_htw) seq_printf(m, "%s", " htw"); if (cpu_has_xpa) seq_printf(m, "%s", " xpa"); + if (cpu_has_loongson_mmi) seq_printf(m, "%s", " loongson-mmi"); + if (cpu_has_loongson_cam) seq_printf(m, "%s", " loongson-cam"); + if (cpu_has_loongson_ext) seq_printf(m, "%s", " loongson-ext"); + if (cpu_has_loongson_ext2) seq_printf(m, "%s", " loongson-ext2"); seq_printf(m, "\n"); if (cpu_has_mmips) { diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/r4k-bugs64.c index fa62cd1dff93..1ff19f1ea5ca 100644 --- a/arch/mips/kernel/cpu-bugs64.c +++ b/arch/mips/kernel/r4k-bugs64.c @@ -24,7 +24,8 @@ static char r4kwar[] __initdata = static char daddiwar[] __initdata = "Enable CPU_DADDI_WORKAROUNDS to rectify."; -static inline void align_mod(const int align, const int mod) +static __always_inline __init +void align_mod(const int align, const int mod) { asm volatile( ".set push\n\t" @@ -38,8 +39,9 @@ static inline void align_mod(const int align, const int mod) : "n"(align), "n"(mod)); } -static __always_inline void mult_sh_align_mod(long *v1, long *v2, long *w, - const int align, const int mod) +static __always_inline __init +void mult_sh_align_mod(long *v1, long *v2, long *w, + const int align, const int mod) { unsigned long flags; int m1, m2; @@ -113,7 +115,7 @@ static __always_inline void mult_sh_align_mod(long *v1, long *v2, long *w, *w = lw; } -static inline void check_mult_sh(void) +static __always_inline __init void check_mult_sh(void) { long v1[8], v2[8], w[8]; int bug, fix, i; @@ -176,7 +178,7 @@ asmlinkage void __init do_daddi_ov(struct pt_regs *regs) exception_exit(prev_state); } -static inline void check_daddi(void) +static __init void check_daddi(void) { extern asmlinkage void handle_daddi_ov(void); unsigned long flags; @@ -240,9 +242,9 @@ static inline void check_daddi(void) panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); } -int daddiu_bug = IS_ENABLED(CONFIG_CPU_MIPSR6) ? 
0 : -1; +int daddiu_bug = -1; -static inline void check_daddiu(void) +static __init void check_daddiu(void) { long v, w, tmp; @@ -310,14 +312,11 @@ static inline void check_daddiu(void) void __init check_bugs64_early(void) { - if (!IS_ENABLED(CONFIG_CPU_MIPSR6)) { - check_mult_sh(); - check_daddiu(); - } + check_mult_sh(); + check_daddiu(); } void __init check_bugs64(void) { - if (!IS_ENABLED(CONFIG_CPU_MIPSR6)) - check_daddi(); + check_daddi(); } diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index d9434cd0f568..b449b68662a9 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -217,7 +217,7 @@ einval: li v0, -ENOSYS #define sys_sched_getaffinity mipsmt_sys_sched_getaffinity #endif /* CONFIG_MIPS_MT_FPAFF */ -#define __SYSCALL(nr, entry, nargs) PTR entry +#define __SYSCALL(nr, entry) PTR entry .align 2 .type sys_call_table, @object EXPORT(sys_call_table) diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index c761ddfed9e6..35d8c86b160e 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -101,7 +101,7 @@ not_n32_scall: END(handle_sysn32) -#define __SYSCALL(nr, entry, nargs) PTR entry +#define __SYSCALL(nr, entry) PTR entry .type sysn32_call_table, @object EXPORT(sysn32_call_table) #include <asm/syscall_table_64_n32.h> diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S index 727fb8a1b0eb..23b2e2b1609c 100644 --- a/arch/mips/kernel/scall64-n64.S +++ b/arch/mips/kernel/scall64-n64.S @@ -109,7 +109,7 @@ illegal_syscall: j n64_syscall_exit END(handle_sys64) -#define __SYSCALL(nr, entry, nargs) PTR entry +#define __SYSCALL(nr, entry) PTR entry .align 3 .type sys_call_table, @object EXPORT(sys_call_table) diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index feb2653490df..41df8221bb8f 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -213,7 +213,7 @@ einval: li v0, -ENOSYS jr ra END(sys32_syscall) -#define __SYSCALL(nr, entry, nargs) PTR entry +#define __SYSCALL(nr, entry) PTR entry .align 3 .type sys32_call_table,@object EXPORT(sys32_call_table) diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index ab349d2381c3..1ac2752fb791 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -63,20 +63,20 @@ unsigned long mips_machtype __read_mostly = MACH_UNKNOWN; EXPORT_SYMBOL(mips_machtype); -struct boot_mem_map boot_mem_map; - static char __initdata command_line[COMMAND_LINE_SIZE]; char __initdata arcs_cmdline[COMMAND_LINE_SIZE]; #ifdef CONFIG_CMDLINE_BOOL -static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE; +static const char builtin_cmdline[] __initconst = CONFIG_CMDLINE; +#else +static const char builtin_cmdline[] __initconst = ""; #endif /* * mips_io_port_base is the begin of the address space to which x86 style * I/O ports are mapped. */ -const unsigned long mips_io_port_base = -1; +unsigned long mips_io_port_base = -1; EXPORT_SYMBOL(mips_io_port_base); static struct resource code_resource = { .name = "Kernel code", }; @@ -92,8 +92,10 @@ EXPORT_SYMBOL(ARCH_PFN_OFFSET); void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type) { - int x = boot_mem_map.nr_map; - int i; + /* + * Note: This function only exists for historical reasons; + * new code should use memblock_add or memblock_add_node instead. 
+ */ /* * If the region reaches the top of the physical address space, adjust @@ -108,38 +110,23 @@ void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type) return; } - /* - * Try to merge with existing entry, if any. - */ - for (i = 0; i < boot_mem_map.nr_map; i++) { - struct boot_mem_map_entry *entry = boot_mem_map.map + i; - unsigned long top; - - if (entry->type != type) - continue; - - if (start + size < entry->addr) - continue; /* no overlap */ + if (start < PHYS_OFFSET) + return; - if (entry->addr + entry->size < start) - continue; /* no overlap */ + memblock_add(start, size); + /* Reserve any memory except the ordinary RAM ranges. */ + switch (type) { + case BOOT_MEM_RAM: + break; - top = max(entry->addr + entry->size, start + size); - entry->addr = min(entry->addr, start); - entry->size = top - entry->addr; + case BOOT_MEM_NOMAP: /* Discard the range from the system. */ + memblock_remove(start, size); + break; - return; + default: /* Reserve the rest of the memory types at boot time */ + memblock_reserve(start, size); + break; } - - if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) { - pr_err("Ooops! Too many entries in the memory map!\n"); - return; - } - - boot_mem_map.map[x].addr = start; - boot_mem_map.map[x].size = size; - boot_mem_map.map[x].type = type; - boot_mem_map.nr_map++; } void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max) @@ -161,70 +148,6 @@ void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_add add_memory_region(start, size, BOOT_MEM_RAM); } -static bool __init __maybe_unused memory_region_available(phys_addr_t start, - phys_addr_t size) -{ - int i; - bool in_ram = false, free = true; - - for (i = 0; i < boot_mem_map.nr_map; i++) { - phys_addr_t start_, end_; - - start_ = boot_mem_map.map[i].addr; - end_ = boot_mem_map.map[i].addr + boot_mem_map.map[i].size; - - switch (boot_mem_map.map[i].type) { - case BOOT_MEM_RAM: - if (start >= start_ && start + size <= end_) - in_ram = true; - break; - case BOOT_MEM_RESERVED: - case BOOT_MEM_NOMAP: - if ((start >= start_ && start < end_) || - (start < start_ && start + size >= start_)) - free = false; - break; - default: - continue; - } - } - - return in_ram && free; -} - -static void __init print_memory_map(void) -{ - int i; - const int field = 2 * sizeof(unsigned long); - - for (i = 0; i < boot_mem_map.nr_map; i++) { - printk(KERN_INFO " memory: %0*Lx @ %0*Lx ", - field, (unsigned long long) boot_mem_map.map[i].size, - field, (unsigned long long) boot_mem_map.map[i].addr); - - switch (boot_mem_map.map[i].type) { - case BOOT_MEM_RAM: - printk(KERN_CONT "(usable)\n"); - break; - case BOOT_MEM_INIT_RAM: - printk(KERN_CONT "(usable after init)\n"); - break; - case BOOT_MEM_ROM_DATA: - printk(KERN_CONT "(ROM data)\n"); - break; - case BOOT_MEM_RESERVED: - printk(KERN_CONT "(reserved)\n"); - break; - case BOOT_MEM_NOMAP: - printk(KERN_CONT "(nomap)\n"); - break; - default: - printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type); - break; - } - } -} - /* * Manage initrd */ @@ -364,7 +287,7 @@ static unsigned long __init init_initrd(void) * Initialize the bootmem allocator. It also setup initrd related data * if needed. 
*/ -#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA)) +#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA)) static void __init bootmem_init(void) { @@ -376,8 +299,11 @@ static void __init bootmem_init(void) static void __init bootmem_init(void) { - phys_addr_t ramstart = PHYS_ADDR_MAX; - int i; + struct memblock_region *mem; + phys_addr_t ramstart, ramend; + + ramstart = memblock_start_of_DRAM(); + ramend = memblock_end_of_DRAM(); /* * Sanity check any INITRD first. We don't take it into account @@ -391,122 +317,66 @@ static void __init bootmem_init(void) memblock_reserve(__pa_symbol(&_text), __pa_symbol(&_end) - __pa_symbol(&_text)); + /* max_low_pfn is not a number of pages but the end pfn of low mem */ + +#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET + ARCH_PFN_OFFSET = PFN_UP(ramstart); +#else /* - * max_low_pfn is not a number of pages. The number of pages - * of the system is given by 'max_low_pfn - min_low_pfn'. + * Reserve any memory between the start of RAM and PHYS_OFFSET */ - min_low_pfn = ~0UL; - max_low_pfn = 0; - - /* Find the highest and lowest page frame numbers we have available. */ - for (i = 0; i < boot_mem_map.nr_map; i++) { - unsigned long start, end; - - if (boot_mem_map.map[i].type != BOOT_MEM_RAM) - continue; + if (ramstart > PHYS_OFFSET) + memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET); - start = PFN_UP(boot_mem_map.map[i].addr); - end = PFN_DOWN(boot_mem_map.map[i].addr - + boot_mem_map.map[i].size); + if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) { + pr_info("Wasting %lu bytes for tracking %lu unused pages\n", + (unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)), + (unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET)); + } +#endif - ramstart = min(ramstart, boot_mem_map.map[i].addr); + min_low_pfn = ARCH_PFN_OFFSET; + max_pfn = PFN_DOWN(ramend); + for_each_memblock(memory, mem) { + unsigned long start = memblock_region_memory_base_pfn(mem); + unsigned long end = memblock_region_memory_end_pfn(mem); -#ifndef CONFIG_HIGHMEM /* * Skip highmem here so we get an accurate max_low_pfn if low * memory stops short of high memory. * If the region overlaps HIGHMEM_START, end is clipped so * max_pfn excludes the highmem portion. 
*/ + if (memblock_is_nomap(mem)) + continue; if (start >= PFN_DOWN(HIGHMEM_START)) continue; if (end > PFN_DOWN(HIGHMEM_START)) end = PFN_DOWN(HIGHMEM_START); -#endif - if (end > max_low_pfn) max_low_pfn = end; - if (start < min_low_pfn) - min_low_pfn = start; } if (min_low_pfn >= max_low_pfn) panic("Incorrect memory mapping !!!"); -#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET - ARCH_PFN_OFFSET = PFN_UP(ramstart); -#else - /* - * Reserve any memory between the start of RAM and PHYS_OFFSET - */ - if (ramstart > PHYS_OFFSET) { - add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET, - BOOT_MEM_RESERVED); - memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET); - } - - if (min_low_pfn > ARCH_PFN_OFFSET) { - pr_info("Wasting %lu bytes for tracking %lu unused pages\n", - (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page), - min_low_pfn - ARCH_PFN_OFFSET); - } else if (ARCH_PFN_OFFSET - min_low_pfn > 0UL) { - pr_info("%lu free pages won't be used\n", - ARCH_PFN_OFFSET - min_low_pfn); - } - min_low_pfn = ARCH_PFN_OFFSET; -#endif - - /* - * Determine low and high memory ranges - */ - max_pfn = max_low_pfn; - if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) { + if (max_pfn > PFN_DOWN(HIGHMEM_START)) { #ifdef CONFIG_HIGHMEM highstart_pfn = PFN_DOWN(HIGHMEM_START); - highend_pfn = max_low_pfn; -#endif + highend_pfn = max_pfn; +#else max_low_pfn = PFN_DOWN(HIGHMEM_START); - } - - /* Install all valid RAM ranges to the memblock memory region */ - for (i = 0; i < boot_mem_map.nr_map; i++) { - unsigned long start, end; - - start = PFN_UP(boot_mem_map.map[i].addr); - end = PFN_DOWN(boot_mem_map.map[i].addr - + boot_mem_map.map[i].size); - - if (start < min_low_pfn) - start = min_low_pfn; -#ifndef CONFIG_HIGHMEM - /* Ignore highmem regions if highmem is unsupported */ - if (end > max_low_pfn) - end = max_low_pfn; + max_pfn = max_low_pfn; #endif - if (end <= start) - continue; - - memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0); + } - /* Reserve any memory except the ordinary RAM ranges. */ - switch (boot_mem_map.map[i].type) { - case BOOT_MEM_RAM: - break; - case BOOT_MEM_NOMAP: /* Discard the range from the system. */ - memblock_remove(PFN_PHYS(start), PFN_PHYS(end - start)); - continue; - default: /* Reserve the rest of the memory types at boot time */ - memblock_reserve(PFN_PHYS(start), PFN_PHYS(end - start)); - break; - } - /* - * In any case the added to the memblock memory regions - * (highmem/lowmem, available/reserved, etc) are considered - * as present, so inform sparsemem about them. - */ - memory_present(0, start, end); - } + /* + * In any case the added to the memblock memory regions + * (highmem/lowmem, available/reserved, etc) are considered + * as present, so inform sparsemem about them. + */ + memblocks_present(); /* * Reserve initrd memory if needed. @@ -528,8 +398,9 @@ static int __init early_parse_mem(char *p) * size. 
*/ if (usermem == 0) { - boot_mem_map.nr_map = 0; usermem = 1; + memblock_remove(memblock_start_of_DRAM(), + memblock_end_of_DRAM() - memblock_start_of_DRAM()); } start = 0; size = memparse(p, &p); @@ -586,14 +457,13 @@ early_param("memmap", early_parse_memmap); unsigned long setup_elfcorehdr, setup_elfcorehdr_size; static int __init early_parse_elfcorehdr(char *p) { - int i; + struct memblock_region *mem; setup_elfcorehdr = memparse(p, &p); - for (i = 0; i < boot_mem_map.nr_map; i++) { - unsigned long start = boot_mem_map.map[i].addr; - unsigned long end = (boot_mem_map.map[i].addr + - boot_mem_map.map[i].size); + for_each_memblock(memory, mem) { + unsigned long start = mem->base; + unsigned long end = start + mem->size; if (setup_elfcorehdr >= start && setup_elfcorehdr < end) { /* * Reserve from the elf core header to the end of @@ -613,47 +483,20 @@ static int __init early_parse_elfcorehdr(char *p) early_param("elfcorehdr", early_parse_elfcorehdr); #endif -static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type) -{ - phys_addr_t size; - int i; - - size = end - mem; - if (!size) - return; - - /* Make sure it is in the boot_mem_map */ - for (i = 0; i < boot_mem_map.nr_map; i++) { - if (mem >= boot_mem_map.map[i].addr && - mem < (boot_mem_map.map[i].addr + - boot_mem_map.map[i].size)) - return; - } - add_memory_region(mem, size, type); -} - #ifdef CONFIG_KEXEC -static inline unsigned long long get_total_mem(void) -{ - unsigned long long total; - - total = max_pfn - min_low_pfn; - return total << PAGE_SHIFT; -} - static void __init mips_parse_crashkernel(void) { unsigned long long total_mem; unsigned long long crash_size, crash_base; int ret; - total_mem = get_total_mem(); + total_mem = memblock_phys_mem_size(); ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base); if (ret != 0 || crash_size <= 0) return; - if (!memory_region_available(crash_base, crash_size)) { + if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 0)) { pr_warn("Invalid memory region reserved for crash kernel\n"); return; } @@ -672,8 +515,7 @@ static void __init request_crashkernel(struct resource *res) ret = request_resource(res, &crashk_res); if (!ret) pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n", - (unsigned long)((crashk_res.end - - crashk_res.start + 1) >> 20), + (unsigned long)(resource_size(&crashk_res) >> 20), (unsigned long)(crashk_res.start >> 20)); } #else /* !defined(CONFIG_KEXEC) */ @@ -686,11 +528,105 @@ static void __init request_crashkernel(struct resource *res) } #endif /* !defined(CONFIG_KEXEC) */ -#define USE_PROM_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER) -#define USE_DTB_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) -#define EXTEND_WITH_PROM IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) -#define BUILTIN_EXTEND_WITH_PROM \ - IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND) +static void __init check_kernel_sections_mem(void) +{ + phys_addr_t start = PFN_PHYS(PFN_DOWN(__pa_symbol(&_text))); + phys_addr_t size = PFN_PHYS(PFN_UP(__pa_symbol(&_end))) - start; + + if (!memblock_is_region_memory(start, size)) { + pr_info("Kernel sections are not in the memory maps\n"); + memblock_add(start, size); + } +} + +static void __init bootcmdline_append(const char *s, size_t max) +{ + if (!s[0] || !max) + return; + + if (boot_command_line[0]) + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); + + strlcat(boot_command_line, s, max); +} + +#ifdef CONFIG_OF_EARLY_FLATTREE + +static int __init 
bootcmdline_scan_chosen(unsigned long node, const char *uname, + int depth, void *data) +{ + bool *dt_bootargs = data; + const char *p; + int l; + + if (depth != 1 || !data || + (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) + return 0; + + p = of_get_flat_dt_prop(node, "bootargs", &l); + if (p != NULL && l > 0) { + bootcmdline_append(p, min(l, COMMAND_LINE_SIZE)); + *dt_bootargs = true; + } + + return 1; +} + +#endif /* CONFIG_OF_EARLY_FLATTREE */ + +static void __init bootcmdline_init(char **cmdline_p) +{ + bool dt_bootargs = false; + + /* + * If CMDLINE_OVERRIDE is enabled then initializing the command line is + * trivial - we simply use the built-in command line unconditionally & + * unmodified. + */ + if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) { + strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); + return; + } + + /* + * If the user specified a built-in command line & + * MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is + * prepended to arguments from the bootloader or DT so we'll copy them + * to the start of boot_command_line here. Otherwise, empty + * boot_command_line to undo anything early_init_dt_scan_chosen() did. + */ + if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)) + strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); + else + boot_command_line[0] = 0; + +#ifdef CONFIG_OF_EARLY_FLATTREE + /* + * If we're configured to take boot arguments from DT, look for those + * now. + */ + if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)) + of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs); +#endif + + /* + * If we didn't get any arguments from DT (regardless of whether that's + * because we weren't configured to look for them, or because we looked + * & found none) then we'll take arguments from the bootloader. + * plat_mem_setup() should have filled arcs_cmdline with arguments from + * the bootloader. + */ + if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs) + bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE); + + /* + * If the user specified a built-in command line & we didn't already + * prepend it, we append it to boot_command_line here. + */ + if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && + !IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)) + bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE); +} /* * arch_mem_init - initialize memory management subsystem @@ -718,79 +654,27 @@ static void __init arch_mem_init(char **cmdline_p) { extern void plat_mem_setup(void); - /* - * Initialize boot_command_line to an innocuous but non-empty string in - * order to prevent early_init_dt_scan_chosen() from copying - * CONFIG_CMDLINE into it without our knowledge. We handle - * CONFIG_CMDLINE ourselves below & don't want to duplicate its - * content because repeating arguments can be problematic. - */ - strlcpy(boot_command_line, " ", COMMAND_LINE_SIZE); - /* call board setup routine */ plat_mem_setup(); memblock_set_bottom_up(true); - /* - * Make sure all kernel memory is in the maps. The "UP" and - * "DOWN" are opposite for initdata since if it crosses over - * into another memory section you don't want that to be - * freed when the initdata is freed. 
- */ - arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT, - PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT, - BOOT_MEM_RAM); - arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT, - PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT, - BOOT_MEM_INIT_RAM); - arch_mem_addpart(PFN_DOWN(__pa_symbol(&__bss_start)) << PAGE_SHIFT, - PFN_UP(__pa_symbol(&__bss_stop)) << PAGE_SHIFT, - BOOT_MEM_RAM); - - pr_info("Determined physical RAM map:\n"); - print_memory_map(); - -#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE) - strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); -#else - if ((USE_PROM_CMDLINE && arcs_cmdline[0]) || - (USE_DTB_CMDLINE && !boot_command_line[0])) - strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE); - - if (EXTEND_WITH_PROM && arcs_cmdline[0]) { - if (boot_command_line[0]) - strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); - strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE); - } - -#if defined(CONFIG_CMDLINE_BOOL) - if (builtin_cmdline[0]) { - if (boot_command_line[0]) - strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); - strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); - } - - if (BUILTIN_EXTEND_WITH_PROM && arcs_cmdline[0]) { - if (boot_command_line[0]) - strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); - strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE); - } -#endif -#endif + bootcmdline_init(cmdline_p); strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); - *cmdline_p = command_line; parse_early_param(); - if (usermem) { - pr_info("User-defined physical RAM map:\n"); - print_memory_map(); - } + if (usermem) + pr_info("User-defined physical RAM map overwrite\n"); + + check_kernel_sections_mem(); early_init_fdt_reserve_self(); early_init_fdt_scan_reserved_mem(); +#ifndef CONFIG_NUMA + memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); +#endif bootmem_init(); /* @@ -813,8 +697,7 @@ static void __init arch_mem_init(char **cmdline_p) mips_parse_crashkernel(); #ifdef CONFIG_KEXEC if (crashk_res.start != crashk_res.end) - memblock_reserve(crashk_res.start, - crashk_res.end - crashk_res.start + 1); + memblock_reserve(crashk_res.start, resource_size(&crashk_res)); #endif device_tree_init(); sparse_init(); @@ -830,12 +713,12 @@ static void __init arch_mem_init(char **cmdline_p) memblock_dump_all(); - early_memtest(PFN_PHYS(min_low_pfn), PFN_PHYS(max_low_pfn)); + early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn)); } static void __init resource_init(void) { - int i; + struct memblock_region *region; if (UNCAC_BASE != IO_BASE) return; @@ -847,16 +730,10 @@ static void __init resource_init(void) bss_resource.start = __pa_symbol(&__bss_start); bss_resource.end = __pa_symbol(&__bss_stop) - 1; - for (i = 0; i < boot_mem_map.nr_map; i++) { + for_each_memblock(memory, region) { + phys_addr_t start = PFN_PHYS(memblock_region_memory_base_pfn(region)); + phys_addr_t end = PFN_PHYS(memblock_region_memory_end_pfn(region)) - 1; struct resource *res; - unsigned long start, end; - - start = boot_mem_map.map[i].addr; - end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1; - if (start >= HIGHMEM_START) - continue; - if (end >= HIGHMEM_START) - end = HIGHMEM_START - 1; res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); if (!res) @@ -865,20 +742,8 @@ static void __init resource_init(void) res->start = start; res->end = end; - res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; - - switch (boot_mem_map.map[i].type) { - case BOOT_MEM_RAM: - case 
BOOT_MEM_INIT_RAM: - case BOOT_MEM_ROM_DATA: - res->name = "System RAM"; - res->flags |= IORESOURCE_SYSRAM; - break; - case BOOT_MEM_RESERVED: - case BOOT_MEM_NOMAP: - default: - res->name = "reserved"; - } + res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + res->name = "System RAM"; request_resource(&iomem_resource, res); @@ -929,8 +794,6 @@ void __init setup_arch(char **cmdline_p) #if defined(CONFIG_VT) #if defined(CONFIG_VGA_CONSOLE) conswitchp = &vga_con; -#elif defined(CONFIG_DUMMY_CONSOLE) - conswitchp = &dummy_con; #endif #endif diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 76fae9b79f13..9058e9dcf080 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -31,7 +31,6 @@ #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/bootinfo.h> -#include <asm/pmon.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/mipsregs.h> @@ -464,10 +463,10 @@ static void bmips_wr_vec(unsigned long dst, char *start, char *end) static inline void bmips_nmi_handler_setup(void) { - bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec, - &bmips_reset_nmi_vec_end); - bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec, - &bmips_smp_int_vec_end); + bmips_wr_vec(BMIPS_NMI_RESET_VEC, bmips_reset_nmi_vec, + bmips_reset_nmi_vec_end); + bmips_wr_vec(BMIPS_WARM_RESTART_VEC, bmips_smp_int_vec, + bmips_smp_int_vec_end); } struct reset_vec_info { diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c index f2973ce87f53..abdd7aaa3311 100644 --- a/arch/mips/kernel/sync-r4k.c +++ b/arch/mips/kernel/sync-r4k.c @@ -90,6 +90,9 @@ void synchronise_count_master(int cpu) void synchronise_count_slave(int cpu) { int i; + unsigned long flags; + + local_irq_save(flags); /* * Not every cpu is online at the time this gets called, @@ -113,5 +116,7 @@ void synchronise_count_slave(int cpu) } /* Arrange for an interrupt in a short while */ write_c0_compare(read_c0_count() + COUNTON); + + local_irq_restore(flags); } #undef NR_LOOPS diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index b6dc78ad5d8c..c333e5788664 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -37,6 +37,7 @@ #include <asm/signal.h> #include <asm/sim.h> #include <asm/shmparam.h> +#include <asm/sync.h> #include <asm/sysmips.h> #include <asm/switch_to.h> @@ -80,6 +81,7 @@ SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len, save_static_function(sys_fork); save_static_function(sys_clone); +save_static_function(sys_clone3); SYSCALL_DEFINE1(set_thread_area, unsigned long, addr) { @@ -137,6 +139,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) " .set "MIPS_ISA_ARCH_LEVEL" \n" " li %[err], 0 \n" "1: \n" + " " __SYNC(full, loongson3_war) " \n" user_ll("%[old]", "(%[addr])") " move %[tmp], %[new] \n" "2: \n" diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile index a3d4bec695c6..6efb2f6889a7 100644 --- a/arch/mips/kernel/syscalls/Makefile +++ b/arch/mips/kernel/syscalls/Makefile @@ -18,7 +18,7 @@ quiet_cmd_syshdr = SYSHDR $@ '$(syshdr_pfx_$(basetarget))' \ '$(syshdr_offset_$(basetarget))' -quiet_cmd_sysnr = SYSNR $@ +quiet_cmd_sysnr = SYSNR $@ cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \ '$(sysnr_abis_$(basetarget))' \ '$(sysnr_pfx_$(basetarget))' \ diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl index c9c879ec9b6d..1f9e8ad636cc 100644 --- a/arch/mips/kernel/syscalls/syscall_n32.tbl +++ 
b/arch/mips/kernel/syscalls/syscall_n32.tbl @@ -373,4 +373,6 @@ 432 n32 fsmount sys_fsmount 433 n32 fspick sys_fspick 434 n32 pidfd_open sys_pidfd_open -# 435 reserved for clone3 +435 n32 clone3 __sys_clone3 +437 n32 openat2 sys_openat2 +438 n32 pidfd_getfd sys_pidfd_getfd diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl index bbce9159caa1..c0b9d802dbf6 100644 --- a/arch/mips/kernel/syscalls/syscall_n64.tbl +++ b/arch/mips/kernel/syscalls/syscall_n64.tbl @@ -349,4 +349,6 @@ 432 n64 fsmount sys_fsmount 433 n64 fspick sys_fspick 434 n64 pidfd_open sys_pidfd_open -# 435 reserved for clone3 +435 n64 clone3 __sys_clone3 +437 n64 openat2 sys_openat2 +438 n64 pidfd_getfd sys_pidfd_getfd diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl index 9653591428ec..ac586774c980 100644 --- a/arch/mips/kernel/syscalls/syscall_o32.tbl +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl @@ -422,4 +422,6 @@ 432 o32 fsmount sys_fsmount 433 o32 fspick sys_fspick 434 o32 pidfd_open sys_pidfd_open -# 435 reserved for clone3 +435 o32 clone3 __sys_clone3 +437 o32 openat2 sys_openat2 +438 o32 pidfd_getfd sys_pidfd_getfd diff --git a/arch/mips/kernel/syscalls/syscalltbl.sh b/arch/mips/kernel/syscalls/syscalltbl.sh index acd338d33bbe..1e2570740c20 100644 --- a/arch/mips/kernel/syscalls/syscalltbl.sh +++ b/arch/mips/kernel/syscalls/syscalltbl.sh @@ -13,10 +13,10 @@ emit() { t_entry="$3" while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}" + printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}" t_nxt=$((t_nxt+1)) done - printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" + printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}" } grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 342e41de9d64..31968cbd6464 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -210,11 +210,6 @@ void show_stack(struct task_struct *task, unsigned long *sp) regs.regs[29] = task->thread.reg29; regs.regs[31] = 0; regs.cp0_epc = task->thread.reg31; -#ifdef CONFIG_KGDB_KDB - } else if (atomic_read(&kgdb_active) != -1 && - kdb_current_regs) { - memcpy(®s, kdb_current_regs, sizeof(regs)); -#endif /* CONFIG_KGDB_KDB */ } else { prepare_frametrace(®s); } @@ -1761,7 +1756,7 @@ static inline void parity_protection_init(void) case CPU_5KC: case CPU_5KE: - case CPU_LOONGSON1: + case CPU_LOONGSON32: write_c0_ecc(0x80000000); back_to_back_c0_hazard(); /* Set the PE bit (bit 31) in the c0_errctl register. 
*/ @@ -2394,7 +2389,7 @@ void __init trap_init(void) else { if (cpu_has_vtag_icache) set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp); - else if (current_cpu_type() == CPU_LOONGSON3) + else if (current_cpu_type() == CPU_LOONGSON64) set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp); else set_except_vector(EXCCODE_RI, handle_ri_rdhwr); diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 92bd2b0f0548..ca6fc4762d97 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -131,7 +131,7 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR +#ifndef CONFIG_CPU_NO_LOAD_STORE_LR #define _LoadW(addr, value, res, type) \ do { \ __asm__ __volatile__ ( \ @@ -152,7 +152,7 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ +#else /* CONFIG_CPU_NO_LOAD_STORE_LR */ /* For CPUs without lwl instruction */ #define _LoadW(addr, value, res, type) \ do { \ @@ -187,7 +187,7 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ +#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */ #define _LoadHWU(addr, value, res, type) \ do { \ @@ -213,7 +213,7 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR +#ifndef CONFIG_CPU_NO_LOAD_STORE_LR #define _LoadWU(addr, value, res, type) \ do { \ __asm__ __volatile__ ( \ @@ -256,7 +256,7 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ +#else /* CONFIG_CPU_NO_LOAD_STORE_LR */ /* For CPUs without lwl and ldl instructions */ #define _LoadWU(addr, value, res, type) \ do { \ @@ -340,7 +340,7 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ +#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */ #define _StoreHW(addr, value, res, type) \ @@ -366,7 +366,7 @@ do { \ : "r" (value), "r" (addr), "i" (-EFAULT));\ } while(0) -#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR +#ifndef CONFIG_CPU_NO_LOAD_STORE_LR #define _StoreW(addr, value, res, type) \ do { \ __asm__ __volatile__ ( \ @@ -407,7 +407,7 @@ do { \ : "r" (value), "r" (addr), "i" (-EFAULT)); \ } while(0) -#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ +#else /* CONFIG_CPU_NO_LOAD_STORE_LR */ #define _StoreW(addr, value, res, type) \ do { \ __asm__ __volatile__ ( \ @@ -483,7 +483,7 @@ do { \ : "memory"); \ } while(0) -#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ +#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */ #else /* __BIG_ENDIAN */ @@ -509,7 +509,7 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR +#ifndef CONFIG_CPU_NO_LOAD_STORE_LR #define _LoadW(addr, value, res, type) \ do { \ __asm__ __volatile__ ( \ @@ -530,7 +530,7 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ +#else /* CONFIG_CPU_NO_LOAD_STORE_LR */ /* For CPUs without lwl instruction */ #define _LoadW(addr, value, res, type) \ do { \ @@ -565,7 +565,7 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ +#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */ #define _LoadHWU(addr, value, res, type) \ @@ -592,7 +592,7 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR +#ifndef CONFIG_CPU_NO_LOAD_STORE_LR #define _LoadWU(addr, value, res, type) \ do { \ __asm__ __volatile__ ( \ @@ -635,7 +635,7 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ +#else /* CONFIG_CPU_NO_LOAD_STORE_LR */ /* For CPUs without lwl and ldl 
instructions */ #define _LoadWU(addr, value, res, type) \ do { \ @@ -718,7 +718,7 @@ do { \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ +#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */ #define _StoreHW(addr, value, res, type) \ do { \ @@ -743,7 +743,7 @@ do { \ : "r" (value), "r" (addr), "i" (-EFAULT));\ } while(0) -#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR +#ifndef CONFIG_CPU_NO_LOAD_STORE_LR #define _StoreW(addr, value, res, type) \ do { \ __asm__ __volatile__ ( \ @@ -784,7 +784,7 @@ do { \ : "r" (value), "r" (addr), "i" (-EFAULT)); \ } while(0) -#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ +#else /* CONFIG_CPU_NO_LOAD_STORE_LR */ /* For CPUs without swl and sdl instructions */ #define _StoreW(addr, value, res, type) \ do { \ @@ -861,7 +861,7 @@ do { \ : "memory"); \ } while(0) -#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ +#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */ #endif #define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel) diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 3a372686ffca..bc35f8499111 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -20,9 +20,12 @@ #include <asm/mips-cps.h> #include <asm/page.h> #include <asm/vdso.h> +#include <vdso/helpers.h> +#include <vdso/vsyscall.h> /* Kernel-provided data used by the VDSO. */ -static union mips_vdso_data vdso_data __page_aligned_data; +static union mips_vdso_data mips_vdso_data __page_aligned_data; +struct vdso_data *vdso_data = mips_vdso_data.data; /* * Mapping for the VDSO data/GIC pages. The real pages are mapped manually, as @@ -66,34 +69,6 @@ static int __init init_vdso(void) } subsys_initcall(init_vdso); -void update_vsyscall(struct timekeeper *tk) -{ - vdso_data_write_begin(&vdso_data); - - vdso_data.xtime_sec = tk->xtime_sec; - vdso_data.xtime_nsec = tk->tkr_mono.xtime_nsec; - vdso_data.wall_to_mono_sec = tk->wall_to_monotonic.tv_sec; - vdso_data.wall_to_mono_nsec = tk->wall_to_monotonic.tv_nsec; - vdso_data.cs_shift = tk->tkr_mono.shift; - - vdso_data.clock_mode = tk->tkr_mono.clock->archdata.vdso_clock_mode; - if (vdso_data.clock_mode != VDSO_CLOCK_NONE) { - vdso_data.cs_mult = tk->tkr_mono.mult; - vdso_data.cs_cycle_last = tk->tkr_mono.cycle_last; - vdso_data.cs_mask = tk->tkr_mono.mask; - } - - vdso_data_write_end(&vdso_data); -} - -void update_vsyscall_tz(void) -{ - if (vdso_data.clock_mode != VDSO_CLOCK_NONE) { - vdso_data.tz_minuteswest = sys_tz.tz_minuteswest; - vdso_data.tz_dsttime = sys_tz.tz_dsttime; - } -} - static unsigned long vdso_base(void) { unsigned long base; @@ -163,7 +138,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) */ if (cpu_has_dc_aliases) { base = __ALIGN_MASK(base, shm_align_mask); - base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask; + base += ((unsigned long)vdso_data - gic_size) & shm_align_mask; } data_addr = base + gic_size; @@ -189,7 +164,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) /* Map data page. */ ret = remap_pfn_range(vma, data_addr, - virt_to_phys(&vdso_data) >> PAGE_SHIFT, + virt_to_phys(vdso_data) >> PAGE_SHIFT, PAGE_SIZE, PAGE_READONLY); if (ret) goto out; diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 33ee0d18fb0a..a5f00ec73ea6 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -10,6 +10,11 @@ */ #define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir) +/* Cavium Octeon should not have a separate PT_NOTE Program Header. 
*/ +#ifndef CONFIG_CAVIUM_OCTEON_SOC +#define EMITS_PT_NOTE +#endif + #include <asm-generic/vmlinux.lds.h> #undef mips @@ -76,16 +81,8 @@ SECTIONS __stop___dbe_table = .; } -#ifdef CONFIG_CAVIUM_OCTEON_SOC -#define NOTES_HEADER -#else /* CONFIG_CAVIUM_OCTEON_SOC */ -#define NOTES_HEADER :note -#endif /* CONFIG_CAVIUM_OCTEON_SOC */ - NOTES :text NOTES_HEADER - .dummy : { *(.dummy) } :text - _sdata = .; /* Start of data section */ - RODATA + RO_DATA(4096) /* writeable */ .data : { /* Data */
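The syscall table updates above wire clone3 (435), openat2 (437) and pidfd_getfd (438) into all three MIPS ABIs, with sys_clone3 gaining a save_static_function() wrapper in syscall.c. A minimal sketch of exercising the newly wired clone3 from userspace through the raw syscall interface — illustrative only, not part of this series, and assuming a libc that defines SYS_clone3 and a <linux/sched.h> that provides struct clone_args:

/*
 * Hypothetical clone3 smoke test; a minimal sketch, assuming kernel
 * headers >= 5.3 (struct clone_args) and SYS_clone3 in <sys/syscall.h>.
 */
#define _GNU_SOURCE
#include <linux/sched.h>	/* struct clone_args */
#include <sys/syscall.h>	/* SYS_clone3 */
#include <sys/wait.h>
#include <unistd.h>
#include <signal.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct clone_args args;
	long pid;

	/* All-zero args plus SIGCHLD behaves like a plain fork(). */
	memset(&args, 0, sizeof(args));
	args.exit_signal = SIGCHLD;

	pid = syscall(SYS_clone3, &args, sizeof(args));
	if (pid < 0) {
		perror("clone3");	/* ENOSYS on kernels without entry 435 */
		return 1;
	}
	if (pid == 0)
		_exit(0);		/* child */

	waitpid((pid_t)pid, NULL, 0);
	printf("clone3 created pid %ld\n", pid);
	return 0;
}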