From 9a244b95ddb62a17b62f4b061b6e13ca4d177942 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Wed, 11 Oct 2006 19:30:03 +0100 Subject: [MIPS] Pass NULL not 0 for pointer value. Signed-off-by: Ralf Baechle --- arch/mips/kernel/smp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 1af3612a1ce8..db80957ada89 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -310,7 +310,7 @@ static void flush_tlb_all_ipi(void *info) void flush_tlb_all(void) { - on_each_cpu(flush_tlb_all_ipi, 0, 1, 1); + on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1); } static void flush_tlb_mm_ipi(void *mm) -- cgit v1.2.1 From 55b7428303d390c53d3a1bc587de8592ce65900e Mon Sep 17 00:00:00 2001 From: Franck Bui-Huu Date: Fri, 13 Oct 2006 13:37:35 +0200 Subject: [MIPS] Use kallsyms_lookup_size_offset() instead of kallsyms_lookup() This new routine doesn't look up symbol names, so we needn't pass any char buffers or pointers since we don't care about names. Signed-off-by: Franck Bui-Huu Signed-off-by: Ralf Baechle --- arch/mips/kernel/process.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 9f307eb1a31e..ec8209f3a0c6 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -358,10 +358,8 @@ static int __init frame_info_init(void) unsigned long size = 0; #ifdef CONFIG_KALLSYMS unsigned long ofs; - char *modname; - char namebuf[KSYM_NAME_LEN + 1]; - kallsyms_lookup((unsigned long)schedule, &size, &ofs, &modname, namebuf); + kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs); #endif schedule_mfi.func = schedule; schedule_mfi.func_size = size; @@ -403,8 +401,6 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, { unsigned long stack_page; struct mips_frame_info info; - char *modname; - char namebuf[KSYM_NAME_LEN + 1]; unsigned long size, ofs; int leaf; extern void ret_from_irq(void); @@ -433,7 +429,7 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, } return 0; } - if (!kallsyms_lookup(pc, &size, &ofs, &modname, namebuf)) + if (!kallsyms_lookup_size_offset(pc, &size, &ofs)) return 0; /* * Return ra if an exception occured at the first instruction -- cgit v1.2.1 From 53571ce47010562f5e67782ea00206f379a5cd65 Mon Sep 17 00:00:00 2001 From: Thiemo Seufer Date: Sun, 13 Aug 2006 00:53:29 +0100 Subject: [MIPS] Fix O32 personality(2) call with 0xffffffff argument. A sign extension bug resulted in sys_personality being invoked with a 0xffffffffffffffffUL argument, so querying the current personality didn't work. 
Signed-off-by: Thiemo Seufer Signed-off-by: Ralf Baechle --- arch/mips/kernel/linux32.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 53f4171fc188..7a3ebbeba1f3 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -1055,7 +1055,9 @@ asmlinkage long sys32_newuname(struct new_utsname __user * name) asmlinkage int sys32_personality(unsigned long personality) { int ret; - if (current->personality == PER_LINUX32 && personality == PER_LINUX) + personality &= 0xffffffff; + if (personality(current->personality) == PER_LINUX32 && + personality == PER_LINUX) personality = PER_LINUX32; ret = sys_personality(personality); if (ret == PER_LINUX32) -- cgit v1.2.1 From 089c7e7f2da7c3245de47377252683bd9edae738 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Mon, 16 Oct 2006 16:49:37 +0100 Subject: [MIPS] Use compat_sys_mount. This fixes mount problems with smbfs, ncpfs and NFSv4. Signed-off-by: Ralf Baechle --- arch/mips/kernel/scall64-n32.S | 2 +- arch/mips/kernel/scall64-o32.S | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 6d9f18727ac5..8c453f8ffea6 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -280,7 +280,7 @@ EXPORT(sysn32_call_table) PTR sys_sync PTR sys_acct PTR sys32_settimeofday - PTR sys_mount /* 6160 */ + PTR compat_sys_mount /* 6160 */ PTR sys_umount PTR sys_swapon PTR sys_swapoff diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 2e6d0673163e..d105917d6d93 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -226,7 +226,7 @@ sys_call_table: PTR sys_ni_syscall /* was sys_stat */ PTR sys_lseek PTR sys_getpid /* 4020 */ - PTR sys_mount + PTR compat_sys_mount PTR sys_oldumount PTR sys_setuid PTR sys_getuid -- cgit v1.2.1 From eea32d4c6e272b6c324c8c22df4c28274fcb5a21 Mon Sep 17 00:00:00 2001 From: Atsushi Nemoto Date: Mon, 16 Oct 2006 22:48:49 +0900 Subject: [MIPS] save_context_stack fix CONFIG_KALLSYMS=n case is obviously wrong, though it is harmless since CONFIG_KALLSYMS is always enabled with CONFIG_STACKTRACE for now. Signed-off-by: Atsushi Nemoto Signed-off-by: Ralf Baechle --- arch/mips/kernel/stacktrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c index 4aabe526a68e..a586aba337a7 100644 --- a/arch/mips/kernel/stacktrace.c +++ b/arch/mips/kernel/stacktrace.c @@ -57,7 +57,7 @@ static void save_context_stack(struct stack_trace *trace, pc = unwind_stack(task, &sp, pc, &ra); } while (pc); #else - save_raw_context_stack(sp); + save_raw_context_stack(trace, sp); #endif } -- cgit v1.2.1 From d2bcf87d0fcdc10d1be65b03fd032bec05efe49f Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Wed, 18 Oct 2006 23:52:17 +0100 Subject: [MIPS] Reserve syscall numbers for kexec_load. 
Signed-off-by: Ralf Baechle --- arch/mips/kernel/scall32-o32.S | 3 ++- arch/mips/kernel/scall64-64.S | 1 + arch/mips/kernel/scall64-n32.S | 1 + arch/mips/kernel/scall64-o32.S | 1 + 4 files changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 61362e6fa9ec..720fac3435d5 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -652,7 +652,8 @@ einval: li v0, -EINVAL sys sys_vmsplice 4 sys sys_move_pages 6 sys sys_set_robust_list 2 - sys sys_get_robust_list 3 + sys sys_get_robust_list 3 /* 4310 */ + sys sys_ni_syscall 0 .endm /* We pre-compute the number of _instruction_ bytes needed to diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 6c7b5ed0ea6e..3a34f62c8b1b 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -468,3 +468,4 @@ sys_call_table: PTR sys_move_pages PTR sys_set_robust_list PTR sys_get_robust_list + PTR sys_ni_syscall /* 5270 */ diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 8c453f8ffea6..67b92a1d6c72 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -394,3 +394,4 @@ EXPORT(sysn32_call_table) PTR sys_move_pages PTR compat_sys_set_robust_list PTR compat_sys_get_robust_list + PTR sys_ni_syscall diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index d105917d6d93..2875c4a3fa58 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -516,4 +516,5 @@ sys_call_table: PTR compat_sys_move_pages PTR compat_sys_set_robust_list PTR compat_sys_get_robust_list /* 4310 */ + PTR sys_ni_syscall .size sys_call_table,.-sys_call_table -- cgit v1.2.1 From 61ce1efe6e40233663d27ab8ac9ba9710eebcaad Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Fri, 27 Oct 2006 11:41:44 -0700 Subject: [PATCH] vmlinux.lds: consolidate initcall sections Add a vmlinux.lds.h helper macro for defining the eight-level initcall table, teach all the architectures to use it. This is a prerequisite for a patch which performs initcall synchronisation for multithreaded-probing. Cc: Greg KH Signed-off-by: Andrew Morton [ Added AVR32 as well ] Signed-off-by: Haavard Skinnemoen Signed-off-by: Linus Torvalds --- arch/mips/kernel/vmlinux.lds.S | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 0bb9cd889456..25ed3337ce35 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -91,13 +91,7 @@ SECTIONS __initcall_start = .; .initcall.init : { - *(.initcall1.init) - *(.initcall2.init) - *(.initcall3.init) - *(.initcall4.init) - *(.initcall5.init) - *(.initcall6.init) - *(.initcall7.init) + INITCALLS } __initcall_end = .; -- cgit v1.2.1 From 991ea26dcbc2524a054f37911ea375e631cb8891 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Sun, 29 Oct 2006 21:07:40 +0000 Subject: [MIPS] Wire up getcpu(2) and epoll_wait(2) syscalls. 
Signed-off-by: Ralf Baechle --- arch/mips/kernel/scall32-o32.S | 2 ++ arch/mips/kernel/scall64-64.S | 2 ++ arch/mips/kernel/scall64-n32.S | 2 ++ arch/mips/kernel/scall64-o32.S | 2 ++ 4 files changed, 8 insertions(+) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 720fac3435d5..a95f37de080e 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -654,6 +654,8 @@ einval: li v0, -EINVAL sys sys_set_robust_list 2 sys sys_get_robust_list 3 /* 4310 */ sys sys_ni_syscall 0 + sys sys_getcpu 3 + sys sys_epoll_pwait 6 .endm /* We pre-compute the number of _instruction_ bytes needed to diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 3a34f62c8b1b..8fb0f60f657b 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -469,3 +469,5 @@ sys_call_table: PTR sys_set_robust_list PTR sys_get_robust_list PTR sys_ni_syscall /* 5270 */ + PTR sys_getcpu + PTR sys_epoll_pwait diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 67b92a1d6c72..0da5ca2040ff 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -395,3 +395,5 @@ EXPORT(sysn32_call_table) PTR compat_sys_set_robust_list PTR compat_sys_get_robust_list PTR sys_ni_syscall + PTR sys_getcpu + PTR sys_epoll_pwait diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 2875c4a3fa58..b9d00cae8b5f 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -517,4 +517,6 @@ sys_call_table: PTR compat_sys_set_robust_list PTR compat_sys_get_robust_list /* 4310 */ PTR sys_ni_syscall + PTR sys_getcpu + PTR sys_epoll_pwait .size sys_call_table,.-sys_call_table -- cgit v1.2.1 From 3ab0f40f333007eb31dc1e08f578ec224c7d71c2 Mon Sep 17 00:00:00 2001 From: Yoichi Yuasa Date: Tue, 31 Oct 2006 13:44:38 +0900 Subject: [MIPS] Fix warning of printk format in mips_srs_init() arch/mips/kernel/traps.c:1115: warning: int format, long unsigned int arg (arg 2) Signed-off-by: Yoichi Yuasa Signed-off-by: Ralf Baechle --- arch/mips/kernel/traps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index cce8313ec27d..9fda1b8be3a7 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1111,7 +1111,7 @@ static struct shadow_registers { static void mips_srs_init(void) { shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1; - printk(KERN_INFO "%d MIPSR2 register sets available\n", + printk(KERN_INFO "%ld MIPSR2 register sets available\n", shadow_registers.sr_supported); shadow_registers.sr_allocated = 1; /* Set 0 used by kernel */ } -- cgit v1.2.1 From 781b0f8d4f9c90137ea32771346ab49f0e5319b3 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Tue, 31 Oct 2006 18:25:10 +0000 Subject: [MIPS] VSMP: Fix initialization ordering bug. 
Signed-off-by: Ralf Baechle --- arch/mips/kernel/smp-mt.c | 152 +++++++++++++++++++++++++--------------------- 1 file changed, 83 insertions(+), 69 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index 3b5f3b632622..06b29fa73f56 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -140,15 +140,88 @@ static struct irqaction irq_call = { .name = "IPI_call" }; +static void __init smp_copy_vpe_config(void) +{ + write_vpe_c0_status( + (read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0); + + /* set config to be the same as vpe0, particularly kseg0 coherency alg */ + write_vpe_c0_config( read_c0_config()); + + /* make sure there are no software interrupts pending */ + write_vpe_c0_cause(0); + + /* Propagate Config7 */ + write_vpe_c0_config7(read_c0_config7()); +} + +static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0, + unsigned int ncpu) +{ + if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)) + return ncpu; + + /* Deactivate all but VPE 0 */ + if (tc != 0) { + unsigned long tmp = read_vpe_c0_vpeconf0(); + + tmp &= ~VPECONF0_VPA; + + /* master VPE */ + tmp |= VPECONF0_MVP; + write_vpe_c0_vpeconf0(tmp); + + /* Record this as available CPU */ + cpu_set(tc, phys_cpu_present_map); + __cpu_number_map[tc] = ++ncpu; + __cpu_logical_map[ncpu] = tc; + } + + /* Disable multi-threading with TC's */ + write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); + + if (tc != 0) + smp_copy_vpe_config(); + + return ncpu; +} + +static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0) +{ + unsigned long tmp; + + if (!tc) + return; + + /* bind a TC to each VPE, May as well put all excess TC's + on the last VPE */ + if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1)) + write_tc_c0_tcbind(read_tc_c0_tcbind() | ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)); + else { + write_tc_c0_tcbind(read_tc_c0_tcbind() | tc); + + /* and set XTC */ + write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (tc << VPECONF0_XTC_SHIFT)); + } + + tmp = read_tc_c0_tcstatus(); + + /* mark not allocated and not dynamically allocatable */ + tmp &= ~(TCSTATUS_A | TCSTATUS_DA); + tmp |= TCSTATUS_IXMT; /* interrupt exempt */ + write_tc_c0_tcstatus(tmp); + + write_tc_c0_tchalt(TCHALT_H); +} + /* * Common setup before any secondaries are started * Make sure all CPU's are in a sensible state before we boot any of the * secondarys */ -void plat_smp_setup(void) +void __init plat_smp_setup(void) { - unsigned long val; - int i, num; + unsigned int mvpconf0, ntc, tc, ncpu = 0; #ifdef CONFIG_MIPS_MT_FPAFF /* If we have an FPU, enroll ourselves in the FPU-full mask */ @@ -167,75 +240,16 @@ void plat_smp_setup(void) /* Put MVPE's into 'configuration state' */ set_c0_mvpcontrol(MVPCONTROL_VPC); - val = read_c0_mvpconf0(); + mvpconf0 = read_c0_mvpconf0(); + ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT; /* we'll always have more TC's than VPE's, so loop setting everything to a sensible state */ - for (i = 0, num = 0; i <= ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT); i++) { - settc(i); - - /* VPE's */ - if (i <= ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)) { - - /* deactivate all but vpe0 */ - if (i != 0) { - unsigned long tmp = read_vpe_c0_vpeconf0(); - - tmp &= ~VPECONF0_VPA; - - /* master VPE */ - tmp |= VPECONF0_MVP; - write_vpe_c0_vpeconf0(tmp); - - /* Record this as available CPU */ - cpu_set(i, phys_cpu_present_map); - __cpu_number_map[i] = ++num; - __cpu_logical_map[num] = 
i; - } - - /* disable multi-threading with TC's */ - write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); - - if (i != 0) { - write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0); + for (tc = 0; tc <= ntc; tc++) { + settc(tc); - /* set config to be the same as vpe0, particularly kseg0 coherency alg */ - write_vpe_c0_config( read_c0_config()); - - /* make sure there are no software interrupts pending */ - write_vpe_c0_cause(0); - - /* Propagate Config7 */ - write_vpe_c0_config7(read_c0_config7()); - } - - } - - /* TC's */ - - if (i != 0) { - unsigned long tmp; - - /* bind a TC to each VPE, May as well put all excess TC's - on the last VPE */ - if ( i >= (((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1) ) - write_tc_c0_tcbind(read_tc_c0_tcbind() | ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) ); - else { - write_tc_c0_tcbind( read_tc_c0_tcbind() | i); - - /* and set XTC */ - write_vpe_c0_vpeconf0( read_vpe_c0_vpeconf0() | (i << VPECONF0_XTC_SHIFT)); - } - - tmp = read_tc_c0_tcstatus(); - - /* mark not allocated and not dynamically allocatable */ - tmp &= ~(TCSTATUS_A | TCSTATUS_DA); - tmp |= TCSTATUS_IXMT; /* interrupt exempt */ - write_tc_c0_tcstatus(tmp); - - write_tc_c0_tchalt(TCHALT_H); - } + smp_tc_init(tc, mvpconf0); + ncpu = smp_vpe_init(tc, mvpconf0, ncpu); } /* Release config state */ @@ -243,7 +257,7 @@ void plat_smp_setup(void) /* We'll wait until starting the secondaries before starting MVPE */ - printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num); + printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu); } void __init plat_prepare_cpus(unsigned int max_cpus) -- cgit v1.2.1 From 70e46f48cb5933119712ee27945309a4bfc98282 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Tue, 31 Oct 2006 18:33:09 +0000 Subject: [MIPS] VSMP: Synchronize cp0 counters on bootup. Signed-off-by: Ralf Baechle --- arch/mips/kernel/smp-mt.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index 06b29fa73f56..2ac19a6cbf68 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -153,6 +153,8 @@ static void __init smp_copy_vpe_config(void) /* Propagate Config7 */ write_vpe_c0_config7(read_c0_config7()); + + write_vpe_c0_count(read_c0_count()); } static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0, -- cgit v1.2.1 From 16b7b2ac0148e839da86af8747b6fa4aad43a9b7 Mon Sep 17 00:00:00 2001 From: Atsushi Nemoto Date: Tue, 24 Oct 2006 00:21:27 +0900 Subject: [MIPS] Fixup migration to GENERIC_TIME Since we already moved to GENERIC_TIME, we should implement alternatives of old do_gettimeoffset routines to get sub-jiffies resolution from gettimeofday(). This patch includes: * MIPS clocksource support (based on works by Manish Lachwani). * remove unused gettimeoffset routines and related codes. * remove unised 64bit do_div64_32(). * simplify mips_hpt_init. (no argument needed, __init tag) * simplify c0_hpt_timer_init. (no need to write to c0_count) * remove some hpt_init routines. * mips_hpt_mask variable to specify bitmask of hpt value. * convert jmr3927_do_gettimeoffset to jmr3927_hpt_read. * convert ip27_do_gettimeoffset to ip27_hpt_read. * convert bcm1480_do_gettimeoffset to bcm1480_hpt_read. * simplify sb1250 hpt functions. 
(no need to subtract and shift) Signed-off-by: Atsushi Nemoto Signed-off-by: Ralf Baechle --- arch/mips/kernel/time.c | 319 ++++++++---------------------------------------- 1 file changed, 53 insertions(+), 266 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index debe86c2f691..e535f86efa2f 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -11,6 +11,7 @@ * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ +#include #include #include #include @@ -67,15 +68,9 @@ int (*rtc_mips_set_time)(unsigned long) = null_rtc_set_time; int (*rtc_mips_set_mmss)(unsigned long); -/* usecs per counter cycle, shifted to left by 32 bits */ -static unsigned int sll32_usecs_per_cycle; - /* how many counter cycles in a jiffy */ static unsigned long cycles_per_jiffy __read_mostly; -/* Cycle counter value at the previous timer interrupt.. */ -static unsigned int timerhi, timerlo; - /* expirelo is the count value for next CPU timer interrupt */ static unsigned int expirelo; @@ -93,7 +88,7 @@ static unsigned int null_hpt_read(void) return 0; } -static void null_hpt_init(unsigned int count) +static void __init null_hpt_init(void) { /* nothing */ } @@ -128,186 +123,18 @@ static unsigned int c0_hpt_read(void) return read_c0_count(); } -/* For use solely as a high precision timer. */ -static void c0_hpt_init(unsigned int count) -{ - write_c0_count(read_c0_count() - count); -} - /* For use both as a high precision timer and an interrupt source. */ -static void c0_hpt_timer_init(unsigned int count) +static void __init c0_hpt_timer_init(void) { - count = read_c0_count() - count; - expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy; - write_c0_count(expirelo - cycles_per_jiffy); + expirelo = read_c0_count() + cycles_per_jiffy; write_c0_compare(expirelo); - write_c0_count(count); } int (*mips_timer_state)(void); void (*mips_timer_ack)(void); unsigned int (*mips_hpt_read)(void); -void (*mips_hpt_init)(unsigned int); - -/* - * Gettimeoffset routines. These routines returns the time duration - * since last timer interrupt in usecs. - * - * If the exact CPU counter frequency is known, use fixed_rate_gettimeoffset. - * Otherwise use calibrate_gettimeoffset() - * - * If the CPU does not have the counter register, you can either supply - * your own gettimeoffset() routine, or use null_gettimeoffset(), which - * gives the same resolution as HZ. - */ - -static unsigned long null_gettimeoffset(void) -{ - return 0; -} - - -/* The function pointer to one of the gettimeoffset funcs. */ -unsigned long (*do_gettimeoffset)(void) = null_gettimeoffset; - - -static unsigned long fixed_rate_gettimeoffset(void) -{ - u32 count; - unsigned long res; - - /* Get last timer tick in absolute kernel time */ - count = mips_hpt_read(); - - /* .. relative to previous jiffy (32 bits is enough) */ - count -= timerlo; - - __asm__("multu %1,%2" - : "=h" (res) - : "r" (count), "r" (sll32_usecs_per_cycle) - : "lo", GCC_REG_ACCUM); - - /* - * Due to possible jiffies inconsistencies, we need to check - * the result so that we'll get a timer that is monotonic. - */ - if (res >= USECS_PER_JIFFY) - res = USECS_PER_JIFFY - 1; - - return res; -} - - -/* - * Cached "1/(clocks per usec) * 2^32" value. - * It has to be recalculated once each jiffy. - */ -static unsigned long cached_quotient; - -/* Last jiffy when calibrate_divXX_gettimeoffset() was called. 
*/ -static unsigned long last_jiffies; - -/* - * This is moved from dec/time.c:do_ioasic_gettimeoffset() by Maciej. - */ -static unsigned long calibrate_div32_gettimeoffset(void) -{ - u32 count; - unsigned long res, tmp; - unsigned long quotient; - - tmp = jiffies; - - quotient = cached_quotient; - - if (last_jiffies != tmp) { - last_jiffies = tmp; - if (last_jiffies != 0) { - unsigned long r0; - do_div64_32(r0, timerhi, timerlo, tmp); - do_div64_32(quotient, USECS_PER_JIFFY, - USECS_PER_JIFFY_FRAC, r0); - cached_quotient = quotient; - } - } - - /* Get last timer tick in absolute kernel time */ - count = mips_hpt_read(); - - /* .. relative to previous jiffy (32 bits is enough) */ - count -= timerlo; - - __asm__("multu %1,%2" - : "=h" (res) - : "r" (count), "r" (quotient) - : "lo", GCC_REG_ACCUM); - - /* - * Due to possible jiffies inconsistencies, we need to check - * the result so that we'll get a timer that is monotonic. - */ - if (res >= USECS_PER_JIFFY) - res = USECS_PER_JIFFY - 1; - - return res; -} - -static unsigned long calibrate_div64_gettimeoffset(void) -{ - u32 count; - unsigned long res, tmp; - unsigned long quotient; - - tmp = jiffies; - - quotient = cached_quotient; - - if (last_jiffies != tmp) { - last_jiffies = tmp; - if (last_jiffies) { - unsigned long r0; - __asm__(".set push\n\t" - ".set mips3\n\t" - "lwu %0,%3\n\t" - "dsll32 %1,%2,0\n\t" - "or %1,%1,%0\n\t" - "ddivu $0,%1,%4\n\t" - "mflo %1\n\t" - "dsll32 %0,%5,0\n\t" - "or %0,%0,%6\n\t" - "ddivu $0,%0,%1\n\t" - "mflo %0\n\t" - ".set pop" - : "=&r" (quotient), "=&r" (r0) - : "r" (timerhi), "m" (timerlo), - "r" (tmp), "r" (USECS_PER_JIFFY), - "r" (USECS_PER_JIFFY_FRAC) - : "hi", "lo", GCC_REG_ACCUM); - cached_quotient = quotient; - } - } - - /* Get last timer tick in absolute kernel time */ - count = mips_hpt_read(); - - /* .. relative to previous jiffy (32 bits is enough) */ - count -= timerlo; - - __asm__("multu %1,%2" - : "=h" (res) - : "r" (count), "r" (quotient) - : "lo", GCC_REG_ACCUM); - - /* - * Due to possible jiffies inconsistencies, we need to check - * the result so that we'll get a timer that is monotonic. - */ - if (res >= USECS_PER_JIFFY) - res = USECS_PER_JIFFY - 1; - - return res; -} - +void (*mips_hpt_init)(void) __initdata = null_hpt_init; +unsigned int mips_hpt_mask = 0xffffffff; /* last time when xtime and rtc are sync'ed up */ static long last_rtc_update; @@ -334,18 +161,10 @@ void local_timer_interrupt(int irq, void *dev_id) */ irqreturn_t timer_interrupt(int irq, void *dev_id) { - unsigned long j; - unsigned int count; - write_seqlock(&xtime_lock); - count = mips_hpt_read(); mips_timer_ack(); - /* Update timerhi/timerlo for intra-jiffy calibration. */ - timerhi += count < timerlo; /* Wrap around */ - timerlo = count; - /* * call the generic timer interrupt handling */ @@ -368,47 +187,6 @@ irqreturn_t timer_interrupt(int irq, void *dev_id) } } - /* - * If jiffies has overflown in this timer_interrupt, we must - * update the timer[hi]/[lo] to make fast gettimeoffset funcs - * quotient calc still valid. -arca - * - * The first timer interrupt comes late as interrupts are - * enabled long after timers are initialized. Therefore the - * high precision timer is fast, leading to wrong gettimeoffset() - * calculations. We deal with it by setting it based on the - * number of its ticks between the second and the third interrupt. - * That is still somewhat imprecise, but it's a good estimate. 
- * --macro - */ - j = jiffies; - if (j < 4) { - static unsigned int prev_count; - static int hpt_initialized; - - switch (j) { - case 0: - timerhi = timerlo = 0; - mips_hpt_init(count); - break; - case 2: - prev_count = count; - break; - case 3: - if (!hpt_initialized) { - unsigned int c3 = 3 * (count - prev_count); - - timerhi = 0; - timerlo = c3; - mips_hpt_init(count - c3); - hpt_initialized = 1; - } - break; - default: - break; - } - } - write_sequnlock(&xtime_lock); /* @@ -476,12 +254,11 @@ asmlinkage void ll_local_timer_interrupt(int irq) * 1) board_time_init() - * a) (optional) set up RTC routines, * b) (optional) calibrate and set the mips_hpt_frequency - * (only needed if you intended to use fixed_rate_gettimeoffset - * or use cpu counter as timer interrupt source) + * (only needed if you intended to use cpu counter as timer interrupt + * source) * 2) setup xtime based on rtc_mips_get_time(). - * 3) choose a appropriate gettimeoffset routine. - * 4) calculate a couple of cached variables for later usage - * 5) plat_timer_setup() - + * 3) calculate a couple of cached variables for later usage + * 4) plat_timer_setup() - * a) (optional) over-write any choices made above by time_init(). * b) machine specific code should setup the timer irqaction. * c) enable the timer interrupt @@ -533,13 +310,48 @@ static unsigned int __init calibrate_hpt(void) } while (--i); hpt_end = mips_hpt_read(); - hpt_count = hpt_end - hpt_start; + hpt_count = (hpt_end - hpt_start) & mips_hpt_mask; hz = HZ; frequency = (u64)hpt_count * (u64)hz; return frequency >> log_2_loops; } +static cycle_t read_mips_hpt(void) +{ + return (cycle_t)mips_hpt_read(); +} + +static struct clocksource clocksource_mips = { + .name = "MIPS", + .read = read_mips_hpt, + .is_continuous = 1, +}; + +static void __init init_mips_clocksource(void) +{ + u64 temp; + u32 shift; + + if (!mips_hpt_frequency || mips_hpt_read == null_hpt_read) + return; + + /* Calclate a somewhat reasonable rating value */ + clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; + /* Find a shift value */ + for (shift = 32; shift > 0; shift--) { + temp = (u64) NSEC_PER_SEC << shift; + do_div(temp, mips_hpt_frequency); + if ((temp >> 32) == 0) + break; + } + clocksource_mips.shift = shift; + clocksource_mips.mult = (u32)temp; + clocksource_mips.mask = mips_hpt_mask; + + clocksource_register(&clocksource_mips); +} + void __init time_init(void) { if (board_time_init) @@ -555,41 +367,21 @@ void __init time_init(void) -xtime.tv_sec, -xtime.tv_nsec); /* Choose appropriate high precision timer routines. */ - if (!cpu_has_counter && !mips_hpt_read) { + if (!cpu_has_counter && !mips_hpt_read) /* No high precision timer -- sorry. */ mips_hpt_read = null_hpt_read; - mips_hpt_init = null_hpt_init; - } else if (!mips_hpt_frequency && !mips_timer_state) { + else if (!mips_hpt_frequency && !mips_timer_state) { /* A high precision timer of unknown frequency. */ - if (!mips_hpt_read) { + if (!mips_hpt_read) /* No external high precision timer -- use R4k. */ mips_hpt_read = c0_hpt_read; - mips_hpt_init = c0_hpt_init; - } - - if (cpu_has_mips32r1 || cpu_has_mips32r2 || - (current_cpu_data.isa_level == MIPS_CPU_ISA_I) || - (current_cpu_data.isa_level == MIPS_CPU_ISA_II)) - /* - * We need to calibrate the counter but we don't have - * 64-bit division. - */ - do_gettimeoffset = calibrate_div32_gettimeoffset; - else - /* - * We need to calibrate the counter but we *do* have - * 64-bit division. 
- */ - do_gettimeoffset = calibrate_div64_gettimeoffset; } else { /* We know counter frequency. Or we can get it. */ if (!mips_hpt_read) { /* No external high precision timer -- use R4k. */ mips_hpt_read = c0_hpt_read; - if (mips_timer_state) - mips_hpt_init = c0_hpt_init; - else { + if (!mips_timer_state) { /* No external timer interrupt -- use R4k. */ mips_hpt_init = c0_hpt_timer_init; mips_timer_ack = c0_timer_ack; @@ -598,16 +390,9 @@ void __init time_init(void) if (!mips_hpt_frequency) mips_hpt_frequency = calibrate_hpt(); - do_gettimeoffset = fixed_rate_gettimeoffset; - /* Calculate cache parameters. */ cycles_per_jiffy = (mips_hpt_frequency + HZ / 2) / HZ; - /* sll32_usecs_per_cycle = 10^6 * 2^32 / mips_counter_freq */ - do_div64_32(sll32_usecs_per_cycle, - 1000000, mips_hpt_frequency / 2, - mips_hpt_frequency); - /* Report the high precision timer rate for a reference. */ printk("Using %u.%03u MHz high precision timer.\n", ((mips_hpt_frequency + 500) / 1000) / 1000, @@ -619,7 +404,7 @@ void __init time_init(void) mips_timer_ack = null_timer_ack; /* This sets up the high precision timer for the first interrupt. */ - mips_hpt_init(mips_hpt_read()); + mips_hpt_init(); /* * Call board specific timer interrupt setup. @@ -633,6 +418,8 @@ void __init time_init(void) * is not invoked accidentally. */ plat_timer_setup(&timer_irqaction); + + init_mips_clocksource(); } #define FEBRUARY 2 -- cgit v1.2.1 From 242954b5aa8e5ec84f46a84637daf08ee4247c6e Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Tue, 24 Oct 2006 02:29:01 +0100 Subject: [MIPS] 16K & 64K page size fixes Derived from Peter Watkins 's work. Signed-off-by: Ralf Baechle --- arch/mips/kernel/asm-offsets.c | 2 +- arch/mips/kernel/head.S | 3 ++- arch/mips/kernel/r4k_switch.S | 5 +++++ arch/mips/kernel/vmlinux.lds.S | 10 ++++++++++ 4 files changed, 18 insertions(+), 2 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index e9ce5b3721af..ff88b06f89df 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -22,7 +22,7 @@ #define offset(string, ptr, member) \ __asm__("\n@@@" string "%0" : : "i" (_offset(ptr, member))) #define constant(string, member) \ - __asm__("\n@@@" string "%x0" : : "ri" (member)) + __asm__("\n@@@" string "%X0" : : "ri" (member)) #define size(string, size) \ __asm__("\n@@@" string "%0" : : "i" (sizeof(size))) #define linefeed text("") diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index 8c6db0fc72f0..ddc1b71c9378 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -189,7 +189,8 @@ NESTED(kernel_entry, 16, sp) # kernel entry point MTC0 zero, CP0_CONTEXT # clear context register PTR_LA $28, init_thread_union - PTR_ADDIU sp, $28, _THREAD_SIZE - 32 + PTR_LI sp, _THREAD_SIZE - 32 + PTR_ADDU sp, $28 set_saved_sp sp, t0, t1 PTR_SUBU sp, 4 * SZREG # init stack pointer diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index d5c8b82fed72..cc566cf12246 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S @@ -85,7 +85,12 @@ move $28, a2 cpu_restore_nonscratch a1 +#if (_THREAD_SIZE - 32) < 0x10000 PTR_ADDIU t0, $28, _THREAD_SIZE - 32 +#else + PTR_LI t0, _THREAD_SIZE - 32 + PTR_ADDU t0, $28 +#endif set_saved_sp t0, t1, t2 #ifdef CONFIG_MIPS_MT_SMTC /* Read-modify-writes of Status must be atomic on a VPE */ diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 25ed3337ce35..79f0317d84ac 100644 --- 
a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -50,6 +50,16 @@ SECTIONS /* writeable */ .data : { /* Data */ . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ + /* + * This ALIGN is needed as a workaround for a bug a gcc bug upto 4.1 which + * limits the maximum alignment to at most 32kB and results in the following + * warning: + * + * CC arch/mips/kernel/init_task.o + * arch/mips/kernel/init_task.c:30: warning: alignment of ‘init_thread_union’ + * is greater than maximum object file alignment. Using 32768 + */ + . = ALIGN(_PAGE_SIZE); *(.data.init_task) *(.data) -- cgit v1.2.1 From cb56837ea5f15fa5279fd490f292134c3a92e5de Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Tue, 31 Oct 2006 22:49:04 +0000 Subject: [MIPS] SMTC: Fix crash if # of TC's > # of VPE's after pt_regs irq cleanup. Signed-off-by: Ralf Baechle --- arch/mips/kernel/entry.S | 3 +++ arch/mips/kernel/smtc-asm.S | 7 ++++++- 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index 417c08ac76eb..f10b6a19f8bf 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -83,7 +83,10 @@ FEXPORT(syscall_exit) FEXPORT(restore_all) # restore full frame #ifdef CONFIG_MIPS_MT_SMTC /* Detect and execute deferred IPI "interrupts" */ + LONG_L s0, TI_REGS($28) + LONG_S sp, TI_REGS($28) jal deferred_smtc_ipi + LONG_S s0, TI_REGS($28) /* Re-arm any temporarily masked interrupts not explicitly "acked" */ mfc0 v0, CP0_TCSTATUS ori v1, v0, TCSTATUS_IXMT diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S index 1cb9441f1474..921207c4a83c 100644 --- a/arch/mips/kernel/smtc-asm.S +++ b/arch/mips/kernel/smtc-asm.S @@ -101,7 +101,9 @@ FEXPORT(__smtc_ipi_vector) lw t0,PT_PADSLOT5(sp) /* Argument from sender passed in stack pad slot 4 */ lw a0,PT_PADSLOT4(sp) - PTR_LA ra, _ret_from_irq + LONG_L s0, TI_REGS($28) + LONG_S sp, TI_REGS($28) + PTR_LA ra, ret_from_irq jr t0 /* @@ -119,7 +121,10 @@ LEAF(self_ipi) subu t1,sp,PT_SIZE sw ra,PT_EPC(t1) sw a0,PT_PADSLOT4(t1) + LONG_L s0, TI_REGS($28) + LONG_S sp, TI_REGS($28) la t2,ipi_decode + LONG_S s0, TI_REGS($28) sw t2,PT_PADSLOT5(t1) /* Save pre-disable value of TCStatus */ sw t0,PT_TCSTATUS(t1) -- cgit v1.2.1 From 64c590b7a62ae1272fe4afd7b915de314591f35e Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Wed, 1 Nov 2006 00:22:00 +0000 Subject: [MIPS] SMTC: Synchronize cp0 counters on bootup. Signed-off-by: Ralf Baechle --- arch/mips/kernel/smtc.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index cc1f7474f7d7..3b78caf112f5 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -476,6 +476,7 @@ void mipsmt_prepare_cpus(void) write_vpe_c0_compare(0); /* Propagate Config7 */ write_vpe_c0_config7(read_c0_config7()); + write_vpe_c0_count(read_c0_count()); } /* enable multi-threading within VPE */ write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); -- cgit v1.2.1 From 9ba126cfbf505f4d5b39ed294cedd241321c7a91 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Fri, 13 Oct 2006 11:22:52 +0100 Subject: [MIPS] Fix warning about init_initrd() call if !CONFIG_BLK_DEV_INITRD. 
Signed-off-by: Ralf Baechle --- arch/mips/kernel/setup.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index fdbb508661c5..8f6e89697ccf 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -223,7 +223,11 @@ disable: #else /* !CONFIG_BLK_DEV_INITRD */ -#define init_initrd() 0 +static unsigned long __init init_initrd(void) +{ + return 0; +} + #define finalize_initrd() do {} while (0) #endif -- cgit v1.2.1 From 4a4cf77923eeb3cec40a302656d6ab5ced04ba48 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Mon, 6 Nov 2006 17:41:06 +0000 Subject: [MIPS] Make irq number allocator generally available for fixing EV64120. Signed-off-by: Ralf Baechle --- arch/mips/kernel/irq.c | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index dd24434392b6..9b0e49d63d7b 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -26,6 +26,48 @@ #include #include +static unsigned long irq_map[NR_IRQS / BITS_PER_LONG]; + +int __devinit allocate_irqno(void) +{ + int irq; + +again: + irq = find_first_zero_bit(irq_map, NR_IRQS); + + if (irq >= NR_IRQS) + return -ENOSPC; + + if (test_and_set_bit(irq, irq_map)) + goto again; + + return irq; +} + +EXPORT_SYMBOL_GPL(allocate_irqno); + +/* + * Allocate the 16 legacy interrupts for i8259 devices. This happens early + * in the kernel initialization so treating allocation failure as BUG() is + * ok. + */ +void __init alloc_legacy_irqno(void) +{ + int i; + + for (i = 0; i <= 16; i++) + BUG_ON(test_and_set_bit(i, irq_map)); +} + +void __devinit free_irqno(unsigned int irq) +{ + smp_mb__before_clear_bit(); + clear_bit(irq, irq_map); + smp_mb__after_clear_bit(); +} + +EXPORT_SYMBOL_GPL(free_irqno); + /* * 'what should we do if we get a hw irq event on an illegal vector'. * each architecture has to answer this themselves. -- cgit v1.2.1
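
For reference, a minimal, hypothetical sketch of how board setup code might consume the irq number allocator added by the last patch above. Only allocate_irqno() and free_irqno() come from that patch; the handler, the "example_board" name, the -EIO return and the request_irq() usage around them are illustrative assumptions, not part of any commit in this series.

/*
 * Hypothetical consumer of the dynamic irq number allocator: reserve a
 * free Linux irq number, install a handler on it, and hand the number
 * back if the handler cannot be registered.
 */
#include <linux/init.h>
#include <linux/interrupt.h>

/*
 * Provided by arch/mips/kernel/irq.c in the last patch; the header that
 * declares them is not part of the diff shown, so declare them here.
 */
extern int allocate_irqno(void);
extern void free_irqno(unsigned int irq);

static irqreturn_t example_board_handler(int irq, void *dev_id)
{
	/* acknowledge/handle the board interrupt here */
	return IRQ_HANDLED;
}

static int __init example_board_irq_setup(void)
{
	int irq = allocate_irqno();

	if (irq < 0)		/* -ENOSPC: all NR_IRQS numbers in use */
		return irq;

	if (request_irq(irq, example_board_handler, 0,
	                "example_board", NULL)) {
		free_irqno(irq);	/* give the number back on failure */
		return -EIO;
	}

	return irq;		/* caller now owns this irq number */
}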