Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/btext.c            | 2
-rw-r--r--  arch/powerpc/kernel/crash.c            | 6
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S   | 2
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S   | 2
-rw-r--r--  arch/powerpc/kernel/head_40x.S         | 2
-rw-r--r--  arch/powerpc/kernel/head_44x.S         | 2
-rw-r--r--  arch/powerpc/kernel/head_64.S          | 2
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S   | 2
-rw-r--r--  arch/powerpc/kernel/l2cr_6xx.S         | 2
-rw-r--r--  arch/powerpc/kernel/lparcfg.c          | 2
-rw-r--r--  arch/powerpc/kernel/paca.c             | 2
-rw-r--r--  arch/powerpc/kernel/perf_event.c       | 2
-rw-r--r--  arch/powerpc/kernel/ppc_save_regs.S    | 2
-rw-r--r--  arch/powerpc/kernel/prom.c             | 4
-rw-r--r--  arch/powerpc/kernel/ptrace.c           | 2
-rw-r--r--  arch/powerpc/kernel/rtasd.c            | 2
-rw-r--r--  arch/powerpc/kernel/setup-common.c     | 3
-rw-r--r--  arch/powerpc/kernel/swsusp_32.S        | 2
-rw-r--r--  arch/powerpc/kernel/traps.c            | 2
-rw-r--r--  arch/powerpc/kernel/udbg_16550.c       | 2
-rw-r--r--  arch/powerpc/kernel/vdso32/sigtramp.S  | 2
-rw-r--r--  arch/powerpc/kernel/vdso64/sigtramp.S  | 2
22 files changed, 27 insertions, 24 deletions
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 625942ae5585..60b3e377b1e4 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -99,7 +99,7 @@ void __init btext_prepare_BAT(void)
 
 /* This function can be used to enable the early boot text when doing
  * OF booting or within bootx init. It must be followed by a btext_unmap()
- * call before the logical address becomes unuseable
+ * call before the logical address becomes unusable
  */
 void __init btext_setup_display(int width, int height, int depth, int pitch,
 				unsigned long address)
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 3d569e2aff18..3d3d416339dd 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -163,7 +163,7 @@ static void crash_kexec_prepare_cpus(int cpu)
 }
 
 /* wait for all the CPUs to hit real mode but timeout if they don't come in */
-#ifdef CONFIG_PPC_STD_MMU_64
+#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
 static void crash_kexec_wait_realmode(int cpu)
 {
 	unsigned int msecs;
@@ -188,6 +188,8 @@ static void crash_kexec_wait_realmode(int cpu)
 	}
 	mb();
 }
+#else
+static inline void crash_kexec_wait_realmode(int cpu) {}
 #endif
 
 /*
@@ -344,9 +346,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	crash_save_cpu(regs, crashing_cpu);
 	crash_kexec_prepare_cpus(crashing_cpu);
 	cpu_set(crashing_cpu, cpus_in_crash);
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
 	crash_kexec_wait_realmode(crashing_cpu);
-#endif
 
 	machine_kexec_mask_interrupts();
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 5c43063d2506..9651acc3504a 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -379,7 +379,7 @@ interrupt_end_book3e:
 	mfspr	r13,SPRN_SPRG_PACA	/* get our PACA */
 	b	system_call_common
 
-/* Auxillary Processor Unavailable Interrupt */
+/* Auxiliary Processor Unavailable Interrupt */
 START_EXCEPTION(ap_unavailable);
 	NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_KEEP)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index c532cb2c927a..aeb739e18769 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -5,7 +5,7 @@
  * handling and other fixed offset specific things.
  *
  * This file is meant to be #included from head_64.S due to
- * position dependant assembly.
+ * position dependent assembly.
  *
  * Most of this originates from head_64.S and thus has the same
  * copyright history.
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 9dd21a8c4d52..a91626d87fc9 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -766,7 +766,7 @@ DataAccess:
  * miss get to this point to load the TLB.
  *	r10 - TLB_TAG value
  *	r11 - Linux PTE
- *	r12, r9 - avilable to use
+ *	r12, r9 - available to use
  *	PID - loaded with proper value when we get here
  *	Upon exit, we reload everything and RFI.
  * Actually, it will fit now, but oh well.....a common place
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index cbb3436b592d..5e12b741ba5f 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -178,7 +178,7 @@ interrupt_base:
 	NORMAL_EXCEPTION_PROLOG
 	EXC_XFER_EE_LITE(0x0c00, DoSyscall)
 
-	/* Auxillary Processor Unavailable Interrupt */
+	/* Auxiliary Processor Unavailable Interrupt */
 	EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
 
 	/* Decrementer Interrupt */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 271140b38b6f..3a319f9c9d3e 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -40,7 +40,7 @@
 #include <asm/kvm_book3s_asm.h>
 #include <asm/ptrace.h>
 
-/* The physical memory is layed out such that the secondary processor
+/* The physical memory is laid out such that the secondary processor
  * spin code sits at 0x0000...0x00ff. On server, the vectors follow
  * using the layout described in exceptions-64s.S
  */
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 3e02710d9562..5ecf54cfa7d4 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -326,7 +326,7 @@ interrupt_base:
 	NORMAL_EXCEPTION_PROLOG
 	EXC_XFER_EE_LITE(0x0c00, DoSyscall)
 
-	/* Auxillary Processor Unavailable Interrupt */
+	/* Auxiliary Processor Unavailable Interrupt */
 	EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
 
 	/* Decrementer Interrupt */
diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S
index 2a2f3c3f6d80..97ec8557f974 100644
--- a/arch/powerpc/kernel/l2cr_6xx.S
+++ b/arch/powerpc/kernel/l2cr_6xx.S
@@ -151,7 +151,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	/**** Might be a good idea to set L2DO here - to prevent
 	   instructions from getting into the cache.  But since we
 	   invalidate the next time we enable the cache it doesn't really matter.
-	   Don't do this unless you accomodate all processor variations.
+	   Don't do this unless you accommodate all processor variations.
 	   The bit moved on the 7450.....
 	  ****/
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 16468362ad57..301db65f05a1 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -262,7 +262,7 @@ static void parse_ppp_data(struct seq_file *m)
 	seq_printf(m, "system_active_processors=%d\n",
 		   ppp_data.active_system_procs);
 
-	/* pool related entries are apropriate for shared configs */
+	/* pool related entries are appropriate for shared configs */
 	if (lppaca_of(0).shared_proc) {
 		unsigned long pool_idle_time, pool_procs;
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index f4adf89d7614..10f0aadee95b 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -203,7 +203,7 @@ void __init free_unused_pacas(void)
 {
 	int new_size;
 
-	new_size = PAGE_ALIGN(sizeof(struct paca_struct) * num_possible_cpus());
+	new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
 
 	if (new_size >= paca_size)
 		return;
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 97e0ae414940..c4063b7f49a0 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -759,7 +759,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
 
 	/*
 	 * If group events scheduling transaction was started,
-	 * skip the schedulability test here, it will be peformed
+	 * skip the schedulability test here, it will be performed
 	 * at commit time(->commit_txn) as a whole
 	 */
 	if (cpuhw->group_flag & PERF_EVENT_TXN)
diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S
index e83ba3f078e4..1b1787d52896 100644
--- a/arch/powerpc/kernel/ppc_save_regs.S
+++ b/arch/powerpc/kernel/ppc_save_regs.S
@@ -15,7 +15,7 @@
 
 /*
  * Grab the register values as they are now.
- * This won't do a particularily good job because we really
+ * This won't do a particularly good job because we really
  * want our caller's caller's registers, and our caller has
  * already executed its prologue.
  * ToDo: We could reach back into the caller's save area to do
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 05b7139d6a27..e74fa12afc82 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -683,7 +683,7 @@ void __init early_init_devtree(void *params)
 #endif
 
 #ifdef CONFIG_PHYP_DUMP
-	/* scan tree to see if dump occured during last boot */
+	/* scan tree to see if dump occurred during last boot */
 	of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
 #endif
 
@@ -739,7 +739,7 @@ void __init early_init_devtree(void *params)
 
 	DBG("Scanning CPUs ...\n");
 
-	/* Retreive CPU related informations from the flat tree
+	/* Retrieve CPU related informations from the flat tree
 	 * (altivec support, boot CPU ID, ...)
 	 */
 	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 895b082f1e48..55613e33e263 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -463,7 +463,7 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
 #ifdef CONFIG_VSX
 /*
  * Currently to set and and get all the vsx state, you need to call
- * the fp and VMX calls aswell.  This only get/sets the lower 32
+ * the fp and VMX calls as well.  This only get/sets the lower 32
  * 128bit VSX registers.
  */
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 7980ec0e1e1a..67f6c3b51357 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -465,7 +465,7 @@ static void start_event_scan(void)
 	pr_debug("rtasd: will sleep for %d milliseconds\n",
 		 (30000 / rtas_event_scan_rate));
 
-	/* Retreive errors from nvram if any */
+	/* Retrieve errors from nvram if any */
 	retreive_nvram_error_log();
 
 	schedule_delayed_work_on(cpumask_first(cpu_online_mask),
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 9d4882a46647..21f30cb68077 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -509,6 +509,9 @@ void __init smp_setup_cpu_maps(void)
 	 */
 	cpu_init_thread_core_maps(nthreads);
 
+	/* Now that possible cpus are set, set nr_cpu_ids for later use */
+	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
+
 	free_unused_pacas();
 }
 #endif /* CONFIG_SMP */
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
index b0754e237438..ba4dee3d233f 100644
--- a/arch/powerpc/kernel/swsusp_32.S
+++ b/arch/powerpc/kernel/swsusp_32.S
@@ -143,7 +143,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 
 	/* Disable MSR:DR to make sure we don't take a TLB or
 	 * hash miss during the copy, as our hash table will
-	 * for a while be unuseable. For .text, we assume we are
+	 * for a while be unusable. For .text, we assume we are
 	 * covered by a BAT. This works only for non-G5 at this
 	 * point. G5 will need a better approach, possibly using
 	 * a small temporary hash table filled with large mappings,
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index bd74fac169be..5ddb801bc154 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -959,7 +959,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 	 * ESR_DST (!?) or 0.  In the process of chasing this with the
 	 * hardware people - not sure if it can happen on any illegal
 	 * instruction or only on FP instructions, whether there is a
-	 * pattern to occurences etc. -dgibson 31/Mar/2003 */
+	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
 	switch (do_mathemu(regs)) {
 	case 0:
 		emulate_single_step(regs);
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index b4b167b33643..baa33a7517bc 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -1,5 +1,5 @@
 /*
- * udbg for NS16550 compatable serial ports
+ * udbg for NS16550 compatible serial ports
 *
 * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
 *
diff --git a/arch/powerpc/kernel/vdso32/sigtramp.S b/arch/powerpc/kernel/vdso32/sigtramp.S
index 68d49dd71dcc..cf0c9c9c24f9 100644
--- a/arch/powerpc/kernel/vdso32/sigtramp.S
+++ b/arch/powerpc/kernel/vdso32/sigtramp.S
@@ -19,7 +19,7 @@
 
 /* The nop here is a hack.  The dwarf2 unwind routines subtract 1 from
    the return address to get an address in the middle of the presumed
-   call instruction.  Since we don't have a call here, we artifically
+   call instruction.  Since we don't have a call here, we artificially
    extend the range covered by the unwind info by adding a nop before
    the real start.  */
 	nop
diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S
index 59eb59bb4082..45ea281e9a21 100644
--- a/arch/powerpc/kernel/vdso64/sigtramp.S
+++ b/arch/powerpc/kernel/vdso64/sigtramp.S
@@ -20,7 +20,7 @@
 
 /* The nop here is a hack.  The dwarf2 unwind routines subtract 1 from
    the return address to get an address in the middle of the presumed
-   call instruction.  Since we don't have a call here, we artifically
+   call instruction.  Since we don't have a call here, we artificially
    extend the range covered by the unwind info by padding before
    the real start.  */
 	nop