Diffstat (limited to 'arch/powerpc/kernel')
24 files changed, 234 insertions, 406 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index c287980b7e65..80e9fe2632b8 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -12,10 +12,10 @@ endif obj-y := semaphore.o cputable.o ptrace.o syscalls.o \ irq.o align.o signal_32.o pmc.o vdso.o \ - init_task.o process.o + init_task.o process.o systbl.o obj-y += vdso32/ obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \ - signal_64.o ptrace32.o systbl.o \ + signal_64.o ptrace32.o \ paca.o cpu_setup_power4.o \ firmware.o sysfs.o idle_64.o obj-$(CONFIG_PPC64) += vdso64/ @@ -46,7 +46,7 @@ extra-$(CONFIG_8xx) := head_8xx.o extra-y += vmlinux.lds obj-y += time.o prom.o traps.o setup-common.o udbg.o -obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o systbl.o +obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o obj-$(CONFIG_MODULES) += ppc_ksyms.o diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 8c21d378f5d2..778f22fd85d2 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -134,8 +134,10 @@ static void crash_kexec_prepare_cpus(void) * the crash CPU will send an IPI and wait for other CPUs to * respond. If not, proceed the kexec boot even though we failed to * capture other CPU states. + * Delay of at least 10 seconds. */ - msecs = 1000000; + printk(KERN_ALERT "Sending IPI to other cpus...\n"); + msecs = 10000; while ((atomic_read(&waiting_for_crash_ipi) > 0) && (--msecs > 0)) { barrier(); mdelay(1); diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S index e4362dfa37fb..340730fb8c91 100644 --- a/arch/powerpc/kernel/fpu.S +++ b/arch/powerpc/kernel/fpu.S @@ -66,7 +66,7 @@ _GLOBAL(load_up_fpu) #else ld r4,PACACURRENT(r13) addi r5,r4,THREAD /* Get THREAD */ - ld r4,THREAD_FPEXC_MODE(r5) + lwz r4,THREAD_FPEXC_MODE(r5) ori r12,r12,MSR_FP or r12,r12,r4 std r12,_MSR(r1) diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 03b25f9359f8..a0579e859b21 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -714,6 +714,7 @@ AltiVecUnavailable: #ifdef CONFIG_ALTIVEC bne load_up_altivec /* if from user, just load it up */ #endif /* CONFIG_ALTIVEC */ + addi r3,r1,STACK_FRAME_OVERHEAD EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception) PerformanceMonitor: diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 308268466342..11f2cd5af7dc 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -139,7 +139,7 @@ _GLOBAL(__secondary_hold) ori r24,r24,MSR_RI mtmsrd r24 /* RI on */ - /* Grab our linux cpu number */ + /* Grab our physical cpu number */ mr r24,r3 /* Tell the master cpu we're here */ @@ -153,12 +153,7 @@ _GLOBAL(__secondary_hold) cmpdi 0,r4,1 bne 100b -#ifdef CONFIG_HMT - SET_REG_IMMEDIATE(r4, .hmt_init) - mtctr r4 - bctr -#else -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) LOAD_REG_IMMEDIATE(r4, .pSeries_secondary_smp_init) mtctr r4 mr r3,r24 @@ -166,7 +161,6 @@ _GLOBAL(__secondary_hold) #else BUG_OPCODE #endif -#endif /* This value is used to mark exception frames on the stack. 
*/ .section ".toc","aw" @@ -321,7 +315,6 @@ exception_marker: label##_pSeries: \ HMT_MEDIUM; \ mtspr SPRN_SPRG1,r13; /* save r13 */ \ - RUNLATCH_ON(r13); \ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) #define STD_EXCEPTION_ISERIES(n, label, area) \ @@ -329,7 +322,6 @@ label##_pSeries: \ label##_iSeries: \ HMT_MEDIUM; \ mtspr SPRN_SPRG1,r13; /* save r13 */ \ - RUNLATCH_ON(r13); \ EXCEPTION_PROLOG_ISERIES_1(area); \ EXCEPTION_PROLOG_ISERIES_2; \ b label##_common @@ -339,7 +331,6 @@ label##_iSeries: \ label##_iSeries: \ HMT_MEDIUM; \ mtspr SPRN_SPRG1,r13; /* save r13 */ \ - RUNLATCH_ON(r13); \ EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \ lbz r10,PACAPROCENABLED(r13); \ cmpwi 0,r10,0; \ @@ -392,6 +383,7 @@ label##_common: \ label##_common: \ EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ DISABLE_INTS; \ + bl .ppc64_runlatch_on; \ addi r3,r1,STACK_FRAME_OVERHEAD; \ bl hdlr; \ b .ret_from_except_lite @@ -409,7 +401,6 @@ __start_interrupts: _machine_check_pSeries: HMT_MEDIUM mtspr SPRN_SPRG1,r13 /* save r13 */ - RUNLATCH_ON(r13) EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) . = 0x300 @@ -436,7 +427,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB) data_access_slb_pSeries: HMT_MEDIUM mtspr SPRN_SPRG1,r13 - RUNLATCH_ON(r13) mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ std r3,PACA_EXSLB+EX_R3(r13) mfspr r3,SPRN_DAR @@ -462,7 +452,6 @@ data_access_slb_pSeries: instruction_access_slb_pSeries: HMT_MEDIUM mtspr SPRN_SPRG1,r13 - RUNLATCH_ON(r13) mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ std r3,PACA_EXSLB+EX_R3(r13) mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ @@ -493,7 +482,6 @@ instruction_access_slb_pSeries: .globl system_call_pSeries system_call_pSeries: HMT_MEDIUM - RUNLATCH_ON(r9) mr r9,r13 mfmsr r10 mfspr r13,SPRN_SPRG3 @@ -577,7 +565,6 @@ slb_miss_user_pseries: system_reset_fwnmi: HMT_MEDIUM mtspr SPRN_SPRG1,r13 /* save r13 */ - RUNLATCH_ON(r13) EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) .globl machine_check_fwnmi @@ -585,7 +572,6 @@ system_reset_fwnmi: machine_check_fwnmi: HMT_MEDIUM mtspr SPRN_SPRG1,r13 /* save r13 */ - RUNLATCH_ON(r13) EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) #ifdef CONFIG_PPC_ISERIES @@ -749,11 +735,12 @@ iSeries_secondary_smp_loop: .globl decrementer_iSeries_masked decrementer_iSeries_masked: + /* We may not have a valid TOC pointer in here. 
*/ li r11,1 ld r12,PACALPPACAPTR(r13) stb r11,LPPACADECRINT(r12) - LOAD_REG_ADDRBASE(r12,tb_ticks_per_jiffy) - lwz r12,ADDROFF(tb_ticks_per_jiffy)(r12) + LOAD_REG_IMMEDIATE(r12, tb_ticks_per_jiffy) + lwz r12,0(r12) mtspr SPRN_DEC,r12 /* fall through */ @@ -895,7 +882,6 @@ unrecov_fer: .align 7 .globl data_access_common data_access_common: - RUNLATCH_ON(r10) /* It wont fit in the 0x300 handler */ mfspr r10,SPRN_DAR std r10,PACA_EXGEN+EX_DAR(r13) mfspr r10,SPRN_DSISR @@ -1043,6 +1029,7 @@ hardware_interrupt_common: EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN) hardware_interrupt_entry: DISABLE_INTS + bl .ppc64_runlatch_on addi r3,r1,STACK_FRAME_OVERHEAD bl .do_IRQ b .ret_from_except_lite @@ -1817,22 +1804,6 @@ _STATIC(start_here_multiplatform) ori r6,r6,MSR_RI mtmsrd r6 /* RI on */ -#ifdef CONFIG_HMT - /* Start up the second thread on cpu 0 */ - mfspr r3,SPRN_PVR - srwi r3,r3,16 - cmpwi r3,0x34 /* Pulsar */ - beq 90f - cmpwi r3,0x36 /* Icestar */ - beq 90f - cmpwi r3,0x37 /* SStar */ - beq 90f - b 91f /* HMT not supported */ -90: li r3,0 - bl .hmt_start_secondary -91: -#endif - /* The following gets the stack and TOC set up with the regs */ /* pointing to the real addr of the kernel stack. This is */ /* all done to support the C function call below which sets */ @@ -1946,77 +1917,8 @@ _STATIC(start_here_common) bl .start_kernel -_GLOBAL(hmt_init) -#ifdef CONFIG_HMT - LOAD_REG_IMMEDIATE(r5, hmt_thread_data) - mfspr r7,SPRN_PVR - srwi r7,r7,16 - cmpwi r7,0x34 /* Pulsar */ - beq 90f - cmpwi r7,0x36 /* Icestar */ - beq 91f - cmpwi r7,0x37 /* SStar */ - beq 91f - b 101f -90: mfspr r6,SPRN_PIR - andi. r6,r6,0x1f - b 92f -91: mfspr r6,SPRN_PIR - andi. r6,r6,0x3ff -92: sldi r4,r24,3 - stwx r6,r5,r4 - bl .hmt_start_secondary - b 101f - -__hmt_secondary_hold: - LOAD_REG_IMMEDIATE(r5, hmt_thread_data) - clrldi r5,r5,4 - li r7,0 - mfspr r6,SPRN_PIR - mfspr r8,SPRN_PVR - srwi r8,r8,16 - cmpwi r8,0x34 - bne 93f - andi. r6,r6,0x1f - b 103f -93: andi. r6,r6,0x3f - -103: lwzx r8,r5,r7 - cmpw r8,r6 - beq 104f - addi r7,r7,8 - b 103b - -104: addi r7,r7,4 - lwzx r9,r5,r7 - mr r24,r9 -101: -#endif - mr r3,r24 - b .pSeries_secondary_smp_init - -#ifdef CONFIG_HMT -_GLOBAL(hmt_start_secondary) - LOAD_REG_IMMEDIATE(r4,__hmt_secondary_hold) - clrldi r4,r4,4 - mtspr SPRN_NIADORM, r4 - mfspr r4, SPRN_MSRDORM - li r5, -65 - and r4, r4, r5 - mtspr SPRN_MSRDORM, r4 - lis r4,0xffef - ori r4,r4,0x7403 - mtspr SPRN_TSC, r4 - li r4,0x1f4 - mtspr SPRN_TST, r4 - mfspr r4, SPRN_HID0 - ori r4, r4, 0x1 - mtspr SPRN_HID0, r4 - mfspr r4, SPRN_CTRLF - oris r4, r4, 0x40 - mtspr SPRN_CTRLT, r4 - blr -#endif + /* Not reached */ + BUG_OPCODE /* * We put a few things here that have to be page-aligned. 
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 4d9b4388918b..946f3219fd29 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -334,9 +334,6 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, spin_unlock_irqrestore(&(tbl->it_lock), flags); - /* Make sure updates are seen by hardware */ - mb(); - DBG("mapped %d elements:\n", outcount); /* For the sake of iommu_unmap_sg, we clear out the length in the @@ -347,6 +344,10 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, outs->dma_address = DMA_ERROR_CODE; outs->dma_length = 0; } + + /* Make sure updates are seen by hardware */ + mb(); + return outcount; failure: @@ -358,6 +359,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr) >> PAGE_SHIFT; __iommu_free(tbl, vaddr, npages); + s->dma_address = DMA_ERROR_CODE; + s->dma_length = 0; } } spin_unlock_irqrestore(&(tbl->it_lock), flags); diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index f970ace208d3..c7a799a09516 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -134,7 +134,6 @@ static int __init add_legacy_soc_port(struct device_node *np, return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags); } -#ifdef CONFIG_ISA static int __init add_legacy_isa_port(struct device_node *np, struct device_node *isa_brg) { @@ -168,7 +167,6 @@ static int __init add_legacy_isa_port(struct device_node *np, return add_legacy_port(np, index, UPIO_PORT, reg[1], taddr, NO_IRQ, UPF_BOOT_AUTOCONF); } -#endif #ifdef CONFIG_PCI static int __init add_legacy_pci_port(struct device_node *np, @@ -276,7 +274,6 @@ void __init find_legacy_serial_ports(void) of_node_put(soc); } -#ifdef CONFIG_ISA /* First fill our array with ISA ports */ for (np = NULL; (np = of_find_node_by_type(np, "serial"));) { struct device_node *isa = of_get_parent(np); @@ -287,7 +284,6 @@ void __init find_legacy_serial_ports(void) } of_node_put(isa); } -#endif #ifdef CONFIG_PCI /* Next, try to locate PCI ports */ diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index 1ae96a8ed7e2..e789fef4eb8a 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c @@ -341,7 +341,7 @@ static int lparcfg_data(struct seq_file *m, void *v) const char *system_id = ""; unsigned int *lp_index_ptr, lp_index = 0; struct device_node *rtas_node; - int *lrdrp; + int *lrdrp = NULL; rootdn = find_path_device("/"); if (rootdn) { @@ -362,7 +362,9 @@ static int lparcfg_data(struct seq_file *m, void *v) seq_printf(m, "partition_id=%d\n", (int)lp_index); rtas_node = find_path_device("/rtas"); - lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", NULL); + if (rtas_node) + lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", + NULL); if (lrdrp == NULL) { partition_potential_processors = vdso_data->processorCount; diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index d6431440c54f..ee166c586642 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -26,8 +26,6 @@ #include <asm/prom.h> #include <asm/smp.h> -#define HASH_GROUP_SIZE 0x80 /* size of each hash group, asm/mmu.h */ - int default_machine_kexec_prepare(struct kimage *image) { int i; @@ -61,7 +59,7 @@ int default_machine_kexec_prepare(struct kimage *image) */ if (htab_address) { low = __pa(htab_address); - high = low + (htab_hash_mask 
+ 1) * HASH_GROUP_SIZE; + high = low + htab_size_bytes; for (i = 0; i < image->nr_segments; i++) { begin = image->segment[i].mem; @@ -294,7 +292,7 @@ void default_machine_kexec(struct kimage *image) } /* Values we need to export to the second kernel via the device tree. */ -static unsigned long htab_base, htab_size, kernel_end; +static unsigned long htab_base, kernel_end; static struct property htab_base_prop = { .name = "linux,htab-base", @@ -305,7 +303,7 @@ static struct property htab_base_prop = { static struct property htab_size_prop = { .name = "linux,htab-size", .length = sizeof(unsigned long), - .value = (unsigned char *)&htab_size, + .value = (unsigned char *)&htab_size_bytes, }; static struct property kernel_end_prop = { @@ -331,8 +329,6 @@ static void __init export_htab_values(void) htab_base = __pa(htab_address); prom_add_property(node, &htab_base_prop); - - htab_size = 1UL << ppc64_pft_size; prom_add_property(node, &htab_size_prop); out: diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index d9a459c144d8..8a731ea877b7 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -79,15 +79,8 @@ EXPORT_SYMBOL(sys_sigreturn); EXPORT_SYMBOL(strcpy); EXPORT_SYMBOL(strncpy); EXPORT_SYMBOL(strcat); -EXPORT_SYMBOL(strncat); -EXPORT_SYMBOL(strchr); -EXPORT_SYMBOL(strrchr); -EXPORT_SYMBOL(strpbrk); -EXPORT_SYMBOL(strstr); EXPORT_SYMBOL(strlen); -EXPORT_SYMBOL(strnlen); EXPORT_SYMBOL(strcmp); -EXPORT_SYMBOL(strncmp); EXPORT_SYMBOL(strcasecmp); EXPORT_SYMBOL(csum_partial); @@ -185,9 +178,6 @@ EXPORT_SYMBOL(adb_try_handler_change); EXPORT_SYMBOL(cuda_request); EXPORT_SYMBOL(cuda_poll); #endif /* CONFIG_ADB_CUDA */ -#ifdef CONFIG_PPC_PMAC -EXPORT_SYMBOL(sys_ctrler); -#endif #ifdef CONFIG_VT EXPORT_SYMBOL(kd_mksound); #endif @@ -205,7 +195,6 @@ EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memmove); -EXPORT_SYMBOL(memscan); EXPORT_SYMBOL(memcmp); EXPORT_SYMBOL(memchr); @@ -214,7 +203,6 @@ EXPORT_SYMBOL(screen_info); #endif #ifdef CONFIG_PPC32 -EXPORT_SYMBOL(__delay); EXPORT_SYMBOL(timer_interrupt); EXPORT_SYMBOL(irq_desc); EXPORT_SYMBOL(tb_ticks_per_jiffy); @@ -222,10 +210,6 @@ EXPORT_SYMBOL(console_drivers); EXPORT_SYMBOL(cacheable_memcpy); #endif -EXPORT_SYMBOL(__up); -EXPORT_SYMBOL(__down); -EXPORT_SYMBOL(__down_interruptible); - #ifdef CONFIG_8xx EXPORT_SYMBOL(cpm_install_handler); EXPORT_SYMBOL(cpm_free_handler); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 57703994a063..c225cf154bfe 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -888,3 +888,35 @@ void dump_stack(void) show_stack(current, NULL); } EXPORT_SYMBOL(dump_stack); + +#ifdef CONFIG_PPC64 +void ppc64_runlatch_on(void) +{ + unsigned long ctrl; + + if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) { + HMT_medium(); + + ctrl = mfspr(SPRN_CTRLF); + ctrl |= CTRL_RUNLATCH; + mtspr(SPRN_CTRLT, ctrl); + + set_thread_flag(TIF_RUNLATCH); + } +} + +void ppc64_runlatch_off(void) +{ + unsigned long ctrl; + + if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) { + HMT_medium(); + + clear_thread_flag(TIF_RUNLATCH); + + ctrl = mfspr(SPRN_CTRLF); + ctrl &= ~CTRL_RUNLATCH; + mtspr(SPRN_CTRLT, ctrl); + } +} +#endif diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index d50c8df0183e..294832a7e0a6 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -491,7 +491,12 @@ void __init finish_device_tree(void) size 
= 16; finish_node(allnodes, &size, 1); size -= 16; - end = start = (unsigned long) __va(lmb_alloc(size, 128)); + + if (0 == size) + end = start = 0; + else + end = start = (unsigned long)__va(lmb_alloc(size, 128)); + finish_node(allnodes, &end, 0); BUG_ON(end != start + size); @@ -1398,8 +1403,8 @@ struct device_node *of_find_node_by_name(struct device_node *from, read_lock(&devtree_lock); np = from ? from->allnext : allnodes; - for (; np != 0; np = np->allnext) - if (np->name != 0 && strcasecmp(np->name, name) == 0 + for (; np != NULL; np = np->allnext) + if (np->name != NULL && strcasecmp(np->name, name) == 0 && of_node_get(np)) break; if (from) @@ -1917,3 +1922,30 @@ int prom_update_property(struct device_node *np, return 0; } + +#ifdef CONFIG_KEXEC +/* We may have allocated the flat device tree inside the crash kernel region + * in prom_init. If so we need to move it out into regular memory. */ +void kdump_move_device_tree(void) +{ + unsigned long start, end; + struct boot_param_header *new; + + start = __pa((unsigned long)initial_boot_params); + end = start + initial_boot_params->totalsize; + + if (end < crashk_res.start || start > crashk_res.end) + return; + + new = (struct boot_param_header*) + __va(lmb_alloc(initial_boot_params->totalsize, PAGE_SIZE)); + + memcpy(new, initial_boot_params, initial_boot_params->totalsize); + + initial_boot_params = new; + + DBG("Flat device tree blob moved to %p\n", initial_boot_params); + + /* XXX should we unreserve the old DT? */ +} +#endif /* CONFIG_KEXEC */ diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 7881ec96ef11..d34fe537400e 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -205,14 +205,6 @@ static cell_t __initdata regbuf[1024]; #define MAX_CPU_THREADS 2 -/* TO GO */ -#ifdef CONFIG_HMT -struct { - unsigned int pir; - unsigned int threadid; -} hmt_thread_data[NR_CPUS]; -#endif /* CONFIG_HMT */ - /* * Error results ... some OF calls will return "-1" on error, some * will return 0, some will return either. To simplify, here are @@ -1319,10 +1311,6 @@ static void __init prom_hold_cpus(void) */ *spinloop = 0; -#ifdef CONFIG_HMT - for (i = 0; i < NR_CPUS; i++) - RELOC(hmt_thread_data)[i].pir = 0xdeadbeef; -#endif /* look for cpus */ for (node = 0; prom_next_node(&node); ) { type[0] = 0; @@ -1389,32 +1377,6 @@ static void __init prom_hold_cpus(void) /* Reserve cpu #s for secondary threads. They start later. */ cpuid += cpu_threads; } -#ifdef CONFIG_HMT - /* Only enable HMT on processors that provide support. */ - if (__is_processor(PV_PULSAR) || - __is_processor(PV_ICESTAR) || - __is_processor(PV_SSTAR)) { - prom_printf(" starting secondary threads\n"); - - for (i = 0; i < NR_CPUS; i += 2) { - if (!cpu_online(i)) - continue; - - if (i == 0) { - unsigned long pir = mfspr(SPRN_PIR); - if (__is_processor(PV_PULSAR)) { - RELOC(hmt_thread_data)[i].pir = - pir & 0x1f; - } else { - RELOC(hmt_thread_data)[i].pir = - pir & 0x3ff; - } - } - } - } else { - prom_printf("Processor is not HMT capable\n"); - } -#endif if (cpuid > NR_CPUS) prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS) @@ -2098,6 +2060,10 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, */ prom_init_stdout(); + /* Bail if this is a kdump kernel. 
*/ + if (PHYSICAL_START > 0) + prom_panic("Error: You can't boot a kdump kernel from OF!\n"); + /* * Check for an initrd */ diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c index a8099c806150..3934c227549b 100644 --- a/arch/powerpc/kernel/prom_parse.c +++ b/arch/powerpc/kernel/prom_parse.c @@ -465,8 +465,10 @@ u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size, if (parent == NULL) return NULL; bus = of_match_bus(parent); - if (strcmp(bus->name, "pci")) + if (strcmp(bus->name, "pci")) { + of_node_put(parent); return NULL; + } bus->count_cells(dev, &na, &ns); of_node_put(parent); if (!OF_CHECK_COUNTS(na, ns)) diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 7fe4a5c944c9..b5b2add7ad1e 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -22,6 +22,7 @@ #include <asm/prom.h> #include <asm/rtas.h> +#include <asm/hvcall.h> #include <asm/semaphore.h> #include <asm/machdep.h> #include <asm/page.h> @@ -565,6 +566,7 @@ static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE; #ifdef CONFIG_PPC_PSERIES static void rtas_percpu_suspend_me(void *info) { + int i; long rc; long flags; struct rtas_suspend_me_data *data = @@ -587,18 +589,16 @@ static void rtas_percpu_suspend_me(void *info) if (rc == H_Continue) { data->waiting = 0; - rtas_call(ibm_suspend_me_token, 0, 1, - data->args->args); + data->args->args[data->args->nargs] = + rtas_call(ibm_suspend_me_token, 0, 1, NULL); + for_each_cpu(i) + plpar_hcall_norets(H_PROD,i); } else { data->waiting = -EBUSY; printk(KERN_ERR "Error on H_Join hypervisor call\n"); } out: - /* before we restore interrupts, make sure we don't - * generate a spurious soft lockup errors - */ - touch_softlockup_watchdog(); local_irq_restore(flags); return; } diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 50500093c97f..aaf384c3f04a 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c @@ -672,8 +672,7 @@ static void rtas_flash_firmware(int reboot_type) static void remove_flash_pde(struct proc_dir_entry *dp) { if (dp) { - if (dp->data != NULL) - kfree(dp->data); + kfree(dp->data); dp->owner = NULL; remove_proc_entry(dp->name, dp->parent); } diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index e29b275e09e0..f96c49b03ba0 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -311,8 +311,6 @@ void smp_release_cpus(void) DBG(" <- smp_release_cpus()\n"); } -#else -#define smp_release_cpus() #endif /* CONFIG_SMP || CONFIG_KEXEC */ /* @@ -398,6 +396,9 @@ void __init setup_system(void) { DBG(" -> setup_system()\n"); +#ifdef CONFIG_KEXEC + kdump_move_device_tree(); +#endif /* * Unflatten the device-tree passed by prom_init or kexec */ @@ -470,10 +471,12 @@ void __init setup_system(void) check_smt_enabled(); smp_setup_cpu_maps(); +#ifdef CONFIG_SMP /* Release secondary cpus out of their spinloops at 0x60 now that * we can map physical -> logical CPU ids */ smp_release_cpus(); +#endif printk("Starting Linux PPC64 %s\n", system_utsname.version); diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 3747ab0dac3f..bd837b5dbf06 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -142,11 +142,7 @@ static inline int get_old_sigaction(struct k_sigaction *new_ka, return 0; } -static inline compat_uptr_t to_user_ptr(void *kp) -{ - return (compat_uptr_t)(u64)kp; -} - +#define to_user_ptr(p) ptr_to_compat(p) #define 
from_user_ptr(p) compat_ptr(p) static inline int save_general_regs(struct pt_regs *regs, @@ -213,8 +209,8 @@ static inline int get_old_sigaction(struct k_sigaction *new_ka, return 0; } -#define to_user_ptr(p) (p) -#define from_user_ptr(p) (p) +#define to_user_ptr(p) ((unsigned long)(p)) +#define from_user_ptr(p) ((void __user *)(p)) static inline int save_general_regs(struct pt_regs *regs, struct mcontext __user *frame) @@ -254,11 +250,9 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs); */ long sys_sigsuspend(old_sigset_t mask) { - sigset_t saveset; - mask &= _BLOCKABLE; spin_lock_irq(¤t->sighand->siglock); - saveset = current->blocked; + current->saved_sigmask = current->blocked; siginitset(¤t->blocked, mask); recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); @@ -528,7 +522,7 @@ long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act, ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); if (!ret && oact) { - ret = put_user((long)old_ka.sa.sa_handler, &oact->sa_handler); + ret = put_user(to_user_ptr(old_ka.sa.sa_handler), &oact->sa_handler); ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask); ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); } @@ -677,8 +671,8 @@ long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo int compat_sys_sigaltstack(u32 __new, u32 __old, int r5, int r6, int r7, int r8, struct pt_regs *regs) { - stack_32_t __user * newstack = (stack_32_t __user *)(long) __new; - stack_32_t __user * oldstack = (stack_32_t __user *)(long) __old; + stack_32_t __user * newstack = compat_ptr(__new); + stack_32_t __user * oldstack = compat_ptr(__old); stack_t uss, uoss; int ret; mm_segment_t old_fs; @@ -710,7 +704,7 @@ int compat_sys_sigaltstack(u32 __new, u32 __old, int r5, set_fs(old_fs); /* Copy the stack information to the user output buffer */ if (!ret && oldstack && - (put_user((long)uoss.ss_sp, &oldstack->ss_sp) || + (put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) || __put_user(uoss.ss_flags, &oldstack->ss_flags) || __put_user(uoss.ss_size, &oldstack->ss_size))) return -EFAULT; diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index b3193116e686..497a5d3df359 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -60,8 +60,8 @@ struct rt_sigframe { struct ucontext uc; unsigned long _unused[2]; unsigned int tramp[TRAMP_SIZE]; - struct siginfo *pinfo; - void *puc; + struct siginfo __user *pinfo; + void __user *puc; struct siginfo info; /* 64 bit ABI allows for 288 bytes below sp before decrementing it. 
*/ char abigap[288]; diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index c8458c531b25..13595a64f013 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -540,6 +540,9 @@ int __devinit start_secondary(void *unused) if (smp_ops->take_timebase) smp_ops->take_timebase(); + if (system_state > SYSTEM_BOOTING) + per_cpu(last_jiffy, cpu) = get_tb(); + spin_lock(&call_lock); cpu_set(cpu, cpu_online_map); spin_unlock(&call_lock); diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index 475249dc2350..cd75ab2908fa 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -176,7 +176,6 @@ struct timex32 { }; extern int do_adjtimex(struct timex *); -extern void ppc_adjtimex(void); asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp) { @@ -209,9 +208,6 @@ asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp) ret = do_adjtimex(&txc); - /* adjust the conversion of TB to time of day to track adjtimex */ - ppc_adjtimex(); - if(put_user(txc.modes, &utp->modes) || __put_user(txc.offset, &utp->offset) || __put_user(txc.freq, &utp->freq) || diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S index 007b15ee36d2..8a9f994ed917 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.S @@ -36,8 +36,6 @@ #ifdef CONFIG_PPC64 #define sys_sigpending sys_ni_syscall #define sys_old_getrlimit sys_ni_syscall -#else -#define ppc_rtas sys_ni_syscall #endif _GLOBAL(sys_call_table) @@ -323,3 +321,4 @@ SYSCALL(spu_run) SYSCALL(spu_create) COMPAT_SYS(pselect6) COMPAT_SYS(ppoll) +SYSCALL(unshare) diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index c4a294d657b9..2a7ddc579379 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -50,6 +50,7 @@ #include <linux/security.h> #include <linux/percpu.h> #include <linux/rtc.h> +#include <linux/jiffies.h> #include <asm/io.h> #include <asm/processor.h> @@ -99,7 +100,15 @@ EXPORT_SYMBOL(tb_ticks_per_usec); unsigned long tb_ticks_per_sec; u64 tb_to_xs; unsigned tb_to_us; -unsigned long processor_freq; + +#define TICKLEN_SCALE (SHIFT_SCALE - 10) +u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */ +u64 ticklen_to_xs; /* 0.64 fraction */ + +/* If last_tick_len corresponds to about 1/HZ seconds, then + last_tick_len << TICKLEN_SHIFT will be about 2^63. 
*/ +#define TICKLEN_SHIFT (63 - 30 - TICKLEN_SCALE + SHIFT_HZ) + DEFINE_SPINLOCK(rtc_lock); EXPORT_SYMBOL_GPL(rtc_lock); @@ -113,10 +122,6 @@ extern unsigned long wall_jiffies; extern struct timezone sys_tz; static long timezone_offset; -void ppc_adjtimex(void); - -static unsigned adjusting_time = 0; - unsigned long ppc_proc_freq; unsigned long ppc_tb_freq; @@ -178,8 +183,7 @@ static __inline__ void timer_check_rtc(void) */ if (ppc_md.set_rtc_time && ntp_synced() && xtime.tv_sec - last_rtc_update >= 659 && - abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ && - jiffies - wall_jiffies == 1) { + abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) { struct rtc_time tm; to_tm(xtime.tv_sec + 1 + timezone_offset, &tm); tm.tm_year -= 1900; @@ -226,15 +230,14 @@ void do_gettimeofday(struct timeval *tv) if (__USE_RTC()) { /* do this the old way */ unsigned long flags, seq; - unsigned int sec, nsec, usec, lost; + unsigned int sec, nsec, usec; do { seq = read_seqbegin_irqsave(&xtime_lock, flags); sec = xtime.tv_sec; nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp); - lost = jiffies - wall_jiffies; } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); - usec = nsec / 1000 + lost * (1000000 / HZ); + usec = nsec / 1000; while (usec >= 1000000) { usec -= 1000000; ++sec; @@ -248,23 +251,6 @@ void do_gettimeofday(struct timeval *tv) EXPORT_SYMBOL(do_gettimeofday); -/* Synchronize xtime with do_gettimeofday */ - -static inline void timer_sync_xtime(unsigned long cur_tb) -{ -#ifdef CONFIG_PPC64 - /* why do we do this? */ - struct timeval my_tv; - - __do_gettimeofday(&my_tv, cur_tb); - - if (xtime.tv_sec <= my_tv.tv_sec) { - xtime.tv_sec = my_tv.tv_sec; - xtime.tv_nsec = my_tv.tv_usec * 1000; - } -#endif -} - /* * There are two copies of tb_to_xs and stamp_xsec so that no * lock is needed to access and use these values in @@ -323,15 +309,30 @@ static __inline__ void timer_recalc_offset(u64 cur_tb) { unsigned long offset; u64 new_stamp_xsec; + u64 tlen, t2x; if (__USE_RTC()) return; + tlen = current_tick_length(); offset = cur_tb - do_gtod.varp->tb_orig_stamp; - if ((offset & 0x80000000u) == 0) - return; - new_stamp_xsec = do_gtod.varp->stamp_xsec - + mulhdu(offset, do_gtod.varp->tb_to_xs); - update_gtod(cur_tb, new_stamp_xsec, do_gtod.varp->tb_to_xs); + if (tlen == last_tick_len && offset < 0x80000000u) { + /* check that we're still in sync; if not, resync */ + struct timeval tv; + __do_gettimeofday(&tv, cur_tb); + if (tv.tv_sec <= xtime.tv_sec && + (tv.tv_sec < xtime.tv_sec || + tv.tv_usec * 1000 <= xtime.tv_nsec)) + return; + } + if (tlen != last_tick_len) { + t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs); + last_tick_len = tlen; + } else + t2x = do_gtod.varp->tb_to_xs; + new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC; + do_div(new_stamp_xsec, 1000000000); + new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC; + update_gtod(cur_tb, new_stamp_xsec, t2x); } #ifdef CONFIG_SMP @@ -462,13 +463,10 @@ void timer_interrupt(struct pt_regs * regs) write_seqlock(&xtime_lock); tb_last_jiffy += tb_ticks_per_jiffy; tb_last_stamp = per_cpu(last_jiffy, cpu); - timer_recalc_offset(tb_last_jiffy); do_timer(regs); - timer_sync_xtime(tb_last_jiffy); + timer_recalc_offset(tb_last_jiffy); timer_check_rtc(); write_sequnlock(&xtime_lock); - if (adjusting_time && (time_adjust == 0)) - ppc_adjtimex(); } next_dec = tb_ticks_per_jiffy - ticks; @@ -492,16 +490,18 @@ void timer_interrupt(struct pt_regs * regs) void wakeup_decrementer(void) { - int i; + unsigned long ticks; - 
set_dec(tb_ticks_per_jiffy); /* - * We don't expect this to be called on a machine with a 601, - * so using get_tbl is fine. + * The timebase gets saved on sleep and restored on wakeup, + * so all we need to do is to reset the decrementer. */ - tb_last_stamp = tb_last_jiffy = get_tb(); - for_each_cpu(i) - per_cpu(last_jiffy, i) = tb_last_stamp; + ticks = tb_ticks_since(__get_cpu_var(last_jiffy)); + if (ticks < tb_ticks_per_jiffy) + ticks = tb_ticks_per_jiffy - ticks; + else + ticks = 1; + set_dec(ticks); } #ifdef CONFIG_SMP @@ -541,8 +541,8 @@ int do_settimeofday(struct timespec *tv) time_t wtm_sec, new_sec = tv->tv_sec; long wtm_nsec, new_nsec = tv->tv_nsec; unsigned long flags; - long int tb_delta; - u64 new_xsec, tb_delta_xs; + u64 new_xsec; + unsigned long tb_delta; if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) return -EINVAL; @@ -563,9 +563,19 @@ int do_settimeofday(struct timespec *tv) first_settimeofday = 0; } #endif + + /* + * Subtract off the number of nanoseconds since the + * beginning of the last tick. + * Note that since we don't increment jiffies_64 anywhere other + * than in do_timer (since we don't have a lost tick problem), + * wall_jiffies will always be the same as jiffies, + * and therefore the (jiffies - wall_jiffies) computation + * has been removed. + */ tb_delta = tb_ticks_since(tb_last_stamp); - tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy; - tb_delta_xs = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); + tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */ + new_nsec -= SCALE_XSEC(tb_delta, 1000000000); wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec); wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec); @@ -580,12 +590,12 @@ int do_settimeofday(struct timespec *tv) ntp_clear(); - new_xsec = 0; - if (new_nsec != 0) { - new_xsec = (u64)new_nsec * XSEC_PER_SEC; + new_xsec = xtime.tv_nsec; + if (new_xsec != 0) { + new_xsec *= XSEC_PER_SEC; do_div(new_xsec, NSEC_PER_SEC); } - new_xsec += (u64)new_sec * XSEC_PER_SEC - tb_delta_xs; + new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC; update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs); vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; @@ -612,10 +622,10 @@ void __init generic_calibrate_decr(void) ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */ node_found = 0; - if (cpu != 0) { + if (cpu) { fp = (unsigned int *)get_property(cpu, "timebase-frequency", NULL); - if (fp != 0) { + if (fp) { node_found = 1; ppc_tb_freq = *fp; } @@ -626,10 +636,10 @@ void __init generic_calibrate_decr(void) ppc_proc_freq = DEFAULT_PROC_FREQ; node_found = 0; - if (cpu != 0) { + if (cpu) { fp = (unsigned int *)get_property(cpu, "clock-frequency", NULL); - if (fp != 0) { + if (fp) { node_found = 1; ppc_proc_freq = *fp; } @@ -671,7 +681,7 @@ void __init time_init(void) unsigned long flags; unsigned long tm = 0; struct div_result res; - u64 scale; + u64 scale, x; unsigned shift; if (ppc_md.time_init != NULL) @@ -693,11 +703,36 @@ void __init time_init(void) } tb_ticks_per_jiffy = ppc_tb_freq / HZ; - tb_ticks_per_sec = tb_ticks_per_jiffy * HZ; + tb_ticks_per_sec = ppc_tb_freq; tb_ticks_per_usec = ppc_tb_freq / 1000000; tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); - div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res); - tb_to_xs = res.result_low; + + /* + * Calculate the length of each tick in ns. It will not be + * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ. + * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq, + * rounded up. 
+ */ + x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1; + do_div(x, ppc_tb_freq); + tick_nsec = x; + last_tick_len = x << TICKLEN_SCALE; + + /* + * Compute ticklen_to_xs, which is a factor which gets multiplied + * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value. + * It is computed as: + * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9) + * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT + * so as to give the result as a 0.64 fixed-point fraction. + */ + div128_by_32(1ULL << (64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT), 0, + tb_ticks_per_jiffy, &res); + div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res); + ticklen_to_xs = res.result_low; + + /* Compute tb_to_xs from tick_nsec */ + tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs); /* * Compute scale factor for sched_clock. @@ -724,6 +759,14 @@ void __init time_init(void) tm = get_boot_time(); write_seqlock_irqsave(&xtime_lock, flags); + + /* If platform provided a timezone (pmac), we correct the time */ + if (timezone_offset) { + sys_tz.tz_minuteswest = -timezone_offset / 60; + sys_tz.tz_dsttime = 0; + tm -= timezone_offset; + } + xtime.tv_sec = tm; xtime.tv_nsec = 0; do_gtod.varp = &do_gtod.vars[0]; @@ -738,18 +781,11 @@ void __init time_init(void) vdso_data->tb_orig_stamp = tb_last_jiffy; vdso_data->tb_update_count = 0; vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; - vdso_data->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC; + vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC; vdso_data->tb_to_xs = tb_to_xs; time_freq = 0; - /* If platform provided a timezone (pmac), we correct the time */ - if (timezone_offset) { - sys_tz.tz_minuteswest = -timezone_offset / 60; - sys_tz.tz_dsttime = 0; - xtime.tv_sec -= timezone_offset; - } - last_rtc_update = xtime.tv_sec; set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); @@ -759,126 +795,6 @@ void __init time_init(void) set_dec(tb_ticks_per_jiffy); } -/* - * After adjtimex is called, adjust the conversion of tb ticks - * to microseconds to keep do_gettimeofday synchronized - * with ntpd. - * - * Use the time_adjust, time_freq and time_offset computed by adjtimex to - * adjust the frequency. - */ - -/* #define DEBUG_PPC_ADJTIMEX 1 */ - -void ppc_adjtimex(void) -{ -#ifdef CONFIG_PPC64 - unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec, - new_tb_to_xs, new_xsec, new_stamp_xsec; - unsigned long tb_ticks_per_sec_delta; - long delta_freq, ltemp; - struct div_result divres; - unsigned long flags; - long singleshot_ppm = 0; - - /* - * Compute parts per million frequency adjustment to - * accomplish the time adjustment implied by time_offset to be - * applied over the elapsed time indicated by time_constant. - * Use SHIFT_USEC to get it into the same units as - * time_freq. 
- */ - if ( time_offset < 0 ) { - ltemp = -time_offset; - ltemp <<= SHIFT_USEC - SHIFT_UPDATE; - ltemp >>= SHIFT_KG + time_constant; - ltemp = -ltemp; - } else { - ltemp = time_offset; - ltemp <<= SHIFT_USEC - SHIFT_UPDATE; - ltemp >>= SHIFT_KG + time_constant; - } - - /* If there is a single shot time adjustment in progress */ - if ( time_adjust ) { -#ifdef DEBUG_PPC_ADJTIMEX - printk("ppc_adjtimex: "); - if ( adjusting_time == 0 ) - printk("starting "); - printk("single shot time_adjust = %ld\n", time_adjust); -#endif - - adjusting_time = 1; - - /* - * Compute parts per million frequency adjustment - * to match time_adjust - */ - singleshot_ppm = tickadj * HZ; - /* - * The adjustment should be tickadj*HZ to match the code in - * linux/kernel/timer.c, but experiments show that this is too - * large. 3/4 of tickadj*HZ seems about right - */ - singleshot_ppm -= singleshot_ppm / 4; - /* Use SHIFT_USEC to get it into the same units as time_freq */ - singleshot_ppm <<= SHIFT_USEC; - if ( time_adjust < 0 ) - singleshot_ppm = -singleshot_ppm; - } - else { -#ifdef DEBUG_PPC_ADJTIMEX - if ( adjusting_time ) - printk("ppc_adjtimex: ending single shot time_adjust\n"); -#endif - adjusting_time = 0; - } - - /* Add up all of the frequency adjustments */ - delta_freq = time_freq + ltemp + singleshot_ppm; - - /* - * Compute a new value for tb_ticks_per_sec based on - * the frequency adjustment - */ - den = 1000000 * (1 << (SHIFT_USEC - 8)); - if ( delta_freq < 0 ) { - tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den; - new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta; - } - else { - tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den; - new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta; - } - -#ifdef DEBUG_PPC_ADJTIMEX - printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm); - printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec); -#endif - - /* - * Compute a new value of tb_to_xs (used to convert tb to - * microseconds) and a new value of stamp_xsec which is the - * time (in 1/2^20 second units) corresponding to - * tb_orig_stamp. This new value of stamp_xsec compensates - * for the change in frequency (implied by the new tb_to_xs) - * which guarantees that the current time remains the same. - */ - write_seqlock_irqsave( &xtime_lock, flags ); - tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp; - div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres); - new_tb_to_xs = divres.result_low; - new_xsec = mulhdu(tb_ticks, new_tb_to_xs); - - old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs); - new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec; - - update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs); - - write_sequnlock_irqrestore( &xtime_lock, flags ); -#endif /* CONFIG_PPC64 */ -} - #define FEBRUARY 2 #define STARTOFTIME 1970 diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index 2da65a9c93f6..5d29dcca523c 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c @@ -144,7 +144,7 @@ unsigned int udbg_probe_uart_speed(void __iomem *comport, unsigned int clock) } #ifdef CONFIG_PPC_MAPLE -void udbg_maple_real_putc(unsigned char c) +void udbg_maple_real_putc(char c) { if (udbg_comport) { while ((real_readb(&udbg_comport->lsr) & LSR_THRE) == 0) |