author     Linus Torvalds <torvalds@linux-foundation.org>   2011-03-17 19:34:12 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-03-17 19:34:12 -0700
commit     08351fc6a75731226e1112fc7254542bd3a2912e
tree       8b25bd168e0663c766f0332c8be082aa7d6ed265 /arch/tile/kernel
parent     0df0914d414a504b975f3cc66ace0c16ef55b7f3
parent     0dccb0489f9a5a13a33e828ab965aa49685d12f8
Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile: (27 commits)
arch/tile: support newer binutils assembler shift semantics
arch/tile: fix deadlock bugs in rwlock implementation
drivers/edac: provide support for tile architecture
tile on-chip network driver: sync up with latest fixes
arch/tile: support 4KB page size as well as 64KB
arch/tile: add some more VMSPLIT options and use consistent naming
arch/tile: fix some comments and whitespace
arch/tile: export some additional module symbols
arch/tile: enhance existing finv_buffer_remote() routine
arch/tile: fix two bugs in the backtracer code
arch/tile: use extended assembly to inline __mb_incoherent()
arch/tile: use a cleaner technique to enable interrupt for cpu_idle()
arch/tile: sync up with <arch/sim.h> and <arch/sim_def.h> changes
arch/tile: fix reversed test of strict_strtol() return value
arch/tile: avoid a simulator warning during bootup
arch/tile: export <asm/hardwall.h> to userspace
arch/tile: warn and retry if an IPI is not accepted by the target cpu
arch/tile: stop disabling INTCTRL_1 interrupts during hypervisor downcalls
arch/tile: fix __ndelay etc to work better
arch/tile: bug fix: exec'ed task thought it was still single-stepping
...
Fix up trivial conflict in arch/tile/kernel/vmlinux.lds.S (percpu
alignment vs section naming convention fix)
Diffstat (limited to 'arch/tile/kernel')
 arch/tile/kernel/entry.S         | 22
 arch/tile/kernel/head_32.S       | 15
 arch/tile/kernel/intvec_32.S     | 74
 arch/tile/kernel/irq.c           | 38
 arch/tile/kernel/machine_kexec.c |  7
 arch/tile/kernel/pci-dma.c       | 38
 arch/tile/kernel/process.c       |  6
 arch/tile/kernel/setup.c         | 20
 arch/tile/kernel/single_step.c   | 21
 arch/tile/kernel/smp.c           | 33
 arch/tile/kernel/stack.c         | 28
 arch/tile/kernel/time.c          | 10
 arch/tile/kernel/vmlinux.lds.S   |  5
13 files changed, 163 insertions(+), 154 deletions(-)
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index fd8dc42abdcb..431e9ae60488 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -38,12 +38,6 @@ STD_ENTRY(kernel_execve)
 	jrp lr
 	STD_ENDPROC(kernel_execve)
 
-/* Delay a fixed number of cycles. */
-STD_ENTRY(__delay)
-	{ addi r0, r0, -1; bnzt r0, . }
-	jrp lr
-	STD_ENDPROC(__delay)
-
 /*
  * We don't run this function directly, but instead copy it to a page
  * we map into every user process.  See vdso_setup().
@@ -97,23 +91,17 @@ STD_ENTRY(smp_nap)
 
 /*
  * Enable interrupts racelessly and then nap until interrupted.
+ * Architecturally, we are guaranteed that enabling interrupts via
+ * mtspr to INTERRUPT_CRITICAL_SECTION only interrupts at the next PC.
  * This function's _cpu_idle_nap address is special; see intvec.S.
  * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
  * as a result return to the function that called _cpu_idle().
  */
 STD_ENTRY(_cpu_idle)
-	{
-	 lnk r0
-	 movei r1, KERNEL_PL
-	}
-	{
-	 addli r0, r0, _cpu_idle_nap - .
-	 mtspr INTERRUPT_CRITICAL_SECTION, r1
-	}
+	movei r1, 1
+	mtspr INTERRUPT_CRITICAL_SECTION, r1
 	IRQ_ENABLE(r2, r3)		/* unmask, but still with ICS set */
-	mtspr SPR_EX_CONTEXT_K_1, r1	/* Kernel PL, ICS clear */
-	mtspr SPR_EX_CONTEXT_K_0, r0
-	iret
+	mtspr INTERRUPT_CRITICAL_SECTION, zero
 	.global _cpu_idle_nap
 _cpu_idle_nap:
 	nap
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index 90e7c4435693..1a39b7c1c87e 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -133,7 +133,7 @@ ENTRY(_start)
 	}
 ENDPROC(_start)
 
-.section ".bss.page_aligned","w"
+__PAGE_ALIGNED_BSS
 	.align PAGE_SIZE
 ENTRY(empty_zero_page)
 	.fill PAGE_SIZE,1,0
@@ -145,10 +145,10 @@ ENTRY(empty_zero_page)
 	.endif
 	.word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
	      (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
-	.word (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
+	.word (\bits1) | (HV_CPA_TO_PFN(\cpa) << (HV_PTE_INDEX_PFN - 32))
 	.endm
 
-.section ".data.page_aligned","wa"
+__PAGE_ALIGNED_DATA
 	.align PAGE_SIZE
 ENTRY(swapper_pg_dir)
 	/*
@@ -158,12 +158,14 @@ ENTRY(swapper_pg_dir)
 	 */
 	.set addr, 0
 	.rept (MEM_USER_INTRPT - PAGE_OFFSET) >> PGDIR_SHIFT
-	PTE addr + PAGE_OFFSET, addr, HV_PTE_READABLE | HV_PTE_WRITABLE
+	PTE addr + PAGE_OFFSET, addr, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
+			      (1 << (HV_PTE_INDEX_WRITABLE - 32))
 	.set addr, addr + PGDIR_SIZE
 	.endr
 
 	/* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
-	PTE MEM_SV_INTRPT, 0, HV_PTE_READABLE | HV_PTE_EXECUTABLE
+	PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
+			      (1 << (HV_PTE_INDEX_EXECUTABLE - 32))
 	.org swapper_pg_dir + HV_L1_SIZE
 	END(swapper_pg_dir)
@@ -176,6 +178,7 @@ ENTRY(swapper_pg_dir)
 	__INITDATA
 	.align CHIP_L2_LINE_SIZE()
 ENTRY(swapper_pgprot)
-	PTE	0, 0, HV_PTE_READABLE | HV_PTE_WRITABLE, 1
+	PTE	0, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
+		      (1 << (HV_PTE_INDEX_WRITABLE - 32)), 1
 	.align CHIP_L2_LINE_SIZE()
 	END(swapper_pgprot)
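Aside: the head_32.S hunks above stop using the HV_PTE_READABLE-style 64-bit masks and instead write (1 << (HV_PTE_INDEX_* - 32)), because each PTE is emitted as two 32-bit .word directives and those bits live in the upper half: bit N (N >= 32) of the 64-bit PTE is bit N - 32 of the second word. A self-contained C sketch of that bit arithmetic; the index value below is an assumed stand-in, not the real hypervisor constant:

/*
 * Illustration only: why the head_32.S hunks subtract 32 from the
 * HV_PTE_INDEX_* bit numbers when emitting the second 32-bit word of
 * a 64-bit PTE.  HV_PTE_INDEX_READABLE here is a hypothetical value.
 */
#include <stdint.h>
#include <stdio.h>

#define HV_PTE_INDEX_READABLE 35	/* assumed, for illustration */

int main(void)
{
	uint64_t pte = 1ULL << HV_PTE_INDEX_READABLE;	/* full 64-bit PTE */
	uint32_t lo = (uint32_t)pte;			/* first .word */
	uint32_t hi = (uint32_t)(pte >> 32);		/* second .word */

	/* The same bit expressed against the high word alone. */
	uint32_t hi_direct = 1U << (HV_PTE_INDEX_READABLE - 32);

	printf("lo=%#x hi=%#x hi_direct=%#x\n",
	       (unsigned)lo, (unsigned)hi, (unsigned)hi_direct);
	return hi != hi_direct;		/* exit 0: the encodings agree */
}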
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 5eed4a02bf62..fffcfa6b3a62 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -32,10 +32,6 @@
 # error "No support for kernel preemption currently"
 #endif
 
-#if INT_INTCTRL_K < 32 || INT_INTCTRL_K >= 48
-# error INT_INTCTRL_K coded to set high interrupt mask
-#endif
-
 #define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
 
 #define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
@@ -1199,46 +1195,6 @@ STD_ENTRY(interrupt_return)
 	STD_ENDPROC(interrupt_return)
 
 	/*
-	 * This interrupt variant clears the INT_INTCTRL_K interrupt mask bit
-	 * before returning, so we can properly get more downcalls.
-	 */
-	.pushsection .text.handle_interrupt_downcall,"ax"
-handle_interrupt_downcall:
-	finish_interrupt_save handle_interrupt_downcall
-	check_single_stepping normal, .Ldispatch_downcall
-.Ldispatch_downcall:
-
-	/* Clear INTCTRL_K from the set of interrupts we ever enable. */
-	GET_INTERRUPTS_ENABLED_MASK_PTR(r30)
-	{
-	 addi   r30, r30, 4
-	 movei  r31, INT_MASK(INT_INTCTRL_K)
-	}
-	{
-	 lw     r20, r30
-	 nor    r21, r31, zero
-	}
-	and     r20, r20, r21
-	sw      r30, r20
-
-	{
-	 jalr   r0
-	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
-	}
-	FEEDBACK_REENTER(handle_interrupt_downcall)
-
-	/* Allow INTCTRL_K to be enabled next time we enable interrupts. */
-	lw      r20, r30
-	or      r20, r20, r31
-	sw      r30, r20
-
-	{
-	 movei  r30, 0   /* not an NMI */
-	 j      interrupt_return
-	}
-	STD_ENDPROC(handle_interrupt_downcall)
-
-	/*
 	 * Some interrupts don't check for single stepping
 	 */
 	.pushsection .text.handle_interrupt_no_single_step,"ax"
@@ -1600,7 +1556,10 @@ STD_ENTRY(_sys_clone)
 	.align 64
 	/* Align much later jump on the start of a cache line. */
 #if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
-	nop; nop
+	nop
+#if PAGE_SIZE >= 0x10000
+	nop
+#endif
 #endif
 ENTRY(sys_cmpxchg)
@@ -1628,9 +1587,13 @@ ENTRY(sys_cmpxchg)
 	 * about aliasing among multiple mappings of the same physical page,
 	 * and we ignore the low 3 bits so we have one lock that covers
 	 * both a cmpxchg64() and a cmpxchg() on either its low or high word.
-	 * NOTE: this code must match __atomic_hashed_lock() in lib/atomic.c.
+	 * NOTE: this must match __atomic_hashed_lock() in lib/atomic_32.c.
 	 */
 
+#if (PAGE_OFFSET & 0xffff) != 0
+# error Code here assumes PAGE_OFFSET can be loaded with just hi16()
+#endif
+
 #if ATOMIC_LOCKS_FOUND_VIA_TABLE()
 	{
 	 /* Check for unaligned input. */
@@ -1723,11 +1686,14 @@ ENTRY(sys_cmpxchg)
 	 lw	r26, r0
 	}
 	{
-	 /* atomic_locks is page aligned so this suffices to get its addr. */
-	 auli	r21, zero, hi16(atomic_locks)
+	 auli	r21, zero, ha16(atomic_locks)
 	 bbns	r23, .Lcmpxchg_badaddr
 	}
+#if PAGE_SIZE < 0x10000
+	/* atomic_locks is page-aligned so for big pages we don't need this. */
+	addli	r21, r21, lo16(atomic_locks)
+#endif
 	{
 	 /*
 	  * Insert the hash bits into the page-aligned pointer.
@@ -1762,7 +1728,7 @@ ENTRY(sys_cmpxchg)
 
 	/*
 	 * Perform the actual cmpxchg or atomic_update.
-	 * Note that __futex_mark_unlocked() in uClibc relies on
+	 * Note that the system <arch/atomic.h> header relies on
 	 * atomic_update() to always perform an "mf", so don't make
 	 * it optional or conditional without modifying that code.
 	 */
@@ -2014,17 +1980,17 @@ int_unalign:
 #endif
 	int_hand     INT_INTCTRL_0, INTCTRL_0, bad_intr
 	int_hand     INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
-		     hv_message_intr, handle_interrupt_downcall
+		     hv_message_intr
 	int_hand     INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, \
-		     tile_dev_intr, handle_interrupt_downcall
+		     tile_dev_intr
 	int_hand     INT_I_ASID, I_ASID, bad_intr
 	int_hand     INT_D_ASID, D_ASID, bad_intr
 	int_hand     INT_DMATLB_MISS_DWNCL, DMATLB_MISS_DWNCL, \
-		     do_page_fault, handle_interrupt_downcall
+		     do_page_fault
 	int_hand     INT_SNITLB_MISS_DWNCL, SNITLB_MISS_DWNCL, \
-		     do_page_fault, handle_interrupt_downcall
+		     do_page_fault
 	int_hand     INT_DMATLB_ACCESS_DWNCL, DMATLB_ACCESS_DWNCL, \
-		     do_page_fault, handle_interrupt_downcall
+		     do_page_fault
 	int_hand     INT_SN_CPL, SN_CPL, bad_intr
 	int_hand     INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
 #if CHIP_HAS_AUX_PERF_COUNTERS()
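Aside: the sys_cmpxchg comment above requires the assembly to hash an atomic operand's address to a lock exactly as __atomic_hashed_lock() does in lib/atomic_32.c, ignoring the low 3 bits so one lock covers a cmpxchg64() and a cmpxchg() on either 32-bit half of the same doubleword. A runnable C sketch of that invariant; the table size and hash are assumptions for illustration, not the kernel's actual values:

/*
 * Illustration only: the flavor of address-to-lock hashing that the
 * syscall path and __atomic_hashed_lock() must agree on.  Dropping
 * the low 3 bits maps a doubleword and both of its 32-bit halves to
 * the same lock.  ATOMIC_HASH_SIZE is an assumed table size.
 */
#include <stdint.h>
#include <stdio.h>

#define ATOMIC_HASH_SIZE 512	/* assumed lock-table size (power of 2) */

static int lock_index(uintptr_t addr)
{
	return (int)((addr >> 3) & (ATOMIC_HASH_SIZE - 1));
}

int main(void)
{
	uintptr_t dword = 0x10008;	/* an 8-byte-aligned location */

	/* A cmpxchg64() on the doubleword and a cmpxchg() on its low
	 * or high word must all serialize on the same lock: */
	printf("%d %d %d\n", lock_index(dword),
	       lock_index(dword), lock_index(dword + 4));
	return 0;
}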
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 128805ef8f2c..0baa7580121f 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -176,43 +176,43 @@ void disable_percpu_irq(unsigned int irq)
 EXPORT_SYMBOL(disable_percpu_irq);
 
 /* Mask an interrupt. */
-static void tile_irq_chip_mask(unsigned int irq)
+static void tile_irq_chip_mask(struct irq_data *d)
 {
-	mask_irqs(1UL << irq);
+	mask_irqs(1UL << d->irq);
 }
 
 /* Unmask an interrupt. */
-static void tile_irq_chip_unmask(unsigned int irq)
+static void tile_irq_chip_unmask(struct irq_data *d)
 {
-	unmask_irqs(1UL << irq);
+	unmask_irqs(1UL << d->irq);
 }
 
 /*
  * Clear an interrupt before processing it so that any new assertions
  * will trigger another irq.
  */
-static void tile_irq_chip_ack(unsigned int irq)
+static void tile_irq_chip_ack(struct irq_data *d)
 {
-	if ((unsigned long)get_irq_chip_data(irq) != IS_HW_CLEARED)
-		clear_irqs(1UL << irq);
+	if ((unsigned long)irq_data_get_irq_chip_data(d) != IS_HW_CLEARED)
+		clear_irqs(1UL << d->irq);
 }
 
 /*
  * For per-cpu interrupts, we need to avoid unmasking any interrupts
  * that we disabled via disable_percpu_irq().
  */
-static void tile_irq_chip_eoi(unsigned int irq)
+static void tile_irq_chip_eoi(struct irq_data *d)
 {
-	if (!(__get_cpu_var(irq_disable_mask) & (1UL << irq)))
-		unmask_irqs(1UL << irq);
+	if (!(__get_cpu_var(irq_disable_mask) & (1UL << d->irq)))
+		unmask_irqs(1UL << d->irq);
 }
 
 static struct irq_chip tile_irq_chip = {
 	.name = "tile_irq_chip",
-	.ack = tile_irq_chip_ack,
-	.eoi = tile_irq_chip_eoi,
-	.mask = tile_irq_chip_mask,
-	.unmask = tile_irq_chip_unmask,
+	.irq_ack = tile_irq_chip_ack,
+	.irq_eoi = tile_irq_chip_eoi,
+	.irq_mask = tile_irq_chip_mask,
+	.irq_unmask = tile_irq_chip_unmask,
 };
 
 void __init init_IRQ(void)
@@ -277,8 +277,10 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
-		action = irq_desc[i].action;
+		struct irq_desc *desc = irq_to_desc(i);
+
+		raw_spin_lock_irqsave(&desc->lock, flags);
+		action = desc->action;
 		if (!action)
 			goto skip;
 		seq_printf(p, "%3d: ", i);
@@ -288,7 +290,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-		seq_printf(p, " %14s", irq_desc[i].chip->name);
+		seq_printf(p, " %14s", get_irq_desc_chip(desc)->name);
 		seq_printf(p, " %s", action->name);
 
 		for (action = action->next; action; action = action->next)
@@ -296,7 +298,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_putc(p, '\n');
 skip:
-		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 	return 0;
 }
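Aside: the irq.c hunk above is the generic-irq migration from "unsigned int irq" callbacks to struct irq_data pointers (.ack becomes .irq_ack and so on), with the irq number recovered from the data argument. A self-contained userspace mock of the new callback shape; the struct layouts here are simplified stand-ins, not the kernel's definitions:

/*
 * Illustration only (userspace mock, not the genirq API): irq_chip
 * callbacks now take a struct irq_data * and read d->irq from it.
 */
#include <stdio.h>

struct irq_data {
	unsigned int irq;
	void *chip_data;
};

struct irq_chip {
	const char *name;
	void (*irq_mask)(struct irq_data *d);
	void (*irq_unmask)(struct irq_data *d);
};

static void demo_mask(struct irq_data *d)
{
	printf("mask irq %u (bit %#lx)\n", d->irq, 1UL << d->irq);
}

static void demo_unmask(struct irq_data *d)
{
	printf("unmask irq %u (bit %#lx)\n", d->irq, 1UL << d->irq);
}

static struct irq_chip demo_chip = {
	.name = "demo_chip",
	.irq_mask = demo_mask,
	.irq_unmask = demo_unmask,
};

int main(void)
{
	struct irq_data d = { .irq = 5, .chip_data = 0 };
	demo_chip.irq_mask(&d);
	demo_chip.irq_unmask(&d);
	return 0;
}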
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index 0d8b9e933487..e00d7179989e 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -240,8 +240,11 @@ static void setup_quasi_va_is_pa(void)
 	pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
 	pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
 
-	for (i = 0; i < pgd_index(PAGE_OFFSET); i++)
-		pgtable[i] = pfn_pte(i << (HPAGE_SHIFT - PAGE_SHIFT), pte);
+	for (i = 0; i < pgd_index(PAGE_OFFSET); i++) {
+		unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT);
+		if (pfn_valid(pfn))
+			__set_pte(&pgtable[i], pfn_pte(pfn, pte));
+	}
 }
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 5ad5e13b0fa6..658752b2835e 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -86,6 +86,21 @@ EXPORT_SYMBOL(dma_free_coherent);
  * can count on nothing having been touched.
  */
 
+/* Flush a PA range from cache page by page. */
+static void __dma_map_pa_range(dma_addr_t dma_addr, size_t size)
+{
+	struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
+	size_t bytesleft = PAGE_SIZE - (dma_addr & (PAGE_SIZE - 1));
+
+	while ((ssize_t)size > 0) {
+		/* Flush the page. */
+		homecache_flush_cache(page++, 0);
+
+		/* Figure out if we need to continue on the next page. */
+		size -= bytesleft;
+		bytesleft = PAGE_SIZE;
+	}
+}
 
 /*
  * dma_map_single can be passed any memory address, and there appear
@@ -97,26 +112,12 @@ EXPORT_SYMBOL(dma_free_coherent);
 dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
 	   enum dma_data_direction direction)
 {
-	struct page *page;
-	dma_addr_t dma_addr;
-	int thispage;
+	dma_addr_t dma_addr = __pa(ptr);
 
 	BUG_ON(!valid_dma_direction(direction));
 	WARN_ON(size == 0);
 
-	dma_addr = __pa(ptr);
-
-	/* We might have been handed a buffer that wraps a page boundary */
-	while ((int)size > 0) {
-		/* The amount to flush that's on this page */
-		thispage = PAGE_SIZE - ((unsigned long)ptr & (PAGE_SIZE - 1));
-		thispage = min((int)thispage, (int)size);
-		/* Is this valid for any page we could be handed? */
-		page = pfn_to_page(kaddr_to_pfn(ptr));
-		homecache_flush_cache(page, 0);
-		ptr += thispage;
-		size -= thispage;
-	}
+	__dma_map_pa_range(dma_addr, size);
 
 	return dma_addr;
 }
@@ -140,10 +141,8 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	WARN_ON(nents == 0 || sglist->length == 0);
 
 	for_each_sg(sglist, sg, nents, i) {
-		struct page *page;
 		sg->dma_address = sg_phys(sg);
-		page = pfn_to_page(sg->dma_address >> PAGE_SHIFT);
-		homecache_flush_cache(page, 0);
+		__dma_map_pa_range(sg->dma_address, sg->length);
 	}
 
 	return nents;
@@ -163,6 +162,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
 {
 	BUG_ON(!valid_dma_direction(direction));
 
+	BUG_ON(offset + size > PAGE_SIZE);
 	homecache_flush_cache(page, 0);
 
 	return page_to_pa(page) + offset;
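Aside: __dma_map_pa_range() above flushes an arbitrary physical range one page at a time; the first iteration may start mid-page, so only the remainder of that page is counted before the stride becomes PAGE_SIZE. A runnable userspace sketch of the same walk, with a hypothetical flush_page() standing in for homecache_flush_cache():

/*
 * Illustration only: the page-walking logic of __dma_map_pa_range(),
 * outside the kernel.  flush_page() is a made-up stand-in.
 */
#include <stdio.h>
#include <stddef.h>
#include <sys/types.h>

#define PAGE_SIZE 0x1000UL	/* assumed 4KB pages for the demo */

static void flush_page(unsigned long pfn)
{
	printf("flush pfn %#lx\n", pfn);
}

static void map_pa_range(unsigned long pa, size_t size)
{
	unsigned long pfn = pa / PAGE_SIZE;
	/* First page may be entered mid-page. */
	size_t bytesleft = PAGE_SIZE - (pa & (PAGE_SIZE - 1));

	while ((ssize_t)size > 0) {
		flush_page(pfn++);
		size -= bytesleft;	/* goes "negative" on the last page */
		bytesleft = PAGE_SIZE;
	}
}

int main(void)
{
	map_pa_range(0x10f80, 0x300);	/* straddles two pages: 2 flushes */
	return 0;
}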
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index e90eb53173b0..b9cd962e1d30 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -165,7 +165,7 @@ void free_thread_info(struct thread_info *info)
 		kfree(step_state);
 	}
 
-	free_page((unsigned long)info);
+	free_pages((unsigned long)info, THREAD_SIZE_ORDER);
 }
 
 static void save_arch_state(struct thread_struct *t);
@@ -574,6 +574,8 @@ SYSCALL_DEFINE4(execve, const char __user *, path,
 		goto out;
 	error = do_execve(filename, argv, envp, regs);
 	putname(filename);
+	if (error == 0)
+		single_step_execve();
out:
 	return error;
 }
@@ -593,6 +595,8 @@ long compat_sys_execve(const char __user *path,
 		goto out;
 	error = compat_do_execve(filename, argv, envp, regs);
 	putname(filename);
+	if (error == 0)
+		single_step_execve();
out:
 	return error;
 }
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index f18573643ed1..3696b1832566 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -59,6 +59,8 @@ unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
 unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
 unsigned long __initdata node_free_pfn[MAX_NUMNODES];
 
+static unsigned long __initdata node_percpu[MAX_NUMNODES];
+
 #ifdef CONFIG_HIGHMEM
 /* Page frame index of end of lowmem on each controller. */
 unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES];
@@ -554,7 +556,6 @@ static void __init setup_bootmem_allocator(void)
 	reserve_bootmem(crashk_res.start,
 			crashk_res.end - crashk_res.start + 1, 0);
 #endif
-
 }
 
 void *__init alloc_remap(int nid, unsigned long size)
@@ -568,11 +569,13 @@ void *__init alloc_remap(int nid, unsigned long size)
 
 static int __init percpu_size(void)
 {
-	int size = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
-#ifdef CONFIG_MODULES
-	if (size < PERCPU_ENOUGH_ROOM)
-		size = PERCPU_ENOUGH_ROOM;
-#endif
+	int size = __per_cpu_end - __per_cpu_start;
+	size += PERCPU_MODULE_RESERVE;
+	size += PERCPU_DYNAMIC_EARLY_SIZE;
+	if (size < PCPU_MIN_UNIT_SIZE)
+		size = PCPU_MIN_UNIT_SIZE;
+	size = roundup(size, PAGE_SIZE);
+
 	/* In several places we assume the per-cpu data fits on a huge page. */
 	BUG_ON(kdata_huge && size > HPAGE_SIZE);
 	return size;
@@ -589,7 +592,6 @@ static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal)
 static void __init zone_sizes_init(void)
 {
 	unsigned long zones_size[MAX_NR_ZONES] = { 0 };
-	unsigned long node_percpu[MAX_NUMNODES] = { 0 };
 	int size = percpu_size();
 	int num_cpus = smp_height * smp_width;
 	int i;
@@ -674,7 +676,7 @@ static void __init zone_sizes_init(void)
 			NODE_DATA(i)->bdata = NODE_DATA(0)->bdata;
 
 		free_area_init_node(i, zones_size, start, NULL);
-		printk(KERN_DEBUG "  DMA zone: %ld per-cpu pages\n",
+		printk(KERN_DEBUG "  Normal zone: %ld per-cpu pages\n",
 		       PFN_UP(node_percpu[i]));
 
 		/* Track the type of memory on each node */
@@ -1312,6 +1314,8 @@ static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 
 	BUG_ON(size % PAGE_SIZE != 0);
 	pfn_offset[nid] += size / PAGE_SIZE;
+	BUG_ON(node_percpu[nid] < size);
+	node_percpu[nid] -= size;
 	if (percpu_pfn[cpu] == 0)
 		percpu_pfn[cpu] = pfn;
 	return pfn_to_kaddr(pfn);
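Aside: the new percpu_size() above sizes each cpu's chunk as static per-cpu data plus module and early-dynamic reserves, clamped to the percpu allocator's minimum and rounded up to a page. A runnable sketch of just that arithmetic; the reserve and minimum byte counts are made-up inputs, not the kernel's configured values:

/*
 * Illustration only: the sizing arithmetic of the new percpu_size().
 * All the constants below are assumed values for the demo.
 */
#include <stdio.h>

#define PAGE_SIZE                 0x10000		/* 64KB pages */
#define PERCPU_MODULE_RESERVE     (8 << 10)		/* assumed */
#define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)		/* assumed */
#define PCPU_MIN_UNIT_SIZE        (32 << 10)		/* assumed */

static int percpu_size(int static_size)
{
	int size = static_size;
	size += PERCPU_MODULE_RESERVE;
	size += PERCPU_DYNAMIC_EARLY_SIZE;
	if (size < PCPU_MIN_UNIT_SIZE)
		size = PCPU_MIN_UNIT_SIZE;
	/* roundup(size, PAGE_SIZE) */
	size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	return size;
}

int main(void)
{
	/* 5KB static data -> 25KB -> clamp to 32KB -> one 64KB page. */
	printf("%#x\n", (unsigned)percpu_size(5 << 10));
	return 0;
}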
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 1eb3b39e36c7..84a729e06ec4 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -56,7 +56,7 @@ enum mem_op {
 	MEMOP_STORE_POSTINCR
 };
 
-static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, int32_t offset)
+static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, s32 offset)
 {
 	tile_bundle_bits result;
 
@@ -254,6 +254,18 @@ P("\n");
 	return bundle;
 }
 
+/*
+ * Called after execve() has started the new image.  This allows us
+ * to reset the info state.  Note that the mmap'ed memory, if there
+ * was any, has already been unmapped by the exec.
+ */
+void single_step_execve(void)
+{
+	struct thread_info *ti = current_thread_info();
+	kfree(ti->step_state);
+	ti->step_state = NULL;
+}
+
 /**
  * single_step_once() - entry point when single stepping has been triggered.
  * @regs: The machine register state
@@ -373,7 +385,7 @@ void single_step_once(struct pt_regs *regs)
 	/* branches */
 	case BRANCH_OPCODE_X1:
 	{
-		int32_t offset = signExtend17(get_BrOff_X1(bundle));
+		s32 offset = signExtend17(get_BrOff_X1(bundle));
 
 		/*
 		 * For branches, we use a rewriting trick to let the
@@ -731,4 +743,9 @@ void single_step_once(struct pt_regs *regs)
 	__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
 }
 
+void single_step_execve(void)
+{
+	/* Nothing */
+}
+
 #endif /* !__tilegx__ */
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 9575b37a8b75..a4293102ef81 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -36,6 +36,22 @@ static unsigned long __iomem *ipi_mappings[NR_CPUS];
 /* Set by smp_send_stop() to avoid recursive panics. */
 static int stopping_cpus;
 
+static void __send_IPI_many(HV_Recipient *recip, int nrecip, int tag)
+{
+	int sent = 0;
+	while (sent < nrecip) {
+		int rc = hv_send_message(recip, nrecip,
+					 (HV_VirtAddr)&tag, sizeof(tag));
+		if (rc < 0) {
+			if (!stopping_cpus)	/* avoid recursive panic */
+				panic("hv_send_message returned %d", rc);
+			break;
+		}
+		WARN_ONCE(rc == 0, "hv_send_message() returned zero\n");
+		sent += rc;
+	}
+}
+
 void send_IPI_single(int cpu, int tag)
 {
 	HV_Recipient recip = {
@@ -43,14 +59,13 @@ void send_IPI_single(int cpu, int tag)
 		.x = cpu % smp_width,
 		.state = HV_TO_BE_SENT
 	};
-	int rc = hv_send_message(&recip, 1, (HV_VirtAddr)&tag, sizeof(tag));
-	BUG_ON(rc <= 0);
+	__send_IPI_many(&recip, 1, tag);
 }
 
 void send_IPI_many(const struct cpumask *mask, int tag)
 {
 	HV_Recipient recip[NR_CPUS];
-	int cpu, sent;
+	int cpu;
 	int nrecip = 0;
 	int my_cpu = smp_processor_id();
 	for_each_cpu(cpu, mask) {
@@ -61,17 +76,7 @@ void send_IPI_many(const struct cpumask *mask, int tag)
 		r->x = cpu % smp_width;
 		r->state = HV_TO_BE_SENT;
 	}
-	sent = 0;
-	while (sent < nrecip) {
-		int rc = hv_send_message(recip, nrecip,
-					 (HV_VirtAddr)&tag, sizeof(tag));
-		if (rc <= 0) {
-			if (!stopping_cpus)	/* avoid recursive panic */
-				panic("hv_send_message returned %d", rc);
-			break;
-		}
-		sent += rc;
-	}
+	__send_IPI_many(recip, nrecip, tag);
 }
 
 void send_IPI_allbutself(int tag)
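Aside: __send_IPI_many() above centralizes the retry loop around hv_send_message(), which may accept only some recipients per call; the old send_IPI_single() BUG_ON(rc <= 0) becomes a warn-and-retry when zero recipients are accepted, matching the "warn and retry if an IPI is not accepted" change in the series. A runnable mock of the loop, with fake_send() standing in for the hypervisor call:

/*
 * Illustration only (userspace mock): retry until every recipient of
 * a batched IPI has been accepted.  fake_send() is a made-up stand-in
 * that accepts at most two recipients per call.
 */
#include <stdio.h>

static int fake_send(int nrecip, int already_sent)
{
	int remaining = nrecip - already_sent;
	return remaining > 2 ? 2 : remaining;	/* partial acceptance */
}

static void send_ipi_many(int nrecip)
{
	int sent = 0;
	while (sent < nrecip) {
		int rc = fake_send(nrecip, sent);
		if (rc < 0) {
			fprintf(stderr, "send failed: %d\n", rc);
			break;
		}
		if (rc == 0)
			fprintf(stderr, "warning: sent zero, retrying\n");
		sent += rc;
		printf("accepted %d, %d/%d done\n", rc, sent, nrecip);
	}
}

int main(void)
{
	send_ipi_many(5);	/* takes three calls: 2 + 2 + 1 */
	return 0;
}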
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 0d54106be3d6..dd81713a90dc 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -44,13 +44,6 @@ static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)
 	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
 }
 
-/* Is address in the specified kernel code? */
-static int in_kernel_text(VirtualAddress address)
-{
-	return (address >= MEM_SV_INTRPT &&
-		address < MEM_SV_INTRPT + HPAGE_SIZE);
-}
-
 /* Is address valid for reading? */
 static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
 {
@@ -63,6 +56,23 @@ static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
 	if (l1_pgtable == NULL)
 		return 0;	/* can't read user space in other tasks */
 
+#ifdef CONFIG_64BIT
+	/* Find the real l1_pgtable by looking in the l0_pgtable. */
+	pte = l1_pgtable[HV_L0_INDEX(address)];
+	if (!hv_pte_get_present(pte))
+		return 0;
+	pfn = hv_pte_get_pfn(pte);
+	if (pte_huge(pte)) {
+		if (!pfn_valid(pfn)) {
+			pr_err("L0 huge page has bad pfn %#lx\n", pfn);
+			return 0;
+		}
+		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
+	}
+	page = pfn_to_page(pfn);
+	BUG_ON(PageHighMem(page));	/* No HIGHMEM on 64-bit. */
+	l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
+#endif
 	pte = l1_pgtable[HV_L1_INDEX(address)];
 	if (!hv_pte_get_present(pte))
 		return 0;
@@ -92,7 +102,7 @@ static bool read_memory_func(void *result, VirtualAddress address,
 {
 	int retval;
 	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
-	if (in_kernel_text(address)) {
+	if (__kernel_text_address(address)) {
 		/* OK to read kernel code. */
 	} else if (address >= PAGE_OFFSET) {
 		/* We only tolerate kernel-space reads of this task's stack */
@@ -132,7 +142,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 		}
 	}
 	if (EX1_PL(p->ex1) == KERNEL_PL &&
-	    in_kernel_text(p->pc) &&
+	    __kernel_text_address(p->pc) &&
 	    in_kernel_stack(kbt, p->sp) &&
 	    p->sp >= sp) {
 		if (kbt->verbose)
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index f2e156e44692..49a605be94c5 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -224,3 +224,13 @@ int setup_profiling_timer(unsigned int multiplier)
 {
 	return -EINVAL;
 }
+
+/*
+ * Use the tile timer to convert nsecs to core clock cycles, relying
+ * on it having the same frequency as SPR_CYCLE.
+ */
+cycles_t ns2cycles(unsigned long nsecs)
+{
+	struct clock_event_device *dev = &__get_cpu_var(tile_timer);
+	return ((u64)nsecs * dev->mult) >> dev->shift;
+}
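Aside: the new ns2cycles() above reuses the tile timer's clockevent mult/shift pair, which encodes cycles-per-nanosecond as a fixed-point fraction: cycles = (ns * mult) >> shift. A runnable sketch of that conversion; the 700 MHz clock and shift of 32 are assumed values for the arithmetic only:

/*
 * Illustration only: the mult/shift fixed-point conversion behind
 * ns2cycles().  The frequency and shift are assumptions for the demo.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t hz = 700000000;	/* assumed core clock: 700 MHz */
	uint32_t shift = 32;
	/* mult = (hz << shift) / NSEC_PER_SEC, i.e. 0.7 cycles/ns in
	 * 32-bit fixed point. */
	uint32_t mult = (uint32_t)((hz << shift) / 1000000000ULL);

	uint64_t nsecs = 1000;		/* 1 us */
	uint64_t cycles = (nsecs * mult) >> shift;	/* ~700 cycles */

	printf("%llu ns ~= %llu cycles\n",
	       (unsigned long long)nsecs, (unsigned long long)cycles);
	return 0;
}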
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index c6ce378e0678..38f64fafdc10 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -59,10 +59,7 @@ SECTIONS
 
   . = ALIGN(PAGE_SIZE);
   VMLINUX_SYMBOL(_sinitdata) = .;
-  .init.page : AT (ADDR(.init.page) - LOAD_OFFSET) {
-    *(.init.page)
-  } :data =0
-  INIT_DATA_SECTION(16)
+  INIT_DATA_SECTION(16) :data =0
   PERCPU(L2_CACHE_BYTES, PAGE_SIZE)
   . = ALIGN(PAGE_SIZE);
   VMLINUX_SYMBOL(_einitdata) = .;