Diffstat (limited to 'include')
-rw-r--r-- | include/asm-mips/cevt-r4k.h         | 46
-rw-r--r-- | include/asm-mips/irqflags.h         | 26
-rw-r--r-- | include/asm-mips/mipsregs.h         |  6
-rw-r--r-- | include/asm-mips/smtc.h             |  8
-rw-r--r-- | include/asm-mips/sn/mapped_kernel.h |  8
-rw-r--r-- | include/asm-mips/stackframe.h       | 72
-rw-r--r-- | include/asm-x86/uaccess_64.h        |  1
-rw-r--r-- | include/linux/hrtimer.h             | 18
-rw-r--r-- | include/linux/ide.h                 |  4
-rw-r--r-- | include/linux/ramfs.h               |  1
-rw-r--r-- | include/linux/stacktrace.h          |  2
-rw-r--r-- | include/net/sctp/sm.h               |  3
12 files changed, 166 insertions, 29 deletions
diff --git a/include/asm-mips/cevt-r4k.h b/include/asm-mips/cevt-r4k.h
new file mode 100644
index 000000000000..fa4328f9124f
--- /dev/null
+++ b/include/asm-mips/cevt-r4k.h
@@ -0,0 +1,46 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008  Kevin D. Kissell
+ */
+
+/*
+ * Definitions used for common event timer implementation
+ * for MIPS 4K-type processors and their MIPS MT variants.
+ * Avoids unsightly extern declarations in C files.
+ */
+#ifndef __ASM_CEVT_R4K_H
+#define __ASM_CEVT_R4K_H
+
+DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+
+void mips_event_handler(struct clock_event_device *dev);
+int c0_compare_int_usable(void);
+void mips_set_clock_mode(enum clock_event_mode, struct clock_event_device *);
+irqreturn_t c0_compare_interrupt(int, void *);
+
+extern struct irqaction c0_compare_irqaction;
+extern int cp0_timer_irq_installed;
+
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked
+ */
+
+static inline int handle_perf_irq(int r2)
+{
+	/*
+	 * The performance counter overflow interrupt may be shared with the
+	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
+	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+	 * and we can't reliably determine if a counter interrupt has also
+	 * happened (!r2) then don't check for a timer interrupt.
+	 */
+	return (cp0_perfcount_irq < 0) &&
+		perf_irq() == IRQ_HANDLED &&
+		!r2;
+}
+
+#endif /* __ASM_CEVT_R4K_H */
diff --git a/include/asm-mips/irqflags.h b/include/asm-mips/irqflags.h
index 881e8866501d..701ec0ba8fa9 100644
--- a/include/asm-mips/irqflags.h
+++ b/include/asm-mips/irqflags.h
@@ -38,8 +38,17 @@ __asm__(
 	"	.set	pop						\n"
 	"	.endm");
 
+extern void smtc_ipi_replay(void);
+
 static inline void raw_local_irq_enable(void)
 {
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of call overhead on each local_irq_enable()
+	 */
+	smtc_ipi_replay();
+#endif
 	__asm__ __volatile__(
 		"raw_local_irq_enable"
 		: /* no outputs */
@@ -47,6 +56,7 @@ static inline void raw_local_irq_enable(void)
 		: "memory");
 }
 
+
 /*
  * For cli() we have to insert nops to make sure that the new value
  * has actually arrived in the status register before the end of this
@@ -185,15 +195,14 @@ __asm__(
 	"	.set	pop						\n"
 	"	.endm							\n");
 
-extern void smtc_ipi_replay(void);
 
 static inline void raw_local_irq_restore(unsigned long flags)
 {
 	unsigned long __tmp1;
 
-#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
+#ifdef CONFIG_MIPS_MT_SMTC
 	/*
-	 * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred
+	 * SMTC kernel needs to do a software replay of queued
 	 * IPIs, at the cost of branch and call overhead on each
 	 * local_irq_restore()
 	 */
@@ -208,6 +217,17 @@ static inline void raw_local_irq_restore(unsigned long flags)
 		: "memory");
 }
 
+static inline void __raw_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+	__asm__ __volatile__(
+		"raw_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+}
+
 static inline int raw_irqs_disabled_flags(unsigned long flags)
 {
 #ifdef CONFIG_MIPS_MT_SMTC
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h
index a46f8e258e6b..979866000da4 100644
--- a/include/asm-mips/mipsregs.h
+++ b/include/asm-mips/mipsregs.h
@@ -1462,7 +1462,7 @@ set_c0_##name(unsigned int set)					\
 {									\
 	unsigned int res;						\
 	unsigned int omt;						\
-	unsigned int flags;						\
+	unsigned long flags;						\
 									\
 	local_irq_save(flags);						\
 	omt = __dmt();							\
@@ -1480,7 +1480,7 @@ clear_c0_##name(unsigned int clear)				\
 {									\
 	unsigned int res;						\
 	unsigned int omt;						\
-	unsigned int flags;						\
+	unsigned long flags;						\
 									\
 	local_irq_save(flags);						\
 	omt = __dmt();							\
@@ -1498,7 +1498,7 @@ change_c0_##name(unsigned int change, unsigned int new)	\
 {									\
 	unsigned int res;						\
 	unsigned int omt;						\
-	unsigned int flags;						\
+	unsigned long flags;						\
 									\
 	local_irq_save(flags);						\
 									\
diff --git a/include/asm-mips/smtc.h b/include/asm-mips/smtc.h
index 3639b28f80db..ea60bf08dcb0 100644
--- a/include/asm-mips/smtc.h
+++ b/include/asm-mips/smtc.h
@@ -6,6 +6,7 @@
  */
 
 #include <asm/mips_mt.h>
+#include <asm/smtc_ipi.h>
 
 /*
  * System-wide SMTC status information
@@ -38,14 +39,15 @@ struct mm_struct;
 struct task_struct;
 
 void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
-
+void self_ipi(struct smtc_ipi *);
 void smtc_flush_tlb_asid(unsigned long asid);
-extern int mipsmt_build_cpu_map(int startslot);
-extern void mipsmt_prepare_cpus(void);
+extern int smtc_build_cpu_map(int startslot);
+extern void smtc_prepare_cpus(int cpus);
 extern void smtc_smp_finish(void);
 extern void smtc_boot_secondary(int cpu, struct task_struct *t);
 extern void smtc_cpus_done(void);
 
+
 /*
  * Sharing the TLB between multiple VPEs means that the
  * "random" index selection function is not allowed to
diff --git a/include/asm-mips/sn/mapped_kernel.h b/include/asm-mips/sn/mapped_kernel.h
index c3dd5d0d525f..721496a0bb92 100644
--- a/include/asm-mips/sn/mapped_kernel.h
+++ b/include/asm-mips/sn/mapped_kernel.h
@@ -5,6 +5,8 @@
 #ifndef __ASM_SN_MAPPED_KERNEL_H
 #define __ASM_SN_MAPPED_KERNEL_H
 
+#include <linux/mmzone.h>
+
 /*
  * Note on how mapped kernels work: the text and data section is
  * compiled at cksseg segment (LOADADDR = 0xc001c000), and the
@@ -29,10 +31,8 @@
 #define MAPPED_ADDR_RO_TO_PHYS(x)	(x - REP_BASE)
 #define MAPPED_ADDR_RW_TO_PHYS(x)	(x - REP_BASE - 16777216)
 
-#define MAPPED_KERN_RO_PHYSBASE(n) \
-	(PLAT_NODE_DATA(n)->kern_vars.kv_ro_baseaddr)
-#define MAPPED_KERN_RW_PHYSBASE(n) \
-	(PLAT_NODE_DATA(n)->kern_vars.kv_rw_baseaddr)
+#define MAPPED_KERN_RO_PHYSBASE(n) (hub_data(n)->kern_vars.kv_ro_baseaddr)
+#define MAPPED_KERN_RW_PHYSBASE(n) (hub_data(n)->kern_vars.kv_rw_baseaddr)
 
 #define MAPPED_KERN_RO_TO_PHYS(x) \
 	((unsigned long)MAPPED_ADDR_RO_TO_PHYS(x) | \
diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h
index 051e1af0bb95..4c37c4e5f72e 100644
--- a/include/asm-mips/stackframe.h
+++ b/include/asm-mips/stackframe.h
@@ -297,14 +297,31 @@
 #ifdef CONFIG_MIPS_MT_SMTC
 		.set	mips32r2
 		/*
-		 * This may not really be necessary if ints are already
-		 * inhibited here.
+		 * We need to make sure the read-modify-write
+		 * of Status below isn't perturbed by an interrupt
+		 * or cross-TC access, so we need to do at least a DMT,
+		 * protected by an interrupt-inhibit. But setting IXMT
+		 * also creates a few-cycle window where an IPI could
+		 * be queued and not be detected before potentially
+		 * returning to a WAIT or user-mode loop. It must be
+		 * replayed.
+		 *
+		 * We're in the middle of a context switch, and
+		 * we can't dispatch it directly without trashing
+		 * some registers, so we'll try to detect this unlikely
+		 * case and program a software interrupt in the VPE,
+		 * as would be done for a cross-VPE IPI.  To accomodate
+		 * the handling of that case, we're doing a DVPE instead
+		 * of just a DMT here to protect against other threads.
+		 * This is a lot of cruft to cover a tiny window.
+		 * If you can find a better design, implement it!
+		 *
 		 */
 		mfc0	v0, CP0_TCSTATUS
 		ori	v0, TCSTATUS_IXMT
 		mtc0	v0, CP0_TCSTATUS
 		_ehb
-		DMT	5				# dmt a1
+		DVPE	5				# dvpe a1
 		jal	mips_ihb
 #endif /* CONFIG_MIPS_MT_SMTC */
 		mfc0	a0, CP0_STATUS
@@ -325,17 +342,50 @@
 		 */
 		LONG_L	v1, PT_TCSTATUS(sp)
 		_ehb
-		mfc0	v0, CP0_TCSTATUS
+		mfc0	a0, CP0_TCSTATUS
 		andi	v1, TCSTATUS_IXMT
-		/* We know that TCStatua.IXMT should be set from above */
-		xori	v0, v0, TCSTATUS_IXMT
-		or	v0, v0, v1
-		mtc0	v0, CP0_TCSTATUS
-		_ehb
-		andi	a1, a1, VPECONTROL_TE
+		bnez	v1, 0f
+
+/*
+ * We'd like to detect any IPIs queued in the tiny window
+ * above and request an software interrupt to service them
+ * when we ERET.
+ *
+ * Computing the offset into the IPIQ array of the executing
+ * TC's IPI queue in-line would be tedious.  We use part of
+ * the TCContext register to hold 16 bits of offset that we
+ * can add in-line to find the queue head.
+ */
+		mfc0	v0, CP0_TCCONTEXT
+		la	a2, IPIQ
+		srl	v0, v0, 16
+		addu	a2, a2, v0
+		LONG_L	v0, 0(a2)
+		beqz	v0, 0f
+/*
+ * If we have a queue, provoke dispatch within the VPE by setting C_SW1
+ */
+		mfc0	v0, CP0_CAUSE
+		ori	v0, v0, C_SW1
+		mtc0	v0, CP0_CAUSE
+0:
+		/*
+		 * This test should really never branch but
+		 * let's be prudent here.  Having atomized
+		 * the shared register modifications, we can
+		 * now EVPE, and must do so before interrupts
+		 * are potentially re-enabled.
+		 */
+		andi	a1, a1, MVPCONTROL_EVP
 		beqz	a1, 1f
-		emt
+		evpe
 1:
+		/* We know that TCStatua.IXMT should be set from above */
+		xori	a0, a0, TCSTATUS_IXMT
+		or	a0, a0, v1
+		mtc0	a0, CP0_TCSTATUS
+		_ehb
+
 		.set	mips0
 #endif /* CONFIG_MIPS_MT_SMTC */
 		LONG_L	v1, PT_EPC(sp)
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index 515d4dce96b5..45806d60bcbe 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -7,6 +7,7 @@
 #include <linux/compiler.h>
 #include <linux/errno.h>
 #include <linux/prefetch.h>
+#include <linux/lockdep.h>
 #include <asm/page.h>
 
 /*
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 6d93dce61cbb..2f245fe63bda 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -47,14 +47,22 @@ enum hrtimer_restart {
  *	HRTIMER_CB_IRQSAFE:		Callback may run in hardirq context
  *	HRTIMER_CB_IRQSAFE_NO_RESTART:	Callback may run in hardirq context and
  *					does not restart the timer
- *	HRTIMER_CB_IRQSAFE_NO_SOFTIRQ:	Callback must run in hardirq context
- *					Special mode for tick emultation
+ *	HRTIMER_CB_IRQSAFE_PERCPU:	Callback must run in hardirq context
+ *					Special mode for tick emulation and
+ *					scheduler timer. Such timers are per
+ *					cpu and not allowed to be migrated on
+ *					cpu unplug.
+ *	HRTIMER_CB_IRQSAFE_UNLOCKED:	Callback should run in hardirq context
+ *					with timer->base lock unlocked
+ *					used for timers which call wakeup to
+ *					avoid lock order problems with rq->lock
  */
 enum hrtimer_cb_mode {
 	HRTIMER_CB_SOFTIRQ,
 	HRTIMER_CB_IRQSAFE,
 	HRTIMER_CB_IRQSAFE_NO_RESTART,
-	HRTIMER_CB_IRQSAFE_NO_SOFTIRQ,
+	HRTIMER_CB_IRQSAFE_PERCPU,
+	HRTIMER_CB_IRQSAFE_UNLOCKED,
 };
 
 /*
@@ -67,9 +75,10 @@ enum hrtimer_cb_mode {
  * 0x02		callback function running
  * 0x04		callback pending (high resolution mode)
  *
- * Special case:
+ * Special cases:
  * 0x03		callback function running and enqueued
  *		(was requeued on another CPU)
+ * 0x09		timer was migrated on CPU hotunplug
  * The "callback function running and enqueued" status is only possible on
  * SMP. It happens for example when a posix timer expired and the callback
  * queued a signal. Between dropping the lock which protects the posix timer
@@ -87,6 +96,7 @@ enum hrtimer_cb_mode {
 #define HRTIMER_STATE_ENQUEUED	0x01
 #define HRTIMER_STATE_CALLBACK	0x02
 #define HRTIMER_STATE_PENDING	0x04
+#define HRTIMER_STATE_MIGRATE	0x08
 
 /**
  * struct hrtimer - the basic hrtimer structure
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 1524829f73f2..6514db8fd2e4 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -366,7 +366,9 @@ enum {
 	/* Currently on a filemark */
 	IDE_AFLAG_FILEMARK		= (1 << 25),
 	/* 0 = no tape is loaded, so we don't rewind after ejecting */
-	IDE_AFLAG_MEDIUM_PRESENT	= (1 << 26)
+	IDE_AFLAG_MEDIUM_PRESENT	= (1 << 26),
+
+	IDE_AFLAG_NO_AUTOCLOSE		= (1 << 27),
 };
 
 struct ide_drive_s {
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index b160fb18e8d6..37aaf2b39863 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -6,6 +6,7 @@ extern int ramfs_get_sb(struct file_system_type *fs_type,
 	 int flags, const char *dev_name, void *data, struct vfsmount *mnt);
 
 #ifndef CONFIG_MMU
+extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize);
 extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
 						   unsigned long addr,
 						   unsigned long len,
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 5da9794b2d78..b106fd8e0d5c 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -1,6 +1,8 @@
 #ifndef __LINUX_STACKTRACE_H
 #define __LINUX_STACKTRACE_H
 
+struct task_struct;
+
 #ifdef CONFIG_STACKTRACE
 struct stack_trace {
 	unsigned int nr_entries, max_entries;
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 24811732bdb2..029a54a02396 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -227,6 +227,9 @@ struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *,
 				   const struct sctp_chunk *,
 				   const __u8 *,
 				   const size_t );
+struct sctp_chunk *sctp_make_violation_paramlen(const struct sctp_association *,
+				   const struct sctp_chunk *,
+				   struct sctp_paramhdr *);
 struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *,
 				       const struct sctp_transport *,
 				       const void *payload,