From c6298038bcfc20710430a4ad069bb1f3f069997c Mon Sep 17 00:00:00 2001
From: Cyrill Gorcunov
Date: Wed, 24 Oct 2012 23:43:21 +0400
Subject: tty, ioctls -- Add new ioctl definitions for tty flags fetching

This patch defines new ioctl codes TIOCGPKT, TIOCGPTLCK, TIOCGEXCL for
fetching a pty's packet mode and locking state, and a tty's exclusive
mode.

[ No real handlers for the codes though, this will be addressed in
  another patch for easier review and bisectability ]

Signed-off-by: Cyrill Gorcunov
CC: Alan Cox
CC: "H. Peter Anvin"
CC: Pavel Emelyanov
CC: Jiri Slaby
Signed-off-by: Greg Kroah-Hartman
---
 arch/mips/include/uapi/asm/ioctls.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/mips/include')

diff --git a/arch/mips/include/uapi/asm/ioctls.h b/arch/mips/include/uapi/asm/ioctls.h
index 92403c3d6007..addd56b60694 100644
--- a/arch/mips/include/uapi/asm/ioctls.h
+++ b/arch/mips/include/uapi/asm/ioctls.h
@@ -86,6 +86,9 @@
 #define TIOCGDEV	_IOR('T', 0x32, unsigned int) /* Get primary device node of /dev/console */
 #define TIOCSIG		_IOW('T', 0x36, int) /* Generate signal on Pty slave */
 #define TIOCVHANGUP	0x5437
+#define TIOCGPKT	_IOR('T', 0x38, int) /* Get packet mode state */
+#define TIOCGPTLCK	_IOR('T', 0x39, int) /* Get Pty lock state */
+#define TIOCGEXCL	_IOR('T', 0x40, int) /* Get exclusive mode state */

 /* I hope the range from 0x5480 on is free ... */
 #define TIOCSCTTY	0x5480	/* become controlling tty */
--
cgit v1.2.1
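
The handlers only arrive in the follow-up patch, but the intended userspace
use is easy to sketch. The snippet below is illustrative and not taken from
the series: it probes a pty master, with fallback defines mirroring the
values added above in case the libc headers predate them.

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/ioctl.h>	/* for _IOR */

    #ifndef TIOCGPKT
    #define TIOCGPKT	_IOR('T', 0x38, int)	/* from this patch */
    #endif
    #ifndef TIOCGPTLCK
    #define TIOCGPTLCK	_IOR('T', 0x39, int)	/* from this patch */
    #endif

    int main(void)
    {
    	int pktmode, locked;
    	int fd = open("/dev/ptmx", O_RDWR | O_NOCTTY);

    	if (fd < 0) {
    		perror("open /dev/ptmx");
    		return 1;
    	}
    	if (ioctl(fd, TIOCGPTLCK, &locked) == 0)	/* slave lock state */
    		printf("pty slave locked: %d\n", locked);
    	if (ioctl(fd, TIOCGPKT, &pktmode) == 0)		/* packet mode state */
    		printf("packet mode: %d\n", pktmode);
    	close(fd);
    	return 0;
    }
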

From 4870639a754991a52fc8f4ba82f0d9c247c33659 Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Wed, 7 Nov 2012 12:54:21 +0100
Subject: MIPS: compat: Fix use of TIF_32BIT_ADDR vs _TIF_32BIT_ADDR

Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/thread_info.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 8debe9e91754..d8323ea9d5e8 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -115,7 +115,7 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #ifdef CONFIG_MIPS32_O32
 #define TIF_32BIT TIF_32BIT_REGS
 #elif defined(CONFIG_MIPS32_N32)
-#define TIF_32BIT _TIF_32BIT_ADDR
+#define TIF_32BIT TIF_32BIT_ADDR
 #endif /* CONFIG_MIPS32_O32 */

 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
--
cgit v1.2.1


From: Ralf Baechle
Date: Thu, 8 Nov 2012 23:59:31 +0100
Subject: MIPS: compat: Implement is_compat_task() by testing for 32-bit address space.

So far is_compat_task() was testing for 32-bit registers if O32 support
was enabled; if O32 support was disabled but N32 was enabled, it was
testing for a 32-bit address space. So if both O32 and N32 were enabled,
an N32 task was not considered a compat task, whoops.

This still leaves potential cases where O32 and N32 need different
treatment unsolved. But that's another commit.

Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/compat.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 58277e0e9cd4..3c5d1464b7bd 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -290,7 +290,7 @@ struct compat_shmid64_ds {

 static inline int is_compat_task(void)
 {
-	return test_thread_flag(TIF_32BIT);
+	return test_thread_flag(TIF_32BIT_ADDR);
 }

 #endif /* _ASM_COMPAT_H */
--
cgit v1.2.1
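
A standalone model of why testing the address-space flag is the right call.
The names mirror the kernel's TIF bits, but this is an illustration, not
kernel code:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical model of the two MIPS thread flags involved. */
    #define TIF_32BIT_REGS (1u << 0)  /* 32-bit register ABI (o32 only) */
    #define TIF_32BIT_ADDR (1u << 1)  /* 32-bit address space (o32 and n32) */

    static bool is_compat_task(unsigned flags)
    {
    	/* After the fix: the address-space flag is set for both
    	 * compat ABIs, so it classifies o32 and n32 correctly. */
    	return flags & TIF_32BIT_ADDR;
    }

    int main(void)
    {
    	unsigned o32 = TIF_32BIT_REGS | TIF_32BIT_ADDR;
    	unsigned n32 = TIF_32BIT_ADDR;	/* 64-bit regs, 32-bit addresses */
    	unsigned n64 = 0;
    	printf("o32 compat: %d\n", is_compat_task(o32));  /* 1 */
    	printf("n32 compat: %d\n", is_compat_task(n32));  /* 1 */
    	printf("n64 compat: %d\n", is_compat_task(n64));  /* 0 */
    	return 0;
    }
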

From 34d875d7b579baab2f2f6dfd8e8533602d9dbf33 Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Fri, 9 Nov 2012 00:28:59 +0100
Subject: MIPS: compat: Delete now unused TIF_32BIT.

Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/thread_info.h | 6 ------
 1 file changed, 6 deletions(-)

(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index d8323ea9d5e8..18806a52061c 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -112,12 +112,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define TIF_LOAD_WATCH		25	/* If set, load watch registers */
 #define TIF_SYSCALL_TRACE	31	/* syscall trace active */

-#ifdef CONFIG_MIPS32_O32
-#define TIF_32BIT TIF_32BIT_REGS
-#elif defined(CONFIG_MIPS32_N32)
-#define TIF_32BIT TIF_32BIT_ADDR
-#endif /* CONFIG_MIPS32_O32 */
-
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
--
cgit v1.2.1


From: Jim Quinlan
Date: Thu, 6 Sep 2012 11:36:54 -0400
Subject: MIPS: bitops.h: Change use of 'unsigned short' to 'int'

[ralf@linux-mips.org: No functional change, but it's consistent with how
we use types elsewhere in the code.]

Signed-off-by: Jim Quinlan
Cc: linux-mips@linux-mips.org
Cc: David Daney
Cc: Kevin Cernekee cernekee@gmail.com
Cc: Jim Quinlan
Patchwork: https://patchwork.linux-mips.org/patch/4319/
Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/bitops.h | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 82ad35ce2b45..455664cf5352 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -57,7 +57,7 @@
 static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long temp;

 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -118,7 +118,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long temp;

 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -191,7 +191,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad
  */
 static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;

 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -244,7 +244,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 static inline int test_and_set_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;

 	smp_mb__before_llsc();
@@ -310,7 +310,7 @@ static inline int test_and_set_bit(unsigned long nr,
 static inline int test_and_set_bit_lock(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;

 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -373,7 +373,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 static inline int test_and_clear_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;

 	smp_mb__before_llsc();
@@ -457,7 +457,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 static inline int test_and_change_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;

 	smp_mb__before_llsc();
--
cgit v1.2.1
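
For reference, the quantity being retyped is just the low bits of the bit
number. A minimal sketch of the word/offset split these helpers share
(SZLONG_LOG is 5 on 32-bit kernels and 6 on 64-bit ones; values here are
filled in for the 64-bit case):

    #include <stdio.h>

    #define SZLONG_LOG  6				/* log2(BITS_PER_LONG) */
    #define SZLONG_MASK ((1UL << SZLONG_LOG) - 1)	/* 63 on 64-bit */

    int main(void)
    {
    	unsigned long nr = 200;			/* arbitrary bit number */
    	unsigned long word = nr >> SZLONG_LOG;	/* which long in the array */
    	int bit = nr & SZLONG_MASK;		/* offset inside that long */

    	/* The offset is at most 63, so a plain int always holds it;
    	 * 'unsigned short' bought nothing. */
    	printf("bit %lu -> word %lu, offset %d\n", nr, word, bit);
    	return 0;
    }
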

From 92d11594f688c8b55b51e80f2eac4417396237a4 Mon Sep 17 00:00:00 2001
From: Jim Quinlan
Date: Thu, 6 Sep 2012 11:36:55 -0400
Subject: MIPS: Remove irqflags.h dependency from bitops.h

The "else clause" of most functions in bitops.h invoked
raw_local_irq_{save,restore}() and in doing so had a dependency on
irqflags.h. This fix moves said code to bitops.c, removing the
dependency.

Signed-off-by: Jim Quinlan
Cc: linux-mips@linux-mips.org
Cc: David Daney
Cc: Kevin Cernekee cernekee@gmail.com
Cc: Jim Quinlan
Patchwork: https://patchwork.linux-mips.org/patch/4320/
Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/bitops.h | 114 ++++++++++++-----------------------------
 arch/mips/include/asm/io.h     |   1 +
 2 files changed, 33 insertions(+), 82 deletions(-)

(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 455664cf5352..46ac73abd5ee 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -14,7 +14,6 @@
 #endif

 #include <linux/compiler.h>
-#include <linux/irqflags.h>
 #include <linux/types.h>
 #include <asm/barrier.h>
 #include <asm/byteorder.h>		/* sigh ... */
@@ -44,6 +43,24 @@
 #define smp_mb__before_clear_bit()	smp_mb__before_llsc()
 #define smp_mb__after_clear_bit()	smp_llsc_mb()

+
+/*
+ * These are the "slower" versions of the functions and are in bitops.c.
+ * These functions call raw_local_irq_{save,restore}().
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
+int __mips_test_and_set_bit(unsigned long nr,
+			    volatile unsigned long *addr);
+int __mips_test_and_set_bit_lock(unsigned long nr,
+				 volatile unsigned long *addr);
+int __mips_test_and_clear_bit(unsigned long nr,
+			      volatile unsigned long *addr);
+int __mips_test_and_change_bit(unsigned long nr,
+			       volatile unsigned long *addr);
+
+
 /*
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
@@ -92,17 +109,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 			: "=&r" (temp), "+m" (*m)
 			: "ir" (1UL << bit));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		__mips_set_bit(nr, addr);
 }

@@ -153,17 +161,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 			: "=&r" (temp), "+m" (*m)
 			: "ir" (~(1UL << bit)));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a &= ~mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		__mips_clear_bit(nr, addr);
 }

@@ -220,17 +219,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 			: "=&r" (temp), "+m" (*m)
 			: "ir" (1UL << bit));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a ^= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		__mips_change_bit(nr, addr);
 }

@@ -281,18 +271,8 @@
 		} while (unlikely(!res));

 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_set_bit(nr, addr);

 	smp_llsc_mb();

@@ -345,18 +325,8 @@
 		} while (unlikely(!res));

 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_set_bit_lock(nr, addr);

 	smp_llsc_mb();

@@ -428,18 +398,8 @@
 		} while (unlikely(!res));

 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a &= ~mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_clear_bit(nr, addr);

 	smp_llsc_mb();

@@ -494,18 +454,8 @@
 		} while (unlikely(!res));

 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a ^= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_change_bit(nr, addr);

 	smp_llsc_mb();

diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 29d9c23c20c7..ff2e0345e013 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/irqflags.h>

 #include <asm/addrspace.h>
 #include <asm/bug.h>
--
cgit v1.2.1
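
The out-of-line slow paths themselves live outside arch/mips/include, so
they are not in this diff; presumably each one carries the removed else
clause verbatim. A sketch of __mips_set_bit under that assumption (kernel
context, location in arch/mips/lib/bitops.c assumed):

    /* Assumed shape of the moved slow path, reconstructed from the
     * else clause deleted above; only bitops.c now needs irqflags.h. */
    void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
    {
    	volatile unsigned long *a = addr;
    	unsigned long mask;
    	unsigned long flags;
    	int bit = nr & SZLONG_MASK;

    	a += nr >> SZLONG_LOG;		/* select the word */
    	mask = 1UL << bit;		/* select the bit */
    	raw_local_irq_save(flags);	/* non-LL/SC CPUs: mask IRQs instead */
    	*a |= mask;
    	raw_local_irq_restore(flags);
    }
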

From e97c5b609880d97313b13eb71830fca62cee50c2 Mon Sep 17 00:00:00 2001
From: Jim Quinlan
Date: Thu, 6 Sep 2012 11:36:56 -0400
Subject: MIPS: Make irqflags.h functions preempt-safe for non-mipsr2 cpus

For non-MIPSr2 processors, such as the BMIPS 5000, calls to
arch_local_irq_disable() and others may be preempted, and in doing so a
stale value may be restored to c0_status. This fix disables preemption
for such processors prior to the call and enables it after the call.

Those functions that needed this fix have been "outlined" to
mips-atomic.c, as they are no longer good candidates for inlining.

This bug was observed on a BMIPS 5000, occurring once every few hours in
a continuous reboot test. It was traced to the write_lock_irq()
function, which was being invoked in release_task() in exit.c. By
placing a number of "nops" in between the mfc0/mtc0 pair in
arch_local_irq_disable(), which is called by write_lock_irq(), we were
able to greatly increase the occurrence of this bug. Similarly, the
application of this commit silenced the bug.

Signed-off-by: Jim Quinlan
Cc: linux-mips@linux-mips.org
Cc: David Daney
Cc: Kevin Cernekee cernekee@gmail.com
Cc: Jim Quinlan
Patchwork: https://patchwork.linux-mips.org/patch/4321/
Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/irqflags.h | 207 ++++++++++++++-------------------------
 1 file changed, 75 insertions(+), 132 deletions(-)

(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 309cbcd6909c..9f3384c789d7 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -16,83 +16,13 @@
 #include <linux/compiler.h>
 #include <asm/hazards.h>

-__asm__(
-	" .macro arch_local_irq_enable \n"
-	" .set push \n"
-	" .set reorder \n"
-	" .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	" mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
-	" ori $1, 0x400 \n"
-	" xori $1, 0x400 \n"
-	" mtc0 $1, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
-	" ei \n"
-#else
-	" mfc0 $1,$12 \n"
-	" ori $1,0x1f \n"
-	" xori $1,0x1e \n"
-	" mtc0 $1,$12 \n"
-#endif
-	" irq_enable_hazard \n"
-	" .set pop \n"
-	" .endm");
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)

-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of call overhead on each local_irq_enable()
-	 */
-	smtc_ipi_replay();
-#endif
-	__asm__ __volatile__(
-		"arch_local_irq_enable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
-}
-
-
-/*
- * For cli() we have to insert nops to make sure that the new value
- * has actually arrived in the status register before the end of this
- * macro.
- * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
- * no nops at all.
- */
-/*
- * For TX49, operating only IE bit is not enough.
- *
- * If mfc0 $12 follows store and the mfc0 is last instruction of a
- * page and fetching the next instruction causes TLB miss, the result
- * of the mfc0 might wrongly contain EXL bit.
- *
- * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
- *
- * Workaround: mask EXL bit of the result or place a nop before mfc0.
- */
 __asm__(
 	" .macro arch_local_irq_disable\n"
 	" .set push \n"
 	" .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	" mfc0 $1, $2, 1 \n"
-	" ori $1, 0x400 \n"
-	" .set noreorder \n"
-	" mtc0 $1, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
 	" di \n"
-#else
-	" mfc0 $1,$12 \n"
-	" ori $1,0x1f \n"
-	" xori $1,0x1f \n"
-	" .set noreorder \n"
-	" mtc0 $1,$12 \n"
-#endif
 	" irq_disable_hazard \n"
 	" .set pop \n"
 	" .endm \n");
@@ -106,46 +36,14 @@ static inline void arch_local_irq_disable(void)
 	: "memory");
 }

-__asm__(
-	" .macro arch_local_save_flags flags \n"
-	" .set push \n"
-	" .set reorder \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	" mfc0 \\flags, $2, 1 \n"
-#else
-	" mfc0 \\flags, $12 \n"
-#endif
-	" .set pop \n"
-	" .endm \n");
-
-static inline unsigned long arch_local_save_flags(void)
-{
-	unsigned long flags;
-	asm volatile("arch_local_save_flags %0" : "=r" (flags));
-	return flags;
-}

 __asm__(
 	" .macro arch_local_irq_save result \n"
 	" .set push \n"
 	" .set reorder \n"
 	" .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	" mfc0 \\result, $2, 1 \n"
-	" ori $1, \\result, 0x400 \n"
-	" .set noreorder \n"
-	" mtc0 $1, $2, 1 \n"
-	" andi \\result, \\result, 0x400 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
 	" di \\result \n"
 	" andi \\result, 1 \n"
-#else
-	" mfc0 \\result, $12 \n"
-	" ori $1, \\result, 0x1f \n"
-	" xori $1, 0x1f \n"
-	" .set noreorder \n"
-	" mtc0 $1, $12 \n"
-#endif
 	" irq_disable_hazard \n"
 	" .set pop \n"
 	" .endm \n");
@@ -160,61 +58,37 @@ static inline unsigned long arch_local_irq_save(void)
 	return flags;
 }

+
 __asm__(
 	" .macro arch_local_irq_restore flags \n"
 	" .set push \n"
 	" .set noreorder \n"
 	" .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"mfc0 $1, $2, 1 \n"
-	"andi \\flags, 0x400 \n"
-	"ori $1, 0x400 \n"
-	"xori $1, 0x400 \n"
-	"or \\flags, $1 \n"
-	"mtc0 \\flags, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#if defined(CONFIG_IRQ_CPU)
 	/*
 	 * Slow, but doesn't suffer from a relatively unlikely race
 	 * condition we're having since days 1.
	 */
 	" beqz \\flags, 1f \n"
-	" di							\n"
+	" di \n"
 	" ei \n"
 	"1: \n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#else
 	/*
	 * Fast, dangerous. Life is fun, life is good.
	 */
 	" mfc0 $1, $12 \n"
 	" ins $1, \\flags, 0, 1 \n"
 	" mtc0 $1, $12 \n"
-#else
-	" mfc0 $1, $12 \n"
-	" andi \\flags, 1 \n"
-	" ori $1, 0x1f \n"
-	" xori $1, 0x1f \n"
-	" or \\flags, $1 \n"
-	" mtc0 \\flags, $12 \n"
 #endif
 	" irq_disable_hazard \n"
 	" .set pop \n"
 	" .endm \n");

-
 static inline void arch_local_irq_restore(unsigned long flags)
 {
 	unsigned long __tmp1;

-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of branch and call overhead on each
-	 * local_irq_restore()
-	 */
-	if (unlikely(!(flags & 0x0400)))
-		smtc_ipi_replay();
-#endif
-
 	__asm__ __volatile__(
 		"arch_local_irq_restore\t%0"
 		: "=r" (__tmp1)
 		: "0" (flags)
 		: "memory");
 }
@@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags)
 		: "0" (flags)
 		: "memory");
 }
+#else
+/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
+void arch_local_irq_disable(void);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_restore(unsigned long flags);
+void __arch_local_irq_restore(unsigned long flags);
+#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
+
+
+__asm__(
+	" .macro arch_local_irq_enable \n"
+	" .set push \n"
+	" .set reorder \n"
+	" .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	" mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
+	" ori $1, 0x400 \n"
+	" xori $1, 0x400 \n"
+	" mtc0 $1, $2, 1 \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	" ei \n"
+#else
+	" mfc0 $1,$12 \n"
+	" ori $1,0x1f \n"
+	" xori $1,0x1e \n"
+	" mtc0 $1,$12 \n"
+#endif
+	" irq_enable_hazard \n"
+	" .set pop \n"
+	" .endm");
+
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of call overhead on each local_irq_enable()
+	 */
+	smtc_ipi_replay();
+#endif
+	__asm__ __volatile__(
+		"arch_local_irq_enable"
+		: /* no outputs */
+		: /* no inputs */
+		: "memory");
+}
+
+
+__asm__(
+	" .macro arch_local_save_flags flags \n"
+	" .set push \n"
+	" .set reorder \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	" mfc0 \\flags, $2, 1 \n"
+#else
+	" mfc0 \\flags, $12 \n"
+#endif
+	" .set pop \n"
+	" .endm \n");
+
+static inline unsigned long arch_local_save_flags(void)
+{
+	unsigned long flags;
+	asm volatile("arch_local_save_flags %0" : "=r" (flags));
+	return flags;
+}
+

 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
@@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 #endif
 }

-#endif
+#endif /* #ifndef __ASSEMBLY__ */

 /*
  * Do the CPU's IRQ-state tracing from assembly code.
--
cgit v1.2.1
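
The outlined functions land in mips-atomic.c, which is outside this include
diff; the point is that the CP0 Status read-modify-write gets bracketed by
preempt_disable()/preempt_enable() so no other task can run between the mfc0
and the mtc0. A sketch of the assumed shape for the non-R2
arch_local_irq_disable() (kernel context; irq_disable_hazard comes from
<asm/hazards.h>):

    /* Assumed shape of the outlined, preempt-safe slow path in
     * arch/mips/lib/mips-atomic.c (path assumed, non-SMTC case shown). */
    #include <linux/preempt.h>

    void arch_local_irq_disable(void)
    {
    	preempt_disable();	/* keep the RMW of c0_status atomic
    				 * with respect to the scheduler */
    	__asm__ __volatile__(
    	" .set push \n"
    	" .set noat \n"
    	" mfc0 $1, $12 \n"	/* read Status */
    	" ori $1, 0x1f \n"
    	" xori $1, 0x1f \n"	/* clear IE and friends */
    	" .set noreorder \n"
    	" mtc0 $1, $12 \n"	/* write it back */
    	" irq_disable_hazard \n"
    	" .set pop \n"
    	: /* no outputs */
    	: /* no inputs */
    	: "memory");
    	preempt_enable();
    }
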

From 8cbd9cc6254065c97c4bac42daa55ba1abe73a8e Mon Sep 17 00:00:00 2001
From: David Sharp
Date: Tue, 13 Nov 2012 12:18:21 -0800
Subject: tracing,x86: Add a TSC trace_clock

In order to promote interoperability between userspace tracers and
ftrace, add a trace_clock that reports raw TSC values which will then
be recorded in the ring buffer. Userspace tracers that also record
TSCs are then on exactly the same time base as the kernel and events
can be unambiguously interlaced.

Tested: Enabled a tracepoint and the "tsc" trace_clock and saw very
large timestamp values.

v2: Move arch-specific bits out of generic code.
v3: Rename "x86-tsc", cleanups
v7: Generic arch bits in Kbuild.

Google-Bug-Id: 6980623
Link: http://lkml.kernel.org/r/1352837903-32191-1-git-send-email-dhsharp@google.com
Acked-by: Ingo Molnar
Cc: Masami Hiramatsu
Cc: Ingo Molnar
Cc: Thomas Gleixner
Cc: "H. Peter Anvin"
Signed-off-by: David Sharp
Signed-off-by: Steven Rostedt
---
 arch/mips/include/asm/Kbuild | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 533053d12ced..9b54b7a403d4 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -1 +1,2 @@
 # MIPS headers
+generic-y += trace_clock.h
--
cgit v1.2.1
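
On MIPS this commit is pure plumbing: the generic-y line makes
<asm/trace_clock.h> resolve to the asm-generic stub, since MIPS has no TSC.
Selecting the new clock is a userspace action on an x86 host; a hypothetical
helper is below (the tracefs path is an assumption, and "x86-tsc" is the v3
rename mentioned above):

    #include <stdio.h>

    int main(void)
    {
    	/* Path assumes debugfs-mounted tracing; newer kernels also
    	 * expose /sys/kernel/tracing. */
    	FILE *f = fopen("/sys/kernel/debug/tracing/trace_clock", "w");

    	if (!f) {
    		perror("trace_clock");
    		return 1;
    	}
    	fputs("x86-tsc", f);	/* record raw TSC values in the ring buffer */
    	fclose(f);
    	return 0;
    }
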

From ac53c4fca42c394d8a06c7a470ae2d1d50503717 Mon Sep 17 00:00:00 2001
From: David Daney
Date: Mon, 3 Dec 2012 12:44:26 -0800
Subject: MIPS: Avoid mcheck by flushing page range in huge_ptep_set_access_flags()

Problem:

1) Huge page mapping of anonymous memory is initially invalid. Will be
   faulted in by copy-on-write mechanism.

2) Userspace attempts store at the end of the huge mapping.

3) TLB Refill exception handler fills the TLB with a normal (4K sized)
   invalid page at the end of the huge mapping virtual address range.

4) Userspace is restarted and re-attempts the store at the end of the
   huge mapping.

5) Page from #3 is invalid, we get a fault and go to the hugepage fault
   handler. This tries to map a huge page and calls
   huge_ptep_set_access_flags() to install the mapping.

6) We just call the generic ptep_set_access_flags() to set up the page
   tables, but the flush there assumes a normal (4K sized) page and only
   tries to flush the first part of the huge page virtual address out of
   the TLB. Since the existing entry from step #3 doesn't conflict,
   nothing is flushed.

7) We attempt to load the mapping into the TLB, but because it conflicts
   with the entry from step #3, we get a Machine Check exception.

The fix: Flush the entire range covered by the huge page in
huge_ptep_set_access_flags(), and remove the optimization in
local_flush_tlb_range() so that the flush actually does the correct
thing.

Signed-off-by: David Daney
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Cc: Hillf Danton
Patchwork: https://patchwork.linux-mips.org/patch/4661/
Signed-off-by: Ralf Baechle
(cherry picked from commit dd617f258cc39d36be26afee9912624a2d23112c)
---
 arch/mips/include/asm/hugetlb.h | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
index bd94946a18f3..ef99db994c2f 100644
--- a/arch/mips/include/asm/hugetlb.h
+++ b/arch/mips/include/asm/hugetlb.h
@@ -95,7 +95,17 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 					     pte_t *ptep, pte_t pte,
 					     int dirty)
 {
-	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+	int changed = !pte_same(*ptep, pte);
+
+	if (changed) {
+		set_pte_at(vma->vm_mm, addr, ptep, pte);
+		/*
+		 * There could be some standard sized pages in there,
+		 * get them all.
+		 */
+		flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
+	}
+	return changed;
 }

 static inline pte_t huge_ptep_get(pte_t *ptep)
--
cgit v1.2.1
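
Steps 1 and 2 of the scenario correspond to dirtying the tail of an
anonymous hugetlb mapping. A hypothetical userspace trigger is sketched
below; it assumes 2 MB default huge pages are reserved, and it exercises
the path rather than guaranteeing the machine check:

    #include <stdio.h>
    #include <sys/mman.h>

    #ifndef MAP_HUGETLB
    #define MAP_HUGETLB 0x40000	/* x86 value; differs per architecture */
    #endif
    #define HPAGE_SIZE (2UL << 20)	/* assumed default huge page size */

    int main(void)
    {
    	char *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
    		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

    	if (p == MAP_FAILED) {
    		perror("mmap");	/* no huge pages reserved? */
    		return 1;
    	}
    	p[HPAGE_SIZE - 1] = 1;	/* step 2: store at the end of the mapping */
    	munmap(p, HPAGE_SIZE);
    	return 0;
    }
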
Peter Anvin" Signed-off-by: David Sharp Signed-off-by: Steven Rostedt --- arch/mips/include/asm/Kbuild | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/mips/include') diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 533053d12ced..9b54b7a403d4 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -1 +1,2 @@ # MIPS headers +generic-y += trace_clock.h -- cgit v1.2.1 From ac53c4fca42c394d8a06c7a470ae2d1d50503717 Mon Sep 17 00:00:00 2001 From: David Daney Date: Mon, 3 Dec 2012 12:44:26 -0800 Subject: MIPS: Avoid mcheck by flushing page range in huge_ptep_set_access_flags() Problem: 1) Huge page mapping of anonymous memory is initially invalid. Will be faulted in by copy-on-write mechanism. 2) Userspace attempts store at the end of the huge mapping. 3) TLB Refill exception handler fill TLB with a normal (4K sized) invalid page at the end of the huge mapping virtual address range. 4) Userspace restarted, and re-attempts the store at the end of the huge mapping. 5) Page from #3 is invalid, we get a fault and go to the hugepage fault handler. This tries to map a huge page and calls huge_ptep_set_access_flags() to install the mapping. 6) We just call the generic ptep_set_access_flags() to set up the page tables, but the flush there assumes a normal (4K sized) page and only tries to flush the first part of the huge page virtual address out of the TLB, since the existing entry from step #3 doesn't conflict, nothing is flushed. 7) We attempt to load the mapping into the TLB, but because it conflicts with the entry from step #3, we get a Machine Check exception. The fix: Flush the entire rage covered by the huge page in huge_ptep_set_access_flags(), and remove the optimization in local_flush_tlb_range() so that the flush actually does the correct thing. Signed-off-by: David Daney Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Cc: Hillf Danton Patchwork: https://patchwork.linux-mips.org/patch/4661/ Signed-off-by: Ralf Baechle (cherry picked from commit dd617f258cc39d36be26afee9912624a2d23112c) --- arch/mips/include/asm/hugetlb.h | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'arch/mips/include') diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h index bd94946a18f3..ef99db994c2f 100644 --- a/arch/mips/include/asm/hugetlb.h +++ b/arch/mips/include/asm/hugetlb.h @@ -95,7 +95,17 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, pte_t pte, int dirty) { - return ptep_set_access_flags(vma, addr, ptep, pte, dirty); + int changed = !pte_same(*ptep, pte); + + if (changed) { + set_pte_at(vma->vm_mm, addr, ptep, pte); + /* + * There could be some standard sized pages in there, + * get them all. + */ + flush_tlb_range(vma, addr, addr + HPAGE_SIZE); + } + return changed; } static inline pte_t huge_ptep_get(pte_t *ptep) -- cgit v1.2.1 From 42d7395feb56f0655cd8b68e06fc6063823449f8 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Tue, 11 Dec 2012 16:01:34 -0800 Subject: mm: support more pagesizes for MAP_HUGETLB/SHM_HUGETLB There was some desire in large applications using MAP_HUGETLB or SHM_HUGETLB to use 1GB huge pages on some mappings, and stay with 2MB on others. This is useful together with NUMA policy: use 2MB interleaving on some mappings, but 1GB on local mappings. This patch extends the IPC/SHM syscall interfaces slightly to allow specifying the page size. 
It borrows some upper bits in the existing flag arguments and allows
encoding the log of the desired page size in addition to the *_HUGETLB
flag. When 0 is specified the default size is used, which makes the
change fully compatible.

Extending the internal hugetlb code to handle this is straightforward.
Instead of a single mount it just keeps an array of them and selects
the right mount based on the specified page size. When no page size is
specified it uses the mount of the default page size.

The change is not visible in /proc/mounts because internal mounts
don't appear there. It also has very little overhead: the additional
mounts just consume a super block, but not more memory when not used.

I also exported the new flags to the user headers (they were
previously under __KERNEL__). Right now only symbols for x86 and some
other architectures for 1GB and 2MB are defined. The interface should
already work for all other architectures though. Only architectures
that define multiple hugetlb sizes actually need it (that is currently
x86, tile, powerpc). However tile and powerpc have user-configurable
hugetlb sizes, so it's not easy to add defines. A program on those
architectures would need to query sysfs and use the appropriate log2.

[akpm@linux-foundation.org: cleanups]
[rientjes@google.com: fix build]
[akpm@linux-foundation.org: checkpatch fixes]
Signed-off-by: Andi Kleen
Cc: Michael Kerrisk
Acked-by: Rik van Riel
Acked-by: KAMEZAWA Hiroyuki
Cc: Hillf Danton
Signed-off-by: David Rientjes
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/mips/include/uapi/asm/mman.h | 11 +++++++++++
 1 file changed, 11 insertions(+)

(limited to 'arch/mips/include')

diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index 46d3da0d4b92..9a936ac9a942 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -87,4 +87,15 @@
 /* compatibility flags */
 #define MAP_FILE	0

+/*
+ * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
+ * This gives us 6 bits, which is enough until someone invents 128 bit address
+ * spaces.
+ *
+ * Assume these are all power of twos.
+ * When 0 use the default page size.
+ */
+#define MAP_HUGE_SHIFT	26
+#define MAP_HUGE_MASK	0x3f
+
 #endif /* _ASM_MMAN_H */
--
cgit v1.2.1
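
A userspace sketch of the new encoding (illustrative only: the MAP_HUGETLB
fallback value varies by architecture, and the mapping succeeds only on a
kernel with 1 GB huge pages configured):

    #include <stdio.h>
    #include <sys/mman.h>

    #ifndef MAP_HUGETLB
    #define MAP_HUGETLB	0x40000		/* x86 value; differs per arch */
    #endif
    #ifndef MAP_HUGE_SHIFT
    #define MAP_HUGE_SHIFT	26		/* from this patch */
    #endif

    int main(void)
    {
    	size_t len = 1UL << 30;		/* one 1 GB huge page */
    	/* log2(1 GB) = 30, shifted into bits [26:31] of the flags;
    	 * a value of 0 keeps the default huge page size. */
    	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
    		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
    		       (30 << MAP_HUGE_SHIFT), -1, 0);

    	if (p == MAP_FAILED) {
    		perror("mmap");		/* fails if no 1 GB pages reserved */
    		return 1;
    	}
    	printf("mapped 1 GB huge page at %p\n", p);
    	munmap(p, len);
    	return 0;
    }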