Diffstat (limited to 'include')
-rw-r--r--   include/asm-generic/bug.h         |  8
-rw-r--r--   include/asm-generic/qrwlock.h     |  7
-rw-r--r--   include/asm-generic/qspinlock.h   | 16
-rw-r--r--   include/asm-generic/vmlinux.lds.h | 11
-rw-r--r--   include/linux/compiler.h          | 56
-rw-r--r--   include/linux/debug_locks.h       |  4
-rw-r--r--   include/linux/jump_label.h        | 65
-rw-r--r--   include/linux/lockdep.h           |  7
-rw-r--r--   include/linux/rwsem.h             |  4
-rw-r--r--   include/linux/sched.h             |  6
10 files changed, 140 insertions, 44 deletions
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 20561a60db9c..cdafa5edea49 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -17,10 +17,8 @@
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
 
-#ifdef CONFIG_BUG
-
-#ifdef CONFIG_GENERIC_BUG
 struct bug_entry {
+#ifdef CONFIG_GENERIC_BUG
 #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
 	unsigned long	bug_addr;
 #else
@@ -35,8 +33,10 @@ struct bug_entry {
 	unsigned short	line;
 #endif
 	unsigned short	flags;
-};
 #endif	/* CONFIG_GENERIC_BUG */
+};
+
+#ifdef CONFIG_BUG
 
 /*
  * Don't use BUG() or BUG_ON() unless there's really no way out; one
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 0f7062bd55e5..36254d2da8e0 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -71,8 +71,8 @@ static inline int queued_write_trylock(struct qrwlock *lock)
 	if (unlikely(cnts))
 		return 0;
 
-	return likely(atomic_cmpxchg_acquire(&lock->cnts,
-					     cnts, cnts | _QW_LOCKED) == cnts);
+	return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
+				_QW_LOCKED));
 }
 /**
  * queued_read_lock - acquire read lock of a queue rwlock
@@ -96,8 +96,9 @@ static inline void queued_read_lock(struct qrwlock *lock)
  */
 static inline void queued_write_lock(struct qrwlock *lock)
 {
+	u32 cnts = 0;
 	/* Optimize for the unfair lock case where the fair flag is 0. */
-	if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
+	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
 		return;
 
 	queued_write_lock_slowpath(lock);
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 9cc457597ddf..7541fa707f5b 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -66,10 +66,12 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
  */
 static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 {
-	if (!atomic_read(&lock->val) &&
-	   (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
-		return 1;
-	return 0;
+	u32 val = atomic_read(&lock->val);
+
+	if (unlikely(val))
+		return 0;
+
+	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
 }
 
 extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
@@ -80,11 +82,11 @@ extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
  */
 static __always_inline void queued_spin_lock(struct qspinlock *lock)
 {
-	u32 val;
+	u32 val = 0;
 
-	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
-	if (likely(val == 0))
+	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
 		return;
+
 	queued_spin_lock_slowpath(lock, val);
 }
 
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index d7701d466b60..d0bcea7c8f84 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -253,10 +253,6 @@
 			STRUCT_ALIGN();					\
 			*(__tracepoints)				\
 			/* implement dynamic printk debug */		\
-			. = ALIGN(8);					\
-			__start___jump_table = .;			\
-			KEEP(*(__jump_table))				\
-			__stop___jump_table = .;			\
 			. = ALIGN(8);					\
 			__start___verbose = .;				\
 			KEEP(*(__verbose))				\
@@ -300,6 +296,12 @@
 	. = __start_init_task + THREAD_SIZE;				\
 	__end_init_task = .;
 
+#define JUMP_TABLE_DATA							\
+	. = ALIGN(8);							\
+	__start___jump_table = .;					\
+	KEEP(*(__jump_table))						\
+	__stop___jump_table = .;
+
 /*
  * Allow architectures to handle ro_after_init data on their
  * own by defining an empty RO_AFTER_INIT_DATA.
@@ -308,6 +310,7 @@
 #define RO_AFTER_INIT_DATA						\
 	__start_ro_after_init = .;					\
 	*(.data..ro_after_init)						\
+	JUMP_TABLE_DATA							\
 	__end_ro_after_init = .;
 #endif
 
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 681d866efb1e..1921545c6351 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -99,22 +99,13 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
  * unique, to convince GCC not to merge duplicate inline asm statements.
  */
 #define annotate_reachable() ({						\
-	asm volatile("%c0:\n\t"						\
-		     ".pushsection .discard.reachable\n\t"		\
-		     ".long %c0b - .\n\t"				\
-		     ".popsection\n\t" : : "i" (__COUNTER__));		\
+	asm volatile("ANNOTATE_REACHABLE counter=%c0"			\
+		     : : "i" (__COUNTER__));				\
 })
 #define annotate_unreachable() ({					\
-	asm volatile("%c0:\n\t"						\
-		     ".pushsection .discard.unreachable\n\t"		\
-		     ".long %c0b - .\n\t"				\
-		     ".popsection\n\t" : : "i" (__COUNTER__));		\
+	asm volatile("ANNOTATE_UNREACHABLE counter=%c0"			\
+		     : : "i" (__COUNTER__));				\
 })
-#define ASM_UNREACHABLE \
-	"999:\n\t" \
-	".pushsection .discard.unreachable\n\t" \
-	".long 999b - .\n\t" \
-	".popsection\n\t"
 #else
 #define annotate_reachable()
 #define annotate_unreachable()
@@ -299,6 +290,45 @@ static inline void *offset_to_ptr(const int *off)
 	return (void *)((unsigned long)off + *off);
 }
 
+#else /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+#ifndef LINKER_SCRIPT
+
+#ifdef CONFIG_STACK_VALIDATION
+.macro ANNOTATE_UNREACHABLE counter:req
+\counter:
+	.pushsection .discard.unreachable
+	.long \counter\()b -.
+	.popsection
+.endm
+
+.macro ANNOTATE_REACHABLE counter:req
+\counter:
+	.pushsection .discard.reachable
+	.long \counter\()b -.
+	.popsection
+.endm
+
+.macro ASM_UNREACHABLE
+999:
+	.pushsection .discard.unreachable
+	.long 999b - .
+	.popsection
+.endm
+#else /* CONFIG_STACK_VALIDATION */
+.macro ANNOTATE_UNREACHABLE counter:req
+.endm
+
+.macro ANNOTATE_REACHABLE counter:req
+.endm
+
+.macro ASM_UNREACHABLE
+.endm
+#endif /* CONFIG_STACK_VALIDATION */
+
+#endif /* LINKER_SCRIPT */
+#endif /* __KERNEL__ */
 #endif /* __ASSEMBLY__ */
 
 #ifndef __optimize
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 120225e9a366..257ab3c92cb8 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -8,8 +8,8 @@
 
 struct task_struct;
 
-extern int debug_locks;
-extern int debug_locks_silent;
+extern int debug_locks __read_mostly;
+extern int debug_locks_silent __read_mostly;
 
 
 static inline int __debug_locks_off(void)
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 1a0b6f17a5d6..5df6a621e464 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -119,6 +119,68 @@ struct static_key {
 
 #ifdef HAVE_JUMP_LABEL
 #include <asm/jump_label.h>
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE
+
+struct jump_entry {
+	s32 code;
+	s32 target;
+	long key;	// key may be far away from the core kernel under KASLR
+};
+
+static inline unsigned long jump_entry_code(const struct jump_entry *entry)
+{
+	return (unsigned long)&entry->code + entry->code;
+}
+
+static inline unsigned long jump_entry_target(const struct jump_entry *entry)
+{
+	return (unsigned long)&entry->target + entry->target;
+}
+
+static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
+{
+	long offset = entry->key & ~3L;
+
+	return (struct static_key *)((unsigned long)&entry->key + offset);
+}
+
+#else
+
+static inline unsigned long jump_entry_code(const struct jump_entry *entry)
+{
+	return entry->code;
+}
+
+static inline unsigned long jump_entry_target(const struct jump_entry *entry)
+{
+	return entry->target;
+}
+
+static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
+{
+	return (struct static_key *)((unsigned long)entry->key & ~3UL);
+}
+
+#endif
+
+static inline bool jump_entry_is_branch(const struct jump_entry *entry)
+{
+	return (unsigned long)entry->key & 1UL;
+}
+
+static inline bool jump_entry_is_init(const struct jump_entry *entry)
+{
+	return (unsigned long)entry->key & 2UL;
+}
+
+static inline void jump_entry_set_init(struct jump_entry *entry)
+{
+	entry->key |= 2;
+}
+
+#endif
 #endif
 
 #ifndef __ASSEMBLY__
@@ -151,7 +213,6 @@ extern struct jump_entry __start___jump_table[];
 extern struct jump_entry __stop___jump_table[];
 
 extern void jump_label_init(void);
-extern void jump_label_invalidate_initmem(void);
 extern void jump_label_lock(void);
 extern void jump_label_unlock(void);
 extern void arch_jump_label_transform(struct jump_entry *entry,
@@ -199,8 +260,6 @@ static __always_inline void jump_label_init(void)
 	static_key_initialized = true;
 }
 
-static inline void jump_label_invalidate_initmem(void) {}
-
 static __always_inline bool static_key_false(struct static_key *key)
 {
 	if (unlikely(static_key_count(key) > 0))
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b0d0b51c4d85..1fd82ff99c65 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -99,13 +99,8 @@ struct lock_class {
 	 */
 	unsigned int			version;
 
-	/*
-	 * Statistics counter:
-	 */
-	unsigned long			ops;
-
-	const char			*name;
 	int				name_version;
+	const char			*name;
 
 #ifdef CONFIG_LOCK_STAT
 	unsigned long			contention_point[LOCKSTAT_POINTS];
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index ab93b6eae696..67dbb57508b1 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -45,10 +45,10 @@ struct rw_semaphore {
 };
 
 /*
- * Setting bit 0 of the owner field with other non-zero bits will indicate
+ * Setting bit 1 of the owner field but not bit 0 will indicate
  * that the rwsem is writer-owned with an unknown owner.
  */
-#define RWSEM_OWNER_UNKNOWN	((struct task_struct *)-1L)
+#define RWSEM_OWNER_UNKNOWN	((struct task_struct *)-2L)
 
 extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 004ca21f7e80..58e2af8d064b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -735,6 +735,12 @@ struct task_struct {
 	unsigned			use_memdelay:1;
 #endif
 
+	/*
+	 * May usercopy functions fault on kernel addresses?
+	 * This is not just a single bit because this can potentially nest.
+	 */
+	unsigned int			kernel_uaccess_faults_ok;
+
 	unsigned long			atomic_flags; /* Flags requiring atomic access. */
 
 	struct restart_block		restart_block;
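Side note on the relative jump_entry layout introduced in the jump_label.h hunk above: under CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE each field stores a signed 32-bit offset from the field's own address, which keeps the table position-independent and half the size on 64-bit kernels. The sketch below is a minimal user-space illustration of that encoding, using hypothetical helper names (rel_entry_*); it is not the kernel implementation.

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Mirrors the relative layout: each field holds the target address
	 * minus the address of the field itself. Names are made up for this
	 * example.
	 */
	struct rel_entry {
		int32_t code;	/* offset to the patched instruction */
		int32_t target;	/* offset to the jump destination */
	};

	static void rel_entry_set(struct rel_entry *e, const void *code,
				  const void *target)
	{
		/* Assumes both distances fit in 32 bits (true here: all objects
		 * below live in the same data/bss region). */
		e->code   = (int32_t)((intptr_t)code   - (intptr_t)&e->code);
		e->target = (int32_t)((intptr_t)target - (intptr_t)&e->target);
	}

	/* Same "&field + field" arithmetic as jump_entry_code()/jump_entry_target(). */
	static uintptr_t rel_entry_code(const struct rel_entry *e)
	{
		return (uintptr_t)&e->code + e->code;
	}

	static uintptr_t rel_entry_target(const struct rel_entry *e)
	{
		return (uintptr_t)&e->target + e->target;
	}

	int main(void)
	{
		static char insn[16], dest[16];	/* stand-ins for two code locations */
		static struct rel_entry e;

		rel_entry_set(&e, insn, dest);
		printf("code   %p -> %p\n", (void *)insn, (void *)rel_entry_code(&e));
		printf("target %p -> %p\n", (void *)dest, (void *)rel_entry_target(&e));
		return 0;
	}

The decode step is the same arithmetic used by jump_entry_code() and jump_entry_target() in the hunk above; the key member stays a long because, with KASLR, a module's entry can sit farther from its static_key in the core kernel than an s32 offset can reach, as the in-line comment in the diff notes.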