Diffstat (limited to 'tools/include/linux')
38 files changed, 877 insertions, 21 deletions
diff --git a/tools/include/linux/atomic.h b/tools/include/linux/atomic.h
index 4e3d3d18ebab..9f21fc2b092b 100644
--- a/tools/include/linux/atomic.h
+++ b/tools/include/linux/atomic.h
@@ -3,4 +3,10 @@
 
 #include <asm/atomic.h>
 
+/* atomic_cmpxchg_relaxed */
+#ifndef atomic_cmpxchg_relaxed
+#define atomic_cmpxchg_relaxed		atomic_cmpxchg
+#define atomic_cmpxchg_release		atomic_cmpxchg
+#endif /* atomic_cmpxchg_relaxed */
+
 #endif /* __TOOLS_LINUX_ATOMIC_H */
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index eef41d500e9e..e8b9f518e36b 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -4,6 +4,7 @@
 #include <string.h>
 #include <linux/bitops.h>
 #include <stdlib.h>
+#include <linux/kernel.h>
 
 #define DECLARE_BITMAP(name,bits) \
 	unsigned long name[BITS_TO_LONGS(bits)]
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index fc446343ff41..969db1981868 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -2,7 +2,6 @@
 #define _TOOLS_LINUX_BITOPS_H_
 
 #include <asm/types.h>
-#include <linux/kernel.h>
 #include <linux/compiler.h>
 
 #ifndef __WORDSIZE
@@ -62,4 +61,14 @@ static inline unsigned fls_long(unsigned long l)
 	return fls64(l);
 }
 
+/**
+ * rol32 - rotate a 32-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u32 rol32(__u32 word, unsigned int shift)
+{
+	return (word << shift) | (word >> ((-shift) & 31));
+}
+
 #endif
diff --git a/tools/include/linux/bug.h b/tools/include/linux/bug.h
new file mode 100644
index 000000000000..8e4a4f49135d
--- /dev/null
+++ b/tools/include/linux/bug.h
@@ -0,0 +1,10 @@
+#ifndef _TOOLS_PERF_LINUX_BUG_H
+#define _TOOLS_PERF_LINUX_BUG_H
+
+/* Force a compilation error if condition is true, but also produce a
+   result (of value 0 and type size_t), so the expression can be used
+   e.g. in a structure initializer (or where-ever else comma expressions
+   aren't permitted). */
+#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
+
+#endif	/* _TOOLS_PERF_LINUX_BUG_H */
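Two of the helpers introduced above compose directly: rol32() from bitops.h is the mixing primitive the Jenkins hash later in this patch relies on, and BUILD_BUG_ON_ZERO() from the new bug.h is what __must_be_array() is built on. A minimal usage sketch, not part of the patch, assuming the tools headers are on the include path:

#include <stdio.h>
#include <linux/bitops.h>	/* rol32() */
#include <linux/bug.h>		/* BUILD_BUG_ON_ZERO() */

int main(void)
{
	/* The high bit wraps around: 0x80000001 rotated left by 4 is 0x18. */
	printf("%#x\n", rol32(0x80000001u, 4));

	/* BUILD_BUG_ON_ZERO() evaluates to (size_t)0 when the condition is
	 * false, so it can be folded into an expression; a true condition
	 * breaks the build via a negative bit-field width. */
	char buf[16 + BUILD_BUG_ON_ZERO(sizeof(long) < sizeof(int))];

	return (int)(sizeof(buf) - 16);
}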
diff --git a/tools/include/linux/compiler-gcc.h b/tools/include/linux/compiler-gcc.h
new file mode 100644
index 000000000000..bd39b2090ad1
--- /dev/null
+++ b/tools/include/linux/compiler-gcc.h
@@ -0,0 +1,31 @@
+#ifndef _TOOLS_LINUX_COMPILER_H_
+#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
+#endif
+
+/*
+ * Common definitions for all gcc versions go here.
+ */
+#define GCC_VERSION (__GNUC__ * 10000		\
+		     + __GNUC_MINOR__ * 100	\
+		     + __GNUC_PATCHLEVEL__)
+
+#if GCC_VERSION >= 70000 && !defined(__CHECKER__)
+# define __fallthrough __attribute__ ((fallthrough))
+#endif
+
+#if GCC_VERSION >= 40300
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* GCC_VERSION >= 40300 */
+
+/* &a[0] degrades to a pointer: a different type from an array */
+#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+
+#define noinline	__attribute__((noinline))
+
+#define __packed	__attribute__((packed))
+
+#define __noreturn	__attribute__((noreturn))
+
+#define __aligned(x)	__attribute__((aligned(x)))
+#define __printf(a, b)	__attribute__((format(printf, a, b)))
+#define __scanf(a, b)	__attribute__((format(scanf, a, b)))
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index e33fc1df3935..d7a5604c38d7 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -1,6 +1,14 @@
 #ifndef _TOOLS_LINUX_COMPILER_H_
 #define _TOOLS_LINUX_COMPILER_H_
 
+#ifdef __GNUC__
+#include <linux/compiler-gcc.h>
+#endif
+
+#ifndef __compiletime_error
+# define __compiletime_error(message)
+#endif
+
 /* Optimization barrier */
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")
@@ -9,6 +17,15 @@
 # define __always_inline	inline __attribute__((always_inline))
 #endif
 
+#ifndef noinline
+#define noinline
+#endif
+
+/* Are two types/vars the same type (ignoring qualifiers)? */
+#ifndef __same_type
+# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+#endif
+
 #ifdef __ANDROID__
 /*
  * FIXME: Big hammer to get rid of tons of:
@@ -21,6 +38,8 @@
 #endif
 
 #define __user
+#define __rcu
+#define __read_mostly
 
 #ifndef __attribute_const__
 # define __attribute_const__
@@ -30,6 +49,10 @@
 # define __maybe_unused		__attribute__((unused))
 #endif
 
+#ifndef __used
+# define __used		__attribute__((__unused__))
+#endif
+
 #ifndef __packed
 # define __packed		__attribute__((__packed__))
 #endif
@@ -50,6 +73,16 @@
 # define unlikely(x)		__builtin_expect(!!(x), 0)
 #endif
 
+#ifndef __init
+# define __init
+#endif
+
+#ifndef noinline
+# define noinline
+#endif
+
+#define uninitialized_var(x) x = *(&(x))
+
 #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
 #include <linux/types.h>
@@ -126,4 +159,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int size)
 #define WRITE_ONCE(x, val) \
 ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
 
+
+#ifndef __fallthrough
+# define __fallthrough
+#endif
+
 #endif /* _TOOLS_LINUX_COMPILER_H */
diff --git a/tools/include/linux/debug_locks.h b/tools/include/linux/debug_locks.h
new file mode 100644
index 000000000000..61cc7f501168
--- /dev/null
+++ b/tools/include/linux/debug_locks.h
@@ -0,0 +1,13 @@
+#ifndef _LIBLOCKDEP_DEBUG_LOCKS_H_
+#define _LIBLOCKDEP_DEBUG_LOCKS_H_
+
+#include <stddef.h>
+#include <linux/compiler.h>
+#include <asm/bug.h>
+
+#define DEBUG_LOCKS_WARN_ON(x) WARN_ON(x)
+
+extern bool debug_locks;
+extern bool debug_locks_silent;
+
+#endif
diff --git a/tools/include/linux/delay.h b/tools/include/linux/delay.h
new file mode 100644
index 000000000000..55aa4173af1f
--- /dev/null
+++ b/tools/include/linux/delay.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_INCLUDE_LINUX_DELAY_H
+#define _TOOLS_INCLUDE_LINUX_DELAY_H
+
+#endif /* _TOOLS_INCLUDE_LINUX_DELAY_H */
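The __fallthrough annotation defined in compiler-gcc.h (and stubbed out at the bottom of compiler.h for compilers that lack it) marks deliberate switch fall-through so -Wimplicit-fallthrough stays quiet on GCC >= 7. A minimal sketch, not part of the patch; the mode-to-flags mapping is made up:

#include <linux/compiler.h>

static int flags_for_mode(char mode)
{
	int flags = 0;

	switch (mode) {
	case 'w':
		flags |= 2;
		__fallthrough;	/* 'w' implies 'r' in this made-up scheme */
	case 'r':
		flags |= 1;
		break;
	}
	return flags;
}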
diff --git a/tools/include/linux/err.h b/tools/include/linux/err.h
index bdc3dd8131d4..abf0478a8fb2 100644
--- a/tools/include/linux/err.h
+++ b/tools/include/linux/err.h
@@ -46,4 +46,9 @@ static inline bool __must_check IS_ERR(__force const void *ptr)
 	return IS_ERR_VALUE((unsigned long)ptr);
 }
 
+static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
+{
+	return unlikely(!ptr) || IS_ERR_VALUE((unsigned long)ptr);
+}
+
 #endif /* _LINUX_ERR_H */
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h
index 122153b16ea4..4ce25d43e8e3 100644
--- a/tools/include/linux/filter.h
+++ b/tools/include/linux/filter.h
@@ -168,6 +168,16 @@
 		.off   = OFF,					\
 		.imm   = 0 })
 
+/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF)			\
+	((struct bpf_insn) {					\
+		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,	\
+		.dst_reg = DST,					\
+		.src_reg = SRC,					\
+		.off   = OFF,					\
+		.imm   = 0 })
+
 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
 
 #define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
@@ -198,6 +208,16 @@
 		.off   = OFF,					\
 		.imm   = IMM })
 
+/* Unconditional jumps, goto pc + off16 */
+
+#define BPF_JMP_A(OFF)						\
+	((struct bpf_insn) {					\
+		.code  = BPF_JMP | BPF_JA,			\
+		.dst_reg = 0,					\
+		.src_reg = 0,					\
+		.off   = OFF,					\
+		.imm   = 0 })
+
 /* Function call */
 
 #define BPF_EMIT_CALL(FUNC)					\
diff --git a/tools/include/linux/ftrace.h b/tools/include/linux/ftrace.h
new file mode 100644
index 000000000000..949f541ce11e
--- /dev/null
+++ b/tools/include/linux/ftrace.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_INCLUDE_LINUX_FTRACE_H
+#define _TOOLS_INCLUDE_LINUX_FTRACE_H
+
+#endif /* _TOOLS_INCLUDE_LINUX_FTRACE_H */
diff --git a/tools/include/linux/gfp.h b/tools/include/linux/gfp.h
new file mode 100644
index 000000000000..22030756fbc0
--- /dev/null
+++ b/tools/include/linux/gfp.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_INCLUDE_LINUX_GFP_H
+#define _TOOLS_INCLUDE_LINUX_GFP_H
+
+#endif /* _TOOLS_INCLUDE_LINUX_GFP_H */
diff --git a/tools/include/linux/hardirq.h b/tools/include/linux/hardirq.h
new file mode 100644
index 000000000000..c8f3f8f58729
--- /dev/null
+++ b/tools/include/linux/hardirq.h
@@ -0,0 +1,11 @@
+#ifndef _LIBLOCKDEP_LINUX_HARDIRQ_H_
+#define _LIBLOCKDEP_LINUX_HARDIRQ_H_
+
+#define SOFTIRQ_BITS	0UL
+#define HARDIRQ_BITS	0UL
+#define SOFTIRQ_SHIFT	0UL
+#define HARDIRQ_SHIFT	0UL
+#define hardirq_count()	0UL
+#define softirq_count()	0UL
+
+#endif
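The new IS_ERR_OR_NULL() folds the NULL test and the ERR_PTR() test into one predicate, which is the common shape at call sites. A usage sketch, not part of the patch; session_lookup() and struct session are hypothetical:

#include <errno.h>
#include <linux/err.h>

struct session;
struct session *session_lookup(const char *name);	/* hypothetical */

static int open_session(const char *name)
{
	struct session *s = session_lookup(name);

	/* Catches both NULL and ERR_PTR()-encoded errors in one test. */
	if (IS_ERR_OR_NULL(s))
		return s ? (int)PTR_ERR(s) : -ENOENT;
	return 0;
}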
diff --git a/tools/include/linux/hashtable.h b/tools/include/linux/hashtable.h
index c65cc0aa2659..251eabf2a05e 100644
--- a/tools/include/linux/hashtable.h
+++ b/tools/include/linux/hashtable.h
@@ -13,10 +13,6 @@
 #include <linux/hash.h>
 #include <linux/log2.h>
 
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#endif
-
 #define DEFINE_HASHTABLE(name, bits)					\
 	struct hlist_head name[1 << (bits)] =				\
 			{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
diff --git a/tools/include/linux/interrupt.h b/tools/include/linux/interrupt.h
new file mode 100644
index 000000000000..6be25bbdca9e
--- /dev/null
+++ b/tools/include/linux/interrupt.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_INCLUDE_LINUX_INTERRUPT_H
+#define _TOOLS_INCLUDE_LINUX_INTERRUPT_H
+
+#endif /* _TOOLS_INCLUDE_LINUX_INTERRUPT_H */
diff --git a/tools/include/linux/irqflags.h b/tools/include/linux/irqflags.h
new file mode 100644
index 000000000000..df77669cfe1c
--- /dev/null
+++ b/tools/include/linux/irqflags.h
@@ -0,0 +1,38 @@
+#ifndef _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
+#define _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
+
+# define trace_hardirq_context(p)	0
+# define trace_softirq_context(p)	0
+# define trace_hardirqs_enabled(p)	0
+# define trace_softirqs_enabled(p)	0
+# define trace_hardirq_enter()		do { } while (0)
+# define trace_hardirq_exit()		do { } while (0)
+# define lockdep_softirq_enter()	do { } while (0)
+# define lockdep_softirq_exit()		do { } while (0)
+# define INIT_TRACE_IRQFLAGS
+
+# define stop_critical_timings() do { } while (0)
+# define start_critical_timings() do { } while (0)
+
+#define raw_local_irq_disable() do { } while (0)
+#define raw_local_irq_enable() do { } while (0)
+#define raw_local_irq_save(flags) ((flags) = 0)
+#define raw_local_irq_restore(flags) ((void)(flags))
+#define raw_local_save_flags(flags) ((flags) = 0)
+#define raw_irqs_disabled_flags(flags) ((void)(flags))
+#define raw_irqs_disabled() 0
+#define raw_safe_halt()
+
+#define local_irq_enable() do { } while (0)
+#define local_irq_disable() do { } while (0)
+#define local_irq_save(flags) ((flags) = 0)
+#define local_irq_restore(flags) ((void)(flags))
+#define local_save_flags(flags) ((flags) = 0)
+#define irqs_disabled() (1)
+#define irqs_disabled_flags(flags) ((void)(flags), 0)
+#define safe_halt() do { } while (0)
+
+#define trace_lock_release(x, y)
+#define trace_lock_acquire(a, b, c, d, e, f, g)
+
+#endif
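The BPF_STX_XADD() and BPF_JMP_A() macros added to filter.h above each build one eBPF instruction. A sketch of constructing (not loading) a small instruction sequence, not part of the patch; it assumes BPF_MOV64_IMM() and BPF_EXIT_INSN() from the same header and the register/encoding constants from <linux/bpf.h>:

#include <linux/bpf.h>		/* BPF_REG_*, field encodings */
#include <linux/filter.h>	/* insn-building macros */

static const struct bpf_insn prog[] = {
	BPF_MOV64_IMM(BPF_REG_0, 1),	/* r0 = 1 */
	BPF_JMP_A(1),			/* goto pc + 1: skip the next insn */
	BPF_MOV64_IMM(BPF_REG_0, 2),	/* skipped */
	/* *(u64 *)(r10 - 8) += r0, as an atomic add */
	BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	BPF_EXIT_INSN(),
};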
diff --git a/tools/include/linux/jhash.h b/tools/include/linux/jhash.h
new file mode 100644
index 000000000000..348c6f47e4cc
--- /dev/null
+++ b/tools/include/linux/jhash.h
@@ -0,0 +1,175 @@
+#ifndef _LINUX_JHASH_H
+#define _LINUX_JHASH_H
+
+/* jhash.h: Jenkins hash support.
+ *
+ * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
+ * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+ *
+ * These are functions for producing 32-bit hashes for hash table lookup.
+ * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
+ * are externally useful functions.  Routines to test the hash are included
+ * if SELF_TEST is defined.  You can use this free for any purpose.  It's in
+ * the public domain.  It has no warranty.
+ *
+ * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
+ *
+ * I've modified Bob's hash to be useful in the Linux kernel, and
+ * any bugs present are my fault.
+ * Jozsef
+ */
+#include <linux/bitops.h>
+#include <linux/unaligned/packed_struct.h>
+
+/* Best hash sizes are of power of two */
+#define jhash_size(n)   ((u32)1<<(n))
+/* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */
+#define jhash_mask(n)   (jhash_size(n)-1)
+
+/* __jhash_mix -- mix 3 32-bit values reversibly. */
+#define __jhash_mix(a, b, c)			\
+{						\
+	a -= c;  a ^= rol32(c, 4);  c += b;	\
+	b -= a;  b ^= rol32(a, 6);  a += c;	\
+	c -= b;  c ^= rol32(b, 8);  b += a;	\
+	a -= c;  a ^= rol32(c, 16); c += b;	\
+	b -= a;  b ^= rol32(a, 19); a += c;	\
+	c -= b;  c ^= rol32(b, 4);  b += a;	\
+}
+
+/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */
+#define __jhash_final(a, b, c)			\
+{						\
+	c ^= b; c -= rol32(b, 14);		\
+	a ^= c; a -= rol32(c, 11);		\
+	b ^= a; b -= rol32(a, 25);		\
+	c ^= b; c -= rol32(b, 16);		\
+	a ^= c; a -= rol32(c, 4);		\
+	b ^= a; b -= rol32(a, 14);		\
+	c ^= b; c -= rol32(b, 24);		\
+}
+
+/* An arbitrary initial parameter */
+#define JHASH_INITVAL		0xdeadbeef
+
+/* jhash - hash an arbitrary key
+ * @k: sequence of bytes as key
+ * @length: the length of the key
+ * @initval: the previous hash, or an arbitray value
+ *
+ * The generic version, hashes an arbitrary sequence of bytes.
+ * No alignment or length assumptions are made about the input key.
+ *
+ * Returns the hash value of the key. The result depends on endianness.
+ */
+static inline u32 jhash(const void *key, u32 length, u32 initval)
+{
+	u32 a, b, c;
+	const u8 *k = key;
+
+	/* Set up the internal state */
+	a = b = c = JHASH_INITVAL + length + initval;
+
+	/* All but the last block: affect some 32 bits of (a,b,c) */
+	while (length > 12) {
+		a += __get_unaligned_cpu32(k);
+		b += __get_unaligned_cpu32(k + 4);
+		c += __get_unaligned_cpu32(k + 8);
+		__jhash_mix(a, b, c);
+		length -= 12;
+		k += 12;
+	}
+	/* Last block: affect all 32 bits of (c) */
+	/* All the case statements fall through */
+	switch (length) {
+	case 12: c += (u32)k[11]<<24;
+	case 11: c += (u32)k[10]<<16;
+	case 10: c += (u32)k[9]<<8;
+	case 9:  c += k[8];
+	case 8:  b += (u32)k[7]<<24;
+	case 7:  b += (u32)k[6]<<16;
+	case 6:  b += (u32)k[5]<<8;
+	case 5:  b += k[4];
+	case 4:  a += (u32)k[3]<<24;
+	case 3:  a += (u32)k[2]<<16;
+	case 2:  a += (u32)k[1]<<8;
+	case 1:  a += k[0];
+		 __jhash_final(a, b, c);
+	case 0: /* Nothing left to add */
+		break;
+	}
+
+	return c;
+}
+/* jhash2 - hash an array of u32's
+ * @k: the key which must be an array of u32's
+ * @length: the number of u32's in the key
+ * @initval: the previous hash, or an arbitray value
+ *
+ * Returns the hash value of the key.
+ */
+static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
+{
+	u32 a, b, c;
+
+	/* Set up the internal state */
+	a = b = c = JHASH_INITVAL + (length<<2) + initval;
+
+	/* Handle most of the key */
+	while (length > 3) {
+		a += k[0];
+		b += k[1];
+		c += k[2];
+		__jhash_mix(a, b, c);
+		length -= 3;
+		k += 3;
+	}
+
+	/* Handle the last 3 u32's: all the case statements fall through */
+	switch (length) {
+	case 3: c += k[2];
+	case 2: b += k[1];
+	case 1: a += k[0];
+		__jhash_final(a, b, c);
+	case 0:	/* Nothing left to add */
+		break;
+	}
+
+	return c;
+}
+
+
+/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */
+static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
+{
+	a += initval;
+	b += initval;
+	c += initval;
+
+	__jhash_final(a, b, c);
+
+	return c;
+}
+
+static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
+	return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2));
+}
+
+static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
+	return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
+}
+
+static inline u32 jhash_1word(u32 a, u32 initval)
+{
+	return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2));
+}
+
+#endif /* _LINUX_JHASH_H */
diff --git a/tools/include/linux/kallsyms.h b/tools/include/linux/kallsyms.h
new file mode 100644
index 000000000000..582cc1e5f3a4
--- /dev/null
+++ b/tools/include/linux/kallsyms.h
@@ -0,0 +1,33 @@
+#ifndef _LIBLOCKDEP_LINUX_KALLSYMS_H_
+#define _LIBLOCKDEP_LINUX_KALLSYMS_H_
+
+#include <linux/kernel.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#define KSYM_NAME_LEN 128
+
+struct module;
+
+static inline const char *kallsyms_lookup(unsigned long addr,
+					  unsigned long *symbolsize,
+					  unsigned long *offset,
+					  char **modname, char *namebuf)
+{
+	return NULL;
+}
+
+#include <execinfo.h>
+#include <stdlib.h>
+static inline void print_ip_sym(unsigned long ip)
+{
+	char **name;
+
+	name = backtrace_symbols((void **)&ip, 1);
+
+	dprintf(STDOUT_FILENO, "%s\n", *name);
+
+	free(name);
+}
+
+#endif
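A usage sketch for the new jhash.h, not part of the patch: hash a byte string, then mask the result into a power-of-two bucket count with jhash_mask(), which is exactly the cheap substitute for a modulo that the header's comment recommends:

#include <stdio.h>
#include <linux/jhash.h>

int main(void)
{
	const char key[] = "tools/include";
	u32 hash, bucket;

	hash = jhash(key, sizeof(key) - 1, 0);	/* initval 0 */
	bucket = hash & jhash_mask(10);		/* one of 2^10 buckets */

	/* The value depends on host endianness, so don't hard-code it. */
	printf("hash=%#x bucket=%u\n", hash, bucket);
	return 0;
}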
diff --git a/tools/include/linux/kern_levels.h b/tools/include/linux/kern_levels.h
new file mode 100644
index 000000000000..3b9bade28698
--- /dev/null
+++ b/tools/include/linux/kern_levels.h
@@ -0,0 +1,25 @@
+#ifndef __KERN_LEVELS_H__
+#define __KERN_LEVELS_H__
+
+#define KERN_SOH	""		/* ASCII Start Of Header */
+#define KERN_SOH_ASCII	''
+
+#define KERN_EMERG	KERN_SOH ""	/* system is unusable */
+#define KERN_ALERT	KERN_SOH ""	/* action must be taken immediately */
+#define KERN_CRIT	KERN_SOH ""	/* critical conditions */
+#define KERN_ERR	KERN_SOH ""	/* error conditions */
+#define KERN_WARNING	KERN_SOH ""	/* warning conditions */
+#define KERN_NOTICE	KERN_SOH ""	/* normal but significant condition */
+#define KERN_INFO	KERN_SOH ""	/* informational */
+#define KERN_DEBUG	KERN_SOH ""	/* debug-level messages */
+
+#define KERN_DEFAULT	KERN_SOH ""	/* the default kernel loglevel */
+
+/*
+ * Annotation for a "continued" line of log printout (only done after a
+ * line that had no enclosing \n). Only to be used by core/arch code
+ * during early bootup (a continued line is not SMP-safe otherwise).
+ */
+#define KERN_CONT	""
+
+#endif
diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h
index 28607db02bd3..77d2e94ca5df 100644
--- a/tools/include/linux/kernel.h
+++ b/tools/include/linux/kernel.h
@@ -4,6 +4,13 @@
 #include <stdarg.h>
 #include <stddef.h>
 #include <assert.h>
+#include <linux/compiler.h>
+#include <endian.h>
+#include <byteswap.h>
+
+#ifndef UINT_MAX
+#define UINT_MAX	(~0U)
+#endif
 
 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
 
@@ -27,6 +34,7 @@
 	(type *)((char *)__mptr - offsetof(type, member)); })
 #endif
 
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
 #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
 
 #ifndef max
@@ -62,16 +70,39 @@
 #endif
 #endif
 
-/*
- * Both need more care to handle endianness
- * (Don't use bitmap_copy_le() for now)
- */
-#define cpu_to_le64(x)	(x)
-#define cpu_to_le32(x)	(x)
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define cpu_to_le16 bswap_16
+#define cpu_to_le32 bswap_32
+#define cpu_to_le64 bswap_64
+#define le16_to_cpu bswap_16
+#define le32_to_cpu bswap_32
+#define le64_to_cpu bswap_64
+#define cpu_to_be16
+#define cpu_to_be32
+#define cpu_to_be64
+#define be16_to_cpu
+#define be32_to_cpu
+#define be64_to_cpu
+#else
+#define cpu_to_le16
+#define cpu_to_le32
+#define cpu_to_le64
+#define le16_to_cpu
+#define le32_to_cpu
+#define le64_to_cpu
+#define cpu_to_be16 bswap_16
+#define cpu_to_be32 bswap_32
+#define cpu_to_be64 bswap_64
+#define be16_to_cpu bswap_16
+#define be32_to_cpu bswap_32
+#define be64_to_cpu bswap_64
+#endif
 
 int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
 int scnprintf(char * buf, size_t size, const char * fmt, ...);
 
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+
 /*
  * This looks more complex than it should be. But we need to
  * get the type for the ~ right in round_down (it needs to be
@@ -82,4 +113,7 @@ int scnprintf(char * buf, size_t size, const char * fmt, ...);
 #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
 #define round_down(x, y) ((x) & ~__round_mask(x, y))
 
+#define current_gfp_context(k) 0
+#define synchronize_sched()
+
 #endif
diff --git a/tools/include/linux/kmemcheck.h b/tools/include/linux/kmemcheck.h
new file mode 100644
index 000000000000..94d598bc6abe
--- /dev/null
+++ b/tools/include/linux/kmemcheck.h
@@ -0,0 +1,8 @@
+#ifndef _LIBLOCKDEP_LINUX_KMEMCHECK_H_
+#define _LIBLOCKDEP_LINUX_KMEMCHECK_H_
+
+static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
+{
+}
+
+#endif
diff --git a/tools/include/linux/linkage.h b/tools/include/linux/linkage.h
new file mode 100644
index 000000000000..bc763d500262
--- /dev/null
+++ b/tools/include/linux/linkage.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_INCLUDE_LINUX_LINKAGE_H
+#define _TOOLS_INCLUDE_LINUX_LINKAGE_H
+
+#endif /* _TOOLS_INCLUDE_LINUX_LINKAGE_H */
diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h
new file mode 100644
index 000000000000..8da3e8effafa
--- /dev/null
+++ b/tools/include/linux/lockdep.h
@@ -0,0 +1,67 @@
+#ifndef _LIBLOCKDEP_LOCKDEP_H_
+#define _LIBLOCKDEP_LOCKDEP_H_
+
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+#include <string.h>
+#include <limits.h>
+#include <linux/utsname.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
+#include <linux/kern_levels.h>
+#include <linux/err.h>
+#include <linux/rcu.h>
+#include <linux/list.h>
+#include <linux/hardirq.h>
+#include <unistd.h>
+
+#define MAX_LOCK_DEPTH 63UL
+
+#define asmlinkage
+#define __visible
+
+#include "../../../include/linux/lockdep.h"
+
+struct task_struct {
+	u64 curr_chain_key;
+	int lockdep_depth;
+	unsigned int lockdep_recursion;
+	struct held_lock held_locks[MAX_LOCK_DEPTH];
+	gfp_t lockdep_reclaim_gfp;
+	int pid;
+	char comm[17];
+};
+
+extern struct task_struct *__curr(void);
+
+#define current (__curr())
+
+static inline int debug_locks_off(void)
+{
+	return 1;
+}
+
+#define task_pid_nr(tsk) ((tsk)->pid)
+
+#define KSYM_NAME_LEN 128
+#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__)
+#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
+#define pr_warn pr_err
+
+#define list_del_rcu list_del
+
+#define atomic_t unsigned long
+#define atomic_inc(x) ((*(x))++)
+
+#define print_tainted() ""
+#define static_obj(x) 1
+
+#define debug_show_all_locks()
+extern void debug_check_no_locks_held(void);
+
+static __used bool __is_kernel_percpu_address(unsigned long addr, void *can_addr)
+{
+	return false;
+}
+
+#endif
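With the kernel.h changes above, the cpu_to_le*()/cpu_to_be*() macros resolve to glibc's bswap_*() or to nothing depending on host byte order, and ARRAY_SIZE() now rejects plain pointers at compile time via __must_be_array(). A sketch, not part of the patch:

#include <stdio.h>
#include <linux/kernel.h>

int main(void)
{
	unsigned int ids[] = { 1, 2, 3, 4 };

	/* A no-op on little-endian hosts, a byte swap on big-endian ones;
	 * either way the result is little-endian. */
	unsigned int le = cpu_to_le32(ids[0]);

	printf("%u entries, first (LE) = %#x\n",
	       (unsigned int)ARRAY_SIZE(ids), le);

	/* ARRAY_SIZE(&ids[0]) would now fail to compile, because
	 * __must_be_array() trips on a plain pointer. */
	return 0;
}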
diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h
index 41446668ccce..0325cefc2220 100644
--- a/tools/include/linux/log2.h
+++ b/tools/include/linux/log2.h
@@ -12,11 +12,8 @@
 #ifndef _TOOLS_LINUX_LOG2_H
 #define _TOOLS_LINUX_LOG2_H
 
-/*
- * deal with unrepresentable constant logarithms
- */
-extern __attribute__((const, noreturn))
-int ____ilog2_NaN(void);
+#include <linux/bitops.h>
+#include <linux/types.h>
 
 /*
  * non-constant log of base 2 calculators
@@ -78,7 +75,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
 #define ilog2(n)				\
 (						\
 	__builtin_constant_p(n) ? (		\
-		(n) < 1 ? ____ilog2_NaN() :	\
+		(n) < 2 ? 0 :			\
 		(n) & (1ULL << 63) ? 63 :	\
 		(n) & (1ULL << 62) ? 62 :	\
 		(n) & (1ULL << 61) ? 61 :	\
@@ -141,10 +138,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
 		(n) & (1ULL << 4) ? 4 :		\
 		(n) & (1ULL << 3) ? 3 :		\
 		(n) & (1ULL << 2) ? 2 :		\
-		(n) & (1ULL << 1) ? 1 :		\
-		(n) & (1ULL << 0) ? 0 :		\
-		____ilog2_NaN()			\
-	) :					\
+		1 ) :				\
 	(sizeof(n) <= 4) ?			\
 	__ilog2_u32(n) :			\
 	__ilog2_u64(n)				\
diff --git a/tools/include/linux/module.h b/tools/include/linux/module.h
new file mode 100644
index 000000000000..07055db296f3
--- /dev/null
+++ b/tools/include/linux/module.h
@@ -0,0 +1,11 @@
+#ifndef _LIBLOCKDEP_LINUX_MODULE_H_
+#define _LIBLOCKDEP_LINUX_MODULE_H_
+
+#define module_param(name, type, perm)
+
+static inline bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
+{
+	return false;
+}
+
+#endif
diff --git a/tools/include/linux/mutex.h b/tools/include/linux/mutex.h
new file mode 100644
index 000000000000..a8180d25f2fc
--- /dev/null
+++ b/tools/include/linux/mutex.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_INCLUDE_LINUX_MUTEX_H
+#define _TOOLS_INCLUDE_LINUX_MUTEX_H
+
+#endif /* _TOOLS_INCLUDE_LINUX_MUTEX_H */
diff --git a/tools/include/linux/proc_fs.h b/tools/include/linux/proc_fs.h
new file mode 100644
index 000000000000..8b3b03b64fda
--- /dev/null
+++ b/tools/include/linux/proc_fs.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_INCLUDE_LINUX_PROC_FS_H
+#define _TOOLS_INCLUDE_LINUX_PROC_FS_H
+
+#endif /* _TOOLS_INCLUDE_LINUX_PROC_FS_H */
diff --git a/tools/include/linux/rcu.h b/tools/include/linux/rcu.h
new file mode 100644
index 000000000000..5080649dad04
--- /dev/null
+++ b/tools/include/linux/rcu.h
@@ -0,0 +1,24 @@
+#ifndef _LIBLOCKDEP_RCU_H_
+#define _LIBLOCKDEP_RCU_H_
+
+int rcu_scheduler_active;
+
+static inline int rcu_lockdep_current_cpu_online(void)
+{
+	return 1;
+}
+
+static inline int rcu_is_cpu_idle(void)
+{
+	return 1;
+}
+
+static inline bool rcu_is_watching(void)
+{
+	return false;
+}
+
+#define rcu_assign_pointer(p, v) ((p) = (v))
+#define RCU_INIT_POINTER(p, v) p=(v)
+
+#endif
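With the log2.h change above, a constant-folded ilog2() now yields 0 for any value below 2 instead of referencing the removed ____ilog2_NaN() stub, and the bottom of the bit cascade collapses to a plain 1. A sketch, not part of the patch:

#include <stdio.h>
#include <linux/log2.h>

int main(void)
{
	printf("%d %d %d %d\n",
	       ilog2(1),	/* 0 */
	       ilog2(2),	/* 1: falls through the whole cascade */
	       ilog2(4096),	/* 12 */
	       ilog2(4097));	/* 12: rounds down */
	return 0;
}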
diff --git a/tools/include/linux/refcount.h b/tools/include/linux/refcount.h
new file mode 100644
index 000000000000..a0177c1f55b1
--- /dev/null
+++ b/tools/include/linux/refcount.h
@@ -0,0 +1,151 @@
+#ifndef _TOOLS_LINUX_REFCOUNT_H
+#define _TOOLS_LINUX_REFCOUNT_H
+
+/*
+ * Variant of atomic_t specialized for reference counts.
+ *
+ * The interface matches the atomic_t interface (to aid in porting) but only
+ * provides the few functions one should use for reference counting.
+ *
+ * It differs in that the counter saturates at UINT_MAX and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free issues.
+ *
+ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
+ * and provide only what is strictly required for refcounts.
+ *
+ * The increments are fully relaxed; these will not provide ordering. The
+ * rationale is that whatever is used to obtain the object we're increasing the
+ * reference count on will provide the ordering. For locked data structures,
+ * its the lock acquire, for RCU/lockless data structures its the dependent
+ * load.
+ *
+ * Do note that inc_not_zero() provides a control dependency which will order
+ * future stores against the inc, this ensures we'll never modify the object
+ * if we did not in fact acquire a reference.
+ *
+ * The decrements will provide release order, such that all the prior loads and
+ * stores will be issued before, it also provides a control dependency, which
+ * will order us against the subsequent free().
+ *
+ * The control dependency is against the load of the cmpxchg (ll/sc) that
+ * succeeded. This means the stores aren't fully ordered, but this is fine
+ * because the 1->0 transition indicates no concurrency.
+ *
+ * Note that the allocator is responsible for ordering things between free()
+ * and alloc().
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+
+#ifdef NDEBUG
+#define REFCOUNT_WARN(cond, str) (void)(cond)
+#define __refcount_check
+#else
+#define REFCOUNT_WARN(cond, str) BUG_ON(cond)
+#define __refcount_check	__must_check
+#endif
+
+typedef struct refcount_struct {
+	atomic_t refs;
+} refcount_t;
+
+#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
+
+static inline void refcount_set(refcount_t *r, unsigned int n)
+{
+	atomic_set(&r->refs, n);
+}
+
+static inline unsigned int refcount_read(const refcount_t *r)
+{
+	return atomic_read(&r->refs);
+}
+
+/*
+ * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_inc_not_zero(refcount_t *r)
+{
+	unsigned int old, new, val = atomic_read(&r->refs);
+
+	for (;;) {
+		new = val + 1;
+
+		if (!val)
+			return false;
+
+		if (unlikely(!new))
+			return true;
+
+		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+
+	return true;
+}
+
+/*
+ * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller already has a
+ * reference on the object, will WARN when this is not so.
+ */
+static inline void refcount_inc(refcount_t *r)
+{
+	REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+}
+
+/*
+ * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
+static inline __refcount_check
+bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+{
+	unsigned int old, new, val = atomic_read(&r->refs);
+
+	for (;;) {
+		if (unlikely(val == UINT_MAX))
+			return false;
+
+		new = val - i;
+		if (new > val) {
+			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+			return false;
+		}
+
+		old = atomic_cmpxchg_release(&r->refs, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	return !new;
+}
+
+static inline __refcount_check
+bool refcount_dec_and_test(refcount_t *r)
+{
+	return refcount_sub_and_test(1, r);
+}
+
+
+#endif /* _ATOMIC_LINUX_REFCOUNT_H */
diff --git a/tools/include/linux/sched/clock.h b/tools/include/linux/sched/clock.h
new file mode 100644
index 000000000000..5837d17c4182
--- /dev/null
+++ b/tools/include/linux/sched/clock.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_PERF_LINUX_SCHED_CLOCK_H
+#define _TOOLS_PERF_LINUX_SCHED_CLOCK_H
+
+#endif /* _TOOLS_PERF_LINUX_SCHED_CLOCK_H */
diff --git a/tools/include/linux/sched/mm.h b/tools/include/linux/sched/mm.h
new file mode 100644
index 000000000000..c8d9f19c1f35
--- /dev/null
+++ b/tools/include/linux/sched/mm.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_PERF_LINUX_SCHED_MM_H
+#define _TOOLS_PERF_LINUX_SCHED_MM_H
+
+#endif /* _TOOLS_PERF_LINUX_SCHED_MM_H */
diff --git a/tools/include/linux/sched/task.h b/tools/include/linux/sched/task.h
new file mode 100644
index 000000000000..a97890eca110
--- /dev/null
+++ b/tools/include/linux/sched/task.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_PERF_LINUX_SCHED_TASK_H
+#define _TOOLS_PERF_LINUX_SCHED_TASK_H
+
+#endif /* _TOOLS_PERF_LINUX_SCHED_TASK_H */
diff --git a/tools/include/linux/seq_file.h b/tools/include/linux/seq_file.h
new file mode 100644
index 000000000000..102fd9217f1f
--- /dev/null
+++ b/tools/include/linux/seq_file.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_INCLUDE_LINUX_SEQ_FILE_H
+#define _TOOLS_INCLUDE_LINUX_SEQ_FILE_H
+
+#endif /* _TOOLS_INCLUDE_LINUX_SEQ_FILE_H */
diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h
new file mode 100644
index 000000000000..417cda4f793f
--- /dev/null
+++ b/tools/include/linux/spinlock.h
@@ -0,0 +1,31 @@
+#ifndef __LINUX_SPINLOCK_H_
+#define __LINUX_SPINLOCK_H_
+
+#include <pthread.h>
+#include <stdbool.h>
+
+#define spinlock_t		pthread_mutex_t
+#define DEFINE_SPINLOCK(x)	pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER;
+
+#define spin_lock_irqsave(x, f)		(void)f, pthread_mutex_lock(x)
+#define spin_unlock_irqrestore(x, f)	(void)f, pthread_mutex_unlock(x)
+
+#define arch_spinlock_t pthread_mutex_t
+#define __ARCH_SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER
+
+static inline void arch_spin_lock(arch_spinlock_t *mutex)
+{
+	pthread_mutex_lock(mutex);
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *mutex)
+{
+	pthread_mutex_unlock(mutex);
+}
+
+static inline bool arch_spin_is_locked(arch_spinlock_t *mutex)
+{
+	return true;
+}
+
+#endif
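A usage sketch for the refcount_t API above, not part of the patch; struct object is hypothetical. refcount_inc() assumes the caller already holds a reference, and refcount_dec_and_test() returns true only on the final 1->0 transition (build with -DNDEBUG if no BUG_ON() definition is available in your include setup):

#include <stdlib.h>
#include <linux/refcount.h>

struct object {
	refcount_t ref;
	/* ... payload ... */
};

static struct object *object_get(struct object *obj)
{
	refcount_inc(&obj->ref);	/* caller must already hold a ref */
	return obj;
}

static void object_put(struct object *obj)
{
	/* Release ordering: only the final put may free the object. */
	if (refcount_dec_and_test(&obj->ref))
		free(obj);
}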
diff --git a/tools/include/linux/stacktrace.h b/tools/include/linux/stacktrace.h
new file mode 100644
index 000000000000..39aecc6b19d1
--- /dev/null
+++ b/tools/include/linux/stacktrace.h
@@ -0,0 +1,32 @@
+#ifndef _LIBLOCKDEP_LINUX_STACKTRACE_H_
+#define _LIBLOCKDEP_LINUX_STACKTRACE_H_
+
+#include <execinfo.h>
+
+struct stack_trace {
+	unsigned int nr_entries, max_entries;
+	unsigned long *entries;
+	int skip;
+};
+
+static inline void print_stack_trace(struct stack_trace *trace, int spaces)
+{
+	backtrace_symbols_fd((void **)trace->entries, trace->nr_entries, 1);
+}
+
+#define save_stack_trace(trace)	\
+	((trace)->nr_entries =	\
+		backtrace((void **)(trace)->entries, (trace)->max_entries))
+
+static inline int dump_stack(void)
+{
+	void *array[64];
+	size_t size;
+
+	size = backtrace(array, 64);
+	backtrace_symbols_fd(array, size, 1);
+
+	return 0;
+}
+
+#endif
diff --git a/tools/include/linux/string.h b/tools/include/linux/string.h
index f436d2420a18..d62b56cf8c12 100644
--- a/tools/include/linux/string.h
+++ b/tools/include/linux/string.h
@@ -18,4 +18,6 @@ extern size_t strlcpy(char *dest, const char *src, size_t size);
 
 char *str_error_r(int errnum, char *buf, size_t buflen);
 
+int prefixcmp(const char *str, const char *prefix);
+
 #endif /* _LINUX_STRING_H_ */
diff --git a/tools/include/linux/types.h b/tools/include/linux/types.h
index c24b3e3ae296..77a28a26a670 100644
--- a/tools/include/linux/types.h
+++ b/tools/include/linux/types.h
@@ -7,6 +7,7 @@
 #define __SANE_USERSPACE_TYPES__	/* For PPC64, to get LL64 types */
 
 #include <asm/types.h>
+#include <asm/posix_types.h>
 
 struct page;
 struct kmem_cache;
diff --git a/tools/include/linux/unaligned/packed_struct.h b/tools/include/linux/unaligned/packed_struct.h
new file mode 100644
index 000000000000..c0d817de4df2
--- /dev/null
+++ b/tools/include/linux/unaligned/packed_struct.h
@@ -0,0 +1,46 @@
+#ifndef _LINUX_UNALIGNED_PACKED_STRUCT_H
+#define _LINUX_UNALIGNED_PACKED_STRUCT_H
+
+#include <linux/kernel.h>
+
+struct __una_u16 { u16 x; } __packed;
+struct __una_u32 { u32 x; } __packed;
+struct __una_u64 { u64 x; } __packed;
+
+static inline u16 __get_unaligned_cpu16(const void *p)
+{
+	const struct __una_u16 *ptr = (const struct __una_u16 *)p;
+	return ptr->x;
+}
+
+static inline u32 __get_unaligned_cpu32(const void *p)
+{
+	const struct __una_u32 *ptr = (const struct __una_u32 *)p;
+	return ptr->x;
+}
+
+static inline u64 __get_unaligned_cpu64(const void *p)
+{
+	const struct __una_u64 *ptr = (const struct __una_u64 *)p;
+	return ptr->x;
+}
+
+static inline void __put_unaligned_cpu16(u16 val, void *p)
+{
+	struct __una_u16 *ptr = (struct __una_u16 *)p;
+	ptr->x = val;
+}
+
+static inline void __put_unaligned_cpu32(u32 val, void *p)
+{
+	struct __una_u32 *ptr = (struct __una_u32 *)p;
+	ptr->x = val;
+}
+
+static inline void __put_unaligned_cpu64(u64 val, void *p)
+{
+	struct __una_u64 *ptr = (struct __una_u64 *)p;
+	ptr->x = val;
+}
+
+#endif /* _LINUX_UNALIGNED_PACKED_STRUCT_H */
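A usage sketch for the unaligned helpers above, not part of the patch: __get_unaligned_cpu32() reads through a __packed struct so the compiler emits a load that is safe even from a misaligned address, which is exactly how jhash() consumes its input buffer:

#include <stdio.h>
#include <linux/unaligned/packed_struct.h>

int main(void)
{
	unsigned char buf[8] = { 0, 0x11, 0x22, 0x33, 0x44, 0, 0, 0 };

	/* buf + 1 is (almost certainly) misaligned for a plain u32 load;
	 * the __packed struct makes the access well-defined anyway. */
	u32 v = __get_unaligned_cpu32(buf + 1);

	printf("%#x\n", v);	/* 0x44332211 on little-endian hosts */
	return 0;
}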