Diffstat (limited to 'include')
439 files changed, 12279 insertions, 4392 deletions
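Note on the asm-generic/bug.h hunk below: WARN_ON_ONCE()/WARN_ONCE()/WARN_TAINT_ONCE() are restructured so that the static __warned flag is set before the warning fires, rather than being derived from the WARN() return value, while the macro still reports the condition on every evaluation. A minimal user-space sketch of that once-only behaviour, compilable with GCC/Clang (it relies on GNU statement expressions); SKETCH_WARN_ONCE() and do_warn() are illustrative stand-ins, not kernel code:

/* Hedged model of the reworked WARN_ONCE() flow: warn exactly once, but
 * keep returning the condition value on every evaluation. */
#include <stdbool.h>
#include <stdio.h>

static void do_warn(const char *msg)	/* stand-in for WARN(1, format) */
{
	fprintf(stderr, "WARNING: %s\n", msg);
}

#define SKETCH_WARN_ONCE(condition, msg) ({			\
	static bool __warned;					\
	bool __ret_warn_once = !!(condition);			\
								\
	if (__ret_warn_once && !__warned) {			\
		__warned = true;  /* flag set first, as in the new macro */ \
		do_warn(msg);     /* fires only on the first hit */	    \
	}							\
	__ret_warn_once;					\
})

int main(void)
{
	for (int i = 0; i < 3; i++)
		if (SKETCH_WARN_ONCE(i >= 0, "condition hit"))
			printf("iteration %d: condition still reported\n", i);
	return 0;
}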
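Note on the crypto/algapi.h hunk below: it introduces struct crypto_engine together with crypto_engine_alloc_init(), crypto_engine_start()/crypto_engine_stop()/crypto_engine_exit(), crypto_transfer_request_to_engine() and crypto_finalize_request(). A rough driver-side sketch of the intended flow, under stated assumptions: every my_* name is hypothetical, only the engine calls and the crypt_one_request member come from the header, and crypto_engine_alloc_init() is assumed to return NULL on failure.

/* Hedged sketch of driver-side crypto_engine usage; my_* names are made up. */
#include <crypto/algapi.h>
#include <linux/device.h>
#include <linux/errno.h>

struct my_hw {
	struct device *dev;
	struct crypto_engine *engine;
};

static int my_one_request(struct crypto_engine *engine,
			  struct ablkcipher_request *req)
{
	/* ... program the hardware; later, e.g. from the completion IRQ: */
	crypto_finalize_request(engine, req, 0);
	return 0;
}

static int my_probe(struct my_hw *hw)
{
	hw->engine = crypto_engine_alloc_init(hw->dev, true /* rt pump */);
	if (!hw->engine)		/* assumption: NULL means failure */
		return -ENOMEM;

	hw->engine->crypt_one_request = my_one_request;
	return crypto_engine_start(hw->engine);
}

static int my_queue(struct my_hw *hw, struct ablkcipher_request *req)
{
	/* hand the request to the engine's kworker request pump */
	return crypto_transfer_request_to_engine(hw->engine, req);
}

static void my_remove(struct my_hw *hw)
{
	crypto_engine_stop(hw->engine);
	crypto_engine_exit(hw->engine);
}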
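Note on the crypto/xts.h hunk below: the new inline xts_check_key() centralises the XTS key checks (the key must split into two equal halves, and with fips_enabled the two halves must differ). A hedged sketch of a setkey path using it; struct aes_xts_ctx and xts_aes_setkey() are made up for illustration, only xts_check_key() itself comes from the patch.

/* Hypothetical setkey callback; rejects bad XTS keys via the new helper. */
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/string.h>

struct aes_xts_ctx {			/* illustrative context layout */
	u8 raw_key[64];			/* enough for two AES-256 halves */
	unsigned int keylen;
};

static int xts_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	if (keylen > sizeof(ctx->raw_key))
		return -EINVAL;

	/* sets CRYPTO_TFM_RES_* flags and returns -EINVAL on bad keys */
	err = xts_check_key(tfm, key, keylen);
	if (err)
		return err;

	memcpy(ctx->raw_key, key, keylen);
	ctx->keylen = keylen;
	return 0;
}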
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h index 5bfc61943f88..34f601e7b88d 100644 --- a/include/acpi/acoutput.h +++ b/include/acpi/acoutput.h @@ -262,7 +262,7 @@ #define ACPI_GET_FUNCTION_NAME _acpi_function_name /* - * The Name parameter should be the procedure name as a quoted string. + * The Name parameter should be the procedure name as a non-quoted string. * The function name is also used by the function exit macros below. * Note: (const char) is used to be compatible with the debug interfaces * and macros such as __func__. diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index c96621e87c19..17556979dc79 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -897,11 +897,9 @@ ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3) acpi_warning(const char *module_name, u32 line_number, const char *format, ...)) -ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3) +ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1) void ACPI_INTERNAL_VAR_XFACE - acpi_info(const char *module_name, - u32 line_number, - const char *format, ...)) + acpi_info(const char *format, ...)) ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3) void ACPI_INTERNAL_VAR_XFACE acpi_bios_error(const char *module_name, diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 07fb100bcc68..6f1805dd5d3c 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -9,6 +9,7 @@ #define ACPI_PROCESSOR_CLASS "processor" #define ACPI_PROCESSOR_DEVICE_NAME "Processor" #define ACPI_PROCESSOR_DEVICE_HID "ACPI0007" +#define ACPI_PROCESSOR_CONTAINER_HID "ACPI0010" #define ACPI_PROCESSOR_BUSY_METRIC 10 @@ -394,14 +395,6 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr) } #endif /* CONFIG_ACPI_PROCESSOR_IDLE */ -#if defined(CONFIG_PM_SLEEP) & defined(CONFIG_ACPI_PROCESSOR_IDLE) -void acpi_processor_syscore_init(void); -void acpi_processor_syscore_exit(void); -#else -static inline void acpi_processor_syscore_init(void) {} -static inline void acpi_processor_syscore_exit(void) {} -#endif - /* in processor_thermal.c */ int acpi_processor_get_limit_info(struct acpi_processor *pr); extern const struct thermal_cooling_device_ops processor_cooling_ops; diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h index eb1973bad80b..5e1f345b58dd 100644 --- a/include/asm-generic/atomic-long.h +++ b/include/asm-generic/atomic-long.h @@ -98,14 +98,14 @@ ATOMIC_LONG_ADD_SUB_OP(sub, _release) #define atomic_long_xchg(v, new) \ (ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new))) -static inline void atomic_long_inc(atomic_long_t *l) +static __always_inline void atomic_long_inc(atomic_long_t *l) { ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; ATOMIC_LONG_PFX(_inc)(v); } -static inline void atomic_long_dec(atomic_long_t *l) +static __always_inline void atomic_long_dec(atomic_long_t *l) { ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; @@ -113,7 +113,7 @@ static inline void atomic_long_dec(atomic_long_t *l) } #define ATOMIC_LONG_OP(op) \ -static inline void \ +static __always_inline void \ atomic_long_##op(long i, atomic_long_t *l) \ { \ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 630dd2372238..f90588abbfd4 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -81,6 +81,12 @@ extern void warn_slowpath_null(const char *file, const int line); do { printk(arg); __WARN_TAINT(taint); } while (0) #endif +/* used internally by panic.c */ +struct 
warn_args; + +void __warn(const char *file, int line, void *caller, unsigned taint, + struct pt_regs *regs, struct warn_args *args); + #ifndef WARN_ON #define WARN_ON(condition) ({ \ int __ret_warn_on = !!(condition); \ @@ -110,9 +116,10 @@ extern void warn_slowpath_null(const char *file, const int line); static bool __section(.data.unlikely) __warned; \ int __ret_warn_once = !!(condition); \ \ - if (unlikely(__ret_warn_once)) \ - if (WARN_ON(!__warned)) \ - __warned = true; \ + if (unlikely(__ret_warn_once && !__warned)) { \ + __warned = true; \ + WARN_ON(1); \ + } \ unlikely(__ret_warn_once); \ }) @@ -120,9 +127,10 @@ extern void warn_slowpath_null(const char *file, const int line); static bool __section(.data.unlikely) __warned; \ int __ret_warn_once = !!(condition); \ \ - if (unlikely(__ret_warn_once)) \ - if (WARN(!__warned, format)) \ - __warned = true; \ + if (unlikely(__ret_warn_once && !__warned)) { \ + __warned = true; \ + WARN(1, format); \ + } \ unlikely(__ret_warn_once); \ }) @@ -130,9 +138,10 @@ extern void warn_slowpath_null(const char *file, const int line); static bool __section(.data.unlikely) __warned; \ int __ret_warn_once = !!(condition); \ \ - if (unlikely(__ret_warn_once)) \ - if (WARN_TAINT(!__warned, taint, format)) \ - __warned = true; \ + if (unlikely(__ret_warn_once && !__warned)) { \ + __warned = true; \ + WARN_TAINT(1, taint, format); \ + } \ unlikely(__ret_warn_once); \ }) diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h index 59811df58c5b..3150cbd8eb21 100644 --- a/include/asm-generic/checksum.h +++ b/include/asm-generic/checksum.h @@ -65,14 +65,14 @@ static inline __sum16 csum_fold(__wsum csum) * returns a 16-bit checksum, already complemented */ extern __wsum -csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum); +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum); #endif #ifndef csum_tcpudp_magic static inline __sum16 -csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) +csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); } diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h index 0419485891f2..0f1c6f315cdc 100644 --- a/include/asm-generic/cputime_nsecs.h +++ b/include/asm-generic/cputime_nsecs.h @@ -75,7 +75,7 @@ typedef u64 __nocast cputime64_t; */ static inline cputime_t timespec_to_cputime(const struct timespec *val) { - u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec; + u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec; return (__force cputime_t) ret; } static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) @@ -91,7 +91,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) */ static inline cputime_t timeval_to_cputime(const struct timeval *val) { - u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC; + u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + + val->tv_usec * NSEC_PER_USEC; return (__force cputime_t) ret; } static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h index 1cbb8338edf3..827e4d3bbc7a 100644 --- a/include/asm-generic/fixmap.h +++ b/include/asm-generic/fixmap.h @@ -70,12 +70,12 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr) #endif /* Return a 
pointer with offset calculated */ -#define __set_fixmap_offset(idx, phys, flags) \ -({ \ - unsigned long addr; \ - __set_fixmap(idx, phys, flags); \ - addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \ - addr; \ +#define __set_fixmap_offset(idx, phys, flags) \ +({ \ + unsigned long ________addr; \ + __set_fixmap(idx, phys, flags); \ + ________addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \ + ________addr; \ }) #define set_fixmap_offset(idx, phys) \ diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index 40ec1433f05d..8ca627dcea11 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h @@ -26,8 +26,12 @@ */ #ifndef ARCH_NR_GPIOS +#if defined(CONFIG_ARCH_NR_GPIO) && CONFIG_ARCH_NR_GPIO > 0 +#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO +#else #define ARCH_NR_GPIOS 512 #endif +#endif /* * "valid" GPIO numbers are nonnegative and may be passed to diff --git a/include/asm-generic/pci-bridge.h b/include/asm-generic/pci-bridge.h deleted file mode 100644 index 20db2e5a0a69..000000000000 --- a/include/asm-generic/pci-bridge.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ -#ifndef _ASM_GENERIC_PCI_BRIDGE_H -#define _ASM_GENERIC_PCI_BRIDGE_H - -#ifdef __KERNEL__ - -enum { - /* Force re-assigning all resources (ignore firmware - * setup completely) - */ - PCI_REASSIGN_ALL_RSRC = 0x00000001, - - /* Re-assign all bus numbers */ - PCI_REASSIGN_ALL_BUS = 0x00000002, - - /* Do not try to assign, just use existing setup */ - PCI_PROBE_ONLY = 0x00000004, - - /* Don't bother with ISA alignment unless the bridge has - * ISA forwarding enabled - */ - PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, - - /* Enable domain numbers in /proc */ - PCI_ENABLE_PROC_DOMAINS = 0x00000010, - /* ... except for domain 0 */ - PCI_COMPAT_DOMAIN_0 = 0x00000020, - - /* PCIe downstream ports are bridges that normally lead to only a - * device 0, but if this is set, we scan all possible devices, not - * just device 0. - */ - PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, -}; - -#ifdef CONFIG_PCI -extern unsigned int pci_flags; - -static inline void pci_set_flags(int flags) -{ - pci_flags = flags; -} - -static inline void pci_add_flags(int flags) -{ - pci_flags |= flags; -} - -static inline void pci_clear_flags(int flags) -{ - pci_flags &= ~flags; -} - -static inline int pci_has_flag(int flag) -{ - return pci_flags & flag; -} -#else -static inline void pci_set_flags(int flags) { } -static inline void pci_add_flags(int flags) { } -static inline void pci_clear_flags(int flags) { } -static inline int pci_has_flag(int flag) -{ - return 0; -} -#endif /* CONFIG_PCI */ - -#endif /* __KERNEL__ */ -#endif /* _ASM_GENERIC_PCI_BRIDGE_H */ diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index c370b261c720..9401f4819891 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -783,6 +783,23 @@ static inline int pmd_clear_huge(pmd_t *pmd) } #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ +#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* + * ARCHes with special requirements for evicting THP backing TLB entries can + * implement this. Otherwise also, it can help optimize normal TLB flush in + * THP regime. 
stock flush_tlb_range() typically has optimization to nuke the + * entire TLB TLB if flush span is greater than a threshold, which will + * likely be true for a single huge page. Thus a single thp flush will + * invalidate the entire TLB which is not desitable. + * e.g. see arch/arc: flush_pmd_tlb_range + */ +#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) +#else +#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG() +#endif +#endif + #endif /* !__ASSEMBLY__ */ #ifndef io_remap_pfn_range diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index 39e1cb201b8e..35a52a880b2f 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -120,11 +120,6 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock) #endif /* - * Initializier - */ -#define __ARCH_SPIN_LOCK_UNLOCKED { ATOMIC_INIT(0) } - -/* * Remapping spinlock architecture specific functions to the corresponding * queued spinlock functions. */ diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h index 85f888e86761..034acd0c4956 100644 --- a/include/asm-generic/qspinlock_types.h +++ b/include/asm-generic/qspinlock_types.h @@ -33,6 +33,11 @@ typedef struct qspinlock { } arch_spinlock_t; /* + * Initializier + */ +#define __ARCH_SPIN_LOCK_UNLOCKED { ATOMIC_INIT(0) } + +/* * Bitfields in the atomic value: * * When NR_CPUS < 16K diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index c4bd0e2c173c..8f5a12ab2f2b 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -157,7 +157,7 @@ #define EARLYCON_TABLE() STRUCT_ALIGN(); \ VMLINUX_SYMBOL(__earlycon_table) = .; \ *(__earlycon_table) \ - *(__earlycon_table_end) + VMLINUX_SYMBOL(__earlycon_table_end) = .; #else #define EARLYCON_TABLE() #endif @@ -179,7 +179,6 @@ #define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem) #define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method) #define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method) -#define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon) #ifdef CONFIG_ACPI #define ACPI_PROBE_TABLE(name) \ @@ -256,6 +255,7 @@ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start_rodata) = .; \ *(.rodata) *(.rodata.*) \ + *(.data..ro_after_init) /* Read only after init */ \ *(__vermagic) /* Kernel version magic */ \ . = ALIGN(8); \ VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \ @@ -526,8 +526,7 @@ IRQCHIP_OF_MATCH_TABLE() \ ACPI_PROBE_TABLE(irqchip) \ ACPI_PROBE_TABLE(clksrc) \ - EARLYCON_TABLE() \ - EARLYCON_OF_TABLES() + EARLYCON_TABLE() #define INIT_TEXT \ *(.init.text) \ diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 84d13b11ad7b..957bb8763219 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -31,10 +31,10 @@ * * For example: authenc(hmac(sha256), cbc(aes)) * - * The example code provided for the asynchronous block cipher operation - * applies here as well. Naturally all *ablkcipher* symbols must be exchanged + * The example code provided for the symmetric key cipher operation + * applies here as well. Naturally all *skcipher* symbols must be exchanged * the *aead* pendants discussed in the following. 
In addition, for the AEAD - * operation, the aead_request_set_assoc function must be used to set the + * operation, the aead_request_set_ad function must be used to set the * pointer to the associated data memory location before performing the * encryption or decryption operation. In case of an encryption, the associated * data memory is filled during the encryption operation. For decryption, the diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index 354de15cea6b..c37cc59e9bf2 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -114,7 +114,7 @@ struct akcipher_alg { */ /** - * crypto_alloc_akcipher() -- allocate AKCIPHER tfm handle + * crypto_alloc_akcipher() - allocate AKCIPHER tfm handle * @alg_name: is the cra_name / name or cra_driver_name / driver name of the * public key algorithm e.g. "rsa" * @type: specifies the type of the algorithm @@ -171,7 +171,7 @@ static inline struct crypto_akcipher *crypto_akcipher_reqtfm( } /** - * crypto_free_akcipher() -- free AKCIPHER tfm handle + * crypto_free_akcipher() - free AKCIPHER tfm handle * * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() */ @@ -181,7 +181,7 @@ static inline void crypto_free_akcipher(struct crypto_akcipher *tfm) } /** - * akcipher_request_alloc() -- allocates public key request + * akcipher_request_alloc() - allocates public key request * * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() * @gfp: allocation flags @@ -201,7 +201,7 @@ static inline struct akcipher_request *akcipher_request_alloc( } /** - * akcipher_request_free() -- zeroize and free public key request + * akcipher_request_free() - zeroize and free public key request * * @req: request to free */ @@ -211,14 +211,14 @@ static inline void akcipher_request_free(struct akcipher_request *req) } /** - * akcipher_request_set_callback() -- Sets an asynchronous callback. + * akcipher_request_set_callback() - Sets an asynchronous callback. * * Callback will be called when an asynchronous operation on a given * request is finished. 
* * @req: request that the callback will be set for * @flgs: specify for instance if the operation may backlog - * @cmlp: callback which will be called + * @cmpl: callback which will be called * @data: private data used by the caller */ static inline void akcipher_request_set_callback(struct akcipher_request *req, @@ -232,7 +232,7 @@ static inline void akcipher_request_set_callback(struct akcipher_request *req, } /** - * akcipher_request_set_crypt() -- Sets request parameters + * akcipher_request_set_crypt() - Sets request parameters * * Sets parameters required by crypto operation * @@ -255,7 +255,7 @@ static inline void akcipher_request_set_crypt(struct akcipher_request *req, } /** - * crypto_akcipher_maxsize() -- Get len for output buffer + * crypto_akcipher_maxsize() - Get len for output buffer * * Function returns the dest buffer size required for a given key * @@ -271,7 +271,7 @@ static inline int crypto_akcipher_maxsize(struct crypto_akcipher *tfm) } /** - * crypto_akcipher_encrypt() -- Invoke public key encrypt operation + * crypto_akcipher_encrypt() - Invoke public key encrypt operation * * Function invokes the specific public key encrypt operation for a given * public key algorithm @@ -289,7 +289,7 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req) } /** - * crypto_akcipher_decrypt() -- Invoke public key decrypt operation + * crypto_akcipher_decrypt() - Invoke public key decrypt operation * * Function invokes the specific public key decrypt operation for a given * public key algorithm @@ -307,7 +307,7 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req) } /** - * crypto_akcipher_sign() -- Invoke public key sign operation + * crypto_akcipher_sign() - Invoke public key sign operation * * Function invokes the specific public key sign operation for a given * public key algorithm @@ -325,7 +325,7 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req) } /** - * crypto_akcipher_verify() -- Invoke public key verify operation + * crypto_akcipher_verify() - Invoke public key verify operation * * Function invokes the specific public key verify operation for a given * public key algorithm @@ -343,7 +343,7 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req) } /** - * crypto_akcipher_set_pub_key() -- Invoke set public key operation + * crypto_akcipher_set_pub_key() - Invoke set public key operation * * Function invokes the algorithm specific set key function, which knows * how to decode and interpret the encoded key @@ -364,7 +364,7 @@ static inline int crypto_akcipher_set_pub_key(struct crypto_akcipher *tfm, } /** - * crypto_akcipher_set_priv_key() -- Invoke set private key operation + * crypto_akcipher_set_priv_key() - Invoke set private key operation * * Function invokes the algorithm specific set key function, which knows * how to decode and interpret the encoded key diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index c9fe145f7dd3..eeafd21afb44 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -15,6 +15,7 @@ #include <linux/crypto.h> #include <linux/list.h> #include <linux/kernel.h> +#include <linux/kthread.h> #include <linux/skbuff.h> struct crypto_aead; @@ -128,6 +129,75 @@ struct ablkcipher_walk { unsigned int blocksize; }; +#define ENGINE_NAME_LEN 30 +/* + * struct crypto_engine - crypto hardware engine + * @name: the engine name + * @idling: the engine is entering idle state + * @busy: request pump is busy + * @running: the engine is on working + * 
@cur_req_prepared: current request is prepared + * @list: link with the global crypto engine list + * @queue_lock: spinlock to syncronise access to request queue + * @queue: the crypto queue of the engine + * @rt: whether this queue is set to run as a realtime task + * @prepare_crypt_hardware: a request will soon arrive from the queue + * so the subsystem requests the driver to prepare the hardware + * by issuing this call + * @unprepare_crypt_hardware: there are currently no more requests on the + * queue so the subsystem notifies the driver that it may relax the + * hardware by issuing this call + * @prepare_request: do some prepare if need before handle the current request + * @unprepare_request: undo any work done by prepare_message() + * @crypt_one_request: do encryption for current request + * @kworker: thread struct for request pump + * @kworker_task: pointer to task for request pump kworker thread + * @pump_requests: work struct for scheduling work to the request pump + * @priv_data: the engine private data + * @cur_req: the current request which is on processing + */ +struct crypto_engine { + char name[ENGINE_NAME_LEN]; + bool idling; + bool busy; + bool running; + bool cur_req_prepared; + + struct list_head list; + spinlock_t queue_lock; + struct crypto_queue queue; + + bool rt; + + int (*prepare_crypt_hardware)(struct crypto_engine *engine); + int (*unprepare_crypt_hardware)(struct crypto_engine *engine); + + int (*prepare_request)(struct crypto_engine *engine, + struct ablkcipher_request *req); + int (*unprepare_request)(struct crypto_engine *engine, + struct ablkcipher_request *req); + int (*crypt_one_request)(struct crypto_engine *engine, + struct ablkcipher_request *req); + + struct kthread_worker kworker; + struct task_struct *kworker_task; + struct kthread_work pump_requests; + + void *priv_data; + struct ablkcipher_request *cur_req; +}; + +int crypto_transfer_request(struct crypto_engine *engine, + struct ablkcipher_request *req, bool need_pump); +int crypto_transfer_request_to_engine(struct crypto_engine *engine, + struct ablkcipher_request *req); +void crypto_finalize_request(struct crypto_engine *engine, + struct ablkcipher_request *req, int err); +int crypto_engine_start(struct crypto_engine *engine); +int crypto_engine_stop(struct crypto_engine *engine); +struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt); +int crypto_engine_exit(struct crypto_engine *engine); + extern const struct crypto_type crypto_ablkcipher_type; extern const struct crypto_type crypto_blkcipher_type; @@ -184,6 +254,10 @@ int crypto_enqueue_request(struct crypto_queue *queue, struct crypto_async_request *request); struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); +static inline unsigned int crypto_queue_len(struct crypto_queue *queue) +{ + return queue->qlen; +} /* These functions require the input/output to be aligned as u32. 
*/ void crypto_inc(u8 *a, unsigned int size); @@ -275,24 +349,6 @@ static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm) return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher; } -static inline struct crypto_hash *crypto_spawn_hash(struct crypto_spawn *spawn) -{ - u32 type = CRYPTO_ALG_TYPE_HASH; - u32 mask = CRYPTO_ALG_TYPE_HASH_MASK; - - return __crypto_hash_cast(crypto_spawn_tfm(spawn, type, mask)); -} - -static inline void *crypto_hash_ctx(struct crypto_hash *tfm) -{ - return crypto_tfm_ctx(&tfm->base); -} - -static inline void *crypto_hash_ctx_aligned(struct crypto_hash *tfm) -{ - return crypto_tfm_ctx_aligned(&tfm->base); -} - static inline void blkcipher_walk_init(struct blkcipher_walk *walk, struct scatterlist *dst, struct scatterlist *src, diff --git a/include/crypto/compress.h b/include/crypto/compress.h deleted file mode 100644 index 5b67af834d83..000000000000 --- a/include/crypto/compress.h +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Compress: Compression algorithms under the cryptographic API. - * - * Copyright 2008 Sony Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. - * If not, see <http://www.gnu.org/licenses/>. - */ - -#ifndef _CRYPTO_COMPRESS_H -#define _CRYPTO_COMPRESS_H - -#include <linux/crypto.h> - - -struct comp_request { - const void *next_in; /* next input byte */ - void *next_out; /* next output byte */ - unsigned int avail_in; /* bytes available at next_in */ - unsigned int avail_out; /* bytes available at next_out */ -}; - -enum zlib_comp_params { - ZLIB_COMP_LEVEL = 1, /* e.g. Z_DEFAULT_COMPRESSION */ - ZLIB_COMP_METHOD, /* e.g. Z_DEFLATED */ - ZLIB_COMP_WINDOWBITS, /* e.g. MAX_WBITS */ - ZLIB_COMP_MEMLEVEL, /* e.g. DEF_MEM_LEVEL */ - ZLIB_COMP_STRATEGY, /* e.g. Z_DEFAULT_STRATEGY */ - __ZLIB_COMP_MAX, -}; - -#define ZLIB_COMP_MAX (__ZLIB_COMP_MAX - 1) - - -enum zlib_decomp_params { - ZLIB_DECOMP_WINDOWBITS = 1, /* e.g. 
DEF_WBITS */ - __ZLIB_DECOMP_MAX, -}; - -#define ZLIB_DECOMP_MAX (__ZLIB_DECOMP_MAX - 1) - - -struct crypto_pcomp { - struct crypto_tfm base; -}; - -struct pcomp_alg { - int (*compress_setup)(struct crypto_pcomp *tfm, const void *params, - unsigned int len); - int (*compress_init)(struct crypto_pcomp *tfm); - int (*compress_update)(struct crypto_pcomp *tfm, - struct comp_request *req); - int (*compress_final)(struct crypto_pcomp *tfm, - struct comp_request *req); - int (*decompress_setup)(struct crypto_pcomp *tfm, const void *params, - unsigned int len); - int (*decompress_init)(struct crypto_pcomp *tfm); - int (*decompress_update)(struct crypto_pcomp *tfm, - struct comp_request *req); - int (*decompress_final)(struct crypto_pcomp *tfm, - struct comp_request *req); - - struct crypto_alg base; -}; - -extern struct crypto_pcomp *crypto_alloc_pcomp(const char *alg_name, u32 type, - u32 mask); - -static inline struct crypto_tfm *crypto_pcomp_tfm(struct crypto_pcomp *tfm) -{ - return &tfm->base; -} - -static inline void crypto_free_pcomp(struct crypto_pcomp *tfm) -{ - crypto_destroy_tfm(tfm, crypto_pcomp_tfm(tfm)); -} - -static inline struct pcomp_alg *__crypto_pcomp_alg(struct crypto_alg *alg) -{ - return container_of(alg, struct pcomp_alg, base); -} - -static inline struct pcomp_alg *crypto_pcomp_alg(struct crypto_pcomp *tfm) -{ - return __crypto_pcomp_alg(crypto_pcomp_tfm(tfm)->__crt_alg); -} - -static inline int crypto_compress_setup(struct crypto_pcomp *tfm, - const void *params, unsigned int len) -{ - return crypto_pcomp_alg(tfm)->compress_setup(tfm, params, len); -} - -static inline int crypto_compress_init(struct crypto_pcomp *tfm) -{ - return crypto_pcomp_alg(tfm)->compress_init(tfm); -} - -static inline int crypto_compress_update(struct crypto_pcomp *tfm, - struct comp_request *req) -{ - return crypto_pcomp_alg(tfm)->compress_update(tfm, req); -} - -static inline int crypto_compress_final(struct crypto_pcomp *tfm, - struct comp_request *req) -{ - return crypto_pcomp_alg(tfm)->compress_final(tfm, req); -} - -static inline int crypto_decompress_setup(struct crypto_pcomp *tfm, - const void *params, unsigned int len) -{ - return crypto_pcomp_alg(tfm)->decompress_setup(tfm, params, len); -} - -static inline int crypto_decompress_init(struct crypto_pcomp *tfm) -{ - return crypto_pcomp_alg(tfm)->decompress_init(tfm); -} - -static inline int crypto_decompress_update(struct crypto_pcomp *tfm, - struct comp_request *req) -{ - return crypto_pcomp_alg(tfm)->decompress_update(tfm, req); -} - -static inline int crypto_decompress_final(struct crypto_pcomp *tfm, - struct comp_request *req) -{ - return crypto_pcomp_alg(tfm)->decompress_final(tfm, req); -} - -#endif /* _CRYPTO_COMPRESS_H */ diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index 9756c70899d8..d961b2b16f55 100644 --- a/include/crypto/drbg.h +++ b/include/crypto/drbg.h @@ -117,10 +117,6 @@ struct drbg_state { void *priv_data; /* Cipher handle */ bool seeded; /* DRBG fully seeded? */ bool pr; /* Prediction resistance enabled? */ -#ifdef CONFIG_CRYPTO_FIPS - bool fips_primed; /* Continuous test primed? 
*/ - unsigned char *prev; /* FIPS 140-2 continuous test value */ -#endif struct work_struct seed_work; /* asynchronous seeding support */ struct crypto_rng *jent; const struct drbg_state_ops *d_ops; diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 6361892ea737..1969f1416658 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -14,6 +14,7 @@ #define _CRYPTO_HASH_H #include <linux/crypto.h> +#include <linux/string.h> struct crypto_ahash; @@ -259,6 +260,28 @@ static inline void crypto_free_ahash(struct crypto_ahash *tfm) crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm)); } +/** + * crypto_has_ahash() - Search for the availability of an ahash. + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * ahash + * @type: specifies the type of the ahash + * @mask: specifies the mask for the ahash + * + * Return: true when the ahash is known to the kernel crypto API; false + * otherwise + */ +int crypto_has_ahash(const char *alg_name, u32 type, u32 mask); + +static inline const char *crypto_ahash_alg_name(struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_name(crypto_ahash_tfm(tfm)); +} + +static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); +} + static inline unsigned int crypto_ahash_alignmask( struct crypto_ahash *tfm) { @@ -550,6 +573,12 @@ static inline void ahash_request_free(struct ahash_request *req) kzfree(req); } +static inline void ahash_request_zero(struct ahash_request *req) +{ + memzero_explicit(req, sizeof(*req) + + crypto_ahash_reqsize(crypto_ahash_reqtfm(req))); +} + static inline struct ahash_request *ahash_request_cast( struct crypto_async_request *req) { @@ -657,6 +686,16 @@ static inline void crypto_free_shash(struct crypto_shash *tfm) crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm)); } +static inline const char *crypto_shash_alg_name(struct crypto_shash *tfm) +{ + return crypto_tfm_alg_name(crypto_shash_tfm(tfm)); +} + +static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm)); +} + static inline unsigned int crypto_shash_alignmask( struct crypto_shash *tfm) { @@ -872,4 +911,10 @@ int crypto_shash_final(struct shash_desc *desc, u8 *out); int crypto_shash_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out); +static inline void shash_desc_zero(struct shash_desc *desc) +{ + memzero_explicit(desc, + sizeof(*desc) + crypto_shash_descsize(desc->tfm)); +} + #endif /* _CRYPTO_HASH_H */ diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h index 5554cdd8d6c1..da3864991d4c 100644 --- a/include/crypto/internal/aead.h +++ b/include/crypto/internal/aead.h @@ -80,6 +80,12 @@ static inline u32 aead_request_flags(struct aead_request *req) return req->base.flags; } +static inline struct aead_request *aead_request_cast( + struct crypto_async_request *req) +{ + return container_of(req, struct aead_request, base); +} + static inline void crypto_set_aead_spawn( struct crypto_aead_spawn *spawn, struct crypto_instance *inst) { diff --git a/include/crypto/internal/compress.h b/include/crypto/internal/compress.h deleted file mode 100644 index 178a888d1d93..000000000000 --- a/include/crypto/internal/compress.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Compress: Compression algorithms under the cryptographic API. 
- * - * Copyright 2008 Sony Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. - * If not, see <http://www.gnu.org/licenses/>. - */ - -#ifndef _CRYPTO_INTERNAL_COMPRESS_H -#define _CRYPTO_INTERNAL_COMPRESS_H - -#include <crypto/compress.h> - -extern int crypto_register_pcomp(struct pcomp_alg *alg); -extern int crypto_unregister_pcomp(struct pcomp_alg *alg); - -#endif /* _CRYPTO_INTERNAL_COMPRESS_H */ diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 3b4af1d7c7e9..49dae16f8929 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -57,9 +57,6 @@ int crypto_hash_walk_first(struct ahash_request *req, struct crypto_hash_walk *walk); int crypto_ahash_walk_first(struct ahash_request *req, struct crypto_hash_walk *walk); -int crypto_hash_walk_first_compat(struct hash_desc *hdesc, - struct crypto_hash_walk *walk, - struct scatterlist *sg, unsigned int len); static inline int crypto_ahash_walk_done(struct crypto_hash_walk *walk, int err) diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h index cc2516df0efa..aa730ea7faf8 100644 --- a/include/crypto/public_key.h +++ b/include/crypto/public_key.h @@ -14,30 +14,6 @@ #ifndef _LINUX_PUBLIC_KEY_H #define _LINUX_PUBLIC_KEY_H -#include <linux/mpi.h> -#include <crypto/hash_info.h> - -enum pkey_algo { - PKEY_ALGO_DSA, - PKEY_ALGO_RSA, - PKEY_ALGO__LAST -}; - -extern const char *const pkey_algo_name[PKEY_ALGO__LAST]; -extern const struct public_key_algorithm *pkey_algo[PKEY_ALGO__LAST]; - -/* asymmetric key implementation supports only up to SHA224 */ -#define PKEY_HASH__LAST (HASH_ALGO_SHA224 + 1) - -enum pkey_id_type { - PKEY_ID_PGP, /* OpenPGP generated key ID */ - PKEY_ID_X509, /* X.509 arbitrary subjectKeyIdentifier */ - PKEY_ID_PKCS7, /* Signature in PKCS#7 message */ - PKEY_ID_TYPE__LAST -}; - -extern const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST]; - /* * The use to which an asymmetric key is being put. */ @@ -59,31 +35,10 @@ extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR]; * part. 
*/ struct public_key { - const struct public_key_algorithm *algo; - u8 capabilities; -#define PKEY_CAN_ENCRYPT 0x01 -#define PKEY_CAN_DECRYPT 0x02 -#define PKEY_CAN_SIGN 0x04 -#define PKEY_CAN_VERIFY 0x08 - enum pkey_algo pkey_algo : 8; - enum pkey_id_type id_type : 8; - union { - MPI mpi[5]; - struct { - MPI p; /* DSA prime */ - MPI q; /* DSA group order */ - MPI g; /* DSA group generator */ - MPI y; /* DSA public-key value = g^x mod p */ - MPI x; /* DSA secret exponent (if present) */ - } dsa; - struct { - MPI n; /* RSA public modulus */ - MPI e; /* RSA public encryption exponent */ - MPI d; /* RSA secret encryption exponent (if present) */ - MPI p; /* RSA secret prime (if present) */ - MPI q; /* RSA secret prime (if present) */ - } rsa; - }; + void *key; + u32 keylen; + const char *id_type; + const char *pkey_algo; }; extern void public_key_destroy(void *payload); @@ -92,23 +47,15 @@ extern void public_key_destroy(void *payload); * Public key cryptography signature data */ struct public_key_signature { + u8 *s; /* Signature */ + u32 s_size; /* Number of bytes in signature */ u8 *digest; - u8 digest_size; /* Number of bytes in digest */ - u8 nr_mpi; /* Occupancy of mpi[] */ - enum pkey_algo pkey_algo : 8; - enum hash_algo pkey_hash_algo : 8; - union { - MPI mpi[2]; - struct { - MPI s; /* m^d mod n */ - } rsa; - struct { - MPI r; - MPI s; - } dsa; - }; + u8 digest_size; /* Number of bytes in digest */ + const char *pkey_algo; + const char *hash_algo; }; +extern struct asymmetric_key_subtype public_key_subtype; struct key; extern int verify_signature(const struct key *key, const struct public_key_signature *sig); @@ -119,4 +66,7 @@ extern struct key *x509_request_asymmetric_key(struct key *keyring, const struct asymmetric_key_id *skid, bool partial); +int public_key_verify_signature(const struct public_key *pkey, + const struct public_key_signature *sig); + #endif /* _LINUX_PUBLIC_KEY_H */ diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index fd8742a40ff3..905490c1da89 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -60,8 +60,7 @@ struct crypto_skcipher { unsigned int ivsize; unsigned int reqsize; - - bool has_setkey; + unsigned int keysize; struct crypto_tfm base; }; @@ -232,6 +231,12 @@ static inline int crypto_has_skcipher(const char *alg_name, u32 type, crypto_skcipher_mask(mask)); } +static inline const char *crypto_skcipher_driver_name( + struct crypto_skcipher *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); +} + /** * crypto_skcipher_ivsize() - obtain IV size * @tfm: cipher handle @@ -309,7 +314,13 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm, static inline bool crypto_skcipher_has_setkey(struct crypto_skcipher *tfm) { - return tfm->has_setkey; + return tfm->keysize; +} + +static inline unsigned int crypto_skcipher_default_keysize( + struct crypto_skcipher *tfm) +{ + return tfm->keysize; } /** @@ -440,6 +451,13 @@ static inline void skcipher_request_free(struct skcipher_request *req) kzfree(req); } +static inline void skcipher_request_zero(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + + memzero_explicit(req, sizeof(*req) + crypto_skcipher_reqsize(tfm)); +} + /** * skcipher_request_set_callback() - set asynchronous callback function * @req: request handle diff --git a/include/crypto/xts.h b/include/crypto/xts.h index 72c09eb56437..ede6b97b24cc 100644 --- a/include/crypto/xts.h +++ b/include/crypto/xts.h @@ -2,6 +2,9 @@ #define _CRYPTO_XTS_H 
#include <crypto/b128ops.h> +#include <linux/crypto.h> +#include <crypto/algapi.h> +#include <linux/fips.h> struct scatterlist; struct blkcipher_desc; @@ -24,4 +27,28 @@ int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes, struct xts_crypt_req *req); +static inline int xts_check_key(struct crypto_tfm *tfm, + const u8 *key, unsigned int keylen) +{ + u32 *flags = &tfm->crt_flags; + + /* + * key consists of keys of equal size concatenated, therefore + * the length must be even. + */ + if (keylen % 2) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + /* ensure that the AES and tweak key are not identical */ + if (fips_enabled && + !crypto_memneq(key, key + (keylen / 2), keylen / 2)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + return -EINVAL; + } + + return 0; +} + #endif /* _CRYPTO_XTS_H */ diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index c65a212db77e..c5b4b81a831b 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -1166,6 +1166,7 @@ struct drm_connector { struct drm_mode_object base; char *name; + int connector_id; int connector_type; int connector_type_id; bool interlace_allowed; @@ -2047,6 +2048,7 @@ struct drm_mode_config { struct list_head fb_list; int num_connector; + struct ida connector_ida; struct list_head connector_list; int num_encoder; struct list_head encoder_list; @@ -2200,7 +2202,11 @@ int drm_connector_register(struct drm_connector *connector); void drm_connector_unregister(struct drm_connector *connector); extern void drm_connector_cleanup(struct drm_connector *connector); -extern unsigned int drm_connector_index(struct drm_connector *connector); +static inline unsigned drm_connector_index(struct drm_connector *connector) +{ + return connector->connector_id; +} + /* helper to unplug all connectors from sysfs for device */ extern void drm_connector_unplug_all(struct drm_device *dev); diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 2af97691e878..dec6221e8198 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -403,6 +403,18 @@ static inline int drm_eld_size(const uint8_t *eld) return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; } +/** + * drm_eld_get_conn_type - Get device type hdmi/dp connected + * @eld: pointer to an ELD memory structure + * + * The caller need to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to + * identify the display type connected. 
+ */ +static inline u8 drm_eld_get_conn_type(const uint8_t *eld) +{ + return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK; +} + struct edid *drm_do_get_edid(struct drm_connector *connector, int (*get_edid_block)(void *data, u8 *buf, unsigned int block, size_t len), diff --git a/include/dt-bindings/clock/rk3036-cru.h b/include/dt-bindings/clock/rk3036-cru.h index ebc7a7b43f52..de44109a3a04 100644 --- a/include/dt-bindings/clock/rk3036-cru.h +++ b/include/dt-bindings/clock/rk3036-cru.h @@ -54,6 +54,7 @@ #define SCLK_PVTM_VIDEO 125 #define SCLK_MAC 151 #define SCLK_MACREF 152 +#define SCLK_MACPLL 153 #define SCLK_SFC 160 /* aclk gates */ @@ -92,6 +93,7 @@ #define HCLK_SDMMC 456 #define HCLK_SDIO 457 #define HCLK_EMMC 459 +#define HCLK_MAC 460 #define HCLK_I2S 462 #define HCLK_LCDC 465 #define HCLK_ROM 467 diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h index 6f45aea49e4f..0a05b0d36ae7 100644 --- a/include/dt-bindings/clock/tegra210-car.h +++ b/include/dt-bindings/clock/tegra210-car.h @@ -126,7 +126,7 @@ /* 104 */ /* 105 */ #define TEGRA210_CLK_D_AUDIO 106 -/* 107 ( affects abp -> ape) */ +#define TEGRA210_CLK_APB2APE 107 /* 108 */ /* 109 */ /* 110 */ diff --git a/include/dt-bindings/iio/adc/fsl-imx25-gcq.h b/include/dt-bindings/iio/adc/fsl-imx25-gcq.h new file mode 100644 index 000000000000..87abdd4a7674 --- /dev/null +++ b/include/dt-bindings/iio/adc/fsl-imx25-gcq.h @@ -0,0 +1,18 @@ +/* + * This header provides constants for configuring the I.MX25 ADC + */ + +#ifndef _DT_BINDINGS_IIO_ADC_FS_IMX25_GCQ_H +#define _DT_BINDINGS_IIO_ADC_FS_IMX25_GCQ_H + +#define MX25_ADC_REFP_YP 0 /* YP voltage reference */ +#define MX25_ADC_REFP_XP 1 /* XP voltage reference */ +#define MX25_ADC_REFP_EXT 2 /* External voltage reference */ +#define MX25_ADC_REFP_INT 3 /* Internal voltage reference */ + +#define MX25_ADC_REFN_XN 0 /* XN ground reference */ +#define MX25_ADC_REFN_YN 1 /* YN ground reference */ +#define MX25_ADC_REFN_NGND 2 /* Internal ground reference */ +#define MX25_ADC_REFN_NGND2 3 /* External ground reference */ + +#endif diff --git a/include/media/i2c/tvp5150.h b/include/dt-bindings/media/tvp5150.h index 649908a25605..c852a35e916e 100644 --- a/include/media/i2c/tvp5150.h +++ b/include/dt-bindings/media/tvp5150.h @@ -18,16 +18,18 @@ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ -#ifndef _TVP5150_H_ -#define _TVP5150_H_ +#ifndef _DT_BINDINGS_MEDIA_TVP5150_H +#define _DT_BINDINGS_MEDIA_TVP5150_H /* TVP5150 HW inputs */ #define TVP5150_COMPOSITE0 0 #define TVP5150_COMPOSITE1 1 #define TVP5150_SVIDEO 2 +#define TVP5150_INPUT_NUM 3 + /* TVP5150 HW outputs */ #define TVP5150_NORMAL 0 #define TVP5150_BLACK_SCREEN 1 -#endif +#endif /* _DT_BINDINGS_MEDIA_TVP5150_H */ diff --git a/include/dt-bindings/pinctrl/mt7623-pinfunc.h b/include/dt-bindings/pinctrl/mt7623-pinfunc.h new file mode 100644 index 000000000000..2f00bdc42442 --- /dev/null +++ b/include/dt-bindings/pinctrl/mt7623-pinfunc.h @@ -0,0 +1,520 @@ +#ifndef __DTS_MT7623_PINFUNC_H +#define __DTS_MT7623_PINFUNC_H + +#include <dt-bindings/pinctrl/mt65xx.h> + +#define MT7623_PIN_0_PWRAP_SPI0_MI_FUNC_GPIO0 (MTK_PIN_NO(0) | 0) +#define MT7623_PIN_0_PWRAP_SPI0_MI_FUNC_PWRAP_SPIDO (MTK_PIN_NO(0) | 1) +#define MT7623_PIN_0_PWRAP_SPI0_MI_FUNC_PWRAP_SPIDI (MTK_PIN_NO(0) | 2) + +#define MT7623_PIN_1_PWRAP_SPI0_MO_FUNC_GPIO1 (MTK_PIN_NO(1) | 0) +#define MT7623_PIN_1_PWRAP_SPI0_MO_FUNC_PWRAP_SPIDI (MTK_PIN_NO(1) | 1) +#define MT7623_PIN_1_PWRAP_SPI0_MO_FUNC_PWRAP_SPIDO (MTK_PIN_NO(1) | 2) + +#define MT7623_PIN_2_PWRAP_INT_FUNC_GPIO2 (MTK_PIN_NO(2) | 0) +#define MT7623_PIN_2_PWRAP_INT_FUNC_PWRAP_INT (MTK_PIN_NO(2) | 1) + +#define MT7623_PIN_3_PWRAP_SPI0_CK_FUNC_GPIO3 (MTK_PIN_NO(3) | 0) +#define MT7623_PIN_3_PWRAP_SPI0_CK_FUNC_PWRAP_SPICK_I (MTK_PIN_NO(3) | 1) + +#define MT7623_PIN_4_PWRAP_SPI0_CSN_FUNC_GPIO4 (MTK_PIN_NO(4) | 0) +#define MT7623_PIN_4_PWRAP_SPI0_CSN_FUNC_PWRAP_SPICS_B_I (MTK_PIN_NO(4) | 1) + +#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_GPIO5 (MTK_PIN_NO(5) | 0) +#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_PWRAP_SPICK2_I (MTK_PIN_NO(5) | 1) + +#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_GPIO6 (MTK_PIN_NO(6) | 0) +#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_PWRAP_SPICS2_B_I (MTK_PIN_NO(6) | 1) + +#define MT7623_PIN_7_SPI1_CSN_FUNC_GPIO7 (MTK_PIN_NO(7) | 0) +#define MT7623_PIN_7_SPI1_CSN_FUNC_SPI1_CS (MTK_PIN_NO(7) | 1) + +#define MT7623_PIN_8_SPI1_MI_FUNC_GPIO8 (MTK_PIN_NO(8) | 0) +#define MT7623_PIN_8_SPI1_MI_FUNC_SPI1_MI (MTK_PIN_NO(8) | 1) +#define MT7623_PIN_8_SPI1_MI_FUNC_SPI1_MO (MTK_PIN_NO(8) | 2) + +#define MT7623_PIN_9_SPI1_MO_FUNC_GPIO9 (MTK_PIN_NO(9) | 0) +#define MT7623_PIN_9_SPI1_MO_FUNC_SPI1_MO (MTK_PIN_NO(9) | 1) +#define MT7623_PIN_9_SPI1_MO_FUNC_SPI1_MI (MTK_PIN_NO(9) | 2) + +#define MT7623_PIN_10_RTC32K_CK_FUNC_GPIO10 (MTK_PIN_NO(10) | 0) +#define MT7623_PIN_10_RTC32K_CK_FUNC_RTC32K_CK (MTK_PIN_NO(10) | 1) + +#define MT7623_PIN_11_WATCHDOG_FUNC_GPIO11 (MTK_PIN_NO(11) | 0) +#define MT7623_PIN_11_WATCHDOG_FUNC_WATCHDOG (MTK_PIN_NO(11) | 1) + +#define MT7623_PIN_12_SRCLKENA_FUNC_GPIO12 (MTK_PIN_NO(12) | 0) +#define MT7623_PIN_12_SRCLKENA_FUNC_SRCLKENA (MTK_PIN_NO(12) | 1) + +#define MT7623_PIN_13_SRCLKENAI_FUNC_GPIO13 (MTK_PIN_NO(13) | 0) +#define MT7623_PIN_13_SRCLKENAI_FUNC_SRCLKENAI (MTK_PIN_NO(13) | 1) + +#define MT7623_PIN_14_GPIO14_FUNC_GPIO14 (MTK_PIN_NO(14) | 0) +#define MT7623_PIN_14_GPIO14_FUNC_URXD2 (MTK_PIN_NO(14) | 1) +#define MT7623_PIN_14_GPIO14_FUNC_UTXD2 (MTK_PIN_NO(14) | 2) + +#define MT7623_PIN_15_GPIO15_FUNC_GPIO15 (MTK_PIN_NO(15) | 0) +#define MT7623_PIN_15_GPIO15_FUNC_UTXD2 (MTK_PIN_NO(15) | 1) +#define MT7623_PIN_15_GPIO15_FUNC_URXD2 (MTK_PIN_NO(15) | 2) + +#define MT7623_PIN_18_PCM_CLK_FUNC_GPIO18 (MTK_PIN_NO(18) | 0) +#define MT7623_PIN_18_PCM_CLK_FUNC_PCM_CLK0 (MTK_PIN_NO(18) | 1) +#define MT7623_PIN_18_PCM_CLK_FUNC_AP_PCM_CLKO (MTK_PIN_NO(18) | 6) + +#define 
MT7623_PIN_19_PCM_SYNC_FUNC_GPIO19 (MTK_PIN_NO(19) | 0) +#define MT7623_PIN_19_PCM_SYNC_FUNC_PCM_SYNC (MTK_PIN_NO(19) | 1) +#define MT7623_PIN_19_PCM_SYNC_FUNC_AP_PCM_SYNC (MTK_PIN_NO(19) | 6) + +#define MT7623_PIN_20_PCM_RX_FUNC_GPIO20 (MTK_PIN_NO(20) | 0) +#define MT7623_PIN_20_PCM_RX_FUNC_PCM_RX (MTK_PIN_NO(20) | 1) +#define MT7623_PIN_20_PCM_RX_FUNC_PCM_TX (MTK_PIN_NO(20) | 4) +#define MT7623_PIN_20_PCM_RX_FUNC_AP_PCM_RX (MTK_PIN_NO(20) | 6) + +#define MT7623_PIN_21_PCM_TX_FUNC_GPIO21 (MTK_PIN_NO(21) | 0) +#define MT7623_PIN_21_PCM_TX_FUNC_PCM_TX (MTK_PIN_NO(21) | 1) +#define MT7623_PIN_21_PCM_TX_FUNC_PCM_RX (MTK_PIN_NO(21) | 4) +#define MT7623_PIN_21_PCM_TX_FUNC_AP_PCM_TX (MTK_PIN_NO(21) | 6) + +#define MT7623_PIN_22_EINT0_FUNC_GPIO22 (MTK_PIN_NO(22) | 0) +#define MT7623_PIN_22_EINT0_FUNC_UCTS0 (MTK_PIN_NO(22) | 1) +#define MT7623_PIN_22_EINT0_FUNC_PCIE0_PERST_N (MTK_PIN_NO(22) | 2) + +#define MT7623_PIN_23_EINT1_FUNC_GPIO23 (MTK_PIN_NO(23) | 0) +#define MT7623_PIN_23_EINT1_FUNC_URTS0 (MTK_PIN_NO(23) | 1) +#define MT7623_PIN_23_EINT1_FUNC_PCIE1_PERST_N (MTK_PIN_NO(23) | 2) + +#define MT7623_PIN_24_EINT2_FUNC_GPIO24 (MTK_PIN_NO(24) | 0) +#define MT7623_PIN_24_EINT2_FUNC_UCTS1 (MTK_PIN_NO(24) | 1) +#define MT7623_PIN_24_EINT2_FUNC_PCIE2_PERST_N (MTK_PIN_NO(24) | 2) + +#define MT7623_PIN_25_EINT3_FUNC_GPIO25 (MTK_PIN_NO(25) | 0) +#define MT7623_PIN_25_EINT3_FUNC_URTS1 (MTK_PIN_NO(25) | 1) + +#define MT7623_PIN_26_EINT4_FUNC_GPIO26 (MTK_PIN_NO(26) | 0) +#define MT7623_PIN_26_EINT4_FUNC_UCTS3 (MTK_PIN_NO(26) | 1) +#define MT7623_PIN_26_EINT4_FUNC_PCIE2_WAKE_N (MTK_PIN_NO(26) | 6) + +#define MT7623_PIN_27_EINT5_FUNC_GPIO27 (MTK_PIN_NO(27) | 0) +#define MT7623_PIN_27_EINT5_FUNC_URTS3 (MTK_PIN_NO(27) | 1) +#define MT7623_PIN_27_EINT5_FUNC_PCIE1_WAKE_N (MTK_PIN_NO(27) | 6) + +#define MT7623_PIN_28_EINT6_FUNC_GPIO28 (MTK_PIN_NO(28) | 0) +#define MT7623_PIN_28_EINT6_FUNC_DRV_VBUS (MTK_PIN_NO(28) | 1) +#define MT7623_PIN_28_EINT6_FUNC_PCIE0_WAKE_N (MTK_PIN_NO(28) | 6) + +#define MT7623_PIN_29_EINT7_FUNC_GPIO29 (MTK_PIN_NO(29) | 0) +#define MT7623_PIN_29_EINT7_FUNC_IDDIG (MTK_PIN_NO(29) | 1) +#define MT7623_PIN_29_EINT7_FUNC_MSDC1_WP (MTK_PIN_NO(29) | 2) +#define MT7623_PIN_29_EINT7_FUNC_PCIE2_PERST_N (MTK_PIN_NO(29) | 6) + +#define MT7623_PIN_33_I2S1_DATA_FUNC_GPIO33 (MTK_PIN_NO(33) | 0) +#define MT7623_PIN_33_I2S1_DATA_FUNC_I2S1_DATA (MTK_PIN_NO(33) | 1) +#define MT7623_PIN_33_I2S1_DATA_FUNC_PCM_TX (MTK_PIN_NO(33) | 3) +#define MT7623_PIN_33_I2S1_DATA_FUNC_AP_PCM_TX (MTK_PIN_NO(33) | 6) + +#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_GPIO34 (MTK_PIN_NO(34) | 0) +#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_I2S1_DATA_IN (MTK_PIN_NO(34) | 1) +#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_PCM_RX (MTK_PIN_NO(34) | 3) +#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_AP_PCM_RX (MTK_PIN_NO(34) | 6) + +#define MT7623_PIN_35_I2S1_BCK_FUNC_GPIO35 (MTK_PIN_NO(35) | 0) +#define MT7623_PIN_35_I2S1_BCK_FUNC_I2S1_BCK (MTK_PIN_NO(35) | 1) +#define MT7623_PIN_35_I2S1_BCK_FUNC_PCM_CLK0 (MTK_PIN_NO(35) | 3) +#define MT7623_PIN_35_I2S1_BCK_FUNC_AP_PCM_CLKO (MTK_PIN_NO(35) | 6) + +#define MT7623_PIN_36_I2S1_LRCK_FUNC_GPIO36 (MTK_PIN_NO(36) | 0) +#define MT7623_PIN_36_I2S1_LRCK_FUNC_I2S1_LRCK (MTK_PIN_NO(36) | 1) +#define MT7623_PIN_36_I2S1_LRCK_FUNC_PCM_SYNC (MTK_PIN_NO(36) | 3) +#define MT7623_PIN_36_I2S1_LRCK_FUNC_AP_PCM_SYNC (MTK_PIN_NO(36) | 6) + +#define MT7623_PIN_37_I2S1_MCLK_FUNC_GPIO37 (MTK_PIN_NO(37) | 0) +#define MT7623_PIN_37_I2S1_MCLK_FUNC_I2S1_MCLK (MTK_PIN_NO(37) | 1) + +#define MT7623_PIN_39_JTMS_FUNC_GPIO39 (MTK_PIN_NO(39) 
| 0) +#define MT7623_PIN_39_JTMS_FUNC_JTMS (MTK_PIN_NO(39) | 1) + +#define MT7623_PIN_40_JTCK_FUNC_GPIO40 (MTK_PIN_NO(40) | 0) +#define MT7623_PIN_40_JTCK_FUNC_JTCK (MTK_PIN_NO(40) | 1) + +#define MT7623_PIN_41_JTDI_FUNC_GPIO41 (MTK_PIN_NO(41) | 0) +#define MT7623_PIN_41_JTDI_FUNC_JTDI (MTK_PIN_NO(41) | 1) + +#define MT7623_PIN_42_JTDO_FUNC_GPIO42 (MTK_PIN_NO(42) | 0) +#define MT7623_PIN_42_JTDO_FUNC_JTDO (MTK_PIN_NO(42) | 1) + +#define MT7623_PIN_43_NCLE_FUNC_GPIO43 (MTK_PIN_NO(43) | 0) +#define MT7623_PIN_43_NCLE_FUNC_NCLE (MTK_PIN_NO(43) | 1) +#define MT7623_PIN_43_NCLE_FUNC_EXT_XCS2 (MTK_PIN_NO(43) | 2) + +#define MT7623_PIN_44_NCEB1_FUNC_GPIO44 (MTK_PIN_NO(44) | 0) +#define MT7623_PIN_44_NCEB1_FUNC_NCEB1 (MTK_PIN_NO(44) | 1) +#define MT7623_PIN_44_NCEB1_FUNC_IDDIG (MTK_PIN_NO(44) | 2) + +#define MT7623_PIN_45_NCEB0_FUNC_GPIO45 (MTK_PIN_NO(45) | 0) +#define MT7623_PIN_45_NCEB0_FUNC_NCEB0 (MTK_PIN_NO(45) | 1) +#define MT7623_PIN_45_NCEB0_FUNC_DRV_VBUS (MTK_PIN_NO(45) | 2) + +#define MT7623_PIN_46_IR_FUNC_GPIO46 (MTK_PIN_NO(46) | 0) +#define MT7623_PIN_46_IR_FUNC_IR (MTK_PIN_NO(46) | 1) + +#define MT7623_PIN_47_NREB_FUNC_GPIO47 (MTK_PIN_NO(47) | 0) +#define MT7623_PIN_47_NREB_FUNC_NREB (MTK_PIN_NO(47) | 1) + +#define MT7623_PIN_48_NRNB_FUNC_GPIO48 (MTK_PIN_NO(48) | 0) +#define MT7623_PIN_48_NRNB_FUNC_NRNB (MTK_PIN_NO(48) | 1) + +#define MT7623_PIN_49_I2S0_DATA_FUNC_GPIO49 (MTK_PIN_NO(49) | 0) +#define MT7623_PIN_49_I2S0_DATA_FUNC_I2S0_DATA (MTK_PIN_NO(49) | 1) +#define MT7623_PIN_49_I2S0_DATA_FUNC_PCM_TX (MTK_PIN_NO(49) | 3) +#define MT7623_PIN_49_I2S0_DATA_FUNC_AP_I2S_DO (MTK_PIN_NO(49) | 6) + +#define MT7623_PIN_53_SPI0_CSN_FUNC_GPIO53 (MTK_PIN_NO(53) | 0) +#define MT7623_PIN_53_SPI0_CSN_FUNC_SPI0_CS (MTK_PIN_NO(53) | 1) +#define MT7623_PIN_53_SPI0_CSN_FUNC_PWM1 (MTK_PIN_NO(53) | 5) + +#define MT7623_PIN_54_SPI0_CK_FUNC_GPIO54 (MTK_PIN_NO(54) | 0) +#define MT7623_PIN_54_SPI0_CK_FUNC_SPI0_CK (MTK_PIN_NO(54) | 1) + +#define MT7623_PIN_55_SPI0_MI_FUNC_GPIO55 (MTK_PIN_NO(55) | 0) +#define MT7623_PIN_55_SPI0_MI_FUNC_SPI0_MI (MTK_PIN_NO(55) | 1) +#define MT7623_PIN_55_SPI0_MI_FUNC_SPI0_MO (MTK_PIN_NO(55) | 2) +#define MT7623_PIN_55_SPI0_MI_FUNC_MSDC1_WP (MTK_PIN_NO(55) | 3) +#define MT7623_PIN_55_SPI0_MI_FUNC_PWM2 (MTK_PIN_NO(55) | 5) + +#define MT7623_PIN_56_SPI0_MO_FUNC_GPIO56 (MTK_PIN_NO(56) | 0) +#define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MO (MTK_PIN_NO(56) | 1) +#define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MI (MTK_PIN_NO(56) | 2) + +#define MT7623_PIN_60_WB_RSTB_FUNC_GPIO60 (MTK_PIN_NO(60) | 0) +#define MT7623_PIN_60_WB_RSTB_FUNC_WB_RSTB (MTK_PIN_NO(60) | 1) + +#define MT7623_PIN_61_GPIO61_FUNC_GPIO61 (MTK_PIN_NO(61) | 0) +#define MT7623_PIN_61_GPIO61_FUNC_TEST_FD (MTK_PIN_NO(61) | 1) + +#define MT7623_PIN_62_GPIO62_FUNC_GPIO62 (MTK_PIN_NO(62) | 0) +#define MT7623_PIN_62_GPIO62_FUNC_TEST_FC (MTK_PIN_NO(62) | 1) + +#define MT7623_PIN_63_WB_SCLK_FUNC_GPIO63 (MTK_PIN_NO(63) | 0) +#define MT7623_PIN_63_WB_SCLK_FUNC_WB_SCLK (MTK_PIN_NO(63) | 1) + +#define MT7623_PIN_64_WB_SDATA_FUNC_GPIO64 (MTK_PIN_NO(64) | 0) +#define MT7623_PIN_64_WB_SDATA_FUNC_WB_SDATA (MTK_PIN_NO(64) | 1) + +#define MT7623_PIN_65_WB_SEN_FUNC_GPIO65 (MTK_PIN_NO(65) | 0) +#define MT7623_PIN_65_WB_SEN_FUNC_WB_SEN (MTK_PIN_NO(65) | 1) + +#define MT7623_PIN_66_WB_CRTL0_FUNC_GPIO66 (MTK_PIN_NO(66) | 0) +#define MT7623_PIN_66_WB_CRTL0_FUNC_WB_CRTL0 (MTK_PIN_NO(66) | 1) + +#define MT7623_PIN_67_WB_CRTL1_FUNC_GPIO67 (MTK_PIN_NO(67) | 0) +#define MT7623_PIN_67_WB_CRTL1_FUNC_WB_CRTL1 (MTK_PIN_NO(67) | 1) + +#define 
MT7623_PIN_68_WB_CRTL2_FUNC_GPIO68 (MTK_PIN_NO(68) | 0) +#define MT7623_PIN_68_WB_CRTL2_FUNC_WB_CRTL2 (MTK_PIN_NO(68) | 1) + +#define MT7623_PIN_69_WB_CRTL3_FUNC_GPIO69 (MTK_PIN_NO(69) | 0) +#define MT7623_PIN_69_WB_CRTL3_FUNC_WB_CRTL3 (MTK_PIN_NO(69) | 1) + +#define MT7623_PIN_70_WB_CRTL4_FUNC_GPIO70 (MTK_PIN_NO(70) | 0) +#define MT7623_PIN_70_WB_CRTL4_FUNC_WB_CRTL4 (MTK_PIN_NO(70) | 1) + +#define MT7623_PIN_71_WB_CRTL5_FUNC_GPIO71 (MTK_PIN_NO(71) | 0) +#define MT7623_PIN_71_WB_CRTL5_FUNC_WB_CRTL5 (MTK_PIN_NO(71) | 1) + +#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_GPIO72 (MTK_PIN_NO(72) | 0) +#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_I2S0_DATA_IN (MTK_PIN_NO(72) | 1) +#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_PCM_RX (MTK_PIN_NO(72) | 3) +#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_PWM0 (MTK_PIN_NO(72) | 4) +#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_DISP_PWM (MTK_PIN_NO(72) | 5) +#define MT7623_PIN_72_I2S0_DATA_IN_FUNC_AP_I2S_DI (MTK_PIN_NO(72) | 6) + +#define MT7623_PIN_73_I2S0_LRCK_FUNC_GPIO73 (MTK_PIN_NO(73) | 0) +#define MT7623_PIN_73_I2S0_LRCK_FUNC_I2S0_LRCK (MTK_PIN_NO(73) | 1) +#define MT7623_PIN_73_I2S0_LRCK_FUNC_PCM_SYNC (MTK_PIN_NO(73) | 3) +#define MT7623_PIN_73_I2S0_LRCK_FUNC_AP_I2S_LRCK (MTK_PIN_NO(73) | 6) + +#define MT7623_PIN_74_I2S0_BCK_FUNC_GPIO74 (MTK_PIN_NO(74) | 0) +#define MT7623_PIN_74_I2S0_BCK_FUNC_I2S0_BCK (MTK_PIN_NO(74) | 1) +#define MT7623_PIN_74_I2S0_BCK_FUNC_PCM_CLK0 (MTK_PIN_NO(74) | 3) +#define MT7623_PIN_74_I2S0_BCK_FUNC_AP_I2S_BCK (MTK_PIN_NO(74) | 6) + +#define MT7623_PIN_75_SDA0_FUNC_GPIO75 (MTK_PIN_NO(75) | 0) +#define MT7623_PIN_75_SDA0_FUNC_SDA0 (MTK_PIN_NO(75) | 1) + +#define MT7623_PIN_76_SCL0_FUNC_GPIO76 (MTK_PIN_NO(76) | 0) +#define MT7623_PIN_76_SCL0_FUNC_SCL0 (MTK_PIN_NO(76) | 1) + +#define MT7623_PIN_83_LCM_RST_FUNC_GPIO83 (MTK_PIN_NO(83) | 0) +#define MT7623_PIN_83_LCM_RST_FUNC_LCM_RST (MTK_PIN_NO(83) | 1) + +#define MT7623_PIN_84_DSI_TE_FUNC_GPIO84 (MTK_PIN_NO(84) | 0) +#define MT7623_PIN_84_DSI_TE_FUNC_DSI_TE (MTK_PIN_NO(84) | 1) + +#define MT7623_PIN_95_MIPI_TCN_FUNC_GPIO95 (MTK_PIN_NO(95) | 0) +#define MT7623_PIN_95_MIPI_TCN_FUNC_TCN (MTK_PIN_NO(95) | 1) + +#define MT7623_PIN_96_MIPI_TCP_FUNC_GPIO96 (MTK_PIN_NO(96) | 0) +#define MT7623_PIN_96_MIPI_TCP_FUNC_TCP (MTK_PIN_NO(96) | 1) + +#define MT7623_PIN_97_MIPI_TDN1_FUNC_GPIO97 (MTK_PIN_NO(97) | 0) +#define MT7623_PIN_97_MIPI_TDN1_FUNC_TDN1 (MTK_PIN_NO(97) | 1) + +#define MT7623_PIN_98_MIPI_TDP1_FUNC_GPIO98 (MTK_PIN_NO(98) | 0) +#define MT7623_PIN_98_MIPI_TDP1_FUNC_TDP1 (MTK_PIN_NO(98) | 1) + +#define MT7623_PIN_99_MIPI_TDN0_FUNC_GPIO99 (MTK_PIN_NO(99) | 0) +#define MT7623_PIN_99_MIPI_TDN0_FUNC_TDN0 (MTK_PIN_NO(99) | 1) + +#define MT7623_PIN_100_MIPI_TDP0_FUNC_GPIO100 (MTK_PIN_NO(100) | 0) +#define MT7623_PIN_100_MIPI_TDP0_FUNC_TDP0 (MTK_PIN_NO(100) | 1) + +#define MT7623_PIN_105_MSDC1_CMD_FUNC_GPIO105 (MTK_PIN_NO(105) | 0) +#define MT7623_PIN_105_MSDC1_CMD_FUNC_MSDC1_CMD (MTK_PIN_NO(105) | 1) +#define MT7623_PIN_105_MSDC1_CMD_FUNC_SDA1 (MTK_PIN_NO(105) | 3) +#define MT7623_PIN_105_MSDC1_CMD_FUNC_I2SOUT_BCK (MTK_PIN_NO(105) | 6) + +#define MT7623_PIN_106_MSDC1_CLK_FUNC_GPIO106 (MTK_PIN_NO(106) | 0) +#define MT7623_PIN_106_MSDC1_CLK_FUNC_MSDC1_CLK (MTK_PIN_NO(106) | 1) +#define MT7623_PIN_106_MSDC1_CLK_FUNC_SCL1 (MTK_PIN_NO(106) | 3) +#define MT7623_PIN_106_MSDC1_CLK_FUNC_I2SOUT_LRCK (MTK_PIN_NO(106) | 6) + +#define MT7623_PIN_107_MSDC1_DAT0_FUNC_GPIO107 (MTK_PIN_NO(107) | 0) +#define MT7623_PIN_107_MSDC1_DAT0_FUNC_MSDC1_DAT0 (MTK_PIN_NO(107) | 1) +#define MT7623_PIN_107_MSDC1_DAT0_FUNC_UTXD0 
(MTK_PIN_NO(107) | 5) +#define MT7623_PIN_107_MSDC1_DAT0_FUNC_I2SOUT_DATA_OUT (MTK_PIN_NO(107) | 6) + +#define MT7623_PIN_108_MSDC1_DAT1_FUNC_GPIO108 (MTK_PIN_NO(108) | 0) +#define MT7623_PIN_108_MSDC1_DAT1_FUNC_MSDC1_DAT1 (MTK_PIN_NO(108) | 1) +#define MT7623_PIN_108_MSDC1_DAT1_FUNC_PWM0 (MTK_PIN_NO(108) | 3) +#define MT7623_PIN_108_MSDC1_DAT1_FUNC_URXD0 (MTK_PIN_NO(108) | 5) +#define MT7623_PIN_108_MSDC1_DAT1_FUNC_PWM1 (MTK_PIN_NO(108) | 6) + +#define MT7623_PIN_109_MSDC1_DAT2_FUNC_GPIO109 (MTK_PIN_NO(109) | 0) +#define MT7623_PIN_109_MSDC1_DAT2_FUNC_MSDC1_DAT2 (MTK_PIN_NO(109) | 1) +#define MT7623_PIN_109_MSDC1_DAT2_FUNC_SDA2 (MTK_PIN_NO(109) | 3) +#define MT7623_PIN_109_MSDC1_DAT2_FUNC_UTXD1 (MTK_PIN_NO(109) | 5) +#define MT7623_PIN_109_MSDC1_DAT2_FUNC_PWM2 (MTK_PIN_NO(109) | 6) + +#define MT7623_PIN_110_MSDC1_DAT3_FUNC_GPIO110 (MTK_PIN_NO(110) | 0) +#define MT7623_PIN_110_MSDC1_DAT3_FUNC_MSDC1_DAT3 (MTK_PIN_NO(110) | 1) +#define MT7623_PIN_110_MSDC1_DAT3_FUNC_SCL2 (MTK_PIN_NO(110) | 3) +#define MT7623_PIN_110_MSDC1_DAT3_FUNC_URXD1 (MTK_PIN_NO(110) | 5) +#define MT7623_PIN_110_MSDC1_DAT3_FUNC_PWM3 (MTK_PIN_NO(110) | 6) + +#define MT7623_PIN_111_MSDC0_DAT7_FUNC_GPIO111 (MTK_PIN_NO(111) | 0) +#define MT7623_PIN_111_MSDC0_DAT7_FUNC_MSDC0_DAT7 (MTK_PIN_NO(111) | 1) +#define MT7623_PIN_111_MSDC0_DAT7_FUNC_NLD7 (MTK_PIN_NO(111) | 4) + +#define MT7623_PIN_112_MSDC0_DAT6_FUNC_GPIO112 (MTK_PIN_NO(112) | 0) +#define MT7623_PIN_112_MSDC0_DAT6_FUNC_MSDC0_DAT6 (MTK_PIN_NO(112) | 1) +#define MT7623_PIN_112_MSDC0_DAT6_FUNC_NLD6 (MTK_PIN_NO(112) | 4) + +#define MT7623_PIN_113_MSDC0_DAT5_FUNC_GPIO113 (MTK_PIN_NO(113) | 0) +#define MT7623_PIN_113_MSDC0_DAT5_FUNC_MSDC0_DAT5 (MTK_PIN_NO(113) | 1) +#define MT7623_PIN_113_MSDC0_DAT5_FUNC_NLD5 (MTK_PIN_NO(113) | 4) + +#define MT7623_PIN_114_MSDC0_DAT4_FUNC_GPIO114 (MTK_PIN_NO(114) | 0) +#define MT7623_PIN_114_MSDC0_DAT4_FUNC_MSDC0_DAT4 (MTK_PIN_NO(114) | 1) +#define MT7623_PIN_114_MSDC0_DAT4_FUNC_NLD4 (MTK_PIN_NO(114) | 4) + +#define MT7623_PIN_115_MSDC0_RSTB_FUNC_GPIO115 (MTK_PIN_NO(115) | 0) +#define MT7623_PIN_115_MSDC0_RSTB_FUNC_MSDC0_RSTB (MTK_PIN_NO(115) | 1) +#define MT7623_PIN_115_MSDC0_RSTB_FUNC_NLD8 (MTK_PIN_NO(115) | 4) + +#define MT7623_PIN_116_MSDC0_CMD_FUNC_GPIO116 (MTK_PIN_NO(116) | 0) +#define MT7623_PIN_116_MSDC0_CMD_FUNC_MSDC0_CMD (MTK_PIN_NO(116) | 1) +#define MT7623_PIN_116_MSDC0_CMD_FUNC_NALE (MTK_PIN_NO(116) | 4) + +#define MT7623_PIN_117_MSDC0_CLK_FUNC_GPIO117 (MTK_PIN_NO(117) | 0) +#define MT7623_PIN_117_MSDC0_CLK_FUNC_MSDC0_CLK (MTK_PIN_NO(117) | 1) +#define MT7623_PIN_117_MSDC0_CLK_FUNC_NWEB (MTK_PIN_NO(117) | 4) + +#define MT7623_PIN_118_MSDC0_DAT3_FUNC_GPIO118 (MTK_PIN_NO(118) | 0) +#define MT7623_PIN_118_MSDC0_DAT3_FUNC_MSDC0_DAT3 (MTK_PIN_NO(118) | 1) +#define MT7623_PIN_118_MSDC0_DAT3_FUNC_NLD3 (MTK_PIN_NO(118) | 4) + +#define MT7623_PIN_119_MSDC0_DAT2_FUNC_GPIO119 (MTK_PIN_NO(119) | 0) +#define MT7623_PIN_119_MSDC0_DAT2_FUNC_MSDC0_DAT2 (MTK_PIN_NO(119) | 1) +#define MT7623_PIN_119_MSDC0_DAT2_FUNC_NLD2 (MTK_PIN_NO(119) | 4) + +#define MT7623_PIN_120_MSDC0_DAT1_FUNC_GPIO120 (MTK_PIN_NO(120) | 0) +#define MT7623_PIN_120_MSDC0_DAT1_FUNC_MSDC0_DAT1 (MTK_PIN_NO(120) | 1) +#define MT7623_PIN_120_MSDC0_DAT1_FUNC_NLD1 (MTK_PIN_NO(120) | 4) + +#define MT7623_PIN_121_MSDC0_DAT0_FUNC_GPIO121 (MTK_PIN_NO(121) | 0) +#define MT7623_PIN_121_MSDC0_DAT0_FUNC_MSDC0_DAT0 (MTK_PIN_NO(121) | 1) +#define MT7623_PIN_121_MSDC0_DAT0_FUNC_NLD0 (MTK_PIN_NO(121) | 4) +#define MT7623_PIN_121_MSDC0_DAT0_FUNC_WATCHDOG (MTK_PIN_NO(121) | 5) + +#define 
MT7623_PIN_122_GPIO122_FUNC_GPIO122 (MTK_PIN_NO(122) | 0) +#define MT7623_PIN_122_GPIO122_FUNC_TEST (MTK_PIN_NO(122) | 1) +#define MT7623_PIN_122_GPIO122_FUNC_SDA2 (MTK_PIN_NO(122) | 4) +#define MT7623_PIN_122_GPIO122_FUNC_URXD0 (MTK_PIN_NO(122) | 5) + +#define MT7623_PIN_123_GPIO123_FUNC_GPIO123 (MTK_PIN_NO(123) | 0) +#define MT7623_PIN_123_GPIO123_FUNC_TEST (MTK_PIN_NO(123) | 1) +#define MT7623_PIN_123_GPIO123_FUNC_SCL2 (MTK_PIN_NO(123) | 4) +#define MT7623_PIN_123_GPIO123_FUNC_UTXD0 (MTK_PIN_NO(123) | 5) + +#define MT7623_PIN_124_GPIO124_FUNC_GPIO124 (MTK_PIN_NO(124) | 0) +#define MT7623_PIN_124_GPIO124_FUNC_TEST (MTK_PIN_NO(124) | 1) +#define MT7623_PIN_124_GPIO124_FUNC_SDA1 (MTK_PIN_NO(124) | 4) +#define MT7623_PIN_124_GPIO124_FUNC_PWM3 (MTK_PIN_NO(124) | 5) + +#define MT7623_PIN_125_GPIO125_FUNC_GPIO125 (MTK_PIN_NO(125) | 0) +#define MT7623_PIN_125_GPIO125_FUNC_TEST (MTK_PIN_NO(125) | 1) +#define MT7623_PIN_125_GPIO125_FUNC_SCL1 (MTK_PIN_NO(125) | 4) +#define MT7623_PIN_125_GPIO125_FUNC_PWM4 (MTK_PIN_NO(125) | 5) + +#define MT7623_PIN_126_I2S0_MCLK_FUNC_GPIO126 (MTK_PIN_NO(126) | 0) +#define MT7623_PIN_126_I2S0_MCLK_FUNC_I2S0_MCLK (MTK_PIN_NO(126) | 1) +#define MT7623_PIN_126_I2S0_MCLK_FUNC_AP_I2S_MCLK (MTK_PIN_NO(126) | 6) + +#define MT7623_PIN_199_SPI1_CK_FUNC_GPIO199 (MTK_PIN_NO(199) | 0) +#define MT7623_PIN_199_SPI1_CK_FUNC_SPI1_CK (MTK_PIN_NO(199) | 1) + +#define MT7623_PIN_200_URXD2_FUNC_GPIO200 (MTK_PIN_NO(200) | 0) +#define MT7623_PIN_200_URXD2_FUNC_URXD2 (MTK_PIN_NO(200) | 6) + +#define MT7623_PIN_201_UTXD2_FUNC_GPIO201 (MTK_PIN_NO(201) | 0) +#define MT7623_PIN_201_UTXD2_FUNC_UTXD2 (MTK_PIN_NO(201) | 6) + +#define MT7623_PIN_203_PWM0_FUNC_GPIO203 (MTK_PIN_NO(203) | 0) +#define MT7623_PIN_203_PWM0_FUNC_PWM0 (MTK_PIN_NO(203) | 1) +#define MT7623_PIN_203_PWM0_FUNC_DISP_PWM (MTK_PIN_NO(203) | 2) + +#define MT7623_PIN_204_PWM1_FUNC_GPIO204 (MTK_PIN_NO(204) | 0) +#define MT7623_PIN_204_PWM1_FUNC_PWM1 (MTK_PIN_NO(204) | 1) + +#define MT7623_PIN_205_PWM2_FUNC_GPIO205 (MTK_PIN_NO(205) | 0) +#define MT7623_PIN_205_PWM2_FUNC_PWM2 (MTK_PIN_NO(205) | 1) + +#define MT7623_PIN_206_PWM3_FUNC_GPIO206 (MTK_PIN_NO(206) | 0) +#define MT7623_PIN_206_PWM3_FUNC_PWM3 (MTK_PIN_NO(206) | 1) + +#define MT7623_PIN_207_PWM4_FUNC_GPIO207 (MTK_PIN_NO(207) | 0) +#define MT7623_PIN_207_PWM4_FUNC_PWM4 (MTK_PIN_NO(207) | 1) + +#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_GPIO208 (MTK_PIN_NO(208) | 0) +#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_AUD_EXT_CK1 (MTK_PIN_NO(208) | 1) +#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_PWM0 (MTK_PIN_NO(208) | 2) +#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_PCIE0_PERST_N (MTK_PIN_NO(208) | 3) +#define MT7623_PIN_208_AUD_EXT_CK1_FUNC_DISP_PWM (MTK_PIN_NO(208) | 5) + +#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_GPIO209 (MTK_PIN_NO(209) | 0) +#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_AUD_EXT_CK2 (MTK_PIN_NO(209) | 1) +#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_MSDC1_WP (MTK_PIN_NO(209) | 2) +#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_PCIE1_PERST_N (MTK_PIN_NO(209) | 3) +#define MT7623_PIN_209_AUD_EXT_CK2_FUNC_PWM1 (MTK_PIN_NO(209) | 5) + +#define MT7623_PIN_236_EXT_SDIO3_FUNC_GPIO236 (MTK_PIN_NO(236) | 0) +#define MT7623_PIN_236_EXT_SDIO3_FUNC_EXT_SDIO3 (MTK_PIN_NO(236) | 1) +#define MT7623_PIN_236_EXT_SDIO3_FUNC_IDDIG (MTK_PIN_NO(236) | 2) + +#define MT7623_PIN_237_EXT_SDIO2_FUNC_GPIO237 (MTK_PIN_NO(237) | 0) +#define MT7623_PIN_237_EXT_SDIO2_FUNC_EXT_SDIO2 (MTK_PIN_NO(237) | 1) +#define MT7623_PIN_237_EXT_SDIO2_FUNC_DRV_VBUS (MTK_PIN_NO(237) | 2) + +#define MT7623_PIN_238_EXT_SDIO1_FUNC_GPIO238 
(MTK_PIN_NO(238) | 0) +#define MT7623_PIN_238_EXT_SDIO1_FUNC_EXT_SDIO1 (MTK_PIN_NO(238) | 1) + +#define MT7623_PIN_239_EXT_SDIO0_FUNC_GPIO239 (MTK_PIN_NO(239) | 0) +#define MT7623_PIN_239_EXT_SDIO0_FUNC_EXT_SDIO0 (MTK_PIN_NO(239) | 1) + +#define MT7623_PIN_240_EXT_XCS_FUNC_GPIO240 (MTK_PIN_NO(240) | 0) +#define MT7623_PIN_240_EXT_XCS_FUNC_EXT_XCS (MTK_PIN_NO(240) | 1) + +#define MT7623_PIN_241_EXT_SCK_FUNC_GPIO241 (MTK_PIN_NO(241) | 0) +#define MT7623_PIN_241_EXT_SCK_FUNC_EXT_SCK (MTK_PIN_NO(241) | 1) + +#define MT7623_PIN_242_URTS2_FUNC_GPIO242 (MTK_PIN_NO(242) | 0) +#define MT7623_PIN_242_URTS2_FUNC_URTS2 (MTK_PIN_NO(242) | 1) +#define MT7623_PIN_242_URTS2_FUNC_UTXD3 (MTK_PIN_NO(242) | 2) +#define MT7623_PIN_242_URTS2_FUNC_URXD3 (MTK_PIN_NO(242) | 3) +#define MT7623_PIN_242_URTS2_FUNC_SCL1 (MTK_PIN_NO(242) | 4) + +#define MT7623_PIN_243_UCTS2_FUNC_GPIO243 (MTK_PIN_NO(243) | 0) +#define MT7623_PIN_243_UCTS2_FUNC_UCTS2 (MTK_PIN_NO(243) | 1) +#define MT7623_PIN_243_UCTS2_FUNC_URXD3 (MTK_PIN_NO(243) | 2) +#define MT7623_PIN_243_UCTS2_FUNC_UTXD3 (MTK_PIN_NO(243) | 3) +#define MT7623_PIN_243_UCTS2_FUNC_SDA1 (MTK_PIN_NO(243) | 4) + +#define MT7623_PIN_250_GPIO250_FUNC_GPIO250 (MTK_PIN_NO(250) | 0) +#define MT7623_PIN_250_GPIO250_FUNC_TEST_MD7 (MTK_PIN_NO(250) | 1) +#define MT7623_PIN_250_GPIO250_FUNC_PCIE0_CLKREQ_N (MTK_PIN_NO(250) | 6) + +#define MT7623_PIN_251_GPIO251_FUNC_GPIO251 (MTK_PIN_NO(251) | 0) +#define MT7623_PIN_251_GPIO251_FUNC_TEST_MD6 (MTK_PIN_NO(251) | 1) +#define MT7623_PIN_251_GPIO251_FUNC_PCIE0_WAKE_N (MTK_PIN_NO(251) | 6) + +#define MT7623_PIN_252_GPIO252_FUNC_GPIO252 (MTK_PIN_NO(252) | 0) +#define MT7623_PIN_252_GPIO252_FUNC_TEST_MD5 (MTK_PIN_NO(252) | 1) +#define MT7623_PIN_252_GPIO252_FUNC_PCIE1_CLKREQ_N (MTK_PIN_NO(252) | 6) + +#define MT7623_PIN_253_GPIO253_FUNC_GPIO253 (MTK_PIN_NO(253) | 0) +#define MT7623_PIN_253_GPIO253_FUNC_TEST_MD4 (MTK_PIN_NO(253) | 1) +#define MT7623_PIN_253_GPIO253_FUNC_PCIE1_WAKE_N (MTK_PIN_NO(253) | 6) + +#define MT7623_PIN_254_GPIO254_FUNC_GPIO254 (MTK_PIN_NO(254) | 0) +#define MT7623_PIN_254_GPIO254_FUNC_TEST_MD3 (MTK_PIN_NO(254) | 1) +#define MT7623_PIN_254_GPIO254_FUNC_PCIE2_CLKREQ_N (MTK_PIN_NO(254) | 6) + +#define MT7623_PIN_255_GPIO255_FUNC_GPIO255 (MTK_PIN_NO(255) | 0) +#define MT7623_PIN_255_GPIO255_FUNC_TEST_MD2 (MTK_PIN_NO(255) | 1) +#define MT7623_PIN_255_GPIO255_FUNC_PCIE2_WAKE_N (MTK_PIN_NO(255) | 6) + +#define MT7623_PIN_256_GPIO256_FUNC_GPIO256 (MTK_PIN_NO(256) | 0) +#define MT7623_PIN_256_GPIO256_FUNC_TEST_MD1 (MTK_PIN_NO(256) | 1) + +#define MT7623_PIN_257_GPIO257_FUNC_GPIO257 (MTK_PIN_NO(257) | 0) +#define MT7623_PIN_257_GPIO257_FUNC_TEST_MD0 (MTK_PIN_NO(257) | 1) + +#define MT7623_PIN_261_MSDC1_INS_FUNC_GPIO261 (MTK_PIN_NO(261) | 0) +#define MT7623_PIN_261_MSDC1_INS_FUNC_MSDC1_INS (MTK_PIN_NO(261) | 1) + +#define MT7623_PIN_262_G2_TXEN_FUNC_GPIO262 (MTK_PIN_NO(262) | 0) +#define MT7623_PIN_262_G2_TXEN_FUNC_G2_TXEN (MTK_PIN_NO(262) | 1) + +#define MT7623_PIN_263_G2_TXD3_FUNC_GPIO263 (MTK_PIN_NO(263) | 0) +#define MT7623_PIN_263_G2_TXD3_FUNC_G2_TXD3 (MTK_PIN_NO(263) | 1) + +#define MT7623_PIN_264_G2_TXD2_FUNC_GPIO264 (MTK_PIN_NO(264) | 0) +#define MT7623_PIN_264_G2_TXD2_FUNC_G2_TXD2 (MTK_PIN_NO(264) | 1) + +#define MT7623_PIN_265_G2_TXD1_FUNC_GPIO265 (MTK_PIN_NO(265) | 0) +#define MT7623_PIN_265_G2_TXD1_FUNC_G2_TXD1 (MTK_PIN_NO(265) | 1) + +#define MT7623_PIN_266_G2_TXD0_FUNC_GPIO266 (MTK_PIN_NO(266) | 0) +#define MT7623_PIN_266_G2_TXD0_FUNC_G2_TXD0 (MTK_PIN_NO(266) | 1) + +#define MT7623_PIN_267_G2_TXCLK_FUNC_GPIO267 
(MTK_PIN_NO(267) | 0) +#define MT7623_PIN_267_G2_TXCLK_FUNC_G2_TXC (MTK_PIN_NO(267) | 1) + +#define MT7623_PIN_268_G2_RXCLK_FUNC_GPIO268 (MTK_PIN_NO(268) | 0) +#define MT7623_PIN_268_G2_RXCLK_FUNC_G2_RXC (MTK_PIN_NO(268) | 1) + +#define MT7623_PIN_269_G2_RXD0_FUNC_GPIO269 (MTK_PIN_NO(269) | 0) +#define MT7623_PIN_269_G2_RXD0_FUNC_G2_RXD0 (MTK_PIN_NO(269) | 1) + +#define MT7623_PIN_270_G2_RXD1_FUNC_GPIO270 (MTK_PIN_NO(270) | 0) +#define MT7623_PIN_270_G2_RXD1_FUNC_G2_RXD1 (MTK_PIN_NO(270) | 1) + +#define MT7623_PIN_271_G2_RXD2_FUNC_GPIO271 (MTK_PIN_NO(271) | 0) +#define MT7623_PIN_271_G2_RXD2_FUNC_G2_RXD2 (MTK_PIN_NO(271) | 1) + +#define MT7623_PIN_272_G2_RXD3_FUNC_GPIO272 (MTK_PIN_NO(272) | 0) +#define MT7623_PIN_272_G2_RXD3_FUNC_G2_RXD3 (MTK_PIN_NO(272) | 1) + +#define MT7623_PIN_274_G2_RXDV_FUNC_GPIO274 (MTK_PIN_NO(274) | 0) +#define MT7623_PIN_274_G2_RXDV_FUNC_G2_RXDV (MTK_PIN_NO(274) | 1) + +#define MT7623_PIN_275_G2_MDC_FUNC_GPIO275 (MTK_PIN_NO(275) | 0) +#define MT7623_PIN_275_G2_MDC_FUNC_MDC (MTK_PIN_NO(275) | 1) + +#define MT7623_PIN_276_G2_MDIO_FUNC_GPIO276 (MTK_PIN_NO(276) | 0) +#define MT7623_PIN_276_G2_MDIO_FUNC_MDIO (MTK_PIN_NO(276) | 1) + +#define MT7623_PIN_278_JTAG_RESET_FUNC_GPIO278 (MTK_PIN_NO(278) | 0) +#define MT7623_PIN_278_JTAG_RESET_FUNC_JTAG_RESET (MTK_PIN_NO(278) | 1) + +#endif /* __DTS_MT7623_PINFUNC_H */ diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h index 42cf2d991bf4..4ea7e55f20b0 100644 --- a/include/keys/trusted-type.h +++ b/include/keys/trusted-type.h @@ -38,7 +38,7 @@ struct trusted_key_options { unsigned char pcrinfo[MAX_PCRINFO_SIZE]; int pcrlock; uint32_t hash; - uint32_t digest_len; + uint32_t policydigest_len; unsigned char policydigest[MAX_DIGEST_SIZE]; uint32_t policyhandle; }; diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index 1800227af9d6..b651aed9dc6b 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -55,6 +55,9 @@ struct arch_timer_cpu { /* VGIC mapping */ struct irq_phys_map *map; + + /* Active IRQ state caching */ + bool active_cleared_last; }; int kvm_timer_hyp_init(void); @@ -74,4 +77,6 @@ bool kvm_timer_should_fire(struct kvm_vcpu *vcpu); void kvm_timer_schedule(struct kvm_vcpu *vcpu); void kvm_timer_unschedule(struct kvm_vcpu *vcpu); +void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu); + #endif diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h new file mode 100644 index 000000000000..fe389ac31489 --- /dev/null +++ b/include/kvm/arm_pmu.h @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2015 Linaro Ltd. + * Author: Shannon Zhao <shannon.zhao@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __ASM_ARM_KVM_PMU_H +#define __ASM_ARM_KVM_PMU_H + +#ifdef CONFIG_KVM_ARM_PMU + +#include <linux/perf_event.h> +#include <asm/perf_event.h> + +#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) + +struct kvm_pmc { + u8 idx; /* index into the pmu->pmc array */ + struct perf_event *perf_event; + u64 bitmask; +}; + +struct kvm_pmu { + int irq_num; + struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS]; + bool ready; + bool irq_level; +}; + +#define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready) +#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS) +u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); +void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); +u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); +void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu); +void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu); +void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu); +void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu); +void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, + u64 select_idx); +bool kvm_arm_support_pmu_v3(void); +int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr); +#else +struct kvm_pmu { +}; + +#define kvm_arm_pmu_v3_ready(v) (false) +#define kvm_arm_pmu_irq_initialized(v) (false) +static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, + u64 select_idx) +{ + return 0; +} +static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, + u64 select_idx, u64 val) {} +static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) +{ + return 0; +} +static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {} +static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {} +static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {} +static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {} +static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, + u64 data, u64 select_idx) {} +static inline bool kvm_arm_support_pmu_v3(void) { return false; } +static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} +static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} +static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} +#endif + +#endif diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 13a3d537811b..281caf847fad 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -279,12 +279,6 @@ struct vgic_v2_cpu_if { u32 vgic_lr[VGIC_V2_MAX_LRS]; }; -/* - * 
LRs are stored in reverse order in memory. make sure we index them - * correctly. - */ -#define VGIC_V3_LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr) - struct vgic_v3_cpu_if { #ifdef CONFIG_KVM_ARM_VGIC_V3 u32 vgic_hcr; @@ -321,6 +315,8 @@ struct vgic_cpu { /* Protected by the distributor's irq_phys_map_lock */ struct list_head irq_phys_map_list; + + u64 live_lrs; }; #define LR_EMPTY 0xff diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 9006c4e75cf7..3d8dcdd1aeae 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -163,4 +163,13 @@ struct amba_device name##_device = { \ #define module_amba_driver(__amba_drv) \ module_driver(__amba_drv, amba_driver_register, amba_driver_unregister) +/* + * builtin_amba_driver() - Helper macro for drivers that don't do anything + * special in driver initcall. This eliminates a lot of boilerplate. Each + * driver may only use this macro once, and calling it replaces the instance + * device_initcall(). + */ +#define builtin_amba_driver(__amba_drv) \ + builtin_driver(__amba_drv, amba_driver_register) + #endif diff --git a/include/linux/ata.h b/include/linux/ata.h index d2992bfa1706..c1a2f345cbe6 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -487,8 +487,8 @@ enum ata_tf_protocols { }; enum ata_ioctls { - ATA_IOC_GET_IO32 = 0x309, - ATA_IOC_SET_IO32 = 0x324, + ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */ + ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */ }; /* core structures */ diff --git a/include/linux/atmel_serial.h b/include/linux/atmel_serial.h index ee696d7e8a43..5a4d664af87a 100644 --- a/include/linux/atmel_serial.h +++ b/include/linux/atmel_serial.h @@ -119,7 +119,8 @@ #define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */ #define ATMEL_US_CD GENMASK(15, 0) /* Clock Divider */ -#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register */ +#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register for USART */ +#define ATMEL_UA_RTOR 0x28 /* Receiver Time-out Register for UART */ #define ATMEL_US_TO GENMASK(15, 0) /* Time-out Value */ #define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */ diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 5f3ee5a60a81..df4f369254c0 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -558,6 +558,27 @@ static inline int atomic_dec_if_positive(atomic_t *v) } #endif +/** + * fetch_or - perform *ptr |= mask and return old value of *ptr + * @ptr: pointer to value + * @mask: mask to OR on the value + * + * cmpxchg based fetch_or, macro so it works for different integer types + */ +#ifndef fetch_or +#define fetch_or(ptr, mask) \ +({ typeof(*(ptr)) __old, __val = *(ptr); \ + for (;;) { \ + __old = cmpxchg((ptr), __val, __val | (mask)); \ + if (__old == __val) \ + break; \ + __val = __old; \ + } \ + __old; \ +}) +#endif + + #ifdef CONFIG_GENERIC_ATOMIC64 #include <asm-generic/atomic64.h> #endif diff --git a/include/linux/audit.h b/include/linux/audit.h index b40ed5df5542..e38e3fc13ea8 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -109,6 +109,10 @@ extern int audit_classify_compat_syscall(int abi, unsigned syscall); /* maximized args number that audit_socketcall can process */ #define AUDITSC_ARGS 6 +/* bit values for ->signal->audit_tty */ +#define AUDIT_TTY_ENABLE BIT(0) +#define AUDIT_TTY_LOG_PASSWD BIT(1) + struct filename; extern void audit_log_session_info(struct audit_buffer *ab); diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h index 850f39b33e74..7caaf298f539 100644 --- 
a/include/linux/auto_dev-ioctl.h +++ b/include/linux/auto_dev-ioctl.h @@ -11,12 +11,7 @@ #define _LINUX_AUTO_DEV_IOCTL_H #include <linux/auto_fs.h> - -#ifdef __KERNEL__ #include <linux/string.h> -#else -#include <string.h> -#endif /* __KERNEL__ */ #define AUTOFS_DEVICE_NAME "autofs" @@ -125,7 +120,6 @@ static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in) in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR; in->size = sizeof(struct autofs_dev_ioctl); in->ioctlfd = -1; - return; } /* diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h index fcd704d354c4..b4066bb89083 100644 --- a/include/linux/auto_fs.h +++ b/include/linux/auto_fs.h @@ -1,14 +1,10 @@ -/* -*- linux-c -*- ------------------------------------------------------- * - * - * linux/include/linux/auto_fs.h - * - * Copyright 1997 Transmeta Corporation - All Rights Reserved +/* + * Copyright 1997 Transmeta Corporation - All Rights Reserved * * This file is part of the Linux kernel and is made available under * the terms of the GNU General Public License, version 2, or at your * option, any later version, incorporated herein by reference. - * - * ----------------------------------------------------------------------- */ + */ #ifndef _LINUX_AUTO_FS_H #define _LINUX_AUTO_FS_H diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 3feb1b2d75d8..0367c63f5960 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -151,6 +151,8 @@ struct bcma_host_ops { #define BCMA_CORE_PCIE2 0x83C /* PCI Express Gen2 */ #define BCMA_CORE_USB30_DEV 0x83D #define BCMA_CORE_ARM_CR4 0x83E +#define BCMA_CORE_GCI 0x840 +#define BCMA_CORE_CMEM 0x846 /* CNDS DDR2/3 memory controller */ #define BCMA_CORE_ARM_CA7 0x847 #define BCMA_CORE_SYS_MEM 0x849 #define BCMA_CORE_DEFAULT 0xFFF @@ -199,6 +201,7 @@ struct bcma_host_ops { #define BCMA_PKG_ID_BCM4707 1 #define BCMA_PKG_ID_BCM4708 2 #define BCMA_PKG_ID_BCM4709 0 +#define BCMA_CHIP_ID_BCM47094 53030 #define BCMA_CHIP_ID_BCM53018 53018 /* Board types (on PCI usually equals to the subsystem dev id) */ diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index db51a6ffb7d6..846513c73606 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -217,6 +217,11 @@ #define BCMA_CC_CLKDIV_JTAG_SHIFT 8 #define BCMA_CC_CLKDIV_UART 0x000000FF #define BCMA_CC_CAP_EXT 0x00AC /* Capabilities */ +#define BCMA_CC_CAP_EXT_SECI_PRESENT 0x00000001 +#define BCMA_CC_CAP_EXT_GSIO_PRESENT 0x00000002 +#define BCMA_CC_CAP_EXT_GCI_PRESENT 0x00000004 +#define BCMA_CC_CAP_EXT_SECI_PUART_PRESENT 0x00000008 /* UART present */ +#define BCMA_CC_CAP_EXT_AOB_PRESENT 0x00000040 #define BCMA_CC_PLLONDELAY 0x00B0 /* Rev >= 4 only */ #define BCMA_CC_FREFSELDELAY 0x00B4 /* Rev >= 4 only */ #define BCMA_CC_SLOWCLKCTL 0x00B8 /* 6 <= Rev <= 9 only */ @@ -351,12 +356,12 @@ #define BCMA_CC_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */ #define BCMA_CC_PMU_RES_REQT 0x0644 /* PMU res req timer */ #define BCMA_CC_PMU_RES_REQM 0x0648 /* PMU res req mask */ -#define BCMA_CC_CHIPCTL_ADDR 0x0650 -#define BCMA_CC_CHIPCTL_DATA 0x0654 -#define BCMA_CC_REGCTL_ADDR 0x0658 -#define BCMA_CC_REGCTL_DATA 0x065C -#define BCMA_CC_PLLCTL_ADDR 0x0660 -#define BCMA_CC_PLLCTL_DATA 0x0664 +#define BCMA_CC_PMU_CHIPCTL_ADDR 0x0650 +#define BCMA_CC_PMU_CHIPCTL_DATA 0x0654 +#define BCMA_CC_PMU_REGCTL_ADDR 0x0658 +#define BCMA_CC_PMU_REGCTL_DATA 0x065C +#define BCMA_CC_PMU_PLLCTL_ADDR 0x0660 +#define BCMA_CC_PMU_PLLCTL_DATA 
0x0664 #define BCMA_CC_PMU_STRAPOPT 0x0668 /* (corerev >= 28) */ #define BCMA_CC_PMU_XTAL_FREQ 0x066C /* (pmurev >= 10) */ #define BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK 0x00001FFF @@ -566,17 +571,16 @@ * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU) */ struct bcma_chipcommon_pmu { + struct bcma_device *core; /* Can be separated core or just ChipCommon one */ u8 rev; /* PMU revision */ u32 crystalfreq; /* The active crystal frequency (in kHz) */ }; -#ifdef CONFIG_BCMA_DRIVER_MIPS +#ifdef CONFIG_BCMA_PFLASH struct bcma_pflash { bool present; - u8 buswidth; - u32 window; - u32 window_size; }; +#endif #ifdef CONFIG_BCMA_SFLASH struct mtd_info; @@ -600,6 +604,7 @@ struct bcma_nflash { }; #endif +#ifdef CONFIG_BCMA_DRIVER_MIPS struct bcma_serial_port { void *regs; unsigned long clockspeed; @@ -619,8 +624,9 @@ struct bcma_drv_cc { /* Fast Powerup Delay constant */ u16 fast_pwrup_delay; struct bcma_chipcommon_pmu pmu; -#ifdef CONFIG_BCMA_DRIVER_MIPS +#ifdef CONFIG_BCMA_PFLASH struct bcma_pflash pflash; +#endif #ifdef CONFIG_BCMA_SFLASH struct bcma_sflash sflash; #endif @@ -628,6 +634,7 @@ struct bcma_drv_cc { struct bcma_nflash nflash; #endif +#ifdef CONFIG_BCMA_DRIVER_MIPS int nr_serial_ports; struct bcma_serial_port serial_ports[4]; #endif /* CONFIG_BCMA_DRIVER_MIPS */ @@ -660,6 +667,19 @@ struct bcma_drv_cc_b { #define bcma_cc_maskset32(cc, offset, mask, set) \ bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set)) +/* PMU registers access */ +#define bcma_pmu_read32(cc, offset) \ + bcma_read32((cc)->pmu.core, offset) +#define bcma_pmu_write32(cc, offset, val) \ + bcma_write32((cc)->pmu.core, offset, val) + +#define bcma_pmu_mask32(cc, offset, mask) \ + bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) & (mask)) +#define bcma_pmu_set32(cc, offset, set) \ + bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) | (set)) +#define bcma_pmu_maskset32(cc, offset, mask, set) \ + bcma_pmu_write32(cc, offset, (bcma_pmu_read32(cc, offset) & (mask)) | (set)) + extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks); extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc); diff --git a/include/linux/bio.h b/include/linux/bio.h index 5349e6816cbb..88bc64f00bb5 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -310,6 +310,38 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit) bio->bi_flags &= ~(1U << bit); } +static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv) +{ + *bv = bio_iovec(bio); +} + +static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) +{ + struct bvec_iter iter = bio->bi_iter; + int idx; + + if (unlikely(!bio_multiple_segments(bio))) { + *bv = bio_iovec(bio); + return; + } + + bio_advance_iter(bio, &iter, iter.bi_size); + + if (!iter.bi_bvec_done) + idx = iter.bi_idx - 1; + else /* in the middle of bvec */ + idx = iter.bi_idx; + + *bv = bio->bi_io_vec[idx]; + + /* + * iter.bi_bvec_done records actual length of the last bvec + * if this bio ends in the middle of one io vector + */ + if (iter.bi_bvec_done) + bv->bv_len = iter.bi_bvec_done; +} + enum bip_flags { BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */ diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 9653fdb76a42..e9b0b9ab07e5 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -59,6 +59,8 @@ * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region 
* bitmap_release_region(bitmap, pos, order) Free specified bit region * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region + * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words) + * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words) */ /* @@ -163,6 +165,14 @@ extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); +extern unsigned int bitmap_from_u32array(unsigned long *bitmap, + unsigned int nbits, + const u32 *buf, + unsigned int nwords); +extern unsigned int bitmap_to_u32array(u32 *buf, + unsigned int nwords, + const unsigned long *bitmap, + unsigned int nbits); #ifdef __BIG_ENDIAN extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); #else diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 7fc9296b5742..15a73d49fd1d 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -244,6 +244,8 @@ void blk_mq_freeze_queue(struct request_queue *q); void blk_mq_unfreeze_queue(struct request_queue *q); void blk_mq_freeze_queue_start(struct request_queue *q); +void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); + /* * Driver command data is immediately after the request. So subtract request * size to get back to the original request, add request size to get the PDU. diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 29189aeace19..8a11b69dfc08 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -682,9 +682,12 @@ static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) /* * q->prep_rq_fn return values */ -#define BLKPREP_OK 0 /* serve it */ -#define BLKPREP_KILL 1 /* fatal error, kill */ -#define BLKPREP_DEFER 2 /* leave on queue */ +enum { + BLKPREP_OK, /* serve it */ + BLKPREP_KILL, /* fatal error, kill, return -EIO */ + BLKPREP_DEFER, /* leave on queue */ + BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */ +}; extern unsigned long blk_max_low_pfn, blk_max_pfn; @@ -892,7 +895,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq) { struct request_queue *q = rq->q; - if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC)) + if (unlikely(rq->cmd_type != REQ_TYPE_FS)) return q->limits.max_hw_sectors; if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD)) @@ -1026,6 +1029,7 @@ extern int blk_pre_runtime_suspend(struct request_queue *q); extern void blk_post_runtime_suspend(struct request_queue *q, int err); extern void blk_pre_runtime_resume(struct request_queue *q); extern void blk_post_runtime_resume(struct request_queue *q, int err); +extern void blk_set_runtime_active(struct request_queue *q); #else static inline void blk_pm_runtime_init(struct request_queue *q, struct device *dev) {} @@ -1036,6 +1040,7 @@ static inline int blk_pre_runtime_suspend(struct request_queue *q) static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} static inline void blk_pre_runtime_resume(struct request_queue *q) {} static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} +extern inline void blk_set_runtime_active(struct request_queue *q) {} #endif /* @@ -1369,6 +1374,13 @@ static inline void put_dev_sector(Sector p) page_cache_release(p.v); } +static 
inline bool __bvec_gap_to_prev(struct request_queue *q, + struct bio_vec *bprv, unsigned int offset) +{ + return offset || + ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); +} + /* * Check if adding a bio_vec after bprv with offset would create a gap in * the SG list. Most drivers don't care about this, but some do. @@ -1378,18 +1390,22 @@ static inline bool bvec_gap_to_prev(struct request_queue *q, { if (!queue_virt_boundary(q)) return false; - return offset || - ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); + return __bvec_gap_to_prev(q, bprv, offset); } static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, struct bio *next) { - if (!bio_has_data(prev)) - return false; + if (bio_has_data(prev) && queue_virt_boundary(q)) { + struct bio_vec pb, nb; + + bio_get_last_bvec(prev, &pb); + bio_get_first_bvec(next, &nb); - return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1], - next->bi_io_vec[0].bv_offset); + return __bvec_gap_to_prev(q, &pb, nb.bv_offset); + } + + return false; } static inline bool req_gap_back_merge(struct request *req, struct bio *bio) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 83d1926c61e4..21ee41b92e8a 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -10,6 +10,7 @@ #include <uapi/linux/bpf.h> #include <linux/workqueue.h> #include <linux/file.h> +#include <linux/percpu.h> struct bpf_map; @@ -36,6 +37,7 @@ struct bpf_map { u32 key_size; u32 value_size; u32 max_entries; + u32 map_flags; u32 pages; struct user_struct *user; const struct bpf_map_ops *ops; @@ -65,6 +67,7 @@ enum bpf_arg_type { */ ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */ ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */ + ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */ ARG_PTR_TO_CTX, /* pointer to context */ ARG_ANYTHING, /* any (initialized) argument is ok */ @@ -151,6 +154,7 @@ struct bpf_array { union { char value[0] __aligned(8); void *ptrs[0] __aligned(8); + void __percpu *pptrs[0] __aligned(8); }; }; #define MAX_TAIL_CALL_CNT 32 @@ -161,6 +165,8 @@ bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *f const struct bpf_func_proto *bpf_get_trace_printk_proto(void); #ifdef CONFIG_BPF_SYSCALL +DECLARE_PER_CPU(int, bpf_prog_active); + void bpf_register_prog_type(struct bpf_prog_type_list *tl); void bpf_register_map_type(struct bpf_map_type_list *tl); @@ -173,6 +179,7 @@ struct bpf_map *__bpf_map_get(struct fd f); void bpf_map_inc(struct bpf_map *map, bool uref); void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); +int bpf_map_precharge_memlock(u32 pages); extern int sysctl_unprivileged_bpf_disabled; @@ -182,6 +189,30 @@ int bpf_prog_new_fd(struct bpf_prog *prog); int bpf_obj_pin_user(u32 ufd, const char __user *pathname); int bpf_obj_get_user(const char __user *pathname); +int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); +int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); +int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, + u64 flags); +int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, + u64 flags); +int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); + +/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and + * forced to use 'long' read/writes to try to atomically copy long counters. + * Best-effort only. 
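/*
 * Illustrative sketch only, not part of this patch: how a caller might use
 * the bpf_long_memcpy() helper introduced below for an 8-byte-aligned,
 * power-of-8-sized per-cpu value. "struct pcpu_counters" and
 * "copy_counters" are hypothetical names.
 */
struct pcpu_counters {
        long packets;
        long bytes;
};

static void copy_counters(struct pcpu_counters *dst,
                          const struct pcpu_counters *src)
{
        /* copied long-by-long; best effort, may race with BPF updates */
        bpf_long_memcpy(dst, src, sizeof(*dst));
}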
No barriers here, since it _will_ race with concurrent + * updates from BPF programs. Called from bpf syscall and mostly used with + * size 8 or 16 bytes, so ask compiler to inline it. + */ +static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) +{ + const long *lsrc = src; + long *ldst = dst; + + size /= sizeof(long); + while (size--) + *ldst++ = *lsrc++; +} + /* verify correctness of eBPF program */ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); #else @@ -213,6 +244,7 @@ extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; extern const struct bpf_func_proto bpf_get_current_comm_proto; extern const struct bpf_func_proto bpf_skb_vlan_push_proto; extern const struct bpf_func_proto bpf_skb_vlan_pop_proto; +extern const struct bpf_func_proto bpf_get_stackid_proto; /* Shared helpers among cBPF and eBPF. */ void bpf_user_rnd_init_once(void); diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 89d9aa9e79bf..c67f052cc5e5 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -82,15 +82,15 @@ struct buffer_head { * and buffer_foo() functions. */ #define BUFFER_FNS(bit, name) \ -static inline void set_buffer_##name(struct buffer_head *bh) \ +static __always_inline void set_buffer_##name(struct buffer_head *bh) \ { \ set_bit(BH_##bit, &(bh)->b_state); \ } \ -static inline void clear_buffer_##name(struct buffer_head *bh) \ +static __always_inline void clear_buffer_##name(struct buffer_head *bh) \ { \ clear_bit(BH_##bit, &(bh)->b_state); \ } \ -static inline int buffer_##name(const struct buffer_head *bh) \ +static __always_inline int buffer_##name(const struct buffer_head *bh) \ { \ return test_bit(BH_##bit, &(bh)->b_state); \ } @@ -99,11 +99,11 @@ static inline int buffer_##name(const struct buffer_head *bh) \ * test_set_buffer_foo() and test_clear_buffer_foo() */ #define TAS_BUFFER_FNS(bit, name) \ -static inline int test_set_buffer_##name(struct buffer_head *bh) \ +static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \ { \ return test_and_set_bit(BH_##bit, &(bh)->b_state); \ } \ -static inline int test_clear_buffer_##name(struct buffer_head *bh) \ +static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \ { \ return test_and_clear_bit(BH_##bit, &(bh)->b_state); \ } \ diff --git a/include/linux/cache.h b/include/linux/cache.h index 17e7e82d2aa7..1be04f8c563a 100644 --- a/include/linux/cache.h +++ b/include/linux/cache.h @@ -12,10 +12,24 @@ #define SMP_CACHE_BYTES L1_CACHE_BYTES #endif +/* + * __read_mostly is used to keep rarely changing variables out of frequently + * updated cachelines. If an architecture doesn't support it, ignore the + * hint. + */ #ifndef __read_mostly #define __read_mostly #endif +/* + * __ro_after_init is used to mark things that are read-only after init (i.e. + * after mark_rodata_ro() has been called). These are effectively read-only, + * but may get written to during init, so can't live in .rodata (via "const"). 
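/*
 * Illustrative sketch only, not part of this patch: a variable that is
 * written once during boot and treated as read-only afterwards, using the
 * __ro_after_init annotation defined below. All "widget_*" names are
 * hypothetical.
 */
static unsigned long widget_table_size __ro_after_init;

static int __init widget_init(void)
{
        widget_table_size = 128;        /* last legal write: before mark_rodata_ro() */
        return 0;
}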
+ */ +#ifndef __ro_after_init +#define __ro_after_init __attribute__((__section__(".data..ro_after_init"))) +#endif + #ifndef ____cacheline_aligned #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES))) #endif diff --git a/include/linux/ccp.h b/include/linux/ccp.h index 7f437036baa4..915af3095b39 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -33,6 +33,18 @@ struct ccp_cmd; */ int ccp_present(void); +#define CCP_VSIZE 16 +#define CCP_VMASK ((unsigned int)((1 << CCP_VSIZE) - 1)) +#define CCP_VERSION(v, r) ((unsigned int)((v << CCP_VSIZE) \ + | (r & CCP_VMASK))) + +/** + * ccp_version - get the version of the CCP + * + * Returns a positive version number, or zero if no CCP + */ +unsigned int ccp_version(void); + /** * ccp_enqueue_cmd - queue an operation for processing by the CCP * @@ -65,6 +77,11 @@ static inline int ccp_present(void) return -ENODEV; } +static inline unsigned int ccp_version(void) +{ + return 0; +} + static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd) { return -ENODEV; diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index c1ef6f14e7be..15151f3c4120 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -75,6 +75,7 @@ #define CEPH_FEATURE_CRUSH_TUNABLES5 (1ULL<<58) /* chooseleaf stable mode */ // duplicated since it was introduced at the same time as CEPH_FEATURE_CRUSH_TUNABLES5 #define CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING (1ULL<<58) /* New, v7 encoding */ +#define CEPH_FEATURE_FS_FILE_LAYOUT_V2 (1ULL<<58) /* file_layout_t */ /* * The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 7f540f7f588d..3e39ae5bc799 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -45,6 +45,7 @@ enum { CSS_NO_REF = (1 << 0), /* no reference counting for this css */ CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ + CSS_VISIBLE = (1 << 3), /* css is visible to userland */ }; /* bits in struct cgroup flags field */ @@ -127,6 +128,12 @@ struct cgroup_subsys_state { */ u64 serial_nr; + /* + * Incremented by online self and children. Used to guarantee that + * parents are not offlined before their children. + */ + atomic_t online_cnt; + /* percpu_ref killing and RCU release */ struct rcu_head rcu_head; struct work_struct destroy_work; @@ -184,12 +191,13 @@ struct css_set { /* * If this cset is acting as the source of migration the following - * two fields are set. mg_src_cgrp is the source cgroup of the - * on-going migration and mg_dst_cset is the destination cset the - * target tasks on this cset should be migrated to. Protected by - * cgroup_mutex. + * two fields are set. mg_src_cgrp and mg_dst_cgrp are + * respectively the source and destination cgroups of the on-going + * migration. mg_dst_cset is the destination cset the target tasks + * on this cset should be migrated to. Protected by cgroup_mutex. */ struct cgroup *mg_src_cgrp; + struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; /* @@ -204,6 +212,9 @@ struct css_set { /* all css_task_iters currently walking this cset */ struct list_head task_iters; + /* dead and being drained, ignore for migration */ + bool dead; + /* For RCU-protected deletion */ struct rcu_head rcu_head; }; @@ -247,13 +258,14 @@ struct cgroup { /* * The bitmask of subsystems enabled on the child cgroups. 
* ->subtree_control is the one configured through - * "cgroup.subtree_control" while ->child_subsys_mask is the - * effective one which may have more subsystems enabled. - * Controller knobs are made available iff it's enabled in - * ->subtree_control. + * "cgroup.subtree_control" while ->child_ss_mask is the effective + * one which may have more subsystems enabled. Controller knobs + * are made available iff it's enabled in ->subtree_control. */ - unsigned int subtree_control; - unsigned int child_subsys_mask; + u16 subtree_control; + u16 subtree_ss_mask; + u16 old_subtree_control; + u16 old_subtree_ss_mask; /* Private pointers for each registered subsystem */ struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT]; @@ -428,7 +440,6 @@ struct cgroup_subsys { void (*css_released)(struct cgroup_subsys_state *css); void (*css_free)(struct cgroup_subsys_state *css); void (*css_reset)(struct cgroup_subsys_state *css); - void (*css_e_css_changed)(struct cgroup_subsys_state *css); int (*can_attach)(struct cgroup_taskset *tset); void (*cancel_attach)(struct cgroup_taskset *tset); @@ -440,7 +451,20 @@ struct cgroup_subsys { void (*free)(struct task_struct *task); void (*bind)(struct cgroup_subsys_state *root_css); - int early_init; + bool early_init:1; + + /* + * If %true, the controller, on the default hierarchy, doesn't show + * up in "cgroup.controllers" or "cgroup.subtree_control", is + * implicitly enabled on all cgroups on the default hierarchy, and + * bypasses the "no internal process" constraint. This is for + * utility type controllers which is transparent to userland. + * + * An implicit controller can be stolen from the default hierarchy + * anytime and thus must be okay with offline csses from previous + * hierarchies coexisting with csses for the current one. + */ + bool implicit_on_dfl:1; /* * If %false, this subsystem is properly hierarchical - @@ -454,8 +478,8 @@ struct cgroup_subsys { * cases. Eventually, all subsystems will be made properly * hierarchical and this will go away. */ - bool broken_hierarchy; - bool warned_broken_hierarchy; + bool broken_hierarchy:1; + bool warned_broken_hierarchy:1; /* the following two fields are initialized automtically during boot */ int id; diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index bdcf358dfce2..0d442e34c349 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -190,9 +190,9 @@ extern void clockevents_config_and_register(struct clock_event_device *dev, extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq); static inline void -clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec) +clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 maxsec) { - return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, minsec); + return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, maxsec); } extern void clockevents_suspend(void); diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 6013021a3b39..a307bf62974f 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -118,6 +118,23 @@ struct clocksource { /* simplify initialization of mask field */ #define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? 
((1ULL<<(bits))-1) : -1) +static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from) +{ + /* freq = cyc/from + * mult/2^shift = ns/cyc + * mult = ns/cyc * 2^shift + * mult = from/freq * 2^shift + * mult = from * 2^shift / freq + * mult = (from<<shift) / freq + */ + u64 tmp = ((u64)from) << shift_constant; + + tmp += freq/2; /* round for do_div */ + do_div(tmp, freq); + + return (u32)tmp; +} + /** * clocksource_khz2mult - calculates mult from khz and shift * @khz: Clocksource frequency in KHz @@ -128,19 +145,7 @@ struct clocksource { */ static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant) { - /* khz = cyc/(Million ns) - * mult/2^shift = ns/cyc - * mult = ns/cyc * 2^shift - * mult = 1Million/khz * 2^shift - * mult = 1000000 * 2^shift / khz - * mult = (1000000<<shift) / khz - */ - u64 tmp = ((u64)1000000) << shift_constant; - - tmp += khz/2; /* round for do_div */ - do_div(tmp, khz); - - return (u32)tmp; + return clocksource_freq2mult(khz, shift_constant, NSEC_PER_MSEC); } /** @@ -154,19 +159,7 @@ static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant) */ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) { - /* hz = cyc/(Billion ns) - * mult/2^shift = ns/cyc - * mult = ns/cyc * 2^shift - * mult = 1Billion/hz * 2^shift - * mult = 1000000000 * 2^shift / hz - * mult = (1000000000<<shift) / hz - */ - u64 tmp = ((u64)1000000000) << shift_constant; - - tmp += hz/2; /* round for do_div */ - do_div(tmp, hz); - - return (u32)tmp; + return clocksource_freq2mult(hz, shift_constant, NSEC_PER_SEC); } /** diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 4cd4ddf64cc7..d7c8de583a23 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -52,6 +52,10 @@ extern void compaction_defer_reset(struct zone *zone, int order, bool alloc_success); extern bool compaction_restarting(struct zone *zone, int order); +extern int kcompactd_run(int nid); +extern void kcompactd_stop(int nid); +extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx); + #else static inline unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order, int alloc_flags, @@ -84,6 +88,18 @@ static inline bool compaction_deferred(struct zone *zone, int order) return true; } +static inline int kcompactd_run(int nid) +{ + return 0; +} +static inline void kcompactd_stop(int nid) +{ +} + +static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) +{ +} + #endif /* CONFIG_COMPACTION */ #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 00b042c49ccd..b5ff9881bef8 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -20,12 +20,14 @@ # define __pmem __attribute__((noderef, address_space(5))) #ifdef CONFIG_SPARSE_RCU_POINTER # define __rcu __attribute__((noderef, address_space(4))) -#else +#else /* CONFIG_SPARSE_RCU_POINTER */ # define __rcu -#endif +#endif /* CONFIG_SPARSE_RCU_POINTER */ +# define __private __attribute__((noderef)) extern void __chk_user_ptr(const volatile void __user *); extern void __chk_io_ptr(const volatile void __iomem *); -#else +# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member)) +#else /* __CHECKER__ */ # define __user # define __kernel # define __safe @@ -44,7 +46,9 @@ extern void __chk_io_ptr(const volatile void __iomem *); # define __percpu # define __rcu # define __pmem -#endif +# define __private +# define 
ACCESS_PRIVATE(p, member) ((p)->member) +#endif /* __CHECKER__ */ /* Indirect macros required for expanded argument pasting, eg. __LINE__. */ #define ___PASTE(a,b) a##b @@ -144,7 +148,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); */ #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) #define __trace_if(cond) \ - if (__builtin_constant_p((cond)) ? !!(cond) : \ + if (__builtin_constant_p(!!(cond)) ? !!(cond) : \ ({ \ int ______r; \ static struct ftrace_branch_data \ @@ -263,8 +267,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s * In contrast to ACCESS_ONCE these two macros will also work on aggregate * data types like structs or unions. If the size of the accessed data * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) - * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a - * compile-time warning. + * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at + * least two memcpy()s: one for the __builtin_memcpy() and then one for + * the macro doing the copy of variable - '__u' allocated on the stack. * * Their two major use cases are: (1) Mediating communication between * process-level code and irq/NMI handlers, all running on the same CPU, diff --git a/include/linux/configfs.h b/include/linux/configfs.h index f8165c129ccb..485fe5519448 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -96,7 +96,8 @@ struct config_group { struct config_item cg_item; struct list_head cg_children; struct configfs_subsystem *cg_subsys; - struct config_group **default_groups; + struct list_head default_groups; + struct list_head group_entry; }; extern void config_group_init(struct config_group *group); @@ -123,6 +124,12 @@ extern struct config_item *config_group_find_item(struct config_group *, const char *); +static inline void configfs_add_default_group(struct config_group *new_group, + struct config_group *group) +{ + list_add_tail(&new_group->group_entry, &group->default_groups); +} + struct configfs_attribute { const char *ca_name; struct module *ca_owner; @@ -251,6 +258,8 @@ int configfs_register_group(struct config_group *parent_group, struct config_group *group); void configfs_unregister_group(struct config_group *group); +void configfs_remove_default_groups(struct config_group *group); + struct config_group * configfs_register_default_group(struct config_group *parent_group, const char *name, diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h new file mode 100644 index 000000000000..7d410260661b --- /dev/null +++ b/include/linux/coresight-pmu.h @@ -0,0 +1,39 @@ +/* + * Copyright(C) 2015 Linaro Limited. All rights reserved. + * Author: Mathieu Poirier <mathieu.poirier@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef _LINUX_CORESIGHT_PMU_H +#define _LINUX_CORESIGHT_PMU_H + +#define CORESIGHT_ETM_PMU_NAME "cs_etm" +#define CORESIGHT_ETM_PMU_SEED 0x10 + +/* ETMv3.5/PTM's ETMCR config bit */ +#define ETM_OPT_CYCACC 12 +#define ETM_OPT_TS 28 + +static inline int coresight_get_trace_id(int cpu) +{ + /* + * A trace ID of value 0 is invalid, so let's start at some + * random value that fits in 7 bits and go from there. Since + * the common convention is to have data trace IDs be I(N) + 1, + * set instruction trace IDs as a function of the CPU number. + */ + return (CORESIGHT_ETM_PMU_SEED + (cpu * 2)); +} + +#endif diff --git a/include/linux/coresight.h b/include/linux/coresight.h index a7cabfa23b55..385d62e64abb 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -14,6 +14,7 @@ #define _LINUX_CORESIGHT_H #include <linux/device.h> +#include <linux/perf_event.h> #include <linux/sched.h> /* Peripheral id registers (0xFD0-0xFEC) */ @@ -152,7 +153,6 @@ struct coresight_connection { by @coresight_ops. * @dev: The device entity associated to this component. * @refcnt: keep track of what is in use. - * @path_link: link of current component into the path being enabled. * @orphan: true if the component has connections that haven't been linked. * @enable: 'true' if component is currently part of an active path. * @activated: 'true' only if a _sink_ has been activated. A sink can be @@ -168,7 +168,6 @@ struct coresight_device { const struct coresight_ops *ops; struct device dev; atomic_t *refcnt; - struct list_head path_link; bool orphan; bool enable; /* true only if configured as part of a path */ bool activated; /* true only if a sink is part of a path */ @@ -183,12 +182,29 @@ struct coresight_device { /** * struct coresight_ops_sink - basic operations for a sink * Operations available for sinks - * @enable: enables the sink. - * @disable: disables the sink. + * @enable: enables the sink. + * @disable: disables the sink. + * @alloc_buffer: initialises perf's ring buffer for trace collection. + * @free_buffer: release memory allocated in @get_config. + * @set_buffer: initialises buffer mechanic before a trace session. + * @reset_buffer: finalises buffer mechanic after a trace session. + * @update_buffer: update buffer pointers after a trace session. */ struct coresight_ops_sink { - int (*enable)(struct coresight_device *csdev); + int (*enable)(struct coresight_device *csdev, u32 mode); void (*disable)(struct coresight_device *csdev); + void *(*alloc_buffer)(struct coresight_device *csdev, int cpu, + void **pages, int nr_pages, bool overwrite); + void (*free_buffer)(void *config); + int (*set_buffer)(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config); + unsigned long (*reset_buffer)(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config, bool *lost); + void (*update_buffer)(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config); }; /** @@ -205,14 +221,18 @@ struct coresight_ops_link { /** * struct coresight_ops_source - basic operations for a source * Operations available for sources. + * @cpu_id: returns the value of the CPU number this component + * is associated to. * @trace_id: returns the value of the component's trace ID as known - to the HW. + * to the HW. * @enable: enables tracing for a source. * @disable: disables tracing for a source. 
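/*
 * Illustrative sketch only, not part of this patch: how a source driver
 * might wire up the extended source operations (the struct definition
 * follows below). All "myetm_*" names are hypothetical; the trace ID is
 * derived with the coresight_get_trace_id() helper added above.
 */
static int myetm_cpu_id(struct coresight_device *csdev)
{
        return 0;                       /* CPU this tracer is affine to */
}

static int myetm_trace_id(struct coresight_device *csdev)
{
        return coresight_get_trace_id(0);
}

static int myetm_enable(struct coresight_device *csdev,
                        struct perf_event_attr *attr, u32 mode)
{
        return 0;                       /* program the tracer for sysfs or perf mode */
}

static void myetm_disable(struct coresight_device *csdev)
{
        /* stop trace generation */
}

static const struct coresight_ops_source myetm_source_ops = {
        .cpu_id         = myetm_cpu_id,
        .trace_id       = myetm_trace_id,
        .enable         = myetm_enable,
        .disable        = myetm_disable,
};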
*/ struct coresight_ops_source { + int (*cpu_id)(struct coresight_device *csdev); int (*trace_id)(struct coresight_device *csdev); - int (*enable)(struct coresight_device *csdev); + int (*enable)(struct coresight_device *csdev, + struct perf_event_attr *attr, u32 mode); void (*disable)(struct coresight_device *csdev); }; diff --git a/include/linux/cpu.h b/include/linux/cpu.h index d2ca8c38f9c4..f9b1fab4388a 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -16,6 +16,7 @@ #include <linux/node.h> #include <linux/compiler.h> #include <linux/cpumask.h> +#include <linux/cpuhotplug.h> struct device; struct device_node; @@ -27,6 +28,9 @@ struct cpu { struct device dev; }; +extern void boot_cpu_init(void); +extern void boot_cpu_state_init(void); + extern int register_cpu(struct cpu *cpu, int num); extern struct device *get_cpu_device(unsigned cpu); extern bool cpu_is_hotpluggable(unsigned cpu); @@ -74,7 +78,7 @@ enum { /* migration should happen before other stuff but after perf */ CPU_PRI_PERF = 20, CPU_PRI_MIGRATION = 10, - CPU_PRI_SMPBOOT = 9, + /* bring up workqueues before normal notifiers and down after */ CPU_PRI_WORKQUEUE_UP = 5, CPU_PRI_WORKQUEUE_DOWN = -5, @@ -97,9 +101,7 @@ enum { * Called on the new cpu, just before * enabling interrupts. Must not sleep, * must not fail */ -#define CPU_DYING_IDLE 0x000B /* CPU (unsigned)v dying, reached - * idle loop. */ -#define CPU_BROKEN 0x000C /* CPU (unsigned)v did not die properly, +#define CPU_BROKEN 0x000B /* CPU (unsigned)v did not die properly, * perhaps due to preemption. */ /* Used for CPU hotplug events occurring while tasks are frozen due to a suspend @@ -118,6 +120,7 @@ enum { #ifdef CONFIG_SMP +extern bool cpuhp_tasks_frozen; /* Need to know about CPUs going up/down? */ #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) #define cpu_notifier(fn, pri) { \ @@ -167,7 +170,6 @@ static inline void __unregister_cpu_notifier(struct notifier_block *nb) } #endif -void smpboot_thread_init(void); int cpu_up(unsigned int cpu); void notify_cpu_starting(unsigned int cpu); extern void cpu_maps_update_begin(void); @@ -177,6 +179,7 @@ extern void cpu_maps_update_done(void); #define cpu_notifier_register_done cpu_maps_update_done #else /* CONFIG_SMP */ +#define cpuhp_tasks_frozen 0 #define cpu_notifier(fn, pri) do { (void)(fn); } while (0) #define __cpu_notifier(fn, pri) do { (void)(fn); } while (0) @@ -215,10 +218,6 @@ static inline void cpu_notifier_register_done(void) { } -static inline void smpboot_thread_init(void) -{ -} - #endif /* CONFIG_SMP */ extern struct bus_type cpu_subsys; @@ -265,11 +264,6 @@ static inline int disable_nonboot_cpus(void) { return 0; } static inline void enable_nonboot_cpus(void) {} #endif /* !CONFIG_PM_SLEEP_SMP */ -enum cpuhp_state { - CPUHP_OFFLINE, - CPUHP_ONLINE, -}; - void cpu_startup_entry(enum cpuhp_state state); void cpu_idle_poll_ctrl(bool enable); @@ -280,14 +274,15 @@ void arch_cpu_idle_enter(void); void arch_cpu_idle_exit(void); void arch_cpu_idle_dead(void); -DECLARE_PER_CPU(bool, cpu_dead_idle); - int cpu_report_state(int cpu); int cpu_check_up_prepare(int cpu); void cpu_set_state_online(int cpu); #ifdef CONFIG_HOTPLUG_CPU bool cpu_wait_death(unsigned int cpu, int seconds); bool cpu_report_death(void); +void cpuhp_report_idle_dead(void); +#else +static inline void cpuhp_report_idle_dead(void) { } #endif /* #ifdef CONFIG_HOTPLUG_CPU */ #endif /* _LINUX_CPU_H_ */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 88a4215125bc..718e8725de8a 100644 --- a/include/linux/cpufreq.h +++ 
b/include/linux/cpufreq.h @@ -80,7 +80,6 @@ struct cpufreq_policy { unsigned int last_policy; /* policy before unplug */ struct cpufreq_governor *governor; /* see below */ void *governor_data; - bool governor_enabled; /* governor start/stop flag */ char last_governor[CPUFREQ_NAME_LEN]; /* last governor used */ struct work_struct update; /* if update_policy() needs to be @@ -100,10 +99,6 @@ struct cpufreq_policy { * - Any routine that will write to the policy structure and/or may take away * the policy altogether (eg. CPU hotplug), will hold this lock in write * mode before doing so. - * - * Additional rules: - * - Lock should not be held across - * __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); */ struct rw_semaphore rwsem; @@ -464,29 +459,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, int cpufreq_register_governor(struct cpufreq_governor *governor); void cpufreq_unregister_governor(struct cpufreq_governor *governor); -/* CPUFREQ DEFAULT GOVERNOR */ -/* - * Performance governor is fallback governor if any other gov failed to auto - * load due latency restrictions - */ -#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE -extern struct cpufreq_governor cpufreq_gov_performance; -#endif -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE -#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_performance) -#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE) -extern struct cpufreq_governor cpufreq_gov_powersave; -#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_powersave) -#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE) -extern struct cpufreq_governor cpufreq_gov_userspace; -#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_userspace) -#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND) -extern struct cpufreq_governor cpufreq_gov_ondemand; -#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_ondemand) -#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE) -extern struct cpufreq_governor cpufreq_gov_conservative; -#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative) -#endif +struct cpufreq_governor *cpufreq_default_governor(void); +struct cpufreq_governor *cpufreq_fallback_governor(void); /********************************************************************* * FREQUENCY TABLE HELPERS * @@ -525,16 +499,6 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev, } #endif -static inline bool cpufreq_next_valid(struct cpufreq_frequency_table **pos) -{ - while ((*pos)->frequency != CPUFREQ_TABLE_END) - if ((*pos)->frequency != CPUFREQ_ENTRY_INVALID) - return true; - else - (*pos)++; - return false; -} - /* * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table * @pos: the cpufreq_frequency_table * to use as a loop cursor. @@ -551,8 +515,11 @@ static inline bool cpufreq_next_valid(struct cpufreq_frequency_table **pos) * @table: the cpufreq_frequency_table * to iterate over. 
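/*
 * Illustrative sketch only, not part of this patch: a typical caller of
 * cpufreq_for_each_valid_entry(), which is unaffected by the open-coded
 * rewrite of the macro below. "lowest_valid_freq" is a hypothetical helper.
 */
static unsigned int lowest_valid_freq(struct cpufreq_frequency_table *table)
{
        struct cpufreq_frequency_table *pos;
        unsigned int min = UINT_MAX;

        cpufreq_for_each_valid_entry(pos, table)
                if (pos->frequency < min)
                        min = pos->frequency;

        return min;
}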
*/ -#define cpufreq_for_each_valid_entry(pos, table) \ - for (pos = table; cpufreq_next_valid(&pos); pos++) +#define cpufreq_for_each_valid_entry(pos, table) \ + for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) \ + if (pos->frequency == CPUFREQ_ENTRY_INVALID) \ + continue; \ + else int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h new file mode 100644 index 000000000000..5d68e15e46b7 --- /dev/null +++ b/include/linux/cpuhotplug.h @@ -0,0 +1,93 @@ +#ifndef __CPUHOTPLUG_H +#define __CPUHOTPLUG_H + +enum cpuhp_state { + CPUHP_OFFLINE, + CPUHP_CREATE_THREADS, + CPUHP_NOTIFY_PREPARE, + CPUHP_BRINGUP_CPU, + CPUHP_AP_IDLE_DEAD, + CPUHP_AP_OFFLINE, + CPUHP_AP_NOTIFY_STARTING, + CPUHP_AP_ONLINE, + CPUHP_TEARDOWN_CPU, + CPUHP_AP_ONLINE_IDLE, + CPUHP_AP_SMPBOOT_THREADS, + CPUHP_AP_NOTIFY_ONLINE, + CPUHP_AP_ONLINE_DYN, + CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, + CPUHP_ONLINE, +}; + +int __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke, + int (*startup)(unsigned int cpu), + int (*teardown)(unsigned int cpu)); + +/** + * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks + * @state: The state for which the calls are installed + * @name: Name of the callback (will be used in debug output) + * @startup: startup callback function + * @teardown: teardown callback function + * + * Installs the callback functions and invokes the startup callback on + * the present cpus which have already reached the @state. + */ +static inline int cpuhp_setup_state(enum cpuhp_state state, + const char *name, + int (*startup)(unsigned int cpu), + int (*teardown)(unsigned int cpu)) +{ + return __cpuhp_setup_state(state, name, true, startup, teardown); +} + +/** + * cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the + * callbacks + * @state: The state for which the calls are installed + * @name: Name of the callback. + * @startup: startup callback function + * @teardown: teardown callback function + * + * Same as @cpuhp_setup_state except that no calls are invoked + * during installation of this callback. NOP if SMP=n or HOTPLUG_CPU=n. + */ +static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state, + const char *name, + int (*startup)(unsigned int cpu), + int (*teardown)(unsigned int cpu)) +{ + return __cpuhp_setup_state(state, name, false, startup, teardown); +} + +void __cpuhp_remove_state(enum cpuhp_state state, bool invoke); + +/** + * cpuhp_remove_state - Remove hotplug state callbacks and invoke the teardown + * @state: The state for which the calls are removed + * + * Removes the callback functions and invokes the teardown callback on + * the present cpus which have already reached the @state. 
+ */ +static inline void cpuhp_remove_state(enum cpuhp_state state) +{ + __cpuhp_remove_state(state, true); +} + +/** + * cpuhp_remove_state_nocalls - Remove hotplug state callbacks without invoking + * teardown + * @state: The state for which the calls are removed + */ +static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state) +{ + __cpuhp_remove_state(state, false); +} + +#ifdef CONFIG_SMP +void cpuhp_online_idle(enum cpuhp_state state); +#else +static inline void cpuhp_online_idle(enum cpuhp_state state) { } +#endif + +#endif diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 85a868ccb493..fea160ee5803 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask) task_unlock(current); } +extern void cpuset_post_attach_flush(void); + #else /* !CONFIG_CPUSETS */ static inline bool cpusets_enabled(void) { return false; } @@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq) return false; } +static inline void cpuset_post_attach_flush(void) +{ +} + #endif /* !CONFIG_CPUSETS */ #endif /* _LINUX_CPUSET_H */ diff --git a/include/linux/crypto.h b/include/linux/crypto.h index e71cb70a1ac2..99c94899ad0f 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -54,7 +54,6 @@ #define CRYPTO_ALG_TYPE_AHASH 0x0000000a #define CRYPTO_ALG_TYPE_RNG 0x0000000c #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d -#define CRYPTO_ALG_TYPE_PCOMPRESS 0x0000000f #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c @@ -137,7 +136,6 @@ struct scatterlist; struct crypto_ablkcipher; struct crypto_async_request; struct crypto_blkcipher; -struct crypto_hash; struct crypto_tfm; struct crypto_type; struct skcipher_givcrypt_request; @@ -187,11 +185,6 @@ struct cipher_desc { void *info; }; -struct hash_desc { - struct crypto_hash *tfm; - u32 flags; -}; - /** * DOC: Block Cipher Algorithm Definitions * @@ -519,18 +512,6 @@ struct cipher_tfm { void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); }; -struct hash_tfm { - int (*init)(struct hash_desc *desc); - int (*update)(struct hash_desc *desc, - struct scatterlist *sg, unsigned int nsg); - int (*final)(struct hash_desc *desc, u8 *out); - int (*digest)(struct hash_desc *desc, struct scatterlist *sg, - unsigned int nsg, u8 *out); - int (*setkey)(struct crypto_hash *tfm, const u8 *key, - unsigned int keylen); - unsigned int digestsize; -}; - struct compress_tfm { int (*cot_compress)(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, @@ -543,7 +524,6 @@ struct compress_tfm { #define crt_ablkcipher crt_u.ablkcipher #define crt_blkcipher crt_u.blkcipher #define crt_cipher crt_u.cipher -#define crt_hash crt_u.hash #define crt_compress crt_u.compress struct crypto_tfm { @@ -554,7 +534,6 @@ struct crypto_tfm { struct ablkcipher_tfm ablkcipher; struct blkcipher_tfm blkcipher; struct cipher_tfm cipher; - struct hash_tfm hash; struct compress_tfm compress; } crt_u; @@ -581,10 +560,6 @@ struct crypto_comp { struct crypto_tfm base; }; -struct crypto_hash { - struct crypto_tfm base; -}; - enum { CRYPTOA_UNSPEC, CRYPTOA_ALG, @@ -1577,233 +1552,6 @@ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, dst, src); } -/** - * DOC: Synchronous Message Digest API - * - * The synchronous message digest API is used with the ciphers of type - * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto) - */ - -static inline struct crypto_hash *__crypto_hash_cast(struct 
crypto_tfm *tfm) -{ - return (struct crypto_hash *)tfm; -} - -static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm) -{ - BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_HASH) & - CRYPTO_ALG_TYPE_HASH_MASK); - return __crypto_hash_cast(tfm); -} - -/** - * crypto_alloc_hash() - allocate synchronous message digest handle - * @alg_name: is the cra_name / name or cra_driver_name / driver name of the - * message digest cipher - * @type: specifies the type of the cipher - * @mask: specifies the mask for the cipher - * - * Allocate a cipher handle for a message digest. The returned struct - * crypto_hash is the cipher handle that is required for any subsequent - * API invocation for that message digest. - * - * Return: allocated cipher handle in case of success; IS_ERR() is true in case - * of an error, PTR_ERR() returns the error code. - */ -static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name, - u32 type, u32 mask) -{ - type &= ~CRYPTO_ALG_TYPE_MASK; - mask &= ~CRYPTO_ALG_TYPE_MASK; - type |= CRYPTO_ALG_TYPE_HASH; - mask |= CRYPTO_ALG_TYPE_HASH_MASK; - - return __crypto_hash_cast(crypto_alloc_base(alg_name, type, mask)); -} - -static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm) -{ - return &tfm->base; -} - -/** - * crypto_free_hash() - zeroize and free message digest handle - * @tfm: cipher handle to be freed - */ -static inline void crypto_free_hash(struct crypto_hash *tfm) -{ - crypto_free_tfm(crypto_hash_tfm(tfm)); -} - -/** - * crypto_has_hash() - Search for the availability of a message digest - * @alg_name: is the cra_name / name or cra_driver_name / driver name of the - * message digest cipher - * @type: specifies the type of the cipher - * @mask: specifies the mask for the cipher - * - * Return: true when the message digest cipher is known to the kernel crypto - * API; false otherwise - */ -static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask) -{ - type &= ~CRYPTO_ALG_TYPE_MASK; - mask &= ~CRYPTO_ALG_TYPE_MASK; - type |= CRYPTO_ALG_TYPE_HASH; - mask |= CRYPTO_ALG_TYPE_HASH_MASK; - - return crypto_has_alg(alg_name, type, mask); -} - -static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm) -{ - return &crypto_hash_tfm(tfm)->crt_hash; -} - -/** - * crypto_hash_blocksize() - obtain block size for message digest - * @tfm: cipher handle - * - * The block size for the message digest cipher referenced with the cipher - * handle is returned. - * - * Return: block size of cipher - */ -static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm) -{ - return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm)); -} - -static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm) -{ - return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm)); -} - -/** - * crypto_hash_digestsize() - obtain message digest size - * @tfm: cipher handle - * - * The size for the message digest created by the message digest cipher - * referenced with the cipher handle is returned. 
- * - * Return: message digest size - */ -static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm) -{ - return crypto_hash_crt(tfm)->digestsize; -} - -static inline u32 crypto_hash_get_flags(struct crypto_hash *tfm) -{ - return crypto_tfm_get_flags(crypto_hash_tfm(tfm)); -} - -static inline void crypto_hash_set_flags(struct crypto_hash *tfm, u32 flags) -{ - crypto_tfm_set_flags(crypto_hash_tfm(tfm), flags); -} - -static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags) -{ - crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags); -} - -/** - * crypto_hash_init() - (re)initialize message digest handle - * @desc: cipher request handle that to be filled by caller -- - * desc.tfm is filled with the hash cipher handle; - * desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0. - * - * The call (re-)initializes the message digest referenced by the hash cipher - * request handle. Any potentially existing state created by previous - * operations is discarded. - * - * Return: 0 if the message digest initialization was successful; < 0 if an - * error occurred - */ -static inline int crypto_hash_init(struct hash_desc *desc) -{ - return crypto_hash_crt(desc->tfm)->init(desc); -} - -/** - * crypto_hash_update() - add data to message digest for processing - * @desc: cipher request handle - * @sg: scatter / gather list pointing to the data to be added to the message - * digest - * @nbytes: number of bytes to be processed from @sg - * - * Updates the message digest state of the cipher handle pointed to by the - * hash cipher request handle with the input data pointed to by the - * scatter/gather list. - * - * Return: 0 if the message digest update was successful; < 0 if an error - * occurred - */ -static inline int crypto_hash_update(struct hash_desc *desc, - struct scatterlist *sg, - unsigned int nbytes) -{ - return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes); -} - -/** - * crypto_hash_final() - calculate message digest - * @desc: cipher request handle - * @out: message digest output buffer -- The caller must ensure that the out - * buffer has a sufficient size (e.g. by using the crypto_hash_digestsize - * function). - * - * Finalize the message digest operation and create the message digest - * based on all data added to the cipher handle. The message digest is placed - * into the output buffer. - * - * Return: 0 if the message digest creation was successful; < 0 if an error - * occurred - */ -static inline int crypto_hash_final(struct hash_desc *desc, u8 *out) -{ - return crypto_hash_crt(desc->tfm)->final(desc, out); -} - -/** - * crypto_hash_digest() - calculate message digest for a buffer - * @desc: see crypto_hash_final() - * @sg: see crypto_hash_update() - * @nbytes: see crypto_hash_update() - * @out: see crypto_hash_final() - * - * This function is a "short-hand" for the function calls of crypto_hash_init, - * crypto_hash_update and crypto_hash_final. The parameters have the same - * meaning as discussed for those separate three functions. 
- * - * Return: 0 if the message digest creation was successful; < 0 if an error - * occurred - */ -static inline int crypto_hash_digest(struct hash_desc *desc, - struct scatterlist *sg, - unsigned int nbytes, u8 *out) -{ - return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out); -} - -/** - * crypto_hash_setkey() - set key for message digest - * @hash: cipher handle - * @key: buffer holding the key - * @keylen: length of the key in bytes - * - * The caller provided key is set for the message digest cipher. The cipher - * handle must point to a keyed hash in order for this function to succeed. - * - * Return: 0 if the setting of the key was successful; < 0 if an error occurred - */ -static inline int crypto_hash_setkey(struct crypto_hash *hash, - const u8 *key, unsigned int keylen) -{ - return crypto_hash_crt(hash)->setkey(hash, key, keylen); -} - static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm) { return (struct crypto_comp *)tfm; diff --git a/include/linux/davinci_emac.h b/include/linux/davinci_emac.h index 542888504994..05b97144d342 100644 --- a/include/linux/davinci_emac.h +++ b/include/linux/davinci_emac.h @@ -12,7 +12,7 @@ #define _LINUX_DAVINCI_EMAC_H #include <linux/if_ether.h> -#include <linux/memory.h> +#include <linux/nvmem-consumer.h> struct mdio_platform_data { unsigned long bus_freq; @@ -46,5 +46,5 @@ enum { EMAC_VERSION_2, /* DM646x */ }; -void davinci_get_mac_addr(struct memory_accessor *mem_acc, void *context); +void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context); #endif diff --git a/include/linux/dax.h b/include/linux/dax.h index 818e45078929..636dd59ab505 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -7,7 +7,7 @@ ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t, get_block_t, dio_iodone_t, int flags); -int dax_clear_blocks(struct inode *, sector_t block, long size); +int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size); int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); int dax_truncate_page(struct inode *, loff_t from, get_block_t); int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t, @@ -52,6 +52,8 @@ static inline bool dax_mapping(struct address_space *mapping) { return mapping->host && IS_DAX(mapping->host); } -int dax_writeback_mapping_range(struct address_space *mapping, loff_t start, - loff_t end); + +struct writeback_control; +int dax_writeback_mapping_range(struct address_space *mapping, + struct block_device *bdev, struct writeback_control *wbc); #endif diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 7781ce110503..c4b5f4b3f8f8 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -409,9 +409,7 @@ static inline bool d_mountpoint(const struct dentry *dentry) */ static inline unsigned __d_entry_type(const struct dentry *dentry) { - unsigned type = READ_ONCE(dentry->d_flags); - smp_rmb(); - return type & DCACHE_ENTRY_TYPE; + return dentry->d_flags & DCACHE_ENTRY_TYPE; } static inline bool d_is_miss(const struct dentry *dentry) diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 19c066dce1da..981e53ab84e8 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -162,6 +162,14 @@ static inline struct dentry *debugfs_create_symlink(const char *name, return ERR_PTR(-ENODEV); } +static inline struct dentry *debugfs_create_automount(const char *name, + struct dentry *parent, + struct vfsmount *(*f)(void *), + void *data) +{ + return 
ERR_PTR(-ENODEV); +} + static inline void debugfs_remove(struct dentry *dentry) { } diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index ec1c61c87d89..0830c9e86f0d 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -124,6 +124,8 @@ struct dm_dev { char name[16]; }; +dev_t dm_get_dev_t(const char *path); + /* * Constructors should call these functions to ensure destination devices * are opened/closed correctly. @@ -190,6 +192,13 @@ struct target_type { #define dm_target_is_immutable(type) ((type)->features & DM_TARGET_IMMUTABLE) /* + * Indicates that a target may replace any target; even immutable targets. + * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined. + */ +#define DM_TARGET_WILDCARD 0x00000008 +#define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD) + +/* * Some targets need to be sent the same WRITE bio severals times so * that they can send copies of it to different devices. This function * examines any supplied bio and returns the number of copies of it the @@ -231,10 +240,10 @@ struct dm_target { unsigned num_write_same_bios; /* - * The minimum number of extra bytes allocated in each bio for the - * target to use. dm_per_bio_data returns the data location. + * The minimum number of extra bytes allocated in each io for the + * target to use. */ - unsigned per_bio_data_size; + unsigned per_io_data_size; /* * If defined, this function is called to find out how many diff --git a/include/linux/device.h b/include/linux/device.h index 6d6f1fec092f..2d0e6e541d52 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -958,6 +958,11 @@ static inline void device_lock(struct device *dev) mutex_lock(&dev->mutex); } +static inline int device_lock_interruptible(struct device *dev) +{ + return mutex_lock_interruptible(&dev->mutex); +} + static inline int device_trylock(struct device *dev) { return mutex_trylock(&dev->mutex); diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h index 251a2090a554..e0ee0b3000b2 100644 --- a/include/linux/devpts_fs.h +++ b/include/linux/devpts_fs.h @@ -19,6 +19,8 @@ int devpts_new_index(struct inode *ptmx_inode); void devpts_kill_index(struct inode *ptmx_inode, int idx); +void devpts_add_ref(struct inode *ptmx_inode); +void devpts_del_ref(struct inode *ptmx_inode); /* mknod in devpts */ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index, void *priv); @@ -32,6 +34,8 @@ void devpts_pty_kill(struct inode *inode); /* Dummy stubs in the no-pty case */ static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; } static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { } +static inline void devpts_add_ref(struct inode *ptmx_inode) { } +static inline void devpts_del_ref(struct inode *ptmx_inode) { } static inline struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index, void *priv) { diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 75857cda38e9..5e45cf930a3f 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -386,7 +386,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size, if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) return; - if (!ops->free) + if (!ops->free || !cpu_addr) return; debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); @@ -641,31 +641,40 @@ static inline void dmam_release_declared_memory(struct device *dev) } #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT 
*/ -static inline void *dma_alloc_writecombine(struct device *dev, size_t size, - dma_addr_t *dma_addr, gfp_t gfp) +static inline void *dma_alloc_wc(struct device *dev, size_t size, + dma_addr_t *dma_addr, gfp_t gfp) { DEFINE_DMA_ATTRS(attrs); dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs); } +#ifndef dma_alloc_writecombine +#define dma_alloc_writecombine dma_alloc_wc +#endif -static inline void dma_free_writecombine(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_addr) +static inline void dma_free_wc(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_addr) { DEFINE_DMA_ATTRS(attrs); dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs); } +#ifndef dma_free_writecombine +#define dma_free_writecombine dma_free_wc +#endif -static inline int dma_mmap_writecombine(struct device *dev, - struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, - size_t size) +static inline int dma_mmap_wc(struct device *dev, + struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, + size_t size) { DEFINE_DMA_ATTRS(attrs); dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); } +#ifndef dma_mmap_writecombine +#define dma_mmap_writecombine dma_mmap_wc +#endif #ifdef CONFIG_NEED_DMA_MAP_STATE #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 16a1cad30c33..017433712833 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -357,8 +357,8 @@ enum dma_slave_buswidth { */ struct dma_slave_config { enum dma_transfer_direction direction; - dma_addr_t src_addr; - dma_addr_t dst_addr; + phys_addr_t src_addr; + phys_addr_t dst_addr; enum dma_slave_buswidth src_addr_width; enum dma_slave_buswidth dst_addr_width; u32 src_maxburst; @@ -401,6 +401,7 @@ enum dma_residue_granularity { * since the enum dma_transfer_direction is not defined as bits for each * type of direction, the dma controller should fill (1 << <TYPE>) and same * should be checked by controller as well + * @max_burst: max burst capability per-transfer * @cmd_pause: true, if pause and thereby resume is supported * @cmd_terminate: true, if terminate cmd is supported * @residue_granularity: granularity of the reported transfer residue @@ -411,6 +412,7 @@ struct dma_slave_caps { u32 src_addr_widths; u32 dst_addr_widths; u32 directions; + u32 max_burst; bool cmd_pause; bool cmd_terminate; enum dma_residue_granularity residue_granularity; @@ -654,6 +656,7 @@ struct dma_filter { * the enum dma_transfer_direction is not defined as bits for * each type of direction, the dma controller should fill (1 << * <TYPE>) and same should be checked by controller as well + * @max_burst: max burst capability per-transfer * @residue_granularity: granularity of the transfer residue reported * by tx_status * @device_alloc_chan_resources: allocate resources and return the @@ -712,6 +715,7 @@ struct dma_device { u32 src_addr_widths; u32 dst_addr_widths; u32 directions; + u32 max_burst; bool descriptor_reuse; enum dma_residue_granularity residue_granularity; diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h index 06791811e49d..885f587a3555 100644 --- a/include/linux/eeprom_93xx46.h +++ b/include/linux/eeprom_93xx46.h @@ -3,16 +3,25 @@ * platform description for 93xx46 EEPROMs. 
*/ +struct gpio_desc; + struct eeprom_93xx46_platform_data { unsigned char flags; #define EE_ADDR8 0x01 /* 8 bit addr. cfg */ #define EE_ADDR16 0x02 /* 16 bit addr. cfg */ #define EE_READONLY 0x08 /* forbid writing */ + unsigned int quirks; +/* Single word read transfers only; no sequential read. */ +#define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0) +/* Instructions such as EWEN are (addrlen + 2) in length. */ +#define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1) + /* * optional hooks to control additional logic * before and after spi transfer. */ void (*prepare)(void *); void (*finish)(void *); + struct gpio_desc *select; }; diff --git a/include/linux/efi.h b/include/linux/efi.h index 569b5a866bb1..333d0ca6940f 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -299,7 +299,7 @@ typedef struct { void *open_protocol_information; void *protocols_per_handle; void *locate_handle_buffer; - void *locate_protocol; + efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **); void *install_multiple_protocol_interfaces; void *uninstall_multiple_protocol_interfaces; void *calculate_crc32; @@ -599,6 +599,10 @@ void efi_native_runtime_setup(void); #define EFI_PROPERTIES_TABLE_GUID \ EFI_GUID( 0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5 ) +#define EFI_RNG_PROTOCOL_GUID \ + EFI_GUID(0x3152bca5, 0xeade, 0x433d, \ + 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44) + typedef struct { efi_guid_t guid; u64 table; @@ -1199,7 +1203,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *), struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, struct list_head *head, bool remove); -bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len); +bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, + unsigned long data_size); +bool efivar_variable_is_removable(efi_guid_t vendor, const char *name, + size_t len); extern struct work_struct efivar_work; void efivar_run_worker(void); diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 653dc9c4ebac..e2b7bf27c03e 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -12,6 +12,7 @@ #ifndef _LINUX_ETHTOOL_H #define _LINUX_ETHTOOL_H +#include <linux/bitmap.h> #include <linux/compat.h> #include <uapi/linux/ethtool.h> @@ -40,9 +41,6 @@ struct compat_ethtool_rxnfc { #include <linux/rculist.h> -extern int __ethtool_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd); - /** * enum ethtool_phys_id_state - indicator state for physical identification * @ETHTOOL_ID_INACTIVE: Physical ID indicator should be deactivated @@ -97,13 +95,70 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) return index % n_rx_rings; } +/* number of link mode bits/ulongs handled internally by kernel */ +#define __ETHTOOL_LINK_MODE_MASK_NBITS \ + (__ETHTOOL_LINK_MODE_LAST + 1) + +/* declare a link mode bitmap */ +#define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \ + DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS) + +/* drivers must ignore base.cmd and base.link_mode_masks_nwords + * fields, but they are allowed to overwrite them (will be ignored). 
+ */ +struct ethtool_link_ksettings { + struct ethtool_link_settings base; + struct { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); + } link_modes; +}; + +/** + * ethtool_link_ksettings_zero_link_mode - clear link_ksettings link mode mask + * @ptr : pointer to struct ethtool_link_ksettings + * @name : one of supported/advertising/lp_advertising + */ +#define ethtool_link_ksettings_zero_link_mode(ptr, name) \ + bitmap_zero((ptr)->link_modes.name, __ETHTOOL_LINK_MODE_MASK_NBITS) + +/** + * ethtool_link_ksettings_add_link_mode - set bit in link_ksettings + * link mode mask + * @ptr : pointer to struct ethtool_link_ksettings + * @name : one of supported/advertising/lp_advertising + * @mode : one of the ETHTOOL_LINK_MODE_*_BIT + * (not atomic, no bound checking) + */ +#define ethtool_link_ksettings_add_link_mode(ptr, name, mode) \ + __set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) + +/** + * ethtool_link_ksettings_test_link_mode - test bit in ksettings link mode mask + * @ptr : pointer to struct ethtool_link_ksettings + * @name : one of supported/advertising/lp_advertising + * @mode : one of the ETHTOOL_LINK_MODE_*_BIT + * (not atomic, no bound checking) + * + * Returns true/false. + */ +#define ethtool_link_ksettings_test_link_mode(ptr, name, mode) \ + test_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) + +extern int +__ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *link_ksettings); + /** * struct ethtool_ops - optional netdev operations - * @get_settings: Get various device settings including Ethernet link + * @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings + * API. Get various device settings including Ethernet link * settings. The @cmd parameter is expected to have been cleared - * before get_settings is called. Returns a negative error code or - * zero. - * @set_settings: Set various device settings including Ethernet link + * before get_settings is called. Returns a negative error code + * or zero. + * @set_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings + * API. Set various device settings including Ethernet link * settings. Returns a negative error code or zero. * @get_drvinfo: Report driver/device information. Should only set the * @driver, @version, @fw_version and @bus_info fields. If not @@ -201,6 +256,29 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) * @get_module_eeprom: Get the eeprom information from the plug-in module * @get_eee: Get Energy-Efficient (EEE) supported and status. * @set_eee: Set EEE status (enable/disable) as well as LPI timers. + * @get_per_queue_coalesce: Get interrupt coalescing parameters per queue. + * It must check that the given queue number is valid. If neither a RX nor + * a TX queue has this number, return -EINVAL. If only a RX queue or a TX + * queue has this number, set the inapplicable fields to ~0 and return 0. + * Returns a negative error code or zero. + * @set_per_queue_coalesce: Set interrupt coalescing parameters per queue. + * It must check that the given queue number is valid. If neither a RX nor + * a TX queue has this number, return -EINVAL. If only a RX queue or a TX + * queue has this number, ignore the inapplicable fields. + * Returns a negative error code or zero. + * @get_link_ksettings: When defined, takes precedence over the + * %get_settings method. 
Get various device settings + * including Ethernet link settings. The %cmd and + * %link_mode_masks_nwords fields should be ignored (use + * %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), any + * change to them will be overwritten by kernel. Returns a + * negative error code or zero. + * @set_link_ksettings: When defined, takes precedence over the + * %set_settings method. Set various device settings including + * Ethernet link settings. The %cmd and %link_mode_masks_nwords + * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS + * instead of the latter), any change to them will be overwritten + * by kernel. Returns a negative error code or zero. * * All operations are optional (i.e. the function pointer may be set * to %NULL) and callers must take this into account. Callers must @@ -279,7 +357,13 @@ struct ethtool_ops { const struct ethtool_tunable *, void *); int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); - - + int (*get_per_queue_coalesce)(struct net_device *, u32, + struct ethtool_coalesce *); + int (*set_per_queue_coalesce)(struct net_device *, u32, + struct ethtool_coalesce *); + int (*get_link_ksettings)(struct net_device *, + struct ethtool_link_ksettings *); + int (*set_link_ksettings)(struct net_device *, + const struct ethtool_link_ksettings *); }; #endif /* _LINUX_ETHTOOL_H */ diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index fa05e04c5531..d8414502edb4 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -97,6 +97,12 @@ enum fid_type { FILEID_FAT_WITH_PARENT = 0x72, /* + * 128 bit child FID (struct lu_fid) + * 128 bit parent FID (struct lu_fid) + */ + FILEID_LUSTRE = 0x97, + + /* * Filesystems must not use 0xff file ID. */ FILEID_INVALID = 0xff, diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index 3159a7dba034..9f4956d8601c 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -62,10 +62,9 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name, #endif /* CONFIG_FAULT_INJECTION */ #ifdef CONFIG_FAILSLAB -extern bool should_failslab(size_t size, gfp_t gfpflags, unsigned long flags); +extern bool should_failslab(struct kmem_cache *s, gfp_t gfpflags); #else -static inline bool should_failslab(size_t size, gfp_t gfpflags, - unsigned long flags) +static inline bool should_failslab(struct kmem_cache *s, gfp_t gfpflags) { return false; } diff --git a/include/linux/fb.h b/include/linux/fb.h index 55433f86f0a3..dfe88351341f 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -296,9 +296,6 @@ struct fb_ops { /* Draws cursor */ int (*fb_cursor) (struct fb_info *info, struct fb_cursor *cursor); - /* Rotates the display */ - void (*fb_rotate)(struct fb_info *info, int angle); - /* wait for blit idle, optional */ int (*fb_sync)(struct fb_info *info); diff --git a/include/linux/fence.h b/include/linux/fence.h index bb522011383b..605bd88246a6 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h @@ -79,6 +79,8 @@ struct fence { unsigned long flags; ktime_t timestamp; int status; + struct list_head child_list; + struct list_head active_list; }; enum fence_flag_bits { diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 6b7fd9cf5ea2..dd03e837ebb7 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -231,7 +231,7 @@ static inline long freezable_schedule_timeout_killable_unsafe(long timeout) * call this with locks held. 
*/ static inline int freezable_schedule_hrtimeout_range(ktime_t *expires, - unsigned long delta, const enum hrtimer_mode mode) + u64 delta, const enum hrtimer_mode mode) { int __retval; freezer_do_not_count(); diff --git a/include/linux/fs.h b/include/linux/fs.h index ae681002100a..e514f76db04f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2576,7 +2576,22 @@ static inline void i_readcount_inc(struct inode *inode) #endif extern int do_pipe_flags(int *, int); +enum kernel_read_file_id { + READING_FIRMWARE = 1, + READING_MODULE, + READING_KEXEC_IMAGE, + READING_KEXEC_INITRAMFS, + READING_POLICY, + READING_MAX_ID +}; + extern int kernel_read(struct file *, loff_t, char *, unsigned long); +extern int kernel_read_file(struct file *, void **, loff_t *, loff_t, + enum kernel_read_file_id); +extern int kernel_read_file_from_path(char *, void **, loff_t *, loff_t, + enum kernel_read_file_id); +extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t, + enum kernel_read_file_id); extern ssize_t kernel_write(struct file *, const char *, size_t, loff_t); extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *); extern struct file * open_exec(const char *); diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 6b7e89f45aa4..533c4408529a 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -220,10 +220,7 @@ struct fsnotify_mark { /* List of marks by group->i_fsnotify_marks. Also reused for queueing * mark into destroy_list when it's waiting for the end of SRCU period * before it can be freed. [group->mark_mutex] */ - union { - struct list_head g_list; - struct rcu_head g_rcu; - }; + struct list_head g_list; /* Protects inode / mnt pointers, flags, masks */ spinlock_t lock; /* List of marks for inode / vfsmount [obj_lock] */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 81de7123959d..6d9df3f7e334 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -603,6 +603,7 @@ extern int ftrace_arch_read_dyn_info(char *buf, int size); extern int skip_trace(unsigned long ip); extern void ftrace_module_init(struct module *mod); +extern void ftrace_module_enable(struct module *mod); extern void ftrace_release_mod(struct module *mod); extern void ftrace_disable_daemon(void); @@ -612,8 +613,9 @@ static inline int skip_trace(unsigned long ip) { return 0; } static inline int ftrace_force_update(void) { return 0; } static inline void ftrace_disable_daemon(void) { } static inline void ftrace_enable_daemon(void) { } -static inline void ftrace_release_mod(struct module *mod) {} -static inline void ftrace_module_init(struct module *mod) {} +static inline void ftrace_module_init(struct module *mod) { } +static inline void ftrace_module_enable(struct module *mod) { } +static inline void ftrace_release_mod(struct module *mod) { } static inline __init int register_ftrace_command(struct ftrace_func_command *cmd) { return -EINVAL; @@ -711,6 +713,18 @@ static inline void __ftrace_enabled_restore(int enabled) #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5)) #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6)) +static inline unsigned long get_lock_parent_ip(void) +{ + unsigned long addr = CALLER_ADDR0; + + if (!in_lock_functions(addr)) + return addr; + addr = CALLER_ADDR1; + if (!in_lock_functions(addr)) + return addr; + return CALLER_ADDR2; +} + #ifdef CONFIG_IRQSOFF_TRACER extern void time_hardirqs_on(unsigned long a0, unsigned long a1); extern void 
time_hardirqs_off(unsigned long a0, unsigned long a1); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index af1f2b24bbe4..570383a41853 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -9,6 +9,11 @@ struct vm_area_struct; +/* + * In case of changes, please don't forget to update + * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c + */ + /* Plain integer GFP bitmasks. Do not use this directly. */ #define ___GFP_DMA 0x01u #define ___GFP_HIGHMEM 0x02u @@ -48,7 +53,6 @@ struct vm_area_struct; #define __GFP_DMA ((__force gfp_t)___GFP_DMA) #define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM) #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32) -#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */ #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */ #define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) @@ -101,8 +105,6 @@ struct vm_area_struct; * * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves. * This takes precedence over the __GFP_MEMALLOC flag if both are set. - * - * __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement. */ #define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC) #define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) @@ -255,7 +257,7 @@ struct vm_area_struct; #define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) #define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \ - ~__GFP_KSWAPD_RECLAIM) + ~__GFP_RECLAIM) /* Convert GFP flags to their corresponding migrate type */ #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) @@ -329,22 +331,29 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) * 0xe => BAD (MOVABLE+DMA32+HIGHMEM) * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA) * - * ZONES_SHIFT must be <= 2 on 32 bit platforms. + * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms. 
*/ -#if 16 * ZONES_SHIFT > BITS_PER_LONG -#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer +#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4 +/* ZONE_DEVICE is not a valid GFP zone specifier */ +#define GFP_ZONES_SHIFT 2 +#else +#define GFP_ZONES_SHIFT ZONES_SHIFT +#endif + +#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG +#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer #endif #define GFP_ZONE_TABLE ( \ - (ZONE_NORMAL << 0 * ZONES_SHIFT) \ - | (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT) \ - | (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT) \ - | (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT) \ - | (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT) \ - | (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT) \ - | (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT) \ - | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT) \ + (ZONE_NORMAL << 0 * GFP_ZONES_SHIFT) \ + | (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT) \ + | (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT) \ + | (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT) \ + | (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT) \ + | (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT) \ + | (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\ + | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\ ) /* @@ -369,8 +378,8 @@ static inline enum zone_type gfp_zone(gfp_t flags) enum zone_type z; int bit = (__force int) (flags & GFP_ZONEMASK); - z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) & - ((1 << ZONES_SHIFT) - 1); + z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) & + ((1 << GFP_ZONES_SHIFT) - 1); VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1); return z; } @@ -515,13 +524,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); void drain_all_pages(struct zone *zone); void drain_local_pages(struct zone *zone); -#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT void page_alloc_init_late(void); -#else -static inline void page_alloc_init_late(void) -{ -} -#endif /* * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 82fda487453f..bee976f82788 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -1,6 +1,7 @@ #ifndef __LINUX_GPIO_DRIVER_H #define __LINUX_GPIO_DRIVER_H +#include <linux/device.h> #include <linux/types.h> #include <linux/module.h> #include <linux/irq.h> @@ -10,22 +11,21 @@ #include <linux/pinctrl/pinctrl.h> #include <linux/kconfig.h> -struct device; struct gpio_desc; struct of_phandle_args; struct device_node; struct seq_file; +struct gpio_device; #ifdef CONFIG_GPIOLIB /** * struct gpio_chip - abstract a GPIO controller - * @label: for diagnostics + * @label: a functional name for the GPIO device, such as a part + * number or the name of the SoC IP-block implementing it. + * @gpiodev: the internal state holder, opaque struct * @parent: optional parent device providing the GPIOs - * @cdev: class device used by sysfs interface (may be NULL) * @owner: helps prevent removal of modules exporting active GPIOs - * @data: per-instance data assigned by the driver - * @list: links gpio_chips together for traversal * @request: optional hook for chip-specific activation, such as * enabling module power and clock; may sleep * @free: optional hook for chip-specific deactivation, such as @@ -52,7 +52,6 @@ struct seq_file; * get rid of the static GPIO number space in the long run. 
* @ngpio: the number of GPIOs handled by this controller; the last GPIO * handled is (base + ngpio - 1). - * @desc: array of ngpio descriptors. Private. * @names: if set, must be an array of strings to use as alternative * names for the GPIOs in this chip. Any entry in the array * may be NULL if there is no alias for the GPIO, however the @@ -107,11 +106,9 @@ struct seq_file; */ struct gpio_chip { const char *label; + struct gpio_device *gpiodev; struct device *parent; - struct device *cdev; struct module *owner; - void *data; - struct list_head list; int (*request)(struct gpio_chip *chip, unsigned offset); @@ -141,7 +138,6 @@ struct gpio_chip { struct gpio_chip *chip); int base; u16 ngpio; - struct gpio_desc *desc; const char *const *names; bool can_sleep; bool irq_not_threaded; @@ -184,15 +180,6 @@ struct gpio_chip { int (*of_xlate)(struct gpio_chip *gc, const struct of_phandle_args *gpiospec, u32 *flags); #endif -#ifdef CONFIG_PINCTRL - /* - * If CONFIG_PINCTRL is enabled, then gpio controllers can optionally - * describe the actual pin range which they serve in an SoC. This - * information would be used by pinctrl subsystem to configure - * corresponding pins for gpio usage. - */ - struct list_head pin_ranges; -#endif }; extern const char *gpiochip_is_requested(struct gpio_chip *chip, @@ -205,18 +192,24 @@ static inline int gpiochip_add(struct gpio_chip *chip) return gpiochip_add_data(chip, NULL); } extern void gpiochip_remove(struct gpio_chip *chip); +extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip, + void *data); +extern void devm_gpiochip_remove(struct device *dev, struct gpio_chip *chip); + extern struct gpio_chip *gpiochip_find(void *data, int (*match)(struct gpio_chip *chip, void *data)); /* lock/unlock as IRQ */ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset); void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); +bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset); + +/* Line status inquiry for drivers */ +bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset); +bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset); /* get driver data */ -static inline void *gpiochip_get_data(struct gpio_chip *chip) -{ - return chip->data; -} +void *gpiochip_get_data(struct gpio_chip *chip); struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 2ead22dd74a0..c98c6539e2c2 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -220,7 +220,7 @@ static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time timer->node.expires = ktime_add_safe(time, delta); } -static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta) +static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta) { timer->_softexpires = time; timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta)); @@ -378,7 +378,7 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { } /* Basic timer operations: */ extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, - unsigned long range_ns, const enum hrtimer_mode mode); + u64 range_ns, const enum hrtimer_mode mode); /** * hrtimer_start - (re)start an hrtimer on the current CPU @@ -399,7 +399,7 @@ extern int hrtimer_try_to_cancel(struct hrtimer *timer); static inline void hrtimer_start_expires(struct hrtimer *timer, enum 
hrtimer_mode mode) { - unsigned long delta; + u64 delta; ktime_t soft, hard; soft = hrtimer_get_softexpires(timer); hard = hrtimer_get_expires(timer); @@ -477,10 +477,12 @@ extern long hrtimer_nanosleep_restart(struct restart_block *restart_block); extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *tsk); -extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, +extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta, const enum hrtimer_mode mode); extern int schedule_hrtimeout_range_clock(ktime_t *expires, - unsigned long delta, const enum hrtimer_mode mode, int clock); + u64 delta, + const enum hrtimer_mode mode, + int clock); extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); /* Soft interrupt function to run the hrtimer queues: */ diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index f12513a20a06..79b0ef6aaa14 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -41,7 +41,8 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *, enum transparent_hugepage_flag { TRANSPARENT_HUGEPAGE_FLAG, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, - TRANSPARENT_HUGEPAGE_DEFRAG_FLAG, + TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, + TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG, TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG, @@ -71,12 +72,6 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma); ((__vma)->vm_flags & VM_HUGEPAGE))) && \ !((__vma)->vm_flags & VM_NOHUGEPAGE) && \ !is_vma_temporary_stack(__vma)) -#define transparent_hugepage_defrag(__vma) \ - ((transparent_hugepage_flags & \ - (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \ - (transparent_hugepage_flags & \ - (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) && \ - (__vma)->vm_flags & VM_HUGEPAGE)) #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) @@ -101,16 +96,21 @@ static inline int split_huge_page(struct page *page) void deferred_split_huge_page(struct page *page); void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, - unsigned long address); + unsigned long address, bool freeze); #define split_huge_pmd(__vma, __pmd, __address) \ do { \ pmd_t *____pmd = (__pmd); \ if (pmd_trans_huge(*____pmd) \ || pmd_devmap(*____pmd)) \ - __split_huge_pmd(__vma, __pmd, __address); \ + __split_huge_pmd(__vma, __pmd, __address, \ + false); \ } while (0) + +void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, + bool freeze, struct page *page); + extern int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice); extern void vma_adjust_trans_huge(struct vm_area_struct *vma, @@ -175,6 +175,10 @@ static inline int split_huge_page(struct page *page) static inline void deferred_split_huge_page(struct page *page) {} #define split_huge_pmd(__vma, __pmd, __address) \ do { } while (0) + +static inline void split_huge_pmd_address(struct vm_area_struct *vma, + unsigned long address, bool freeze, struct page *page) {} + static inline int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) { diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 753dbad0bf94..aa0fadce9308 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -235,6 +235,7 @@ struct vmbus_channel_offer { #define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100 #define VMBUS_CHANNEL_PARENT_OFFER 0x200 #define 
VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400 +#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000 struct vmpacket_descriptor { u16 type; @@ -391,6 +392,10 @@ enum vmbus_channel_message_type { CHANNELMSG_VERSION_RESPONSE = 15, CHANNELMSG_UNLOAD = 16, CHANNELMSG_UNLOAD_RESPONSE = 17, + CHANNELMSG_18 = 18, + CHANNELMSG_19 = 19, + CHANNELMSG_20 = 20, + CHANNELMSG_TL_CONNECT_REQUEST = 21, CHANNELMSG_COUNT }; @@ -561,6 +566,13 @@ struct vmbus_channel_initiate_contact { u64 monitor_page2; } __packed; +/* Hyper-V socket: guest's connect()-ing to host */ +struct vmbus_channel_tl_connect_request { + struct vmbus_channel_message_header header; + uuid_le guest_endpoint_id; + uuid_le host_service_id; +} __packed; + struct vmbus_channel_version_response { struct vmbus_channel_message_header header; u8 version_supported; @@ -633,6 +645,32 @@ enum hv_signal_policy { HV_SIGNAL_POLICY_EXPLICIT, }; +enum vmbus_device_type { + HV_IDE = 0, + HV_SCSI, + HV_FC, + HV_NIC, + HV_ND, + HV_PCIE, + HV_FB, + HV_KBD, + HV_MOUSE, + HV_KVP, + HV_TS, + HV_HB, + HV_SHUTDOWN, + HV_FCOPY, + HV_BACKUP, + HV_DM, + HV_UNKOWN, +}; + +struct vmbus_device { + u16 dev_type; + uuid_le guid; + bool perf_device; +}; + struct vmbus_channel { /* Unique channel id */ int id; @@ -728,6 +766,12 @@ struct vmbus_channel { void (*sc_creation_callback)(struct vmbus_channel *new_sc); /* + * Channel rescind callback. Some channels (the hvsock ones) need to + * register a callback which is invoked in vmbus_onoffer_rescind(). + */ + void (*chn_rescind_callback)(struct vmbus_channel *channel); + + /* * The spinlock to protect the structure. It is being used to protect * test-and-set access to various attributes of the structure as well * as all sc_list operations. @@ -767,8 +811,30 @@ struct vmbus_channel { * signaling control. */ enum hv_signal_policy signal_policy; + /* + * On the channel send side, many of the VMBUS + * device drivers explicitly serialize access to the + * outgoing ring buffer. Give more control to the + * VMBUS device drivers in terms of how to serialize + * access to the outgoing ring buffer. + * The default behavior will be to acquire the + * ring lock to preserve the current behavior. + */ + bool acquire_ring_lock; + }; +static inline void set_channel_lock_state(struct vmbus_channel *c, bool state) +{ + c->acquire_ring_lock = state; +} + +static inline bool is_hvsock_channel(const struct vmbus_channel *c) +{ + return !!(c->offermsg.offer.chn_flags & + VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER); +} + static inline void set_channel_signal_state(struct vmbus_channel *c, enum hv_signal_policy policy) { @@ -790,6 +856,12 @@ static inline void *get_per_channel_state(struct vmbus_channel *c) return c->per_channel_state; } +static inline void set_channel_pending_send_size(struct vmbus_channel *c, + u32 size) +{ + c->outbound.ring_buffer->pending_send_sz = size; +} + void vmbus_onmessage(void *context); int vmbus_request_offers(void); @@ -801,6 +873,9 @@ int vmbus_request_offers(void); void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel, void (*sc_cr_cb)(struct vmbus_channel *new_sc)); +void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel, + void (*chn_rescind_cb)(struct vmbus_channel *)); + /* * Retrieve the (sub) channel on which to send an outgoing request. 
* When a primary channel has multiple sub-channels, we choose a @@ -940,6 +1015,20 @@ extern void vmbus_ontimer(unsigned long data); struct hv_driver { const char *name; + /* + * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER + * channel flag, actually doesn't mean a synthetic device because the + * offer's if_type/if_instance can change for every new hvsock + * connection. + * + * However, to facilitate the notification of new-offer/rescind-offer + * from vmbus driver to hvsock driver, we can handle hvsock offer as + * a special vmbus device, and hence we need the below flag to + * indicate if the driver is the hvsock driver or not: we need to + * specially treat the hvsock offer & driver in vmbus_match(). + */ + bool hvsock; + /* the device type supported by this driver */ uuid_le dev_type; const struct hv_vmbus_device_id *id_table; @@ -959,6 +1048,8 @@ struct hv_device { /* the device instance id of this device */ uuid_le dev_instance; + u16 vendor_id; + u16 device_id; struct device device; @@ -994,6 +1085,8 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver, const char *mod_name); void vmbus_driver_unregister(struct hv_driver *hv_driver); +void vmbus_hvsock_device_unregister(struct vmbus_channel *channel); + int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, resource_size_t min, resource_size_t max, resource_size_t size, resource_size_t align, @@ -1158,6 +1251,7 @@ u64 hv_do_hypercall(u64 control, void *input, void *output); struct hv_util_service { u8 *recv_buffer; + void *channel; void (*util_cb)(void *); int (*util_init)(struct hv_util_service *); void (*util_deinit)(void); @@ -1242,4 +1336,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); extern __u32 vmbus_proto_version; +int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id, + const uuid_le *shv_host_servie_id); #endif /* _HYPERV_H */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 452c0b0d2f32..3b1f6cef9513 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -163,6 +163,14 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) /* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */ #define IEEE80211_MAX_FRAME_LEN 2352 +/* Maximal size of an A-MSDU */ +#define IEEE80211_MAX_MPDU_LEN_HT_3839 3839 +#define IEEE80211_MAX_MPDU_LEN_HT_7935 7935 + +#define IEEE80211_MAX_MPDU_LEN_VHT_3895 3895 +#define IEEE80211_MAX_MPDU_LEN_VHT_7991 7991 +#define IEEE80211_MAX_MPDU_LEN_VHT_11454 11454 + #define IEEE80211_MAX_SSID_LEN 32 #define IEEE80211_MAX_MESH_ID_LEN 32 @@ -843,6 +851,8 @@ enum ieee80211_vht_opmode_bits { }; #define WLAN_SA_QUERY_TR_ID_LEN 2 +#define WLAN_MEMBERSHIP_LEN 8 +#define WLAN_USER_POSITION_LEN 16 /** * struct ieee80211_tpc_report_ie @@ -991,6 +1001,11 @@ struct ieee80211_mgmt { } __packed vht_opmode_notif; struct { u8 action_code; + u8 membership[WLAN_MEMBERSHIP_LEN]; + u8 position[WLAN_USER_POSITION_LEN]; + } __packed vht_group_notif; + struct { + u8 action_code; u8 dialog_token; u8 tpc_elem_id; u8 tpc_elem_length; @@ -1498,6 +1513,7 @@ struct ieee80211_vht_operation { #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000 #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001 #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002 +#define IEEE80211_VHT_CAP_MAX_MPDU_MASK 0x00000003 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008 #define 
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C @@ -2079,6 +2095,16 @@ enum ieee80211_tdls_actioncode { #define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(5) #define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6) +/* Defines the maximal number of MSDUs in an A-MSDU. */ +#define WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB BIT(7) +#define WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB BIT(0) + +/* + * Fine Timing Measurement Initiator - bit 71 of @WLAN_EID_EXT_CAPABILITY + * information element + */ +#define WLAN_EXT_CAPA9_FTM_INITIATOR BIT(7) + /* TDLS specific payload type in the LLC/SNAP header */ #define WLAN_TDLS_SNAP_RFTYPE 0x2 diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index a338a688ee4a..dcb89e3515db 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -46,10 +46,6 @@ struct br_ip_list { #define BR_LEARNING_SYNC BIT(9) #define BR_PROXYARP_WIFI BIT(10) -/* values as per ieee8021QBridgeFdbAgingTime */ -#define BR_MIN_AGEING_TIME (10 * HZ) -#define BR_MAX_AGEING_TIME (1000000 * HZ) - #define BR_DEFAULT_AGEING_TIME (300 * HZ) extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); diff --git a/include/linux/if_team.h b/include/linux/if_team.h index b84e49c3a738..174f43f43aff 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -24,6 +24,7 @@ struct team_pcpu_stats { struct u64_stats_sync syncp; u32 rx_dropped; u32 tx_dropped; + u32 rx_nohandler; }; struct team; diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 9c9de11549a7..12f6fba6d21a 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -37,11 +37,6 @@ static inline struct igmpv3_query * return (struct igmpv3_query *)skb_transport_header(skb); } -extern int sysctl_igmp_llm_reports; -extern int sysctl_igmp_max_memberships; -extern int sysctl_igmp_max_msf; -extern int sysctl_igmp_qrv; - struct ip_sf_socklist { unsigned int sl_max; unsigned int sl_count; diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 2fe939c73cd2..6670c3d25c58 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -119,6 +119,8 @@ struct st_sensor_bdu { * @addr: address of the register. * @mask_int1: mask to enable/disable IRQ on INT1 pin. * @mask_int2: mask to enable/disable IRQ on INT2 pin. + * @addr_ihl: address to enable/disable active low on the INT lines. + * @mask_ihl: mask to enable/disable active low on the INT lines. * struct ig1 - represents the Interrupt Generator 1 of sensors. * @en_addr: address of the enable ig1 register. * @en_mask: mask to write the on/off value for enable. @@ -127,6 +129,8 @@ struct st_sensor_data_ready_irq { u8 addr; u8 mask_int1; u8 mask_int2; + u8 addr_ihl; + u8 mask_ihl; struct { u8 en_addr; u8 en_mask; diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index b5894118755f..b2b16772c651 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -180,18 +180,18 @@ struct iio_event_spec { * @address: Driver specific identifier. * @scan_index: Monotonic index to give ordering in scans when read * from a buffer. - * @scan_type: Sign: 's' or 'u' to specify signed or unsigned + * @scan_type: sign: 's' or 'u' to specify signed or unsigned * realbits: Number of valid bits of data - * storage_bits: Realbits + padding + * storagebits: Realbits + padding * shift: Shift right by this before masking out * realbits. - * endianness: little or big endian * repeat: Number of times real/storage bits * repeats. 
When the repeat element is * more than 1, then the type element in * sysfs will show a repeat value. * Otherwise, the number of repetitions is * omitted. + * endianness: little or big endian * @info_mask_separate: What information is to be exported that is specific to * this channel. * @info_mask_shared_by_type: What information is to be exported that is shared @@ -448,7 +448,7 @@ struct iio_buffer_setup_ops { * @buffer: [DRIVER] any buffer present * @buffer_list: [INTERN] list of all buffers currently attached * @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux - * @mlock: [INTERN] lock used to prevent simultaneous device state + * @mlock: [DRIVER] lock used to prevent simultaneous device state * changes * @available_scan_masks: [DRIVER] optional array of allowed bitmasks * @masklength: [INTERN] the length of the mask established from diff --git a/include/linux/ima.h b/include/linux/ima.h index 120ccc53fcb7..e6516cbbe9bf 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -18,8 +18,9 @@ extern int ima_bprm_check(struct linux_binprm *bprm); extern int ima_file_check(struct file *file, int mask, int opened); extern void ima_file_free(struct file *file); extern int ima_file_mmap(struct file *file, unsigned long prot); -extern int ima_module_check(struct file *file); -extern int ima_fw_from_file(struct file *file, char *buf, size_t size); +extern int ima_read_file(struct file *file, enum kernel_read_file_id id); +extern int ima_post_read_file(struct file *file, void *buf, loff_t size, + enum kernel_read_file_id id); #else static inline int ima_bprm_check(struct linux_binprm *bprm) @@ -42,12 +43,13 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot) return 0; } -static inline int ima_module_check(struct file *file) +static inline int ima_read_file(struct file *file, enum kernel_read_file_id id) { return 0; } -static inline int ima_fw_from_file(struct file *file, char *buf, size_t size) +static inline int ima_post_read_file(struct file *file, void *buf, loff_t size, + enum kernel_read_file_id id) { return 0; } diff --git a/include/linux/inet_lro.h b/include/linux/inet_lro.h deleted file mode 100644 index 9a715cfa1fe3..000000000000 --- a/include/linux/inet_lro.h +++ /dev/null @@ -1,142 +0,0 @@ -/* - * linux/include/linux/inet_lro.h - * - * Large Receive Offload (ipv4 / tcp) - * - * (C) Copyright IBM Corp. 2007 - * - * Authors: - * Jan-Bernd Themann <themann@de.ibm.com> - * Christoph Raisch <raisch@de.ibm.com> - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#ifndef __INET_LRO_H_ -#define __INET_LRO_H_ - -#include <net/ip.h> -#include <net/tcp.h> - -/* - * LRO statistics - */ - -struct net_lro_stats { - unsigned long aggregated; - unsigned long flushed; - unsigned long no_desc; -}; - -/* - * LRO descriptor for a tcp session - */ -struct net_lro_desc { - struct sk_buff *parent; - struct sk_buff *last_skb; - struct skb_frag_struct *next_frag; - struct iphdr *iph; - struct tcphdr *tcph; - __wsum data_csum; - __be32 tcp_rcv_tsecr; - __be32 tcp_rcv_tsval; - __be32 tcp_ack; - u32 tcp_next_seq; - u32 skb_tot_frags_len; - u16 ip_tot_len; - u16 tcp_saw_tstamp; /* timestamps enabled */ - __be16 tcp_window; - int pkt_aggr_cnt; /* counts aggregated packets */ - int vlan_packet; - int mss; - int active; -}; - -/* - * Large Receive Offload (LRO) Manager - * - * Fields must be set by driver - */ - -struct net_lro_mgr { - struct net_device *dev; - struct net_lro_stats stats; - - /* LRO features */ - unsigned long features; -#define LRO_F_NAPI 1 /* Pass packets to stack via NAPI */ -#define LRO_F_EXTRACT_VLAN_ID 2 /* Set flag if VLAN IDs are extracted - from received packets and eth protocol - is still ETH_P_8021Q */ - - /* - * Set for generated SKBs that are not added to - * the frag list in fragmented mode - */ - u32 ip_summed; - u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY - * or CHECKSUM_NONE */ - - int max_desc; /* Max number of LRO descriptors */ - int max_aggr; /* Max number of LRO packets to be aggregated */ - - int frag_align_pad; /* Padding required to properly align layer 3 - * headers in generated skb when using frags */ - - struct net_lro_desc *lro_arr; /* Array of LRO descriptors */ - - /* - * Optimized driver functions - * - * get_skb_header: returns tcp and ip header for packet in SKB - */ - int (*get_skb_header)(struct sk_buff *skb, void **ip_hdr, - void **tcpudp_hdr, u64 *hdr_flags, void *priv); - - /* hdr_flags: */ -#define LRO_IPV4 1 /* ip_hdr is IPv4 header */ -#define LRO_TCP 2 /* tcpudp_hdr is TCP header */ - - /* - * get_frag_header: returns mac, tcp and ip header for packet in SKB - * - * @hdr_flags: Indicate what kind of LRO has to be done - * (IPv4/IPv6/TCP/UDP) - */ - int (*get_frag_header)(struct skb_frag_struct *frag, void **mac_hdr, - void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, - void *priv); -}; - -/* - * Processes a SKB - * - * @lro_mgr: LRO manager to use - * @skb: SKB to aggregate - * @priv: Private data that may be used by driver functions - * (for example get_tcp_ip_hdr) - */ - -void lro_receive_skb(struct net_lro_mgr *lro_mgr, - struct sk_buff *skb, - void *priv); -/* - * Forward all aggregated SKBs held by lro_mgr to network stack - */ - -void lro_flush_all(struct net_lro_mgr *lro_mgr); - -#endif diff --git a/include/linux/init.h b/include/linux/init.h index b449f378f995..aedb254abc37 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -142,6 +142,10 @@ void prepare_namespace(void); void __init load_default_modules(void); int __init init_rootfs(void); +#ifdef CONFIG_DEBUG_RODATA +void mark_rodata_ro(void); +#endif + extern void (*late_time_init)(void); extern bool initcall_debug; diff --git a/include/linux/input/cyttsp.h b/include/linux/input/cyttsp.h index 5af7c66f1fca..586c8c95dcb0 100644 --- a/include/linux/input/cyttsp.h +++ b/include/linux/input/cyttsp.h @@ -40,19 +40,4 @@ /* Active distance in pixels for a gesture to be reported */ #define CY_ACT_DIST_DFLT 0xF8 /* pixels */ -struct cyttsp_platform_data { - u32 maxx; - u32 maxy; - bool use_hndshk; - u8 act_dist; /* 
Active distance */ - u8 act_intrvl; /* Active refresh interval; ms */ - u8 tch_tmout; /* Active touch timeout; ms */ - u8 lp_intrvl; /* Low power refresh interval; ms */ - int (*init)(void); - void (*exit)(void); - char *name; - s16 irq_gpio; - u8 *bl_keys; -}; - #endif /* _CYTTSP_H_ */ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 821273ca4873..2d9b650047a5 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -235,6 +235,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) /* low 64 bit */ #define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT)) +/* PRS_REG */ +#define DMA_PRS_PPR ((u32)1) + #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ do { \ cycles_t start_time = get_cycles(); \ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 0e95fcc75b2a..358076eda364 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -125,6 +125,16 @@ struct irqaction { extern irqreturn_t no_action(int cpl, void *dev_id); +/* + * If a (PCI) device interrupt is not connected we set dev->irq to + * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we + * can distinguish that case from other error returns. + * + * 0x80000000 is guaranteed to be outside the available range of interrupts + * and easy to distinguish from other possible incorrect values. + */ +#define IRQ_NOTCONNECTED (1U << 31) + extern int __must_check request_threaded_irq(unsigned int irq, irq_handler_t handler, irq_handler_t thread_fn, diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 24bea087e7af..0b65543dc6cf 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -20,6 +20,7 @@ struct resource { resource_size_t end; const char *name; unsigned long flags; + unsigned long desc; struct resource *parent, *sibling, *child; }; @@ -49,12 +50,19 @@ struct resource { #define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */ #define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */ +#define IORESOURCE_EXT_TYPE_BITS 0x01000000 /* Resource extended types */ +#define IORESOURCE_SYSRAM 0x01000000 /* System RAM (modifier) */ + #define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */ + #define IORESOURCE_DISABLED 0x10000000 #define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */ #define IORESOURCE_AUTO 0x40000000 #define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */ +/* I/O resource extended types */ +#define IORESOURCE_SYSTEM_RAM (IORESOURCE_MEM|IORESOURCE_SYSRAM) + /* PnP IRQ specific bits (IORESOURCE_BITS) */ #define IORESOURCE_IRQ_HIGHEDGE (1<<0) #define IORESOURCE_IRQ_LOWEDGE (1<<1) @@ -98,13 +106,27 @@ struct resource { /* PCI ROM control bits (IORESOURCE_BITS) */ #define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */ -#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */ -#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */ -#define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */ +#define IORESOURCE_ROM_SHADOW (1<<1) /* Use RAM image, not ROM BAR */ /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ +/* + * I/O Resource Descriptors + * + * Descriptors are used by walk_iomem_res_desc() and region_intersects() + * for searching a specific resource range in the iomem table.
Assign + * a new descriptor when a resource range supports the search interfaces. + * Otherwise, resource.desc must be set to IORES_DESC_NONE (0). + */ +enum { + IORES_DESC_NONE = 0, + IORES_DESC_CRASH_KERNEL = 1, + IORES_DESC_ACPI_TABLES = 2, + IORES_DESC_ACPI_NV_STORAGE = 3, + IORES_DESC_PERSISTENT_MEMORY = 4, + IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5, +}; /* helpers to define resources */ #define DEFINE_RES_NAMED(_start, _size, _name, _flags) \ @@ -113,6 +135,7 @@ struct resource { .end = (_start) + (_size) - 1, \ .name = (_name), \ .flags = (_flags), \ + .desc = IORES_DESC_NONE, \ } #define DEFINE_RES_IO_NAMED(_start, _size, _name) \ @@ -149,6 +172,7 @@ extern void reserve_region_with_split(struct resource *root, extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new); extern int insert_resource(struct resource *parent, struct resource *new); extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); +extern int remove_resource(struct resource *old); extern void arch_remove_reservations(struct resource *avail); extern int allocate_resource(struct resource *root, struct resource *new, resource_size_t size, resource_size_t min, @@ -170,6 +194,10 @@ static inline unsigned long resource_type(const struct resource *res) { return res->flags & IORESOURCE_TYPE_BITS; } +static inline unsigned long resource_ext_type(const struct resource *res) +{ + return res->flags & IORESOURCE_EXT_TYPE_BITS; +} /* True iff r1 completely contains r2 */ static inline bool resource_contains(struct resource *r1, struct resource *r2) { @@ -239,8 +267,8 @@ extern int walk_system_ram_res(u64 start, u64 end, void *arg, int (*func)(u64, u64, void *)); extern int -walk_iomem_res(char *name, unsigned long flags, u64 start, u64 end, void *arg, - int (*func)(u64, u64, void *)); +walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end, + void *arg, int (*func)(u64, u64, void *)); /* True if any part of r1 overlaps r2 */ static inline bool resource_overlaps(struct resource *r1, struct resource *r2) diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 402753bccafa..7edc14fb66b6 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -50,16 +50,19 @@ struct ipv6_devconf { __s32 mc_forwarding; #endif __s32 disable_ipv6; + __s32 drop_unicast_in_l2_multicast; __s32 accept_dad; __s32 force_tllao; __s32 ndisc_notify; __s32 suppress_frag_ndisc; __s32 accept_ra_mtu; + __s32 drop_unsolicited_na; struct ipv6_stable_secret { bool initialized; struct in6_addr secret; } stable_secret; __s32 use_oif_addrs_only; + __s32 keep_addr_on_down; void *sysctl; }; diff --git a/include/linux/irq.h b/include/linux/irq.h index 3c1c96786248..c4de62348ff2 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -133,17 +133,23 @@ struct irq_domain; * Use accessor functions to deal with it * @node: node index useful for balancing * @handler_data: per-IRQ data for the irq_chip methods - * @affinity: IRQ affinity on SMP + * @affinity: IRQ affinity on SMP. If this is an IPI + * related irq, then this is the mask of the + * CPUs to which an IPI can be sent. * @msi_desc: MSI descriptor + * @ipi_offset: Offset of first IPI target cpu in @affinity. Optional. 
*/ struct irq_common_data { - unsigned int state_use_accessors; + unsigned int __private state_use_accessors; #ifdef CONFIG_NUMA unsigned int node; #endif void *handler_data; struct msi_desc *msi_desc; cpumask_var_t affinity; +#ifdef CONFIG_GENERIC_IRQ_IPI + unsigned int ipi_offset; +#endif }; /** @@ -208,7 +214,7 @@ enum { IRQD_FORWARDED_TO_VCPU = (1 << 20), }; -#define __irqd_to_state(d) ((d)->common->state_use_accessors) +#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) static inline bool irqd_is_setaffinity_pending(struct irq_data *d) { @@ -299,6 +305,8 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d) __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; } +#undef __irqd_to_state + static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) { return d->hwirq; @@ -341,6 +349,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) * @irq_get_irqchip_state: return the internal state of an interrupt * @irq_set_irqchip_state: set the internal state of an interrupt * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine + * @ipi_send_single: send a single IPI to destination cpus + * @ipi_send_mask: send an IPI to destination cpus in cpumask * @flags: chip specific flags */ struct irq_chip { @@ -385,6 +395,9 @@ struct irq_chip { int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info); + void (*ipi_send_single)(struct irq_data *data, unsigned int cpu); + void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest); + unsigned long flags; }; @@ -934,4 +947,12 @@ static inline u32 irq_reg_readl(struct irq_chip_generic *gc, return readl(gc->reg_base + reg_offset); } +/* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */ +#define INVALID_HWIRQ (~0UL) +irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu); +int __ipi_send_single(struct irq_desc *desc, unsigned int cpu); +int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest); +int ipi_send_single(unsigned int virq, unsigned int cpu); +int ipi_send_mask(unsigned int virq, const struct cpumask *dest); + #endif /* _LINUX_IRQ_H */ diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h index ce824db48d64..80f89e4a29ac 100644 --- a/include/linux/irqchip/mips-gic.h +++ b/include/linux/irqchip/mips-gic.h @@ -261,9 +261,6 @@ extern void gic_write_compare(cycle_t cnt); extern void gic_write_cpu_compare(cycle_t cnt, int cpu); extern void gic_start_count(void); extern void gic_stop_count(void); -extern void gic_send_ipi(unsigned int intr); -extern unsigned int plat_ipi_call_int_xlate(unsigned int); -extern unsigned int plat_ipi_resched_int_xlate(unsigned int); extern int gic_get_c0_compare_int(void); extern int gic_get_c0_perfcount_int(void); extern int gic_get_c0_fdc_int(void); diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 04579d9fbce4..2aed04396210 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -74,6 +74,8 @@ enum irq_domain_bus_token { DOMAIN_BUS_PCI_MSI, DOMAIN_BUS_PLATFORM_MSI, DOMAIN_BUS_NEXUS, + DOMAIN_BUS_IPI, + DOMAIN_BUS_FSL_MC_MSI, }; /** @@ -172,6 +174,12 @@ enum { /* Core calls alloc/free recursive through the domain hierarchy.
*/ IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1), + /* Irq domain is an IPI domain with virq per cpu */ + IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2), + + /* Irq domain is an IPI domain with single virq */ + IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3), + /* * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved * for implementation specific purposes and ignored by the @@ -206,6 +214,8 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token); extern void irq_set_default_host(struct irq_domain *host); +extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, + irq_hw_number_t hwirq, int node); static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) { @@ -335,6 +345,11 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type); +/* IPI functions */ +unsigned int irq_reserve_ipi(struct irq_domain *domain, + const struct cpumask *dest); +void irq_destroy_ipi(unsigned int irq); + /* V2 interfaces to support hierarchy IRQ domains. */ extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, unsigned int virq); @@ -400,6 +415,22 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) { return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY; } + +static inline bool irq_domain_is_ipi(struct irq_domain *domain) +{ + return domain->flags & + (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE); +} + +static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain) +{ + return domain->flags & IRQ_DOMAIN_FLAG_IPI_PER_CPU; +} + +static inline bool irq_domain_is_ipi_single(struct irq_domain *domain) +{ + return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE; +} #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ static inline void irq_domain_activate_irq(struct irq_data *data) { } static inline void irq_domain_deactivate_irq(struct irq_data *data) { } @@ -413,6 +444,21 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) { return false; } + +static inline bool irq_domain_is_ipi(struct irq_domain *domain) +{ + return false; +} + +static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain) +{ + return false; +} + +static inline bool irq_domain_is_ipi_single(struct irq_domain *domain) +{ + return false; +} #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ #else /* CONFIG_IRQ_DOMAIN */ diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h index 2a8b1659bf35..548d55395488 100644 --- a/include/linux/iscsi_boot_sysfs.h +++ b/include/linux/iscsi_boot_sysfs.h @@ -23,6 +23,7 @@ enum iscsi_boot_eth_properties_enum { ISCSI_BOOT_ETH_INDEX, ISCSI_BOOT_ETH_FLAGS, ISCSI_BOOT_ETH_IP_ADDR, + ISCSI_BOOT_ETH_PREFIX_LEN, ISCSI_BOOT_ETH_SUBNET_MASK, ISCSI_BOOT_ETH_ORIGIN, ISCSI_BOOT_ETH_GATEWAY, diff --git a/include/linux/isdn.h b/include/linux/isdn.h index 1e9a0f2a8626..df97c8444f5d 100644 --- a/include/linux/isdn.h +++ b/include/linux/isdn.h @@ -319,6 +319,7 @@ typedef struct modem_info { int online; /* 1 = B-Channel is up, drop data */ /* 2 = B-Channel is up, deliver d.*/ int dialing; /* Dial in progress or ATA */ + int closing; int rcvsched; /* Receive needs schedule */ int isdn_driver; /* Index to isdn-driver */ int isdn_channel; /* Index to isdn-channel */ diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 65407f6c9120..fd1083c46c61 100644 --- a/include/linux/jbd2.h 
+++ b/include/linux/jbd2.h @@ -200,7 +200,7 @@ typedef struct journal_block_tag_s __be32 t_blocknr_high; /* most-significant high 32bits. */ } journal_block_tag_t; -/* Tail of descriptor block, for checksumming */ +/* Tail of descriptor or revoke block, for checksumming */ struct jbd2_journal_block_tail { __be32 t_checksum; /* crc32c(uuid+descr_block) */ }; @@ -215,11 +215,6 @@ typedef struct jbd2_journal_revoke_header_s __be32 r_count; /* Count of bytes used in the block */ } jbd2_journal_revoke_header_t; -/* Tail of revoke block, for checksumming */ -struct jbd2_journal_revoke_tail { - __be32 r_checksum; /* crc32c(uuid+revoke_block) */ -}; - /* Definitions for the journal tag flags word: */ #define JBD2_FLAG_ESCAPE 1 /* on-disk block is escaped */ #define JBD2_FLAG_SAME_UUID 2 /* block has same uuid as previous */ @@ -1137,7 +1132,8 @@ static inline void jbd2_unfile_log_bh(struct buffer_head *bh) } /* Log buffer allocation */ -struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal); +struct buffer_head *jbd2_journal_get_descriptor_buffer(transaction_t *, int); +void jbd2_descriptor_block_csum_set(journal_t *, struct buffer_head *); int jbd2_journal_next_log_block(journal_t *, unsigned long long *); int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid, unsigned long *block); @@ -1327,10 +1323,8 @@ extern int jbd2_journal_init_revoke_caches(void); extern void jbd2_journal_destroy_revoke(journal_t *); extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *); extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *); -extern void jbd2_journal_write_revoke_records(journal_t *journal, - transaction_t *transaction, - struct list_head *log_bufs, - int write_op); +extern void jbd2_journal_write_revoke_records(transaction_t *transaction, + struct list_head *log_bufs); /* Recovery revoke support */ extern int jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t); diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 4b9f85c963d0..0fdc798e3ff7 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -1,6 +1,7 @@ #ifndef _LINUX_KASAN_H #define _LINUX_KASAN_H +#include <linux/sched.h> #include <linux/types.h> struct kmem_cache; @@ -13,7 +14,6 @@ struct vm_struct; #include <asm/kasan.h> #include <asm/pgtable.h> -#include <linux/sched.h> extern unsigned char kasan_zero_page[PAGE_SIZE]; extern pte_t kasan_zero_pte[PTRS_PER_PTE]; @@ -43,6 +43,8 @@ static inline void kasan_disable_current(void) void kasan_unpoison_shadow(const void *address, size_t size); +void kasan_unpoison_task_stack(struct task_struct *task); + void kasan_alloc_pages(struct page *page, unsigned int order); void kasan_free_pages(struct page *page, unsigned int order); @@ -66,6 +68,8 @@ void kasan_free_shadow(const struct vm_struct *vm); static inline void kasan_unpoison_shadow(const void *address, size_t size) {} +static inline void kasan_unpoison_task_stack(struct task_struct *task) {} + static inline void kasan_enable_current(void) {} static inline void kasan_disable_current(void) {} diff --git a/include/linux/kernel.h b/include/linux/kernel.h index f31638c6e873..b82646ee70eb 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -64,7 +64,7 @@ #define round_down(x, y) ((x) & ~__round_mask(x, y)) #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) -#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP #define DIV_ROUND_UP_ULL(ll,d) \ ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, 
d); _tmp; }) @@ -357,6 +357,7 @@ int __must_check kstrtou16(const char *s, unsigned int base, u16 *res); int __must_check kstrtos16(const char *s, unsigned int base, s16 *res); int __must_check kstrtou8(const char *s, unsigned int base, u8 *res); int __must_check kstrtos8(const char *s, unsigned int base, s8 *res); +int __must_check kstrtobool(const char *s, bool *res); int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res); int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res); @@ -368,6 +369,7 @@ int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigne int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res); int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res); int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res); +int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res); static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res) { diff --git a/include/linux/key.h b/include/linux/key.h index 7321ab8ef949..5f5b1129dc92 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -219,6 +219,7 @@ extern struct key *key_alloc(struct key_type *type, #define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */ #define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ #define KEY_ALLOC_TRUSTED 0x0004 /* Key should be flagged as trusted */ +#define KEY_ALLOC_BUILT_IN 0x0008 /* Key is built into kernel */ extern void key_revoke(struct key *key); extern void key_invalidate(struct key *key); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 861f690aa791..5276fe0916fc 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -25,6 +25,7 @@ #include <linux/irqflags.h> #include <linux/context_tracking.h> #include <linux/irqbypass.h> +#include <linux/swait.h> #include <asm/signal.h> #include <linux/kvm.h> @@ -218,7 +219,7 @@ struct kvm_vcpu { int fpu_active; int guest_fpu_loaded, guest_xcr0_loaded; unsigned char fpu_counter; - wait_queue_head_t wq; + struct swait_queue_head wq; struct pid *pid; int sigset_active; sigset_t sigset; @@ -782,7 +783,7 @@ static inline bool kvm_arch_has_assigned_device(struct kvm *kvm) } #endif -static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) +static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) { #ifdef __KVM_HAVE_ARCH_WQP return vcpu->arch.wqp; diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h index e23121f9d82a..59ccab297ae0 100644 --- a/include/linux/latencytop.h +++ b/include/linux/latencytop.h @@ -37,6 +37,9 @@ account_scheduler_latency(struct task_struct *task, int usecs, int inter) void clear_all_latency_tracing(struct task_struct *p); +extern int sysctl_latencytop(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); + #else static inline void diff --git a/include/linux/leds.h b/include/linux/leds.h index bc1476fda96e..f203a8f89d30 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -39,6 +39,7 @@ struct led_classdev { /* Lower 16 bits reflect status */ #define LED_SUSPENDED (1 << 0) +#define LED_UNREGISTERING (1 << 1) /* Upper 16 bits reflect control information */ #define LED_CORE_SUSPENDRESUME (1 << 16) #define LED_BLINK_ONESHOT (1 << 
17) @@ -48,9 +49,12 @@ struct led_classdev { #define LED_BLINK_DISABLE (1 << 21) #define LED_SYSFS_DISABLE (1 << 22) #define LED_DEV_CAP_FLASH (1 << 23) +#define LED_HW_PLUGGABLE (1 << 24) - /* Set LED brightness level */ - /* Must not sleep, use a workqueue if needed */ + /* Set LED brightness level + * Must not sleep. Use brightness_set_blocking for drivers + * that can sleep while setting brightness. + */ void (*brightness_set)(struct led_classdev *led_cdev, enum led_brightness brightness); /* diff --git a/include/linux/libata.h b/include/linux/libata.h index 851821bfd553..2c4ebef79d0c 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -526,6 +526,7 @@ enum ata_lpm_policy { enum ata_lpm_hints { ATA_LPM_EMPTY = (1 << 0), /* port empty/probing */ ATA_LPM_HIPM = (1 << 1), /* may use HIPM */ + ATA_LPM_WAKE_ONLY = (1 << 2), /* only wake up link */ }; /* forward declarations */ @@ -719,7 +720,7 @@ struct ata_device { union { u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ - }; + } ____cacheline_aligned; /* DEVSLP Timing Variables from Identify Device Data Log */ u8 devslp_timing[ATA_LOG_DEVSLP_SIZE]; diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index bed40dff0e86..833867b9ddc2 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -26,9 +26,8 @@ enum { /* need to set a limit somewhere, but yes, this is likely overkill */ ND_IOCTL_MAX_BUFLEN = SZ_4M, - ND_CMD_MAX_ELEM = 4, + ND_CMD_MAX_ELEM = 5, ND_CMD_MAX_ENVELOPE = 16, - ND_CMD_ARS_STATUS_MAX = SZ_4K, ND_MAX_MAPPINGS = 32, /* region flag indicating to direct-map persistent memory by default */ @@ -49,7 +48,7 @@ struct nvdimm; struct nvdimm_bus_descriptor; typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf, - unsigned int buf_len); + unsigned int buf_len, int *cmd_rc); struct nd_namespace_label; struct nvdimm_drvdata; @@ -72,6 +71,9 @@ struct nvdimm_bus_descriptor { unsigned long dsm_mask; char *provider_name; ndctl_fn ndctl; + int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc); + int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc, + struct nvdimm *nvdimm, unsigned int cmd); }; struct nd_cmd_desc { diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index d6750111e48e..c3c43184a787 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -92,9 +92,9 @@ enum { NVM_ADDRMODE_CHANNEL = 1, /* Plane programming mode for LUN */ - NVM_PLANE_SINGLE = 0, - NVM_PLANE_DOUBLE = 1, - NVM_PLANE_QUAD = 2, + NVM_PLANE_SINGLE = 1, + NVM_PLANE_DOUBLE = 2, + NVM_PLANE_QUAD = 4, /* Status codes */ NVM_RSP_SUCCESS = 0x0, @@ -135,6 +135,10 @@ enum { /* Memory types */ NVM_ID_FMTYPE_SLC = 0, NVM_ID_FMTYPE_MLC = 1, + + /* Device capabilities */ + NVM_ID_DCAP_BBLKMGMT = 0x1, + NVM_UD_DCAP_ECC = 0x2, }; struct nvm_id_lp_mlc { @@ -337,8 +341,8 @@ struct nvm_dev { int lps_per_blk; int *lptbl; - unsigned long total_pages; unsigned long total_blocks; + unsigned long total_secs; int nr_luns; unsigned max_pages_per_blk; diff --git a/include/linux/list.h b/include/linux/list.h index 30cf4200ab40..5356f4d661a7 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -113,17 +113,6 @@ extern void __list_del_entry(struct list_head *entry); extern void list_del(struct list_head *entry); #endif -#ifdef CONFIG_DEBUG_LIST -/* - * See devm_memremap_pages() which wants DEBUG_LIST=y to assert if one - * of the pages it allocates is ever passed to list_add() - 
*/ -extern void list_force_poison(struct list_head *entry); -#else -/* fallback to the less strict LIST_POISON* definitions */ -#define list_force_poison list_del -#endif - /** * list_replace - replace old entry by new one * @old : the element to be replaced diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h index ee7229a6c06a..cb483305e1f5 100644 --- a/include/linux/list_bl.h +++ b/include/linux/list_bl.h @@ -48,7 +48,7 @@ static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) #define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member) -static inline int hlist_bl_unhashed(const struct hlist_bl_node *h) +static inline bool hlist_bl_unhashed(const struct hlist_bl_node *h) { return !h->pprev; } @@ -68,7 +68,7 @@ static inline void hlist_bl_set_first(struct hlist_bl_head *h, h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK); } -static inline int hlist_bl_empty(const struct hlist_bl_head *h) +static inline bool hlist_bl_empty(const struct hlist_bl_head *h) { return !((unsigned long)READ_ONCE(h->first) & ~LIST_BL_LOCKMASK); } diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index a8828652f794..bd830d590465 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -134,6 +134,15 @@ int klp_unregister_patch(struct klp_patch *); int klp_enable_patch(struct klp_patch *); int klp_disable_patch(struct klp_patch *); +/* Called from the module loader during module coming/going states */ +int klp_module_coming(struct module *mod); +void klp_module_going(struct module *mod); + +#else /* !CONFIG_LIVEPATCH */ + +static inline int klp_module_coming(struct module *mod) { return 0; } +static inline void klp_module_going(struct module *mod) { } + #endif /* CONFIG_LIVEPATCH */ #endif /* _LINUX_LIVEPATCH_H_ */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index c57e424d914b..d026b190c530 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -66,7 +66,7 @@ struct lock_class { /* * class-hash: */ - struct list_head hash_entry; + struct hlist_node hash_entry; /* * global list of all lock-classes: @@ -199,7 +199,7 @@ struct lock_chain { u8 irq_context; u8 depth; u16 base; - struct list_head entry; + struct hlist_node entry; u64 chain_key; }; @@ -261,7 +261,6 @@ struct held_lock { /* * Initialization, self-test and debugging-output methods: */ -extern void lockdep_init(void); extern void lockdep_info(void); extern void lockdep_reset(void); extern void lockdep_reset_lock(struct lockdep_map *lock); @@ -392,7 +391,6 @@ static inline void lockdep_on(void) # define lockdep_set_current_reclaim_state(g) do { } while (0) # define lockdep_clear_current_reclaim_state() do { } while (0) # define lockdep_trace_alloc(g) do { } while (0) -# define lockdep_init() do { } while (0) # define lockdep_info() do { } while (0) # define lockdep_init_map(lock, name, key, sub) \ do { (void)(name); (void)(key); } while (0) diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 71969de4058c..cdee11cbcdf1 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -541,25 +541,24 @@ * @inode points to the inode to use as a reference. * The current task must be the one that nominated @inode. * Return 0 if successful. - * @kernel_fw_from_file: - * Load firmware from userspace (not called for built-in firmware). - * @file contains the file structure pointing to the file containing - * the firmware to load. 
This argument will be NULL if the firmware - * was loaded via the uevent-triggered blob-based interface exposed - * by CONFIG_FW_LOADER_USER_HELPER. - * @buf pointer to buffer containing firmware contents. - * @size length of the firmware contents. - * Return 0 if permission is granted. * @kernel_module_request: * Ability to trigger the kernel to automatically upcall to userspace for * userspace to load a kernel module with the given name. * @kmod_name name of the module requested by the kernel * Return 0 if successful. - * @kernel_module_from_file: - * Load a kernel module from userspace. - * @file contains the file structure pointing to the file containing - * the kernel module to load. If the module is being loaded from a blob, - * this argument will be NULL. + * @kernel_read_file: + * Read a file specified by userspace. + * @file contains the file structure pointing to the file being read + * by the kernel. + * @id kernel read file identifier + * Return 0 if permission is granted. + * @kernel_post_read_file: + * Read a file specified by userspace. + * @file contains the file structure pointing to the file being read + * by the kernel. + * @buf pointer to buffer containing the file contents. + * @size length of the file contents. + * @id kernel read file identifier * Return 0 if permission is granted. * @task_fix_setuid: * Update the module's state after setting one or more of the user @@ -1454,9 +1453,11 @@ union security_list_options { void (*cred_transfer)(struct cred *new, const struct cred *old); int (*kernel_act_as)(struct cred *new, u32 secid); int (*kernel_create_files_as)(struct cred *new, struct inode *inode); - int (*kernel_fw_from_file)(struct file *file, char *buf, size_t size); int (*kernel_module_request)(char *kmod_name); int (*kernel_module_from_file)(struct file *file); + int (*kernel_read_file)(struct file *file, enum kernel_read_file_id id); + int (*kernel_post_read_file)(struct file *file, char *buf, loff_t size, + enum kernel_read_file_id id); int (*task_fix_setuid)(struct cred *new, const struct cred *old, int flags); int (*task_setpgid)(struct task_struct *p, pid_t pgid); @@ -1715,9 +1716,9 @@ struct security_hook_heads { struct list_head cred_transfer; struct list_head kernel_act_as; struct list_head kernel_create_files_as; - struct list_head kernel_fw_from_file; + struct list_head kernel_read_file; + struct list_head kernel_post_read_file; struct list_head kernel_module_request; - struct list_head kernel_module_from_file; struct list_head task_fix_setuid; struct list_head task_setpgid; struct list_head task_getpgid; diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h index 6a392e7a723a..86c9a8b480c5 100644 --- a/include/linux/mbcache.h +++ b/include/linux/mbcache.h @@ -1,55 +1,52 @@ -/* - File: linux/mbcache.h +#ifndef _LINUX_MBCACHE_H +#define _LINUX_MBCACHE_H - (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org> -*/ -struct mb_cache_entry { - struct list_head e_lru_list; - struct mb_cache *e_cache; - unsigned short e_used; - unsigned short e_queued; - atomic_t e_refcnt; - struct block_device *e_bdev; - sector_t e_block; - struct hlist_bl_node e_block_list; - struct { - struct hlist_bl_node o_list; - unsigned int o_key; - } e_index; - struct hlist_bl_head *e_block_hash_p; - struct hlist_bl_head *e_index_hash_p; -}; +#include <linux/hash.h> +#include <linux/list_bl.h> +#include <linux/list.h> +#include <linux/atomic.h> +#include <linux/fs.h> -struct mb_cache { - struct list_head c_cache_list; - const char *c_name; - atomic_t c_entry_count; 
- int c_max_entries; - int c_bucket_bits; - struct kmem_cache *c_entry_cache; - struct hlist_bl_head *c_block_hash; - struct hlist_bl_head *c_index_hash; -}; +struct mb_cache; -/* Functions on caches */ +struct mb_cache_entry { + /* List of entries in cache - protected by cache->c_list_lock */ + struct list_head e_list; + /* Hash table list - protected by hash chain bitlock */ + struct hlist_bl_node e_hash_list; + atomic_t e_refcnt; + /* Key in hash - stable during lifetime of the entry */ + u32 e_key; + u32 e_referenced:1; + u32 e_reusable:1; + /* Block number of hashed block - stable during lifetime of the entry */ + sector_t e_block; +}; -struct mb_cache *mb_cache_create(const char *, int); -void mb_cache_shrink(struct block_device *); -void mb_cache_destroy(struct mb_cache *); +struct mb_cache *mb_cache_create(int bucket_bits); +void mb_cache_destroy(struct mb_cache *cache); -/* Functions on cache entries */ +int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, + sector_t block, bool reusable); +void __mb_cache_entry_free(struct mb_cache_entry *entry); +static inline int mb_cache_entry_put(struct mb_cache *cache, + struct mb_cache_entry *entry) +{ + if (!atomic_dec_and_test(&entry->e_refcnt)) + return 0; + __mb_cache_entry_free(entry); + return 1; +} -struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *, gfp_t); -int mb_cache_entry_insert(struct mb_cache_entry *, struct block_device *, - sector_t, unsigned int); -void mb_cache_entry_release(struct mb_cache_entry *); -void mb_cache_entry_free(struct mb_cache_entry *); -struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *, - struct block_device *, - sector_t); +void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key, + sector_t block); +struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key, + sector_t block); struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache, - struct block_device *, - unsigned int); -struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache_entry *, - struct block_device *, - unsigned int); + u32 key); +struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache, + struct mb_cache_entry *entry); +void mb_cache_entry_touch(struct mb_cache *cache, + struct mb_cache_entry *entry); + +#endif /* _LINUX_MBCACHE_H */ diff --git a/include/linux/mbus.h b/include/linux/mbus.h index 1f7bc630d225..ea34a867caa0 100644 --- a/include/linux/mbus.h +++ b/include/linux/mbus.h @@ -69,6 +69,9 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(vo int mvebu_mbus_save_cpu_target(u32 *store_addr); void mvebu_mbus_get_pcie_mem_aperture(struct resource *res); void mvebu_mbus_get_pcie_io_aperture(struct resource *res); +int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr); +int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target, + u8 *attr); int mvebu_mbus_add_window_remap_by_id(unsigned int target, unsigned int attribute, phys_addr_t base, size_t size, diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 792c8981e633..1191d79aa495 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -28,6 +28,7 @@ #include <linux/eventfd.h> #include <linux/mmzone.h> #include <linux/writeback.h> +#include <linux/page-flags.h> struct mem_cgroup; struct page; @@ -51,7 +52,10 @@ enum mem_cgroup_stat_index { MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */ MEM_CGROUP_STAT_NSTATS, /* default hierarchy stats */ - MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS, + 
MEMCG_KERNEL_STACK = MEM_CGROUP_STAT_NSTATS, + MEMCG_SLAB_RECLAIMABLE, + MEMCG_SLAB_UNRECLAIMABLE, + MEMCG_SOCK, MEMCG_NR_STAT, }; @@ -89,6 +93,10 @@ enum mem_cgroup_events_target { }; #ifdef CONFIG_MEMCG + +#define MEM_CGROUP_ID_SHIFT 16 +#define MEM_CGROUP_ID_MAX USHRT_MAX + struct mem_cgroup_stat_cpu { long count[MEMCG_NR_STAT]; unsigned long events[MEMCG_NR_EVENTS]; @@ -265,6 +273,11 @@ struct mem_cgroup { extern struct mem_cgroup *root_mem_cgroup; +static inline bool mem_cgroup_disabled(void) +{ + return !cgroup_subsys_enabled(memory_cgrp_subsys); +} + /** * mem_cgroup_events - count memory events against a cgroup * @memcg: the memory cgroup @@ -291,7 +304,7 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg, void mem_cgroup_uncharge(struct page *page); void mem_cgroup_uncharge_list(struct list_head *page_list); -void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage); +void mem_cgroup_migrate(struct page *oldpage, struct page *newpage); struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); @@ -312,6 +325,28 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, struct mem_cgroup_reclaim_cookie *); void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); +static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) +{ + if (mem_cgroup_disabled()) + return 0; + + return memcg->css.id; +} + +/** + * mem_cgroup_from_id - look up a memcg from an id + * @id: the id to look up + * + * Caller must hold rcu_read_lock() and use css_tryget() as necessary. + */ +static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id) +{ + struct cgroup_subsys_state *css; + + css = css_from_id(id, &memory_cgrp_subsys); + return mem_cgroup_from_css(css); +} + /** * parent_mem_cgroup - find the accounting parent of a memcg * @memcg: memcg whose parent to find @@ -353,11 +388,6 @@ static inline bool mm_match_cgroup(struct mm_struct *mm, struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page); ino_t page_cgroup_ino(struct page *page); -static inline bool mem_cgroup_disabled(void) -{ - return !cgroup_subsys_enabled(memory_cgrp_subsys); -} - static inline bool mem_cgroup_online(struct mem_cgroup *memcg) { if (mem_cgroup_disabled()) @@ -373,6 +403,9 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, int nr_pages); +unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, + int nid, unsigned int lru_mask); + static inline unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) { @@ -429,36 +462,43 @@ bool mem_cgroup_oom_synchronize(bool wait); extern int do_swap_account; #endif -struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page); -void mem_cgroup_end_page_stat(struct mem_cgroup *memcg); +void lock_page_memcg(struct page *page); +void unlock_page_memcg(struct page *page); /** * mem_cgroup_update_page_stat - update page state statistics - * @memcg: memcg to account against + * @page: the page * @idx: page state item to account * @val: number of pages (positive or negative) * - * See mem_cgroup_begin_page_stat() for locking requirements. 
+ * The @page must be locked or the caller must use lock_page_memcg() + * to prevent double accounting when the page is concurrently being + * moved to another memcg: + * + * lock_page(page) or lock_page_memcg(page) + * if (TestClearPageState(page)) + * mem_cgroup_update_page_stat(page, state, -1); + * unlock_page(page) or unlock_page_memcg(page) */ -static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, +static inline void mem_cgroup_update_page_stat(struct page *page, enum mem_cgroup_stat_index idx, int val) { - VM_BUG_ON(!rcu_read_lock_held()); + VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page))); - if (memcg) - this_cpu_add(memcg->stat->count[idx], val); + if (page->mem_cgroup) + this_cpu_add(page->mem_cgroup->stat->count[idx], val); } -static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg, +static inline void mem_cgroup_inc_page_stat(struct page *page, enum mem_cgroup_stat_index idx) { - mem_cgroup_update_page_stat(memcg, idx, 1); + mem_cgroup_update_page_stat(page, idx, 1); } -static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg, +static inline void mem_cgroup_dec_page_stat(struct page *page, enum mem_cgroup_stat_index idx) { - mem_cgroup_update_page_stat(memcg, idx, -1); + mem_cgroup_update_page_stat(page, idx, -1); } unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, @@ -496,8 +536,17 @@ void mem_cgroup_split_huge_fixup(struct page *head); #endif #else /* CONFIG_MEMCG */ + +#define MEM_CGROUP_ID_SHIFT 0 +#define MEM_CGROUP_ID_MAX 0 + struct mem_cgroup; +static inline bool mem_cgroup_disabled(void) +{ + return true; +} + static inline void mem_cgroup_events(struct mem_cgroup *memcg, enum mem_cgroup_events_index idx, unsigned int nr) @@ -539,7 +588,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list) { } -static inline void mem_cgroup_replace_page(struct page *old, struct page *new) +static inline void mem_cgroup_migrate(struct page *old, struct page *new) { } @@ -580,9 +629,16 @@ static inline void mem_cgroup_iter_break(struct mem_cgroup *root, { } -static inline bool mem_cgroup_disabled(void) +static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) { - return true; + return 0; +} + +static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id) +{ + WARN_ON_ONCE(id); + /* XXX: This should always return root_mem_cgroup */ + return NULL; } static inline bool mem_cgroup_online(struct mem_cgroup *memcg) @@ -608,17 +664,23 @@ mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, { } +static inline unsigned long +mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, + int nid, unsigned int lru_mask) +{ + return 0; +} + static inline void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) { } -static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page) +static inline void lock_page_memcg(struct page *page) { - return NULL; } -static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg) +static inline void unlock_page_memcg(struct page *page) { } @@ -644,12 +706,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait) return false; } -static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg, +static inline void mem_cgroup_inc_page_stat(struct page *page, enum mem_cgroup_stat_index idx) { } -static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg, +static inline void mem_cgroup_dec_page_stat(struct page *page, enum mem_cgroup_stat_index idx) { } @@ -743,11 +805,6 @@ static 
inline bool memcg_kmem_enabled(void) return static_branch_unlikely(&memcg_kmem_enabled_key); } -static inline bool memcg_kmem_online(struct mem_cgroup *memcg) -{ - return memcg->kmem_state == KMEM_ONLINE; -} - /* * In general, we'll do everything in our power to not incur in any overhead * for non-memcg users for the kmem functions. Not even a function call, if we @@ -765,7 +822,7 @@ int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order); void __memcg_kmem_uncharge(struct page *page, int order); /* - * helper for acessing a memcg's index. It will be used as an index in the + * helper for accessing a memcg's index. It will be used as an index in the * child cache array in kmem_cache, and also to derive its name. This function * will return -1 when this is not a kmem-limited memcg. */ @@ -834,6 +891,20 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep) if (memcg_kmem_enabled()) __memcg_kmem_put_cache(cachep); } + +/** + * memcg_kmem_update_page_stat - update kmem page state statistics + * @page: the page + * @idx: page state item to account + * @val: number of pages (positive or negative) + */ +static inline void memcg_kmem_update_page_stat(struct page *page, + enum mem_cgroup_stat_index idx, int val) +{ + if (memcg_kmem_enabled() && page->mem_cgroup) + this_cpu_add(page->mem_cgroup->stat->count[idx], val); +} + #else #define for_each_memcg_cache_index(_idx) \ for (; NULL; ) @@ -843,11 +914,6 @@ static inline bool memcg_kmem_enabled(void) return false; } -static inline bool memcg_kmem_online(struct mem_cgroup *memcg) -{ - return false; -} - static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) { return 0; @@ -879,6 +945,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) { } + +static inline void memcg_kmem_update_page_stat(struct page *page, + enum mem_cgroup_stat_index idx, int val) +{ +} #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/memory.h b/include/linux/memory.h index 8b8d8d12348e..093607f90b91 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -109,6 +109,9 @@ extern void unregister_memory_notifier(struct notifier_block *nb); extern int register_memory_isolate_notifier(struct notifier_block *nb); extern void unregister_memory_isolate_notifier(struct notifier_block *nb); extern int register_new_memory(int, struct mem_section *); +extern int memory_block_change_state(struct memory_block *mem, + unsigned long to_state, + unsigned long from_state_req); #ifdef CONFIG_MEMORY_HOTREMOVE extern int unregister_memory_section(struct mem_section *); #endif @@ -137,17 +140,6 @@ extern struct memory_block *find_memory_block(struct mem_section *); #endif /* - * 'struct memory_accessor' is a generic interface to provide - * in-kernel access to persistent memory such as i2c or SPI EEPROMs - */ -struct memory_accessor { - ssize_t (*read)(struct memory_accessor *, char *buf, off_t offset, - size_t count); - ssize_t (*write)(struct memory_accessor *, const char *buf, - off_t offset, size_t count); -}; - -/* * Kernel text modification mutex, used for code patching. Users of this lock * can sleep. 
*/ diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 43405992d027..adbef586e696 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -99,6 +99,8 @@ extern void __online_page_free(struct page *page); extern int try_online_node(int nid); +extern bool memhp_auto_online; + #ifdef CONFIG_MEMORY_HOTREMOVE extern bool is_pageblock_removable_nolock(struct page *page); extern int arch_remove_memory(u64 start, u64 size); @@ -196,6 +198,9 @@ void put_online_mems(void); void mem_hotplug_begin(void); void mem_hotplug_done(void); +extern void set_zone_contiguous(struct zone *zone); +extern void clear_zone_contiguous(struct zone *zone); + #else /* ! CONFIG_MEMORY_HOTPLUG */ /* * Stub functions for when hotplug is off @@ -267,7 +272,7 @@ static inline void remove_memory(int nid, u64 start, u64 size) {} extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, void *arg, int (*func)(struct memory_block *, void *)); extern int add_memory(int nid, u64 start, u64 size); -extern int add_memory_resource(int nid, struct resource *resource); +extern int add_memory_resource(int nid, struct resource *resource, bool online); extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default, bool for_device); extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device); diff --git a/include/linux/mfd/as3711.h b/include/linux/mfd/as3711.h index 38452ce1e892..34cc85864be5 100644 --- a/include/linux/mfd/as3711.h +++ b/include/linux/mfd/as3711.h @@ -51,7 +51,8 @@ #define AS3711_ASIC_ID_1 0x90 #define AS3711_ASIC_ID_2 0x91 -#define AS3711_MAX_REGS 0x92 +#define AS3711_MAX_REG AS3711_ASIC_ID_2 +#define AS3711_NUM_REGS (AS3711_MAX_REG + 1) /* Regulators */ enum { diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index b24c771cebd5..d82e7d51372b 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -18,6 +18,7 @@ enum { AXP202_ID, AXP209_ID, AXP221_ID, + AXP223_ID, AXP288_ID, NR_AXP20X_VARIANTS, }; @@ -396,7 +397,7 @@ enum axp288_irqs { struct axp20x_dev { struct device *dev; - struct i2c_client *i2c_client; + int irq; struct regmap *regmap; struct regmap_irq_chip_data *regmap_irqc; long variant; @@ -462,4 +463,35 @@ static inline int axp20x_read_variable_width(struct regmap *regmap, return result; } +/** + * axp20x_match_device(): Setup axp20x variant related fields + * + * @axp20x: axp20x device to setup (.dev field must be set) + * @dev: device associated with this axp20x device + * + * This lets the axp20x core configure the mfd cells and register maps + * for later use. + */ +int axp20x_match_device(struct axp20x_dev *axp20x); + +/** + * axp20x_device_probe(): Probe a configured axp20x device + * + * @axp20x: axp20x device to probe (must be configured) + * + * This function lets the axp20x core register the axp20x mfd devices + * and irqchip. The axp20x device passed in must be fully configured + * with axp20x_match_device, its irq set, and regmap created. 
+ */ +int axp20x_device_probe(struct axp20x_dev *axp20x); + +/** + * axp20x_device_remove(): Remove an axp20x device + * + * @axp20x: axp20x device to remove + * + * This tells the axp20x core to remove the associated mfd devices + */ +int axp20x_device_remove(struct axp20x_dev *axp20x); + #endif /* __LINUX_MFD_AXP20X_H */ diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index 494682ce4bf3..a677c2bd485c 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -245,7 +245,7 @@ int cros_ec_remove(struct cros_ec_device *ec_dev); int cros_ec_register(struct cros_ec_device *ec_dev); /** - * cros_ec_register - Query the protocol version supported by the ChromeOS EC + * cros_ec_query_all - Query the protocol version supported by the ChromeOS EC * * @ec_dev: Device to register * @return 0 if ok, -ve on error diff --git a/include/linux/mfd/imx25-tsadc.h b/include/linux/mfd/imx25-tsadc.h new file mode 100644 index 000000000000..7fe4b8c3baac --- /dev/null +++ b/include/linux/mfd/imx25-tsadc.h @@ -0,0 +1,140 @@ +#ifndef _LINUX_INCLUDE_MFD_IMX25_TSADC_H_ +#define _LINUX_INCLUDE_MFD_IMX25_TSADC_H_ + +struct regmap; +struct clk; + +struct mx25_tsadc { + struct regmap *regs; + struct irq_domain *domain; + struct clk *clk; +}; + +#define MX25_TSC_TGCR 0x00 +#define MX25_TSC_TGSR 0x04 +#define MX25_TSC_TICR 0x08 + +/* The same register layout for TC and GC queue */ +#define MX25_ADCQ_FIFO 0x00 +#define MX25_ADCQ_CR 0x04 +#define MX25_ADCQ_SR 0x08 +#define MX25_ADCQ_MR 0x0c +#define MX25_ADCQ_ITEM_7_0 0x20 +#define MX25_ADCQ_ITEM_15_8 0x24 +#define MX25_ADCQ_CFG(n) (0x40 + ((n) * 0x4)) + +#define MX25_ADCQ_MR_MASK 0xffffffff + +/* TGCR */ +#define MX25_TGCR_PDBTIME(x) ((x) << 25) +#define MX25_TGCR_PDBTIME_MASK GENMASK(31, 25) +#define MX25_TGCR_PDBEN BIT(24) +#define MX25_TGCR_PDEN BIT(23) +#define MX25_TGCR_ADCCLKCFG(x) ((x) << 16) +#define MX25_TGCR_GET_ADCCLK(x) (((x) >> 16) & 0x1f) +#define MX25_TGCR_INTREFEN BIT(10) +#define MX25_TGCR_POWERMODE_MASK GENMASK(9, 8) +#define MX25_TGCR_POWERMODE_SAVE (1 << 8) +#define MX25_TGCR_POWERMODE_ON (2 << 8) +#define MX25_TGCR_STLC BIT(5) +#define MX25_TGCR_SLPC BIT(4) +#define MX25_TGCR_FUNC_RST BIT(2) +#define MX25_TGCR_TSC_RST BIT(1) +#define MX25_TGCR_CLK_EN BIT(0) + +/* TGSR */ +#define MX25_TGSR_SLP_INT BIT(2) +#define MX25_TGSR_GCQ_INT BIT(1) +#define MX25_TGSR_TCQ_INT BIT(0) + +/* ADCQ_ITEM_* */ +#define _MX25_ADCQ_ITEM(item, x) ((x) << ((item) * 4)) +#define MX25_ADCQ_ITEM(item, x) ((item) >= 8 ?
\ + _MX25_ADCQ_ITEM((item) - 8, (x)) : _MX25_ADCQ_ITEM((item), (x))) + +/* ADCQ_FIFO (TCQFIFO and GCQFIFO) */ +#define MX25_ADCQ_FIFO_DATA(x) (((x) >> 4) & 0xfff) +#define MX25_ADCQ_FIFO_ID(x) ((x) & 0xf) + +/* ADCQ_CR (TCQR and GCQR) */ +#define MX25_ADCQ_CR_PDCFG_LEVEL BIT(19) +#define MX25_ADCQ_CR_PDMSK BIT(18) +#define MX25_ADCQ_CR_FRST BIT(17) +#define MX25_ADCQ_CR_QRST BIT(16) +#define MX25_ADCQ_CR_RWAIT_MASK GENMASK(15, 12) +#define MX25_ADCQ_CR_RWAIT(x) ((x) << 12) +#define MX25_ADCQ_CR_WMRK_MASK GENMASK(11, 8) +#define MX25_ADCQ_CR_WMRK(x) ((x) << 8) +#define MX25_ADCQ_CR_LITEMID_MASK (0xf << 4) +#define MX25_ADCQ_CR_LITEMID(x) ((x) << 4) +#define MX25_ADCQ_CR_RPT BIT(3) +#define MX25_ADCQ_CR_FQS BIT(2) +#define MX25_ADCQ_CR_QSM_MASK GENMASK(1, 0) +#define MX25_ADCQ_CR_QSM_PD 0x1 +#define MX25_ADCQ_CR_QSM_FQS 0x2 +#define MX25_ADCQ_CR_QSM_FQS_PD 0x3 + +/* ADCQ_SR (TCQSR and GCQSR) */ +#define MX25_ADCQ_SR_FDRY BIT(15) +#define MX25_ADCQ_SR_FULL BIT(14) +#define MX25_ADCQ_SR_EMPT BIT(13) +#define MX25_ADCQ_SR_FDN(x) (((x) >> 8) & 0x1f) +#define MX25_ADCQ_SR_FRR BIT(6) +#define MX25_ADCQ_SR_FUR BIT(5) +#define MX25_ADCQ_SR_FOR BIT(4) +#define MX25_ADCQ_SR_EOQ BIT(1) +#define MX25_ADCQ_SR_PD BIT(0) + +/* ADCQ_MR (TCQMR and GCQMR) */ +#define MX25_ADCQ_MR_FDRY_DMA BIT(31) +#define MX25_ADCQ_MR_FER_DMA BIT(22) +#define MX25_ADCQ_MR_FUR_DMA BIT(21) +#define MX25_ADCQ_MR_FOR_DMA BIT(20) +#define MX25_ADCQ_MR_EOQ_DMA BIT(17) +#define MX25_ADCQ_MR_PD_DMA BIT(16) +#define MX25_ADCQ_MR_FDRY_IRQ BIT(15) +#define MX25_ADCQ_MR_FER_IRQ BIT(6) +#define MX25_ADCQ_MR_FUR_IRQ BIT(5) +#define MX25_ADCQ_MR_FOR_IRQ BIT(4) +#define MX25_ADCQ_MR_EOQ_IRQ BIT(1) +#define MX25_ADCQ_MR_PD_IRQ BIT(0) + +/* ADCQ_CFG (TICR, TCC0-7,GCC0-7) */ +#define MX25_ADCQ_CFG_SETTLING_TIME(x) ((x) << 24) +#define MX25_ADCQ_CFG_IGS (1 << 20) +#define MX25_ADCQ_CFG_NOS_MASK GENMASK(19, 16) +#define MX25_ADCQ_CFG_NOS(x) (((x) - 1) << 16) +#define MX25_ADCQ_CFG_WIPER (1 << 15) +#define MX25_ADCQ_CFG_YNLR (1 << 14) +#define MX25_ADCQ_CFG_YPLL_HIGH (0 << 12) +#define MX25_ADCQ_CFG_YPLL_OFF (1 << 12) +#define MX25_ADCQ_CFG_YPLL_LOW (3 << 12) +#define MX25_ADCQ_CFG_XNUR_HIGH (0 << 10) +#define MX25_ADCQ_CFG_XNUR_OFF (1 << 10) +#define MX25_ADCQ_CFG_XNUR_LOW (3 << 10) +#define MX25_ADCQ_CFG_XPUL_HIGH (0 << 9) +#define MX25_ADCQ_CFG_XPUL_OFF (1 << 9) +#define MX25_ADCQ_CFG_REFP(sel) ((sel) << 7) +#define MX25_ADCQ_CFG_REFP_YP MX25_ADCQ_CFG_REFP(0) +#define MX25_ADCQ_CFG_REFP_XP MX25_ADCQ_CFG_REFP(1) +#define MX25_ADCQ_CFG_REFP_EXT MX25_ADCQ_CFG_REFP(2) +#define MX25_ADCQ_CFG_REFP_INT MX25_ADCQ_CFG_REFP(3) +#define MX25_ADCQ_CFG_REFP_MASK GENMASK(8, 7) +#define MX25_ADCQ_CFG_IN(sel) ((sel) << 4) +#define MX25_ADCQ_CFG_IN_XP MX25_ADCQ_CFG_IN(0) +#define MX25_ADCQ_CFG_IN_YP MX25_ADCQ_CFG_IN(1) +#define MX25_ADCQ_CFG_IN_XN MX25_ADCQ_CFG_IN(2) +#define MX25_ADCQ_CFG_IN_YN MX25_ADCQ_CFG_IN(3) +#define MX25_ADCQ_CFG_IN_WIPER MX25_ADCQ_CFG_IN(4) +#define MX25_ADCQ_CFG_IN_AUX0 MX25_ADCQ_CFG_IN(5) +#define MX25_ADCQ_CFG_IN_AUX1 MX25_ADCQ_CFG_IN(6) +#define MX25_ADCQ_CFG_IN_AUX2 MX25_ADCQ_CFG_IN(7) +#define MX25_ADCQ_CFG_REFN(sel) ((sel) << 2) +#define MX25_ADCQ_CFG_REFN_XN MX25_ADCQ_CFG_REFN(0) +#define MX25_ADCQ_CFG_REFN_YN MX25_ADCQ_CFG_REFN(1) +#define MX25_ADCQ_CFG_REFN_NGND MX25_ADCQ_CFG_REFN(2) +#define MX25_ADCQ_CFG_REFN_NGND2 MX25_ADCQ_CFG_REFN(3) +#define MX25_ADCQ_CFG_REFN_MASK GENMASK(3, 2) +#define MX25_ADCQ_CFG_PENIACK (1 << 1) + +#endif /* _LINUX_INCLUDE_MFD_IMX25_TSADC_H_ */ diff --git a/include/linux/mfd/max77686-private.h 
b/include/linux/mfd/max77686-private.h index f5043490d67c..643dae777b43 100644 --- a/include/linux/mfd/max77686-private.h +++ b/include/linux/mfd/max77686-private.h @@ -437,14 +437,11 @@ enum max77686_irq { struct max77686_dev { struct device *dev; struct i2c_client *i2c; /* 0xcc / PMIC, Battery Control, and FLASH */ - struct i2c_client *rtc; /* slave addr 0x0c */ unsigned long type; struct regmap *regmap; /* regmap for mfd */ - struct regmap *rtc_regmap; /* regmap for rtc */ struct regmap_irq_chip_data *irq_data; - struct regmap_irq_chip_data *rtc_irq_data; int irq; struct mutex irqlock; diff --git a/include/linux/mfd/mt6323/core.h b/include/linux/mfd/mt6323/core.h new file mode 100644 index 000000000000..06d0ec3b1f8f --- /dev/null +++ b/include/linux/mfd/mt6323/core.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2016 Chen Zhong <chen.zhong@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MFD_MT6323_CORE_H__ +#define __MFD_MT6323_CORE_H__ + +enum MT6323_IRQ_STATUS_numbers { + MT6323_IRQ_STATUS_SPKL_AB = 0, + MT6323_IRQ_STATUS_SPKL, + MT6323_IRQ_STATUS_BAT_L, + MT6323_IRQ_STATUS_BAT_H, + MT6323_IRQ_STATUS_WATCHDOG, + MT6323_IRQ_STATUS_PWRKEY, + MT6323_IRQ_STATUS_THR_L, + MT6323_IRQ_STATUS_THR_H, + MT6323_IRQ_STATUS_VBATON_UNDET, + MT6323_IRQ_STATUS_BVALID_DET, + MT6323_IRQ_STATUS_CHRDET, + MT6323_IRQ_STATUS_OV, + MT6323_IRQ_STATUS_LDO = 16, + MT6323_IRQ_STATUS_FCHRKEY, + MT6323_IRQ_STATUS_ACCDET, + MT6323_IRQ_STATUS_AUDIO, + MT6323_IRQ_STATUS_RTC, + MT6323_IRQ_STATUS_VPROC, + MT6323_IRQ_STATUS_VSYS, + MT6323_IRQ_STATUS_VPA, + MT6323_IRQ_STATUS_NR, +}; + +#endif /* __MFD_MT6323_CORE_H__ */ diff --git a/include/linux/mfd/mt6323/registers.h b/include/linux/mfd/mt6323/registers.h new file mode 100644 index 000000000000..160f3c0e2589 --- /dev/null +++ b/include/linux/mfd/mt6323/registers.h @@ -0,0 +1,408 @@ +/* + * Copyright (c) 2016 Chen Zhong <chen.zhong@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __MFD_MT6323_REGISTERS_H__ +#define __MFD_MT6323_REGISTERS_H__ + +/* PMIC Registers */ +#define MT6323_CHR_CON0 0x0000 +#define MT6323_CHR_CON1 0x0002 +#define MT6323_CHR_CON2 0x0004 +#define MT6323_CHR_CON3 0x0006 +#define MT6323_CHR_CON4 0x0008 +#define MT6323_CHR_CON5 0x000A +#define MT6323_CHR_CON6 0x000C +#define MT6323_CHR_CON7 0x000E +#define MT6323_CHR_CON8 0x0010 +#define MT6323_CHR_CON9 0x0012 +#define MT6323_CHR_CON10 0x0014 +#define MT6323_CHR_CON11 0x0016 +#define MT6323_CHR_CON12 0x0018 +#define MT6323_CHR_CON13 0x001A +#define MT6323_CHR_CON14 0x001C +#define MT6323_CHR_CON15 0x001E +#define MT6323_CHR_CON16 0x0020 +#define MT6323_CHR_CON17 0x0022 +#define MT6323_CHR_CON18 0x0024 +#define MT6323_CHR_CON19 0x0026 +#define MT6323_CHR_CON20 0x0028 +#define MT6323_CHR_CON21 0x002A +#define MT6323_CHR_CON22 0x002C +#define MT6323_CHR_CON23 0x002E +#define MT6323_CHR_CON24 0x0030 +#define MT6323_CHR_CON25 0x0032 +#define MT6323_CHR_CON26 0x0034 +#define MT6323_CHR_CON27 0x0036 +#define MT6323_CHR_CON28 0x0038 +#define MT6323_CHR_CON29 0x003A +#define MT6323_STRUP_CON0 0x003C +#define MT6323_STRUP_CON2 0x003E +#define MT6323_STRUP_CON3 0x0040 +#define MT6323_STRUP_CON4 0x0042 +#define MT6323_STRUP_CON5 0x0044 +#define MT6323_STRUP_CON6 0x0046 +#define MT6323_STRUP_CON7 0x0048 +#define MT6323_STRUP_CON8 0x004A +#define MT6323_STRUP_CON9 0x004C +#define MT6323_STRUP_CON10 0x004E +#define MT6323_STRUP_CON11 0x0050 +#define MT6323_SPK_CON0 0x0052 +#define MT6323_SPK_CON1 0x0054 +#define MT6323_SPK_CON2 0x0056 +#define MT6323_SPK_CON6 0x005E +#define MT6323_SPK_CON7 0x0060 +#define MT6323_SPK_CON8 0x0062 +#define MT6323_SPK_CON9 0x0064 +#define MT6323_SPK_CON10 0x0066 +#define MT6323_SPK_CON11 0x0068 +#define MT6323_SPK_CON12 0x006A +#define MT6323_CID 0x0100 +#define MT6323_TOP_CKPDN0 0x0102 +#define MT6323_TOP_CKPDN0_SET 0x0104 +#define MT6323_TOP_CKPDN0_CLR 0x0106 +#define MT6323_TOP_CKPDN1 0x0108 +#define MT6323_TOP_CKPDN1_SET 0x010A +#define MT6323_TOP_CKPDN1_CLR 0x010C +#define MT6323_TOP_CKPDN2 0x010E +#define MT6323_TOP_CKPDN2_SET 0x0110 +#define MT6323_TOP_CKPDN2_CLR 0x0112 +#define MT6323_TOP_RST_CON 0x0114 +#define MT6323_TOP_RST_CON_SET 0x0116 +#define MT6323_TOP_RST_CON_CLR 0x0118 +#define MT6323_TOP_RST_MISC 0x011A +#define MT6323_TOP_RST_MISC_SET 0x011C +#define MT6323_TOP_RST_MISC_CLR 0x011E +#define MT6323_TOP_CKCON0 0x0120 +#define MT6323_TOP_CKCON0_SET 0x0122 +#define MT6323_TOP_CKCON0_CLR 0x0124 +#define MT6323_TOP_CKCON1 0x0126 +#define MT6323_TOP_CKCON1_SET 0x0128 +#define MT6323_TOP_CKCON1_CLR 0x012A +#define MT6323_TOP_CKTST0 0x012C +#define MT6323_TOP_CKTST1 0x012E +#define MT6323_TOP_CKTST2 0x0130 +#define MT6323_TEST_OUT 0x0132 +#define MT6323_TEST_CON0 0x0134 +#define MT6323_TEST_CON1 0x0136 +#define MT6323_EN_STATUS0 0x0138 +#define MT6323_EN_STATUS1 0x013A +#define MT6323_OCSTATUS0 0x013C +#define MT6323_OCSTATUS1 0x013E +#define MT6323_PGSTATUS 0x0140 +#define MT6323_CHRSTATUS 0x0142 +#define MT6323_TDSEL_CON 0x0144 +#define MT6323_RDSEL_CON 0x0146 +#define MT6323_SMT_CON0 0x0148 +#define MT6323_SMT_CON1 0x014A +#define MT6323_SMT_CON2 0x014C +#define MT6323_SMT_CON3 0x014E +#define MT6323_SMT_CON4 0x0150 +#define MT6323_DRV_CON0 0x0152 +#define MT6323_DRV_CON1 0x0154 +#define MT6323_DRV_CON2 0x0156 +#define MT6323_DRV_CON3 0x0158 +#define MT6323_DRV_CON4 0x015A +#define MT6323_SIMLS1_CON 0x015C +#define MT6323_SIMLS2_CON 0x015E +#define MT6323_INT_CON0 0x0160 +#define MT6323_INT_CON0_SET 0x0162 +#define MT6323_INT_CON0_CLR 0x0164 +#define 
MT6323_INT_CON1 0x0166 +#define MT6323_INT_CON1_SET 0x0168 +#define MT6323_INT_CON1_CLR 0x016A +#define MT6323_INT_MISC_CON 0x016C +#define MT6323_INT_MISC_CON_SET 0x016E +#define MT6323_INT_MISC_CON_CLR 0x0170 +#define MT6323_INT_STATUS0 0x0172 +#define MT6323_INT_STATUS1 0x0174 +#define MT6323_OC_GEAR_0 0x0176 +#define MT6323_OC_GEAR_1 0x0178 +#define MT6323_OC_GEAR_2 0x017A +#define MT6323_OC_CTL_VPROC 0x017C +#define MT6323_OC_CTL_VSYS 0x017E +#define MT6323_OC_CTL_VPA 0x0180 +#define MT6323_FQMTR_CON0 0x0182 +#define MT6323_FQMTR_CON1 0x0184 +#define MT6323_FQMTR_CON2 0x0186 +#define MT6323_RG_SPI_CON 0x0188 +#define MT6323_DEW_DIO_EN 0x018A +#define MT6323_DEW_READ_TEST 0x018C +#define MT6323_DEW_WRITE_TEST 0x018E +#define MT6323_DEW_CRC_SWRST 0x0190 +#define MT6323_DEW_CRC_EN 0x0192 +#define MT6323_DEW_CRC_VAL 0x0194 +#define MT6323_DEW_DBG_MON_SEL 0x0196 +#define MT6323_DEW_CIPHER_KEY_SEL 0x0198 +#define MT6323_DEW_CIPHER_IV_SEL 0x019A +#define MT6323_DEW_CIPHER_EN 0x019C +#define MT6323_DEW_CIPHER_RDY 0x019E +#define MT6323_DEW_CIPHER_MODE 0x01A0 +#define MT6323_DEW_CIPHER_SWRST 0x01A2 +#define MT6323_DEW_RDDMY_NO 0x01A4 +#define MT6323_DEW_RDATA_DLY_SEL 0x01A6 +#define MT6323_BUCK_CON0 0x0200 +#define MT6323_BUCK_CON1 0x0202 +#define MT6323_BUCK_CON2 0x0204 +#define MT6323_BUCK_CON3 0x0206 +#define MT6323_BUCK_CON4 0x0208 +#define MT6323_BUCK_CON5 0x020A +#define MT6323_VPROC_CON0 0x020C +#define MT6323_VPROC_CON1 0x020E +#define MT6323_VPROC_CON2 0x0210 +#define MT6323_VPROC_CON3 0x0212 +#define MT6323_VPROC_CON4 0x0214 +#define MT6323_VPROC_CON5 0x0216 +#define MT6323_VPROC_CON7 0x021A +#define MT6323_VPROC_CON8 0x021C +#define MT6323_VPROC_CON9 0x021E +#define MT6323_VPROC_CON10 0x0220 +#define MT6323_VPROC_CON11 0x0222 +#define MT6323_VPROC_CON12 0x0224 +#define MT6323_VPROC_CON13 0x0226 +#define MT6323_VPROC_CON14 0x0228 +#define MT6323_VPROC_CON15 0x022A +#define MT6323_VPROC_CON18 0x0230 +#define MT6323_VSYS_CON0 0x0232 +#define MT6323_VSYS_CON1 0x0234 +#define MT6323_VSYS_CON2 0x0236 +#define MT6323_VSYS_CON3 0x0238 +#define MT6323_VSYS_CON4 0x023A +#define MT6323_VSYS_CON5 0x023C +#define MT6323_VSYS_CON7 0x0240 +#define MT6323_VSYS_CON8 0x0242 +#define MT6323_VSYS_CON9 0x0244 +#define MT6323_VSYS_CON10 0x0246 +#define MT6323_VSYS_CON11 0x0248 +#define MT6323_VSYS_CON12 0x024A +#define MT6323_VSYS_CON13 0x024C +#define MT6323_VSYS_CON14 0x024E +#define MT6323_VSYS_CON15 0x0250 +#define MT6323_VSYS_CON18 0x0256 +#define MT6323_VPA_CON0 0x0300 +#define MT6323_VPA_CON1 0x0302 +#define MT6323_VPA_CON2 0x0304 +#define MT6323_VPA_CON3 0x0306 +#define MT6323_VPA_CON4 0x0308 +#define MT6323_VPA_CON5 0x030A +#define MT6323_VPA_CON7 0x030E +#define MT6323_VPA_CON8 0x0310 +#define MT6323_VPA_CON9 0x0312 +#define MT6323_VPA_CON10 0x0314 +#define MT6323_VPA_CON11 0x0316 +#define MT6323_VPA_CON12 0x0318 +#define MT6323_VPA_CON14 0x031C +#define MT6323_VPA_CON16 0x0320 +#define MT6323_VPA_CON17 0x0322 +#define MT6323_VPA_CON18 0x0324 +#define MT6323_VPA_CON19 0x0326 +#define MT6323_VPA_CON20 0x0328 +#define MT6323_BUCK_K_CON0 0x032A +#define MT6323_BUCK_K_CON1 0x032C +#define MT6323_BUCK_K_CON2 0x032E +#define MT6323_ISINK0_CON0 0x0330 +#define MT6323_ISINK0_CON1 0x0332 +#define MT6323_ISINK0_CON2 0x0334 +#define MT6323_ISINK0_CON3 0x0336 +#define MT6323_ISINK1_CON0 0x0338 +#define MT6323_ISINK1_CON1 0x033A +#define MT6323_ISINK1_CON2 0x033C +#define MT6323_ISINK1_CON3 0x033E +#define MT6323_ISINK2_CON0 0x0340 +#define MT6323_ISINK2_CON1 0x0342 +#define MT6323_ISINK2_CON2 0x0344 
+#define MT6323_ISINK2_CON3 0x0346 +#define MT6323_ISINK3_CON0 0x0348 +#define MT6323_ISINK3_CON1 0x034A +#define MT6323_ISINK3_CON2 0x034C +#define MT6323_ISINK3_CON3 0x034E +#define MT6323_ISINK_ANA0 0x0350 +#define MT6323_ISINK_ANA1 0x0352 +#define MT6323_ISINK_PHASE_DLY 0x0354 +#define MT6323_ISINK_EN_CTRL 0x0356 +#define MT6323_ANALDO_CON0 0x0400 +#define MT6323_ANALDO_CON1 0x0402 +#define MT6323_ANALDO_CON2 0x0404 +#define MT6323_ANALDO_CON3 0x0406 +#define MT6323_ANALDO_CON4 0x0408 +#define MT6323_ANALDO_CON5 0x040A +#define MT6323_ANALDO_CON6 0x040C +#define MT6323_ANALDO_CON7 0x040E +#define MT6323_ANALDO_CON8 0x0410 +#define MT6323_ANALDO_CON10 0x0412 +#define MT6323_ANALDO_CON15 0x0414 +#define MT6323_ANALDO_CON16 0x0416 +#define MT6323_ANALDO_CON17 0x0418 +#define MT6323_ANALDO_CON18 0x041A +#define MT6323_ANALDO_CON19 0x041C +#define MT6323_ANALDO_CON20 0x041E +#define MT6323_ANALDO_CON21 0x0420 +#define MT6323_DIGLDO_CON0 0x0500 +#define MT6323_DIGLDO_CON2 0x0502 +#define MT6323_DIGLDO_CON3 0x0504 +#define MT6323_DIGLDO_CON5 0x0506 +#define MT6323_DIGLDO_CON6 0x0508 +#define MT6323_DIGLDO_CON7 0x050A +#define MT6323_DIGLDO_CON8 0x050C +#define MT6323_DIGLDO_CON9 0x050E +#define MT6323_DIGLDO_CON10 0x0510 +#define MT6323_DIGLDO_CON11 0x0512 +#define MT6323_DIGLDO_CON12 0x0514 +#define MT6323_DIGLDO_CON13 0x0516 +#define MT6323_DIGLDO_CON14 0x0518 +#define MT6323_DIGLDO_CON15 0x051A +#define MT6323_DIGLDO_CON16 0x051C +#define MT6323_DIGLDO_CON17 0x051E +#define MT6323_DIGLDO_CON18 0x0520 +#define MT6323_DIGLDO_CON19 0x0522 +#define MT6323_DIGLDO_CON20 0x0524 +#define MT6323_DIGLDO_CON21 0x0526 +#define MT6323_DIGLDO_CON23 0x0528 +#define MT6323_DIGLDO_CON24 0x052A +#define MT6323_DIGLDO_CON26 0x052C +#define MT6323_DIGLDO_CON27 0x052E +#define MT6323_DIGLDO_CON28 0x0530 +#define MT6323_DIGLDO_CON29 0x0532 +#define MT6323_DIGLDO_CON30 0x0534 +#define MT6323_DIGLDO_CON31 0x0536 +#define MT6323_DIGLDO_CON32 0x0538 +#define MT6323_DIGLDO_CON33 0x053A +#define MT6323_DIGLDO_CON34 0x053C +#define MT6323_DIGLDO_CON35 0x053E +#define MT6323_DIGLDO_CON36 0x0540 +#define MT6323_DIGLDO_CON39 0x0542 +#define MT6323_DIGLDO_CON40 0x0544 +#define MT6323_DIGLDO_CON41 0x0546 +#define MT6323_DIGLDO_CON42 0x0548 +#define MT6323_DIGLDO_CON43 0x054A +#define MT6323_DIGLDO_CON44 0x054C +#define MT6323_DIGLDO_CON45 0x054E +#define MT6323_DIGLDO_CON46 0x0550 +#define MT6323_DIGLDO_CON47 0x0552 +#define MT6323_DIGLDO_CON48 0x0554 +#define MT6323_DIGLDO_CON49 0x0556 +#define MT6323_DIGLDO_CON50 0x0558 +#define MT6323_DIGLDO_CON51 0x055A +#define MT6323_DIGLDO_CON52 0x055C +#define MT6323_DIGLDO_CON53 0x055E +#define MT6323_DIGLDO_CON54 0x0560 +#define MT6323_EFUSE_CON0 0x0600 +#define MT6323_EFUSE_CON1 0x0602 +#define MT6323_EFUSE_CON2 0x0604 +#define MT6323_EFUSE_CON3 0x0606 +#define MT6323_EFUSE_CON4 0x0608 +#define MT6323_EFUSE_CON5 0x060A +#define MT6323_EFUSE_CON6 0x060C +#define MT6323_EFUSE_VAL_0_15 0x060E +#define MT6323_EFUSE_VAL_16_31 0x0610 +#define MT6323_EFUSE_VAL_32_47 0x0612 +#define MT6323_EFUSE_VAL_48_63 0x0614 +#define MT6323_EFUSE_VAL_64_79 0x0616 +#define MT6323_EFUSE_VAL_80_95 0x0618 +#define MT6323_EFUSE_VAL_96_111 0x061A +#define MT6323_EFUSE_VAL_112_127 0x061C +#define MT6323_EFUSE_VAL_128_143 0x061E +#define MT6323_EFUSE_VAL_144_159 0x0620 +#define MT6323_EFUSE_VAL_160_175 0x0622 +#define MT6323_EFUSE_VAL_176_191 0x0624 +#define MT6323_EFUSE_DOUT_0_15 0x0626 +#define MT6323_EFUSE_DOUT_16_31 0x0628 +#define MT6323_EFUSE_DOUT_32_47 0x062A +#define MT6323_EFUSE_DOUT_48_63 0x062C 
+#define MT6323_EFUSE_DOUT_64_79 0x062E +#define MT6323_EFUSE_DOUT_80_95 0x0630 +#define MT6323_EFUSE_DOUT_96_111 0x0632 +#define MT6323_EFUSE_DOUT_112_127 0x0634 +#define MT6323_EFUSE_DOUT_128_143 0x0636 +#define MT6323_EFUSE_DOUT_144_159 0x0638 +#define MT6323_EFUSE_DOUT_160_175 0x063A +#define MT6323_EFUSE_DOUT_176_191 0x063C +#define MT6323_EFUSE_CON7 0x063E +#define MT6323_EFUSE_CON8 0x0640 +#define MT6323_EFUSE_CON9 0x0642 +#define MT6323_RTC_MIX_CON0 0x0644 +#define MT6323_RTC_MIX_CON1 0x0646 +#define MT6323_AUDTOP_CON0 0x0700 +#define MT6323_AUDTOP_CON1 0x0702 +#define MT6323_AUDTOP_CON2 0x0704 +#define MT6323_AUDTOP_CON3 0x0706 +#define MT6323_AUDTOP_CON4 0x0708 +#define MT6323_AUDTOP_CON5 0x070A +#define MT6323_AUDTOP_CON6 0x070C +#define MT6323_AUDTOP_CON7 0x070E +#define MT6323_AUDTOP_CON8 0x0710 +#define MT6323_AUDTOP_CON9 0x0712 +#define MT6323_AUXADC_ADC0 0x0714 +#define MT6323_AUXADC_ADC1 0x0716 +#define MT6323_AUXADC_ADC2 0x0718 +#define MT6323_AUXADC_ADC3 0x071A +#define MT6323_AUXADC_ADC4 0x071C +#define MT6323_AUXADC_ADC5 0x071E +#define MT6323_AUXADC_ADC6 0x0720 +#define MT6323_AUXADC_ADC7 0x0722 +#define MT6323_AUXADC_ADC8 0x0724 +#define MT6323_AUXADC_ADC9 0x0726 +#define MT6323_AUXADC_ADC10 0x0728 +#define MT6323_AUXADC_ADC11 0x072A +#define MT6323_AUXADC_ADC12 0x072C +#define MT6323_AUXADC_ADC13 0x072E +#define MT6323_AUXADC_ADC14 0x0730 +#define MT6323_AUXADC_ADC15 0x0732 +#define MT6323_AUXADC_ADC16 0x0734 +#define MT6323_AUXADC_ADC17 0x0736 +#define MT6323_AUXADC_ADC18 0x0738 +#define MT6323_AUXADC_ADC19 0x073A +#define MT6323_AUXADC_ADC20 0x073C +#define MT6323_AUXADC_RSV1 0x073E +#define MT6323_AUXADC_RSV2 0x0740 +#define MT6323_AUXADC_CON0 0x0742 +#define MT6323_AUXADC_CON1 0x0744 +#define MT6323_AUXADC_CON2 0x0746 +#define MT6323_AUXADC_CON3 0x0748 +#define MT6323_AUXADC_CON4 0x074A +#define MT6323_AUXADC_CON5 0x074C +#define MT6323_AUXADC_CON6 0x074E +#define MT6323_AUXADC_CON7 0x0750 +#define MT6323_AUXADC_CON8 0x0752 +#define MT6323_AUXADC_CON9 0x0754 +#define MT6323_AUXADC_CON10 0x0756 +#define MT6323_AUXADC_CON11 0x0758 +#define MT6323_AUXADC_CON12 0x075A +#define MT6323_AUXADC_CON13 0x075C +#define MT6323_AUXADC_CON14 0x075E +#define MT6323_AUXADC_CON15 0x0760 +#define MT6323_AUXADC_CON16 0x0762 +#define MT6323_AUXADC_CON17 0x0764 +#define MT6323_AUXADC_CON18 0x0766 +#define MT6323_AUXADC_CON19 0x0768 +#define MT6323_AUXADC_CON20 0x076A +#define MT6323_AUXADC_CON21 0x076C +#define MT6323_AUXADC_CON22 0x076E +#define MT6323_AUXADC_CON23 0x0770 +#define MT6323_AUXADC_CON24 0x0772 +#define MT6323_AUXADC_CON25 0x0774 +#define MT6323_AUXADC_CON26 0x0776 +#define MT6323_AUXADC_CON27 0x0778 +#define MT6323_ACCDET_CON0 0x077A +#define MT6323_ACCDET_CON1 0x077C +#define MT6323_ACCDET_CON2 0x077E +#define MT6323_ACCDET_CON3 0x0780 +#define MT6323_ACCDET_CON4 0x0782 +#define MT6323_ACCDET_CON5 0x0784 +#define MT6323_ACCDET_CON6 0x0786 +#define MT6323_ACCDET_CON7 0x0788 +#define MT6323_ACCDET_CON8 0x078A +#define MT6323_ACCDET_CON9 0x078C +#define MT6323_ACCDET_CON10 0x078E +#define MT6323_ACCDET_CON11 0x0790 +#define MT6323_ACCDET_CON12 0x0792 +#define MT6323_ACCDET_CON13 0x0794 +#define MT6323_ACCDET_CON14 0x0796 +#define MT6323_ACCDET_CON15 0x0798 +#define MT6323_ACCDET_CON16 0x079A + +#endif /* __MFD_MT6323_REGISTERS_H__ */ diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h index 45b8e8aa1fbf..d678f526e498 100644 --- a/include/linux/mfd/mt6397/core.h +++ b/include/linux/mfd/mt6397/core.h @@ -60,6 +60,8 @@ struct mt6397_chip { u16 
wake_mask[2]; u16 irq_masks_cur[2]; u16 irq_masks_cache[2]; + u16 int_con[2]; + u16 int_status[2]; }; #endif /* __MFD_MT6397_CORE_H__ */ diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index c800dbc42079..5c9a1d44c125 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h @@ -580,7 +580,9 @@ struct palmas_usb { int vbus_irq; int gpio_id_irq; + int gpio_vbus_irq; struct gpio_desc *id_gpiod; + struct gpio_desc *vbus_gpiod; unsigned long sw_debounce_jiffies; struct delayed_work wq_detectid; @@ -589,6 +591,7 @@ struct palmas_usb { bool enable_vbus_detection; bool enable_id_detection; bool enable_gpio_id_detection; + bool enable_gpio_vbus_detection; }; #define comparator_to_palmas(x) container_of((x), struct palmas_usb, comparator) diff --git a/include/linux/mfd/rc5t583.h b/include/linux/mfd/rc5t583.h index fd413ccab915..8d0a392e0a7f 100644 --- a/include/linux/mfd/rc5t583.h +++ b/include/linux/mfd/rc5t583.h @@ -28,8 +28,6 @@ #include <linux/types.h> #include <linux/regmap.h> -#define RC5T583_MAX_REGS 0xF8 - /* Maximum number of main interrupts */ #define MAX_MAIN_INTERRUPT 5 #define RC5T583_MAX_GPEDGE_REG 2 @@ -169,6 +167,9 @@ #define RC5T583_RTC_AY_MONTH 0xF3 #define RC5T583_RTC_AY_YEAR 0xF4 +#define RC5T583_MAX_REG 0xF7 +#define RC5T583_NUM_REGS (RC5T583_MAX_REG + 1) + /* RICOH_RC5T583 IRQ definitions */ enum { RC5T583_IRQ_ONKEY, diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h index 75e543b78f53..1088149be0c9 100644 --- a/include/linux/mfd/syscon.h +++ b/include/linux/mfd/syscon.h @@ -29,24 +29,24 @@ extern struct regmap *syscon_regmap_lookup_by_phandle( #else static inline struct regmap *syscon_node_to_regmap(struct device_node *np) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-ENOTSUPP); } static inline struct regmap *syscon_regmap_lookup_by_compatible(const char *s) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-ENOTSUPP); } static inline struct regmap *syscon_regmap_lookup_by_pdevname(const char *s) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-ENOTSUPP); } static inline struct regmap *syscon_regmap_lookup_by_phandle( struct device_node *np, const char *property) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-ENOTSUPP); } #endif diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h index 558a485d03ab..238c8db953eb 100644 --- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h +++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h @@ -422,6 +422,7 @@ #define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_MASK (0x1 << 26) #define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_ENABLE (0x1 << 26) #define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_DISABLE (0x0 << 26) +#define IMX6SX_GPR5_PCIE_BTNRST_RESET BIT(19) #define IMX6SX_GPR5_CSI1_MUX_CTRL_MASK (0x3 << 4) #define IMX6SX_GPR5_CSI1_MUX_CTRL_EXT_PIN (0x0 << 4) #define IMX6SX_GPR5_CSI1_MUX_CTRL_CVD (0x1 << 4) @@ -435,6 +436,10 @@ #define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1) #define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1) +#define IMX6SX_GPR12_PCIE_TEST_POWERDOWN BIT(30) +#define IMX6SX_GPR12_PCIE_RX_EQ_MASK (0x7 << 0) +#define IMX6SX_GPR12_PCIE_RX_EQ_2 (0x2 << 0) + /* For imx6ul iomux gpr register field define */ #define IMX6UL_GPR1_ENET1_CLK_DIR (0x1 << 17) #define IMX6UL_GPR1_ENET2_CLK_DIR (0x1 << 18) diff --git a/include/linux/mfd/tps65086.h b/include/linux/mfd/tps65086.h new file mode 100644 index 000000000000..a228ae4c88d9 --- /dev/null +++ b/include/linux/mfd/tps65086.h @@ -0,0 +1,117 @@ +/* + * Copyright (C) 2015 Texas Instruments Incorporated - 
http://www.ti.com/ + * Andrew F. Davis <afd@ti.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether expressed or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License version 2 for more details. + * + * Based on the TPS65912 driver + */ + +#ifndef __LINUX_MFD_TPS65086_H +#define __LINUX_MFD_TPS65086_H + +#include <linux/device.h> +#include <linux/regmap.h> + +/* List of registers for TPS65086 */ +#define TPS65086_DEVICEID 0x01 +#define TPS65086_IRQ 0x02 +#define TPS65086_IRQ_MASK 0x03 +#define TPS65086_PMICSTAT 0x04 +#define TPS65086_SHUTDNSRC 0x05 +#define TPS65086_BUCK1CTRL 0x20 +#define TPS65086_BUCK2CTRL 0x21 +#define TPS65086_BUCK3DECAY 0x22 +#define TPS65086_BUCK3VID 0x23 +#define TPS65086_BUCK3SLPCTRL 0x24 +#define TPS65086_BUCK4CTRL 0x25 +#define TPS65086_BUCK5CTRL 0x26 +#define TPS65086_BUCK6CTRL 0x27 +#define TPS65086_LDOA2CTRL 0x28 +#define TPS65086_LDOA3CTRL 0x29 +#define TPS65086_DISCHCTRL1 0x40 +#define TPS65086_DISCHCTRL2 0x41 +#define TPS65086_DISCHCTRL3 0x42 +#define TPS65086_PG_DELAY1 0x43 +#define TPS65086_FORCESHUTDN 0x91 +#define TPS65086_BUCK1SLPCTRL 0x92 +#define TPS65086_BUCK2SLPCTRL 0x93 +#define TPS65086_BUCK4VID 0x94 +#define TPS65086_BUCK4SLPVID 0x95 +#define TPS65086_BUCK5VID 0x96 +#define TPS65086_BUCK5SLPVID 0x97 +#define TPS65086_BUCK6VID 0x98 +#define TPS65086_BUCK6SLPVID 0x99 +#define TPS65086_LDOA2VID 0x9A +#define TPS65086_LDOA3VID 0x9B +#define TPS65086_BUCK123CTRL 0x9C +#define TPS65086_PG_DELAY2 0x9D +#define TPS65086_PIN_EN_MASK1 0x9E +#define TPS65086_PIN_EN_MASK2 0x9F +#define TPS65086_SWVTT_EN 0x9F +#define TPS65086_PIN_EN_OVR1 0xA0 +#define TPS65086_PIN_EN_OVR2 0xA1 +#define TPS65086_GPOCTRL 0xA1 +#define TPS65086_PWR_FAULT_MASK1 0xA2 +#define TPS65086_PWR_FAULT_MASK2 0xA3 +#define TPS65086_GPO1PG_CTRL1 0xA4 +#define TPS65086_GPO1PG_CTRL2 0xA5 +#define TPS65086_GPO4PG_CTRL1 0xA6 +#define TPS65086_GPO4PG_CTRL2 0xA7 +#define TPS65086_GPO2PG_CTRL1 0xA8 +#define TPS65086_GPO2PG_CTRL2 0xA9 +#define TPS65086_GPO3PG_CTRL1 0xAA +#define TPS65086_GPO3PG_CTRL2 0xAB +#define TPS65086_LDOA1CTRL 0xAE +#define TPS65086_PG_STATUS1 0xB0 +#define TPS65086_PG_STATUS2 0xB1 +#define TPS65086_PWR_FAULT_STATUS1 0xB2 +#define TPS65086_PWR_FAULT_STATUS2 0xB3 +#define TPS65086_TEMPCRIT 0xB4 +#define TPS65086_TEMPHOT 0xB5 +#define TPS65086_OC_STATUS 0xB6 + +/* IRQ Register field definitions */ +#define TPS65086_IRQ_DIETEMP_MASK BIT(0) +#define TPS65086_IRQ_SHUTDN_MASK BIT(3) +#define TPS65086_IRQ_FAULT_MASK BIT(7) + +/* DEVICEID Register field definitions */ +#define TPS65086_DEVICEID_PART_MASK GENMASK(3, 0) +#define TPS65086_DEVICEID_OTP_MASK GENMASK(5, 4) +#define TPS65086_DEVICEID_REV_MASK GENMASK(7, 6) + +/* VID Masks */ +#define BUCK_VID_MASK GENMASK(7, 1) +#define VDOA1_VID_MASK GENMASK(4, 1) +#define VDOA23_VID_MASK GENMASK(3, 0) + +/* Define the TPS65086 IRQ numbers */ +enum tps65086_irqs { + TPS65086_IRQ_DIETEMP, + TPS65086_IRQ_SHUTDN, + TPS65086_IRQ_FAULT, +}; + +/** + * struct tps65086 - state holder for the tps65086 driver + * + * Device data may be used to access the TPS65086 chip + */ +struct tps65086 { + struct device *dev; + struct regmap *regmap; + + /* IRQ Data */ + int irq; + struct regmap_irq_chip_data *irq_data; +}; + +#endif /* 
__LINUX_MFD_TPS65086_H */ diff --git a/include/linux/mfd/tps65090.h b/include/linux/mfd/tps65090.h index 0bf2708df150..67d144b3b8f9 100644 --- a/include/linux/mfd/tps65090.h +++ b/include/linux/mfd/tps65090.h @@ -77,6 +77,11 @@ enum { #define TPS65090_REG_CG_CTRL5 0x09 #define TPS65090_REG_CG_STATUS1 0x0a #define TPS65090_REG_CG_STATUS2 0x0b +#define TPS65090_REG_AD_OUT1 0x17 +#define TPS65090_REG_AD_OUT2 0x18 + +#define TPS65090_MAX_REG TPS65090_REG_AD_OUT2 +#define TPS65090_NUM_REGS (TPS65090_MAX_REG + 1) struct tps65090 { struct device *dev; diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h index 6d309032dc0d..1a603701550e 100644 --- a/include/linux/mfd/tps65912.h +++ b/include/linux/mfd/tps65912.h @@ -1,28 +1,27 @@ /* - * tps65912.h -- TI TPS6591x + * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Andrew F. Davis <afd@ti.com> * - * Copyright 2011 Texas Instruments Inc. + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. * - * Author: Margarita Olaya <magi@slimlogic.co.uk> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether expressed or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License version 2 for more details. * + * Based on the TPS65218 driver and the previous TPS65912 driver by + * Margarita Olaya Cabrera <magi@slimlogic.co.uk> */ #ifndef __LINUX_MFD_TPS65912_H #define __LINUX_MFD_TPS65912_H -/* TPS regulator type list */ -#define REGULATOR_LDO 0 -#define REGULATOR_DCDC 1 - -/* - * List of registers for TPS65912 - */ +#include <linux/device.h> +#include <linux/regmap.h> +/* List of registers for TPS65912 */ #define TPS65912_DCDC1_CTRL 0x00 #define TPS65912_DCDC2_CTRL 0x01 #define TPS65912_DCDC3_CTRL 0x02 @@ -126,41 +125,45 @@ #define TPS65912_VERNUM 0x64 #define TPS6591X_MAX_REGISTER 0x64 -/* IRQ Definitions */ -#define TPS65912_IRQ_PWRHOLD_F 0 -#define TPS65912_IRQ_VMON 1 -#define TPS65912_IRQ_PWRON 2 -#define TPS65912_IRQ_PWRON_LP 3 -#define TPS65912_IRQ_PWRHOLD_R 4 -#define TPS65912_IRQ_HOTDIE 5 -#define TPS65912_IRQ_GPIO1_R 6 -#define TPS65912_IRQ_GPIO1_F 7 -#define TPS65912_IRQ_GPIO2_R 8 -#define TPS65912_IRQ_GPIO2_F 9 -#define TPS65912_IRQ_GPIO3_R 10 -#define TPS65912_IRQ_GPIO3_F 11 -#define TPS65912_IRQ_GPIO4_R 12 -#define TPS65912_IRQ_GPIO4_F 13 -#define TPS65912_IRQ_GPIO5_R 14 -#define TPS65912_IRQ_GPIO5_F 15 -#define TPS65912_IRQ_PGOOD_DCDC1 16 -#define TPS65912_IRQ_PGOOD_DCDC2 17 -#define TPS65912_IRQ_PGOOD_DCDC3 18 -#define TPS65912_IRQ_PGOOD_DCDC4 19 -#define TPS65912_IRQ_PGOOD_LDO1 20 -#define TPS65912_IRQ_PGOOD_LDO2 21 -#define TPS65912_IRQ_PGOOD_LDO3 22 -#define TPS65912_IRQ_PGOOD_LDO4 23 -#define TPS65912_IRQ_PGOOD_LDO5 24 -#define TPS65912_IRQ_PGOOD_LDO6 25 -#define TPS65912_IRQ_PGOOD_LDO7 26 -#define TPS65912_IRQ_PGOOD_LD08 27 -#define TPS65912_IRQ_PGOOD_LDO9 28 -#define TPS65912_IRQ_PGOOD_LDO10 29 +/* INT_STS Register field definitions */ +#define TPS65912_INT_STS_PWRHOLD_F BIT(0) +#define TPS65912_INT_STS_VMON BIT(1) +#define TPS65912_INT_STS_PWRON BIT(2) +#define TPS65912_INT_STS_PWRON_LP BIT(3) 
+#define TPS65912_INT_STS_PWRHOLD_R BIT(4) +#define TPS65912_INT_STS_HOTDIE BIT(5) +#define TPS65912_INT_STS_GPIO1_R BIT(6) +#define TPS65912_INT_STS_GPIO1_F BIT(7) + +/* INT_STS Register field definitions */ +#define TPS65912_INT_STS2_GPIO2_R BIT(0) +#define TPS65912_INT_STS2_GPIO2_F BIT(1) +#define TPS65912_INT_STS2_GPIO3_R BIT(2) +#define TPS65912_INT_STS2_GPIO3_F BIT(3) +#define TPS65912_INT_STS2_GPIO4_R BIT(4) +#define TPS65912_INT_STS2_GPIO4_F BIT(5) +#define TPS65912_INT_STS2_GPIO5_R BIT(6) +#define TPS65912_INT_STS2_GPIO5_F BIT(7) -#define TPS65912_NUM_IRQ 30 +/* INT_STS Register field definitions */ +#define TPS65912_INT_STS3_PGOOD_DCDC1 BIT(0) +#define TPS65912_INT_STS3_PGOOD_DCDC2 BIT(1) +#define TPS65912_INT_STS3_PGOOD_DCDC3 BIT(2) +#define TPS65912_INT_STS3_PGOOD_DCDC4 BIT(3) +#define TPS65912_INT_STS3_PGOOD_LDO1 BIT(4) +#define TPS65912_INT_STS3_PGOOD_LDO2 BIT(5) +#define TPS65912_INT_STS3_PGOOD_LDO3 BIT(6) +#define TPS65912_INT_STS3_PGOOD_LDO4 BIT(7) -/* GPIO 1 and 2 Register Definitions */ +/* INT_STS Register field definitions */ +#define TPS65912_INT_STS4_PGOOD_LDO5 BIT(0) +#define TPS65912_INT_STS4_PGOOD_LDO6 BIT(1) +#define TPS65912_INT_STS4_PGOOD_LDO7 BIT(2) +#define TPS65912_INT_STS4_PGOOD_LDO8 BIT(3) +#define TPS65912_INT_STS4_PGOOD_LDO9 BIT(4) +#define TPS65912_INT_STS4_PGOOD_LDO10 BIT(5) + +/* GPIO 1 and 2 Register field definitions */ #define GPIO_SLEEP_MASK 0x80 #define GPIO_SLEEP_SHIFT 7 #define GPIO_DEB_MASK 0x10 @@ -172,7 +175,7 @@ #define GPIO_SET_MASK 0x01 #define GPIO_SET_SHIFT 0 -/* GPIO 3 Register Definitions */ +/* GPIO 3 Register field definitions */ #define GPIO3_SLEEP_MASK 0x80 #define GPIO3_SLEEP_SHIFT 7 #define GPIO3_SEL_MASK 0x40 @@ -190,7 +193,7 @@ #define GPIO3_SET_MASK 0x01 #define GPIO3_SET_SHIFT 0 -/* GPIO 4 Register Definitions */ +/* GPIO 4 Register field definitions */ #define GPIO4_SLEEP_MASK 0x80 #define GPIO4_SLEEP_SHIFT 7 #define GPIO4_SEL_MASK 0x40 @@ -264,65 +267,75 @@ #define DCDC_LIMIT_MAX_SEL_MASK 0x3F #define DCDC_LIMIT_MAX_SEL_SHIFT 0 -/** - * struct tps65912_board - * Board platform dat may be used to initialize regulators. 
- */ -struct tps65912_board { - int is_dcdc1_avs; - int is_dcdc2_avs; - int is_dcdc3_avs; - int is_dcdc4_avs; - int irq; - int irq_base; - int gpio_base; - struct regulator_init_data *tps65912_pmic_init_data; +/* Define the TPS65912 IRQ numbers */ +enum tps65912_irqs { + /* INT_STS registers */ + TPS65912_IRQ_PWRHOLD_F, + TPS65912_IRQ_VMON, + TPS65912_IRQ_PWRON, + TPS65912_IRQ_PWRON_LP, + TPS65912_IRQ_PWRHOLD_R, + TPS65912_IRQ_HOTDIE, + TPS65912_IRQ_GPIO1_R, + TPS65912_IRQ_GPIO1_F, + /* INT_STS2 registers */ + TPS65912_IRQ_GPIO2_R, + TPS65912_IRQ_GPIO2_F, + TPS65912_IRQ_GPIO3_R, + TPS65912_IRQ_GPIO3_F, + TPS65912_IRQ_GPIO4_R, + TPS65912_IRQ_GPIO4_F, + TPS65912_IRQ_GPIO5_R, + TPS65912_IRQ_GPIO5_F, + /* INT_STS3 registers */ + TPS65912_IRQ_PGOOD_DCDC1, + TPS65912_IRQ_PGOOD_DCDC2, + TPS65912_IRQ_PGOOD_DCDC3, + TPS65912_IRQ_PGOOD_DCDC4, + TPS65912_IRQ_PGOOD_LDO1, + TPS65912_IRQ_PGOOD_LDO2, + TPS65912_IRQ_PGOOD_LDO3, + TPS65912_IRQ_PGOOD_LDO4, + /* INT_STS4 registers */ + TPS65912_IRQ_PGOOD_LDO5, + TPS65912_IRQ_PGOOD_LDO6, + TPS65912_IRQ_PGOOD_LDO7, + TPS65912_IRQ_PGOOD_LDO8, + TPS65912_IRQ_PGOOD_LDO9, + TPS65912_IRQ_PGOOD_LDO10, }; -/** - * struct tps65912 - tps65912 sub-driver chip access routines +/* + * struct tps65912 - state holder for the tps65912 driver + * + * Device data may be used to access the TPS65912 chip */ - struct tps65912 { struct device *dev; - /* for read/write acces */ - struct mutex io_mutex; - - /* For device IO interfaces: I2C or SPI */ - void *control_data; - - int (*read)(struct tps65912 *tps65912, u8 reg, int size, void *dest); - int (*write)(struct tps65912 *tps65912, u8 reg, int size, void *src); - - /* Client devices */ - struct tps65912_pmic *pmic; + struct regmap *regmap; - /* GPIO Handling */ - struct gpio_chip gpio; + /* IRQ Data */ + int irq; + struct regmap_irq_chip_data *irq_data; +}; - /* IRQ Handling */ - struct mutex irq_lock; - int chip_irq; - int irq_base; - int irq_num; - u32 irq_mask; +static const struct regmap_range tps65912_yes_ranges[] = { + regmap_reg_range(TPS65912_INT_STS, TPS65912_GPIO5), }; -struct tps65912_platform_data { - int irq; - int irq_base; +static const struct regmap_access_table tps65912_volatile_table = { + .yes_ranges = tps65912_yes_ranges, + .n_yes_ranges = ARRAY_SIZE(tps65912_yes_ranges), }; -unsigned int tps_chip(void); +static const struct regmap_config tps65912_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .cache_type = REGCACHE_RBTREE, + .volatile_table = &tps65912_volatile_table, +}; -int tps65912_set_bits(struct tps65912 *tps65912, u8 reg, u8 mask); -int tps65912_clear_bits(struct tps65912 *tps65912, u8 reg, u8 mask); -int tps65912_reg_read(struct tps65912 *tps65912, u8 reg); -int tps65912_reg_write(struct tps65912 *tps65912, u8 reg, u8 val); -int tps65912_device_init(struct tps65912 *tps65912); -void tps65912_device_exit(struct tps65912 *tps65912); -int tps65912_irq_init(struct tps65912 *tps65912, int irq, - struct tps65912_platform_data *pdata); -int tps65912_irq_exit(struct tps65912 *tps65912); +int tps65912_device_init(struct tps65912 *tps); +int tps65912_device_exit(struct tps65912 *tps); #endif /* __LINUX_MFD_TPS65912_H */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index cac1c0904d5f..9b50325e4ddf 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -23,9 +23,13 @@ enum migrate_reason { MR_SYSCALL, /* also applies to cpusets */ MR_MEMPOLICY_MBIND, MR_NUMA_MISPLACED, - MR_CMA + MR_CMA, + MR_TYPES }; +/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */ 
+extern char *migrate_reason_names[MR_TYPES]; + #ifdef CONFIG_MIGRATION extern void putback_movable_pages(struct list_head *l); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 430a929f048b..8541a913f6a3 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -44,6 +44,8 @@ #include <linux/timecounter.h> +#define DEFAULT_UAR_PAGE_SHIFT 12 + #define MAX_MSIX_P_PORT 17 #define MAX_MSIX 64 #define MIN_MSIX_P_PORT 5 @@ -217,6 +219,7 @@ enum { MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB = 1ULL << 31, MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1ULL << 32, MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 = 1ULL << 33, + MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER = 1ULL << 34, }; enum { @@ -856,6 +859,7 @@ struct mlx4_dev { u64 regid_promisc_array[MLX4_MAX_PORTS + 1]; u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; struct mlx4_vf_dev *dev_vfs; + u8 uar_page_shift; }; struct mlx4_clock_params { @@ -1157,6 +1161,8 @@ enum mlx4_net_trans_promisc_mode { MLX4_FS_REGULAR = 1, MLX4_FS_ALL_DEFAULT, MLX4_FS_MC_DEFAULT, + MLX4_FS_MIRROR_RX_PORT, + MLX4_FS_MIRROR_SX_PORT, MLX4_FS_UC_SNIFFER, MLX4_FS_MC_SNIFFER, MLX4_FS_MODE_NUM, /* should be last */ @@ -1528,4 +1534,14 @@ int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, int mlx4_get_internal_clock_params(struct mlx4_dev *dev, struct mlx4_clock_params *params); +static inline int mlx4_to_hw_uar_index(struct mlx4_dev *dev, int index) +{ + return (index << (PAGE_SHIFT - dev->uar_page_shift)); +} + +static inline int mlx4_get_num_reserved_uar(struct mlx4_dev *dev) +{ + /* The first 128 UARs are used for EQ doorbells */ + return (128 >> (PAGE_SHIFT - dev->uar_page_shift)); +} #endif /* MLX4_DEVICE_H */ diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index 2e8af001c5da..bd0e7075ea6d 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h @@ -33,6 +33,7 @@ #ifndef MLX4_DRIVER_H #define MLX4_DRIVER_H +#include <net/devlink.h> #include <linux/mlx4/device.h> struct mlx4_dev; @@ -89,6 +90,8 @@ int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p); void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port); +struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port); + static inline u64 mlx4_mac_to_u64(u8 *addr) { u64 mac = 0; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 987764afa65c..02ac3000ee3c 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -105,6 +105,29 @@ __mlx5_mask(typ, fld)) ___t; \ }) +/* Big endian getters */ +#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\ + __mlx5_64_off(typ, fld))) + +#define MLX5_GET_BE(type_t, typ, p, fld) ({ \ + type_t tmp; \ + switch (sizeof(tmp)) { \ + case sizeof(u8): \ + tmp = (__force type_t)MLX5_GET(typ, p, fld); \ + break; \ + case sizeof(u16): \ + tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \ + break; \ + case sizeof(u32): \ + tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \ + break; \ + case sizeof(u64): \ + tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \ + break; \ + } \ + tmp; \ + }) + enum { MLX5_MAX_COMMANDS = 32, MLX5_CMD_DATA_BLOCK_SIZE = 512, @@ -351,6 +374,12 @@ enum { }; enum { + MLX5_BW_NO_LIMIT = 0, + MLX5_100_MBPS_UNIT = 3, + MLX5_GBPS_UNIT = 4, +}; + +enum { MLX5_MAX_PAGE_SHIFT = 31 }; @@ -1177,6 +1206,17 @@ enum { MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1, }; +enum mlx5_wol_mode { + MLX5_WOL_DISABLE = 0, + MLX5_WOL_SECURED_MAGIC = 1 << 1, + MLX5_WOL_MAGIC = 1 << 2, + MLX5_WOL_ARP = 1 << 3, + MLX5_WOL_BROADCAST = 
1 << 4, + MLX5_WOL_MULTICAST = 1 << 5, + MLX5_WOL_UNICAST = 1 << 6, + MLX5_WOL_PHY_ACTIVITY = 1 << 7, +}; + /* MLX5 DEV CAPs */ /* TODO: EAT.ME */ @@ -1284,7 +1324,8 @@ enum { MLX5_RFC_3635_COUNTERS_GROUP = 0x3, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5, MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10, - MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11 + MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11, + MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, }; static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) @@ -1294,6 +1335,11 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; } -#define MLX5_BY_PASS_NUM_PRIOS 9 +#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8 +#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8 +#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1 +#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\ + MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\ + MLX5_BY_PASS_NUM_MULTICAST_PRIOS) #endif /* MLX5_DEVICE_H */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 1e3006dcf35d..3a954465b2bf 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -54,7 +54,7 @@ enum { /* one minute for the sake of bringup. Generally, commands must always * complete and we may need to increase this timeout value */ - MLX5_CMD_TIMEOUT_MSEC = 7200 * 1000, + MLX5_CMD_TIMEOUT_MSEC = 60 * 1000, MLX5_CMD_WQ_MAX_NAME = 32, }; @@ -99,6 +99,8 @@ enum { }; enum { + MLX5_REG_QETCR = 0x4005, + MLX5_REG_QTCT = 0x400a, MLX5_REG_PCAP = 0x5001, MLX5_REG_PMTU = 0x5003, MLX5_REG_PTYS = 0x5004, @@ -338,7 +340,7 @@ struct mlx5_core_sig_ctx { u32 sigerr_count; }; -struct mlx5_core_mr { +struct mlx5_core_mkey { u64 iova; u64 size; u32 key; @@ -426,7 +428,7 @@ struct mlx5_srq_table { struct radix_tree_root tree; }; -struct mlx5_mr_table { +struct mlx5_mkey_table { /* protect radix tree */ rwlock_t lock; @@ -458,8 +460,6 @@ struct mlx5_priv { struct mlx5_uuar_info uuari; MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); - struct io_mapping *bf_mapping; - /* pages stuff */ struct workqueue_struct *pg_wq; struct rb_root page_root; @@ -484,9 +484,9 @@ struct mlx5_priv { struct mlx5_cq_table cq_table; /* end: cq staff */ - /* start: mr staff */ - struct mlx5_mr_table mr_table; - /* end: mr staff */ + /* start: mkey staff */ + struct mlx5_mkey_table mkey_table; + /* end: mkey staff */ /* start: alloc staff */ /* protect buffer alocation according to numa node */ @@ -717,7 +717,8 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); -int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); +int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar, + bool map_wc); void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); void mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); @@ -739,16 +740,18 @@ int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, struct mlx5_query_srq_mbox_out *out); int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm, int is_srq); -void mlx5_init_mr_table(struct mlx5_core_dev *dev); -void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev); -int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, +void mlx5_init_mkey_table(struct mlx5_core_dev *dev); +void 
mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev); +int mlx5_core_create_mkey(struct mlx5_core_dev *dev, + struct mlx5_core_mkey *mkey, struct mlx5_create_mkey_mbox_in *in, int inlen, mlx5_cmd_cbk_t callback, void *context, struct mlx5_create_mkey_mbox_out *out); -int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr); -int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, +int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, + struct mlx5_core_mkey *mkey); +int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, struct mlx5_query_mkey_mbox_out *out, int outlen); -int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, +int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey, u32 *mkey); int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); @@ -794,37 +797,6 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int arg, int write); -int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); -int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, - int ptys_size, int proto_mask, u8 local_port); -int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, - u32 *proto_cap, int proto_mask); -int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, - u32 *proto_admin, int proto_mask); -int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, - u8 *link_width_oper, u8 local_port); -int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, - u8 *proto_oper, int proto_mask, - u8 local_port); -int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, - int proto_mask); -int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, - enum mlx5_port_status status); -int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, - enum mlx5_port_status *status); - -int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port); -void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port); -void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, - u8 port); - -int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, - u8 *vl_hw_cap, u8 local_port); - -int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause); -int mlx5_query_port_pause(struct mlx5_core_dev *dev, - u32 *rx_pause, u32 *tx_pause); - int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, @@ -847,6 +819,8 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *odp_caps); +int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, + u8 port_num, void *out, size_t sz); static inline int fw_initializing(struct mlx5_core_dev *dev) { diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 8230caa3fb6e..8dec5508d93d 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -38,6 +38,10 @@ #define MLX5_FS_DEFAULT_FLOW_TAG 0x0 +enum { + MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16, +}; + #define LEFTOVERS_RULE_NUM 2 static inline void build_leftovers_ft_param(int *priority, int *n_ent, @@ -52,6 +56,7 @@ enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_BYPASS, 
MLX5_FLOW_NAMESPACE_KERNEL, MLX5_FLOW_NAMESPACE_LEFTOVERS, + MLX5_FLOW_NAMESPACE_ANCHOR, MLX5_FLOW_NAMESPACE_FDB, }; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 231ab6bcea76..e52730e01ed6 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -166,6 +166,8 @@ enum { MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829, MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a, MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b, + MLX5_CMD_OP_SET_WOL_ROL = 0x830, + MLX5_CMD_OP_QUERY_WOL_ROL = 0x831, MLX5_CMD_OP_CREATE_TIR = 0x900, MLX5_CMD_OP_MODIFY_TIR = 0x901, MLX5_CMD_OP_DESTROY_TIR = 0x902, @@ -207,15 +209,15 @@ struct mlx5_ifc_flow_table_fields_supported_bits { u8 outer_dmac[0x1]; u8 outer_smac[0x1]; u8 outer_ether_type[0x1]; - u8 reserved_0[0x1]; + u8 reserved_at_3[0x1]; u8 outer_first_prio[0x1]; u8 outer_first_cfi[0x1]; u8 outer_first_vid[0x1]; - u8 reserved_1[0x1]; + u8 reserved_at_7[0x1]; u8 outer_second_prio[0x1]; u8 outer_second_cfi[0x1]; u8 outer_second_vid[0x1]; - u8 reserved_2[0x1]; + u8 reserved_at_b[0x1]; u8 outer_sip[0x1]; u8 outer_dip[0x1]; u8 outer_frag[0x1]; @@ -230,21 +232,21 @@ struct mlx5_ifc_flow_table_fields_supported_bits { u8 outer_gre_protocol[0x1]; u8 outer_gre_key[0x1]; u8 outer_vxlan_vni[0x1]; - u8 reserved_3[0x5]; + u8 reserved_at_1a[0x5]; u8 source_eswitch_port[0x1]; u8 inner_dmac[0x1]; u8 inner_smac[0x1]; u8 inner_ether_type[0x1]; - u8 reserved_4[0x1]; + u8 reserved_at_23[0x1]; u8 inner_first_prio[0x1]; u8 inner_first_cfi[0x1]; u8 inner_first_vid[0x1]; - u8 reserved_5[0x1]; + u8 reserved_at_27[0x1]; u8 inner_second_prio[0x1]; u8 inner_second_cfi[0x1]; u8 inner_second_vid[0x1]; - u8 reserved_6[0x1]; + u8 reserved_at_2b[0x1]; u8 inner_sip[0x1]; u8 inner_dip[0x1]; u8 inner_frag[0x1]; @@ -256,37 +258,37 @@ struct mlx5_ifc_flow_table_fields_supported_bits { u8 inner_tcp_sport[0x1]; u8 inner_tcp_dport[0x1]; u8 inner_tcp_flags[0x1]; - u8 reserved_7[0x9]; + u8 reserved_at_37[0x9]; - u8 reserved_8[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_flow_table_prop_layout_bits { u8 ft_support[0x1]; - u8 reserved_0[0x2]; + u8 reserved_at_1[0x2]; u8 flow_modify_en[0x1]; u8 modify_root[0x1]; u8 identified_miss_table_mode[0x1]; u8 flow_table_modify[0x1]; - u8 reserved_1[0x19]; + u8 reserved_at_7[0x19]; - u8 reserved_2[0x2]; + u8 reserved_at_20[0x2]; u8 log_max_ft_size[0x6]; - u8 reserved_3[0x10]; + u8 reserved_at_28[0x10]; u8 max_ft_level[0x8]; - u8 reserved_4[0x20]; + u8 reserved_at_40[0x20]; - u8 reserved_5[0x18]; + u8 reserved_at_60[0x18]; u8 log_max_ft_num[0x8]; - u8 reserved_6[0x18]; + u8 reserved_at_80[0x18]; u8 log_max_destination[0x8]; - u8 reserved_7[0x18]; + u8 reserved_at_a0[0x18]; u8 log_max_flow[0x8]; - u8 reserved_8[0x40]; + u8 reserved_at_c0[0x40]; struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support; @@ -298,13 +300,13 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits { u8 receive[0x1]; u8 write[0x1]; u8 read[0x1]; - u8 reserved_0[0x1]; + u8 reserved_at_4[0x1]; u8 srq_receive[0x1]; - u8 reserved_1[0x1a]; + u8 reserved_at_6[0x1a]; }; struct mlx5_ifc_ipv4_layout_bits { - u8 reserved_0[0x60]; + u8 reserved_at_0[0x60]; u8 ipv4[0x20]; }; @@ -316,7 +318,7 @@ struct mlx5_ifc_ipv6_layout_bits { union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits { struct mlx5_ifc_ipv6_layout_bits ipv6_layout; struct mlx5_ifc_ipv4_layout_bits ipv4_layout; - u8 reserved_0[0x80]; + u8 reserved_at_0[0x80]; }; struct mlx5_ifc_fte_match_set_lyr_2_4_bits { @@ -336,15 +338,15 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 ip_dscp[0x6]; u8 
ip_ecn[0x2]; u8 vlan_tag[0x1]; - u8 reserved_0[0x1]; + u8 reserved_at_91[0x1]; u8 frag[0x1]; - u8 reserved_1[0x4]; + u8 reserved_at_93[0x4]; u8 tcp_flags[0x9]; u8 tcp_sport[0x10]; u8 tcp_dport[0x10]; - u8 reserved_2[0x20]; + u8 reserved_at_c0[0x20]; u8 udp_sport[0x10]; u8 udp_dport[0x10]; @@ -355,9 +357,9 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits { }; struct mlx5_ifc_fte_match_set_misc_bits { - u8 reserved_0[0x20]; + u8 reserved_at_0[0x20]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 source_port[0x10]; u8 outer_second_prio[0x3]; @@ -369,31 +371,31 @@ struct mlx5_ifc_fte_match_set_misc_bits { u8 outer_second_vlan_tag[0x1]; u8 inner_second_vlan_tag[0x1]; - u8 reserved_2[0xe]; + u8 reserved_at_62[0xe]; u8 gre_protocol[0x10]; u8 gre_key_h[0x18]; u8 gre_key_l[0x8]; u8 vxlan_vni[0x18]; - u8 reserved_3[0x8]; + u8 reserved_at_b8[0x8]; - u8 reserved_4[0x20]; + u8 reserved_at_c0[0x20]; - u8 reserved_5[0xc]; + u8 reserved_at_e0[0xc]; u8 outer_ipv6_flow_label[0x14]; - u8 reserved_6[0xc]; + u8 reserved_at_100[0xc]; u8 inner_ipv6_flow_label[0x14]; - u8 reserved_7[0xe0]; + u8 reserved_at_120[0xe0]; }; struct mlx5_ifc_cmd_pas_bits { u8 pa_h[0x20]; u8 pa_l[0x14]; - u8 reserved_0[0xc]; + u8 reserved_at_34[0xc]; }; struct mlx5_ifc_uint64_bits { @@ -418,31 +420,31 @@ enum { struct mlx5_ifc_ads_bits { u8 fl[0x1]; u8 free_ar[0x1]; - u8 reserved_0[0xe]; + u8 reserved_at_2[0xe]; u8 pkey_index[0x10]; - u8 reserved_1[0x8]; + u8 reserved_at_20[0x8]; u8 grh[0x1]; u8 mlid[0x7]; u8 rlid[0x10]; u8 ack_timeout[0x5]; - u8 reserved_2[0x3]; + u8 reserved_at_45[0x3]; u8 src_addr_index[0x8]; - u8 reserved_3[0x4]; + u8 reserved_at_50[0x4]; u8 stat_rate[0x4]; u8 hop_limit[0x8]; - u8 reserved_4[0x4]; + u8 reserved_at_60[0x4]; u8 tclass[0x8]; u8 flow_label[0x14]; u8 rgid_rip[16][0x8]; - u8 reserved_5[0x4]; + u8 reserved_at_100[0x4]; u8 f_dscp[0x1]; u8 f_ecn[0x1]; - u8 reserved_6[0x1]; + u8 reserved_at_106[0x1]; u8 f_eth_prio[0x1]; u8 ecn[0x2]; u8 dscp[0x6]; @@ -458,25 +460,26 @@ struct mlx5_ifc_ads_bits { }; struct mlx5_ifc_flow_table_nic_cap_bits { - u8 reserved_0[0x200]; + u8 nic_rx_multi_path_tirs[0x1]; + u8 reserved_at_1[0x1ff]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; - u8 reserved_1[0x200]; + u8 reserved_at_400[0x200]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit; - u8 reserved_2[0x200]; + u8 reserved_at_a00[0x200]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; - u8 reserved_3[0x7200]; + u8 reserved_at_e00[0x7200]; }; struct mlx5_ifc_flow_table_eswitch_cap_bits { - u8 reserved_0[0x200]; + u8 reserved_at_0[0x200]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb; @@ -484,7 +487,7 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits { struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress; - u8 reserved_1[0x7800]; + u8 reserved_at_800[0x7800]; }; struct mlx5_ifc_e_switch_cap_bits { @@ -493,9 +496,9 @@ struct mlx5_ifc_e_switch_cap_bits { u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert_if_not_exist[0x1]; u8 vport_cvlan_insert_overwrite[0x1]; - u8 reserved_0[0x1b]; + u8 reserved_at_5[0x1b]; - u8 reserved_1[0x7e0]; + u8 reserved_at_20[0x7e0]; }; struct mlx5_ifc_per_protocol_networking_offload_caps_bits { @@ -504,51 +507,51 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 lro_cap[0x1]; u8 lro_psh_flag[0x1]; u8 lro_time_stamp[0x1]; - u8 reserved_0[0x3]; + 
u8 reserved_at_5[0x3]; u8 self_lb_en_modifiable[0x1]; - u8 reserved_1[0x2]; + u8 reserved_at_9[0x2]; u8 max_lso_cap[0x5]; - u8 reserved_2[0x4]; + u8 reserved_at_10[0x4]; u8 rss_ind_tbl_cap[0x4]; - u8 reserved_3[0x3]; + u8 reserved_at_18[0x3]; u8 tunnel_lso_const_out_ip_id[0x1]; - u8 reserved_4[0x2]; + u8 reserved_at_1c[0x2]; u8 tunnel_statless_gre[0x1]; u8 tunnel_stateless_vxlan[0x1]; - u8 reserved_5[0x20]; + u8 reserved_at_20[0x20]; - u8 reserved_6[0x10]; + u8 reserved_at_40[0x10]; u8 lro_min_mss_size[0x10]; - u8 reserved_7[0x120]; + u8 reserved_at_60[0x120]; u8 lro_timer_supported_periods[4][0x20]; - u8 reserved_8[0x600]; + u8 reserved_at_200[0x600]; }; struct mlx5_ifc_roce_cap_bits { u8 roce_apm[0x1]; - u8 reserved_0[0x1f]; + u8 reserved_at_1[0x1f]; - u8 reserved_1[0x60]; + u8 reserved_at_20[0x60]; - u8 reserved_2[0xc]; + u8 reserved_at_80[0xc]; u8 l3_type[0x4]; - u8 reserved_3[0x8]; + u8 reserved_at_90[0x8]; u8 roce_version[0x8]; - u8 reserved_4[0x10]; + u8 reserved_at_a0[0x10]; u8 r_roce_dest_udp_port[0x10]; u8 r_roce_max_src_udp_port[0x10]; u8 r_roce_min_src_udp_port[0x10]; - u8 reserved_5[0x10]; + u8 reserved_at_e0[0x10]; u8 roce_address_table_size[0x10]; - u8 reserved_6[0x700]; + u8 reserved_at_100[0x700]; }; enum { @@ -576,35 +579,35 @@ enum { }; struct mlx5_ifc_atomic_caps_bits { - u8 reserved_0[0x40]; + u8 reserved_at_0[0x40]; u8 atomic_req_8B_endianess_mode[0x2]; - u8 reserved_1[0x4]; + u8 reserved_at_42[0x4]; u8 supported_atomic_req_8B_endianess_mode_1[0x1]; - u8 reserved_2[0x19]; + u8 reserved_at_47[0x19]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; - u8 reserved_4[0x10]; + u8 reserved_at_80[0x10]; u8 atomic_operations[0x10]; - u8 reserved_5[0x10]; + u8 reserved_at_a0[0x10]; u8 atomic_size_qp[0x10]; - u8 reserved_6[0x10]; + u8 reserved_at_c0[0x10]; u8 atomic_size_dc[0x10]; - u8 reserved_7[0x720]; + u8 reserved_at_e0[0x720]; }; struct mlx5_ifc_odp_cap_bits { - u8 reserved_0[0x40]; + u8 reserved_at_0[0x40]; u8 sig[0x1]; - u8 reserved_1[0x1f]; + u8 reserved_at_41[0x1f]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps; @@ -612,7 +615,7 @@ struct mlx5_ifc_odp_cap_bits { struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps; - u8 reserved_3[0x720]; + u8 reserved_at_e0[0x720]; }; enum { @@ -660,55 +663,55 @@ enum { }; struct mlx5_ifc_cmd_hca_cap_bits { - u8 reserved_0[0x80]; + u8 reserved_at_0[0x80]; u8 log_max_srq_sz[0x8]; u8 log_max_qp_sz[0x8]; - u8 reserved_1[0xb]; + u8 reserved_at_90[0xb]; u8 log_max_qp[0x5]; - u8 reserved_2[0xb]; + u8 reserved_at_a0[0xb]; u8 log_max_srq[0x5]; - u8 reserved_3[0x10]; + u8 reserved_at_b0[0x10]; - u8 reserved_4[0x8]; + u8 reserved_at_c0[0x8]; u8 log_max_cq_sz[0x8]; - u8 reserved_5[0xb]; + u8 reserved_at_d0[0xb]; u8 log_max_cq[0x5]; u8 log_max_eq_sz[0x8]; - u8 reserved_6[0x2]; + u8 reserved_at_e8[0x2]; u8 log_max_mkey[0x6]; - u8 reserved_7[0xc]; + u8 reserved_at_f0[0xc]; u8 log_max_eq[0x4]; u8 max_indirection[0x8]; - u8 reserved_8[0x1]; + u8 reserved_at_108[0x1]; u8 log_max_mrw_sz[0x7]; - u8 reserved_9[0x2]; + u8 reserved_at_110[0x2]; u8 log_max_bsf_list_size[0x6]; - u8 reserved_10[0x2]; + u8 reserved_at_118[0x2]; u8 log_max_klm_list_size[0x6]; - u8 reserved_11[0xa]; + u8 reserved_at_120[0xa]; u8 log_max_ra_req_dc[0x6]; - u8 reserved_12[0xa]; + u8 reserved_at_130[0xa]; u8 log_max_ra_res_dc[0x6]; - u8 reserved_13[0xa]; + u8 reserved_at_140[0xa]; u8 log_max_ra_req_qp[0x6]; - u8 reserved_14[0xa]; + u8 reserved_at_150[0xa]; u8 log_max_ra_res_qp[0x6]; u8 pad_cap[0x1]; u8 
cc_query_allowed[0x1]; u8 cc_modify_allowed[0x1]; - u8 reserved_15[0xd]; + u8 reserved_at_163[0xd]; u8 gid_table_size[0x10]; u8 out_of_seq_cnt[0x1]; u8 vport_counters[0x1]; - u8 reserved_16[0x4]; + u8 reserved_at_182[0x4]; u8 max_qp_cnt[0xa]; u8 pkey_table_size[0x10]; @@ -716,158 +719,175 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 vhca_group_manager[0x1]; u8 ib_virt[0x1]; u8 eth_virt[0x1]; - u8 reserved_17[0x1]; + u8 reserved_at_1a4[0x1]; u8 ets[0x1]; u8 nic_flow_table[0x1]; u8 eswitch_flow_table[0x1]; u8 early_vf_enable; - u8 reserved_18[0x2]; + u8 reserved_at_1a8[0x2]; u8 local_ca_ack_delay[0x5]; - u8 reserved_19[0x6]; + u8 reserved_at_1af[0x6]; u8 port_type[0x2]; u8 num_ports[0x8]; - u8 reserved_20[0x3]; + u8 reserved_at_1bf[0x3]; u8 log_max_msg[0x5]; - u8 reserved_21[0x18]; + u8 reserved_at_1c7[0x4]; + u8 max_tc[0x4]; + u8 reserved_at_1cf[0x6]; + u8 rol_s[0x1]; + u8 rol_g[0x1]; + u8 reserved_at_1d7[0x1]; + u8 wol_s[0x1]; + u8 wol_g[0x1]; + u8 wol_a[0x1]; + u8 wol_b[0x1]; + u8 wol_m[0x1]; + u8 wol_u[0x1]; + u8 wol_p[0x1]; u8 stat_rate_support[0x10]; - u8 reserved_22[0xc]; + u8 reserved_at_1ef[0xc]; u8 cqe_version[0x4]; u8 compact_address_vector[0x1]; - u8 reserved_23[0xe]; + u8 reserved_at_200[0x3]; + u8 ipoib_basic_offloads[0x1]; + u8 reserved_at_204[0xa]; u8 drain_sigerr[0x1]; u8 cmdif_checksum[0x2]; u8 sigerr_cqe[0x1]; - u8 reserved_24[0x1]; + u8 reserved_at_212[0x1]; u8 wq_signature[0x1]; u8 sctr_data_cqe[0x1]; - u8 reserved_25[0x1]; + u8 reserved_at_215[0x1]; u8 sho[0x1]; u8 tph[0x1]; u8 rf[0x1]; u8 dct[0x1]; - u8 reserved_26[0x1]; + u8 reserved_at_21a[0x1]; u8 eth_net_offloads[0x1]; u8 roce[0x1]; u8 atomic[0x1]; - u8 reserved_27[0x1]; + u8 reserved_at_21e[0x1]; u8 cq_oi[0x1]; u8 cq_resize[0x1]; u8 cq_moderation[0x1]; - u8 reserved_28[0x3]; + u8 reserved_at_222[0x3]; u8 cq_eq_remap[0x1]; u8 pg[0x1]; u8 block_lb_mc[0x1]; - u8 reserved_29[0x1]; + u8 reserved_at_228[0x1]; u8 scqe_break_moderation[0x1]; - u8 reserved_30[0x1]; + u8 reserved_at_22a[0x1]; u8 cd[0x1]; - u8 reserved_31[0x1]; + u8 reserved_at_22c[0x1]; u8 apm[0x1]; - u8 reserved_32[0x7]; + u8 reserved_at_22e[0x2]; + u8 imaicl[0x1]; + u8 reserved_at_231[0x4]; u8 qkv[0x1]; u8 pkv[0x1]; - u8 reserved_33[0x4]; + u8 set_deth_sqpn[0x1]; + u8 reserved_at_239[0x3]; u8 xrc[0x1]; u8 ud[0x1]; u8 uc[0x1]; u8 rc[0x1]; - u8 reserved_34[0xa]; + u8 reserved_at_23f[0xa]; u8 uar_sz[0x6]; - u8 reserved_35[0x8]; + u8 reserved_at_24f[0x8]; u8 log_pg_sz[0x8]; u8 bf[0x1]; - u8 reserved_36[0x1]; + u8 reserved_at_260[0x1]; u8 pad_tx_eth_packet[0x1]; - u8 reserved_37[0x8]; + u8 reserved_at_262[0x8]; u8 log_bf_reg_size[0x5]; - u8 reserved_38[0x10]; + u8 reserved_at_26f[0x10]; - u8 reserved_39[0x10]; + u8 reserved_at_27f[0x10]; u8 max_wqe_sz_sq[0x10]; - u8 reserved_40[0x10]; + u8 reserved_at_29f[0x10]; u8 max_wqe_sz_rq[0x10]; - u8 reserved_41[0x10]; + u8 reserved_at_2bf[0x10]; u8 max_wqe_sz_sq_dc[0x10]; - u8 reserved_42[0x7]; + u8 reserved_at_2df[0x7]; u8 max_qp_mcg[0x19]; - u8 reserved_43[0x18]; + u8 reserved_at_2ff[0x18]; u8 log_max_mcg[0x8]; - u8 reserved_44[0x3]; + u8 reserved_at_31f[0x3]; u8 log_max_transport_domain[0x5]; - u8 reserved_45[0x3]; + u8 reserved_at_327[0x3]; u8 log_max_pd[0x5]; - u8 reserved_46[0xb]; + u8 reserved_at_32f[0xb]; u8 log_max_xrcd[0x5]; - u8 reserved_47[0x20]; + u8 reserved_at_33f[0x20]; - u8 reserved_48[0x3]; + u8 reserved_at_35f[0x3]; u8 log_max_rq[0x5]; - u8 reserved_49[0x3]; + u8 reserved_at_367[0x3]; u8 log_max_sq[0x5]; - u8 reserved_50[0x3]; + u8 reserved_at_36f[0x3]; u8 log_max_tir[0x5]; - u8 reserved_51[0x3]; + u8 
reserved_at_377[0x3]; u8 log_max_tis[0x5]; u8 basic_cyclic_rcv_wqe[0x1]; - u8 reserved_52[0x2]; + u8 reserved_at_380[0x2]; u8 log_max_rmp[0x5]; - u8 reserved_53[0x3]; + u8 reserved_at_387[0x3]; u8 log_max_rqt[0x5]; - u8 reserved_54[0x3]; + u8 reserved_at_38f[0x3]; u8 log_max_rqt_size[0x5]; - u8 reserved_55[0x3]; + u8 reserved_at_397[0x3]; u8 log_max_tis_per_sq[0x5]; - u8 reserved_56[0x3]; + u8 reserved_at_39f[0x3]; u8 log_max_stride_sz_rq[0x5]; - u8 reserved_57[0x3]; + u8 reserved_at_3a7[0x3]; u8 log_min_stride_sz_rq[0x5]; - u8 reserved_58[0x3]; + u8 reserved_at_3af[0x3]; u8 log_max_stride_sz_sq[0x5]; - u8 reserved_59[0x3]; + u8 reserved_at_3b7[0x3]; u8 log_min_stride_sz_sq[0x5]; - u8 reserved_60[0x1b]; + u8 reserved_at_3bf[0x1b]; u8 log_max_wq_sz[0x5]; u8 nic_vport_change_event[0x1]; - u8 reserved_61[0xa]; + u8 reserved_at_3e0[0xa]; u8 log_max_vlan_list[0x5]; - u8 reserved_62[0x3]; + u8 reserved_at_3ef[0x3]; u8 log_max_current_mc_list[0x5]; - u8 reserved_63[0x3]; + u8 reserved_at_3f7[0x3]; u8 log_max_current_uc_list[0x5]; - u8 reserved_64[0x80]; + u8 reserved_at_3ff[0x80]; - u8 reserved_65[0x3]; + u8 reserved_at_47f[0x3]; u8 log_max_l2_table[0x5]; - u8 reserved_66[0x8]; + u8 reserved_at_487[0x8]; u8 log_uar_page_sz[0x10]; - u8 reserved_67[0x20]; + u8 reserved_at_49f[0x20]; u8 device_frequency_mhz[0x20]; u8 device_frequency_khz[0x20]; - u8 reserved_68[0x5f]; + u8 reserved_at_4ff[0x5f]; u8 cqe_zip[0x1]; u8 cqe_zip_timeout[0x10]; u8 cqe_zip_max_num[0x10]; - u8 reserved_69[0x220]; + u8 reserved_at_57f[0x220]; }; enum mlx5_flow_destination_type { @@ -880,7 +900,7 @@ struct mlx5_ifc_dest_format_struct_bits { u8 destination_type[0x8]; u8 destination_id[0x18]; - u8 reserved_0[0x20]; + u8 reserved_at_20[0x20]; }; struct mlx5_ifc_fte_match_param_bits { @@ -890,7 +910,7 @@ struct mlx5_ifc_fte_match_param_bits { struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers; - u8 reserved_0[0xa00]; + u8 reserved_at_600[0xa00]; }; enum { @@ -922,18 +942,18 @@ struct mlx5_ifc_wq_bits { u8 wq_signature[0x1]; u8 end_padding_mode[0x2]; u8 cd_slave[0x1]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 hds_skip_first_sge[0x1]; u8 log2_hds_buf_size[0x3]; - u8 reserved_1[0x7]; + u8 reserved_at_24[0x7]; u8 page_offset[0x5]; u8 lwm[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 pd[0x18]; - u8 reserved_3[0x8]; + u8 reserved_at_60[0x8]; u8 uar_page[0x18]; u8 dbr_addr[0x40]; @@ -942,60 +962,60 @@ struct mlx5_ifc_wq_bits { u8 sw_counter[0x20]; - u8 reserved_4[0xc]; + u8 reserved_at_100[0xc]; u8 log_wq_stride[0x4]; - u8 reserved_5[0x3]; + u8 reserved_at_110[0x3]; u8 log_wq_pg_sz[0x5]; - u8 reserved_6[0x3]; + u8 reserved_at_118[0x3]; u8 log_wq_sz[0x5]; - u8 reserved_7[0x4e0]; + u8 reserved_at_120[0x4e0]; struct mlx5_ifc_cmd_pas_bits pas[0]; }; struct mlx5_ifc_rq_num_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 rq_num[0x18]; }; struct mlx5_ifc_mac_address_layout_bits { - u8 reserved_0[0x10]; + u8 reserved_at_0[0x10]; u8 mac_addr_47_32[0x10]; u8 mac_addr_31_0[0x20]; }; struct mlx5_ifc_vlan_layout_bits { - u8 reserved_0[0x14]; + u8 reserved_at_0[0x14]; u8 vlan[0x0c]; - u8 reserved_1[0x20]; + u8 reserved_at_20[0x20]; }; struct mlx5_ifc_cong_control_r_roce_ecn_np_bits { - u8 reserved_0[0xa0]; + u8 reserved_at_0[0xa0]; u8 min_time_between_cnps[0x20]; - u8 reserved_1[0x12]; + u8 reserved_at_c0[0x12]; u8 cnp_dscp[0x6]; - u8 reserved_2[0x5]; + u8 reserved_at_d8[0x5]; u8 cnp_802p_prio[0x3]; - u8 reserved_3[0x720]; + u8 reserved_at_e0[0x720]; }; struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits { - u8 
reserved_0[0x60]; + u8 reserved_at_0[0x60]; - u8 reserved_1[0x4]; + u8 reserved_at_60[0x4]; u8 clamp_tgt_rate[0x1]; - u8 reserved_2[0x3]; + u8 reserved_at_65[0x3]; u8 clamp_tgt_rate_after_time_inc[0x1]; - u8 reserved_3[0x17]; + u8 reserved_at_69[0x17]; - u8 reserved_4[0x20]; + u8 reserved_at_80[0x20]; u8 rpg_time_reset[0x20]; @@ -1015,7 +1035,7 @@ struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits { u8 rpg_min_rate[0x20]; - u8 reserved_5[0xe0]; + u8 reserved_at_1c0[0xe0]; u8 rate_to_set_on_first_cnp[0x20]; @@ -1025,15 +1045,15 @@ struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits { u8 rate_reduce_monitor_period[0x20]; - u8 reserved_6[0x20]; + u8 reserved_at_320[0x20]; u8 initial_alpha_value[0x20]; - u8 reserved_7[0x4a0]; + u8 reserved_at_360[0x4a0]; }; struct mlx5_ifc_cong_control_802_1qau_rp_bits { - u8 reserved_0[0x80]; + u8 reserved_at_0[0x80]; u8 rppp_max_rps[0x20]; @@ -1055,7 +1075,7 @@ struct mlx5_ifc_cong_control_802_1qau_rp_bits { u8 rpg_min_rate[0x20]; - u8 reserved_1[0x640]; + u8 reserved_at_1c0[0x640]; }; enum { @@ -1205,7 +1225,37 @@ struct mlx5_ifc_phys_layer_cntrs_bits { u8 successful_recovery_events[0x20]; - u8 reserved_0[0x180]; + u8 reserved_at_640[0x180]; +}; + +struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits { + u8 symbol_error_counter[0x10]; + + u8 link_error_recovery_counter[0x8]; + + u8 link_downed_counter[0x8]; + + u8 port_rcv_errors[0x10]; + + u8 port_rcv_remote_physical_errors[0x10]; + + u8 port_rcv_switch_relay_errors[0x10]; + + u8 port_xmit_discards[0x10]; + + u8 port_xmit_constraint_errors[0x8]; + + u8 port_rcv_constraint_errors[0x8]; + + u8 reserved_at_70[0x8]; + + u8 link_overrun_errors[0x8]; + + u8 reserved_at_80[0x10]; + + u8 vl_15_dropped[0x10]; + + u8 reserved_at_a0[0xa0]; }; struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits { @@ -1213,7 +1263,7 @@ struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits { u8 transmit_queue_low[0x20]; - u8 reserved_0[0x780]; + u8 reserved_at_40[0x780]; }; struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { @@ -1221,7 +1271,7 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { u8 rx_octets_low[0x20]; - u8 reserved_0[0xc0]; + u8 reserved_at_40[0xc0]; u8 rx_frames_high[0x20]; @@ -1231,7 +1281,7 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { u8 tx_octets_low[0x20]; - u8 reserved_1[0xc0]; + u8 reserved_at_180[0xc0]; u8 tx_frames_high[0x20]; @@ -1257,7 +1307,7 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { u8 rx_pause_transition_low[0x20]; - u8 reserved_2[0x400]; + u8 reserved_at_3c0[0x400]; }; struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits { @@ -1265,7 +1315,7 @@ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits { u8 port_transmit_wait_low[0x20]; - u8 reserved_0[0x780]; + u8 reserved_at_40[0x780]; }; struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits { @@ -1333,7 +1383,7 @@ struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits { u8 dot3out_pause_frames_low[0x20]; - u8 reserved_0[0x3c0]; + u8 reserved_at_400[0x3c0]; }; struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits { @@ -1421,7 +1471,7 @@ struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits { u8 ether_stats_pkts8192to10239octets_low[0x20]; - u8 reserved_0[0x280]; + u8 reserved_at_540[0x280]; }; struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits { @@ -1477,7 +1527,7 @@ struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits { u8 if_out_broadcast_pkts_low[0x20]; - u8 reserved_0[0x480]; + u8 reserved_at_340[0x480]; }; struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits { @@ -1557,54 +1607,54 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits { u8 
a_pause_mac_ctrl_frames_transmitted_low[0x20]; - u8 reserved_0[0x300]; + u8 reserved_at_4c0[0x300]; }; struct mlx5_ifc_cmd_inter_comp_event_bits { u8 command_completion_vector[0x20]; - u8 reserved_0[0xc0]; + u8 reserved_at_20[0xc0]; }; struct mlx5_ifc_stall_vl_event_bits { - u8 reserved_0[0x18]; + u8 reserved_at_0[0x18]; u8 port_num[0x1]; - u8 reserved_1[0x3]; + u8 reserved_at_19[0x3]; u8 vl[0x4]; - u8 reserved_2[0xa0]; + u8 reserved_at_20[0xa0]; }; struct mlx5_ifc_db_bf_congestion_event_bits { u8 event_subtype[0x8]; - u8 reserved_0[0x8]; + u8 reserved_at_8[0x8]; u8 congestion_level[0x8]; - u8 reserved_1[0x8]; + u8 reserved_at_18[0x8]; - u8 reserved_2[0xa0]; + u8 reserved_at_20[0xa0]; }; struct mlx5_ifc_gpio_event_bits { - u8 reserved_0[0x60]; + u8 reserved_at_0[0x60]; u8 gpio_event_hi[0x20]; u8 gpio_event_lo[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_a0[0x40]; }; struct mlx5_ifc_port_state_change_event_bits { - u8 reserved_0[0x40]; + u8 reserved_at_0[0x40]; u8 port_num[0x4]; - u8 reserved_1[0x1c]; + u8 reserved_at_44[0x1c]; - u8 reserved_2[0x80]; + u8 reserved_at_60[0x80]; }; struct mlx5_ifc_dropped_packet_logged_bits { - u8 reserved_0[0xe0]; + u8 reserved_at_0[0xe0]; }; enum { @@ -1613,15 +1663,15 @@ enum { }; struct mlx5_ifc_cq_error_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 cqn[0x18]; - u8 reserved_1[0x20]; + u8 reserved_at_20[0x20]; - u8 reserved_2[0x18]; + u8 reserved_at_40[0x18]; u8 syndrome[0x8]; - u8 reserved_3[0x80]; + u8 reserved_at_60[0x80]; }; struct mlx5_ifc_rdma_page_fault_event_bits { @@ -1629,14 +1679,14 @@ struct mlx5_ifc_rdma_page_fault_event_bits { u8 r_key[0x20]; - u8 reserved_0[0x10]; + u8 reserved_at_40[0x10]; u8 packet_len[0x10]; u8 rdma_op_len[0x20]; u8 rdma_va[0x40]; - u8 reserved_1[0x5]; + u8 reserved_at_c0[0x5]; u8 rdma[0x1]; u8 write[0x1]; u8 requestor[0x1]; @@ -1646,15 +1696,15 @@ struct mlx5_ifc_rdma_page_fault_event_bits { struct mlx5_ifc_wqe_associated_page_fault_event_bits { u8 bytes_committed[0x20]; - u8 reserved_0[0x10]; + u8 reserved_at_20[0x10]; u8 wqe_index[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_40[0x10]; u8 len[0x10]; - u8 reserved_2[0x60]; + u8 reserved_at_60[0x60]; - u8 reserved_3[0x5]; + u8 reserved_at_c0[0x5]; u8 rdma[0x1]; u8 write_read[0x1]; u8 requestor[0x1]; @@ -1662,26 +1712,26 @@ struct mlx5_ifc_wqe_associated_page_fault_event_bits { }; struct mlx5_ifc_qp_events_bits { - u8 reserved_0[0xa0]; + u8 reserved_at_0[0xa0]; u8 type[0x8]; - u8 reserved_1[0x18]; + u8 reserved_at_a8[0x18]; - u8 reserved_2[0x8]; + u8 reserved_at_c0[0x8]; u8 qpn_rqn_sqn[0x18]; }; struct mlx5_ifc_dct_events_bits { - u8 reserved_0[0xc0]; + u8 reserved_at_0[0xc0]; - u8 reserved_1[0x8]; + u8 reserved_at_c0[0x8]; u8 dct_number[0x18]; }; struct mlx5_ifc_comp_event_bits { - u8 reserved_0[0xc0]; + u8 reserved_at_0[0xc0]; - u8 reserved_1[0x8]; + u8 reserved_at_c0[0x8]; u8 cq_number[0x18]; }; @@ -1754,41 +1804,41 @@ enum { struct mlx5_ifc_qpc_bits { u8 state[0x4]; - u8 reserved_0[0x4]; + u8 reserved_at_4[0x4]; u8 st[0x8]; - u8 reserved_1[0x3]; + u8 reserved_at_10[0x3]; u8 pm_state[0x2]; - u8 reserved_2[0x7]; + u8 reserved_at_15[0x7]; u8 end_padding_mode[0x2]; - u8 reserved_3[0x2]; + u8 reserved_at_1e[0x2]; u8 wq_signature[0x1]; u8 block_lb_mc[0x1]; u8 atomic_like_write_en[0x1]; u8 latency_sensitive[0x1]; - u8 reserved_4[0x1]; + u8 reserved_at_24[0x1]; u8 drain_sigerr[0x1]; - u8 reserved_5[0x2]; + u8 reserved_at_26[0x2]; u8 pd[0x18]; u8 mtu[0x3]; u8 log_msg_max[0x5]; - u8 reserved_6[0x1]; + u8 reserved_at_48[0x1]; u8 log_rq_size[0x4]; u8 
log_rq_stride[0x3]; u8 no_sq[0x1]; u8 log_sq_size[0x4]; - u8 reserved_7[0x6]; + u8 reserved_at_55[0x6]; u8 rlky[0x1]; - u8 reserved_8[0x4]; + u8 ulp_stateless_offload_mode[0x4]; u8 counter_set_id[0x8]; u8 uar_page[0x18]; - u8 reserved_9[0x8]; + u8 reserved_at_80[0x8]; u8 user_index[0x18]; - u8 reserved_10[0x3]; + u8 reserved_at_a0[0x3]; u8 log_page_size[0x5]; u8 remote_qpn[0x18]; @@ -1797,66 +1847,66 @@ struct mlx5_ifc_qpc_bits { struct mlx5_ifc_ads_bits secondary_address_path; u8 log_ack_req_freq[0x4]; - u8 reserved_11[0x4]; + u8 reserved_at_384[0x4]; u8 log_sra_max[0x3]; - u8 reserved_12[0x2]; + u8 reserved_at_38b[0x2]; u8 retry_count[0x3]; u8 rnr_retry[0x3]; - u8 reserved_13[0x1]; + u8 reserved_at_393[0x1]; u8 fre[0x1]; u8 cur_rnr_retry[0x3]; u8 cur_retry_count[0x3]; - u8 reserved_14[0x5]; + u8 reserved_at_39b[0x5]; - u8 reserved_15[0x20]; + u8 reserved_at_3a0[0x20]; - u8 reserved_16[0x8]; + u8 reserved_at_3c0[0x8]; u8 next_send_psn[0x18]; - u8 reserved_17[0x8]; + u8 reserved_at_3e0[0x8]; u8 cqn_snd[0x18]; - u8 reserved_18[0x40]; + u8 reserved_at_400[0x40]; - u8 reserved_19[0x8]; + u8 reserved_at_440[0x8]; u8 last_acked_psn[0x18]; - u8 reserved_20[0x8]; + u8 reserved_at_460[0x8]; u8 ssn[0x18]; - u8 reserved_21[0x8]; + u8 reserved_at_480[0x8]; u8 log_rra_max[0x3]; - u8 reserved_22[0x1]; + u8 reserved_at_48b[0x1]; u8 atomic_mode[0x4]; u8 rre[0x1]; u8 rwe[0x1]; u8 rae[0x1]; - u8 reserved_23[0x1]; + u8 reserved_at_493[0x1]; u8 page_offset[0x6]; - u8 reserved_24[0x3]; + u8 reserved_at_49a[0x3]; u8 cd_slave_receive[0x1]; u8 cd_slave_send[0x1]; u8 cd_master[0x1]; - u8 reserved_25[0x3]; + u8 reserved_at_4a0[0x3]; u8 min_rnr_nak[0x5]; u8 next_rcv_psn[0x18]; - u8 reserved_26[0x8]; + u8 reserved_at_4c0[0x8]; u8 xrcd[0x18]; - u8 reserved_27[0x8]; + u8 reserved_at_4e0[0x8]; u8 cqn_rcv[0x18]; u8 dbr_addr[0x40]; u8 q_key[0x20]; - u8 reserved_28[0x5]; + u8 reserved_at_560[0x5]; u8 rq_type[0x3]; u8 srqn_rmpn[0x18]; - u8 reserved_29[0x8]; + u8 reserved_at_580[0x8]; u8 rmsn[0x18]; u8 hw_sq_wqebb_counter[0x10]; @@ -1866,33 +1916,33 @@ struct mlx5_ifc_qpc_bits { u8 sw_rq_counter[0x20]; - u8 reserved_30[0x20]; + u8 reserved_at_600[0x20]; - u8 reserved_31[0xf]; + u8 reserved_at_620[0xf]; u8 cgs[0x1]; u8 cs_req[0x8]; u8 cs_res[0x8]; u8 dc_access_key[0x40]; - u8 reserved_32[0xc0]; + u8 reserved_at_680[0xc0]; }; struct mlx5_ifc_roce_addr_layout_bits { u8 source_l3_address[16][0x8]; - u8 reserved_0[0x3]; + u8 reserved_at_80[0x3]; u8 vlan_valid[0x1]; u8 vlan_id[0xc]; u8 source_mac_47_32[0x10]; u8 source_mac_31_0[0x20]; - u8 reserved_1[0x14]; + u8 reserved_at_c0[0x14]; u8 roce_l3_type[0x4]; u8 roce_version[0x8]; - u8 reserved_2[0x20]; + u8 reserved_at_e0[0x20]; }; union mlx5_ifc_hca_cap_union_bits { @@ -1904,7 +1954,7 @@ union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap; struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; struct mlx5_ifc_e_switch_cap_bits e_switch_cap; - u8 reserved_0[0x8000]; + u8 reserved_at_0[0x8000]; }; enum { @@ -1914,24 +1964,24 @@ enum { }; struct mlx5_ifc_flow_context_bits { - u8 reserved_0[0x20]; + u8 reserved_at_0[0x20]; u8 group_id[0x20]; - u8 reserved_1[0x8]; + u8 reserved_at_40[0x8]; u8 flow_tag[0x18]; - u8 reserved_2[0x10]; + u8 reserved_at_60[0x10]; u8 action[0x10]; - u8 reserved_3[0x8]; + u8 reserved_at_80[0x8]; u8 destination_list_size[0x18]; - u8 reserved_4[0x160]; + u8 reserved_at_a0[0x160]; struct mlx5_ifc_fte_match_param_bits match_value; - u8 reserved_5[0x600]; + u8 reserved_at_1200[0x600]; struct 
mlx5_ifc_dest_format_struct_bits destination[0]; }; @@ -1944,43 +1994,43 @@ enum { struct mlx5_ifc_xrc_srqc_bits { u8 state[0x4]; u8 log_xrc_srq_size[0x4]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 wq_signature[0x1]; u8 cont_srq[0x1]; - u8 reserved_1[0x1]; + u8 reserved_at_22[0x1]; u8 rlky[0x1]; u8 basic_cyclic_rcv_wqe[0x1]; u8 log_rq_stride[0x3]; u8 xrcd[0x18]; u8 page_offset[0x6]; - u8 reserved_2[0x2]; + u8 reserved_at_46[0x2]; u8 cqn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; u8 user_index_equal_xrc_srqn[0x1]; - u8 reserved_4[0x1]; + u8 reserved_at_81[0x1]; u8 log_page_size[0x6]; u8 user_index[0x18]; - u8 reserved_5[0x20]; + u8 reserved_at_a0[0x20]; - u8 reserved_6[0x8]; + u8 reserved_at_c0[0x8]; u8 pd[0x18]; u8 lwm[0x10]; u8 wqe_cnt[0x10]; - u8 reserved_7[0x40]; + u8 reserved_at_100[0x40]; u8 db_record_addr_h[0x20]; u8 db_record_addr_l[0x1e]; - u8 reserved_8[0x2]; + u8 reserved_at_17e[0x2]; - u8 reserved_9[0x80]; + u8 reserved_at_180[0x80]; }; struct mlx5_ifc_traffic_counter_bits { @@ -1990,16 +2040,16 @@ struct mlx5_ifc_traffic_counter_bits { }; struct mlx5_ifc_tisc_bits { - u8 reserved_0[0xc]; + u8 reserved_at_0[0xc]; u8 prio[0x4]; - u8 reserved_1[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_2[0x100]; + u8 reserved_at_20[0x100]; - u8 reserved_3[0x8]; + u8 reserved_at_120[0x8]; u8 transport_domain[0x18]; - u8 reserved_4[0x3c0]; + u8 reserved_at_140[0x3c0]; }; enum { @@ -2024,31 +2074,31 @@ enum { }; struct mlx5_ifc_tirc_bits { - u8 reserved_0[0x20]; + u8 reserved_at_0[0x20]; u8 disp_type[0x4]; - u8 reserved_1[0x1c]; + u8 reserved_at_24[0x1c]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; - u8 reserved_3[0x4]; + u8 reserved_at_80[0x4]; u8 lro_timeout_period_usecs[0x10]; u8 lro_enable_mask[0x4]; u8 lro_max_ip_payload_size[0x8]; - u8 reserved_4[0x40]; + u8 reserved_at_a0[0x40]; - u8 reserved_5[0x8]; + u8 reserved_at_e0[0x8]; u8 inline_rqn[0x18]; u8 rx_hash_symmetric[0x1]; - u8 reserved_6[0x1]; + u8 reserved_at_101[0x1]; u8 tunneled_offload_en[0x1]; - u8 reserved_7[0x5]; + u8 reserved_at_103[0x5]; u8 indirect_table[0x18]; u8 rx_hash_fn[0x4]; - u8 reserved_8[0x2]; + u8 reserved_at_124[0x2]; u8 self_lb_block[0x2]; u8 transport_domain[0x18]; @@ -2058,7 +2108,7 @@ struct mlx5_ifc_tirc_bits { struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner; - u8 reserved_9[0x4c0]; + u8 reserved_at_2c0[0x4c0]; }; enum { @@ -2069,39 +2119,39 @@ enum { struct mlx5_ifc_srqc_bits { u8 state[0x4]; u8 log_srq_size[0x4]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 wq_signature[0x1]; u8 cont_srq[0x1]; - u8 reserved_1[0x1]; + u8 reserved_at_22[0x1]; u8 rlky[0x1]; - u8 reserved_2[0x1]; + u8 reserved_at_24[0x1]; u8 log_rq_stride[0x3]; u8 xrcd[0x18]; u8 page_offset[0x6]; - u8 reserved_3[0x2]; + u8 reserved_at_46[0x2]; u8 cqn[0x18]; - u8 reserved_4[0x20]; + u8 reserved_at_60[0x20]; - u8 reserved_5[0x2]; + u8 reserved_at_80[0x2]; u8 log_page_size[0x6]; - u8 reserved_6[0x18]; + u8 reserved_at_88[0x18]; - u8 reserved_7[0x20]; + u8 reserved_at_a0[0x20]; - u8 reserved_8[0x8]; + u8 reserved_at_c0[0x8]; u8 pd[0x18]; u8 lwm[0x10]; u8 wqe_cnt[0x10]; - u8 reserved_9[0x40]; + u8 reserved_at_100[0x40]; u8 dbr_addr[0x40]; - u8 reserved_10[0x80]; + u8 reserved_at_180[0x80]; }; enum { @@ -2115,39 +2165,39 @@ struct mlx5_ifc_sqc_bits { u8 cd_master[0x1]; u8 fre[0x1]; u8 flush_in_error_en[0x1]; - u8 reserved_0[0x4]; + u8 reserved_at_4[0x4]; u8 state[0x4]; - u8 reserved_1[0x14]; + u8 reserved_at_c[0x14]; - u8 reserved_2[0x8]; + u8 reserved_at_20[0x8]; u8 user_index[0x18]; - u8 
reserved_3[0x8]; + u8 reserved_at_40[0x8]; u8 cqn[0x18]; - u8 reserved_4[0xa0]; + u8 reserved_at_60[0xa0]; u8 tis_lst_sz[0x10]; - u8 reserved_5[0x10]; + u8 reserved_at_110[0x10]; - u8 reserved_6[0x40]; + u8 reserved_at_120[0x40]; - u8 reserved_7[0x8]; + u8 reserved_at_160[0x8]; u8 tis_num_0[0x18]; struct mlx5_ifc_wq_bits wq; }; struct mlx5_ifc_rqtc_bits { - u8 reserved_0[0xa0]; + u8 reserved_at_0[0xa0]; - u8 reserved_1[0x10]; + u8 reserved_at_a0[0x10]; u8 rqt_max_size[0x10]; - u8 reserved_2[0x10]; + u8 reserved_at_c0[0x10]; u8 rqt_actual_size[0x10]; - u8 reserved_3[0x6a0]; + u8 reserved_at_e0[0x6a0]; struct mlx5_ifc_rq_num_bits rq_num[0]; }; @@ -2165,27 +2215,27 @@ enum { struct mlx5_ifc_rqc_bits { u8 rlky[0x1]; - u8 reserved_0[0x2]; + u8 reserved_at_1[0x2]; u8 vsd[0x1]; u8 mem_rq_type[0x4]; u8 state[0x4]; - u8 reserved_1[0x1]; + u8 reserved_at_c[0x1]; u8 flush_in_error_en[0x1]; - u8 reserved_2[0x12]; + u8 reserved_at_e[0x12]; - u8 reserved_3[0x8]; + u8 reserved_at_20[0x8]; u8 user_index[0x18]; - u8 reserved_4[0x8]; + u8 reserved_at_40[0x8]; u8 cqn[0x18]; u8 counter_set_id[0x8]; - u8 reserved_5[0x18]; + u8 reserved_at_68[0x18]; - u8 reserved_6[0x8]; + u8 reserved_at_80[0x8]; u8 rmpn[0x18]; - u8 reserved_7[0xe0]; + u8 reserved_at_a0[0xe0]; struct mlx5_ifc_wq_bits wq; }; @@ -2196,31 +2246,31 @@ enum { }; struct mlx5_ifc_rmpc_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 state[0x4]; - u8 reserved_1[0x14]; + u8 reserved_at_c[0x14]; u8 basic_cyclic_rcv_wqe[0x1]; - u8 reserved_2[0x1f]; + u8 reserved_at_21[0x1f]; - u8 reserved_3[0x140]; + u8 reserved_at_40[0x140]; struct mlx5_ifc_wq_bits wq; }; struct mlx5_ifc_nic_vport_context_bits { - u8 reserved_0[0x1f]; + u8 reserved_at_0[0x1f]; u8 roce_en[0x1]; u8 arm_change_event[0x1]; - u8 reserved_1[0x1a]; + u8 reserved_at_21[0x1a]; u8 event_on_mtu[0x1]; u8 event_on_promisc_change[0x1]; u8 event_on_vlan_change[0x1]; u8 event_on_mc_address_change[0x1]; u8 event_on_uc_address_change[0x1]; - u8 reserved_2[0xf0]; + u8 reserved_at_40[0xf0]; u8 mtu[0x10]; @@ -2228,21 +2278,21 @@ struct mlx5_ifc_nic_vport_context_bits { u8 port_guid[0x40]; u8 node_guid[0x40]; - u8 reserved_3[0x140]; + u8 reserved_at_200[0x140]; u8 qkey_violation_counter[0x10]; - u8 reserved_4[0x430]; + u8 reserved_at_350[0x430]; u8 promisc_uc[0x1]; u8 promisc_mc[0x1]; u8 promisc_all[0x1]; - u8 reserved_5[0x2]; + u8 reserved_at_783[0x2]; u8 allowed_list_type[0x3]; - u8 reserved_6[0xc]; + u8 reserved_at_788[0xc]; u8 allowed_list_size[0xc]; struct mlx5_ifc_mac_address_layout_bits permanent_address; - u8 reserved_7[0x20]; + u8 reserved_at_7e0[0x20]; u8 current_uc_mac_address[0][0x40]; }; @@ -2254,9 +2304,9 @@ enum { }; struct mlx5_ifc_mkc_bits { - u8 reserved_0[0x1]; + u8 reserved_at_0[0x1]; u8 free[0x1]; - u8 reserved_1[0xd]; + u8 reserved_at_2[0xd]; u8 small_fence_on_rdma_read_response[0x1]; u8 umr_en[0x1]; u8 a[0x1]; @@ -2265,19 +2315,19 @@ struct mlx5_ifc_mkc_bits { u8 lw[0x1]; u8 lr[0x1]; u8 access_mode[0x2]; - u8 reserved_2[0x8]; + u8 reserved_at_18[0x8]; u8 qpn[0x18]; u8 mkey_7_0[0x8]; - u8 reserved_3[0x20]; + u8 reserved_at_40[0x20]; u8 length64[0x1]; u8 bsf_en[0x1]; u8 sync_umr[0x1]; - u8 reserved_4[0x2]; + u8 reserved_at_63[0x2]; u8 expected_sigerr_count[0x1]; - u8 reserved_5[0x1]; + u8 reserved_at_66[0x1]; u8 en_rinval[0x1]; u8 pd[0x18]; @@ -2287,18 +2337,18 @@ struct mlx5_ifc_mkc_bits { u8 bsf_octword_size[0x20]; - u8 reserved_6[0x80]; + u8 reserved_at_120[0x80]; u8 translations_octword_size[0x20]; - u8 reserved_7[0x1b]; + u8 reserved_at_1c0[0x1b]; u8 log_page_size[0x5]; - u8 
reserved_8[0x20]; + u8 reserved_at_1e0[0x20]; }; struct mlx5_ifc_pkey_bits { - u8 reserved_0[0x10]; + u8 reserved_at_0[0x10]; u8 pkey[0x10]; }; @@ -2309,19 +2359,19 @@ struct mlx5_ifc_array128_auto_bits { struct mlx5_ifc_hca_vport_context_bits { u8 field_select[0x20]; - u8 reserved_0[0xe0]; + u8 reserved_at_20[0xe0]; u8 sm_virt_aware[0x1]; u8 has_smi[0x1]; u8 has_raw[0x1]; u8 grh_required[0x1]; - u8 reserved_1[0xc]; + u8 reserved_at_104[0xc]; u8 port_physical_state[0x4]; u8 vport_state_policy[0x4]; u8 port_state[0x4]; u8 vport_state[0x4]; - u8 reserved_2[0x20]; + u8 reserved_at_120[0x20]; u8 system_image_guid[0x40]; @@ -2337,33 +2387,33 @@ struct mlx5_ifc_hca_vport_context_bits { u8 cap_mask2_field_select[0x20]; - u8 reserved_3[0x80]; + u8 reserved_at_280[0x80]; u8 lid[0x10]; - u8 reserved_4[0x4]; + u8 reserved_at_310[0x4]; u8 init_type_reply[0x4]; u8 lmc[0x3]; u8 subnet_timeout[0x5]; u8 sm_lid[0x10]; u8 sm_sl[0x4]; - u8 reserved_5[0xc]; + u8 reserved_at_334[0xc]; u8 qkey_violation_counter[0x10]; u8 pkey_violation_counter[0x10]; - u8 reserved_6[0xca0]; + u8 reserved_at_360[0xca0]; }; struct mlx5_ifc_esw_vport_context_bits { - u8 reserved_0[0x3]; + u8 reserved_at_0[0x3]; u8 vport_svlan_strip[0x1]; u8 vport_cvlan_strip[0x1]; u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert[0x2]; - u8 reserved_1[0x18]; + u8 reserved_at_8[0x18]; - u8 reserved_2[0x20]; + u8 reserved_at_20[0x20]; u8 svlan_cfi[0x1]; u8 svlan_pcp[0x3]; @@ -2372,7 +2422,7 @@ struct mlx5_ifc_esw_vport_context_bits { u8 cvlan_pcp[0x3]; u8 cvlan_id[0xc]; - u8 reserved_3[0x7a0]; + u8 reserved_at_60[0x7a0]; }; enum { @@ -2387,41 +2437,41 @@ enum { struct mlx5_ifc_eqc_bits { u8 status[0x4]; - u8 reserved_0[0x9]; + u8 reserved_at_4[0x9]; u8 ec[0x1]; u8 oi[0x1]; - u8 reserved_1[0x5]; + u8 reserved_at_f[0x5]; u8 st[0x4]; - u8 reserved_2[0x8]; + u8 reserved_at_18[0x8]; - u8 reserved_3[0x20]; + u8 reserved_at_20[0x20]; - u8 reserved_4[0x14]; + u8 reserved_at_40[0x14]; u8 page_offset[0x6]; - u8 reserved_5[0x6]; + u8 reserved_at_5a[0x6]; - u8 reserved_6[0x3]; + u8 reserved_at_60[0x3]; u8 log_eq_size[0x5]; u8 uar_page[0x18]; - u8 reserved_7[0x20]; + u8 reserved_at_80[0x20]; - u8 reserved_8[0x18]; + u8 reserved_at_a0[0x18]; u8 intr[0x8]; - u8 reserved_9[0x3]; + u8 reserved_at_c0[0x3]; u8 log_page_size[0x5]; - u8 reserved_10[0x18]; + u8 reserved_at_c8[0x18]; - u8 reserved_11[0x60]; + u8 reserved_at_e0[0x60]; - u8 reserved_12[0x8]; + u8 reserved_at_140[0x8]; u8 consumer_counter[0x18]; - u8 reserved_13[0x8]; + u8 reserved_at_160[0x8]; u8 producer_counter[0x18]; - u8 reserved_14[0x80]; + u8 reserved_at_180[0x80]; }; enum { @@ -2445,14 +2495,14 @@ enum { }; struct mlx5_ifc_dctc_bits { - u8 reserved_0[0x4]; + u8 reserved_at_0[0x4]; u8 state[0x4]; - u8 reserved_1[0x18]; + u8 reserved_at_8[0x18]; - u8 reserved_2[0x8]; + u8 reserved_at_20[0x8]; u8 user_index[0x18]; - u8 reserved_3[0x8]; + u8 reserved_at_40[0x8]; u8 cqn[0x18]; u8 counter_set_id[0x8]; @@ -2464,45 +2514,45 @@ struct mlx5_ifc_dctc_bits { u8 latency_sensitive[0x1]; u8 rlky[0x1]; u8 free_ar[0x1]; - u8 reserved_4[0xd]; + u8 reserved_at_73[0xd]; - u8 reserved_5[0x8]; + u8 reserved_at_80[0x8]; u8 cs_res[0x8]; - u8 reserved_6[0x3]; + u8 reserved_at_90[0x3]; u8 min_rnr_nak[0x5]; - u8 reserved_7[0x8]; + u8 reserved_at_98[0x8]; - u8 reserved_8[0x8]; + u8 reserved_at_a0[0x8]; u8 srqn[0x18]; - u8 reserved_9[0x8]; + u8 reserved_at_c0[0x8]; u8 pd[0x18]; u8 tclass[0x8]; - u8 reserved_10[0x4]; + u8 reserved_at_e8[0x4]; u8 flow_label[0x14]; u8 dc_access_key[0x40]; - u8 reserved_11[0x5]; + u8 
reserved_at_140[0x5]; u8 mtu[0x3]; u8 port[0x8]; u8 pkey_index[0x10]; - u8 reserved_12[0x8]; + u8 reserved_at_160[0x8]; u8 my_addr_index[0x8]; - u8 reserved_13[0x8]; + u8 reserved_at_170[0x8]; u8 hop_limit[0x8]; u8 dc_access_key_violation_count[0x20]; - u8 reserved_14[0x14]; + u8 reserved_at_1a0[0x14]; u8 dei_cfi[0x1]; u8 eth_prio[0x3]; u8 ecn[0x2]; u8 dscp[0x6]; - u8 reserved_15[0x40]; + u8 reserved_at_1c0[0x40]; }; enum { @@ -2524,54 +2574,54 @@ enum { struct mlx5_ifc_cqc_bits { u8 status[0x4]; - u8 reserved_0[0x4]; + u8 reserved_at_4[0x4]; u8 cqe_sz[0x3]; u8 cc[0x1]; - u8 reserved_1[0x1]; + u8 reserved_at_c[0x1]; u8 scqe_break_moderation_en[0x1]; u8 oi[0x1]; - u8 reserved_2[0x2]; + u8 reserved_at_f[0x2]; u8 cqe_zip_en[0x1]; u8 mini_cqe_res_format[0x2]; u8 st[0x4]; - u8 reserved_3[0x8]; + u8 reserved_at_18[0x8]; - u8 reserved_4[0x20]; + u8 reserved_at_20[0x20]; - u8 reserved_5[0x14]; + u8 reserved_at_40[0x14]; u8 page_offset[0x6]; - u8 reserved_6[0x6]; + u8 reserved_at_5a[0x6]; - u8 reserved_7[0x3]; + u8 reserved_at_60[0x3]; u8 log_cq_size[0x5]; u8 uar_page[0x18]; - u8 reserved_8[0x4]; + u8 reserved_at_80[0x4]; u8 cq_period[0xc]; u8 cq_max_count[0x10]; - u8 reserved_9[0x18]; + u8 reserved_at_a0[0x18]; u8 c_eqn[0x8]; - u8 reserved_10[0x3]; + u8 reserved_at_c0[0x3]; u8 log_page_size[0x5]; - u8 reserved_11[0x18]; + u8 reserved_at_c8[0x18]; - u8 reserved_12[0x20]; + u8 reserved_at_e0[0x20]; - u8 reserved_13[0x8]; + u8 reserved_at_100[0x8]; u8 last_notified_index[0x18]; - u8 reserved_14[0x8]; + u8 reserved_at_120[0x8]; u8 last_solicit_index[0x18]; - u8 reserved_15[0x8]; + u8 reserved_at_140[0x8]; u8 consumer_counter[0x18]; - u8 reserved_16[0x8]; + u8 reserved_at_160[0x8]; u8 producer_counter[0x18]; - u8 reserved_17[0x40]; + u8 reserved_at_180[0x40]; u8 dbr_addr[0x40]; }; @@ -2580,16 +2630,16 @@ union mlx5_ifc_cong_control_roce_ecn_auto_bits { struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp; struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp; struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np; - u8 reserved_0[0x800]; + u8 reserved_at_0[0x800]; }; struct mlx5_ifc_query_adapter_param_block_bits { - u8 reserved_0[0xc0]; + u8 reserved_at_0[0xc0]; - u8 reserved_1[0x8]; + u8 reserved_at_c0[0x8]; u8 ieee_vendor_id[0x18]; - u8 reserved_2[0x10]; + u8 reserved_at_e0[0x10]; u8 vsd_vendor_id[0x10]; u8 vsd[208][0x8]; @@ -2600,14 +2650,14 @@ struct mlx5_ifc_query_adapter_param_block_bits { union mlx5_ifc_modify_field_select_resize_field_select_auto_bits { struct mlx5_ifc_modify_field_select_bits modify_field_select; struct mlx5_ifc_resize_field_select_bits resize_field_select; - u8 reserved_0[0x20]; + u8 reserved_at_0[0x20]; }; union mlx5_ifc_field_select_802_1_r_roce_auto_bits { struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp; struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp; struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np; - u8 reserved_0[0x20]; + u8 reserved_at_0[0x20]; }; union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { @@ -2618,8 +2668,9 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; + struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout; struct mlx5_ifc_phys_layer_cntrs_bits 
phys_layer_cntrs; - u8 reserved_0[0x7c0]; + u8 reserved_at_0[0x7c0]; }; union mlx5_ifc_event_auto_bits { @@ -2635,23 +2686,23 @@ union mlx5_ifc_event_auto_bits { struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event; struct mlx5_ifc_stall_vl_event_bits stall_vl_event; struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event; - u8 reserved_0[0xe0]; + u8 reserved_at_0[0xe0]; }; struct mlx5_ifc_health_buffer_bits { - u8 reserved_0[0x100]; + u8 reserved_at_0[0x100]; u8 assert_existptr[0x20]; u8 assert_callra[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_140[0x40]; u8 fw_version[0x20]; u8 hw_id[0x20]; - u8 reserved_2[0x20]; + u8 reserved_at_1c0[0x20]; u8 irisc_index[0x8]; u8 synd[0x8]; @@ -2660,20 +2711,20 @@ struct mlx5_ifc_health_buffer_bits { struct mlx5_ifc_register_loopback_control_bits { u8 no_lb[0x1]; - u8 reserved_0[0x7]; + u8 reserved_at_1[0x7]; u8 port[0x8]; - u8 reserved_1[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_2[0x60]; + u8 reserved_at_20[0x60]; }; struct mlx5_ifc_teardown_hca_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; enum { @@ -2683,108 +2734,108 @@ enum { struct mlx5_ifc_teardown_hca_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x10]; + u8 reserved_at_40[0x10]; u8 profile[0x10]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_sqerr2rts_qp_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_sqerr2rts_qp_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 qpn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; u8 opt_param_mask[0x20]; - u8 reserved_4[0x20]; + u8 reserved_at_a0[0x20]; struct mlx5_ifc_qpc_bits qpc; - u8 reserved_5[0x80]; + u8 reserved_at_800[0x80]; }; struct mlx5_ifc_sqd2rts_qp_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_sqd2rts_qp_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 qpn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; u8 opt_param_mask[0x20]; - u8 reserved_4[0x20]; + u8 reserved_at_a0[0x20]; struct mlx5_ifc_qpc_bits qpc; - u8 reserved_5[0x80]; + u8 reserved_at_800[0x80]; }; struct mlx5_ifc_set_roce_address_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_set_roce_address_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 roce_address_index[0x10]; - u8 reserved_2[0x10]; + u8 reserved_at_50[0x10]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; struct mlx5_ifc_roce_addr_layout_bits roce_address; }; struct mlx5_ifc_set_mad_demux_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; enum { @@ -2794,89 +2845,89 @@ enum { struct mlx5_ifc_set_mad_demux_in_bits { u8 
opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x20]; + u8 reserved_at_40[0x20]; - u8 reserved_3[0x6]; + u8 reserved_at_60[0x6]; u8 demux_mode[0x2]; - u8 reserved_4[0x18]; + u8 reserved_at_68[0x18]; }; struct mlx5_ifc_set_l2_table_entry_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_set_l2_table_entry_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x60]; + u8 reserved_at_40[0x60]; - u8 reserved_3[0x8]; + u8 reserved_at_a0[0x8]; u8 table_index[0x18]; - u8 reserved_4[0x20]; + u8 reserved_at_c0[0x20]; - u8 reserved_5[0x13]; + u8 reserved_at_e0[0x13]; u8 vlan_valid[0x1]; u8 vlan[0xc]; struct mlx5_ifc_mac_address_layout_bits mac_address; - u8 reserved_6[0xc0]; + u8 reserved_at_140[0xc0]; }; struct mlx5_ifc_set_issi_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_set_issi_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x10]; + u8 reserved_at_40[0x10]; u8 current_issi[0x10]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_set_hca_cap_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_set_hca_cap_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; union mlx5_ifc_hca_cap_union_bits capability; }; @@ -2890,156 +2941,156 @@ enum { struct mlx5_ifc_set_fte_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_set_fte_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; u8 table_type[0x8]; - u8 reserved_3[0x18]; + u8 reserved_at_88[0x18]; - u8 reserved_4[0x8]; + u8 reserved_at_a0[0x8]; u8 table_id[0x18]; - u8 reserved_5[0x18]; + u8 reserved_at_c0[0x18]; u8 modify_enable_mask[0x8]; - u8 reserved_6[0x20]; + u8 reserved_at_e0[0x20]; u8 flow_index[0x20]; - u8 reserved_7[0xe0]; + u8 reserved_at_120[0xe0]; struct mlx5_ifc_flow_context_bits flow_context; }; struct mlx5_ifc_rts2rts_qp_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_rts2rts_qp_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 qpn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; u8 opt_param_mask[0x20]; - u8 reserved_4[0x20]; + u8 reserved_at_a0[0x20]; struct mlx5_ifc_qpc_bits qpc; - u8 reserved_5[0x80]; + u8 reserved_at_800[0x80]; }; struct mlx5_ifc_rtr2rts_qp_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct 
mlx5_ifc_rtr2rts_qp_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 qpn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; u8 opt_param_mask[0x20]; - u8 reserved_4[0x20]; + u8 reserved_at_a0[0x20]; struct mlx5_ifc_qpc_bits qpc; - u8 reserved_5[0x80]; + u8 reserved_at_800[0x80]; }; struct mlx5_ifc_rst2init_qp_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_rst2init_qp_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 qpn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; u8 opt_param_mask[0x20]; - u8 reserved_4[0x20]; + u8 reserved_at_a0[0x20]; struct mlx5_ifc_qpc_bits qpc; - u8 reserved_5[0x80]; + u8 reserved_at_800[0x80]; }; struct mlx5_ifc_query_xrc_srq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; - u8 reserved_2[0x600]; + u8 reserved_at_280[0x600]; u8 pas[0][0x40]; }; struct mlx5_ifc_query_xrc_srq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 xrc_srqn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; enum { @@ -3049,13 +3100,13 @@ enum { struct mlx5_ifc_query_vport_state_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x20]; + u8 reserved_at_40[0x20]; - u8 reserved_2[0x18]; + u8 reserved_at_60[0x18]; u8 admin_state[0x4]; u8 state[0x4]; }; @@ -3067,25 +3118,25 @@ enum { struct mlx5_ifc_query_vport_state_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; - u8 reserved_2[0xf]; + u8 reserved_at_41[0xf]; u8 vport_number[0x10]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_vport_counter_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; struct mlx5_ifc_traffic_counter_bits received_errors; @@ -3111,7 +3162,7 @@ struct mlx5_ifc_query_vport_counter_out_bits { struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast; - u8 reserved_2[0xa00]; + u8 reserved_at_680[0xa00]; }; enum { @@ -3120,328 +3171,329 @@ enum { struct mlx5_ifc_query_vport_counter_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; - u8 reserved_2[0xf]; + u8 reserved_at_41[0xb]; + u8 port_num[0x4]; u8 vport_number[0x10]; - u8 reserved_3[0x60]; + u8 reserved_at_60[0x60]; u8 clear[0x1]; - u8 reserved_4[0x1f]; + u8 reserved_at_c1[0x1f]; - u8 reserved_5[0x20]; + u8 reserved_at_e0[0x20]; }; struct mlx5_ifc_query_tis_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; struct mlx5_ifc_tisc_bits tis_context; }; struct mlx5_ifc_query_tis_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 
reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 tisn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_tir_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0xc0]; + u8 reserved_at_40[0xc0]; struct mlx5_ifc_tirc_bits tir_context; }; struct mlx5_ifc_query_tir_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 tirn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_srq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; struct mlx5_ifc_srqc_bits srq_context_entry; - u8 reserved_2[0x600]; + u8 reserved_at_280[0x600]; u8 pas[0][0x40]; }; struct mlx5_ifc_query_srq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 srqn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_sq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0xc0]; + u8 reserved_at_40[0xc0]; struct mlx5_ifc_sqc_bits sq_context; }; struct mlx5_ifc_query_sq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 sqn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_special_contexts_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x20]; + u8 reserved_at_40[0x20]; u8 resd_lkey[0x20]; }; struct mlx5_ifc_query_special_contexts_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_query_rqt_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0xc0]; + u8 reserved_at_40[0xc0]; struct mlx5_ifc_rqtc_bits rqt_context; }; struct mlx5_ifc_query_rqt_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 rqtn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_rq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0xc0]; + u8 reserved_at_40[0xc0]; struct mlx5_ifc_rqc_bits rq_context; }; struct mlx5_ifc_query_rq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 rqn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_roce_address_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; struct mlx5_ifc_roce_addr_layout_bits roce_address; }; struct mlx5_ifc_query_roce_address_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + 
u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 roce_address_index[0x10]; - u8 reserved_2[0x10]; + u8 reserved_at_50[0x10]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_rmp_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0xc0]; + u8 reserved_at_40[0xc0]; struct mlx5_ifc_rmpc_bits rmp_context; }; struct mlx5_ifc_query_rmp_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 rmpn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_qp_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; u8 opt_param_mask[0x20]; - u8 reserved_2[0x20]; + u8 reserved_at_a0[0x20]; struct mlx5_ifc_qpc_bits qpc; - u8 reserved_3[0x80]; + u8 reserved_at_800[0x80]; u8 pas[0][0x40]; }; struct mlx5_ifc_query_qp_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 qpn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_q_counter_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; u8 rx_write_requests[0x20]; - u8 reserved_2[0x20]; + u8 reserved_at_a0[0x20]; u8 rx_read_requests[0x20]; - u8 reserved_3[0x20]; + u8 reserved_at_e0[0x20]; u8 rx_atomic_requests[0x20]; - u8 reserved_4[0x20]; + u8 reserved_at_120[0x20]; u8 rx_dct_connect[0x20]; - u8 reserved_5[0x20]; + u8 reserved_at_160[0x20]; u8 out_of_buffer[0x20]; - u8 reserved_6[0x20]; + u8 reserved_at_1a0[0x20]; u8 out_of_sequence[0x20]; - u8 reserved_7[0x620]; + u8 reserved_at_1e0[0x620]; }; struct mlx5_ifc_query_q_counter_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x80]; + u8 reserved_at_40[0x80]; u8 clear[0x1]; - u8 reserved_3[0x1f]; + u8 reserved_at_c1[0x1f]; - u8 reserved_4[0x18]; + u8 reserved_at_e0[0x18]; u8 counter_set_id[0x8]; }; struct mlx5_ifc_query_pages_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x10]; + u8 reserved_at_40[0x10]; u8 function_id[0x10]; u8 num_pages[0x20]; @@ -3455,55 +3507,55 @@ enum { struct mlx5_ifc_query_pages_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x10]; + u8 reserved_at_40[0x10]; u8 function_id[0x10]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_nic_vport_context_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; struct mlx5_ifc_nic_vport_context_bits nic_vport_context; }; struct mlx5_ifc_query_nic_vport_context_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; - u8 reserved_2[0xf]; + u8 reserved_at_41[0xf]; u8 vport_number[0x10]; - u8 reserved_3[0x5]; + u8 reserved_at_60[0x5]; u8 allowed_list_type[0x3]; - u8 reserved_4[0x18]; + u8 reserved_at_68[0x18]; }; struct mlx5_ifc_query_mkey_out_bits { u8 
status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; struct mlx5_ifc_mkc_bits memory_key_mkey_entry; - u8 reserved_2[0x600]; + u8 reserved_at_280[0x600]; u8 bsf0_klm0_pas_mtt0_1[16][0x8]; @@ -3512,265 +3564,265 @@ struct mlx5_ifc_query_mkey_out_bits { struct mlx5_ifc_query_mkey_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 mkey_index[0x18]; u8 pg_access[0x1]; - u8 reserved_3[0x1f]; + u8 reserved_at_61[0x1f]; }; struct mlx5_ifc_query_mad_demux_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; u8 mad_dumux_parameters_block[0x20]; }; struct mlx5_ifc_query_mad_demux_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_query_l2_table_entry_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0xa0]; + u8 reserved_at_40[0xa0]; - u8 reserved_2[0x13]; + u8 reserved_at_e0[0x13]; u8 vlan_valid[0x1]; u8 vlan[0xc]; struct mlx5_ifc_mac_address_layout_bits mac_address; - u8 reserved_3[0xc0]; + u8 reserved_at_140[0xc0]; }; struct mlx5_ifc_query_l2_table_entry_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x60]; + u8 reserved_at_40[0x60]; - u8 reserved_3[0x8]; + u8 reserved_at_a0[0x8]; u8 table_index[0x18]; - u8 reserved_4[0x140]; + u8 reserved_at_c0[0x140]; }; struct mlx5_ifc_query_issi_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x10]; + u8 reserved_at_40[0x10]; u8 current_issi[0x10]; - u8 reserved_2[0xa0]; + u8 reserved_at_60[0xa0]; - u8 supported_issi_reserved[76][0x8]; + u8 reserved_at_100[76][0x8]; u8 supported_issi_dw0[0x20]; }; struct mlx5_ifc_query_issi_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_query_hca_vport_pkey_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; struct mlx5_ifc_pkey_bits pkey[0]; }; struct mlx5_ifc_query_hca_vport_pkey_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; - u8 reserved_2[0xb]; + u8 reserved_at_41[0xb]; u8 port_num[0x4]; u8 vport_number[0x10]; - u8 reserved_3[0x10]; + u8 reserved_at_60[0x10]; u8 pkey_index[0x10]; }; struct mlx5_ifc_query_hca_vport_gid_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x20]; + u8 reserved_at_40[0x20]; u8 gids_num[0x10]; - u8 reserved_2[0x10]; + u8 reserved_at_70[0x10]; struct mlx5_ifc_array128_auto_bits gid[0]; }; struct mlx5_ifc_query_hca_vport_gid_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; - u8 reserved_2[0xb]; + u8 reserved_at_41[0xb]; u8 port_num[0x4]; u8 
vport_number[0x10]; - u8 reserved_3[0x10]; + u8 reserved_at_60[0x10]; u8 gid_index[0x10]; }; struct mlx5_ifc_query_hca_vport_context_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; struct mlx5_ifc_hca_vport_context_bits hca_vport_context; }; struct mlx5_ifc_query_hca_vport_context_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; - u8 reserved_2[0xb]; + u8 reserved_at_41[0xb]; u8 port_num[0x4]; u8 vport_number[0x10]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_hca_cap_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; union mlx5_ifc_hca_cap_union_bits capability; }; struct mlx5_ifc_query_hca_cap_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_query_flow_table_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x80]; + u8 reserved_at_40[0x80]; - u8 reserved_2[0x8]; + u8 reserved_at_c0[0x8]; u8 level[0x8]; - u8 reserved_3[0x8]; + u8 reserved_at_d0[0x8]; u8 log_size[0x8]; - u8 reserved_4[0x120]; + u8 reserved_at_e0[0x120]; }; struct mlx5_ifc_query_flow_table_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; u8 table_type[0x8]; - u8 reserved_3[0x18]; + u8 reserved_at_88[0x18]; - u8 reserved_4[0x8]; + u8 reserved_at_a0[0x8]; u8 table_id[0x18]; - u8 reserved_5[0x140]; + u8 reserved_at_c0[0x140]; }; struct mlx5_ifc_query_fte_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x1c0]; + u8 reserved_at_40[0x1c0]; struct mlx5_ifc_flow_context_bits flow_context; }; struct mlx5_ifc_query_fte_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; u8 table_type[0x8]; - u8 reserved_3[0x18]; + u8 reserved_at_88[0x18]; - u8 reserved_4[0x8]; + u8 reserved_at_a0[0x8]; u8 table_id[0x18]; - u8 reserved_5[0x40]; + u8 reserved_at_c0[0x40]; u8 flow_index[0x20]; - u8 reserved_6[0xe0]; + u8 reserved_at_120[0xe0]; }; enum { @@ -3781,84 +3833,84 @@ enum { struct mlx5_ifc_query_flow_group_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0xa0]; + u8 reserved_at_40[0xa0]; u8 start_flow_index[0x20]; - u8 reserved_2[0x20]; + u8 reserved_at_100[0x20]; u8 end_flow_index[0x20]; - u8 reserved_3[0xa0]; + u8 reserved_at_140[0xa0]; - u8 reserved_4[0x18]; + u8 reserved_at_1e0[0x18]; u8 match_criteria_enable[0x8]; struct mlx5_ifc_fte_match_param_bits match_criteria; - u8 reserved_5[0xe00]; + u8 reserved_at_1200[0xe00]; }; struct mlx5_ifc_query_flow_group_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; u8 table_type[0x8]; - u8 reserved_3[0x18]; + u8 reserved_at_88[0x18]; - u8 reserved_4[0x8]; + u8 reserved_at_a0[0x8]; u8 table_id[0x18]; u8 
group_id[0x20]; - u8 reserved_5[0x120]; + u8 reserved_at_e0[0x120]; }; struct mlx5_ifc_query_esw_vport_context_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; struct mlx5_ifc_esw_vport_context_bits esw_vport_context; }; struct mlx5_ifc_query_esw_vport_context_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; - u8 reserved_2[0xf]; + u8 reserved_at_41[0xf]; u8 vport_number[0x10]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_modify_esw_vport_context_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_esw_vport_context_fields_select_bits { - u8 reserved[0x1c]; + u8 reserved_at_0[0x1c]; u8 vport_cvlan_insert[0x1]; u8 vport_svlan_insert[0x1]; u8 vport_cvlan_strip[0x1]; @@ -3867,13 +3919,13 @@ struct mlx5_ifc_esw_vport_context_fields_select_bits { struct mlx5_ifc_modify_esw_vport_context_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; - u8 reserved_2[0xf]; + u8 reserved_at_41[0xf]; u8 vport_number[0x10]; struct mlx5_ifc_esw_vport_context_fields_select_bits field_select; @@ -3883,124 +3935,124 @@ struct mlx5_ifc_modify_esw_vport_context_in_bits { struct mlx5_ifc_query_eq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; struct mlx5_ifc_eqc_bits eq_context_entry; - u8 reserved_2[0x40]; + u8 reserved_at_280[0x40]; u8 event_bitmask[0x40]; - u8 reserved_3[0x580]; + u8 reserved_at_300[0x580]; u8 pas[0][0x40]; }; struct mlx5_ifc_query_eq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x18]; + u8 reserved_at_40[0x18]; u8 eq_number[0x8]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_dct_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; struct mlx5_ifc_dctc_bits dct_context_entry; - u8 reserved_2[0x180]; + u8 reserved_at_280[0x180]; }; struct mlx5_ifc_query_dct_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 dctn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_cq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; struct mlx5_ifc_cqc_bits cq_context; - u8 reserved_2[0x600]; + u8 reserved_at_280[0x600]; u8 pas[0][0x40]; }; struct mlx5_ifc_query_cq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 cqn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_cong_status_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x20]; + u8 reserved_at_40[0x20]; u8 enable[0x1]; u8 tag_enable[0x1]; - u8 reserved_2[0x1e]; + u8 
reserved_at_62[0x1e]; }; struct mlx5_ifc_query_cong_status_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x18]; + u8 reserved_at_40[0x18]; u8 priority[0x4]; u8 cong_protocol[0x4]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_cong_statistics_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; u8 cur_flows[0x20]; @@ -4014,7 +4066,7 @@ struct mlx5_ifc_query_cong_statistics_out_bits { u8 cnp_handled_low[0x20]; - u8 reserved_2[0x100]; + u8 reserved_at_140[0x100]; u8 time_stamp_high[0x20]; @@ -4030,453 +4082,455 @@ struct mlx5_ifc_query_cong_statistics_out_bits { u8 cnps_sent_low[0x20]; - u8 reserved_3[0x560]; + u8 reserved_at_320[0x560]; }; struct mlx5_ifc_query_cong_statistics_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 clear[0x1]; - u8 reserved_2[0x1f]; + u8 reserved_at_41[0x1f]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_cong_params_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; }; struct mlx5_ifc_query_cong_params_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x1c]; + u8 reserved_at_40[0x1c]; u8 cong_protocol[0x4]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_adapter_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct; }; struct mlx5_ifc_query_adapter_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_qp_2rst_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_qp_2rst_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 qpn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_qp_2err_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_qp_2err_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 qpn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_page_fault_resume_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_page_fault_resume_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 error[0x1]; - u8 reserved_2[0x4]; + u8 reserved_at_41[0x4]; u8 
rdma[0x1]; u8 read_write[0x1]; u8 req_res[0x1]; u8 qpn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_nop_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_nop_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_modify_vport_state_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_modify_vport_state_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; - u8 reserved_2[0xf]; + u8 reserved_at_41[0xf]; u8 vport_number[0x10]; - u8 reserved_3[0x18]; + u8 reserved_at_60[0x18]; u8 admin_state[0x4]; - u8 reserved_4[0x4]; + u8 reserved_at_7c[0x4]; }; struct mlx5_ifc_modify_tis_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_modify_tis_bitmask_bits { - u8 reserved_0[0x20]; + u8 reserved_at_0[0x20]; - u8 reserved_1[0x1f]; + u8 reserved_at_20[0x1f]; u8 prio[0x1]; }; struct mlx5_ifc_modify_tis_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 tisn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; struct mlx5_ifc_modify_tis_bitmask_bits bitmask; - u8 reserved_4[0x40]; + u8 reserved_at_c0[0x40]; struct mlx5_ifc_tisc_bits ctx; }; struct mlx5_ifc_modify_tir_bitmask_bits { - u8 reserved_0[0x20]; + u8 reserved_at_0[0x20]; - u8 reserved_1[0x1b]; + u8 reserved_at_20[0x1b]; u8 self_lb_en[0x1]; - u8 reserved_2[0x3]; + u8 reserved_at_3c[0x1]; + u8 hash[0x1]; + u8 reserved_at_3e[0x1]; u8 lro[0x1]; }; struct mlx5_ifc_modify_tir_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_modify_tir_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 tirn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; struct mlx5_ifc_modify_tir_bitmask_bits bitmask; - u8 reserved_4[0x40]; + u8 reserved_at_c0[0x40]; struct mlx5_ifc_tirc_bits ctx; }; struct mlx5_ifc_modify_sq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_modify_sq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 sq_state[0x4]; - u8 reserved_2[0x4]; + u8 reserved_at_44[0x4]; u8 sqn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; u8 modify_bitmask[0x40]; - u8 reserved_4[0x40]; + u8 reserved_at_c0[0x40]; struct mlx5_ifc_sqc_bits ctx; }; struct mlx5_ifc_modify_rqt_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_rqt_bitmask_bits { - u8 reserved[0x20]; + u8 reserved_at_0[0x20]; - u8 reserved1[0x1f]; + u8 
reserved_at_20[0x1f]; u8 rqn_list[0x1]; }; struct mlx5_ifc_modify_rqt_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 rqtn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; struct mlx5_ifc_rqt_bitmask_bits bitmask; - u8 reserved_4[0x40]; + u8 reserved_at_c0[0x40]; struct mlx5_ifc_rqtc_bits ctx; }; struct mlx5_ifc_modify_rq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_modify_rq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 rq_state[0x4]; - u8 reserved_2[0x4]; + u8 reserved_at_44[0x4]; u8 rqn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; u8 modify_bitmask[0x40]; - u8 reserved_4[0x40]; + u8 reserved_at_c0[0x40]; struct mlx5_ifc_rqc_bits ctx; }; struct mlx5_ifc_modify_rmp_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_rmp_bitmask_bits { - u8 reserved[0x20]; + u8 reserved_at_0[0x20]; - u8 reserved1[0x1f]; + u8 reserved_at_20[0x1f]; u8 lwm[0x1]; }; struct mlx5_ifc_modify_rmp_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 rmp_state[0x4]; - u8 reserved_2[0x4]; + u8 reserved_at_44[0x4]; u8 rmpn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; struct mlx5_ifc_rmp_bitmask_bits bitmask; - u8 reserved_4[0x40]; + u8 reserved_at_c0[0x40]; struct mlx5_ifc_rmpc_bits ctx; }; struct mlx5_ifc_modify_nic_vport_context_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_modify_nic_vport_field_select_bits { - u8 reserved_0[0x19]; + u8 reserved_at_0[0x19]; u8 mtu[0x1]; u8 change_event[0x1]; u8 promisc[0x1]; u8 permanent_address[0x1]; u8 addresses_list[0x1]; u8 roce_en[0x1]; - u8 reserved_1[0x1]; + u8 reserved_at_1f[0x1]; }; struct mlx5_ifc_modify_nic_vport_context_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; - u8 reserved_2[0xf]; + u8 reserved_at_41[0xf]; u8 vport_number[0x10]; struct mlx5_ifc_modify_nic_vport_field_select_bits field_select; - u8 reserved_3[0x780]; + u8 reserved_at_80[0x780]; struct mlx5_ifc_nic_vport_context_bits nic_vport_context; }; struct mlx5_ifc_modify_hca_vport_context_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_modify_hca_vport_context_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; - u8 reserved_2[0xb]; + u8 reserved_at_41[0xb]; u8 port_num[0x4]; u8 vport_number[0x10]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; struct mlx5_ifc_hca_vport_context_bits hca_vport_context; }; struct mlx5_ifc_modify_cq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; enum { @@ -4486,83 +4540,83 @@ enum { struct mlx5_ifc_modify_cq_in_bits { u8 
opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 cqn[0x18]; union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select; struct mlx5_ifc_cqc_bits cq_context; - u8 reserved_3[0x600]; + u8 reserved_at_280[0x600]; u8 pas[0][0x40]; }; struct mlx5_ifc_modify_cong_status_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_modify_cong_status_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x18]; + u8 reserved_at_40[0x18]; u8 priority[0x4]; u8 cong_protocol[0x4]; u8 enable[0x1]; u8 tag_enable[0x1]; - u8 reserved_3[0x1e]; + u8 reserved_at_62[0x1e]; }; struct mlx5_ifc_modify_cong_params_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_modify_cong_params_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x1c]; + u8 reserved_at_40[0x1c]; u8 cong_protocol[0x4]; union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select; - u8 reserved_3[0x80]; + u8 reserved_at_80[0x80]; union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; }; struct mlx5_ifc_manage_pages_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 output_num_entries[0x20]; - u8 reserved_1[0x20]; + u8 reserved_at_60[0x20]; u8 pas[0][0x40]; }; @@ -4575,12 +4629,12 @@ enum { struct mlx5_ifc_manage_pages_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x10]; + u8 reserved_at_40[0x10]; u8 function_id[0x10]; u8 input_num_entries[0x20]; @@ -4590,117 +4644,117 @@ struct mlx5_ifc_manage_pages_in_bits { struct mlx5_ifc_mad_ifc_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; u8 response_mad_packet[256][0x8]; }; struct mlx5_ifc_mad_ifc_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 remote_lid[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_50[0x8]; u8 port[0x8]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; u8 mad[256][0x8]; }; struct mlx5_ifc_init_hca_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_init_hca_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_init2rtr_qp_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_init2rtr_qp_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 qpn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; u8 
opt_param_mask[0x20]; - u8 reserved_4[0x20]; + u8 reserved_at_a0[0x20]; struct mlx5_ifc_qpc_bits qpc; - u8 reserved_5[0x80]; + u8 reserved_at_800[0x80]; }; struct mlx5_ifc_init2init_qp_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_init2init_qp_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 qpn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; u8 opt_param_mask[0x20]; - u8 reserved_4[0x20]; + u8 reserved_at_a0[0x20]; struct mlx5_ifc_qpc_bits qpc; - u8 reserved_5[0x80]; + u8 reserved_at_800[0x80]; }; struct mlx5_ifc_get_dropped_packet_log_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; u8 packet_headers_log[128][0x8]; @@ -4709,1029 +4763,1029 @@ struct mlx5_ifc_get_dropped_packet_log_out_bits { struct mlx5_ifc_get_dropped_packet_log_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_gen_eqe_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x18]; + u8 reserved_at_40[0x18]; u8 eq_number[0x8]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; u8 eqe[64][0x8]; }; struct mlx5_ifc_gen_eq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_enable_hca_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x20]; + u8 reserved_at_40[0x20]; }; struct mlx5_ifc_enable_hca_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x10]; + u8 reserved_at_40[0x10]; u8 function_id[0x10]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_drain_dct_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_drain_dct_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 dctn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_disable_hca_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x20]; + u8 reserved_at_40[0x20]; }; struct mlx5_ifc_disable_hca_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x10]; + u8 reserved_at_40[0x10]; u8 function_id[0x10]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_detach_from_mcg_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_detach_from_mcg_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 
reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 qpn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; u8 multicast_gid[16][0x8]; }; struct mlx5_ifc_destroy_xrc_srq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_xrc_srq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 xrc_srqn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_destroy_tis_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_tis_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 tisn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_destroy_tir_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_tir_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 tirn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_destroy_srq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_srq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 srqn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_destroy_sq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_sq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 sqn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_destroy_rqt_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_rqt_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 rqtn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_destroy_rq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_rq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 rqn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_destroy_rmp_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 
reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_rmp_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 rmpn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_destroy_qp_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_qp_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 qpn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_destroy_psv_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_psv_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 psvn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_destroy_mkey_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_mkey_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 mkey_index[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_destroy_flow_table_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_flow_table_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; u8 table_type[0x8]; - u8 reserved_3[0x18]; + u8 reserved_at_88[0x18]; - u8 reserved_4[0x8]; + u8 reserved_at_a0[0x8]; u8 table_id[0x18]; - u8 reserved_5[0x140]; + u8 reserved_at_c0[0x140]; }; struct mlx5_ifc_destroy_flow_group_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_flow_group_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; u8 table_type[0x8]; - u8 reserved_3[0x18]; + u8 reserved_at_88[0x18]; - u8 reserved_4[0x8]; + u8 reserved_at_a0[0x8]; u8 table_id[0x18]; u8 group_id[0x20]; - u8 reserved_5[0x120]; + u8 reserved_at_e0[0x120]; }; struct mlx5_ifc_destroy_eq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_eq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x18]; + u8 reserved_at_40[0x18]; u8 eq_number[0x8]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_destroy_dct_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + 
u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_dct_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 dctn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_destroy_cq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_cq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 cqn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_delete_vxlan_udp_dport_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_delete_vxlan_udp_dport_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x20]; + u8 reserved_at_40[0x20]; - u8 reserved_3[0x10]; + u8 reserved_at_60[0x10]; u8 vxlan_udp_port[0x10]; }; struct mlx5_ifc_delete_l2_table_entry_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_delete_l2_table_entry_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x60]; + u8 reserved_at_40[0x60]; - u8 reserved_3[0x8]; + u8 reserved_at_a0[0x8]; u8 table_index[0x18]; - u8 reserved_4[0x140]; + u8 reserved_at_c0[0x140]; }; struct mlx5_ifc_delete_fte_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_delete_fte_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; u8 table_type[0x8]; - u8 reserved_3[0x18]; + u8 reserved_at_88[0x18]; - u8 reserved_4[0x8]; + u8 reserved_at_a0[0x8]; u8 table_id[0x18]; - u8 reserved_5[0x40]; + u8 reserved_at_c0[0x40]; u8 flow_index[0x20]; - u8 reserved_6[0xe0]; + u8 reserved_at_120[0xe0]; }; struct mlx5_ifc_dealloc_xrcd_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_dealloc_xrcd_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 xrcd[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_dealloc_uar_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_dealloc_uar_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 uar[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_dealloc_transport_domain_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 
reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_dealloc_transport_domain_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 transport_domain[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_dealloc_q_counter_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_dealloc_q_counter_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x18]; + u8 reserved_at_40[0x18]; u8 counter_set_id[0x8]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_dealloc_pd_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_dealloc_pd_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 pd[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_xrc_srq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x8]; + u8 reserved_at_40[0x8]; u8 xrc_srqn[0x18]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_xrc_srq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; - u8 reserved_3[0x600]; + u8 reserved_at_280[0x600]; u8 pas[0][0x40]; }; struct mlx5_ifc_create_tis_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x8]; + u8 reserved_at_40[0x8]; u8 tisn[0x18]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_tis_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0xc0]; + u8 reserved_at_40[0xc0]; struct mlx5_ifc_tisc_bits ctx; }; struct mlx5_ifc_create_tir_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x8]; + u8 reserved_at_40[0x8]; u8 tirn[0x18]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_tir_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0xc0]; + u8 reserved_at_40[0xc0]; struct mlx5_ifc_tirc_bits ctx; }; struct mlx5_ifc_create_srq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x8]; + u8 reserved_at_40[0x8]; u8 srqn[0x18]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_srq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; struct mlx5_ifc_srqc_bits srq_context_entry; - u8 reserved_3[0x600]; + u8 reserved_at_280[0x600]; u8 pas[0][0x40]; }; struct mlx5_ifc_create_sq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 
reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x8]; + u8 reserved_at_40[0x8]; u8 sqn[0x18]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_sq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0xc0]; + u8 reserved_at_40[0xc0]; struct mlx5_ifc_sqc_bits ctx; }; struct mlx5_ifc_create_rqt_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x8]; + u8 reserved_at_40[0x8]; u8 rqtn[0x18]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_rqt_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0xc0]; + u8 reserved_at_40[0xc0]; struct mlx5_ifc_rqtc_bits rqt_context; }; struct mlx5_ifc_create_rq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x8]; + u8 reserved_at_40[0x8]; u8 rqn[0x18]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_rq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0xc0]; + u8 reserved_at_40[0xc0]; struct mlx5_ifc_rqc_bits ctx; }; struct mlx5_ifc_create_rmp_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x8]; + u8 reserved_at_40[0x8]; u8 rmpn[0x18]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_rmp_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0xc0]; + u8 reserved_at_40[0xc0]; struct mlx5_ifc_rmpc_bits ctx; }; struct mlx5_ifc_create_qp_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x8]; + u8 reserved_at_40[0x8]; u8 qpn[0x18]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_qp_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; u8 opt_param_mask[0x20]; - u8 reserved_3[0x20]; + u8 reserved_at_a0[0x20]; struct mlx5_ifc_qpc_bits qpc; - u8 reserved_4[0x80]; + u8 reserved_at_800[0x80]; u8 pas[0][0x40]; }; struct mlx5_ifc_create_psv_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; - u8 reserved_2[0x8]; + u8 reserved_at_80[0x8]; u8 psv0_index[0x18]; - u8 reserved_3[0x8]; + u8 reserved_at_a0[0x8]; u8 psv1_index[0x18]; - u8 reserved_4[0x8]; + u8 reserved_at_c0[0x8]; u8 psv2_index[0x18]; - u8 reserved_5[0x8]; + u8 reserved_at_e0[0x8]; u8 psv3_index[0x18]; }; struct mlx5_ifc_create_psv_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 num_psv[0x4]; - u8 reserved_2[0x4]; + u8 reserved_at_44[0x4]; u8 pd[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_mkey_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x8]; + u8 reserved_at_40[0x8]; u8 mkey_index[0x18]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; struct 
mlx5_ifc_create_mkey_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x20]; + u8 reserved_at_40[0x20]; u8 pg_access[0x1]; - u8 reserved_3[0x1f]; + u8 reserved_at_61[0x1f]; struct mlx5_ifc_mkc_bits memory_key_mkey_entry; - u8 reserved_4[0x80]; + u8 reserved_at_280[0x80]; u8 translations_octword_actual_size[0x20]; - u8 reserved_5[0x560]; + u8 reserved_at_320[0x560]; u8 klm_pas_mtt[0][0x20]; }; struct mlx5_ifc_create_flow_table_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x8]; + u8 reserved_at_40[0x8]; u8 table_id[0x18]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_flow_table_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; u8 table_type[0x8]; - u8 reserved_3[0x18]; + u8 reserved_at_88[0x18]; - u8 reserved_4[0x20]; + u8 reserved_at_a0[0x20]; - u8 reserved_5[0x4]; + u8 reserved_at_c0[0x4]; u8 table_miss_mode[0x4]; u8 level[0x8]; - u8 reserved_6[0x8]; + u8 reserved_at_d0[0x8]; u8 log_size[0x8]; - u8 reserved_7[0x8]; + u8 reserved_at_e0[0x8]; u8 table_miss_id[0x18]; - u8 reserved_8[0x100]; + u8 reserved_at_100[0x100]; }; struct mlx5_ifc_create_flow_group_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x8]; + u8 reserved_at_40[0x8]; u8 group_id[0x18]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; enum { @@ -5742,134 +5796,134 @@ enum { struct mlx5_ifc_create_flow_group_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; u8 table_type[0x8]; - u8 reserved_3[0x18]; + u8 reserved_at_88[0x18]; - u8 reserved_4[0x8]; + u8 reserved_at_a0[0x8]; u8 table_id[0x18]; - u8 reserved_5[0x20]; + u8 reserved_at_c0[0x20]; u8 start_flow_index[0x20]; - u8 reserved_6[0x20]; + u8 reserved_at_100[0x20]; u8 end_flow_index[0x20]; - u8 reserved_7[0xa0]; + u8 reserved_at_140[0xa0]; - u8 reserved_8[0x18]; + u8 reserved_at_1e0[0x18]; u8 match_criteria_enable[0x8]; struct mlx5_ifc_fte_match_param_bits match_criteria; - u8 reserved_9[0xe00]; + u8 reserved_at_1200[0xe00]; }; struct mlx5_ifc_create_eq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x18]; + u8 reserved_at_40[0x18]; u8 eq_number[0x8]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_eq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; struct mlx5_ifc_eqc_bits eq_context_entry; - u8 reserved_3[0x40]; + u8 reserved_at_280[0x40]; u8 event_bitmask[0x40]; - u8 reserved_4[0x580]; + u8 reserved_at_300[0x580]; u8 pas[0][0x40]; }; struct mlx5_ifc_create_dct_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x8]; + u8 reserved_at_40[0x8]; u8 dctn[0x18]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_dct_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; 
struct mlx5_ifc_dctc_bits dct_context_entry; - u8 reserved_3[0x180]; + u8 reserved_at_280[0x180]; }; struct mlx5_ifc_create_cq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x8]; + u8 reserved_at_40[0x8]; u8 cqn[0x18]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_cq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; struct mlx5_ifc_cqc_bits cq_context; - u8 reserved_3[0x600]; + u8 reserved_at_280[0x600]; u8 pas[0][0x40]; }; struct mlx5_ifc_config_int_moderation_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x4]; + u8 reserved_at_40[0x4]; u8 min_delay[0xc]; u8 int_vector[0x10]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; enum { @@ -5879,49 +5933,49 @@ enum { struct mlx5_ifc_config_int_moderation_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x4]; + u8 reserved_at_40[0x4]; u8 min_delay[0xc]; u8 int_vector[0x10]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_attach_to_mcg_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_attach_to_mcg_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 qpn[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; u8 multicast_gid[16][0x8]; }; struct mlx5_ifc_arm_xrc_srq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; enum { @@ -5930,25 +5984,25 @@ enum { struct mlx5_ifc_arm_xrc_srq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 xrc_srqn[0x18]; - u8 reserved_3[0x10]; + u8 reserved_at_60[0x10]; u8 lwm[0x10]; }; struct mlx5_ifc_arm_rq_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; enum { @@ -5957,179 +6011,179 @@ enum { struct mlx5_ifc_arm_rq_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 srq_number[0x18]; - u8 reserved_3[0x10]; + u8 reserved_at_60[0x10]; u8 lwm[0x10]; }; struct mlx5_ifc_arm_dct_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_arm_dct_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_40[0x8]; u8 dct_number[0x18]; - u8 reserved_3[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_alloc_xrcd_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x8]; + u8 reserved_at_40[0x8]; u8 xrcd[0x18]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; struct 
mlx5_ifc_alloc_xrcd_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_alloc_uar_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x8]; + u8 reserved_at_40[0x8]; u8 uar[0x18]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_alloc_uar_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_alloc_transport_domain_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x8]; + u8 reserved_at_40[0x8]; u8 transport_domain[0x18]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_alloc_transport_domain_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_alloc_q_counter_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x18]; + u8 reserved_at_40[0x18]; u8 counter_set_id[0x8]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_alloc_q_counter_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_alloc_pd_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x8]; + u8 reserved_at_40[0x8]; u8 pd[0x18]; - u8 reserved_2[0x20]; + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_alloc_pd_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_add_vxlan_udp_dport_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_add_vxlan_udp_dport_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x20]; + u8 reserved_at_40[0x20]; - u8 reserved_3[0x10]; + u8 reserved_at_60[0x10]; u8 vxlan_udp_port[0x10]; }; struct mlx5_ifc_access_register_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; u8 register_data[0][0x20]; }; @@ -6141,12 +6195,12 @@ enum { struct mlx5_ifc_access_register_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x10]; + u8 reserved_at_40[0x10]; u8 register_id[0x10]; u8 argument[0x20]; @@ -6159,24 +6213,24 @@ struct mlx5_ifc_sltp_reg_bits { u8 version[0x4]; u8 local_port[0x8]; u8 pnat[0x2]; - u8 reserved_0[0x2]; + u8 reserved_at_12[0x2]; u8 lane[0x4]; - u8 reserved_1[0x8]; + u8 reserved_at_18[0x8]; - u8 reserved_2[0x20]; + u8 reserved_at_20[0x20]; - u8 reserved_3[0x7]; + u8 reserved_at_40[0x7]; u8 polarity[0x1]; u8 ob_tap0[0x8]; u8 ob_tap1[0x8]; u8 ob_tap2[0x8]; - u8 reserved_4[0xc]; + u8 reserved_at_60[0xc]; u8 
ob_preemp_mode[0x4]; u8 ob_reg[0x8]; u8 ob_bias[0x8]; - u8 reserved_5[0x20]; + u8 reserved_at_80[0x20]; }; struct mlx5_ifc_slrg_reg_bits { @@ -6184,36 +6238,36 @@ struct mlx5_ifc_slrg_reg_bits { u8 version[0x4]; u8 local_port[0x8]; u8 pnat[0x2]; - u8 reserved_0[0x2]; + u8 reserved_at_12[0x2]; u8 lane[0x4]; - u8 reserved_1[0x8]; + u8 reserved_at_18[0x8]; u8 time_to_link_up[0x10]; - u8 reserved_2[0xc]; + u8 reserved_at_30[0xc]; u8 grade_lane_speed[0x4]; u8 grade_version[0x8]; u8 grade[0x18]; - u8 reserved_3[0x4]; + u8 reserved_at_60[0x4]; u8 height_grade_type[0x4]; u8 height_grade[0x18]; u8 height_dz[0x10]; u8 height_dv[0x10]; - u8 reserved_4[0x10]; + u8 reserved_at_a0[0x10]; u8 height_sigma[0x10]; - u8 reserved_5[0x20]; + u8 reserved_at_c0[0x20]; - u8 reserved_6[0x4]; + u8 reserved_at_e0[0x4]; u8 phase_grade_type[0x4]; u8 phase_grade[0x18]; - u8 reserved_7[0x8]; + u8 reserved_at_100[0x8]; u8 phase_eo_pos[0x8]; - u8 reserved_8[0x8]; + u8 reserved_at_110[0x8]; u8 phase_eo_neg[0x8]; u8 ffe_set_tested[0x10]; @@ -6221,70 +6275,70 @@ struct mlx5_ifc_slrg_reg_bits { }; struct mlx5_ifc_pvlc_reg_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 local_port[0x8]; - u8 reserved_1[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_2[0x1c]; + u8 reserved_at_20[0x1c]; u8 vl_hw_cap[0x4]; - u8 reserved_3[0x1c]; + u8 reserved_at_40[0x1c]; u8 vl_admin[0x4]; - u8 reserved_4[0x1c]; + u8 reserved_at_60[0x1c]; u8 vl_operational[0x4]; }; struct mlx5_ifc_pude_reg_bits { u8 swid[0x8]; u8 local_port[0x8]; - u8 reserved_0[0x4]; + u8 reserved_at_10[0x4]; u8 admin_status[0x4]; - u8 reserved_1[0x4]; + u8 reserved_at_18[0x4]; u8 oper_status[0x4]; - u8 reserved_2[0x60]; + u8 reserved_at_20[0x60]; }; struct mlx5_ifc_ptys_reg_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 local_port[0x8]; - u8 reserved_1[0xd]; + u8 reserved_at_10[0xd]; u8 proto_mask[0x3]; - u8 reserved_2[0x40]; + u8 reserved_at_20[0x40]; u8 eth_proto_capability[0x20]; u8 ib_link_width_capability[0x10]; u8 ib_proto_capability[0x10]; - u8 reserved_3[0x20]; + u8 reserved_at_a0[0x20]; u8 eth_proto_admin[0x20]; u8 ib_link_width_admin[0x10]; u8 ib_proto_admin[0x10]; - u8 reserved_4[0x20]; + u8 reserved_at_100[0x20]; u8 eth_proto_oper[0x20]; u8 ib_link_width_oper[0x10]; u8 ib_proto_oper[0x10]; - u8 reserved_5[0x20]; + u8 reserved_at_160[0x20]; u8 eth_proto_lp_advertise[0x20]; - u8 reserved_6[0x60]; + u8 reserved_at_1a0[0x60]; }; struct mlx5_ifc_ptas_reg_bits { - u8 reserved_0[0x20]; + u8 reserved_at_0[0x20]; u8 algorithm_options[0x10]; - u8 reserved_1[0x4]; + u8 reserved_at_30[0x4]; u8 repetitions_mode[0x4]; u8 num_of_repetitions[0x8]; @@ -6310,13 +6364,13 @@ struct mlx5_ifc_ptas_reg_bits { u8 ndeo_error_threshold[0x10]; u8 mixer_offset_step_size[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_110[0x8]; u8 mix90_phase_for_voltage_bath[0x8]; u8 mixer_offset_start[0x10]; u8 mixer_offset_end[0x10]; - u8 reserved_3[0x15]; + u8 reserved_at_140[0x15]; u8 ber_test_time[0xb]; }; @@ -6324,154 +6378,154 @@ struct mlx5_ifc_pspa_reg_bits { u8 swid[0x8]; u8 local_port[0x8]; u8 sub_port[0x8]; - u8 reserved_0[0x8]; + u8 reserved_at_18[0x8]; - u8 reserved_1[0x20]; + u8 reserved_at_20[0x20]; }; struct mlx5_ifc_pqdr_reg_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 local_port[0x8]; - u8 reserved_1[0x5]; + u8 reserved_at_10[0x5]; u8 prio[0x3]; - u8 reserved_2[0x6]; + u8 reserved_at_18[0x6]; u8 mode[0x2]; - u8 reserved_3[0x20]; + u8 reserved_at_20[0x20]; - u8 reserved_4[0x10]; + u8 reserved_at_40[0x10]; u8 min_threshold[0x10]; - u8 reserved_5[0x10]; + u8 
reserved_at_60[0x10]; u8 max_threshold[0x10]; - u8 reserved_6[0x10]; + u8 reserved_at_80[0x10]; u8 mark_probability_denominator[0x10]; - u8 reserved_7[0x60]; + u8 reserved_at_a0[0x60]; }; struct mlx5_ifc_ppsc_reg_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 local_port[0x8]; - u8 reserved_1[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_2[0x60]; + u8 reserved_at_20[0x60]; - u8 reserved_3[0x1c]; + u8 reserved_at_80[0x1c]; u8 wrps_admin[0x4]; - u8 reserved_4[0x1c]; + u8 reserved_at_a0[0x1c]; u8 wrps_status[0x4]; - u8 reserved_5[0x8]; + u8 reserved_at_c0[0x8]; u8 up_threshold[0x8]; - u8 reserved_6[0x8]; + u8 reserved_at_d0[0x8]; u8 down_threshold[0x8]; - u8 reserved_7[0x20]; + u8 reserved_at_e0[0x20]; - u8 reserved_8[0x1c]; + u8 reserved_at_100[0x1c]; u8 srps_admin[0x4]; - u8 reserved_9[0x1c]; + u8 reserved_at_120[0x1c]; u8 srps_status[0x4]; - u8 reserved_10[0x40]; + u8 reserved_at_140[0x40]; }; struct mlx5_ifc_pplr_reg_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 local_port[0x8]; - u8 reserved_1[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_2[0x8]; + u8 reserved_at_20[0x8]; u8 lb_cap[0x8]; - u8 reserved_3[0x8]; + u8 reserved_at_30[0x8]; u8 lb_en[0x8]; }; struct mlx5_ifc_pplm_reg_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 local_port[0x8]; - u8 reserved_1[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_2[0x20]; + u8 reserved_at_20[0x20]; u8 port_profile_mode[0x8]; u8 static_port_profile[0x8]; u8 active_port_profile[0x8]; - u8 reserved_3[0x8]; + u8 reserved_at_58[0x8]; u8 retransmission_active[0x8]; u8 fec_mode_active[0x18]; - u8 reserved_4[0x20]; + u8 reserved_at_80[0x20]; }; struct mlx5_ifc_ppcnt_reg_bits { u8 swid[0x8]; u8 local_port[0x8]; u8 pnat[0x2]; - u8 reserved_0[0x8]; + u8 reserved_at_12[0x8]; u8 grp[0x6]; u8 clr[0x1]; - u8 reserved_1[0x1c]; + u8 reserved_at_21[0x1c]; u8 prio_tc[0x3]; union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; }; struct mlx5_ifc_ppad_reg_bits { - u8 reserved_0[0x3]; + u8 reserved_at_0[0x3]; u8 single_mac[0x1]; - u8 reserved_1[0x4]; + u8 reserved_at_4[0x4]; u8 local_port[0x8]; u8 mac_47_32[0x10]; u8 mac_31_0[0x20]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_pmtu_reg_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 local_port[0x8]; - u8 reserved_1[0x10]; + u8 reserved_at_10[0x10]; u8 max_mtu[0x10]; - u8 reserved_2[0x10]; + u8 reserved_at_30[0x10]; u8 admin_mtu[0x10]; - u8 reserved_3[0x10]; + u8 reserved_at_50[0x10]; u8 oper_mtu[0x10]; - u8 reserved_4[0x10]; + u8 reserved_at_70[0x10]; }; struct mlx5_ifc_pmpr_reg_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 module[0x8]; - u8 reserved_1[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_2[0x18]; + u8 reserved_at_20[0x18]; u8 attenuation_5g[0x8]; - u8 reserved_3[0x18]; + u8 reserved_at_40[0x18]; u8 attenuation_7g[0x8]; - u8 reserved_4[0x18]; + u8 reserved_at_60[0x18]; u8 attenuation_12g[0x8]; }; struct mlx5_ifc_pmpe_reg_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 module[0x8]; - u8 reserved_1[0xc]; + u8 reserved_at_10[0xc]; u8 module_status[0x4]; - u8 reserved_2[0x60]; + u8 reserved_at_20[0x60]; }; struct mlx5_ifc_pmpc_reg_bits { @@ -6479,20 +6533,20 @@ struct mlx5_ifc_pmpc_reg_bits { }; struct mlx5_ifc_pmlpn_reg_bits { - u8 reserved_0[0x4]; + u8 reserved_at_0[0x4]; u8 mlpn_status[0x4]; u8 local_port[0x8]; - u8 reserved_1[0x10]; + u8 reserved_at_10[0x10]; u8 e[0x1]; - u8 reserved_2[0x1f]; + u8 reserved_at_21[0x1f]; }; struct mlx5_ifc_pmlp_reg_bits { u8 rxtx[0x1]; - u8 reserved_0[0x7]; + u8 reserved_at_1[0x7]; 
u8 local_port[0x8]; - u8 reserved_1[0x8]; + u8 reserved_at_10[0x8]; u8 width[0x8]; u8 lane0_module_mapping[0x20]; @@ -6503,36 +6557,36 @@ struct mlx5_ifc_pmlp_reg_bits { u8 lane3_module_mapping[0x20]; - u8 reserved_2[0x160]; + u8 reserved_at_a0[0x160]; }; struct mlx5_ifc_pmaos_reg_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 module[0x8]; - u8 reserved_1[0x4]; + u8 reserved_at_10[0x4]; u8 admin_status[0x4]; - u8 reserved_2[0x4]; + u8 reserved_at_18[0x4]; u8 oper_status[0x4]; u8 ase[0x1]; u8 ee[0x1]; - u8 reserved_3[0x1c]; + u8 reserved_at_22[0x1c]; u8 e[0x2]; - u8 reserved_4[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_plpc_reg_bits { - u8 reserved_0[0x4]; + u8 reserved_at_0[0x4]; u8 profile_id[0xc]; - u8 reserved_1[0x4]; + u8 reserved_at_10[0x4]; u8 proto_mask[0x4]; - u8 reserved_2[0x8]; + u8 reserved_at_18[0x8]; - u8 reserved_3[0x10]; + u8 reserved_at_20[0x10]; u8 lane_speed[0x10]; - u8 reserved_4[0x17]; + u8 reserved_at_40[0x17]; u8 lpbf[0x1]; u8 fec_mode_policy[0x8]; @@ -6545,44 +6599,44 @@ struct mlx5_ifc_plpc_reg_bits { u8 retransmission_request_admin[0x8]; u8 fec_mode_request_admin[0x18]; - u8 reserved_5[0x80]; + u8 reserved_at_c0[0x80]; }; struct mlx5_ifc_plib_reg_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 local_port[0x8]; - u8 reserved_1[0x8]; + u8 reserved_at_10[0x8]; u8 ib_port[0x8]; - u8 reserved_2[0x60]; + u8 reserved_at_20[0x60]; }; struct mlx5_ifc_plbf_reg_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 local_port[0x8]; - u8 reserved_1[0xd]; + u8 reserved_at_10[0xd]; u8 lbf_mode[0x3]; - u8 reserved_2[0x20]; + u8 reserved_at_20[0x20]; }; struct mlx5_ifc_pipg_reg_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 local_port[0x8]; - u8 reserved_1[0x10]; + u8 reserved_at_10[0x10]; u8 dic[0x1]; - u8 reserved_2[0x19]; + u8 reserved_at_21[0x19]; u8 ipg[0x4]; - u8 reserved_3[0x2]; + u8 reserved_at_3e[0x2]; }; struct mlx5_ifc_pifr_reg_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 local_port[0x8]; - u8 reserved_1[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_2[0xe0]; + u8 reserved_at_20[0xe0]; u8 port_filter[8][0x20]; @@ -6590,36 +6644,36 @@ struct mlx5_ifc_pifr_reg_bits { }; struct mlx5_ifc_pfcc_reg_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 local_port[0x8]; - u8 reserved_1[0x10]; + u8 reserved_at_10[0x10]; u8 ppan[0x4]; - u8 reserved_2[0x4]; + u8 reserved_at_24[0x4]; u8 prio_mask_tx[0x8]; - u8 reserved_3[0x8]; + u8 reserved_at_30[0x8]; u8 prio_mask_rx[0x8]; u8 pptx[0x1]; u8 aptx[0x1]; - u8 reserved_4[0x6]; + u8 reserved_at_42[0x6]; u8 pfctx[0x8]; - u8 reserved_5[0x10]; + u8 reserved_at_50[0x10]; u8 pprx[0x1]; u8 aprx[0x1]; - u8 reserved_6[0x6]; + u8 reserved_at_62[0x6]; u8 pfcrx[0x8]; - u8 reserved_7[0x10]; + u8 reserved_at_70[0x10]; - u8 reserved_8[0x80]; + u8 reserved_at_80[0x80]; }; struct mlx5_ifc_pelc_reg_bits { u8 op[0x4]; - u8 reserved_0[0x4]; + u8 reserved_at_4[0x4]; u8 local_port[0x8]; - u8 reserved_1[0x10]; + u8 reserved_at_10[0x10]; u8 op_admin[0x8]; u8 op_capability[0x8]; @@ -6634,28 +6688,28 @@ struct mlx5_ifc_pelc_reg_bits { u8 active[0x40]; - u8 reserved_2[0x80]; + u8 reserved_at_140[0x80]; }; struct mlx5_ifc_peir_reg_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 local_port[0x8]; - u8 reserved_1[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_2[0xc]; + u8 reserved_at_20[0xc]; u8 error_count[0x4]; - u8 reserved_3[0x10]; + u8 reserved_at_30[0x10]; - u8 reserved_4[0xc]; + u8 reserved_at_40[0xc]; u8 lane[0x4]; - u8 reserved_5[0x8]; + u8 reserved_at_50[0x8]; u8 error_type[0x8]; }; struct 
mlx5_ifc_pcap_reg_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 local_port[0x8]; - u8 reserved_1[0x10]; + u8 reserved_at_10[0x10]; u8 port_capability_mask[4][0x20]; }; @@ -6663,46 +6717,46 @@ struct mlx5_ifc_pcap_reg_bits { struct mlx5_ifc_paos_reg_bits { u8 swid[0x8]; u8 local_port[0x8]; - u8 reserved_0[0x4]; + u8 reserved_at_10[0x4]; u8 admin_status[0x4]; - u8 reserved_1[0x4]; + u8 reserved_at_18[0x4]; u8 oper_status[0x4]; u8 ase[0x1]; u8 ee[0x1]; - u8 reserved_2[0x1c]; + u8 reserved_at_22[0x1c]; u8 e[0x2]; - u8 reserved_3[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_pamp_reg_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 opamp_group[0x8]; - u8 reserved_1[0xc]; + u8 reserved_at_10[0xc]; u8 opamp_group_type[0x4]; u8 start_index[0x10]; - u8 reserved_2[0x4]; + u8 reserved_at_30[0x4]; u8 num_of_indices[0xc]; u8 index_data[18][0x10]; }; struct mlx5_ifc_lane_2_module_mapping_bits { - u8 reserved_0[0x6]; + u8 reserved_at_0[0x6]; u8 rx_lane[0x2]; - u8 reserved_1[0x6]; + u8 reserved_at_8[0x6]; u8 tx_lane[0x2]; - u8 reserved_2[0x8]; + u8 reserved_at_10[0x8]; u8 module[0x8]; }; struct mlx5_ifc_bufferx_reg_bits { - u8 reserved_0[0x6]; + u8 reserved_at_0[0x6]; u8 lossy[0x1]; u8 epsb[0x1]; - u8 reserved_1[0xc]; + u8 reserved_at_8[0xc]; u8 size[0xc]; u8 xoff_threshold[0x10]; @@ -6714,21 +6768,21 @@ struct mlx5_ifc_set_node_in_bits { }; struct mlx5_ifc_register_power_settings_bits { - u8 reserved_0[0x18]; + u8 reserved_at_0[0x18]; u8 power_settings_level[0x8]; - u8 reserved_1[0x60]; + u8 reserved_at_20[0x60]; }; struct mlx5_ifc_register_host_endianness_bits { u8 he[0x1]; - u8 reserved_0[0x1f]; + u8 reserved_at_1[0x1f]; - u8 reserved_1[0x60]; + u8 reserved_at_20[0x60]; }; struct mlx5_ifc_umr_pointer_desc_argument_bits { - u8 reserved_0[0x20]; + u8 reserved_at_0[0x20]; u8 mkey[0x20]; @@ -6741,7 +6795,7 @@ struct mlx5_ifc_ud_adrs_vector_bits { u8 dc_key[0x40]; u8 ext[0x1]; - u8 reserved_0[0x7]; + u8 reserved_at_41[0x7]; u8 destination_qp_dct[0x18]; u8 static_rate[0x4]; @@ -6750,7 +6804,7 @@ struct mlx5_ifc_ud_adrs_vector_bits { u8 mlid[0x7]; u8 rlid_udp_sport[0x10]; - u8 reserved_1[0x20]; + u8 reserved_at_80[0x20]; u8 rmac_47_16[0x20]; @@ -6758,9 +6812,9 @@ struct mlx5_ifc_ud_adrs_vector_bits { u8 tclass[0x8]; u8 hop_limit[0x8]; - u8 reserved_2[0x1]; + u8 reserved_at_e0[0x1]; u8 grh[0x1]; - u8 reserved_3[0x2]; + u8 reserved_at_e2[0x2]; u8 src_addr_index[0x8]; u8 flow_label[0x14]; @@ -6768,27 +6822,27 @@ struct mlx5_ifc_ud_adrs_vector_bits { }; struct mlx5_ifc_pages_req_event_bits { - u8 reserved_0[0x10]; + u8 reserved_at_0[0x10]; u8 function_id[0x10]; u8 num_pages[0x20]; - u8 reserved_1[0xa0]; + u8 reserved_at_40[0xa0]; }; struct mlx5_ifc_eqe_bits { - u8 reserved_0[0x8]; + u8 reserved_at_0[0x8]; u8 event_type[0x8]; - u8 reserved_1[0x8]; + u8 reserved_at_10[0x8]; u8 event_sub_type[0x8]; - u8 reserved_2[0xe0]; + u8 reserved_at_20[0xe0]; union mlx5_ifc_event_auto_bits event_data; - u8 reserved_3[0x10]; + u8 reserved_at_1e0[0x10]; u8 signature[0x8]; - u8 reserved_4[0x7]; + u8 reserved_at_1f8[0x7]; u8 owner[0x1]; }; @@ -6798,14 +6852,14 @@ enum { struct mlx5_ifc_cmd_queue_entry_bits { u8 type[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 input_length[0x20]; u8 input_mailbox_pointer_63_32[0x20]; u8 input_mailbox_pointer_31_9[0x17]; - u8 reserved_1[0x9]; + u8 reserved_at_77[0x9]; u8 command_input_inline_data[16][0x8]; @@ -6814,20 +6868,20 @@ struct mlx5_ifc_cmd_queue_entry_bits { u8 output_mailbox_pointer_63_32[0x20]; u8 output_mailbox_pointer_31_9[0x17]; - u8 
reserved_2[0x9]; + u8 reserved_at_1b7[0x9]; u8 output_length[0x20]; u8 token[0x8]; u8 signature[0x8]; - u8 reserved_3[0x8]; + u8 reserved_at_1f0[0x8]; u8 status[0x7]; u8 ownership[0x1]; }; struct mlx5_ifc_cmd_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; @@ -6836,9 +6890,9 @@ struct mlx5_ifc_cmd_out_bits { struct mlx5_ifc_cmd_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 command[0][0x20]; @@ -6847,16 +6901,16 @@ struct mlx5_ifc_cmd_in_bits { struct mlx5_ifc_cmd_if_box_bits { u8 mailbox_data[512][0x8]; - u8 reserved_0[0x180]; + u8 reserved_at_1000[0x180]; u8 next_pointer_63_32[0x20]; u8 next_pointer_31_10[0x16]; - u8 reserved_1[0xa]; + u8 reserved_at_11b6[0xa]; u8 block_number[0x20]; - u8 reserved_2[0x8]; + u8 reserved_at_11e0[0x8]; u8 token[0x8]; u8 ctrl_signature[0x8]; u8 signature[0x8]; @@ -6866,11 +6920,59 @@ struct mlx5_ifc_mtt_bits { u8 ptag_63_32[0x20]; u8 ptag_31_8[0x18]; - u8 reserved_0[0x6]; + u8 reserved_at_38[0x6]; u8 wr_en[0x1]; u8 rd_en[0x1]; }; +struct mlx5_ifc_query_wol_rol_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x10]; + u8 rol_mode[0x8]; + u8 wol_mode[0x8]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_wol_rol_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_set_wol_rol_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_set_wol_rol_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 rol_mode_valid[0x1]; + u8 wol_mode_valid[0x1]; + u8 reserved_at_42[0xe]; + u8 rol_mode[0x8]; + u8 wol_mode[0x8]; + + u8 reserved_at_60[0x20]; +}; + enum { MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0, MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1, @@ -6904,38 +7006,38 @@ struct mlx5_ifc_initial_seg_bits { u8 cmd_interface_rev[0x10]; u8 fw_rev_subminor[0x10]; - u8 reserved_0[0x40]; + u8 reserved_at_40[0x40]; u8 cmdq_phy_addr_63_32[0x20]; u8 cmdq_phy_addr_31_12[0x14]; - u8 reserved_1[0x2]; + u8 reserved_at_b4[0x2]; u8 nic_interface[0x2]; u8 log_cmdq_size[0x4]; u8 log_cmdq_stride[0x4]; u8 command_doorbell_vector[0x20]; - u8 reserved_2[0xf00]; + u8 reserved_at_e0[0xf00]; u8 initializing[0x1]; - u8 reserved_3[0x4]; + u8 reserved_at_fe1[0x4]; u8 nic_interface_supported[0x3]; - u8 reserved_4[0x18]; + u8 reserved_at_fe8[0x18]; struct mlx5_ifc_health_buffer_bits health_buffer; u8 no_dram_nic_offset[0x20]; - u8 reserved_5[0x6e40]; + u8 reserved_at_1220[0x6e40]; - u8 reserved_6[0x1f]; + u8 reserved_at_8060[0x1f]; u8 clear_int[0x1]; u8 health_syndrome[0x8]; u8 health_counter[0x18]; - u8 reserved_7[0x17fc0]; + u8 reserved_at_80a0[0x17fc0]; }; union mlx5_ifc_ports_control_registers_document_bits { @@ -6954,6 +7056,7 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_peir_reg_bits peir_reg; struct mlx5_ifc_pelc_reg_bits pelc_reg; struct mlx5_ifc_pfcc_reg_bits pfcc_reg; + struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout; struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; struct mlx5_ifc_pifr_reg_bits pifr_reg; struct mlx5_ifc_pipg_reg_bits pipg_reg; @@ -6980,44 +7083,44 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_pvlc_reg_bits pvlc_reg; struct 
mlx5_ifc_slrg_reg_bits slrg_reg; struct mlx5_ifc_sltp_reg_bits sltp_reg; - u8 reserved_0[0x60e0]; + u8 reserved_at_0[0x60e0]; }; union mlx5_ifc_debug_enhancements_document_bits { struct mlx5_ifc_health_buffer_bits health_buffer; - u8 reserved_0[0x200]; + u8 reserved_at_0[0x200]; }; union mlx5_ifc_uplink_pci_interface_document_bits { struct mlx5_ifc_initial_seg_bits initial_seg; - u8 reserved_0[0x20060]; + u8 reserved_at_0[0x20060]; }; struct mlx5_ifc_set_flow_table_root_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_set_flow_table_root_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x40]; + u8 reserved_at_40[0x40]; u8 table_type[0x8]; - u8 reserved_3[0x18]; + u8 reserved_at_88[0x18]; - u8 reserved_4[0x8]; + u8 reserved_at_a0[0x8]; u8 table_id[0x18]; - u8 reserved_5[0x140]; + u8 reserved_at_c0[0x140]; }; enum { @@ -7026,39 +7129,84 @@ enum { struct mlx5_ifc_modify_flow_table_out_bits { u8 status[0x8]; - u8 reserved_0[0x18]; + u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_1[0x40]; + u8 reserved_at_40[0x40]; }; struct mlx5_ifc_modify_flow_table_in_bits { u8 opcode[0x10]; - u8 reserved_0[0x10]; + u8 reserved_at_10[0x10]; - u8 reserved_1[0x10]; + u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_2[0x20]; + u8 reserved_at_40[0x20]; - u8 reserved_3[0x10]; + u8 reserved_at_60[0x10]; u8 modify_field_select[0x10]; u8 table_type[0x8]; - u8 reserved_4[0x18]; + u8 reserved_at_88[0x18]; - u8 reserved_5[0x8]; + u8 reserved_at_a0[0x8]; u8 table_id[0x18]; - u8 reserved_6[0x4]; + u8 reserved_at_c0[0x4]; u8 table_miss_mode[0x4]; - u8 reserved_7[0x18]; + u8 reserved_at_c8[0x18]; - u8 reserved_8[0x8]; + u8 reserved_at_e0[0x8]; u8 table_miss_id[0x18]; - u8 reserved_9[0x100]; + u8 reserved_at_100[0x100]; +}; + +struct mlx5_ifc_ets_tcn_config_reg_bits { + u8 g[0x1]; + u8 b[0x1]; + u8 r[0x1]; + u8 reserved_at_3[0x9]; + u8 group[0x4]; + u8 reserved_at_10[0x9]; + u8 bw_allocation[0x7]; + + u8 reserved_at_20[0xc]; + u8 max_bw_units[0x4]; + u8 reserved_at_30[0x8]; + u8 max_bw_value[0x8]; +}; + +struct mlx5_ifc_ets_global_config_reg_bits { + u8 reserved_at_0[0x2]; + u8 r[0x1]; + u8 reserved_at_3[0x1d]; + + u8 reserved_at_20[0xc]; + u8 max_bw_units[0x4]; + u8 reserved_at_30[0x8]; + u8 max_bw_value[0x8]; +}; + +struct mlx5_ifc_qetc_reg_bits { + u8 reserved_at_0[0x8]; + u8 port_number[0x8]; + u8 reserved_at_10[0x30]; + + struct mlx5_ifc_ets_tcn_config_reg_bits tc_configuration[0x8]; + struct mlx5_ifc_ets_global_config_reg_bits global_configuration; +}; + +struct mlx5_ifc_qtct_reg_bits { + u8 reserved_at_0[0x8]; + u8 port_number[0x8]; + u8 reserved_at_10[0xd]; + u8 prio[0x3]; + + u8 reserved_at_20[0x1d]; + u8 tclass[0x3]; }; #endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h new file mode 100644 index 000000000000..a1d145abd4eb --- /dev/null +++ b/include/linux/mlx5/port.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __MLX5_PORT_H__ +#define __MLX5_PORT_H__ + +#include <linux/mlx5/driver.h> + +int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); +int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, + int ptys_size, int proto_mask, u8 local_port); +int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, + u32 *proto_cap, int proto_mask); +int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, + u32 *proto_admin, int proto_mask); +int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, + u8 *link_width_oper, u8 local_port); +int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, + u8 *proto_oper, int proto_mask, + u8 local_port); +int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, + int proto_mask); +int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, + enum mlx5_port_status status); +int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, + enum mlx5_port_status *status); + +int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port); +void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port); +void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, + u8 port); + +int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, + u8 *vl_hw_cap, u8 local_port); + +int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause); +int mlx5_query_port_pause(struct mlx5_core_dev *dev, + u32 *rx_pause, u32 *tx_pause); + +int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx); +int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, + u8 *pfc_en_rx); + +int mlx5_max_tc(struct mlx5_core_dev *mdev); + +int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc); +int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group); +int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw); +int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, + u8 *max_bw_value, + u8 *max_bw_unit); +int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev, + u8 *max_bw_value, + u8 *max_bw_unit); +int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode); +int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode); + +#endif /* __MLX5_PORT_H__ */ diff --git a/include/linux/mlx5/qp.h 
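The new include/linux/mlx5/port.h collects the per-port access-register helpers that drivers call from their ethtool paths. As a rough illustration (only the mlx5_* helpers come from the header above; the private structure and callback names are assumptions), a pause-parameter handler might be wired up like this:

/* Hedged sketch: ethtool pause control on top of the new mlx5 port
 * helpers.  struct example_priv stands in for whatever per-netdev
 * private data a real driver keeps.
 */
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/mlx5/port.h>

struct example_priv {
	struct mlx5_core_dev *mdev;
};

static int example_set_pauseparam(struct net_device *netdev,
				  struct ethtool_pauseparam *pause)
{
	struct example_priv *priv = netdev_priv(netdev);

	return mlx5_set_port_pause(priv->mdev,
				   pause->rx_pause ? 1 : 0,
				   pause->tx_pause ? 1 : 0);
}

static void example_get_pauseparam(struct net_device *netdev,
				   struct ethtool_pauseparam *pause)
{
	struct example_priv *priv = netdev_priv(netdev);
	u32 rx_pause, tx_pause;

	if (!mlx5_query_port_pause(priv->mdev, &rx_pause, &tx_pause)) {
		pause->rx_pause = rx_pause;
		pause->tx_pause = tx_pause;
	}
}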
b/include/linux/mlx5/qp.h index 5b8c89ffaa58..cf031a3f16c5 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -499,7 +499,8 @@ struct mlx5_qp_context { u8 reserved2[4]; __be32 next_send_psn; __be32 cqn_send; - u8 reserved3[8]; + __be32 deth_sqpn; + u8 reserved3[4]; __be32 last_acked_psn; __be32 ssn; __be32 params2; @@ -621,9 +622,9 @@ static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); } -static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key) +static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key) { - return radix_tree_lookup(&dev->priv.mr_table.tree, key); + return radix_tree_lookup(&dev->priv.mkey_table.tree, key); } struct mlx5_page_fault_resume_mbox_in { diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 123771003e68..a9f2bcc98cab 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -92,5 +92,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev, int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev); int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev); +int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport, + u8 port_num, void *out, size_t out_sz); #endif /* __MLX5_VPORT_H__ */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 516e14944339..7d42501c8bb4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -22,6 +22,7 @@ #include <linux/resource.h> #include <linux/page_ext.h> #include <linux/err.h> +#include <linux/page_ref.h> struct mempolicy; struct anon_vma; @@ -82,6 +83,27 @@ extern int mmap_rnd_compat_bits __read_mostly; #define mm_forbids_zeropage(X) (0) #endif +/* + * Default maximum number of active map areas, this limits the number of vmas + * per mm struct. Users can overwrite this number by sysctl but there is a + * problem. + * + * When a program's coredump is generated as ELF format, a section is created + * per a vma. In ELF, the number of sections is represented in unsigned short. + * This means the number of sections should be smaller than 65535 at coredump. + * Because the kernel adds some informative sections to a image of program at + * generating coredump, we need some margin. The number of extra sections is + * 1-3 now and depends on arch. We use "5" as safe margin, here. + * + * ELF extended numbering allows more than 65535 sections, so 16-bit bound is + * not a hard limit any more. Although some userspace tools can be surprised by + * that. + */ +#define MAPCOUNT_ELF_CORE_MARGIN (5) +#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) + +extern int sysctl_max_map_count; + extern unsigned long sysctl_user_reserve_kbytes; extern unsigned long sysctl_admin_reserve_kbytes; @@ -122,6 +144,7 @@ extern unsigned int kobjsize(const void *objp); /* * vm_flags in vm_area_struct, see mm_types.h. 
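For concreteness, with USHRT_MAX = 65535 and the 5-entry safety margin above, DEFAULT_MAX_MAP_COUNT works out to 65530 map areas per mm unless vm.max_map_count (sysctl_max_map_count) is raised.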
+ * When changing, update also include/trace/events/mmflags.h */ #define VM_NONE 0x00000000 @@ -364,8 +387,8 @@ static inline int pmd_devmap(pmd_t pmd) */ static inline int put_page_testzero(struct page *page) { - VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page); - return atomic_dec_and_test(&page->_count); + VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); + return page_ref_dec_and_test(page); } /* @@ -376,7 +399,7 @@ static inline int put_page_testzero(struct page *page) */ static inline int get_page_unless_zero(struct page *page) { - return atomic_inc_not_zero(&page->_count); + return page_ref_add_unless(page, 1, 0); } extern int page_is_ram(unsigned long pfn); @@ -387,7 +410,8 @@ enum { REGION_MIXED, }; -int region_intersects(resource_size_t offset, size_t size, const char *type); +int region_intersects(resource_size_t offset, size_t size, unsigned long flags, + unsigned long desc); /* Support for virtually mapped pages */ struct page *vmalloc_to_page(const void *addr); @@ -463,11 +487,6 @@ static inline int total_mapcount(struct page *page) } #endif -static inline int page_count(struct page *page) -{ - return atomic_read(&compound_head(page)->_count); -} - static inline struct page *virt_to_head_page(const void *x) { struct page *page = virt_to_page(x); @@ -475,15 +494,6 @@ static inline struct page *virt_to_head_page(const void *x) return compound_head(page); } -/* - * Setup the page count before being freed into the page allocator for - * the first time (boot or memory hotplug) - */ -static inline void init_page_count(struct page *page) -{ - atomic_set(&page->_count, 1); -} - void __put_page(struct page *page); void put_pages_list(struct list_head *pages); @@ -693,8 +703,8 @@ static inline void get_page(struct page *page) * Getting a normal page or the head of a compound page * requires to already have an elevated page->_count. */ - VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page); - atomic_inc(&page->_count); + VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page); + page_ref_inc(page); if (unlikely(is_zone_device_page(page))) get_zone_device_page(page); @@ -904,20 +914,11 @@ static inline struct mem_cgroup *page_memcg(struct page *page) { return page->mem_cgroup; } - -static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) -{ - page->mem_cgroup = memcg; -} #else static inline struct mem_cgroup *page_memcg(struct page *page) { return NULL; } - -static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) -{ -} #endif /* @@ -1051,8 +1052,6 @@ static inline void clear_page_pfmemalloc(struct page *page) * just gets major/minor fault counters bumped up. */ -#define VM_FAULT_MINOR 0 /* For backwards compat. Remove me quickly. 
*/ - #define VM_FAULT_OOM 0x0001 #define VM_FAULT_SIGBUS 0x0002 #define VM_FAULT_MAJOR 0x0004 @@ -1299,10 +1298,9 @@ int __set_page_dirty_nobuffers(struct page *page); int __set_page_dirty_no_writeback(struct page *page); int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page); -void account_page_dirtied(struct page *page, struct address_space *mapping, - struct mem_cgroup *memcg); +void account_page_dirtied(struct page *page, struct address_space *mapping); void account_page_cleaned(struct page *page, struct address_space *mapping, - struct mem_cgroup *memcg, struct bdi_writeback *wb); + struct bdi_writeback *wb); int set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); void cancel_dirty_page(struct page *page); @@ -1532,8 +1530,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) } #endif -int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, - pmd_t *pmd, unsigned long address); +int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address); int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); /* @@ -1659,15 +1656,15 @@ static inline void pgtable_page_dtor(struct page *page) pte_unmap(pte); \ } while (0) -#define pte_alloc_map(mm, vma, pmd, address) \ - ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma, \ - pmd, address))? \ - NULL: pte_offset_map(pmd, address)) +#define pte_alloc(mm, pmd, address) \ + (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address)) + +#define pte_alloc_map(mm, pmd, address) \ + (pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address)) #define pte_alloc_map_lock(mm, pmd, address, ptlp) \ - ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL, \ - pmd, address))? \ - NULL: pte_offset_map_lock(mm, pmd, address, ptlp)) + (pte_alloc(mm, pmd, address) ? \ + NULL : pte_offset_map_lock(mm, pmd, address, ptlp)) #define pte_alloc_kernel(pmd, address) \ ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? 
\ @@ -1862,6 +1859,7 @@ extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); extern void __init mmap_init(void); extern void show_mem(unsigned int flags); +extern long si_mem_available(void); extern void si_meminfo(struct sysinfo * val); extern void si_meminfo_node(struct sysinfo *val, int nid); @@ -1876,6 +1874,7 @@ extern void zone_pcp_reset(struct zone *zone); /* page_alloc.c */ extern int min_free_kbytes; +extern int watermark_scale_factor; /* nommu.c */ extern atomic_long_t mmap_pages_allocated; @@ -2138,6 +2137,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr, int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); +int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn, pgprot_t pgprot); int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn); int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); @@ -2175,6 +2176,17 @@ extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, unsigned long size, pte_fn_t fn, void *data); +#ifdef CONFIG_PAGE_POISONING +extern bool page_poisoning_enabled(void); +extern void kernel_poison_pages(struct page *page, int numpages, int enable); +extern bool page_is_poisoned(struct page *page); +#else +static inline bool page_poisoning_enabled(void) { return false; } +static inline void kernel_poison_pages(struct page *page, int numpages, + int enable) { } +static inline bool page_is_poisoned(struct page *page) { return false; } +#endif + #ifdef CONFIG_DEBUG_PAGEALLOC extern bool _debug_pagealloc_enabled; extern void __kernel_map_pages(struct page *page, int numpages, int enable); @@ -2194,14 +2206,18 @@ kernel_map_pages(struct page *page, int numpages, int enable) } #ifdef CONFIG_HIBERNATION extern bool kernel_page_present(struct page *page); -#endif /* CONFIG_HIBERNATION */ -#else +#endif /* CONFIG_HIBERNATION */ +#else /* CONFIG_DEBUG_PAGEALLOC */ static inline void kernel_map_pages(struct page *page, int numpages, int enable) {} #ifdef CONFIG_HIBERNATION static inline bool kernel_page_present(struct page *page) { return true; } -#endif /* CONFIG_HIBERNATION */ -#endif +#endif /* CONFIG_HIBERNATION */ +static inline bool debug_pagealloc_enabled(void) +{ + return false; +} +#endif /* CONFIG_DEBUG_PAGEALLOC */ #ifdef __HAVE_ARCH_GATE_AREA extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 624b78b848b8..944b2b37313b 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -566,10 +566,26 @@ static inline void clear_tlb_flush_pending(struct mm_struct *mm) } #endif -struct vm_special_mapping -{ - const char *name; +struct vm_fault; + +struct vm_special_mapping { + const char *name; /* The name, e.g. "[vdso]". */ + + /* + * If .fault is not provided, this points to a + * NULL-terminated array of pages that back the special mapping. + * + * This must not be NULL unless .fault is provided. + */ struct page **pages; + + /* + * If non-NULL, then this is called to resolve page faults + * on the special mapping. If used, .pages is not checked. 
+ */ + int (*fault)(const struct vm_special_mapping *sm, + struct vm_area_struct *vma, + struct vm_fault *vmf); }; enum tlb_flush_reason { diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 053824b0a412..de7be78c6f0e 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -9,8 +9,7 @@ struct vm_area_struct; struct mm_struct; extern void dump_page(struct page *page, const char *reason); -extern void dump_page_badflags(struct page *page, const char *reason, - unsigned long badflags); +extern void __dump_page(struct page *page, const char *reason); void dump_vma(const struct vm_area_struct *vma); void dump_mm(const struct mm_struct *mm); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 7b6c2cfee390..c60df9257cc7 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -63,6 +63,9 @@ enum { MIGRATE_TYPES }; +/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */ +extern char * const migratetype_names[MIGRATE_TYPES]; + #ifdef CONFIG_CMA # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) #else @@ -209,10 +212,12 @@ struct zone_reclaim_stat { }; struct lruvec { - struct list_head lists[NR_LRU_LISTS]; - struct zone_reclaim_stat reclaim_stat; + struct list_head lists[NR_LRU_LISTS]; + struct zone_reclaim_stat reclaim_stat; + /* Evictions & activations on the inactive file list */ + atomic_long_t inactive_age; #ifdef CONFIG_MEMCG - struct zone *zone; + struct zone *zone; #endif }; @@ -487,9 +492,6 @@ struct zone { spinlock_t lru_lock; struct lruvec lruvec; - /* Evictions & activations on the inactive file list */ - atomic_long_t inactive_age; - /* * When free pages are below this point, additional steps are taken * when reading the number of free pages to avoid per-cpu counter @@ -520,6 +522,8 @@ struct zone { bool compact_blockskip_flush; #endif + bool contiguous; + ZONE_PADDING(_pad3_) /* Zone statistics */ atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; @@ -664,6 +668,12 @@ typedef struct pglist_data { mem_hotplug_begin/end() */ int kswapd_max_order; enum zone_type classzone_idx; +#ifdef CONFIG_COMPACTION + int kcompactd_max_order; + enum zone_type kcompactd_classzone_idx; + wait_queue_head_t kcompactd_wait; + struct task_struct *kcompactd; +#endif #ifdef CONFIG_NUMA_BALANCING /* Lock serializing the migrate rate limiting window */ spinlock_t numabalancing_migrate_lock; @@ -758,6 +768,8 @@ static inline struct zone *lruvec_zone(struct lruvec *lruvec) #endif } +extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru); + #ifdef CONFIG_HAVE_MEMORY_PRESENT void memory_present(int nid, unsigned long start, unsigned long end); #else @@ -829,6 +841,8 @@ static inline int is_highmem(struct zone *zone) struct ctl_table; int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); +int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1]; int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); diff --git a/include/linux/module.h b/include/linux/module.h index 4560d8f1545d..2bb0c3085706 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -324,6 +324,12 @@ struct module_layout { #define __module_layout_align #endif +struct mod_kallsyms { + Elf_Sym *symtab; + unsigned int num_symtab; + char *strtab; +}; + struct module { enum module_state state; @@ -405,15 +411,10 @@ struct module { 
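The .fault hook added to struct vm_special_mapping above lets the owner of a special mapping (a vDSO-style region, for example) resolve faults lazily instead of listing its pages up front. A hedged sketch, where the page and the mapping name are placeholders:

/* Hedged sketch of the new vm_special_mapping .fault callback: serve a
 * single page for offset 0 and refuse everything else.  my_special_page
 * is assumed to be allocated elsewhere; .pages may stay NULL when
 * .fault is provided.
 */
static struct page *my_special_page;	/* assumed set up at init time */

static int example_special_fault(const struct vm_special_mapping *sm,
				 struct vm_area_struct *vma,
				 struct vm_fault *vmf)
{
	if (vmf->pgoff != 0 || !my_special_page)
		return VM_FAULT_SIGBUS;

	get_page(my_special_page);
	vmf->page = my_special_page;
	return 0;			/* 0: vmf->page has been filled in */
}

static const struct vm_special_mapping example_mapping = {
	.name	= "[example]",
	.fault	= example_special_fault,
};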
#endif #ifdef CONFIG_KALLSYMS - /* - * We keep the symbol and string tables for kallsyms. - * The core_* fields below are temporary, loader-only (they - * could really be discarded after module init). - */ - Elf_Sym *symtab, *core_symtab; - unsigned int num_symtab, core_num_syms; - char *strtab, *core_strtab; - + /* Protected by RCU and/or module_mutex: use rcu_dereference() */ + struct mod_kallsyms *kallsyms; + struct mod_kallsyms core_kallsyms; + /* Section attributes */ struct module_sect_attrs *sect_attrs; diff --git a/include/linux/msi.h b/include/linux/msi.h index a2a0068a8387..8b425c66305a 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -33,6 +33,14 @@ struct platform_msi_desc { }; /** + * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data + * @msi_index: The index of the MSI descriptor + */ +struct fsl_mc_msi_desc { + u16 msi_index; +}; + +/** * struct msi_desc - Descriptor structure for MSI based interrupts * @list: List head for management * @irq: The base interrupt number @@ -87,6 +95,7 @@ struct msi_desc { * tree wide cleanup. */ struct platform_msi_desc platform; + struct fsl_mc_msi_desc fsl_mc; }; }; diff --git a/include/linux/nd.h b/include/linux/nd.h index 507e47c86737..5489ab756d1a 100644 --- a/include/linux/nd.h +++ b/include/linux/nd.h @@ -16,11 +16,16 @@ #include <linux/ndctl.h> #include <linux/device.h> +enum nvdimm_event { + NVDIMM_REVALIDATE_POISON, +}; + struct nd_device_driver { struct device_driver drv; unsigned long type; int (*probe)(struct device *dev); int (*remove)(struct device *dev); + void (*notify)(struct device *dev, enum nvdimm_event event); }; static inline struct nd_device_driver *to_nd_device_driver( @@ -144,6 +149,8 @@ static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns, MODULE_ALIAS("nd:t" __stringify(type) "*") #define ND_DEVICE_MODALIAS_FMT "nd:t%d" +struct nd_region; +void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event); int __must_check __nd_driver_register(struct nd_device_driver *nd_drv, struct module *module, const char *mod_name); #define nd_driver_register(driver) \ diff --git a/include/linux/net.h b/include/linux/net.h index 0b4ac7da583a..49175e4ced11 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -215,6 +215,7 @@ int __sock_create(struct net *net, int family, int type, int proto, int sock_create(int family, int type, int proto, struct socket **res); int sock_create_kern(struct net *net, int family, int type, int proto, struct socket **res); int sock_create_lite(int family, int type, int proto, struct socket **res); +struct socket *sock_alloc(void); void sock_release(struct socket *sock); int sock_sendmsg(struct socket *sock, struct msghdr *msg); int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index d9654f0eecb3..a734bf43d190 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -67,6 +67,8 @@ enum { NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */ NETIF_F_BUSY_POLL_BIT, /* Busy poll */ + NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */ + /* * Add your fresh new feature above and remember to update * netdev_features_strings[] in net/core/ethtool.c and maybe @@ -124,6 +126,7 @@ enum { #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) #define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) +#define NETIF_F_HW_TC __NETIF_F(HW_TC) #define 
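The mod_kallsyms indirection above is what allows a module's symbol tables to be swapped atomically; per the "use rcu_dereference()" note, readers load the pointer inside an RCU-sched read-side section. A hedged sketch of such a reader (the helper name is made up):

/* Hedged sketch: dereferencing mod->kallsyms under RCU-sched.  Assumes
 * the module loader publishes the pointer with rcu_assign_pointer() (or
 * an equivalent release store) elsewhere.
 */
static unsigned int example_num_symtab(struct module *mod)
{
	struct mod_kallsyms *kallsyms;
	unsigned int n;

	preempt_disable();			/* RCU-sched read side */
	kallsyms = rcu_dereference_sched(mod->kallsyms);
	n = kallsyms->num_symtab;
	preempt_enable();
	return n;
}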
for_each_netdev_feature(mask_addr, bit) \ for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 289c2314d766..be693b34662f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -51,6 +51,7 @@ #include <linux/neighbour.h> #include <uapi/linux/netdevice.h> #include <uapi/linux/if_bonding.h> +#include <uapi/linux/pkt_cls.h> struct netpoll_info; struct device; @@ -267,6 +268,7 @@ struct header_ops { void (*cache_update)(struct hh_cache *hh, const struct net_device *dev, const unsigned char *haddr); + bool (*validate)(const char *ll_header, unsigned int len); }; /* These flag bits are private to the generic network queueing @@ -778,6 +780,27 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a, typedef u16 (*select_queue_fallback_t)(struct net_device *dev, struct sk_buff *skb); +/* These structures hold the attributes of qdisc and classifiers + * that are being passed to the netdevice through the setup_tc op. + */ +enum { + TC_SETUP_MQPRIO, + TC_SETUP_CLSU32, + TC_SETUP_CLSFLOWER, +}; + +struct tc_cls_u32_offload; + +struct tc_to_netdev { + unsigned int type; + union { + u8 tc; + struct tc_cls_u32_offload *cls_u32; + struct tc_cls_flower_offload *cls_flower; + }; +}; + + /* * This structure defines the management hooks for network devices. * The following hooks can be defined; unless noted otherwise, they are @@ -1073,6 +1096,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * This function is used to get egress tunnel information for given skb. * This is useful for retrieving outer tunnel header parameters while * sampling packet. + * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom); + * This function is used to specify the headroom that the skb must + * consider when allocation skb during packet reception. Setting + * appropriate rx headroom value allows avoiding skb head copy on + * forward. Setting a negative value reset the rx headroom to the + * default value. * */ struct net_device_ops { @@ -1150,7 +1179,10 @@ struct net_device_ops { int (*ndo_set_vf_rss_query_en)( struct net_device *dev, int vf, bool setting); - int (*ndo_setup_tc)(struct net_device *dev, u8 tc); + int (*ndo_setup_tc)(struct net_device *dev, + u32 handle, + __be16 protocol, + struct tc_to_netdev *tc); #if IS_ENABLED(CONFIG_FCOE) int (*ndo_fcoe_enable)(struct net_device *dev); int (*ndo_fcoe_disable)(struct net_device *dev); @@ -1255,6 +1287,8 @@ struct net_device_ops { bool proto_down); int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb); + void (*ndo_set_rx_headroom)(struct net_device *dev, + int needed_headroom); }; /** @@ -1291,6 +1325,10 @@ struct net_device_ops { * @IFF_OPENVSWITCH: device is a Open vSwitch master * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device * @IFF_TEAM: device is a team device + * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured + * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external + * entity (i.e. 
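The reworked ndo_setup_tc above no longer takes a bare traffic-class count: drivers now receive the qdisc handle, the protocol and a struct tc_to_netdev, and dispatch on tc->type. A minimal driver-side sketch for the mqprio case (queue re-mapping and u32/flower classifier offload are left out; a real driver would rebuild its TX queue mapping here):

/* Hedged sketch of the new ndo_setup_tc signature: accept only the root
 * qdisc, handle TC_SETUP_MQPRIO via the generic num_tc helper, and
 * reject classifier offload.
 */
static int example_ndo_setup_tc(struct net_device *dev, u32 handle,
				__be16 proto, struct tc_to_netdev *tc)
{
	if (handle != TC_H_ROOT)
		return -EINVAL;

	switch (tc->type) {
	case TC_SETUP_MQPRIO:
		return netdev_set_num_tc(dev, tc->tc);
	default:
		return -EINVAL;		/* no u32/flower offload here */
	}
}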
the master device for bridged veth) + * @IFF_MACSEC: device is a MACsec device */ enum netdev_priv_flags { IFF_802_1Q_VLAN = 1<<0, @@ -1318,6 +1356,9 @@ enum netdev_priv_flags { IFF_OPENVSWITCH = 1<<22, IFF_L3MDEV_SLAVE = 1<<23, IFF_TEAM = 1<<24, + IFF_RXFH_CONFIGURED = 1<<25, + IFF_PHONY_HEADROOM = 1<<26, + IFF_MACSEC = 1<<27, }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1345,6 +1386,8 @@ enum netdev_priv_flags { #define IFF_OPENVSWITCH IFF_OPENVSWITCH #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE #define IFF_TEAM IFF_TEAM +#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED +#define IFF_MACSEC IFF_MACSEC /** * struct net_device - The DEVICE structure. @@ -1397,6 +1440,8 @@ enum netdev_priv_flags { * do not use this in drivers * @tx_dropped: Dropped packets by core network, * do not use this in drivers + * @rx_nohandler: nohandler dropped packets by core network on + * inactive devices, do not use this in drivers * * @wireless_handlers: List of functions to handle Wireless Extensions, * instead of ioctl, @@ -1420,8 +1465,7 @@ enum netdev_priv_flags { * @dma: DMA channel * @mtu: Interface MTU value * @type: Interface hardware type - * @hard_header_len: Hardware header length, which means that this is the - * minimum size of a packet. + * @hard_header_len: Maximum hardware header length. * * @needed_headroom: Extra headroom the hardware may need, but not in all * cases can this be guaranteed @@ -1611,6 +1655,7 @@ struct net_device { atomic_long_t rx_dropped; atomic_long_t tx_dropped; + atomic_long_t rx_nohandler; #ifdef CONFIG_WIRELESS_EXT const struct iw_handler_def * wireless_handlers; @@ -1908,6 +1953,26 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, void *accel_priv); +/* returns the headroom that the master device needs to take in account + * when forwarding to this dev + */ +static inline unsigned netdev_get_fwd_headroom(struct net_device *dev) +{ + return dev->priv_flags & IFF_PHONY_HEADROOM ? 
0 : dev->needed_headroom; +} + +static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr) +{ + if (dev->netdev_ops->ndo_set_rx_headroom) + dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr); +} + +/* set the device rx headroom to the dev's default */ +static inline void netdev_reset_rx_headroom(struct net_device *dev) +{ + netdev_set_rx_headroom(dev, -1); +} + /* * Net namespace inlines */ @@ -2627,6 +2692,24 @@ static inline int dev_parse_header(const struct sk_buff *skb, return dev->header_ops->parse(skb, haddr); } +/* ll_header must have at least hard_header_len allocated */ +static inline bool dev_validate_header(const struct net_device *dev, + char *ll_header, int len) +{ + if (likely(len >= dev->hard_header_len)) + return true; + + if (capable(CAP_SYS_RAWIO)) { + memset(ll_header + len, 0, dev->hard_header_len - len); + return true; + } + + if (dev->header_ops && dev->header_ops->validate) + return dev->header_ops->validate(ll_header, len); + + return false; +} + typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); int register_gifconf(unsigned int family, gifconf_func_t *gifconf); static inline int unregister_gifconf(unsigned int family) @@ -3718,7 +3801,7 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev, void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter); #define netdev_for_each_lower_dev(dev, ldev, iter) \ - for (iter = &(dev)->adj_list.lower, \ + for (iter = (dev)->adj_list.lower.next, \ ldev = netdev_lower_get_next(dev, &(iter)); \ ldev; \ ldev = netdev_lower_get_next(dev, &(iter))) @@ -3741,7 +3824,7 @@ void netdev_lower_state_changed(struct net_device *lower_dev, /* RSS keys are 40 or 52 bytes long */ #define NETDEV_RSS_KEY_LEN 52 -extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN]; +extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; void netdev_rss_key_fill(void *buffer, size_t len); int dev_get_nest_level(struct net_device *dev, @@ -3965,6 +4048,11 @@ static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, skb->mac_len = mac_len; } +static inline bool netif_is_macsec(const struct net_device *dev) +{ + return dev->priv_flags & IFF_MACSEC; +} + static inline bool netif_is_macvlan(const struct net_device *dev) { return dev->priv_flags & IFF_MACVLAN; @@ -4045,6 +4133,11 @@ static inline bool netif_is_lag_port(const struct net_device *dev) return netif_is_bond_slave(dev) || netif_is_team_port(dev); } +static inline bool netif_is_rxfh_configured(const struct net_device *dev) +{ + return dev->priv_flags & IFF_RXFH_CONFIGURED; +} + /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ static inline void netif_keep_dst(struct net_device *dev) { diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 0ad556726181..9230f9aee896 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -141,22 +141,6 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg); #ifdef HAVE_JUMP_LABEL extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; - -static inline bool nf_hook_list_active(struct list_head *hook_list, - u_int8_t pf, unsigned int hook) -{ - if (__builtin_constant_p(pf) && - __builtin_constant_p(hook)) - return static_key_false(&nf_hooks_needed[pf][hook]); - - return !list_empty(hook_list); -} -#else -static inline bool nf_hook_list_active(struct list_head *hook_list, - u_int8_t pf, unsigned int hook) -{ - return !list_empty(hook_list); -} #endif int nf_hook_slow(struct sk_buff *skb, struct 
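dev_validate_header() above centralizes the sanity check on user-supplied link-layer headers: short headers are either zero-padded for CAP_SYS_RAWIO callers or handed to the new header_ops->validate hook. A hedged sketch of a packet-socket-style transmit path using it (allocation and copy logic omitted):

/* Hedged sketch: validate a user-built frame before handing it to the
 * device; skb->data is assumed to point at the link-layer header.
 */
static int example_xmit_user_frame(struct net_device *dev,
				   struct sk_buff *skb)
{
	if (!dev_validate_header(dev, skb->data, skb->len))
		return -EINVAL;		/* malformed link-layer header */

	skb->dev = dev;
	return dev_queue_xmit(skb);
}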
nf_hook_state *state); @@ -177,9 +161,18 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, int (*okfn)(struct net *, struct sock *, struct sk_buff *), int thresh) { - struct list_head *hook_list = &net->nf.hooks[pf][hook]; + struct list_head *hook_list; + +#ifdef HAVE_JUMP_LABEL + if (__builtin_constant_p(pf) && + __builtin_constant_p(hook) && + !static_key_false(&nf_hooks_needed[pf][hook])) + return 1; +#endif + + hook_list = &net->nf.hooks[pf][hook]; - if (nf_hook_list_active(hook_list, pf, hook)) { + if (!list_empty(hook_list)) { struct nf_hook_state state; nf_hook_state_init(&state, hook_list, hook, thresh, diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index ba0d9789eb6e..1d82dd5e9a08 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -34,8 +34,6 @@ int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n); int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n); int nfnetlink_has_listeners(struct net *net, unsigned int group); -struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size, - u32 dst_portid, gfp_t gfp_mask); int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid, unsigned int group, int echo, gfp_t flags); int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error); diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index c5577410c25d..80a305b85323 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -200,6 +200,9 @@ struct xt_table { u_int8_t af; /* address/protocol family */ int priority; /* hook order */ + /* called when table is needed in the given netns */ + int (*table_init)(struct net *net); + /* A unique name... 
*/ const char name[XT_TABLE_MAXNAMELEN]; }; @@ -408,8 +411,7 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu) return cnt; } -struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *); -void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *); +struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *); #ifdef CONFIG_COMPAT #include <net/compat.h> diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index 6f074db2f23d..029b95e8924e 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h @@ -48,10 +48,11 @@ struct arpt_error { } extern void *arpt_alloc_initial_table(const struct xt_table *); -extern struct xt_table *arpt_register_table(struct net *net, - const struct xt_table *table, - const struct arpt_replace *repl); -extern void arpt_unregister_table(struct xt_table *table); +int arpt_register_table(struct net *net, const struct xt_table *table, + const struct arpt_replace *repl, + const struct nf_hook_ops *ops, struct xt_table **res); +void arpt_unregister_table(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops); extern unsigned int arpt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table); diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index aa598f942c01..7bfc5893ec31 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -24,10 +24,11 @@ extern void ipt_init(void) __init; -extern struct xt_table *ipt_register_table(struct net *net, - const struct xt_table *table, - const struct ipt_replace *repl); -extern void ipt_unregister_table(struct net *net, struct xt_table *table); +int ipt_register_table(struct net *net, const struct xt_table *table, + const struct ipt_replace *repl, + const struct nf_hook_ops *ops, struct xt_table **res); +void ipt_unregister_table(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops); /* Standard entry. 
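Note the pattern change in the table registration API above: registration now returns an int, takes the hook ops, and hands the registered table back through a **res out-parameter, with xt_hook_ops_alloc() replacing xt_hook_link()/xt_hook_unlink(). A hedged sketch of the new flow for an IPv4 table follows; every "example_*" name is a placeholder, a real table stores the pointer per netns rather than in a global, and example_ops would be allocated once at module init with xt_hook_ops_alloc(&example_table, example_hook_fn):

/* Hedged sketch of the reworked registration flow using the new
 * table_init callback and the **res out-parameter.
 */
static int __net_init example_table_init(struct net *net);

static struct nf_hook_ops *example_ops __read_mostly;
static struct xt_table *example_registered;	/* per-netns in real code */

static const struct xt_table example_table = {
	.name		= "example",
	.af		= NFPROTO_IPV4,
	.valid_hooks	= 1 << NF_INET_LOCAL_IN,
	.me		= THIS_MODULE,
	.priority	= NF_IP_PRI_FILTER,
	.table_init	= example_table_init,	/* new x_tables.h callback */
};

static int __net_init example_table_init(struct net *net)
{
	struct ipt_replace *repl;
	int err;

	repl = ipt_alloc_initial_table(&example_table);
	if (!repl)
		return -ENOMEM;

	err = ipt_register_table(net, &example_table, repl,
				 example_ops, &example_registered);
	kfree(repl);
	return err;
}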
*/ struct ipt_standard { diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 0f76e5c674f9..b21c392d6012 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -25,10 +25,11 @@ extern void ip6t_init(void) __init; extern void *ip6t_alloc_initial_table(const struct xt_table *); -extern struct xt_table *ip6t_register_table(struct net *net, - const struct xt_table *table, - const struct ip6t_replace *repl); -extern void ip6t_unregister_table(struct net *net, struct xt_table *table); +int ip6t_register_table(struct net *net, const struct xt_table *table, + const struct ip6t_replace *repl, + const struct nf_hook_ops *ops, struct xt_table **res); +void ip6t_unregister_table(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops); extern unsigned int ip6t_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table); diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 0b41959aab9f..da14ab61f363 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -69,16 +69,6 @@ extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group) extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); extern int netlink_has_listeners(struct sock *sk, unsigned int group); -extern struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size, - unsigned int ldiff, u32 dst_portid, - gfp_t gfp_mask); -static inline struct sk_buff * -netlink_alloc_skb(struct sock *ssk, unsigned int size, u32 dst_portid, - gfp_t gfp_mask) -{ - return __netlink_alloc_skb(ssk, size, 0, dst_portid, gfp_mask); -} - extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, __u32 group, gfp_t allocation); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 48e0320cd643..67300f8e5f2f 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -550,9 +550,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, static inline loff_t nfs_size_to_loff_t(__u64 size) { - if (size > (__u64) OFFSET_MAX - 1) - return OFFSET_MAX - 1; - return (loff_t) size; + return min_t(u64, size, OFFSET_MAX); } static inline ino_t diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 791098a08a87..d320906cf13e 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -275,6 +275,7 @@ struct nfs4_layoutcommit_args { size_t layoutupdate_len; struct page *layoutupdate_page; struct page **layoutupdate_pages; + __be32 *start_p; }; struct nfs4_layoutcommit_res { diff --git a/include/linux/notifier.h b/include/linux/notifier.h index d14a4c362465..4149868de4e6 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -47,6 +47,8 @@ * runtime initialization. 
*/ +struct notifier_block; + typedef int (*notifier_fn_t)(struct notifier_block *nb, unsigned long action, void *data); diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index 0b68caff1b3c..a4fcc90b0f20 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -23,6 +23,10 @@ struct nvmem_config { const struct nvmem_cell_info *cells; int ncells; bool read_only; + bool root_only; + /* To be only used by old driver/misc/eeprom drivers */ + bool compat; + struct device *base_dev; }; #if IS_ENABLED(CONFIG_NVMEM) @@ -43,5 +47,4 @@ static inline int nvmem_unregister(struct nvmem_device *nvmem) } #endif /* CONFIG_NVMEM */ - #endif /* ifndef _LINUX_NVMEM_PROVIDER_H */ diff --git a/include/linux/of.h b/include/linux/of.h index dc6e39696b64..7fcb681baadf 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -296,13 +296,13 @@ extern int of_property_read_u64_array(const struct device_node *np, u64 *out_values, size_t sz); -extern int of_property_read_string(struct device_node *np, +extern int of_property_read_string(const struct device_node *np, const char *propname, const char **out_string); -extern int of_property_match_string(struct device_node *np, +extern int of_property_match_string(const struct device_node *np, const char *propname, const char *string); -extern int of_property_read_string_helper(struct device_node *np, +extern int of_property_read_string_helper(const struct device_node *np, const char *propname, const char **out_strs, size_t sz, int index); extern int of_device_is_compatible(const struct device_node *device, @@ -538,14 +538,14 @@ static inline int of_property_read_u64_array(const struct device_node *np, return -ENOSYS; } -static inline int of_property_read_string(struct device_node *np, +static inline int of_property_read_string(const struct device_node *np, const char *propname, const char **out_string) { return -ENOSYS; } -static inline int of_property_read_string_helper(struct device_node *np, +static inline int of_property_read_string_helper(const struct device_node *np, const char *propname, const char **out_strs, size_t sz, int index) { @@ -571,7 +571,7 @@ static inline int of_property_read_u64(const struct device_node *np, return -ENOSYS; } -static inline int of_property_match_string(struct device_node *np, +static inline int of_property_match_string(const struct device_node *np, const char *propname, const char *string) { @@ -773,7 +773,7 @@ static inline int of_property_count_u64_elems(const struct device_node *np, * * If @out_strs is NULL, the number of strings in the property is returned. */ -static inline int of_property_read_string_array(struct device_node *np, +static inline int of_property_read_string_array(const struct device_node *np, const char *propname, const char **out_strs, size_t sz) { @@ -792,7 +792,7 @@ static inline int of_property_read_string_array(struct device_node *np, * does not have a value, and -EILSEQ if the string is not null-terminated * within the length of the property data. */ -static inline int of_property_count_strings(struct device_node *np, +static inline int of_property_count_strings(const struct device_node *np, const char *propname) { return of_property_read_string_helper(np, propname, NULL, 0, 0); @@ -816,7 +816,7 @@ static inline int of_property_count_strings(struct device_node *np, * * The out_string pointer is modified only if a valid string can be decoded. 
*/ -static inline int of_property_read_string_index(struct device_node *np, +static inline int of_property_read_string_index(const struct device_node *np, const char *propname, int index, const char **output) { diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index df9ef3801812..2fbe8682a66f 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -88,7 +88,7 @@ extern void unflatten_device_tree(void); extern void unflatten_and_copy_device_tree(void); extern void early_init_devtree(void *); extern void early_get_first_memblock_info(void *, phys_addr_t *); -extern u64 fdt_translate_address(const void *blob, int node_offset); +extern u64 of_flat_dt_translate_address(unsigned long node); extern void of_fdt_limit_memory(int limit); #else /* CONFIG_OF_FLATTREE */ static inline void early_init_fdt_scan_reserved_mem(void) {} diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h index da523661500a..77b078c103b2 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h @@ -17,6 +17,8 @@ #define ZONES_SHIFT 1 #elif MAX_NR_ZONES <= 4 #define ZONES_SHIFT 2 +#elif MAX_NR_ZONES <= 8 +#define ZONES_SHIFT 3 #else #error ZONES_SHIFT -- too many zones configured adjust calculation #endif diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 19724e6ebd26..f4ed4f1b0c77 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -144,12 +144,12 @@ static inline struct page *compound_head(struct page *page) return page; } -static inline int PageTail(struct page *page) +static __always_inline int PageTail(struct page *page) { return READ_ONCE(page->compound_head) & 1; } -static inline int PageCompound(struct page *page) +static __always_inline int PageCompound(struct page *page) { return test_bit(PG_head, &page->flags) || PageTail(page); } @@ -184,31 +184,31 @@ static inline int PageCompound(struct page *page) * Macros to create function definitions for page flags */ #define TESTPAGEFLAG(uname, lname, policy) \ -static inline int Page##uname(struct page *page) \ +static __always_inline int Page##uname(struct page *page) \ { return test_bit(PG_##lname, &policy(page, 0)->flags); } #define SETPAGEFLAG(uname, lname, policy) \ -static inline void SetPage##uname(struct page *page) \ +static __always_inline void SetPage##uname(struct page *page) \ { set_bit(PG_##lname, &policy(page, 1)->flags); } #define CLEARPAGEFLAG(uname, lname, policy) \ -static inline void ClearPage##uname(struct page *page) \ +static __always_inline void ClearPage##uname(struct page *page) \ { clear_bit(PG_##lname, &policy(page, 1)->flags); } #define __SETPAGEFLAG(uname, lname, policy) \ -static inline void __SetPage##uname(struct page *page) \ +static __always_inline void __SetPage##uname(struct page *page) \ { __set_bit(PG_##lname, &policy(page, 1)->flags); } #define __CLEARPAGEFLAG(uname, lname, policy) \ -static inline void __ClearPage##uname(struct page *page) \ +static __always_inline void __ClearPage##uname(struct page *page) \ { __clear_bit(PG_##lname, &policy(page, 1)->flags); } #define TESTSETFLAG(uname, lname, policy) \ -static inline int TestSetPage##uname(struct page *page) \ +static __always_inline int TestSetPage##uname(struct page *page) \ { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); } #define TESTCLEARFLAG(uname, lname, policy) \ -static inline int TestClearPage##uname(struct page *page) \ +static __always_inline int TestClearPage##uname(struct page *page) \ { return 
test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); } #define PAGEFLAG(uname, lname, policy) \ @@ -371,7 +371,7 @@ PAGEFLAG(Idle, idle, PF_ANY) #define PAGE_MAPPING_KSM 2 #define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM) -static inline int PageAnon(struct page *page) +static __always_inline int PageAnon(struct page *page) { page = compound_head(page); return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; @@ -384,7 +384,7 @@ static inline int PageAnon(struct page *page) * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any * anon_vma, but to that page's node of the stable tree. */ -static inline int PageKsm(struct page *page) +static __always_inline int PageKsm(struct page *page) { page = compound_head(page); return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == @@ -415,14 +415,14 @@ static inline int PageUptodate(struct page *page) return ret; } -static inline void __SetPageUptodate(struct page *page) +static __always_inline void __SetPageUptodate(struct page *page) { VM_BUG_ON_PAGE(PageTail(page), page); smp_wmb(); __set_bit(PG_uptodate, &page->flags); } -static inline void SetPageUptodate(struct page *page) +static __always_inline void SetPageUptodate(struct page *page) { VM_BUG_ON_PAGE(PageTail(page), page); /* @@ -456,12 +456,12 @@ static inline void set_page_writeback_keepwrite(struct page *page) __PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY) -static inline void set_compound_head(struct page *page, struct page *head) +static __always_inline void set_compound_head(struct page *page, struct page *head) { WRITE_ONCE(page->compound_head, (unsigned long)head + 1); } -static inline void clear_compound_head(struct page *page) +static __always_inline void clear_compound_head(struct page *page) { WRITE_ONCE(page->compound_head, 0); } @@ -593,6 +593,8 @@ static inline void __ClearPageBuddy(struct page *page) atomic_set(&page->_mapcount, -1); } +extern bool is_free_buddy_page(struct page *page); + #define PAGE_BALLOON_MAPCOUNT_VALUE (-256) static inline int PageBalloon(struct page *page) diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index 17f118a82854..e1fe7cf5bddf 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -45,6 +45,7 @@ struct page_ext { unsigned int order; gfp_t gfp_mask; unsigned int nr_entries; + int last_migrate_reason; unsigned long trace_entries[8]; #endif }; diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h index cacaabea8a09..46f1b939948c 100644 --- a/include/linux/page_owner.h +++ b/include/linux/page_owner.h @@ -1,38 +1,54 @@ #ifndef __LINUX_PAGE_OWNER_H #define __LINUX_PAGE_OWNER_H +#include <linux/jump_label.h> + #ifdef CONFIG_PAGE_OWNER -extern bool page_owner_inited; +extern struct static_key_false page_owner_inited; extern struct page_ext_operations page_owner_ops; extern void __reset_page_owner(struct page *page, unsigned int order); extern void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask); extern gfp_t __get_page_owner_gfp(struct page *page); +extern void __copy_page_owner(struct page *oldpage, struct page *newpage); +extern void __set_page_owner_migrate_reason(struct page *page, int reason); +extern void __dump_page_owner(struct page *page); static inline void reset_page_owner(struct page *page, unsigned int order) { - if (likely(!page_owner_inited)) - return; - - __reset_page_owner(page, order); + if (static_branch_unlikely(&page_owner_inited)) + __reset_page_owner(page, order); } static inline void 
set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) { - if (likely(!page_owner_inited)) - return; - - __set_page_owner(page, order, gfp_mask); + if (static_branch_unlikely(&page_owner_inited)) + __set_page_owner(page, order, gfp_mask); } static inline gfp_t get_page_owner_gfp(struct page *page) { - if (likely(!page_owner_inited)) + if (static_branch_unlikely(&page_owner_inited)) + return __get_page_owner_gfp(page); + else return 0; - - return __get_page_owner_gfp(page); +} +static inline void copy_page_owner(struct page *oldpage, struct page *newpage) +{ + if (static_branch_unlikely(&page_owner_inited)) + __copy_page_owner(oldpage, newpage); +} +static inline void set_page_owner_migrate_reason(struct page *page, int reason) +{ + if (static_branch_unlikely(&page_owner_inited)) + __set_page_owner_migrate_reason(page, reason); +} +static inline void dump_page_owner(struct page *page) +{ + if (static_branch_unlikely(&page_owner_inited)) + __dump_page_owner(page); } #else static inline void reset_page_owner(struct page *page, unsigned int order) @@ -46,6 +62,14 @@ static inline gfp_t get_page_owner_gfp(struct page *page) { return 0; } - +static inline void copy_page_owner(struct page *oldpage, struct page *newpage) +{ +} +static inline void set_page_owner_migrate_reason(struct page *page, int reason) +{ +} +static inline void dump_page_owner(struct page *page) +{ +} #endif /* CONFIG_PAGE_OWNER */ #endif /* __LINUX_PAGE_OWNER_H */ diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h new file mode 100644 index 000000000000..e596d5d9540e --- /dev/null +++ b/include/linux/page_ref.h @@ -0,0 +1,173 @@ +#ifndef _LINUX_PAGE_REF_H +#define _LINUX_PAGE_REF_H + +#include <linux/atomic.h> +#include <linux/mm_types.h> +#include <linux/page-flags.h> +#include <linux/tracepoint-defs.h> + +extern struct tracepoint __tracepoint_page_ref_set; +extern struct tracepoint __tracepoint_page_ref_mod; +extern struct tracepoint __tracepoint_page_ref_mod_and_test; +extern struct tracepoint __tracepoint_page_ref_mod_and_return; +extern struct tracepoint __tracepoint_page_ref_mod_unless; +extern struct tracepoint __tracepoint_page_ref_freeze; +extern struct tracepoint __tracepoint_page_ref_unfreeze; + +#ifdef CONFIG_DEBUG_PAGE_REF + +/* + * Ideally we would want to use the trace_<tracepoint>_enabled() helper + * functions. But due to include header file issues, that is not + * feasible. Instead we have to open code the static key functions. 
+ * + * See trace_##name##_enabled(void) in include/linux/tracepoint.h + */ +#define page_ref_tracepoint_active(t) static_key_false(&(t).key) + +extern void __page_ref_set(struct page *page, int v); +extern void __page_ref_mod(struct page *page, int v); +extern void __page_ref_mod_and_test(struct page *page, int v, int ret); +extern void __page_ref_mod_and_return(struct page *page, int v, int ret); +extern void __page_ref_mod_unless(struct page *page, int v, int u); +extern void __page_ref_freeze(struct page *page, int v, int ret); +extern void __page_ref_unfreeze(struct page *page, int v); + +#else + +#define page_ref_tracepoint_active(t) false + +static inline void __page_ref_set(struct page *page, int v) +{ +} +static inline void __page_ref_mod(struct page *page, int v) +{ +} +static inline void __page_ref_mod_and_test(struct page *page, int v, int ret) +{ +} +static inline void __page_ref_mod_and_return(struct page *page, int v, int ret) +{ +} +static inline void __page_ref_mod_unless(struct page *page, int v, int u) +{ +} +static inline void __page_ref_freeze(struct page *page, int v, int ret) +{ +} +static inline void __page_ref_unfreeze(struct page *page, int v) +{ +} + +#endif + +static inline int page_ref_count(struct page *page) +{ + return atomic_read(&page->_count); +} + +static inline int page_count(struct page *page) +{ + return atomic_read(&compound_head(page)->_count); +} + +static inline void set_page_count(struct page *page, int v) +{ + atomic_set(&page->_count, v); + if (page_ref_tracepoint_active(__tracepoint_page_ref_set)) + __page_ref_set(page, v); +} + +/* + * Setup the page count before being freed into the page allocator for + * the first time (boot or memory hotplug) + */ +static inline void init_page_count(struct page *page) +{ + set_page_count(page, 1); +} + +static inline void page_ref_add(struct page *page, int nr) +{ + atomic_add(nr, &page->_count); + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) + __page_ref_mod(page, nr); +} + +static inline void page_ref_sub(struct page *page, int nr) +{ + atomic_sub(nr, &page->_count); + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) + __page_ref_mod(page, -nr); +} + +static inline void page_ref_inc(struct page *page) +{ + atomic_inc(&page->_count); + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) + __page_ref_mod(page, 1); +} + +static inline void page_ref_dec(struct page *page) +{ + atomic_dec(&page->_count); + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) + __page_ref_mod(page, -1); +} + +static inline int page_ref_sub_and_test(struct page *page, int nr) +{ + int ret = atomic_sub_and_test(nr, &page->_count); + + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test)) + __page_ref_mod_and_test(page, -nr, ret); + return ret; +} + +static inline int page_ref_dec_and_test(struct page *page) +{ + int ret = atomic_dec_and_test(&page->_count); + + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test)) + __page_ref_mod_and_test(page, -1, ret); + return ret; +} + +static inline int page_ref_dec_return(struct page *page) +{ + int ret = atomic_dec_return(&page->_count); + + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return)) + __page_ref_mod_and_return(page, -1, ret); + return ret; +} + +static inline int page_ref_add_unless(struct page *page, int nr, int u) +{ + int ret = atomic_add_unless(&page->_count, nr, u); + + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless)) + __page_ref_mod_unless(page, nr, ret); + return ret; +} 
+ +static inline int page_ref_freeze(struct page *page, int count) +{ + int ret = likely(atomic_cmpxchg(&page->_count, count, 0) == count); + + if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze)) + __page_ref_freeze(page, count, ret); + return ret; +} + +static inline void page_ref_unfreeze(struct page *page, int count) +{ + VM_BUG_ON_PAGE(page_count(page) != 0, page); + VM_BUG_ON(count == 0); + + atomic_set(&page->_count, count); + if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze)) + __page_ref_unfreeze(page, count); +} + +#endif diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 92395a0a7dc5..1ebd65c91422 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -165,7 +165,7 @@ static inline int page_cache_get_speculative(struct page *page) * SMP requires. */ VM_BUG_ON_PAGE(page_count(page) == 0, page); - atomic_inc(&page->_count); + page_ref_inc(page); #else if (unlikely(!get_page_unless_zero(page))) { @@ -194,10 +194,10 @@ static inline int page_cache_add_speculative(struct page *page, int count) VM_BUG_ON(!in_atomic()); # endif VM_BUG_ON_PAGE(page_count(page) == 0, page); - atomic_add(count, &page->_count); + page_ref_add(page, count); #else - if (unlikely(!atomic_add_unless(&page->_count, count, 0))) + if (unlikely(!page_ref_add_unless(page, count, 0))) return 0; #endif VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page); @@ -205,19 +205,6 @@ static inline int page_cache_add_speculative(struct page *page, int count) return 1; } -static inline int page_freeze_refs(struct page *page, int count) -{ - return likely(atomic_cmpxchg(&page->_count, count, 0) == count); -} - -static inline void page_unfreeze_refs(struct page *page, int count) -{ - VM_BUG_ON_PAGE(page_count(page) != 0, page); - VM_BUG_ON(count == 0); - - atomic_set(&page->_count, count); -} - #ifdef CONFIG_NUMA extern struct page *__page_cache_alloc(gfp_t gfp); #else @@ -663,8 +650,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping, int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); extern void delete_from_page_cache(struct page *page); -extern void __delete_from_page_cache(struct page *page, void *shadow, - struct mem_cgroup *memcg); +extern void __delete_from_page_cache(struct page *page, void *shadow); int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); /* diff --git a/include/asm-generic/pci-dma-compat.h b/include/linux/pci-dma-compat.h index eafce7b6f052..39726caef5b1 100644 --- a/include/asm-generic/pci-dma-compat.h +++ b/include/linux/pci-dma-compat.h @@ -6,6 +6,12 @@ #include <linux/dma-mapping.h> +/* This defines the direction arg to the DMA mapping routines. 
*/ +#define PCI_DMA_BIDIRECTIONAL 0 +#define PCI_DMA_TODEVICE 1 +#define PCI_DMA_FROMDEVICE 2 +#define PCI_DMA_NONE 3 + static inline void * pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) @@ -113,6 +119,29 @@ static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) { return dma_set_coherent_mask(&dev->dev, mask); } + +static inline int pci_set_dma_max_seg_size(struct pci_dev *dev, + unsigned int size) +{ + return dma_set_max_seg_size(&dev->dev, size); +} + +static inline int pci_set_dma_seg_boundary(struct pci_dev *dev, + unsigned long mask) +{ + return dma_set_seg_boundary(&dev->dev, mask); +} +#else +static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) +{ return -EIO; } +static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) +{ return -EIO; } +static inline int pci_set_dma_max_seg_size(struct pci_dev *dev, + unsigned int size) +{ return -EIO; } +static inline int pci_set_dma_seg_boundary(struct pci_dev *dev, + unsigned long mask) +{ return -EIO; } #endif #endif diff --git a/include/linux/pci.h b/include/linux/pci.h index bc435d6293d2..004b8133417d 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -70,12 +70,6 @@ enum pci_mmap_state { pci_mmap_mem }; -/* This defines the direction arg to the DMA mapping routines. */ -#define PCI_DMA_BIDIRECTIONAL 0 -#define PCI_DMA_TODEVICE 1 -#define PCI_DMA_FROMDEVICE 2 -#define PCI_DMA_NONE 3 - /* * For PCI devices, the region numbers are assigned this way: */ @@ -359,6 +353,7 @@ struct pci_dev { unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */ unsigned int irq_managed:1; unsigned int has_secondary_link:1; + unsigned int non_compliant_bars:1; /* broken BARs; ignore them */ pci_dev_flags_t dev_flags; atomic_t enable_cnt; /* pci_enable_device has been called */ @@ -578,6 +573,8 @@ static inline int pcibios_err_to_errno(int err) /* Low-level architecture-dependent routines */ struct pci_ops { + int (*add_bus)(struct pci_bus *bus); + void (*remove_bus)(struct pci_bus *bus); void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where); int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val); int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); @@ -746,9 +743,26 @@ struct pci_driver { .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0 +enum { + PCI_REASSIGN_ALL_RSRC = 0x00000001, /* ignore firmware setup */ + PCI_REASSIGN_ALL_BUS = 0x00000002, /* reassign all bus numbers */ + PCI_PROBE_ONLY = 0x00000004, /* use existing setup */ + PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* don't do ISA alignment */ + PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* enable domains in /proc */ + PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... 
except domain 0 */ + PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* scan all, not just dev 0 */ +}; + /* these external functions are only available when PCI support is enabled */ #ifdef CONFIG_PCI +extern unsigned int pci_flags; + +static inline void pci_set_flags(int flags) { pci_flags = flags; } +static inline void pci_add_flags(int flags) { pci_flags |= flags; } +static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; } +static inline int pci_has_flag(int flag) { return pci_flags & flag; } + void pcie_bus_configure_settings(struct pci_bus *bus); enum pcie_bus_config_types { @@ -989,23 +1003,6 @@ static inline int pci_is_managed(struct pci_dev *pdev) return pdev->is_managed; } -static inline void pci_set_managed_irq(struct pci_dev *pdev, unsigned int irq) -{ - pdev->irq = irq; - pdev->irq_managed = 1; -} - -static inline void pci_reset_managed_irq(struct pci_dev *pdev) -{ - pdev->irq = 0; - pdev->irq_managed = 0; -} - -static inline bool pci_has_managed_irq(struct pci_dev *pdev) -{ - return pdev->irq_managed && pdev->irq > 0; -} - void pci_disable_device(struct pci_dev *dev); extern unsigned int pcibios_max_latency; @@ -1022,8 +1019,6 @@ void pci_intx(struct pci_dev *dev, int enable); bool pci_intx_mask_supported(struct pci_dev *dev); bool pci_check_and_mask_intx(struct pci_dev *dev); bool pci_check_and_unmask_intx(struct pci_dev *dev); -int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size); -int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask); int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask); int pci_wait_for_pending_transaction(struct pci_dev *dev); int pcix_get_max_mmrbc(struct pci_dev *dev); @@ -1239,6 +1234,7 @@ resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno); int pci_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags); + /* kmem_cache style wrapper around pci_alloc_consistent() */ #include <linux/pci-dma.h> @@ -1406,6 +1402,11 @@ void pci_register_set_vga_state(arch_set_vga_state_t func); #else /* CONFIG_PCI is not enabled */ +static inline void pci_set_flags(int flags) { } +static inline void pci_add_flags(int flags) { } +static inline void pci_clear_flags(int flags) { } +static inline int pci_has_flag(int flag) { return 0; } + /* * If the system does not have PCI, clearly these return errors. Define * these as simple inline functions to avoid hair in drivers. 
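With pci_flags and its accessors now available from linux/pci.h on every architecture, a host-bridge driver can publish its resource-assignment policy and generic code can test it. A rough sketch, assuming a device-tree based host driver; the function names and the "linux,pci-probe-only" property are illustrative, not mandated by this header:

#include <linux/of.h>
#include <linux/pci.h>

/* Hypothetical host-bridge probe fragment. */
static void example_set_pci_policy(struct device_node *np)
{
	if (of_property_read_bool(np, "linux,pci-probe-only"))
		pci_add_flags(PCI_PROBE_ONLY);		/* keep firmware's setup */
	else
		pci_add_flags(PCI_REASSIGN_ALL_RSRC);
}

/* Generic code elsewhere can then honour the policy, e.g.: */
static void example_finish_enumeration(struct pci_bus *bus)
{
	if (!pci_has_flag(PCI_PROBE_ONLY))
		pci_assign_unassigned_root_bus_resources(bus);
}

The empty stubs added for the !CONFIG_PCI case let such code compile unchanged when PCI support is disabled.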
@@ -1445,16 +1446,6 @@ static inline struct pci_dev *pci_get_class(unsigned int class, static inline void pci_set_master(struct pci_dev *dev) { } static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; } static inline void pci_disable_device(struct pci_dev *dev) { } -static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) -{ return -EIO; } -static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) -{ return -EIO; } -static inline int pci_set_dma_max_seg_size(struct pci_dev *dev, - unsigned int size) -{ return -EIO; } -static inline int pci_set_dma_seg_boundary(struct pci_dev *dev, - unsigned long mask) -{ return -EIO; } static inline int pci_assign_resource(struct pci_dev *dev, int i) { return -EBUSY; } static inline int __pci_register_driver(struct pci_driver *drv, @@ -1516,6 +1507,10 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; } #include <asm/pci.h> +#ifndef pci_root_bus_fwnode +#define pci_root_bus_fwnode(bus) NULL +#endif + /* these helpers provide future and backwards compatibility * for accessing popular PCI BAR info */ #define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) @@ -1843,12 +1838,13 @@ bool pci_acs_path_enabled(struct pci_dev *start, #define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA) /* Small Resource Data Type Tag Item Names */ -#define PCI_VPD_STIN_END 0x78 /* End */ +#define PCI_VPD_STIN_END 0x0f /* End */ -#define PCI_VPD_SRDT_END PCI_VPD_STIN_END +#define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3) #define PCI_VPD_SRDT_TIN_MASK 0x78 #define PCI_VPD_SRDT_LEN_MASK 0x07 +#define PCI_VPD_LRDT_TIN_MASK 0x7f #define PCI_VPD_LRDT_TAG_SIZE 3 #define PCI_VPD_SRDT_TAG_SIZE 1 @@ -1872,6 +1868,17 @@ static inline u16 pci_vpd_lrdt_size(const u8 *lrdt) } /** + * pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item + * @lrdt: Pointer to the beginning of the Large Resource Data Type tag + * + * Returns the extracted Large Resource Data Type Tag item. + */ +static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt) +{ + return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK); +} + +/** * pci_vpd_srdt_size - Extracts the Small Resource Data Type length * @lrdt: Pointer to the beginning of the Small Resource Data Type tag * @@ -1883,6 +1890,17 @@ static inline u8 pci_vpd_srdt_size(const u8 *srdt) } /** + * pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item + * @lrdt: Pointer to the beginning of the Small Resource Data Type tag + * + * Returns the extracted Small Resource Data Type Tag Item. 
+ */ +static inline u8 pci_vpd_srdt_tag(const u8 *srdt) +{ + return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3; +} + +/** * pci_vpd_info_field_size - Extracts the information field length * @lrdt: Pointer to the beginning of an information field header * @@ -1998,4 +2016,8 @@ static inline bool pci_ari_enabled(struct pci_bus *bus) { return bus->self && bus->self->ari_enabled; } + +/* provide the legacy pci_dma_* API */ +#include <linux/pci-dma-compat.h> + #endif /* LINUX_PCI_H */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 37f05cb1dfd6..247da8c95860 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -110,6 +110,7 @@ #define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310 #define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320 #define PCI_CLASS_SERIAL_USB_XHCI 0x0c0330 +#define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe #define PCI_CLASS_SERIAL_FIBER 0x0c04 #define PCI_CLASS_SERIAL_SMBUS 0x0c05 @@ -2506,6 +2507,10 @@ #define PCI_VENDOR_ID_AZWAVE 0x1a3b +#define PCI_VENDOR_ID_REDHAT_QUMRANET 0x1af4 +#define PCI_SUBVENDOR_ID_REDHAT_QUMRANET 0x1af4 +#define PCI_SUBDEVICE_ID_QEMU 0x1100 + #define PCI_VENDOR_ID_ASMEDIA 0x1b21 #define PCI_VENDOR_ID_CIRCUITCO 0x1cc8 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index b35a61a481fa..78fda2a69ab8 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -397,6 +397,7 @@ struct pmu { * enum perf_event_active_state - the states of a event */ enum perf_event_active_state { + PERF_EVENT_STATE_DEAD = -4, PERF_EVENT_STATE_EXIT = -3, PERF_EVENT_STATE_ERROR = -2, PERF_EVENT_STATE_OFF = -1, @@ -467,6 +468,7 @@ struct perf_event { int group_flags; struct perf_event *group_leader; struct pmu *pmu; + void *pmu_private; enum perf_event_active_state state; unsigned int attach_state; @@ -905,7 +907,7 @@ perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) } } -extern struct static_key_deferred perf_sched_events; +extern struct static_key_false perf_sched_events; static __always_inline bool perf_sw_migrate_enabled(void) @@ -924,7 +926,7 @@ static inline void perf_event_task_migrate(struct task_struct *task) static inline void perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task) { - if (static_key_false(&perf_sched_events.key)) + if (static_branch_unlikely(&perf_sched_events)) __perf_event_task_sched_in(prev, task); if (perf_sw_migrate_enabled() && task->sched_migrated) { @@ -941,7 +943,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev, { perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0); - if (static_key_false(&perf_sched_events.key)) + if (static_branch_unlikely(&perf_sched_events)) __perf_event_task_sched_out(prev, next); } @@ -964,11 +966,20 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs); extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs); +extern struct perf_callchain_entry * +get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, + bool crosstask, bool add_mark); +extern int get_callchain_buffers(void); +extern void put_callchain_buffers(void); -static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) +static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) { - if (entry->nr < PERF_MAX_STACK_DEPTH) + if (entry->nr < PERF_MAX_STACK_DEPTH) { entry->ip[entry->nr++] = ip; + return 0; + } else { + return -1; /* no more room, stop 
walking the stack */ + } } extern int sysctl_perf_event_paranoid; @@ -1108,12 +1119,6 @@ static inline void perf_event_task_tick(void) { } static inline int perf_event_release_kernel(struct perf_event *event) { return 0; } #endif -#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL) -extern bool perf_event_can_stop_tick(void); -#else -static inline bool perf_event_can_stop_tick(void) { return true; } -#endif - #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) extern void perf_restore_debug_store(void); #else diff --git a/include/linux/pfn.h b/include/linux/pfn.h index 2d8e49711b63..1132953235c0 100644 --- a/include/linux/pfn.h +++ b/include/linux/pfn.h @@ -10,7 +10,7 @@ * backing is indicated by flags in the high bits of the value. */ typedef struct { - unsigned long val; + u64 val; } pfn_t; #endif diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h index 37448ab5fb5c..94994810c7c0 100644 --- a/include/linux/pfn_t.h +++ b/include/linux/pfn_t.h @@ -9,14 +9,13 @@ * PFN_DEV - pfn is not covered by system memmap by default * PFN_MAP - pfn has a dynamic page mapping established by a device driver */ -#define PFN_FLAGS_MASK (((unsigned long) ~PAGE_MASK) \ - << (BITS_PER_LONG - PAGE_SHIFT)) -#define PFN_SG_CHAIN (1UL << (BITS_PER_LONG - 1)) -#define PFN_SG_LAST (1UL << (BITS_PER_LONG - 2)) -#define PFN_DEV (1UL << (BITS_PER_LONG - 3)) -#define PFN_MAP (1UL << (BITS_PER_LONG - 4)) - -static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, unsigned long flags) +#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT)) +#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1)) +#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2)) +#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3)) +#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4)) + +static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags) { pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), }; @@ -29,7 +28,7 @@ static inline pfn_t pfn_to_pfn_t(unsigned long pfn) return __pfn_to_pfn_t(pfn, 0); } -extern pfn_t phys_to_pfn_t(phys_addr_t addr, unsigned long flags); +extern pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags); static inline bool pfn_t_has_page(pfn_t pfn) { @@ -87,7 +86,7 @@ static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot) #ifdef __HAVE_ARCH_PTE_DEVMAP static inline bool pfn_t_devmap(pfn_t pfn) { - const unsigned long flags = PFN_DEV|PFN_MAP; + const u64 flags = PFN_DEV|PFN_MAP; return (pfn.val & flags) == flags; } diff --git a/include/linux/phy.h b/include/linux/phy.h index d6f3641e7933..2abd7918f64f 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -327,8 +327,6 @@ struct phy_c45_device_ids { /* phy_device: An instance of a PHY * * drv: Pointer to the driver for this PHY instance - * bus: Pointer to the bus this PHY is on - * dev: driver model device structure for this PHY * phy_id: UID for this device found during discovery * c45_ids: 802.3-c45 Device Identifers if is_c45. * is_c45: Set to true if this phy uses clause 45 addressing. @@ -338,7 +336,6 @@ struct phy_c45_device_ids { * suspended: Set to true if this phy has been suspended successfully. * state: state of the PHY for management purposes * dev_flags: Device-specific flags used by the PHY driver. 
- * addr: Bus address of PHY * link_timeout: The number of timer firings to wait before the * giving up on the current attempt at acquiring a link * irq: IRQ number of the PHY's interrupt (-1 if none) diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index 2400d2ea4f34..1d41ec44e39d 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -19,7 +19,7 @@ extern struct phy_device *fixed_phy_register(unsigned int irq, struct fixed_phy_status *status, int link_gpio, struct device_node *np); -extern void fixed_phy_del(int phy_addr); +extern void fixed_phy_unregister(struct phy_device *phydev); extern int fixed_phy_set_link_update(struct phy_device *phydev, int (*link_update)(struct net_device *, struct fixed_phy_status *)); @@ -40,9 +40,8 @@ static inline struct phy_device *fixed_phy_register(unsigned int irq, { return ERR_PTR(-ENODEV); } -static inline int fixed_phy_del(int phy_addr) +static inline void fixed_phy_unregister(struct phy_device *phydev) { - return -ENODEV; } static inline int fixed_phy_set_link_update(struct phy_device *phydev, int (*link_update)(struct net_device *, diff --git a/include/linux/platform_data/ad5761.h b/include/linux/platform_data/ad5761.h new file mode 100644 index 000000000000..7bd8ed7d978e --- /dev/null +++ b/include/linux/platform_data/ad5761.h @@ -0,0 +1,44 @@ +/* + * AD5721, AD5721R, AD5761, AD5761R, Voltage Output Digital to Analog Converter + * + * Copyright 2016 Qtechnology A/S + * 2016 Ricardo Ribalda <ricardo.ribalda@gmail.com> + * + * Licensed under the GPL-2. + */ +#ifndef __LINUX_PLATFORM_DATA_AD5761_H__ +#define __LINUX_PLATFORM_DATA_AD5761_H__ + +/** + * enum ad5761_voltage_range - Voltage range the AD5761 is configured for. + * @AD5761_VOLTAGE_RANGE_M10V_10V: -10V to 10V + * @AD5761_VOLTAGE_RANGE_0V_10V: 0V to 10V + * @AD5761_VOLTAGE_RANGE_M5V_5V: -5V to 5V + * @AD5761_VOLTAGE_RANGE_0V_5V: 0V to 5V + * @AD5761_VOLTAGE_RANGE_M2V5_7V5: -2.5V to 7.5V + * @AD5761_VOLTAGE_RANGE_M3V_3V: -3V to 3V + * @AD5761_VOLTAGE_RANGE_0V_16V: 0V to 16V + * @AD5761_VOLTAGE_RANGE_0V_20V: 0V to 20V + */ + +enum ad5761_voltage_range { + AD5761_VOLTAGE_RANGE_M10V_10V, + AD5761_VOLTAGE_RANGE_0V_10V, + AD5761_VOLTAGE_RANGE_M5V_5V, + AD5761_VOLTAGE_RANGE_0V_5V, + AD5761_VOLTAGE_RANGE_M2V5_7V5, + AD5761_VOLTAGE_RANGE_M3V_3V, + AD5761_VOLTAGE_RANGE_0V_16V, + AD5761_VOLTAGE_RANGE_0V_20V, +}; + +/** + * struct ad5761_platform_data - AD5761 DAC driver platform data + * @voltage_range: Voltage range the AD5761 is configured for + */ + +struct ad5761_platform_data { + enum ad5761_voltage_range voltage_range; +}; + +#endif diff --git a/include/linux/spi/ad7879.h b/include/linux/platform_data/ad7879.h index 58368be0b4c0..69e2e1fd2bc8 100644 --- a/include/linux/spi/ad7879.h +++ b/include/linux/platform_data/ad7879.h @@ -1,4 +1,4 @@ -/* linux/spi/ad7879.h */ +/* linux/platform_data/ad7879.h */ /* Touchscreen characteristics vary between boards and models. The * platform_data for the device's "struct device" holds this information. diff --git a/include/linux/platform_data/adau17x1.h b/include/linux/platform_data/adau17x1.h index a81766cae230..9db1b905df24 100644 --- a/include/linux/platform_data/adau17x1.h +++ b/include/linux/platform_data/adau17x1.h @@ -1,5 +1,5 @@ /* - * Driver for ADAU1761/ADAU1461/ADAU1761/ADAU1961/ADAU1781/ADAU1781 codecs + * Driver for ADAU1361/ADAU1461/ADAU1761/ADAU1961/ADAU1381/ADAU1781 codecs * * Copyright 2011-2014 Analog Devices Inc. 
* Author: Lars-Peter Clausen <lars@metafoo.de> diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h index c42aa89d34ee..dc9a13e5acda 100644 --- a/include/linux/platform_data/at24.h +++ b/include/linux/platform_data/at24.h @@ -9,7 +9,7 @@ #define _LINUX_AT24_H #include <linux/types.h> -#include <linux/memory.h> +#include <linux/nvmem-consumer.h> /** * struct at24_platform_data - data to set up at24 (generic eeprom) driver @@ -17,7 +17,7 @@ * @page_size: number of byte which can be written in one go * @flags: tunable options, check AT24_FLAG_* defines * @setup: an optional callback invoked after eeprom is probed; enables kernel - code to access eeprom via memory_accessor, see example + code to access eeprom via nvmem, see example * @context: optional parameter passed to setup() * * If you set up a custom eeprom type, please double-check the parameters. @@ -26,13 +26,13 @@ * * An example in pseudo code for a setup() callback: * - * void get_mac_addr(struct memory_accessor *mem_acc, void *context) + * void get_mac_addr(struct nvmem_device *nvmem, void *context) * { * u8 *mac_addr = ethernet_pdata->mac_addr; * off_t offset = context; * * // Read MAC addr from EEPROM - * if (mem_acc->read(mem_acc, mac_addr, offset, ETH_ALEN) == ETH_ALEN) + * if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN) * pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr); * } * @@ -48,7 +48,7 @@ struct at24_platform_data { #define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */ #define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */ - void (*setup)(struct memory_accessor *, void *context); + void (*setup)(struct nvmem_device *nvmem, void *context); void *context; }; diff --git a/include/linux/platform_data/brcmfmac-sdio.h b/include/linux/platform_data/brcmfmac-sdio.h deleted file mode 100644 index e75dcbf2b230..000000000000 --- a/include/linux/platform_data/brcmfmac-sdio.h +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright (c) 2013 Broadcom Corporation - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#ifndef _LINUX_BRCMFMAC_PLATFORM_H -#define _LINUX_BRCMFMAC_PLATFORM_H -
-/* - * Platform specific driver functions and data. Through the platform specific - * device data functions can be provided to help the brcmfmac driver to - * operate with the device in combination with the used platform.
- * - * Use the platform data in the following (similar) way: - * - * -#include <brcmfmac_platform.h> - - -static void brcmfmac_power_on(void) -{ -} - -static void brcmfmac_power_off(void) -{ -} - -static void brcmfmac_reset(void) -{ -} - -static struct brcmfmac_sdio_platform_data brcmfmac_sdio_pdata = { - .power_on = brcmfmac_power_on, - .power_off = brcmfmac_power_off, - .reset = brcmfmac_reset -}; - -static struct platform_device brcmfmac_device = { - .name = BRCMFMAC_SDIO_PDATA_NAME, - .id = PLATFORM_DEVID_NONE, - .dev.platform_data = &brcmfmac_sdio_pdata -}; - -void __init brcmfmac_init_pdata(void) -{ - brcmfmac_sdio_pdata.oob_irq_supported = true; - brcmfmac_sdio_pdata.oob_irq_nr = gpio_to_irq(GPIO_BRCMF_SDIO_OOB); - brcmfmac_sdio_pdata.oob_irq_flags = IORESOURCE_IRQ | - IORESOURCE_IRQ_HIGHLEVEL; - platform_device_register(&brcmfmac_device); -} - * - * - * Note: the brcmfmac can be loaded as module or be statically built-in into - * the kernel. If built-in then do note that it uses module_init (and - * module_exit) routines which equal device_initcall. So if you intend to - * create a module with the platform specific data for the brcmfmac and have - * it built-in to the kernel then use a higher initcall then device_initcall - * (see init.h). If this is not done then brcmfmac will load without problems - * but will not pickup the platform data. - * - * When the driver does not "detect" platform driver data then it will continue - * without reporting anything and just assume there is no data needed. Which is - * probably true for most platforms. - * - * Explanation of the platform_data fields: - * - * drive_strength: is the preferred drive_strength to be used for the SDIO - * pins. If 0 then a default value will be used. This is the target drive - * strength, the exact drive strength which will be used depends on the - * capabilities of the device. - * - * oob_irq_supported: does the board have support for OOB interrupts. SDIO - * in-band interrupts are relatively slow and for having less overhead on - * interrupt processing an out of band interrupt can be used. If the HW - * supports this then enable this by setting this field to true and configure - * the oob related fields. - * - * oob_irq_nr, oob_irq_flags: the OOB interrupt information. The values are - * used for registering the irq using request_irq function. - * - * broken_sg_support: flag for broken sg list support of SDIO host controller. - * Set this to true if the SDIO host controller has higher align requirement - * than 32 bytes for each scatterlist item. - * - * sd_head_align: alignment requirement for start of data buffer - * - * sd_sgentry_align: length alignment requirement for each sg entry - * - * power_on: This function is called by the brcmfmac when the module gets - * loaded. This can be particularly useful for low power devices. The platform - * spcific routine may for example decide to power up the complete device. - * If there is no use-case for this function then provide NULL. - * - * power_off: This function is called by the brcmfmac when the module gets - * unloaded. At this point the device can be powered down or otherwise be reset. - * So if an actual power_off is not supported but reset is then reset the device - * when this function gets called. This can be particularly useful for low power - * devices. If there is no use-case for this function (either power-down or - * reset) then provide NULL. - * - * reset: This function can get called if the device communication broke down. 
- * This functionality is particularly useful in case of SDIO type devices. It is - * possible to reset a dongle via sdio data interface, but it requires that - * this is fully functional. This function is chip/module specific and this - * function should return only after the complete reset has completed. - */ - -#define BRCMFMAC_SDIO_PDATA_NAME "brcmfmac_sdio" - -struct brcmfmac_sdio_platform_data { - unsigned int drive_strength; - bool oob_irq_supported; - unsigned int oob_irq_nr; - unsigned long oob_irq_flags; - bool broken_sg_support; - unsigned short sd_head_align; - unsigned short sd_sgentry_align; - void (*power_on)(void); - void (*power_off)(void); - void (*reset)(void); -}; - -#endif /* _LINUX_BRCMFMAC_PLATFORM_H */ diff --git a/include/linux/platform_data/brcmfmac.h b/include/linux/platform_data/brcmfmac.h new file mode 100644 index 000000000000..1d30bf278231 --- /dev/null +++ b/include/linux/platform_data/brcmfmac.h @@ -0,0 +1,185 @@ +/* + * Copyright (c) 201 Broadcom Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _LINUX_BRCMFMAC_PLATFORM_H +#define _LINUX_BRCMFMAC_PLATFORM_H + + +#define BRCMFMAC_PDATA_NAME "brcmfmac" + +#define BRCMFMAC_COUNTRY_BUF_SZ 4 + + +/* + * Platform specific driver functions and data. Through the platform specific + * device data functions and data can be provided to help the brcmfmac driver to + * operate with the device in combination with the used platform. + */ + + +/** + * Note: the brcmfmac can be loaded as module or be statically built-in into + * the kernel. If built-in then do note that it uses module_init (and + * module_exit) routines which equal device_initcall. So if you intend to + * create a module with the platform specific data for the brcmfmac and have + * it built-in to the kernel then use a higher initcall then device_initcall + * (see init.h). If this is not done then brcmfmac will load without problems + * but will not pickup the platform data. + * + * When the driver does not "detect" platform driver data then it will continue + * without reporting anything and just assume there is no data needed. Which is + * probably true for most platforms. + */ + +/** + * enum brcmf_bus_type - Bus type identifier. Currently SDIO, USB and PCIE are + * supported. + */ +enum brcmf_bus_type { + BRCMF_BUSTYPE_SDIO, + BRCMF_BUSTYPE_USB, + BRCMF_BUSTYPE_PCIE +}; + + +/** + * struct brcmfmac_sdio_pd - SDIO Device specific platform data. + * + * @txglomsz: SDIO txglom size. Use 0 if default of driver is to be + * used. + * @drive_strength: is the preferred drive_strength to be used for the SDIO + * pins. If 0 then a default value will be used. This is + * the target drive strength, the exact drive strength + * which will be used depends on the capabilities of the + * device. 
+ * @oob_irq_supported: does the board have support for OOB interrupts. SDIO + * in-band interrupts are relatively slow and for having + * less overhead on interrupt processing an out of band + * interrupt can be used. If the HW supports this then + * enable this by setting this field to true and configure + * the oob related fields. + * @oob_irq_nr, + * @oob_irq_flags: the OOB interrupt information. The values are used for + * registering the irq using request_irq function. + * @broken_sg_support: flag for broken sg list support of SDIO host controller. + * Set this to true if the SDIO host controller has higher + * align requirement than 32 bytes for each scatterlist + * item. + * @sd_head_align: alignment requirement for start of data buffer. + * @sd_sgentry_align: length alignment requirement for each sg entry. + * @reset: This function can get called if the device communication + * broke down. This functionality is particularly useful in + * case of SDIO type devices. It is possible to reset a + * dongle via sdio data interface, but it requires that + * this is fully functional. This function is chip/module + * specific and this function should return only after the + * complete reset has completed. + */ +struct brcmfmac_sdio_pd { + int txglomsz; + unsigned int drive_strength; + bool oob_irq_supported; + unsigned int oob_irq_nr; + unsigned long oob_irq_flags; + bool broken_sg_support; + unsigned short sd_head_align; + unsigned short sd_sgentry_align; + void (*reset)(void); +}; + +/** + * struct brcmfmac_pd_cc_entry - Struct for translating user space country code + * (iso3166) to firmware country code and + * revision. + * + * @iso3166: iso3166 alpha 2 country code string. + * @cc: firmware country code string. + * @rev: firmware country code revision. + */ +struct brcmfmac_pd_cc_entry { + char iso3166[BRCMFMAC_COUNTRY_BUF_SZ]; + char cc[BRCMFMAC_COUNTRY_BUF_SZ]; + s32 rev; +}; + +/** + * struct brcmfmac_pd_cc - Struct for translating country codes as set by user + * space to a country code and rev which can be used by + * firmware. + * + * @table_size: number of entries in table (> 0) + * @table: array of 1 or more elements with translation information. + */ +struct brcmfmac_pd_cc { + int table_size; + struct brcmfmac_pd_cc_entry table[0]; +}; + +/** + * struct brcmfmac_pd_device - Device specific platform data. (id/rev/bus_type) + * is the unique identifier of the device. + * + * @id: ID of the device for which this data is. In case of SDIO + * or PCIE this is the chipid as identified by chip.c In + * case of USB this is the chipid as identified by the + * device query. + * @rev: chip revision, see id. + * @bus_type: The type of bus. Some chipid/rev exist for different bus + * types. Each bus type has its own set of settings. + * @feature_disable: Bitmask of features to disable (override), See feature.c + * in brcmfmac for details. + * @country_codes: If available, pointer to struct for translating country + * codes. + * @bus: Bus specific (union) device settings. Currently only + * SDIO. + */ +struct brcmfmac_pd_device { + unsigned int id; + unsigned int rev; + enum brcmf_bus_type bus_type; + unsigned int feature_disable; + struct brcmfmac_pd_cc *country_codes; + union { + struct brcmfmac_sdio_pd sdio; + } bus; +}; + +/** + * struct brcmfmac_platform_data - BRCMFMAC specific platform data. + * + * @power_on: This function is called by the brcmfmac driver when the module + * gets loaded. This can be particularly useful for low power + * devices. 
The platform spcific routine may for example decide to + * power up the complete device. If there is no use-case for this + * function then provide NULL. + * @power_off: This function is called by the brcmfmac when the module gets + * unloaded. At this point the devices can be powered down or + * otherwise be reset. So if an actual power_off is not supported + * but reset is supported by the devices then reset the devices + * when this function gets called. This can be particularly useful + * for low power devices. If there is no use-case for this + * function then provide NULL. + */ +struct brcmfmac_platform_data { + void (*power_on)(void); + void (*power_off)(void); + char *fw_alternative_path; + int device_count; + struct brcmfmac_pd_device devices[0]; +}; + + +#endif /* _LINUX_BRCMFMAC_PLATFORM_H */ diff --git a/include/linux/platform_data/microread.h b/include/linux/platform_data/microread.h deleted file mode 100644 index ca13992089b8..000000000000 --- a/include/linux/platform_data/microread.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Driver include for the Inside Secure microread NFC Chip. - * - * Copyright (C) 2011 Tieto Poland - * Copyright (C) 2012 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _MICROREAD_H -#define _MICROREAD_H - -#include <linux/i2c.h> - -#define MICROREAD_DRIVER_NAME "microread" - -/* board config platform data for microread */ -struct microread_nfc_platform_data { - unsigned int rst_gpio; - unsigned int irq_gpio; - unsigned int ioh_gpio; -}; - -#endif /* _MICROREAD_H */ diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h index aed170588b74..698d0d59db76 100644 --- a/include/linux/platform_data/ntc_thermistor.h +++ b/include/linux/platform_data/ntc_thermistor.h @@ -28,6 +28,7 @@ enum ntc_thermistor_type { TYPE_NCPXXWL333, TYPE_B57330V2103, TYPE_NCPXXWF104, + TYPE_NCPXXXH103, }; struct ntc_thermistor_platform_data { diff --git a/include/linux/platform_data/sa11x0-serial.h b/include/linux/platform_data/sa11x0-serial.h index 4504d5d592f0..009e1d83fe39 100644 --- a/include/linux/platform_data/sa11x0-serial.h +++ b/include/linux/platform_data/sa11x0-serial.h @@ -26,8 +26,12 @@ struct sa1100_port_fns { void sa1100_register_uart_fns(struct sa1100_port_fns *fns); void sa1100_register_uart(int idx, int port); #else -#define sa1100_register_uart_fns(fns) do { } while (0) -#define sa1100_register_uart(idx,port) do { } while (0) +static inline void sa1100_register_uart_fns(struct sa1100_port_fns *fns) +{ +} +static inline void sa1100_register_uart(int idx, int port) +{ +} #endif #endif diff --git a/include/linux/platform_data/serial-omap.h b/include/linux/platform_data/serial-omap.h index d09275f3cde3..2ba2c34ca3d3 100644 --- a/include/linux/platform_data/serial-omap.h +++ b/include/linux/platform_data/serial-omap.h @@ -21,7 +21,7 @@ #include <linux/device.h> 
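Stepping back to the brcmfmac platform data introduced above: the inline usage example carried by the deleted brcmfmac-sdio.h has no counterpart in the new header. Under the new structures, a board file would plausibly register the data along these lines (adapted from the old example; the power hooks and the choice to provide no per-device entries are board specific and purely illustrative):

#include <linux/platform_device.h>
#include <linux/platform_data/brcmfmac.h>

/* Hypothetical board support code. */
static void board_wifi_power_on(void)
{
	/* e.g. drive the WLAN regulator/GPIO on */
}

static void board_wifi_power_off(void)
{
}

static struct brcmfmac_platform_data brcmf_pdata = {
	.power_on	= board_wifi_power_on,
	.power_off	= board_wifi_power_off,
	.device_count	= 0,	/* no per-chip tuning in this minimal sketch */
};

static struct platform_device brcmf_device = {
	.name			= BRCMFMAC_PDATA_NAME,
	.id			= PLATFORM_DEVID_NONE,
	.dev.platform_data	= &brcmf_pdata,
};

/* from board init code: platform_device_register(&brcmf_device); */

As the header's own note points out, when brcmfmac is built into the kernel this registration must run earlier than device_initcall, otherwise the driver probes without picking up the data.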
#include <linux/pm_qos.h> -#define DRIVER_NAME "omap_uart" +#define OMAP_SERIAL_DRIVER_NAME "omap_uart" /* * Use tty device name as ttyO, [O -> OMAP] diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index db21d3995f7e..49cd8890b873 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -19,6 +19,8 @@ /* Defines used for the flags field in the struct generic_pm_domain */ #define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ +#define GENPD_MAX_NUM_STATES 8 /* Number of possible low power states */ + enum gpd_status { GPD_STATE_ACTIVE = 0, /* PM domain is active */ GPD_STATE_POWER_OFF, /* PM domain is off */ @@ -37,6 +39,11 @@ struct gpd_dev_ops { bool (*active_wakeup)(struct device *dev); }; +struct genpd_power_state { + s64 power_off_latency_ns; + s64 power_on_latency_ns; +}; + struct generic_pm_domain { struct dev_pm_domain domain; /* PM domain operations */ struct list_head gpd_list_node; /* Node in the global PM domains list */ @@ -54,9 +61,7 @@ struct generic_pm_domain { unsigned int prepared_count; /* Suspend counter of prepared devices */ bool suspend_power_off; /* Power status before system suspend */ int (*power_off)(struct generic_pm_domain *domain); - s64 power_off_latency_ns; int (*power_on)(struct generic_pm_domain *domain); - s64 power_on_latency_ns; struct gpd_dev_ops dev_ops; s64 max_off_time_ns; /* Maximum allowed "suspended" time. */ bool max_off_time_changed; @@ -66,6 +71,10 @@ struct generic_pm_domain { void (*detach_dev)(struct generic_pm_domain *domain, struct device *dev); unsigned int flags; /* Bit field of configs for genpd */ + struct genpd_power_state states[GENPD_MAX_NUM_STATES]; + unsigned int state_count; /* number of states */ + unsigned int state_idx; /* state that genpd will go to when off */ + }; static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 95403d2ccaf5..cccaf4a29e9f 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -34,6 +34,8 @@ bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp); int dev_pm_opp_get_opp_count(struct device *dev); unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev); +unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev); +unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev); struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev); struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, @@ -60,6 +62,9 @@ int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, void dev_pm_opp_put_supported_hw(struct device *dev); int dev_pm_opp_set_prop_name(struct device *dev, const char *name); void dev_pm_opp_put_prop_name(struct device *dev); +int dev_pm_opp_set_regulator(struct device *dev, const char *name); +void dev_pm_opp_put_regulator(struct device *dev); +int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); #else static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) { @@ -86,6 +91,16 @@ static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) return 0; } +static inline unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) +{ + return 0; +} + +static inline unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev) +{ + return 0; +} + static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) { return NULL; @@ -151,6 +166,18 @@ static inline int dev_pm_opp_set_prop_name(struct device *dev, const char 
*name) static inline void dev_pm_opp_put_prop_name(struct device *dev) {} +static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name) +{ + return -EINVAL; +} + +static inline void dev_pm_opp_put_regulator(struct device *dev) {} + +static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) +{ + return -EINVAL; +} + #endif /* CONFIG_PM_OPP */ #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) diff --git a/include/linux/pmem.h b/include/linux/pmem.h index 7c3d11a6b4ad..3ec5309e29f3 100644 --- a/include/linux/pmem.h +++ b/include/linux/pmem.h @@ -58,6 +58,11 @@ static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size) { BUG(); } + +static inline void arch_invalidate_pmem(void __pmem *addr, size_t size) +{ + BUG(); +} #endif /* @@ -186,6 +191,20 @@ static inline void clear_pmem(void __pmem *addr, size_t size) } /** + * invalidate_pmem - flush a pmem range from the cache hierarchy + * @addr: virtual start address + * @size: bytes to invalidate (internally aligned to cache line size) + * + * For platforms that support clearing poison this flushes any poisoned + * ranges out of the cache + */ +static inline void invalidate_pmem(void __pmem *addr, size_t size) +{ + if (arch_has_pmem_api()) + arch_invalidate_pmem(addr, size); +} + +/** * wb_cache_pmem - write back processor cache for PMEM memory range * @addr: virtual start address * @size: number of bytes to write back diff --git a/include/linux/poison.h b/include/linux/poison.h index 4a27153574e2..51334edec506 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -30,7 +30,11 @@ #define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA) /********** mm/debug-pagealloc.c **********/ +#ifdef CONFIG_PAGE_POISONING_ZERO +#define PAGE_POISON 0x00 +#else #define PAGE_POISON 0xaa +#endif /********** mm/page_alloc.c ************/ diff --git a/include/linux/poll.h b/include/linux/poll.h index c08386fb3e08..9fb4f40d9a26 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -96,7 +96,7 @@ extern void poll_initwait(struct poll_wqueues *pwq); extern void poll_freewait(struct poll_wqueues *pwq); extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state, ktime_t *expires, unsigned long slack); -extern long select_estimate_accuracy(struct timespec *tv); +extern u64 select_estimate_accuracy(struct timespec *tv); static inline int poll_schedule(struct poll_wqueues *pwq, int state) diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 907f3fd191ac..62d44c176071 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -128,9 +128,6 @@ void posix_cpu_timer_schedule(struct k_itimer *timer); void run_posix_cpu_timers(struct task_struct *task); void posix_cpu_timers_exit(struct task_struct *task); void posix_cpu_timers_exit_group(struct task_struct *task); - -bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk); - void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, cputime_t *newval, cputime_t *oldval); diff --git a/include/linux/power/bq24735-charger.h b/include/linux/power/bq24735-charger.h index f536164a6069..6b750c1a45fa 100644 --- a/include/linux/power/bq24735-charger.h +++ b/include/linux/power/bq24735-charger.h @@ -32,6 +32,8 @@ struct bq24735_platform { int status_gpio_active_low; bool status_gpio_valid; + bool ext_control; + char **supplied_to; size_t num_supplicants; }; diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index 
998d8f1c3c91..b50c0492629d 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -49,6 +49,7 @@ struct bq27xxx_reg_cache { struct bq27xxx_device_info { struct device *dev; + int id; enum bq27xxx_chip chip; const char *name; struct bq27xxx_access_methods bus; diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index ef9f1592185d..751061790626 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -163,6 +163,9 @@ enum power_supply_type { POWER_SUPPLY_TYPE_USB_DCP, /* Dedicated Charging Port */ POWER_SUPPLY_TYPE_USB_CDP, /* Charging Downstream Port */ POWER_SUPPLY_TYPE_USB_ACA, /* Accessory Charger Adapters */ + POWER_SUPPLY_TYPE_USB_TYPE_C, /* Type C Port */ + POWER_SUPPLY_TYPE_USB_PD, /* Power Delivery Port */ + POWER_SUPPLY_TYPE_USB_PD_DRP, /* PD Dual Role Port */ }; enum power_supply_notifier_events { diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h index 54bf1484d41f..35ac903956c7 100644 --- a/include/linux/pps_kernel.h +++ b/include/linux/pps_kernel.h @@ -111,22 +111,17 @@ static inline void timespec_to_pps_ktime(struct pps_ktime *kt, kt->nsec = ts.tv_nsec; } -#ifdef CONFIG_NTP_PPS - static inline void pps_get_ts(struct pps_event_time *ts) { - ktime_get_raw_and_real_ts64(&ts->ts_raw, &ts->ts_real); -} + struct system_time_snapshot snap; -#else /* CONFIG_NTP_PPS */ - -static inline void pps_get_ts(struct pps_event_time *ts) -{ - ktime_get_real_ts64(&ts->ts_real); + ktime_get_snapshot(&snap); + ts->ts_real = ktime_to_timespec64(snap.real); +#ifdef CONFIG_NTP_PPS + ts->ts_raw = ktime_to_timespec64(snap.raw); +#endif } -#endif /* CONFIG_NTP_PPS */ - /* Subtract known time delay from PPS event time(s) */ static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta) { diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index 9c9d6c154c8e..4660aaa3195e 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h @@ -76,7 +76,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz, struct ramoops_platform_data { unsigned long mem_size; - unsigned long mem_address; + phys_addr_t mem_address; unsigned int mem_type; unsigned long record_size; unsigned long console_size; diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index b8b73066d137..6b15e168148a 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -38,6 +38,7 @@ struct ptp_clock_request { }; }; +struct system_device_crosststamp; /** * struct ptp_clock_info - decribes a PTP hardware clock * @@ -67,6 +68,11 @@ struct ptp_clock_request { * @gettime64: Reads the current time from the hardware clock. * parameter ts: Holds the result. * + * @getcrosststamp: Reads the current time from the hardware clock and + * system clock simultaneously. + * parameter cts: Contains timestamp (device,system) pair, + * where system time is realtime and monotonic. + * * @settime64: Set the current time on the hardware clock. * parameter ts: Time value to set. 
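The new getcrosststamp() hook is how a clock driver exposes simultaneous device/system timestamps. A plausible implementation delegates to get_device_system_crosststamp() from linux/timekeeping.h and supplies a hardware-specific callback that samples the device clock together with the correlated system counter; everything below is a sketch, and filling in struct system_counterval_t is entirely device specific:

#include <linux/ptp_clock_kernel.h>
#include <linux/timekeeping.h>

/* Hypothetical: sample device time and the correlated system counter. */
static int example_get_syncdevicetime(ktime_t *device_time,
				      struct system_counterval_t *system_counter,
				      void *ctx)
{
	/* ... read the hardware, fill *device_time and *system_counter ... */
	return 0;
}

static int example_phc_getcrosststamp(struct ptp_clock_info *ptp,
				      struct system_device_crosststamp *cts)
{
	return get_device_system_crosststamp(example_get_syncdevicetime,
					     ptp, NULL, cts);
}

Passing NULL for the history snapshot is the simple case used here.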
* @@ -105,6 +111,8 @@ struct ptp_clock_info { int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta); int (*adjtime)(struct ptp_clock_info *ptp, s64 delta); int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts); + int (*getcrosststamp)(struct ptp_clock_info *ptp, + struct system_device_crosststamp *cts); int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts); int (*enable)(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on); diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index c2f2574ff61c..2a097d176ba9 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h @@ -197,6 +197,7 @@ enum pxa_ssp_type { QUARK_X1000_SSP, LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */ LPSS_BYT_SSP, + LPSS_BSW_SSP, LPSS_SPT_SSP, LPSS_BXT_SSP, }; diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 1d1ba2c5ee7a..53ecb37ae563 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -11,9 +11,11 @@ #define CORE_SPQE_PAGE_SIZE_BYTES 4096 +#define X_FINAL_CLEANUP_AGG_INT 1 + #define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 4 -#define FW_REVISION_VERSION 2 +#define FW_MINOR_VERSION 7 +#define FW_REVISION_VERSION 3 #define FW_ENGINEERING_VERSION 0 /***********************/ @@ -152,6 +154,9 @@ /* number of queues in a PF queue group */ #define QM_PF_QUEUE_GROUP_SIZE 8 +/* the size of a single queue element in bytes */ +#define QM_PQ_ELEMENT_SIZE 4 + /* base number of Tx PQs in the CM PQ representation. * should be used when storing PQ IDs in CM PQ registers and context */ @@ -285,6 +290,16 @@ #define PXP_NUM_ILT_RECORDS_K2 11000 #define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) +#define SDM_COMP_TYPE_NONE 0 +#define SDM_COMP_TYPE_WAKE_THREAD 1 +#define SDM_COMP_TYPE_AGG_INT 2 +#define SDM_COMP_TYPE_CM 3 +#define SDM_COMP_TYPE_LOADER 4 +#define SDM_COMP_TYPE_PXP 5 +#define SDM_COMP_TYPE_INDICATE_ERROR 6 +#define SDM_COMP_TYPE_RELEASE_THREAD 7 +#define SDM_COMP_TYPE_RAM 8 + /******************/ /* PBF CONSTANTS */ /******************/ @@ -335,7 +350,7 @@ struct event_ring_entry { /* Multi function mode */ enum mf_mode { - SF, + ERROR_MODE /* Unsupported mode */, MF_OVLAN, MF_NPAR, MAX_MF_MODE @@ -606,4 +621,19 @@ struct status_block { #define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 }; +struct tunnel_parsing_flags { + u8 flags; +#define TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 +#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0 +#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1 +#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2 +#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3 +#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3 +#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1 +#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5 +#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1 +#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6 +#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1 +#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7 +}; #endif /* __COMMON_HSI__ */ diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index 320b3373ac1d..092cb0c1afcb 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -17,10 +17,8 @@ #define ETH_MAX_RAMROD_PER_CON 8 #define ETH_TX_BD_PAGE_SIZE_BYTES 4096 #define ETH_RX_BD_PAGE_SIZE_BYTES 4096 -#define ETH_RX_SGE_PAGE_SIZE_BYTES 4096 #define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 #define ETH_RX_NUM_NEXT_PAGE_BDS 2 -#define ETH_RX_NUM_NEXT_PAGE_SGES 2 #define 
ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 #define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 @@ -34,7 +32,8 @@ #define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS -#define ETH_REG_CQE_PBL_SIZE 3 +/* Maximum number of buffers, used for RX packet placement */ +#define ETH_RX_MAX_BUFF_PER_PKT 5 /* num of MAC/VLAN filters */ #define ETH_NUM_MAC_FILTERS 512 @@ -54,9 +53,9 @@ /* TPA constants */ #define ETH_TPA_MAX_AGGS_NUM 64 -#define ETH_TPA_CQE_START_SGL_SIZE 3 -#define ETH_TPA_CQE_CONT_SGL_SIZE 6 -#define ETH_TPA_CQE_END_SGL_SIZE 4 +#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT +#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 +#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 /* Queue Zone sizes */ #define TSTORM_QZONE_SIZE 0 @@ -74,18 +73,18 @@ struct coalescing_timeset { struct eth_tx_1st_bd_flags { u8 bitfields; +#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0 #define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0 +#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1 #define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 1 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2 #define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 2 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3 #define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 3 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4 #define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 4 -#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 5 +#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5 #define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 #define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 #define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 @@ -97,38 +96,44 @@ struct eth_tx_data_1st_bd { __le16 vlan; u8 nbds; struct eth_tx_1st_bd_flags bd_flags; - __le16 fw_use_only; + __le16 bitfields; +#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT 0 +#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1 +#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_MASK 0x3FFF +#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_SHIFT 2 }; /* The parsing information data for the second tx bd of a given packet. 
*/ struct eth_tx_data_2nd_bd { __le16 tunn_ip_size; - __le16 bitfields; -#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF -#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 -#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 -#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 - __le16 bitfields2; + __le16 bitfields1; #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 #define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 #define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6 +#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8 #define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 -#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 8 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 10 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11 #define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 11 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12 #define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 12 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13 #define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 13 +#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14 #define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 14 -#define ETH_TX_DATA_2ND_BD_RESERVED1_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_RESERVED1_SHIFT 15 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15 + __le16 bitfields2; +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 +#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 }; /* Regular ETH Rx FP CQE. */ @@ -145,11 +150,68 @@ struct eth_fast_path_rx_reg_cqe { struct parsing_and_err_flags pars_flags; __le16 vlan_tag; __le32 rss_hash; - __le16 len_on_bd; + __le16 len_on_first_bd; u8 placement_offset; - u8 reserved; - __le16 pbl[ETH_REG_CQE_PBL_SIZE]; - u8 reserved1[10]; + struct tunnel_parsing_flags tunnel_pars_flags; + u8 bd_num; + u8 reserved[7]; + u32 fw_debug; + u8 reserved1[3]; + u8 flags; +#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1 +#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0 +#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1 +#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT 1 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK 0x3F +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT 2 +}; + +/* TPA-continue ETH Rx FP CQE. */ +struct eth_fast_path_rx_tpa_cont_cqe { + u8 type; + u8 tpa_agg_index; + __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; + u8 reserved[5]; + u8 reserved1; + __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; +}; + +/* TPA-end ETH Rx FP CQE. */ +struct eth_fast_path_rx_tpa_end_cqe { + u8 type; + u8 tpa_agg_index; + __le16 total_packet_len; + u8 num_of_bds; + u8 end_reason; + __le16 num_of_coalesced_segs; + __le32 ts_delta; + __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE]; + u8 reserved1[3]; + u8 reserved2; + __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE]; +}; + +/* TPA-start ETH Rx FP CQE. 
*/ +struct eth_fast_path_rx_tpa_start_cqe { + u8 type; + u8 bitfields; +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 + __le16 seg_len; + struct parsing_and_err_flags pars_flags; + __le16 vlan_tag; + __le32 rss_hash; + __le16 len_on_first_bd; + u8 placement_offset; + struct tunnel_parsing_flags tunnel_pars_flags; + u8 tpa_agg_index; + u8 header_len; + __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]; + u32 fw_debug; }; /* The L4 pseudo checksum mode for Ethernet */ @@ -168,13 +230,26 @@ struct eth_slow_path_rx_cqe { u8 type; u8 ramrod_cmd_id; u8 error_flag; - u8 reserved[27]; + u8 reserved[25]; __le16 echo; + u8 reserved1; + u8 flags; +/* for PMD mode - valid indication */ +#define ETH_SLOW_PATH_RX_CQE_VALID_MASK 0x1 +#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT 0 +/* for PMD mode - valid toggle indication */ +#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK 0x1 +#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1 +#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK 0x3F +#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT 2 }; /* union for all ETH Rx CQE types */ union eth_rx_cqe { struct eth_fast_path_rx_reg_cqe fast_path_regular; + struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start; + struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont; + struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end; struct eth_slow_path_rx_cqe slow_path; }; @@ -183,15 +258,18 @@ enum eth_rx_cqe_type { ETH_RX_CQE_TYPE_UNUSED, ETH_RX_CQE_TYPE_REGULAR, ETH_RX_CQE_TYPE_SLOW_PATH, + ETH_RX_CQE_TYPE_TPA_START, + ETH_RX_CQE_TYPE_TPA_CONT, + ETH_RX_CQE_TYPE_TPA_END, MAX_ETH_RX_CQE_TYPE }; /* ETH Rx producers data */ struct eth_rx_prod_data { __le16 bd_prod; - __le16 sge_prod; __le16 cqe_prod; __le16 reserved; + __le16 reserved1; }; /* The first tx bd of a given packet */ @@ -211,12 +289,17 @@ struct eth_tx_2nd_bd { /* The parsing information data for the third tx bd of a given packet. */ struct eth_tx_data_3rd_bd { __le16 lso_mss; - u8 bitfields; + __le16 bitfields; #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 #define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF #define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 - u8 resereved0[3]; +#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F +#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 + u8 tunn_l4_hdr_start_offset_w; + u8 tunn_hdr_size_w; }; /* The third tx bd of a given packet */ @@ -226,12 +309,24 @@ struct eth_tx_3rd_bd { struct eth_tx_data_3rd_bd data; }; +/* Complementary information for the regular tx bd of a given packet. 
*/ +struct eth_tx_data_bd { + __le16 reserved0; + __le16 bitfields; +#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF +#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0 +#define ETH_TX_DATA_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F +#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9 + __le16 reserved3; +}; + /* The common non-special TX BD ring element */ struct eth_tx_bd { struct regpair addr; __le16 nbytes; - __le16 reserved0; - __le32 reserved1; + struct eth_tx_data_bd data; }; union eth_tx_bd_types { diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 41b9049b57e2..5f8fcaaa6504 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -19,6 +19,10 @@ /* dma_addr_t manip */ #define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) #define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) +#define DMA_REGPAIR_LE(x, val) do { \ + (x).hi = DMA_HI_LE((val)); \ + (x).lo = DMA_LO_LE((val)); \ + } while (0) #define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) #define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t) diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 81ab178e31c1..e1d69834a11f 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -33,10 +33,20 @@ struct qed_update_vport_params { u8 vport_id; u8 update_vport_active_flg; u8 vport_active_flg; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; u8 update_rss_flg; struct qed_update_vport_rss_params rss_params; }; +struct qed_start_vport_params { + bool remove_inner_vlan; + bool gro_enable; + bool drop_ttl0; + u8 vport_id; + u16 mtu; +}; + struct qed_stop_rxq_params { u8 rss_id; u8 rx_queue_id; @@ -116,9 +126,7 @@ struct qed_eth_ops { void *cookie); int (*vport_start)(struct qed_dev *cdev, - u8 vport_id, u16 mtu, - u8 drop_ttl0_flg, - u8 inner_vlan_removal_en_flg); + struct qed_start_vport_params *params); int (*vport_stop)(struct qed_dev *cdev, u8 vport_id); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index d4a32e878180..1f7599c77cd4 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -80,7 +80,7 @@ struct qed_dev_info { u8 num_hwfns; u8 hw_mac[ETH_ALEN]; - bool is_mf; + bool is_mf_default; /* FW version */ u16 fw_major; @@ -360,6 +360,12 @@ enum DP_MODULE { /* to be added...up to 0x8000000 */ }; +enum qed_mf_mode { + QED_MF_DEFAULT, + QED_MF_OVLAN, + QED_MF_NPAR, +}; + struct qed_eth_stats { u64 no_buff_discards; u64 packet_too_big_discard; @@ -440,6 +446,12 @@ struct qed_eth_stats { #define RX_PI 0 #define TX_PI(tc) (RX_PI + 1 + tc) +struct qed_sb_cnt_info { + int sb_cnt; + int sb_iov_cnt; + int sb_free_blk; +}; + static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) { u32 prod = 0; diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h index bd466439c588..3bdfa70bc642 100644 --- a/include/linux/quicklist.h +++ b/include/linux/quicklist.h @@ -5,7 +5,7 @@ * as needed after allocation when they are freed. Per cpu lists of pages * are kept that only contain node local pages. * - * (C) 2007, SGI. Christoph Lameter <clameter@sgi.com> + * (C) 2007, SGI. 
Christoph Lameter <cl@linux.com> */ #include <linux/kernel.h> #include <linux/gfp.h> diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index f54be7082207..51a97ac8bfbf 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -21,6 +21,7 @@ #ifndef _LINUX_RADIX_TREE_H #define _LINUX_RADIX_TREE_H +#include <linux/bitops.h> #include <linux/preempt.h> #include <linux/types.h> #include <linux/bug.h> @@ -270,8 +271,15 @@ static inline void radix_tree_replace_slot(void **pslot, void *item) } int __radix_tree_create(struct radix_tree_root *root, unsigned long index, - struct radix_tree_node **nodep, void ***slotp); -int radix_tree_insert(struct radix_tree_root *, unsigned long, void *); + unsigned order, struct radix_tree_node **nodep, + void ***slotp); +int __radix_tree_insert(struct radix_tree_root *, unsigned long index, + unsigned order, void *); +static inline int radix_tree_insert(struct radix_tree_root *root, + unsigned long index, void *entry) +{ + return __radix_tree_insert(root, index, 0, entry); +} void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, struct radix_tree_node **nodep, void ***slotp); void *radix_tree_lookup(struct radix_tree_root *, unsigned long); @@ -395,6 +403,22 @@ void **radix_tree_iter_retry(struct radix_tree_iter *iter) } /** + * radix_tree_iter_next - resume iterating when the chunk may be invalid + * @iter: iterator state + * + * If the iterator needs to release then reacquire a lock, the chunk may + * have been invalidated by an insertion or deletion. Call this function + * to continue the iteration from the next index. + */ +static inline __must_check +void **radix_tree_iter_next(struct radix_tree_iter *iter) +{ + iter->next_index = iter->index + 1; + iter->tags = 0; + return NULL; +} + +/** * radix_tree_chunk_size - get current chunk size * * @iter: pointer to radix tree iterator diff --git a/include/linux/random.h b/include/linux/random.h index a75840c1aa71..9c29122037f9 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -34,6 +34,7 @@ extern const struct file_operations random_fops, urandom_fops; #endif unsigned int get_random_int(void); +unsigned long get_random_long(void); unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len); u32 prandom_u32(void); diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 14ec1652daf4..17d4f849c65e 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -319,6 +319,27 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, }) /** + * list_next_or_null_rcu - get the first element from a list + * @head: the head for the list. + * @ptr: the list head to take the next element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_head within the struct. + * + * Note that if the ptr is at the end of the list, NULL is returned. + * + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). + */ +#define list_next_or_null_rcu(head, ptr, type, member) \ +({ \ + struct list_head *__head = (head); \ + struct list_head *__ptr = (ptr); \ + struct list_head *__next = READ_ONCE(__ptr->next); \ + likely(__next != __head) ? list_entry_rcu(__next, type, \ + member) : NULL; \ +}) + +/** * list_for_each_entry_rcu - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. 
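As a minimal illustration of the list_next_or_null_rcu() helper documented above (the struct foo list and the foo_peek_next_value() function are invented for this sketch, not part of the patch), a reader under rcu_read_lock() can step from a known element to its successor and treat NULL as "no further element":

#include <linux/errno.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct foo {
	int value;
	struct list_head node;		/* linked on foo_list */
};

static LIST_HEAD(foo_list);

static int foo_peek_next_value(struct foo *cur)
{
	struct foo *next;
	int ret = -ENOENT;

	rcu_read_lock();
	/* NULL when cur is the last element of foo_list */
	next = list_next_or_null_rcu(&foo_list, &cur->node, struct foo, node);
	if (next)
		ret = next->value;
	rcu_read_unlock();

	return ret;
}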
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 14e6f47ee16f..2657aff2725b 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -332,9 +332,7 @@ void rcu_init(void); void rcu_sched_qs(void); void rcu_bh_qs(void); void rcu_check_callbacks(int user); -struct notifier_block; -int rcu_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu); +void rcu_report_dead(unsigned int cpu); #ifndef CONFIG_TINY_RCU void rcu_end_inkernel_boot(void); @@ -360,8 +358,6 @@ void rcu_user_exit(void); #else static inline void rcu_user_enter(void) { } static inline void rcu_user_exit(void) { } -static inline void rcu_user_hooks_switch(struct task_struct *prev, - struct task_struct *next) { } #endif /* CONFIG_NO_HZ_FULL */ #ifdef CONFIG_RCU_NOCB_CPU diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 18394343f489..3dc08ce15426 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -65,6 +65,36 @@ struct reg_sequence { unsigned int delay_us; }; +#define regmap_update_bits(map, reg, mask, val) \ + regmap_update_bits_base(map, reg, mask, val, NULL, false, false) +#define regmap_update_bits_async(map, reg, mask, val)\ + regmap_update_bits_base(map, reg, mask, val, NULL, true, false) +#define regmap_update_bits_check(map, reg, mask, val, change)\ + regmap_update_bits_base(map, reg, mask, val, change, false, false) +#define regmap_update_bits_check_async(map, reg, mask, val, change)\ + regmap_update_bits_base(map, reg, mask, val, change, true, false) + +#define regmap_write_bits(map, reg, mask, val) \ + regmap_update_bits_base(map, reg, mask, val, NULL, false, true) + +#define regmap_field_write(field, val) \ + regmap_field_update_bits_base(field, ~0, val, NULL, false, false) +#define regmap_field_force_write(field, val) \ + regmap_field_update_bits_base(field, ~0, val, NULL, false, true) +#define regmap_field_update_bits(field, mask, val)\ + regmap_field_update_bits_base(field, mask, val, NULL, false, false) +#define regmap_field_force_update_bits(field, mask, val) \ + regmap_field_update_bits_base(field, mask, val, NULL, false, true) + +#define regmap_fields_write(field, id, val) \ + regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, false) +#define regmap_fields_force_write(field, id, val) \ + regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, true) +#define regmap_fields_update_bits(field, id, mask, val)\ + regmap_fields_update_bits_base(field, id, mask, val, NULL, false, false) +#define regmap_fields_force_update_bits(field, id, mask, val) \ + regmap_fields_update_bits_base(field, id, mask, val, NULL, false, true) + #ifdef CONFIG_REGMAP enum regmap_endian { @@ -162,7 +192,7 @@ typedef void (*regmap_unlock)(void *); * This field is a duplicate of a similar file in * 'struct regmap_bus' and serves exact same purpose. * Use it only for "no-bus" cases. - * @max_register: Optional, specifies the maximum valid register index. + * @max_register: Optional, specifies the maximum valid register address. * @wr_table: Optional, points to a struct regmap_access_table specifying * valid ranges for write access. * @rd_table: As above, for read access. 
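The whole update-bits family above now expands to a single regmap_update_bits_base() call, so existing callers keep their signatures. A hedged sketch of such a caller (register and bit values invented for illustration):

#include <linux/regmap.h>

#define DEMO_CTRL_REG		0x04	/* hypothetical register */
#define DEMO_CTRL_ENABLE	0x01	/* hypothetical enable bit */

static int demo_set_enable(struct regmap *map, bool enable)
{
	/*
	 * Expands to
	 * regmap_update_bits_base(map, reg, mask, val, NULL, false, false):
	 * synchronous, no change reporting, no forced write.
	 */
	return regmap_update_bits(map, DEMO_CTRL_REG, DEMO_CTRL_ENABLE,
				  enable ? DEMO_CTRL_ENABLE : 0);
}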
@@ -691,18 +721,9 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, size_t val_len); int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, size_t val_count); -int regmap_update_bits(struct regmap *map, unsigned int reg, - unsigned int mask, unsigned int val); -int regmap_write_bits(struct regmap *map, unsigned int reg, - unsigned int mask, unsigned int val); -int regmap_update_bits_async(struct regmap *map, unsigned int reg, - unsigned int mask, unsigned int val); -int regmap_update_bits_check(struct regmap *map, unsigned int reg, - unsigned int mask, unsigned int val, - bool *change); -int regmap_update_bits_check_async(struct regmap *map, unsigned int reg, - unsigned int mask, unsigned int val, - bool *change); +int regmap_update_bits_base(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change, bool async, bool force); int regmap_get_val_bytes(struct regmap *map); int regmap_get_max_register(struct regmap *map); int regmap_get_reg_stride(struct regmap *map); @@ -770,18 +791,14 @@ struct regmap_field *devm_regmap_field_alloc(struct device *dev, void devm_regmap_field_free(struct device *dev, struct regmap_field *field); int regmap_field_read(struct regmap_field *field, unsigned int *val); -int regmap_field_write(struct regmap_field *field, unsigned int val); -int regmap_field_update_bits(struct regmap_field *field, - unsigned int mask, unsigned int val); - -int regmap_fields_write(struct regmap_field *field, unsigned int id, - unsigned int val); -int regmap_fields_force_write(struct regmap_field *field, unsigned int id, - unsigned int val); +int regmap_field_update_bits_base(struct regmap_field *field, + unsigned int mask, unsigned int val, + bool *change, bool async, bool force); int regmap_fields_read(struct regmap_field *field, unsigned int id, unsigned int *val); -int regmap_fields_update_bits(struct regmap_field *field, unsigned int id, - unsigned int mask, unsigned int val); +int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id, + unsigned int mask, unsigned int val, + bool *change, bool async, bool force); /** * Description of an IRQ for the generic regmap irq_chip. 
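The field-level helpers follow the same pattern: regmap_field_write() and regmap_field_update_bits() are now wrappers around regmap_field_update_bits_base(). A small sketch with an invented field layout, not taken from any real driver:

#include <linux/err.h>
#include <linux/regmap.h>

/* hypothetical 2-bit mode field occupying bits [3:2] of register 0x10 */
static const struct reg_field demo_mode_field = REG_FIELD(0x10, 2, 3);

static int demo_set_mode(struct device *dev, struct regmap *map,
			 unsigned int mode)
{
	struct regmap_field *field;

	field = devm_regmap_field_alloc(dev, map, demo_mode_field);
	if (IS_ERR(field))
		return PTR_ERR(field);

	/* wraps regmap_field_update_bits_base(field, ~0, mode, NULL, false, false) */
	return regmap_field_write(field, mode);
}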
@@ -868,6 +885,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, int irq_base, const struct regmap_irq_chip *chip, struct regmap_irq_chip_data **data); void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data); + +int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq, + int irq_flags, int irq_base, + const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data); +void devm_regmap_del_irq_chip(struct device *dev, int irq, + struct regmap_irq_chip_data *data); + int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data); int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq); struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data); @@ -937,42 +962,26 @@ static inline int regmap_bulk_read(struct regmap *map, unsigned int reg, return -EINVAL; } -static inline int regmap_update_bits(struct regmap *map, unsigned int reg, - unsigned int mask, unsigned int val) -{ - WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - -static inline int regmap_write_bits(struct regmap *map, unsigned int reg, - unsigned int mask, unsigned int val) -{ - WARN_ONCE(1, "regmap API is disabled"); - return -EINVAL; -} - -static inline int regmap_update_bits_async(struct regmap *map, - unsigned int reg, - unsigned int mask, unsigned int val) +static inline int regmap_update_bits_base(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change, bool async, bool force) { WARN_ONCE(1, "regmap API is disabled"); return -EINVAL; } -static inline int regmap_update_bits_check(struct regmap *map, - unsigned int reg, - unsigned int mask, unsigned int val, - bool *change) +static inline int regmap_field_update_bits_base(struct regmap_field *field, + unsigned int mask, unsigned int val, + bool *change, bool async, bool force) { WARN_ONCE(1, "regmap API is disabled"); return -EINVAL; } -static inline int regmap_update_bits_check_async(struct regmap *map, - unsigned int reg, - unsigned int mask, - unsigned int val, - bool *change) +static inline int regmap_fields_update_bits_base(struct regmap_field *field, + unsigned int id, + unsigned int mask, unsigned int val, + bool *change, bool async, bool force) { WARN_ONCE(1, "regmap API is disabled"); return -EINVAL; diff --git a/include/linux/regulator/act8865.h b/include/linux/regulator/act8865.h index 15fa8f2d35c9..2eb386017fa5 100644 --- a/include/linux/regulator/act8865.h +++ b/include/linux/regulator/act8865.h @@ -68,12 +68,12 @@ enum { * act8865_regulator_data - regulator data * @id: regulator id * @name: regulator name - * @platform_data: regulator init data + * @init_data: regulator init data */ struct act8865_regulator_data { int id; const char *name; - struct regulator_init_data *platform_data; + struct regulator_init_data *init_data; }; /** diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 16ac9e108806..cd271e89a7e6 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -93,6 +93,8 @@ struct regulator_linear_range { * @get_current_limit: Get the configured limit for a current-limited regulator. * @set_input_current_limit: Configure an input limit. * + * @set_active_discharge: Set active discharge enable/disable of regulators. + * * @set_mode: Set the configured operating mode for the regulator. * @get_mode: Get the configured operating mode for the regulator. 
* @get_status: Return actual (not as-configured) status of regulator, as a @@ -149,6 +151,7 @@ struct regulator_ops { int (*set_input_current_limit) (struct regulator_dev *, int lim_uA); int (*set_over_current_protection) (struct regulator_dev *); + int (*set_active_discharge) (struct regulator_dev *, bool enable); /* enable/disable regulator */ int (*enable) (struct regulator_dev *); @@ -266,6 +269,14 @@ enum regulator_type { * @bypass_mask: Mask for control when using regmap set_bypass * @bypass_val_on: Enabling value for control when using regmap set_bypass * @bypass_val_off: Disabling value for control when using regmap set_bypass + * @active_discharge_off: Enabling value for control when using regmap + * set_active_discharge + * @active_discharge_on: Disabling value for control when using regmap + * set_active_discharge + * @active_discharge_mask: Mask for control when using regmap + * set_active_discharge + * @active_discharge_reg: Register for control when using regmap + * set_active_discharge * * @enable_time: Time taken for initial enable of regulator (in uS). * @off_on_delay: guard time (in uS), before re-enabling a regulator @@ -315,6 +326,10 @@ struct regulator_desc { unsigned int bypass_mask; unsigned int bypass_val_on; unsigned int bypass_val_off; + unsigned int active_discharge_on; + unsigned int active_discharge_off; + unsigned int active_discharge_mask; + unsigned int active_discharge_reg; unsigned int enable_time; @@ -447,6 +462,8 @@ int regulator_set_voltage_time_sel(struct regulator_dev *rdev, int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable); int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable); +int regulator_set_active_discharge_regmap(struct regulator_dev *rdev, + bool enable); void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data); #endif diff --git a/include/linux/regulator/lp872x.h b/include/linux/regulator/lp872x.h index 132e05c46661..6029279f4eed 100644 --- a/include/linux/regulator/lp872x.h +++ b/include/linux/regulator/lp872x.h @@ -18,6 +18,9 @@ #define LP872X_MAX_REGULATORS 9 +#define LP8720_ENABLE_DELAY 200 +#define LP8725_ENABLE_DELAY 30000 + enum lp872x_regulator_id { LP8720_ID_BASE, LP8720_ID_LDO1 = LP8720_ID_BASE, @@ -79,12 +82,14 @@ struct lp872x_regulator_data { * @update_config : if LP872X_GENERAL_CFG register is updated, set true * @regulator_data : platform regulator id and init data * @dvs : dvs data for buck voltage control + * @enable_gpio : gpio pin number for enable control */ struct lp872x_platform_data { u8 general_config; bool update_config; struct lp872x_regulator_data regulator_data[LP872X_MAX_REGULATORS]; struct lp872x_dvs *dvs; + int enable_gpio; }; #endif diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index a1067d0b3991..5d627c83a630 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -42,6 +42,13 @@ struct regulator; #define REGULATOR_CHANGE_DRMS 0x10 #define REGULATOR_CHANGE_BYPASS 0x20 +/* Regulator active discharge flags */ +enum regulator_active_discharge { + REGULATOR_ACTIVE_DISCHARGE_DEFAULT, + REGULATOR_ACTIVE_DISCHARGE_DISABLE, + REGULATOR_ACTIVE_DISCHARGE_ENABLE, +}; + /** * struct regulator_state - regulator state during low power system states * @@ -100,6 +107,9 @@ struct regulator_state { * @initial_state: Suspend state to set by default. * @initial_mode: Mode to set at startup. 
* @ramp_delay: Time to settle down after voltage change (unit: uV/us) + * @active_discharge: Enable/disable active discharge. The enum + * regulator_active_discharge values are used for + * initialisation. + * @enable_time: Turn-on time of the rails (unit: microseconds) */ struct regulation_constraints { @@ -140,6 +150,8 @@ struct regulation_constraints { unsigned int ramp_delay; unsigned int enable_time; + unsigned int active_discharge; + /* constraint flags */ unsigned always_on:1; /* regulator never off when system is on */ unsigned boot_on:1; /* bootloader/firmware enabled regulator */ diff --git a/include/linux/rfkill-gpio.h b/include/linux/rfkill-gpio.h deleted file mode 100644 index 20bcb55498cd..000000000000 --- a/include/linux/rfkill-gpio.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2011, NVIDIA Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - - -#ifndef __RFKILL_GPIO_H -#define __RFKILL_GPIO_H - -#include <linux/types.h> -#include <linux/rfkill.h> - -/** - * struct rfkill_gpio_platform_data - platform data for rfkill gpio device. - * for unused gpio's, the expected value is -1. - * @name: name for the gpio rf kill instance - */ - -struct rfkill_gpio_platform_data { - char *name; - enum rfkill_type type; -}; - -#endif /* __RFKILL_GPIO_H */ diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index d9010789b4e8..e6a0031d1b1f 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -104,7 +104,8 @@ int __must_check rfkill_register(struct rfkill *rfkill); * * Pause polling -- say transmitter is off for other reasons. * NOTE: not necessary for suspend/resume -- in that case the - * core stops polling anyway + * core stops polling anyway (but will also correctly handle + * the case of polling having been paused before suspend.) */ void rfkill_pause_polling(struct rfkill *rfkill); @@ -212,6 +213,15 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw); * @rfkill: rfkill struct to query */ bool rfkill_blocked(struct rfkill *rfkill); + +/** + * rfkill_find_type - Helper for finding rfkill type by name + * @name: the name of the type + * + * Returns enum rfkill_type that corresponds to the name.
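+ * For example, a @name of "wlan" is expected to map to RFKILL_TYPE_WLAN,
+ * while the !RFKILL stub further below always returns RFKILL_TYPE_ALL.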
+ */ +enum rfkill_type rfkill_find_type(const char *name); + #else /* !RFKILL */ static inline struct rfkill * __must_check rfkill_alloc(const char *name, @@ -268,6 +278,12 @@ static inline bool rfkill_blocked(struct rfkill *rfkill) { return false; } + +static inline enum rfkill_type rfkill_find_type(const char *name) +{ + return RFKILL_TYPE_ALL; +} + #endif /* RFKILL || RFKILL_MODULE */ diff --git a/include/linux/rmap.h b/include/linux/rmap.h index a07f42bedda3..49eb4f8ebac9 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -86,6 +86,7 @@ enum ttu_flags { TTU_MIGRATION = 2, /* migration mode */ TTU_MUNLOCK = 4, /* munlock mode */ TTU_LZFREE = 8, /* lazy free mode */ + TTU_SPLIT_HUGE_PMD = 16, /* split huge PMD if any */ TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */ TTU_IGNORE_ACCESS = (1 << 9), /* don't age */ @@ -93,6 +94,8 @@ enum ttu_flags { TTU_BATCH_FLUSH = (1 << 11), /* Batch TLB flushes where possible * and caller guarantees they will * do a final flush if necessary */ + TTU_RMAP_LOCKED = (1 << 12) /* do not grab rmap lock: + * caller holds it */ }; #ifdef CONFIG_MMU @@ -240,6 +243,8 @@ int page_mkclean(struct page *); */ int try_to_munlock(struct page *); +void remove_migration_ptes(struct page *old, struct page *new, bool locked); + /* * Called by memory-failure.c to kill processes. */ @@ -266,6 +271,7 @@ struct rmap_walk_control { }; int rmap_walk(struct page *page, struct rmap_walk_control *rwc); +int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc); #else /* !CONFIG_MMU */ diff --git a/include/linux/rmi.h b/include/linux/rmi.h new file mode 100644 index 000000000000..e0aca1476001 --- /dev/null +++ b/include/linux/rmi.h @@ -0,0 +1,359 @@ +/* + * Copyright (c) 2011-2016 Synaptics Incorporated + * Copyright (c) 2011 Unixphere + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#ifndef _RMI_H +#define _RMI_H +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/input.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/types.h> + +#define NAME_BUFFER_SIZE 256 + +/** + * struct rmi_2d_axis_alignment - target axis alignment + * @swap_axes: set to TRUE if desired to swap x- and y-axis + * @flip_x: set to TRUE if desired to flip direction on x-axis + * @flip_y: set to TRUE if desired to flip direction on y-axis + * @clip_x_low - reported X coordinates below this setting will be clipped to + * the specified value + * @clip_x_high - reported X coordinates above this setting will be clipped to + * the specified value + * @clip_y_low - reported Y coordinates below this setting will be clipped to + * the specified value + * @clip_y_high - reported Y coordinates above this setting will be clipped to + * the specified value + * @offset_x - this value will be added to all reported X coordinates + * @offset_y - this value will be added to all reported Y coordinates + * @rel_report_enabled - if set to true, the relative reporting will be + * automatically enabled for this sensor. + */ +struct rmi_2d_axis_alignment { + bool swap_axes; + bool flip_x; + bool flip_y; + u16 clip_x_low; + u16 clip_y_low; + u16 clip_x_high; + u16 clip_y_high; + u16 offset_x; + u16 offset_y; + u8 delta_x_threshold; + u8 delta_y_threshold; +}; + +/** This is used to override any hints an F11 2D sensor might have provided + * as to what type of sensor it is. 
+ * + * @rmi_f11_sensor_default - do not override, determine from F11_2D_QUERY14 if + * available. + * @rmi_f11_sensor_touchscreen - treat the sensor as a touchscreen (direct + * pointing). + * @rmi_f11_sensor_touchpad - treat the sensor as a touchpad (indirect + * pointing). + */ +enum rmi_sensor_type { + rmi_sensor_default = 0, + rmi_sensor_touchscreen, + rmi_sensor_touchpad +}; + +#define RMI_F11_DISABLE_ABS_REPORT BIT(0) + +/** + * struct rmi_2d_sensor_platform_data - overrides defaults for a 2D sensor. + * @axis_align - provides axis alignment overrides (see above). + * @sensor_type - Forces the driver to treat the sensor as an indirect + * pointing device (touchpad) rather than a direct pointing device + * (touchscreen). This is useful when F11_2D_QUERY14 register is not + * available. + * @disable_report_mask - Force data to not be reported even if it is supported + * by the firmware. + * @topbuttonpad - Used with the "5 buttons touchpads" found on the Lenovo 40 + * series + * @kernel_tracking - most modern RMI f11 firmwares implement Multifinger + * Type B protocol. However, there are some corner cases where the user + * triggers some jumps by tapping with two fingers on the touchpad. + * Use this setting and dmax to filter out these jumps. + * Also, when using an old sensor using MF Type A behavior, set to true to + * report an actual MT protocol B. + * @dmax - the maximum distance (in sensor units) the kernel tracking allows two + * distinct fingers to be considered the same. + */ +struct rmi_2d_sensor_platform_data { + struct rmi_2d_axis_alignment axis_align; + enum rmi_sensor_type sensor_type; + int x_mm; + int y_mm; + int disable_report_mask; + u16 rezero_wait; + bool topbuttonpad; + bool kernel_tracking; + int dmax; +}; + +/** + * struct rmi_f30_data - overrides defaults for a single F30 GPIOs/LED chip. + * @buttonpad - the touchpad is a buttonpad, so enable only the first actual + * button that is found. + * @trackstick_buttons - Set when the function 30 is handling the physical + * buttons of the trackstick (as a PS/2 passthrough device). + * @disable - the touchpad incorrectly reports F30 and it should be ignored. + * This is a special case which is due to misconfigured firmware. + */ +struct rmi_f30_data { + bool buttonpad; + bool trackstick_buttons; + bool disable; +}; + +/** + * struct rmi_f01_power - override default power management settings. + * + */ +enum rmi_f01_nosleep { + RMI_F01_NOSLEEP_DEFAULT = 0, + RMI_F01_NOSLEEP_OFF = 1, + RMI_F01_NOSLEEP_ON = 2 +}; + +/** + * struct rmi_f01_power_management - When non-zero, these values will be written + * to the touch sensor to override the default firmware settings. For a + * detailed explanation of what each field does, see the corresponding + * documentation in the RMI4 specification. + * + * @nosleep - specifies whether the device is permitted to sleep or doze (that + * is, enter a temporary low power state) when no fingers are touching the + * sensor. + * @wakeup_threshold - controls the capacitance threshold at which the touch + * sensor will decide to wake up from that low power state. + * @doze_holdoff - controls how long the touch sensor waits after the last + * finger lifts before entering the doze state, in units of 100ms. + * @doze_interval - controls the interval between checks for finger presence + * when the touch sensor is in doze mode, in units of 10ms.
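+ * For instance, with those units a doze_interval of 3 means a check every
+ * 30ms, and a doze_holdoff of 5 waits 500ms after the last finger lifts.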
+ */ +struct rmi_f01_power_management { + enum rmi_f01_nosleep nosleep; + u8 wakeup_threshold; + u8 doze_holdoff; + u8 doze_interval; +}; + +/** + * struct rmi_device_platform_data_spi - provides parameters used in SPI + * communications. All Synaptics SPI products support a standard SPI + * interface; some also support what is called SPI V2 mode, depending on + * firmware and/or ASIC limitations. In V2 mode, the touch sensor can + * support shorter delays during certain operations, and these are specified + * separately from the standard mode delays. + * + * @block_delay - for standard SPI transactions consisting of both a read and + * write operation, the delay (in microseconds) between the read and write + * operations. + * @split_read_block_delay_us - for V2 SPI transactions consisting of both a + * read and write operation, the delay (in microseconds) between the read and + * write operations. + * @read_delay_us - the delay between each byte of a read operation in normal + * SPI mode. + * @write_delay_us - the delay between each byte of a write operation in normal + * SPI mode. + * @split_read_byte_delay_us - the delay between each byte of a read operation + * in V2 mode. + * @pre_delay_us - the delay before the start of a SPI transaction. This is + * typically useful in conjunction with custom chip select assertions (see + * below). + * @post_delay_us - the delay after the completion of an SPI transaction. This + * is typically useful in conjunction with custom chip select assertions (see + * below). + * @cs_assert - For systems where the SPI subsystem does not control the CS/SSB + * line, or where such control is broken, you can provide a custom routine to + * handle a GPIO as CS/SSB. This routine will be called at the beginning and + * end of each SPI transaction. The RMI SPI implementation will wait + * pre_delay_us after this routine returns before starting the SPI transfer; + * and post_delay_us after completion of the SPI transfer(s) before calling it + * with assert==FALSE. + */ +struct rmi_device_platform_data_spi { + u32 block_delay_us; + u32 split_read_block_delay_us; + u32 read_delay_us; + u32 write_delay_us; + u32 split_read_byte_delay_us; + u32 pre_delay_us; + u32 post_delay_us; + u8 bits_per_word; + u16 mode; + + void *cs_assert_data; + int (*cs_assert)(const void *cs_assert_data, const bool assert); +}; + +/** + * struct rmi_device_platform_data - system specific configuration info. + * + * @reset_delay_ms - after issuing a reset command to the touch sensor, the + * driver waits a few milliseconds to give the firmware a chance to + * to re-initialize. You can override the default wait period here. + */ +struct rmi_device_platform_data { + int reset_delay_ms; + + struct rmi_device_platform_data_spi spi_data; + + /* function handler pdata */ + struct rmi_2d_sensor_platform_data *sensor_pdata; + struct rmi_f01_power_management power_management; + struct rmi_f30_data *f30_data; +}; + +/** + * struct rmi_function_descriptor - RMI function base addresses + * + * @query_base_addr: The RMI Query base address + * @command_base_addr: The RMI Command base address + * @control_base_addr: The RMI Control base address + * @data_base_addr: The RMI Data base address + * @interrupt_source_count: The number of irqs this RMI function needs + * @function_number: The RMI function number + * + * This struct is used when iterating the Page Description Table. The addresses + * are 16-bit values to include the current page address. 
+ * + */ +struct rmi_function_descriptor { + u16 query_base_addr; + u16 command_base_addr; + u16 control_base_addr; + u16 data_base_addr; + u8 interrupt_source_count; + u8 function_number; + u8 function_version; +}; + +struct rmi_device; + +/** + * struct rmi_transport_dev - represents an RMI transport device + * + * @dev: Pointer to the communication device, e.g. i2c or spi + * @rmi_dev: Pointer to the RMI device + * @proto_name: name of the transport protocol (SPI, i2c, etc) + * @ops: pointer to transport operations implementation + * + * The RMI transport device implements the glue between different communication + * buses such as I2C and SPI. + * + */ +struct rmi_transport_dev { + struct device *dev; + struct rmi_device *rmi_dev; + + const char *proto_name; + const struct rmi_transport_ops *ops; + + struct rmi_device_platform_data pdata; + + struct input_dev *input; + + void *attn_data; + int attn_size; +}; + +/** + * struct rmi_transport_ops - defines transport protocol operations. + * + * @write_block: Writing a block of data to the specified address + * @read_block: Read a block of data from the specified address. + */ +struct rmi_transport_ops { + int (*write_block)(struct rmi_transport_dev *xport, u16 addr, + const void *buf, size_t len); + int (*read_block)(struct rmi_transport_dev *xport, u16 addr, + void *buf, size_t len); + int (*reset)(struct rmi_transport_dev *xport, u16 reset_addr); +}; + +/** + * struct rmi_driver - driver for an RMI4 sensor on the RMI bus. + * + * @driver: Device driver model driver + * @reset_handler: Called when a reset is detected. + * @clear_irq_bits: Clear the specified bits in the current interrupt mask. + * @set_irq_bits: Set the specified bits in the current interrupt mask. + * @store_productid: Callback to cache the product id from function 01 + * @data: Private data pointer + * + */ +struct rmi_driver { + struct device_driver driver; + + int (*reset_handler)(struct rmi_device *rmi_dev); + int (*clear_irq_bits)(struct rmi_device *rmi_dev, unsigned long *mask); + int (*set_irq_bits)(struct rmi_device *rmi_dev, unsigned long *mask); + int (*store_productid)(struct rmi_device *rmi_dev); + int (*set_input_params)(struct rmi_device *rmi_dev, + struct input_dev *input); + void *data; +}; + +/** + * struct rmi_device - represents an RMI4 sensor device on the RMI bus. + * + * @dev: The device created for the RMI bus + * @number: Unique number for the device on the bus.
+ * @driver: Pointer to associated driver + * @xport: Pointer to the transport interface + * + */ +struct rmi_device { + struct device dev; + int number; + + struct rmi_driver *driver; + struct rmi_transport_dev *xport; + +}; + +struct rmi_driver_data { + struct list_head function_list; + + struct rmi_device *rmi_dev; + + struct rmi_function *f01_container; + bool f01_bootloader_mode; + + u32 attn_count; + int num_of_irq_regs; + int irq_count; + unsigned long *irq_status; + unsigned long *fn_irq_bits; + unsigned long *current_irq_mask; + unsigned long *new_irq_mask; + struct mutex irq_mutex; + struct input_dev *input; + + u8 pdt_props; + u8 bsr; + + bool enabled; + + void *data; +}; + +int rmi_register_transport_device(struct rmi_transport_dev *xport); +void rmi_unregister_transport_device(struct rmi_transport_dev *xport); +int rmi_process_interrupt_requests(struct rmi_device *rmi_dev); + +int rmi_driver_suspend(struct rmi_device *rmi_dev); +int rmi_driver_resume(struct rmi_device *rmi_dev); +#endif diff --git a/include/linux/rotary_encoder.h b/include/linux/rotary_encoder.h deleted file mode 100644 index fe3dc64e5aeb..000000000000 --- a/include/linux/rotary_encoder.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef __ROTARY_ENCODER_H__ -#define __ROTARY_ENCODER_H__ - -struct rotary_encoder_platform_data { - unsigned int steps; - unsigned int axis; - unsigned int gpio_a; - unsigned int gpio_b; - unsigned int inverted_a; - unsigned int inverted_b; - unsigned int steps_per_period; - bool relative_axis; - bool rollover; - bool wakeup_source; -}; - -#endif /* __ROTARY_ENCODER_H__ */ diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 3359f0422c6b..b693adac853b 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -89,6 +89,8 @@ struct rtc_class_ops { int (*set_mmss)(struct device *, unsigned long secs); int (*read_callback)(struct device *, int data); int (*alarm_irq_enable)(struct device *, unsigned int enabled); + int (*read_offset)(struct device *, long *offset); + int (*set_offset)(struct device *, long offset); }; #define RTC_DEVICE_NAME_SIZE 20 @@ -208,6 +210,8 @@ void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data); int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer, ktime_t expires, ktime_t period); void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer); +int rtc_read_offset(struct rtc_device *rtc, long *offset); +int rtc_set_offset(struct rtc_device *rtc, long offset); void rtc_timer_do_work(struct work_struct *work); static inline bool is_leap_year(unsigned int year) diff --git a/include/linux/sched.h b/include/linux/sched.h index a10494a94cc3..084ed9fba620 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -182,8 +182,6 @@ extern void update_cpu_load_nohz(int active); static inline void update_cpu_load_nohz(int active) { } #endif -extern unsigned long get_parent_ip(unsigned long addr); - extern void dump_cpu_task(int cpu); struct seq_file; @@ -719,6 +717,10 @@ struct signal_struct { /* Earliest-expiration cache. 
*/ struct task_cputime cputime_expires; +#ifdef CONFIG_NO_HZ_FULL + unsigned long tick_dep_mask; +#endif + struct list_head cpu_timers[3]; struct pid *tty_old_pgrp; @@ -775,7 +777,6 @@ struct signal_struct { #endif #ifdef CONFIG_AUDIT unsigned audit_tty; - unsigned audit_tty_log_passwd; struct tty_audit_buf *tty_audit_buf; #endif @@ -920,6 +921,10 @@ static inline int sched_info_on(void) #endif } +#ifdef CONFIG_SCHEDSTATS +void force_schedstat_enabled(void); +#endif + enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, @@ -1289,6 +1294,8 @@ struct sched_rt_entity { unsigned long timeout; unsigned long watchdog_stamp; unsigned int time_slice; + unsigned short on_rq; + unsigned short on_list; struct sched_rt_entity *back; #ifdef CONFIG_RT_GROUP_SCHED @@ -1329,10 +1336,6 @@ struct sched_dl_entity { * task has to wait for a replenishment to be performed at the * next firing of dl_timer. * - * @dl_new tells if a new instance arrived. If so we must - * start executing it with full runtime and reset its absolute - * deadline; - * * @dl_boosted tells if we are boosted due to DI. If so we are * outside bandwidth enforcement mechanism (but only until we * exit the critical section); @@ -1340,7 +1343,7 @@ struct sched_dl_entity { * @dl_yielded tells if task gave up the cpu before consuming * all its available runtime during the last job. */ - int dl_throttled, dl_new, dl_boosted, dl_yielded; + int dl_throttled, dl_boosted, dl_yielded; /* * Bandwidth enforcement timer. Each -deadline task has its @@ -1542,6 +1545,10 @@ struct task_struct { VTIME_SYS, } vtime_snap_whence; #endif + +#ifdef CONFIG_NO_HZ_FULL + unsigned long tick_dep_mask; +#endif unsigned long nvcsw, nivcsw; /* context switch counts */ u64 start_time; /* monotonic time in nsec */ u64 real_start_time; /* boot based time in nsec */ @@ -1784,8 +1791,8 @@ struct task_struct { * time slack values; these are used to round up poll() and * select() etc timeout values. These are in nanoseconds. */ - unsigned long timer_slack_ns; - unsigned long default_timer_slack_ns; + u64 timer_slack_ns; + u64 default_timer_slack_ns; #ifdef CONFIG_KASAN unsigned int kasan_depth; @@ -2356,10 +2363,7 @@ static inline void wake_up_nohz_cpu(int cpu) { } #endif #ifdef CONFIG_NO_HZ_FULL -extern bool sched_can_stop_tick(void); extern u64 scheduler_tick_max_deferment(void); -#else -static inline bool sched_can_stop_tick(void) { return false; } #endif #ifdef CONFIG_SCHED_AUTOGROUP @@ -3207,4 +3211,13 @@ static inline unsigned long rlimit_max(unsigned int limit) return task_rlimit_max(current, limit); } +#ifdef CONFIG_CPU_FREQ +struct update_util_data { + void (*func)(struct update_util_data *data, + u64 time, unsigned long util, unsigned long max); +}; + +void cpufreq_set_update_util_data(int cpu, struct update_util_data *data); +#endif /* CONFIG_CPU_FREQ */ + #endif diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index c9e4731cf10b..22db1e63707e 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -14,27 +14,6 @@ extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, enum { sysctl_hung_task_timeout_secs = 0 }; #endif -/* - * Default maximum number of active map areas, this limits the number of vmas - * per mm struct. Users can overwrite this number by sysctl but there is a - * problem. - * - * When a program's coredump is generated as ELF format, a section is created - * per a vma. In ELF, the number of sections is represented in unsigned short. 
- * This means the number of sections should be smaller than 65535 at coredump. - * Because the kernel adds some informative sections to a image of program at - * generating coredump, we need some margin. The number of extra sections is - * 1-3 now and depends on arch. We use "5" as safe margin, here. - * - * ELF extended numbering allows more than 65535 sections, so 16-bit bound is - * not a hard limit any more. Although some userspace tools can be surprised by - * that. - */ -#define MAPCOUNT_ELF_CORE_MARGIN (5) -#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) - -extern int sysctl_max_map_count; - extern unsigned int sysctl_sched_latency; extern unsigned int sysctl_sched_min_granularity; extern unsigned int sysctl_sched_wakeup_granularity; @@ -95,4 +74,8 @@ extern int sysctl_numa_balancing(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); +extern int sysctl_schedstats(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + #endif /* _SCHED_SYSCTL_H */ diff --git a/include/linux/security.h b/include/linux/security.h index 4824a4ccaf1c..157f0cb1e4d2 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -24,10 +24,12 @@ #include <linux/key.h> #include <linux/capability.h> +#include <linux/fs.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/string.h> #include <linux/mm.h> +#include <linux/fs.h> struct linux_binprm; struct cred; @@ -298,9 +300,11 @@ int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp); void security_transfer_creds(struct cred *new, const struct cred *old); int security_kernel_act_as(struct cred *new, u32 secid); int security_kernel_create_files_as(struct cred *new, struct inode *inode); -int security_kernel_fw_from_file(struct file *file, char *buf, size_t size); int security_kernel_module_request(char *kmod_name); int security_kernel_module_from_file(struct file *file); +int security_kernel_read_file(struct file *file, enum kernel_read_file_id id); +int security_kernel_post_read_file(struct file *file, char *buf, loff_t size, + enum kernel_read_file_id id); int security_task_fix_setuid(struct cred *new, const struct cred *old, int flags); int security_task_setpgid(struct task_struct *p, pid_t pgid); @@ -850,18 +854,20 @@ static inline int security_kernel_create_files_as(struct cred *cred, return 0; } -static inline int security_kernel_fw_from_file(struct file *file, - char *buf, size_t size) +static inline int security_kernel_module_request(char *kmod_name) { return 0; } -static inline int security_kernel_module_request(char *kmod_name) +static inline int security_kernel_read_file(struct file *file, + enum kernel_read_file_id id) { return 0; } -static inline int security_kernel_module_from_file(struct file *file) +static inline int security_kernel_post_read_file(struct file *file, + char *buf, loff_t size, + enum kernel_read_file_id id) { return 0; } diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index faa0e0370ce7..434879759725 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -76,6 +76,12 @@ struct uart_8250_ops { void (*release_irq)(struct uart_8250_port *); }; +struct uart_8250_em485 { + struct timer_list start_tx_timer; /* "rs485 start tx" timer */ + struct timer_list stop_tx_timer; /* "rs485 stop tx" timer */ + struct timer_list *active_timer; /* pointer to active timer */ +}; + /* * This should be used by drivers which want to register * their own 8250 ports 
without registering their own @@ -122,6 +128,8 @@ struct uart_8250_port { /* 8250 specific callbacks */ int (*dl_read)(struct uart_8250_port *); void (*dl_write)(struct uart_8250_port *, int); + + struct uart_8250_em485 *em485; }; static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up) diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index e03d6ba5e5b4..cbfcf38e220d 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -342,21 +342,26 @@ struct earlycon_device { struct earlycon_id { char name[16]; + char compatible[128]; int (*setup)(struct earlycon_device *, const char *options); } __aligned(32); -extern int setup_earlycon(char *buf); -extern int of_setup_earlycon(unsigned long addr, - int (*setup)(struct earlycon_device *, const char *)); +extern const struct earlycon_id __earlycon_table[]; +extern const struct earlycon_id __earlycon_table_end[]; + +#define OF_EARLYCON_DECLARE(_name, compat, fn) \ + static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \ + __used __section(__earlycon_table) \ + = { .name = __stringify(_name), \ + .compatible = compat, \ + .setup = fn } -#define EARLYCON_DECLARE(_name, func) \ - static const struct earlycon_id __earlycon_##_name \ - __used __section(__earlycon_table) \ - = { .name = __stringify(_name), \ - .setup = func } +#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn) -#define OF_EARLYCON_DECLARE(name, compat, fn) \ - _OF_DECLARE(earlycon, name, compat, fn, void *) +extern int setup_earlycon(char *buf); +extern int of_setup_earlycon(const struct earlycon_id *match, + unsigned long node, + const char *options); struct uart_port *uart_get_console(struct uart_port *ports, int nr, struct console *c); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 11f935c1a090..15d0df943466 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -299,6 +299,7 @@ struct sk_buff; #else #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1) #endif +extern int sysctl_max_skb_frags; typedef struct skb_frag_struct skb_frag_t; @@ -1160,10 +1161,6 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) to->l4_hash = from->l4_hash; }; -static inline void skb_sender_cpu_clear(struct sk_buff *skb) -{ -} - #ifdef NET_SKBUFF_DATA_USES_OFFSET static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) { @@ -1984,6 +1981,30 @@ static inline void skb_reserve(struct sk_buff *skb, int len) skb->tail += len; } +/** + * skb_tailroom_reserve - adjust reserved_tailroom + * @skb: buffer to alter + * @mtu: maximum amount of headlen permitted + * @needed_tailroom: minimum amount of reserved_tailroom + * + * Set reserved_tailroom so that headlen can be as large as possible but + * not larger than mtu and tailroom cannot be smaller than + * needed_tailroom. + * The required headroom should already have been reserved before using + * this function. 
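+ *
+ * Worked example of the logic below: with 1500 bytes of tailroom, an mtu of
+ * 1280 and a needed_tailroom of 16, the first branch applies and
+ * reserved_tailroom becomes 1500 - 1280 = 220, capping headlen at mtu while
+ * still leaving more than the required 16 bytes of tailroom.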
+ */ +static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu, + unsigned int needed_tailroom) +{ + SKB_LINEAR_ASSERT(skb); + if (mtu < skb_tailroom(skb) - needed_tailroom) + /* use at most mtu */ + skb->reserved_tailroom = skb_tailroom(skb) - mtu; + else + /* use up to all available space */ + skb->reserved_tailroom = needed_tailroom; +} + #define ENCAP_TYPE_ETHER 0 #define ENCAP_TYPE_IPPROTO 1 @@ -2161,6 +2182,11 @@ static inline int skb_checksum_start_offset(const struct sk_buff *skb) return skb->csum_start - skb_headroom(skb); } +static inline unsigned char *skb_checksum_start(const struct sk_buff *skb) +{ + return skb->head + skb->csum_start; +} + static inline int skb_transport_offset(const struct sk_buff *skb) { return skb_transport_header(skb) - skb->data; @@ -2399,6 +2425,10 @@ static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, { return __napi_alloc_skb(napi, length, GFP_ATOMIC); } +void napi_consume_skb(struct sk_buff *skb, int budget); + +void __kfree_skb_flush(void); +void __kfree_skb_defer(struct sk_buff *skb); /** * __dev_alloc_pages - allocate page for network Rx @@ -2621,6 +2651,13 @@ static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len skb_headroom(skb) + len <= skb->hdr_len; } +static inline int skb_try_make_writable(struct sk_buff *skb, + unsigned int write_len) +{ + return skb_cloned(skb) && !skb_clone_writable(skb, write_len) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC); +} + static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom, int cloned) { @@ -3549,6 +3586,7 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb) struct skb_gso_cb { int mac_offset; int encap_level; + __wsum csum; __u16 csum_start; }; #define SKB_SGO_CB_OFFSET 32 @@ -3575,6 +3613,16 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra) return 0; } +static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res) +{ + /* Do not update partial checksums if remote checksum is enabled. */ + if (skb->remcsum_offload) + return; + + SKB_GSO_CB(skb)->csum = res; + SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head; +} + /* Compute the checksum for a gso segment. First compute the checksum value * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and * then add in skb->csum (checksum from csum_start to end of packet). @@ -3585,15 +3633,14 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra) */ static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res) { - int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) - - skb_transport_offset(skb); - __wsum partial; + unsigned char *csum_start = skb_transport_header(skb); + int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start; + __wsum partial = SKB_GSO_CB(skb)->csum; - partial = csum_partial(skb_transport_header(skb), plen, skb->csum); - skb->csum = res; - SKB_GSO_CB(skb)->csum_start -= plen; + SKB_GSO_CB(skb)->csum = res; + SKB_GSO_CB(skb)->csum_start = csum_start - skb->head; - return csum_fold(partial); + return csum_fold(csum_partial(csum_start, plen, partial)); } static inline bool skb_is_gso(const struct sk_buff *skb) @@ -3683,5 +3730,30 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb) return hdr_len + skb_gso_transport_seglen(skb); } +/* Local Checksum Offload. + * Compute outer checksum based on the assumption that the + * inner checksum will be offloaded later. 
+ * See Documentation/networking/checksum-offloads.txt for + * explanation of how this works. + * Fill in outer checksum adjustment (e.g. with sum of outer + * pseudo-header) before calling. + * Also ensure that inner checksum is in linear data area. + */ +static inline __wsum lco_csum(struct sk_buff *skb) +{ + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *l4_hdr = skb_transport_header(skb); + __wsum partial; + + /* Start with complement of inner checksum adjustment */ + partial = ~csum_unfold(*(__force __sum16 *)(csum_start + + skb->csum_offset)); + + /* Add in checksum of our headers (incl. outer checksum + * adjustment filled in by caller) and return result. + */ + return csum_partial(l4_hdr, csum_start - l4_hdr, partial); +} + #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --git a/include/linux/slab.h b/include/linux/slab.h index 3627d5c1bc47..e4b568738ca3 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -20,7 +20,7 @@ * Flags to pass to kmem_cache_create(). * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set. */ -#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */ +#define SLAB_CONSISTENCY_CHECKS 0x00000100UL /* DEBUG: Perform (expensive) checks on alloc/free */ #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */ #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */ #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ @@ -314,7 +314,7 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment void kmem_cache_free(struct kmem_cache *, void *); /* - * Bulk allocation and freeing operations. These are accellerated in an + * Bulk allocation and freeing operations. These are accelerated in an * allocator specific way to avoid taking locks repeatedly or building * metadata structures unnecessarily. * @@ -323,6 +323,15 @@ void kmem_cache_free(struct kmem_cache *, void *); void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); +/* + * Caller must not use kfree_bulk() on memory not originally allocated + * by kmalloc(), because the SLOB allocator cannot handle this. + */ +static __always_inline void kfree_bulk(size_t size, void **p) +{ + kmem_cache_free_bulk(NULL, size, p); +} + #ifdef CONFIG_NUMA void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment; void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment; diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index cf139d3fa513..e878ba35ae91 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -60,6 +60,9 @@ struct kmem_cache { atomic_t allocmiss; atomic_t freehit; atomic_t freemiss; +#ifdef CONFIG_DEBUG_SLAB_LEAK + atomic_t store_user_clean; +#endif /* * If debugging is enabled, then the allocator can add additional diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index b7e57927f521..ac5143f95ee6 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -81,6 +81,7 @@ struct kmem_cache { int reserved; /* Reserved bytes at the end of slabs */ const char *name; /* Name (only for display!) 
*/ struct list_head list; /* List of slab caches */ + int red_left_pad; /* Left redzone padding size */ #ifdef CONFIG_SYSFS struct kobject kobj; /* For sysfs */ #endif diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h index 343c13ac4f71..35cb9264e0d5 100644 --- a/include/linux/soc/ti/knav_dma.h +++ b/include/linux/soc/ti/knav_dma.h @@ -44,6 +44,7 @@ #define KNAV_DMA_NUM_EPIB_WORDS 4 #define KNAV_DMA_NUM_PS_WORDS 16 +#define KNAV_DMA_NUM_SW_DATA_WORDS 4 #define KNAV_DMA_FDQ_PER_CHAN 4 /* Tx channel scheduling priority */ @@ -142,6 +143,7 @@ struct knav_dma_cfg { * @orig_buff: buff pointer since 'buff' can be overwritten * @epib: Extended packet info block * @psdata: Protocol specific + * @sw_data: Software private data not touched by h/w */ struct knav_dma_desc { __le32 desc_info; @@ -154,7 +156,7 @@ struct knav_dma_desc { __le32 orig_buff; __le32 epib[KNAV_DMA_NUM_EPIB_WORDS]; __le32 psdata[KNAV_DMA_NUM_PS_WORDS]; - __le32 pad[4]; + u32 sw_data[KNAV_DMA_NUM_SW_DATA_WORDS]; } ____cacheline_aligned; #if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA) diff --git a/include/linux/socket.h b/include/linux/socket.h index 5bf59c8493b7..73bf6c6a833b 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -200,7 +200,9 @@ struct ucred { #define AF_ALG 38 /* Algorithm sockets */ #define AF_NFC 39 /* NFC sockets */ #define AF_VSOCK 40 /* vSockets */ -#define AF_MAX 41 /* For now.. */ +#define AF_KCM 41 /* Kernel Connection Multiplexor*/ + +#define AF_MAX 42 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -246,6 +248,7 @@ struct ucred { #define PF_ALG AF_ALG #define PF_NFC AF_NFC #define PF_VSOCK AF_VSOCK +#define PF_KCM AF_KCM #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. */ @@ -274,6 +277,7 @@ struct ucred { #define MSG_MORE 0x8000 /* Sender will send more */ #define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */ #define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */ +#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */ #define MSG_EOF MSG_FIN #define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ @@ -322,6 +326,7 @@ struct ucred { #define SOL_CAIF 278 #define SOL_ALG 279 #define SOL_NFC 280 +#define SOL_KCM 281 /* IPX options */ #define IPX_TYPE 1 diff --git a/include/linux/spi/eeprom.h b/include/linux/spi/eeprom.h index 403e007aef68..e34e169f9dcb 100644 --- a/include/linux/spi/eeprom.h +++ b/include/linux/spi/eeprom.h @@ -30,8 +30,6 @@ struct spi_eeprom { */ #define EE_INSTR_BIT3_IS_ADDR 0x0010 - /* for exporting this chip's data to other kernel code */ - void (*setup)(struct memory_accessor *mem, void *context); void *context; }; diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 53be3a4c60cb..857a9a1d82b5 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -25,6 +25,7 @@ struct dma_chan; struct spi_master; struct spi_transfer; +struct spi_flash_read_message; /* * INTERFACES between SPI master-side drivers and SPI infrastructure. 
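As a concrete illustration of the lco_csum() helper added to linux/skbuff.h above: a hypothetical outer-IPv4/UDP encapsulation path could fill in its outer checksum as sketched below, leaving the inner checksum to hardware offload. This sketch is not part of the patch; example_outer_udp_csum() is an invented name, while udp_hdr(), udp_v4_check() and CSUM_MANGLED_0 are existing kernel interfaces assumed to be available to the caller.

#include <linux/skbuff.h>
#include <linux/udp.h>
#include <net/udp.h>

/* Hypothetical caller of lco_csum(): compute the outer UDP checksum of an
 * encapsulated packet whose inner checksum stays CHECKSUM_PARTIAL for the
 * NIC.  skb->csum_start/csum_offset still describe the inner checksum, and
 * skb_transport_header() points at the outer UDP header. */
static void example_outer_udp_csum(struct sk_buff *skb,
				   __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	/* Keep the checksum field itself out of the sum, then add the outer
	 * pseudo-header via udp_v4_check(), seeded with the LCO sum of
	 * everything from the outer UDP header up to the inner checksum
	 * adjustment. */
	uh->check = 0;
	uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;	/* 0 means "no checksum" on UDP */
}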
@@ -53,6 +54,10 @@ extern struct bus_type spi_bus_type; * * @transfer_bytes_histo: * transfer bytes histogramm + * + * @transfers_split_maxsize: + * number of transfers that have been split because of + * maxsize limit */ struct spi_statistics { spinlock_t lock; /* lock for the whole structure */ @@ -72,6 +77,8 @@ struct spi_statistics { #define SPI_STATISTICS_HISTO_SIZE 17 unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE]; + + unsigned long transfers_split_maxsize; }; void spi_statistics_add_transfer_stats(struct spi_statistics *stats, @@ -303,6 +310,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @min_speed_hz: Lowest supported transfer speed * @max_speed_hz: Highest supported transfer speed * @flags: other constraints relevant to this driver + * @max_transfer_size: function that returns the max transfer size for + * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used. * @bus_lock_spinlock: spinlock for SPI bus locking * @bus_lock_mutex: mutex for SPI bus locking * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use @@ -361,6 +370,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @handle_err: the subsystem calls the driver to handle an error that occurs * in the generic implementation of transfer_one_message(). * @unprepare_message: undo any work done by prepare_message(). + * @spi_flash_read: to support spi-controller hardwares that provide + * accelerated interface to read from flash devices. * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS * number. Any individual value may be -ENOENT for CS lines that * are not GPIOs (driven by the SPI controller itself). @@ -369,6 +380,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @dma_rx: DMA receive channel * @dummy_rx: dummy receive buffer for full-duplex devices * @dummy_tx: dummy transmit buffer for full-duplex devices + * @fw_translate_cs: If the boot firmware uses different numbering scheme + * what Linux expects, this optional hook can be used to translate + * between the two. * * Each SPI master controller can communicate with one or more @spi_device * children. 
These make a small bus, sharing MOSI, MISO and SCK signals @@ -513,6 +527,8 @@ struct spi_master { struct spi_message *message); int (*unprepare_message)(struct spi_master *master, struct spi_message *message); + int (*spi_flash_read)(struct spi_device *spi, + struct spi_flash_read_message *msg); /* * These hooks are for drivers that use a generic implementation @@ -537,6 +553,8 @@ struct spi_master { /* dummy data for full duplex devices */ void *dummy_rx; void *dummy_tx; + + int (*fw_translate_cs)(struct spi_master *master, unsigned cs); }; static inline void *spi_master_get_devdata(struct spi_master *master) @@ -582,6 +600,38 @@ extern void spi_unregister_master(struct spi_master *master); extern struct spi_master *spi_busnum_to_master(u16 busnum); +/* + * SPI resource management while processing a SPI message + */ + +typedef void (*spi_res_release_t)(struct spi_master *master, + struct spi_message *msg, + void *res); + +/** + * struct spi_res - spi resource management structure + * @entry: list entry + * @release: release code called prior to freeing this resource + * @data: extra data allocated for the specific use-case + * + * this is based on ideas from devres, but focused on life-cycle + * management during spi_message processing + */ +struct spi_res { + struct list_head entry; + spi_res_release_t release; + unsigned long long data[]; /* guarantee ull alignment */ +}; + +extern void *spi_res_alloc(struct spi_device *spi, + spi_res_release_t release, + size_t size, gfp_t gfp); +extern void spi_res_add(struct spi_message *message, void *res); +extern void spi_res_free(void *res); + +extern void spi_res_release(struct spi_master *master, + struct spi_message *message); + /*---------------------------------------------------------------------------*/ /* @@ -720,6 +770,7 @@ struct spi_transfer { * @status: zero for success, else negative errno * @queue: for use by whichever driver currently owns the message * @state: for use by whichever driver currently owns the message + * @resources: for resource management when the spi message is processed * * A @spi_message is used to execute an atomic sequence of data transfers, * each represented by a struct spi_transfer. 
The sequence is "atomic" @@ -766,11 +817,15 @@ struct spi_message { */ struct list_head queue; void *state; + + /* list of spi_res reources when the spi message is processed */ + struct list_head resources; }; static inline void spi_message_init_no_memset(struct spi_message *m) { INIT_LIST_HEAD(&m->transfers); + INIT_LIST_HEAD(&m->resources); } static inline void spi_message_init(struct spi_message *m) @@ -854,6 +909,60 @@ spi_max_transfer_size(struct spi_device *spi) /*---------------------------------------------------------------------------*/ +/* SPI transfer replacement methods which make use of spi_res */ + +struct spi_replaced_transfers; +typedef void (*spi_replaced_release_t)(struct spi_master *master, + struct spi_message *msg, + struct spi_replaced_transfers *res); +/** + * struct spi_replaced_transfers - structure describing the spi_transfer + * replacements that have occurred + * so that they can get reverted + * @release: some extra release code to get executed prior to + * relasing this structure + * @extradata: pointer to some extra data if requested or NULL + * @replaced_transfers: transfers that have been replaced and which need + * to get restored + * @replaced_after: the transfer after which the @replaced_transfers + * are to get re-inserted + * @inserted: number of transfers inserted + * @inserted_transfers: array of spi_transfers of array-size @inserted, + * that have been replacing replaced_transfers + * + * note: that @extradata will point to @inserted_transfers[@inserted] + * if some extra allocation is requested, so alignment will be the same + * as for spi_transfers + */ +struct spi_replaced_transfers { + spi_replaced_release_t release; + void *extradata; + struct list_head replaced_transfers; + struct list_head *replaced_after; + size_t inserted; + struct spi_transfer inserted_transfers[]; +}; + +extern struct spi_replaced_transfers *spi_replace_transfers( + struct spi_message *msg, + struct spi_transfer *xfer_first, + size_t remove, + size_t insert, + spi_replaced_release_t release, + size_t extradatasize, + gfp_t gfp); + +/*---------------------------------------------------------------------------*/ + +/* SPI transfer transformation methods */ + +extern int spi_split_transfers_maxsize(struct spi_master *master, + struct spi_message *msg, + size_t maxsize, + gfp_t gfp); + +/*---------------------------------------------------------------------------*/ + /* All these synchronous SPI transfer routines are utilities layered * over the core async transfer primitive. Here, "synchronous" means * they will sleep uninterruptibly until the async transfer completes. 
@@ -1019,6 +1128,42 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) return be16_to_cpu(result); } +/** + * struct spi_flash_read_message - flash specific information for + * spi-masters that provide accelerated flash read interfaces + * @buf: buffer to read data + * @from: offset within the flash from where data is to be read + * @len: length of data to be read + * @retlen: actual length of data read + * @read_opcode: read_opcode to be used to communicate with flash + * @addr_width: number of address bytes + * @dummy_bytes: number of dummy bytes + * @opcode_nbits: number of lines to send opcode + * @addr_nbits: number of lines to send address + * @data_nbits: number of lines for data + */ +struct spi_flash_read_message { + void *buf; + loff_t from; + size_t len; + size_t retlen; + u8 read_opcode; + u8 addr_width; + u8 dummy_bytes; + u8 opcode_nbits; + u8 addr_nbits; + u8 data_nbits; +}; + +/* SPI core interface for flash read support */ +static inline bool spi_flash_read_supported(struct spi_device *spi) +{ + return spi->master->spi_flash_read ? true : false; +} + +int spi_flash_read(struct spi_device *spi, + struct spi_flash_read_message *msg); + /*---------------------------------------------------------------------------*/ /* diff --git a/include/linux/srcu.h b/include/linux/srcu.h index f5f80c5643ac..dc8eb63c6568 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -99,8 +99,23 @@ void process_srcu(struct work_struct *work); } /* - * define and init a srcu struct at build time. - * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it. + * Define and initialize a srcu struct at build time. + * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it. + * + * Note that although DEFINE_STATIC_SRCU() hides the name from other + * files, the per-CPU variable rules nevertheless require that the + * chosen name be globally unique. These rules also prohibit use of + * DEFINE_STATIC_SRCU() within a function. If these rules are too + * restrictive, declare the srcu_struct manually. For example, in + * each file: + * + * static struct srcu_struct my_srcu; + * + * Then, before the first use of each my_srcu, manually initialize it: + * + * init_srcu_struct(&my_srcu); + * + * See include/linux/percpu-defs.h for the rules on per-CPU variables. */ #define __DEFINE_SRCU(name, is_static) \ static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ diff --git a/include/linux/stm.h b/include/linux/stm.h index 9d0083d364e6..1a79ed8e43da 100644 --- a/include/linux/stm.h +++ b/include/linux/stm.h @@ -67,6 +67,16 @@ struct stm_device; * description. That is, the lowest master that can be allocated to software * writers is @sw_start and data from this writer will appear is @sw_start * master in the STP stream. + * + * The @packet callback should adhere to the following rules: + * 1) it must return the number of bytes it consumed from the payload; + * 2) therefore, if it sent a packet that does not have payload (like FLAG), + * it must return zero; + * 3) if it does not support the requested packet type/flag combination, + * it must return -ENOTSUPP. + * + * The @unlink callback is called when there are no more active writers so + * that the master/channel can be quiesced. 
*/ struct stm_data { const char *name; diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index eead8ab93c0a..4bcf5a61aada 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -90,7 +90,21 @@ struct stmmac_dma_cfg { int pbl; int fixed_burst; int mixed_burst; - int burst_len; + bool aal; +}; + +#define AXI_BLEN 7 +struct stmmac_axi { + bool axi_lpi_en; + bool axi_xit_frm; + u32 axi_wr_osr_lmt; + u32 axi_rd_osr_lmt; + bool axi_kbbe; + bool axi_axi_all; + u32 axi_blen[AXI_BLEN]; + bool axi_fb; + bool axi_mb; + bool axi_rb; }; struct plat_stmmacenet_data { @@ -100,6 +114,7 @@ struct plat_stmmacenet_data { int interface; struct stmmac_mdio_bus_data *mdio_bus_data; struct device_node *phy_node; + struct device_node *mdio_node; struct stmmac_dma_cfg *dma_cfg; int clk_csr; int has_gmac; @@ -122,5 +137,6 @@ struct plat_stmmacenet_data { int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); void *bsp_priv; + struct stmmac_axi *axi; }; #endif diff --git a/include/linux/string.h b/include/linux/string.h index 9eebc66d957a..d3993a79a325 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -128,7 +128,13 @@ extern char **argv_split(gfp_t gfp, const char *str, int *argcp); extern void argv_free(char **argv); extern bool sysfs_streq(const char *s1, const char *s2); -extern int strtobool(const char *s, bool *res); +extern int kstrtobool(const char *s, bool *res); +static inline int strtobool(const char *s, bool *res) +{ + return kstrtobool(s, res); +} + +int match_string(const char * const *array, size_t n, const char *string); #ifdef CONFIG_BINARY_PRINTF int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h index df02a4188487..7df625d41e35 100644 --- a/include/linux/sunrpc/gss_krb5.h +++ b/include/linux/sunrpc/gss_krb5.h @@ -36,7 +36,7 @@ * */ -#include <linux/crypto.h> +#include <crypto/skcipher.h> #include <linux/sunrpc/auth_gss.h> #include <linux/sunrpc/gss_err.h> #include <linux/sunrpc/gss_asn1.h> @@ -71,10 +71,10 @@ struct gss_krb5_enctype { const u32 keyed_cksum; /* is it a keyed cksum? 
*/ const u32 keybytes; /* raw key len, in bytes */ const u32 keylength; /* final key len, in bytes */ - u32 (*encrypt) (struct crypto_blkcipher *tfm, + u32 (*encrypt) (struct crypto_skcipher *tfm, void *iv, void *in, void *out, int length); /* encryption function */ - u32 (*decrypt) (struct crypto_blkcipher *tfm, + u32 (*decrypt) (struct crypto_skcipher *tfm, void *iv, void *in, void *out, int length); /* decryption function */ u32 (*mk_key) (const struct gss_krb5_enctype *gk5e, @@ -98,12 +98,12 @@ struct krb5_ctx { u32 enctype; u32 flags; const struct gss_krb5_enctype *gk5e; /* enctype-specific info */ - struct crypto_blkcipher *enc; - struct crypto_blkcipher *seq; - struct crypto_blkcipher *acceptor_enc; - struct crypto_blkcipher *initiator_enc; - struct crypto_blkcipher *acceptor_enc_aux; - struct crypto_blkcipher *initiator_enc_aux; + struct crypto_skcipher *enc; + struct crypto_skcipher *seq; + struct crypto_skcipher *acceptor_enc; + struct crypto_skcipher *initiator_enc; + struct crypto_skcipher *acceptor_enc_aux; + struct crypto_skcipher *initiator_enc_aux; u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */ u8 cksum[GSS_KRB5_MAX_KEYLEN]; s32 endtime; @@ -262,24 +262,24 @@ gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, u32 -krb5_encrypt(struct crypto_blkcipher *key, +krb5_encrypt(struct crypto_skcipher *key, void *iv, void *in, void *out, int length); u32 -krb5_decrypt(struct crypto_blkcipher *key, +krb5_decrypt(struct crypto_skcipher *key, void *iv, void *in, void *out, int length); int -gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *outbuf, +gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *outbuf, int offset, struct page **pages); int -gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *inbuf, +gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *inbuf, int offset); s32 krb5_make_seq_num(struct krb5_ctx *kctx, - struct crypto_blkcipher *key, + struct crypto_skcipher *key, int direction, u32 seqnum, unsigned char *cksum, unsigned char *buf); @@ -320,12 +320,12 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, int krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, - struct crypto_blkcipher *cipher, + struct crypto_skcipher *cipher, unsigned char *cksum); int krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, - struct crypto_blkcipher *cipher, + struct crypto_skcipher *cipher, s32 seqnum); void gss_krb5_make_confounder(char *p, u32 conflen); diff --git a/include/linux/swait.h b/include/linux/swait.h new file mode 100644 index 000000000000..c1f9c62a8a50 --- /dev/null +++ b/include/linux/swait.h @@ -0,0 +1,172 @@ +#ifndef _LINUX_SWAIT_H +#define _LINUX_SWAIT_H + +#include <linux/list.h> +#include <linux/stddef.h> +#include <linux/spinlock.h> +#include <asm/current.h> + +/* + * Simple wait queues + * + * While these are very similar to the other/complex wait queues (wait.h) the + * most important difference is that the simple waitqueue allows for + * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold + * times. + * + * In order to make this so, we had to drop a fair number of features of the + * other waitqueue code; notably: + * + * - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue; + * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right + * sleeper state. + * + * - the exclusive mode; because this requires preserving the list order + * and this is hard. + * + * - custom wake functions; because you cannot give any guarantees about + * random code. 
+ * + * As a side effect of this; the data structures are slimmer. + * + * One would recommend using this wait queue where possible. + */ + +struct task_struct; + +struct swait_queue_head { + raw_spinlock_t lock; + struct list_head task_list; +}; + +struct swait_queue { + struct task_struct *task; + struct list_head task_list; +}; + +#define __SWAITQUEUE_INITIALIZER(name) { \ + .task = current, \ + .task_list = LIST_HEAD_INIT((name).task_list), \ +} + +#define DECLARE_SWAITQUEUE(name) \ + struct swait_queue name = __SWAITQUEUE_INITIALIZER(name) + +#define __SWAIT_QUEUE_HEAD_INITIALIZER(name) { \ + .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ + .task_list = LIST_HEAD_INIT((name).task_list), \ +} + +#define DECLARE_SWAIT_QUEUE_HEAD(name) \ + struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INITIALIZER(name) + +extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name, + struct lock_class_key *key); + +#define init_swait_queue_head(q) \ + do { \ + static struct lock_class_key __key; \ + __init_swait_queue_head((q), #q, &__key); \ + } while (0) + +#ifdef CONFIG_LOCKDEP +# define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ + ({ init_swait_queue_head(&name); name; }) +# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \ + struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) +#else +# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \ + DECLARE_SWAIT_QUEUE_HEAD(name) +#endif + +static inline int swait_active(struct swait_queue_head *q) +{ + return !list_empty(&q->task_list); +} + +extern void swake_up(struct swait_queue_head *q); +extern void swake_up_all(struct swait_queue_head *q); +extern void swake_up_locked(struct swait_queue_head *q); + +extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); +extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state); +extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); + +extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait); +extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait); + +/* as per ___wait_event() but for swait, therefore "exclusive == 0" */ +#define ___swait_event(wq, condition, state, ret, cmd) \ +({ \ + struct swait_queue __wait; \ + long __ret = ret; \ + \ + INIT_LIST_HEAD(&__wait.task_list); \ + for (;;) { \ + long __int = prepare_to_swait_event(&wq, &__wait, state);\ + \ + if (condition) \ + break; \ + \ + if (___wait_is_interruptible(state) && __int) { \ + __ret = __int; \ + break; \ + } \ + \ + cmd; \ + } \ + finish_swait(&wq, &__wait); \ + __ret; \ +}) + +#define __swait_event(wq, condition) \ + (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ + schedule()) + +#define swait_event(wq, condition) \ +do { \ + if (condition) \ + break; \ + __swait_event(wq, condition); \ +} while (0) + +#define __swait_event_timeout(wq, condition, timeout) \ + ___swait_event(wq, ___wait_cond_timeout(condition), \ + TASK_UNINTERRUPTIBLE, timeout, \ + __ret = schedule_timeout(__ret)) + +#define swait_event_timeout(wq, condition, timeout) \ +({ \ + long __ret = timeout; \ + if (!___wait_cond_timeout(condition)) \ + __ret = __swait_event_timeout(wq, condition, timeout); \ + __ret; \ +}) + +#define __swait_event_interruptible(wq, condition) \ + ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \ + schedule()) + +#define swait_event_interruptible(wq, condition) \ +({ \ + int __ret = 0; \ + if (!(condition)) \ + __ret = __swait_event_interruptible(wq, 
condition); \ + __ret; \ +}) + +#define __swait_event_interruptible_timeout(wq, condition, timeout) \ + ___swait_event(wq, ___wait_cond_timeout(condition), \ + TASK_INTERRUPTIBLE, timeout, \ + __ret = schedule_timeout(__ret)) + +#define swait_event_interruptible_timeout(wq, condition, timeout) \ +({ \ + long __ret = timeout; \ + if (!___wait_cond_timeout(condition)) \ + __ret = __swait_event_interruptible_timeout(wq, \ + condition, timeout); \ + __ret; \ +}) + +#endif /* _LINUX_SWAIT_H */ diff --git a/include/linux/tcp.h b/include/linux/tcp.h index b386361ba3e8..7be9b1242354 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -29,9 +29,14 @@ static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb) return (struct tcphdr *)skb_transport_header(skb); } +static inline unsigned int __tcp_hdrlen(const struct tcphdr *th) +{ + return th->doff * 4; +} + static inline unsigned int tcp_hdrlen(const struct sk_buff *skb) { - return tcp_hdr(skb)->doff * 4; + return __tcp_hdrlen(tcp_hdr(skb)); } static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb) @@ -153,6 +158,9 @@ struct tcp_sock { u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn * total number of segments in. */ + u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn + * total number of data segments in. + */ u32 rcv_nxt; /* What we want to receive next */ u32 copied_seq; /* Head of yet unread data */ u32 rcv_wup; /* rcv_nxt on last window update sent */ @@ -160,6 +168,9 @@ struct tcp_sock { u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut * The total number of segments sent. */ + u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut + * total number of data segments sent. + */ u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked * sum(delta(snd_una)), or how many bytes * were acked. @@ -256,6 +267,7 @@ struct tcp_sock { u32 prr_delivered; /* Number of newly delivered packets to * receiver in Recovery. */ u32 prr_out; /* Total number of pkts sent during Recovery. */ + u32 delivered; /* Total data packets delivered incl. 
rexmits */ u32 rcv_wnd; /* Current receiver window */ u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ diff --git a/include/linux/tick.h b/include/linux/tick.h index 97fd4e543846..62be0786d6d0 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -97,8 +97,21 @@ static inline void tick_broadcast_exit(void) tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT); } +enum tick_dep_bits { + TICK_DEP_BIT_POSIX_TIMER = 0, + TICK_DEP_BIT_PERF_EVENTS = 1, + TICK_DEP_BIT_SCHED = 2, + TICK_DEP_BIT_CLOCK_UNSTABLE = 3 +}; + +#define TICK_DEP_MASK_NONE 0 +#define TICK_DEP_MASK_POSIX_TIMER (1 << TICK_DEP_BIT_POSIX_TIMER) +#define TICK_DEP_MASK_PERF_EVENTS (1 << TICK_DEP_BIT_PERF_EVENTS) +#define TICK_DEP_MASK_SCHED (1 << TICK_DEP_BIT_SCHED) +#define TICK_DEP_MASK_CLOCK_UNSTABLE (1 << TICK_DEP_BIT_CLOCK_UNSTABLE) + #ifdef CONFIG_NO_HZ_COMMON -extern int tick_nohz_enabled; +extern bool tick_nohz_enabled; extern int tick_nohz_tick_stopped(void); extern void tick_nohz_idle_enter(void); extern void tick_nohz_idle_exit(void); @@ -154,9 +167,73 @@ static inline int housekeeping_any_cpu(void) return cpumask_any_and(housekeeping_mask, cpu_online_mask); } -extern void tick_nohz_full_kick(void); +extern void tick_nohz_dep_set(enum tick_dep_bits bit); +extern void tick_nohz_dep_clear(enum tick_dep_bits bit); +extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit); +extern void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit); +extern void tick_nohz_dep_set_task(struct task_struct *tsk, + enum tick_dep_bits bit); +extern void tick_nohz_dep_clear_task(struct task_struct *tsk, + enum tick_dep_bits bit); +extern void tick_nohz_dep_set_signal(struct signal_struct *signal, + enum tick_dep_bits bit); +extern void tick_nohz_dep_clear_signal(struct signal_struct *signal, + enum tick_dep_bits bit); + +/* + * The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases + * on top of static keys. 
+ */ +static inline void tick_dep_set(enum tick_dep_bits bit) +{ + if (tick_nohz_full_enabled()) + tick_nohz_dep_set(bit); +} + +static inline void tick_dep_clear(enum tick_dep_bits bit) +{ + if (tick_nohz_full_enabled()) + tick_nohz_dep_clear(bit); +} + +static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) +{ + if (tick_nohz_full_cpu(cpu)) + tick_nohz_dep_set_cpu(cpu, bit); +} + +static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit) +{ + if (tick_nohz_full_cpu(cpu)) + tick_nohz_dep_clear_cpu(cpu, bit); +} + +static inline void tick_dep_set_task(struct task_struct *tsk, + enum tick_dep_bits bit) +{ + if (tick_nohz_full_enabled()) + tick_nohz_dep_set_task(tsk, bit); +} +static inline void tick_dep_clear_task(struct task_struct *tsk, + enum tick_dep_bits bit) +{ + if (tick_nohz_full_enabled()) + tick_nohz_dep_clear_task(tsk, bit); +} +static inline void tick_dep_set_signal(struct signal_struct *signal, + enum tick_dep_bits bit) +{ + if (tick_nohz_full_enabled()) + tick_nohz_dep_set_signal(signal, bit); +} +static inline void tick_dep_clear_signal(struct signal_struct *signal, + enum tick_dep_bits bit) +{ + if (tick_nohz_full_enabled()) + tick_nohz_dep_clear_signal(signal, bit); +} + extern void tick_nohz_full_kick_cpu(int cpu); -extern void tick_nohz_full_kick_all(void); extern void __tick_nohz_task_switch(void); #else static inline int housekeeping_any_cpu(void) @@ -166,9 +243,21 @@ static inline int housekeeping_any_cpu(void) static inline bool tick_nohz_full_enabled(void) { return false; } static inline bool tick_nohz_full_cpu(int cpu) { return false; } static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { } + +static inline void tick_dep_set(enum tick_dep_bits bit) { } +static inline void tick_dep_clear(enum tick_dep_bits bit) { } +static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { } +static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { } +static inline void tick_dep_set_task(struct task_struct *tsk, + enum tick_dep_bits bit) { } +static inline void tick_dep_clear_task(struct task_struct *tsk, + enum tick_dep_bits bit) { } +static inline void tick_dep_set_signal(struct signal_struct *signal, + enum tick_dep_bits bit) { } +static inline void tick_dep_clear_signal(struct signal_struct *signal, + enum tick_dep_bits bit) { } + static inline void tick_nohz_full_kick_cpu(int cpu) { } -static inline void tick_nohz_full_kick(void) { } -static inline void tick_nohz_full_kick_all(void) { } static inline void __tick_nohz_task_switch(void) { } #endif diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 25247220b4b7..e88005459035 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -50,6 +50,7 @@ struct tk_read_base { * @offs_tai: Offset clock monotonic -> clock tai * @tai_offset: The current UTC to TAI offset in seconds * @clock_was_set_seq: The sequence number of clock was set events + * @cs_was_changed_seq: The sequence number of clocksource change events * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second * @raw_time: Monotonic raw base time in timespec64 format * @cycle_interval: Number of clock cycles in one NTP interval @@ -91,6 +92,7 @@ struct timekeeper { ktime_t offs_tai; s32 tai_offset; unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; ktime_t next_leap_ktime; struct timespec64 raw_time; diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index ec89d846324c..96f37bee3bc1 100644 
--- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -267,6 +267,64 @@ extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw, struct timespec64 *ts_real); /* + * struct system_time_snapshot - simultaneous raw/real time capture with + * counter value + * @cycles: Clocksource counter value to produce the system times + * @real: Realtime system time + * @raw: Monotonic raw system time + * @clock_was_set_seq: The sequence number of clock was set events + * @cs_was_changed_seq: The sequence number of clocksource change events + */ +struct system_time_snapshot { + cycle_t cycles; + ktime_t real; + ktime_t raw; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; +}; + +/* + * struct system_device_crosststamp - system/device cross-timestamp + * (syncronized capture) + * @device: Device time + * @sys_realtime: Realtime simultaneous with device time + * @sys_monoraw: Monotonic raw simultaneous with device time + */ +struct system_device_crosststamp { + ktime_t device; + ktime_t sys_realtime; + ktime_t sys_monoraw; +}; + +/* + * struct system_counterval_t - system counter value with the pointer to the + * corresponding clocksource + * @cycles: System counter value + * @cs: Clocksource corresponding to system counter value. Used by + * timekeeping code to verify comparibility of two cycle values + */ +struct system_counterval_t { + cycle_t cycles; + struct clocksource *cs; +}; + +/* + * Get cross timestamp between system clock and device clock + */ +extern int get_device_system_crosststamp( + int (*get_time_fn)(ktime_t *device_time, + struct system_counterval_t *system_counterval, + void *ctx), + void *ctx, + struct system_time_snapshot *history, + struct system_device_crosststamp *xtstamp); + +/* + * Simultaneously snapshot realtime and monotonic raw clocks + */ +extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot); + +/* * Persistent clock related interfaces */ extern int persistent_clock_is_local; diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 429fdfc3baf5..705df7db4482 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -15,16 +15,6 @@ struct tracer; struct dentry; struct bpf_prog; -struct trace_print_flags { - unsigned long mask; - const char *name; -}; - -struct trace_print_flags_u64 { - unsigned long long mask; - const char *name; -}; - const char *trace_print_flags_seq(struct trace_seq *p, const char *delim, unsigned long flags, const struct trace_print_flags *flag_array); @@ -568,6 +558,8 @@ enum { FILTER_DYN_STRING, FILTER_PTR_STRING, FILTER_TRACE_FN, + FILTER_COMM, + FILTER_CPU, }; extern int trace_event_raw_init(struct trace_event_call *call); diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h index e1ee97c713bf..4ac89acb6136 100644 --- a/include/linux/tracepoint-defs.h +++ b/include/linux/tracepoint-defs.h @@ -3,13 +3,23 @@ /* * File can be included directly by headers who only want to access - * tracepoint->key to guard out of line trace calls. Otherwise - * linux/tracepoint.h should be used. + * tracepoint->key to guard out of line trace calls, or the definition of + * trace_print_flags{_u64}. Otherwise linux/tracepoint.h should be used. 
*/ #include <linux/atomic.h> #include <linux/static_key.h> +struct trace_print_flags { + unsigned long mask; + const char *name; +}; + +struct trace_print_flags_u64 { + unsigned long long mask; + const char *name; +}; + struct tracepoint_func { void *func; void *data; diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index acd522a91539..be586c632a0c 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -14,8 +14,10 @@ * See the file COPYING for more details. */ +#include <linux/smp.h> #include <linux/errno.h> #include <linux/types.h> +#include <linux/cpumask.h> #include <linux/rcupdate.h> #include <linux/tracepoint-defs.h> @@ -338,15 +340,19 @@ extern void syscall_unregfunc(void); * "void *__data, proto" as the callback prototype. */ #define DECLARE_TRACE_NOARGS(name) \ - __DECLARE_TRACE(name, void, , 1, void *__data, __data) + __DECLARE_TRACE(name, void, , \ + cpu_online(raw_smp_processor_id()), \ + void *__data, __data) #define DECLARE_TRACE(name, proto, args) \ - __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1, \ - PARAMS(void *__data, proto), \ - PARAMS(__data, args)) + __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ + cpu_online(raw_smp_processor_id()), \ + PARAMS(void *__data, proto), \ + PARAMS(__data, args)) #define DECLARE_TRACE_CONDITION(name, proto, args, cond) \ - __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \ + __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ + cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \ PARAMS(void *__data, proto), \ PARAMS(__data, args)) diff --git a/include/linux/tty.h b/include/linux/tty.h index d9fb4b043f56..3b09f235db66 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -302,6 +302,7 @@ struct tty_struct { struct work_struct hangup_work; void *disc_data; void *driver_data; + spinlock_t files_lock; /* protects tty_files list */ struct list_head tty_files; #define N_TTY_BUF_SIZE 4096 @@ -336,7 +337,6 @@ struct tty_file_private { #define TTY_IO_ERROR 1 /* Cause an I/O error (may be no ldisc too) */ #define TTY_OTHER_CLOSED 2 /* Other side (if any) has closed */ #define TTY_EXCLUSIVE 3 /* Exclusive open mode */ -#define TTY_DEBUG 4 /* Debugging */ #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */ #define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */ #define TTY_LDISC_OPEN 11 /* Line discipline is open */ @@ -433,8 +433,6 @@ extern struct device *tty_register_device_attr(struct tty_driver *driver, void *drvdata, const struct attribute_group **attr_grp); extern void tty_unregister_device(struct tty_driver *driver, unsigned index); -extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp, - int buflen); extern void tty_write_message(struct tty_struct *tty, char *msg); extern int tty_send_xchar(struct tty_struct *tty, char ch); extern int tty_put_char(struct tty_struct *tty, unsigned char c); @@ -446,12 +444,7 @@ extern void tty_unthrottle(struct tty_struct *tty); extern int tty_throttle_safe(struct tty_struct *tty); extern int tty_unthrottle_safe(struct tty_struct *tty); extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws); -extern void tty_driver_remove_tty(struct tty_driver *driver, - struct tty_struct *tty); -extern void tty_free_termios(struct tty_struct *tty); extern int is_current_pgrp_orphaned(void); -extern int is_ignored(int sig); -extern int tty_signal(int sig, struct tty_struct *tty); extern void tty_hangup(struct tty_struct *tty); extern void tty_vhangup(struct tty_struct *tty); 
extern int tty_hung_up_p(struct file *filp); @@ -493,7 +486,8 @@ extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt); extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); extern void tty_ldisc_deref(struct tty_ldisc *); extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); -extern void tty_ldisc_hangup(struct tty_struct *tty); +extern void tty_ldisc_hangup(struct tty_struct *tty, bool reset); +extern int tty_ldisc_reinit(struct tty_struct *tty, int disc); extern const struct file_operations tty_ldiscs_proc_fops; extern void tty_wakeup(struct tty_struct *tty); @@ -508,16 +502,13 @@ extern struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx); extern int tty_alloc_file(struct file *file); extern void tty_add_file(struct tty_struct *tty, struct file *file); extern void tty_free_file(struct file *file); -extern void free_tty_struct(struct tty_struct *tty); -extern void deinitialize_tty_struct(struct tty_struct *tty); extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx); extern int tty_release(struct inode *inode, struct file *filp); -extern int tty_init_termios(struct tty_struct *tty); +extern void tty_init_termios(struct tty_struct *tty); extern int tty_standard_install(struct tty_driver *driver, struct tty_struct *tty); extern struct mutex tty_mutex; -extern spinlock_t tty_files_lock; #define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock)) @@ -575,43 +566,29 @@ static inline int tty_port_users(struct tty_port *port) extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); extern int tty_unregister_ldisc(int disc); -extern int tty_set_ldisc(struct tty_struct *tty, int ldisc); +extern int tty_set_ldisc(struct tty_struct *tty, int disc); extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); extern void tty_ldisc_release(struct tty_struct *tty); extern void tty_ldisc_init(struct tty_struct *tty); extern void tty_ldisc_deinit(struct tty_struct *tty); -extern void tty_ldisc_begin(void); - -static inline int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p, - char *f, int count) -{ - if (ld->ops->receive_buf2) - count = ld->ops->receive_buf2(ld->tty, p, f, count); - else { - count = min_t(int, count, ld->tty->receive_room); - if (count) - ld->ops->receive_buf(ld->tty, p, f, count); - } - return count; -} - +extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p, + char *f, int count); /* n_tty.c */ -extern struct tty_ldisc_ops tty_ldisc_N_TTY; extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops); +extern void __init n_tty_init(void); /* tty_audit.c */ #ifdef CONFIG_AUDIT extern void tty_audit_add_data(struct tty_struct *tty, const void *data, - size_t size, unsigned icanon); + size_t size); extern void tty_audit_exit(void); extern void tty_audit_fork(struct signal_struct *sig); extern void tty_audit_tiocsti(struct tty_struct *tty, char ch); -extern void tty_audit_push(struct tty_struct *tty); -extern int tty_audit_push_current(void); +extern int tty_audit_push(void); #else static inline void tty_audit_add_data(struct tty_struct *tty, const void *data, - size_t size, unsigned icanon) + size_t size) { } static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch) @@ -623,10 +600,7 @@ static inline void tty_audit_exit(void) static inline void tty_audit_fork(struct signal_struct *sig) { } -static inline void tty_audit_push(struct tty_struct *tty) -{ -} -static inline int tty_audit_push_current(void) +static inline int 
tty_audit_push(void) { return 0; } @@ -648,11 +622,11 @@ extern long vt_compat_ioctl(struct tty_struct *tty, /* tty_mutex.c */ /* functions for preparation of BKL removal */ -extern void __lockfunc tty_lock(struct tty_struct *tty); +extern void tty_lock(struct tty_struct *tty); extern int tty_lock_interruptible(struct tty_struct *tty); -extern void __lockfunc tty_unlock(struct tty_struct *tty); -extern void __lockfunc tty_lock_slave(struct tty_struct *tty); -extern void __lockfunc tty_unlock_slave(struct tty_struct *tty); +extern void tty_unlock(struct tty_struct *tty); +extern void tty_lock_slave(struct tty_struct *tty); +extern void tty_unlock_slave(struct tty_struct *tty); extern void tty_set_lock_subclass(struct tty_struct *tty); #ifdef CONFIG_PROC_FS diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index 00c9d688d7b7..3971cf0eb467 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h @@ -25,12 +25,6 @@ * buffers of any input characters it may have queued to be * delivered to the user mode process. * - * ssize_t (*chars_in_buffer)(struct tty_struct *tty); - * - * This function returns the number of input characters the line - * discipline may have queued up to be delivered to the user mode - * process. - * * ssize_t (*read)(struct tty_struct * tty, struct file * file, * unsigned char * buf, size_t nr); * @@ -104,11 +98,6 @@ * seek to perform this action quickly but should wait until * any pending driver I/O is completed. * - * void (*fasync)(struct tty_struct *, int on) - * - * Notify line discipline when signal-driven I/O is enabled or - * disabled. - * * void (*dcd_change)(struct tty_struct *tty, unsigned int status) * * Tells the discipline that the DCD pin has changed its status. @@ -188,7 +177,6 @@ struct tty_ldisc_ops { int (*open)(struct tty_struct *); void (*close)(struct tty_struct *); void (*flush_buffer)(struct tty_struct *tty); - ssize_t (*chars_in_buffer)(struct tty_struct *tty); ssize_t (*read)(struct tty_struct *tty, struct file *file, unsigned char __user *buf, size_t nr); ssize_t (*write)(struct tty_struct *tty, struct file *file, @@ -209,7 +197,6 @@ struct tty_ldisc_ops { char *fp, int count); void (*write_wakeup)(struct tty_struct *); void (*dcd_change)(struct tty_struct *, unsigned int); - void (*fasync)(struct tty_struct *tty, int on); int (*receive_buf2)(struct tty_struct *, const unsigned char *cp, char *fp, int count); diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h index cbb20afdbc01..bb679b48f408 100644 --- a/include/linux/ucs2_string.h +++ b/include/linux/ucs2_string.h @@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s); unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength); int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len); +unsigned long ucs2_utf8size(const ucs2_char_t *src); +unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, + unsigned long maxlength); + #endif /* _LINUX_UCS2_STRING_H_ */ diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h index 99c1b4d20b0f..33383ca23837 100644 --- a/include/linux/unaligned/access_ok.h +++ b/include/linux/unaligned/access_ok.h @@ -4,62 +4,62 @@ #include <linux/kernel.h> #include <asm/byteorder.h> -static inline u16 get_unaligned_le16(const void *p) +static __always_inline u16 get_unaligned_le16(const void *p) { return le16_to_cpup((__le16 *)p); } -static inline u32 get_unaligned_le32(const void *p) +static __always_inline u32 get_unaligned_le32(const void *p) { 
return le32_to_cpup((__le32 *)p); } -static inline u64 get_unaligned_le64(const void *p) +static __always_inline u64 get_unaligned_le64(const void *p) { return le64_to_cpup((__le64 *)p); } -static inline u16 get_unaligned_be16(const void *p) +static __always_inline u16 get_unaligned_be16(const void *p) { return be16_to_cpup((__be16 *)p); } -static inline u32 get_unaligned_be32(const void *p) +static __always_inline u32 get_unaligned_be32(const void *p) { return be32_to_cpup((__be32 *)p); } -static inline u64 get_unaligned_be64(const void *p) +static __always_inline u64 get_unaligned_be64(const void *p) { return be64_to_cpup((__be64 *)p); } -static inline void put_unaligned_le16(u16 val, void *p) +static __always_inline void put_unaligned_le16(u16 val, void *p) { *((__le16 *)p) = cpu_to_le16(val); } -static inline void put_unaligned_le32(u32 val, void *p) +static __always_inline void put_unaligned_le32(u32 val, void *p) { *((__le32 *)p) = cpu_to_le32(val); } -static inline void put_unaligned_le64(u64 val, void *p) +static __always_inline void put_unaligned_le64(u64 val, void *p) { *((__le64 *)p) = cpu_to_le64(val); } -static inline void put_unaligned_be16(u16 val, void *p) +static __always_inline void put_unaligned_be16(u16 val, void *p) { *((__be16 *)p) = cpu_to_be16(val); } -static inline void put_unaligned_be32(u32 val, void *p) +static __always_inline void put_unaligned_be32(u32 val, void *p) { *((__be32 *)p) = cpu_to_be32(val); } -static inline void put_unaligned_be64(u64 val, void *p) +static __always_inline void put_unaligned_be64(u64 val, void *p) { *((__be64 *)p) = cpu_to_be64(val); } diff --git a/include/linux/usb.h b/include/linux/usb.h index 89533ba38691..6a9a0c28415d 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -50,6 +50,7 @@ struct ep_device; * struct usb_host_endpoint - host-side endpoint descriptor and queue * @desc: descriptor for this endpoint, wMaxPacketSize in native byteorder * @ss_ep_comp: SuperSpeed companion descriptor for this endpoint + * @ssp_isoc_ep_comp: SuperSpeedPlus isoc companion descriptor for this endpoint * @urb_list: urbs queued to this endpoint; maintained by usbcore * @hcpriv: for use by HCD; typically holds hardware dma queue head (QH) * with one or more transfer descriptors (TDs) per urb @@ -65,6 +66,7 @@ struct ep_device; struct usb_host_endpoint { struct usb_endpoint_descriptor desc; struct usb_ss_ep_comp_descriptor ss_ep_comp; + struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp; struct list_head urb_list; void *hcpriv; struct ep_device *ep_dev; /* For sysfs info */ @@ -330,6 +332,7 @@ struct usb_host_bos { struct usb_ss_cap_descriptor *ss_cap; struct usb_ssp_cap_descriptor *ssp_cap; struct usb_ss_container_id_descriptor *ss_id; + struct usb_ptm_cap_descriptor *ptm_cap; }; int __usb_get_extra_descriptor(char *buffer, unsigned size, @@ -375,7 +378,6 @@ struct usb_bus { struct usb_devmap devmap; /* device address allocation map */ struct usb_device *root_hub; /* Root hub */ struct usb_bus *hs_companion; /* Companion EHCI bus, if any */ - struct list_head bus_list; /* list of busses */ struct mutex usb_address0_mutex; /* unaddressed device mutex */ @@ -642,9 +644,10 @@ extern struct usb_device *usb_hub_find_child(struct usb_device *hdev, if (!child) continue; else /* USB device locking */ -#define usb_lock_device(udev) device_lock(&(udev)->dev) -#define usb_unlock_device(udev) device_unlock(&(udev)->dev) -#define usb_trylock_device(udev) device_trylock(&(udev)->dev) +#define usb_lock_device(udev) device_lock(&(udev)->dev) 
+#define usb_unlock_device(udev) device_unlock(&(udev)->dev) +#define usb_lock_device_interruptible(udev) device_lock_interruptible(&(udev)->dev) +#define usb_trylock_device(udev) device_trylock(&(udev)->dev) extern int usb_lock_device_for_reset(struct usb_device *udev, const struct usb_interface *iface); diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 1074b8921a5d..2b81b24eb5aa 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -126,6 +126,10 @@ struct usb_os_desc_table { * string identifiers assigned during @bind(). If this * pointer is null after initiation, the function will not * be available at super speed. + * @ssp_descriptors: Table of super speed plus descriptors, using + * interface and string identifiers assigned during @bind(). If + * this pointer is null after initiation, the function will not + * be available at super speed plus. * @config: assigned when @usb_add_function() is called; this is the * configuration with which this function is associated. * @os_desc_table: Table of (interface id, os descriptors) pairs. The function @@ -186,6 +190,7 @@ struct usb_function { struct usb_descriptor_header **fs_descriptors; struct usb_descriptor_header **hs_descriptors; struct usb_descriptor_header **ss_descriptors; + struct usb_descriptor_header **ssp_descriptors; struct usb_configuration *config; @@ -317,6 +322,7 @@ struct usb_configuration { unsigned superspeed:1; unsigned highspeed:1; unsigned fullspeed:1; + unsigned superspeed_plus:1; struct usb_function *interface[MAX_CONFIG_INTERFACES]; }; diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index d82d0068872b..5d4e151c49bf 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -595,6 +595,10 @@ struct usb_gadget_ops { * only supports HNP on a different root port. * @b_hnp_enable: OTG device feature flag, indicating that the A-Host * enabled HNP support. + * @hnp_polling_support: OTG device feature flag, indicating if the OTG device + * in peripheral mode can support HNP polling. + * @host_request_flag: OTG device feature flag, indicating if A-Peripheral + * or B-Peripheral wants to take host role. * @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to * MaxPacketSize. * @is_selfpowered: if the gadget is self-powered. 
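A short, hypothetical sketch of how a composite function would opt into SuperSpeedPlus with the new @ssp_descriptors table may help; it assumes the five-argument usb_assign_descriptors() prototype that appears a few hunks further down. The demo_ identifiers are placeholders, and the empty tables stand in for a real function's per-speed descriptors.

#include <linux/usb/composite.h>

/* Placeholder tables; a real function lists its interface and endpoint
 * descriptors for each speed here. */
static struct usb_descriptor_header *demo_fs_descs[]  = { NULL };
static struct usb_descriptor_header *demo_hs_descs[]  = { NULL };
static struct usb_descriptor_header *demo_ss_descs[]  = { NULL };
static struct usb_descriptor_header *demo_ssp_descs[] = { NULL };

/* Hypothetical bind(): hand all four tables to the composite core.  The
 * SuperSpeedPlus table is only consulted when the gadget actually runs at
 * that speed; leaving it NULL keeps the function unavailable there, as the
 * @ssp_descriptors kernel-doc above notes. */
static int demo_func_bind(struct usb_configuration *c, struct usb_function *f)
{
	return usb_assign_descriptors(f, demo_fs_descs, demo_hs_descs,
				      demo_ss_descs, demo_ssp_descs);
}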
@@ -642,6 +646,8 @@ struct usb_gadget { unsigned b_hnp_enable:1; unsigned a_hnp_support:1; unsigned a_alt_hnp_support:1; + unsigned hnp_polling_support:1; + unsigned host_request_flag:1; unsigned quirk_ep_out_aligned_size:1; unsigned quirk_altset_not_supp:1; unsigned quirk_stall_not_supp:1; @@ -729,6 +735,16 @@ static inline int gadget_is_superspeed(struct usb_gadget *g) } /** + * gadget_is_superspeed_plus() - return true if the hardware handles + * superspeed plus + * @g: controller that might support superspeed plus + */ +static inline int gadget_is_superspeed_plus(struct usb_gadget *g) +{ + return g->max_speed >= USB_SPEED_SUPER_PLUS; +} + +/** * gadget_is_otg - return true iff the hardware is OTG-ready * @g: controller that might have a Mini-AB connector * @@ -1126,6 +1142,7 @@ extern int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, void (*release)(struct device *dev)); extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget); extern void usb_del_gadget_udc(struct usb_gadget *gadget); +extern char *usb_get_gadget_udc_name(void); /*-------------------------------------------------------------------------*/ @@ -1194,7 +1211,8 @@ struct usb_function; int usb_assign_descriptors(struct usb_function *f, struct usb_descriptor_header **fs, struct usb_descriptor_header **hs, - struct usb_descriptor_header **ss); + struct usb_descriptor_header **ss, + struct usb_descriptor_header **ssp); void usb_free_all_descriptors(struct usb_function *f); struct usb_descriptor_header *usb_otg_descriptor_alloc( diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 4dcf8446dbcd..b98f831dcda3 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -23,6 +23,7 @@ #include <linux/rwsem.h> #include <linux/interrupt.h> +#include <linux/idr.h> #define MAX_TOPO_LEVEL 6 @@ -630,8 +631,8 @@ extern void usb_set_device_state(struct usb_device *udev, /* exported only within usbcore */ -extern struct list_head usb_bus_list; -extern struct mutex usb_bus_list_lock; +extern struct idr usb_bus_idr; +extern struct mutex usb_bus_idr_lock; extern wait_queue_head_t usb_kill_urb_queue; diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h index e159b39f67a2..974c3796a23f 100644 --- a/include/linux/usb/msm_hsusb_hw.h +++ b/include/linux/usb/msm_hsusb_hw.h @@ -22,6 +22,7 @@ #define USB_AHBBURST (MSM_USB_BASE + 0x0090) #define USB_AHBMODE (MSM_USB_BASE + 0x0098) #define USB_GENCONFIG_2 (MSM_USB_BASE + 0x00a0) +#define ULPI_TX_PKT_EN_CLR_FIX BIT(19) #define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */ diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h index 96ddfb7ab018..0b3da40a525e 100644 --- a/include/linux/usb/musb.h +++ b/include/linux/usb/musb.h @@ -124,7 +124,7 @@ struct musb_hdrc_platform_data { int (*set_power)(int state); /* MUSB configuration-specific details */ - struct musb_hdrc_config *config; + const struct musb_hdrc_config *config; /* Architecture specific board data */ void *board_data; diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h index 974bce93aa28..de3237fce6b2 100644 --- a/include/linux/usb/of.h +++ b/include/linux/usb/of.h @@ -16,6 +16,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *phy_np); bool of_usb_host_tpl_support(struct device_node *np); int of_usb_update_otg_caps(struct device_node *np, struct usb_otg_caps *otg_caps); +struct device_node *usb_of_get_child_node(struct device_node *parent, + int portnum); #else static inline enum usb_dr_mode 
of_usb_get_dr_mode_by_phy(struct device_node *phy_np) @@ -31,6 +33,11 @@ static inline int of_usb_update_otg_caps(struct device_node *np, { return 0; } +static inline struct device_node *usb_of_get_child_node + (struct device_node *parent, int portnum) +{ + return NULL; +} #endif #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT) diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h index f728f1854829..24198e16f849 100644 --- a/include/linux/usb/otg-fsm.h +++ b/include/linux/usb/otg-fsm.h @@ -40,6 +40,18 @@ #define PROTO_HOST (1) #define PROTO_GADGET (2) +#define OTG_STS_SELECTOR 0xF000 /* OTG status selector, according to + * OTG and EH 2.0 Chapter 6.2.3 + * Table:6-4 + */ + +#define HOST_REQUEST_FLAG 1 /* Host request flag, according to + * OTG and EH 2.0 Charpter 6.2.3 + * Table:6-5 + */ + +#define T_HOST_REQ_POLL (1500) /* 1500ms, HNP polling interval */ + enum otg_fsm_timer { /* Standard OTG timers */ A_WAIT_VRISE, @@ -48,6 +60,7 @@ enum otg_fsm_timer { A_AIDL_BDIS, B_ASE0_BRST, A_BIDL_ADIS, + B_AIDL_BDIS, /* Auxiliary timers */ B_SE0_SRP, @@ -119,6 +132,8 @@ struct otg_fsm { /* Current usb protocol used: 0:undefine; 1:host; 2:client */ int protocol; struct mutex lock; + u8 *host_req_flag; + struct delayed_work hnp_polling_work; }; struct otg_fsm_ops { diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index 4db191fe8c2c..00a47d058d83 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h @@ -184,6 +184,7 @@ struct renesas_usbhs_driver_param { }; #define USBHS_TYPE_RCAR_GEN2 1 +#define USBHS_TYPE_RCAR_GEN3 2 /* * option: diff --git a/include/linux/usb/storage.h b/include/linux/usb/storage.h index cb33fff2ba0b..305ee8db7faf 100644 --- a/include/linux/usb/storage.h +++ b/include/linux/usb/storage.h @@ -45,9 +45,9 @@ #define USB_PR_DEVICE 0xff /* Use device's value */ - /* - * Bulk only data structures - */ +/* + * Bulk only data structures + */ /* command block wrapper */ struct bulk_cb_wrap { @@ -56,18 +56,18 @@ struct bulk_cb_wrap { __le32 DataTransferLength; /* size of data */ __u8 Flags; /* direction in bit 0 */ __u8 Lun; /* LUN normally 0 */ - __u8 Length; /* of of the CDB */ + __u8 Length; /* length of the CDB */ __u8 CDB[16]; /* max command */ }; #define US_BULK_CB_WRAP_LEN 31 -#define US_BULK_CB_SIGN 0x43425355 /*spells out USBC */ +#define US_BULK_CB_SIGN 0x43425355 /* spells out 'USBC' */ #define US_BULK_FLAG_IN (1 << 7) #define US_BULK_FLAG_OUT 0 /* command status wrapper */ struct bulk_cs_wrap { - __le32 Signature; /* should = 'USBS' */ + __le32 Signature; /* contains 'USBS' */ __u32 Tag; /* same as original command */ __le32 Residue; /* amount not transferred */ __u8 Status; /* see below */ diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 610a86a892b8..0ecae0b1cd34 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -92,6 +92,17 @@ extern int vfio_external_user_iommu_id(struct vfio_group *group); extern long vfio_external_check_extension(struct vfio_group *group, unsigned long arg); +/* + * Sub-module helpers + */ +struct vfio_info_cap { + struct vfio_info_cap_header *buf; + size_t size; +}; +extern struct vfio_info_cap_header *vfio_info_cap_add( + struct vfio_info_cap *caps, size_t size, u16 id, u16 version); +extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset); + struct pci_dev; #ifdef CONFIG_EEH extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev); diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 
67c1dbd19c6d..ec084321fe09 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -53,6 +53,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED, COMPACTISOLATED, COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS, + KCOMPACTD_WAKE, #endif #ifdef CONFIG_HUGETLB_PAGE HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, @@ -71,6 +72,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, THP_COLLAPSE_ALLOC_FAILED, THP_SPLIT_PAGE, THP_SPLIT_PAGE_FAILED, + THP_DEFERRED_SPLIT_PAGE, THP_SPLIT_PMD, THP_ZERO_PAGE_ALLOC, THP_ZERO_PAGE_ALLOC_FAILED, diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h index 65ac54c61c18..1bd31a38c51e 100644 --- a/include/linux/vmw_vmci_defs.h +++ b/include/linux/vmw_vmci_defs.h @@ -734,6 +734,41 @@ static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data) } /* + * Helper to read a value from a head or tail pointer. For X86_32, the + * pointer is treated as a 32bit value, since the pointer value + * never exceeds a 32bit value in this case. Also, doing an + * atomic64_read on X86_32 uniprocessor systems may be implemented + * as a non locked cmpxchg8b, that may end up overwriting updates done + * by the VMCI device to the memory location. On 32bit SMP, the lock + * prefix will be used, so correctness isn't an issue, but using a + * 64bit operation still adds unnecessary overhead. + */ +static inline u64 vmci_q_read_pointer(atomic64_t *var) +{ +#if defined(CONFIG_X86_32) + return atomic_read((atomic_t *)var); +#else + return atomic64_read(var); +#endif +} + +/* + * Helper to set the value of a head or tail pointer. For X86_32, the + * pointer is treated as a 32bit value, since the pointer value + * never exceeds a 32bit value in this case. On 32bit SMP, using a + * locked cmpxchg8b adds unnecessary overhead. + */ +static inline void vmci_q_set_pointer(atomic64_t *var, + u64 new_val) +{ +#if defined(CONFIG_X86_32) + return atomic_set((atomic_t *)var, (u32)new_val); +#else + return atomic64_set(var, new_val); +#endif +} + +/* * Helper to add a given offset to a head or tail pointer. Wraps the * value of the pointer around the max size of the queue. 
*/ @@ -741,14 +776,14 @@ static inline void vmci_qp_add_pointer(atomic64_t *var, size_t add, u64 size) { - u64 new_val = atomic64_read(var); + u64 new_val = vmci_q_read_pointer(var); if (new_val >= size - add) new_val -= size; new_val += add; - atomic64_set(var, new_val); + vmci_q_set_pointer(var, new_val); } /* @@ -758,7 +793,7 @@ static inline u64 vmci_q_header_producer_tail(const struct vmci_queue_header *q_header) { struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header; - return atomic64_read(&qh->producer_tail); + return vmci_q_read_pointer(&qh->producer_tail); } /* @@ -768,7 +803,7 @@ static inline u64 vmci_q_header_consumer_head(const struct vmci_queue_header *q_header) { struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header; - return atomic64_read(&qh->consumer_head); + return vmci_q_read_pointer(&qh->consumer_head); } /* diff --git a/include/linux/wait.h b/include/linux/wait.h index ae71a769b89e..27d7a0ab5da3 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -338,7 +338,7 @@ do { \ schedule(); try_to_freeze()) /** - * wait_event - sleep (or freeze) until a condition gets true + * wait_event_freezable - sleep (or freeze) until a condition gets true * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 0e32bc71245e..ca73c503b92a 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -311,6 +311,7 @@ enum { __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */ __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */ + __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */ WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ @@ -411,12 +412,12 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args) #define create_workqueue(name) \ - alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name)) + alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name)) #define create_freezable_workqueue(name) \ - alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \ - 1, (name)) + alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \ + WQ_MEM_RECLAIM, 1, (name)) #define create_singlethread_workqueue(name) \ - alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) + alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name) extern void destroy_workqueue(struct workqueue_struct *wq); diff --git a/include/linux/writeback.h b/include/linux/writeback.h index b333c945e571..d0b5ca5d4e08 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -198,6 +198,7 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc, void wbc_detach_inode(struct writeback_control *wbc); void wbc_account_io(struct writeback_control *wbc, struct page *page, size_t bytes); +void cgroup_writeback_umount(void); /** * inode_attach_wb - associate an inode with its wb @@ -301,6 +302,10 @@ static inline void wbc_account_io(struct writeback_control *wbc, { } +static inline void cgroup_writeback_umount(void) +{ +} + #endif /* CONFIG_CGROUP_WRITEBACK */ /* diff --git a/include/media/media-device.h b/include/media/media-device.h index d3855898c3fc..df74cfa7da4a 100644 --- a/include/media/media-device.h +++ b/include/media/media-device.h @@ -265,9 +265,29 @@ struct ida; struct device; /** + * struct media_entity_notify - Media Entity Notify + * + * @list: List head 
+ * @notify_data: Input data to invoke the callback + * @notify: Callback function pointer + * + * Drivers may register a callback to take action when + * new entities get registered with the media device. + */ +struct media_entity_notify { + struct list_head list; + void *notify_data; + void (*notify)(struct media_entity *entity, void *notify_data); +}; + +/** * struct media_device - Media device * @dev: Parent device * @devnode: Media device node + * @driver_name: Optional device driver name. If not set, calls to + * %MEDIA_IOC_DEVICE_INFO will return dev->driver->name. + * This is needed for USB drivers for example, as otherwise + * they'll all appear as if the driver name was "usb". * @model: Device model name * @serial: Device serial number (optional) * @bus_info: Unique and stable device location identifier @@ -283,8 +303,16 @@ struct device; * @interfaces: List of registered interfaces * @pads: List of registered pads * @links: List of registered links + * @entity_notify: List of registered entity_notify callbacks * @lock: Entities list lock * @graph_mutex: Entities graph operation lock + * @pm_count_walk: Graph walk for power state walk. Access serialised using + * graph_mutex. + * + * @source_priv: Driver Private data for enable/disable source handlers + * @enable_source: Enable Source Handler function pointer + * @disable_source: Disable Source Handler function pointer + * * @link_notify: Link state change notification callback * * This structure represents an abstract high-level media device. It allows easy @@ -296,6 +324,26 @@ struct device; * * @model is a descriptive model name exported through sysfs. It doesn't have to * be unique. + * + * @enable_source is a handler to find source entity for the + * sink entity and activate the link between them if source + * entity is free. Drivers should call this handler before + * accessing the source. + * + * @disable_source is a handler to find source entity for the + * sink entity and deactivate the link between them. Drivers + * should call this handler to release the source. + * + * Note: Bridge driver is expected to implement and set the + * handler when media_device is registered or when + * bridge driver finds the media_device during probe. + * Bridge driver sets source_priv with information + * necessary to run enable/disable source handlers. + * + * Use-case: find tuner entity connected to the decoder + * entity and check if it is available, and activate the + * the link between them from enable_source and deactivate + * from disable_source. */ struct media_device { /* dev->driver_data points to this struct. */ @@ -303,6 +351,7 @@ struct media_device { struct media_devnode devnode; char model[32]; + char driver_name[32]; char serial[40]; char bus_info[32]; u32 hw_revision; @@ -319,15 +368,28 @@ struct media_device { struct list_head pads; struct list_head links; + /* notify callback list invoked when a new entity is registered */ + struct list_head entity_notify; + /* Protects the graph objects creation/removal */ spinlock_t lock; /* Serializes graph operations. 
*/ struct mutex graph_mutex; + struct media_entity_graph pm_count_walk; + + void *source_priv; + int (*enable_source)(struct media_entity *entity, + struct media_pipeline *pipe); + void (*disable_source)(struct media_entity *entity); int (*link_notify)(struct media_link *link, u32 flags, unsigned int notification); }; +/* We don't need to include pci.h or usb.h here */ +struct pci_dev; +struct usb_device; + #ifdef CONFIG_MEDIA_CONTROLLER /* Supported link_notify @notification values. */ @@ -498,6 +560,31 @@ int __must_check media_device_register_entity(struct media_device *mdev, void media_device_unregister_entity(struct media_entity *entity); /** + * media_device_register_entity_notify() - Registers a media entity_notify + * callback + * + * @mdev: The media device + * @nptr: The media_entity_notify + * + * Note: When a new entity is registered, all the registered + * media_entity_notify callbacks are invoked. + */ + +int __must_check media_device_register_entity_notify(struct media_device *mdev, + struct media_entity_notify *nptr); + +/** + * media_device_unregister_entity_notify() - Unregister a media entity notify + * callback + * + * @mdev: The media device + * @nptr: The media_entity_notify + * + */ +void media_device_unregister_entity_notify(struct media_device *mdev, + struct media_entity_notify *nptr); + +/** * media_device_get_devres() - get media device as device resource * creates if one doesn't exist * @@ -536,6 +623,39 @@ struct media_device *media_device_find_devres(struct device *dev); /* Iterate over all links. */ #define media_device_for_each_link(link, mdev) \ list_for_each_entry(link, &(mdev)->links, graph_obj.list) + +/** + * media_device_pci_init() - create and initialize a + * struct &media_device from a PCI device. + * + * @mdev: pointer to struct &media_device + * @pci_dev: pointer to struct pci_dev + * @name: media device name. If %NULL, the routine will use the default + * name for the pci device, given by pci_name() macro. + */ +void media_device_pci_init(struct media_device *mdev, + struct pci_dev *pci_dev, + const char *name); +/** + * __media_device_usb_init() - create and initialize a + * struct &media_device from a USB device. + * + * @mdev: pointer to struct &media_device + * @udev: pointer to struct usb_device + * @board_name: media device name. If %NULL, the routine will use the usb + * product name, if available. + * @driver_name: name of the driver. If %NULL, the routine will use the name + * given by udev->dev->driver->name, which is usually the wrong + * thing to do. + * + * NOTE: It is better to call media_device_usb_init() instead, as + * that macro fills driver_name with %KBUILD_MODNAME.
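As a rough usage sketch of the helper above (not taken from this patch; the skel_video structure, its probe routine and the board name are hypothetical), a USB bridge driver would typically initialize and register its media_device along these lines:

#include <linux/device.h>
#include <linux/usb.h>
#include <media/media-device.h>

struct skel_video {
	struct media_device mdev;
	/* ... entities, video_device, etc. ... */
};

static int skel_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct skel_video *skel;

	skel = devm_kzalloc(&intf->dev, sizeof(*skel), GFP_KERNEL);
	if (!skel)
		return -ENOMEM;

	/*
	 * Init mdev from udev; per the kernel-doc above, the board name
	 * falls back to the USB product name and driver_name is filled
	 * with KBUILD_MODNAME by the media_device_usb_init() macro.
	 */
	media_device_usb_init(&skel->mdev, udev, "skeleton capture");

	/* entities and interfaces would be registered here ... */

	return media_device_register(&skel->mdev);
}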
+ */ +void __media_device_usb_init(struct media_device *mdev, + struct usb_device *udev, + const char *board_name, + const char *driver_name); + #else static inline int media_device_register(struct media_device *mdev) { @@ -552,6 +672,17 @@ static inline int media_device_register_entity(struct media_device *mdev, static inline void media_device_unregister_entity(struct media_entity *entity) { } +static inline int media_device_register_entity_notify( + struct media_device *mdev, + struct media_entity_notify *nptr) +{ + return 0; +} +static inline void media_device_unregister_entity_notify( + struct media_device *mdev, + struct media_entity_notify *nptr) +{ +} static inline struct media_device *media_device_get_devres(struct device *dev) { return NULL; @@ -560,5 +691,23 @@ static inline struct media_device *media_device_find_devres(struct device *dev) { return NULL; } + +static inline void media_device_pci_init(struct media_device *mdev, + struct pci_dev *pci_dev, + char *name) +{ +} + +static inline void __media_device_usb_init(struct media_device *mdev, + struct usb_device *udev, + char *board_name, + char *driver_name) +{ +} + #endif /* CONFIG_MEDIA_CONTROLLER */ + +#define media_device_usb_init(mdev, udev, name) \ + __media_device_usb_init(mdev, udev, name, KBUILD_MODNAME) + #endif diff --git a/include/media/media-entity.h b/include/media/media-entity.h index fe485d367985..6dc9e4e8cbd4 100644 --- a/include/media/media-entity.h +++ b/include/media/media-entity.h @@ -24,6 +24,7 @@ #define _MEDIA_ENTITY_H #include <linux/bitmap.h> +#include <linux/bug.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/media.h> @@ -832,6 +833,16 @@ media_entity_graph_walk_next(struct media_entity_graph *graph); */ __must_check int media_entity_pipeline_start(struct media_entity *entity, struct media_pipeline *pipe); +/** + * __media_entity_pipeline_start - Mark a pipeline as streaming + * + * @entity: Starting entity + * @pipe: Media pipeline to be assigned to all entities in the pipeline. 
+ * + * Note: This is the non-locking version of media_entity_pipeline_start() + */ +__must_check int __media_entity_pipeline_start(struct media_entity *entity, + struct media_pipeline *pipe); /** * media_entity_pipeline_stop - Mark a pipeline as not streaming @@ -848,6 +859,15 @@ __must_check int media_entity_pipeline_start(struct media_entity *entity, void media_entity_pipeline_stop(struct media_entity *entity); /** + * __media_entity_pipeline_stop - Mark a pipeline as not streaming + * + * @entity: Starting entity + * + * Note: This is the non-locking version of media_entity_pipeline_stop() + */ +void __media_entity_pipeline_stop(struct media_entity *entity); + +/** * media_devnode_create() - creates and initializes a device node interface * * @mdev: pointer to struct &media_device diff --git a/include/media/rc-core.h b/include/media/rc-core.h index f6494709e230..0f77b3dffb37 100644 --- a/include/media/rc-core.h +++ b/include/media/rc-core.h @@ -60,6 +60,7 @@ enum rc_filter_type { /** * struct rc_dev - represents a remote control device * @dev: driver model's view of this device + * @initialized: 1 if the device init has completed, 0 otherwise * @sysfs_groups: sysfs attribute groups * @input_name: name of the input child device * @input_phys: physical path to the input child device @@ -121,6 +122,7 @@ enum rc_filter_type { */ struct rc_dev { struct device dev; + atomic_t initialized; const struct attribute_group *sysfs_groups[5]; const char *input_name; const char *input_phys; diff --git a/include/media/tuner.h b/include/media/tuner.h index e5321fda5489..b3edc14e763f 100644 --- a/include/media/tuner.h +++ b/include/media/tuner.h @@ -20,14 +20,7 @@ #ifdef __KERNEL__ #include <linux/videodev2.h> - -/* Tuner PADs */ -/* FIXME: is this the right place for it? */ -enum tuner_pad_index { - TUNER_PAD_RF_INPUT, - TUNER_PAD_IF_OUTPUT, - TUNER_NUM_PADS -}; +#include <media/v4l2-mc.h> #define ADDR_UNSET (255) diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index da6fe9802fee..0bc9b35b8f3e 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -535,18 +535,6 @@ struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl, u32 id, u8 max, u8 def, const s64 *qmenu_int); /** - * v4l2_ctrl_add_ctrl() - Add a control from another handler to this handler. - * @hdl: The control handler. - * @ctrl: The control to add. - * - * It will return NULL if it was unable to add the control reference. - * If the control already belonged to the handler, then it will do - * nothing and just return @ctrl. - */ -struct v4l2_ctrl *v4l2_ctrl_add_ctrl(struct v4l2_ctrl_handler *hdl, - struct v4l2_ctrl *ctrl); - -/** * v4l2_ctrl_add_handler() - Add all controls from handler @add to * handler @hdl. * @hdl: The control handler. 
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index eeabf20e87a6..76056ab5c5bd 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -87,6 +87,7 @@ struct video_device #if defined(CONFIG_MEDIA_CONTROLLER) struct media_entity entity; struct media_intf_devnode *intf_devnode; + struct media_pipeline pipe; #endif /* device ops */ const struct v4l2_file_operations *fops; diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h new file mode 100644 index 000000000000..98a938aabdfb --- /dev/null +++ b/include/media/v4l2-mc.h @@ -0,0 +1,243 @@ +/* + * v4l2-mc.h - Media Controller V4L2 types and prototypes + * + * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com> + * Copyright (C) 2006-2010 Nokia Corporation + * Copyright (c) 2016 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _V4L2_MC_H +#define _V4L2_MC_H + +#include <media/media-device.h> +#include <media/v4l2-dev.h> +#include <linux/types.h> + +/** + * enum tuner_pad_index - tuner pad index for MEDIA_ENT_F_TUNER + * + * @TUNER_PAD_RF_INPUT: Radiofrequency (RF) sink pad, usually linked to an + * RF connector entity. + * @TUNER_PAD_OUTPUT: Tuner video output source pad. Contains the video + * chrominance and luminance or the whole bandwidth + * of the signal converted to an Intermediate Frequency + * (IF) or to baseband (on zero-IF tuners). + * @TUNER_PAD_AUD_OUT: Tuner audio output source pad. Tuners used to decode + * analog TV signals have an extra pad for audio output. + * Old tuners use an analog stage with a saw filter for + * the audio IF frequency. The output of the pad is, in + * this case, the audio IF, which should be decoded either + * by the bridge chipset (that's the case of cx2388x + * chipsets) or may require an external IF sound + * processor, like msp34xx. On modern silicon tuners, + * the audio IF decoder is usually incorporated into the + * tuner. In that case, the output of this pad is + * sampled audio data. + * @TUNER_NUM_PADS: Number of pads of the tuner. + */ +enum tuner_pad_index { + TUNER_PAD_RF_INPUT, + TUNER_PAD_OUTPUT, + TUNER_PAD_AUD_OUT, + TUNER_NUM_PADS +}; + +/** + * enum if_vid_dec_pad_index - video IF-PLL pad index for + * MEDIA_ENT_F_IF_VID_DECODER + * + * @IF_VID_DEC_PAD_IF_INPUT: video Intermediate Frequency (IF) sink pad + * @IF_VID_DEC_PAD_OUT: IF-PLL video output source pad. Contains the + * video chrominance and luminance IF signals. + * @IF_VID_DEC_PAD_NUM_PADS: Number of pads of the video IF-PLL. + */ +enum if_vid_dec_pad_index { + IF_VID_DEC_PAD_IF_INPUT, + IF_VID_DEC_PAD_OUT, + IF_VID_DEC_PAD_NUM_PADS +}; + +/** + * enum if_aud_dec_pad_index - audio/sound IF-PLL pad index for + * MEDIA_ENT_F_IF_AUD_DECODER + * + * @IF_AUD_DEC_PAD_IF_INPUT: audio Intermediate Frequency (IF) sink pad + * @IF_AUD_DEC_PAD_OUT: IF-PLL audio output source pad. Contains the + * audio sampled stream data, usually connected + * to the bridge bus via an Inter-IC Sound (I2S) + * bus. + * @IF_AUD_DEC_PAD_NUM_PADS: Number of pads of the audio IF-PLL.
+ */ +enum if_aud_dec_pad_index { + IF_AUD_DEC_PAD_IF_INPUT, + IF_AUD_DEC_PAD_OUT, + IF_AUD_DEC_PAD_NUM_PADS +}; + +/** + * enum demod_pad_index - analog TV pad index for MEDIA_ENT_F_ATV_DECODER + * + * @DEMOD_PAD_IF_INPUT: IF input sink pad. + * @DEMOD_PAD_VID_OUT: Video output source pad. + * @DEMOD_PAD_VBI_OUT: Vertical Blank Interface (VBI) output source pad. + * @DEMOD_PAD_AUDIO_OUT: Audio output source pad. + * @DEMOD_NUM_PADS: Maximum number of output pads. + */ +enum demod_pad_index { + DEMOD_PAD_IF_INPUT, + DEMOD_PAD_VID_OUT, + DEMOD_PAD_VBI_OUT, + DEMOD_PAD_AUDIO_OUT, + DEMOD_NUM_PADS +}; + +/* We don't need to include pci.h or usb.h here */ +struct pci_dev; +struct usb_device; + +#ifdef CONFIG_MEDIA_CONTROLLER +/** + * v4l2_mc_create_media_graph() - create Media Controller links in the graph. + * + * @mdev: pointer to the &media_device struct. + * + * Add links between the entities commonly found on PC consumer hardware at + * the V4L2 side: camera sensors, audio and video PLL-IF decoders, tuners, + * analog TV decoder and I/O entities (video, VBI and Software Defined Radio). + * NOTE: webcams are modelled in a very simple way: the sensor is + * connected directly to the I/O entity. All dirty details, like + * scaler and crop HW, are hidden. While such a mapping is enough for V4L2 + * interface centric PC consumer hardware, V4L2 subdev centric camera + * hardware should not use this routine, as it will not build the right graph. + */ +int v4l2_mc_create_media_graph(struct media_device *mdev); + +/** + * v4l_enable_media_source() - Hold media source for exclusive use + * if free + * + * @vdev: pointer to struct video_device + * + * This interface calls the enable_source handler to determine if the + * media source is free for use. The enable_source handler is + * responsible for checking if the media source is free and + * starting a pipeline between the media source and the media + * entity associated with the video device. This interface + * should be called from v4l2-core and dvb-core interfaces + * that change the source configuration. + * + * Return: returns zero on success or a negative error code. + */ +int v4l_enable_media_source(struct video_device *vdev); + +/** + * v4l_disable_media_source() - Release media source + * + * @vdev: pointer to struct video_device + * + * This interface calls the disable_source handler to release + * the media source. The disable_source handler stops the + * active media pipeline between the media source and the + * media entity associated with the video device. + */ +void v4l_disable_media_source(struct video_device *vdev); + +/* + * v4l_vb2q_enable_media_source - Hold media source for exclusive use + * if free. + * @q: pointer to struct vb2_queue + * + * Wrapper for v4l_enable_media_source(). This function should + * be called from v4l2-core to enable the media source with a + * pointer to struct vb2_queue as the input argument. Some + * v4l2-core interfaces don't have access to the video device; + * this interface finds the struct video_device for @q and + * calls v4l_enable_media_source(). + */ +int v4l_vb2q_enable_media_source(struct vb2_queue *q); + + +/** + * v4l2_pipeline_pm_use - Update the use count of an entity + * @entity: The entity + * @use: Use (1) or stop using (0) the entity + * + * Update the use count of all entities in the pipeline and power entities on or + * off accordingly.
+ * + * This function is intended to be called in video node open (use == + * 1) and release (use == 0). It uses struct media_entity.use_count to + * track the power status. The use of this function should be paired + * with v4l2_pipeline_link_notify(). + * + * Return 0 on success or a negative error code on failure. Powering entities + * off is assumed to never fail. No failure can occur when the use parameter is + * set to 0. + */ +int v4l2_pipeline_pm_use(struct media_entity *entity, int use); + + +/** + * v4l2_pipeline_link_notify - Link management notification callback + * @link: The link + * @flags: New link flags that will be applied + * @notification: The link's state change notification type (MEDIA_DEV_NOTIFY_*) + * + * React to link management on powered pipelines by updating the use count of + * all entities in the source and sink sides of the link. Entities are powered + * on or off accordingly. The use of this function should be paired + * with v4l2_pipeline_pm_use(). + * + * Return 0 on success or a negative error code on failure. Powering entities + * off is assumed to never fail. This function will not fail for disconnection + * events. + */ +int v4l2_pipeline_link_notify(struct media_link *link, u32 flags, + unsigned int notification); + +#else /* CONFIG_MEDIA_CONTROLLER */ + +static inline int v4l2_mc_create_media_graph(struct media_device *mdev) +{ + return 0; +} + +static inline int v4l_enable_media_source(struct video_device *vdev) +{ + return 0; +} + +static inline void v4l_disable_media_source(struct video_device *vdev) +{ +} + +static inline int v4l_vb2q_enable_media_source(struct vb2_queue *q) +{ + return 0; +} + +static inline int v4l2_pipeline_pm_use(struct media_entity *entity, int use) +{ + return 0; +} + +static inline int v4l2_pipeline_link_notify(struct media_link *link, u32 flags, + unsigned int notification) +{ + return 0; +} + +#endif /* CONFIG_MEDIA_CONTROLLER */ +#endif /* _V4L2_MC_H */ diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index b273cf9ac047..11e2dfec0198 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -179,6 +179,8 @@ struct v4l2_subdev_io_pin_config { * for it to be warned when the value of a control changes. * * @unsubscribe_event: remove event subscription from the control framework. + * + * @registered_async: the subdevice has been registered async. 
*/ struct v4l2_subdev_core_ops { int (*log_status)(struct v4l2_subdev *sd); @@ -211,6 +213,7 @@ struct v4l2_subdev_core_ops { struct v4l2_event_subscription *sub); int (*unsubscribe_event)(struct v4l2_subdev *sd, struct v4l2_fh *fh, struct v4l2_event_subscription *sub); + int (*registered_async)(struct v4l2_subdev *sd); }; /** diff --git a/include/media/videobuf2-dvb.h b/include/media/videobuf2-dvb.h index 5b64c9eac2c9..87b559024b4a 100644 --- a/include/media/videobuf2-dvb.h +++ b/include/media/videobuf2-dvb.h @@ -8,6 +8,10 @@ #include <dvb_frontend.h> #include <media/videobuf2-v4l2.h> + +/* We don't actually need to include media-device.h here */ +struct media_device; + /* * TODO: This header file should be replaced with videobuf2-core.h * Currently, vb2_thread is not a stuff of videobuf2-core, @@ -50,6 +54,7 @@ int vb2_dvb_register_bus(struct vb2_dvb_frontends *f, struct module *module, void *adapter_priv, struct device *device, + struct media_device *mdev, short *adapter_nr, int mfe_shared); diff --git a/include/media/vsp1.h b/include/media/vsp1.h new file mode 100644 index 000000000000..cc541753896f --- /dev/null +++ b/include/media/vsp1.h @@ -0,0 +1,33 @@ +/* + * vsp1.h -- R-Car VSP1 API + * + * Copyright (C) 2015 Renesas Electronics Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef __MEDIA_VSP1_H__ +#define __MEDIA_VSP1_H__ + +#include <linux/types.h> + +struct device; +struct v4l2_rect; + +int vsp1_du_init(struct device *dev); + +int vsp1_du_setup_lif(struct device *dev, unsigned int width, + unsigned int height); + +int vsp1_du_atomic_begin(struct device *dev); +int vsp1_du_atomic_update(struct device *dev, unsigned int rpf, u32 pixelformat, + unsigned int pitch, dma_addr_t mem[2], + const struct v4l2_rect *src, + const struct v4l2_rect *dst); +int vsp1_du_atomic_flush(struct device *dev); + +#endif /* __MEDIA_VSP1_H__ */ diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h index 2f6a3f2233ed..da3a77d25fcb 100644 --- a/include/net/6lowpan.h +++ b/include/net/6lowpan.h @@ -75,6 +75,8 @@ #define LOWPAN_IPHC_MAX_HC_BUF_LEN (sizeof(struct ipv6hdr) + \ LOWPAN_IPHC_MAX_HEADER_LEN + \ LOWPAN_NHC_MAX_HDR_LEN) +/* SCI/DCI is 4 bit width, so we have maximum 16 entries */ +#define LOWPAN_IPHC_CTX_TABLE_SIZE (1 << 4) #define LOWPAN_DISPATCH_IPV6 0x41 /* 01000001 = 65 */ #define LOWPAN_DISPATCH_IPHC 0x60 /* 011xxxxx = ... 
*/ @@ -98,9 +100,39 @@ enum lowpan_lltypes { LOWPAN_LLTYPE_IEEE802154, }; +enum lowpan_iphc_ctx_flags { + LOWPAN_IPHC_CTX_FLAG_ACTIVE, + LOWPAN_IPHC_CTX_FLAG_COMPRESSION, +}; + +struct lowpan_iphc_ctx { + u8 id; + struct in6_addr pfx; + u8 plen; + unsigned long flags; +}; + +struct lowpan_iphc_ctx_table { + spinlock_t lock; + const struct lowpan_iphc_ctx_ops *ops; + struct lowpan_iphc_ctx table[LOWPAN_IPHC_CTX_TABLE_SIZE]; +}; + +static inline bool lowpan_iphc_ctx_is_active(const struct lowpan_iphc_ctx *ctx) +{ + return test_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE, &ctx->flags); +} + +static inline bool +lowpan_iphc_ctx_is_compression(const struct lowpan_iphc_ctx *ctx) +{ + return test_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags); +} + struct lowpan_priv { enum lowpan_lltypes lltype; struct dentry *iface_debugfs; + struct lowpan_iphc_ctx_table ctx; /* must be last */ u8 priv[0] __aligned(sizeof(void *)); diff --git a/include/net/act_api.h b/include/net/act_api.h index 9d446f136607..2a19fe111c78 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -7,6 +7,8 @@ #include <net/sch_generic.h> #include <net/pkt_sched.h> +#include <net/net_namespace.h> +#include <net/netns/generic.h> struct tcf_common { struct hlist_node tcfc_head; @@ -65,11 +67,6 @@ static inline int tcf_hashinfo_init(struct tcf_hashinfo *hf, unsigned int mask) return 0; } -static inline void tcf_hashinfo_destroy(struct tcf_hashinfo *hf) -{ - kfree(hf->htab); -} - /* Update lastuse only if needed, to avoid dirtying a cache line. * We use a temp variable to avoid fetching jiffies twice. */ @@ -81,42 +78,76 @@ static inline void tcf_lastuse_update(struct tcf_t *tm) tm->lastuse = now; } -#ifdef CONFIG_NET_CLS_ACT - -#define ACT_P_CREATED 1 -#define ACT_P_DELETED 1 - struct tc_action { void *priv; const struct tc_action_ops *ops; __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ __u32 order; struct list_head list; + struct tcf_hashinfo *hinfo; }; +#ifdef CONFIG_NET_CLS_ACT + +#define ACT_P_CREATED 1 +#define ACT_P_DELETED 1 + struct tc_action_ops { struct list_head head; - struct tcf_hashinfo *hinfo; char kind[IFNAMSIZ]; __u32 type; /* TBD to match kind */ struct module *owner; int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *); int (*dump)(struct sk_buff *, struct tc_action *, int, int); void (*cleanup)(struct tc_action *, int bind); - int (*lookup)(struct tc_action *, u32); + int (*lookup)(struct net *, struct tc_action *, u32); int (*init)(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action *act, int ovr, int bind); - int (*walk)(struct sk_buff *, struct netlink_callback *, int, struct tc_action *); + int (*walk)(struct net *, struct sk_buff *, + struct netlink_callback *, int, struct tc_action *); +}; + +struct tc_action_net { + struct tcf_hashinfo *hinfo; + const struct tc_action_ops *ops; }; -int tcf_hash_search(struct tc_action *a, u32 index); -u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo); -int tcf_hash_check(u32 index, struct tc_action *a, int bind); -int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, - int size, int bind, bool cpustats); +static inline +int tc_action_net_init(struct tc_action_net *tn, const struct tc_action_ops *ops, + unsigned int mask) +{ + int err = 0; + + tn->hinfo = kmalloc(sizeof(*tn->hinfo), GFP_KERNEL); + if (!tn->hinfo) + return -ENOMEM; + tn->ops = ops; + err = tcf_hashinfo_init(tn->hinfo, mask); + if (err) + kfree(tn->hinfo); + return err; +} + +void tcf_hashinfo_destroy(const struct tc_action_ops *ops, + 
struct tcf_hashinfo *hinfo); + +static inline void tc_action_net_exit(struct tc_action_net *tn) +{ + tcf_hashinfo_destroy(tn->ops, tn->hinfo); +} + +int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb, + struct netlink_callback *cb, int type, + struct tc_action *a); +int tcf_hash_search(struct tc_action_net *tn, struct tc_action *a, u32 index); +u32 tcf_hash_new_index(struct tc_action_net *tn); +int tcf_hash_check(struct tc_action_net *tn, u32 index, struct tc_action *a, + int bind); +int tcf_hash_create(struct tc_action_net *tn, u32 index, struct nlattr *est, + struct tc_action *a, int size, int bind, bool cpustats); void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est); -void tcf_hash_insert(struct tc_action *a); +void tcf_hash_insert(struct tc_action_net *tn, struct tc_action *a); int __tcf_hash_release(struct tc_action *a, bool bind, bool strict); @@ -125,8 +156,8 @@ static inline int tcf_hash_release(struct tc_action *a, bool bind) return __tcf_hash_release(a, bind, false); } -int tcf_register_action(struct tc_action_ops *a, unsigned int mask); -int tcf_unregister_action(struct tc_action_ops *a); +int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops); +int tcf_unregister_action(struct tc_action_ops *a, struct pernet_operations *ops); int tcf_action_destroy(struct list_head *actions, int bind); int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions, struct tcf_result *res); @@ -140,5 +171,16 @@ int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int); int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int); int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int); int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int); + +#define tc_no_actions(_exts) \ + (list_empty(&(_exts)->actions)) + +#define tc_for_each_action(_a, _exts) \ + list_for_each_entry(a, &(_exts)->actions, list) +#else /* CONFIG_NET_CLS_ACT */ + +#define tc_no_actions(_exts) true +#define tc_for_each_action(_a, _exts) while (0) + #endif /* CONFIG_NET_CLS_ACT */ #endif diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 47f52d3cd8df..730d856683e5 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -87,6 +87,8 @@ int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr, u32 banned_flags); int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, u32 banned_flags); +int ipv4_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, + bool match_wildcard); int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, bool match_wildcard); void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr); diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 2a91a0561a47..9b4c418bebd8 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -6,8 +6,8 @@ #include <linux/mutex.h> #include <net/sock.h> -void unix_inflight(struct file *fp); -void unix_notinflight(struct file *fp); +void unix_inflight(struct user_struct *user, struct file *fp); +void unix_notinflight(struct user_struct *user, struct file *fp); void unix_gc(void); void wait_for_unix_gc(void); struct sock *unix_get_socket(struct file *filp); diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 339ea57be423..5d38d980b89d 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -233,6 +233,7 @@ enum { HCI_SC_ENABLED, HCI_SC_ONLY, HCI_PRIVACY, + HCI_LIMITED_PRIVACY, HCI_RPA_EXPIRED, 
HCI_RPA_RESOLVING, HCI_HS_ENABLED, diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index d4f82edb5cff..dc71473462ac 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -25,6 +25,7 @@ #ifndef __HCI_CORE_H #define __HCI_CORE_H +#include <linux/leds.h> #include <net/bluetooth/hci.h> #include <net/bluetooth/hci_sock.h> @@ -396,6 +397,8 @@ struct hci_dev { struct delayed_work rpa_expired; bdaddr_t rpa; + struct led_trigger *power_led; + int (*open)(struct hci_dev *hdev); int (*close)(struct hci_dev *hdev); int (*flush)(struct hci_dev *hdev); diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h index f1fbc3b11962..f358ad5e4214 100644 --- a/include/net/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -306,5 +306,6 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); int bond_3ad_set_carrier(struct bonding *bond); void bond_3ad_update_lacp_rate(struct bonding *bond); +void bond_3ad_update_ad_actor_settings(struct bonding *bond); #endif /* _NET_BOND_3AD_H */ diff --git a/include/net/bonding.h b/include/net/bonding.h index ee6c52053aa3..791800ddd6d9 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -215,6 +215,7 @@ struct bonding { * ALB mode (6) - to sync the use and modifications of its hash table */ spinlock_t mode_lock; + spinlock_t stats_lock; u8 send_peer_notif; u8 igmp_retrans; #ifdef CONFIG_PROC_FS diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 9bcaaf7cd15a..9e1b24c29f0c 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -712,6 +712,8 @@ struct cfg80211_acl_data { * @p2p_opp_ps: P2P opportunistic PS * @acl: ACL configuration used by the drivers which has support for * MAC address based access control + * @pbss: If set, start as a PCP instead of AP. Relevant for DMG + * networks. */ struct cfg80211_ap_settings { struct cfg80211_chan_def chandef; @@ -730,6 +732,7 @@ struct cfg80211_ap_settings { u8 p2p_ctwindow; bool p2p_opp_ps; const struct cfg80211_acl_data *acl; + bool pbss; }; /** @@ -1888,6 +1891,8 @@ struct cfg80211_ibss_params { * @ht_capa_mask: The bits of ht_capa which are to be used. * @vht_capa: VHT Capability overrides * @vht_capa_mask: The bits of vht_capa which are to be used. + * @pbss: if set, connect to a PCP instead of AP. Valid for DMG + * networks. 
*/ struct cfg80211_connect_params { struct ieee80211_channel *channel; @@ -1910,6 +1915,7 @@ struct cfg80211_connect_params { struct ieee80211_ht_cap ht_capa_mask; struct ieee80211_vht_cap vht_capa; struct ieee80211_vht_cap vht_capa_mask; + bool pbss; }; /** @@ -3489,6 +3495,7 @@ struct cfg80211_cached_keys; * registered for unexpected class 3 frames (AP mode) * @conn: (private) cfg80211 software SME connection state machine data * @connect_keys: (private) keys to set after connection is established + * @conn_bss_type: connecting/connected BSS type * @ibss_fixed: (private) IBSS is using fixed BSSID * @ibss_dfs_possible: (private) IBSS may change to a DFS channel * @event_list: (private) list for internal event processing @@ -3519,6 +3526,7 @@ struct wireless_dev { u8 ssid_len, mesh_id_len, mesh_id_up_len; struct cfg80211_conn *conn; struct cfg80211_cached_keys *connect_keys; + enum ieee80211_bss_type conn_bss_type; struct list_head event_list; spinlock_t event_lock; diff --git a/include/net/checksum.h b/include/net/checksum.h index 10a16b5bd1c7..5c30891e84e5 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -88,8 +88,11 @@ static inline __wsum csum_block_add(__wsum csum, __wsum csum2, int offset) { u32 sum = (__force u32)csum2; - if (offset&1) - sum = ((sum&0xFF00FF)<<8)+((sum>>8)&0xFF00FF); + + /* rotate sum to align it with a 16b boundary */ + if (offset & 1) + sum = ror32(sum, 8); + return csum_add(csum, (__force __wsum)sum); } @@ -102,10 +105,7 @@ csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len) static inline __wsum csum_block_sub(__wsum csum, __wsum csum2, int offset) { - u32 sum = (__force u32)csum2; - if (offset&1) - sum = ((sum&0xFF00FF)<<8)+((sum>>8)&0xFF00FF); - return csum_sub(csum, (__force __wsum)sum); + return csum_block_add(csum, ~csum2, offset); } static inline __wsum csum_unfold(__sum16 n) @@ -120,6 +120,11 @@ static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum) #define CSUM_MANGLED_0 ((__force __sum16)0xffff) +static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) +{ + *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); +} + static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) { __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from); diff --git a/include/net/codel.h b/include/net/codel.h index 267e70210061..d168aca115cc 100644 --- a/include/net/codel.h +++ b/include/net/codel.h @@ -162,12 +162,14 @@ struct codel_vars { * struct codel_stats - contains codel shared variables and stats * @maxpacket: largest packet we've seen so far * @drop_count: temp count of dropped packets in dequeue() + * @drop_len: bytes of dropped packets in dequeue() * ecn_mark: number of packets we ECN marked instead of dropping * ce_mark: number of packets CE marked because sojourn time was above ce_threshold */ struct codel_stats { u32 maxpacket; u32 drop_count; + u32 drop_len; u32 ecn_mark; u32 ce_mark; }; @@ -308,6 +310,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, vars->rec_inv_sqrt); goto end; } + stats->drop_len += qdisc_pkt_len(skb); qdisc_drop(skb, sch); stats->drop_count++; skb = dequeue_func(vars, sch); @@ -330,6 +333,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, if (params->ecn && INET_ECN_set_ce(skb)) { stats->ecn_mark++; } else { + stats->drop_len += qdisc_pkt_len(skb); qdisc_drop(skb, sch); stats->drop_count++; diff --git a/include/net/devlink.h b/include/net/devlink.h new file mode 100644 index 000000000000..c37d257891d6 --- /dev/null +++ 
b/include/net/devlink.h @@ -0,0 +1,140 @@ +/* + * include/net/devlink.h - Network physical device Netlink interface + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef _NET_DEVLINK_H_ +#define _NET_DEVLINK_H_ + +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/gfp.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <net/net_namespace.h> +#include <uapi/linux/devlink.h> + +struct devlink_ops; + +struct devlink { + struct list_head list; + struct list_head port_list; + const struct devlink_ops *ops; + struct device *dev; + possible_net_t _net; + char priv[0] __aligned(NETDEV_ALIGN); +}; + +struct devlink_port { + struct list_head list; + struct devlink *devlink; + unsigned index; + bool registered; + enum devlink_port_type type; + enum devlink_port_type desired_type; + void *type_dev; + bool split; + u32 split_group; +}; + +struct devlink_ops { + size_t priv_size; + int (*port_type_set)(struct devlink_port *devlink_port, + enum devlink_port_type port_type); + int (*port_split)(struct devlink *devlink, unsigned int port_index, + unsigned int count); + int (*port_unsplit)(struct devlink *devlink, unsigned int port_index); +}; + +static inline void *devlink_priv(struct devlink *devlink) +{ + BUG_ON(!devlink); + return &devlink->priv; +} + +static inline struct devlink *priv_to_devlink(void *priv) +{ + BUG_ON(!priv); + return container_of(priv, struct devlink, priv); +} + +struct ib_device; + +#if IS_ENABLED(CONFIG_NET_DEVLINK) + +struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size); +int devlink_register(struct devlink *devlink, struct device *dev); +void devlink_unregister(struct devlink *devlink); +void devlink_free(struct devlink *devlink); +int devlink_port_register(struct devlink *devlink, + struct devlink_port *devlink_port, + unsigned int port_index); +void devlink_port_unregister(struct devlink_port *devlink_port); +void devlink_port_type_eth_set(struct devlink_port *devlink_port, + struct net_device *netdev); +void devlink_port_type_ib_set(struct devlink_port *devlink_port, + struct ib_device *ibdev); +void devlink_port_type_clear(struct devlink_port *devlink_port); +void devlink_port_split_set(struct devlink_port *devlink_port, + u32 split_group); + +#else + +static inline struct devlink *devlink_alloc(const struct devlink_ops *ops, + size_t priv_size) +{ + return kzalloc(sizeof(struct devlink) + priv_size, GFP_KERNEL); +} + +static inline int devlink_register(struct devlink *devlink, struct device *dev) +{ + return 0; +} + +static inline void devlink_unregister(struct devlink *devlink) +{ +} + +static inline void devlink_free(struct devlink *devlink) +{ + kfree(devlink); +} + +static inline int devlink_port_register(struct devlink *devlink, + struct devlink_port *devlink_port, + unsigned int port_index) +{ + return 0; +} + +static inline void devlink_port_unregister(struct devlink_port *devlink_port) +{ +} + +static inline void devlink_port_type_eth_set(struct devlink_port *devlink_port, + struct net_device *netdev) +{ +} + +static inline void devlink_port_type_ib_set(struct devlink_port *devlink_port, + struct ib_device *ibdev) +{ +} + +static inline void devlink_port_type_clear(struct 
devlink_port *devlink_port) +{ +} + +static inline void devlink_port_split_set(struct devlink_port *devlink_port, + u32 split_group) +{ +} + +#endif + +#endif /* _NET_DEVLINK_H_ */ diff --git a/include/net/dsa.h b/include/net/dsa.h index 26a0e86e611e..6463bb2863ac 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -296,16 +296,17 @@ struct dsa_switch_driver { /* * Bridge integration */ - int (*port_join_bridge)(struct dsa_switch *ds, int port, - u32 br_port_mask); - int (*port_leave_bridge)(struct dsa_switch *ds, int port, - u32 br_port_mask); + int (*port_bridge_join)(struct dsa_switch *ds, int port, + struct net_device *bridge); + void (*port_bridge_leave)(struct dsa_switch *ds, int port); int (*port_stp_update)(struct dsa_switch *ds, int port, u8 state); /* * VLAN support */ + int (*port_vlan_filtering)(struct dsa_switch *ds, int port, + bool vlan_filtering); int (*port_vlan_prepare)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans); @@ -314,9 +315,9 @@ struct dsa_switch_driver { struct switchdev_trans *trans); int (*port_vlan_del)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan); - int (*port_pvid_get)(struct dsa_switch *ds, int port, u16 *pvid); - int (*vlan_getnext)(struct dsa_switch *ds, u16 *vid, - unsigned long *ports, unsigned long *untagged); + int (*port_vlan_dump)(struct dsa_switch *ds, int port, + struct switchdev_obj_port_vlan *vlan, + int (*cb)(struct switchdev_obj *obj)); /* * Forwarding database diff --git a/include/net/dst.h b/include/net/dst.h index c7329dcd90cc..5c98443c1c9e 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -398,6 +398,18 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev, __skb_tunnel_rx(skb, dev, net); } +static inline u32 dst_tclassid(const struct sk_buff *skb) +{ +#ifdef CONFIG_IP_ROUTE_CLASSID + const struct dst_entry *dst; + + dst = skb_dst(skb); + if (dst) + return dst->tclassid; +#endif + return 0; +} + int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb); static inline int dst_discard(struct sk_buff *skb) { diff --git a/include/net/dst_cache.h b/include/net/dst_cache.h new file mode 100644 index 000000000000..151accae708b --- /dev/null +++ b/include/net/dst_cache.h @@ -0,0 +1,97 @@ +#ifndef _NET_DST_CACHE_H +#define _NET_DST_CACHE_H + +#include <linux/jiffies.h> +#include <net/dst.h> +#if IS_ENABLED(CONFIG_IPV6) +#include <net/ip6_fib.h> +#endif + +struct dst_cache { + struct dst_cache_pcpu __percpu *cache; + unsigned long reset_ts; +}; + +/** + * dst_cache_get - perform cache lookup + * @dst_cache: the cache + * + * The caller should use dst_cache_get_ip4() if it need to retrieve the + * source address to be used when xmitting to the cached dst. + * local BH must be disabled. + */ +struct dst_entry *dst_cache_get(struct dst_cache *dst_cache); + +/** + * dst_cache_get_ip4 - perform cache lookup and fetch ipv4 source address + * @dst_cache: the cache + * @saddr: return value for the retrieved source address + * + * local BH must be disabled. + */ +struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr); + +/** + * dst_cache_set_ip4 - store the ipv4 dst into the cache + * @dst_cache: the cache + * @dst: the entry to be cached + * @saddr: the source address to be stored inside the cache + * + * local BH must be disabled. 
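As a rough sketch of the calling pattern suggested by these kernel-doc notes (not part of the patch; skel_tnl_get_rt() and its flowi4 setup are hypothetical), a tunnel transmit path would try the cache first and fall back to a routing lookup, storing the result for the next packet:

#include <net/dst_cache.h>
#include <net/route.h>

/* Called from the transmit path, i.e. with local BH already disabled. */
static struct rtable *skel_tnl_get_rt(struct net *net,
				      struct dst_cache *cache,
				      struct flowi4 *fl4)
{
	struct rtable *rt;
	__be32 saddr;

	rt = dst_cache_get_ip4(cache, &saddr);
	if (rt) {
		fl4->saddr = saddr;	/* reuse the cached source address */
		return rt;
	}

	rt = ip_route_output_key(net, fl4);
	if (IS_ERR(rt))
		return rt;

	/* remember the route and the source address chosen by the lookup */
	dst_cache_set_ip4(cache, &rt->dst, fl4->saddr);
	return rt;
}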
+ */ +void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst, + __be32 saddr); + +#if IS_ENABLED(CONFIG_IPV6) + +/** + * dst_cache_set_ip6 - store the ipv6 dst into the cache + * @dst_cache: the cache + * @dst: the entry to be cached + * @addr: the source address to be stored inside the cache + * + * local BH must be disabled. + */ +void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst, + const struct in6_addr *addr); + +/** + * dst_cache_get_ip6 - perform cache lookup and fetch ipv6 source address + * @dst_cache: the cache + * @saddr: return value for the retrieved source address + * + * local BH must be disabled. + */ +struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache, + struct in6_addr *saddr); +#endif + +/** + * dst_cache_reset - invalidate the cache contents + * @dst_cache: the cache + * + * This does not free the cached dst, to avoid races and contentions. + * The dst will be freed on a later cache lookup. + */ +static inline void dst_cache_reset(struct dst_cache *dst_cache) +{ + dst_cache->reset_ts = jiffies; +} + +/** + * dst_cache_init - initialize the cache, allocating the required storage + * @dst_cache: the cache + * @gfp: allocation flags + */ +int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp); + +/** + * dst_cache_destroy - empty the cache and free the allocated storage + * @dst_cache: the cache + * + * No synchronization is enforced: it must be called only when the cache + * is unused. + */ +void dst_cache_destroy(struct dst_cache *dst_cache); + +#endif diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h index 30a56ab2ccfb..5db9f5910428 100644 --- a/include/net/dst_metadata.h +++ b/include/net/dst_metadata.h @@ -62,6 +62,7 @@ static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a, sizeof(a->u.tun_info) + a->u.tun_info.options_len); } +void metadata_dst_free(struct metadata_dst *); struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags); struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags); @@ -125,7 +126,7 @@ static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb, ip_tunnel_key_init(&tun_dst->u.tun_info.key, iph->saddr, iph->daddr, iph->tos, iph->ttl, - 0, 0, tunnel_id, flags); + 0, 0, 0, tunnel_id, flags); return tun_dst; } @@ -151,8 +152,11 @@ static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb, info->key.u.ipv6.src = ip6h->saddr; info->key.u.ipv6.dst = ip6h->daddr; + info->key.tos = ipv6_get_dsfield(ip6h); info->key.ttl = ip6h->hop_limit; + info->key.label = ip6_flowlabel(ip6h); + return tun_dst; } diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 8c8548cf5888..d3d60dccd19f 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -184,4 +184,17 @@ static inline bool flow_keys_have_l4(struct flow_keys *keys) u32 flow_hash_from_keys(struct flow_keys *keys); +static inline bool dissector_uses_key(const struct flow_dissector *flow_dissector, + enum flow_dissector_key_id key_id) +{ + return flow_dissector->used_keys & (1 << key_id); +} + +static inline void *skb_flow_dissector_target(struct flow_dissector *flow_dissector, + enum flow_dissector_key_id key_id, + void *target_container) +{ + return ((char *)target_container) + flow_dissector->offset[key_id]; +} + #endif diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 43c0e771f417..8d4608ce8716 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -83,7 +83,6 @@ struct genl_family { *
@attrs: netlink attributes * @_net: network namespace * @user_ptr: user pointers - * @dst_sk: destination socket */ struct genl_info { u32 snd_seq; @@ -94,7 +93,6 @@ struct genl_info { struct nlattr ** attrs; possible_net_t _net; void * user_ptr[2]; - struct sock * dst_sk; }; static inline struct net *genl_info_net(struct genl_info *info) @@ -188,8 +186,6 @@ int genl_unregister_family(struct genl_family *family); void genl_notify(struct genl_family *family, struct sk_buff *skb, struct genl_info *info, u32 group, gfp_t flags); -struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info, - gfp_t flags); void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, struct genl_family *family, int flags, u8 cmd); diff --git a/include/net/hwbm.h b/include/net/hwbm.h new file mode 100644 index 000000000000..47d08662501b --- /dev/null +++ b/include/net/hwbm.h @@ -0,0 +1,28 @@ +#ifndef _HWBM_H +#define _HWBM_H + +struct hwbm_pool { + /* Capacity of the pool */ + int size; + /* Size of the buffers managed */ + int frag_size; + /* Number of buffers currently used by this pool */ + int buf_num; + /* constructor called during allocation */ + int (*construct)(struct hwbm_pool *bm_pool, void *buf); + /* protect access to the buffer counter */ + spinlock_t lock; + /* private data */ + void *priv; +}; +#ifdef CONFIG_HWBM +void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf); +int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp); +int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp); +#else +static inline void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {} +static inline int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp) { return 0; } +static inline int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp) +{ return 0; } +#endif /* CONFIG_HWBM */ +#endif /* _HWBM_H */ diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index 7ff588ca6817..28332bdac333 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -53,6 +53,7 @@ struct sock *__inet6_lookup_established(struct net *net, struct sock *inet6_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, @@ -60,6 +61,7 @@ struct sock *inet6_lookup_listener(struct net *net, static inline struct sock *__inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, @@ -71,12 +73,12 @@ static inline struct sock *__inet6_lookup(struct net *net, if (sk) return sk; - return inet6_lookup_listener(net, hashinfo, saddr, sport, + return inet6_lookup_listener(net, hashinfo, skb, doff, saddr, sport, daddr, hnum, dif); } static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo, - struct sk_buff *skb, + struct sk_buff *skb, int doff, const __be16 sport, const __be16 dport, int iif) @@ -86,16 +88,19 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo, if (sk) return sk; - return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo, - &ipv6_hdr(skb)->saddr, sport, + return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb, + doff, &ipv6_hdr(skb)->saddr, sport, &ipv6_hdr(skb)->daddr, ntohs(dport), iif); } struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, const __be16
dport, const int dif); + +int inet6_hash(struct sock *sk); #endif /* IS_ENABLED(CONFIG_IPV6) */ #define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \ diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 481fe1c9044c..49dcad4fe99e 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -270,8 +270,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk, struct sock *newsk, const struct request_sock *req); -void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req, - struct sock *child); +struct sock *inet_csk_reqsk_queue_add(struct sock *sk, + struct request_sock *req, + struct sock *child); void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, unsigned long timeout); struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child, diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 12aac0fd6ee7..909972aa3acd 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -13,6 +13,7 @@ struct netns_frags { int timeout; int high_thresh; int low_thresh; + int max_dist; }; /** diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index de2e3ade6102..50f635c2c536 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -207,12 +207,16 @@ void inet_hashinfo_init(struct inet_hashinfo *h); bool inet_ehash_insert(struct sock *sk, struct sock *osk); bool inet_ehash_nolisten(struct sock *sk, struct sock *osk); -void __inet_hash(struct sock *sk, struct sock *osk); -void inet_hash(struct sock *sk); +int __inet_hash(struct sock *sk, struct sock *osk, + int (*saddr_same)(const struct sock *sk1, + const struct sock *sk2, + bool match_wildcard)); +int inet_hash(struct sock *sk); void inet_unhash(struct sock *sk); struct sock *__inet_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, const __be32 saddr, const __be16 sport, const __be32 daddr, const unsigned short hnum, @@ -220,10 +224,11 @@ struct sock *__inet_lookup_listener(struct net *net, static inline struct sock *inet_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif) { - return __inet_lookup_listener(net, hashinfo, saddr, sport, + return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport, daddr, ntohs(dport), dif); } @@ -299,6 +304,7 @@ static inline struct sock * static inline struct sock *__inet_lookup(struct net *net, struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, const __be32 saddr, const __be16 sport, const __be32 daddr, const __be16 dport, const int dif) @@ -307,12 +313,13 @@ static inline struct sock *__inet_lookup(struct net *net, struct sock *sk = __inet_lookup_established(net, hashinfo, saddr, sport, daddr, hnum, dif); - return sk ? : __inet_lookup_listener(net, hashinfo, saddr, sport, - daddr, hnum, dif); + return sk ? 
: __inet_lookup_listener(net, hashinfo, skb, doff, saddr, + sport, daddr, hnum, dif); } static inline struct sock *inet_lookup(struct net *net, struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, const __be32 saddr, const __be16 sport, const __be32 daddr, const __be16 dport, const int dif) @@ -320,7 +327,8 @@ static inline struct sock *inet_lookup(struct net *net, struct sock *sk; local_bh_disable(); - sk = __inet_lookup(net, hashinfo, saddr, sport, daddr, dport, dif); + sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr, + dport, dif); local_bh_enable(); return sk; @@ -328,6 +336,7 @@ static inline struct sock *inet_lookup(struct net *net, static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo, struct sk_buff *skb, + int doff, const __be16 sport, const __be16 dport) { @@ -337,8 +346,8 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo, if (sk) return sk; else - return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, - iph->saddr, sport, + return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb, + doff, iph->saddr, sport, iph->daddr, dport, inet_iif(skb)); } diff --git a/include/net/ip.h b/include/net/ip.h index 1a98f1ca1638..fad74d323bd6 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -240,17 +240,13 @@ static inline int inet_is_local_reserved_port(struct net *net, int port) } #endif +__be32 inet_current_timestamp(void); + /* From inetpeer.c */ extern int inet_peer_threshold; extern int inet_peer_minttl; extern int inet_peer_maxttl; -/* From ip_input.c */ -extern int sysctl_ip_early_demux; - -/* From ip_output.c */ -extern int sysctl_ip_dynaddr; - void ipfrag_init(void); void ip_static_sysctl_init(void); diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h index 1a49b73f7f6e..cca840584c88 100644 --- a/include/net/ip6_checksum.h +++ b/include/net/ip6_checksum.h @@ -37,8 +37,7 @@ #ifndef _HAVE_ARCH_IPV6_CSUM __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, - __u32 len, unsigned short proto, - __wsum csum); + __u32 len, __u8 proto, __wsum csum); #endif static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto) diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index 0d0ce0b2d870..499a707765ea 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -6,6 +6,7 @@ #include <linux/if_tunnel.h> #include <linux/ip6_tunnel.h> #include <net/ip_tunnels.h> +#include <net/dst_cache.h> #define IP6TUNNEL_ERR_TIMEO (30*HZ) @@ -33,12 +34,6 @@ struct __ip6_tnl_parm { __be32 o_key; }; -struct ip6_tnl_dst { - seqlock_t lock; - struct dst_entry __rcu *dst; - u32 cookie; -}; - /* IPv6 tunnel */ struct ip6_tnl { struct ip6_tnl __rcu *next; /* next tunnel in list */ @@ -46,7 +41,7 @@ struct ip6_tnl { struct net *net; /* netns for packet i/o */ struct __ip6_tnl_parm parms; /* tunnel configuration parameters */ struct flowi fl; /* flowi template for xmit */ - struct ip6_tnl_dst __percpu *dst_cache; /* cached dst */ + struct dst_cache dst_cache; /* cached dst */ int err_count; unsigned long err_time; @@ -66,11 +61,6 @@ struct ipv6_tlv_tnl_enc_lim { __u8 encap_limit; /* tunnel encapsulation limit */ } __packed; -struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t); -int ip6_tnl_dst_init(struct ip6_tnl *t); -void ip6_tnl_dst_destroy(struct ip6_tnl *t); -void ip6_tnl_dst_reset(struct ip6_tnl *t); -void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst); int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, const 
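The listener-lookup helpers now carry the skb and the transport header offset (doff), presumably so socket selection (for example SO_REUSEPORT) can examine the packet itself. A hedged sketch of an updated IPv4/TCP caller follows; it is illustrative only, not a specific in-tree call site.

#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/inet_hashtables.h>
#include <net/route.h>
#include <net/tcp.h>

static struct sock *demo_lookup(struct net *net, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);

	/* doff is the transport header length in bytes */
	return inet_lookup(net, &tcp_hashinfo, skb, tcp_hdrlen(skb),
			   iph->saddr, th->source,
			   iph->daddr, th->dest, inet_iif(skb));
}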
struct in6_addr *raddr); int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 7029527725dd..4079fc18ffe4 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -61,6 +61,7 @@ struct fib_nh_exception { struct rtable __rcu *fnhe_rth_input; struct rtable __rcu *fnhe_rth_output; unsigned long fnhe_stamp; + struct rcu_head rcu; }; struct fnhe_hash_bucket { diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 6db96ea0144f..c35dda9ec991 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -7,12 +7,15 @@ #include <linux/socket.h> #include <linux/types.h> #include <linux/u64_stats_sync.h> +#include <linux/bitops.h> + #include <net/dsfield.h> #include <net/gro_cells.h> #include <net/inet_ecn.h> #include <net/netns/generic.h> #include <net/rtnetlink.h> #include <net/lwtunnel.h> +#include <net/dst_cache.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ipv6.h> @@ -47,6 +50,7 @@ struct ip_tunnel_key { __be16 tun_flags; u8 tos; /* TOS for IPv4, TC for IPv6 */ u8 ttl; /* TTL for IPv4, HL for IPv6 */ + __be32 label; /* Flow Label for IPv6 */ __be16 tp_src; __be16 tp_dst; }; @@ -55,8 +59,16 @@ struct ip_tunnel_key { #define IP_TUNNEL_INFO_TX 0x01 /* represents tx tunnel parameters */ #define IP_TUNNEL_INFO_IPV6 0x02 /* key contains IPv6 addresses */ +/* Maximum tunnel options length. */ +#define IP_TUNNEL_OPTS_MAX \ + GENMASK((FIELD_SIZEOF(struct ip_tunnel_info, \ + options_len) * BITS_PER_BYTE) - 1, 0) + struct ip_tunnel_info { struct ip_tunnel_key key; +#ifdef CONFIG_DST_CACHE + struct dst_cache dst_cache; +#endif u8 options_len; u8 mode; }; @@ -85,11 +97,6 @@ struct ip_tunnel_prl_entry { struct rcu_head rcu_head; }; -struct ip_tunnel_dst { - struct dst_entry __rcu *dst; - __be32 saddr; -}; - struct metadata_dst; struct ip_tunnel { @@ -108,7 +115,7 @@ struct ip_tunnel { int tun_hlen; /* Precalculated header length */ int mlink; - struct ip_tunnel_dst __percpu *dst_cache; + struct dst_cache dst_cache; struct ip_tunnel_parm parms; @@ -141,6 +148,7 @@ struct ip_tunnel { #define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400) #define TUNNEL_GENEVE_OPT __cpu_to_be16(0x0800) #define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000) +#define TUNNEL_NOCACHE __cpu_to_be16(0x2000) #define TUNNEL_OPTIONS_PRESENT (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT) @@ -181,7 +189,7 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op, static inline void ip_tunnel_key_init(struct ip_tunnel_key *key, __be32 saddr, __be32 daddr, - u8 tos, u8 ttl, + u8 tos, u8 ttl, __be32 label, __be16 tp_src, __be16 tp_dst, __be64 tun_id, __be16 tun_flags) { @@ -192,6 +200,7 @@ static inline void ip_tunnel_key_init(struct ip_tunnel_key *key, 0, IP_TUNNEL_KEY_IPV4_PAD_LEN); key->tos = tos; key->ttl = ttl; + key->label = label; key->tun_flags = tun_flags; /* For the tunnel types on the top of IPsec, the tp_src and tp_dst of @@ -207,6 +216,20 @@ static inline void ip_tunnel_key_init(struct ip_tunnel_key *key, 0, sizeof(*key) - IP_TUNNEL_KEY_SIZE); } +static inline bool +ip_tunnel_dst_cache_usable(const struct sk_buff *skb, + const struct ip_tunnel_info *info) +{ + if (skb->mark) + return false; + if (!info) + return true; + if (info->key.tun_flags & TUNNEL_NOCACHE) + return false; + + return true; +} + static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info *tun_info) { @@ -230,6 +253,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm 
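A short sketch of filling a tunnel key with the ip_tunnel_key_init() signature shown above, which now takes the IPv6 flow label; every concrete value here (TTL, the VXLAN port, a zero label) is illustrative only.

#include <net/ip_tunnels.h>

static void demo_fill_key(struct ip_tunnel_key *key, __be32 saddr,
			  __be32 daddr, __be64 tun_id)
{
	ip_tunnel_key_init(key, saddr, daddr,
			   0,			/* tos */
			   64,			/* ttl */
			   0,			/* label (IPv6 flow label) */
			   0,			/* tp_src: let the stack pick */
			   htons(4789),		/* tp_dst: VXLAN port */
			   tun_id,
			   TUNNEL_KEY);
}

Transmit paths would additionally consult ip_tunnel_dst_cache_usable() before touching the new dst_cache, since TUNNEL_NOCACHE or a non-zero skb->mark disables caching.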
*p, int cmd); int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, u8 *protocol, struct flowi4 *fl4); +int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict); int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu); struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, @@ -247,7 +271,6 @@ int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[], int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], struct ip_tunnel_parm *p); void ip_tunnel_setup(struct net_device *dev, int net_id); -void ip_tunnel_dst_reset_all(struct ip_tunnel *t); int ip_tunnel_encap_setup(struct ip_tunnel *t, struct ip_tunnel_encap *ipencap); @@ -272,15 +295,15 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph, return INET_ECN_encapsulate(tos, inner); } -int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); +int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto, + bool xnet); void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, __be32 src, __be32 dst, u8 proto, u8 tos, u8 ttl, __be16 df, bool xnet); struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, gfp_t flags); -struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum, - int gso_type_mask); +struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask); static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len) { @@ -355,6 +378,17 @@ static inline void ip_tunnel_unneed_metadata(void) { } +static inline void ip_tunnel_info_opts_get(void *to, + const struct ip_tunnel_info *info) +{ +} + +static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info, + const void *from, int len) +{ + info->options_len = 0; +} + #endif /* CONFIG_INET */ #endif /* __NET_IP_TUNNELS_H */ diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 0816c872b689..a6cc576fd467 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1588,6 +1588,23 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) } #endif /* CONFIG_IP_VS_NFCT */ +/* Really using conntrack? 
*/ +static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp, + struct sk_buff *skb) +{ +#ifdef CONFIG_IP_VS_NFCT + enum ip_conntrack_info ctinfo; + struct nf_conn *ct; + + if (!(cp->flags & IP_VS_CONN_F_NFCT)) + return false; + ct = nf_ct_get(skb, &ctinfo); + if (ct && !nf_ct_is_untracked(ct)) + return true; +#endif + return false; +} + static inline int ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) { diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 6570f379aba2..f3c9857c645d 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -259,8 +259,12 @@ static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np) rcu_read_lock(); opt = rcu_dereference(np->opt); - if (opt && !atomic_inc_not_zero(&opt->refcnt)) - opt = NULL; + if (opt) { + if (!atomic_inc_not_zero(&opt->refcnt)) + opt = NULL; + else + opt = rcu_pointer_handoff(opt); + } rcu_read_unlock(); return opt; } diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h index 8f81bbbc38fc..e0f4109e64c6 100644 --- a/include/net/iw_handler.h +++ b/include/net/iw_handler.h @@ -439,6 +439,12 @@ int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length); /* Send a single event to user space */ void wireless_send_event(struct net_device *dev, unsigned int cmd, union iwreq_data *wrqu, const char *extra); +#ifdef CONFIG_WEXT_CORE +/* flush all previous wext events - if work is done from netdev notifiers */ +void wireless_nlevent_flush(void); +#else +static inline void wireless_nlevent_flush(void) {} +#endif /* We may need a function to send a stream of events to user space. * More on that later... */ diff --git a/include/net/kcm.h b/include/net/kcm.h new file mode 100644 index 000000000000..2840b5825dcc --- /dev/null +++ b/include/net/kcm.h @@ -0,0 +1,226 @@ +/* + * Kernel Connection Multiplexor + * + * Copyright (c) 2016 Tom Herbert <tom@herbertland.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
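The txopt_get() change above adds rcu_pointer_handoff() to record that, once the refcount has been taken, the pointer's lifetime is protected by that reference rather than by the RCU read-side section. The intended calling pattern, assuming the existing txopt_put() release helper (not touched by this diff), looks roughly like:

#include <net/ipv6.h>

static void demo_use_txopts(struct ipv6_pinfo *np)
{
	struct ipv6_txoptions *opt;

	opt = txopt_get(np);		/* takes a reference, may be NULL */
	if (opt) {
		/* ... use opt while building the outgoing packet ... */
		txopt_put(opt);		/* drop the reference taken above */
	}
}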
+ */ + +#ifndef __NET_KCM_H_ +#define __NET_KCM_H_ + +#include <linux/skbuff.h> +#include <net/sock.h> +#include <uapi/linux/kcm.h> + +extern unsigned int kcm_net_id; + +#define KCM_STATS_ADD(stat, count) ((stat) += (count)) +#define KCM_STATS_INCR(stat) ((stat)++) + +struct kcm_psock_stats { + unsigned long long rx_msgs; + unsigned long long rx_bytes; + unsigned long long tx_msgs; + unsigned long long tx_bytes; + unsigned int rx_aborts; + unsigned int rx_mem_fail; + unsigned int rx_need_more_hdr; + unsigned int rx_msg_too_big; + unsigned int rx_msg_timeouts; + unsigned int rx_bad_hdr_len; + unsigned long long reserved; + unsigned long long unreserved; + unsigned int tx_aborts; +}; + +struct kcm_mux_stats { + unsigned long long rx_msgs; + unsigned long long rx_bytes; + unsigned long long tx_msgs; + unsigned long long tx_bytes; + unsigned int rx_ready_drops; + unsigned int tx_retries; + unsigned int psock_attach; + unsigned int psock_unattach_rsvd; + unsigned int psock_unattach; +}; + +struct kcm_stats { + unsigned long long rx_msgs; + unsigned long long rx_bytes; + unsigned long long tx_msgs; + unsigned long long tx_bytes; +}; + +struct kcm_tx_msg { + unsigned int sent; + unsigned int fragidx; + unsigned int frag_offset; + unsigned int msg_flags; + struct sk_buff *frag_skb; + struct sk_buff *last_skb; +}; + +struct kcm_rx_msg { + int full_len; + int accum_len; + int offset; + int early_eaten; +}; + +/* Socket structure for KCM client sockets */ +struct kcm_sock { + struct sock sk; + struct kcm_mux *mux; + struct list_head kcm_sock_list; + int index; + u32 done : 1; + struct work_struct done_work; + + struct kcm_stats stats; + + /* Transmit */ + struct kcm_psock *tx_psock; + struct work_struct tx_work; + struct list_head wait_psock_list; + struct sk_buff *seq_skb; + + /* Don't use bit fields here, these are set under different locks */ + bool tx_wait; + bool tx_wait_more; + + /* Receive */ + struct kcm_psock *rx_psock; + struct list_head wait_rx_list; /* KCMs waiting for receiving */ + bool rx_wait; + u32 rx_disabled : 1; +}; + +struct bpf_prog; + +/* Structure for an attached lower socket */ +struct kcm_psock { + struct sock *sk; + struct kcm_mux *mux; + int index; + + u32 tx_stopped : 1; + u32 rx_stopped : 1; + u32 done : 1; + u32 unattaching : 1; + + void (*save_state_change)(struct sock *sk); + void (*save_data_ready)(struct sock *sk); + void (*save_write_space)(struct sock *sk); + + struct list_head psock_list; + + struct kcm_psock_stats stats; + + /* Receive */ + struct sk_buff *rx_skb_head; + struct sk_buff **rx_skb_nextp; + struct sk_buff *ready_rx_msg; + struct list_head psock_ready_list; + struct work_struct rx_work; + struct delayed_work rx_delayed_work; + struct bpf_prog *bpf_prog; + struct kcm_sock *rx_kcm; + unsigned long long saved_rx_bytes; + unsigned long long saved_rx_msgs; + struct timer_list rx_msg_timer; + unsigned int rx_need_bytes; + + /* Transmit */ + struct kcm_sock *tx_kcm; + struct list_head psock_avail_list; + unsigned long long saved_tx_bytes; + unsigned long long saved_tx_msgs; +}; + +/* Per net MUX list */ +struct kcm_net { + struct mutex mutex; + struct kcm_psock_stats aggregate_psock_stats; + struct kcm_mux_stats aggregate_mux_stats; + struct list_head mux_list; + int count; +}; + +/* Structure for a MUX */ +struct kcm_mux { + struct list_head kcm_mux_list; + struct rcu_head rcu; + struct kcm_net *knet; + + struct list_head kcm_socks; /* All KCM sockets on MUX */ + int kcm_socks_cnt; /* Total KCM socket count for MUX */ + struct list_head psocks; /* List of 
all psocks on MUX */ + int psocks_cnt; /* Total attached sockets */ + + struct kcm_mux_stats stats; + struct kcm_psock_stats aggregate_psock_stats; + + /* Receive */ + spinlock_t rx_lock ____cacheline_aligned_in_smp; + struct list_head kcm_rx_waiters; /* KCMs waiting for receiving */ + struct list_head psocks_ready; /* List of psocks with a msg ready */ + struct sk_buff_head rx_hold_queue; + + /* Transmit */ + spinlock_t lock ____cacheline_aligned_in_smp; /* TX and mux locking */ + struct list_head psocks_avail; /* List of available psocks */ + struct list_head kcm_tx_waiters; /* KCMs waiting for a TX psock */ +}; + +#ifdef CONFIG_PROC_FS +int kcm_proc_init(void); +void kcm_proc_exit(void); +#else +static inline int kcm_proc_init(void) { return 0; } +static inline void kcm_proc_exit(void) { } +#endif + +static inline void aggregate_psock_stats(struct kcm_psock_stats *stats, + struct kcm_psock_stats *agg_stats) +{ + /* Save psock statistics in the mux when psock is being unattached. */ + +#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat) + SAVE_PSOCK_STATS(rx_msgs); + SAVE_PSOCK_STATS(rx_bytes); + SAVE_PSOCK_STATS(rx_aborts); + SAVE_PSOCK_STATS(rx_mem_fail); + SAVE_PSOCK_STATS(rx_need_more_hdr); + SAVE_PSOCK_STATS(rx_msg_too_big); + SAVE_PSOCK_STATS(rx_msg_timeouts); + SAVE_PSOCK_STATS(rx_bad_hdr_len); + SAVE_PSOCK_STATS(tx_msgs); + SAVE_PSOCK_STATS(tx_bytes); + SAVE_PSOCK_STATS(reserved); + SAVE_PSOCK_STATS(unreserved); + SAVE_PSOCK_STATS(tx_aborts); +#undef SAVE_PSOCK_STATS +} + +static inline void aggregate_mux_stats(struct kcm_mux_stats *stats, + struct kcm_mux_stats *agg_stats) +{ + /* Save psock statistics in the mux when psock is being unattached. */ + +#define SAVE_MUX_STATS(_stat) (agg_stats->_stat += stats->_stat) + SAVE_MUX_STATS(rx_msgs); + SAVE_MUX_STATS(rx_bytes); + SAVE_MUX_STATS(tx_msgs); + SAVE_MUX_STATS(tx_bytes); + SAVE_MUX_STATS(rx_ready_drops); + SAVE_MUX_STATS(psock_attach); + SAVE_MUX_STATS(psock_unattach_rsvd); + SAVE_MUX_STATS(psock_unattach); +#undef SAVE_MUX_STATS +} + +#endif /* __NET_KCM_H_ */ diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h index 5567d46b3cff..c43a9c73de5e 100644 --- a/include/net/l3mdev.h +++ b/include/net/l3mdev.h @@ -39,7 +39,7 @@ struct l3mdev_ops { #ifdef CONFIG_NET_L3_MASTER_DEV -int l3mdev_master_ifindex_rcu(struct net_device *dev); +int l3mdev_master_ifindex_rcu(const struct net_device *dev); static inline int l3mdev_master_ifindex(struct net_device *dev) { int ifindex; @@ -179,7 +179,7 @@ struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net, #else -static inline int l3mdev_master_ifindex_rcu(struct net_device *dev) +static inline int l3mdev_master_ifindex_rcu(const struct net_device *dev) { return 0; } diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index 66350ce3e955..e9f116e29c22 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h @@ -170,6 +170,8 @@ static inline int lwtunnel_input(struct sk_buff *skb) return -EOPNOTSUPP; } -#endif +#endif /* CONFIG_LWTUNNEL */ + +#define MODULE_ALIAS_RTNL_LWT(encap_type) MODULE_ALIAS("rtnl-lwt-" __stringify(encap_type)) #endif /* __NET_LWTUNNEL_H */ diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 7c30faff245f..0c09da34b67a 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -5,7 +5,7 @@ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright (C) 2015 Intel Deutschland GmbH + * Copyright 
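The new MODULE_ALIAS_RTNL_LWT() macro lets a lightweight-tunnel encap module be loaded on demand by alias; a one-line usage sketch, where FOO is a placeholder encap name rather than an in-tree one:

/* expands to MODULE_ALIAS("rtnl-lwt-FOO"), so the core can
 * request_module() the encap implementation by name */
MODULE_ALIAS_RTNL_LWT(FOO);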
(C) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -298,6 +298,7 @@ struct ieee80211_vif_chanctx_switch { * note that this is only called when it changes after the channel * context had been assigned. * @BSS_CHANGED_OCB: OCB join status changed + * @BSS_CHANGED_MU_GROUPS: VHT MU-MIMO group id or user position changed */ enum ieee80211_bss_change { BSS_CHANGED_ASSOC = 1<<0, @@ -323,6 +324,7 @@ enum ieee80211_bss_change { BSS_CHANGED_BEACON_INFO = 1<<20, BSS_CHANGED_BANDWIDTH = 1<<21, BSS_CHANGED_OCB = 1<<22, + BSS_CHANGED_MU_GROUPS = 1<<23, /* when adding here, make sure to change ieee80211_reconfig */ }; @@ -436,6 +438,19 @@ struct ieee80211_event { }; /** + * struct ieee80211_mu_group_data - STA's VHT MU-MIMO group data + * + * This structure describes the group id data of VHT MU-MIMO + * + * @membership: 64 bits array - a bit is set if station is member of the group + * @position: 2 bits per group id indicating the position in the group + */ +struct ieee80211_mu_group_data { + u8 membership[WLAN_MEMBERSHIP_LEN]; + u8 position[WLAN_USER_POSITION_LEN]; +}; + +/** * struct ieee80211_bss_conf - holds the BSS's changing parameters * * This structure keeps information about a BSS (and an association @@ -477,6 +492,7 @@ struct ieee80211_event { * @enable_beacon: whether beaconing should be enabled or not * @chandef: Channel definition for this BSS -- the hardware might be * configured a higher bandwidth than this BSS uses, for example. + * @mu_group: VHT MU-MIMO group membership data * @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation. * This field is only valid when the channel is a wide HT/VHT channel. * Note that with TDLS this can be the case (channel is HT, protection must @@ -535,6 +551,7 @@ struct ieee80211_bss_conf { s32 cqm_rssi_thold; u32 cqm_rssi_hyst; struct cfg80211_chan_def chandef; + struct ieee80211_mu_group_data mu_group; __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN]; int arp_addr_cnt; bool qos; @@ -691,12 +708,14 @@ enum mac80211_tx_info_flags { * protocol frame (e.g. EAP) * @IEEE80211_TX_CTRL_PS_RESPONSE: This frame is a response to a poll * frame (PS-Poll or uAPSD). + * @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information * * These flags are used in tx_info->control.flags. */ enum mac80211_tx_control_flags { IEEE80211_TX_CTRL_PORT_CTRL_PROTO = BIT(0), IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1), + IEEE80211_TX_CTRL_RATE_INJECT = BIT(2), }; /* @@ -993,6 +1012,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * @RX_FLAG_MACTIME_END: The timestamp passed in the RX status (@mactime * field) is valid and contains the time the last symbol of the MPDU * (including FCS) was received. + * @RX_FLAG_MACTIME_PLCP_START: The timestamp passed in the RX status (@mactime + * field) is valid and contains the time the SYNC preamble was received. * @RX_FLAG_SHORTPRE: Short preamble was used for this frame * @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index * @RX_FLAG_VHT: VHT MCS was used and rate_index is MCS index @@ -1014,6 +1035,14 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC * is stored in the @ampdu_delimiter_crc field) * @RX_FLAG_LDPC: LDPC was used + * @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without + * processing it in any regular way. 
+ * This is useful if drivers offload some frames but still want to report + * them for sniffing purposes. + * @RX_FLAG_SKIP_MONITOR: Process and report frame to all interfaces except + * monitor interfaces. + * This is useful if drivers offload some frames but still want to report + * them for sniffing purposes. * @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3 * @RX_FLAG_10MHZ: 10 MHz (half channel) was used * @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used @@ -1033,6 +1062,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = BIT(0), RX_FLAG_DECRYPTED = BIT(1), + RX_FLAG_MACTIME_PLCP_START = BIT(2), RX_FLAG_MMIC_STRIPPED = BIT(3), RX_FLAG_IV_STRIPPED = BIT(4), RX_FLAG_FAILED_FCS_CRC = BIT(5), @@ -1046,7 +1076,7 @@ enum mac80211_rx_flags { RX_FLAG_HT_GF = BIT(13), RX_FLAG_AMPDU_DETAILS = BIT(14), RX_FLAG_PN_VALIDATED = BIT(15), - /* bit 16 free */ + RX_FLAG_DUP_VALIDATED = BIT(16), RX_FLAG_AMPDU_LAST_KNOWN = BIT(17), RX_FLAG_AMPDU_IS_LAST = BIT(18), RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(19), @@ -1054,6 +1084,8 @@ enum mac80211_rx_flags { RX_FLAG_MACTIME_END = BIT(21), RX_FLAG_VHT = BIT(22), RX_FLAG_LDPC = BIT(23), + RX_FLAG_ONLY_MONITOR = BIT(24), + RX_FLAG_SKIP_MONITOR = BIT(25), RX_FLAG_STBC_MASK = BIT(26) | BIT(27), RX_FLAG_10MHZ = BIT(28), RX_FLAG_5MHZ = BIT(29), @@ -1072,6 +1104,7 @@ enum mac80211_rx_flags { * @RX_VHT_FLAG_160MHZ: 160 MHz was used * @RX_VHT_FLAG_BF: packet was beamformed */ + enum mac80211_rx_vht_flags { RX_VHT_FLAG_80MHZ = BIT(0), RX_VHT_FLAG_160MHZ = BIT(1), @@ -1091,6 +1124,8 @@ enum mac80211_rx_vht_flags { * it but can store it and pass it back to the driver for synchronisation * @band: the active band when this frame was received * @freq: frequency the radio was tuned to when receiving this frame, in MHz + * This field must be set for management frames, but isn't strictly needed + * for data (other) frames - for those it only affects radiotap reporting. * @signal: signal strength when receiving this frame, either in dBm, in dB or * unspecified depending on the hardware capabilities flags * @IEEE80211_HW_SIGNAL_* @@ -1347,6 +1382,7 @@ enum ieee80211_vif_flags { * @csa_active: marks whether a channel switch is going on. Internally it is * write-protected by sdata_lock and local->mtx so holding either is fine * for read access. + * @mu_mimo_owner: indicates interface owns MU-MIMO capability * @driver_flags: flags/capabilities the driver has for this interface, * these need to be set (or cleared) when the interface is added * or, if supported by the driver, the interface type is changed @@ -1373,6 +1409,7 @@ struct ieee80211_vif { u8 addr[ETH_ALEN]; bool p2p; bool csa_active; + bool mu_mimo_owner; u8 cab_queue; u8 hw_queue[IEEE80211_NUM_ACS]; @@ -1486,9 +1523,8 @@ enum ieee80211_key_flags { * wants to be given when a frame is transmitted and needs to be * encrypted in hardware. * @cipher: The key's cipher suite selector. - * @tx_pn: PN used for TX on non-TKIP keys, may be used by the driver - * as well if it needs to do software PN assignment by itself - * (e.g. due to TSO) + * @tx_pn: PN used for TX keys, may be used by the driver as well if it + * needs to do software PN assignment by itself (e.g. due to TSO) * @flags: key flags, see &enum ieee80211_key_flags. 
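A hedged driver-side sketch of the new RX_FLAG_ONLY_MONITOR flag documented above: a frame already consumed by offload logic is still handed to mac80211, but only for monitor interfaces. The mydrv_* naming and the fixed band are illustrative.

#include <net/mac80211.h>

static void mydrv_report_to_sniffer(struct ieee80211_hw *hw,
				    struct sk_buff *skb, u16 freq_mhz)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	memset(status, 0, sizeof(*status));
	status->freq = freq_mhz;
	status->band = IEEE80211_BAND_2GHZ;	/* pick the real band */
	status->flag |= RX_FLAG_ONLY_MONITOR;	/* bypass normal RX handling */

	ieee80211_rx_irqsafe(hw, skb);
}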
* @keyidx: the key index (0-3) * @keylen: key material length @@ -1514,6 +1550,9 @@ struct ieee80211_key_conf { #define IEEE80211_MAX_PN_LEN 16 +#define TKIP_PN_TO_IV16(pn) ((u16)(pn & 0xffff)) +#define TKIP_PN_TO_IV32(pn) ((u32)((pn >> 16) & 0xffffffff)) + /** * struct ieee80211_key_seq - key sequence counter * @@ -1684,6 +1723,18 @@ struct ieee80211_sta_rates { * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only * valid if the STA is a TDLS peer in the first place. * @mfp: indicates whether the STA uses management frame protection or not. + * @max_amsdu_subframes: indicates the maximal number of MSDUs in a single + * A-MSDU. Taken from the Extended Capabilities element. 0 means + * unlimited. + * @max_amsdu_len: indicates the maximal length of an A-MSDU in bytes. This + * field is always valid for packets with a VHT preamble. For packets + * with a HT preamble, additional limits apply: + * + If the skb is transmitted as part of a BA agreement, the + * A-MSDU maximal size is min(max_amsdu_len, 4065) bytes. + * + If the skb is not part of a BA aggreement, the A-MSDU maximal + * size is min(max_amsdu_len, 7935) bytes. + * Both additional HT limits must be enforced by the low level driver. + * This is defined by the spec (IEEE 802.11-2012 section 8.3.2.2 NOTE 2). * @txq: per-TID data TX queues (if driver uses the TXQ abstraction) */ struct ieee80211_sta { @@ -1702,6 +1753,8 @@ struct ieee80211_sta { bool tdls; bool tdls_initiator; bool mfp; + u8 max_amsdu_subframes; + u16 max_amsdu_len; struct ieee80211_txq *txq[IEEE80211_NUM_TIDS]; @@ -1910,6 +1963,11 @@ struct ieee80211_txq { * by just its MAC address; this prevents, for example, the same station * from connecting to two virtual AP interfaces at the same time. * + * @IEEE80211_HW_SUPPORTS_REORDERING_BUFFER: Hardware (or driver) manages the + * reordering buffer internally, guaranteeing mac80211 receives frames in + * order and does not need to manage its own reorder buffer or BA session + * timeout. + * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ enum ieee80211_hw_flags { @@ -1946,6 +2004,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU, IEEE80211_HW_BEACON_TX_STATUS, IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR, + IEEE80211_HW_SUPPORTS_REORDERING_BUFFER, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS @@ -2167,7 +2226,7 @@ static inline void SET_IEEE80211_DEV(struct ieee80211_hw *hw, struct device *dev * @hw: the &struct ieee80211_hw to set the MAC address for * @addr: the address to set */ -static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr) +static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, const u8 *addr) { memcpy(hw->wiphy->perm_addr, addr, ETH_ALEN); } @@ -2684,6 +2743,33 @@ enum ieee80211_ampdu_mlme_action { }; /** + * struct ieee80211_ampdu_params - AMPDU action parameters + * + * @action: the ampdu action, value from %ieee80211_ampdu_mlme_action. + * @sta: peer of this AMPDU session + * @tid: tid of the BA session + * @ssn: start sequence number of the session. TX/RX_STOP can pass 0. When + * action is set to %IEEE80211_AMPDU_RX_START the driver passes back the + * actual ssn value used to start the session and writes the value here. + * @buf_size: reorder buffer size (number of subframes). Valid only when the + * action is set to %IEEE80211_AMPDU_RX_START or + * %IEEE80211_AMPDU_TX_OPERATIONAL + * @amsdu: indicates the peer's ability to receive A-MSDU within A-MPDU. 
+ * valid when the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL + * @timeout: BA session timeout. Valid only when the action is set to + * %IEEE80211_AMPDU_RX_START + */ +struct ieee80211_ampdu_params { + enum ieee80211_ampdu_mlme_action action; + struct ieee80211_sta *sta; + u16 tid; + u16 ssn; + u8 buf_size; + bool amsdu; + u16 timeout; +}; + +/** * enum ieee80211_frame_release_type - frame release reason * @IEEE80211_FRAME_RELEASE_PSPOLL: frame released for PS-Poll * @IEEE80211_FRAME_RELEASE_UAPSD: frame(s) released due to @@ -3027,13 +3113,9 @@ enum ieee80211_reconfig_type { * @ampdu_action: Perform a certain A-MPDU action * The RA/TID combination determines the destination and TID we want * the ampdu action to be performed for. The action is defined through - * ieee80211_ampdu_mlme_action. Starting sequence number (@ssn) - * is the first frame we expect to perform the action on. Notice - * that TX/RX_STOP can pass NULL for this parameter. - * The @buf_size parameter is only valid when the action is set to - * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder - * buffer size (number of subframes) for this session -- the driver - * may neither send aggregates containing more subframes than this + * ieee80211_ampdu_mlme_action. + * When the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL the driver + * may neither send aggregates containing more subframes than @buf_size * nor send aggregates in a way that lost frames would exceed the * buffer size. If just limiting the aggregate size, this would be * possible with a buf_size of 8: @@ -3044,9 +3126,6 @@ enum ieee80211_reconfig_type { * buffer size of 8. Correct ways to retransmit #1 would be: * - TX: 1 or 18 or 81 * Even "189" would be wrong since 1 could be lost again. - * The @amsdu parameter is valid when the action is set to - * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's ability - * to receive A-MSDU within A-MPDU. * * Returns a negative error code on failure. * The callback can sleep. @@ -3388,9 +3467,7 @@ struct ieee80211_ops { int (*tx_last_beacon)(struct ieee80211_hw *hw); int (*ampdu_action)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size, bool amsdu); + struct ieee80211_ampdu_params *params); int (*get_survey)(struct ieee80211_hw *hw, int idx, struct survey_info *survey); void (*rfkill_poll)(struct ieee80211_hw *hw); @@ -4374,21 +4451,19 @@ void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf, struct sk_buff *skb, u8 *p2k); /** - * ieee80211_get_key_tx_seq - get key TX sequence counter + * ieee80211_tkip_add_iv - write TKIP IV and Ext. IV to pos * + * @pos: start of crypto header * @keyconf: the parameter passed with the set key - * @seq: buffer to receive the sequence data + * @pn: PN to add * - * This function allows a driver to retrieve the current TX IV/PN - * for the given key. It must not be called if IV generation is - * offloaded to the device. + * Returns: pointer to the octet following IVs (i.e. beginning of + * the packet payload) * - * Note that this function may only be called when no TX processing - * can be done concurrently, for example when queues are stopped - * and the stop has been synchronized. 
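With the consolidated struct ieee80211_ampdu_params, a driver's ampdu_action callback reduces to a switch over params->action; the mydrv_* helpers below are illustrative stand-ins for hardware-specific code.

#include <net/mac80211.h>

static int mydrv_ampdu_action(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_ampdu_params *params)
{
	switch (params->action) {
	case IEEE80211_AMPDU_RX_START:
		/* ssn, buf_size and timeout are valid per the kerneldoc */
		return mydrv_rx_agg_start(hw, params->sta, params->tid,
					  params->ssn, params->buf_size,
					  params->timeout);
	case IEEE80211_AMPDU_RX_STOP:
		return mydrv_rx_agg_stop(hw, params->sta, params->tid);
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* buf_size and amsdu describe the peer's receive limits */
		return mydrv_tx_agg_oper(hw, params->sta, params->tid,
					 params->buf_size, params->amsdu);
	default:
		return -EOPNOTSUPP;
	}
}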
+ * This function writes the tkip IV value to pos (which should + * point to the crypto header) */ -void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf, - struct ieee80211_key_seq *seq); +u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key_conf *keyconf, u64 pn); /** * ieee80211_get_key_rx_seq - get key RX sequence counter @@ -4410,23 +4485,6 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, int tid, struct ieee80211_key_seq *seq); /** - * ieee80211_set_key_tx_seq - set key TX sequence counter - * - * @keyconf: the parameter passed with the set key - * @seq: new sequence data - * - * This function allows a driver to set the current TX IV/PNs for the - * given key. This is useful when resuming from WoWLAN sleep and the - * device may have transmitted frames using the PTK, e.g. replies to - * ARP requests. - * - * Note that this function may only be called when no TX processing - * can be done concurrently. - */ -void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf, - struct ieee80211_key_seq *seq); - -/** * ieee80211_set_key_rx_seq - set key RX sequence counter * * @keyconf: the parameter passed with the set key @@ -5121,6 +5179,24 @@ void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap, const u8 *addr); /** + * ieee80211_mark_rx_ba_filtered_frames - move RX BA window and mark filtered + * @pubsta: station struct + * @tid: the session's TID + * @ssn: starting sequence number of the bitmap, all frames before this are + * assumed to be out of the window after the call + * @filtered: bitmap of filtered frames, BIT(0) is the @ssn entry etc. + * @received_mpdus: number of received mpdus in firmware + * + * This function moves the BA window and releases all frames before @ssn, and + * marks frames marked in the bitmap as having been filtered. Afterwards, it + * checks if any frames in the window starting from @ssn can now be released + * (in case they were only waiting for frames that were filtered.) + */ +void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, + u16 ssn, u64 filtered, + u16 received_mpdus); + +/** * ieee80211_send_bar - send a BlockAckReq frame * * can be used to flush pending frames from the peer's aggregation reorder @@ -5371,6 +5447,21 @@ ieee80211_vif_type_p2p(struct ieee80211_vif *vif) return ieee80211_iftype_p2p(vif->type, vif->p2p); } +/** + * ieee80211_update_mu_groups - set the VHT MU-MIMO groud data + * + * @vif: the specified virtual interface + * @membership: 64 bits array - a bit is set if station is member of the group + * @position: 2 bits per group id indicating the position in the group + * + * Note: This function assumes that the given vif is valid and the position and + * membership data is of the correct size and are in the same byte order as the + * matching GroupId management frame. + * Calls to this function need to be serialized with RX path. + */ +void ieee80211_update_mu_groups(struct ieee80211_vif *vif, + const u8 *membership, const u8 *position); + void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif, int rssi_min_thold, int rssi_max_thold); @@ -5523,4 +5614,19 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid); */ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, struct ieee80211_txq *txq); + +/** + * ieee80211_txq_get_depth - get pending frame/byte count of given txq + * + * The values are not guaranteed to be coherent with regard to each other, i.e. 
+ * txq state can change half-way of this function and the caller may end up + * with "new" frame_cnt and "old" byte_cnt or vice-versa. + * + * @txq: pointer obtained from station or virtual interface + * @frame_cnt: pointer to store frame count + * @byte_cnt: pointer to store byte count + */ +void ieee80211_txq_get_depth(struct ieee80211_txq *txq, + unsigned long *frame_cnt, + unsigned long *byte_cnt); #endif /* MAC80211_H */ diff --git a/include/net/mac802154.h b/include/net/mac802154.h index da574bbdc333..6cd7a70706a9 100644 --- a/include/net/mac802154.h +++ b/include/net/mac802154.h @@ -16,10 +16,10 @@ #ifndef NET_MAC802154_H #define NET_MAC802154_H +#include <asm/unaligned.h> #include <net/af_ieee802154.h> #include <linux/ieee802154.h> #include <linux/skbuff.h> -#include <linux/unaligned/memmove.h> #include <net/cfg802154.h> @@ -247,13 +247,14 @@ struct ieee802154_ops { */ static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb) { - /* return some invalid fc on failure */ - if (unlikely(skb->len < 2)) { + /* check if we can fc at skb_mac_header of sk buffer */ + if (unlikely(!skb_mac_header_was_set(skb) || + (skb_tail_pointer(skb) - skb_mac_header(skb)) < 2)) { WARN_ON(1); return cpu_to_le16(0); } - return (__force __le16)__get_unaligned_memmove16(skb_mac_header(skb)); + return get_unaligned_le16(skb_mac_header(skb)); } /** @@ -263,7 +264,7 @@ static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb) */ static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src) { - __put_unaligned_memmove64(swab64p(be64_src), le64_dst); + put_unaligned_le64(get_unaligned_be64(be64_src), le64_dst); } /** @@ -273,7 +274,7 @@ static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src) */ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src) { - __put_unaligned_memmove64(swab64p(le64_src), be64_dst); + put_unaligned_be64(get_unaligned_le64(le64_src), be64_dst); } /** @@ -283,7 +284,7 @@ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src) */ static inline void ieee802154_le16_to_be16(void *be16_dst, const void *le16_src) { - __put_unaligned_memmove16(swab16p(le16_src), be16_dst); + put_unaligned_be16(get_unaligned_le16(le16_src), be16_dst); } /** diff --git a/include/net/netfilter/nft_masq.h b/include/net/netfilter/nft_masq.h index e2a518b60e19..a3f3c11b2526 100644 --- a/include/net/netfilter/nft_masq.h +++ b/include/net/netfilter/nft_masq.h @@ -2,7 +2,9 @@ #define _NFT_MASQ_H_ struct nft_masq { - u32 flags; + u32 flags; + enum nft_registers sreg_proto_min:8; + enum nft_registers sreg_proto_max:8; }; extern const struct nla_policy nft_masq_policy[]; diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 2b7907a35568..a69cde3ce460 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -80,9 +80,13 @@ struct netns_ipv4 { int sysctl_tcp_ecn; int sysctl_tcp_ecn_fallback; + int sysctl_ip_default_ttl; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; int sysctl_ip_nonlocal_bind; + /* Shall we try to damage output packets if routing dev changes? 
*/ + int sysctl_ip_dynaddr; + int sysctl_ip_early_demux; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; @@ -98,6 +102,21 @@ struct netns_ipv4 { int sysctl_tcp_keepalive_probes; int sysctl_tcp_keepalive_intvl; + int sysctl_tcp_syn_retries; + int sysctl_tcp_synack_retries; + int sysctl_tcp_syncookies; + int sysctl_tcp_reordering; + int sysctl_tcp_retries1; + int sysctl_tcp_retries2; + int sysctl_tcp_orphan_retries; + int sysctl_tcp_fin_timeout; + unsigned int sysctl_tcp_notsent_lowat; + + int sysctl_igmp_max_memberships; + int sysctl_igmp_max_msf; + int sysctl_igmp_llm_reports; + int sysctl_igmp_qrv; + struct ping_group_range ping_group_range; atomic_t dev_addr_genid; diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index c0368db6df54..10d0848f5b8a 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -58,7 +58,10 @@ struct netns_ipv6 { struct timer_list ip6_fib_timer; struct hlist_head *fib_table_hash; struct fib6_table *fib6_main_tbl; + struct list_head fib6_walkers; struct dst_ops ip6_dst_ops; + rwlock_t fib6_walker_lock; + spinlock_t fib6_gc_lock; unsigned int ip6_rt_gc_expire; unsigned long ip6_rt_last_gc; #ifdef CONFIG_IPV6_MULTIPLE_TABLES diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h index 68e509750caa..039cc29cb4a8 100644 --- a/include/net/phonet/phonet.h +++ b/include/net/phonet/phonet.h @@ -51,7 +51,7 @@ void pn_sock_init(void); struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *sa); void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb); void phonet_get_local_port_range(int *min, int *max); -void pn_sock_hash(struct sock *sk); +int pn_sock_hash(struct sock *sk); void pn_sock_unhash(struct sock *sk); int pn_sock_get_port(struct sock *sk, unsigned short sport); diff --git a/include/net/ping.h b/include/net/ping.h index ac80cb45e630..5fd7cc244833 100644 --- a/include/net/ping.h +++ b/include/net/ping.h @@ -65,7 +65,7 @@ struct pingfakehdr { }; int ping_get_port(struct sock *sk, unsigned short ident); -void ping_hash(struct sock *sk); +int ping_hash(struct sock *sk); void ping_unhash(struct sock *sk); int ping_init_sock(struct sock *sk); diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index bc49967e1a68..caa5e18636df 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -358,4 +358,69 @@ tcf_match_indev(struct sk_buff *skb, int ifindex) } #endif /* CONFIG_NET_CLS_IND */ +struct tc_cls_u32_knode { + struct tcf_exts *exts; + struct tc_u32_sel *sel; + u32 handle; + u32 val; + u32 mask; + u32 link_handle; + u8 fshift; +}; + +struct tc_cls_u32_hnode { + u32 handle; + u32 prio; + unsigned int divisor; +}; + +enum tc_clsu32_command { + TC_CLSU32_NEW_KNODE, + TC_CLSU32_REPLACE_KNODE, + TC_CLSU32_DELETE_KNODE, + TC_CLSU32_NEW_HNODE, + TC_CLSU32_REPLACE_HNODE, + TC_CLSU32_DELETE_HNODE, +}; + +struct tc_cls_u32_offload { + /* knode values */ + enum tc_clsu32_command command; + union { + struct tc_cls_u32_knode knode; + struct tc_cls_u32_hnode hnode; + }; +}; + +/* tca flags definitions */ +#define TCA_CLS_FLAGS_SKIP_HW 1 + +static inline bool tc_should_offload(struct net_device *dev, u32 flags) +{ + if (!(dev->features & NETIF_F_HW_TC)) + return false; + + if (flags & TCA_CLS_FLAGS_SKIP_HW) + return false; + + if (!dev->netdev_ops->ndo_setup_tc) + return false; + + return true; +} + +enum tc_fl_command { + TC_CLSFLOWER_REPLACE, + TC_CLSFLOWER_DESTROY, +}; + +struct tc_cls_flower_offload { + enum tc_fl_command command; + unsigned long cookie; + struct flow_dissector 
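A sketch of how a classifier might gate a u32 hash-node offload request on the new tc_should_offload() helper; how the request then reaches the driver (its ndo_setup_tc hook) is elided here.

#include <net/pkt_cls.h>

static void demo_offload_hnode(struct net_device *dev, u32 flags,
			       u32 handle, u32 prio, unsigned int divisor)
{
	struct tc_cls_u32_offload u32_offload = {};

	if (!tc_should_offload(dev, flags))
		return;		/* no NETIF_F_HW_TC, or SKIP_HW requested */

	u32_offload.command = TC_CLSU32_NEW_HNODE;
	u32_offload.hnode.handle = handle;
	u32_offload.hnode.prio = prio;
	u32_offload.hnode.divisor = divisor;

	/* hand &u32_offload to the driver through its ndo_setup_tc hook */
}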
*dissector; + struct fl_flow_key *mask; + struct fl_flow_key *key; + struct tcf_exts *exts; +}; + #endif diff --git a/include/net/raw.h b/include/net/raw.h index 6a40c6562dd2..3e789008394d 100644 --- a/include/net/raw.h +++ b/include/net/raw.h @@ -57,7 +57,7 @@ int raw_seq_open(struct inode *ino, struct file *file, #endif -void raw_hash_sk(struct sock *sk); +int raw_hash_sk(struct sock *sk); void raw_unhash_sk(struct sock *sk); struct raw_sock { diff --git a/include/net/route.h b/include/net/route.h index a3b9ef74a389..9b0a523bb428 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -329,14 +329,13 @@ static inline int inet_iif(const struct sk_buff *skb) return skb->skb_iif; } -extern int sysctl_ip_default_ttl; - static inline int ip4_dst_hoplimit(const struct dst_entry *dst) { int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT); + struct net *net = dev_net(dst->dev); if (hoplimit == 0) - hoplimit = sysctl_ip_default_ttl; + hoplimit = net->ipv4.sysctl_ip_default_ttl; return hoplimit; } diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 636a362a0e03..46e55f0202a6 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -345,6 +345,12 @@ extern struct Qdisc_ops pfifo_fast_ops; extern struct Qdisc_ops mq_qdisc_ops; extern struct Qdisc_ops noqueue_qdisc_ops; extern const struct Qdisc_ops *default_qdisc_ops; +static inline const struct Qdisc_ops * +get_default_qdisc_ops(const struct net_device *dev, int ntx) +{ + return ntx < dev->real_num_tx_queues ? + default_qdisc_ops : &pfifo_fast_ops; +} struct Qdisc_class_common { u32 classid; @@ -396,7 +402,8 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, struct Qdisc *qdisc); void qdisc_reset(struct Qdisc *qdisc); void qdisc_destroy(struct Qdisc *qdisc); -void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n); +void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n, + unsigned int len); struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, const struct Qdisc_ops *ops); struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, @@ -707,6 +714,23 @@ static inline void qdisc_reset_queue(struct Qdisc *sch) sch->qstats.backlog = 0; } +static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, + struct Qdisc **pold) +{ + struct Qdisc *old; + + sch_tree_lock(sch); + old = *pold; + *pold = new; + if (old != NULL) { + qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog); + qdisc_reset(old); + } + sch_tree_unlock(sch); + + return old; +} + static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch, struct sk_buff_head *list) { diff --git a/include/net/scm.h b/include/net/scm.h index 262532d111f5..59fa93c01d2a 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -21,6 +21,7 @@ struct scm_creds { struct scm_fp_list { short count; short max; + struct user_struct *user; struct file *fp[SCM_MAX_FD]; }; diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h index f2d58aa37a6f..9b9fb122b31f 100644 --- a/include/net/sctp/auth.h +++ b/include/net/sctp/auth.h @@ -31,12 +31,12 @@ #define __sctp_auth_h__ #include <linux/list.h> -#include <linux/crypto.h> struct sctp_endpoint; struct sctp_association; struct sctp_authkey; struct sctp_hmacalgo; +struct crypto_shash; /* * Define a generic struct that will hold all the info @@ -90,7 +90,7 @@ int sctp_auth_asoc_copy_shkeys(const struct sctp_endpoint *ep, struct sctp_association *asoc, gfp_t gfp); int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp); -void 
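The new qdisc_replace() helper captures the common ->graft() pattern; a minimal sketch for a classful qdisc with a single child (the demo_sched_data layout is illustrative):

#include <net/sch_generic.h>

struct demo_sched_data {
	struct Qdisc *child;
};

static int demo_graft(struct Qdisc *sch, unsigned long arg,
		      struct Qdisc *new, struct Qdisc **old)
{
	struct demo_sched_data *q = qdisc_priv(sch);

	if (!new)
		new = &noop_qdisc;

	/* swaps the child under sch_tree_lock(), purges the old child's
	 * qlen/backlog from the ancestors and resets it */
	*old = qdisc_replace(sch, new, &q->child);
	return 0;
}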
sctp_auth_destroy_hmacs(struct crypto_hash *auth_hmacs[]); +void sctp_auth_destroy_hmacs(struct crypto_shash *auth_hmacs[]); struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id); struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc); void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc, diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 487ef34bbd63..efc01743b9d6 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -201,7 +201,7 @@ struct sctp_chunk *sctp_make_cwr(const struct sctp_association *, struct sctp_chunk * sctp_make_datafrag_empty(struct sctp_association *, const struct sctp_sndrcvinfo *sinfo, int len, const __u8 flags, - __u16 ssn); + __u16 ssn, gfp_t gfp); struct sctp_chunk *sctp_make_ecne(const struct sctp_association *, const __u32); struct sctp_chunk *sctp_make_sack(const struct sctp_association *); diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 205630bb5010..e2ac0620d4be 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -82,7 +82,7 @@ struct sctp_bind_addr; struct sctp_ulpq; struct sctp_ep_common; struct sctp_ssnmap; -struct crypto_hash; +struct crypto_shash; #include <net/sctp/tsnmap.h> @@ -166,7 +166,7 @@ struct sctp_sock { struct sctp_pf *pf; /* Access to HMAC transform. */ - struct crypto_hash *hmac; + struct crypto_shash *hmac; char *sctp_hmac_alg; /* What is our base endpointer? */ @@ -535,7 +535,6 @@ struct sctp_datamsg { struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *, struct sctp_sndrcvinfo *, struct iov_iter *); -void sctp_datamsg_free(struct sctp_datamsg *); void sctp_datamsg_put(struct sctp_datamsg *); void sctp_chunk_fail(struct sctp_chunk *, int error); int sctp_chunk_abandoned(struct sctp_chunk *); @@ -656,7 +655,7 @@ void sctp_chunk_free(struct sctp_chunk *); void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data); struct sctp_chunk *sctp_chunkify(struct sk_buff *, const struct sctp_association *, - struct sock *); + struct sock *, gfp_t gfp); void sctp_init_addrs(struct sctp_chunk *, union sctp_addr *, union sctp_addr *); const union sctp_addr *sctp_source(const struct sctp_chunk *chunk); @@ -718,10 +717,10 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *, __u16 sport, __u16 dport); struct sctp_packet *sctp_packet_config(struct sctp_packet *, __u32 vtag, int); sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *, - struct sctp_chunk *, int); + struct sctp_chunk *, int, gfp_t); sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *, struct sctp_chunk *); -int sctp_packet_transmit(struct sctp_packet *); +int sctp_packet_transmit(struct sctp_packet *, gfp_t); void sctp_packet_free(struct sctp_packet *); static inline int sctp_packet_empty(struct sctp_packet *packet) @@ -1054,7 +1053,7 @@ struct sctp_outq { void sctp_outq_init(struct sctp_association *, struct sctp_outq *); void sctp_outq_teardown(struct sctp_outq *); void sctp_outq_free(struct sctp_outq*); -int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk); +int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t); int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *); int sctp_outq_is_empty(const struct sctp_outq *); void sctp_outq_restart(struct sctp_outq *); @@ -1062,7 +1061,7 @@ void sctp_outq_restart(struct sctp_outq *); void sctp_retransmit(struct sctp_outq *, struct sctp_transport *, sctp_retransmit_reason_t); void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8); -int 
sctp_outq_uncork(struct sctp_outq *); +int sctp_outq_uncork(struct sctp_outq *, gfp_t gfp); /* Uncork and flush an outqueue. */ static inline void sctp_outq_cork(struct sctp_outq *q) { @@ -1234,7 +1233,7 @@ struct sctp_endpoint { /* SCTP AUTH: array of the HMACs that will be allocated * we need this per association so that we don't serialize */ - struct crypto_hash **auth_hmacs; + struct crypto_shash **auth_hmacs; /* SCTP-AUTH: hmacs for the endpoint encoded into parameter */ struct sctp_hmac_algo_param *auth_hmacs_list; diff --git a/include/net/sock.h b/include/net/sock.h index f5ea148853e2..255d3e03727b 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -984,7 +984,7 @@ struct proto { void (*release_cb)(struct sock *sk); /* Keeping track of sk's, looking them up, and port selection methods. */ - void (*hash)(struct sock *sk); + int (*hash)(struct sock *sk); void (*unhash)(struct sock *sk); void (*rehash)(struct sock *sk); int (*get_port)(struct sock *sk, unsigned short snum); @@ -1194,10 +1194,10 @@ static inline void sock_prot_inuse_add(struct net *net, struct proto *prot, /* With per-bucket locks this operation is not-atomic, so that * this version is not worse. */ -static inline void __sk_prot_rehash(struct sock *sk) +static inline int __sk_prot_rehash(struct sock *sk) { sk->sk_prot->unhash(sk); - sk->sk_prot->hash(sk); + return sk->sk_prot->hash(sk); } void sk_prot_clear_portaddr_nulls(struct sock *sk, int size); diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h index 592a6bc02b0b..93c520b83d10 100644 --- a/include/net/tc_act/tc_gact.h +++ b/include/net/tc_act/tc_gact.h @@ -2,6 +2,7 @@ #define __NET_TC_GACT_H #include <net/act_api.h> +#include <linux/tc_act/tc_gact.h> struct tcf_gact { struct tcf_common common; @@ -15,4 +16,19 @@ struct tcf_gact { #define to_gact(a) \ container_of(a->priv, struct tcf_gact, common) +static inline bool is_tcf_gact_shot(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + struct tcf_gact *gact; + + if (a->ops && a->ops->type != TCA_ACT_GACT) + return false; + + gact = a->priv; + if (gact->tcf_action == TC_ACT_SHOT) + return true; + +#endif + return false; +} #endif /* __NET_TC_GACT_H */ diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h new file mode 100644 index 000000000000..dc9a09aefb33 --- /dev/null +++ b/include/net/tc_act/tc_ife.h @@ -0,0 +1,61 @@ +#ifndef __NET_TC_IFE_H +#define __NET_TC_IFE_H + +#include <net/act_api.h> +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/module.h> + +#define IFE_METAHDRLEN 2 +struct tcf_ife_info { + struct tcf_common common; + u8 eth_dst[ETH_ALEN]; + u8 eth_src[ETH_ALEN]; + u16 eth_type; + u16 flags; + /* list of metaids allowed */ + struct list_head metalist; +}; +#define to_ife(a) \ + container_of(a->priv, struct tcf_ife_info, common) + +struct tcf_meta_info { + const struct tcf_meta_ops *ops; + void *metaval; + u16 metaid; + struct list_head metalist; +}; + +struct tcf_meta_ops { + u16 metaid; /*Maintainer provided ID */ + u16 metatype; /*netlink attribute type (look at net/netlink.h) */ + const char *name; + const char *synopsis; + struct list_head list; + int (*check_presence)(struct sk_buff *, struct tcf_meta_info *); + int (*encode)(struct sk_buff *, void *, struct tcf_meta_info *); + int (*decode)(struct sk_buff *, void *, u16 len); + int (*get)(struct sk_buff *skb, struct tcf_meta_info *mi); + int (*alloc)(struct tcf_meta_info *, void *); + void (*release)(struct tcf_meta_info *); + int (*validate)(void *val, int len); 
+ struct module *owner; +}; + +#define MODULE_ALIAS_IFE_META(metan) MODULE_ALIAS("ifemeta" __stringify_1(metan)) + +int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi); +int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi); +int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, + const void *dval); +int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval); +int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval); +int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi); +int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi); +int ife_validate_meta_u32(void *val, int len); +int ife_validate_meta_u16(void *val, int len); +void ife_release_meta_gen(struct tcf_meta_info *mi); +int register_ife_op(struct tcf_meta_ops *mops); +int unregister_ife_op(struct tcf_meta_ops *mops); + +#endif /* __NET_TC_IFE_H */ diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h index 0df9a0db4a8e..b496d5ad7d42 100644 --- a/include/net/tc_act/tc_skbedit.h +++ b/include/net/tc_act/tc_skbedit.h @@ -20,6 +20,7 @@ #define __NET_TC_SKBEDIT_H #include <net/act_api.h> +#include <linux/tc_act/tc_skbedit.h> struct tcf_skbedit { struct tcf_common common; @@ -32,4 +33,19 @@ struct tcf_skbedit { #define to_skbedit(a) \ container_of(a->priv, struct tcf_skbedit, common) +/* Return true iff action is mark */ +static inline bool is_tcf_skbedit_mark(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + if (a->ops && a->ops->type == TCA_ACT_SKBEDIT) + return to_skbedit(a)->flags == SKBEDIT_F_MARK; +#endif + return false; +} + +static inline u32 tcf_skbedit_mark(const struct tc_action *a) +{ + return to_skbedit(a)->mark; +} + #endif /* __NET_TC_SKBEDIT_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index f6f8f032c73e..b91370f61be6 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -27,7 +27,6 @@ #include <linux/cache.h> #include <linux/percpu.h> #include <linux/skbuff.h> -#include <linux/crypto.h> #include <linux/cryptohash.h> #include <linux/kref.h> #include <linux/ktime.h> @@ -239,13 +238,6 @@ extern struct inet_timewait_death_row tcp_death_row; extern int sysctl_tcp_timestamps; extern int sysctl_tcp_window_scaling; extern int sysctl_tcp_sack; -extern int sysctl_tcp_fin_timeout; -extern int sysctl_tcp_syn_retries; -extern int sysctl_tcp_synack_retries; -extern int sysctl_tcp_retries1; -extern int sysctl_tcp_retries2; -extern int sysctl_tcp_orphan_retries; -extern int sysctl_tcp_syncookies; extern int sysctl_tcp_fastopen; extern int sysctl_tcp_retrans_collapse; extern int sysctl_tcp_stdurg; @@ -274,7 +266,6 @@ extern int sysctl_tcp_thin_dupack; extern int sysctl_tcp_early_retrans; extern int sysctl_tcp_limit_output_bytes; extern int sysctl_tcp_challenge_ack_limit; -extern unsigned int sysctl_tcp_notsent_lowat; extern int sysctl_tcp_min_tso_segs; extern int sysctl_tcp_min_rtt_wlen; extern int sysctl_tcp_autocorking; @@ -447,7 +438,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); void tcp_v4_mtu_reduced(struct sock *sk); -void tcp_req_err(struct sock *sk, u32 seq); +void tcp_req_err(struct sock *sk, u32 seq, bool abort); int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); struct sock *tcp_create_openreq_child(const struct sock *sk, struct request_sock *req, @@ -568,6 +559,7 @@ void tcp_rearm_rto(struct sock *sk); void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req); void tcp_reset(struct sock *sk); void 
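The is_tcf_gact_shot() and is_tcf_skbedit_mark() helpers let offloading drivers recognize "drop" and "set skb->mark" actions without reaching into act_api internals. A hedged sketch of the per-action decision; how the driver walks the action list is elided.

#include <linux/errno.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>

/* returns 1 to program a hardware drop rule, 0 if a mark was extracted,
 * or a negative errno for anything this sketch does not handle */
static int demo_parse_action(const struct tc_action *a, u32 *mark_out)
{
	if (is_tcf_gact_shot(a))
		return 1;

	if (is_tcf_skbedit_mark(a)) {
		*mark_out = tcf_skbedit_mark(a);
		return 0;
	}

	return -EOPNOTSUPP;
}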
tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb); +void tcp_fin(struct sock *sk); /* tcp_timer.c */ void tcp_init_xmit_timers(struct sock *); @@ -963,9 +955,11 @@ static inline void tcp_enable_fack(struct tcp_sock *tp) */ static inline void tcp_enable_early_retrans(struct tcp_sock *tp) { + struct net *net = sock_net((struct sock *)tp); + tp->do_early_retrans = sysctl_tcp_early_retrans && sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack && - sysctl_tcp_reordering == 3; + net->ipv4.sysctl_tcp_reordering == 3; } static inline void tcp_disable_early_retrans(struct tcp_sock *tp) @@ -1252,7 +1246,7 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp) static inline int tcp_fin_time(const struct sock *sk) { - int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout; + int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout; const int rto = inet_csk(sk)->icsk_rto; if (fin_timeout < (rto << 2) - (rto >> 1)) @@ -1325,9 +1319,6 @@ static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp) tp->retransmit_skb_hint = NULL; } -/* MD5 Signature */ -struct crypto_hash; - union tcp_md5_addr { struct in_addr a4; #if IS_ENABLED(CONFIG_IPV6) @@ -1376,7 +1367,7 @@ union tcp_md5sum_block { /* - pool: digest algorithm, hash description and scratch buffer */ struct tcp_md5sig_pool { - struct hash_desc md5_desc; + struct ahash_request *md5_req; union tcp_md5sum_block md5_blk; }; @@ -1437,6 +1428,7 @@ void tcp_free_fastopen_req(struct tcp_sock *tp); extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx; int tcp_fastopen_reset_cipher(void *key, unsigned int len); +void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb); struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct tcp_fastopen_cookie *foc, @@ -1685,7 +1677,8 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp) { - return tp->notsent_lowat ?: sysctl_tcp_notsent_lowat; + struct net *net = sock_net((struct sock *)tp); + return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat; } static inline bool tcp_stream_memory_free(const struct sock *sk) @@ -1819,4 +1812,38 @@ static inline void skb_set_tcp_pure_ack(struct sk_buff *skb) skb->truesize = 2; } +static inline int tcp_inq(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + int answ; + + if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { + answ = 0; + } else if (sock_flag(sk, SOCK_URGINLINE) || + !tp->urg_data || + before(tp->urg_seq, tp->copied_seq) || + !before(tp->urg_seq, tp->rcv_nxt)) { + + answ = tp->rcv_nxt - tp->copied_seq; + + /* Subtract 1, if FIN was received */ + if (answ && sock_flag(sk, SOCK_DONE)) + answ--; + } else { + answ = tp->urg_seq - tp->copied_seq; + } + + return answ; +} + +static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb) +{ + u16 segs_in; + + segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs); + tp->segs_in += segs_in; + if (skb->len > tcp_hdrlen(skb)) + tp->data_segs_in += segs_in; +} + #endif /* _TCP_H */ diff --git a/include/net/udp.h b/include/net/udp.h index 2842541e28e7..92927f729ac8 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -177,9 +177,10 @@ static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb) } /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */ -static inline void udp_lib_hash(struct sock *sk) +static inline int udp_lib_hash(struct sock *sk) { BUG(); + 
return 0; } void udp_lib_unhash(struct sock *sk); diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index cca2ad3082c3..b83114077cee 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -88,8 +88,8 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, struct net_device *dev, struct in6_addr *saddr, struct in6_addr *daddr, - __u8 prio, __u8 ttl, __be16 src_port, - __be16 dst_port, bool nocheck); + __u8 prio, __u8 ttl, __be32 label, + __be16 src_port, __be16 dst_port, bool nocheck); #endif void udp_tunnel_sock_release(struct socket *sock); @@ -103,7 +103,7 @@ static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb, { int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; - return iptunnel_handle_offloads(skb, udp_csum, type); + return iptunnel_handle_offloads(skb, type); } static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff) diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 0fb86442544b..a763c96ecde4 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -9,17 +9,71 @@ #include <linux/udp.h> #include <net/dst_metadata.h> +/* VXLAN protocol (RFC 7348) header: + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |R|R|R|R|I|R|R|R| Reserved | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | VXLAN Network Identifier (VNI) | Reserved | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * I = VXLAN Network Identifier (VNI) present. + */ +struct vxlanhdr { + __be32 vx_flags; + __be32 vx_vni; +}; + +/* VXLAN header flags. */ +#define VXLAN_HF_VNI cpu_to_be32(BIT(27)) + +#define VXLAN_N_VID (1u << 24) +#define VXLAN_VID_MASK (VXLAN_N_VID - 1) +#define VXLAN_VNI_MASK cpu_to_be32(VXLAN_VID_MASK << 8) +#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) + #define VNI_HASH_BITS 10 #define VNI_HASH_SIZE (1<<VNI_HASH_BITS) +#define FDB_HASH_BITS 8 +#define FDB_HASH_SIZE (1<<FDB_HASH_BITS) + +/* Remote checksum offload for VXLAN (VXLAN_F_REMCSUM_[RT]X): + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |R|R|R|R|I|R|R|R|R|R|C| Reserved | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | VXLAN Network Identifier (VNI) |O| Csum start | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * C = Remote checksum offload bit. When set indicates that the + * remote checksum offload data is present. + * + * O = Offset bit. Indicates the checksum offset relative to + * checksum start. + * + * Csum start = Checksum start divided by two. + * + * http://tools.ietf.org/html/draft-herbert-vxlan-rco + */ + +/* VXLAN-RCO header flags. 
*/ +#define VXLAN_HF_RCO cpu_to_be32(BIT(21)) + +/* Remote checksum offload header option */ +#define VXLAN_RCO_MASK cpu_to_be32(0x7f) /* Last byte of vni field */ +#define VXLAN_RCO_UDP cpu_to_be32(0x80) /* Indicate UDP RCO (TCP when not set *) */ +#define VXLAN_RCO_SHIFT 1 /* Left shift of start */ +#define VXLAN_RCO_SHIFT_MASK ((1 << VXLAN_RCO_SHIFT) - 1) +#define VXLAN_MAX_REMCSUM_START (0x7f << VXLAN_RCO_SHIFT) /* - * VXLAN Group Based Policy Extension: + * VXLAN Group Based Policy Extension (VXLAN_F_GBP): * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * |1|-|-|-|1|-|-|-|R|D|R|R|A|R|R|R| Group Policy ID | + * |G|R|R|R|I|R|R|R|R|D|R|R|A|R|R|R| Group Policy ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | VXLAN Network Identifier (VNI) | Reserved | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * + * G = Group Policy ID present. + * * D = Don't Learn bit. When set, this bit indicates that the egress * VTEP MUST NOT learn the source address of the encapsulated frame. * @@ -27,18 +81,18 @@ * this packet. Policies MUST NOT be applied by devices when the * A bit is set. * - * [0] https://tools.ietf.org/html/draft-smith-vxlan-group-policy + * https://tools.ietf.org/html/draft-smith-vxlan-group-policy */ struct vxlanhdr_gbp { - __u8 vx_flags; + u8 vx_flags; #ifdef __LITTLE_ENDIAN_BITFIELD - __u8 reserved_flags1:3, + u8 reserved_flags1:3, policy_applied:1, reserved_flags2:2, dont_learn:1, reserved_flags3:1; #elif defined(__BIG_ENDIAN_BITFIELD) - __u8 reserved_flags1:1, + u8 reserved_flags1:1, dont_learn:1, reserved_flags2:2, policy_applied:1, @@ -50,7 +104,10 @@ struct vxlanhdr_gbp { __be32 vx_vni; }; -#define VXLAN_GBP_USED_BITS (VXLAN_HF_GBP | 0xFFFFFF) +/* VXLAN-GBP header flags. */ +#define VXLAN_HF_GBP cpu_to_be32(BIT(31)) + +#define VXLAN_GBP_USED_BITS (VXLAN_HF_GBP | cpu_to_be32(0xFFFFFF)) /* skb->mark mapping * @@ -62,44 +119,6 @@ struct vxlanhdr_gbp { #define VXLAN_GBP_POLICY_APPLIED (BIT(3) << 16) #define VXLAN_GBP_ID_MASK (0xFFFF) -/* VXLAN protocol header: - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * |G|R|R|R|I|R|R|C| Reserved | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * | VXLAN Network Identifier (VNI) | Reserved | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * - * G = 1 Group Policy (VXLAN-GBP) - * I = 1 VXLAN Network Identifier (VNI) present - * C = 1 Remote checksum offload (RCO) - */ -struct vxlanhdr { - __be32 vx_flags; - __be32 vx_vni; -}; - -/* VXLAN header flags. 
*/ -#define VXLAN_HF_RCO BIT(21) -#define VXLAN_HF_VNI BIT(27) -#define VXLAN_HF_GBP BIT(31) - -/* Remote checksum offload header option */ -#define VXLAN_RCO_MASK 0x7f /* Last byte of vni field */ -#define VXLAN_RCO_UDP 0x80 /* Indicate UDP RCO (TCP when not set *) */ -#define VXLAN_RCO_SHIFT 1 /* Left shift of start */ -#define VXLAN_RCO_SHIFT_MASK ((1 << VXLAN_RCO_SHIFT) - 1) -#define VXLAN_MAX_REMCSUM_START (VXLAN_RCO_MASK << VXLAN_RCO_SHIFT) - -#define VXLAN_N_VID (1u << 24) -#define VXLAN_VID_MASK (VXLAN_N_VID - 1) -#define VXLAN_VNI_MASK (VXLAN_VID_MASK << 8) -#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) - -#define VNI_HASH_BITS 10 -#define VNI_HASH_SIZE (1<<VNI_HASH_BITS) -#define FDB_HASH_BITS 8 -#define FDB_HASH_SIZE (1<<FDB_HASH_BITS) - struct vxlan_metadata { u32 gbp; }; @@ -125,23 +144,25 @@ union vxlan_addr { struct vxlan_rdst { union vxlan_addr remote_ip; __be16 remote_port; - u32 remote_vni; + __be32 remote_vni; u32 remote_ifindex; struct list_head list; struct rcu_head rcu; + struct dst_cache dst_cache; }; struct vxlan_config { union vxlan_addr remote_ip; union vxlan_addr saddr; - u32 vni; + __be32 vni; int remote_ifindex; int mtu; __be16 dst_port; - __u16 port_min; - __u16 port_max; - __u8 tos; - __u8 ttl; + u16 port_min; + u16 port_max; + u8 tos; + u8 ttl; + __be32 label; u32 flags; unsigned long age_interval; unsigned int addrmax; @@ -177,7 +198,7 @@ struct vxlan_dev { #define VXLAN_F_L2MISS 0x08 #define VXLAN_F_L3MISS 0x10 #define VXLAN_F_IPV6 0x20 -#define VXLAN_F_UDP_CSUM 0x40 +#define VXLAN_F_UDP_ZERO_CSUM_TX 0x40 #define VXLAN_F_UDP_ZERO_CSUM6_TX 0x80 #define VXLAN_F_UDP_ZERO_CSUM6_RX 0x100 #define VXLAN_F_REMCSUM_TX 0x200 @@ -242,6 +263,68 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb, /* IPv6 header + UDP + VXLAN + Ethernet header */ #define VXLAN6_HEADROOM (40 + 8 + 8 + 14) +static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb) +{ + return (struct vxlanhdr *)(udp_hdr(skb) + 1); +} + +static inline __be32 vxlan_vni(__be32 vni_field) +{ +#if defined(__BIG_ENDIAN) + return vni_field >> 8; +#else + return (vni_field & VXLAN_VNI_MASK) << 8; +#endif +} + +static inline __be32 vxlan_vni_field(__be32 vni) +{ +#if defined(__BIG_ENDIAN) + return vni << 8; +#else + return vni >> 8; +#endif +} + +static inline __be32 vxlan_tun_id_to_vni(__be64 tun_id) +{ +#if defined(__BIG_ENDIAN) + return tun_id; +#else + return tun_id >> 32; +#endif +} + +static inline __be64 vxlan_vni_to_tun_id(__be32 vni) +{ +#if defined(__BIG_ENDIAN) + return (__be64)vni; +#else + return (__be64)vni << 32; +#endif +} + +static inline size_t vxlan_rco_start(__be32 vni_field) +{ + return be32_to_cpu(vni_field & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT; +} + +static inline size_t vxlan_rco_offset(__be32 vni_field) +{ + return (vni_field & VXLAN_RCO_UDP) ? 
+ offsetof(struct udphdr, check) : + offsetof(struct tcphdr, check); +} + +static inline __be32 vxlan_compute_rco(unsigned int start, unsigned int offset) +{ + __be32 vni_field = cpu_to_be32(start >> VXLAN_RCO_SHIFT); + + if (offset == offsetof(struct udphdr, check)) + vni_field |= VXLAN_RCO_UDP; + return vni_field; +} + #if IS_ENABLED(CONFIG_VXLAN) void vxlan_get_rx_port(struct net_device *netdev); #else diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index c34c9002460c..931a47ba4571 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -262,24 +262,22 @@ static inline enum ib_mtu iboe_get_mtu(int mtu) static inline int iboe_get_rate(struct net_device *dev) { - struct ethtool_cmd cmd; - u32 speed; + struct ethtool_link_ksettings cmd; int err; rtnl_lock(); - err = __ethtool_get_settings(dev, &cmd); + err = __ethtool_get_link_ksettings(dev, &cmd); rtnl_unlock(); if (err) return IB_RATE_PORT_CURRENT; - speed = ethtool_cmd_speed(&cmd); - if (speed >= 40000) + if (cmd.base.speed >= 40000) return IB_RATE_40_GBPS; - else if (speed >= 30000) + else if (cmd.base.speed >= 30000) return IB_RATE_30_GBPS; - else if (speed >= 20000) + else if (cmd.base.speed >= 20000) return IB_RATE_20_GBPS; - else if (speed >= 10000) + else if (cmd.base.speed >= 10000) return IB_RATE_10_GBPS; else return IB_RATE_PORT_CURRENT; diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index 0ff049bd9ad4..37dd534cbeab 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -424,11 +424,11 @@ typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent, /** * ib_mad_snoop_handler - Callback handler for snooping sent MADs. * @mad_agent: MAD agent that snooped the MAD. - * @send_wr: Work request information on the sent MAD. + * @send_buf: send MAD data buffer. * @mad_send_wc: Work completion information on the sent MAD. Valid * only for snooping that occurs on a send completion. * - * Clients snooping MADs should not modify data referenced by the @send_wr + * Clients snooping MADs should not modify data referenced by the @send_buf * or @mad_send_wc. 
*/ typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent, diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 284b00c8fea4..3a03c1d18afa 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -212,6 +212,7 @@ enum ib_device_cap_flags { IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29), IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30), IB_DEVICE_ON_DEMAND_PAGING = (1 << 31), + IB_DEVICE_SG_GAPS_REG = (1ULL << 32), }; enum ib_signature_prot_cap { @@ -662,10 +663,15 @@ __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate); * @IB_MR_TYPE_SIGNATURE: memory region that is used for * signature operations (data-integrity * capable regions) + * @IB_MR_TYPE_SG_GAPS: memory region that is capable to + * register any arbitrary sg lists (without + * the normal mr constraints - see + * ib_map_mr_sg) */ enum ib_mr_type { IB_MR_TYPE_MEM_REG, IB_MR_TYPE_SIGNATURE, + IB_MR_TYPE_SG_GAPS, }; /** @@ -1487,6 +1493,11 @@ enum ib_flow_domain { IB_FLOW_DOMAIN_NUM /* Must be last */ }; +enum ib_flow_flags { + IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */ + IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 2 /* Must be last */ +}; + struct ib_flow_eth_filter { u8 dst_mac[6]; u8 src_mac[6]; @@ -1808,7 +1819,8 @@ struct ib_device { struct scatterlist *sg, int sg_nents); struct ib_mw * (*alloc_mw)(struct ib_pd *pd, - enum ib_mw_type type); + enum ib_mw_type type, + struct ib_udata *udata); int (*dealloc_mw)(struct ib_mw *mw); struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd, int mr_access_flags, @@ -1846,6 +1858,8 @@ struct ib_device { int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, struct ib_mr_status *mr_status); void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); + void (*drain_rq)(struct ib_qp *qp); + void (*drain_sq)(struct ib_qp *qp); struct ib_dma_mapping_ops *dma_ops; @@ -3094,4 +3108,7 @@ int ib_sg_to_pages(struct ib_mr *mr, int sg_nents, int (*set_page)(struct ib_mr *, u64)); +void ib_drain_rq(struct ib_qp *qp); +void ib_drain_sq(struct ib_qp *qp); +void ib_drain_qp(struct ib_qp *qp); #endif /* IB_VERBS_H */ diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h index 036bd2772662..6d0065c322b7 100644 --- a/include/rdma/iw_cm.h +++ b/include/rdma/iw_cm.h @@ -83,8 +83,10 @@ struct iw_cm_id { iw_cm_handler cm_handler; /* client callback function */ void *context; /* client cb context */ struct ib_device *device; - struct sockaddr_storage local_addr; + struct sockaddr_storage local_addr; /* local addr */ struct sockaddr_storage remote_addr; + struct sockaddr_storage m_local_addr; /* nmapped local addr */ + struct sockaddr_storage m_remote_addr; /* nmapped rem addr */ void *provider_data; /* provider private data */ iw_event_handler event_handler; /* cb for provider events */ @@ -92,6 +94,7 @@ struct iw_cm_id { void (*add_ref)(struct iw_cm_id *); void (*rem_ref)(struct iw_cm_id *); u8 tos; + bool mapped; }; struct iw_cm_conn_param { @@ -123,6 +126,7 @@ struct iw_cm_verbs { int backlog); int (*destroy_listen)(struct iw_cm_id *cm_id); + char ifname[IFNAMSIZ]; }; /** diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h index 4dce116bfd80..9ebab3a8cf0a 100644 --- a/include/rxrpc/packet.h +++ b/include/rxrpc/packet.h @@ -22,7 +22,7 @@ typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */ * on-the-wire Rx packet header * - all multibyte fields should be in network byte order */ -struct rxrpc_header { +struct rxrpc_wire_header { __be32 epoch; /* client boot timestamp */ __be32 cid; /* connection and channel ID */ @@ 
-68,10 +68,19 @@ struct rxrpc_header { } __packed; -#define __rxrpc_header_off(X) offsetof(struct rxrpc_header,X) - extern const char *rxrpc_pkts[]; +#define RXRPC_SUPPORTED_PACKET_TYPES ( \ + (1 << RXRPC_PACKET_TYPE_DATA) | \ + (1 << RXRPC_PACKET_TYPE_ACK) | \ + (1 << RXRPC_PACKET_TYPE_BUSY) | \ + (1 << RXRPC_PACKET_TYPE_ABORT) | \ + (1 << RXRPC_PACKET_TYPE_ACKALL) | \ + (1 << RXRPC_PACKET_TYPE_CHALLENGE) | \ + (1 << RXRPC_PACKET_TYPE_RESPONSE) | \ + /*(1 << RXRPC_PACKET_TYPE_DEBUG) | */ \ + (1 << RXRPC_PACKET_TYPE_VERSION)) + /*****************************************************************************/ /* * jumbo packet secondary header diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h index 95ed9424a11a..d66c07077d68 100644 --- a/include/scsi/iscsi_if.h +++ b/include/scsi/iscsi_if.h @@ -724,6 +724,8 @@ enum iscsi_port_speed { ISCSI_PORT_SPEED_100MBPS = 0x4, ISCSI_PORT_SPEED_1GBPS = 0x8, ISCSI_PORT_SPEED_10GBPS = 0x10, + ISCSI_PORT_SPEED_25GBPS = 0x20, + ISCSI_PORT_SPEED_40GBPS = 0x40, }; /* iSCSI port state */ diff --git a/include/scsi/libiscsi_tcp.h b/include/scsi/libiscsi_tcp.h index 2a7aa75dd009..30520d5ee3d1 100644 --- a/include/scsi/libiscsi_tcp.h +++ b/include/scsi/libiscsi_tcp.h @@ -26,7 +26,7 @@ struct iscsi_tcp_conn; struct iscsi_segment; struct sk_buff; -struct hash_desc; +struct ahash_request; typedef int iscsi_segment_done_fn_t(struct iscsi_tcp_conn *, struct iscsi_segment *); @@ -38,7 +38,7 @@ struct iscsi_segment { unsigned int total_size; unsigned int total_copied; - struct hash_desc *hash; + struct ahash_request *hash; unsigned char padbuf[ISCSI_PAD_LEN]; unsigned char recv_digest[ISCSI_DIGEST_SIZE]; unsigned char digest[ISCSI_DIGEST_SIZE]; @@ -73,7 +73,7 @@ struct iscsi_tcp_conn { /* control data */ struct iscsi_tcp_recv in; /* TCP receive context */ /* CRC32C (Rx) LLD should set this is they do not offload */ - struct hash_desc *rx_hash; + struct ahash_request *rx_hash; }; struct iscsi_tcp_task { @@ -111,15 +111,16 @@ extern void iscsi_tcp_segment_unmap(struct iscsi_segment *segment); extern void iscsi_segment_init_linear(struct iscsi_segment *segment, void *data, size_t size, iscsi_segment_done_fn_t *done, - struct hash_desc *hash); + struct ahash_request *hash); extern int iscsi_segment_seek_sg(struct iscsi_segment *segment, struct scatterlist *sg_list, unsigned int sg_count, unsigned int offset, size_t size, - iscsi_segment_done_fn_t *done, struct hash_desc *hash); + iscsi_segment_done_fn_t *done, + struct ahash_request *hash); /* digest helpers */ -extern void iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, +extern void iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr, size_t hdrlen, unsigned char digest[ISCSI_DIGEST_SIZE]); extern struct iscsi_cls_conn * diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index f63a16760ae9..c067019ed12a 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -176,6 +176,7 @@ struct scsi_device { unsigned no_dif:1; /* T10 PI (DIF) should be disabled */ unsigned broken_fua:1; /* Don't set FUA bit */ unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */ + unsigned synchronous_alua:1; /* Synchronous ALUA commands */ atomic_t disk_events_disable_depth; /* disable depth for disk events */ @@ -200,6 +201,7 @@ struct scsi_device { struct scsi_device_handler *handler; void *handler_data; + unsigned char access_state; enum scsi_device_state sdev_state; unsigned long sdev_data[0]; } __attribute__((aligned(sizeof(unsigned long)))); @@ -397,6 +399,7 @@ extern 
void scsi_remove_target(struct device *); extern const char *scsi_device_state_name(enum scsi_device_state); extern int scsi_is_sdev_device(const struct device *); extern int scsi_is_target_device(const struct device *); +extern void scsi_sanitize_inquiry_string(unsigned char *s, int len); extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, unsigned char *sense, int timeout, int retries, diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h index 96e3f56519e7..9f750cb63b03 100644 --- a/include/scsi/scsi_devinfo.h +++ b/include/scsi/scsi_devinfo.h @@ -37,5 +37,6 @@ #define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */ #define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */ #define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */ +#define BLIST_SYNC_ALUA 0x80000000 /* Synchronous ALUA commands */ #endif diff --git a/include/scsi/scsi_dh.h b/include/scsi/scsi_dh.h index 85d731746834..c7bba2b24849 100644 --- a/include/scsi/scsi_dh.h +++ b/include/scsi/scsi_dh.h @@ -52,6 +52,7 @@ enum { SCSI_DH_TIMED_OUT, SCSI_DH_RES_TEMP_UNAVAIL, SCSI_DH_DEV_OFFLINED, + SCSI_DH_NOMEM, SCSI_DH_NOSYS, SCSI_DH_DRIVER_MAX, }; @@ -70,6 +71,7 @@ struct scsi_device_handler { int (*activate)(struct scsi_device *, activate_complete, void *); int (*prep_fn)(struct scsi_device *, struct request *); int (*set_params)(struct scsi_device *, const char *); + void (*rescan)(struct scsi_device *); }; #ifdef CONFIG_SCSI_DH diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h index a9fbf1b38e71..c2ae21cbaa2c 100644 --- a/include/scsi/scsi_proto.h +++ b/include/scsi/scsi_proto.h @@ -277,5 +277,17 @@ struct scsi_lun { __u8 scsi_lun[8]; }; +/* SPC asymmetric access states */ +#define SCSI_ACCESS_STATE_OPTIMAL 0x00 +#define SCSI_ACCESS_STATE_ACTIVE 0x01 +#define SCSI_ACCESS_STATE_STANDBY 0x02 +#define SCSI_ACCESS_STATE_UNAVAILABLE 0x03 +#define SCSI_ACCESS_STATE_LBA 0x04 +#define SCSI_ACCESS_STATE_OFFLINE 0x0e +#define SCSI_ACCESS_STATE_TRANSITIONING 0x0f + +/* Values for REPORT TARGET GROUP STATES */ +#define SCSI_ACCESS_STATE_MASK 0x0f +#define SCSI_ACCESS_STATE_PREFERRED 0x80 #endif /* _SCSI_PROTO_H_ */ diff --git a/include/sound/hda_chmap.h b/include/sound/hda_chmap.h new file mode 100644 index 000000000000..e20d219a0304 --- /dev/null +++ b/include/sound/hda_chmap.h @@ -0,0 +1,76 @@ +/* + * For multichannel support + */ + +#ifndef __SOUND_HDA_CHMAP_H +#define __SOUND_HDA_CHMAP_H + +#include <sound/pcm.h> +#include <sound/hdaudio.h> + + +#define SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE 80 + +struct hdac_cea_channel_speaker_allocation { + int ca_index; + int speakers[8]; + + /* derived values, just for convenience */ + int channels; + int spk_mask; +}; +struct hdac_chmap; + +struct hdac_chmap_ops { + /* + * Helpers for producing the channel map TLVs. These can be overridden + * for devices that have non-standard mapping requirements. 
+ */ + int (*chmap_cea_alloc_validate_get_type)(struct hdac_chmap *chmap, + struct hdac_cea_channel_speaker_allocation *cap, int channels); + void (*cea_alloc_to_tlv_chmap)(struct hdac_chmap *hchmap, + struct hdac_cea_channel_speaker_allocation *cap, + unsigned int *chmap, int channels); + + /* check that the user-given chmap is supported */ + int (*chmap_validate)(struct hdac_chmap *hchmap, int ca, + int channels, unsigned char *chmap); + + void (*get_chmap)(struct hdac_device *hdac, int pcm_idx, + unsigned char *chmap); + void (*set_chmap)(struct hdac_device *hdac, int pcm_idx, + unsigned char *chmap, int prepared); + bool (*is_pcm_attached)(struct hdac_device *hdac, int pcm_idx); + + /* get and set channel assigned to each HDMI ASP (audio sample packet) slot */ + int (*pin_get_slot_channel)(struct hdac_device *codec, + hda_nid_t pin_nid, int asp_slot); + int (*pin_set_slot_channel)(struct hdac_device *codec, + hda_nid_t pin_nid, int asp_slot, int channel); + void (*set_channel_count)(struct hdac_device *codec, + hda_nid_t cvt_nid, int chs); +}; + +struct hdac_chmap { + unsigned int channels_max; /* max over all cvts */ + struct hdac_chmap_ops ops; + struct hdac_device *hdac; +}; + +void snd_hdac_register_chmap_ops(struct hdac_device *hdac, + struct hdac_chmap *chmap); +int snd_hdac_channel_allocation(struct hdac_device *hdac, int spk_alloc, + int channels, bool chmap_set, + bool non_pcm, unsigned char *map); +int snd_hdac_get_active_channels(int ca); +void snd_hdac_setup_channel_mapping(struct hdac_chmap *chmap, + hda_nid_t pin_nid, bool non_pcm, int ca, + int channels, unsigned char *map, + bool chmap_set); +void snd_hdac_print_channel_allocation(int spk_alloc, char *buf, int buflen); +struct hdac_cea_channel_speaker_allocation *snd_hdac_get_ch_alloc_from_ca(int ca); +int snd_hdac_chmap_to_spk_mask(unsigned char c); +int snd_hdac_spk_to_chmap(int spk); +int snd_hdac_add_chmap_ctls(struct snd_pcm *pcm, int pcm_idx, + struct hdac_chmap *chmap); +#endif /* __SOUND_HDA_CHMAP_H */ diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index e2b712c90d3f..93e63c56f48f 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -168,11 +168,13 @@ int snd_hdac_power_up(struct hdac_device *codec); int snd_hdac_power_down(struct hdac_device *codec); int snd_hdac_power_up_pm(struct hdac_device *codec); int snd_hdac_power_down_pm(struct hdac_device *codec); +int snd_hdac_keep_power_up(struct hdac_device *codec); #else static inline int snd_hdac_power_up(struct hdac_device *codec) { return 0; } static inline int snd_hdac_power_down(struct hdac_device *codec) { return 0; } static inline int snd_hdac_power_up_pm(struct hdac_device *codec) { return 0; } static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; } +static inline int snd_hdac_keep_power_up(struct hdac_device *codec) { return 0; } #endif /* @@ -343,7 +345,7 @@ void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus); void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus); void snd_hdac_bus_update_rirb(struct hdac_bus *bus); -void snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status, +int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status, void (*ack)(struct hdac_bus *, struct hdac_stream *)); diff --git a/include/sound/jack.h b/include/sound/jack.h index 23bede121c78..1e84bfb553cf 100644 --- a/include/sound/jack.h +++ b/include/sound/jack.h @@ -72,14 +72,16 @@ enum snd_jack_types { #define SND_JACK_SWITCH_TYPES 6 struct snd_jack { - struct input_dev 
*input_dev; struct list_head kctl_list; struct snd_card *card; + const char *id; +#ifdef CONFIG_SND_JACK_INPUT_DEV + struct input_dev *input_dev; int registered; int type; - const char *id; char name[100]; unsigned int key[6]; /* Keep in sync with definitions above */ +#endif /* CONFIG_SND_JACK_INPUT_DEV */ void *private_data; void (*private_free)(struct snd_jack *); }; @@ -89,10 +91,11 @@ struct snd_jack { int snd_jack_new(struct snd_card *card, const char *id, int type, struct snd_jack **jack, bool initial_kctl, bool phantom_jack); int snd_jack_add_new_kctl(struct snd_jack *jack, const char * name, int mask); +#ifdef CONFIG_SND_JACK_INPUT_DEV void snd_jack_set_parent(struct snd_jack *jack, struct device *parent); int snd_jack_set_key(struct snd_jack *jack, enum snd_jack_types type, int keytype); - +#endif void snd_jack_report(struct snd_jack *jack, int status); #else @@ -107,6 +110,13 @@ static inline int snd_jack_add_new_kctl(struct snd_jack *jack, const char * name return 0; } +static inline void snd_jack_report(struct snd_jack *jack, int status) +{ +} + +#endif + +#if !defined(CONFIG_SND_JACK) || !defined(CONFIG_SND_JACK_INPUT_DEV) static inline void snd_jack_set_parent(struct snd_jack *jack, struct device *parent) { @@ -118,11 +128,6 @@ static inline int snd_jack_set_key(struct snd_jack *jack, { return 0; } - -static inline void snd_jack_report(struct snd_jack *jack, int status) -{ -} - -#endif +#endif /* !CONFIG_SND_JACK || !CONFIG_SND_JACK_INPUT_DEV */ #endif diff --git a/include/sound/pcm.h b/include/sound/pcm.h index b0be09279943..af1fb37c6b26 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -1093,6 +1093,8 @@ unsigned int snd_pcm_rate_to_rate_bit(unsigned int rate); unsigned int snd_pcm_rate_bit_to_rate(unsigned int rate_bit); unsigned int snd_pcm_rate_mask_intersect(unsigned int rates_a, unsigned int rates_b); +unsigned int snd_pcm_rate_range_to_bits(unsigned int rate_min, + unsigned int rate_max); /** * snd_pcm_set_runtime_buffer - Set the PCM runtime buffer diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h index 5b68e3f5aa85..b897b9d63161 100644 --- a/include/sound/soc-topology.h +++ b/include/sound/soc-topology.h @@ -56,12 +56,6 @@ struct snd_soc_dobj_widget { unsigned int kcontrol_enum:1; /* this widget is an enum kcontrol */ }; -/* dynamic PCM DAI object */ -struct snd_soc_dobj_pcm_dai { - struct snd_soc_tplg_pcm_dai *pd; - unsigned int count; -}; - /* generic dynamic object - all dynamic objects belong to this struct */ struct snd_soc_dobj { enum snd_soc_dobj_type type; @@ -71,7 +65,6 @@ struct snd_soc_dobj { union { struct snd_soc_dobj_control control; struct snd_soc_dobj_widget widget; - struct snd_soc_dobj_pcm_dai pcm_dai; }; void *private; /* core does not touch this */ }; @@ -126,10 +119,16 @@ struct snd_soc_tplg_ops { int (*widget_unload)(struct snd_soc_component *, struct snd_soc_dobj *); - /* FE - used for any driver specific init */ - int (*pcm_dai_load)(struct snd_soc_component *, - struct snd_soc_tplg_pcm_dai *pcm_dai, int num_fe); - int (*pcm_dai_unload)(struct snd_soc_component *, + /* FE DAI - used for any driver specific init */ + int (*dai_load)(struct snd_soc_component *, + struct snd_soc_dai_driver *dai_drv); + int (*dai_unload)(struct snd_soc_component *, + struct snd_soc_dobj *); + + /* DAI link - used for any driver specific init */ + int (*link_load)(struct snd_soc_component *, + struct snd_soc_dai_link *link); + int (*link_unload)(struct snd_soc_component *, struct snd_soc_dobj *); /* callback to handle vendor 
bespoke data */ diff --git a/include/sound/soc.h b/include/sound/soc.h index 7afb72ceac56..02b4a215fd75 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -27,7 +27,6 @@ #include <sound/compress_driver.h> #include <sound/control.h> #include <sound/ac97_codec.h> -#include <sound/soc-topology.h> /* * Convenience kcontrol builders @@ -404,6 +403,7 @@ struct snd_soc_jack_zone; struct snd_soc_jack_pin; #include <sound/soc-dapm.h> #include <sound/soc-dpcm.h> +#include <sound/soc-topology.h> struct snd_soc_jack_gpio; diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index 373d3342002b..c3371fa548cb 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -570,8 +570,8 @@ struct iscsi_conn { spinlock_t response_queue_lock; spinlock_t state_lock; /* libcrypto RX and TX contexts for crc32c */ - struct hash_desc conn_rx_hash; - struct hash_desc conn_tx_hash; + struct ahash_request *conn_rx_hash; + struct ahash_request *conn_tx_hash; /* Used for scheduling TX and RX connection kthreads */ cpumask_var_t conn_cpumask; unsigned int conn_rx_reset_cpumask:1; diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 56cf8e485ef2..28ee5c2e6bcd 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -94,5 +94,8 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, sense_reason_t (*exec_cmd)(struct se_cmd *cmd)); bool target_sense_desc_format(struct se_device *dev); +sector_t target_to_linux_sector(struct se_device *dev, sector_t lb); +bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, + struct request_queue *q, int block_size); #endif /* TARGET_CORE_BACKEND_H */ diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 5d82816cc4e3..1b09cac06508 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -140,6 +140,8 @@ enum se_cmd_flags_table { SCF_COMPARE_AND_WRITE = 0x00080000, SCF_COMPARE_AND_WRITE_POST = 0x00100000, SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000, + SCF_ACK_KREF = 0x00400000, + SCF_USE_CPUID = 0x00800000, }; /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ @@ -187,6 +189,7 @@ enum target_sc_flags_table { TARGET_SCF_BIDI_OP = 0x01, TARGET_SCF_ACK_KREF = 0x02, TARGET_SCF_UNKNOWN_SIZE = 0x04, + TARGET_SCF_USE_CPUID = 0x08, }; /* fabric independent task management function values */ @@ -490,8 +493,9 @@ struct se_cmd { #define CMD_T_SENT (1 << 4) #define CMD_T_STOP (1 << 5) #define CMD_T_DEV_ACTIVE (1 << 7) -#define CMD_T_REQUEST_STOP (1 << 8) #define CMD_T_BUSY (1 << 9) +#define CMD_T_TAS (1 << 10) +#define CMD_T_FABRIC_STOP (1 << 11) spinlock_t t_state_lock; struct kref cmd_kref; struct completion t_transport_stop_comp; @@ -511,9 +515,6 @@ struct se_cmd { struct list_head state_list; - /* old task stop completion, consider merging with some of the above */ - struct completion task_stop_comp; - /* backend private data */ void *priv; @@ -559,7 +560,6 @@ struct se_node_acl { struct config_group acl_auth_group; struct config_group acl_param_group; struct config_group acl_fabric_stat_group; - struct config_group *acl_default_groups[5]; struct list_head acl_list; struct list_head acl_sess_list; struct completion acl_free_comp; @@ -886,7 +886,6 @@ struct se_portal_group { const struct target_core_fabric_ops *se_tpg_tfo; struct se_wwn *se_tpg_wwn; struct config_group tpg_group; - struct config_group 
*tpg_default_groups[7]; struct config_group tpg_lun_group; struct config_group tpg_np_group; struct config_group tpg_acl_group; @@ -922,7 +921,6 @@ static inline struct se_portal_group *param_to_tpg(struct config_item *item) struct se_wwn { struct target_fabric_configfs *wwn_tf; struct config_group wwn_group; - struct config_group *wwn_default_groups[2]; struct config_group fabric_stat_group; }; diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h index 317a1ed2f4ac..9130dd5a184a 100644 --- a/include/trace/events/asoc.h +++ b/include/trace/events/asoc.h @@ -231,13 +231,13 @@ TRACE_EVENT(snd_soc_jack_report, TP_ARGS(jack, mask, val), TP_STRUCT__entry( - __string( name, jack->jack->name ) + __string( name, jack->jack->id ) __field( int, mask ) __field( int, val ) ), TP_fast_assign( - __assign_str(name, jack->jack->name); + __assign_str(name, jack->jack->id); __entry->mask = mask; __entry->val = val; ), @@ -253,12 +253,12 @@ TRACE_EVENT(snd_soc_jack_notify, TP_ARGS(jack, val), TP_STRUCT__entry( - __string( name, jack->jack->name ) + __string( name, jack->jack->id ) __field( int, val ) ), TP_fast_assign( - __assign_str(name, jack->jack->name); + __assign_str(name, jack->jack->id); __entry->val = val; ), diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index d866f21efbbf..677807f29a1c 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -6,7 +6,7 @@ #include <linux/writeback.h> #include <linux/tracepoint.h> -#include <trace/events/gfpflags.h> +#include <trace/events/mmflags.h> struct btrfs_root; struct btrfs_fs_info; diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h index c92d1e1cbad9..e215bf68f521 100644 --- a/include/trace/events/compaction.h +++ b/include/trace/events/compaction.h @@ -7,7 +7,7 @@ #include <linux/types.h> #include <linux/list.h> #include <linux/tracepoint.h> -#include <trace/events/gfpflags.h> +#include <trace/events/mmflags.h> #define COMPACTION_STATUS \ EM( COMPACT_DEFERRED, "deferred") \ @@ -350,6 +350,61 @@ DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_reset, ); #endif +TRACE_EVENT(mm_compaction_kcompactd_sleep, + + TP_PROTO(int nid), + + TP_ARGS(nid), + + TP_STRUCT__entry( + __field(int, nid) + ), + + TP_fast_assign( + __entry->nid = nid; + ), + + TP_printk("nid=%d", __entry->nid) +); + +DECLARE_EVENT_CLASS(kcompactd_wake_template, + + TP_PROTO(int nid, int order, enum zone_type classzone_idx), + + TP_ARGS(nid, order, classzone_idx), + + TP_STRUCT__entry( + __field(int, nid) + __field(int, order) + __field(enum zone_type, classzone_idx) + ), + + TP_fast_assign( + __entry->nid = nid; + __entry->order = order; + __entry->classzone_idx = classzone_idx; + ), + + TP_printk("nid=%d order=%d classzone_idx=%-8s", + __entry->nid, + __entry->order, + __print_symbolic(__entry->classzone_idx, ZONE_TYPE)) +); + +DEFINE_EVENT(kcompactd_wake_template, mm_compaction_wakeup_kcompactd, + + TP_PROTO(int nid, int order, enum zone_type classzone_idx), + + TP_ARGS(nid, order, classzone_idx) +); + +DEFINE_EVENT(kcompactd_wake_template, mm_compaction_kcompactd_wake, + + TP_PROTO(int nid, int order, enum zone_type classzone_idx), + + TP_ARGS(nid, order, classzone_idx) +); + #endif /* _TRACE_COMPACTION_H */ /* This part must be outside protection */ diff --git a/include/trace/events/cpuhp.h b/include/trace/events/cpuhp.h new file mode 100644 index 000000000000..a72bd93ec7e5 --- /dev/null +++ b/include/trace/events/cpuhp.h @@ -0,0 +1,66 @@ +#undef TRACE_SYSTEM +#define 
TRACE_SYSTEM cpuhp + +#if !defined(_TRACE_CPUHP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_CPUHP_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(cpuhp_enter, + + TP_PROTO(unsigned int cpu, + int target, + int idx, + int (*fun)(unsigned int)), + + TP_ARGS(cpu, target, idx, fun), + + TP_STRUCT__entry( + __field( unsigned int, cpu ) + __field( int, target ) + __field( int, idx ) + __field( void *, fun ) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->target = target; + __entry->idx = idx; + __entry->fun = fun; + ), + + TP_printk("cpu: %04u target: %3d step: %3d (%pf)", + __entry->cpu, __entry->target, __entry->idx, __entry->fun) +); + +TRACE_EVENT(cpuhp_exit, + + TP_PROTO(unsigned int cpu, + int state, + int idx, + int ret), + + TP_ARGS(cpu, state, idx, ret), + + TP_STRUCT__entry( + __field( unsigned int, cpu ) + __field( int, state ) + __field( int, idx ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->state = state; + __entry->idx = idx; + __entry->ret = ret; + ), + + TP_printk(" cpu: %04u state: %3d step: %3d ret: %d", + __entry->cpu, __entry->state, __entry->idx, __entry->ret) +); + +#endif + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h deleted file mode 100644 index dde6bf092c8a..000000000000 --- a/include/trace/events/gfpflags.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * The order of these masks is important. Matching masks will be seen - * first and the left over flags will end up showing by themselves. - * - * For example, if we have GFP_KERNEL before GFP_USER we wil get: - * - * GFP_KERNEL|GFP_HARDWALL - * - * Thus most bits set go first. - */ -#define show_gfp_flags(flags) \ - (flags) ? 
__print_flags(flags, "|", \ - {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \ - {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \ - {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ - {(unsigned long)GFP_USER, "GFP_USER"}, \ - {(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \ - {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \ - {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \ - {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \ - {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \ - {(unsigned long)__GFP_HIGH, "GFP_HIGH"}, \ - {(unsigned long)__GFP_ATOMIC, "GFP_ATOMIC"}, \ - {(unsigned long)__GFP_IO, "GFP_IO"}, \ - {(unsigned long)__GFP_COLD, "GFP_COLD"}, \ - {(unsigned long)__GFP_NOWARN, "GFP_NOWARN"}, \ - {(unsigned long)__GFP_REPEAT, "GFP_REPEAT"}, \ - {(unsigned long)__GFP_NOFAIL, "GFP_NOFAIL"}, \ - {(unsigned long)__GFP_NORETRY, "GFP_NORETRY"}, \ - {(unsigned long)__GFP_COMP, "GFP_COMP"}, \ - {(unsigned long)__GFP_ZERO, "GFP_ZERO"}, \ - {(unsigned long)__GFP_NOMEMALLOC, "GFP_NOMEMALLOC"}, \ - {(unsigned long)__GFP_MEMALLOC, "GFP_MEMALLOC"}, \ - {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ - {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ - {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ - {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \ - {(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \ - {(unsigned long)__GFP_DIRECT_RECLAIM, "GFP_DIRECT_RECLAIM"}, \ - {(unsigned long)__GFP_KSWAPD_RECLAIM, "GFP_KSWAPD_RECLAIM"}, \ - {(unsigned long)__GFP_OTHER_NODE, "GFP_OTHER_NODE"} \ - ) : "GFP_NOWAIT" - diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index 47c6212d8f3c..551ba4acde4d 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -6,8 +6,6 @@ #include <linux/tracepoint.h> -#include <trace/events/gfpflags.h> - #define SCAN_STATUS \ EM( SCAN_FAIL, "failed") \ EM( SCAN_SUCCEED, "succeeded") \ diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index f7554fd7fc62..ca7217389067 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -6,7 +6,7 @@ #include <linux/types.h> #include <linux/tracepoint.h> -#include <trace/events/gfpflags.h> +#include <trace/events/mmflags.h> DECLARE_EVENT_CLASS(kmem_alloc, diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index d6f83222a6a1..aa69253ecc7d 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -359,14 +359,15 @@ TRACE_EVENT( #endif TRACE_EVENT(kvm_halt_poll_ns, - TP_PROTO(bool grow, unsigned int vcpu_id, int new, int old), + TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new, + unsigned int old), TP_ARGS(grow, vcpu_id, new, old), TP_STRUCT__entry( __field(bool, grow) __field(unsigned int, vcpu_id) - __field(int, new) - __field(int, old) + __field(unsigned int, new) + __field(unsigned int, old) ), TP_fast_assign( @@ -376,7 +377,7 @@ TRACE_EVENT(kvm_halt_poll_ns, __entry->old = old; ), - TP_printk("vcpu %u: halt_poll_ns %d (%s %d)", + TP_printk("vcpu %u: halt_poll_ns %u (%s %u)", __entry->vcpu_id, __entry->new, __entry->grow ? "grow" : "shrink", diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h new file mode 100644 index 000000000000..43cedbf0c759 --- /dev/null +++ b/include/trace/events/mmflags.h @@ -0,0 +1,173 @@ +/* + * The order of these masks is important. Matching masks will be seen + * first and the left over flags will end up showing by themselves. 
+ * + * For example, if we have GFP_KERNEL before GFP_USER we wil get: + * + * GFP_KERNEL|GFP_HARDWALL + * + * Thus most bits set go first. + */ + +#define __def_gfpflag_names \ + {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \ + {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"},\ + {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ + {(unsigned long)GFP_USER, "GFP_USER"}, \ + {(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \ + {(unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \ + {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \ + {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \ + {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \ + {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \ + {(unsigned long)GFP_NOWAIT, "GFP_NOWAIT"}, \ + {(unsigned long)GFP_DMA, "GFP_DMA"}, \ + {(unsigned long)__GFP_HIGHMEM, "__GFP_HIGHMEM"}, \ + {(unsigned long)GFP_DMA32, "GFP_DMA32"}, \ + {(unsigned long)__GFP_HIGH, "__GFP_HIGH"}, \ + {(unsigned long)__GFP_ATOMIC, "__GFP_ATOMIC"}, \ + {(unsigned long)__GFP_IO, "__GFP_IO"}, \ + {(unsigned long)__GFP_FS, "__GFP_FS"}, \ + {(unsigned long)__GFP_COLD, "__GFP_COLD"}, \ + {(unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \ + {(unsigned long)__GFP_REPEAT, "__GFP_REPEAT"}, \ + {(unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \ + {(unsigned long)__GFP_NORETRY, "__GFP_NORETRY"}, \ + {(unsigned long)__GFP_COMP, "__GFP_COMP"}, \ + {(unsigned long)__GFP_ZERO, "__GFP_ZERO"}, \ + {(unsigned long)__GFP_NOMEMALLOC, "__GFP_NOMEMALLOC"}, \ + {(unsigned long)__GFP_MEMALLOC, "__GFP_MEMALLOC"}, \ + {(unsigned long)__GFP_HARDWALL, "__GFP_HARDWALL"}, \ + {(unsigned long)__GFP_THISNODE, "__GFP_THISNODE"}, \ + {(unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \ + {(unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \ + {(unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \ + {(unsigned long)__GFP_NOTRACK, "__GFP_NOTRACK"}, \ + {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \ + {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \ + {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\ + {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\ + {(unsigned long)__GFP_OTHER_NODE, "__GFP_OTHER_NODE"} \ + +#define show_gfp_flags(flags) \ + (flags) ? 
__print_flags(flags, "|", \ + __def_gfpflag_names \ + ) : "none" + +#ifdef CONFIG_MMU +#define IF_HAVE_PG_MLOCK(flag,string) ,{1UL << flag, string} +#else +#define IF_HAVE_PG_MLOCK(flag,string) +#endif + +#ifdef CONFIG_ARCH_USES_PG_UNCACHED +#define IF_HAVE_PG_UNCACHED(flag,string) ,{1UL << flag, string} +#else +#define IF_HAVE_PG_UNCACHED(flag,string) +#endif + +#ifdef CONFIG_MEMORY_FAILURE +#define IF_HAVE_PG_HWPOISON(flag,string) ,{1UL << flag, string} +#else +#define IF_HAVE_PG_HWPOISON(flag,string) +#endif + +#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT) +#define IF_HAVE_PG_IDLE(flag,string) ,{1UL << flag, string} +#else +#define IF_HAVE_PG_IDLE(flag,string) +#endif + +#define __def_pageflag_names \ + {1UL << PG_locked, "locked" }, \ + {1UL << PG_error, "error" }, \ + {1UL << PG_referenced, "referenced" }, \ + {1UL << PG_uptodate, "uptodate" }, \ + {1UL << PG_dirty, "dirty" }, \ + {1UL << PG_lru, "lru" }, \ + {1UL << PG_active, "active" }, \ + {1UL << PG_slab, "slab" }, \ + {1UL << PG_owner_priv_1, "owner_priv_1" }, \ + {1UL << PG_arch_1, "arch_1" }, \ + {1UL << PG_reserved, "reserved" }, \ + {1UL << PG_private, "private" }, \ + {1UL << PG_private_2, "private_2" }, \ + {1UL << PG_writeback, "writeback" }, \ + {1UL << PG_head, "head" }, \ + {1UL << PG_swapcache, "swapcache" }, \ + {1UL << PG_mappedtodisk, "mappedtodisk" }, \ + {1UL << PG_reclaim, "reclaim" }, \ + {1UL << PG_swapbacked, "swapbacked" }, \ + {1UL << PG_unevictable, "unevictable" } \ +IF_HAVE_PG_MLOCK(PG_mlocked, "mlocked" ) \ +IF_HAVE_PG_UNCACHED(PG_uncached, "uncached" ) \ +IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \ +IF_HAVE_PG_IDLE(PG_young, "young" ) \ +IF_HAVE_PG_IDLE(PG_idle, "idle" ) + +#define show_page_flags(flags) \ + (flags) ? __print_flags(flags, "|", \ + __def_pageflag_names \ + ) : "none" + +#if defined(CONFIG_X86) +#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" } +#elif defined(CONFIG_PPC) +#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" } +#elif defined(CONFIG_PARISC) || defined(CONFIG_METAG) || defined(CONFIG_IA64) +#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" } +#elif !defined(CONFIG_MMU) +#define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY,"mappedcopy" } +#else +#define __VM_ARCH_SPECIFIC_1 {VM_ARCH_1, "arch_1" } +#endif + +#if defined(CONFIG_X86) +#define __VM_ARCH_SPECIFIC_2 {VM_MPX, "mpx" } +#else +#define __VM_ARCH_SPECIFIC_2 {VM_ARCH_2, "arch_2" } +#endif + +#ifdef CONFIG_MEM_SOFT_DIRTY +#define IF_HAVE_VM_SOFTDIRTY(flag,name) {flag, name }, +#else +#define IF_HAVE_VM_SOFTDIRTY(flag,name) +#endif + +#define __def_vmaflag_names \ + {VM_READ, "read" }, \ + {VM_WRITE, "write" }, \ + {VM_EXEC, "exec" }, \ + {VM_SHARED, "shared" }, \ + {VM_MAYREAD, "mayread" }, \ + {VM_MAYWRITE, "maywrite" }, \ + {VM_MAYEXEC, "mayexec" }, \ + {VM_MAYSHARE, "mayshare" }, \ + {VM_GROWSDOWN, "growsdown" }, \ + {VM_UFFD_MISSING, "uffd_missing" }, \ + {VM_PFNMAP, "pfnmap" }, \ + {VM_DENYWRITE, "denywrite" }, \ + {VM_UFFD_WP, "uffd_wp" }, \ + {VM_LOCKED, "locked" }, \ + {VM_IO, "io" }, \ + {VM_SEQ_READ, "seqread" }, \ + {VM_RAND_READ, "randread" }, \ + {VM_DONTCOPY, "dontcopy" }, \ + {VM_DONTEXPAND, "dontexpand" }, \ + {VM_LOCKONFAULT, "lockonfault" }, \ + {VM_ACCOUNT, "account" }, \ + {VM_NORESERVE, "noreserve" }, \ + {VM_HUGETLB, "hugetlb" }, \ + __VM_ARCH_SPECIFIC_1 , \ + __VM_ARCH_SPECIFIC_2 , \ + {VM_DONTDUMP, "dontdump" }, \ +IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \ + {VM_MIXEDMAP, "mixedmap" }, \ + {VM_HUGEPAGE, "hugepage" }, \ + {VM_NOHUGEPAGE, "nohugepage" }, \ + {VM_MERGEABLE, "mergeable" } 
\ + +#define show_vma_flags(flags) \ + (flags) ? __print_flags(flags, "|", \ + __def_vmaflag_names \ + ) : "none" diff --git a/include/trace/events/page_ref.h b/include/trace/events/page_ref.h new file mode 100644 index 000000000000..81001f8b0db4 --- /dev/null +++ b/include/trace/events/page_ref.h @@ -0,0 +1,134 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM page_ref + +#if !defined(_TRACE_PAGE_REF_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PAGE_REF_H + +#include <linux/types.h> +#include <linux/page_ref.h> +#include <linux/tracepoint.h> +#include <trace/events/mmflags.h> + +DECLARE_EVENT_CLASS(page_ref_mod_template, + + TP_PROTO(struct page *page, int v), + + TP_ARGS(page, v), + + TP_STRUCT__entry( + __field(unsigned long, pfn) + __field(unsigned long, flags) + __field(int, count) + __field(int, mapcount) + __field(void *, mapping) + __field(int, mt) + __field(int, val) + ), + + TP_fast_assign( + __entry->pfn = page_to_pfn(page); + __entry->flags = page->flags; + __entry->count = page_ref_count(page); + __entry->mapcount = page_mapcount(page); + __entry->mapping = page->mapping; + __entry->mt = get_pageblock_migratetype(page); + __entry->val = v; + ), + + TP_printk("pfn=0x%lx flags=%s count=%d mapcount=%d mapping=%p mt=%d val=%d", + __entry->pfn, + show_page_flags(__entry->flags & ((1UL << NR_PAGEFLAGS) - 1)), + __entry->count, + __entry->mapcount, __entry->mapping, __entry->mt, + __entry->val) +); + +DEFINE_EVENT(page_ref_mod_template, page_ref_set, + + TP_PROTO(struct page *page, int v), + + TP_ARGS(page, v) +); + +DEFINE_EVENT(page_ref_mod_template, page_ref_mod, + + TP_PROTO(struct page *page, int v), + + TP_ARGS(page, v) +); + +DECLARE_EVENT_CLASS(page_ref_mod_and_test_template, + + TP_PROTO(struct page *page, int v, int ret), + + TP_ARGS(page, v, ret), + + TP_STRUCT__entry( + __field(unsigned long, pfn) + __field(unsigned long, flags) + __field(int, count) + __field(int, mapcount) + __field(void *, mapping) + __field(int, mt) + __field(int, val) + __field(int, ret) + ), + + TP_fast_assign( + __entry->pfn = page_to_pfn(page); + __entry->flags = page->flags; + __entry->count = page_ref_count(page); + __entry->mapcount = page_mapcount(page); + __entry->mapping = page->mapping; + __entry->mt = get_pageblock_migratetype(page); + __entry->val = v; + __entry->ret = ret; + ), + + TP_printk("pfn=0x%lx flags=%s count=%d mapcount=%d mapping=%p mt=%d val=%d ret=%d", + __entry->pfn, + show_page_flags(__entry->flags & ((1UL << NR_PAGEFLAGS) - 1)), + __entry->count, + __entry->mapcount, __entry->mapping, __entry->mt, + __entry->val, __entry->ret) +); + +DEFINE_EVENT(page_ref_mod_and_test_template, page_ref_mod_and_test, + + TP_PROTO(struct page *page, int v, int ret), + + TP_ARGS(page, v, ret) +); + +DEFINE_EVENT(page_ref_mod_and_test_template, page_ref_mod_and_return, + + TP_PROTO(struct page *page, int v, int ret), + + TP_ARGS(page, v, ret) +); + +DEFINE_EVENT(page_ref_mod_and_test_template, page_ref_mod_unless, + + TP_PROTO(struct page *page, int v, int ret), + + TP_ARGS(page, v, ret) +); + +DEFINE_EVENT(page_ref_mod_and_test_template, page_ref_freeze, + + TP_PROTO(struct page *page, int v, int ret), + + TP_ARGS(page, v, ret) +); + +DEFINE_EVENT(page_ref_mod_template, page_ref_unfreeze, + + TP_PROTO(struct page *page, int v), + + TP_ARGS(page, v) +); + +#endif /* _TRACE_PAGE_COUNT_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/power.h b/include/trace/events/power.h index 284244ebfe8d..19e50300ce7d 100644 --- 
a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -38,6 +38,28 @@ DEFINE_EVENT(cpu, cpu_idle, TP_ARGS(state, cpu_id) ); +TRACE_EVENT(powernv_throttle, + + TP_PROTO(int chip_id, const char *reason, int pmax), + + TP_ARGS(chip_id, reason, pmax), + + TP_STRUCT__entry( + __field(int, chip_id) + __string(reason, reason) + __field(int, pmax) + ), + + TP_fast_assign( + __entry->chip_id = chip_id; + __assign_str(reason, reason); + __entry->pmax = pmax; + ), + + TP_printk("Chip %d Pmax %d %s", __entry->chip_id, + __entry->pmax, __get_str(reason)) +); + TRACE_EVENT(pstate_sample, TP_PROTO(u32 core_busy, diff --git a/include/trace/events/sunvnet.h b/include/trace/events/sunvnet.h new file mode 100644 index 000000000000..eb080b267e55 --- /dev/null +++ b/include/trace/events/sunvnet.h @@ -0,0 +1,139 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM sunvnet + +#if !defined(_TRACE_SUNVNET_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SUNVNET_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(vnet_rx_one, + + TP_PROTO(int lsid, int rsid, int index, int needs_ack), + + TP_ARGS(lsid, rsid, index, needs_ack), + + TP_STRUCT__entry( + __field(int, lsid) + __field(int, rsid) + __field(int, index) + __field(int, needs_ack) + ), + + TP_fast_assign( + __entry->lsid = lsid; + __entry->rsid = rsid; + __entry->index = index; + __entry->needs_ack = needs_ack; + ), + + TP_printk("(%x:%x) walk_rx_one index %d; needs_ack %d", + __entry->lsid, __entry->rsid, + __entry->index, __entry->needs_ack) +); + +DECLARE_EVENT_CLASS(vnet_tx_stopped_ack_template, + + TP_PROTO(int lsid, int rsid, int ack_end, int npkts), + + TP_ARGS(lsid, rsid, ack_end, npkts), + + TP_STRUCT__entry( + __field(int, lsid) + __field(int, rsid) + __field(int, ack_end) + __field(int, npkts) + ), + + TP_fast_assign( + __entry->lsid = lsid; + __entry->rsid = rsid; + __entry->ack_end = ack_end; + __entry->npkts = npkts; + ), + + TP_printk("(%x:%x) stopped ack for %d; npkts %d", + __entry->lsid, __entry->rsid, + __entry->ack_end, __entry->npkts) +); +DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_send_stopped_ack, + TP_PROTO(int lsid, int rsid, int ack_end, int npkts), + TP_ARGS(lsid, rsid, ack_end, npkts)); +DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_defer_stopped_ack, + TP_PROTO(int lsid, int rsid, int ack_end, int npkts), + TP_ARGS(lsid, rsid, ack_end, npkts)); +DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_pending_stopped_ack, + TP_PROTO(int lsid, int rsid, int ack_end, int npkts), + TP_ARGS(lsid, rsid, ack_end, npkts)); + +TRACE_EVENT(vnet_rx_stopped_ack, + + TP_PROTO(int lsid, int rsid, int end), + + TP_ARGS(lsid, rsid, end), + + TP_STRUCT__entry( + __field(int, lsid) + __field(int, rsid) + __field(int, end) + ), + + TP_fast_assign( + __entry->lsid = lsid; + __entry->rsid = rsid; + __entry->end = end; + ), + + TP_printk("(%x:%x) stopped ack for index %d", + __entry->lsid, __entry->rsid, __entry->end) +); + +TRACE_EVENT(vnet_tx_trigger, + + TP_PROTO(int lsid, int rsid, int start, int err), + + TP_ARGS(lsid, rsid, start, err), + + TP_STRUCT__entry( + __field(int, lsid) + __field(int, rsid) + __field(int, start) + __field(int, err) + ), + + TP_fast_assign( + __entry->lsid = lsid; + __entry->rsid = rsid; + __entry->start = start; + __entry->err = err; + ), + + TP_printk("(%x:%x) Tx trigger for %d sent with err %d %s", + __entry->lsid, __entry->rsid, __entry->start, + __entry->err, __entry->err > 0 ? 
"(ok)" : " ") +); + +TRACE_EVENT(vnet_skip_tx_trigger, + + TP_PROTO(int lsid, int rsid, int last), + + TP_ARGS(lsid, rsid, last), + + TP_STRUCT__entry( + __field(int, lsid) + __field(int, rsid) + __field(int, last) + ), + + TP_fast_assign( + __entry->lsid = lsid; + __entry->rsid = rsid; + __entry->last = last; + ), + + TP_printk("(%x:%x) Skip Tx trigger. Last trigger sent was %d", + __entry->lsid, __entry->rsid, __entry->last) +); +#endif /* _TRACE_SOCK_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index 073b9ac245ba..51440131d337 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -328,23 +328,49 @@ TRACE_EVENT(itimer_expire, ); #ifdef CONFIG_NO_HZ_COMMON + +#define TICK_DEP_NAMES \ + tick_dep_name(NONE) \ + tick_dep_name(POSIX_TIMER) \ + tick_dep_name(PERF_EVENTS) \ + tick_dep_name(SCHED) \ + tick_dep_name_end(CLOCK_UNSTABLE) + +#undef tick_dep_name +#undef tick_dep_name_end + +#define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep); +#define tick_dep_name_end(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep); + +TICK_DEP_NAMES + +#undef tick_dep_name +#undef tick_dep_name_end + +#define tick_dep_name(sdep) { TICK_DEP_MASK_##sdep, #sdep }, +#define tick_dep_name_end(sdep) { TICK_DEP_MASK_##sdep, #sdep } + +#define show_tick_dep_name(val) \ + __print_symbolic(val, TICK_DEP_NAMES) + TRACE_EVENT(tick_stop, - TP_PROTO(int success, char *error_msg), + TP_PROTO(int success, int dependency), - TP_ARGS(success, error_msg), + TP_ARGS(success, dependency), TP_STRUCT__entry( __field( int , success ) - __string( msg, error_msg ) + __field( int , dependency ) ), TP_fast_assign( __entry->success = success; - __assign_str(msg, error_msg); + __entry->dependency = dependency; ), - TP_printk("success=%s msg=%s", __entry->success ? 
"yes" : "no", __get_str(msg)) + TP_printk("success=%d dependency=%s", __entry->success, \ + show_tick_dep_name(__entry->dependency)) ); #endif diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index 31763dd8db1c..0101ef37f1ee 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -8,7 +8,7 @@ #include <linux/tracepoint.h> #include <linux/mm.h> #include <linux/memcontrol.h> -#include <trace/events/gfpflags.h> +#include <trace/events/mmflags.h> #define RECLAIM_WB_ANON 0x0001u #define RECLAIM_WB_FILE 0x0002u diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index fb8a41668382..67d632f1743d 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -90,4 +90,6 @@ #define SO_ATTACH_REUSEPORT_CBPF 51 #define SO_ATTACH_REUSEPORT_EBPF 52 +#define SO_CNX_ADVICE 53 + #endif /* __ASM_GENERIC_SOCKET_H */ diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index ebd10e624598..0495884defc1 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -138,6 +138,7 @@ header-y += genetlink.h header-y += gen_stats.h header-y += gfs2_ondisk.h header-y += gigaset_dev.h +header-y += gpio.h header-y += gsmmux.h header-y += hdlcdrv.h header-y += hdlc.h @@ -173,6 +174,7 @@ header-y += if_hippi.h header-y += if_infiniband.h header-y += if_link.h header-y += if_ltalk.h +header-y += if_macsec.h header-y += if_packet.h header-y += if_phonet.h header-y += if_plip.h diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h index bb991dfe134f..9175a1b4dc69 100644 --- a/include/uapi/linux/auto_fs.h +++ b/include/uapi/linux/auto_fs.h @@ -1,7 +1,4 @@ -/* -*- linux-c -*- ------------------------------------------------------- * - * - * linux/include/linux/auto_fs.h - * +/* * Copyright 1997 Transmeta Corporation - All Rights Reserved * * This file is part of the Linux kernel and is made available under @@ -51,7 +48,7 @@ struct autofs_packet_hdr { struct autofs_packet_missing { struct autofs_packet_hdr hdr; - autofs_wqt_t wait_queue_token; + autofs_wqt_t wait_queue_token; int len; char name[NAME_MAX+1]; }; @@ -63,12 +60,12 @@ struct autofs_packet_expire { char name[NAME_MAX+1]; }; -#define AUTOFS_IOC_READY _IO(0x93,0x60) -#define AUTOFS_IOC_FAIL _IO(0x93,0x61) -#define AUTOFS_IOC_CATATONIC _IO(0x93,0x62) -#define AUTOFS_IOC_PROTOVER _IOR(0x93,0x63,int) -#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93,0x64,compat_ulong_t) -#define AUTOFS_IOC_SETTIMEOUT _IOWR(0x93,0x64,unsigned long) -#define AUTOFS_IOC_EXPIRE _IOR(0x93,0x65,struct autofs_packet_expire) +#define AUTOFS_IOC_READY _IO(0x93, 0x60) +#define AUTOFS_IOC_FAIL _IO(0x93, 0x61) +#define AUTOFS_IOC_CATATONIC _IO(0x93, 0x62) +#define AUTOFS_IOC_PROTOVER _IOR(0x93, 0x63, int) +#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93, 0x64, compat_ulong_t) +#define AUTOFS_IOC_SETTIMEOUT _IOWR(0x93, 0x64, unsigned long) +#define AUTOFS_IOC_EXPIRE _IOR(0x93, 0x65, struct autofs_packet_expire) #endif /* _UAPI_LINUX_AUTO_FS_H */ diff --git a/include/uapi/linux/auto_fs4.h b/include/uapi/linux/auto_fs4.h index e02982fa2953..8f8f1bdcca8c 100644 --- a/include/uapi/linux/auto_fs4.h +++ b/include/uapi/linux/auto_fs4.h @@ -1,6 +1,4 @@ -/* -*- c -*- - * linux/include/linux/auto_fs4.h - * +/* * Copyright 1999-2000 Jeremy Fitzhardinge <jeremy@goop.org> * * This file is part of the Linux kernel and is made available under @@ -38,7 +36,6 @@ static inline void set_autofs_type_indirect(unsigned int *type) { *type = AUTOFS_TYPE_INDIRECT; - return; } 
static inline unsigned int autofs_type_indirect(unsigned int type) @@ -49,7 +46,6 @@ static inline unsigned int autofs_type_indirect(unsigned int type) static inline void set_autofs_type_direct(unsigned int *type) { *type = AUTOFS_TYPE_DIRECT; - return; } static inline unsigned int autofs_type_direct(unsigned int type) @@ -60,7 +56,6 @@ static inline unsigned int autofs_type_direct(unsigned int type) static inline void set_autofs_type_offset(unsigned int *type) { *type = AUTOFS_TYPE_OFFSET; - return; } static inline unsigned int autofs_type_offset(unsigned int type) @@ -81,7 +76,6 @@ static inline unsigned int autofs_type_trigger(unsigned int type) static inline void set_autofs_type_any(unsigned int *type) { *type = AUTOFS_TYPE_ANY; - return; } static inline unsigned int autofs_type_any(unsigned int type) @@ -114,7 +108,7 @@ enum autofs_notify { /* v4 multi expire (via pipe) */ struct autofs_packet_expire_multi { struct autofs_packet_hdr hdr; - autofs_wqt_t wait_queue_token; + autofs_wqt_t wait_queue_token; int len; char name[NAME_MAX+1]; }; @@ -154,11 +148,10 @@ union autofs_v5_packet_union { autofs_packet_expire_direct_t expire_direct; }; -#define AUTOFS_IOC_EXPIRE_MULTI _IOW(0x93,0x66,int) +#define AUTOFS_IOC_EXPIRE_MULTI _IOW(0x93, 0x66, int) #define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI #define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI -#define AUTOFS_IOC_PROTOSUBVER _IOR(0x93,0x67,int) -#define AUTOFS_IOC_ASKUMOUNT _IOR(0x93,0x70,int) - +#define AUTOFS_IOC_PROTOSUBVER _IOR(0x93, 0x67, int) +#define AUTOFS_IOC_ASKUMOUNT _IOR(0x93, 0x70, int) #endif /* _LINUX_AUTO_FS4_H */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index aa6f8571de13..924f537183fd 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -81,6 +81,9 @@ enum bpf_map_type { BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PROG_ARRAY, BPF_MAP_TYPE_PERF_EVENT_ARRAY, + BPF_MAP_TYPE_PERCPU_HASH, + BPF_MAP_TYPE_PERCPU_ARRAY, + BPF_MAP_TYPE_STACK_TRACE, }; enum bpf_prog_type { @@ -98,12 +101,15 @@ enum bpf_prog_type { #define BPF_NOEXIST 1 /* create new element if it didn't exist */ #define BPF_EXIST 2 /* update existing element */ +#define BPF_F_NO_PREALLOC (1U << 0) + union bpf_attr { struct { /* anonymous struct used by BPF_MAP_CREATE command */ __u32 map_type; /* one of enum bpf_map_type */ __u32 key_size; /* size of key in bytes */ __u32 value_size; /* size of value in bytes */ __u32 max_entries; /* max number of entries in a map */ + __u32 map_flags; /* prealloc or not */ }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ @@ -270,6 +276,42 @@ enum bpf_func_id { */ BPF_FUNC_perf_event_output, BPF_FUNC_skb_load_bytes, + + /** + * bpf_get_stackid(ctx, map, flags) - walk user or kernel stack and return id + * @ctx: struct pt_regs* + * @map: pointer to stack_trace map + * @flags: bits 0-7 - number of stack frames to skip + * bit 8 - collect user stack instead of kernel + * bit 9 - compare stacks by hash only + * bit 10 - if two different stacks hash into the same stackid + * discard old + * other bits - reserved + * Return: >= 0 stackid on success or negative error + */ + BPF_FUNC_get_stackid, + + /** + * bpf_csum_diff(from, from_size, to, to_size, seed) - calculate csum diff + * @from: raw from buffer + * @from_size: length of from buffer + * @to: raw to buffer + * @to_size: length of to buffer + * @seed: optional seed + * Return: csum result + */ + BPF_FUNC_csum_diff, + + /** + * bpf_skb_[gs]et_tunnel_opt(skb, opt, size) + * retrieve or populate tunnel options metadata
+ * @skb: pointer to skb + * @opt: pointer to raw tunnel option data + * @size: size of @opt + * Return: 0 on success for set, option size for get + */ + BPF_FUNC_skb_get_tunnel_opt, + BPF_FUNC_skb_set_tunnel_opt, __BPF_FUNC_MAX_ID, }; @@ -277,6 +319,7 @@ enum bpf_func_id { /* BPF_FUNC_skb_store_bytes flags. */ #define BPF_F_RECOMPUTE_CSUM (1ULL << 0) +#define BPF_F_INVALIDATE_HASH (1ULL << 1) /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags. * First 4 bits are for passing the header field size. @@ -285,6 +328,7 @@ enum bpf_func_id { /* BPF_FUNC_l4_csum_replace flags. */ #define BPF_F_PSEUDO_HDR (1ULL << 4) +#define BPF_F_MARK_MANGLED_0 (1ULL << 5) /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ #define BPF_F_INGRESS (1ULL << 0) @@ -292,6 +336,16 @@ enum bpf_func_id { /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ #define BPF_F_TUNINFO_IPV6 (1ULL << 0) +/* BPF_FUNC_get_stackid flags. */ +#define BPF_F_SKIP_FIELD_MASK 0xffULL +#define BPF_F_USER_STACK (1ULL << 8) +#define BPF_F_FAST_STACK_CMP (1ULL << 9) +#define BPF_F_REUSE_STACKID (1ULL << 10) + +/* BPF_FUNC_skb_set_tunnel_key flags. */ +#define BPF_F_ZERO_CSUM_TX (1ULL << 1) +#define BPF_F_DONT_FRAGMENT (1ULL << 2) + /* user accessible mirror of in-kernel sk_buff. * new fields can only be added to the end of this structure */ @@ -321,6 +375,7 @@ struct bpf_tunnel_key { }; __u8 tunnel_tos; __u8 tunnel_ttl; + __u32 tunnel_label; }; #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/include/uapi/linux/byteorder/big_endian.h b/include/uapi/linux/byteorder/big_endian.h index 672374450095..cdab17ab907c 100644 --- a/include/uapi/linux/byteorder/big_endian.h +++ b/include/uapi/linux/byteorder/big_endian.h @@ -40,51 +40,51 @@ #define __cpu_to_be16(x) ((__force __be16)(__u16)(x)) #define __be16_to_cpu(x) ((__force __u16)(__be16)(x)) -static inline __le64 __cpu_to_le64p(const __u64 *p) +static __always_inline __le64 __cpu_to_le64p(const __u64 *p) { return (__force __le64)__swab64p(p); } -static inline __u64 __le64_to_cpup(const __le64 *p) +static __always_inline __u64 __le64_to_cpup(const __le64 *p) { return __swab64p((__u64 *)p); } -static inline __le32 __cpu_to_le32p(const __u32 *p) +static __always_inline __le32 __cpu_to_le32p(const __u32 *p) { return (__force __le32)__swab32p(p); } -static inline __u32 __le32_to_cpup(const __le32 *p) +static __always_inline __u32 __le32_to_cpup(const __le32 *p) { return __swab32p((__u32 *)p); } -static inline __le16 __cpu_to_le16p(const __u16 *p) +static __always_inline __le16 __cpu_to_le16p(const __u16 *p) { return (__force __le16)__swab16p(p); } -static inline __u16 __le16_to_cpup(const __le16 *p) +static __always_inline __u16 __le16_to_cpup(const __le16 *p) { return __swab16p((__u16 *)p); } -static inline __be64 __cpu_to_be64p(const __u64 *p) +static __always_inline __be64 __cpu_to_be64p(const __u64 *p) { return (__force __be64)*p; } -static inline __u64 __be64_to_cpup(const __be64 *p) +static __always_inline __u64 __be64_to_cpup(const __be64 *p) { return (__force __u64)*p; } -static inline __be32 __cpu_to_be32p(const __u32 *p) +static __always_inline __be32 __cpu_to_be32p(const __u32 *p) { return (__force __be32)*p; } -static inline __u32 __be32_to_cpup(const __be32 *p) +static __always_inline __u32 __be32_to_cpup(const __be32 *p) { return (__force __u32)*p; } -static inline __be16 __cpu_to_be16p(const __u16 *p) +static __always_inline __be16 __cpu_to_be16p(const __u16 *p) { return (__force __be16)*p; } -static inline __u16 __be16_to_cpup(const __be16 *p) 
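The new map_flags field and BPF_F_NO_PREALLOC flag above are consumed by the BPF_MAP_CREATE command of the bpf(2) syscall. A rough sketch of creating one of the new per-CPU hash maps with preallocation disabled (key/value sizes and max_entries are arbitrary illustration values):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Create a BPF_MAP_TYPE_PERCPU_HASH map without preallocated elements. */
static int create_percpu_hash(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_PERCPU_HASH;
	attr.key_size    = sizeof(__u32);
	attr.value_size  = sizeof(__u64);	/* size of one per-CPU slot */
	attr.max_entries = 1024;
	attr.map_flags   = BPF_F_NO_PREALLOC;	/* allocate elements on demand */

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}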
+static __always_inline __u16 __be16_to_cpup(const __be16 *p) { return (__force __u16)*p; } diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h index d876736a0017..4b93f2b260dd 100644 --- a/include/uapi/linux/byteorder/little_endian.h +++ b/include/uapi/linux/byteorder/little_endian.h @@ -40,51 +40,51 @@ #define __cpu_to_be16(x) ((__force __be16)__swab16((x))) #define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x)) -static inline __le64 __cpu_to_le64p(const __u64 *p) +static __always_inline __le64 __cpu_to_le64p(const __u64 *p) { return (__force __le64)*p; } -static inline __u64 __le64_to_cpup(const __le64 *p) +static __always_inline __u64 __le64_to_cpup(const __le64 *p) { return (__force __u64)*p; } -static inline __le32 __cpu_to_le32p(const __u32 *p) +static __always_inline __le32 __cpu_to_le32p(const __u32 *p) { return (__force __le32)*p; } -static inline __u32 __le32_to_cpup(const __le32 *p) +static __always_inline __u32 __le32_to_cpup(const __le32 *p) { return (__force __u32)*p; } -static inline __le16 __cpu_to_le16p(const __u16 *p) +static __always_inline __le16 __cpu_to_le16p(const __u16 *p) { return (__force __le16)*p; } -static inline __u16 __le16_to_cpup(const __le16 *p) +static __always_inline __u16 __le16_to_cpup(const __le16 *p) { return (__force __u16)*p; } -static inline __be64 __cpu_to_be64p(const __u64 *p) +static __always_inline __be64 __cpu_to_be64p(const __u64 *p) { return (__force __be64)__swab64p(p); } -static inline __u64 __be64_to_cpup(const __be64 *p) +static __always_inline __u64 __be64_to_cpup(const __be64 *p) { return __swab64p((__u64 *)p); } -static inline __be32 __cpu_to_be32p(const __u32 *p) +static __always_inline __be32 __cpu_to_be32p(const __u32 *p) { return (__force __be32)__swab32p(p); } -static inline __u32 __be32_to_cpup(const __be32 *p) +static __always_inline __u32 __be32_to_cpup(const __be32 *p) { return __swab32p((__u32 *)p); } -static inline __be16 __cpu_to_be16p(const __u16 *p) +static __always_inline __be16 __cpu_to_be16p(const __u16 *p) { return (__force __be16)__swab16p(p); } -static inline __u16 __be16_to_cpup(const __be16 *p) +static __always_inline __u16 __be16_to_cpup(const __be16 *p) { return __swab16p((__u16 *)p); } diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h new file mode 100644 index 000000000000..c9fee5781eb1 --- /dev/null +++ b/include/uapi/linux/devlink.h @@ -0,0 +1,72 @@ +/* + * include/uapi/linux/devlink.h - Network physical device Netlink interface + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef _UAPI_LINUX_DEVLINK_H_ +#define _UAPI_LINUX_DEVLINK_H_ + +#define DEVLINK_GENL_NAME "devlink" +#define DEVLINK_GENL_VERSION 0x1 +#define DEVLINK_GENL_MCGRP_CONFIG_NAME "config" + +enum devlink_command { + /* don't change the order or add anything between, this is ABI! 
*/ + DEVLINK_CMD_UNSPEC, + + DEVLINK_CMD_GET, /* can dump */ + DEVLINK_CMD_SET, + DEVLINK_CMD_NEW, + DEVLINK_CMD_DEL, + + DEVLINK_CMD_PORT_GET, /* can dump */ + DEVLINK_CMD_PORT_SET, + DEVLINK_CMD_PORT_NEW, + DEVLINK_CMD_PORT_DEL, + + DEVLINK_CMD_PORT_SPLIT, + DEVLINK_CMD_PORT_UNSPLIT, + + /* add new commands above here */ + + __DEVLINK_CMD_MAX, + DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1 +}; + +enum devlink_port_type { + DEVLINK_PORT_TYPE_NOTSET, + DEVLINK_PORT_TYPE_AUTO, + DEVLINK_PORT_TYPE_ETH, + DEVLINK_PORT_TYPE_IB, +}; + +enum devlink_attr { + /* don't change the order or add anything between, this is ABI! */ + DEVLINK_ATTR_UNSPEC, + + /* bus name + dev name together are a handle for devlink entity */ + DEVLINK_ATTR_BUS_NAME, /* string */ + DEVLINK_ATTR_DEV_NAME, /* string */ + + DEVLINK_ATTR_PORT_INDEX, /* u32 */ + DEVLINK_ATTR_PORT_TYPE, /* u16 */ + DEVLINK_ATTR_PORT_DESIRED_TYPE, /* u16 */ + DEVLINK_ATTR_PORT_NETDEV_IFINDEX, /* u32 */ + DEVLINK_ATTR_PORT_NETDEV_NAME, /* string */ + DEVLINK_ATTR_PORT_IBDEV_NAME, /* string */ + DEVLINK_ATTR_PORT_SPLIT_COUNT, /* u32 */ + DEVLINK_ATTR_PORT_SPLIT_GROUP, /* u32 */ + + /* add new attributes above here, update the policy in devlink.c */ + + __DEVLINK_ATTR_MAX, + DEVLINK_ATTR_MAX = __DEVLINK_ATTR_MAX - 1 +}; + +#endif /* _UAPI_LINUX_DEVLINK_H_ */ diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h index b56dfcfe922a..c3fdfe79e5cc 100644 --- a/include/uapi/linux/elf-em.h +++ b/include/uapi/linux/elf-em.h @@ -30,7 +30,6 @@ #define EM_X86_64 62 /* AMD x86-64 */ #define EM_S390 22 /* IBM S/390 */ #define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */ -#define EM_V850 87 /* NEC v850 */ #define EM_M32R 88 /* Renesas M32R */ #define EM_MN10300 89 /* Panasonic/MEI MN10300, AM33 */ #define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */ @@ -50,8 +49,6 @@ */ #define EM_ALPHA 0x9026 -/* Bogus old v850 magic number, used by old tools. */ -#define EM_CYGNUS_V850 0x9080 /* Bogus old m32r magic number, used by old tools. */ #define EM_CYGNUS_M32R 0x9041 /* This is the old interim value for S/390 architecture */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 57fa39005e79..2835b07416b7 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -13,15 +13,21 @@ #ifndef _UAPI_LINUX_ETHTOOL_H #define _UAPI_LINUX_ETHTOOL_H +#include <linux/kernel.h> #include <linux/types.h> #include <linux/if_ether.h> +#ifndef __KERNEL__ +#include <limits.h> /* for INT_MAX */ +#endif + /* All structures exposed to userland should be defined such that they * have the same layout for 32-bit and 64-bit userland. */ /** - * struct ethtool_cmd - link control and status + * struct ethtool_cmd - DEPRECATED, link control and status + * This structure is DEPRECATED, please use struct ethtool_link_settings. * @cmd: Command number = %ETHTOOL_GSET or %ETHTOOL_SSET * @supported: Bitmask of %SUPPORTED_* flags for the link modes, * physical connectors and other link features for which the @@ -31,7 +37,7 @@ * physical connectors and other link features that are * advertised through autonegotiation or enabled for * auto-detection. - * @speed: Low bits of the speed + * @speed: Low bits of the speed, 1Mb units, 0 to INT_MAX or SPEED_UNKNOWN * @duplex: Duplex mode; one of %DUPLEX_* * @port: Physical connector type; one of %PORT_* * @phy_address: MDIO address of PHY (transceiver); 0 or 255 if not @@ -47,7 +53,7 @@ * obsoleted by &struct ethtool_coalesce. Read-only; deprecated. 
* @maxrxpkt: Historically used to report RX IRQ coalescing; now * obsoleted by &struct ethtool_coalesce. Read-only; deprecated. - * @speed_hi: High bits of the speed + * @speed_hi: High bits of the speed, 1Mb units, 0 to INT_MAX or SPEED_UNKNOWN * @eth_tp_mdix: Ethernet twisted-pair MDI(-X) status; one of * %ETH_TP_MDI_*. If the status is unknown or not applicable, the * value will be %ETH_TP_MDI_INVALID. Read-only. @@ -748,6 +754,56 @@ struct ethtool_usrip4_spec { __u8 proto; }; +/** + * struct ethtool_tcpip6_spec - flow specification for TCP/IPv6 etc. + * @ip6src: Source host + * @ip6dst: Destination host + * @psrc: Source port + * @pdst: Destination port + * @tclass: Traffic Class + * + * This can be used to specify a TCP/IPv6, UDP/IPv6 or SCTP/IPv6 flow. + */ +struct ethtool_tcpip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be16 psrc; + __be16 pdst; + __u8 tclass; +}; + +/** + * struct ethtool_ah_espip6_spec - flow specification for IPsec/IPv6 + * @ip6src: Source host + * @ip6dst: Destination host + * @spi: Security parameters index + * @tclass: Traffic Class + * + * This can be used to specify an IPsec transport or tunnel over IPv6. + */ +struct ethtool_ah_espip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 spi; + __u8 tclass; +}; + +/** + * struct ethtool_usrip6_spec - general flow specification for IPv6 + * @ip6src: Source host + * @ip6dst: Destination host + * @l4_4_bytes: First 4 bytes of transport (layer 4) header + * @tclass: Traffic Class + * @l4_proto: Transport protocol number (nexthdr after any Extension Headers) + */ +struct ethtool_usrip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 l4_4_bytes; + __u8 tclass; + __u8 l4_proto; +}; + union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec; struct ethtool_tcpip4_spec udp_ip4_spec; @@ -755,6 +811,12 @@ union ethtool_flow_union { struct ethtool_ah_espip4_spec ah_ip4_spec; struct ethtool_ah_espip4_spec esp_ip4_spec; struct ethtool_usrip4_spec usr_ip4_spec; + struct ethtool_tcpip6_spec tcp_ip6_spec; + struct ethtool_tcpip6_spec udp_ip6_spec; + struct ethtool_tcpip6_spec sctp_ip6_spec; + struct ethtool_ah_espip6_spec ah_ip6_spec; + struct ethtool_ah_espip6_spec esp_ip6_spec; + struct ethtool_usrip6_spec usr_ip6_spec; struct ethhdr ether_spec; __u8 hdata[52]; }; @@ -1146,10 +1208,29 @@ enum ethtool_sfeatures_retval_bits { #define ETHTOOL_F_WISH (1 << ETHTOOL_F_WISH__BIT) #define ETHTOOL_F_COMPAT (1 << ETHTOOL_F_COMPAT__BIT) +#define MAX_NUM_QUEUE 4096 + +/** + * struct ethtool_per_queue_op - apply sub command to the queues in mask. + * @cmd: ETHTOOL_PERQUEUE + * @sub_command: the sub command which applies to each queue + * @queue_mask: Bitmap of the queues which the sub command applies to + * @data: A complete command structure following for each of the queues addressed + */ +struct ethtool_per_queue_op { + __u32 cmd; + __u32 sub_command; + __u32 queue_mask[__KERNEL_DIV_ROUND_UP(MAX_NUM_QUEUE, 32)]; + char data[]; +}; /* CMDs currently supported */ -#define ETHTOOL_GSET 0x00000001 /* Get settings. */ -#define ETHTOOL_SSET 0x00000002 /* Set settings. */ +#define ETHTOOL_GSET 0x00000001 /* DEPRECATED, Get settings. + * Please use ETHTOOL_GLINKSETTINGS + */ +#define ETHTOOL_SSET 0x00000002 /* DEPRECATED, Set settings. + * Please use ETHTOOL_SLINKSETTINGS + */ #define ETHTOOL_GDRVINFO 0x00000003 /* Get driver info. */ #define ETHTOOL_GREGS 0x00000004 /* Get NIC registers. */ #define ETHTOOL_GWOL 0x00000005 /* Get wake-on-lan options.
*/ @@ -1229,73 +1310,141 @@ enum ethtool_sfeatures_retval_bits { #define ETHTOOL_STUNABLE 0x00000049 /* Set tunable configuration */ #define ETHTOOL_GPHYSTATS 0x0000004a /* get PHY-specific statistics */ +#define ETHTOOL_PERQUEUE 0x0000004b /* Set per queue options */ + +#define ETHTOOL_GLINKSETTINGS 0x0000004c /* Get ethtool_link_settings */ +#define ETHTOOL_SLINKSETTINGS 0x0000004d /* Set ethtool_link_settings */ + + /* compatibility with older code */ #define SPARC_ETH_GSET ETHTOOL_GSET #define SPARC_ETH_SSET ETHTOOL_SSET -#define SUPPORTED_10baseT_Half (1 << 0) -#define SUPPORTED_10baseT_Full (1 << 1) -#define SUPPORTED_100baseT_Half (1 << 2) -#define SUPPORTED_100baseT_Full (1 << 3) -#define SUPPORTED_1000baseT_Half (1 << 4) -#define SUPPORTED_1000baseT_Full (1 << 5) -#define SUPPORTED_Autoneg (1 << 6) -#define SUPPORTED_TP (1 << 7) -#define SUPPORTED_AUI (1 << 8) -#define SUPPORTED_MII (1 << 9) -#define SUPPORTED_FIBRE (1 << 10) -#define SUPPORTED_BNC (1 << 11) -#define SUPPORTED_10000baseT_Full (1 << 12) -#define SUPPORTED_Pause (1 << 13) -#define SUPPORTED_Asym_Pause (1 << 14) -#define SUPPORTED_2500baseX_Full (1 << 15) -#define SUPPORTED_Backplane (1 << 16) -#define SUPPORTED_1000baseKX_Full (1 << 17) -#define SUPPORTED_10000baseKX4_Full (1 << 18) -#define SUPPORTED_10000baseKR_Full (1 << 19) -#define SUPPORTED_10000baseR_FEC (1 << 20) -#define SUPPORTED_20000baseMLD2_Full (1 << 21) -#define SUPPORTED_20000baseKR2_Full (1 << 22) -#define SUPPORTED_40000baseKR4_Full (1 << 23) -#define SUPPORTED_40000baseCR4_Full (1 << 24) -#define SUPPORTED_40000baseSR4_Full (1 << 25) -#define SUPPORTED_40000baseLR4_Full (1 << 26) -#define SUPPORTED_56000baseKR4_Full (1 << 27) -#define SUPPORTED_56000baseCR4_Full (1 << 28) -#define SUPPORTED_56000baseSR4_Full (1 << 29) -#define SUPPORTED_56000baseLR4_Full (1 << 30) - -#define ADVERTISED_10baseT_Half (1 << 0) -#define ADVERTISED_10baseT_Full (1 << 1) -#define ADVERTISED_100baseT_Half (1 << 2) -#define ADVERTISED_100baseT_Full (1 << 3) -#define ADVERTISED_1000baseT_Half (1 << 4) -#define ADVERTISED_1000baseT_Full (1 << 5) -#define ADVERTISED_Autoneg (1 << 6) -#define ADVERTISED_TP (1 << 7) -#define ADVERTISED_AUI (1 << 8) -#define ADVERTISED_MII (1 << 9) -#define ADVERTISED_FIBRE (1 << 10) -#define ADVERTISED_BNC (1 << 11) -#define ADVERTISED_10000baseT_Full (1 << 12) -#define ADVERTISED_Pause (1 << 13) -#define ADVERTISED_Asym_Pause (1 << 14) -#define ADVERTISED_2500baseX_Full (1 << 15) -#define ADVERTISED_Backplane (1 << 16) -#define ADVERTISED_1000baseKX_Full (1 << 17) -#define ADVERTISED_10000baseKX4_Full (1 << 18) -#define ADVERTISED_10000baseKR_Full (1 << 19) -#define ADVERTISED_10000baseR_FEC (1 << 20) -#define ADVERTISED_20000baseMLD2_Full (1 << 21) -#define ADVERTISED_20000baseKR2_Full (1 << 22) -#define ADVERTISED_40000baseKR4_Full (1 << 23) -#define ADVERTISED_40000baseCR4_Full (1 << 24) -#define ADVERTISED_40000baseSR4_Full (1 << 25) -#define ADVERTISED_40000baseLR4_Full (1 << 26) -#define ADVERTISED_56000baseKR4_Full (1 << 27) -#define ADVERTISED_56000baseCR4_Full (1 << 28) -#define ADVERTISED_56000baseSR4_Full (1 << 29) -#define ADVERTISED_56000baseLR4_Full (1 << 30) +/* Link mode bit indices */ +enum ethtool_link_mode_bit_indices { + ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0, + ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1, + ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2, + ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5, + ETHTOOL_LINK_MODE_Autoneg_BIT = 6, + 
ETHTOOL_LINK_MODE_TP_BIT = 7, + ETHTOOL_LINK_MODE_AUI_BIT = 8, + ETHTOOL_LINK_MODE_MII_BIT = 9, + ETHTOOL_LINK_MODE_FIBRE_BIT = 10, + ETHTOOL_LINK_MODE_BNC_BIT = 11, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12, + ETHTOOL_LINK_MODE_Pause_BIT = 13, + ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14, + ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15, + ETHTOOL_LINK_MODE_Backplane_BIT = 16, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20, + ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27, + ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, + ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, + ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, + + /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit + * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_* + * macro for bits > 31. The only way to use indices > 31 is to + * use the new ETHTOOL_GLINKSETTINGS/ETHTOOL_SLINKSETTINGS API. + */ + + __ETHTOOL_LINK_MODE_LAST + = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, +}; + +#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ + (1UL << (ETHTOOL_LINK_MODE_ ## base_name ## _BIT)) + +/* DEPRECATED macros. Please migrate to + * ETHTOOL_GLINKSETTINGS/ETHTOOL_SLINKSETTINGS API. Please do NOT + * define any new SUPPORTED_* macro for bits > 31. + */ +#define SUPPORTED_10baseT_Half __ETHTOOL_LINK_MODE_LEGACY_MASK(10baseT_Half) +#define SUPPORTED_10baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10baseT_Full) +#define SUPPORTED_100baseT_Half __ETHTOOL_LINK_MODE_LEGACY_MASK(100baseT_Half) +#define SUPPORTED_100baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(100baseT_Full) +#define SUPPORTED_1000baseT_Half __ETHTOOL_LINK_MODE_LEGACY_MASK(1000baseT_Half) +#define SUPPORTED_1000baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(1000baseT_Full) +#define SUPPORTED_Autoneg __ETHTOOL_LINK_MODE_LEGACY_MASK(Autoneg) +#define SUPPORTED_TP __ETHTOOL_LINK_MODE_LEGACY_MASK(TP) +#define SUPPORTED_AUI __ETHTOOL_LINK_MODE_LEGACY_MASK(AUI) +#define SUPPORTED_MII __ETHTOOL_LINK_MODE_LEGACY_MASK(MII) +#define SUPPORTED_FIBRE __ETHTOOL_LINK_MODE_LEGACY_MASK(FIBRE) +#define SUPPORTED_BNC __ETHTOOL_LINK_MODE_LEGACY_MASK(BNC) +#define SUPPORTED_10000baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseT_Full) +#define SUPPORTED_Pause __ETHTOOL_LINK_MODE_LEGACY_MASK(Pause) +#define SUPPORTED_Asym_Pause __ETHTOOL_LINK_MODE_LEGACY_MASK(Asym_Pause) +#define SUPPORTED_2500baseX_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(2500baseX_Full) +#define SUPPORTED_Backplane __ETHTOOL_LINK_MODE_LEGACY_MASK(Backplane) +#define SUPPORTED_1000baseKX_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(1000baseKX_Full) +#define SUPPORTED_10000baseKX4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseKX4_Full) +#define SUPPORTED_10000baseKR_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseKR_Full) +#define SUPPORTED_10000baseR_FEC __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseR_FEC) +#define SUPPORTED_20000baseMLD2_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(20000baseMLD2_Full) +#define SUPPORTED_20000baseKR2_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(20000baseKR2_Full) +#define SUPPORTED_40000baseKR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseKR4_Full) +#define SUPPORTED_40000baseCR4_Full 
__ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseCR4_Full) +#define SUPPORTED_40000baseSR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseSR4_Full) +#define SUPPORTED_40000baseLR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseLR4_Full) +#define SUPPORTED_56000baseKR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseKR4_Full) +#define SUPPORTED_56000baseCR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseCR4_Full) +#define SUPPORTED_56000baseSR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseSR4_Full) +#define SUPPORTED_56000baseLR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseLR4_Full) +/* Please do not define any new SUPPORTED_* macro for bits > 31, see + * notice above. + */ + +/* + * DEPRECATED macros. Please migrate to + * ETHTOOL_GLINKSETTINGS/ETHTOOL_SLINKSETTINGS API. Please do NOT + * define any new ADVERTISED_* macro for bits > 31. + */ +#define ADVERTISED_10baseT_Half __ETHTOOL_LINK_MODE_LEGACY_MASK(10baseT_Half) +#define ADVERTISED_10baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10baseT_Full) +#define ADVERTISED_100baseT_Half __ETHTOOL_LINK_MODE_LEGACY_MASK(100baseT_Half) +#define ADVERTISED_100baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(100baseT_Full) +#define ADVERTISED_1000baseT_Half __ETHTOOL_LINK_MODE_LEGACY_MASK(1000baseT_Half) +#define ADVERTISED_1000baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(1000baseT_Full) +#define ADVERTISED_Autoneg __ETHTOOL_LINK_MODE_LEGACY_MASK(Autoneg) +#define ADVERTISED_TP __ETHTOOL_LINK_MODE_LEGACY_MASK(TP) +#define ADVERTISED_AUI __ETHTOOL_LINK_MODE_LEGACY_MASK(AUI) +#define ADVERTISED_MII __ETHTOOL_LINK_MODE_LEGACY_MASK(MII) +#define ADVERTISED_FIBRE __ETHTOOL_LINK_MODE_LEGACY_MASK(FIBRE) +#define ADVERTISED_BNC __ETHTOOL_LINK_MODE_LEGACY_MASK(BNC) +#define ADVERTISED_10000baseT_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseT_Full) +#define ADVERTISED_Pause __ETHTOOL_LINK_MODE_LEGACY_MASK(Pause) +#define ADVERTISED_Asym_Pause __ETHTOOL_LINK_MODE_LEGACY_MASK(Asym_Pause) +#define ADVERTISED_2500baseX_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(2500baseX_Full) +#define ADVERTISED_Backplane __ETHTOOL_LINK_MODE_LEGACY_MASK(Backplane) +#define ADVERTISED_1000baseKX_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(1000baseKX_Full) +#define ADVERTISED_10000baseKX4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseKX4_Full) +#define ADVERTISED_10000baseKR_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseKR_Full) +#define ADVERTISED_10000baseR_FEC __ETHTOOL_LINK_MODE_LEGACY_MASK(10000baseR_FEC) +#define ADVERTISED_20000baseMLD2_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(20000baseMLD2_Full) +#define ADVERTISED_20000baseKR2_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(20000baseKR2_Full) +#define ADVERTISED_40000baseKR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseKR4_Full) +#define ADVERTISED_40000baseCR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseCR4_Full) +#define ADVERTISED_40000baseSR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseSR4_Full) +#define ADVERTISED_40000baseLR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(40000baseLR4_Full) +#define ADVERTISED_56000baseKR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseKR4_Full) +#define ADVERTISED_56000baseCR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseCR4_Full) +#define ADVERTISED_56000baseSR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseSR4_Full) +#define ADVERTISED_56000baseLR4_Full __ETHTOOL_LINK_MODE_LEGACY_MASK(56000baseLR4_Full) +/* Please do not define any new ADVERTISED_* macro for bits > 31, see + * notice above. + */ /* The following are all involved in forcing a particular link * mode for the device for setting things.
When getting the @@ -1303,7 +1452,7 @@ enum ethtool_sfeatures_retval_bits { * it was forced up into this mode or autonegotiated. */ -/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|5|10|20|25|40|50|56|100]GbE. */ +/* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal. */ #define SPEED_10 10 #define SPEED_100 100 #define SPEED_1000 1000 @@ -1319,11 +1468,28 @@ enum ethtool_sfeatures_retval_bits { #define SPEED_UNKNOWN -1 +static inline int ethtool_validate_speed(__u32 speed) +{ + return speed <= INT_MAX || speed == SPEED_UNKNOWN; +} + /* Duplex, half or full. */ #define DUPLEX_HALF 0x00 #define DUPLEX_FULL 0x01 #define DUPLEX_UNKNOWN 0xff +static inline int ethtool_validate_duplex(__u8 duplex) +{ + switch (duplex) { + case DUPLEX_HALF: + case DUPLEX_FULL: + case DUPLEX_UNKNOWN: + return 1; + } + + return 0; +} + /* Which connector port. */ #define PORT_TP 0x00 #define PORT_AUI 0x01 @@ -1367,15 +1533,17 @@ enum ethtool_sfeatures_retval_bits { #define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */ #define SCTP_V4_FLOW 0x03 /* hash or spec (sctp_ip4_spec) */ #define AH_ESP_V4_FLOW 0x04 /* hash only */ -#define TCP_V6_FLOW 0x05 /* hash only */ -#define UDP_V6_FLOW 0x06 /* hash only */ -#define SCTP_V6_FLOW 0x07 /* hash only */ +#define TCP_V6_FLOW 0x05 /* hash or spec (tcp_ip6_spec; nfc only) */ +#define UDP_V6_FLOW 0x06 /* hash or spec (udp_ip6_spec; nfc only) */ +#define SCTP_V6_FLOW 0x07 /* hash or spec (sctp_ip6_spec; nfc only) */ #define AH_ESP_V6_FLOW 0x08 /* hash only */ #define AH_V4_FLOW 0x09 /* hash or spec (ah_ip4_spec) */ #define ESP_V4_FLOW 0x0a /* hash or spec (esp_ip4_spec) */ -#define AH_V6_FLOW 0x0b /* hash only */ -#define ESP_V6_FLOW 0x0c /* hash only */ -#define IP_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ +#define AH_V6_FLOW 0x0b /* hash or spec (ah_ip6_spec; nfc only) */ +#define ESP_V6_FLOW 0x0c /* hash or spec (esp_ip6_spec; nfc only) */ +#define IPV4_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ +#define IP_USER_FLOW IPV4_USER_FLOW +#define IPV6_USER_FLOW 0x0e /* spec only (usr_ip6_spec; nfc only) */ #define IPV4_FLOW 0x10 /* hash only */ #define IPV6_FLOW 0x11 /* hash only */ #define ETHER_FLOW 0x12 /* spec only (ether_spec) */ @@ -1441,4 +1609,123 @@ enum ethtool_reset_flags { }; #define ETH_RESET_SHARED_SHIFT 16 + +/** + * struct ethtool_link_settings - link control and status + * + * IMPORTANT, Backward compatibility notice: When implementing new + * user-space tools, please first try %ETHTOOL_GLINKSETTINGS, and + * if it succeeds use %ETHTOOL_SLINKSETTINGS to change link + * settings; do not use %ETHTOOL_SSET if %ETHTOOL_GLINKSETTINGS + * succeeded: stick to %ETHTOOL_GLINKSETTINGS/%SLINKSETTINGS in + * that case. Conversely, if %ETHTOOL_GLINKSETTINGS fails, use + * %ETHTOOL_GSET to query and %ETHTOOL_SSET to change link + * settings; do not use %ETHTOOL_SLINKSETTINGS if + * %ETHTOOL_GLINKSETTINGS failed: stick to + * %ETHTOOL_GSET/%ETHTOOL_SSET in that case. + * + * @cmd: Command number = %ETHTOOL_GLINKSETTINGS or %ETHTOOL_SLINKSETTINGS + * @speed: Link speed (Mbps) + * @duplex: Duplex mode; one of %DUPLEX_* + * @port: Physical connector type; one of %PORT_* + * @phy_address: MDIO address of PHY (transceiver); 0 or 255 if not + * applicable. For clause 45 PHYs this is the PRTAD. + * @autoneg: Enable/disable autonegotiation and auto-detection; + * either %AUTONEG_DISABLE or %AUTONEG_ENABLE + * @mdio_support: Bitmask of %ETH_MDIO_SUPPORTS_* flags for the MDIO + * protocols supported by the interface; 0 if unknown. + * Read-only. 
+ * @eth_tp_mdix: Ethernet twisted-pair MDI(-X) status; one of + * %ETH_TP_MDI_*. If the status is unknown or not applicable, the + * value will be %ETH_TP_MDI_INVALID. Read-only. + * @eth_tp_mdix_ctrl: Ethernet twisted pair MDI(-X) control; one of + * %ETH_TP_MDI_*. If MDI(-X) control is not implemented, reads + * yield %ETH_TP_MDI_INVALID and writes may be ignored or rejected. + * When written successfully, the link should be renegotiated if + * necessary. + * @link_mode_masks_nwords: Number of 32-bit words for each of the + * supported, advertising, lp_advertising link mode bitmaps. For + * %ETHTOOL_GLINKSETTINGS: on entry, number of words passed by user + * (>= 0); on return, if handshake in progress, negative if + * request size unsupported by kernel: absolute value indicates + * kernel recommended size and cmd field is 0, as well as all the + * other fields; otherwise (handshake completed), strictly + * positive to indicate size used by kernel and cmd field is + * %ETHTOOL_GLINKSETTINGS, all other fields populated by driver. For + * %ETHTOOL_SLINKSETTINGS: must be valid on entry, ie. a positive + * value returned previously by %ETHTOOL_GLINKSETTINGS, otherwise + * refused. For drivers: ignore this field (use kernel's + * __ETHTOOL_LINK_MODE_MASK_NBITS instead), any change to it will + * be overwritten by kernel. + * @supported: Bitmap with each bit meaning given by + * %ethtool_link_mode_bit_indices for the link modes, physical + * connectors and other link features for which the interface + * supports autonegotiation or auto-detection. Read-only. + * @advertising: Bitmap with each bit meaning given by + * %ethtool_link_mode_bit_indices for the link modes, physical + * connectors and other link features that are advertised through + * autonegotiation or enabled for auto-detection. + * @lp_advertising: Bitmap with each bit meaning given by + * %ethtool_link_mode_bit_indices for the link modes, and other + * link features that the link partner advertised through + * autonegotiation; 0 if unknown or not applicable. Read-only. + * + * If autonegotiation is disabled, the speed and @duplex represent the + * fixed link mode and are writable if the driver supports multiple + * link modes. If it is enabled then they are read-only; if the link + * is up they represent the negotiated link mode; if the link is down, + * the speed is 0, %SPEED_UNKNOWN or the highest enabled speed and + * @duplex is %DUPLEX_UNKNOWN or the best enabled duplex mode. + * + * Some hardware interfaces may have multiple PHYs and/or physical + * connectors fitted or do not allow the driver to detect which are + * fitted. For these interfaces @port and/or @phy_address may be + * writable, possibly dependent on @autoneg being %AUTONEG_DISABLE. + * Otherwise, attempts to write different values may be ignored or + * rejected. + * + * Deprecated %ethtool_cmd fields transceiver, maxtxpkt and maxrxpkt + * are not available in %ethtool_link_settings. Until all drivers are + * converted to ignore them or to the new %ethtool_link_settings API, + * for both queries and changes, users should always try + * %ETHTOOL_GLINKSETTINGS first, and if it fails with -ENOTSUPP stick + * only to %ETHTOOL_GSET and %ETHTOOL_SSET consistently. If it + * succeeds, then users should stick to %ETHTOOL_GLINKSETTINGS and + * %ETHTOOL_SLINKSETTINGS (which would support drivers implementing + * either %ethtool_cmd or %ethtool_link_settings). 
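The link_mode_masks_nwords handshake described above amounts to issuing ETHTOOL_GLINKSETTINGS twice through SIOCETHTOOL. A rough userspace sketch, assuming an AF_INET/SOCK_DGRAM socket in fd, glibc's struct ifreq, and a buffer sized for the __s8 worst case of 127 words per mask:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

struct link_req {
	struct ethtool_link_settings req;
	__u32 link_mode_data[3 * 127];	/* supported + advertising + lp_advertising */
};

static int get_link_settings(int fd, const char *ifname)
{
	struct link_req lr;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&lr;

	/* Pass 1: offer zero words; the kernel answers with the size it wants. */
	memset(&lr, 0, sizeof(lr));
	lr.req.cmd = ETHTOOL_GLINKSETTINGS;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return -1;	/* old kernel: fall back to ETHTOOL_GSET */
	if (lr.req.link_mode_masks_nwords >= 0)
		return -1;	/* handshake reply should be negative */

	/* Pass 2: repeat with the kernel-recommended number of 32-bit words. */
	lr.req.cmd = ETHTOOL_GLINKSETTINGS;
	lr.req.link_mode_masks_nwords = -lr.req.link_mode_masks_nwords;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return -1;

	printf("%s: speed %u Mb/s, duplex %u\n",
	       ifname, lr.req.speed, (unsigned int)lr.req.duplex);
	return 0;
}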
+ * + * Users should assume that all fields not marked read-only are + * writable and subject to validation by the driver. They should use + * %ETHTOOL_GLINKSETTINGS to get the current values before making specific + * changes and then applying them with %ETHTOOL_SLINKSETTINGS. + * + * Drivers that implement %get_link_ksettings and/or + * %set_link_ksettings should ignore the @cmd + * and @link_mode_masks_nwords fields (any change to them overwritten + * by kernel), and rely only on kernel's internal + * %__ETHTOOL_LINK_MODE_MASK_NBITS and + * %ethtool_link_mode_mask_t. Drivers that implement + * %set_link_ksettings() should validate all fields other than @cmd + * and @link_mode_masks_nwords that are not described as read-only or + * deprecated, and must ignore all fields described as read-only. + */ +struct ethtool_link_settings { + __u32 cmd; + __u32 speed; + __u8 duplex; + __u8 port; + __u8 phy_address; + __u8 autoneg; + __u8 mdio_support; + __u8 eth_tp_mdix; + __u8 eth_tp_mdix_ctrl; + __s8 link_mode_masks_nwords; + __u32 reserved[8]; + __u32 link_mode_masks[0]; + /* layout of link_mode_masks fields: + * __u32 map_supported[link_mode_masks_nwords]; + * __u32 map_advertising[link_mode_masks_nwords]; + * __u32 map_lp_advertising[link_mode_masks_nwords]; + */ +}; #endif /* _UAPI_LINUX_ETHTOOL_H */ diff --git a/include/uapi/linux/genetlink.h b/include/uapi/linux/genetlink.h index c3363ba1ae05..5512c90af7e3 100644 --- a/include/uapi/linux/genetlink.h +++ b/include/uapi/linux/genetlink.h @@ -21,6 +21,7 @@ struct genlmsghdr { #define GENL_CMD_CAP_DO 0x02 #define GENL_CMD_CAP_DUMP 0x04 #define GENL_CMD_CAP_HASPOL 0x08 +#define GENL_UNS_ADMIN_PERM 0x10 /* * List of reserved static generic netlink identifiers: diff --git a/include/uapi/linux/gpio.h b/include/uapi/linux/gpio.h new file mode 100644 index 000000000000..d0a3cac72250 --- /dev/null +++ b/include/uapi/linux/gpio.h @@ -0,0 +1,58 @@ +/* + * <linux/gpio.h> - userspace ABI for the GPIO character devices + * + * Copyright (C) 2015 Linus Walleij + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ +#ifndef _UAPI_GPIO_H_ +#define _UAPI_GPIO_H_ + +#include <linux/ioctl.h> +#include <linux/types.h> + +/** + * struct gpiochip_info - Information about a certain GPIO chip + * @name: the Linux kernel name of this GPIO chip + * @label: a functional name for this GPIO chip, such as a product + * number, may be NULL + * @lines: number of GPIO lines on this chip + */ +struct gpiochip_info { + char name[32]; + char label[32]; + __u32 lines; +}; + +/* Line is in use by the kernel */ +#define GPIOLINE_FLAG_KERNEL (1UL << 0) +#define GPIOLINE_FLAG_IS_OUT (1UL << 1) +#define GPIOLINE_FLAG_ACTIVE_LOW (1UL << 2) +#define GPIOLINE_FLAG_OPEN_DRAIN (1UL << 3) +#define GPIOLINE_FLAG_OPEN_SOURCE (1UL << 4) + +/** + * struct gpioline_info - Information about a certain GPIO line + * @line_offset: the local offset on this GPIO device, fill this in when + * requesting the line information from the kernel + * @flags: various flags for this line + * @name: the name of this GPIO line, such as the output pin of the line on the + * chip, a rail or a pin header name on a board, as specified by the gpio + * chip, may be NULL + * @consumer: a functional name for the consumer of this GPIO line as set by + * whatever is using it, will be NULL if there is no current user but may + * also be NULL if the consumer doesn't set this up + */ +struct gpioline_info { + __u32 line_offset; + __u32 flags; + char name[32]; + char consumer[32]; +}; + +#define GPIO_GET_CHIPINFO_IOCTL _IOR(0xB4, 0x01, struct gpiochip_info) +#define GPIO_GET_LINEINFO_IOCTL _IOWR(0xB4, 0x02, struct gpioline_info) + +#endif /* _UAPI_GPIO_H_ */ diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h index 9cf2394f0bcf..f80277569f24 100644 --- a/include/uapi/linux/if.h +++ b/include/uapi/linux/if.h @@ -37,7 +37,7 @@ * are shared for all types of net_devices. The sysfs entries are available * via /sys/class/net/<dev>/flags. Flags which can be toggled through sysfs * are annotated below, note that only a few flags can be toggled and some - * other flags are always always preserved from the original net_device flags + * other flags are always preserved from the original net_device flags * even if you try to set them via sysfs. Flags which are always preserved * are kept under the flag grouping @IFF_VOLATILE. Flags which are volatile * are annotated below as such. 
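The two GPIO ioctls defined above are enough for a first look at a chip from userspace. A small sketch, assuming the header is installed as <linux/gpio.h> and a chip node exists at /dev/gpiochip0:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/gpio.h>

/* Dump chip and per-line information via the new character device. */
int main(void)
{
	struct gpiochip_info ci;
	struct gpioline_info li;
	__u32 i;
	int fd = open("/dev/gpiochip0", O_RDONLY);	/* assumed device node */

	if (fd < 0 || ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, &ci) < 0)
		return 1;
	printf("%s [%s], %u lines\n", ci.name, ci.label, ci.lines);

	for (i = 0; i < ci.lines; i++) {
		memset(&li, 0, sizeof(li));
		li.line_offset = i;	/* fill in the offset we ask about */
		if (ioctl(fd, GPIO_GET_LINEINFO_IOCTL, &li) == 0)
			printf("  line %2u: %s (%s)%s\n", i, li.name, li.consumer,
			       (li.flags & GPIOLINE_FLAG_IS_OUT) ? " [out]" : "");
	}
	close(fd);
	return 0;
}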
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index 18db14477bdd..0536eefff9bf 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -137,11 +137,17 @@ struct bridge_vlan_info { /* Bridge multicast database attributes * [MDBA_MDB] = { * [MDBA_MDB_ENTRY] = { - * [MDBA_MDB_ENTRY_INFO] + * [MDBA_MDB_ENTRY_INFO] { + * struct br_mdb_entry + * [MDBA_MDB_EATTR attributes] + * } * } * } * [MDBA_ROUTER] = { - * [MDBA_ROUTER_PORT] + * [MDBA_ROUTER_PORT] = { + * u32 ifindex + * [MDBA_ROUTER_PATTR attributes] + * } * } */ enum { @@ -166,6 +172,22 @@ enum { }; #define MDBA_MDB_ENTRY_MAX (__MDBA_MDB_ENTRY_MAX - 1) +/* per mdb entry additional attributes */ +enum { + MDBA_MDB_EATTR_UNSPEC, + MDBA_MDB_EATTR_TIMER, + __MDBA_MDB_EATTR_MAX +}; +#define MDBA_MDB_EATTR_MAX (__MDBA_MDB_EATTR_MAX - 1) + +/* multicast router types */ +enum { + MDB_RTR_TYPE_DISABLED, + MDB_RTR_TYPE_TEMP_QUERY, + MDB_RTR_TYPE_PERM, + MDB_RTR_TYPE_TEMP +}; + enum { MDBA_ROUTER_UNSPEC, MDBA_ROUTER_PORT, @@ -173,6 +195,15 @@ enum { }; #define MDBA_ROUTER_MAX (__MDBA_ROUTER_MAX - 1) +/* router port attributes */ +enum { + MDBA_ROUTER_PATTR_UNSPEC, + MDBA_ROUTER_PATTR_TIMER, + MDBA_ROUTER_PATTR_TYPE, + __MDBA_ROUTER_PATTR_MAX +}; +#define MDBA_ROUTER_PATTR_MAX (__MDBA_ROUTER_PATTR_MAX - 1) + struct br_port_msg { __u8 family; __u32 ifindex; @@ -183,6 +214,8 @@ struct br_mdb_entry { #define MDB_TEMPORARY 0 #define MDB_PERMANENT 1 __u8 state; +#define MDB_FLAGS_OFFLOAD (1 << 0) + __u8 flags; __u16 vid; struct { union { diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index ea9221b0331a..4a93051c578c 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -83,6 +83,7 @@ #define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */ #define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. 
*/ #define ETH_P_TIPC 0x88CA /* TIPC */ +#define ETH_P_MACSEC 0x88E5 /* 802.1ae MACsec */ #define ETH_P_8021AH 0x88E7 /* 802.1ah Backbone Service Tag */ #define ETH_P_MVRP 0x88F5 /* 802.1Q MVRP */ #define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index a30b78090594..8e3f88fa5b59 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -35,6 +35,8 @@ struct rtnl_link_stats { /* for cslip etc */ __u32 rx_compressed; __u32 tx_compressed; + + __u32 rx_nohandler; /* dropped, no handler found */ }; /* The main device statistics structure */ @@ -68,6 +70,8 @@ struct rtnl_link_stats64 { /* for cslip etc */ __u64 rx_compressed; __u64 tx_compressed; + + __u64 rx_nohandler; /* dropped, no handler found */ }; /* The struct should be in sync with struct ifmap */ @@ -401,6 +405,43 @@ enum { #define IFLA_VRF_MAX (__IFLA_VRF_MAX - 1) +enum { + IFLA_VRF_PORT_UNSPEC, + IFLA_VRF_PORT_TABLE, + __IFLA_VRF_PORT_MAX +}; + +#define IFLA_VRF_PORT_MAX (__IFLA_VRF_PORT_MAX - 1) + +/* MACSEC section */ +enum { + IFLA_MACSEC_UNSPEC, + IFLA_MACSEC_SCI, + IFLA_MACSEC_PORT, + IFLA_MACSEC_ICV_LEN, + IFLA_MACSEC_CIPHER_SUITE, + IFLA_MACSEC_WINDOW, + IFLA_MACSEC_ENCODING_SA, + IFLA_MACSEC_ENCRYPT, + IFLA_MACSEC_PROTECT, + IFLA_MACSEC_INC_SCI, + IFLA_MACSEC_ES, + IFLA_MACSEC_SCB, + IFLA_MACSEC_REPLAY_PROTECT, + IFLA_MACSEC_VALIDATION, + __IFLA_MACSEC_MAX, +}; + +#define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1) + +enum macsec_validation_type { + MACSEC_VALIDATE_DISABLED = 0, + MACSEC_VALIDATE_CHECK = 1, + MACSEC_VALIDATE_STRICT = 2, + __MACSEC_VALIDATE_END, + MACSEC_VALIDATE_MAX = __MACSEC_VALIDATE_END - 1, +}; + /* IPVLAN section */ enum { IFLA_IPVLAN_UNSPEC, @@ -444,6 +485,7 @@ enum { IFLA_VXLAN_GBP, IFLA_VXLAN_REMCSUM_NOPARTIAL, IFLA_VXLAN_COLLECT_METADATA, + IFLA_VXLAN_LABEL, __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) @@ -466,6 +508,7 @@ enum { IFLA_GENEVE_UDP_CSUM, IFLA_GENEVE_UDP_ZERO_CSUM6_TX, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, + IFLA_GENEVE_LABEL, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h new file mode 100644 index 000000000000..26b0d1e3e3e7 --- /dev/null +++ b/include/uapi/linux/if_macsec.h @@ -0,0 +1,161 @@ +/* + * include/uapi/linux/if_macsec.h - MACsec device + * + * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _UAPI_MACSEC_H +#define _UAPI_MACSEC_H + +#include <linux/types.h> + +#define MACSEC_GENL_NAME "macsec" +#define MACSEC_GENL_VERSION 1 + +#define MACSEC_MAX_KEY_LEN 128 + +#define DEFAULT_CIPHER_ID 0x0080020001000001ULL +#define DEFAULT_CIPHER_ALT 0x0080C20001000001ULL + +#define MACSEC_MIN_ICV_LEN 8 +#define MACSEC_MAX_ICV_LEN 32 + +enum macsec_attrs { + MACSEC_ATTR_UNSPEC, + MACSEC_ATTR_IFINDEX, /* u32, ifindex of the MACsec netdevice */ + MACSEC_ATTR_RXSC_CONFIG, /* config, nested macsec_rxsc_attrs */ + MACSEC_ATTR_SA_CONFIG, /* config, nested macsec_sa_attrs */ + MACSEC_ATTR_SECY, /* dump, nested macsec_secy_attrs */ + MACSEC_ATTR_TXSA_LIST, /* dump, nested, macsec_sa_attrs for each TXSA */ + MACSEC_ATTR_RXSC_LIST, /* dump, nested, macsec_rxsc_attrs for each RXSC */ + MACSEC_ATTR_TXSC_STATS, /* dump, nested, macsec_txsc_stats_attr */ + MACSEC_ATTR_SECY_STATS, /* dump, nested, macsec_secy_stats_attr */ + __MACSEC_ATTR_END, + NUM_MACSEC_ATTR = __MACSEC_ATTR_END, + MACSEC_ATTR_MAX = __MACSEC_ATTR_END - 1, +}; + +enum macsec_secy_attrs { + MACSEC_SECY_ATTR_UNSPEC, + MACSEC_SECY_ATTR_SCI, + MACSEC_SECY_ATTR_ENCODING_SA, + MACSEC_SECY_ATTR_WINDOW, + MACSEC_SECY_ATTR_CIPHER_SUITE, + MACSEC_SECY_ATTR_ICV_LEN, + MACSEC_SECY_ATTR_PROTECT, + MACSEC_SECY_ATTR_REPLAY, + MACSEC_SECY_ATTR_OPER, + MACSEC_SECY_ATTR_VALIDATE, + MACSEC_SECY_ATTR_ENCRYPT, + MACSEC_SECY_ATTR_INC_SCI, + MACSEC_SECY_ATTR_ES, + MACSEC_SECY_ATTR_SCB, + __MACSEC_SECY_ATTR_END, + NUM_MACSEC_SECY_ATTR = __MACSEC_SECY_ATTR_END, + MACSEC_SECY_ATTR_MAX = __MACSEC_SECY_ATTR_END - 1, +}; + +enum macsec_rxsc_attrs { + MACSEC_RXSC_ATTR_UNSPEC, + MACSEC_RXSC_ATTR_SCI, /* config/dump, u64 */ + MACSEC_RXSC_ATTR_ACTIVE, /* config/dump, u8 0..1 */ + MACSEC_RXSC_ATTR_SA_LIST, /* dump, nested */ + MACSEC_RXSC_ATTR_STATS, /* dump, nested, macsec_rxsc_stats_attr */ + __MACSEC_RXSC_ATTR_END, + NUM_MACSEC_RXSC_ATTR = __MACSEC_RXSC_ATTR_END, + MACSEC_RXSC_ATTR_MAX = __MACSEC_RXSC_ATTR_END - 1, +}; + +enum macsec_sa_attrs { + MACSEC_SA_ATTR_UNSPEC, + MACSEC_SA_ATTR_AN, /* config/dump, u8 0..3 */ + MACSEC_SA_ATTR_ACTIVE, /* config/dump, u8 0..1 */ + MACSEC_SA_ATTR_PN, /* config/dump, u32 */ + MACSEC_SA_ATTR_KEY, /* config, data */ + MACSEC_SA_ATTR_KEYID, /* config/dump, u64 */ + MACSEC_SA_ATTR_STATS, /* dump, nested, macsec_sa_stats_attr */ + __MACSEC_SA_ATTR_END, + NUM_MACSEC_SA_ATTR = __MACSEC_SA_ATTR_END, + MACSEC_SA_ATTR_MAX = __MACSEC_SA_ATTR_END - 1, +}; + +enum macsec_nl_commands { + MACSEC_CMD_GET_TXSC, + MACSEC_CMD_ADD_RXSC, + MACSEC_CMD_DEL_RXSC, + MACSEC_CMD_UPD_RXSC, + MACSEC_CMD_ADD_TXSA, + MACSEC_CMD_DEL_TXSA, + MACSEC_CMD_UPD_TXSA, + MACSEC_CMD_ADD_RXSA, + MACSEC_CMD_DEL_RXSA, + MACSEC_CMD_UPD_RXSA, +}; + +/* u64 per-RXSC stats */ +enum macsec_rxsc_stats_attr { + MACSEC_RXSC_STATS_ATTR_UNSPEC, + MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, + MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, + MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, + MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, + MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, + MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, + MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, + MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, + MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, + MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, + __MACSEC_RXSC_STATS_ATTR_END, + NUM_MACSEC_RXSC_STATS_ATTR = __MACSEC_RXSC_STATS_ATTR_END, + MACSEC_RXSC_STATS_ATTR_MAX = __MACSEC_RXSC_STATS_ATTR_END - 1, +}; + +/* u32 per-{RX,TX}SA stats */ +enum macsec_sa_stats_attr { + MACSEC_SA_STATS_ATTR_UNSPEC, + MACSEC_SA_STATS_ATTR_IN_PKTS_OK, + 
MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, + MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, + MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, + MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, + MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, + MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, + __MACSEC_SA_STATS_ATTR_END, + NUM_MACSEC_SA_STATS_ATTR = __MACSEC_SA_STATS_ATTR_END, + MACSEC_SA_STATS_ATTR_MAX = __MACSEC_SA_STATS_ATTR_END - 1, +}; + +/* u64 per-TXSC stats */ +enum macsec_txsc_stats_attr { + MACSEC_TXSC_STATS_ATTR_UNSPEC, + MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, + MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, + MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, + MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, + __MACSEC_TXSC_STATS_ATTR_END, + NUM_MACSEC_TXSC_STATS_ATTR = __MACSEC_TXSC_STATS_ATTR_END, + MACSEC_TXSC_STATS_ATTR_MAX = __MACSEC_TXSC_STATS_ATTR_END - 1, +}; + +/* u64 per-SecY stats */ +enum macsec_secy_stats_attr { + MACSEC_SECY_STATS_ATTR_UNSPEC, + MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, + MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, + MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, + MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, + MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, + MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, + MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, + MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, + __MACSEC_SECY_STATS_ATTR_END, + NUM_MACSEC_SECY_STATS_ATTR = __MACSEC_SECY_STATS_ATTR_END, + MACSEC_SECY_STATS_ATTR_MAX = __MACSEC_SECY_STATS_ATTR_END - 1, +}; + +#endif /* _UAPI_MACSEC_H */ diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h index 7c63bd67c36e..c077617f3304 100644 --- a/include/uapi/linux/iio/types.h +++ b/include/uapi/linux/iio/types.h @@ -37,6 +37,7 @@ enum iio_chan_type { IIO_VELOCITY, IIO_CONCENTRATION, IIO_RESISTANCE, + IIO_PH, }; enum iio_modifier { diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index 2758687300b4..01113841190d 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h @@ -246,6 +246,7 @@ struct input_mask { #define BUS_GSC 0x1A #define BUS_ATARI 0x1B #define BUS_SPI 0x1C +#define BUS_RMI 0x1D /* * MT_TOOL types diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h index 08f894d2ddbd..f291569768dd 100644 --- a/include/uapi/linux/ip.h +++ b/include/uapi/linux/ip.h @@ -165,6 +165,8 @@ enum IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL, IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL, IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN, + IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST, + IPV4_DEVCONF_DROP_GRATUITOUS_ARP, __IPV4_DEVCONF_MAX }; diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index 38b4fef20219..395876060f50 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -174,6 +174,9 @@ enum { DEVCONF_USE_OIF_ADDRS_ONLY, DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT, DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN, + DEVCONF_DROP_UNICAST_IN_L2_MULTICAST, + DEVCONF_DROP_UNSOLICITED_NA, + DEVCONF_KEEP_ADDR_ON_DOWN, DEVCONF_MAX }; diff --git a/include/uapi/linux/kcm.h b/include/uapi/linux/kcm.h new file mode 100644 index 000000000000..a5a530940b99 --- /dev/null +++ b/include/uapi/linux/kcm.h @@ -0,0 +1,40 @@ +/* + * Kernel Connection Multiplexor + * + * Copyright (c) 2016 Tom Herbert <tom@herbertland.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * User API to clone KCM sockets and attach transport socket to a KCM + * multiplexor. 
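Putting the KCM pieces above together from userspace might look roughly like this; the numeric value of AF_KCM and the already-loaded framing program in prog_fd are assumptions, not part of the header:

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/kcm.h>
#include <linux/sockios.h>

#ifndef AF_KCM
#define AF_KCM 41	/* assumed: address family assigned with the KCM merge */
#endif

/* tcp_fd: connected TCP socket; prog_fd: BPF program that returns the
 * length of the next application-level message in the byte stream. */
static int kcm_attach_tcp(int tcp_fd, int prog_fd)
{
	struct kcm_attach attach = {
		.fd	= tcp_fd,
		.bpf_fd	= prog_fd,
	};
	int kcm_fd = socket(AF_KCM, SOCK_DGRAM, KCMPROTO_CONNECTED);

	if (kcm_fd < 0)
		return -1;
	if (ioctl(kcm_fd, SIOCKCMATTACH, &attach) < 0)
		return -1;
	return kcm_fd;	/* send()/recv() now operate on whole messages */
}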
+ */ + +#ifndef KCM_KERNEL_H +#define KCM_KERNEL_H + +struct kcm_attach { + int fd; + int bpf_fd; +}; + +struct kcm_unattach { + int fd; +}; + +struct kcm_clone { + int fd; +}; + +#define SIOCKCMATTACH (SIOCPROTOPRIVATE + 0) +#define SIOCKCMUNATTACH (SIOCPROTOPRIVATE + 1) +#define SIOCKCMCLONE (SIOCPROTOPRIVATE + 2) + +#define KCMPROTO_CONNECTED 0 + +/* Socket options */ +#define KCM_RECV_DISABLE 1 + +#endif + diff --git a/include/uapi/linux/kernel.h b/include/uapi/linux/kernel.h index 321e399457f5..466073f0ce46 100644 --- a/include/uapi/linux/kernel.h +++ b/include/uapi/linux/kernel.h @@ -9,5 +9,6 @@ #define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) #define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) #endif /* _UAPI_LINUX_KERNEL_H */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 9da905157cee..a7f1f8032ec1 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -157,6 +157,7 @@ struct kvm_s390_skeys { struct kvm_hyperv_exit { #define KVM_EXIT_HYPERV_SYNIC 1 +#define KVM_EXIT_HYPERV_HCALL 2 __u32 type; union { struct { @@ -165,6 +166,11 @@ struct kvm_hyperv_exit { __u64 evt_page; __u64 msg_page; } synic; + struct { + __u64 input; + __u64 result; + __u64 params[2]; + } hcall; } u; }; @@ -541,7 +547,13 @@ struct kvm_s390_pgm_info { __u8 exc_access_id; __u8 per_access_id; __u8 op_access_id; - __u8 pad[3]; +#define KVM_S390_PGM_FLAGS_ILC_VALID 0x01 +#define KVM_S390_PGM_FLAGS_ILC_0 0x02 +#define KVM_S390_PGM_FLAGS_ILC_1 0x04 +#define KVM_S390_PGM_FLAGS_ILC_MASK 0x06 +#define KVM_S390_PGM_FLAGS_NO_REWIND 0x08 + __u8 flags; + __u8 pad[2]; }; struct kvm_s390_prefix_info { @@ -850,6 +862,9 @@ struct kvm_ppc_smmu_info { #define KVM_CAP_IOEVENTFD_ANY_LENGTH 122 #define KVM_CAP_HYPERV_SYNIC 123 #define KVM_CAP_S390_RI 124 +#define KVM_CAP_SPAPR_TCE_64 125 +#define KVM_CAP_ARM_PMU_V3 126 +#define KVM_CAP_VCPU_ATTRIBUTES 127 #ifdef KVM_CAP_IRQ_ROUTING @@ -1142,6 +1157,8 @@ struct kvm_s390_ucas_mapping { /* Available with KVM_CAP_PPC_ALLOC_HTAB */ #define KVM_PPC_ALLOCATE_HTAB _IOWR(KVMIO, 0xa7, __u32) #define KVM_CREATE_SPAPR_TCE _IOW(KVMIO, 0xa8, struct kvm_create_spapr_tce) +#define KVM_CREATE_SPAPR_TCE_64 _IOW(KVMIO, 0xa8, \ + struct kvm_create_spapr_tce_64) /* Available with KVM_CAP_RMA */ #define KVM_ALLOCATE_RMA _IOR(KVMIO, 0xa9, struct kvm_allocate_rma) /* Available with KVM_CAP_PPC_HTAB_FD */ diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h index 1e3c8cb43bd7..df59edee25d1 100644 --- a/include/uapi/linux/media.h +++ b/include/uapi/linux/media.h @@ -66,27 +66,49 @@ struct media_device_info { /* * DVB entities */ -#define MEDIA_ENT_F_DTV_DEMOD (MEDIA_ENT_F_BASE + 1) -#define MEDIA_ENT_F_TS_DEMUX (MEDIA_ENT_F_BASE + 2) -#define MEDIA_ENT_F_DTV_CA (MEDIA_ENT_F_BASE + 3) -#define MEDIA_ENT_F_DTV_NET_DECAP (MEDIA_ENT_F_BASE + 4) +#define MEDIA_ENT_F_DTV_DEMOD (MEDIA_ENT_F_BASE + 0x00001) +#define MEDIA_ENT_F_TS_DEMUX (MEDIA_ENT_F_BASE + 0x00002) +#define MEDIA_ENT_F_DTV_CA (MEDIA_ENT_F_BASE + 0x00003) +#define MEDIA_ENT_F_DTV_NET_DECAP (MEDIA_ENT_F_BASE + 0x00004) /* - * Connectors + * I/O entities */ -/* It is a responsibility of the entity drivers to add connectors and links */ -#define MEDIA_ENT_F_CONN_RF (MEDIA_ENT_F_BASE + 21) -#define MEDIA_ENT_F_CONN_SVIDEO (MEDIA_ENT_F_BASE + 22) -#define MEDIA_ENT_F_CONN_COMPOSITE (MEDIA_ENT_F_BASE + 23) -/* For internal test signal generators and other debug connectors */ -#define MEDIA_ENT_F_CONN_TEST 
(MEDIA_ENT_F_BASE + 24) +#define MEDIA_ENT_F_IO_DTV (MEDIA_ENT_F_BASE + 0x01001) +#define MEDIA_ENT_F_IO_VBI (MEDIA_ENT_F_BASE + 0x01002) +#define MEDIA_ENT_F_IO_SWRADIO (MEDIA_ENT_F_BASE + 0x01003) /* - * I/O entities + * Analog TV IF-PLL decoders + * + * It is a responsibility of the master/bridge drivers to create links + * for MEDIA_ENT_F_IF_VID_DECODER and MEDIA_ENT_F_IF_AUD_DECODER. + */ +#define MEDIA_ENT_F_IF_VID_DECODER (MEDIA_ENT_F_BASE + 0x02001) +#define MEDIA_ENT_F_IF_AUD_DECODER (MEDIA_ENT_F_BASE + 0x02002) + +/* + * Audio Entity Functions */ -#define MEDIA_ENT_F_IO_DTV (MEDIA_ENT_F_BASE + 31) -#define MEDIA_ENT_F_IO_VBI (MEDIA_ENT_F_BASE + 32) -#define MEDIA_ENT_F_IO_SWRADIO (MEDIA_ENT_F_BASE + 33) +#define MEDIA_ENT_F_AUDIO_CAPTURE (MEDIA_ENT_F_BASE + 0x03001) +#define MEDIA_ENT_F_AUDIO_PLAYBACK (MEDIA_ENT_F_BASE + 0x03002) +#define MEDIA_ENT_F_AUDIO_MIXER (MEDIA_ENT_F_BASE + 0x03003) + +/* + * Connectors + */ +/* It is a responsibility of the entity drivers to add connectors and links */ +#ifdef __KERNEL__ + /* + * For now, it should not be used in userspace, as some + * definitions may change + */ + +#define MEDIA_ENT_F_CONN_RF (MEDIA_ENT_F_BASE + 0x30001) +#define MEDIA_ENT_F_CONN_SVIDEO (MEDIA_ENT_F_BASE + 0x30002) +#define MEDIA_ENT_F_CONN_COMPOSITE (MEDIA_ENT_F_BASE + 0x30003) + +#endif /* * Don't touch on those. The ranges MEDIA_ENT_F_OLD_BASE and @@ -107,14 +129,18 @@ struct media_device_info { #define MEDIA_ENT_F_LENS (MEDIA_ENT_F_OLD_SUBDEV_BASE + 3) #define MEDIA_ENT_F_ATV_DECODER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 4) /* - * It is a responsibility of the entity drivers to add connectors and links - * for the tuner entities. + * It is a responsibility of the master/bridge drivers to add connectors + * and links for MEDIA_ENT_F_TUNER. Please note that some old tuners + * may require the use of separate I2C chips to decode analog TV signals, + * when the master/bridge chipset doesn't have its own TV standard decoder. + * In such cases, the IF-PLL stage is mapped via one or two entities: + * MEDIA_ENT_F_IF_VID_DECODER and/or MEDIA_ENT_F_IF_AUD_DECODER.
*/ #define MEDIA_ENT_F_TUNER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 5) #define MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN MEDIA_ENT_F_OLD_SUBDEV_BASE -#ifndef __KERNEL__ +#if !defined(__KERNEL__) || defined(__NEED_MEDIA_LEGACY_API) /* * Legacy symbols used to avoid userspace compilation breakages @@ -127,6 +153,10 @@ struct media_device_info { #define MEDIA_ENT_TYPE_MASK 0x00ff0000 #define MEDIA_ENT_SUBTYPE_MASK 0x0000ffff +/* End of the old subdev reserved numberspace */ +#define MEDIA_ENT_T_DEVNODE_UNKNOWN (MEDIA_ENT_T_DEVNODE | \ + MEDIA_ENT_SUBTYPE_MASK) + #define MEDIA_ENT_T_DEVNODE MEDIA_ENT_F_OLD_BASE #define MEDIA_ENT_T_DEVNODE_V4L MEDIA_ENT_F_IO_V4L #define MEDIA_ENT_T_DEVNODE_FB (MEDIA_ENT_T_DEVNODE + 2) @@ -252,6 +282,7 @@ struct media_links_enum { #define MEDIA_INTF_T_DVB_BASE 0x00000100 #define MEDIA_INTF_T_V4L_BASE 0x00000200 +#define MEDIA_INTF_T_ALSA_BASE 0x00000300 /* Interface types */ @@ -267,6 +298,15 @@ struct media_links_enum { #define MEDIA_INTF_T_V4L_SUBDEV (MEDIA_INTF_T_V4L_BASE + 3) #define MEDIA_INTF_T_V4L_SWRADIO (MEDIA_INTF_T_V4L_BASE + 4) +#define MEDIA_INTF_T_ALSA_PCM_CAPTURE (MEDIA_INTF_T_ALSA_BASE) +#define MEDIA_INTF_T_ALSA_PCM_PLAYBACK (MEDIA_INTF_T_ALSA_BASE + 1) +#define MEDIA_INTF_T_ALSA_CONTROL (MEDIA_INTF_T_ALSA_BASE + 2) +#define MEDIA_INTF_T_ALSA_COMPRESS (MEDIA_INTF_T_ALSA_BASE + 3) +#define MEDIA_INTF_T_ALSA_RAWMIDI (MEDIA_INTF_T_ALSA_BASE + 4) +#define MEDIA_INTF_T_ALSA_HWDEP (MEDIA_INTF_T_ALSA_BASE + 5) +#define MEDIA_INTF_T_ALSA_SEQUENCER (MEDIA_INTF_T_ALSA_BASE + 6) +#define MEDIA_INTF_T_ALSA_TIMER (MEDIA_INTF_T_ALSA_BASE + 7) + /* * MC next gen API definitions * @@ -286,19 +326,19 @@ struct media_links_enum { * later, before the adding this API upstream. */ -#if 0 /* Let's postpone it to Kernel 4.6 */ + struct media_v2_entity { __u32 id; char name[64]; /* FIXME: move to a property? 
(RFC says so) */ __u32 function; /* Main function of the entity */ - __u16 reserved[12]; -}; + __u32 reserved[6]; +} __attribute__ ((packed)); /* Should match the specific fields at media_intf_devnode */ struct media_v2_intf_devnode { __u32 major; __u32 minor; -}; +} __attribute__ ((packed)); struct media_v2_interface { __u32 id; @@ -310,22 +350,22 @@ struct media_v2_interface { struct media_v2_intf_devnode devnode; __u32 raw[16]; }; -}; +} __attribute__ ((packed)); struct media_v2_pad { __u32 id; __u32 entity_id; __u32 flags; - __u16 reserved[9]; -}; + __u32 reserved[5]; +} __attribute__ ((packed)); struct media_v2_link { __u32 id; __u32 source_id; __u32 sink_id; __u32 flags; - __u32 reserved[5]; -}; + __u32 reserved[6]; +} __attribute__ ((packed)); struct media_v2_topology { __u64 topology_version; @@ -345,13 +385,7 @@ struct media_v2_topology { __u32 num_links; __u32 reserved4; __u64 ptr_links; -}; - -static inline void __user *media_get_uptr(__u64 arg) -{ - return (void __user *)(uintptr_t)arg; -} -#endif +} __attribute__ ((packed)); /* ioctls */ @@ -359,9 +393,6 @@ static inline void __user *media_get_uptr(__u64 arg) #define MEDIA_IOC_ENUM_ENTITIES _IOWR('|', 0x01, struct media_entity_desc) #define MEDIA_IOC_ENUM_LINKS _IOWR('|', 0x02, struct media_links_enum) #define MEDIA_IOC_SETUP_LINK _IOWR('|', 0x03, struct media_link_desc) - -#if 0 /* Let's postpone it to Kernel 4.6 */ #define MEDIA_IOC_G_TOPOLOGY _IOWR('|', 0x04, struct media_v2_topology) -#endif #endif /* __LINUX_MEDIA_H */ diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h index ce91215cf7e6..5062fb5751e1 100644 --- a/include/uapi/linux/mroute6.h +++ b/include/uapi/linux/mroute6.h @@ -1,6 +1,7 @@ #ifndef _UAPI__LINUX_MROUTE6_H #define _UAPI__LINUX_MROUTE6_H +#include <linux/kernel.h> #include <linux/types.h> #include <linux/sockios.h> @@ -46,14 +47,8 @@ typedef unsigned short mifi_t; typedef __u32 if_mask; #define NIFBITS (sizeof(if_mask) * 8) /* bits per mask */ -#if !defined(__KERNEL__) -#if !defined(DIV_ROUND_UP) -#define DIV_ROUND_UP(x,y) (((x) + ((y) - 1)) / (y)) -#endif -#endif - typedef struct if_set { - if_mask ifs_bits[DIV_ROUND_UP(IF_SETSIZE, NIFBITS)]; + if_mask ifs_bits[__KERNEL_DIV_ROUND_UP(IF_SETSIZE, NIFBITS)]; } if_set; #define IF_SET(n, p) ((p)->ifs_bits[(n)/NIFBITS] |= (1 << ((n) % NIFBITS))) diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h index 5b4a4be06e2b..7cc28ab05b87 100644 --- a/include/uapi/linux/ndctl.h +++ b/include/uapi/linux/ndctl.h @@ -66,14 +66,18 @@ struct nd_cmd_ars_cap { __u64 length; __u32 status; __u32 max_ars_out; + __u32 clear_err_unit; + __u32 reserved; } __packed; struct nd_cmd_ars_start { __u64 address; __u64 length; __u16 type; - __u8 reserved[6]; + __u8 flags; + __u8 reserved[5]; __u32 status; + __u32 scrub_time; } __packed; struct nd_cmd_ars_status { @@ -81,16 +85,27 @@ struct nd_cmd_ars_status { __u32 out_length; __u64 address; __u64 length; + __u64 restart_address; + __u64 restart_length; __u16 type; + __u16 flags; __u32 num_records; struct nd_ars_record { __u32 handle; - __u32 flags; + __u32 reserved; __u64 err_address; __u64 length; } __packed records[0]; } __packed; +struct nd_cmd_clear_error { + __u64 address; + __u64 length; + __u32 status; + __u8 reserved[4]; + __u64 cleared; +} __packed; + enum { ND_CMD_IMPLEMENTED = 0, @@ -98,6 +113,7 @@ enum { ND_CMD_ARS_CAP = 1, ND_CMD_ARS_START = 2, ND_CMD_ARS_STATUS = 3, + ND_CMD_CLEAR_ERROR = 4, /* per-dimm commands */ ND_CMD_SMART = 1, @@ -122,6 +138,7 @@ static inline const char 
*nvdimm_bus_cmd_name(unsigned cmd) [ND_CMD_ARS_CAP] = "ars_cap", [ND_CMD_ARS_START] = "ars_start", [ND_CMD_ARS_STATUS] = "ars_status", + [ND_CMD_CLEAR_ERROR] = "clear_error", }; if (cmd < ARRAY_SIZE(names) && names[cmd]) @@ -180,6 +197,9 @@ static inline const char *nvdimm_cmd_name(unsigned cmd) #define ND_IOCTL_ARS_STATUS _IOWR(ND_IOCTL, ND_CMD_ARS_STATUS,\ struct nd_cmd_ars_status) +#define ND_IOCTL_CLEAR_ERROR _IOWR(ND_IOCTL, ND_CMD_CLEAR_ERROR,\ + struct nd_cmd_clear_error) + #define ND_DEVICE_DIMM 1 /* nd_dimm: container for "config data" */ #define ND_DEVICE_REGION_PMEM 2 /* nd_region: (parent of PMEM namespaces) */ #define ND_DEVICE_REGION_BLK 3 /* nd_region: (parent of BLK namespaces) */ diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h index 23cbd34e4ac7..45dfad509c4d 100644 --- a/include/uapi/linux/netconf.h +++ b/include/uapi/linux/netconf.h @@ -19,6 +19,7 @@ enum { __NETCONFA_MAX }; #define NETCONFA_MAX (__NETCONFA_MAX - 1) +#define NETCONFA_ALL -1 #define NETCONFA_IFINDEX_ALL -1 #define NETCONFA_IFINDEX_DEFAULT -2 diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h index 319f47128db8..6d074d14ee27 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_common.h +++ b/include/uapi/linux/netfilter/nf_conntrack_common.h @@ -20,9 +20,15 @@ enum ip_conntrack_info { IP_CT_ESTABLISHED_REPLY = IP_CT_ESTABLISHED + IP_CT_IS_REPLY, IP_CT_RELATED_REPLY = IP_CT_RELATED + IP_CT_IS_REPLY, - IP_CT_NEW_REPLY = IP_CT_NEW + IP_CT_IS_REPLY, - /* Number of distinct IP_CT types (no NEW in reply dirn). */ - IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1 + /* No NEW in reply direction. */ + + /* Number of distinct IP_CT types. */ + IP_CT_NUMBER, + + /* only for userspace compatibility */ +#ifndef __KERNEL__ + IP_CT_NEW_REPLY = IP_CT_NUMBER, +#endif }; #define NF_CT_STATE_INVALID_BIT (1 << 0) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index be41ffc128b8..eeffde196f80 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -681,6 +681,7 @@ enum nft_exthdr_attributes { * @NFT_META_IIFGROUP: packet input interface group * @NFT_META_OIFGROUP: packet output interface group * @NFT_META_CGROUP: socket control group (skb->sk->sk_classid) + * @NFT_META_PRANDOM: a 32bit pseudo-random number */ enum nft_meta_keys { NFT_META_LEN, @@ -707,6 +708,7 @@ enum nft_meta_keys { NFT_META_IIFGROUP, NFT_META_OIFGROUP, NFT_META_CGROUP, + NFT_META_PRANDOM, }; /** @@ -949,10 +951,14 @@ enum nft_nat_attributes { * enum nft_masq_attributes - nf_tables masquerade expression attributes * * @NFTA_MASQ_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32) + * @NFTA_MASQ_REG_PROTO_MIN: source register of proto range start (NLA_U32: nft_registers) + * @NFTA_MASQ_REG_PROTO_MAX: source register of proto range end (NLA_U32: nft_registers) */ enum nft_masq_attributes { NFTA_MASQ_UNSPEC, NFTA_MASQ_FLAGS, + NFTA_MASQ_REG_PROTO_MIN, + NFTA_MASQ_REG_PROTO_MAX, __NFTA_MASQ_MAX }; #define NFTA_MASQ_MAX (__NFTA_MASQ_MAX - 1) diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index f095155d8749..0dba4e4ed2be 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -107,8 +107,10 @@ struct nlmsgerr { #define NETLINK_PKTINFO 3 #define NETLINK_BROADCAST_ERROR 4 #define NETLINK_NO_ENOBUFS 5 +#ifndef __KERNEL__ #define NETLINK_RX_RING 6 #define NETLINK_TX_RING 7 +#endif #define NETLINK_LISTEN_ALL_NSID 8 
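Not part of the diff: a hedged sketch of driving the new ND_CMD_CLEAR_ERROR from userspace through the ioctl defined in the ndctl.h hunk above. The /dev/ndctl0 bus node and the address range are assumptions used only for illustration.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/ndctl.h>

/* Sketch: ask the nvdimm bus to clear a poisoned range and report how
 * many bytes were actually cleared. Path and range are example values. */
int main(void)
{
	struct nd_cmd_clear_error ce;
	int fd = open("/dev/ndctl0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&ce, 0, sizeof(ce));
	ce.address = 0x1000;	/* start of the bad range (example value) */
	ce.length  = 0x1000;	/* bytes to clear (example value) */

	if (ioctl(fd, ND_IOCTL_CLEAR_ERROR, &ce) == 0)
		printf("status=%u cleared=%llu bytes\n",
		       ce.status, (unsigned long long)ce.cleared);
	return 0;
}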
#define NETLINK_LIST_MEMBERSHIPS 9 #define NETLINK_CAP_ACK 10 @@ -134,6 +136,7 @@ struct nl_mmap_hdr { __u32 nm_gid; }; +#ifndef __KERNEL__ enum nl_mmap_status { NL_MMAP_STATUS_UNUSED, NL_MMAP_STATUS_RESERVED, @@ -145,6 +148,7 @@ enum nl_mmap_status { #define NL_MMAP_MSG_ALIGNMENT NLMSG_ALIGNTO #define NL_MMAP_MSG_ALIGN(sz) __ALIGN_KERNEL(sz, NL_MMAP_MSG_ALIGNMENT) #define NL_MMAP_HDRLEN NL_MMAP_MSG_ALIGN(sizeof(struct nl_mmap_hdr)) +#endif #define NET_MAJOR 36 /* Major 36 is reserved for networking */ diff --git a/include/uapi/linux/netlink_diag.h b/include/uapi/linux/netlink_diag.h index f2159d30d1f5..d79399394b46 100644 --- a/include/uapi/linux/netlink_diag.h +++ b/include/uapi/linux/netlink_diag.h @@ -48,6 +48,8 @@ enum { #define NDIAG_SHOW_MEMINFO 0x00000001 /* show memory info of a socket */ #define NDIAG_SHOW_GROUPS 0x00000002 /* show groups of a netlink socket */ +#ifndef __KERNEL__ #define NDIAG_SHOW_RING_CFG 0x00000004 /* show ring configuration */ +#endif #endif diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 5b7b5ebe7ca8..5a30a7563633 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1727,6 +1727,8 @@ enum nl80211_commands { * underlying device supports these minimal RRM features: * %NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES, * %NL80211_FEATURE_QUIET, + * Or, if global RRM is supported, see: + * %NL80211_EXT_FEATURE_RRM * If this flag is used, driver must add the Power Capabilities IE to the * association request. In addition, it must also set the RRM capability * flag in the association request's Capability Info field. @@ -1789,6 +1791,10 @@ enum nl80211_commands { * thus it must not specify the number of iterations, only the interval * between scans. The scan plans are executed sequentially. * Each scan plan is a nested attribute of &enum nl80211_sched_scan_plan. + * @NL80211_ATTR_PBSS: flag attribute. If set it means operate + * in a PBSS. Specified in %NL80211_CMD_CONNECT to request + * connecting to a PCP, and in %NL80211_CMD_START_AP to start + * a PCP instead of AP. Relevant for DMG networks only. * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined @@ -2164,6 +2170,8 @@ enum nl80211_attrs { NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS, NL80211_ATTR_SCHED_SCAN_PLANS, + NL80211_ATTR_PBSS, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -4396,12 +4404,18 @@ enum nl80211_feature_flags { /** * enum nl80211_ext_feature_index - bit index of extended features. * @NL80211_EXT_FEATURE_VHT_IBSS: This driver supports IBSS with VHT datarates. + * @NL80211_EXT_FEATURE_RRM: This driver supports RRM. When featured, user can + * can request to use RRM (see %NL80211_ATTR_USE_RRM) with + * %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests, which will set + * the ASSOC_REQ_USE_RRM flag in the association request even if + * NL80211_FEATURE_QUIET is not advertized. * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. 
*/ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_VHT_IBSS, + NL80211_EXT_FEATURE_RRM, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index a27222d5b413..616d04761730 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -454,6 +454,14 @@ struct ovs_key_ct_labels { #define OVS_CS_F_REPLY_DIR 0x08 /* Flow is in the reply direction. */ #define OVS_CS_F_INVALID 0x10 /* Could not track connection. */ #define OVS_CS_F_TRACKED 0x20 /* Conntrack has occurred. */ +#define OVS_CS_F_SRC_NAT 0x40 /* Packet's source address/port was + * mangled by NAT. + */ +#define OVS_CS_F_DST_NAT 0x80 /* Packet's destination address/port + * was mangled by NAT. + */ + +#define OVS_CS_F_NAT_MASK (OVS_CS_F_SRC_NAT | OVS_CS_F_DST_NAT) /** * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands. @@ -632,6 +640,8 @@ struct ovs_action_hash { * mask. For each bit set in the mask, the corresponding bit in the value is * copied to the connection tracking label field in the connection. * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG. + * @OVS_CT_ATTR_NAT: Nested OVS_NAT_ATTR_* for performing L3 network address + * translation (NAT) on the packet. */ enum ovs_ct_attr { OVS_CT_ATTR_UNSPEC, @@ -641,12 +651,51 @@ enum ovs_ct_attr { OVS_CT_ATTR_LABELS, /* labels to associate with this connection. */ OVS_CT_ATTR_HELPER, /* netlink helper to assist detection of related connections. */ + OVS_CT_ATTR_NAT, /* Nested OVS_NAT_ATTR_* */ __OVS_CT_ATTR_MAX }; #define OVS_CT_ATTR_MAX (__OVS_CT_ATTR_MAX - 1) /** + * enum ovs_nat_attr - Attributes for %OVS_CT_ATTR_NAT. + * + * @OVS_NAT_ATTR_SRC: Flag for Source NAT (mangle source address/port). + * @OVS_NAT_ATTR_DST: Flag for Destination NAT (mangle destination + * address/port). Only one of (@OVS_NAT_ATTR_SRC, @OVS_NAT_ATTR_DST) may be + * specified. Effective only for packets for ct_state NEW connections. + * Packets of committed connections are mangled by the NAT action according to + * the committed NAT type regardless of the flags specified. As a corollary, a + * NAT action without a NAT type flag will only mangle packets of committed + * connections. The following NAT attributes only apply for NEW + * (non-committed) connections, and they may be included only when the CT + * action has the @OVS_CT_ATTR_COMMIT flag and either @OVS_NAT_ATTR_SRC or + * @OVS_NAT_ATTR_DST is also included. + * @OVS_NAT_ATTR_IP_MIN: struct in_addr or struct in6_addr + * @OVS_NAT_ATTR_IP_MAX: struct in_addr or struct in6_addr + * @OVS_NAT_ATTR_PROTO_MIN: u16 L4 protocol specific lower boundary (port) + * @OVS_NAT_ATTR_PROTO_MAX: u16 L4 protocol specific upper boundary (port) + * @OVS_NAT_ATTR_PERSISTENT: Flag for persistent IP mapping across reboots + * @OVS_NAT_ATTR_PROTO_HASH: Flag for pseudo random L4 port mapping (MD5) + * @OVS_NAT_ATTR_PROTO_RANDOM: Flag for fully randomized L4 port mapping + */ +enum ovs_nat_attr { + OVS_NAT_ATTR_UNSPEC, + OVS_NAT_ATTR_SRC, + OVS_NAT_ATTR_DST, + OVS_NAT_ATTR_IP_MIN, + OVS_NAT_ATTR_IP_MAX, + OVS_NAT_ATTR_PROTO_MIN, + OVS_NAT_ATTR_PROTO_MAX, + OVS_NAT_ATTR_PERSISTENT, + OVS_NAT_ATTR_PROTO_HASH, + OVS_NAT_ATTR_PROTO_RANDOM, + __OVS_NAT_ATTR_MAX, +}; + +#define OVS_NAT_ATTR_MAX (__OVS_NAT_ATTR_MAX - 1) + +/** * enum ovs_action_attr - Action types. * * @OVS_ACTION_ATTR_OUTPUT: Output packet to port. 
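Not part of the diff: a tiny sketch of consuming the new conntrack state bits added in the openvswitch.h hunk above. Only the OVS_CS_F_* macros are taken from this header; the helper itself is hypothetical.

#include <linux/openvswitch.h>

/* Hypothetical helper: report whether a tracked flow's ct_state says
 * the packet was mangled by NAT in either direction. */
static int ct_state_was_natted(__u32 ct_state)
{
	return (ct_state & OVS_CS_F_TRACKED) &&
	       (ct_state & OVS_CS_F_NAT_MASK);	/* SRC_NAT and/or DST_NAT */
}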
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 439873775d49..c43c5f78b9c4 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -172,6 +172,7 @@ enum { TCA_U32_INDEV, TCA_U32_PCNT, TCA_U32_MARK, + TCA_U32_FLAGS, __TCA_U32_MAX }; @@ -416,6 +417,8 @@ enum { TCA_FLOWER_KEY_TCP_DST, /* be16 */ TCA_FLOWER_KEY_UDP_SRC, /* be16 */ TCA_FLOWER_KEY_UDP_DST, /* be16 */ + + TCA_FLOWER_FLAGS, __TCA_FLOWER_MAX, }; diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h index f0b7bfe5da92..ac6dded80ffa 100644 --- a/include/uapi/linux/ptp_clock.h +++ b/include/uapi/linux/ptp_clock.h @@ -51,7 +51,9 @@ struct ptp_clock_caps { int n_per_out; /* Number of programmable periodic signals. */ int pps; /* Whether the clock supports a PPS callback. */ int n_pins; /* Number of input/output pins. */ - int rsv[14]; /* Reserved for future use. */ + /* Whether the clock supports precise system-device cross timestamps */ + int cross_timestamping; + int rsv[13]; /* Reserved for future use. */ }; struct ptp_extts_request { @@ -81,6 +83,13 @@ struct ptp_sys_offset { struct ptp_clock_time ts[2 * PTP_MAX_SAMPLES + 1]; }; +struct ptp_sys_offset_precise { + struct ptp_clock_time device; + struct ptp_clock_time sys_realtime; + struct ptp_clock_time sys_monoraw; + unsigned int rsv[4]; /* Reserved for future use. */ +}; + enum ptp_pin_function { PTP_PF_NONE, PTP_PF_EXTTS, @@ -124,6 +133,8 @@ struct ptp_pin_desc { #define PTP_SYS_OFFSET _IOW(PTP_CLK_MAGIC, 5, struct ptp_sys_offset) #define PTP_PIN_GETFUNC _IOWR(PTP_CLK_MAGIC, 6, struct ptp_pin_desc) #define PTP_PIN_SETFUNC _IOW(PTP_CLK_MAGIC, 7, struct ptp_pin_desc) +#define PTP_SYS_OFFSET_PRECISE \ + _IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise) struct ptp_extts_event { struct ptp_clock_time t; /* Time event occured. */ diff --git a/include/uapi/linux/rfkill.h b/include/uapi/linux/rfkill.h index 058757f7a733..2e00dcebebd0 100644 --- a/include/uapi/linux/rfkill.h +++ b/include/uapi/linux/rfkill.h @@ -59,6 +59,8 @@ enum rfkill_type { * @RFKILL_OP_DEL: a device was removed * @RFKILL_OP_CHANGE: a device's state changed -- userspace changes one device * @RFKILL_OP_CHANGE_ALL: userspace changes all devices (of a type, or all) + * into a state, also updating the default state used for devices that + * are hot-plugged later. 
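Not part of the diff: a sketch of using the precise cross-timestamp support added to ptp_clock.h above. The /dev/ptp0 path is an assumption; PTP_CLOCK_GETCAPS is the pre-existing capabilities ioctl from the same header.

#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

/* Sketch: check cross_timestamping, then fetch one device/system
 * timestamp pair through the new PTP_SYS_OFFSET_PRECISE ioctl. */
int main(void)
{
	struct ptp_clock_caps caps;
	struct ptp_sys_offset_precise xt;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps) || !caps.cross_timestamping)
		return 1;	/* driver cannot cross-timestamp */
	if (ioctl(fd, PTP_SYS_OFFSET_PRECISE, &xt))
		return 1;
	printf("device %lld.%09u realtime %lld.%09u\n",
	       (long long)xt.device.sec, xt.device.nsec,
	       (long long)xt.sys_realtime.sec, xt.sys_realtime.nsec);
	return 0;
}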
*/ enum rfkill_operation { RFKILL_OP_ADD = 0, diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index 3e5d757407fb..e513a4ee369b 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -261,4 +261,7 @@ /* STM32 USART */ #define PORT_STM32 113 +/* MVEBU UART */ +#define PORT_MVEBU 114 + #endif /* _UAPILINUX_SERIAL_CORE_H */ diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h index 0e011eb91b5d..3f10e5317b46 100644 --- a/include/uapi/linux/swab.h +++ b/include/uapi/linux/swab.h @@ -151,7 +151,7 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val) * __swab16p - return a byteswapped 16-bit value from a pointer * @p: pointer to a naturally-aligned 16-bit value */ -static inline __u16 __swab16p(const __u16 *p) +static __always_inline __u16 __swab16p(const __u16 *p) { #ifdef __arch_swab16p return __arch_swab16p(p); @@ -164,7 +164,7 @@ static inline __u16 __swab16p(const __u16 *p) * __swab32p - return a byteswapped 32-bit value from a pointer * @p: pointer to a naturally-aligned 32-bit value */ -static inline __u32 __swab32p(const __u32 *p) +static __always_inline __u32 __swab32p(const __u32 *p) { #ifdef __arch_swab32p return __arch_swab32p(p); @@ -177,7 +177,7 @@ static inline __u32 __swab32p(const __u32 *p) * __swab64p - return a byteswapped 64-bit value from a pointer * @p: pointer to a naturally-aligned 64-bit value */ -static inline __u64 __swab64p(const __u64 *p) +static __always_inline __u64 __swab64p(const __u64 *p) { #ifdef __arch_swab64p return __arch_swab64p(p); @@ -232,7 +232,7 @@ static inline void __swab16s(__u16 *p) * __swab32s - byteswap a 32-bit value in-place * @p: pointer to a naturally-aligned 32-bit value */ -static inline void __swab32s(__u32 *p) +static __always_inline void __swab32s(__u32 *p) { #ifdef __arch_swab32s __arch_swab32s(p); @@ -245,7 +245,7 @@ static inline void __swab32s(__u32 *p) * __swab64s - byteswap a 64-bit value in-place * @p: pointer to a naturally-aligned 64-bit value */ -static inline void __swab64s(__u64 *p) +static __always_inline void __swab64s(__u64 *p) { #ifdef __arch_swab64s __arch_swab64s(p); diff --git a/include/uapi/linux/tc_act/tc_ife.h b/include/uapi/linux/tc_act/tc_ife.h new file mode 100644 index 000000000000..d648ff66586f --- /dev/null +++ b/include/uapi/linux/tc_act/tc_ife.h @@ -0,0 +1,38 @@ +#ifndef __UAPI_TC_IFE_H +#define __UAPI_TC_IFE_H + +#include <linux/types.h> +#include <linux/pkt_cls.h> + +#define TCA_ACT_IFE 25 +/* Flag bits for now just encoding/decoding; mutually exclusive */ +#define IFE_ENCODE 1 +#define IFE_DECODE 0 + +struct tc_ife { + tc_gen; + __u16 flags; +}; + +/*XXX: We need to encode the total number of bytes consumed */ +enum { + TCA_IFE_UNSPEC, + TCA_IFE_PARMS, + TCA_IFE_TM, + TCA_IFE_DMAC, + TCA_IFE_SMAC, + TCA_IFE_TYPE, + TCA_IFE_METALST, + __TCA_IFE_MAX +}; +#define TCA_IFE_MAX (__TCA_IFE_MAX - 1) + +#define IFE_META_SKBMARK 1 +#define IFE_META_HASHID 2 +#define IFE_META_PRIO 3 +#define IFE_META_QMAP 4 +/*Can be overridden at runtime by module option*/ +#define __IFE_META_MAX 5 +#define IFE_META_MAX (__IFE_META_MAX - 1) + +#endif diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index 65a77b071e22..53e8e3fe6b1b 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -196,6 +196,11 @@ struct tcp_info { __u64 tcpi_bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived */ __u32 tcpi_segs_out; /* RFC4898 tcpEStatsPerfSegsOut */ __u32 tcpi_segs_in; /* RFC4898 tcpEStatsPerfSegsIn */ + + __u32 
tcpi_notsent_bytes; + __u32 tcpi_min_rtt; + __u32 tcpi_data_segs_in; /* RFC4898 tcpEStatsDataSegsIn */ + __u32 tcpi_data_segs_out; /* RFC4898 tcpEStatsDataSegsOut */ }; /* for TCP_MD5SIG socket option */ diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h index 331499d597fa..361297e96f58 100644 --- a/include/uapi/linux/usb/ch11.h +++ b/include/uapi/linux/usb/ch11.h @@ -30,6 +30,14 @@ #define USB_RT_PORT (USB_TYPE_CLASS | USB_RECIP_OTHER) /* + * Port status type for GetPortStatus requests added in USB 3.1 + * See USB 3.1 spec Table 10-12 + */ +#define HUB_PORT_STATUS 0 +#define HUB_PORT_PD_STATUS 1 +#define HUB_EXT_PORT_STATUS 2 + +/* * Hub class requests * See USB 2.0 spec Table 11-16 */ @@ -97,10 +105,13 @@ /* * Hub Status and Hub Change results * See USB 2.0 spec Table 11-19 and Table 11-20 + * USB 3.1 extends the port status request and may return 4 additional bytes. + * See USB 3.1 spec section 10.16.2.6 Table 10-12 and 10-15 */ struct usb_port_status { __le16 wPortStatus; __le16 wPortChange; + __le32 dwExtPortStatus; } __attribute__ ((packed)); /* @@ -173,6 +184,16 @@ struct usb_port_status { #define USB_PORT_STAT_C_CONFIG_ERROR 0x0080 /* + * USB 3.1 dwExtPortStatus field masks + * See USB 3.1 spec 10.16.2.6.3 Table 10-15 + */ + +#define USB_EXT_PORT_STAT_RX_SPEED_ID 0x0000000f +#define USB_EXT_PORT_STAT_TX_SPEED_ID 0x000000f0 +#define USB_EXT_PORT_STAT_RX_LANES 0x00000f00 +#define USB_EXT_PORT_STAT_TX_LANES 0x0000f000 + +/* * wHubCharacteristics (masks) * See USB 2.0 spec Table 11-13, offset 3 */ diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index 4338eb7b09b3..06d6c6228a7a 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -234,6 +234,8 @@ struct usb_ctrlrequest { #define USB_DT_PIPE_USAGE 0x24 /* From the USB 3.0 spec */ #define USB_DT_SS_ENDPOINT_COMP 0x30 +/* From the USB 3.1 spec */ +#define USB_DT_SSP_ISOC_ENDPOINT_COMP 0x31 /* Conventional codes for class-specific descriptors. The convention is * defined in the USB "Common Class" Spec (3.11). 
Individual class specs @@ -613,6 +615,20 @@ static inline int usb_endpoint_interrupt_type( /*-------------------------------------------------------------------------*/ +/* USB_DT_SSP_ISOC_ENDPOINT_COMP: SuperSpeedPlus Isochronous Endpoint Companion + * descriptor + */ +struct usb_ssp_isoc_ep_comp_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wReseved; + __le32 dwBytesPerInterval; +} __attribute__ ((packed)); + +#define USB_DT_SSP_ISOC_EP_COMP_SIZE 8 + +/*-------------------------------------------------------------------------*/ + /* USB_DT_SS_ENDPOINT_COMP: SuperSpeed Endpoint Companion descriptor */ struct usb_ss_ep_comp_descriptor { __u8 bLength; @@ -646,6 +662,8 @@ usb_ss_max_streams(const struct usb_ss_ep_comp_descriptor *comp) /* Bits 1:0 of bmAttributes if this is an isoc endpoint */ #define USB_SS_MULT(p) (1 + ((p) & 0x3)) +/* Bit 7 of bmAttributes if a SSP isoc endpoint companion descriptor exists */ +#define USB_SS_SSP_ISOC_COMP(p) ((p) & (1 << 7)) /*-------------------------------------------------------------------------*/ @@ -690,6 +708,7 @@ struct usb_otg20_descriptor { #define USB_OTG_HNP (1 << 1) /* swap host/device roles */ #define USB_OTG_ADP (1 << 2) /* support ADP */ +#define OTG_STS_SELECTOR 0xF000 /* OTG status selector */ /*-------------------------------------------------------------------------*/ /* USB_DT_DEBUG: for special highspeed devices, replacing serial console */ @@ -894,6 +913,22 @@ struct usb_ssp_cap_descriptor { #define USB_SSP_SUBLINK_SPEED_LSM (0xff << 16) /* Lanespeed mantissa */ } __attribute__((packed)); +/* + * Precision time measurement capability descriptor: advertised by devices and + * hubs that support PTM + */ +#define USB_PTM_CAP_TYPE 0xb +struct usb_ptm_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; +} __attribute__((packed)); + +/* + * The size of the descriptor for the Sublink Speed Attribute Count + * (SSAC) specified in bmAttributes[4:0]. + */ +#define USB_DT_USB_SSP_CAP_SIZE(ssac) (16 + ssac * 4) /*-------------------------------------------------------------------------*/ @@ -954,6 +989,7 @@ enum usb_device_speed { USB_SPEED_HIGH, /* usb 2.0 */ USB_SPEED_WIRELESS, /* wireless (usb 2.5) */ USB_SPEED_SUPER, /* usb 3.0 */ + USB_SPEED_SUPER_PLUS, /* usb 3.1 */ }; diff --git a/include/uapi/linux/usb/tmc.h b/include/uapi/linux/usb/tmc.h index c045ae12556c..2e59d9c50b8d 100644 --- a/include/uapi/linux/usb/tmc.h +++ b/include/uapi/linux/usb/tmc.h @@ -2,12 +2,14 @@ * Copyright (C) 2007 Stefan Kopp, Gechingen, Germany * Copyright (C) 2008 Novell, Inc. * Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de> + * Copyright (C) 2015 Dave Penkler <dpenkler@gmail.com> * * This file holds USB constants defined by the USB Device Class - * Definition for Test and Measurement devices published by the USB-IF. + * and USB488 Subclass Definitions for Test and Measurement devices + * published by the USB-IF. * - * It also has the ioctl definitions for the usbtmc kernel driver that - * userspace needs to know about. + * It also has the ioctl and capability definitions for the + * usbtmc kernel driver that userspace needs to know about. 
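Not part of the diff: a hedged sketch of exercising the USB488 ioctls that this patch adds further down in the usbtmc header. The /dev/usbtmc0 node is an assumption; the macros are the ones defined below.

#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/usb/tmc.h>

/* Sketch: read the driver-encoded USB488 capabilities, then try to
 * fetch the instrument's status byte. */
int main(void)
{
	unsigned char caps = 0, stb = 0;
	int fd = open("/dev/usbtmc0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, USBTMC488_IOCTL_GET_CAPS, &caps) == 0)
		printf("usb488 capabilities: 0x%02x\n", caps);
	if (ioctl(fd, USBTMC488_IOCTL_READ_STB, &stb) == 0)
		printf("status byte: 0x%02x\n", stb);
	return 0;
}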
*/ #ifndef __LINUX_USB_TMC_H @@ -30,6 +32,10 @@ #define USBTMC_REQUEST_CHECK_CLEAR_STATUS 6 #define USBTMC_REQUEST_GET_CAPABILITIES 7 #define USBTMC_REQUEST_INDICATOR_PULSE 64 +#define USBTMC488_REQUEST_READ_STATUS_BYTE 128 +#define USBTMC488_REQUEST_REN_CONTROL 160 +#define USBTMC488_REQUEST_GOTO_LOCAL 161 +#define USBTMC488_REQUEST_LOCAL_LOCKOUT 162 /* Request values for USBTMC driver's ioctl entry point */ #define USBTMC_IOC_NR 91 @@ -39,5 +45,22 @@ #define USBTMC_IOCTL_ABORT_BULK_IN _IO(USBTMC_IOC_NR, 4) #define USBTMC_IOCTL_CLEAR_OUT_HALT _IO(USBTMC_IOC_NR, 6) #define USBTMC_IOCTL_CLEAR_IN_HALT _IO(USBTMC_IOC_NR, 7) +#define USBTMC488_IOCTL_GET_CAPS _IOR(USBTMC_IOC_NR, 17, unsigned char) +#define USBTMC488_IOCTL_READ_STB _IOR(USBTMC_IOC_NR, 18, unsigned char) +#define USBTMC488_IOCTL_REN_CONTROL _IOW(USBTMC_IOC_NR, 19, unsigned char) +#define USBTMC488_IOCTL_GOTO_LOCAL _IO(USBTMC_IOC_NR, 20) +#define USBTMC488_IOCTL_LOCAL_LOCKOUT _IO(USBTMC_IOC_NR, 21) + +/* Driver encoded usb488 capabilities */ +#define USBTMC488_CAPABILITY_TRIGGER 1 +#define USBTMC488_CAPABILITY_SIMPLE 2 +#define USBTMC488_CAPABILITY_REN_CONTROL 2 +#define USBTMC488_CAPABILITY_GOTO_LOCAL 2 +#define USBTMC488_CAPABILITY_LOCAL_LOCKOUT 2 +#define USBTMC488_CAPABILITY_488_DOT_2 4 +#define USBTMC488_CAPABILITY_DT1 16 +#define USBTMC488_CAPABILITY_RL1 32 +#define USBTMC488_CAPABILITY_SR1 64 +#define USBTMC488_CAPABILITY_FULL_SCPI 128 #endif diff --git a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h index 019ba1e0799a..a8653a6f40df 100644 --- a/include/uapi/linux/usbdevice_fs.h +++ b/include/uapi/linux/usbdevice_fs.h @@ -134,6 +134,8 @@ struct usbdevfs_hub_portinfo { #define USBDEVFS_CAP_NO_PACKET_SIZE_LIM 0x04 #define USBDEVFS_CAP_BULK_SCATTER_GATHER 0x08 #define USBDEVFS_CAP_REAP_AFTER_DISCONNECT 0x10 +#define USBDEVFS_CAP_MMAP 0x20 +#define USBDEVFS_CAP_DROP_PRIVILEGES 0x40 /* USBDEVFS_DISCONNECT_CLAIM flags & struct */ @@ -187,5 +189,6 @@ struct usbdevfs_streams { #define USBDEVFS_DISCONNECT_CLAIM _IOR('U', 27, struct usbdevfs_disconnect_claim) #define USBDEVFS_ALLOC_STREAMS _IOR('U', 28, struct usbdevfs_streams) #define USBDEVFS_FREE_STREAMS _IOR('U', 29, struct usbdevfs_streams) +#define USBDEVFS_DROP_PRIVILEGES _IOW('U', 30, __u32) #endif /* _UAPI_LINUX_USBDEVICE_FS_H */ diff --git a/include/uapi/linux/v4l2-common.h b/include/uapi/linux/v4l2-common.h index 15273987093e..5b3f685a2d50 100644 --- a/include/uapi/linux/v4l2-common.h +++ b/include/uapi/linux/v4l2-common.h @@ -10,19 +10,43 @@ * Copyright (C) 2012 Nokia Corporation * Contact: Sakari Ailus <sakari.ailus@iki.fi> * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA + * Alternatively you can redistribute this file under the terms of the + * BSD license as stated below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. The names of its contributors may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index 2d225bcdb831..b6a357a5f053 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -390,6 +390,7 @@ enum v4l2_mpeg_video_multi_slice_mode { #define V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER (V4L2_CID_MPEG_BASE+226) #define V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE (V4L2_CID_MPEG_BASE+227) #define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_MPEG_BASE+228) +#define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_MPEG_BASE+229) #define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_MPEG_BASE+300) #define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_MPEG_BASE+301) @@ -912,8 +913,18 @@ enum v4l2_dv_rgb_range { V4L2_DV_RGB_RANGE_FULL = 2, }; +#define V4L2_CID_DV_TX_IT_CONTENT_TYPE (V4L2_CID_DV_CLASS_BASE + 6) +enum v4l2_dv_it_content_type { + V4L2_DV_IT_CONTENT_TYPE_GRAPHICS = 0, + V4L2_DV_IT_CONTENT_TYPE_PHOTO = 1, + V4L2_DV_IT_CONTENT_TYPE_CINEMA = 2, + V4L2_DV_IT_CONTENT_TYPE_GAME = 3, + V4L2_DV_IT_CONTENT_TYPE_NO_ITC = 4, +}; + #define V4L2_CID_DV_RX_POWER_PRESENT (V4L2_CID_DV_CLASS_BASE + 100) #define V4L2_CID_DV_RX_RGB_RANGE (V4L2_CID_DV_CLASS_BASE + 101) +#define V4L2_CID_DV_RX_IT_CONTENT_TYPE (V4L2_CID_DV_CLASS_BASE + 102) #define V4L2_CID_FM_RX_CLASS_BASE (V4L2_CTRL_CLASS_FM_RX | 0x900) #define V4L2_CID_FM_RX_CLASS (V4L2_CTRL_CLASS_FM_RX | 1) diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 7d7a4c6f2090..255a2113f53c 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -59,6 +59,33 @@ #define VFIO_TYPE (';') #define VFIO_BASE 100 +/* + * For extension of INFO ioctls, VFIO makes use of a capability chain + * designed after PCI/e capabilities. A flag bit indicates whether + * this capability chain is supported and a field defined in the fixed + * structure defines the offset of the first capability in the chain. + * This field is only valid when the corresponding bit in the flags + * bitmap is set. This offset field is relative to the start of the + * INFO buffer, as is the next field within each capability header. + * The id within the header is a shared address space per INFO ioctl, + * while the version field is specific to the capability id. The + * contents following the header are specific to the capability id. + */ +struct vfio_info_cap_header { + __u16 id; /* Identifies capability */ + __u16 version; /* Version specific to the capability ID */ + __u32 next; /* Offset of next capability */ +}; + +/* + * Callers of INFO ioctls passing insufficiently sized buffers will see + * the capability chain flag bit set, a zero value for the first capability + * offset (if available within the provided argsz), and argsz will be + * updated to report the necessary buffer size. For compatibility, the + * INFO ioctl will not report error in this case, but the capability chain + * will not be available. 
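Not part of the diff: a sketch of the two-pass pattern the VFIO comment above describes, re-issuing the INFO ioctl with the size the kernel reports and then walking the capability chain. The function name and the device fd handling are assumptions; the structures and flags are from this header.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Sketch: print the capability chain of one region, if any. */
static void dump_region_caps(int device_fd, __u32 index)
{
	struct vfio_region_info *info;
	struct vfio_info_cap_header *hdr;
	__u32 argsz = sizeof(*info), off;

	info = calloc(1, argsz);
	if (!info)
		return;
	info->argsz = argsz;
	info->index = index;
	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, info))
		goto out;

	if ((info->flags & VFIO_REGION_INFO_FLAG_CAPS) && info->argsz > argsz) {
		argsz = info->argsz;	/* size the kernel asked for */
		info = realloc(info, argsz);
		if (!info)
			return;
		memset(info, 0, argsz);
		info->argsz = argsz;
		info->index = index;
		if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, info))
			goto out;
	}

	if (info->flags & VFIO_REGION_INFO_FLAG_CAPS)
		for (off = info->cap_offset; off; off = hdr->next) {
			hdr = (void *)((char *)info + off);
			printf("cap id %u version %u\n", hdr->id, hdr->version);
		}
out:
	free(info);
}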
+ */ + /* -------- IOCTLs for VFIO file descriptor (/dev/vfio/vfio) -------- */ /** @@ -194,13 +221,73 @@ struct vfio_region_info { #define VFIO_REGION_INFO_FLAG_READ (1 << 0) /* Region supports read */ #define VFIO_REGION_INFO_FLAG_WRITE (1 << 1) /* Region supports write */ #define VFIO_REGION_INFO_FLAG_MMAP (1 << 2) /* Region supports mmap */ +#define VFIO_REGION_INFO_FLAG_CAPS (1 << 3) /* Info supports caps */ __u32 index; /* Region index */ - __u32 resv; /* Reserved for alignment */ + __u32 cap_offset; /* Offset within info struct of first cap */ __u64 size; /* Region size (bytes) */ __u64 offset; /* Region offset from start of device fd */ }; #define VFIO_DEVICE_GET_REGION_INFO _IO(VFIO_TYPE, VFIO_BASE + 8) +/* + * The sparse mmap capability allows finer granularity of specifying areas + * within a region with mmap support. When specified, the user should only + * mmap the offset ranges specified by the areas array. mmaps outside of the + * areas specified may fail (such as the range covering a PCI MSI-X table) or + * may result in improper device behavior. + * + * The structures below define version 1 of this capability. + */ +#define VFIO_REGION_INFO_CAP_SPARSE_MMAP 1 + +struct vfio_region_sparse_mmap_area { + __u64 offset; /* Offset of mmap'able area within region */ + __u64 size; /* Size of mmap'able area */ +}; + +struct vfio_region_info_cap_sparse_mmap { + struct vfio_info_cap_header header; + __u32 nr_areas; + __u32 reserved; + struct vfio_region_sparse_mmap_area areas[]; +}; + +/* + * The device specific type capability allows regions unique to a specific + * device or class of devices to be exposed. This helps solve the problem for + * vfio bus drivers of defining which region indexes correspond to which region + * on the device, without needing to resort to static indexes, as done by + * vfio-pci. For instance, if we were to go back in time, we might remove + * VFIO_PCI_VGA_REGION_INDEX and let vfio-pci simply define that all indexes + * greater than or equal to VFIO_PCI_NUM_REGIONS are device specific and we'd + * make a "VGA" device specific type to describe the VGA access space. This + * means that non-VGA devices wouldn't need to waste this index, and thus the + * address space associated with it due to implementation of device file + * descriptor offsets in vfio-pci. + * + * The current implementation is now part of the user ABI, so we can't use this + * for VGA, but there are other upcoming use cases, such as opregions for Intel + * IGD devices and framebuffers for vGPU devices. We missed VGA, but we'll + * use this for future additions. + * + * The structure below defines version 1 of this capability. + */ +#define VFIO_REGION_INFO_CAP_TYPE 2 + +struct vfio_region_info_cap_type { + struct vfio_info_cap_header header; + __u32 type; /* global per bus driver */ + __u32 subtype; /* type specific */ +}; + +#define VFIO_REGION_TYPE_PCI_VENDOR_TYPE (1 << 31) +#define VFIO_REGION_TYPE_PCI_VENDOR_MASK (0xffff) + +/* 8086 Vendor sub-types */ +#define VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION (1) +#define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG (2) +#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG (3) + /** * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9, * struct vfio_irq_info) @@ -336,7 +423,8 @@ enum { * between described ranges are unimplemented. */ VFIO_PCI_VGA_REGION_INDEX, - VFIO_PCI_NUM_REGIONS + VFIO_PCI_NUM_REGIONS = 9 /* Fixed user ABI, region indexes >=9 use */ + /* device specific cap to define content. 
*/ }; enum { diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 14cd5ebfee6d..e895975c5b0e 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -546,6 +546,10 @@ struct v4l2_pix_format { /* three non contiguous planes - Y, Cb, Cr */ #define V4L2_PIX_FMT_YUV420M v4l2_fourcc('Y', 'M', '1', '2') /* 12 YUV420 planar */ #define V4L2_PIX_FMT_YVU420M v4l2_fourcc('Y', 'M', '2', '1') /* 12 YVU420 planar */ +#define V4L2_PIX_FMT_YUV422M v4l2_fourcc('Y', 'M', '1', '6') /* 16 YUV422 planar */ +#define V4L2_PIX_FMT_YVU422M v4l2_fourcc('Y', 'M', '6', '1') /* 16 YVU422 planar */ +#define V4L2_PIX_FMT_YUV444M v4l2_fourcc('Y', 'M', '2', '4') /* 24 YUV444 planar */ +#define V4L2_PIX_FMT_YVU444M v4l2_fourcc('Y', 'M', '4', '2') /* 24 YVU444 planar */ /* Bayer formats - see http://www.siliconimaging.com/RGB%20Bayer.htm */ #define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */ @@ -621,6 +625,9 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_JPGL v4l2_fourcc('J', 'P', 'G', 'L') /* JPEG-Lite */ #define V4L2_PIX_FMT_SE401 v4l2_fourcc('S', '4', '0', '1') /* se401 janggu compressed rgb */ #define V4L2_PIX_FMT_S5C_UYVY_JPG v4l2_fourcc('S', '5', 'C', 'I') /* S5C73M3 interleaved UYVY/JPEG */ +#define V4L2_PIX_FMT_Y8I v4l2_fourcc('Y', '8', 'I', ' ') /* Greyscale 8-bit L/R interleaved */ +#define V4L2_PIX_FMT_Y12I v4l2_fourcc('Y', '1', '2', 'I') /* Greyscale 12-bit L/R interleaved */ +#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */ /* SDR formats - used only for Software Defined Radio devices */ #define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */ diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h index d7f1cbc3766c..343d7ddefe04 100644 --- a/include/uapi/linux/virtio_balloon.h +++ b/include/uapi/linux/virtio_balloon.h @@ -51,7 +51,8 @@ struct virtio_balloon_config { #define VIRTIO_BALLOON_S_MINFLT 3 /* Number of minor faults */ #define VIRTIO_BALLOON_S_MEMFREE 4 /* Total amount of free memory */ #define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */ -#define VIRTIO_BALLOON_S_NR 6 +#define VIRTIO_BALLOON_S_AVAIL 6 /* Available memory as in /proc */ +#define VIRTIO_BALLOON_S_NR 7 /* * Memory statistics structure. 
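Not part of the diff: a sketch of probing for one of the pixel formats added to videodev2.h above (the 16-bit depth format). The /dev/video0 path is an assumption; VIDIOC_ENUM_FMT and struct v4l2_fmtdesc are pre-existing parts of the same header.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Sketch: walk the capture format list and report whether the driver
 * exposes the new V4L2_PIX_FMT_Z16 depth format. */
int main(void)
{
	struct v4l2_fmtdesc fmt;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	while (ioctl(fd, VIDIOC_ENUM_FMT, &fmt) == 0) {
		if (fmt.pixelformat == V4L2_PIX_FMT_Z16)
			printf("depth (Z16) at index %u: %s\n",
			       fmt.index, (char *)fmt.description);
		fmt.index++;
	}
	return 0;
}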
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h index c19a5dc1531a..f7d7b6fec935 100644 --- a/include/uapi/rdma/rdma_netlink.h +++ b/include/uapi/rdma/rdma_netlink.h @@ -5,8 +5,8 @@ enum { RDMA_NL_RDMA_CM = 1, - RDMA_NL_NES, - RDMA_NL_C4IW, + RDMA_NL_IWCM, + RDMA_NL_RSVD, RDMA_NL_LS, /* RDMA Local Services */ RDMA_NL_NUM_CLIENTS }; diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h index 5a5fa4956ebd..7b7659a79ac4 100644 --- a/include/uapi/sound/asequencer.h +++ b/include/uapi/sound/asequencer.h @@ -25,7 +25,7 @@ #include <sound/asound.h> /** version of the sequencer */ -#define SNDRV_SEQ_VERSION SNDRV_PROTOCOL_VERSION (1, 0, 1) +#define SNDRV_SEQ_VERSION SNDRV_PROTOCOL_VERSION(1, 0, 2) /** * definition of sequencer event types @@ -357,7 +357,9 @@ struct snd_seq_client_info { unsigned char event_filter[32]; /* event filter bitmap */ int num_ports; /* RO: number of ports */ int event_lost; /* number of lost events */ - char reserved[64]; /* for future use */ + int card; /* RO: card number[kernel] */ + int pid; /* RO: pid[user] */ + char reserved[56]; /* for future use */ }; @@ -594,14 +596,8 @@ struct snd_seq_query_subs { #define SNDRV_SEQ_IOCTL_GET_QUEUE_STATUS _IOWR('S', 0x40, struct snd_seq_queue_status) #define SNDRV_SEQ_IOCTL_GET_QUEUE_TEMPO _IOWR('S', 0x41, struct snd_seq_queue_tempo) #define SNDRV_SEQ_IOCTL_SET_QUEUE_TEMPO _IOW ('S', 0x42, struct snd_seq_queue_tempo) -#define SNDRV_SEQ_IOCTL_GET_QUEUE_OWNER _IOWR('S', 0x43, struct snd_seq_queue_owner) -#define SNDRV_SEQ_IOCTL_SET_QUEUE_OWNER _IOW ('S', 0x44, struct snd_seq_queue_owner) #define SNDRV_SEQ_IOCTL_GET_QUEUE_TIMER _IOWR('S', 0x45, struct snd_seq_queue_timer) #define SNDRV_SEQ_IOCTL_SET_QUEUE_TIMER _IOW ('S', 0x46, struct snd_seq_queue_timer) -/* XXX -#define SNDRV_SEQ_IOCTL_GET_QUEUE_SYNC _IOWR('S', 0x53, struct snd_seq_queue_sync) -#define SNDRV_SEQ_IOCTL_SET_QUEUE_SYNC _IOW ('S', 0x54, struct snd_seq_queue_sync) -*/ #define SNDRV_SEQ_IOCTL_GET_QUEUE_CLIENT _IOWR('S', 0x49, struct snd_seq_queue_client) #define SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT _IOW ('S', 0x4a, struct snd_seq_queue_client) #define SNDRV_SEQ_IOCTL_GET_CLIENT_POOL _IOWR('S', 0x4b, struct snd_seq_client_pool) diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h index a82108e5d1c0..67bf49d8c944 100644 --- a/include/uapi/sound/asound.h +++ b/include/uapi/sound/asound.h @@ -23,7 +23,11 @@ #ifndef _UAPI__SOUND_ASOUND_H #define _UAPI__SOUND_ASOUND_H +#if defined(__KERNEL__) || defined(__linux__) #include <linux/types.h> +#else +#include <sys/ioctl.h> +#endif #ifndef __KERNEL__ #include <stdlib.h> diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h index 252ffd4801ef..4f20dbc42910 100644 --- a/include/xen/interface/io/netif.h +++ b/include/xen/interface/io/netif.h @@ -1,16 +1,34 @@ /****************************************************************************** - * netif.h + * xen_netif.h * * Unified network-device I/O interface for Xen guest OSes. 
* + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * * Copyright (c) 2003-2004, Keir Fraser */ -#ifndef __XEN_PUBLIC_IO_NETIF_H__ -#define __XEN_PUBLIC_IO_NETIF_H__ +#ifndef __XEN_PUBLIC_IO_XEN_NETIF_H__ +#define __XEN_PUBLIC_IO_XEN_NETIF_H__ -#include <xen/interface/io/ring.h> -#include <xen/interface/grant_table.h> +#include "ring.h" +#include "../grant_table.h" /* * Older implementation of Xen network frontend / backend has an @@ -38,10 +56,10 @@ * that it cannot safely queue packets (as it may not be kicked to send them). */ - /* +/* * "feature-split-event-channels" is introduced to separate guest TX - * and RX notificaion. Backend either doesn't support this feature or - * advertise it via xenstore as 0 (disabled) or 1 (enabled). + * and RX notification. Backend either doesn't support this feature or + * advertises it via xenstore as 0 (disabled) or 1 (enabled). * * To make use of this feature, frontend should allocate two event * channels for TX and RX, advertise them to backend as @@ -118,151 +136,804 @@ */ /* - * This is the 'wire' format for packets: - * Request 1: xen_netif_tx_request -- XEN_NETTXF_* (any flags) - * [Request 2: xen_netif_extra_info] (only if request 1 has XEN_NETTXF_extra_info) - * [Request 3: xen_netif_extra_info] (only if request 2 has XEN_NETIF_EXTRA_MORE) - * Request 4: xen_netif_tx_request -- XEN_NETTXF_more_data - * Request 5: xen_netif_tx_request -- XEN_NETTXF_more_data + * "feature-multicast-control" and "feature-dynamic-multicast-control" + * advertise the capability to filter ethernet multicast packets in the + * backend. If the frontend wishes to take advantage of this feature then + * it may set "request-multicast-control". If the backend only advertises + * "feature-multicast-control" then "request-multicast-control" must be set + * before the frontend moves into the connected state. The backend will + * sample the value on this state transition and any subsequent change in + * value will have no effect. However, if the backend also advertises + * "feature-dynamic-multicast-control" then "request-multicast-control" + * may be set by the frontend at any time. In this case, the backend will + * watch the value and re-sample on watch events. + * + * If the sampled value of "request-multicast-control" is set then the + * backend transmit side should no longer flood multicast packets to the + * frontend, it should instead drop any multicast packet that does not + * match in a filter list. 
+ * The list is amended by the frontend by sending dummy transmit requests + * containing XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL} extra-info fragments as + * specified below. + * Note that the filter list may be amended even if the sampled value of + * "request-multicast-control" is not set, however the filter should only + * be applied if it is set. + */ + +/* + * Control ring + * ============ + * + * Some features, such as hashing (detailed below), require a + * significant amount of out-of-band data to be passed from frontend to + * backend. Use of xenstore is not suitable for large quantities of data + * because of quota limitations and so a dedicated 'control ring' is used. + * The ability of the backend to use a control ring is advertised by + * setting: + * + * /local/domain/X/backend/<domid>/<vif>/feature-ctrl-ring = "1" + * + * The frontend provides a control ring to the backend by setting: + * + * /local/domain/<domid>/device/vif/<vif>/ctrl-ring-ref = <gref> + * /local/domain/<domid>/device/vif/<vif>/event-channel-ctrl = <port> + * + * where <gref> is the grant reference of the shared page used to + * implement the control ring and <port> is an event channel to be used + * as a mailbox interrupt. These keys must be set before the frontend + * moves into the connected state. + * + * The control ring uses a fixed request/response message size and is + * balanced (i.e. one request to one response), so operationally it is much + * the same as a transmit or receive ring. + * Note that there is no requirement that responses are issued in the same + * order as requests. + */ + +/* + * Hash types + * ========== + * + * For the purposes of the definitions below, 'Packet[]' is an array of + * octets containing an IP packet without options, 'Array[X..Y]' means a + * sub-array of 'Array' containing bytes X thru Y inclusive, and '+' is + * used to indicate concatenation of arrays. 
+ */ + +/* + * A hash calculated over an IP version 4 header as follows: + * + * Buffer[0..8] = Packet[12..15] (source address) + + * Packet[16..19] (destination address) + * + * Result = Hash(Buffer, 8) + */ +#define _XEN_NETIF_CTRL_HASH_TYPE_IPV4 0 +#define XEN_NETIF_CTRL_HASH_TYPE_IPV4 \ + (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV4) + +/* + * A hash calculated over an IP version 4 header and TCP header as + * follows: + * + * Buffer[0..12] = Packet[12..15] (source address) + + * Packet[16..19] (destination address) + + * Packet[20..21] (source port) + + * Packet[22..23] (destination port) + * + * Result = Hash(Buffer, 12) + */ +#define _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP 1 +#define XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP \ + (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP) + +/* + * A hash calculated over an IP version 6 header as follows: + * + * Buffer[0..32] = Packet[8..23] (source address ) + + * Packet[24..39] (destination address) + * + * Result = Hash(Buffer, 32) + */ +#define _XEN_NETIF_CTRL_HASH_TYPE_IPV6 2 +#define XEN_NETIF_CTRL_HASH_TYPE_IPV6 \ + (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV6) + +/* + * A hash calculated over an IP version 6 header and TCP header as + * follows: + * + * Buffer[0..36] = Packet[8..23] (source address) + + * Packet[24..39] (destination address) + + * Packet[40..41] (source port) + + * Packet[42..43] (destination port) + * + * Result = Hash(Buffer, 36) + */ +#define _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP 3 +#define XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP \ + (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP) + +/* + * Hash algorithms + * =============== + */ + +#define XEN_NETIF_CTRL_HASH_ALGORITHM_NONE 0 + +/* + * Toeplitz hash: + */ + +#define XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ 1 + +/* + * This algorithm uses a 'key' as well as the data buffer itself. + * (Buffer[] and Key[] are treated as shift-registers where the MSB of + * Buffer/Key[0] is considered 'left-most' and the LSB of Buffer/Key[N-1] + * is the 'right-most'). + * + * Value = 0 + * For number of bits in Buffer[] + * If (left-most bit of Buffer[] is 1) + * Value ^= left-most 32 bits of Key[] + * Key[] << 1 + * Buffer[] << 1 + * + * The code below is provided for convenience where an operating system + * does not already provide an implementation. + */ +#ifdef XEN_NETIF_DEFINE_TOEPLITZ +static uint32_t xen_netif_toeplitz_hash(const uint8_t *key, + unsigned int keylen, + const uint8_t *buf, unsigned int buflen) +{ + unsigned int keyi, bufi; + uint64_t prefix = 0; + uint64_t hash = 0; + + /* Pre-load prefix with the first 8 bytes of the key */ + for (keyi = 0; keyi < 8; keyi++) { + prefix <<= 8; + prefix |= (keyi < keylen) ? key[keyi] : 0; + } + + for (bufi = 0; bufi < buflen; bufi++) { + uint8_t byte = buf[bufi]; + unsigned int bit; + + for (bit = 0; bit < 8; bit++) { + if (byte & 0x80) + hash ^= prefix; + prefix <<= 1; + byte <<= 1; + } + + /* + * 'prefix' has now been left-shifted by 8, so + * OR in the next byte. + */ + prefix |= (keyi < keylen) ? key[keyi] : 0; + keyi++; + } + + /* The valid part of the hash is in the upper 32 bits. 
*/ + return hash >> 32; +} +#endif /* XEN_NETIF_DEFINE_TOEPLITZ */ + +/* + * Control requests (struct xen_netif_ctrl_request) + * ================================================ + * + * All requests have the following format: + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | id | type | data[0] | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | data[1] | data[2] | + * +-----+-----+-----+-----+-----------------------+ + * + * id: the request identifier, echoed in response. + * type: the type of request (see below) + * data[]: any data associated with the request (determined by type) + */ + +struct xen_netif_ctrl_request { + uint16_t id; + uint16_t type; + +#define XEN_NETIF_CTRL_TYPE_INVALID 0 +#define XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS 1 +#define XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS 2 +#define XEN_NETIF_CTRL_TYPE_SET_HASH_KEY 3 +#define XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE 4 +#define XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE 5 +#define XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING 6 +#define XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM 7 + + uint32_t data[3]; +}; + +/* + * Control responses (struct xen_netif_ctrl_response) + * ================================================== + * + * All responses have the following format: + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | id | type | status | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | data | + * +-----+-----+-----+-----+ + * + * id: the corresponding request identifier + * type: the type of the corresponding request + * status: the status of request processing + * data: any data associated with the response (determined by type and + * status) + */ + +struct xen_netif_ctrl_response { + uint16_t id; + uint16_t type; + uint32_t status; + +#define XEN_NETIF_CTRL_STATUS_SUCCESS 0 +#define XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED 1 +#define XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER 2 +#define XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW 3 + + uint32_t data; +}; + +/* + * Control messages + * ================ + * + * XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM + * -------------------------------------- + * + * This is sent by the frontend to set the desired hash algorithm. + * + * Request: + * + * type = XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM + * data[0] = a XEN_NETIF_CTRL_HASH_ALGORITHM_* value + * data[1] = 0 + * data[2] = 0 + * + * Response: + * + * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not + * supported + * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - The algorithm is not + * supported + * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful + * + * NOTE: Setting data[0] to XEN_NETIF_CTRL_HASH_ALGORITHM_NONE disables + * hashing and the backend is free to choose how it steers packets + * to queues (which is the default behaviour). + * + * XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS + * ---------------------------------- + * + * This is sent by the frontend to query the types of hash supported by + * the backend. + * + * Request: + * + * type = XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS + * data[0] = 0 + * data[1] = 0 + * data[2] = 0 + * + * Response: + * + * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not supported + * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful + * data = supported hash types (if operation was successful) + * + * NOTE: A valid hash algorithm must be selected before this operation can + * succeed. 
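Not part of the interface definition: a minimal sketch of how a frontend might fill one control-ring request slot for the messages described above and check the matching response. Ring plumbing (slot allocation, RING_PUSH_REQUESTS, the event-channel kick) is deliberately omitted; the helper names are hypothetical.

#include <xen/interface/io/netif.h>

/* Sketch: request the Toeplitz algorithm via the control ring. */
static void fill_set_hash_algorithm(struct xen_netif_ctrl_request *req,
				    uint16_t id)
{
	req->id = id;		/* echoed back in the response */
	req->type = XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM;
	req->data[0] = XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
	req->data[1] = 0;
	req->data[2] = 0;
}

/* Sketch: true if the backend accepted the request above. */
static int ctrl_response_ok(const struct xen_netif_ctrl_response *rsp,
			    uint16_t id)
{
	return rsp->id == id &&
	       rsp->status == XEN_NETIF_CTRL_STATUS_SUCCESS;
}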
+ * + * XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS + * ---------------------------------- + * + * This is sent by the frontend to set the types of hash that the backend + * should calculate. (See above for hash type definitions). + * Note that the 'maximal' type of hash should always be chosen. For + * example, if the frontend sets both IPV4 and IPV4_TCP hash types then + * the latter hash type should be calculated for any TCP packet and the + * former only calculated for non-TCP packets. + * + * Request: + * + * type = XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS + * data[0] = bitwise OR of XEN_NETIF_CTRL_HASH_TYPE_* values + * data[1] = 0 + * data[2] = 0 + * + * Response: + * + * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not + * supported + * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - One or more flag + * value is invalid or + * unsupported + * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful + * data = 0 + * + * NOTE: A valid hash algorithm must be selected before this operation can + * succeed. + * Also, setting data[0] to zero disables hashing and the backend + * is free to choose how it steers packets to queues. + * + * XEN_NETIF_CTRL_TYPE_SET_HASH_KEY + * -------------------------------- + * + * This is sent by the frontend to set the key of the hash if the algorithm + * requires it. (See hash algorithms above). + * + * Request: + * + * type = XEN_NETIF_CTRL_TYPE_SET_HASH_KEY + * data[0] = grant reference of page containing the key (assumed to + * start at beginning of grant) + * data[1] = size of key in octets + * data[2] = 0 + * + * Response: + * + * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not + * supported + * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Key size is invalid + * XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW - Key size is larger + * than the backend + * supports + * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful + * data = 0 + * + * NOTE: Any key octets not specified are assumed to be zero (the key + * is assumed to be empty by default) and specifying a new key + * invalidates any previous key, hence specifying a key size of + * zero will clear the key (which ensures that the calculated hash + * will always be zero). + * The maximum size of key is algorithm and backend specific, but + * is also limited by the single grant reference. + * The grant reference may be read-only and must remain valid until + * the response has been processed. + * + * XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE + * ----------------------------------------- + * + * This is sent by the frontend to query the maximum size of mapping + * table supported by the backend. The size is specified in terms of + * table entries. + * + * Request: + * + * type = XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE + * data[0] = 0 + * data[1] = 0 + * data[2] = 0 + * + * Response: + * + * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not supported + * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful + * data = maximum number of entries allowed in the mapping table + * (if operation was successful) or zero if a mapping table is + * not supported (i.e. hash mapping is done only by modular + * arithmetic). + * + * XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE + * ------------------------------------- + * + * This is sent by the frontend to set the actual size of the mapping + * table to be used by the backend. The size is specified in terms of + * table entries. + * Any previous table is invalidated by this message and any new table + * is assumed to be zero filled. 
+ * + * Request: + * + * type = XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE + * data[0] = number of entries in mapping table + * data[1] = 0 + * data[2] = 0 + * + * Response: + * + * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not + * supported + * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Table size is invalid + * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful + * data = 0 + * + * NOTE: Setting data[0] to 0 means that hash mapping should be done + * using modular arithmetic. + * + * XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING + * ------------------------------------ + * + * This is sent by the frontend to set the content of the table mapping + * hash value to queue number. The backend should calculate the hash from + * the packet header, use it as an index into the table (modulo the size + * of the table) and then steer the packet to the queue number found at + * that index. + * + * Request: + * + * type = XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING + * data[0] = grant reference of page containing the mapping (sub-)table + * (assumed to start at beginning of grant) + * data[1] = size of (sub-)table in entries + * data[2] = offset, in entries, of sub-table within overall table + * + * Response: + * + * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not + * supported + * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Table size or content + * is invalid + * XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW - Table size is larger + * than the backend + * supports + * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful + * data = 0 + * + * NOTE: The overall table has the following format: + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | mapping[0] | mapping[1] | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | . | + * | . | + * | . | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | mapping[N-2] | mapping[N-1] | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * + * where N is specified by a XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE + * message and each mapping must specify a queue between 0 and + * "multi-queue-num-queues" (see above). + * The backend may support a mapping table larger than can be + * mapped by a single grant reference. Thus sub-tables within a + * larger table can be individually set by sending multiple messages + * with differing offset values. Specifying a new sub-table does not + * invalidate any table data outside that range. + * The grant reference may be read-only and must remain valid until + * the response has been processed. + */ + +DEFINE_RING_TYPES(xen_netif_ctrl, + struct xen_netif_ctrl_request, + struct xen_netif_ctrl_response); + +/* + * Guest transmit + * ============== + * + * This is the 'wire' format for transmit (frontend -> backend) packets: + * + * Fragment 1: xen_netif_tx_request_t - flags = XEN_NETTXF_* + * size = total packet size + * [Extra 1: xen_netif_extra_info_t] - (only if fragment 1 flags include + * XEN_NETTXF_extra_info) + * ... + * [Extra N: xen_netif_extra_info_t] - (only if extra N-1 flags include + * XEN_NETIF_EXTRA_MORE) * ... - * Request N: xen_netif_tx_request -- 0 + * Fragment N: xen_netif_tx_request_t - (only if fragment N-1 flags include + * XEN_NETTXF_more_data - flags on preceding + * extras are not relevant here) + * flags = 0 + * size = fragment size + * + * NOTE: + * + * This format is slightly different from that used for receive + * (backend -> frontend) packets.
Specifically, in a multi-fragment + * packet the actual size of fragment 1 can only be determined by + * subtracting the sizes of fragments 2..N from the total packet size. + * + * Ring slot size is 12 octets; however, not all request/response + * structs use the full size. + * + * tx request data (xen_netif_tx_request_t) + * ------------------------------------ + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | grant ref | offset | flags | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | id | size | + * +-----+-----+-----+-----+ + * + * grant ref: Reference to buffer page. + * offset: Offset within buffer page. + * flags: XEN_NETTXF_*. + * id: request identifier, echoed in response. + * size: packet size in bytes. + * + * tx response (xen_netif_tx_response_t) + * --------------------------------- + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | id | status | unused | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | unused | + * +-----+-----+-----+-----+ + * + * id: reflects id in transmit request + * status: XEN_NETIF_RSP_* + * + * Guest receive + * ============= + * + * This is the 'wire' format for receive (backend -> frontend) packets: + * + * Fragment 1: xen_netif_rx_request_t - flags = XEN_NETRXF_* + * size = fragment size + * [Extra 1: xen_netif_extra_info_t] - (only if fragment 1 flags include + * XEN_NETRXF_extra_info) + * ... + * [Extra N: xen_netif_extra_info_t] - (only if extra N-1 flags include + * XEN_NETIF_EXTRA_MORE) + * ... + * Fragment N: xen_netif_rx_request_t - (only if fragment N-1 flags include + * XEN_NETRXF_more_data - flags on preceding + * extras are not relevant here) + * flags = 0 + * size = fragment size + * + * NOTE: + * + * This format is slightly different from that used for transmit + * (frontend -> backend) packets. Specifically, in a multi-fragment + * packet the size of the packet can only be determined by summing the + * sizes of fragments 1..N. + * + * Ring slot size is 8 octets. + * + * rx request (xen_netif_rx_request_t) + * ------------------------------- + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | id | pad | gref | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * + * id: request identifier, echoed in response. + * gref: reference to incoming granted frame. + * + * rx response (xen_netif_rx_response_t) + * --------------------------------- + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | id | offset | flags | status | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * + * id: reflects id in receive request + * offset: offset in page of start of received packet + * flags: XEN_NETRXF_* + * status: -ve: XEN_NETIF_RSP_*; +ve: Rx'ed pkt size. + * + * NOTE: Historically, to support GSO on the frontend receive side, Linux + * netfront does not make use of the rx response id (because, as + * described below, extra info structures overlay the id field). + * Instead it assumes that responses always appear in the same ring + * slot as their corresponding request. Thus, to maintain + * compatibility, backends must make sure this is the case. + * + * Extra Info + * ========== + * + * Can be present if initial request or response has XEN_NET{T,R}XF_extra_info, + * or previous extra request has XEN_NETIF_EXTRA_MORE. + * + * The struct therefore needs to fit into either a tx or rx slot and + * is therefore limited to 8 octets.
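As a concrete illustration of the transmit format and of how an extra info slot overlays a ring slot, the sketch below is illustrative only and not part of this header: it relies on struct xen_netif_tx_request, struct xen_netif_extra_info and the XEN_NETTXF_*/XEN_NETIF_GSO_* definitions further down in this file, and it omits checksum flags and error handling.

/*
 * Illustrative sketch only: fill two consecutive tx ring slots with a
 * single-fragment GSO packet.  slot0/slot1 are assumed to be
 * consecutive entries obtained from the tx ring by the frontend; the
 * extra info overlays slot1 in place of a further xen_netif_tx_request.
 */
static void example_queue_gso_tx(struct xen_netif_tx_request *slot0,
				 struct xen_netif_tx_request *slot1,
				 grant_ref_t gref, uint16_t pkt_size,
				 uint16_t mss)
{
	struct xen_netif_extra_info *extra;

	/* Fragment 1: carries the *total* packet size. */
	slot0->gref   = gref;
	slot0->offset = 0;
	slot0->flags  = XEN_NETTXF_extra_info;	/* an extra slot follows */
	slot0->id     = 0;
	slot0->size   = pkt_size;

	/* Extra 1: overlays the next ring slot. */
	extra = (struct xen_netif_extra_info *)slot1;
	extra->type  = XEN_NETIF_EXTRA_TYPE_GSO;
	extra->flags = 0;			/* no XEN_NETIF_EXTRA_FLAG_MORE */
	extra->u.gso.size     = mss;		/* max payload per segment */
	extra->u.gso.type     = XEN_NETIF_GSO_TYPE_TCPV4;
	extra->u.gso.pad      = 0;
	extra->u.gso.features = 0;
}

Had the packet spanned more fragments, each additional slot would be a further xen_netif_tx_request with XEN_NETTXF_more_data set on all but the last, and fragment 1's own size would be recovered by subtracting the later fragment sizes from the total, as noted above.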
+ * + * NOTE: Because extra info data overlays the usual request/response + * structures, there is no id information in the opposite direction. + * So, if an extra info overlays an rx response the frontend can + * assume that it is in the same ring slot as the request that was + * consumed to make the slot available, and the backend must ensure + * this assumption is true. + * + * extra info (xen_netif_extra_info_t) + * ------------------------------- + * + * General format: + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * |type |flags| type specific data | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * | padding for tx | + * +-----+-----+-----+-----+ + * + * type: XEN_NETIF_EXTRA_TYPE_* + * flags: XEN_NETIF_EXTRA_FLAG_* + * padding for tx: present only in the tx case due to the 8 octet limit + * imposed by the rx case. Not shown in type specific entries + * below. + * + * XEN_NETIF_EXTRA_TYPE_GSO: + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * |type |flags| size |type | pad | features | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * + * type: Must be XEN_NETIF_EXTRA_TYPE_GSO + * flags: XEN_NETIF_EXTRA_FLAG_* + * size: Maximum payload size of each segment. For example, + * for TCP this is just the path MSS. + * type: XEN_NETIF_GSO_TYPE_*: This determines the protocol of + * the packet and any extra features required to segment the + * packet properly. + * features: XEN_NETIF_GSO_FEAT_*: This specifies any extra GSO + * features required to process this packet, such as ECN + * support for TCPv4. + * + * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}: + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * |type |flags| addr | + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * + * type: Must be XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL} + * flags: XEN_NETIF_EXTRA_FLAG_* + * addr: address to add/remove + * + * XEN_NETIF_EXTRA_TYPE_HASH: + * + * A backend that supports Toeplitz hashing is assumed to accept + * this type of extra info in transmit packets. + * A frontend that enables hashing is assumed to accept + * this type of extra info in receive packets. + * + * 0 1 2 3 4 5 6 7 octet + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * |type |flags|htype| alg |LSB ---- value ---- MSB| + * +-----+-----+-----+-----+-----+-----+-----+-----+ + * + * type: Must be XEN_NETIF_EXTRA_TYPE_HASH + * flags: XEN_NETIF_EXTRA_FLAG_* + * htype: Hash type (one of _XEN_NETIF_CTRL_HASH_TYPE_* - see above) + * alg: The algorithm used to calculate the hash (one of + * XEN_NETIF_CTRL_HASH_ALGORITHM_* - see above) + * value: Hash value */ /* Protocol checksum field is blank in the packet (hardware offload)? */ -#define _XEN_NETTXF_csum_blank (0) -#define XEN_NETTXF_csum_blank (1U<<_XEN_NETTXF_csum_blank) +#define _XEN_NETTXF_csum_blank (0) +#define XEN_NETTXF_csum_blank (1U<<_XEN_NETTXF_csum_blank) /* Packet data has been validated against protocol checksum. */ -#define _XEN_NETTXF_data_validated (1) -#define XEN_NETTXF_data_validated (1U<<_XEN_NETTXF_data_validated) +#define _XEN_NETTXF_data_validated (1) +#define XEN_NETTXF_data_validated (1U<<_XEN_NETTXF_data_validated) /* Packet continues in the next request descriptor. */ -#define _XEN_NETTXF_more_data (2) -#define XEN_NETTXF_more_data (1U<<_XEN_NETTXF_more_data) +#define _XEN_NETTXF_more_data (2) +#define XEN_NETTXF_more_data (1U<<_XEN_NETTXF_more_data) /* Packet to be followed by extra descriptor(s).
*/ -#define _XEN_NETTXF_extra_info (3) -#define XEN_NETTXF_extra_info (1U<<_XEN_NETTXF_extra_info) +#define _XEN_NETTXF_extra_info (3) +#define XEN_NETTXF_extra_info (1U<<_XEN_NETTXF_extra_info) #define XEN_NETIF_MAX_TX_SIZE 0xFFFF struct xen_netif_tx_request { - grant_ref_t gref; /* Reference to buffer page */ - uint16_t offset; /* Offset within buffer page */ - uint16_t flags; /* XEN_NETTXF_* */ - uint16_t id; /* Echoed in response message. */ - uint16_t size; /* Packet size in bytes. */ + grant_ref_t gref; + uint16_t offset; + uint16_t flags; + uint16_t id; + uint16_t size; }; /* Types of xen_netif_extra_info descriptors. */ -#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ -#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ -#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */ -#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */ -#define XEN_NETIF_EXTRA_TYPE_MAX (4) +#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ +#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ +#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */ +#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */ +#define XEN_NETIF_EXTRA_TYPE_HASH (4) /* u.hash */ +#define XEN_NETIF_EXTRA_TYPE_MAX (5) -/* xen_netif_extra_info flags. */ -#define _XEN_NETIF_EXTRA_FLAG_MORE (0) -#define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE) +/* xen_netif_extra_info_t flags. */ +#define _XEN_NETIF_EXTRA_FLAG_MORE (0) +#define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE) /* GSO types */ -#define XEN_NETIF_GSO_TYPE_NONE (0) -#define XEN_NETIF_GSO_TYPE_TCPV4 (1) -#define XEN_NETIF_GSO_TYPE_TCPV6 (2) +#define XEN_NETIF_GSO_TYPE_NONE (0) +#define XEN_NETIF_GSO_TYPE_TCPV4 (1) +#define XEN_NETIF_GSO_TYPE_TCPV6 (2) /* - * This structure needs to fit within both netif_tx_request and - * netif_rx_response for compatibility. + * This structure needs to fit within both xen_netif_tx_request_t and + * xen_netif_rx_response_t for compatibility. */ struct xen_netif_extra_info { - uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */ - uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */ - + uint8_t type; + uint8_t flags; union { struct { - /* - * Maximum payload size of each segment. For - * example, for TCP this is just the path MSS. - */ uint16_t size; - - /* - * GSO type. This determines the protocol of - * the packet and any extra features required - * to segment the packet properly. - */ - uint8_t type; /* XEN_NETIF_GSO_TYPE_* */ - - /* Future expansion. */ + uint8_t type; uint8_t pad; - - /* - * GSO features. This specifies any extra GSO - * features required to process this packet, - * such as ECN support for TCPv4. - */ - uint16_t features; /* XEN_NETIF_GSO_FEAT_* */ + uint16_t features; } gso; - struct { - uint8_t addr[6]; /* Address to add/remove. */ + uint8_t addr[6]; } mcast; - + struct { + uint8_t type; + uint8_t algorithm; + uint8_t value[4]; + } hash; uint16_t pad[3]; } u; }; struct xen_netif_tx_response { uint16_t id; - int16_t status; /* XEN_NETIF_RSP_* */ + int16_t status; }; struct xen_netif_rx_request { - uint16_t id; /* Echoed in response message. */ - grant_ref_t gref; /* Reference to incoming granted frame */ + uint16_t id; /* Echoed in response message. */ + uint16_t pad; + grant_ref_t gref; }; /* Packet data has been validated against protocol checksum. 
*/ -#define _XEN_NETRXF_data_validated (0) -#define XEN_NETRXF_data_validated (1U<<_XEN_NETRXF_data_validated) +#define _XEN_NETRXF_data_validated (0) +#define XEN_NETRXF_data_validated (1U<<_XEN_NETRXF_data_validated) /* Protocol checksum field is blank in the packet (hardware offload)? */ -#define _XEN_NETRXF_csum_blank (1) -#define XEN_NETRXF_csum_blank (1U<<_XEN_NETRXF_csum_blank) +#define _XEN_NETRXF_csum_blank (1) +#define XEN_NETRXF_csum_blank (1U<<_XEN_NETRXF_csum_blank) /* Packet continues in the next request descriptor. */ -#define _XEN_NETRXF_more_data (2) -#define XEN_NETRXF_more_data (1U<<_XEN_NETRXF_more_data) +#define _XEN_NETRXF_more_data (2) +#define XEN_NETRXF_more_data (1U<<_XEN_NETRXF_more_data) /* Packet to be followed by extra descriptor(s). */ -#define _XEN_NETRXF_extra_info (3) -#define XEN_NETRXF_extra_info (1U<<_XEN_NETRXF_extra_info) +#define _XEN_NETRXF_extra_info (3) +#define XEN_NETRXF_extra_info (1U<<_XEN_NETRXF_extra_info) -/* GSO Prefix descriptor. */ -#define _XEN_NETRXF_gso_prefix (4) -#define XEN_NETRXF_gso_prefix (1U<<_XEN_NETRXF_gso_prefix) +/* Packet has GSO prefix. Deprecated but included for compatibility */ +#define _XEN_NETRXF_gso_prefix (4) +#define XEN_NETRXF_gso_prefix (1U<<_XEN_NETRXF_gso_prefix) struct xen_netif_rx_response { - uint16_t id; - uint16_t offset; /* Offset in page of start of received packet */ - uint16_t flags; /* XEN_NETRXF_* */ - int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */ + uint16_t id; + uint16_t offset; + uint16_t flags; + int16_t status; }; /* - * Generate netif ring structures and types. + * Generate xen_netif ring structures and types. */ -DEFINE_RING_TYPES(xen_netif_tx, - struct xen_netif_tx_request, +DEFINE_RING_TYPES(xen_netif_tx, struct xen_netif_tx_request, struct xen_netif_tx_response); -DEFINE_RING_TYPES(xen_netif_rx, - struct xen_netif_rx_request, +DEFINE_RING_TYPES(xen_netif_rx, struct xen_netif_rx_request, struct xen_netif_rx_response); -#define XEN_NETIF_RSP_DROPPED -2 -#define XEN_NETIF_RSP_ERROR -1 -#define XEN_NETIF_RSP_OKAY 0 -/* No response: used for auxiliary requests (e.g., xen_netif_extra_info). */ -#define XEN_NETIF_RSP_NULL 1 +#define XEN_NETIF_RSP_DROPPED -2 +#define XEN_NETIF_RSP_ERROR -1 +#define XEN_NETIF_RSP_OKAY 0 +/* No response: used for auxiliary requests (e.g., xen_netif_extra_info_t). */ +#define XEN_NETIF_RSP_NULL 1 #endif |
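Finally, the queue-steering rule described for XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING reduces to a modular table lookup. The sketch below is illustrative only and not part of this header: 'mapping' and 'mapping_size' stand for a backend-local copy of the frontend-supplied table, and a size of zero falls back to plain modular arithmetic over the configured number of queues.

/*
 * Illustrative sketch only: pick the destination queue for a packet
 * whose hash has already been calculated (e.g. with the Toeplitz
 * function defined earlier in this header).  'mapping' and
 * 'mapping_size' mirror the table set via
 * XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING{,_SIZE}; mapping_size == 0
 * means no table, i.e. plain modular arithmetic over the queues.
 */
static unsigned int example_steer_to_queue(uint32_t hash,
					   const uint32_t *mapping,
					   uint32_t mapping_size,
					   unsigned int num_queues)
{
	if (mapping_size == 0)
		return hash % num_queues;

	/* Each entry names a queue (see "multi-queue-num-queues" above). */
	return mapping[hash % mapping_size];
}

When the hash is also reported to the other end, it travels in the XEN_NETIF_EXTRA_TYPE_HASH extra info slot described above, with the hash type and algorithm in the htype/alg octets and the 32-bit value in the remaining four octets.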