Diffstat (limited to 'include/linux')
69 files changed, 1598 insertions, 491 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 6ac47f5ea514..d5dcebd7aad3 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -400,12 +400,17 @@ extern bool acpi_osi_is_win8(void); #ifdef CONFIG_ACPI_NUMA int acpi_map_pxm_to_online_node(int pxm); +int acpi_map_pxm_to_node(int pxm); int acpi_get_node(acpi_handle handle); #else static inline int acpi_map_pxm_to_online_node(int pxm) { return 0; } +static inline int acpi_map_pxm_to_node(int pxm) +{ + return 0; +} static inline int acpi_get_node(acpi_handle handle) { return 0; diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index d143c13bed26..f99b74a6e4ca 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -25,6 +25,43 @@ #define AMBA_CID 0xb105f00d #define CORESIGHT_CID 0xb105900d +/* + * CoreSight Architecture specification updates the ID specification + * for components on the AMBA bus. (ARM IHI 0029E) + * + * Bits 15:12 of the CID are the device class. + * + * Class 0xF remains for PrimeCell and legacy components. (AMBA_CID above) + * Class 0x9 defines the component as CoreSight (CORESIGHT_CID above) + * Class 0x0, 0x1, 0xB, 0xE define components that do not have driver support + * at present. + * Class 0x2-0x8,0xA and 0xD-0xD are presently reserved. + * + * Remaining CID bits stay as 0xb105-00d + */ + +/** + * Class 0x9 components use additional values to form a Unique Component + * Identifier (UCI), where peripheral ID values are identical for different + * components. Passed to the amba bus code from the component driver via + * the amba_id->data pointer. + * @devarch : coresight devarch register value + * @devarch_mask: mask bits used for matching. 0 indicates UCI not used. + * @devtype : coresight device type value + * @data : additional driver data. 
As we have usurped the original + * pointer some devices may still need additional data + */ +struct amba_cs_uci_id { + unsigned int devarch; + unsigned int devarch_mask; + unsigned int devtype; + void *data; +}; + +/* define offsets for registers used by UCI */ +#define UCI_REG_DEVTYPE_OFFSET 0xFCC +#define UCI_REG_DEVARCH_OFFSET 0xFBC + struct clk; struct amba_device { @@ -32,6 +69,8 @@ struct amba_device { struct resource res; struct clk *pclk; unsigned int periphid; + unsigned int cid; + struct amba_cs_uci_id uci; unsigned int irq[AMBA_NR_IRQS]; char *driver_override; }; diff --git a/include/linux/atalk.h b/include/linux/atalk.h index 5a90f28d5ff2..d5cfc0b15b76 100644 --- a/include/linux/atalk.h +++ b/include/linux/atalk.h @@ -161,16 +161,26 @@ extern int sysctl_aarp_resolve_time; extern int atalk_register_sysctl(void); extern void atalk_unregister_sysctl(void); #else -#define atalk_register_sysctl() do { } while(0) -#define atalk_unregister_sysctl() do { } while(0) +static inline int atalk_register_sysctl(void) +{ + return 0; +} +static inline void atalk_unregister_sysctl(void) +{ +} #endif #ifdef CONFIG_PROC_FS extern int atalk_proc_init(void); extern void atalk_proc_exit(void); #else -#define atalk_proc_init() ({ 0; }) -#define atalk_proc_exit() do { } while(0) +static inline int atalk_proc_init(void) +{ + return 0; +} +static inline void atalk_proc_exit(void) +{ +} #endif /* CONFIG_PROC_FS */ #endif /* __LINUX_ATALK_H__ */ diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 695b2a880d9a..a4c644c1c091 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -292,7 +292,7 @@ static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog, static inline void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map) {} static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( - struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return 0; } + struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; } static inline void bpf_cgroup_storage_free( struct bpf_cgroup_storage *storage) {} static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, diff --git a/include/linux/ceph/types.h b/include/linux/ceph/types.h index 27cd973d3881..bd3d532902d7 100644 --- a/include/linux/ceph/types.h +++ b/include/linux/ceph/types.h @@ -24,6 +24,7 @@ struct ceph_vino { /* context for the caps reservation mechanism */ struct ceph_cap_reservation { int count; + int used; }; diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index e443fa9fa859..b7cf80a71293 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -792,6 +792,9 @@ unsigned int __clk_get_enable_count(struct clk *clk); unsigned long clk_hw_get_rate(const struct clk_hw *hw); unsigned long __clk_get_flags(struct clk *clk); unsigned long clk_hw_get_flags(const struct clk_hw *hw); +#define clk_hw_can_set_rate_parent(hw) \ + (clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT) + bool clk_hw_is_prepared(const struct clk_hw *hw); bool clk_hw_rate_is_protected(const struct clk_hw *hw); bool clk_hw_is_enabled(const struct clk_hw *hw); diff --git a/include/linux/clk.h b/include/linux/clk.h index a7773b5c0b9f..d8bc1a856b39 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -384,6 +384,17 @@ int __must_check devm_clk_bulk_get_all(struct device *dev, struct clk *devm_clk_get(struct device *dev, const char *id); /** + * devm_clk_get_optional - lookup and obtain a managed reference to an optional + * 
clock producer. + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Behaves the same as devm_clk_get() except where there is no clock producer. + * In this case, instead of returning -ENOENT, the function returns NULL. + */ +struct clk *devm_clk_get_optional(struct device *dev, const char *id); + +/** * devm_get_clk_from_child - lookup and obtain a managed reference to a * clock producer from child node. * @dev: device for clock "consumer" @@ -718,6 +729,12 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id) return NULL; } +static inline struct clk *devm_clk_get_optional(struct device *dev, + const char *id) +{ + return NULL; +} + static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, struct clk_bulk_data *clks) { @@ -862,6 +879,25 @@ static inline void clk_bulk_disable_unprepare(int num_clks, clk_bulk_unprepare(num_clks, clks); } +/** + * clk_get_optional - lookup and obtain a reference to an optional clock + * producer. + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Behaves the same as clk_get() except where there is no clock producer. In + * this case, instead of returning -ENOENT, the function returns NULL. + */ +static inline struct clk *clk_get_optional(struct device *dev, const char *id) +{ + struct clk *clk = clk_get(dev, id); + + if (clk == ERR_PTR(-ENOENT)) + return NULL; + + return clk; +} + #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) struct clk *of_clk_get(struct device_node *np, int index); struct clk *of_clk_get_by_name(struct device_node *np, const char *name); diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index eacc5df57b99..78872efc7be0 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -160,6 +160,7 @@ struct clk_hw_omap { struct clockdomain *clkdm; const struct clk_hw_omap_ops *ops; u32 context; + int autoidle_count; }; /* diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h index 4890ff033220..ccb32af5848b 100644 --- a/include/linux/clkdev.h +++ b/include/linux/clkdev.h @@ -52,4 +52,8 @@ int clk_add_alias(const char *, const char *, const char *, struct device *); int clk_register_clkdev(struct clk *, const char *, const char *); int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *); +int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw, + const char *con_id, const char *dev_id); +void devm_clk_release_clkdev(struct device *dev, const char *con_id, + const char *dev_id); #endif diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h index e166cac8e870..9752f3745f76 100644 --- a/include/linux/dma/dw.h +++ b/include/linux/dma/dw.h @@ -1,13 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Driver for the Synopsys DesignWare DMA Controller * * Copyright (C) 2007 Atmel Corporation * Copyright (C) 2010-2011 ST Microelectronics * Copyright (C) 2014 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #ifndef _DMA_DW_H #define _DMA_DW_H @@ -45,9 +42,13 @@ struct dw_dma_chip { #if IS_ENABLED(CONFIG_DW_DMAC_CORE) int dw_dma_probe(struct dw_dma_chip *chip); int dw_dma_remove(struct dw_dma_chip *chip); +int idma32_dma_probe(struct dw_dma_chip *chip); +int idma32_dma_remove(struct dw_dma_chip *chip); #else static inline int dw_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; } static inline int dw_dma_remove(struct dw_dma_chip *chip) { return 0; } +static inline int idma32_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; } +static inline int idma32_dma_remove(struct dw_dma_chip *chip) { return 0; } #endif /* CONFIG_DW_DMAC_CORE */ #endif /* _DMA_DW_H */ diff --git a/include/linux/errno.h b/include/linux/errno.h index 3cba627577d6..d73f597a2484 100644 --- a/include/linux/errno.h +++ b/include/linux/errno.h @@ -18,6 +18,7 @@ #define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */ #define EPROBE_DEFER 517 /* Driver requests probe retry */ #define EOPENSTALE 518 /* open found a stale dentry */ +#define ENOPARAM 519 /* Parameter not supported */ /* Defined for the NFSv3 protocol */ #define EBADHANDLE 521 /* Illegal NFS file handle */ diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index d7711048ef93..f5740423b002 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -116,6 +116,7 @@ struct f2fs_super_block { /* * For checkpoint */ +#define CP_DISABLED_QUICK_FLAG 0x00002000 #define CP_DISABLED_FLAG 0x00001000 #define CP_QUOTA_NEED_FSCK_FLAG 0x00000800 #define CP_LARGE_NAT_BITMAP_FLAG 0x00000400 @@ -186,7 +187,7 @@ struct f2fs_orphan_block { struct f2fs_extent { __le32 fofs; /* start file offset of the extent */ __le32 blk; /* start block address of the extent */ - __le32 len; /* lengh of the extent */ + __le32 len; /* length of the extent */ } __packed; #define F2FS_NAME_LEN 255 @@ -284,7 +285,7 @@ enum { struct node_footer { __le32 nid; /* node id */ - __le32 ino; /* inode nunmber */ + __le32 ino; /* inode number */ __le32 flag; /* include cold/fsync/dentry marks and offset */ __le64 cp_ver; /* checkpoint version */ __le32 next_blkaddr; /* next node page block address */ @@ -489,12 +490,12 @@ typedef __le32 f2fs_hash_t; /* * space utilization of regular dentry and inline dentry (w/o extra reservation) - * regular dentry inline dentry - * bitmap 1 * 27 = 27 1 * 23 = 23 - * reserved 1 * 3 = 3 1 * 7 = 7 - * dentry 11 * 214 = 2354 11 * 182 = 2002 - * filename 8 * 214 = 1712 8 * 182 = 1456 - * total 4096 3488 + * regular dentry inline dentry (def) inline dentry (min) + * bitmap 1 * 27 = 27 1 * 23 = 23 1 * 1 = 1 + * reserved 1 * 3 = 3 1 * 7 = 7 1 * 1 = 1 + * dentry 11 * 214 = 2354 11 * 182 = 2002 11 * 2 = 22 + * filename 8 * 214 = 1712 8 * 182 = 1456 8 * 2 = 16 + * total 4096 3488 40 * * Note: there are more reserved space in inline dentry than in regular * dentry, when converting inline dentry we should handle this carefully. @@ -506,12 +507,13 @@ typedef __le32 f2fs_hash_t; #define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \ F2FS_SLOT_LEN) * \ NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP)) +#define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' 
entries */ /* One directory entry slot representing F2FS_SLOT_LEN-sized file name */ struct f2fs_dir_entry { __le32 hash_code; /* hash code of file name */ __le32 ino; /* inode number */ - __le16 name_len; /* lengh of file name */ + __le16 name_len; /* length of file name */ __u8 file_type; /* file type */ } __packed; diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h deleted file mode 100644 index b94fa61b51fb..000000000000 --- a/include/linux/flex_array.h +++ /dev/null @@ -1,149 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _FLEX_ARRAY_H -#define _FLEX_ARRAY_H - -#include <linux/types.h> -#include <linux/reciprocal_div.h> -#include <asm/page.h> - -#define FLEX_ARRAY_PART_SIZE PAGE_SIZE -#define FLEX_ARRAY_BASE_SIZE PAGE_SIZE - -struct flex_array_part; - -/* - * This is meant to replace cases where an array-like - * structure has gotten too big to fit into kmalloc() - * and the developer is getting tempted to use - * vmalloc(). - */ - -struct flex_array { - union { - struct { - int element_size; - int total_nr_elements; - int elems_per_part; - struct reciprocal_value reciprocal_elems; - struct flex_array_part *parts[]; - }; - /* - * This little trick makes sure that - * sizeof(flex_array) == PAGE_SIZE - */ - char padding[FLEX_ARRAY_BASE_SIZE]; - }; -}; - -/* Number of bytes left in base struct flex_array, excluding metadata */ -#define FLEX_ARRAY_BASE_BYTES_LEFT \ - (FLEX_ARRAY_BASE_SIZE - offsetof(struct flex_array, parts)) - -/* Number of pointers in base to struct flex_array_part pages */ -#define FLEX_ARRAY_NR_BASE_PTRS \ - (FLEX_ARRAY_BASE_BYTES_LEFT / sizeof(struct flex_array_part *)) - -/* Number of elements of size that fit in struct flex_array_part */ -#define FLEX_ARRAY_ELEMENTS_PER_PART(size) \ - (FLEX_ARRAY_PART_SIZE / size) - -/* - * Defines a statically allocated flex array and ensures its parameters are - * valid. - */ -#define DEFINE_FLEX_ARRAY(__arrayname, __element_size, __total) \ - struct flex_array __arrayname = { { { \ - .element_size = (__element_size), \ - .total_nr_elements = (__total), \ - } } }; \ - static inline void __arrayname##_invalid_parameter(void) \ - { \ - BUILD_BUG_ON((__total) > FLEX_ARRAY_NR_BASE_PTRS * \ - FLEX_ARRAY_ELEMENTS_PER_PART(__element_size)); \ - } - -/** - * flex_array_alloc() - Creates a flexible array. - * @element_size: individual object size. - * @total: maximum number of objects which can be stored. - * @flags: GFP flags - * - * Return: Returns an object of structure flex_array. - */ -struct flex_array *flex_array_alloc(int element_size, unsigned int total, - gfp_t flags); - -/** - * flex_array_prealloc() - Ensures that memory for the elements indexed in the - * range defined by start and nr_elements has been allocated. - * @fa: array to allocate memory to. - * @start: start address - * @nr_elements: number of elements to be allocated. - * @flags: GFP flags - * - */ -int flex_array_prealloc(struct flex_array *fa, unsigned int start, - unsigned int nr_elements, gfp_t flags); - -/** - * flex_array_free() - Removes all elements of a flexible array. - * @fa: array to be freed. - */ -void flex_array_free(struct flex_array *fa); - -/** - * flex_array_free_parts() - Removes all elements of a flexible array, but - * leaves the array itself in place. - * @fa: array to be emptied. - */ -void flex_array_free_parts(struct flex_array *fa); - -/** - * flex_array_put() - Stores data into a flexible array. - * @fa: array where element is to be stored. 
- * @element_nr: position to copy, must be less than the maximum specified when - * the array was created. - * @src: data source to be copied into the array. - * @flags: GFP flags - * - * Return: Returns zero on success, a negative error code otherwise. - */ -int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, - gfp_t flags); - -/** - * flex_array_clear() - Clears an individual element in the array, sets the - * given element to FLEX_ARRAY_FREE. - * @element_nr: element position to clear. - * @fa: array to which element to be cleared belongs. - * - * Return: Returns zero on success, -EINVAL otherwise. - */ -int flex_array_clear(struct flex_array *fa, unsigned int element_nr); - -/** - * flex_array_get() - Retrieves data into a flexible array. - * - * @element_nr: Element position to retrieve data from. - * @fa: array from which data is to be retrieved. - * - * Return: Returns a pointer to the data element, or NULL if that - * particular element has never been allocated. - */ -void *flex_array_get(struct flex_array *fa, unsigned int element_nr); - -/** - * flex_array_shrink() - Reduces the allocated size of an array. - * @fa: array to shrink. - * - * Return: Returns number of pages of memory actually freed. - * - */ -int flex_array_shrink(struct flex_array *fa); - -#define flex_array_put_ptr(fa, nr, src, gfp) \ - flex_array_put(fa, nr, (void *)&(src), gfp) - -void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr); - -#endif /* _FLEX_ARRAY_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 80c6a4093b46..8b42df09b04c 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -64,6 +64,8 @@ struct workqueue_struct; struct iov_iter; struct fscrypt_info; struct fscrypt_operations; +struct fs_context; +struct fs_parameter_description; extern void __init inode_init(void); extern void __init inode_init_early(void); @@ -1349,6 +1351,7 @@ extern int send_sigurg(struct fown_struct *fown); /* These sb flags are internal to the kernel */ #define SB_SUBMOUNT (1<<26) +#define SB_FORCE (1<<27) #define SB_NOSEC (1<<28) #define SB_BORN (1<<29) #define SB_ACTIVE (1<<30) @@ -1459,7 +1462,7 @@ struct super_block { * Filesystem subtype. If non-empty the filesystem type field * in /proc/mounts will be "type.subtype" */ - char *s_subtype; + const char *s_subtype; const struct dentry_operations *s_d_op; /* default d_op for dentries */ @@ -2170,6 +2173,8 @@ struct file_system_type { #define FS_HAS_SUBTYPE 4 #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. 
*/ + int (*init_fs_context)(struct fs_context *); + const struct fs_parameter_description *parameters; struct dentry *(*mount) (struct file_system_type *, int, const char *, void *); void (*kill_sb) (struct super_block *); @@ -2225,8 +2230,12 @@ void kill_litter_super(struct super_block *sb); void deactivate_super(struct super_block *sb); void deactivate_locked_super(struct super_block *sb); int set_anon_super(struct super_block *s, void *data); +int set_anon_super_fc(struct super_block *s, struct fs_context *fc); int get_anon_bdev(dev_t *); void free_anon_bdev(dev_t); +struct super_block *sget_fc(struct fs_context *fc, + int (*test)(struct super_block *, struct fs_context *), + int (*set)(struct super_block *, struct fs_context *)); struct super_block *sget_userns(struct file_system_type *type, int (*test)(struct super_block *,void *), int (*set)(struct super_block *,void *), @@ -2269,8 +2278,7 @@ mount_pseudo(struct file_system_type *fs_type, char *name, extern int register_filesystem(struct file_system_type *); extern int unregister_filesystem(struct file_system_type *); -extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data); -#define kern_mount(type) kern_mount_data(type, NULL) +extern struct vfsmount *kern_mount(struct file_system_type *); extern void kern_unmount(struct vfsmount *mnt); extern int may_umount_tree(struct vfsmount *); extern int may_umount(struct vfsmount *); diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h new file mode 100644 index 000000000000..eaca452088fa --- /dev/null +++ b/include/linux/fs_context.h @@ -0,0 +1,188 @@ +/* Filesystem superblock creation and reconfiguration context. + * + * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_FS_CONTEXT_H +#define _LINUX_FS_CONTEXT_H + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/security.h> + +struct cred; +struct dentry; +struct file_operations; +struct file_system_type; +struct mnt_namespace; +struct net; +struct pid_namespace; +struct super_block; +struct user_namespace; +struct vfsmount; +struct path; + +enum fs_context_purpose { + FS_CONTEXT_FOR_MOUNT, /* New superblock for explicit mount */ + FS_CONTEXT_FOR_SUBMOUNT, /* New superblock for automatic submount */ + FS_CONTEXT_FOR_RECONFIGURE, /* Superblock reconfiguration (remount) */ +}; + +/* + * Type of parameter value. + */ +enum fs_value_type { + fs_value_is_undefined, + fs_value_is_flag, /* Value not given a value */ + fs_value_is_string, /* Value is a string */ + fs_value_is_blob, /* Value is a binary blob */ + fs_value_is_filename, /* Value is a filename* + dirfd */ + fs_value_is_filename_empty, /* Value is a filename* + dirfd + AT_EMPTY_PATH */ + fs_value_is_file, /* Value is a file* */ +}; + +/* + * Configuration parameter. + */ +struct fs_parameter { + const char *key; /* Parameter name */ + enum fs_value_type type:8; /* The type of value here */ + union { + char *string; + void *blob; + struct filename *name; + struct file *file; + }; + size_t size; + int dirfd; +}; + +/* + * Filesystem context for holding the parameters used in the creation or + * reconfiguration of a superblock. 
+ * + * Superblock creation fills in ->root whereas reconfiguration begins with this + * already set. + * + * See Documentation/filesystems/mounting.txt + */ +struct fs_context { + const struct fs_context_operations *ops; + struct file_system_type *fs_type; + void *fs_private; /* The filesystem's context */ + struct dentry *root; /* The root and superblock */ + struct user_namespace *user_ns; /* The user namespace for this mount */ + struct net *net_ns; /* The network namespace for this mount */ + const struct cred *cred; /* The mounter's credentials */ + const char *source; /* The source name (eg. dev path) */ + const char *subtype; /* The subtype to set on the superblock */ + void *security; /* Linux S&M options */ + void *s_fs_info; /* Proposed s_fs_info */ + unsigned int sb_flags; /* Proposed superblock flags (SB_*) */ + unsigned int sb_flags_mask; /* Superblock flags that were changed */ + unsigned int lsm_flags; /* Information flags from the fs to the LSM */ + enum fs_context_purpose purpose:8; + bool need_free:1; /* Need to call ops->free() */ + bool global:1; /* Goes into &init_user_ns */ +}; + +struct fs_context_operations { + void (*free)(struct fs_context *fc); + int (*dup)(struct fs_context *fc, struct fs_context *src_fc); + int (*parse_param)(struct fs_context *fc, struct fs_parameter *param); + int (*parse_monolithic)(struct fs_context *fc, void *data); + int (*get_tree)(struct fs_context *fc); + int (*reconfigure)(struct fs_context *fc); +}; + +/* + * fs_context manipulation functions. + */ +extern struct fs_context *fs_context_for_mount(struct file_system_type *fs_type, + unsigned int sb_flags); +extern struct fs_context *fs_context_for_reconfigure(struct dentry *dentry, + unsigned int sb_flags, + unsigned int sb_flags_mask); +extern struct fs_context *fs_context_for_submount(struct file_system_type *fs_type, + struct dentry *reference); + +extern struct fs_context *vfs_dup_fs_context(struct fs_context *fc); +extern int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param); +extern int vfs_parse_fs_string(struct fs_context *fc, const char *key, + const char *value, size_t v_size); +extern int generic_parse_monolithic(struct fs_context *fc, void *data); +extern int vfs_get_tree(struct fs_context *fc); +extern void put_fs_context(struct fs_context *fc); + +/* + * sget() wrapper to be called from the ->get_tree() op. + */ +enum vfs_get_super_keying { + vfs_get_single_super, /* Only one such superblock may exist */ + vfs_get_keyed_super, /* Superblocks with different s_fs_info keys may exist */ + vfs_get_independent_super, /* Multiple independent superblocks may exist */ +}; +extern int vfs_get_super(struct fs_context *fc, + enum vfs_get_super_keying keying, + int (*fill_super)(struct super_block *sb, + struct fs_context *fc)); + +extern const struct file_operations fscontext_fops; + +#ifdef CONFIG_PRINTK +extern __attribute__((format(printf, 2, 3))) +void logfc(struct fs_context *fc, const char *fmt, ...); +#else +static inline __attribute__((format(printf, 2, 3))) +void logfc(struct fs_context *fc, const char *fmt, ...) +{ +} +#endif + +/** + * infof - Store supplementary informational message + * @fc: The context in which to log the informational message + * @fmt: The format string + * + * Store the supplementary informational message for the process if the process + * has enabled the facility. + */ +#define infof(fc, fmt, ...) 
({ logfc(fc, "i "fmt, ## __VA_ARGS__); }) + +/** + * warnf - Store supplementary warning message + * @fc: The context in which to log the error message + * @fmt: The format string + * + * Store the supplementary warning message for the process if the process has + * enabled the facility. + */ +#define warnf(fc, fmt, ...) ({ logfc(fc, "w "fmt, ## __VA_ARGS__); }) + +/** + * errorf - Store supplementary error message + * @fc: The context in which to log the error message + * @fmt: The format string + * + * Store the supplementary error message for the process if the process has + * enabled the facility. + */ +#define errorf(fc, fmt, ...) ({ logfc(fc, "e "fmt, ## __VA_ARGS__); }) + +/** + * invalf - Store supplementary invalid argument error message + * @fc: The context in which to log the error message + * @fmt: The format string + * + * Store the supplementary error message for the process if the process has + * enabled the facility and return -EINVAL. + */ +#define invalf(fc, fmt, ...) ({ errorf(fc, fmt, ## __VA_ARGS__); -EINVAL; }) + +#endif /* _LINUX_FS_CONTEXT_H */ diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h new file mode 100644 index 000000000000..d966f96ffe62 --- /dev/null +++ b/include/linux/fs_parser.h @@ -0,0 +1,151 @@ +/* Filesystem parameter description and parser + * + * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_FS_PARSER_H +#define _LINUX_FS_PARSER_H + +#include <linux/fs_context.h> + +struct path; + +struct constant_table { + const char *name; + int value; +}; + +/* + * The type of parameter expected. + */ +enum fs_parameter_type { + __fs_param_wasnt_defined, + fs_param_is_flag, + fs_param_is_bool, + fs_param_is_u32, + fs_param_is_u32_octal, + fs_param_is_u32_hex, + fs_param_is_s32, + fs_param_is_u64, + fs_param_is_enum, + fs_param_is_string, + fs_param_is_blob, + fs_param_is_blockdev, + fs_param_is_path, + fs_param_is_fd, + nr__fs_parameter_type, +}; + +/* + * Specification of the type of value a parameter wants. + * + * Note that the fsparam_flag(), fsparam_string(), fsparam_u32(), ... macros + * should be used to generate elements of this type. + */ +struct fs_parameter_spec { + const char *name; + u8 opt; /* Option number (returned by fs_parse()) */ + enum fs_parameter_type type:8; /* The desired parameter type */ + unsigned short flags; +#define fs_param_v_optional 0x0001 /* The value is optional */ +#define fs_param_neg_with_no 0x0002 /* "noxxx" is negative param */ +#define fs_param_neg_with_empty 0x0004 /* "xxx=" is negative param */ +#define fs_param_deprecated 0x0008 /* The param is deprecated */ +}; + +struct fs_parameter_enum { + u8 opt; /* Option number (as fs_parameter_spec::opt) */ + char name[14]; + u8 value; +}; + +struct fs_parameter_description { + const char name[16]; /* Name for logging purposes */ + const struct fs_parameter_spec *specs; /* List of param specifications */ + const struct fs_parameter_enum *enums; /* Enum values */ +}; + +/* + * Result of parse. 
+ */ +struct fs_parse_result { + bool negated; /* T if param was "noxxx" */ + bool has_value; /* T if value supplied to param */ + union { + bool boolean; /* For spec_bool */ + int int_32; /* For spec_s32/spec_enum */ + unsigned int uint_32; /* For spec_u32{,_octal,_hex}/spec_enum */ + u64 uint_64; /* For spec_u64 */ + }; +}; + +extern int fs_parse(struct fs_context *fc, + const struct fs_parameter_description *desc, + struct fs_parameter *value, + struct fs_parse_result *result); +extern int fs_lookup_param(struct fs_context *fc, + struct fs_parameter *param, + bool want_bdev, + struct path *_path); + +extern int __lookup_constant(const struct constant_table tbl[], size_t tbl_size, + const char *name, int not_found); +#define lookup_constant(t, n, nf) __lookup_constant(t, ARRAY_SIZE(t), (n), (nf)) + +#ifdef CONFIG_VALIDATE_FS_PARSER +extern bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size, + int low, int high, int special); +extern bool fs_validate_description(const struct fs_parameter_description *desc); +#else +static inline bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size, + int low, int high, int special) +{ return true; } +static inline bool fs_validate_description(const struct fs_parameter_description *desc) +{ return true; } +#endif + +/* + * Parameter type, name, index and flags element constructors. Use as: + * + * fsparam_xxxx("foo", Opt_foo) + * + * If existing helpers are not enough, direct use of __fsparam() would + * work, but any such case is probably a sign that new helper is needed. + * Helpers will remain stable; low-level implementation may change. + */ +#define __fsparam(TYPE, NAME, OPT, FLAGS) \ + { \ + .name = NAME, \ + .opt = OPT, \ + .type = TYPE, \ + .flags = FLAGS \ + } + +#define fsparam_flag(NAME, OPT) __fsparam(fs_param_is_flag, NAME, OPT, 0) +#define fsparam_flag_no(NAME, OPT) \ + __fsparam(fs_param_is_flag, NAME, OPT, \ + fs_param_neg_with_no) +#define fsparam_bool(NAME, OPT) __fsparam(fs_param_is_bool, NAME, OPT, 0) +#define fsparam_u32(NAME, OPT) __fsparam(fs_param_is_u32, NAME, OPT, 0) +#define fsparam_u32oct(NAME, OPT) \ + __fsparam(fs_param_is_u32_octal, NAME, OPT, 0) +#define fsparam_u32hex(NAME, OPT) \ + __fsparam(fs_param_is_u32_hex, NAME, OPT, 0) +#define fsparam_s32(NAME, OPT) __fsparam(fs_param_is_s32, NAME, OPT, 0) +#define fsparam_u64(NAME, OPT) __fsparam(fs_param_is_u64, NAME, OPT, 0) +#define fsparam_enum(NAME, OPT) __fsparam(fs_param_is_enum, NAME, OPT, 0) +#define fsparam_string(NAME, OPT) \ + __fsparam(fs_param_is_string, NAME, OPT, 0) +#define fsparam_blob(NAME, OPT) __fsparam(fs_param_is_blob, NAME, OPT, 0) +#define fsparam_bdev(NAME, OPT) __fsparam(fs_param_is_blockdev, NAME, OPT, 0) +#define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0) +#define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0) + + +#endif /* _LINUX_FS_PARSER_H */ diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h new file mode 100644 index 000000000000..3a91130a4fbd --- /dev/null +++ b/include/linux/generic-radix-tree.h @@ -0,0 +1,231 @@ +#ifndef _LINUX_GENERIC_RADIX_TREE_H +#define _LINUX_GENERIC_RADIX_TREE_H + +/** + * DOC: Generic radix trees/sparse arrays: + * + * Very simple and minimalistic, supporting arbitrary size entries up to + * PAGE_SIZE. 
+ * + * A genradix is defined with the type it will store, like so: + * + * static GENRADIX(struct foo) foo_genradix; + * + * The main operations are: + * + * - genradix_init(radix) - initialize an empty genradix + * + * - genradix_free(radix) - free all memory owned by the genradix and + * reinitialize it + * + * - genradix_ptr(radix, idx) - gets a pointer to the entry at idx, returning + * NULL if that entry does not exist + * + * - genradix_ptr_alloc(radix, idx, gfp) - gets a pointer to an entry, + * allocating it if necessary + * + * - genradix_for_each(radix, iter, p) - iterate over each entry in a genradix + * + * The radix tree allocates one page of entries at a time, so entries may exist + * that were never explicitly allocated - they will be initialized to all + * zeroes. + * + * Internally, a genradix is just a radix tree of pages, and indexing works in + * terms of byte offsets. The wrappers in this header file use sizeof on the + * type the radix contains to calculate a byte offset from the index - see + * __idx_to_offset. + */ + +#include <asm/page.h> +#include <linux/bug.h> +#include <linux/kernel.h> +#include <linux/log2.h> + +struct genradix_root; + +struct __genradix { + struct genradix_root __rcu *root; +}; + +/* + * NOTE: currently, sizeof(_type) must not be larger than PAGE_SIZE: + */ + +#define __GENRADIX_INITIALIZER \ + { \ + .tree = { \ + .root = NULL, \ + } \ + } + +/* + * We use a 0 size array to stash the type we're storing without taking any + * space at runtime - then the various accessor macros can use typeof() to get + * to it for casts/sizeof - we also force the alignment so that storing a type + * with a ridiculous alignment doesn't blow up the alignment or size of the + * genradix. + */ + +#define GENRADIX(_type) \ +struct { \ + struct __genradix tree; \ + _type type[0] __aligned(1); \ +} + +#define DEFINE_GENRADIX(_name, _type) \ + GENRADIX(_type) _name = __GENRADIX_INITIALIZER + +/** + * genradix_init - initialize a genradix + * @_radix: genradix to initialize + * + * Does not fail + */ +#define genradix_init(_radix) \ +do { \ + *(_radix) = (typeof(*_radix)) __GENRADIX_INITIALIZER; \ +} while (0) + +void __genradix_free(struct __genradix *); + +/** + * genradix_free: free all memory owned by a genradix + * @_radix: the genradix to free + * + * After freeing, @_radix will be reinitialized and empty + */ +#define genradix_free(_radix) __genradix_free(&(_radix)->tree) + +static inline size_t __idx_to_offset(size_t idx, size_t obj_size) +{ + if (__builtin_constant_p(obj_size)) + BUILD_BUG_ON(obj_size > PAGE_SIZE); + else + BUG_ON(obj_size > PAGE_SIZE); + + if (!is_power_of_2(obj_size)) { + size_t objs_per_page = PAGE_SIZE / obj_size; + + return (idx / objs_per_page) * PAGE_SIZE + + (idx % objs_per_page) * obj_size; + } else { + return idx * obj_size; + } +} + +#define __genradix_cast(_radix) (typeof((_radix)->type[0]) *) +#define __genradix_obj_size(_radix) sizeof((_radix)->type[0]) +#define __genradix_idx_to_offset(_radix, _idx) \ + __idx_to_offset(_idx, __genradix_obj_size(_radix)) + +void *__genradix_ptr(struct __genradix *, size_t); + +/** + * genradix_ptr - get a pointer to a genradix entry + * @_radix: genradix to access + * @_idx: index to fetch + * + * Returns a pointer to entry at @_idx, or NULL if that entry does not exist. 
+ */ +#define genradix_ptr(_radix, _idx) \ + (__genradix_cast(_radix) \ + __genradix_ptr(&(_radix)->tree, \ + __genradix_idx_to_offset(_radix, _idx))) + +void *__genradix_ptr_alloc(struct __genradix *, size_t, gfp_t); + +/** + * genradix_ptr_alloc - get a pointer to a genradix entry, allocating it + * if necessary + * @_radix: genradix to access + * @_idx: index to fetch + * @_gfp: gfp mask + * + * Returns a pointer to entry at @_idx, or NULL on allocation failure + */ +#define genradix_ptr_alloc(_radix, _idx, _gfp) \ + (__genradix_cast(_radix) \ + __genradix_ptr_alloc(&(_radix)->tree, \ + __genradix_idx_to_offset(_radix, _idx), \ + _gfp)) + +struct genradix_iter { + size_t offset; + size_t pos; +}; + +/** + * genradix_iter_init - initialize a genradix_iter + * @_radix: genradix that will be iterated over + * @_idx: index to start iterating from + */ +#define genradix_iter_init(_radix, _idx) \ + ((struct genradix_iter) { \ + .pos = (_idx), \ + .offset = __genradix_idx_to_offset((_radix), (_idx)),\ + }) + +void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t); + +/** + * genradix_iter_peek - get first entry at or above iterator's current + * position + * @_iter: a genradix_iter + * @_radix: genradix being iterated over + * + * If no more entries exist at or above @_iter's current position, returns NULL + */ +#define genradix_iter_peek(_iter, _radix) \ + (__genradix_cast(_radix) \ + __genradix_iter_peek(_iter, &(_radix)->tree, \ + PAGE_SIZE / __genradix_obj_size(_radix))) + +static inline void __genradix_iter_advance(struct genradix_iter *iter, + size_t obj_size) +{ + iter->offset += obj_size; + + if (!is_power_of_2(obj_size) && + (iter->offset & (PAGE_SIZE - 1)) + obj_size > PAGE_SIZE) + iter->offset = round_up(iter->offset, PAGE_SIZE); + + iter->pos++; +} + +#define genradix_iter_advance(_iter, _radix) \ + __genradix_iter_advance(_iter, __genradix_obj_size(_radix)) + +#define genradix_for_each_from(_radix, _iter, _p, _start) \ + for (_iter = genradix_iter_init(_radix, _start); \ + (_p = genradix_iter_peek(&_iter, _radix)) != NULL; \ + genradix_iter_advance(&_iter, _radix)) + +/** + * genradix_for_each - iterate over entry in a genradix + * @_radix: genradix to iterate over + * @_iter: a genradix_iter to track current position + * @_p: pointer to genradix entry type + * + * On every iteration, @_p will point to the current entry, and @_iter.pos + * will be the current entry's index. + */ +#define genradix_for_each(_radix, _iter, _p) \ + genradix_for_each_from(_radix, _iter, _p, 0) + +int __genradix_prealloc(struct __genradix *, size_t, gfp_t); + +/** + * genradix_prealloc - preallocate entries in a generic radix tree + * @_radix: genradix to preallocate + * @_nr: number of entries to preallocate + * @_gfp: gfp mask + * + * Returns 0 on success, -ENOMEM on failure + */ +#define genradix_prealloc(_radix, _nr, _gfp) \ + __genradix_prealloc(&(_radix)->tree, \ + __genradix_idx_to_offset(_radix, _nr + 1),\ + _gfp) + + +#endif /* _LINUX_GENERIC_RADIX_TREE_H */ diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 66f9ebbb1df3..ad50b7b4f141 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -468,7 +468,7 @@ struct hmm_devmem_ops { * Note that mmap semaphore is held in read mode at least when this * callback occurs, hence the vma is valid upon callback entry. 
*/ - int (*fault)(struct hmm_devmem *devmem, + vm_fault_t (*fault)(struct hmm_devmem *devmem, struct vm_area_struct *vma, unsigned long addr, const struct page *page, @@ -511,7 +511,7 @@ struct hmm_devmem_ops { * chunk, as an optimization. It must, however, prioritize the faulting address * over all the others. */ -typedef int (*dev_page_fault_t)(struct vm_area_struct *vma, +typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma, unsigned long addr, const struct page *page, unsigned int flags, diff --git a/include/linux/igmp.h b/include/linux/igmp.h index cc85f4524dbf..9c94b2ea789c 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -110,7 +110,7 @@ struct ip_mc_list { static inline int ip_mc_may_pull(struct sk_buff *skb, unsigned int len) { if (skb_transport_offset(skb) + ip_transport_len(skb) < len) - return -EINVAL; + return 0; return pskb_may_pull(skb, len); } diff --git a/include/linux/ima.h b/include/linux/ima.h index b5e16b8c50b7..dc12fbcf484c 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -18,6 +18,7 @@ struct linux_binprm; #ifdef CONFIG_IMA extern int ima_bprm_check(struct linux_binprm *bprm); extern int ima_file_check(struct file *file, int mask); +extern void ima_post_create_tmpfile(struct inode *inode); extern void ima_file_free(struct file *file); extern int ima_file_mmap(struct file *file, unsigned long prot); extern int ima_load_data(enum kernel_load_data_id id); @@ -56,6 +57,10 @@ static inline int ima_file_check(struct file *file, int mask) return 0; } +static inline void ima_post_create_tmpfile(struct inode *inode) +{ +} + static inline void ima_file_free(struct file *file) { return; diff --git a/include/linux/input/ili210x.h b/include/linux/input/ili210x.h deleted file mode 100644 index b76e7c1404cd..000000000000 --- a/include/linux/input/ili210x.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ILI210X_H -#define _ILI210X_H - -struct ili210x_platform_data { - unsigned long irq_flags; - unsigned int poll_period; - bool (*get_pendown_state)(void); -}; - -#endif diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 0cac1207bb00..c8893f663470 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -26,7 +26,9 @@ struct vm_area_struct; struct super_block; struct file_system_type; struct poll_table_struct; +struct fs_context; +struct kernfs_fs_context; struct kernfs_open_node; struct kernfs_iattrs; @@ -168,7 +170,6 @@ struct kernfs_node { * kernfs_node parameter. */ struct kernfs_syscall_ops { - int (*remount_fs)(struct kernfs_root *root, int *flags, char *data); int (*show_options)(struct seq_file *sf, struct kernfs_root *root); int (*mkdir)(struct kernfs_node *parent, const char *name, @@ -272,6 +273,18 @@ struct kernfs_ops { #endif }; +/* + * The kernfs superblock creation/mount parameter context. 
+ */ +struct kernfs_fs_context { + struct kernfs_root *root; /* Root of the hierarchy being mounted */ + void *ns_tag; /* Namespace tag of the mount (or NULL) */ + unsigned long magic; /* File system specific magic number */ + + /* The following are set/used by kernfs_mount() */ + bool new_sb_created; /* Set to T if we allocated a new sb */ +}; + #ifdef CONFIG_KERNFS static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn) @@ -359,11 +372,9 @@ __poll_t kernfs_generic_poll(struct kernfs_open_file *of, void kernfs_notify(struct kernfs_node *kn); const void *kernfs_super_ns(struct super_block *sb); -struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags, - struct kernfs_root *root, unsigned long magic, - bool *new_sb_created, const void *ns); +int kernfs_get_tree(struct fs_context *fc); +void kernfs_free_fs_context(struct fs_context *fc); void kernfs_kill_sb(struct super_block *sb); -struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns); void kernfs_init(void); @@ -465,11 +476,10 @@ static inline void kernfs_notify(struct kernfs_node *kn) { } static inline const void *kernfs_super_ns(struct super_block *sb) { return NULL; } -static inline struct dentry * -kernfs_mount_ns(struct file_system_type *fs_type, int flags, - struct kernfs_root *root, unsigned long magic, - bool *new_sb_created, const void *ns) -{ return ERR_PTR(-ENOSYS); } +static inline int kernfs_get_tree(struct fs_context *fc) +{ return -ENOSYS; } + +static inline void kernfs_free_fs_context(struct fs_context *fc) { } static inline void kernfs_kill_sb(struct super_block *sb) { } @@ -552,13 +562,4 @@ static inline int kernfs_rename(struct kernfs_node *kn, return kernfs_rename_ns(kn, new_parent, new_name, NULL); } -static inline struct dentry * -kernfs_mount(struct file_system_type *fs_type, int flags, - struct kernfs_root *root, unsigned long magic, - bool *new_sb_created) -{ - return kernfs_mount_ns(fs_type, flags, root, - magic, new_sb_created, NULL); -} - #endif /* __LINUX_KERNFS_H */ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c38cc5eb7e73..9d55c63db09b 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -48,6 +48,27 @@ */ #define KVM_MEMSLOT_INVALID (1UL << 16) +/* + * Bit 63 of the memslot generation number is an "update in-progress flag", + * e.g. is temporarily set for the duration of install_new_memslots(). + * This flag effectively creates a unique generation number that is used to + * mark cached memslot data, e.g. MMIO accesses, as potentially being stale, + * i.e. may (or may not) have come from the previous memslots generation. + * + * This is necessary because the actual memslots update is not atomic with + * respect to the generation number update. Updating the generation number + * first would allow a vCPU to cache a spte from the old memslots using the + * new generation number, and updating the generation number after switching + * to the new memslots would allow cache hits using the old generation number + * to reference the defunct memslots. + * + * This mechanism is used to prevent getting hits in KVM's caches while a + * memslot update is in-progress, and to prevent cache hits *after* updating + * the actual generation number against accesses that were inserted into the + * cache *before* the memslots were updated. + */ +#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS BIT_ULL(63) + /* Two fragments for cross MMIO pages. 
*/ #define KVM_MAX_MMIO_FRAGMENTS 2 @@ -634,7 +655,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, struct kvm_memory_slot *dont); int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages); -void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots); +void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen); int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, @@ -1182,6 +1203,7 @@ extern bool kvm_rebooting; extern unsigned int halt_poll_ns; extern unsigned int halt_poll_ns_grow; +extern unsigned int halt_poll_ns_grow_start; extern unsigned int halt_poll_ns_shrink; struct kvm_device { diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index ad609617aeb8..feb342d026f2 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -42,6 +42,8 @@ enum { NDD_SECURITY_OVERWRITE = 3, /* tracking whether or not there is a pending device reference */ NDD_WORK_PENDING = 4, + /* ignore / filter NSLABEL_FLAG_LOCAL for this DIMM, i.e. no aliasing */ + NDD_NOBLK = 5, /* need to set a limit somewhere, but yes, this is likely overkill */ ND_IOCTL_MAX_BUFLEN = SZ_4M, @@ -128,6 +130,7 @@ struct nd_region_desc { void *provider_data; int num_lanes; int numa_node; + int target_node; unsigned long flags; struct device_node *of_node; }; diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 85a301632cf1..a9b8ff578b6b 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -76,6 +76,22 @@ * changes on the process such as clearing out non-inheritable signal * state. This is called immediately after commit_creds(). * + * Security hooks for mount using fs_context. + * [See also Documentation/filesystems/mounting.txt] + * + * @fs_context_dup: + * Allocate and attach a security structure to sc->security. This pointer + * is initialised to NULL by the caller. + * @fc indicates the new filesystem context. + * @src_fc indicates the original filesystem context. + * @fs_context_parse_param: + * Userspace provided a parameter to configure a superblock. The LSM may + * reject it with an error and may use it for itself, in which case it + * should return 0; otherwise it should return -ENOPARAM to pass it on to + * the filesystem. + * @fc indicates the filesystem context. + * @param The parameter + * * Security hooks for filesystem operations. 
* * @sb_alloc_security: @@ -1460,6 +1476,9 @@ union security_list_options { void (*bprm_committing_creds)(struct linux_binprm *bprm); void (*bprm_committed_creds)(struct linux_binprm *bprm); + int (*fs_context_dup)(struct fs_context *fc, struct fs_context *src_sc); + int (*fs_context_parse_param)(struct fs_context *fc, struct fs_parameter *param); + int (*sb_alloc_security)(struct super_block *sb); void (*sb_free_security)(struct super_block *sb); void (*sb_free_mnt_opts)(void *mnt_opts); @@ -1800,6 +1819,8 @@ struct security_hook_heads { struct hlist_head bprm_check_security; struct hlist_head bprm_committing_creds; struct hlist_head bprm_committed_creds; + struct hlist_head fs_context_dup; + struct hlist_head fs_context_parse_param; struct hlist_head sb_alloc_security; struct hlist_head sb_free_security; struct hlist_head sb_free_mnt_opts; diff --git a/include/linux/mailbox/zynqmp-ipi-message.h b/include/linux/mailbox/zynqmp-ipi-message.h new file mode 100644 index 000000000000..9542b41eacfd --- /dev/null +++ b/include/linux/mailbox/zynqmp-ipi-message.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_ZYNQMP_IPI_MESSAGE_H_ +#define _LINUX_ZYNQMP_IPI_MESSAGE_H_ + +/** + * struct zynqmp_ipi_message - ZynqMP IPI message structure + * @len: Length of message + * @data: message payload + * + * This is the structure for data used in mbox_send_message + * the maximum length of data buffer is fixed to 12 bytes. + * Client is supposed to be aware of this. + */ +struct zynqmp_ipi_message { + size_t len; + u8 data[0]; +}; + +#endif /* _LINUX_ZYNQMP_IPI_MESSAGE_H_ */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 859b55b66db2..294d5d80e150 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -108,9 +108,6 @@ void memblock_discard(void); #define memblock_dbg(fmt, ...) 
\ if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) -phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, - phys_addr_t start, phys_addr_t end, - int nid, enum memblock_flags flags); phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align); void memblock_allow_resize(void); @@ -127,7 +124,6 @@ int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); -enum memblock_flags choose_memblock_flags(void); unsigned long memblock_free_all(void); void reset_node_managed_pages(pg_data_t *pgdat); @@ -277,18 +273,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ nid, flags, p_start, p_end, p_nid) -static inline void memblock_set_region_flags(struct memblock_region *r, - enum memblock_flags flags) -{ - r->flags |= flags; -} - -static inline void memblock_clear_region_flags(struct memblock_region *r, - enum memblock_flags flags) -{ - r->flags &= ~flags; -} - #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int memblock_set_node(phys_addr_t base, phys_addr_t size, struct memblock_type *type, int nid); @@ -325,17 +309,20 @@ static inline int memblock_get_region_node(const struct memblock_region *r) #define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL #endif -phys_addr_t memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid); +phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align, + phys_addr_t start, phys_addr_t end); phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); -phys_addr_t memblock_phys_alloc(phys_addr_t size, phys_addr_t align); +static inline phys_addr_t memblock_phys_alloc(phys_addr_t size, + phys_addr_t align) +{ + return memblock_phys_alloc_range(size, align, 0, + MEMBLOCK_ALLOC_ACCESSIBLE); +} void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid); -void *memblock_alloc_try_nid_nopanic(phys_addr_t size, phys_addr_t align, - phys_addr_t min_addr, phys_addr_t max_addr, - int nid); void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid); @@ -362,36 +349,12 @@ static inline void * __init memblock_alloc_from(phys_addr_t size, MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); } -static inline void * __init memblock_alloc_nopanic(phys_addr_t size, - phys_addr_t align) -{ - return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT, - MEMBLOCK_ALLOC_ACCESSIBLE, - NUMA_NO_NODE); -} - static inline void * __init memblock_alloc_low(phys_addr_t size, phys_addr_t align) { return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE); } -static inline void * __init memblock_alloc_low_nopanic(phys_addr_t size, - phys_addr_t align) -{ - return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT, - ARCH_LOW_ADDRESS_LIMIT, - NUMA_NO_NODE); -} - -static inline void * __init memblock_alloc_from_nopanic(phys_addr_t size, - phys_addr_t align, - phys_addr_t min_addr) -{ - return memblock_alloc_try_nid_nopanic(size, align, min_addr, - MEMBLOCK_ALLOC_ACCESSIBLE, - NUMA_NO_NODE); -} static inline void * __init memblock_alloc_node(phys_addr_t size, phys_addr_t align, int nid) @@ -400,14 +363,6 @@ static inline void * 
__init memblock_alloc_node(phys_addr_t size, MEMBLOCK_ALLOC_ACCESSIBLE, nid); } -static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size, - int nid) -{ - return memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES, - MEMBLOCK_LOW_LIMIT, - MEMBLOCK_ALLOC_ACCESSIBLE, nid); -} - static inline void __init memblock_free_early(phys_addr_t base, phys_addr_t size) { @@ -443,16 +398,6 @@ static inline bool memblock_bottom_up(void) return memblock.bottom_up; } -phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, - phys_addr_t start, phys_addr_t end, - enum memblock_flags flags); -phys_addr_t memblock_alloc_base_nid(phys_addr_t size, - phys_addr_t align, phys_addr_t max_addr, - int nid, enum memblock_flags flags); -phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align, - phys_addr_t max_addr); -phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align, - phys_addr_t max_addr); phys_addr_t memblock_phys_mem_size(void); phys_addr_t memblock_reserved_size(void); phys_addr_t memblock_mem_size(unsigned long limit_pfn); diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 52869d6d38b3..8ade08c50d26 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -100,6 +100,8 @@ extern void __online_page_free(struct page *page); extern int try_online_node(int nid); +extern u64 max_mem_size; + extern bool memhp_auto_online; /* If movable_node boot option specified */ extern bool movable_node_enabled; diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index b26ea9077384..0343c81d4c5f 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -557,7 +557,8 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, int mlx5_core_create_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *qp, - u32 *in, int inlen); + u32 *in, int inlen, + u32 *out, int outlen); int mlx5_core_create_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, u32 *in, diff --git a/include/linux/mm.h b/include/linux/mm.h index 5801ee849f36..76769749b5a5 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -26,6 +26,7 @@ #include <linux/page_ref.h> #include <linux/memremap.h> #include <linux/overflow.h> +#include <linux/sizes.h> struct mempolicy; struct anon_vma; @@ -2402,8 +2403,7 @@ int __must_check write_one_page(struct page *page); void task_dirty_inc(struct task_struct *tsk); /* readahead.c */ -#define VM_MAX_READAHEAD 128 /* kbytes */ -#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */ +#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE) int force_page_cache_readahead(struct address_space *mapping, struct file *filp, pgoff_t offset, unsigned long nr_to_read); diff --git a/include/linux/mount.h b/include/linux/mount.h index 037eed52164b..9197ddbf35fb 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -21,6 +21,7 @@ struct super_block; struct vfsmount; struct dentry; struct mnt_namespace; +struct fs_context; #define MNT_NOSUID 0x01 #define MNT_NODEV 0x02 @@ -88,6 +89,8 @@ struct path; extern struct vfsmount *clone_private_mount(const struct path *path); struct file_system_type; +extern struct vfsmount *fc_mount(struct fs_context *fc); +extern struct vfsmount *vfs_create_mount(struct fs_context *fc); extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data); diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 1b06f0b28453..22494d170619 100644 --- a/include/linux/nfs4.h +++ 
b/include/linux/nfs4.h @@ -538,6 +538,7 @@ enum { NFSPROC4_CLNT_OFFLOAD_CANCEL, NFSPROC4_CLNT_LOOKUPP, + NFSPROC4_CLNT_LAYOUTERROR, }; /* nfs41 types */ diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 6aa8cc83c3b6..c827d31298cc 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -261,5 +261,6 @@ struct nfs_server { #define NFS_CAP_CLONE (1U << 23) #define NFS_CAP_COPY (1U << 24) #define NFS_CAP_OFFLOAD_CANCEL (1U << 25) +#define NFS_CAP_LAYOUTERROR (1U << 26) #endif diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index e27572d30d97..ad69430fd0eb 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -164,6 +164,16 @@ nfs_list_add_request(struct nfs_page *req, struct list_head *head) list_add_tail(&req->wb_list, head); } +/** + * nfs_list_move_request - Move a request to a new list + * @req: request + * @head: head of list into which to insert the request. + */ +static inline void +nfs_list_move_request(struct nfs_page *req, struct list_head *head) +{ + list_move_tail(&req->wb_list, head); +} /** * nfs_list_remove_request - Remove a request from its wb_list diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 441a93ebcac0..9b8324ec08f3 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -383,6 +383,41 @@ struct nfs42_layoutstat_data { struct nfs42_layoutstat_res res; }; +struct nfs42_device_error { + struct nfs4_deviceid dev_id; + int status; + enum nfs_opnum4 opnum; +}; + +struct nfs42_layout_error { + __u64 offset; + __u64 length; + nfs4_stateid stateid; + struct nfs42_device_error errors[1]; +}; + +#define NFS42_LAYOUTERROR_MAX 5 + +struct nfs42_layouterror_args { + struct nfs4_sequence_args seq_args; + struct inode *inode; + unsigned int num_errors; + struct nfs42_layout_error errors[NFS42_LAYOUTERROR_MAX]; +}; + +struct nfs42_layouterror_res { + struct nfs4_sequence_res seq_res; + unsigned int num_errors; + int rpc_status; +}; + +struct nfs42_layouterror_data { + struct nfs42_layouterror_args args; + struct nfs42_layouterror_res res; + struct inode *inode; + struct pnfs_layout_segment *lseg; +}; + struct nfs42_clone_args { struct nfs4_sequence_args seq_args; struct nfs_fh *src_fh; @@ -1549,7 +1584,7 @@ struct nfs_commit_data { }; struct nfs_pgio_completion_ops { - void (*error_cleanup)(struct list_head *head); + void (*error_cleanup)(struct list_head *head, int); void (*init_hdr)(struct nfs_pgio_header *hdr); void (*completion)(struct nfs_pgio_header *hdr); void (*reschedule_io)(struct nfs_pgio_header *hdr); diff --git a/include/linux/ntb.h b/include/linux/ntb.h index 181d16601dd9..56a92e3ae3ae 100644 --- a/include/linux/ntb.h +++ b/include/linux/ntb.h @@ -296,7 +296,8 @@ struct ntb_dev_ops { int (*db_clear_mask)(struct ntb_dev *ntb, u64 db_bits); int (*peer_db_addr)(struct ntb_dev *ntb, - phys_addr_t *db_addr, resource_size_t *db_size); + phys_addr_t *db_addr, resource_size_t *db_size, + u64 *db_data, int db_bit); u64 (*peer_db_read)(struct ntb_dev *ntb); int (*peer_db_set)(struct ntb_dev *ntb, u64 db_bits); int (*peer_db_clear)(struct ntb_dev *ntb, u64 db_bits); @@ -1078,6 +1079,8 @@ static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) * @ntb: NTB device context. * @db_addr: OUT - The address of the peer doorbell register. * @db_size: OUT - The number of bytes to write the peer doorbell register. + * @db_data: OUT - The data of peer doorbell register + * @db_bit: door bell bit number * * Return the address of the peer doorbell register. 
This may be used, for * example, by drivers that offload memory copy operations to a dma engine. @@ -1091,12 +1094,13 @@ static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) */ static inline int ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, - resource_size_t *db_size) + resource_size_t *db_size, + u64 *db_data, int db_bit) { if (!ntb->ops->peer_db_addr) return -EINVAL; - return ntb->ops->peer_db_addr(ntb, db_addr, db_size); + return ntb->ops->peer_db_addr(ntb, db_addr, db_size, db_data, db_bit); } /** diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index b477a70cc2e4..bcf909d0de5f 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -239,6 +239,7 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping, #define FGP_WRITE 0x00000008 #define FGP_NOFS 0x00000010 #define FGP_NOWAIT 0x00000020 +#define FGP_FOR_MMAP 0x00000040 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, int fgp_flags, gfp_t cache_gfp_mask); diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h index 8dd85d302b90..93c9dd133e9d 100644 --- a/include/linux/pinctrl/pinconf.h +++ b/include/linux/pinctrl/pinconf.h @@ -14,8 +14,6 @@ #ifdef CONFIG_PINCONF -#include <linux/pinctrl/machine.h> - struct pinctrl_dev; struct seq_file; @@ -31,7 +29,6 @@ struct seq_file; * @pin_config_group_get: get configurations for an entire pin group; should * return -ENOTSUPP and -EINVAL using the same rules as pin_config_get. * @pin_config_group_set: configure all pins in a group - * @pin_config_dbg_parse_modify: optional debugfs to modify a pin configuration * @pin_config_dbg_show: optional debugfs display hook that will provide * per-device info for a certain pin in debugfs * @pin_config_group_dbg_show: optional debugfs display hook that will provide @@ -57,9 +54,6 @@ struct pinconf_ops { unsigned selector, unsigned long *configs, unsigned num_configs); - int (*pin_config_dbg_parse_modify) (struct pinctrl_dev *pctldev, - const char *arg, - unsigned long *config); void (*pin_config_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s, unsigned offset); diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 5a3bb3b7c9ad..787d224ff43e 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -74,13 +74,6 @@ struct pipe_inode_info { */ struct pipe_buf_operations { /* - * This is set to 1, if the generic pipe read/write may coalesce - * data into an existing buffer. If this is set to 0, a new pipe - * page segment is always used for new data. - */ - int can_merge; - - /* * ->confirm() verifies that the data in the pipe buffer is there * and that the contents are good. 
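A minimal usage sketch of the extended ntb_peer_db_addr() wrapper above, showing a client fetching the peer doorbell address and write value so a DMA engine can ring the doorbell; the function name and error handling here are illustrative assumptions, not code from the tree.

#include <linux/ntb.h>

/* Illustrative only: obtain what is needed to ring peer doorbell bit 0. */
static int example_ntb_ring_peer_db(struct ntb_dev *ntb)
{
	phys_addr_t db_addr;
	resource_size_t db_size;
	u64 db_data;
	int rc;

	/* db_data receives the value that rings doorbell bit 0. */
	rc = ntb_peer_db_addr(ntb, &db_addr, &db_size, &db_data, 0);
	if (rc)
		return rc;

	/*
	 * A DMA engine could now be programmed to write db_data
	 * (db_size bytes) to db_addr, ringing the peer doorbell
	 * without CPU involvement.
	 */
	return 0;
}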
If the pages in the pipe belong * to a file system, we may need to wait for IO completion in this @@ -182,6 +175,7 @@ void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); +void pipe_buf_mark_unmergeable(struct pipe_buffer *buf); extern const struct pipe_buf_operations nosteal_pipe_buf_ops; diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 1a1d58ebffbf..f3eaf9ec00a1 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -1,12 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Driver for the Synopsys DesignWare DMA Controller * * Copyright (C) 2007 Atmel Corporation * Copyright (C) 2010-2011 ST Microelectronics - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef _PLATFORM_DATA_DMA_DW_H #define _PLATFORM_DATA_DMA_DW_H @@ -38,10 +35,6 @@ struct dw_dma_slave { /** * struct dw_dma_platform_data - Controller configuration parameters * @nr_channels: Number of channels supported by hardware (max 8) - * @is_private: The device channels should be marked as private and not for - * by the general purpose DMA channel allocator. - * @is_memcpy: The device channels do support memory-to-memory transfers. - * @is_idma32: The type of the DMA controller is iDMA32 * @chan_allocation_order: Allocate channels starting from 0 or 7 * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. * @block_size: Maximum block size supported by the controller @@ -53,9 +46,6 @@ struct dw_dma_slave { */ struct dw_dma_platform_data { unsigned int nr_channels; - bool is_private; - bool is_memcpy; - bool is_idma32; #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ unsigned char chan_allocation_order; diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h index 7d964e787299..9daea8d42a10 100644 --- a/include/linux/platform_data/dma-imx.h +++ b/include/linux/platform_data/dma-imx.h @@ -55,6 +55,7 @@ struct imx_dma_data { int dma_request2; /* secondary DMA request line */ enum sdma_peripheral_type peripheral_type; int priority; + struct device_node *of_node; }; static inline int imx_dma_is_ipu(struct dma_chan *chan) diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h index 1b2f86f96743..6d54fe3bcac9 100644 --- a/include/linux/platform_data/mlxreg.h +++ b/include/linux/platform_data/mlxreg.h @@ -35,6 +35,19 @@ #define __LINUX_PLATFORM_DATA_MLXREG_H #define MLXREG_CORE_LABEL_MAX_SIZE 32 +#define MLXREG_CORE_WD_FEATURE_NOWAYOUT BIT(0) +#define MLXREG_CORE_WD_FEATURE_START_AT_BOOT BIT(1) + +/** + * enum mlxreg_wdt_type - type of HW watchdog + * + * TYPE1 HW watchdog implementation exist in old systems. + * All new systems have TYPE2 HW watchdog. 
+ */ +enum mlxreg_wdt_type { + MLX_WDT_TYPE1, + MLX_WDT_TYPE2, +}; /** * struct mlxreg_hotplug_device - I2C device data: @@ -112,11 +125,17 @@ struct mlxreg_core_item { * @data: instance private data; * @regmap: register map of parent device; * @counter: number of instances; + * @features: supported features of device; + * @version: implementation version; + * @identity: device identity name; */ struct mlxreg_core_platform_data { struct mlxreg_core_data *data; void *regmap; int counter; + u32 features; + u32 version; + char identity[MLXREG_CORE_LABEL_MAX_SIZE]; }; /** diff --git a/include/linux/platform_data/wilco-ec.h b/include/linux/platform_data/wilco-ec.h new file mode 100644 index 000000000000..446473a46b88 --- /dev/null +++ b/include/linux/platform_data/wilco-ec.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ChromeOS Wilco Embedded Controller + * + * Copyright 2018 Google LLC + */ + +#ifndef WILCO_EC_H +#define WILCO_EC_H + +#include <linux/device.h> +#include <linux/kernel.h> + +/* Message flags for using the mailbox() interface */ +#define WILCO_EC_FLAG_NO_RESPONSE BIT(0) /* EC does not respond */ +#define WILCO_EC_FLAG_EXTENDED_DATA BIT(1) /* EC returns 256 data bytes */ +#define WILCO_EC_FLAG_RAW_REQUEST BIT(2) /* Do not trim request data */ +#define WILCO_EC_FLAG_RAW_RESPONSE BIT(3) /* Do not trim response data */ +#define WILCO_EC_FLAG_RAW (WILCO_EC_FLAG_RAW_REQUEST | \ + WILCO_EC_FLAG_RAW_RESPONSE) + +/* Normal commands have a maximum 32 bytes of data */ +#define EC_MAILBOX_DATA_SIZE 32 +/* Extended commands have 256 bytes of response data */ +#define EC_MAILBOX_DATA_SIZE_EXTENDED 256 + +/** + * struct wilco_ec_device - Wilco Embedded Controller handle. + * @dev: Device handle. + * @mailbox_lock: Mutex to ensure one mailbox command at a time. + * @io_command: I/O port for mailbox command. Provided by ACPI. + * @io_data: I/O port for mailbox data. Provided by ACPI. + * @io_packet: I/O port for mailbox packet data. Provided by ACPI. + * @data_buffer: Buffer used for EC communication. The same buffer + * is used to hold the request and the response. + * @data_size: Size of the data buffer used for EC communication. + * @debugfs_pdev: The child platform_device used by the debugfs sub-driver. + * @rtc_pdev: The child platform_device used by the RTC sub-driver. + */ +struct wilco_ec_device { + struct device *dev; + struct mutex mailbox_lock; + struct resource *io_command; + struct resource *io_data; + struct resource *io_packet; + void *data_buffer; + size_t data_size; + struct platform_device *debugfs_pdev; + struct platform_device *rtc_pdev; +}; + +/** + * struct wilco_ec_request - Mailbox request message format. + * @struct_version: Should be %EC_MAILBOX_PROTO_VERSION + * @checksum: Sum of all bytes must be 0. + * @mailbox_id: Mailbox identifier, specifies the command set. + * @mailbox_version: Mailbox interface version %EC_MAILBOX_VERSION + * @reserved: Set to zero. + * @data_size: Length of request, data + last 2 bytes of the header. + * @command: Mailbox command code, unique for each mailbox_id set. + * @reserved_raw: Set to zero for most commands, but is used by + * some command types and for raw commands. + */ +struct wilco_ec_request { + u8 struct_version; + u8 checksum; + u16 mailbox_id; + u8 mailbox_version; + u8 reserved; + u16 data_size; + u8 command; + u8 reserved_raw; +} __packed; + +/** + * struct wilco_ec_response - Mailbox response message format. 
+ * @struct_version: Should be %EC_MAILBOX_PROTO_VERSION + * @checksum: Sum of all bytes must be 0. + * @result: Result code from the EC. Non-zero indicates an error. + * @data_size: Length of the response data buffer. + * @reserved: Set to zero. + * @mbox0: EC returned data at offset 0 is unused (always 0) so this byte + * is treated as part of the header instead of the data. + * @data: Response data buffer. Max size is %EC_MAILBOX_DATA_SIZE_EXTENDED. + */ +struct wilco_ec_response { + u8 struct_version; + u8 checksum; + u16 result; + u16 data_size; + u8 reserved[2]; + u8 mbox0; + u8 data[0]; +} __packed; + +/** + * enum wilco_ec_msg_type - Message type to select a set of command codes. + * @WILCO_EC_MSG_LEGACY: Legacy EC messages for standard EC behavior. + * @WILCO_EC_MSG_PROPERTY: Get/Set/Sync EC controlled NVRAM property. + * @WILCO_EC_MSG_TELEMETRY_SHORT: 32 bytes of telemetry data provided by the EC. + * @WILCO_EC_MSG_TELEMETRY_LONG: 256 bytes of telemetry data provided by the EC. + */ +enum wilco_ec_msg_type { + WILCO_EC_MSG_LEGACY = 0x00f0, + WILCO_EC_MSG_PROPERTY = 0x00f2, + WILCO_EC_MSG_TELEMETRY_SHORT = 0x00f5, + WILCO_EC_MSG_TELEMETRY_LONG = 0x00f6, +}; + +/** + * struct wilco_ec_message - Request and response message. + * @type: Mailbox message type. + * @flags: Message flags, e.g. %WILCO_EC_FLAG_NO_RESPONSE. + * @command: Mailbox command code. + * @result: Result code from the EC. Non-zero indicates an error. + * @request_size: Number of bytes to send to the EC. + * @request_data: Buffer containing the request data. + * @response_size: Number of bytes expected from the EC. + * This is 32 by default and 256 if the flag + * is set for %WILCO_EC_FLAG_EXTENDED_DATA + * @response_data: Buffer containing the response data, should be + * response_size bytes and allocated by caller. + */ +struct wilco_ec_message { + enum wilco_ec_msg_type type; + u8 flags; + u8 command; + u8 result; + size_t request_size; + void *request_data; + size_t response_size; + void *response_data; +}; + +/** + * wilco_ec_mailbox() - Send request to the EC and receive the response. + * @ec: Wilco EC device. + * @msg: Wilco EC message. + * + * Return: Number of bytes received or negative error code on failure. 
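As a usage illustration of the mailbox interface described above, a hedged sketch of a caller filling in struct wilco_ec_message and invoking wilco_ec_mailbox(); the command code and helper name are made up for the example and are not defined by this header.

#include <linux/platform_data/wilco-ec.h>

/* Hypothetical command code, for illustration only. */
#define EXAMPLE_EC_CMD_GET_VERSION	0x38

static int example_wilco_ec_get_version(struct wilco_ec_device *ec,
					u8 *buf, size_t len)
{
	struct wilco_ec_message msg = {
		.type = WILCO_EC_MSG_LEGACY,
		.command = EXAMPLE_EC_CMD_GET_VERSION,
		.request_data = NULL,
		.request_size = 0,
		.response_data = buf,
		/* at most EC_MAILBOX_DATA_SIZE unless the extended flag is set */
		.response_size = len,
	};

	/* Returns the number of bytes received or a negative errno. */
	return wilco_ec_mailbox(ec, &msg);
}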
+ */ +int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg); + +#endif /* WILCO_EC_H */ diff --git a/include/linux/platform_data/clk-lpss.h b/include/linux/platform_data/x86/clk-lpss.h index 23901992b9dd..23901992b9dd 100644 --- a/include/linux/platform_data/clk-lpss.h +++ b/include/linux/platform_data/x86/clk-lpss.h diff --git a/include/linux/pm.h b/include/linux/pm.h index 06f7ed893928..66c19a65a514 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -643,7 +643,6 @@ struct dev_pm_info { struct dev_pm_qos *qos; }; -extern void update_pm_runtime_accounting(struct device *dev); extern int dev_pm_get_subsys_data(struct device *dev); extern void dev_pm_put_subsys_data(struct device *dev); diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 4238dde0aaf0..0ff134d6575a 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -96,7 +96,6 @@ static inline void device_set_wakeup_path(struct device *dev) /* drivers/base/power/wakeup.c */ extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name); extern struct wakeup_source *wakeup_source_create(const char *name); -extern void wakeup_source_drop(struct wakeup_source *ws); extern void wakeup_source_destroy(struct wakeup_source *ws); extern void wakeup_source_add(struct wakeup_source *ws); extern void wakeup_source_remove(struct wakeup_source *ws); @@ -134,8 +133,6 @@ static inline struct wakeup_source *wakeup_source_create(const char *name) return NULL; } -static inline void wakeup_source_drop(struct wakeup_source *ws) {} - static inline void wakeup_source_destroy(struct wakeup_source *ws) {} static inline void wakeup_source_add(struct wakeup_source *ws) {} @@ -204,12 +201,6 @@ static inline void wakeup_source_init(struct wakeup_source *ws, wakeup_source_add(ws); } -static inline void wakeup_source_trash(struct wakeup_source *ws) -{ - wakeup_source_remove(ws); - wakeup_source_drop(ws); -} - static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) { return pm_wakeup_ws_event(ws, msec, false); diff --git a/include/linux/poison.h b/include/linux/poison.h index 5046bad0c1c5..d6d980a681c7 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -83,9 +83,6 @@ #define MUTEX_DEBUG_FREE 0x22 #define MUTEX_POISON_WW_CTX ((void *) 0x500 + POISON_POINTER_DELTA) -/********** lib/flex_array.c **********/ -#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */ - /********** security/ **********/ #define KEY_DESTROY 0xbd diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index d0e1f1522a78..52a283ba0465 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -73,6 +73,7 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo int (*show)(struct seq_file *, void *), proc_write_t write, void *data); +extern struct pid *tgid_pidfd_to_pid(const struct file *file); #else /* CONFIG_PROC_FS */ @@ -114,6 +115,11 @@ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *p #define proc_create_net(name, mode, parent, state_size, ops) ({NULL;}) #define proc_create_net_single(name, mode, parent, show, data) ({NULL;}) +static inline struct pid *tgid_pidfd_to_pid(const struct file *file) +{ + return ERR_PTR(-EBADF); +} + #endif /* CONFIG_PROC_FS */ struct net; diff --git a/include/linux/pwm.h b/include/linux/pwm.h index d5199b507d79..b628abfffacc 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -242,11 +242,7 @@ pwm_set_relative_duty_cycle(struct pwm_state 
*state, unsigned int duty_cycle, * struct pwm_ops - PWM controller operations * @request: optional hook for requesting a PWM * @free: optional hook for freeing a PWM - * @config: configure duty cycles and period length for this PWM - * @set_polarity: configure the polarity of this PWM * @capture: capture and report PWM signal - * @enable: enable PWM output toggling - * @disable: disable PWM output toggling * @apply: atomically apply a new PWM config. The state argument * should be adjusted with the real hardware config (if the * approximate the period or duty_cycle value, state should @@ -254,53 +250,56 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle, * @get_state: get the current PWM state. This function is only * called once per PWM device when the PWM chip is * registered. - * @dbg_show: optional routine to show contents in debugfs * @owner: helps prevent removal of modules exporting active PWMs + * @config: configure duty cycles and period length for this PWM + * @set_polarity: configure the polarity of this PWM + * @enable: enable PWM output toggling + * @disable: disable PWM output toggling */ struct pwm_ops { int (*request)(struct pwm_chip *chip, struct pwm_device *pwm); void (*free)(struct pwm_chip *chip, struct pwm_device *pwm); - int (*config)(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns); - int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm, - enum pwm_polarity polarity); int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_capture *result, unsigned long timeout); - int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm); - void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm); int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state); void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state); -#ifdef CONFIG_DEBUG_FS - void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s); -#endif struct module *owner; + + /* Only used by legacy drivers */ + int (*config)(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns); + int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm, + enum pwm_polarity polarity); + int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm); + void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm); }; /** * struct pwm_chip - abstract a PWM controller * @dev: device providing the PWMs - * @list: list node for internal use * @ops: callbacks for this PWM controller * @base: number of first PWM controlled by this chip * @npwm: number of PWMs controlled by this chip - * @pwms: array of PWM devices allocated by the framework * @of_xlate: request a PWM device given a device tree PWM specifier * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier + * @list: list node for internal use + * @pwms: array of PWM devices allocated by the framework */ struct pwm_chip { struct device *dev; - struct list_head list; const struct pwm_ops *ops; int base; unsigned int npwm; - struct pwm_device *pwms; - struct pwm_device * (*of_xlate)(struct pwm_chip *pc, const struct of_phandle_args *args); unsigned int of_pwm_n_cells; + + /* only used internally by the PWM framework */ + struct list_head list; + struct pwm_device *pwms; }; /** diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 507a2b524208..04d04709f2bd 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -345,9 +345,9 @@ struct firmware; * @stop: 
power off the device * @kick: kick a virtqueue (virtqueue id given as a parameter) * @da_to_va: optional platform hook to perform address translations - * @load_rsc_table: load resource table from firmware image + * @parse_fw: parse firmware to extract information (e.g. resource table) * @find_loaded_rsc_table: find the loaded resouce table - * @load: load firmeware to memory, where the remote processor + * @load: load firmware to memory, where the remote processor * expects to find it * @sanity_check: sanity check the fw image * @get_boot_addr: get boot address to entry point specified in firmware @@ -554,11 +554,11 @@ struct rproc_vdev { struct kref refcount; struct rproc_subdev subdev; + struct device dev; unsigned int id; struct list_head node; struct rproc *rproc; - struct virtio_device vdev; struct rproc_vring vring[RVDEV_NUM_VRINGS]; u32 rsc_offset; u32 index; @@ -601,7 +601,7 @@ int rproc_coredump_add_custom_segment(struct rproc *rproc, static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev) { - return container_of(vdev, struct rproc_vdev, vdev); + return container_of(vdev->dev.parent, struct rproc_vdev, dev); } static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev) diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 5b9ae62272bb..1a40277b512c 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -128,7 +128,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, unsigned long *lost_events); struct ring_buffer_iter * -ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu); +ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags); void ring_buffer_read_prepare_sync(void); void ring_buffer_read_start(struct ring_buffer_iter *iter); void ring_buffer_read_finish(struct ring_buffer_iter *iter); @@ -187,8 +187,6 @@ void ring_buffer_set_clock(struct ring_buffer *buffer, void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs); bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer); -size_t ring_buffer_page_len(void *page); - size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu); size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu); diff --git a/include/linux/security.h b/include/linux/security.h index 2b35a43d11d6..49f2685324b0 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -53,6 +53,9 @@ struct msg_msg; struct xattr; struct xfrm_sec_ctx; struct mm_struct; +struct fs_context; +struct fs_parameter; +enum fs_value_type; /* Default (no) options for the capable function */ #define CAP_OPT_NONE 0x0 @@ -61,7 +64,7 @@ struct mm_struct; /* If capable is being called by a setid function */ #define CAP_OPT_INSETID BIT(2) -/* LSM Agnostic defines for sb_set_mnt_opts */ +/* LSM Agnostic defines for fs_context::lsm_flags */ #define SECURITY_LSM_NATIVE_LABELS 1 struct ctl_table; @@ -223,6 +226,8 @@ int security_bprm_set_creds(struct linux_binprm *bprm); int security_bprm_check(struct linux_binprm *bprm); void security_bprm_committing_creds(struct linux_binprm *bprm); void security_bprm_committed_creds(struct linux_binprm *bprm); +int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc); +int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param); int security_sb_alloc(struct super_block *sb); void security_sb_free(struct super_block *sb); void security_free_mnt_opts(void **mnt_opts); @@ -519,6 +524,17 @@ static inline void 
security_bprm_committed_creds(struct linux_binprm *bprm) { } +static inline int security_fs_context_dup(struct fs_context *fc, + struct fs_context *src_fc) +{ + return 0; +} +static inline int security_fs_context_parse_param(struct fs_context *fc, + struct fs_parameter *param) +{ + return -ENOPARAM; +} + static inline int security_sb_alloc(struct super_block *sb) { return 0; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index b48bb8b42fc8..9027a8c4219f 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -327,26 +327,49 @@ struct skb_frag_struct { #endif }; +/** + * skb_frag_size - Returns the size of a skb fragment + * @frag: skb fragment + */ static inline unsigned int skb_frag_size(const skb_frag_t *frag) { return frag->size; } +/** + * skb_frag_size_set - Sets the size of a skb fragment + * @frag: skb fragment + * @size: size of fragment + */ static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size) { frag->size = size; } +/** + * skb_frag_size_add - Incrementes the size of a skb fragment by %delta + * @frag: skb fragment + * @delta: value to add + */ static inline void skb_frag_size_add(skb_frag_t *frag, int delta) { frag->size += delta; } +/** + * skb_frag_size_sub - Decrements the size of a skb fragment by %delta + * @frag: skb fragment + * @delta: value to subtract + */ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) { frag->size -= delta; } +/** + * skb_frag_must_loop - Test if %p is a high memory page + * @p: fragment's page + */ static inline bool skb_frag_must_loop(struct page *p) { #if defined(CONFIG_HIGHMEM) @@ -590,7 +613,7 @@ typedef unsigned int sk_buff_data_t; typedef unsigned char *sk_buff_data_t; #endif -/** +/** * struct sk_buff - socket buffer * @next: Next buffer in list * @prev: Previous buffer in list @@ -648,7 +671,7 @@ typedef unsigned char *sk_buff_data_t; * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL * @dst_pending_confirm: need to confirm neighbour * @decrypted: Decrypted SKB - * @napi_id: id of the NAPI struct this skb came from + * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking * @mark: Generic packet mark * @vlan_proto: vlan encapsulation protocol @@ -883,7 +906,10 @@ struct sk_buff { #define SKB_ALLOC_RX 0x02 #define SKB_ALLOC_NAPI 0x04 -/* Returns true if the skb was allocated from PFMEMALLOC reserves */ +/** + * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves + * @skb: buffer + */ static inline bool skb_pfmemalloc(const struct sk_buff *skb) { return unlikely(skb->pfmemalloc); @@ -905,7 +931,7 @@ static inline bool skb_pfmemalloc(const struct sk_buff *skb) */ static inline struct dst_entry *skb_dst(const struct sk_buff *skb) { - /* If refdst was not refcounted, check we still are in a + /* If refdst was not refcounted, check we still are in a * rcu_read_lock section */ WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) && @@ -952,6 +978,10 @@ static inline bool skb_dst_is_noref(const struct sk_buff *skb) return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb); } +/** + * skb_rtable - Returns the skb &rtable + * @skb: buffer + */ static inline struct rtable *skb_rtable(const struct sk_buff *skb) { return (struct rtable *)skb_dst(skb); @@ -966,6 +996,10 @@ static inline bool skb_pkt_type_ok(u32 ptype) return ptype <= PACKET_OTHERHOST; } +/** + * skb_napi_id - Returns the skb's NAPI id + * @skb: buffer + */ static inline unsigned int skb_napi_id(const struct sk_buff *skb) { #ifdef CONFIG_NET_RX_BUSY_POLL @@ -975,7 +1009,12 @@ static 
inline unsigned int skb_napi_id(const struct sk_buff *skb) #endif } -/* decrement the reference count and return true if we can free the skb */ +/** + * skb_unref - decrement the skb's reference count + * @skb: buffer + * + * Returns true if we can free the skb. + */ static inline bool skb_unref(struct sk_buff *skb) { if (unlikely(!skb)) @@ -1005,6 +1044,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags, int node); struct sk_buff *__build_skb(void *data, unsigned int frag_size); struct sk_buff *build_skb(void *data, unsigned int frag_size); + +/** + * alloc_skb - allocate a network buffer + * @size: size to allocate + * @priority: allocation mask + * + * This function is a convenient wrapper around __alloc_skb(). + */ static inline struct sk_buff *alloc_skb(unsigned int size, gfp_t priority) { @@ -1047,6 +1094,13 @@ static inline bool skb_fclone_busy(const struct sock *sk, fclones->skb2.sk == sk; } +/** + * alloc_skb_fclone - allocate a network buffer from fclone cache + * @size: size to allocate + * @priority: allocation mask + * + * This function is a convenient wrapper around __alloc_skb(). + */ static inline struct sk_buff *alloc_skb_fclone(unsigned int size, gfp_t priority) { @@ -4232,10 +4286,10 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb) return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; } +/* Note: Should be called only if skb_is_gso(skb) is true */ static inline bool skb_is_gso_tcp(const struct sk_buff *skb) { - return skb_is_gso(skb) && - skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6); + return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6); } static inline void skb_gso_reset(struct sk_buff *skb) diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index eed3cb16ccf1..5f9076fdb090 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -74,14 +74,12 @@ struct rpc_cred_cache; struct rpc_authops; struct rpc_auth { unsigned int au_cslack; /* call cred size estimate */ - /* guess at number of u32's auth adds before - * reply data; normally the verifier size: */ - unsigned int au_rslack; - /* for gss, used to calculate au_rslack: */ - unsigned int au_verfsize; - - unsigned int au_flags; /* various flags */ - const struct rpc_authops *au_ops; /* operations */ + unsigned int au_rslack; /* reply cred size estimate */ + unsigned int au_verfsize; /* size of reply verifier */ + unsigned int au_ralign; /* words before UL header */ + + unsigned int au_flags; + const struct rpc_authops *au_ops; rpc_authflavor_t au_flavor; /* pseudoflavor (note may * differ from the flavor in * au_ops->au_flavor in gss @@ -131,13 +129,15 @@ struct rpc_credops { void (*crdestroy)(struct rpc_cred *); int (*crmatch)(struct auth_cred *, struct rpc_cred *, int); - __be32 * (*crmarshal)(struct rpc_task *, __be32 *); + int (*crmarshal)(struct rpc_task *task, + struct xdr_stream *xdr); int (*crrefresh)(struct rpc_task *); - __be32 * (*crvalidate)(struct rpc_task *, __be32 *); - int (*crwrap_req)(struct rpc_task *, kxdreproc_t, - void *, __be32 *, void *); - int (*crunwrap_resp)(struct rpc_task *, kxdrdproc_t, - void *, __be32 *, void *); + int (*crvalidate)(struct rpc_task *task, + struct xdr_stream *xdr); + int (*crwrap_req)(struct rpc_task *task, + struct xdr_stream *xdr); + int (*crunwrap_resp)(struct rpc_task *task, + struct xdr_stream *xdr); int (*crkey_timeout)(struct rpc_cred *); char * (*crstringify_acceptor)(struct rpc_cred *); bool (*crneed_reencode)(struct rpc_task *); @@ -165,10 +165,18 @@ 
struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred * void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *); struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int); void put_rpccred(struct rpc_cred *); -__be32 * rpcauth_marshcred(struct rpc_task *, __be32 *); -__be32 * rpcauth_checkverf(struct rpc_task *, __be32 *); -int rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj); -int rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj); +int rpcauth_marshcred(struct rpc_task *task, + struct xdr_stream *xdr); +int rpcauth_checkverf(struct rpc_task *task, + struct xdr_stream *xdr); +int rpcauth_wrap_req_encode(struct rpc_task *task, + struct xdr_stream *xdr); +int rpcauth_wrap_req(struct rpc_task *task, + struct xdr_stream *xdr); +int rpcauth_unwrap_resp_decode(struct rpc_task *task, + struct xdr_stream *xdr); +int rpcauth_unwrap_resp(struct rpc_task *task, + struct xdr_stream *xdr); bool rpcauth_xmit_need_reencode(struct rpc_task *task); int rpcauth_refreshcred(struct rpc_task *); void rpcauth_invalcred(struct rpc_task *); diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 1c441714d569..98bc9883b230 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -169,6 +169,9 @@ int rpcb_v4_register(struct net *net, const u32 program, const char *netid); void rpcb_getport_async(struct rpc_task *); +void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages, + unsigned int base, unsigned int len, + unsigned int hdrsize); void rpc_call_start(struct rpc_task *); int rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags, diff --git a/include/linux/sunrpc/gss_krb5_enctypes.h b/include/linux/sunrpc/gss_krb5_enctypes.h index ec6234eee89c..981c89cef19d 100644 --- a/include/linux/sunrpc/gss_krb5_enctypes.h +++ b/include/linux/sunrpc/gss_krb5_enctypes.h @@ -1,4 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* - * Dumb way to share this static piece of information with nfsd + * Define the string that exports the set of kernel-supported + * Kerberos enctypes. This list is sent via upcall to gssd, and + * is also exposed via the nfsd /proc API. The consumers generally + * treat this as an ordered list, where the first item in the list + * is the most preferred. + */ + +#ifndef _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H +#define _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H + +#ifdef CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES + +/* + * NB: This list includes encryption types that were deprecated + * by RFC 8429 (DES3_CBC_SHA1 and ARCFOUR_HMAC). + * + * ENCTYPE_AES256_CTS_HMAC_SHA1_96 + * ENCTYPE_AES128_CTS_HMAC_SHA1_96 + * ENCTYPE_DES3_CBC_SHA1 + * ENCTYPE_ARCFOUR_HMAC + */ +#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23" + +#else /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */ + +/* + * NB: This list includes encryption types that were deprecated + * by RFC 8429 and RFC 6649. 
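A minimal sketch of the new xdr_stream-based crmarshal() calling convention introduced above. The AUTH_NULL flavor is used purely as an illustration (a zero-length credential and verifier); this is not the tree's implementation, and the error value is chosen for the example.

#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/xdr.h>

/* Illustrative crmarshal(): emit an AUTH_NULL credential and verifier. */
static int example_null_marshal(struct rpc_task *task, struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		return -EMSGSIZE;

	*p++ = rpc_auth_null;	/* credential flavor */
	*p++ = xdr_zero;	/* credential length */
	*p++ = rpc_auth_null;	/* verifier flavor */
	*p   = xdr_zero;	/* verifier length */
	return 0;
}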
+ * + * ENCTYPE_AES256_CTS_HMAC_SHA1_96 + * ENCTYPE_AES128_CTS_HMAC_SHA1_96 + * ENCTYPE_DES3_CBC_SHA1 + * ENCTYPE_ARCFOUR_HMAC + * ENCTYPE_DES_CBC_MD5 + * ENCTYPE_DES_CBC_CRC + * ENCTYPE_DES_CBC_MD4 */ #define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2" + +#endif /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */ + +#endif /* _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H */ diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 219aa3910a0c..ec861cd0cfe8 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -97,6 +97,7 @@ typedef void (*rpc_action)(struct rpc_task *); struct rpc_call_ops { void (*rpc_call_prepare)(struct rpc_task *, void *); + void (*rpc_call_prepare_transmit)(struct rpc_task *, void *); void (*rpc_call_done)(struct rpc_task *, void *); void (*rpc_count_stats)(struct rpc_task *, void *); void (*rpc_release)(void *); @@ -303,4 +304,12 @@ rpc_clnt_swap_deactivate(struct rpc_clnt *clnt) } #endif /* CONFIG_SUNRPC_SWAP */ +static inline bool +rpc_task_need_resched(const struct rpc_task *task) +{ + if (RPC_IS_QUEUED(task) || task->tk_callback) + return true; + return false; +} + #endif /* _LINUX_SUNRPC_SCHED_H_ */ diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 2ec128060239..9ee3970ba59c 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -87,6 +87,16 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) #define xdr_one cpu_to_be32(1) #define xdr_two cpu_to_be32(2) +#define rpc_auth_null cpu_to_be32(RPC_AUTH_NULL) +#define rpc_auth_unix cpu_to_be32(RPC_AUTH_UNIX) +#define rpc_auth_short cpu_to_be32(RPC_AUTH_SHORT) +#define rpc_auth_gss cpu_to_be32(RPC_AUTH_GSS) + +#define rpc_call cpu_to_be32(RPC_CALL) +#define rpc_reply cpu_to_be32(RPC_REPLY) + +#define rpc_msg_accepted cpu_to_be32(RPC_MSG_ACCEPTED) + #define rpc_success cpu_to_be32(RPC_SUCCESS) #define rpc_prog_unavail cpu_to_be32(RPC_PROG_UNAVAIL) #define rpc_prog_mismatch cpu_to_be32(RPC_PROG_MISMATCH) @@ -95,6 +105,9 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) #define rpc_system_err cpu_to_be32(RPC_SYSTEM_ERR) #define rpc_drop_reply cpu_to_be32(RPC_DROP_REPLY) +#define rpc_mismatch cpu_to_be32(RPC_MISMATCH) +#define rpc_auth_error cpu_to_be32(RPC_AUTH_ERROR) + #define rpc_auth_ok cpu_to_be32(RPC_AUTH_OK) #define rpc_autherr_badcred cpu_to_be32(RPC_AUTH_BADCRED) #define rpc_autherr_rejectedcred cpu_to_be32(RPC_AUTH_REJECTEDCRED) @@ -103,7 +116,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) #define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK) #define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM) #define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM) -#define rpc_autherr_oldseqnum cpu_to_be32(101) /* * Miscellaneous XDR helper functions @@ -167,7 +179,6 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p) extern void xdr_shift_buf(struct xdr_buf *, size_t); extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); -extern void xdr_buf_trim(struct xdr_buf *, unsigned int); extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, unsigned int); extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); @@ -217,6 +228,8 @@ struct xdr_stream { struct kvec scratch; /* Scratch buffer */ struct page **page_ptr; /* pointer to the current page */ 
unsigned int nwords; /* Remaining decode buffer length */ + + struct rpc_rqst *rqst; /* For debugging */ }; /* @@ -227,7 +240,8 @@ typedef void (*kxdreproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr, typedef int (*kxdrdproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr, void *obj); -extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p); +extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, + __be32 *p, struct rpc_rqst *rqst); extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes); extern void xdr_commit_encode(struct xdr_stream *xdr); extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len); @@ -235,7 +249,8 @@ extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen); extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base, unsigned int len); extern unsigned int xdr_stream_pos(const struct xdr_stream *xdr); -extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p); +extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, + __be32 *p, struct rpc_rqst *rqst); extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, struct page **pages, unsigned int len); extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen); diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index ad7e910b119d..3a391544299e 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -196,8 +196,6 @@ struct rpc_xprt { size_t max_payload; /* largest RPC payload size, in bytes */ - unsigned int tsh_size; /* size of transport specific - header */ struct rpc_wait_queue binding; /* requests waiting on rpcbind */ struct rpc_wait_queue sending; /* requests waiting to send */ @@ -362,11 +360,6 @@ struct rpc_xprt * xprt_alloc(struct net *net, size_t size, unsigned int max_req); void xprt_free(struct rpc_xprt *); -static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p) -{ - return p + xprt->tsh_size; -} - static inline int xprt_enable_swap(struct rpc_xprt *xprt) { diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 458bfe0137f5..b81d0b3e0799 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -26,6 +26,7 @@ struct sock_xprt { */ struct socket * sock; struct sock * inet; + struct file * file; /* * State of TCP reply receive diff --git a/include/linux/swap.h b/include/linux/swap.h index fc50e21b3b88..4bfb5c4ac108 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -157,9 +157,9 @@ struct swap_extent { /* * Max bad pages in the new format.. */ -#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x) #define MAX_SWAP_BADPAGES \ - ((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int)) + ((offsetof(union swap_header, magic.magic) - \ + offsetof(union swap_header, info.badpages)) / sizeof(int)) enum { SWP_USED = (1 << 0), /* is slot in swap_info[] used? 
*/ diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h index eee0412bdf4b..52a079b3a9a6 100644 --- a/include/linux/switchtec.h +++ b/include/linux/switchtec.h @@ -248,9 +248,13 @@ struct ntb_ctrl_regs { u32 win_size; u64 xlate_addr; } bar_entry[6]; - u32 reserved2[216]; - u32 req_id_table[256]; - u32 reserved3[512]; + struct { + u32 win_size; + u32 reserved[3]; + } bar_ext_entry[6]; + u32 reserved2[192]; + u32 req_id_table[512]; + u32 reserved3[256]; u64 lut_entry[512]; } __packed; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index c2962953bf11..e446806a561f 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -985,6 +985,9 @@ asmlinkage long sys_statx(int dfd, const char __user *path, unsigned flags, unsigned mask, struct statx __user *buffer); asmlinkage long sys_rseq(struct rseq __user *rseq, uint32_t rseq_len, int flags, uint32_t sig); +asmlinkage long sys_pidfd_send_signal(int pidfd, int sig, + siginfo_t __user *info, + unsigned int flags); /* * Architecture-specific system calls diff --git a/include/linux/tpm.h b/include/linux/tpm.h index b49a55cf775f..1b5436b213a2 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -22,12 +22,41 @@ #ifndef __LINUX_TPM_H__ #define __LINUX_TPM_H__ +#include <linux/hw_random.h> +#include <linux/acpi.h> +#include <linux/cdev.h> +#include <linux/fs.h> +#include <crypto/hash_info.h> + #define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */ +#define TPM_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE struct tpm_chip; struct trusted_key_payload; struct trusted_key_options; +enum tpm_algorithms { + TPM_ALG_ERROR = 0x0000, + TPM_ALG_SHA1 = 0x0004, + TPM_ALG_KEYEDHASH = 0x0008, + TPM_ALG_SHA256 = 0x000B, + TPM_ALG_SHA384 = 0x000C, + TPM_ALG_SHA512 = 0x000D, + TPM_ALG_NULL = 0x0010, + TPM_ALG_SM3_256 = 0x0012, +}; + +struct tpm_digest { + u16 alg_id; + u8 digest[TPM_MAX_DIGEST_SIZE]; +} __packed; + +struct tpm_bank_info { + u16 alg_id; + u16 digest_size; + u16 crypto_id; +}; + enum TPM_OPS_FLAGS { TPM_OPS_AUTO_STARTUP = BIT(0), }; @@ -41,7 +70,7 @@ struct tpm_class_ops { int (*send) (struct tpm_chip *chip, u8 *buf, size_t len); void (*cancel) (struct tpm_chip *chip); u8 (*status) (struct tpm_chip *chip); - bool (*update_timeouts)(struct tpm_chip *chip, + void (*update_timeouts)(struct tpm_chip *chip, unsigned long *timeout_cap); int (*go_idle)(struct tpm_chip *chip); int (*cmd_ready)(struct tpm_chip *chip); @@ -50,11 +79,100 @@ struct tpm_class_ops { void (*clk_enable)(struct tpm_chip *chip, bool value); }; +#define TPM_NUM_EVENT_LOG_FILES 3 + +/* Indexes the duration array */ +enum tpm_duration { + TPM_SHORT = 0, + TPM_MEDIUM = 1, + TPM_LONG = 2, + TPM_LONG_LONG = 3, + TPM_UNDEFINED, + TPM_NUM_DURATIONS = TPM_UNDEFINED, +}; + +#define TPM_PPI_VERSION_LEN 3 + +struct tpm_space { + u32 context_tbl[3]; + u8 *context_buf; + u32 session_tbl[3]; + u8 *session_buf; +}; + +struct tpm_bios_log { + void *bios_event_log; + void *bios_event_log_end; +}; + +struct tpm_chip_seqops { + struct tpm_chip *chip; + const struct seq_operations *seqops; +}; + +struct tpm_chip { + struct device dev; + struct device devs; + struct cdev cdev; + struct cdev cdevs; + + /* A driver callback under ops cannot be run unless ops_sem is held + * (sometimes implicitly, eg for the sysfs code). ops becomes null + * when the driver is unregistered, see tpm_try_get_ops. 
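To illustrate the per-bank digest types introduced above (struct tpm_digest, struct tpm_bank_info and the bank bookkeeping in struct tpm_chip), a hedged sketch of extending a PCR in every allocated bank with the revised tpm_pcr_extend(). The helper name is illustrative, @data is assumed to hold at least the largest bank digest, and a real caller would normally compute a separate hash per bank.

#include <linux/tpm.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_extend_all_banks(struct tpm_chip *chip, const u8 *data)
{
	struct tpm_digest *digests;
	int i, rc;

	digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
			  GFP_KERNEL);
	if (!digests)
		return -ENOMEM;

	for (i = 0; i < chip->nr_allocated_banks; i++) {
		digests[i].alg_id = chip->allocated_banks[i].alg_id;
		memcpy(digests[i].digest, data,
		       min_t(u16, chip->allocated_banks[i].digest_size,
			     TPM_MAX_DIGEST_SIZE));
	}

	rc = tpm_pcr_extend(chip, 10, digests);
	kfree(digests);
	return rc;
}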
+ */ + struct rw_semaphore ops_sem; + const struct tpm_class_ops *ops; + + struct tpm_bios_log log; + struct tpm_chip_seqops bin_log_seqops; + struct tpm_chip_seqops ascii_log_seqops; + + unsigned int flags; + + int dev_num; /* /dev/tpm# */ + unsigned long is_open; /* only one allowed */ + + char hwrng_name[64]; + struct hwrng hwrng; + + struct mutex tpm_mutex; /* tpm is processing */ + + unsigned long timeout_a; /* jiffies */ + unsigned long timeout_b; /* jiffies */ + unsigned long timeout_c; /* jiffies */ + unsigned long timeout_d; /* jiffies */ + bool timeout_adjusted; + unsigned long duration[TPM_NUM_DURATIONS]; /* jiffies */ + bool duration_adjusted; + + struct dentry *bios_dir[TPM_NUM_EVENT_LOG_FILES]; + + const struct attribute_group *groups[3]; + unsigned int groups_cnt; + + u32 nr_allocated_banks; + struct tpm_bank_info *allocated_banks; +#ifdef CONFIG_ACPI + acpi_handle acpi_dev_handle; + char ppi_version[TPM_PPI_VERSION_LEN + 1]; +#endif /* CONFIG_ACPI */ + + struct tpm_space work_space; + u32 last_cc; + u32 nr_commands; + u32 *cc_attrs_tbl; + + /* active locality */ + int locality; +}; + #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) extern int tpm_is_tpm2(struct tpm_chip *chip); -extern int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf); -extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, const u8 *hash); +extern int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, + struct tpm_digest *digest); +extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, + struct tpm_digest *digests); extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen); extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max); extern int tpm_seal_trusted(struct tpm_chip *chip, @@ -70,13 +188,14 @@ static inline int tpm_is_tpm2(struct tpm_chip *chip) return -ENODEV; } -static inline int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf) +static inline int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, + struct tpm_digest *digest) { return -ENODEV; } static inline int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, - const u8 *hash) + struct tpm_digest *digests) { return -ENODEV; } diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h index 20d9da77fc11..81519f163211 100644 --- a/include/linux/tpm_eventlog.h +++ b/include/linux/tpm_eventlog.h @@ -3,12 +3,11 @@ #ifndef __LINUX_TPM_EVENTLOG_H__ #define __LINUX_TPM_EVENTLOG_H__ -#include <crypto/hash_info.h> +#include <linux/tpm.h> #define TCG_EVENT_NAME_LEN_MAX 255 #define MAX_TEXT_EVENT 1000 /* Max event string length */ #define ACPI_TCPA_SIG "TCPA" /* 0x41504354 /'TCPA' */ -#define TPM2_ACTIVE_PCR_BANKS 3 #define EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2 0x1 #define EFI_TCG2_EVENT_LOG_FORMAT_TCG_2 0x2 @@ -82,7 +81,7 @@ struct tcg_efi_specid_event_algs { u16 digest_size; } __packed; -struct tcg_efi_specid_event { +struct tcg_efi_specid_event_head { u8 signature[16]; u32 platform_class; u8 spec_version_minor; @@ -90,9 +89,7 @@ struct tcg_efi_specid_event { u8 spec_errata; u8 uintnsize; u32 num_algs; - struct tcg_efi_specid_event_algs digest_sizes[TPM2_ACTIVE_PCR_BANKS]; - u8 vendor_info_size; - u8 vendor_info[0]; + struct tcg_efi_specid_event_algs digest_sizes[]; } __packed; struct tcg_pcr_event { @@ -108,17 +105,11 @@ struct tcg_event_field { u8 event[0]; } __packed; -struct tpm2_digest { - u16 alg_id; - u8 digest[SHA512_DIGEST_SIZE]; -} __packed; - -struct tcg_pcr_event2 { +struct tcg_pcr_event2_head { u32 pcr_idx; u32 event_type; u32 count; - struct tpm2_digest 
digests[TPM2_ACTIVE_PCR_BANKS]; - struct tcg_event_field event; + struct tpm_digest digests[]; } __packed; #endif diff --git a/include/linux/uio.h b/include/linux/uio.h index ecf584f6b82d..87477e1640f9 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -110,14 +110,6 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter) }; } -#define iov_for_each(iov, iter, start) \ - if (iov_iter_type(start) == ITER_IOVEC || \ - iov_iter_type(start) == ITER_KVEC) \ - for (iter = (start); \ - (iter).count && \ - ((iov = iov_iter_iovec(&(iter))), 1); \ - iov_iter_advance(&(iter), (iov).iov_len)) - size_t iov_iter_copy_from_user_atomic(struct page *page, struct iov_iter *i, unsigned long offset, size_t bytes); void iov_iter_advance(struct iov_iter *i, size_t bytes); diff --git a/include/linux/verification.h b/include/linux/verification.h index cfa4730d607a..018fb5f13d44 100644 --- a/include/linux/verification.h +++ b/include/linux/verification.h @@ -17,6 +17,7 @@ * should be used. */ #define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL) +#define VERIFY_USE_PLATFORM_KEYRING ((struct key *)2UL) /* * The use to which an asymmetric key is being put. diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index ee162e3e879b..553b34c8b5f7 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -125,9 +125,11 @@ extern void vga_put(struct pci_dev *pdev, unsigned int rsrc); #ifdef CONFIG_VGA_ARB extern struct pci_dev *vga_default_device(void); extern void vga_set_default_device(struct pci_dev *pdev); +extern int vga_remove_vgacon(struct pci_dev *pdev); #else static inline struct pci_dev *vga_default_device(void) { return NULL; }; static inline void vga_set_default_device(struct pci_dev *pdev) { }; +static inline int vga_remove_vgacon(struct pci_dev *pdev) { return 0; }; #endif /* diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 5d9d318bcf7a..0e01e6129145 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -131,6 +131,12 @@ static inline unsigned int xa_pointer_tag(void *entry) * xa_mk_internal() - Create an internal entry. * @v: Value to turn into an internal entry. * + * Internal entries are used for a number of purposes. Entries 0-255 are + * used for sibling entries (only 0-62 are used by the current code). 256 + * is used for the retry entry. 257 is used for the reserved / zero entry. + * Negative internal entries are used to represent errnos. Node pointers + * are also tagged as internal entries in some situations. + * * Context: Any context. * Return: An XArray internal entry corresponding to this value. */ @@ -163,6 +169,22 @@ static inline bool xa_is_internal(const void *entry) return ((unsigned long)entry & 3) == 2; } +#define XA_ZERO_ENTRY xa_mk_internal(257) + +/** + * xa_is_zero() - Is the entry a zero entry? + * @entry: Entry retrieved from the XArray + * + * The normal API will return NULL as the contents of a slot containing + * a zero entry. You can only see zero entries by using the advanced API. + * + * Return: %true if the entry is a zero entry. + */ +static inline bool xa_is_zero(const void *entry) +{ + return unlikely(entry == XA_ZERO_ENTRY); +} + /** * xa_is_err() - Report whether an XArray operation returned an error * @entry: Result from calling an XArray function @@ -200,6 +222,27 @@ static inline int xa_err(void *entry) return 0; } +/** + * struct xa_limit - Represents a range of IDs. + * @min: The lowest ID to allocate (inclusive). + * @max: The maximum ID to allocate (inclusive). 
+ * + * This structure is used either directly or via the XA_LIMIT() macro + * to communicate the range of IDs that are valid for allocation. + * Two common ranges are predefined for you: + * * xa_limit_32b - [0 - UINT_MAX] + * * xa_limit_31b - [0 - INT_MAX] + */ +struct xa_limit { + u32 max; + u32 min; +}; + +#define XA_LIMIT(_min, _max) (struct xa_limit) { .min = _min, .max = _max } + +#define xa_limit_32b XA_LIMIT(0, UINT_MAX) +#define xa_limit_31b XA_LIMIT(0, INT_MAX) + typedef unsigned __bitwise xa_mark_t; #define XA_MARK_0 ((__force xa_mark_t)0U) #define XA_MARK_1 ((__force xa_mark_t)1U) @@ -220,10 +263,14 @@ enum xa_lock_type { #define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ) #define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH) #define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U) +#define XA_FLAGS_ZERO_BUSY ((__force gfp_t)8U) +#define XA_FLAGS_ALLOC_WRAPPED ((__force gfp_t)16U) #define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \ (__force unsigned)(mark))) +/* ALLOC is for a normal 0-based alloc. ALLOC1 is for an 1-based alloc */ #define XA_FLAGS_ALLOC (XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK)) +#define XA_FLAGS_ALLOC1 (XA_FLAGS_TRACK_FREE | XA_FLAGS_ZERO_BUSY) /** * struct xarray - The anchor of the XArray. @@ -279,7 +326,7 @@ struct xarray { #define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0) /** - * DEFINE_XARRAY_ALLOC() - Define an XArray which can allocate IDs. + * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0. * @name: A string that names your XArray. * * This is intended for file scope definitions of allocating XArrays. @@ -287,6 +334,15 @@ struct xarray { */ #define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC) +/** + * DEFINE_XARRAY_ALLOC1() - Define an XArray which allocates IDs starting at 1. + * @name: A string that names your XArray. + * + * This is intended for file scope definitions of allocating XArrays. + * See also DEFINE_XARRAY(). + */ +#define DEFINE_XARRAY_ALLOC1(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC1) + void *xa_load(struct xarray *, unsigned long index); void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); void *xa_erase(struct xarray *, unsigned long index); @@ -463,9 +519,12 @@ void *__xa_erase(struct xarray *, unsigned long index); void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, void *entry, gfp_t); -int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t); -int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t); -int __xa_reserve(struct xarray *, unsigned long index, gfp_t); +int __must_check __xa_insert(struct xarray *, unsigned long index, + void *entry, gfp_t); +int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry, + struct xa_limit, gfp_t); +int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry, + struct xa_limit, u32 *next, gfp_t); void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); @@ -526,9 +585,9 @@ static inline void *xa_store_irq(struct xarray *xa, unsigned long index, * @xa: XArray. * @index: Index of entry. * - * This function is the equivalent of calling xa_store() with %NULL as - * the third argument. The XArray does not need to allocate memory, so - * the user does not need to provide GFP flags. + * After this function returns, loading from @index will return %NULL. 
+ * If the index is part of a multi-index entry, all indices will be erased + * and none of the entries will be part of a multi-index entry. * * Context: Any context. Takes and releases the xa_lock while * disabling softirqs. @@ -550,9 +609,9 @@ static inline void *xa_erase_bh(struct xarray *xa, unsigned long index) * @xa: XArray. * @index: Index of entry. * - * This function is the equivalent of calling xa_store() with %NULL as - * the third argument. The XArray does not need to allocate memory, so - * the user does not need to provide GFP flags. + * After this function returns, loading from @index will return %NULL. + * If the index is part of a multi-index entry, all indices will be erased + * and none of the entries will be part of a multi-index entry. * * Context: Process context. Takes and releases the xa_lock while * disabling interrupts. @@ -664,11 +723,11 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, * * Context: Any context. Takes and releases the xa_lock. May sleep if * the @gfp flags permit. - * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. */ -static inline int xa_insert(struct xarray *xa, unsigned long index, - void *entry, gfp_t gfp) +static inline int __must_check xa_insert(struct xarray *xa, + unsigned long index, void *entry, gfp_t gfp) { int err; @@ -693,11 +752,11 @@ static inline int xa_insert(struct xarray *xa, unsigned long index, * * Context: Any context. Takes and releases the xa_lock while * disabling softirqs. May sleep if the @gfp flags permit. - * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. */ -static inline int xa_insert_bh(struct xarray *xa, unsigned long index, - void *entry, gfp_t gfp) +static inline int __must_check xa_insert_bh(struct xarray *xa, + unsigned long index, void *entry, gfp_t gfp) { int err; @@ -722,11 +781,11 @@ static inline int xa_insert_bh(struct xarray *xa, unsigned long index, * * Context: Process context. Takes and releases the xa_lock while * disabling interrupts. May sleep if the @gfp flags permit. - * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. */ -static inline int xa_insert_irq(struct xarray *xa, unsigned long index, - void *entry, gfp_t gfp) +static inline int __must_check xa_insert_irq(struct xarray *xa, + unsigned long index, void *entry, gfp_t gfp) { int err; @@ -741,26 +800,26 @@ static inline int xa_insert_irq(struct xarray *xa, unsigned long index, * xa_alloc() - Find somewhere to store this entry in the XArray. * @xa: XArray. * @id: Pointer to ID. - * @max: Maximum ID to allocate (inclusive). * @entry: New entry. + * @limit: Range of ID to allocate. * @gfp: Memory allocation flags. * - * Allocates an unused ID in the range specified by @id and @max. - * Updates the @id pointer with the index, then stores the entry at that - * index. A concurrent lookup will not see an uninitialised @id. + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. * - * Context: Process context. Takes and releases the xa_lock. 
May sleep if + * Context: Any context. Takes and releases the xa_lock. May sleep if * the @gfp flags permit. - * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if - * there is no more space in the XArray. + * Return: 0 on success, -ENOMEM if memory could not be allocated or + * -EBUSY if there are no free entries in @limit. */ -static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, - gfp_t gfp) +static inline __must_check int xa_alloc(struct xarray *xa, u32 *id, + void *entry, struct xa_limit limit, gfp_t gfp) { int err; xa_lock(xa); - err = __xa_alloc(xa, id, max, entry, gfp); + err = __xa_alloc(xa, id, entry, limit, gfp); xa_unlock(xa); return err; @@ -770,26 +829,26 @@ static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, * xa_alloc_bh() - Find somewhere to store this entry in the XArray. * @xa: XArray. * @id: Pointer to ID. - * @max: Maximum ID to allocate (inclusive). * @entry: New entry. + * @limit: Range of ID to allocate. * @gfp: Memory allocation flags. * - * Allocates an unused ID in the range specified by @id and @max. - * Updates the @id pointer with the index, then stores the entry at that - * index. A concurrent lookup will not see an uninitialised @id. + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. * * Context: Any context. Takes and releases the xa_lock while * disabling softirqs. May sleep if the @gfp flags permit. - * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if - * there is no more space in the XArray. + * Return: 0 on success, -ENOMEM if memory could not be allocated or + * -EBUSY if there are no free entries in @limit. */ -static inline int xa_alloc_bh(struct xarray *xa, u32 *id, u32 max, void *entry, - gfp_t gfp) +static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id, + void *entry, struct xa_limit limit, gfp_t gfp) { int err; xa_lock_bh(xa); - err = __xa_alloc(xa, id, max, entry, gfp); + err = __xa_alloc(xa, id, entry, limit, gfp); xa_unlock_bh(xa); return err; @@ -799,26 +858,125 @@ static inline int xa_alloc_bh(struct xarray *xa, u32 *id, u32 max, void *entry, * xa_alloc_irq() - Find somewhere to store this entry in the XArray. * @xa: XArray. * @id: Pointer to ID. - * @max: Maximum ID to allocate (inclusive). * @entry: New entry. + * @limit: Range of ID to allocate. * @gfp: Memory allocation flags. * - * Allocates an unused ID in the range specified by @id and @max. - * Updates the @id pointer with the index, then stores the entry at that - * index. A concurrent lookup will not see an uninitialised @id. + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. * * Context: Process context. Takes and releases the xa_lock while * disabling interrupts. May sleep if the @gfp flags permit. - * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if - * there is no more space in the XArray. + * Return: 0 on success, -ENOMEM if memory could not be allocated or + * -EBUSY if there are no free entries in @limit. 
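A brief usage sketch of the reworked allocation API documented above, using the predefined xa_limit_32b range and the XA_LIMIT() helper; the array and function names are illustrative only.

#include <linux/xarray.h>

/* Illustrative: an XArray that allocates IDs starting at 0. */
static DEFINE_XARRAY_ALLOC(example_ids);

static int example_track(void *object, u32 *out_id)
{
	/* Any free index in [0, UINT_MAX]; -EBUSY if the range is full. */
	return xa_alloc(&example_ids, out_id, object, xa_limit_32b,
			GFP_KERNEL);
}

static int example_track_small(void *object, u32 *out_id)
{
	/* Restrict the ID range with XA_LIMIT(min, max). */
	return xa_alloc(&example_ids, out_id, object, XA_LIMIT(1, 255),
			GFP_KERNEL);
}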
@@ -799,26 +858,125 @@ static inline int xa_alloc_bh(struct xarray *xa, u32 *id, u32 max, void *entry,
  * xa_alloc_irq() - Find somewhere to store this entry in the XArray.
  * @xa: XArray.
  * @id: Pointer to ID.
- * @max: Maximum ID to allocate (inclusive).
  * @entry: New entry.
+ * @limit: Range of ID to allocate.
  * @gfp: Memory allocation flags.
  *
- * Allocates an unused ID in the range specified by @id and @max.
- * Updates the @id pointer with the index, then stores the entry at that
- * index.  A concurrent lookup will not see an uninitialised @id.
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index.  A concurrent lookup will not see an uninitialised @id.
  *
  * Context: Process context.  Takes and releases the xa_lock while
  * disabling interrupts.  May sleep if the @gfp flags permit.
- * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
- * there is no more space in the XArray.
+ * Return: 0 on success, -ENOMEM if memory could not be allocated or
+ * -EBUSY if there are no free entries in @limit.
  */
-static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry,
-		gfp_t gfp)
+static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
+		void *entry, struct xa_limit limit, gfp_t gfp)
 {
 	int err;
 
 	xa_lock_irq(xa);
-	err = __xa_alloc(xa, id, max, entry, gfp);
+	err = __xa_alloc(xa, id, entry, limit, gfp);
+	xa_unlock_irq(xa);
+
+	return err;
+}
+
+/**
+ * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @entry: New entry.
+ * @limit: Range of allocated ID.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index.  A concurrent lookup will not see an uninitialised @id.
+ * The search for an empty entry will start at @next and will wrap
+ * around if necessary.
+ *
+ * Context: Any context.  Takes and releases the xa_lock.  May sleep if
+ * the @gfp flags permit.
+ * Return: 0 if the allocation succeeded without wrapping.  1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated or -EBUSY if there are no free entries in @limit.
+ */
+static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
+		struct xa_limit limit, u32 *next, gfp_t gfp)
+{
+	int err;
+
+	xa_lock(xa);
+	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
+	xa_unlock(xa);
+
+	return err;
+}
+
+/**
+ * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @entry: New entry.
+ * @limit: Range of allocated ID.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index.  A concurrent lookup will not see an uninitialised @id.
+ * The search for an empty entry will start at @next and will wrap
+ * around if necessary.
+ *
+ * Context: Any context.  Takes and releases the xa_lock while
+ * disabling softirqs.  May sleep if the @gfp flags permit.
+ * Return: 0 if the allocation succeeded without wrapping.  1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated or -EBUSY if there are no free entries in @limit.
+ */
+static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
+		struct xa_limit limit, u32 *next, gfp_t gfp)
+{
+	int err;
+
+	xa_lock_bh(xa);
+	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
+	xa_unlock_bh(xa);
+
+	return err;
+}
+
+/**
+ * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @entry: New entry.
+ * @limit: Range of allocated ID.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index.  A concurrent lookup will not see an uninitialised @id.
+ * The search for an empty entry will start at @next and will wrap
+ * around if necessary.
+ *
+ * Context: Process context.  Takes and releases the xa_lock while
+ * disabling interrupts.  May sleep if the @gfp flags permit.
+ * Return: 0 if the allocation succeeded without wrapping.  1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated or -EBUSY if there are no free entries in @limit.
+ */
+static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
+		struct xa_limit limit, u32 *next, gfp_t gfp)
+{
+	int err;
+
+	xa_lock_irq(xa);
+	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
 	xa_unlock_irq(xa);
 
 	return err;
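xa_alloc_cyclic() and its _bh/_irq variants are new: they continue the search from a caller-held cursor (@next) and return 1 rather than 0 when the search wrapped back to limit.min. A sketch of the intended pattern, using the XA_LIMIT() helper from earlier in this header; the cursor name and range values are illustrative:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_cyclic);
static u32 example_cursor;		/* next ID to try */

static int example_cyclic_id(void *payload, u32 *out_id)
{
	int err = xa_alloc_cyclic(&example_cyclic, out_id, payload,
				  XA_LIMIT(1, 65535), &example_cursor,
				  GFP_KERNEL);

	if (err == 1)		/* success, but the search wrapped around */
		err = 0;

	return err;		/* -EBUSY if every ID in the range is in use */
}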
@@ -842,16 +1000,10 @@ static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry,
  * May sleep if the @gfp flags permit.
  * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
  */
-static inline
+static inline __must_check
 int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
-	int ret;
-
-	xa_lock(xa);
-	ret = __xa_reserve(xa, index, gfp);
-	xa_unlock(xa);
-
-	return ret;
+	return xa_err(xa_cmpxchg(xa, index, NULL, XA_ZERO_ENTRY, gfp));
 }
 
 /**
@@ -866,16 +1018,10 @@ int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
  * disabling softirqs.
  * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
  */
-static inline
+static inline __must_check
 int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
-	int ret;
-
-	xa_lock_bh(xa);
-	ret = __xa_reserve(xa, index, gfp);
-	xa_unlock_bh(xa);
-
-	return ret;
+	return xa_err(xa_cmpxchg_bh(xa, index, NULL, XA_ZERO_ENTRY, gfp));
 }
 
 /**
@@ -890,16 +1036,10 @@ int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
  * disabling interrupts.
  * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
  */
-static inline
+static inline __must_check
 int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
-	int ret;
-
-	xa_lock_irq(xa);
-	ret = __xa_reserve(xa, index, gfp);
-	xa_unlock_irq(xa);
-
-	return ret;
+	return xa_err(xa_cmpxchg_irq(xa, index, NULL, XA_ZERO_ENTRY, gfp));
 }
 
 /**
@@ -913,7 +1053,7 @@ int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
  */
 static inline void xa_release(struct xarray *xa, unsigned long index)
 {
-	xa_cmpxchg(xa, index, NULL, NULL, 0);
+	xa_cmpxchg(xa, index, XA_ZERO_ENTRY, NULL, 0);
 }
 
 /* Everything below here is the Advanced API.  Proceed with caution. */
@@ -1073,18 +1213,6 @@ static inline bool xa_is_sibling(const void *entry)
 }
 
 #define XA_RETRY_ENTRY		xa_mk_internal(256)
-#define XA_ZERO_ENTRY		xa_mk_internal(257)
-
-/**
- * xa_is_zero() - Is the entry a zero entry?
- * @entry: Entry retrieved from the XArray
- *
- * Return: %true if the entry is a zero entry.
- */
-static inline bool xa_is_zero(const void *entry)
-{
-	return unlikely(entry == XA_ZERO_ENTRY);
-}
 
 /**
  * xa_is_retry() - Is the entry a retry entry?
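xa_reserve() is now a thin wrapper that cmpxchg()es the zero entry into place, and xa_release() only clears the slot if it still holds that zero entry, so a reservation reads back as NULL until something real is stored. A sketch of the reserve-then-store pattern under those assumptions (names are illustrative, error handling trimmed):

#include <linux/errno.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(example_slots);

static int example_publish(unsigned long index, void *payload)
{
	int err = xa_reserve(&example_slots, index, GFP_KERNEL);

	if (err)
		return err;		/* -ENOMEM */

	/* Lookups at @index return NULL while the slot is only reserved. */
	if (!payload) {
		xa_release(&example_slots, index);	/* still reserved, so this drops it */
		return -EINVAL;
	}

	/* Replace the reserved entry with the real one. */
	return xa_err(xa_store(&example_slots, index, payload, GFP_KERNEL));
}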