Diffstat (limited to 'include/linux')
234 files changed, 7190 insertions, 5172 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 0fecacca51e8..978cc239f23b 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -931,6 +931,8 @@ int acpi_subsys_suspend_noirq(struct device *dev); int acpi_subsys_suspend(struct device *dev); int acpi_subsys_freeze(struct device *dev); int acpi_subsys_poweroff(struct device *dev); +void acpi_ec_mark_gpe_for_wake(void); +void acpi_ec_set_gpe_wake_mask(u8 action); #else static inline int acpi_subsys_prepare(struct device *dev) { return 0; } static inline void acpi_subsys_complete(struct device *dev) {} @@ -939,6 +941,8 @@ static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; } static inline int acpi_subsys_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_freeze(struct device *dev) { return 0; } static inline int acpi_subsys_poweroff(struct device *dev) { return 0; } +static inline void acpi_ec_mark_gpe_for_wake(void) {} +static inline void acpi_ec_set_gpe_wake_mask(u8 action) {} #endif #ifdef CONFIG_ACPI diff --git a/include/linux/amba/clcd-regs.h b/include/linux/amba/clcd-regs.h index 516a6fda83c5..421b0fa90d6a 100644 --- a/include/linux/amba/clcd-regs.h +++ b/include/linux/amba/clcd-regs.h @@ -42,6 +42,7 @@ #define TIM2_PCD_LO_MASK GENMASK(4, 0) #define TIM2_PCD_LO_BITS 5 #define TIM2_CLKSEL (1 << 5) +#define TIM2_ACB_MASK GENMASK(10, 6) #define TIM2_IVS (1 << 11) #define TIM2_IHS (1 << 12) #define TIM2_IPC (1 << 13) diff --git a/include/linux/audit.h b/include/linux/audit.h index 97d0925454df..aee3dc9eb378 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -11,7 +11,6 @@ #include <linux/sched.h> #include <linux/ptrace.h> -#include <linux/namei.h> /* LOOKUP_* */ #include <uapi/linux/audit.h> #define AUDIT_INO_UNSET ((unsigned long)-1) @@ -252,6 +251,10 @@ static inline int audit_signal_info(int sig, struct task_struct *t) #define audit_is_compat(arch) false #endif +#define AUDIT_INODE_PARENT 1 /* dentry represents the parent */ +#define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ +#define AUDIT_INODE_NOEVAL 4 /* audit record incomplete */ + #ifdef CONFIG_AUDITSYSCALL #include <asm/syscall.h> /* for syscall_get_arch() */ @@ -265,9 +268,6 @@ extern void __audit_syscall_exit(int ret_success, long ret_value); extern struct filename *__audit_reusename(const __user char *uptr); extern void __audit_getname(struct filename *name); -#define AUDIT_INODE_PARENT 1 /* dentry represents the parent */ -#define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ -#define AUDIT_INODE_NOEVAL 4 /* audit record incomplete */ extern void __audit_inode(struct filename *name, const struct dentry *dentry, unsigned int flags); extern void __audit_file(const struct file *); @@ -328,16 +328,9 @@ static inline void audit_getname(struct filename *name) } static inline void audit_inode(struct filename *name, const struct dentry *dentry, - unsigned int flags) { - if (unlikely(!audit_dummy_context())) { - unsigned int aflags = 0; - - if (flags & LOOKUP_PARENT) - aflags |= AUDIT_INODE_PARENT; - if (flags & LOOKUP_NO_EVAL) - aflags |= AUDIT_INODE_NOEVAL; + unsigned int aflags) { + if (unlikely(!audit_dummy_context())) __audit_inode(name, dentry, aflags); - } } static inline void audit_file(struct file *file) { @@ -561,7 +554,7 @@ static inline void __audit_inode_child(struct inode *parent, { } static inline void audit_inode(struct filename *name, const struct dentry *dentry, - unsigned int parent) + unsigned int aflags) { } static inline void audit_file(struct file *file) 
{ diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 0b5897446dca..c7d6b2e8c3b5 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -46,6 +46,12 @@ enum backlight_notification { BACKLIGHT_UNREGISTERED, }; +enum backlight_scale { + BACKLIGHT_SCALE_UNKNOWN = 0, + BACKLIGHT_SCALE_LINEAR, + BACKLIGHT_SCALE_NON_LINEAR, +}; + struct backlight_device; struct fb_info; @@ -80,6 +86,8 @@ struct backlight_properties { enum backlight_type type; /* Flags used to signal drivers of state changes */ unsigned int state; + /* Type of the brightness scale (linear, non-linear, ...) */ + enum backlight_scale scale; #define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */ #define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 6032bb740cf4..f3ea78b0c91c 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1110,6 +1110,8 @@ extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); extern void blk_queue_required_elevator_features(struct request_queue *q, unsigned int features); +extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, + struct device *dev); /* * Number of physical segments as sent to the device. diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 18f4cc2c6acd..5b9d22338606 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -24,6 +24,9 @@ struct seq_file; struct btf; struct btf_type; +extern struct idr btf_idr; +extern spinlock_t btf_idr_lock; + /* map is generic key/value storage optionally accesible by eBPF programs */ struct bpf_map_ops { /* funcs callable from userspace (via syscall) */ @@ -647,6 +650,8 @@ void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); struct bpf_map *bpf_map_get_with_uref(u32 ufd); struct bpf_map *__bpf_map_get(struct fd f); struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); +struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map, + bool uref); void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); @@ -713,7 +718,7 @@ struct xdp_buff; struct sk_buff; struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key); -void __dev_map_insert_ctx(struct bpf_map *map, u32 index); +struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key); void __dev_map_flush(struct bpf_map *map); int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, struct net_device *dev_rx); @@ -721,7 +726,6 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, struct bpf_prog *xdp_prog); struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); -void __cpu_map_insert_ctx(struct bpf_map *map, u32 index); void __cpu_map_flush(struct bpf_map *map); int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, struct net_device *dev_rx); @@ -801,8 +805,10 @@ static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map, return NULL; } -static inline void __dev_map_insert_ctx(struct bpf_map *map, u32 index) +static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map, + u32 key) { + return NULL; } static inline void __dev_map_flush(struct bpf_map *map) @@ -834,10 +840,6 @@ struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key) 
return NULL; } -static inline void __cpu_map_insert_ctx(struct bpf_map *map, u32 index) -{ -} - static inline void __cpu_map_flush(struct bpf_map *map) { } diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index eec5aeeeaf92..36a9c2325176 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -62,6 +62,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) #ifdef CONFIG_NET BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops) #if defined(CONFIG_BPF_STREAM_PARSER) BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 5fe99f322b1c..26a6d58ca78c 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -355,6 +355,7 @@ struct bpf_verifier_env { struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ int stack_size; /* number of states to be processed */ bool strict_alignment; /* perform strict pointer alignment checks */ + bool test_state_freq; /* test verifier with different pruning frequency */ struct bpf_verifier_state *cur_state; /* current verifier state */ struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ struct bpf_verifier_state_list *free_list; diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h new file mode 100644 index 000000000000..2f5d731ae251 --- /dev/null +++ b/include/linux/can/can-ml.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* Copyright (c) 2002-2007 Volkswagen Group Electronic Research + * Copyright (c) 2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Volkswagen nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * Alternatively, provided that this notice is retained in full, this + * software may be distributed under the terms of the GNU General + * Public License ("GPL") version 2, in which case the provisions of the + * GPL apply INSTEAD OF those given above. + * + * The provided data structures and external interfaces from this code + * are not restricted to be used by modules with a GPL compatible license. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + */ + +#ifndef CAN_ML_H +#define CAN_ML_H + +#include <linux/can.h> +#include <linux/list.h> + +#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS) +#define CAN_EFF_RCV_HASH_BITS 10 +#define CAN_EFF_RCV_ARRAY_SZ (1 << CAN_EFF_RCV_HASH_BITS) + +enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_MAX }; + +struct can_dev_rcv_lists { + struct hlist_head rx[RX_MAX]; + struct hlist_head rx_sff[CAN_SFF_RCV_ARRAY_SZ]; + struct hlist_head rx_eff[CAN_EFF_RCV_ARRAY_SZ]; + int entries; +}; + +struct can_ml_priv { + struct can_dev_rcv_lists dev_rcv_lists; +#ifdef CAN_J1939 + struct j1939_priv *j1939_priv; +#endif +}; + +#endif /* CAN_ML_H */ diff --git a/include/linux/can/core.h b/include/linux/can/core.h index 6099bc18bd0c..8339071ab08b 100644 --- a/include/linux/can/core.h +++ b/include/linux/can/core.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * linux/can/core.h * @@ -41,6 +41,14 @@ struct can_proto { struct proto *prot; }; +/* required_size + * macro to find the minimum size of a struct + * that includes a requested member + */ +#define CAN_REQUIRED_SIZE(struct_type, member) \ + (offsetof(typeof(struct_type), member) + \ + sizeof(((typeof(struct_type) *)(NULL))->member)) + /* function prototypes for the CAN networklayer core (af_can.c) */ extern int can_proto_register(const struct can_proto *cp); @@ -57,6 +65,5 @@ extern void can_rx_unregister(struct net *net, struct net_device *dev, void *data); extern int can_send(struct sk_buff *skb, int loop); -extern int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); #endif /* !_CAN_CORE_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index f01623aef2f7..9b3c720a31b1 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -169,7 +169,8 @@ void can_change_state(struct net_device *dev, struct can_frame *cf, void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, unsigned int idx); -struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr); +struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, + u8 *len_ptr); unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); void can_free_echo_skb(struct net_device *dev, unsigned int idx); diff --git a/include/linux/can/platform/rcar_can.h b/include/linux/can/platform/rcar_can.h deleted file mode 100644 index a43dcd0cf79e..000000000000 --- a/include/linux/can/platform/rcar_can.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _CAN_PLATFORM_RCAR_CAN_H_ -#define _CAN_PLATFORM_RCAR_CAN_H_ - -#include <linux/types.h> - -/* Clock Select Register settings */ -enum CLKR { - CLKR_CLKP1 = 0, /* Peripheral clock (clkp1) */ - CLKR_CLKP2 = 1, /* Peripheral clock (clkp2) */ - CLKR_CLKEXT = 3 /* Externally input clock */ -}; - -struct rcar_can_platform_data { - enum CLKR clock_select; /* Clock source select */ -}; - -#endif /* !_CAN_PLATFORM_RCAR_CAN_H_ */ diff 
--git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h index 9daa1119ea42..01219f2902bf 100644 --- a/include/linux/can/rx-offload.h +++ b/include/linux/can/rx-offload.h @@ -15,7 +15,8 @@ struct can_rx_offload { struct net_device *dev; - unsigned int (*mailbox_read)(struct can_rx_offload *offload, struct can_frame *cf, + unsigned int (*mailbox_read)(struct can_rx_offload *offload, + struct can_frame *cf, u32 *timestamp, unsigned int mb); struct sk_buff_head skb_queue; @@ -29,9 +30,13 @@ struct can_rx_offload { bool inc; }; -int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload); -int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight); -int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg); +int can_rx_offload_add_timestamp(struct net_device *dev, + struct can_rx_offload *offload); +int can_rx_offload_add_fifo(struct net_device *dev, + struct can_rx_offload *offload, + unsigned int weight); +int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, + u64 reg); int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload); int can_rx_offload_queue_sorted(struct can_rx_offload *offload, struct sk_buff *skb, u32 timestamp); diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index b3379a97245c..a954def26c0d 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * linux/can/skb.h * diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 82156da3c650..b9dbda1c26aa 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -293,6 +293,7 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private); struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client); u64 ceph_client_gid(struct ceph_client *client); extern void ceph_destroy_client(struct ceph_client *client); +extern void ceph_reset_client_addr(struct ceph_client *client); extern int __ceph_open_session(struct ceph_client *client, unsigned long started); extern int ceph_open_session(struct ceph_client *client); diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 23895d178149..c4458dc6a757 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -337,6 +337,7 @@ extern void ceph_msgr_flush(void); extern void ceph_messenger_init(struct ceph_messenger *msgr, struct ceph_entity_addr *myaddr); extern void ceph_messenger_fini(struct ceph_messenger *msgr); +extern void ceph_messenger_reset_nonce(struct ceph_messenger *msgr); extern void ceph_con_init(struct ceph_connection *con, void *private, const struct ceph_connection_operations *ops, diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h index b4d134d3312a..dbb8a6959a73 100644 --- a/include/linux/ceph/mon_client.h +++ b/include/linux/ceph/mon_client.h @@ -109,6 +109,7 @@ extern int ceph_monmap_contains(struct ceph_monmap *m, extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl); extern void ceph_monc_stop(struct ceph_mon_client *monc); +extern void ceph_monc_reopen_session(struct ceph_mon_client *monc); enum { CEPH_SUB_MONMAP = 0, diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index ad7fe5d10dcd..eaffbdddf89a 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ 
-381,6 +381,7 @@ extern void ceph_osdc_cleanup(void); extern int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client); extern void ceph_osdc_stop(struct ceph_osd_client *osdc); +extern void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc); extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg); @@ -388,6 +389,7 @@ extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg); void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb); void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err); +void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc); #define osd_req_op_data(oreq, whch, typ, fld) \ ({ \ diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index dce5521a9bf6..2fdfe8061363 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -299,7 +299,8 @@ struct clk_init_data { * into the clk API * * @init: pointer to struct clk_init_data that contains the init data shared - * with the common clock framework. + * with the common clock framework. This pointer will be set to NULL once + * a clk_register() variant is called on this clk_hw pointer. */ struct clk_hw { struct clk_core *core; diff --git a/include/linux/clk.h b/include/linux/clk.h index 853a8f181394..18b7b95a8253 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -239,7 +239,8 @@ static inline int clk_prepare(struct clk *clk) return 0; } -static inline int __must_check clk_bulk_prepare(int num_clks, struct clk_bulk_data *clks) +static inline int __must_check +clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks) { might_sleep(); return 0; @@ -263,7 +264,8 @@ static inline void clk_unprepare(struct clk *clk) { might_sleep(); } -static inline void clk_bulk_unprepare(int num_clks, struct clk_bulk_data *clks) +static inline void clk_bulk_unprepare(int num_clks, + const struct clk_bulk_data *clks) { might_sleep(); } @@ -820,7 +822,8 @@ static inline int clk_enable(struct clk *clk) return 0; } -static inline int __must_check clk_bulk_enable(int num_clks, struct clk_bulk_data *clks) +static inline int __must_check clk_bulk_enable(int num_clks, + const struct clk_bulk_data *clks) { return 0; } @@ -829,7 +832,7 @@ static inline void clk_disable(struct clk *clk) {} static inline void clk_bulk_disable(int num_clks, - struct clk_bulk_data *clks) {} + const struct clk_bulk_data *clks) {} static inline unsigned long clk_get_rate(struct clk *clk) { @@ -918,8 +921,8 @@ static inline void clk_disable_unprepare(struct clk *clk) clk_unprepare(clk); } -static inline int __must_check clk_bulk_prepare_enable(int num_clks, - struct clk_bulk_data *clks) +static inline int __must_check +clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks) { int ret; @@ -934,7 +937,7 @@ static inline int __must_check clk_bulk_prepare_enable(int num_clks, } static inline void clk_bulk_disable_unprepare(int num_clks, - struct clk_bulk_data *clks) + const struct clk_bulk_data *clks) { clk_bulk_disable(num_clks, clks); clk_bulk_unprepare(num_clks, clks); diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h index 85f8cf9d1226..eae9652c70cd 100644 --- a/include/linux/clk/clk-conf.h +++ b/include/linux/clk/clk-conf.h @@ -4,6 +4,9 @@ * Sylwester Nawrocki <s.nawrocki@samsung.com> */ +#ifndef __CLK_CONF_H +#define __CLK_CONF_H + #include <linux/types.h> struct device_node; @@ -17,3 +20,5 @@ static inline int of_clk_set_defaults(struct device_node *node, return 0; } 
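The clk.h hunks above constify the clk_bulk_*() stub arguments. A minimal consumer-side sketch, assuming a hypothetical "foo" driver and clock names, of how the bulk helpers are typically called:

/*
 * Illustrative sketch only: clock names and the foo_* driver are
 * hypothetical. With the constified prototypes above, the array can be
 * passed through const-qualified clk_bulk_* parameters.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>

struct foo_priv {
	struct clk_bulk_data clks[2];
};

static int foo_clocks_on(struct device *dev, struct foo_priv *priv)
{
	int ret;

	priv->clks[0].id = "bus";
	priv->clks[1].id = "core";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(priv->clks), priv->clks);
	if (ret)
		return ret;

	/* Undone later with clk_bulk_disable_unprepare() on the same array. */
	return clk_bulk_prepare_enable(ARRAY_SIZE(priv->clks), priv->clks);
}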
#endif + +#endif /* __CLK_CONF_H */ diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 9569e7c786d3..4b898cdbdf05 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -129,11 +129,8 @@ static inline bool compaction_failed(enum compact_result result) return false; } -/* - * Compaction has backed off for some reason. It might be throttling or - * lock contention. Retrying is still worthwhile. - */ -static inline bool compaction_withdrawn(enum compact_result result) +/* Compaction needs reclaim to be performed first, so it can continue. */ +static inline bool compaction_needs_reclaim(enum compact_result result) { /* * Compaction backed off due to watermark checks for order-0 @@ -142,6 +139,16 @@ static inline bool compaction_withdrawn(enum compact_result result) if (result == COMPACT_SKIPPED) return true; + return false; +} + +/* + * Compaction has backed off for some reason after doing some work or none + * at all. It might be throttling or lock contention. Retrying might be still + * worthwhile, but with a higher priority if allowed. + */ +static inline bool compaction_withdrawn(enum compact_result result) +{ /* * If compaction is deferred for high-order allocations, it is * because sync compaction recently failed. If this is the case @@ -207,6 +214,11 @@ static inline bool compaction_failed(enum compact_result result) return false; } +static inline bool compaction_needs_reclaim(enum compact_result result) +{ + return false; +} + static inline bool compaction_withdrawn(enum compact_result result) { return true; diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 599c27b56c29..72393a8c1a6c 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -130,10 +130,6 @@ struct ftrace_likely_data { /* * Force always-inline if the user requests it so via the .config. - * GCC does not warn about unused static inline functions for - * -Wunused-function. This turns out to avoid the need for complex #ifdef - * directives. Suppress the warning in clang as well by using "unused" - * function attribute, which is redundant but not harmful for gcc. * Prefer gnu_inline, so that extern inline functions do not emit an * externally visible function. This makes extern inline behave as per gnu89 * semantics rather than c99. This prevents multiple symbol definition errors @@ -144,14 +140,35 @@ struct ftrace_likely_data { */ #if !defined(CONFIG_OPTIMIZE_INLINING) #define inline inline __attribute__((__always_inline__)) __gnu_inline \ - __maybe_unused notrace + __inline_maybe_unused notrace #else #define inline inline __gnu_inline \ - __maybe_unused notrace + __inline_maybe_unused notrace #endif +/* + * gcc provides both __inline__ and __inline as alternate spellings of + * the inline keyword, though the latter is undocumented. New kernel + * code should only use the inline spelling, but some existing code + * uses __inline__. Since we #define inline above, to ensure + * __inline__ has the same semantics, we need this #define. + * + * However, the spelling __inline is strictly reserved for referring + * to the bare keyword. + */ #define __inline__ inline -#define __inline inline + +/* + * GCC does not warn about unused static inline functions for -Wunused-function. + * Suppress the warning in clang as well by using __maybe_unused, but enable it + * for W=1 build. This will allow clang to find unused functions. Remove the + * __inline_maybe_unused entirely after fixing most of -Wunused-function warnings. 
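The compaction.h hunk above splits compaction_needs_reclaim() out of compaction_withdrawn(). A hedged sketch, not the actual mm/page_alloc.c logic, of how an allocation retry path could consume the two predicates:

/*
 * Sketch only: foo_should_retry_compaction() is hypothetical and
 * deliberately simpler than the real should_compact_retry().
 */
#include <linux/compaction.h>

static bool foo_should_retry_compaction(enum compact_result result)
{
	/* COMPACT_SKIPPED: perform reclaim first, then compaction can go on. */
	if (compaction_needs_reclaim(result))
		return true;

	/* Deferred, throttled or contended: retry, ideally at higher priority. */
	return compaction_withdrawn(result);
}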
+ */ +#ifdef KBUILD_EXTRA_WARN1 +#define __inline_maybe_unused +#else +#define __inline_maybe_unused __maybe_unused +#endif /* * Rather then using noinline to prevent stack consumption, use @@ -189,6 +206,12 @@ struct ftrace_likely_data { #define asm_volatile_goto(x...) asm goto(x) #endif +#ifdef CONFIG_CC_HAS_ASM_INLINE +#define asm_inline asm __inline +#else +#define asm_inline asm +#endif + #ifndef __no_fgcse # define __no_fgcse #endif diff --git a/include/linux/container.h b/include/linux/container.h index 0cc2ee91905c..2566a1baa736 100644 --- a/include/linux/container.h +++ b/include/linux/container.h @@ -6,6 +6,9 @@ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> */ +#ifndef _LINUX_CONTAINER_H +#define _LINUX_CONTAINER_H + #include <linux/device.h> /* drivers/base/power/container.c */ @@ -20,3 +23,5 @@ static inline struct container_dev *to_container_dev(struct device *dev) { return container_of(dev, struct container_dev, dev); } + +#endif /* _LINUX_CONTAINER_H */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index fcb1386bb0d4..88dc0c653925 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -179,7 +179,7 @@ void arch_cpu_idle_dead(void); int cpu_report_state(int cpu); int cpu_check_up_prepare(int cpu); void cpu_set_state_online(int cpu); -void play_idle(unsigned long duration_ms); +void play_idle(unsigned long duration_us); #ifdef CONFIG_HOTPLUG_CPU bool cpu_wait_death(unsigned int cpu, int seconds); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 536a049d7ecc..c57e88e85c41 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -456,8 +456,8 @@ static inline void cpufreq_resume(void) {} #define CPUFREQ_POSTCHANGE (1) /* Policy Notifiers */ -#define CPUFREQ_ADJUST (0) -#define CPUFREQ_NOTIFY (1) +#define CPUFREQ_CREATE_POLICY (0) +#define CPUFREQ_REMOVE_POLICY (1) #ifdef CONFIG_CPU_FREQ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 12ae4b87494e..4b6b5bea8f79 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -85,7 +85,9 @@ struct cpuidle_device { unsigned int cpu; ktime_t next_hrtimer; + int last_state_idx; int last_residency; + u64 poll_limit_ns; struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; struct cpuidle_driver_kobj *kobj_driver; @@ -119,6 +121,9 @@ struct cpuidle_driver { /* the driver handles the cpus in cpumask */ struct cpumask *cpumask; + + /* preferred governor to switch at register time */ + const char *governor; }; #ifdef CONFIG_CPU_IDLE @@ -132,6 +137,8 @@ extern int cpuidle_select(struct cpuidle_driver *drv, extern int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index); extern void cpuidle_reflect(struct cpuidle_device *dev, int index); +extern u64 cpuidle_poll_time(struct cpuidle_driver *drv, + struct cpuidle_device *dev); extern int cpuidle_register_driver(struct cpuidle_driver *drv); extern struct cpuidle_driver *cpuidle_get_driver(void); @@ -166,6 +173,9 @@ static inline int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index) {return -ENODEV; } static inline void cpuidle_reflect(struct cpuidle_device *dev, int index) { } +static inline u64 cpuidle_poll_time(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{return 0; } static inline int cpuidle_register_driver(struct cpuidle_driver *drv) {return -ENODEV; } static inline struct cpuidle_driver 
*cpuidle_get_driver(void) {return NULL; } diff --git a/include/linux/cpuidle_haltpoll.h b/include/linux/cpuidle_haltpoll.h new file mode 100644 index 000000000000..d50c1e0411a2 --- /dev/null +++ b/include/linux/cpuidle_haltpoll.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CPUIDLE_HALTPOLL_H +#define _CPUIDLE_HALTPOLL_H + +#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL +#include <asm/cpuidle_haltpoll.h> +#else +static inline void arch_haltpoll_enable(unsigned int cpu) +{ +} + +static inline void arch_haltpoll_disable(unsigned int cpu) +{ +} +#endif +#endif diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index f774c5eb9e3c..4664fc1871de 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -115,4 +115,18 @@ static inline int vmcore_add_device_dump(struct vmcoredd_data *data) return -EOPNOTSUPP; } #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ + +#ifdef CONFIG_PROC_VMCORE +ssize_t read_from_oldmem(char *buf, size_t count, + u64 *ppos, int userbuf, + bool encrypted); +#else +static inline ssize_t read_from_oldmem(char *buf, size_t count, + u64 *ppos, int userbuf, + bool encrypted) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_PROC_VMCORE */ + #endif /* LINUX_CRASHDUMP_H */ diff --git a/include/linux/cred.h b/include/linux/cred.h index f7a30e0099be..18639c069263 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -386,7 +386,6 @@ static inline void put_cred(const struct cred *_cred) #define current_fsgid() (current_cred_xxx(fsgid)) #define current_cap() (current_cred_xxx(cap_effective)) #define current_user() (current_cred_xxx(user)) -#define current_security() (current_cred_xxx(security)) extern struct user_namespace init_user_ns; #ifdef CONFIG_USER_NS diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h index 29fc0dd735ae..f14f17f8cb7f 100644 --- a/include/linux/devfreq-event.h +++ b/include/linux/devfreq-event.h @@ -78,14 +78,20 @@ struct devfreq_event_ops { * struct devfreq_event_desc - the descriptor of devfreq-event device * * @name : the name of devfreq-event device. + * @event_type : the type of the event determined and used by driver * @driver_data : the private data for devfreq-event driver. * @ops : the operation to control devfreq-event device. * * Each devfreq-event device is described with a this structure. * This structure contains the various data for devfreq-event device. + * The event_type describes what is going to be counted in the register. + * It might choose to count e.g. read requests, write data in bytes, etc. + * The full supported list of types is present in specyfic header in: + * include/dt-bindings/pmu/. 
*/ struct devfreq_event_desc { const char *name; + u32 event_type; void *driver_data; const struct devfreq_event_ops *ops; diff --git a/include/linux/device.h b/include/linux/device.h index 6717adee33f0..297239a08bb7 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -164,16 +164,102 @@ void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter); void subsys_dev_iter_exit(struct subsys_dev_iter *iter); +int device_match_name(struct device *dev, const void *name); int device_match_of_node(struct device *dev, const void *np); +int device_match_fwnode(struct device *dev, const void *fwnode); +int device_match_devt(struct device *dev, const void *pdevt); +int device_match_acpi_dev(struct device *dev, const void *adev); +int device_match_any(struct device *dev, const void *unused); int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, int (*fn)(struct device *dev, void *data)); struct device *bus_find_device(struct bus_type *bus, struct device *start, const void *data, int (*match)(struct device *dev, const void *data)); -struct device *bus_find_device_by_name(struct bus_type *bus, - struct device *start, - const char *name); +/** + * bus_find_device_by_name - device iterator for locating a particular device + * of a specific name. + * @bus: bus type + * @start: Device to begin with + * @name: name of the device to match + */ +static inline struct device *bus_find_device_by_name(struct bus_type *bus, + struct device *start, + const char *name) +{ + return bus_find_device(bus, start, name, device_match_name); +} + +/** + * bus_find_device_by_of_node : device iterator for locating a particular device + * matching the of_node. + * @bus: bus type + * @np: of_node of the device to match. + */ +static inline struct device * +bus_find_device_by_of_node(struct bus_type *bus, const struct device_node *np) +{ + return bus_find_device(bus, NULL, np, device_match_of_node); +} + +/** + * bus_find_device_by_fwnode : device iterator for locating a particular device + * matching the fwnode. + * @bus: bus type + * @fwnode: fwnode of the device to match. + */ +static inline struct device * +bus_find_device_by_fwnode(struct bus_type *bus, const struct fwnode_handle *fwnode) +{ + return bus_find_device(bus, NULL, fwnode, device_match_fwnode); +} + +/** + * bus_find_device_by_devt : device iterator for locating a particular device + * matching the device type. + * @bus: bus type + * @devt: device type of the device to match. + */ +static inline struct device *bus_find_device_by_devt(struct bus_type *bus, + dev_t devt) +{ + return bus_find_device(bus, NULL, &devt, device_match_devt); +} + +/** + * bus_find_next_device - Find the next device after a given device in a + * given bus. + * @bus: bus type + * @cur: device to begin the search with. + */ +static inline struct device * +bus_find_next_device(struct bus_type *bus,struct device *cur) +{ + return bus_find_device(bus, cur, NULL, device_match_any); +} + +#ifdef CONFIG_ACPI +struct acpi_device; + +/** + * bus_find_device_by_acpi_dev : device iterator for locating a particular device + * matching the ACPI COMPANION device. + * @bus: bus type + * @adev: ACPI COMPANION device to match. 
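The device.h hunks above turn bus_find_device_by_name() into an inline wrapper over bus_find_device() and add further device_match_*() based lookups. A short usage sketch, assuming a hypothetical "foo-ctrl" device on the platform bus:

/*
 * Illustrative only: the device name and foo_lookup() are hypothetical.
 * Both wrappers return a device with an elevated reference count, so
 * the caller must drop it with put_device().
 */
#include <linux/device.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static struct device *foo_lookup(struct device_node *np)
{
	struct device *dev;

	/* Look up by name on the platform bus... */
	dev = bus_find_device_by_name(&platform_bus_type, NULL, "foo-ctrl");
	if (dev)
		return dev;

	/* ...or by device-tree node. */
	return bus_find_device_by_of_node(&platform_bus_type, np);
}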
+ */ +static inline struct device * +bus_find_device_by_acpi_dev(struct bus_type *bus, const struct acpi_device *adev) +{ + return bus_find_device(bus, NULL, adev, device_match_acpi_dev); +} +#else +static inline struct device * +bus_find_device_by_acpi_dev(struct bus_type *bus, const void *adev) +{ + return NULL; +} +#endif + struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id, struct device *hint); int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, @@ -262,6 +348,8 @@ enum probe_type { * @resume: Called to bring a device from sleep mode. * @groups: Default attributes that get created by the driver core * automatically. + * @dev_groups: Additional attributes attached to device instance once the + * it is bound to the driver. * @pm: Power management operations of the device which matched * this driver. * @coredump: Called when sysfs entry is written to. The device driver @@ -296,6 +384,7 @@ struct device_driver { int (*suspend) (struct device *dev, pm_message_t state); int (*resume) (struct device *dev); const struct attribute_group **groups; + const struct attribute_group **dev_groups; const struct dev_pm_ops *pm; void (*coredump) (struct device *dev); @@ -342,6 +431,83 @@ struct device *driver_find_device(struct device_driver *drv, struct device *start, const void *data, int (*match)(struct device *dev, const void *data)); +/** + * driver_find_device_by_name - device iterator for locating a particular device + * of a specific name. + * @drv: the driver we're iterating + * @name: name of the device to match + */ +static inline struct device *driver_find_device_by_name(struct device_driver *drv, + const char *name) +{ + return driver_find_device(drv, NULL, name, device_match_name); +} + +/** + * driver_find_device_by_of_node- device iterator for locating a particular device + * by of_node pointer. + * @drv: the driver we're iterating + * @np: of_node pointer to match. + */ +static inline struct device * +driver_find_device_by_of_node(struct device_driver *drv, + const struct device_node *np) +{ + return driver_find_device(drv, NULL, np, device_match_of_node); +} + +/** + * driver_find_device_by_fwnode- device iterator for locating a particular device + * by fwnode pointer. + * @drv: the driver we're iterating + * @fwnode: fwnode pointer to match. + */ +static inline struct device * +driver_find_device_by_fwnode(struct device_driver *drv, + const struct fwnode_handle *fwnode) +{ + return driver_find_device(drv, NULL, fwnode, device_match_fwnode); +} + +/** + * driver_find_device_by_devt- device iterator for locating a particular device + * by devt. + * @drv: the driver we're iterating + * @devt: devt pointer to match. + */ +static inline struct device *driver_find_device_by_devt(struct device_driver *drv, + dev_t devt) +{ + return driver_find_device(drv, NULL, &devt, device_match_devt); +} + +static inline struct device *driver_find_next_device(struct device_driver *drv, + struct device *start) +{ + return driver_find_device(drv, start, NULL, device_match_any); +} + +#ifdef CONFIG_ACPI +/** + * driver_find_device_by_acpi_dev : device iterator for locating a particular + * device matching the ACPI_COMPANION device. + * @drv: the driver we're iterating + * @adev: ACPI_COMPANION device to match. 
+ */ +static inline struct device * +driver_find_device_by_acpi_dev(struct device_driver *drv, + const struct acpi_device *adev) +{ + return driver_find_device(drv, NULL, adev, device_match_acpi_dev); +} +#else +static inline struct device * +driver_find_device_by_acpi_dev(struct device_driver *drv, const void *adev) +{ + return NULL; +} +#endif + void driver_deferred_probe_add(struct device *dev); int driver_deferred_probe_check_state(struct device *dev); int driver_deferred_probe_check_state_continue(struct device *dev); @@ -471,6 +637,76 @@ extern struct device *class_find_device(struct class *class, struct device *start, const void *data, int (*match)(struct device *, const void *)); +/** + * class_find_device_by_name - device iterator for locating a particular device + * of a specific name. + * @class: class type + * @name: name of the device to match + */ +static inline struct device *class_find_device_by_name(struct class *class, + const char *name) +{ + return class_find_device(class, NULL, name, device_match_name); +} + +/** + * class_find_device_by_of_node : device iterator for locating a particular device + * matching the of_node. + * @class: class type + * @np: of_node of the device to match. + */ +static inline struct device * +class_find_device_by_of_node(struct class *class, const struct device_node *np) +{ + return class_find_device(class, NULL, np, device_match_of_node); +} + +/** + * class_find_device_by_fwnode : device iterator for locating a particular device + * matching the fwnode. + * @class: class type + * @fwnode: fwnode of the device to match. + */ +static inline struct device * +class_find_device_by_fwnode(struct class *class, + const struct fwnode_handle *fwnode) +{ + return class_find_device(class, NULL, fwnode, device_match_fwnode); +} + +/** + * class_find_device_by_devt : device iterator for locating a particular device + * matching the device type. + * @class: class type + * @devt: device type of the device to match. + */ +static inline struct device *class_find_device_by_devt(struct class *class, + dev_t devt) +{ + return class_find_device(class, NULL, &devt, device_match_devt); +} + +#ifdef CONFIG_ACPI +struct acpi_device; +/** + * class_find_device_by_acpi_dev : device iterator for locating a particular + * device matching the ACPI_COMPANION device. + * @class: class type + * @adev: ACPI_COMPANION device to match. + */ +static inline struct device * +class_find_device_by_acpi_dev(struct class *class, const struct acpi_device *adev) +{ + return class_find_device(class, NULL, adev, device_match_acpi_dev); +} +#else +static inline struct device * +class_find_device_by_acpi_dev(struct class *class, const void *adev) +{ + return NULL; +} +#endif + struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *class, struct class_attribute *attr, @@ -778,10 +1014,14 @@ struct device_connection { struct list_head list; }; +typedef void *(*devcon_match_fn_t)(struct device_connection *con, int ep, + void *data); + +void *fwnode_connection_find_match(struct fwnode_handle *fwnode, + const char *con_id, void *data, + devcon_match_fn_t match); void *device_connection_find_match(struct device *dev, const char *con_id, - void *data, - void *(*match)(struct device_connection *con, - int ep, void *data)); + void *data, devcon_match_fn_t match); struct device *device_connection_find(struct device *dev, const char *con_id); @@ -833,12 +1073,13 @@ enum device_link_state { /* * Device link flags. 
* - * STATELESS: The core won't track the presence of supplier/consumer drivers. + * STATELESS: The core will not remove this link automatically. * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind. * PM_RUNTIME: If set, the runtime PM framework will use this link. * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation. * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind. * AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds. + * MANAGED: The core tracks presence of supplier/consumer drivers (internal). */ #define DL_FLAG_STATELESS BIT(0) #define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1) @@ -846,6 +1087,7 @@ enum device_link_state { #define DL_FLAG_RPM_ACTIVE BIT(3) #define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4) #define DL_FLAG_AUTOPROBE_CONSUMER BIT(5) +#define DL_FLAG_MANAGED BIT(6) /** * struct device_link - Device link representation. diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index bae060fae862..ec212cb27fdc 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -306,7 +306,7 @@ struct dma_buf { struct module *owner; struct list_head list_node; void *priv; - struct reservation_object *resv; + struct dma_resv *resv; /* poll support */ wait_queue_head_t poll; @@ -365,7 +365,7 @@ struct dma_buf_export_info { const struct dma_buf_ops *ops; size_t size; int flags; - struct reservation_object *resv; + struct dma_resv *resv; void *priv; }; diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 05d29dbc7e62..3347c54f3a87 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -63,15 +63,35 @@ struct dma_fence_cb; * been completed, or never called at all. */ struct dma_fence { - struct kref refcount; - const struct dma_fence_ops *ops; - struct rcu_head rcu; - struct list_head cb_list; spinlock_t *lock; + const struct dma_fence_ops *ops; + /* + * We clear the callback list on kref_put so that by the time we + * release the fence it is unused. No one should be adding to the + * cb_list that they don't themselves hold a reference for. + * + * The lifetime of the timestamp is similarly tied to both the + * rcu freelist and the cb_list. The timestamp is only set upon + * signaling while simultaneously notifying the cb_list. Ergo, we + * only use either the cb_list of timestamp. Upon destruction, + * neither are accessible, and so we can use the rcu. This means + * that the cb_list is *only* valid until the signal bit is set, + * and to read either you *must* hold a reference to the fence, + * and not just the rcu_read_lock. + * + * Listed in chronological order. + */ + union { + struct list_head cb_list; + /* @cb_list replaced by @timestamp on dma_fence_signal() */ + ktime_t timestamp; + /* @timestamp replaced by @rcu on dma_fence_release() */ + struct rcu_head rcu; + }; u64 context; u64 seqno; unsigned long flags; - ktime_t timestamp; + struct kref refcount; int error; }; @@ -273,7 +293,7 @@ static inline struct dma_fence *dma_fence_get(struct dma_fence *fence) } /** - * dma_fence_get_rcu - get a fence from a reservation_object_list with + * dma_fence_get_rcu - get a fence from a dma_resv_list with * rcu read lock * @fence: fence to increase refcount of * @@ -297,7 +317,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence) * so long as the caller is using RCU on the pointer to the fence. * * An alternative mechanism is to employ a seqlock to protect a bunch of - * fences, such as used by struct reservation_object. 
When using a seqlock, + * fences, such as used by struct dma_resv. When using a seqlock, * the seqlock must be taken before and checked after a reference to the * fence is acquired (as shown here). * diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index f7d1eea32c78..4a1c4fca475a 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -131,6 +131,7 @@ struct dma_map_ops { int (*dma_supported)(struct device *dev, u64 mask); u64 (*get_required_mask)(struct device *dev); size_t (*max_mapping_size)(struct device *dev); + unsigned long (*get_merge_boundary)(struct device *dev); }; #define DMA_MAPPING_ERROR (~(dma_addr_t)0) @@ -149,11 +150,6 @@ static inline int valid_dma_direction(int dma_direction) (dma_direction == DMA_FROM_DEVICE)); } -static inline int is_device_dma_capable(struct device *dev) -{ - return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE; -} - #ifdef CONFIG_DMA_DECLARE_COHERENT /* * These three functions are only for dma allocator. @@ -462,11 +458,13 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); +bool dma_can_mmap(struct device *dev); int dma_supported(struct device *dev, u64 mask); int dma_set_mask(struct device *dev, u64 mask); int dma_set_coherent_mask(struct device *dev, u64 mask); u64 dma_get_required_mask(struct device *dev); size_t dma_max_mapping_size(struct device *dev); +unsigned long dma_get_merge_boundary(struct device *dev); #else /* CONFIG_HAS_DMA */ static inline dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, size_t offset, size_t size, @@ -552,6 +550,10 @@ static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, { return -ENXIO; } +static inline bool dma_can_mmap(struct device *dev) +{ + return false; +} static inline int dma_supported(struct device *dev, u64 mask) { return 0; @@ -572,6 +574,10 @@ static inline size_t dma_max_mapping_size(struct device *dev) { return 0; } +static inline unsigned long dma_get_merge_boundary(struct device *dev) +{ + return 0; +} #endif /* CONFIG_HAS_DMA */ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, @@ -615,16 +621,14 @@ extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); +struct page **dma_common_find_pages(void *cpu_addr); void *dma_common_contiguous_remap(struct page *page, size_t size, - unsigned long vm_flags, pgprot_t prot, const void *caller); void *dma_common_pages_remap(struct page **pages, size_t size, - unsigned long vm_flags, pgprot_t prot, - const void *caller); -void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags); + pgprot_t prot, const void *caller); +void dma_common_free_remap(void *cpu_addr, size_t size); -int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot); bool dma_in_atomic_pool(void *start, size_t size); void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags); bool dma_free_from_pool(void *start, size_t size); @@ -754,7 +758,6 @@ static inline int dma_get_cache_alignment(void) #ifdef CONFIG_DMA_DECLARE_COHERENT int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size); -void dma_release_declared_memory(struct device *dev); #else static inline int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, 
@@ -762,11 +765,6 @@ dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, { return -ENOSYS; } - -static inline void -dma_release_declared_memory(struct device *dev) -{ -} #endif /* CONFIG_DMA_DECLARE_COHERENT */ static inline void *dmam_alloc_coherent(struct device *dev, size_t size, @@ -786,9 +784,6 @@ static inline void *dma_alloc_wc(struct device *dev, size_t size, return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs); } -#ifndef dma_alloc_writecombine -#define dma_alloc_writecombine dma_alloc_wc -#endif static inline void dma_free_wc(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr) @@ -796,9 +791,6 @@ static inline void dma_free_wc(struct device *dev, size_t size, return dma_free_attrs(dev, size, cpu_addr, dma_addr, DMA_ATTR_WRITE_COMBINE); } -#ifndef dma_free_writecombine -#define dma_free_writecombine dma_free_wc -#endif static inline int dma_mmap_wc(struct device *dev, struct vm_area_struct *vma, @@ -808,9 +800,6 @@ static inline int dma_mmap_wc(struct device *dev, return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, DMA_ATTR_WRITE_COMBINE); } -#ifndef dma_mmap_writecombine -#define dma_mmap_writecombine dma_mmap_wc -#endif #ifdef CONFIG_NEED_DMA_MAP_STATE #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h index 0bff3d7fac92..dd3de6d88fc0 100644 --- a/include/linux/dma-noncoherent.h +++ b/include/linux/dma-noncoherent.h @@ -3,6 +3,7 @@ #define _LINUX_DMA_NONCOHERENT_H 1 #include <linux/dma-mapping.h> +#include <asm/pgtable.h> #ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H #include <asm/dma-coherence.h> @@ -42,10 +43,18 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs); long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, dma_addr_t dma_addr); -pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, - unsigned long attrs); #ifdef CONFIG_MMU +/* + * Page protection so that devices that can't snoop CPU caches can use the + * memory coherently. We default to pgprot_noncached which is usually used + * for ioremap as a safe bet, but architectures can override this with less + * strict semantics if possible. 
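The dma-mapping.h hunk above removes the legacy dma_{alloc,free,mmap}_writecombine aliases. A minimal sketch, with hypothetical foo_* helpers, of the dma_*_wc() spellings callers use instead:

/*
 * Sketch only: the writecombine compatibility defines are gone, so the
 * dma_*_wc() names are used directly. foo_* and the usage are hypothetical.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *foo_alloc_fb(struct device *dev, size_t size, dma_addr_t *dma)
{
	/* Formerly dma_alloc_writecombine(); identical semantics. */
	return dma_alloc_wc(dev, size, dma, GFP_KERNEL);
}

static void foo_free_fb(struct device *dev, size_t size, void *cpu_addr,
			dma_addr_t dma)
{
	/* Formerly dma_free_writecombine(). */
	dma_free_wc(dev, size, cpu_addr, dma);
}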
+ */ +#ifndef pgprot_dmacoherent +#define pgprot_dmacoherent(prot) pgprot_noncached(prot) +#endif + pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs); #else static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, diff --git a/include/linux/reservation.h b/include/linux/dma-resv.h index 644a22dbe53b..ee50d10f052b 100644 --- a/include/linux/reservation.h +++ b/include/linux/dma-resv.h @@ -50,98 +50,52 @@ extern struct lock_class_key reservation_seqcount_class; extern const char reservation_seqcount_string[]; /** - * struct reservation_object_list - a list of shared fences + * struct dma_resv_list - a list of shared fences * @rcu: for internal use * @shared_count: table of shared fences * @shared_max: for growing shared fence table * @shared: shared fence table */ -struct reservation_object_list { +struct dma_resv_list { struct rcu_head rcu; u32 shared_count, shared_max; struct dma_fence __rcu *shared[]; }; /** - * struct reservation_object - a reservation object manages fences for a buffer + * struct dma_resv - a reservation object manages fences for a buffer * @lock: update side lock * @seq: sequence count for managing RCU read-side synchronization * @fence_excl: the exclusive fence, if there is one currently * @fence: list of current shared fences */ -struct reservation_object { +struct dma_resv { struct ww_mutex lock; seqcount_t seq; struct dma_fence __rcu *fence_excl; - struct reservation_object_list __rcu *fence; + struct dma_resv_list __rcu *fence; }; -#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base) -#define reservation_object_assert_held(obj) \ - lockdep_assert_held(&(obj)->lock.base) +#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base) +#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base) /** - * reservation_object_init - initialize a reservation object - * @obj: the reservation object - */ -static inline void -reservation_object_init(struct reservation_object *obj) -{ - ww_mutex_init(&obj->lock, &reservation_ww_class); - - __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class); - RCU_INIT_POINTER(obj->fence, NULL); - RCU_INIT_POINTER(obj->fence_excl, NULL); -} - -/** - * reservation_object_fini - destroys a reservation object - * @obj: the reservation object - */ -static inline void -reservation_object_fini(struct reservation_object *obj) -{ - int i; - struct reservation_object_list *fobj; - struct dma_fence *excl; - - /* - * This object should be dead and all references must have - * been released to it, so no need to be protected with rcu. - */ - excl = rcu_dereference_protected(obj->fence_excl, 1); - if (excl) - dma_fence_put(excl); - - fobj = rcu_dereference_protected(obj->fence, 1); - if (fobj) { - for (i = 0; i < fobj->shared_count; ++i) - dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1)); - - kfree(fobj); - } - - ww_mutex_destroy(&obj->lock); -} - -/** - * reservation_object_get_list - get the reservation object's + * dma_resv_get_list - get the reservation object's * shared fence list, with update-side lock held * @obj: the reservation object * * Returns the shared fence list. Does NOT take references to * the fence. The obj->lock must be held. 
*/ -static inline struct reservation_object_list * -reservation_object_get_list(struct reservation_object *obj) +static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj) { return rcu_dereference_protected(obj->fence, - reservation_object_held(obj)); + dma_resv_held(obj)); } /** - * reservation_object_lock - lock the reservation object + * dma_resv_lock - lock the reservation object * @obj: the reservation object * @ctx: the locking context * @@ -155,15 +109,14 @@ reservation_object_get_list(struct reservation_object *obj) * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation * object may be locked by itself by passing NULL as @ctx. */ -static inline int -reservation_object_lock(struct reservation_object *obj, - struct ww_acquire_ctx *ctx) +static inline int dma_resv_lock(struct dma_resv *obj, + struct ww_acquire_ctx *ctx) { return ww_mutex_lock(&obj->lock, ctx); } /** - * reservation_object_lock_interruptible - lock the reservation object + * dma_resv_lock_interruptible - lock the reservation object * @obj: the reservation object * @ctx: the locking context * @@ -177,16 +130,45 @@ reservation_object_lock(struct reservation_object *obj, * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation * object may be locked by itself by passing NULL as @ctx. */ -static inline int -reservation_object_lock_interruptible(struct reservation_object *obj, - struct ww_acquire_ctx *ctx) +static inline int dma_resv_lock_interruptible(struct dma_resv *obj, + struct ww_acquire_ctx *ctx) { return ww_mutex_lock_interruptible(&obj->lock, ctx); } +/** + * dma_resv_lock_slow - slowpath lock the reservation object + * @obj: the reservation object + * @ctx: the locking context + * + * Acquires the reservation object after a die case. This function + * will sleep until the lock becomes available. See dma_resv_lock() as + * well. + */ +static inline void dma_resv_lock_slow(struct dma_resv *obj, + struct ww_acquire_ctx *ctx) +{ + ww_mutex_lock_slow(&obj->lock, ctx); +} + +/** + * dma_resv_lock_slow_interruptible - slowpath lock the reservation + * object, interruptible + * @obj: the reservation object + * @ctx: the locking context + * + * Acquires the reservation object interruptible after a die case. This function + * will sleep until the lock becomes available. See + * dma_resv_lock_interruptible() as well. + */ +static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj, + struct ww_acquire_ctx *ctx) +{ + return ww_mutex_lock_slow_interruptible(&obj->lock, ctx); +} /** - * reservation_object_trylock - trylock the reservation object + * dma_resv_trylock - trylock the reservation object * @obj: the reservation object * * Tries to lock the reservation object for exclusive access and modification. @@ -199,26 +181,46 @@ reservation_object_lock_interruptible(struct reservation_object *obj, * * Returns true if the lock was acquired, false otherwise. */ -static inline bool __must_check -reservation_object_trylock(struct reservation_object *obj) +static inline bool __must_check dma_resv_trylock(struct dma_resv *obj) { return ww_mutex_trylock(&obj->lock); } /** - * reservation_object_unlock - unlock the reservation object + * dma_resv_is_locked - is the reservation object locked + * @obj: the reservation object + * + * Returns true if the mutex is locked, false if unlocked. 
+ */ +static inline bool dma_resv_is_locked(struct dma_resv *obj) +{ + return ww_mutex_is_locked(&obj->lock); +} + +/** + * dma_resv_locking_ctx - returns the context used to lock the object + * @obj: the reservation object + * + * Returns the context used to lock a reservation object or NULL if no context + * was used or the object is not locked at all. + */ +static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj) +{ + return READ_ONCE(obj->lock.ctx); +} + +/** + * dma_resv_unlock - unlock the reservation object * @obj: the reservation object * * Unlocks the reservation object following exclusive access. */ -static inline void -reservation_object_unlock(struct reservation_object *obj) +static inline void dma_resv_unlock(struct dma_resv *obj) { #ifdef CONFIG_DEBUG_MUTEXES /* Test shared fence slot reservation */ if (rcu_access_pointer(obj->fence)) { - struct reservation_object_list *fence = - reservation_object_get_list(obj); + struct dma_resv_list *fence = dma_resv_get_list(obj); fence->shared_max = fence->shared_count; } @@ -227,7 +229,7 @@ reservation_object_unlock(struct reservation_object *obj) } /** - * reservation_object_get_excl - get the reservation object's + * dma_resv_get_excl - get the reservation object's * exclusive fence, with update-side lock held * @obj: the reservation object * @@ -239,14 +241,14 @@ reservation_object_unlock(struct reservation_object *obj) * The exclusive fence or NULL */ static inline struct dma_fence * -reservation_object_get_excl(struct reservation_object *obj) +dma_resv_get_excl(struct dma_resv *obj) { return rcu_dereference_protected(obj->fence_excl, - reservation_object_held(obj)); + dma_resv_held(obj)); } /** - * reservation_object_get_excl_rcu - get the reservation object's + * dma_resv_get_excl_rcu - get the reservation object's * exclusive fence, without lock held. 
* @obj: the reservation object * @@ -257,7 +259,7 @@ reservation_object_get_excl(struct reservation_object *obj) * The exclusive fence or NULL if none */ static inline struct dma_fence * -reservation_object_get_excl_rcu(struct reservation_object *obj) +dma_resv_get_excl_rcu(struct dma_resv *obj) { struct dma_fence *fence; @@ -271,27 +273,23 @@ reservation_object_get_excl_rcu(struct reservation_object *obj) return fence; } -int reservation_object_reserve_shared(struct reservation_object *obj, - unsigned int num_fences); -void reservation_object_add_shared_fence(struct reservation_object *obj, - struct dma_fence *fence); +void dma_resv_init(struct dma_resv *obj); +void dma_resv_fini(struct dma_resv *obj); +int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); +void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); -void reservation_object_add_excl_fence(struct reservation_object *obj, - struct dma_fence *fence); +void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); -int reservation_object_get_fences_rcu(struct reservation_object *obj, - struct dma_fence **pfence_excl, - unsigned *pshared_count, - struct dma_fence ***pshared); +int dma_resv_get_fences_rcu(struct dma_resv *obj, + struct dma_fence **pfence_excl, + unsigned *pshared_count, + struct dma_fence ***pshared); -int reservation_object_copy_fences(struct reservation_object *dst, - struct reservation_object *src); +int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); -long reservation_object_wait_timeout_rcu(struct reservation_object *obj, - bool wait_all, bool intr, - unsigned long timeout); +long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr, + unsigned long timeout); -bool reservation_object_test_signaled_rcu(struct reservation_object *obj, - bool test_all); +bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all); #endif /* _LINUX_RESERVATION_H */ diff --git a/include/linux/edma.h b/include/linux/edma.h deleted file mode 100644 index a1307e7827e8..000000000000 --- a/include/linux/edma.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * TI EDMA DMA engine driver - * - * Copyright 2012 Texas Instruments - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ -#ifndef __LINUX_EDMA_H -#define __LINUX_EDMA_H - -struct dma_chan; - -#if defined(CONFIG_TI_EDMA) || defined(CONFIG_TI_EDMA_MODULE) -bool edma_filter_fn(struct dma_chan *, void *); -#else -static inline bool edma_filter_fn(struct dma_chan *chan, void *param) -{ - return false; -} -#endif - -#endif diff --git a/include/linux/export.h b/include/linux/export.h index fd8711ed9ac4..95f55b7f83a0 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -18,9 +18,8 @@ extern struct module __this_module; #define THIS_MODULE ((struct module *)0) #endif -#ifdef CONFIG_MODULES +#define NS_SEPARATOR "." 
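As a rough illustration of the reservation_object -> dma_resv rename above, here is a minimal sketch of taking the renamed lock and reading the exclusive fence; the helper name and calling context are invented, and it assumes <linux/dma-resv.h> and <linux/dma-fence.h> as introduced by this series.

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/* Hypothetical helper: lock the object, take a reference to the exclusive
 * fence if one is installed, then drop the lock again.  dma_resv_lock() may
 * be given a NULL ww_acquire_ctx when only a single object is locked.
 */
static struct dma_fence *example_get_excl_fence(struct dma_resv *resv,
						struct ww_acquire_ctx *ctx)
{
	struct dma_fence *excl;
	int ret;

	ret = dma_resv_lock(resv, ctx);	/* was reservation_object_lock() */
	if (ret)
		return ERR_PTR(ret);

	excl = dma_resv_get_excl(resv);	/* update-side lock is held here */
	if (excl)
		dma_fence_get(excl);

	dma_resv_unlock(resv);		/* was reservation_object_unlock() */
	return excl;
}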
-#if defined(__KERNEL__) && !defined(__GENKSYMS__) #ifdef CONFIG_MODVERSIONS /* Mark the CRC weak since genksyms apparently decides not to * generate a checksums for some symbols */ @@ -29,13 +28,13 @@ extern struct module __this_module; asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \ " .weak __crc_" #sym " \n" \ " .long __crc_" #sym " - . \n" \ - " .previous \n"); + " .previous \n") #else #define __CRC_SYMBOL(sym, sec) \ asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \ " .weak __crc_" #sym " \n" \ " .long __crc_" #sym " \n" \ - " .previous \n"); + " .previous \n") #endif #else #define __CRC_SYMBOL(sym, sec) @@ -49,47 +48,89 @@ extern struct module __this_module; * absolute relocations that require runtime processing on relocatable * kernels. */ +#define __KSYMTAB_ENTRY_NS(sym, sec, ns) \ + __ADDRESSABLE(sym) \ + asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \ + " .balign 4 \n" \ + "__ksymtab_" #sym NS_SEPARATOR #ns ": \n" \ + " .long " #sym "- . \n" \ + " .long __kstrtab_" #sym "- . \n" \ + " .long __kstrtab_ns_" #sym "- . \n" \ + " .previous \n") + #define __KSYMTAB_ENTRY(sym, sec) \ __ADDRESSABLE(sym) \ asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \ - " .balign 8 \n" \ + " .balign 4 \n" \ "__ksymtab_" #sym ": \n" \ " .long " #sym "- . \n" \ " .long __kstrtab_" #sym "- . \n" \ + " .long 0 \n" \ " .previous \n") struct kernel_symbol { int value_offset; int name_offset; + int namespace_offset; }; #else +#define __KSYMTAB_ENTRY_NS(sym, sec, ns) \ + static const struct kernel_symbol __ksymtab_##sym##__##ns \ + asm("__ksymtab_" #sym NS_SEPARATOR #ns) \ + __attribute__((section("___ksymtab" sec "+" #sym), used)) \ + __aligned(sizeof(void *)) \ + = { (unsigned long)&sym, __kstrtab_##sym, __kstrtab_ns_##sym } + #define __KSYMTAB_ENTRY(sym, sec) \ static const struct kernel_symbol __ksymtab_##sym \ + asm("__ksymtab_" #sym) \ __attribute__((section("___ksymtab" sec "+" #sym), used)) \ - = { (unsigned long)&sym, __kstrtab_##sym } + __aligned(sizeof(void *)) \ + = { (unsigned long)&sym, __kstrtab_##sym, NULL } struct kernel_symbol { unsigned long value; const char *name; + const char *namespace; }; #endif -/* For every exported symbol, place a struct in the __ksymtab section */ -#define ___EXPORT_SYMBOL(sym, sec) \ +#ifdef __GENKSYMS__ + +#define ___EXPORT_SYMBOL(sym,sec) __GENKSYMS_EXPORT_SYMBOL(sym) +#define ___EXPORT_SYMBOL_NS(sym,sec,ns) __GENKSYMS_EXPORT_SYMBOL(sym) + +#else + +#define ___export_symbol_common(sym, sec) \ extern typeof(sym) sym; \ - __CRC_SYMBOL(sym, sec) \ + __CRC_SYMBOL(sym, sec); \ static const char __kstrtab_##sym[] \ __attribute__((section("__ksymtab_strings"), used, aligned(1))) \ - = #sym; \ + = #sym \ + +/* For every exported symbol, place a struct in the __ksymtab section */ +#define ___EXPORT_SYMBOL_NS(sym, sec, ns) \ + ___export_symbol_common(sym, sec); \ + static const char __kstrtab_ns_##sym[] \ + __attribute__((section("__ksymtab_strings"), used, aligned(1))) \ + = #ns; \ + __KSYMTAB_ENTRY_NS(sym, sec, ns) + +#define ___EXPORT_SYMBOL(sym, sec) \ + ___export_symbol_common(sym, sec); \ __KSYMTAB_ENTRY(sym, sec) -#if defined(__DISABLE_EXPORTS) +#endif + +#if !defined(CONFIG_MODULES) || defined(__DISABLE_EXPORTS) /* * Allow symbol exports to be disabled completely so that C code may * be reused in other execution contexts such as the UEFI stub or the * decompressor. 
*/ +#define __EXPORT_SYMBOL_NS(sym, sec, ns) #define __EXPORT_SYMBOL(sym, sec) #elif defined(CONFIG_TRIM_UNUSED_KSYMS) @@ -116,38 +157,43 @@ struct kernel_symbol { #define __cond_export_sym_1(sym, sec) ___EXPORT_SYMBOL(sym, sec) #define __cond_export_sym_0(sym, sec) /* nothing */ +#define __EXPORT_SYMBOL_NS(sym, sec, ns) \ + __ksym_marker(sym); \ + __cond_export_ns_sym(sym, sec, ns, __is_defined(__KSYM_##sym)) +#define __cond_export_ns_sym(sym, sec, ns, conf) \ + ___cond_export_ns_sym(sym, sec, ns, conf) +#define ___cond_export_ns_sym(sym, sec, ns, enabled) \ + __cond_export_ns_sym_##enabled(sym, sec, ns) +#define __cond_export_ns_sym_1(sym, sec, ns) ___EXPORT_SYMBOL_NS(sym, sec, ns) +#define __cond_export_ns_sym_0(sym, sec, ns) /* nothing */ + #else -#define __EXPORT_SYMBOL ___EXPORT_SYMBOL -#endif -#define EXPORT_SYMBOL(sym) \ - __EXPORT_SYMBOL(sym, "") +#define __EXPORT_SYMBOL_NS(sym,sec,ns) ___EXPORT_SYMBOL_NS(sym,sec,ns) +#define __EXPORT_SYMBOL(sym,sec) ___EXPORT_SYMBOL(sym,sec) -#define EXPORT_SYMBOL_GPL(sym) \ - __EXPORT_SYMBOL(sym, "_gpl") +#endif /* CONFIG_MODULES */ -#define EXPORT_SYMBOL_GPL_FUTURE(sym) \ - __EXPORT_SYMBOL(sym, "_gpl_future") +#ifdef DEFAULT_SYMBOL_NAMESPACE +#undef __EXPORT_SYMBOL +#define __EXPORT_SYMBOL(sym, sec) \ + __EXPORT_SYMBOL_NS(sym, sec, DEFAULT_SYMBOL_NAMESPACE) +#endif + +#define EXPORT_SYMBOL(sym) __EXPORT_SYMBOL(sym, "") +#define EXPORT_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_gpl") +#define EXPORT_SYMBOL_GPL_FUTURE(sym) __EXPORT_SYMBOL(sym, "_gpl_future") +#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL_NS(sym, "", ns) +#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL_NS(sym, "_gpl", ns) #ifdef CONFIG_UNUSED_SYMBOLS -#define EXPORT_UNUSED_SYMBOL(sym) __EXPORT_SYMBOL(sym, "_unused") -#define EXPORT_UNUSED_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_unused_gpl") +#define EXPORT_UNUSED_SYMBOL(sym) __EXPORT_SYMBOL(sym, "_unused") +#define EXPORT_UNUSED_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_unused_gpl") #else #define EXPORT_UNUSED_SYMBOL(sym) #define EXPORT_UNUSED_SYMBOL_GPL(sym) #endif -#endif /* __GENKSYMS__ */ - -#else /* !CONFIG_MODULES... */ - -#define EXPORT_SYMBOL(sym) -#define EXPORT_SYMBOL_GPL(sym) -#define EXPORT_SYMBOL_GPL_FUTURE(sym) -#define EXPORT_UNUSED_SYMBOL(sym) -#define EXPORT_UNUSED_SYMBOL_GPL(sym) - -#endif /* CONFIG_MODULES */ #endif /* !__ASSEMBLY__ */ #endif /* _LINUX_EXPORT_H */ diff --git a/include/linux/extable.h b/include/linux/extable.h index 41c5b3a25f67..81ecfaa83ad3 100644 --- a/include/linux/extable.h +++ b/include/linux/extable.h @@ -19,6 +19,8 @@ void trim_init_extable(struct module *m); /* Given an address, look for it in the exception tables */ const struct exception_table_entry *search_exception_tables(unsigned long add); +const struct exception_table_entry * +search_kernel_exception_table(unsigned long addr); #ifdef CONFIG_MODULES /* For extable.c to search modules' exception tables. 
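To show how the namespaced exports above are meant to be used, a brief sketch follows; the symbol and namespace names are invented, and the consuming side assumes the companion MODULE_IMPORT_NS() macro from <linux/module.h> added in the same series.

/* Exporting module: publish example_do_work() only within EXAMPLE_NS. */
#include <linux/export.h>
#include <linux/module.h>

int example_do_work(void)
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(example_do_work, EXAMPLE_NS);

/* Consuming module: declare the namespace it relies on, otherwise modpost
 * warns at build time about the missing import.
 */
MODULE_IMPORT_NS(EXAMPLE_NS);

Defining DEFAULT_SYMBOL_NAMESPACE before including export.h has the same effect for every plain EXPORT_SYMBOL()/EXPORT_SYMBOL_GPL() in that translation unit, as the #ifdef block above shows.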
*/ diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 65559900d4d7..284738996028 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -36,11 +36,17 @@ #define F2FS_MAX_QUOTAS 3 +#define F2FS_ENC_UTF8_12_1 1 +#define F2FS_ENC_STRICT_MODE_FL (1 << 0) +#define f2fs_has_strict_mode(sbi) \ + (sbi->s_encoding_flags & F2FS_ENC_STRICT_MODE_FL) + #define F2FS_IO_SIZE(sbi) (1 << F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */ #define F2FS_IO_SIZE_KB(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 2)) /* KB */ #define F2FS_IO_SIZE_BYTES(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 12)) /* B */ #define F2FS_IO_SIZE_BITS(sbi) (F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */ #define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1) +#define F2FS_IO_ALIGNED(sbi) (F2FS_IO_SIZE(sbi) > 1) /* This flag is used by node and meta inodes, and by recovery */ #define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) @@ -109,7 +115,9 @@ struct f2fs_super_block { struct f2fs_device devs[MAX_DEVICES]; /* device list */ __le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */ __u8 hot_ext_count; /* # of hot file extension */ - __u8 reserved[310]; /* valid reserved region */ + __le16 s_encoding; /* Filename charset encoding */ + __le16 s_encoding_flags; /* Filename charset encoding flags */ + __u8 reserved[306]; /* valid reserved region */ __le32 crc; /* checksum of superblock */ } __packed; diff --git a/include/linux/fb.h b/include/linux/fb.h index 303771264644..756706b666a1 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -135,10 +135,6 @@ struct fb_cursor_user { /* A display blank is requested */ #define FB_EVENT_BLANK 0x09 -/* A hardware display blank early change occurred */ -#define FB_EARLY_EVENT_BLANK 0x10 -/* A hardware display blank revert early change occurred */ -#define FB_R_EARLY_EVENT_BLANK 0x11 struct fb_event { struct fb_info *info; @@ -721,8 +717,6 @@ extern int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var); extern const unsigned char *fb_firmware_edid(struct device *device); extern void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs); -extern void fb_edid_add_monspecs(unsigned char *edid, - struct fb_monspecs *specs); extern void fb_destroy_modedb(struct fb_videomode *modedb); extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb); extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter); @@ -796,7 +790,6 @@ struct dmt_videomode { extern const char *fb_mode_option; extern const struct fb_videomode vesa_modes[]; -extern const struct fb_videomode cea_modes[65]; extern const struct dmt_videomode dmt_modes[]; struct fb_modelist { diff --git a/include/linux/filter.h b/include/linux/filter.h index 92c6e31fb008..2ce57645f3cd 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -749,14 +749,14 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) } static inline u8 -bpf_ctx_narrow_load_shift(u32 off, u32 size, u32 size_default) +bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default) { - u8 load_off = off & (size_default - 1); + u8 access_off = off & (size_default - 1); #ifdef __LITTLE_ENDIAN - return load_off * 8; + return access_off; #else - return (size_default - (load_off + size)) * 8; + return size_default - (access_off + size); #endif } diff --git a/include/linux/fips.h b/include/linux/fips.h index afeeece92302..c6961e932fef 100644 --- a/include/linux/fips.h +++ b/include/linux/fips.h @@ -4,8 +4,15 @@ #ifdef CONFIG_CRYPTO_FIPS extern int fips_enabled; 
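The filter.h hunk above turns bpf_ctx_narrow_load_shift() into bpf_ctx_narrow_access_offset(), which reports a byte offset rather than a bit shift. A small worked example of the arithmetic, for illustration only:

/*
 * size_default = 4 bytes, narrow access of size = 1 byte at off = 2:
 *   access_off = off & (size_default - 1)                  = 2
 *   little-endian: return access_off                       = 2
 *   big-endian:    return size_default - (access_off + 1)  = 1
 * The previous helper returned the same quantities multiplied by 8,
 * i.e. expressed as a shift in bits.
 */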
+extern struct atomic_notifier_head fips_fail_notif_chain; + +void fips_fail_notify(void); + #else #define fips_enabled 0 + +static inline void fips_fail_notify(void) {} + #endif #endif diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h index 01684d935580..013ae4819deb 100644 --- a/include/linux/firmware/intel/stratix10-smc.h +++ b/include/linux/firmware/intel/stratix10-smc.h @@ -210,7 +210,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK) -/* +/** * Request INTEL_SIP_SMC_REG_READ * * Read a protected register at EL3 @@ -229,7 +229,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_REG_READ \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_READ) -/* +/** * Request INTEL_SIP_SMC_REG_WRITE * * Write a protected register at EL3 @@ -248,7 +248,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_REG_WRITE \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_WRITE) -/* +/** * Request INTEL_SIP_SMC_FUNCID_REG_UPDATE * * Update one or more bits in a protected register at EL3 using a @@ -269,7 +269,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_REG_UPDATE \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_UPDATE) -/* +/** * Request INTEL_SIP_SMC_RSU_STATUS * * Request remote status update boot log, call is synchronous. @@ -292,7 +292,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_RSU_STATUS \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_STATUS) -/* +/** * Request INTEL_SIP_SMC_RSU_UPDATE * * Request to set the offset of the bitstream to boot after reboot, call @@ -310,7 +310,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_RSU_UPDATE \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_UPDATE) -/* +/** * Request INTEL_SIP_SMC_ECC_DBE * * Sync call used by service driver at EL1 to alert EL3 that a Double @@ -329,3 +329,42 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECC_DBE) #endif + +/** + * Request INTEL_SIP_SMC_RSU_NOTIFY + * + * Sync call used by service driver at EL1 to report hard processor + * system execution stage to firmware + * + * Call register usage: + * a0 INTEL_SIP_SMC_RSU_NOTIFY + * a1 32bit value representing hard processor system execution stage + * a2-7 not used + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + */ +#define INTEL_SIP_SMC_FUNCID_RSU_NOTIFY 14 +#define INTEL_SIP_SMC_RSU_NOTIFY \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_NOTIFY) + +/** + * Request INTEL_SIP_SMC_RSU_RETRY_COUNTER + * + * Sync call used by service driver at EL1 to query RSU retry counter + * + * Call register usage: + * a0 INTEL_SIP_SMC_RSU_RETRY_COUNTER + * a1-7 not used + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + * a1 the retry counter + * + * Or + * + * a0 INTEL_SIP_SMC_RSU_ERROR + */ +#define INTEL_SIP_SMC_FUNCID_RSU_RETRY_COUNTER 15 +#define INTEL_SIP_SMC_RSU_RETRY_COUNTER \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_RETRY_COUNTER) diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h index e521f172a47a..b6c4302a39e0 
100644 --- a/include/linux/firmware/intel/stratix10-svc-client.h +++ b/include/linux/firmware/intel/stratix10-svc-client.h @@ -95,6 +95,13 @@ struct stratix10_svc_chan; * * @COMMAND_RSU_UPDATE: set the offset of the bitstream to boot after reboot, * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR + * + * @COMMAND_RSU_NOTIFY: report the status of hard processor system + * software to firmware, return status is SVC_STATUS_RSU_OK or + * SVC_STATUS_RSU_ERROR + * + * @COMMAND_RSU_RETRY: query firmware for the current image's retry counter, + * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR */ enum stratix10_svc_command_code { COMMAND_NOOP = 0, @@ -103,7 +110,9 @@ enum stratix10_svc_command_code { COMMAND_RECONFIG_DATA_CLAIM, COMMAND_RECONFIG_STATUS, COMMAND_RSU_STATUS, - COMMAND_RSU_UPDATE + COMMAND_RSU_UPDATE, + COMMAND_RSU_NOTIFY, + COMMAND_RSU_RETRY, }; /** diff --git a/include/linux/fpga/altera-pr-ip-core.h b/include/linux/fpga/altera-pr-ip-core.h index 7d4664730d60..0b08ac20ab16 100644 --- a/include/linux/fpga/altera-pr-ip-core.h +++ b/include/linux/fpga/altera-pr-ip-core.h @@ -13,6 +13,6 @@ #include <linux/io.h> int alt_pr_register(struct device *dev, void __iomem *reg_base); -int alt_pr_unregister(struct device *dev); +void alt_pr_unregister(struct device *dev); #endif /* _ALT_PR_IP_CORE_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 997a530ff4e9..b0c6b0d34d02 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -64,6 +64,8 @@ struct workqueue_struct; struct iov_iter; struct fscrypt_info; struct fscrypt_operations; +struct fsverity_info; +struct fsverity_operations; struct fs_context; struct fs_parameter_description; @@ -427,6 +429,7 @@ int pagecache_write_end(struct file *, struct address_space *mapping, * @i_pages: Cached pages. * @gfp_mask: Memory allocation flags to use for allocating pages. * @i_mmap_writable: Number of VM_SHARED mappings. + * @nr_thps: Number of THPs in the pagecache (non-shmem only). * @i_mmap: Tree of private and shared mappings. * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable. * @nrpages: Number of page entries, protected by the i_pages lock. 
@@ -444,6 +447,10 @@ struct address_space { struct xarray i_pages; gfp_t gfp_mask; atomic_t i_mmap_writable; +#ifdef CONFIG_READ_ONLY_THP_FOR_FS + /* number of thp, only for non-shmem files */ + atomic_t nr_thps; +#endif struct rb_root_cached i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; @@ -723,9 +730,15 @@ struct inode { struct fscrypt_info *i_crypt_info; #endif +#ifdef CONFIG_FS_VERITY + struct fsverity_info *i_verity_info; +#endif + void *i_private; /* fs or device private pointer */ } __randomize_layout; +struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode); + static inline unsigned int i_blocksize(const struct inode *node) { return (1 << node->i_blkbits); @@ -1427,6 +1440,10 @@ struct super_block { const struct xattr_handler **s_xattr; #ifdef CONFIG_FS_ENCRYPTION const struct fscrypt_operations *s_cop; + struct key *s_master_keys; /* master crypto keys in use */ +#endif +#ifdef CONFIG_FS_VERITY + const struct fsverity_operations *s_vop; #endif struct hlist_bl_head s_roots; /* alternate root dentries for NFS */ struct list_head s_mounts; /* list of mounts; _not_ for fs use */ @@ -1448,6 +1465,9 @@ struct super_block { /* Granularity of c/m/atime in ns (cannot be worse than a second) */ u32 s_time_gran; + /* Time limits for c/m/atime in seconds */ + time64_t s_time_min; + time64_t s_time_max; #ifdef CONFIG_FSNOTIFY __u32 s_fsnotify_mask; struct fsnotify_mark_connector __rcu *s_fsnotify_marks; @@ -1965,6 +1985,7 @@ struct super_operations { #endif #define S_ENCRYPTED 16384 /* Encrypted file (using fs/crypto/) */ #define S_CASEFOLD 32768 /* Casefolded file */ +#define S_VERITY 65536 /* Verity file (using fs/verity/) */ /* * Note that nosuid etc flags are inode-specific: setting some file-system @@ -2006,6 +2027,7 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags #define IS_DAX(inode) ((inode)->i_flags & S_DAX) #define IS_ENCRYPTED(inode) ((inode)->i_flags & S_ENCRYPTED) #define IS_CASEFOLDED(inode) ((inode)->i_flags & S_CASEFOLD) +#define IS_VERITY(inode) ((inode)->i_flags & S_VERITY) #define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \ (inode)->i_rdev == WHITEOUT_DEV) @@ -2781,6 +2803,33 @@ static inline errseq_t filemap_sample_wb_err(struct address_space *mapping) return errseq_sample(&mapping->wb_err); } +static inline int filemap_nr_thps(struct address_space *mapping) +{ +#ifdef CONFIG_READ_ONLY_THP_FOR_FS + return atomic_read(&mapping->nr_thps); +#else + return 0; +#endif +} + +static inline void filemap_nr_thps_inc(struct address_space *mapping) +{ +#ifdef CONFIG_READ_ONLY_THP_FOR_FS + atomic_inc(&mapping->nr_thps); +#else + WARN_ON_ONCE(1); +#endif +} + +static inline void filemap_nr_thps_dec(struct address_space *mapping) +{ +#ifdef CONFIG_READ_ONLY_THP_FOR_FS + atomic_dec(&mapping->nr_thps); +#else + WARN_ON_ONCE(1); +#endif +} + extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync); extern int vfs_fsync(struct file *file, int datasync); @@ -3531,6 +3580,8 @@ extern void inode_nohighmem(struct inode *inode); /* mm/fadvise.c */ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice); +extern int generic_fadvise(struct file *file, loff_t offset, loff_t len, + int advice); #if defined(CONFIG_IO_URING) extern struct sock *io_uring_get_socket(struct file *file); @@ -3553,4 +3604,15 @@ static inline void simple_fill_fsxattr(struct fsxattr *fa, __u32 xflags) fa->fsx_xflags = xflags; } +/* + * Flush file data before changing attributes. 
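The fs.h changes above add timestamp_truncate() together with per-superblock s_time_min/s_time_max limits. A hypothetical sketch of a filesystem that stores 32-bit unsigned seconds on disk; the examplefs_* names are invented:

#include <linux/fs.h>
#include <linux/kernel.h>

/* Called from the hypothetical fill_super: declare the representable range. */
static void examplefs_set_time_limits(struct super_block *sb)
{
	sb->s_time_gran = 1;		/* nanosecond granularity in-core */
	sb->s_time_min  = 0;		/* nothing before the epoch */
	sb->s_time_max  = U32_MAX;	/* 32-bit unsigned seconds on disk */
}

/* Timestamp updates are then clamped to the declared range. */
static void examplefs_touch_mtime(struct inode *inode)
{
	inode->i_mtime = timestamp_truncate(current_time(inode), inode);
}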
Caller must hold any locks + * required to prevent further writes to this file until we're done setting + * flags. + */ +static inline int inode_drain_writes(struct inode *inode) +{ + inode_dio_wait(inode); + return filemap_write_and_wait(inode->i_mapping); +} + #endif /* _LINUX_FS_H */ diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index 7c6fe3d47fa6..e5c14e2c53d3 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -88,13 +88,13 @@ struct fs_context { struct mutex uapi_mutex; /* Userspace access mutex */ struct file_system_type *fs_type; void *fs_private; /* The filesystem's context */ + void *sget_key; struct dentry *root; /* The root and superblock */ struct user_namespace *user_ns; /* The user namespace for this mount */ struct net *net_ns; /* The network namespace for this mount */ const struct cred *cred; /* The mounter's credentials */ struct fc_log *log; /* Logging buffer */ const char *source; /* The source name (eg. dev path) */ - const char *subtype; /* The subtype to set on the superblock */ void *security; /* Linux S&M options */ void *s_fs_info; /* Proposed s_fs_info */ unsigned int sb_flags; /* Proposed superblock flags (SB_*) */ @@ -136,10 +136,11 @@ extern int vfs_get_tree(struct fs_context *fc); extern void put_fs_context(struct fs_context *fc); /* - * sget() wrapper to be called from the ->get_tree() op. + * sget() wrappers to be called from the ->get_tree() op. */ enum vfs_get_super_keying { vfs_get_single_super, /* Only one such superblock may exist */ + vfs_get_single_reconf_super, /* As above, but reconfigure if it exists */ vfs_get_keyed_super, /* Superblocks with different s_fs_info keys may exist */ vfs_get_independent_super, /* Multiple independent superblocks may exist */ }; @@ -147,12 +148,24 @@ extern int vfs_get_super(struct fs_context *fc, enum vfs_get_super_keying keying, int (*fill_super)(struct super_block *sb, struct fs_context *fc)); + extern int get_tree_nodev(struct fs_context *fc, int (*fill_super)(struct super_block *sb, struct fs_context *fc)); extern int get_tree_single(struct fs_context *fc, int (*fill_super)(struct super_block *sb, struct fs_context *fc)); +extern int get_tree_single_reconf(struct fs_context *fc, + int (*fill_super)(struct super_block *sb, + struct fs_context *fc)); +extern int get_tree_keyed(struct fs_context *fc, + int (*fill_super)(struct super_block *sb, + struct fs_context *fc), + void *key); + +extern int get_tree_bdev(struct fs_context *fc, + int (*fill_super)(struct super_block *sb, + struct fs_context *fc)); extern const struct file_operations fscontext_fops; diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index bd8f207a2fb6..f622f7460ed8 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -16,6 +16,7 @@ #include <linux/fs.h> #include <linux/mm.h> #include <linux/slab.h> +#include <uapi/linux/fscrypt.h> #define FS_CRYPTO_BLOCK_SIZE 16 @@ -42,7 +43,7 @@ struct fscrypt_name { #define fname_len(p) ((p)->disk_name.len) /* Maximum value for the third parameter of fscrypt_operations.set_context(). 
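fs_context.h above gains get_tree_keyed() and get_tree_bdev() next to the existing wrappers. A minimal, hypothetical ->get_tree() for a block-device filesystem, with the fill_super body elided:

#include <linux/fs.h>
#include <linux/fs_context.h>

static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	/* ... read the on-disk superblock from sb->s_bdev, set s_root ... */
	return 0;
}

static int examplefs_get_tree(struct fs_context *fc)
{
	/* Looks up or creates the superblock for fc->source and runs
	 * examplefs_fill_super() on a freshly created one.
	 */
	return get_tree_bdev(fc, examplefs_fill_super);
}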
*/ -#define FSCRYPT_SET_CONTEXT_MAX_SIZE 28 +#define FSCRYPT_SET_CONTEXT_MAX_SIZE 40 #ifdef CONFIG_FS_ENCRYPTION /* @@ -134,13 +135,23 @@ extern void fscrypt_free_bounce_page(struct page *bounce_page); /* policy.c */ extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); extern int fscrypt_ioctl_get_policy(struct file *, void __user *); +extern int fscrypt_ioctl_get_policy_ex(struct file *, void __user *); extern int fscrypt_has_permitted_context(struct inode *, struct inode *); extern int fscrypt_inherit_context(struct inode *, struct inode *, void *, bool); -/* keyinfo.c */ +/* keyring.c */ +extern void fscrypt_sb_free(struct super_block *sb); +extern int fscrypt_ioctl_add_key(struct file *filp, void __user *arg); +extern int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg); +extern int fscrypt_ioctl_remove_key_all_users(struct file *filp, + void __user *arg); +extern int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg); + +/* keysetup.c */ extern int fscrypt_get_encryption_info(struct inode *); extern void fscrypt_put_encryption_info(struct inode *); extern void fscrypt_free_inode(struct inode *); +extern int fscrypt_drop_inode(struct inode *inode); /* fname.c */ extern int fscrypt_setup_filename(struct inode *, const struct qstr *, @@ -353,6 +364,12 @@ static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg) return -EOPNOTSUPP; } +static inline int fscrypt_ioctl_get_policy_ex(struct file *filp, + void __user *arg) +{ + return -EOPNOTSUPP; +} + static inline int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) { @@ -366,7 +383,34 @@ static inline int fscrypt_inherit_context(struct inode *parent, return -EOPNOTSUPP; } -/* keyinfo.c */ +/* keyring.c */ +static inline void fscrypt_sb_free(struct super_block *sb) +{ +} + +static inline int fscrypt_ioctl_add_key(struct file *filp, void __user *arg) +{ + return -EOPNOTSUPP; +} + +static inline int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg) +{ + return -EOPNOTSUPP; +} + +static inline int fscrypt_ioctl_remove_key_all_users(struct file *filp, + void __user *arg) +{ + return -EOPNOTSUPP; +} + +static inline int fscrypt_ioctl_get_key_status(struct file *filp, + void __user *arg) +{ + return -EOPNOTSUPP; +} + +/* keysetup.c */ static inline int fscrypt_get_encryption_info(struct inode *inode) { return -EOPNOTSUPP; @@ -381,6 +425,11 @@ static inline void fscrypt_free_inode(struct inode *inode) { } +static inline int fscrypt_drop_inode(struct inode *inode) +{ + return 0; +} + /* fname.c */ static inline int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname, diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h new file mode 100644 index 000000000000..3b6b8ccebe7d --- /dev/null +++ b/include/linux/fsverity.h @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * fs-verity: read-only file-based authenticity protection + * + * This header declares the interface between the fs/verity/ support layer and + * filesystems that support fs-verity. + * + * Copyright 2019 Google LLC + */ + +#ifndef _LINUX_FSVERITY_H +#define _LINUX_FSVERITY_H + +#include <linux/fs.h> +#include <uapi/linux/fsverity.h> + +/* Verity operations for filesystems */ +struct fsverity_operations { + + /** + * Begin enabling verity on the given file. + * + * @filp: a readonly file descriptor for the file + * + * The filesystem must do any needed filesystem-specific preparations + * for enabling verity, e.g. evicting inline data. 
It also must return + * -EBUSY if verity is already being enabled on the given file. + * + * i_rwsem is held for write. + * + * Return: 0 on success, -errno on failure + */ + int (*begin_enable_verity)(struct file *filp); + + /** + * End enabling verity on the given file. + * + * @filp: a readonly file descriptor for the file + * @desc: the verity descriptor to write, or NULL on failure + * @desc_size: size of verity descriptor, or 0 on failure + * @merkle_tree_size: total bytes the Merkle tree took up + * + * If desc == NULL, then enabling verity failed and the filesystem only + * must do any necessary cleanups. Else, it must also store the given + * verity descriptor to a fs-specific location associated with the inode + * and do any fs-specific actions needed to mark the inode as a verity + * inode, e.g. setting a bit in the on-disk inode. The filesystem is + * also responsible for setting the S_VERITY flag in the VFS inode. + * + * i_rwsem is held for write, but it may have been dropped between + * ->begin_enable_verity() and ->end_enable_verity(). + * + * Return: 0 on success, -errno on failure + */ + int (*end_enable_verity)(struct file *filp, const void *desc, + size_t desc_size, u64 merkle_tree_size); + + /** + * Get the verity descriptor of the given inode. + * + * @inode: an inode with the S_VERITY flag set + * @buf: buffer in which to place the verity descriptor + * @bufsize: size of @buf, or 0 to retrieve the size only + * + * If bufsize == 0, then the size of the verity descriptor is returned. + * Otherwise the verity descriptor is written to 'buf' and its actual + * size is returned; -ERANGE is returned if it's too large. This may be + * called by multiple processes concurrently on the same inode. + * + * Return: the size on success, -errno on failure + */ + int (*get_verity_descriptor)(struct inode *inode, void *buf, + size_t bufsize); + + /** + * Read a Merkle tree page of the given inode. + * + * @inode: the inode + * @index: 0-based index of the page within the Merkle tree + * + * This can be called at any time on an open verity file, as well as + * between ->begin_enable_verity() and ->end_enable_verity(). It may be + * called by multiple processes concurrently, even with the same page. + * + * Note that this must retrieve a *page*, not necessarily a *block*. + * + * Return: the page on success, ERR_PTR() on failure + */ + struct page *(*read_merkle_tree_page)(struct inode *inode, + pgoff_t index); + + /** + * Write a Merkle tree block to the given inode. + * + * @inode: the inode for which the Merkle tree is being built + * @buf: block to write + * @index: 0-based index of the block within the Merkle tree + * @log_blocksize: log base 2 of the Merkle tree block size + * + * This is only called between ->begin_enable_verity() and + * ->end_enable_verity(). 
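To make the hook descriptions above easier to follow, here is the rough shape of a filesystem's fsverity_operations table; every examplefs_* function is an invented stub rather than real filesystem code, and a real implementation would hang this table off sb->s_vop.

#include <linux/fsverity.h>
#include <linux/err.h>

static int examplefs_begin_enable_verity(struct file *filp)
{
	/* Evict inline data, return -EBUSY if already being enabled, etc. */
	return 0;
}

static int examplefs_end_enable_verity(struct file *filp, const void *desc,
				       size_t desc_size, u64 merkle_tree_size)
{
	if (!desc)
		return 0;	/* enabling failed: nothing to persist */
	/* ... store the descriptor and set the on-disk verity bit ... */
	return 0;
}

static int examplefs_get_verity_descriptor(struct inode *inode, void *buf,
					   size_t bufsize)
{
	return -EOPNOTSUPP;	/* stub */
}

static struct page *examplefs_read_merkle_tree_page(struct inode *inode,
						    pgoff_t index)
{
	return ERR_PTR(-EOPNOTSUPP);	/* stub */
}

static int examplefs_write_merkle_tree_block(struct inode *inode,
					     const void *buf, u64 index,
					     int log_blocksize)
{
	return -EOPNOTSUPP;	/* stub */
}

static const struct fsverity_operations examplefs_verityops = {
	.begin_enable_verity	 = examplefs_begin_enable_verity,
	.end_enable_verity	 = examplefs_end_enable_verity,
	.get_verity_descriptor	 = examplefs_get_verity_descriptor,
	.read_merkle_tree_page	 = examplefs_read_merkle_tree_page,
	.write_merkle_tree_block = examplefs_write_merkle_tree_block,
};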
+ * + * Return: 0 on success, -errno on failure + */ + int (*write_merkle_tree_block)(struct inode *inode, const void *buf, + u64 index, int log_blocksize); +}; + +#ifdef CONFIG_FS_VERITY + +static inline struct fsverity_info *fsverity_get_info(const struct inode *inode) +{ + /* pairs with the cmpxchg() in fsverity_set_info() */ + return READ_ONCE(inode->i_verity_info); +} + +/* enable.c */ + +extern int fsverity_ioctl_enable(struct file *filp, const void __user *arg); + +/* measure.c */ + +extern int fsverity_ioctl_measure(struct file *filp, void __user *arg); + +/* open.c */ + +extern int fsverity_file_open(struct inode *inode, struct file *filp); +extern int fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr); +extern void fsverity_cleanup_inode(struct inode *inode); + +/* verify.c */ + +extern bool fsverity_verify_page(struct page *page); +extern void fsverity_verify_bio(struct bio *bio); +extern void fsverity_enqueue_verify_work(struct work_struct *work); + +#else /* !CONFIG_FS_VERITY */ + +static inline struct fsverity_info *fsverity_get_info(const struct inode *inode) +{ + return NULL; +} + +/* enable.c */ + +static inline int fsverity_ioctl_enable(struct file *filp, + const void __user *arg) +{ + return -EOPNOTSUPP; +} + +/* measure.c */ + +static inline int fsverity_ioctl_measure(struct file *filp, void __user *arg) +{ + return -EOPNOTSUPP; +} + +/* open.c */ + +static inline int fsverity_file_open(struct inode *inode, struct file *filp) +{ + return IS_VERITY(inode) ? -EOPNOTSUPP : 0; +} + +static inline int fsverity_prepare_setattr(struct dentry *dentry, + struct iattr *attr) +{ + return IS_VERITY(d_inode(dentry)) ? -EOPNOTSUPP : 0; +} + +static inline void fsverity_cleanup_inode(struct inode *inode) +{ +} + +/* verify.c */ + +static inline bool fsverity_verify_page(struct page *page) +{ + WARN_ON(1); + return false; +} + +static inline void fsverity_verify_bio(struct bio *bio) +{ + WARN_ON(1); +} + +static inline void fsverity_enqueue_verify_work(struct work_struct *work) +{ + WARN_ON(1); +} + +#endif /* !CONFIG_FS_VERITY */ + +/** + * fsverity_active() - do reads from the inode need to go through fs-verity? + * + * This checks whether ->i_verity_info has been set. + * + * Filesystems call this from ->readpages() to check whether the pages need to + * be verified or not. Don't use IS_VERITY() for this purpose; it's subject to + * a race condition where the file is being read concurrently with + * FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before ->i_verity_info.) + */ +static inline bool fsverity_active(const struct inode *inode) +{ + return fsverity_get_info(inode) != NULL; +} + +#endif /* _LINUX_FSVERITY_H */ diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index a11c8c56c78b..ababd6bc82f3 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -110,10 +110,11 @@ struct fwnode_operations { (fwnode ? (fwnode_has_op(fwnode, op) ? \ (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : -ENXIO) : \ -EINVAL) -#define fwnode_call_bool_op(fwnode, op, ...) \ - (fwnode ? (fwnode_has_op(fwnode, op) ? \ - (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : false) : \ - false) + +#define fwnode_call_bool_op(fwnode, op, ...) \ + (fwnode_has_op(fwnode, op) ? \ + (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : false) + #define fwnode_call_ptr_op(fwnode, op, ...) \ (fwnode_has_op(fwnode, op) ? 
\ (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : NULL) diff --git a/include/linux/greybus.h b/include/linux/greybus.h new file mode 100644 index 000000000000..18c0fb958b74 --- /dev/null +++ b/include/linux/greybus.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus driver and device API + * + * Copyright 2014-2015 Google Inc. + * Copyright 2014-2015 Linaro Ltd. + */ + +#ifndef __LINUX_GREYBUS_H +#define __LINUX_GREYBUS_H + +#ifdef __KERNEL__ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/pm_runtime.h> +#include <linux/idr.h> + +#include <linux/greybus/greybus_id.h> +#include <linux/greybus/greybus_manifest.h> +#include <linux/greybus/greybus_protocols.h> +#include <linux/greybus/manifest.h> +#include <linux/greybus/hd.h> +#include <linux/greybus/svc.h> +#include <linux/greybus/control.h> +#include <linux/greybus/module.h> +#include <linux/greybus/interface.h> +#include <linux/greybus/bundle.h> +#include <linux/greybus/connection.h> +#include <linux/greybus/operation.h> + +/* Matches up with the Greybus Protocol specification document */ +#define GREYBUS_VERSION_MAJOR 0x00 +#define GREYBUS_VERSION_MINOR 0x01 + +#define GREYBUS_ID_MATCH_DEVICE \ + (GREYBUS_ID_MATCH_VENDOR | GREYBUS_ID_MATCH_PRODUCT) + +#define GREYBUS_DEVICE(v, p) \ + .match_flags = GREYBUS_ID_MATCH_DEVICE, \ + .vendor = (v), \ + .product = (p), + +#define GREYBUS_DEVICE_CLASS(c) \ + .match_flags = GREYBUS_ID_MATCH_CLASS, \ + .class = (c), + +/* Maximum number of CPorts */ +#define CPORT_ID_MAX 4095 /* UniPro max id is 4095 */ +#define CPORT_ID_BAD U16_MAX + +struct greybus_driver { + const char *name; + + int (*probe)(struct gb_bundle *bundle, + const struct greybus_bundle_id *id); + void (*disconnect)(struct gb_bundle *bundle); + + const struct greybus_bundle_id *id_table; + + struct device_driver driver; +}; +#define to_greybus_driver(d) container_of(d, struct greybus_driver, driver) + +static inline void greybus_set_drvdata(struct gb_bundle *bundle, void *data) +{ + dev_set_drvdata(&bundle->dev, data); +} + +static inline void *greybus_get_drvdata(struct gb_bundle *bundle) +{ + return dev_get_drvdata(&bundle->dev); +} + +/* Don't call these directly, use the module_greybus_driver() macro instead */ +int greybus_register_driver(struct greybus_driver *driver, + struct module *module, const char *mod_name); +void greybus_deregister_driver(struct greybus_driver *driver); + +/* define to get proper THIS_MODULE and KBUILD_MODNAME values */ +#define greybus_register(driver) \ + greybus_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) +#define greybus_deregister(driver) \ + greybus_deregister_driver(driver) + +/** + * module_greybus_driver() - Helper macro for registering a Greybus driver + * @__greybus_driver: greybus_driver structure + * + * Helper macro for Greybus drivers to set up proper module init / exit + * functions. Replaces module_init() and module_exit() and keeps people from + * printing pointless things to the kernel log when their driver is loaded. 
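A hypothetical skeleton of a bundle driver built on the registration helpers declared above (the macro itself is defined just below); every example_* identifier is invented, and the class value is only a plausible choice from greybus_manifest.h.

#include <linux/module.h>
#include <linux/greybus.h>

static int example_probe(struct gb_bundle *bundle,
			 const struct greybus_bundle_id *id)
{
	/* Allocate per-bundle driver state and stash it on the bundle. */
	greybus_set_drvdata(bundle, NULL);
	return 0;
}

static void example_disconnect(struct gb_bundle *bundle)
{
	/* Undo whatever example_probe() set up. */
}

static const struct greybus_bundle_id example_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_VIBRATOR) },
	{ }
};

static struct greybus_driver example_driver = {
	.name		= "example",
	.probe		= example_probe,
	.disconnect	= example_disconnect,
	.id_table	= example_id_table,
};
module_greybus_driver(example_driver);

MODULE_LICENSE("GPL");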
+ */ +#define module_greybus_driver(__greybus_driver) \ + module_driver(__greybus_driver, greybus_register, greybus_deregister) + +int greybus_disabled(void); + +void gb_debugfs_init(void); +void gb_debugfs_cleanup(void); +struct dentry *gb_debugfs_get(void); + +extern struct bus_type greybus_bus_type; + +extern struct device_type greybus_hd_type; +extern struct device_type greybus_module_type; +extern struct device_type greybus_interface_type; +extern struct device_type greybus_control_type; +extern struct device_type greybus_bundle_type; +extern struct device_type greybus_svc_type; + +static inline int is_gb_host_device(const struct device *dev) +{ + return dev->type == &greybus_hd_type; +} + +static inline int is_gb_module(const struct device *dev) +{ + return dev->type == &greybus_module_type; +} + +static inline int is_gb_interface(const struct device *dev) +{ + return dev->type == &greybus_interface_type; +} + +static inline int is_gb_control(const struct device *dev) +{ + return dev->type == &greybus_control_type; +} + +static inline int is_gb_bundle(const struct device *dev) +{ + return dev->type == &greybus_bundle_type; +} + +static inline int is_gb_svc(const struct device *dev) +{ + return dev->type == &greybus_svc_type; +} + +static inline bool cport_id_valid(struct gb_host_device *hd, u16 cport_id) +{ + return cport_id != CPORT_ID_BAD && cport_id < hd->num_cports; +} + +#endif /* __KERNEL__ */ +#endif /* __LINUX_GREYBUS_H */ diff --git a/include/linux/greybus/bundle.h b/include/linux/greybus/bundle.h new file mode 100644 index 000000000000..df8d88424cb7 --- /dev/null +++ b/include/linux/greybus/bundle.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus bundles + * + * Copyright 2014 Google Inc. + * Copyright 2014 Linaro Ltd. 
+ */ + +#ifndef __BUNDLE_H +#define __BUNDLE_H + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/pm_runtime.h> +#include <linux/device.h> + +#define BUNDLE_ID_NONE U8_MAX + +/* Greybus "public" definitions" */ +struct gb_bundle { + struct device dev; + struct gb_interface *intf; + + u8 id; + u8 class; + u8 class_major; + u8 class_minor; + + size_t num_cports; + struct greybus_descriptor_cport *cport_desc; + + struct list_head connections; + u8 *state; + + struct list_head links; /* interface->bundles */ +}; +#define to_gb_bundle(d) container_of(d, struct gb_bundle, dev) + +/* Greybus "private" definitions" */ +struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id, + u8 class); +int gb_bundle_add(struct gb_bundle *bundle); +void gb_bundle_destroy(struct gb_bundle *bundle); + +/* Bundle Runtime PM wrappers */ +#ifdef CONFIG_PM +static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle) +{ + int retval; + + retval = pm_runtime_get_sync(&bundle->dev); + if (retval < 0) { + dev_err(&bundle->dev, + "pm_runtime_get_sync failed: %d\n", retval); + pm_runtime_put_noidle(&bundle->dev); + return retval; + } + + return 0; +} + +static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle) +{ + int retval; + + pm_runtime_mark_last_busy(&bundle->dev); + retval = pm_runtime_put_autosuspend(&bundle->dev); + + return retval; +} + +static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) +{ + pm_runtime_get_noresume(&bundle->dev); +} + +static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) +{ + pm_runtime_put_noidle(&bundle->dev); +} + +#else +static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle) +{ return 0; } +static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle) +{ return 0; } + +static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) {} +static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) {} +#endif + +#endif /* __BUNDLE_H */ diff --git a/include/linux/greybus/connection.h b/include/linux/greybus/connection.h new file mode 100644 index 000000000000..d59b7fc1de3e --- /dev/null +++ b/include/linux/greybus/connection.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus connections + * + * Copyright 2014 Google Inc. + * Copyright 2014 Linaro Ltd. 
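As a short usage sketch for the bundle runtime-PM wrappers above: a hypothetical helper that keeps the bundle resumed for the duration of some I/O and then lets it autosuspend again.

#include <linux/greybus.h>

static int example_bundle_io(struct gb_bundle *bundle)
{
	int ret;

	ret = gb_pm_runtime_get_sync(bundle);	/* resume, or bail out */
	if (ret)
		return ret;

	/* ... issue Greybus operations over the bundle's connections ... */

	return gb_pm_runtime_put_autosuspend(bundle);
}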
+ */ + +#ifndef __CONNECTION_H +#define __CONNECTION_H + +#include <linux/bits.h> +#include <linux/list.h> +#include <linux/kfifo.h> +#include <linux/kref.h> +#include <linux/workqueue.h> + +#define GB_CONNECTION_FLAG_CSD BIT(0) +#define GB_CONNECTION_FLAG_NO_FLOWCTRL BIT(1) +#define GB_CONNECTION_FLAG_OFFLOADED BIT(2) +#define GB_CONNECTION_FLAG_CDSI1 BIT(3) +#define GB_CONNECTION_FLAG_CONTROL BIT(4) +#define GB_CONNECTION_FLAG_HIGH_PRIO BIT(5) + +#define GB_CONNECTION_FLAG_CORE_MASK GB_CONNECTION_FLAG_CONTROL + +enum gb_connection_state { + GB_CONNECTION_STATE_DISABLED = 0, + GB_CONNECTION_STATE_ENABLED_TX = 1, + GB_CONNECTION_STATE_ENABLED = 2, + GB_CONNECTION_STATE_DISCONNECTING = 3, +}; + +struct gb_operation; + +typedef int (*gb_request_handler_t)(struct gb_operation *); + +struct gb_connection { + struct gb_host_device *hd; + struct gb_interface *intf; + struct gb_bundle *bundle; + struct kref kref; + u16 hd_cport_id; + u16 intf_cport_id; + + struct list_head hd_links; + struct list_head bundle_links; + + gb_request_handler_t handler; + unsigned long flags; + + struct mutex mutex; + spinlock_t lock; + enum gb_connection_state state; + struct list_head operations; + + char name[16]; + struct workqueue_struct *wq; + + atomic_t op_cycle; + + void *private; + + bool mode_switch; +}; + +struct gb_connection *gb_connection_create_static(struct gb_host_device *hd, + u16 hd_cport_id, gb_request_handler_t handler); +struct gb_connection *gb_connection_create_control(struct gb_interface *intf); +struct gb_connection *gb_connection_create(struct gb_bundle *bundle, + u16 cport_id, gb_request_handler_t handler); +struct gb_connection *gb_connection_create_flags(struct gb_bundle *bundle, + u16 cport_id, gb_request_handler_t handler, + unsigned long flags); +struct gb_connection *gb_connection_create_offloaded(struct gb_bundle *bundle, + u16 cport_id, unsigned long flags); +void gb_connection_destroy(struct gb_connection *connection); + +static inline bool gb_connection_is_static(struct gb_connection *connection) +{ + return !connection->intf; +} + +int gb_connection_enable(struct gb_connection *connection); +int gb_connection_enable_tx(struct gb_connection *connection); +void gb_connection_disable_rx(struct gb_connection *connection); +void gb_connection_disable(struct gb_connection *connection); +void gb_connection_disable_forced(struct gb_connection *connection); + +void gb_connection_mode_switch_prepare(struct gb_connection *connection); +void gb_connection_mode_switch_complete(struct gb_connection *connection); + +void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id, + u8 *data, size_t length); + +void gb_connection_latency_tag_enable(struct gb_connection *connection); +void gb_connection_latency_tag_disable(struct gb_connection *connection); + +static inline bool gb_connection_e2efc_enabled(struct gb_connection *connection) +{ + return !(connection->flags & GB_CONNECTION_FLAG_CSD); +} + +static inline bool +gb_connection_flow_control_disabled(struct gb_connection *connection) +{ + return connection->flags & GB_CONNECTION_FLAG_NO_FLOWCTRL; +} + +static inline bool gb_connection_is_offloaded(struct gb_connection *connection) +{ + return connection->flags & GB_CONNECTION_FLAG_OFFLOADED; +} + +static inline bool gb_connection_is_control(struct gb_connection *connection) +{ + return connection->flags & GB_CONNECTION_FLAG_CONTROL; +} + +static inline void *gb_connection_get_data(struct gb_connection *connection) +{ + return connection->private; +} + +static inline void 
gb_connection_set_data(struct gb_connection *connection, + void *data) +{ + connection->private = data; +} + +#endif /* __CONNECTION_H */ diff --git a/include/linux/greybus/control.h b/include/linux/greybus/control.h new file mode 100644 index 000000000000..da11fe871653 --- /dev/null +++ b/include/linux/greybus/control.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus CPort control protocol + * + * Copyright 2015 Google Inc. + * Copyright 2015 Linaro Ltd. + */ + +#ifndef __CONTROL_H +#define __CONTROL_H + +#include <linux/types.h> +#include <linux/device.h> + +struct gb_control { + struct device dev; + struct gb_interface *intf; + + struct gb_connection *connection; + + u8 protocol_major; + u8 protocol_minor; + + bool has_bundle_activate; + bool has_bundle_version; + + char *vendor_string; + char *product_string; +}; +#define to_gb_control(d) container_of(d, struct gb_control, dev) + +struct gb_control *gb_control_create(struct gb_interface *intf); +int gb_control_enable(struct gb_control *control); +void gb_control_disable(struct gb_control *control); +int gb_control_suspend(struct gb_control *control); +int gb_control_resume(struct gb_control *control); +int gb_control_add(struct gb_control *control); +void gb_control_del(struct gb_control *control); +struct gb_control *gb_control_get(struct gb_control *control); +void gb_control_put(struct gb_control *control); + +int gb_control_get_bundle_versions(struct gb_control *control); +int gb_control_connected_operation(struct gb_control *control, u16 cport_id); +int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id); +int gb_control_disconnecting_operation(struct gb_control *control, + u16 cport_id); +int gb_control_mode_switch_operation(struct gb_control *control); +void gb_control_mode_switch_prepare(struct gb_control *control); +void gb_control_mode_switch_complete(struct gb_control *control); +int gb_control_get_manifest_size_operation(struct gb_interface *intf); +int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest, + size_t size); +int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id); +int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id); +int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id); +int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id); +int gb_control_interface_suspend_prepare(struct gb_control *control); +int gb_control_interface_deactivate_prepare(struct gb_control *control); +int gb_control_interface_hibernate_abort(struct gb_control *control); +#endif /* __CONTROL_H */ diff --git a/include/linux/greybus/greybus_id.h b/include/linux/greybus/greybus_id.h new file mode 100644 index 000000000000..f4c8440093e4 --- /dev/null +++ b/include/linux/greybus/greybus_id.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* FIXME + * move this to include/linux/mod_devicetable.h when merging + */ + +#ifndef __LINUX_GREYBUS_ID_H +#define __LINUX_GREYBUS_ID_H + +#include <linux/types.h> +#include <linux/mod_devicetable.h> + + +struct greybus_bundle_id { + __u16 match_flags; + __u32 vendor; + __u32 product; + __u8 class; + + kernel_ulong_t driver_info __aligned(sizeof(kernel_ulong_t)); +}; + +/* Used to match the greybus_bundle_id */ +#define GREYBUS_ID_MATCH_VENDOR BIT(0) +#define GREYBUS_ID_MATCH_PRODUCT BIT(1) +#define GREYBUS_ID_MATCH_CLASS BIT(2) + +#endif /* __LINUX_GREYBUS_ID_H */ diff --git a/include/linux/greybus/greybus_manifest.h 
b/include/linux/greybus/greybus_manifest.h new file mode 100644 index 000000000000..6e62fe478712 --- /dev/null +++ b/include/linux/greybus/greybus_manifest.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus manifest definition + * + * See "Greybus Application Protocol" document (version 0.1) for + * details on these values and structures. + * + * Copyright 2014-2015 Google Inc. + * Copyright 2014-2015 Linaro Ltd. + * + * Released under the GPLv2 and BSD licenses. + */ + +#ifndef __GREYBUS_MANIFEST_H +#define __GREYBUS_MANIFEST_H + +#include <linux/bits.h> +#include <linux/types.h> + +enum greybus_descriptor_type { + GREYBUS_TYPE_INVALID = 0x00, + GREYBUS_TYPE_INTERFACE = 0x01, + GREYBUS_TYPE_STRING = 0x02, + GREYBUS_TYPE_BUNDLE = 0x03, + GREYBUS_TYPE_CPORT = 0x04, +}; + +enum greybus_protocol { + GREYBUS_PROTOCOL_CONTROL = 0x00, + /* 0x01 is unused */ + GREYBUS_PROTOCOL_GPIO = 0x02, + GREYBUS_PROTOCOL_I2C = 0x03, + GREYBUS_PROTOCOL_UART = 0x04, + GREYBUS_PROTOCOL_HID = 0x05, + GREYBUS_PROTOCOL_USB = 0x06, + GREYBUS_PROTOCOL_SDIO = 0x07, + GREYBUS_PROTOCOL_POWER_SUPPLY = 0x08, + GREYBUS_PROTOCOL_PWM = 0x09, + /* 0x0a is unused */ + GREYBUS_PROTOCOL_SPI = 0x0b, + GREYBUS_PROTOCOL_DISPLAY = 0x0c, + GREYBUS_PROTOCOL_CAMERA_MGMT = 0x0d, + GREYBUS_PROTOCOL_SENSOR = 0x0e, + GREYBUS_PROTOCOL_LIGHTS = 0x0f, + GREYBUS_PROTOCOL_VIBRATOR = 0x10, + GREYBUS_PROTOCOL_LOOPBACK = 0x11, + GREYBUS_PROTOCOL_AUDIO_MGMT = 0x12, + GREYBUS_PROTOCOL_AUDIO_DATA = 0x13, + GREYBUS_PROTOCOL_SVC = 0x14, + GREYBUS_PROTOCOL_BOOTROM = 0x15, + GREYBUS_PROTOCOL_CAMERA_DATA = 0x16, + GREYBUS_PROTOCOL_FW_DOWNLOAD = 0x17, + GREYBUS_PROTOCOL_FW_MANAGEMENT = 0x18, + GREYBUS_PROTOCOL_AUTHENTICATION = 0x19, + GREYBUS_PROTOCOL_LOG = 0x1a, + /* ... */ + GREYBUS_PROTOCOL_RAW = 0xfe, + GREYBUS_PROTOCOL_VENDOR = 0xff, +}; + +enum greybus_class_type { + GREYBUS_CLASS_CONTROL = 0x00, + /* 0x01 is unused */ + /* 0x02 is unused */ + /* 0x03 is unused */ + /* 0x04 is unused */ + GREYBUS_CLASS_HID = 0x05, + /* 0x06 is unused */ + /* 0x07 is unused */ + GREYBUS_CLASS_POWER_SUPPLY = 0x08, + /* 0x09 is unused */ + GREYBUS_CLASS_BRIDGED_PHY = 0x0a, + /* 0x0b is unused */ + GREYBUS_CLASS_DISPLAY = 0x0c, + GREYBUS_CLASS_CAMERA = 0x0d, + GREYBUS_CLASS_SENSOR = 0x0e, + GREYBUS_CLASS_LIGHTS = 0x0f, + GREYBUS_CLASS_VIBRATOR = 0x10, + GREYBUS_CLASS_LOOPBACK = 0x11, + GREYBUS_CLASS_AUDIO = 0x12, + /* 0x13 is unused */ + /* 0x14 is unused */ + GREYBUS_CLASS_BOOTROM = 0x15, + GREYBUS_CLASS_FW_MANAGEMENT = 0x16, + GREYBUS_CLASS_LOG = 0x17, + /* ... */ + GREYBUS_CLASS_RAW = 0xfe, + GREYBUS_CLASS_VENDOR = 0xff, +}; + +enum { + GREYBUS_INTERFACE_FEATURE_TIMESYNC = BIT(0), +}; + +/* + * The string in a string descriptor is not NUL-terminated. The + * size of the descriptor will be rounded up to a multiple of 4 + * bytes, by padding the string with 0x00 bytes if necessary. + */ +struct greybus_descriptor_string { + __u8 length; + __u8 id; + __u8 string[0]; +} __packed; + +/* + * An interface descriptor describes information about an interface as a whole, + * *not* the functions within it. + */ +struct greybus_descriptor_interface { + __u8 vendor_stringid; + __u8 product_stringid; + __u8 features; + __u8 pad; +} __packed; + +/* + * An bundle descriptor defines an identification number and a class for + * each bundle. + * + * @id: Uniquely identifies a bundle within a interface, its sole purpose is to + * allow CPort descriptors to specify which bundle they are associated with. 
+ * The first bundle will have id 0, second will have 1 and so on. + * + * The largest CPort id associated with an bundle (defined by a + * CPort descriptor in the manifest) is used to determine how to + * encode the device id and module number in UniPro packets + * that use the bundle. + * + * @class: It is used by kernel to know the functionality provided by the + * bundle and will be matched against drivers functinality while probing greybus + * driver. It should contain one of the values defined in + * 'enum greybus_class_type'. + * + */ +struct greybus_descriptor_bundle { + __u8 id; /* interface-relative id (0..) */ + __u8 class; + __u8 pad[2]; +} __packed; + +/* + * A CPort descriptor indicates the id of the bundle within the + * module it's associated with, along with the CPort id used to + * address the CPort. The protocol id defines the format of messages + * exchanged using the CPort. + */ +struct greybus_descriptor_cport { + __le16 id; + __u8 bundle; + __u8 protocol_id; /* enum greybus_protocol */ +} __packed; + +struct greybus_descriptor_header { + __le16 size; + __u8 type; /* enum greybus_descriptor_type */ + __u8 pad; +} __packed; + +struct greybus_descriptor { + struct greybus_descriptor_header header; + union { + struct greybus_descriptor_string string; + struct greybus_descriptor_interface interface; + struct greybus_descriptor_bundle bundle; + struct greybus_descriptor_cport cport; + }; +} __packed; + +struct greybus_manifest_header { + __le16 size; + __u8 version_major; + __u8 version_minor; +} __packed; + +struct greybus_manifest { + struct greybus_manifest_header header; + struct greybus_descriptor descriptors[0]; +} __packed; + +#endif /* __GREYBUS_MANIFEST_H */ diff --git a/include/linux/greybus/greybus_protocols.h b/include/linux/greybus/greybus_protocols.h new file mode 100644 index 000000000000..dfbc6c39a74b --- /dev/null +++ b/include/linux/greybus/greybus_protocols.h @@ -0,0 +1,2178 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* + * Copyright(c) 2014 - 2015 Google Inc. All rights reserved. + * Copyright(c) 2014 - 2015 Linaro Ltd. All rights reserved. + */ + +#ifndef __GREYBUS_PROTOCOLS_H +#define __GREYBUS_PROTOCOLS_H + +#include <linux/types.h> + +/* Fixed IDs for control/svc protocols */ + +/* SVC switch-port device ids */ +#define GB_SVC_DEVICE_ID_SVC 0 +#define GB_SVC_DEVICE_ID_AP 1 +#define GB_SVC_DEVICE_ID_MIN 2 +#define GB_SVC_DEVICE_ID_MAX 31 + +#define GB_SVC_CPORT_ID 0 +#define GB_CONTROL_BUNDLE_ID 0 +#define GB_CONTROL_CPORT_ID 0 + + +/* + * All operation messages (both requests and responses) begin with + * a header that encodes the size of the message (header included). + * This header also contains a unique identifier, that associates a + * response message with its operation. The header contains an + * operation type field, whose interpretation is dependent on what + * type of protocol is used over the connection. The high bit + * (0x80) of the operation type field is used to indicate whether + * the message is a request (clear) or a response (set). + * + * Response messages include an additional result byte, which + * communicates the result of the corresponding request. A zero + * result value means the operation completed successfully. Any + * other value indicates an error; in this case, the payload of the + * response message (if any) is ignored. The result byte must be + * zero in the header for a request message. + * + * The wire format for all numeric fields in the header is little + * endian. 
Any operation-specific data begins immediately after the + * header. + */ +struct gb_operation_msg_hdr { + __le16 size; /* Size in bytes of header + payload */ + __le16 operation_id; /* Operation unique id */ + __u8 type; /* E.g GB_I2C_TYPE_* or GB_GPIO_TYPE_* */ + __u8 result; /* Result of request (in responses only) */ + __u8 pad[2]; /* must be zero (ignore when read) */ +} __packed; + + +/* Generic request types */ +#define GB_REQUEST_TYPE_CPORT_SHUTDOWN 0x00 +#define GB_REQUEST_TYPE_INVALID 0x7f + +struct gb_cport_shutdown_request { + __u8 phase; +} __packed; + + +/* Control Protocol */ + +/* Greybus control request types */ +#define GB_CONTROL_TYPE_VERSION 0x01 +#define GB_CONTROL_TYPE_PROBE_AP 0x02 +#define GB_CONTROL_TYPE_GET_MANIFEST_SIZE 0x03 +#define GB_CONTROL_TYPE_GET_MANIFEST 0x04 +#define GB_CONTROL_TYPE_CONNECTED 0x05 +#define GB_CONTROL_TYPE_DISCONNECTED 0x06 +#define GB_CONTROL_TYPE_TIMESYNC_ENABLE 0x07 +#define GB_CONTROL_TYPE_TIMESYNC_DISABLE 0x08 +#define GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE 0x09 +/* Unused 0x0a */ +#define GB_CONTROL_TYPE_BUNDLE_VERSION 0x0b +#define GB_CONTROL_TYPE_DISCONNECTING 0x0c +#define GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT 0x0d +#define GB_CONTROL_TYPE_MODE_SWITCH 0x0e +#define GB_CONTROL_TYPE_BUNDLE_SUSPEND 0x0f +#define GB_CONTROL_TYPE_BUNDLE_RESUME 0x10 +#define GB_CONTROL_TYPE_BUNDLE_DEACTIVATE 0x11 +#define GB_CONTROL_TYPE_BUNDLE_ACTIVATE 0x12 +#define GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE 0x13 +#define GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE 0x14 +#define GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT 0x15 + +struct gb_control_version_request { + __u8 major; + __u8 minor; +} __packed; + +struct gb_control_version_response { + __u8 major; + __u8 minor; +} __packed; + +struct gb_control_bundle_version_request { + __u8 bundle_id; +} __packed; + +struct gb_control_bundle_version_response { + __u8 major; + __u8 minor; +} __packed; + +/* Control protocol manifest get size request has no payload*/ +struct gb_control_get_manifest_size_response { + __le16 size; +} __packed; + +/* Control protocol manifest get request has no payload */ +struct gb_control_get_manifest_response { + __u8 data[0]; +} __packed; + +/* Control protocol [dis]connected request */ +struct gb_control_connected_request { + __le16 cport_id; +} __packed; + +struct gb_control_disconnecting_request { + __le16 cport_id; +} __packed; +/* disconnecting response has no payload */ + +struct gb_control_disconnected_request { + __le16 cport_id; +} __packed; +/* Control protocol [dis]connected response has no payload */ + +/* + * All Bundle power management operations use the same request and response + * layout and status codes. + */ + +#define GB_CONTROL_BUNDLE_PM_OK 0x00 +#define GB_CONTROL_BUNDLE_PM_INVAL 0x01 +#define GB_CONTROL_BUNDLE_PM_BUSY 0x02 +#define GB_CONTROL_BUNDLE_PM_FAIL 0x03 +#define GB_CONTROL_BUNDLE_PM_NA 0x04 + +struct gb_control_bundle_pm_request { + __u8 bundle_id; +} __packed; + +struct gb_control_bundle_pm_response { + __u8 status; +} __packed; + +/* + * Interface Suspend Prepare and Deactivate Prepare operations use the same + * response layout and error codes. Define a single response structure and reuse + * it. Both operations have no payload. 
+ */ + +#define GB_CONTROL_INTF_PM_OK 0x00 +#define GB_CONTROL_INTF_PM_BUSY 0x01 +#define GB_CONTROL_INTF_PM_NA 0x02 + +struct gb_control_intf_pm_response { + __u8 status; +} __packed; + +/* APBridge protocol */ + +/* request APB1 log */ +#define GB_APB_REQUEST_LOG 0x02 + +/* request to map a cport to bulk in and bulk out endpoints */ +#define GB_APB_REQUEST_EP_MAPPING 0x03 + +/* request to get the number of cports available */ +#define GB_APB_REQUEST_CPORT_COUNT 0x04 + +/* request to reset a cport state */ +#define GB_APB_REQUEST_RESET_CPORT 0x05 + +/* request to time the latency of messages on a given cport */ +#define GB_APB_REQUEST_LATENCY_TAG_EN 0x06 +#define GB_APB_REQUEST_LATENCY_TAG_DIS 0x07 + +/* request to control the CSI transmitter */ +#define GB_APB_REQUEST_CSI_TX_CONTROL 0x08 + +/* request to control audio streaming */ +#define GB_APB_REQUEST_AUDIO_CONTROL 0x09 + +/* TimeSync requests */ +#define GB_APB_REQUEST_TIMESYNC_ENABLE 0x0d +#define GB_APB_REQUEST_TIMESYNC_DISABLE 0x0e +#define GB_APB_REQUEST_TIMESYNC_AUTHORITATIVE 0x0f +#define GB_APB_REQUEST_TIMESYNC_GET_LAST_EVENT 0x10 + +/* requests to set Greybus CPort flags */ +#define GB_APB_REQUEST_CPORT_FLAGS 0x11 + +/* ARPC request */ +#define GB_APB_REQUEST_ARPC_RUN 0x12 + +struct gb_apb_request_cport_flags { + __le32 flags; +#define GB_APB_CPORT_FLAG_CONTROL 0x01 +#define GB_APB_CPORT_FLAG_HIGH_PRIO 0x02 +} __packed; + + +/* Firmware Download Protocol */ + +/* Request Types */ +#define GB_FW_DOWNLOAD_TYPE_FIND_FIRMWARE 0x01 +#define GB_FW_DOWNLOAD_TYPE_FETCH_FIRMWARE 0x02 +#define GB_FW_DOWNLOAD_TYPE_RELEASE_FIRMWARE 0x03 + +#define GB_FIRMWARE_TAG_MAX_SIZE 10 + +/* firmware download find firmware request/response */ +struct gb_fw_download_find_firmware_request { + __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; +} __packed; + +struct gb_fw_download_find_firmware_response { + __u8 firmware_id; + __le32 size; +} __packed; + +/* firmware download fetch firmware request/response */ +struct gb_fw_download_fetch_firmware_request { + __u8 firmware_id; + __le32 offset; + __le32 size; +} __packed; + +struct gb_fw_download_fetch_firmware_response { + __u8 data[0]; +} __packed; + +/* firmware download release firmware request */ +struct gb_fw_download_release_firmware_request { + __u8 firmware_id; +} __packed; +/* firmware download release firmware response has no payload */ + + +/* Firmware Management Protocol */ + +/* Request Types */ +#define GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION 0x01 +#define GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW 0x02 +#define GB_FW_MGMT_TYPE_LOADED_FW 0x03 +#define GB_FW_MGMT_TYPE_BACKEND_FW_VERSION 0x04 +#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE 0x05 +#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED 0x06 + +#define GB_FW_LOAD_METHOD_UNIPRO 0x01 +#define GB_FW_LOAD_METHOD_INTERNAL 0x02 + +#define GB_FW_LOAD_STATUS_FAILED 0x00 +#define GB_FW_LOAD_STATUS_UNVALIDATED 0x01 +#define GB_FW_LOAD_STATUS_VALIDATED 0x02 +#define GB_FW_LOAD_STATUS_VALIDATION_FAILED 0x03 + +#define GB_FW_BACKEND_FW_STATUS_SUCCESS 0x01 +#define GB_FW_BACKEND_FW_STATUS_FAIL_FIND 0x02 +#define GB_FW_BACKEND_FW_STATUS_FAIL_FETCH 0x03 +#define GB_FW_BACKEND_FW_STATUS_FAIL_WRITE 0x04 +#define GB_FW_BACKEND_FW_STATUS_INT 0x05 +#define GB_FW_BACKEND_FW_STATUS_RETRY 0x06 +#define GB_FW_BACKEND_FW_STATUS_NOT_SUPPORTED 0x07 + +#define GB_FW_BACKEND_VERSION_STATUS_SUCCESS 0x01 +#define GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE 0x02 +#define GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED 0x03 +#define GB_FW_BACKEND_VERSION_STATUS_RETRY 0x04 +#define 
GB_FW_BACKEND_VERSION_STATUS_FAIL_INT 0x05 + +/* firmware management interface firmware version request has no payload */ +struct gb_fw_mgmt_interface_fw_version_response { + __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; + __le16 major; + __le16 minor; +} __packed; + +/* firmware management load and validate firmware request/response */ +struct gb_fw_mgmt_load_and_validate_fw_request { + __u8 request_id; + __u8 load_method; + __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; +} __packed; +/* firmware management load and validate firmware response has no payload*/ + +/* firmware management loaded firmware request */ +struct gb_fw_mgmt_loaded_fw_request { + __u8 request_id; + __u8 status; + __le16 major; + __le16 minor; +} __packed; +/* firmware management loaded firmware response has no payload */ + +/* firmware management backend firmware version request/response */ +struct gb_fw_mgmt_backend_fw_version_request { + __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; +} __packed; + +struct gb_fw_mgmt_backend_fw_version_response { + __le16 major; + __le16 minor; + __u8 status; +} __packed; + +/* firmware management backend firmware update request */ +struct gb_fw_mgmt_backend_fw_update_request { + __u8 request_id; + __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; +} __packed; +/* firmware management backend firmware update response has no payload */ + +/* firmware management backend firmware updated request */ +struct gb_fw_mgmt_backend_fw_updated_request { + __u8 request_id; + __u8 status; +} __packed; +/* firmware management backend firmware updated response has no payload */ + + +/* Component Authentication Protocol (CAP) */ + +/* Request Types */ +#define GB_CAP_TYPE_GET_ENDPOINT_UID 0x01 +#define GB_CAP_TYPE_GET_IMS_CERTIFICATE 0x02 +#define GB_CAP_TYPE_AUTHENTICATE 0x03 + +/* CAP get endpoint uid request has no payload */ +struct gb_cap_get_endpoint_uid_response { + __u8 uid[8]; +} __packed; + +/* CAP get endpoint ims certificate request/response */ +struct gb_cap_get_ims_certificate_request { + __le32 certificate_class; + __le32 certificate_id; +} __packed; + +struct gb_cap_get_ims_certificate_response { + __u8 result_code; + __u8 certificate[0]; +} __packed; + +/* CAP authenticate request/response */ +struct gb_cap_authenticate_request { + __le32 auth_type; + __u8 uid[8]; + __u8 challenge[32]; +} __packed; + +struct gb_cap_authenticate_response { + __u8 result_code; + __u8 response[64]; + __u8 signature[0]; +} __packed; + + +/* Bootrom Protocol */ + +/* Version of the Greybus bootrom protocol we support */ +#define GB_BOOTROM_VERSION_MAJOR 0x00 +#define GB_BOOTROM_VERSION_MINOR 0x01 + +/* Greybus bootrom request types */ +#define GB_BOOTROM_TYPE_VERSION 0x01 +#define GB_BOOTROM_TYPE_FIRMWARE_SIZE 0x02 +#define GB_BOOTROM_TYPE_GET_FIRMWARE 0x03 +#define GB_BOOTROM_TYPE_READY_TO_BOOT 0x04 +#define GB_BOOTROM_TYPE_AP_READY 0x05 /* Request with no-payload */ +#define GB_BOOTROM_TYPE_GET_VID_PID 0x06 /* Request with no-payload */ + +/* Greybus bootrom boot stages */ +#define GB_BOOTROM_BOOT_STAGE_ONE 0x01 /* Reserved for the boot ROM */ +#define GB_BOOTROM_BOOT_STAGE_TWO 0x02 /* Bootrom package to be loaded by the boot ROM */ +#define GB_BOOTROM_BOOT_STAGE_THREE 0x03 /* Module personality package loaded by Stage 2 firmware */ + +/* Greybus bootrom ready to boot status */ +#define GB_BOOTROM_BOOT_STATUS_INVALID 0x00 /* Firmware blob could not be validated */ +#define GB_BOOTROM_BOOT_STATUS_INSECURE 0x01 /* Firmware blob is valid but insecure */ +#define GB_BOOTROM_BOOT_STATUS_SECURE 0x02 /* Firmware blob 
is valid and secure */ + +/* Max bootrom data fetch size in bytes */ +#define GB_BOOTROM_FETCH_MAX 2000 + +struct gb_bootrom_version_request { + __u8 major; + __u8 minor; +} __packed; + +struct gb_bootrom_version_response { + __u8 major; + __u8 minor; +} __packed; + +/* Bootrom protocol firmware size request/response */ +struct gb_bootrom_firmware_size_request { + __u8 stage; +} __packed; + +struct gb_bootrom_firmware_size_response { + __le32 size; +} __packed; + +/* Bootrom protocol get firmware request/response */ +struct gb_bootrom_get_firmware_request { + __le32 offset; + __le32 size; +} __packed; + +struct gb_bootrom_get_firmware_response { + __u8 data[0]; +} __packed; + +/* Bootrom protocol Ready to boot request */ +struct gb_bootrom_ready_to_boot_request { + __u8 status; +} __packed; +/* Bootrom protocol Ready to boot response has no payload */ + +/* Bootrom protocol get VID/PID request has no payload */ +struct gb_bootrom_get_vid_pid_response { + __le32 vendor_id; + __le32 product_id; +} __packed; + + +/* Power Supply */ + +/* Greybus power supply request types */ +#define GB_POWER_SUPPLY_TYPE_GET_SUPPLIES 0x02 +#define GB_POWER_SUPPLY_TYPE_GET_DESCRIPTION 0x03 +#define GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS 0x04 +#define GB_POWER_SUPPLY_TYPE_GET_PROPERTY 0x05 +#define GB_POWER_SUPPLY_TYPE_SET_PROPERTY 0x06 +#define GB_POWER_SUPPLY_TYPE_EVENT 0x07 + +/* Greybus power supply battery technologies types */ +#define GB_POWER_SUPPLY_TECH_UNKNOWN 0x0000 +#define GB_POWER_SUPPLY_TECH_NiMH 0x0001 +#define GB_POWER_SUPPLY_TECH_LION 0x0002 +#define GB_POWER_SUPPLY_TECH_LIPO 0x0003 +#define GB_POWER_SUPPLY_TECH_LiFe 0x0004 +#define GB_POWER_SUPPLY_TECH_NiCd 0x0005 +#define GB_POWER_SUPPLY_TECH_LiMn 0x0006 + +/* Greybus power supply types */ +#define GB_POWER_SUPPLY_UNKNOWN_TYPE 0x0000 +#define GB_POWER_SUPPLY_BATTERY_TYPE 0x0001 +#define GB_POWER_SUPPLY_UPS_TYPE 0x0002 +#define GB_POWER_SUPPLY_MAINS_TYPE 0x0003 +#define GB_POWER_SUPPLY_USB_TYPE 0x0004 +#define GB_POWER_SUPPLY_USB_DCP_TYPE 0x0005 +#define GB_POWER_SUPPLY_USB_CDP_TYPE 0x0006 +#define GB_POWER_SUPPLY_USB_ACA_TYPE 0x0007 + +/* Greybus power supply health values */ +#define GB_POWER_SUPPLY_HEALTH_UNKNOWN 0x0000 +#define GB_POWER_SUPPLY_HEALTH_GOOD 0x0001 +#define GB_POWER_SUPPLY_HEALTH_OVERHEAT 0x0002 +#define GB_POWER_SUPPLY_HEALTH_DEAD 0x0003 +#define GB_POWER_SUPPLY_HEALTH_OVERVOLTAGE 0x0004 +#define GB_POWER_SUPPLY_HEALTH_UNSPEC_FAILURE 0x0005 +#define GB_POWER_SUPPLY_HEALTH_COLD 0x0006 +#define GB_POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE 0x0007 +#define GB_POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE 0x0008 + +/* Greybus power supply status values */ +#define GB_POWER_SUPPLY_STATUS_UNKNOWN 0x0000 +#define GB_POWER_SUPPLY_STATUS_CHARGING 0x0001 +#define GB_POWER_SUPPLY_STATUS_DISCHARGING 0x0002 +#define GB_POWER_SUPPLY_STATUS_NOT_CHARGING 0x0003 +#define GB_POWER_SUPPLY_STATUS_FULL 0x0004 + +/* Greybus power supply capacity level values */ +#define GB_POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN 0x0000 +#define GB_POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL 0x0001 +#define GB_POWER_SUPPLY_CAPACITY_LEVEL_LOW 0x0002 +#define GB_POWER_SUPPLY_CAPACITY_LEVEL_NORMAL 0x0003 +#define GB_POWER_SUPPLY_CAPACITY_LEVEL_HIGH 0x0004 +#define GB_POWER_SUPPLY_CAPACITY_LEVEL_FULL 0x0005 + +/* Greybus power supply scope values */ +#define GB_POWER_SUPPLY_SCOPE_UNKNOWN 0x0000 +#define GB_POWER_SUPPLY_SCOPE_SYSTEM 0x0001 +#define GB_POWER_SUPPLY_SCOPE_DEVICE 0x0002 + +struct gb_power_supply_get_supplies_response { + __u8 supplies_count; +} __packed; + +struct 
gb_power_supply_get_description_request { + __u8 psy_id; +} __packed; + +struct gb_power_supply_get_description_response { + __u8 manufacturer[32]; + __u8 model[32]; + __u8 serial_number[32]; + __le16 type; + __u8 properties_count; +} __packed; + +struct gb_power_supply_props_desc { + __u8 property; +#define GB_POWER_SUPPLY_PROP_STATUS 0x00 +#define GB_POWER_SUPPLY_PROP_CHARGE_TYPE 0x01 +#define GB_POWER_SUPPLY_PROP_HEALTH 0x02 +#define GB_POWER_SUPPLY_PROP_PRESENT 0x03 +#define GB_POWER_SUPPLY_PROP_ONLINE 0x04 +#define GB_POWER_SUPPLY_PROP_AUTHENTIC 0x05 +#define GB_POWER_SUPPLY_PROP_TECHNOLOGY 0x06 +#define GB_POWER_SUPPLY_PROP_CYCLE_COUNT 0x07 +#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX 0x08 +#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN 0x09 +#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN 0x0A +#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN 0x0B +#define GB_POWER_SUPPLY_PROP_VOLTAGE_NOW 0x0C +#define GB_POWER_SUPPLY_PROP_VOLTAGE_AVG 0x0D +#define GB_POWER_SUPPLY_PROP_VOLTAGE_OCV 0x0E +#define GB_POWER_SUPPLY_PROP_VOLTAGE_BOOT 0x0F +#define GB_POWER_SUPPLY_PROP_CURRENT_MAX 0x10 +#define GB_POWER_SUPPLY_PROP_CURRENT_NOW 0x11 +#define GB_POWER_SUPPLY_PROP_CURRENT_AVG 0x12 +#define GB_POWER_SUPPLY_PROP_CURRENT_BOOT 0x13 +#define GB_POWER_SUPPLY_PROP_POWER_NOW 0x14 +#define GB_POWER_SUPPLY_PROP_POWER_AVG 0x15 +#define GB_POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN 0x16 +#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN 0x17 +#define GB_POWER_SUPPLY_PROP_CHARGE_FULL 0x18 +#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY 0x19 +#define GB_POWER_SUPPLY_PROP_CHARGE_NOW 0x1A +#define GB_POWER_SUPPLY_PROP_CHARGE_AVG 0x1B +#define GB_POWER_SUPPLY_PROP_CHARGE_COUNTER 0x1C +#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT 0x1D +#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX 0x1E +#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE 0x1F +#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX 0x20 +#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT 0x21 +#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX 0x22 +#define GB_POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT 0x23 +#define GB_POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN 0x24 +#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN 0x25 +#define GB_POWER_SUPPLY_PROP_ENERGY_FULL 0x26 +#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY 0x27 +#define GB_POWER_SUPPLY_PROP_ENERGY_NOW 0x28 +#define GB_POWER_SUPPLY_PROP_ENERGY_AVG 0x29 +#define GB_POWER_SUPPLY_PROP_CAPACITY 0x2A +#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN 0x2B +#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX 0x2C +#define GB_POWER_SUPPLY_PROP_CAPACITY_LEVEL 0x2D +#define GB_POWER_SUPPLY_PROP_TEMP 0x2E +#define GB_POWER_SUPPLY_PROP_TEMP_MAX 0x2F +#define GB_POWER_SUPPLY_PROP_TEMP_MIN 0x30 +#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MIN 0x31 +#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MAX 0x32 +#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT 0x33 +#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN 0x34 +#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX 0x35 +#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW 0x36 +#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG 0x37 +#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_NOW 0x38 +#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_AVG 0x39 +#define GB_POWER_SUPPLY_PROP_TYPE 0x3A +#define GB_POWER_SUPPLY_PROP_SCOPE 0x3B +#define GB_POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT 0x3C +#define GB_POWER_SUPPLY_PROP_CALIBRATE 0x3D + __u8 is_writeable; +} __packed; + +struct gb_power_supply_get_property_descriptors_request { + __u8 psy_id; +} __packed; + +struct gb_power_supply_get_property_descriptors_response { + 
__u8 properties_count; + struct gb_power_supply_props_desc props[]; +} __packed; + +struct gb_power_supply_get_property_request { + __u8 psy_id; + __u8 property; +} __packed; + +struct gb_power_supply_get_property_response { + __le32 prop_val; +}; + +struct gb_power_supply_set_property_request { + __u8 psy_id; + __u8 property; + __le32 prop_val; +} __packed; + +struct gb_power_supply_event_request { + __u8 psy_id; + __u8 event; +#define GB_POWER_SUPPLY_UPDATE 0x01 +} __packed; + + +/* HID */ + +/* Greybus HID operation types */ +#define GB_HID_TYPE_GET_DESC 0x02 +#define GB_HID_TYPE_GET_REPORT_DESC 0x03 +#define GB_HID_TYPE_PWR_ON 0x04 +#define GB_HID_TYPE_PWR_OFF 0x05 +#define GB_HID_TYPE_GET_REPORT 0x06 +#define GB_HID_TYPE_SET_REPORT 0x07 +#define GB_HID_TYPE_IRQ_EVENT 0x08 + +/* Report type */ +#define GB_HID_INPUT_REPORT 0 +#define GB_HID_OUTPUT_REPORT 1 +#define GB_HID_FEATURE_REPORT 2 + +/* Different request/response structures */ +/* HID get descriptor response */ +struct gb_hid_desc_response { + __u8 bLength; + __le16 wReportDescLength; + __le16 bcdHID; + __le16 wProductID; + __le16 wVendorID; + __u8 bCountryCode; +} __packed; + +/* HID get report request/response */ +struct gb_hid_get_report_request { + __u8 report_type; + __u8 report_id; +} __packed; + +/* HID set report request */ +struct gb_hid_set_report_request { + __u8 report_type; + __u8 report_id; + __u8 report[0]; +} __packed; + +/* HID input report request, via interrupt pipe */ +struct gb_hid_input_report_request { + __u8 report[0]; +} __packed; + + +/* I2C */ + +/* Greybus i2c request types */ +#define GB_I2C_TYPE_FUNCTIONALITY 0x02 +#define GB_I2C_TYPE_TRANSFER 0x05 + +/* functionality request has no payload */ +struct gb_i2c_functionality_response { + __le32 functionality; +} __packed; + +/* + * Outgoing data immediately follows the op count and ops array. + * The data for each write (master -> slave) op in the array is sent + * in order, with no (e.g. pad) bytes separating them. + * + * Short reads cause the entire transfer request to fail, so the response + * payload consists only of the bytes read, and the number of bytes is + * exactly what was specified in the corresponding op. Like + * outgoing data, the incoming data is in order and contiguous.
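+ * + * Illustrative example: a transfer with one 2-byte write op followed by one + * 4-byte read op carries op_count = 2, then the two gb_i2c_transfer_op + * entries, then the two outgoing write bytes; the response payload is + * exactly the four bytes that were read.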
+ */ +struct gb_i2c_transfer_op { + __le16 addr; + __le16 flags; + __le16 size; +} __packed; + +struct gb_i2c_transfer_request { + __le16 op_count; + struct gb_i2c_transfer_op ops[0]; /* op_count of these */ +} __packed; +struct gb_i2c_transfer_response { + __u8 data[0]; /* inbound data */ +} __packed; + + +/* GPIO */ + +/* Greybus GPIO request types */ +#define GB_GPIO_TYPE_LINE_COUNT 0x02 +#define GB_GPIO_TYPE_ACTIVATE 0x03 +#define GB_GPIO_TYPE_DEACTIVATE 0x04 +#define GB_GPIO_TYPE_GET_DIRECTION 0x05 +#define GB_GPIO_TYPE_DIRECTION_IN 0x06 +#define GB_GPIO_TYPE_DIRECTION_OUT 0x07 +#define GB_GPIO_TYPE_GET_VALUE 0x08 +#define GB_GPIO_TYPE_SET_VALUE 0x09 +#define GB_GPIO_TYPE_SET_DEBOUNCE 0x0a +#define GB_GPIO_TYPE_IRQ_TYPE 0x0b +#define GB_GPIO_TYPE_IRQ_MASK 0x0c +#define GB_GPIO_TYPE_IRQ_UNMASK 0x0d +#define GB_GPIO_TYPE_IRQ_EVENT 0x0e + +#define GB_GPIO_IRQ_TYPE_NONE 0x00 +#define GB_GPIO_IRQ_TYPE_EDGE_RISING 0x01 +#define GB_GPIO_IRQ_TYPE_EDGE_FALLING 0x02 +#define GB_GPIO_IRQ_TYPE_EDGE_BOTH 0x03 +#define GB_GPIO_IRQ_TYPE_LEVEL_HIGH 0x04 +#define GB_GPIO_IRQ_TYPE_LEVEL_LOW 0x08 + +/* line count request has no payload */ +struct gb_gpio_line_count_response { + __u8 count; +} __packed; + +struct gb_gpio_activate_request { + __u8 which; +} __packed; +/* activate response has no payload */ + +struct gb_gpio_deactivate_request { + __u8 which; +} __packed; +/* deactivate response has no payload */ + +struct gb_gpio_get_direction_request { + __u8 which; +} __packed; +struct gb_gpio_get_direction_response { + __u8 direction; +} __packed; + +struct gb_gpio_direction_in_request { + __u8 which; +} __packed; +/* direction in response has no payload */ + +struct gb_gpio_direction_out_request { + __u8 which; + __u8 value; +} __packed; +/* direction out response has no payload */ + +struct gb_gpio_get_value_request { + __u8 which; +} __packed; +struct gb_gpio_get_value_response { + __u8 value; +} __packed; + +struct gb_gpio_set_value_request { + __u8 which; + __u8 value; +} __packed; +/* set value response has no payload */ + +struct gb_gpio_set_debounce_request { + __u8 which; + __le16 usec; +} __packed; +/* debounce response has no payload */ + +struct gb_gpio_irq_type_request { + __u8 which; + __u8 type; +} __packed; +/* irq type response has no payload */ + +struct gb_gpio_irq_mask_request { + __u8 which; +} __packed; +/* irq mask response has no payload */ + +struct gb_gpio_irq_unmask_request { + __u8 which; +} __packed; +/* irq unmask response has no payload */ + +/* irq event requests originate on another module and are handled on the AP */ +struct gb_gpio_irq_event_request { + __u8 which; +} __packed; +/* irq event has no response */ + + +/* PWM */ + +/* Greybus PWM operation types */ +#define GB_PWM_TYPE_PWM_COUNT 0x02 +#define GB_PWM_TYPE_ACTIVATE 0x03 +#define GB_PWM_TYPE_DEACTIVATE 0x04 +#define GB_PWM_TYPE_CONFIG 0x05 +#define GB_PWM_TYPE_POLARITY 0x06 +#define GB_PWM_TYPE_ENABLE 0x07 +#define GB_PWM_TYPE_DISABLE 0x08 + +/* pwm count request has no payload */ +struct gb_pwm_count_response { + __u8 count; +} __packed; + +struct gb_pwm_activate_request { + __u8 which; +} __packed; + +struct gb_pwm_deactivate_request { + __u8 which; +} __packed; + +struct gb_pwm_config_request { + __u8 which; + __le32 duty; + __le32 period; +} __packed; + +struct gb_pwm_polarity_request { + __u8 which; + __u8 polarity; +} __packed; + +struct gb_pwm_enable_request { + __u8 which; +} __packed; + +struct gb_pwm_disable_request { + __u8 which; +} __packed; + +/* SPI */ + +/* Should match up with modes in 
linux/spi/spi.h */ +#define GB_SPI_MODE_CPHA 0x01 /* clock phase */ +#define GB_SPI_MODE_CPOL 0x02 /* clock polarity */ +#define GB_SPI_MODE_MODE_0 (0 | 0) /* (original MicroWire) */ +#define GB_SPI_MODE_MODE_1 (0 | GB_SPI_MODE_CPHA) +#define GB_SPI_MODE_MODE_2 (GB_SPI_MODE_CPOL | 0) +#define GB_SPI_MODE_MODE_3 (GB_SPI_MODE_CPOL | GB_SPI_MODE_CPHA) +#define GB_SPI_MODE_CS_HIGH 0x04 /* chipselect active high? */ +#define GB_SPI_MODE_LSB_FIRST 0x08 /* per-word bits-on-wire */ +#define GB_SPI_MODE_3WIRE 0x10 /* SI/SO signals shared */ +#define GB_SPI_MODE_LOOP 0x20 /* loopback mode */ +#define GB_SPI_MODE_NO_CS 0x40 /* 1 dev/bus, no chipselect */ +#define GB_SPI_MODE_READY 0x80 /* slave pulls low to pause */ + +/* Should match up with flags in linux/spi/spi.h */ +#define GB_SPI_FLAG_HALF_DUPLEX BIT(0) /* can't do full duplex */ +#define GB_SPI_FLAG_NO_RX BIT(1) /* can't do buffer read */ +#define GB_SPI_FLAG_NO_TX BIT(2) /* can't do buffer write */ + +/* Greybus spi operation types */ +#define GB_SPI_TYPE_MASTER_CONFIG 0x02 +#define GB_SPI_TYPE_DEVICE_CONFIG 0x03 +#define GB_SPI_TYPE_TRANSFER 0x04 + +/* mode request has no payload */ +struct gb_spi_master_config_response { + __le32 bits_per_word_mask; + __le32 min_speed_hz; + __le32 max_speed_hz; + __le16 mode; + __le16 flags; + __u8 num_chipselect; +} __packed; + +struct gb_spi_device_config_request { + __u8 chip_select; +} __packed; + +struct gb_spi_device_config_response { + __le16 mode; + __u8 bits_per_word; + __le32 max_speed_hz; + __u8 device_type; +#define GB_SPI_SPI_DEV 0x00 +#define GB_SPI_SPI_NOR 0x01 +#define GB_SPI_SPI_MODALIAS 0x02 + __u8 name[32]; +} __packed; + +/** + * struct gb_spi_transfer - a read/write buffer pair + * @speed_hz: Select a speed other than the device default for this transfer. If + * 0 the default (from @spi_device) is used. + * @len: size of rx and tx buffers (in bytes) + * @delay_usecs: microseconds to delay after this transfer before (optionally) + * changing the chipselect status, then starting the next transfer or + * completing this spi_message. + * @cs_change: affects chipselect after this transfer completes + * @bits_per_word: select a bits_per_word other than the device default for this + * transfer. If 0 the default (from @spi_device) is used. 
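+ * @xfer_flags: bitmask of GB_SPI_XFER_* values describing this transfer + * (read, write, in-progress); see the flag definitions below.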
+ */ +struct gb_spi_transfer { + __le32 speed_hz; + __le32 len; + __le16 delay_usecs; + __u8 cs_change; + __u8 bits_per_word; + __u8 xfer_flags; +#define GB_SPI_XFER_READ 0x01 +#define GB_SPI_XFER_WRITE 0x02 +#define GB_SPI_XFER_INPROGRESS 0x04 +} __packed; + +struct gb_spi_transfer_request { + __u8 chip_select; /* of the spi device */ + __u8 mode; /* of the spi device */ + __le16 count; + struct gb_spi_transfer transfers[0]; /* count of these */ +} __packed; + +struct gb_spi_transfer_response { + __u8 data[0]; /* inbound data */ +} __packed; + +/* Version of the Greybus SVC protocol we support */ +#define GB_SVC_VERSION_MAJOR 0x00 +#define GB_SVC_VERSION_MINOR 0x01 + +/* Greybus SVC request types */ +#define GB_SVC_TYPE_PROTOCOL_VERSION 0x01 +#define GB_SVC_TYPE_SVC_HELLO 0x02 +#define GB_SVC_TYPE_INTF_DEVICE_ID 0x03 +#define GB_SVC_TYPE_INTF_RESET 0x06 +#define GB_SVC_TYPE_CONN_CREATE 0x07 +#define GB_SVC_TYPE_CONN_DESTROY 0x08 +#define GB_SVC_TYPE_DME_PEER_GET 0x09 +#define GB_SVC_TYPE_DME_PEER_SET 0x0a +#define GB_SVC_TYPE_ROUTE_CREATE 0x0b +#define GB_SVC_TYPE_ROUTE_DESTROY 0x0c +#define GB_SVC_TYPE_TIMESYNC_ENABLE 0x0d +#define GB_SVC_TYPE_TIMESYNC_DISABLE 0x0e +#define GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE 0x0f +#define GB_SVC_TYPE_INTF_SET_PWRM 0x10 +#define GB_SVC_TYPE_INTF_EJECT 0x11 +#define GB_SVC_TYPE_PING 0x13 +#define GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET 0x14 +#define GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET 0x15 +#define GB_SVC_TYPE_PWRMON_SAMPLE_GET 0x16 +#define GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET 0x17 +#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE 0x18 +#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE 0x19 +#define GB_SVC_TYPE_TIMESYNC_PING 0x1a +#define GB_SVC_TYPE_MODULE_INSERTED 0x1f +#define GB_SVC_TYPE_MODULE_REMOVED 0x20 +#define GB_SVC_TYPE_INTF_VSYS_ENABLE 0x21 +#define GB_SVC_TYPE_INTF_VSYS_DISABLE 0x22 +#define GB_SVC_TYPE_INTF_REFCLK_ENABLE 0x23 +#define GB_SVC_TYPE_INTF_REFCLK_DISABLE 0x24 +#define GB_SVC_TYPE_INTF_UNIPRO_ENABLE 0x25 +#define GB_SVC_TYPE_INTF_UNIPRO_DISABLE 0x26 +#define GB_SVC_TYPE_INTF_ACTIVATE 0x27 +#define GB_SVC_TYPE_INTF_RESUME 0x28 +#define GB_SVC_TYPE_INTF_MAILBOX_EVENT 0x29 +#define GB_SVC_TYPE_INTF_OOPS 0x2a + +/* Greybus SVC protocol status values */ +#define GB_SVC_OP_SUCCESS 0x00 +#define GB_SVC_OP_UNKNOWN_ERROR 0x01 +#define GB_SVC_INTF_NOT_DETECTED 0x02 +#define GB_SVC_INTF_NO_UPRO_LINK 0x03 +#define GB_SVC_INTF_UPRO_NOT_DOWN 0x04 +#define GB_SVC_INTF_UPRO_NOT_HIBERNATED 0x05 +#define GB_SVC_INTF_NO_V_SYS 0x06 +#define GB_SVC_INTF_V_CHG 0x07 +#define GB_SVC_INTF_WAKE_BUSY 0x08 +#define GB_SVC_INTF_NO_REFCLK 0x09 +#define GB_SVC_INTF_RELEASING 0x0a +#define GB_SVC_INTF_NO_ORDER 0x0b +#define GB_SVC_INTF_MBOX_SET 0x0c +#define GB_SVC_INTF_BAD_MBOX 0x0d +#define GB_SVC_INTF_OP_TIMEOUT 0x0e +#define GB_SVC_PWRMON_OP_NOT_PRESENT 0x0f + +struct gb_svc_version_request { + __u8 major; + __u8 minor; +} __packed; + +struct gb_svc_version_response { + __u8 major; + __u8 minor; +} __packed; + +/* SVC protocol hello request */ +struct gb_svc_hello_request { + __le16 endo_id; + __u8 interface_id; +} __packed; +/* hello response has no payload */ + +struct gb_svc_intf_device_id_request { + __u8 intf_id; + __u8 device_id; +} __packed; +/* device id response has no payload */ + +struct gb_svc_intf_reset_request { + __u8 intf_id; +} __packed; +/* interface reset response has no payload */ + +struct gb_svc_intf_eject_request { + __u8 intf_id; +} __packed; +/* interface eject response has no payload */ + +struct gb_svc_conn_create_request { + __u8 intf1_id; + 
__le16 cport1_id; + __u8 intf2_id; + __le16 cport2_id; + __u8 tc; + __u8 flags; +} __packed; +/* connection create response has no payload */ + +struct gb_svc_conn_destroy_request { + __u8 intf1_id; + __le16 cport1_id; + __u8 intf2_id; + __le16 cport2_id; +} __packed; +/* connection destroy response has no payload */ + +struct gb_svc_dme_peer_get_request { + __u8 intf_id; + __le16 attr; + __le16 selector; +} __packed; + +struct gb_svc_dme_peer_get_response { + __le16 result_code; + __le32 attr_value; +} __packed; + +struct gb_svc_dme_peer_set_request { + __u8 intf_id; + __le16 attr; + __le16 selector; + __le32 value; +} __packed; + +struct gb_svc_dme_peer_set_response { + __le16 result_code; +} __packed; + +/* Greybus init-status values, currently retrieved using DME peer gets. */ +#define GB_INIT_SPI_BOOT_STARTED 0x02 +#define GB_INIT_TRUSTED_SPI_BOOT_FINISHED 0x03 +#define GB_INIT_UNTRUSTED_SPI_BOOT_FINISHED 0x04 +#define GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED 0x06 +#define GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED 0x09 +#define GB_INIT_S2_LOADER_BOOT_STARTED 0x0D + +struct gb_svc_route_create_request { + __u8 intf1_id; + __u8 dev1_id; + __u8 intf2_id; + __u8 dev2_id; +} __packed; +/* route create response has no payload */ + +struct gb_svc_route_destroy_request { + __u8 intf1_id; + __u8 intf2_id; +} __packed; +/* route destroy response has no payload */ + +/* used for svc_intf_vsys_{enable,disable} */ +struct gb_svc_intf_vsys_request { + __u8 intf_id; +} __packed; + +struct gb_svc_intf_vsys_response { + __u8 result_code; +#define GB_SVC_INTF_VSYS_OK 0x00 + /* 0x01 is reserved */ +#define GB_SVC_INTF_VSYS_FAIL 0x02 +} __packed; + +/* used for svc_intf_refclk_{enable,disable} */ +struct gb_svc_intf_refclk_request { + __u8 intf_id; +} __packed; + +struct gb_svc_intf_refclk_response { + __u8 result_code; +#define GB_SVC_INTF_REFCLK_OK 0x00 + /* 0x01 is reserved */ +#define GB_SVC_INTF_REFCLK_FAIL 0x02 +} __packed; + +/* used for svc_intf_unipro_{enable,disable} */ +struct gb_svc_intf_unipro_request { + __u8 intf_id; +} __packed; + +struct gb_svc_intf_unipro_response { + __u8 result_code; +#define GB_SVC_INTF_UNIPRO_OK 0x00 + /* 0x01 is reserved */ +#define GB_SVC_INTF_UNIPRO_FAIL 0x02 +#define GB_SVC_INTF_UNIPRO_NOT_OFF 0x03 +} __packed; + +#define GB_SVC_UNIPRO_FAST_MODE 0x01 +#define GB_SVC_UNIPRO_SLOW_MODE 0x02 +#define GB_SVC_UNIPRO_FAST_AUTO_MODE 0x04 +#define GB_SVC_UNIPRO_SLOW_AUTO_MODE 0x05 +#define GB_SVC_UNIPRO_MODE_UNCHANGED 0x07 +#define GB_SVC_UNIPRO_HIBERNATE_MODE 0x11 +#define GB_SVC_UNIPRO_OFF_MODE 0x12 + +#define GB_SVC_SMALL_AMPLITUDE 0x01 +#define GB_SVC_LARGE_AMPLITUDE 0x02 + +#define GB_SVC_NO_DE_EMPHASIS 0x00 +#define GB_SVC_SMALL_DE_EMPHASIS 0x01 +#define GB_SVC_LARGE_DE_EMPHASIS 0x02 + +#define GB_SVC_PWRM_RXTERMINATION 0x01 +#define GB_SVC_PWRM_TXTERMINATION 0x02 +#define GB_SVC_PWRM_LINE_RESET 0x04 +#define GB_SVC_PWRM_SCRAMBLING 0x20 + +#define GB_SVC_PWRM_QUIRK_HSSER 0x00000001 + +#define GB_SVC_UNIPRO_HS_SERIES_A 0x01 +#define GB_SVC_UNIPRO_HS_SERIES_B 0x02 + +#define GB_SVC_SETPWRM_PWR_OK 0x00 +#define GB_SVC_SETPWRM_PWR_LOCAL 0x01 +#define GB_SVC_SETPWRM_PWR_REMOTE 0x02 +#define GB_SVC_SETPWRM_PWR_BUSY 0x03 +#define GB_SVC_SETPWRM_PWR_ERROR_CAP 0x04 +#define GB_SVC_SETPWRM_PWR_FATAL_ERROR 0x05 + +struct gb_svc_l2_timer_cfg { + __le16 tsb_fc0_protection_timeout; + __le16 tsb_tc0_replay_timeout; + __le16 tsb_afc0_req_timeout; + __le16 tsb_fc1_protection_timeout; + __le16 tsb_tc1_replay_timeout; + __le16 tsb_afc1_req_timeout; + __le16 reserved_for_tc2[3]; + 
__le16 reserved_for_tc3[3]; +} __packed; + +struct gb_svc_intf_set_pwrm_request { + __u8 intf_id; + __u8 hs_series; + __u8 tx_mode; + __u8 tx_gear; + __u8 tx_nlanes; + __u8 tx_amplitude; + __u8 tx_hs_equalizer; + __u8 rx_mode; + __u8 rx_gear; + __u8 rx_nlanes; + __u8 flags; + __le32 quirks; + struct gb_svc_l2_timer_cfg local_l2timerdata, remote_l2timerdata; +} __packed; + +struct gb_svc_intf_set_pwrm_response { + __u8 result_code; +} __packed; + +struct gb_svc_key_event_request { + __le16 key_code; +#define GB_KEYCODE_ARA 0x00 + + __u8 key_event; +#define GB_SVC_KEY_RELEASED 0x00 +#define GB_SVC_KEY_PRESSED 0x01 +} __packed; + +#define GB_SVC_PWRMON_MAX_RAIL_COUNT 254 + +struct gb_svc_pwrmon_rail_count_get_response { + __u8 rail_count; +} __packed; + +#define GB_SVC_PWRMON_RAIL_NAME_BUFSIZE 32 + +struct gb_svc_pwrmon_rail_names_get_response { + __u8 status; + __u8 name[0][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE]; +} __packed; + +#define GB_SVC_PWRMON_TYPE_CURR 0x01 +#define GB_SVC_PWRMON_TYPE_VOL 0x02 +#define GB_SVC_PWRMON_TYPE_PWR 0x03 + +#define GB_SVC_PWRMON_GET_SAMPLE_OK 0x00 +#define GB_SVC_PWRMON_GET_SAMPLE_INVAL 0x01 +#define GB_SVC_PWRMON_GET_SAMPLE_NOSUPP 0x02 +#define GB_SVC_PWRMON_GET_SAMPLE_HWERR 0x03 + +struct gb_svc_pwrmon_sample_get_request { + __u8 rail_id; + __u8 measurement_type; +} __packed; + +struct gb_svc_pwrmon_sample_get_response { + __u8 result; + __le32 measurement; +} __packed; + +struct gb_svc_pwrmon_intf_sample_get_request { + __u8 intf_id; + __u8 measurement_type; +} __packed; + +struct gb_svc_pwrmon_intf_sample_get_response { + __u8 result; + __le32 measurement; +} __packed; + +#define GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY 0x0001 + +struct gb_svc_module_inserted_request { + __u8 primary_intf_id; + __u8 intf_count; + __le16 flags; +} __packed; +/* module_inserted response has no payload */ + +struct gb_svc_module_removed_request { + __u8 primary_intf_id; +} __packed; +/* module_removed response has no payload */ + +struct gb_svc_intf_activate_request { + __u8 intf_id; +} __packed; + +#define GB_SVC_INTF_TYPE_UNKNOWN 0x00 +#define GB_SVC_INTF_TYPE_DUMMY 0x01 +#define GB_SVC_INTF_TYPE_UNIPRO 0x02 +#define GB_SVC_INTF_TYPE_GREYBUS 0x03 + +struct gb_svc_intf_activate_response { + __u8 status; + __u8 intf_type; +} __packed; + +struct gb_svc_intf_resume_request { + __u8 intf_id; +} __packed; + +struct gb_svc_intf_resume_response { + __u8 status; +} __packed; + +#define GB_SVC_INTF_MAILBOX_NONE 0x00 +#define GB_SVC_INTF_MAILBOX_AP 0x01 +#define GB_SVC_INTF_MAILBOX_GREYBUS 0x02 + +struct gb_svc_intf_mailbox_event_request { + __u8 intf_id; + __le16 result_code; + __le32 mailbox; +} __packed; +/* intf_mailbox_event response has no payload */ + +struct gb_svc_intf_oops_request { + __u8 intf_id; + __u8 reason; +} __packed; +/* intf_oops response has no payload */ + + +/* RAW */ + +/* Greybus raw request types */ +#define GB_RAW_TYPE_SEND 0x02 + +struct gb_raw_send_request { + __le32 len; + __u8 data[0]; +} __packed; + + +/* UART */ + +/* Greybus UART operation types */ +#define GB_UART_TYPE_SEND_DATA 0x02 +#define GB_UART_TYPE_RECEIVE_DATA 0x03 /* Unsolicited data */ +#define GB_UART_TYPE_SET_LINE_CODING 0x04 +#define GB_UART_TYPE_SET_CONTROL_LINE_STATE 0x05 +#define GB_UART_TYPE_SEND_BREAK 0x06 +#define GB_UART_TYPE_SERIAL_STATE 0x07 /* Unsolicited data */ +#define GB_UART_TYPE_RECEIVE_CREDITS 0x08 +#define GB_UART_TYPE_FLUSH_FIFOS 0x09 + +/* Represents data from AP -> Module */ +struct gb_uart_send_data_request { + __le16 size; + __u8 data[0]; +} __packed; + +/* 
recv-data-request flags */ +#define GB_UART_RECV_FLAG_FRAMING 0x01 /* Framing error */ +#define GB_UART_RECV_FLAG_PARITY 0x02 /* Parity error */ +#define GB_UART_RECV_FLAG_OVERRUN 0x04 /* Overrun error */ +#define GB_UART_RECV_FLAG_BREAK 0x08 /* Break */ + +/* Represents data from Module -> AP */ +struct gb_uart_recv_data_request { + __le16 size; + __u8 flags; + __u8 data[0]; +} __packed; + +struct gb_uart_receive_credits_request { + __le16 count; +} __packed; + +struct gb_uart_set_line_coding_request { + __le32 rate; + __u8 format; +#define GB_SERIAL_1_STOP_BITS 0 +#define GB_SERIAL_1_5_STOP_BITS 1 +#define GB_SERIAL_2_STOP_BITS 2 + + __u8 parity; +#define GB_SERIAL_NO_PARITY 0 +#define GB_SERIAL_ODD_PARITY 1 +#define GB_SERIAL_EVEN_PARITY 2 +#define GB_SERIAL_MARK_PARITY 3 +#define GB_SERIAL_SPACE_PARITY 4 + + __u8 data_bits; + + __u8 flow_control; +#define GB_SERIAL_AUTO_RTSCTS_EN 0x1 +} __packed; + +/* output control lines */ +#define GB_UART_CTRL_DTR 0x01 +#define GB_UART_CTRL_RTS 0x02 + +struct gb_uart_set_control_line_state_request { + __u8 control; +} __packed; + +struct gb_uart_set_break_request { + __u8 state; +} __packed; + +/* input control lines and line errors */ +#define GB_UART_CTRL_DCD 0x01 +#define GB_UART_CTRL_DSR 0x02 +#define GB_UART_CTRL_RI 0x04 + +struct gb_uart_serial_state_request { + __u8 control; +} __packed; + +struct gb_uart_serial_flush_request { + __u8 flags; +#define GB_SERIAL_FLAG_FLUSH_TRANSMITTER 0x01 +#define GB_SERIAL_FLAG_FLUSH_RECEIVER 0x02 +} __packed; + +/* Loopback */ + +/* Greybus loopback request types */ +#define GB_LOOPBACK_TYPE_PING 0x02 +#define GB_LOOPBACK_TYPE_TRANSFER 0x03 +#define GB_LOOPBACK_TYPE_SINK 0x04 + +/* + * Loopback request/response header format should be identical + * to simplify bandwidth and data movement analysis. 
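+ * Both directions therefore carry a len field, two reserved fields and a + * variable-length data payload, so traffic can be measured identically in + * each direction.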
+ */ +struct gb_loopback_transfer_request { + __le32 len; + __le32 reserved0; + __le32 reserved1; + __u8 data[0]; +} __packed; + +struct gb_loopback_transfer_response { + __le32 len; + __le32 reserved0; + __le32 reserved1; + __u8 data[0]; +} __packed; + +/* SDIO */ +/* Greybus SDIO operation types */ +#define GB_SDIO_TYPE_GET_CAPABILITIES 0x02 +#define GB_SDIO_TYPE_SET_IOS 0x03 +#define GB_SDIO_TYPE_COMMAND 0x04 +#define GB_SDIO_TYPE_TRANSFER 0x05 +#define GB_SDIO_TYPE_EVENT 0x06 + +/* get caps response: request has no payload */ +struct gb_sdio_get_caps_response { + __le32 caps; +#define GB_SDIO_CAP_NONREMOVABLE 0x00000001 +#define GB_SDIO_CAP_4_BIT_DATA 0x00000002 +#define GB_SDIO_CAP_8_BIT_DATA 0x00000004 +#define GB_SDIO_CAP_MMC_HS 0x00000008 +#define GB_SDIO_CAP_SD_HS 0x00000010 +#define GB_SDIO_CAP_ERASE 0x00000020 +#define GB_SDIO_CAP_1_2V_DDR 0x00000040 +#define GB_SDIO_CAP_1_8V_DDR 0x00000080 +#define GB_SDIO_CAP_POWER_OFF_CARD 0x00000100 +#define GB_SDIO_CAP_UHS_SDR12 0x00000200 +#define GB_SDIO_CAP_UHS_SDR25 0x00000400 +#define GB_SDIO_CAP_UHS_SDR50 0x00000800 +#define GB_SDIO_CAP_UHS_SDR104 0x00001000 +#define GB_SDIO_CAP_UHS_DDR50 0x00002000 +#define GB_SDIO_CAP_DRIVER_TYPE_A 0x00004000 +#define GB_SDIO_CAP_DRIVER_TYPE_C 0x00008000 +#define GB_SDIO_CAP_DRIVER_TYPE_D 0x00010000 +#define GB_SDIO_CAP_HS200_1_2V 0x00020000 +#define GB_SDIO_CAP_HS200_1_8V 0x00040000 +#define GB_SDIO_CAP_HS400_1_2V 0x00080000 +#define GB_SDIO_CAP_HS400_1_8V 0x00100000 + + /* see possible values below at vdd */ + __le32 ocr; + __le32 f_min; + __le32 f_max; + __le16 max_blk_count; + __le16 max_blk_size; +} __packed; + +/* set ios request: response has no payload */ +struct gb_sdio_set_ios_request { + __le32 clock; + __le32 vdd; +#define GB_SDIO_VDD_165_195 0x00000001 +#define GB_SDIO_VDD_20_21 0x00000002 +#define GB_SDIO_VDD_21_22 0x00000004 +#define GB_SDIO_VDD_22_23 0x00000008 +#define GB_SDIO_VDD_23_24 0x00000010 +#define GB_SDIO_VDD_24_25 0x00000020 +#define GB_SDIO_VDD_25_26 0x00000040 +#define GB_SDIO_VDD_26_27 0x00000080 +#define GB_SDIO_VDD_27_28 0x00000100 +#define GB_SDIO_VDD_28_29 0x00000200 +#define GB_SDIO_VDD_29_30 0x00000400 +#define GB_SDIO_VDD_30_31 0x00000800 +#define GB_SDIO_VDD_31_32 0x00001000 +#define GB_SDIO_VDD_32_33 0x00002000 +#define GB_SDIO_VDD_33_34 0x00004000 +#define GB_SDIO_VDD_34_35 0x00008000 +#define GB_SDIO_VDD_35_36 0x00010000 + + __u8 bus_mode; +#define GB_SDIO_BUSMODE_OPENDRAIN 0x00 +#define GB_SDIO_BUSMODE_PUSHPULL 0x01 + + __u8 power_mode; +#define GB_SDIO_POWER_OFF 0x00 +#define GB_SDIO_POWER_UP 0x01 +#define GB_SDIO_POWER_ON 0x02 +#define GB_SDIO_POWER_UNDEFINED 0x03 + + __u8 bus_width; +#define GB_SDIO_BUS_WIDTH_1 0x00 +#define GB_SDIO_BUS_WIDTH_4 0x02 +#define GB_SDIO_BUS_WIDTH_8 0x03 + + __u8 timing; +#define GB_SDIO_TIMING_LEGACY 0x00 +#define GB_SDIO_TIMING_MMC_HS 0x01 +#define GB_SDIO_TIMING_SD_HS 0x02 +#define GB_SDIO_TIMING_UHS_SDR12 0x03 +#define GB_SDIO_TIMING_UHS_SDR25 0x04 +#define GB_SDIO_TIMING_UHS_SDR50 0x05 +#define GB_SDIO_TIMING_UHS_SDR104 0x06 +#define GB_SDIO_TIMING_UHS_DDR50 0x07 +#define GB_SDIO_TIMING_MMC_DDR52 0x08 +#define GB_SDIO_TIMING_MMC_HS200 0x09 +#define GB_SDIO_TIMING_MMC_HS400 0x0A + + __u8 signal_voltage; +#define GB_SDIO_SIGNAL_VOLTAGE_330 0x00 +#define GB_SDIO_SIGNAL_VOLTAGE_180 0x01 +#define GB_SDIO_SIGNAL_VOLTAGE_120 0x02 + + __u8 drv_type; +#define GB_SDIO_SET_DRIVER_TYPE_B 0x00 +#define GB_SDIO_SET_DRIVER_TYPE_A 0x01 +#define GB_SDIO_SET_DRIVER_TYPE_C 0x02 +#define GB_SDIO_SET_DRIVER_TYPE_D 0x03 +} __packed; + +/* 
command request */ +struct gb_sdio_command_request { + __u8 cmd; + __u8 cmd_flags; +#define GB_SDIO_RSP_NONE 0x00 +#define GB_SDIO_RSP_PRESENT 0x01 +#define GB_SDIO_RSP_136 0x02 +#define GB_SDIO_RSP_CRC 0x04 +#define GB_SDIO_RSP_BUSY 0x08 +#define GB_SDIO_RSP_OPCODE 0x10 + + __u8 cmd_type; +#define GB_SDIO_CMD_AC 0x00 +#define GB_SDIO_CMD_ADTC 0x01 +#define GB_SDIO_CMD_BC 0x02 +#define GB_SDIO_CMD_BCR 0x03 + + __le32 cmd_arg; + __le16 data_blocks; + __le16 data_blksz; +} __packed; + +struct gb_sdio_command_response { + __le32 resp[4]; +} __packed; + +/* transfer request */ +struct gb_sdio_transfer_request { + __u8 data_flags; +#define GB_SDIO_DATA_WRITE 0x01 +#define GB_SDIO_DATA_READ 0x02 +#define GB_SDIO_DATA_STREAM 0x04 + + __le16 data_blocks; + __le16 data_blksz; + __u8 data[0]; +} __packed; + +struct gb_sdio_transfer_response { + __le16 data_blocks; + __le16 data_blksz; + __u8 data[0]; +} __packed; + +/* event request: generated by module and is defined as unidirectional */ +struct gb_sdio_event_request { + __u8 event; +#define GB_SDIO_CARD_INSERTED 0x01 +#define GB_SDIO_CARD_REMOVED 0x02 +#define GB_SDIO_WP 0x04 +} __packed; + +/* Camera */ + +/* Greybus Camera request types */ +#define GB_CAMERA_TYPE_CAPABILITIES 0x02 +#define GB_CAMERA_TYPE_CONFIGURE_STREAMS 0x03 +#define GB_CAMERA_TYPE_CAPTURE 0x04 +#define GB_CAMERA_TYPE_FLUSH 0x05 +#define GB_CAMERA_TYPE_METADATA 0x06 + +#define GB_CAMERA_MAX_STREAMS 4 +#define GB_CAMERA_MAX_SETTINGS_SIZE 8192 + +/* Greybus Camera Configure Streams request payload */ +struct gb_camera_stream_config_request { + __le16 width; + __le16 height; + __le16 format; + __le16 padding; +} __packed; + +struct gb_camera_configure_streams_request { + __u8 num_streams; + __u8 flags; +#define GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY 0x01 + __le16 padding; + struct gb_camera_stream_config_request config[0]; +} __packed; + +/* Greybus Camera Configure Streams response payload */ +struct gb_camera_stream_config_response { + __le16 width; + __le16 height; + __le16 format; + __u8 virtual_channel; + __u8 data_type[2]; + __le16 max_pkt_size; + __u8 padding; + __le32 max_size; +} __packed; + +struct gb_camera_configure_streams_response { + __u8 num_streams; +#define GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED 0x01 + __u8 flags; + __u8 padding[2]; + __le32 data_rate; + struct gb_camera_stream_config_response config[0]; +}; + +/* Greybus Camera Capture request payload - response has no payload */ +struct gb_camera_capture_request { + __le32 request_id; + __u8 streams; + __u8 padding; + __le16 num_frames; + __u8 settings[0]; +} __packed; + +/* Greybus Camera Flush response payload - request has no payload */ +struct gb_camera_flush_response { + __le32 request_id; +} __packed; + +/* Greybus Camera Metadata request payload - operation has no response */ +struct gb_camera_metadata_request { + __le32 request_id; + __le16 frame_number; + __u8 stream; + __u8 padding; + __u8 metadata[0]; +} __packed; + +/* Lights */ + +/* Greybus Lights request types */ +#define GB_LIGHTS_TYPE_GET_LIGHTS 0x02 +#define GB_LIGHTS_TYPE_GET_LIGHT_CONFIG 0x03 +#define GB_LIGHTS_TYPE_GET_CHANNEL_CONFIG 0x04 +#define GB_LIGHTS_TYPE_GET_CHANNEL_FLASH_CONFIG 0x05 +#define GB_LIGHTS_TYPE_SET_BRIGHTNESS 0x06 +#define GB_LIGHTS_TYPE_SET_BLINK 0x07 +#define GB_LIGHTS_TYPE_SET_COLOR 0x08 +#define GB_LIGHTS_TYPE_SET_FADE 0x09 +#define GB_LIGHTS_TYPE_EVENT 0x0A +#define GB_LIGHTS_TYPE_SET_FLASH_INTENSITY 0x0B +#define GB_LIGHTS_TYPE_SET_FLASH_STROBE 0x0C +#define GB_LIGHTS_TYPE_SET_FLASH_TIMEOUT 0x0D +#define 
GB_LIGHTS_TYPE_GET_FLASH_FAULT 0x0E + +/* Greybus Light modes */ + +/* + * if you add any specific mode below, also update the + * GB_CHANNEL_MODE_DEFINED_RANGE value accordingly + */ +#define GB_CHANNEL_MODE_NONE 0x00000000 +#define GB_CHANNEL_MODE_BATTERY 0x00000001 +#define GB_CHANNEL_MODE_POWER 0x00000002 +#define GB_CHANNEL_MODE_WIRELESS 0x00000004 +#define GB_CHANNEL_MODE_BLUETOOTH 0x00000008 +#define GB_CHANNEL_MODE_KEYBOARD 0x00000010 +#define GB_CHANNEL_MODE_BUTTONS 0x00000020 +#define GB_CHANNEL_MODE_NOTIFICATION 0x00000040 +#define GB_CHANNEL_MODE_ATTENTION 0x00000080 +#define GB_CHANNEL_MODE_FLASH 0x00000100 +#define GB_CHANNEL_MODE_TORCH 0x00000200 +#define GB_CHANNEL_MODE_INDICATOR 0x00000400 + +/* Lights Mode valid bit values */ +#define GB_CHANNEL_MODE_DEFINED_RANGE 0x000004FF +#define GB_CHANNEL_MODE_VENDOR_RANGE 0x00F00000 + +/* Greybus Light Channels Flags */ +#define GB_LIGHT_CHANNEL_MULTICOLOR 0x00000001 +#define GB_LIGHT_CHANNEL_FADER 0x00000002 +#define GB_LIGHT_CHANNEL_BLINK 0x00000004 + +/* get count of lights in module */ +struct gb_lights_get_lights_response { + __u8 lights_count; +} __packed; + +/* light config request payload */ +struct gb_lights_get_light_config_request { + __u8 id; +} __packed; + +/* light config response payload */ +struct gb_lights_get_light_config_response { + __u8 channel_count; + __u8 name[32]; +} __packed; + +/* channel config request payload */ +struct gb_lights_get_channel_config_request { + __u8 light_id; + __u8 channel_id; +} __packed; + +/* channel flash config request payload */ +struct gb_lights_get_channel_flash_config_request { + __u8 light_id; + __u8 channel_id; +} __packed; + +/* channel config response payload */ +struct gb_lights_get_channel_config_response { + __u8 max_brightness; + __le32 flags; + __le32 color; + __u8 color_name[32]; + __le32 mode; + __u8 mode_name[32]; +} __packed; + +/* channel flash config response payload */ +struct gb_lights_get_channel_flash_config_response { + __le32 intensity_min_uA; + __le32 intensity_max_uA; + __le32 intensity_step_uA; + __le32 timeout_min_us; + __le32 timeout_max_us; + __le32 timeout_step_us; +} __packed; + +/* blink request payload: response has no payload */ +struct gb_lights_blink_request { + __u8 light_id; + __u8 channel_id; + __le16 time_on_ms; + __le16 time_off_ms; +} __packed; + +/* set brightness request payload: response has no payload */ +struct gb_lights_set_brightness_request { + __u8 light_id; + __u8 channel_id; + __u8 brightness; +} __packed; + +/* set color request payload: response has no payload */ +struct gb_lights_set_color_request { + __u8 light_id; + __u8 channel_id; + __le32 color; +} __packed; + +/* set fade request payload: response has no payload */ +struct gb_lights_set_fade_request { + __u8 light_id; + __u8 channel_id; + __u8 fade_in; + __u8 fade_out; +} __packed; + +/* event request: generated by module */ +struct gb_lights_event_request { + __u8 light_id; + __u8 event; +#define GB_LIGHTS_LIGHT_CONFIG 0x01 +} __packed; + +/* set flash intensity request payload: response has no payload */ +struct gb_lights_set_flash_intensity_request { + __u8 light_id; + __u8 channel_id; + __le32 intensity_uA; +} __packed; + +/* set flash strobe state request payload: response has no payload */ +struct gb_lights_set_flash_strobe_request { + __u8 light_id; + __u8 channel_id; + __u8 state; +} __packed; + +/* set flash timeout request payload: response has no payload */ +struct gb_lights_set_flash_timeout_request { + __u8 light_id; + __u8 channel_id; + __le32
timeout_us; +} __packed; + +/* get flash fault request payload */ +struct gb_lights_get_flash_fault_request { + __u8 light_id; + __u8 channel_id; +} __packed; + +/* get flash fault response payload */ +struct gb_lights_get_flash_fault_response { + __le32 fault; +#define GB_LIGHTS_FLASH_FAULT_OVER_VOLTAGE 0x00000000 +#define GB_LIGHTS_FLASH_FAULT_TIMEOUT 0x00000001 +#define GB_LIGHTS_FLASH_FAULT_OVER_TEMPERATURE 0x00000002 +#define GB_LIGHTS_FLASH_FAULT_SHORT_CIRCUIT 0x00000004 +#define GB_LIGHTS_FLASH_FAULT_OVER_CURRENT 0x00000008 +#define GB_LIGHTS_FLASH_FAULT_INDICATOR 0x00000010 +#define GB_LIGHTS_FLASH_FAULT_UNDER_VOLTAGE 0x00000020 +#define GB_LIGHTS_FLASH_FAULT_INPUT_VOLTAGE 0x00000040 +#define GB_LIGHTS_FLASH_FAULT_LED_OVER_TEMPERATURE 0x00000080 +} __packed; + +/* Audio */ + +#define GB_AUDIO_TYPE_GET_TOPOLOGY_SIZE 0x02 +#define GB_AUDIO_TYPE_GET_TOPOLOGY 0x03 +#define GB_AUDIO_TYPE_GET_CONTROL 0x04 +#define GB_AUDIO_TYPE_SET_CONTROL 0x05 +#define GB_AUDIO_TYPE_ENABLE_WIDGET 0x06 +#define GB_AUDIO_TYPE_DISABLE_WIDGET 0x07 +#define GB_AUDIO_TYPE_GET_PCM 0x08 +#define GB_AUDIO_TYPE_SET_PCM 0x09 +#define GB_AUDIO_TYPE_SET_TX_DATA_SIZE 0x0a + /* 0x0b unused */ +#define GB_AUDIO_TYPE_ACTIVATE_TX 0x0c +#define GB_AUDIO_TYPE_DEACTIVATE_TX 0x0d +#define GB_AUDIO_TYPE_SET_RX_DATA_SIZE 0x0e + /* 0x0f unused */ +#define GB_AUDIO_TYPE_ACTIVATE_RX 0x10 +#define GB_AUDIO_TYPE_DEACTIVATE_RX 0x11 +#define GB_AUDIO_TYPE_JACK_EVENT 0x12 +#define GB_AUDIO_TYPE_BUTTON_EVENT 0x13 +#define GB_AUDIO_TYPE_STREAMING_EVENT 0x14 +#define GB_AUDIO_TYPE_SEND_DATA 0x15 + +/* Module must be able to buffer 10ms of audio data, minimum */ +#define GB_AUDIO_SAMPLE_BUFFER_MIN_US 10000 + +#define GB_AUDIO_PCM_NAME_MAX 32 +#define AUDIO_DAI_NAME_MAX 32 +#define AUDIO_CONTROL_NAME_MAX 32 +#define AUDIO_CTL_ELEM_NAME_MAX 44 +#define AUDIO_ENUM_NAME_MAX 64 +#define AUDIO_WIDGET_NAME_MAX 32 + +/* See SNDRV_PCM_FMTBIT_* in Linux source */ +#define GB_AUDIO_PCM_FMT_S8 BIT(0) +#define GB_AUDIO_PCM_FMT_U8 BIT(1) +#define GB_AUDIO_PCM_FMT_S16_LE BIT(2) +#define GB_AUDIO_PCM_FMT_S16_BE BIT(3) +#define GB_AUDIO_PCM_FMT_U16_LE BIT(4) +#define GB_AUDIO_PCM_FMT_U16_BE BIT(5) +#define GB_AUDIO_PCM_FMT_S24_LE BIT(6) +#define GB_AUDIO_PCM_FMT_S24_BE BIT(7) +#define GB_AUDIO_PCM_FMT_U24_LE BIT(8) +#define GB_AUDIO_PCM_FMT_U24_BE BIT(9) +#define GB_AUDIO_PCM_FMT_S32_LE BIT(10) +#define GB_AUDIO_PCM_FMT_S32_BE BIT(11) +#define GB_AUDIO_PCM_FMT_U32_LE BIT(12) +#define GB_AUDIO_PCM_FMT_U32_BE BIT(13) + +/* See SNDRV_PCM_RATE_* in Linux source */ +#define GB_AUDIO_PCM_RATE_5512 BIT(0) +#define GB_AUDIO_PCM_RATE_8000 BIT(1) +#define GB_AUDIO_PCM_RATE_11025 BIT(2) +#define GB_AUDIO_PCM_RATE_16000 BIT(3) +#define GB_AUDIO_PCM_RATE_22050 BIT(4) +#define GB_AUDIO_PCM_RATE_32000 BIT(5) +#define GB_AUDIO_PCM_RATE_44100 BIT(6) +#define GB_AUDIO_PCM_RATE_48000 BIT(7) +#define GB_AUDIO_PCM_RATE_64000 BIT(8) +#define GB_AUDIO_PCM_RATE_88200 BIT(9) +#define GB_AUDIO_PCM_RATE_96000 BIT(10) +#define GB_AUDIO_PCM_RATE_176400 BIT(11) +#define GB_AUDIO_PCM_RATE_192000 BIT(12) + +#define GB_AUDIO_STREAM_TYPE_CAPTURE 0x1 +#define GB_AUDIO_STREAM_TYPE_PLAYBACK 0x2 + +#define GB_AUDIO_CTL_ELEM_ACCESS_READ BIT(0) +#define GB_AUDIO_CTL_ELEM_ACCESS_WRITE BIT(1) + +/* See SNDRV_CTL_ELEM_TYPE_* in Linux source */ +#define GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN 0x01 +#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER 0x02 +#define GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED 0x03 +#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER64 0x06 + +/* See SNDRV_CTL_ELEM_IFACE_* in Linux source */ +#define 
GB_AUDIO_CTL_ELEM_IFACE_CARD 0x00 +#define GB_AUDIO_CTL_ELEM_IFACE_HWDEP 0x01 +#define GB_AUDIO_CTL_ELEM_IFACE_MIXER 0x02 +#define GB_AUDIO_CTL_ELEM_IFACE_PCM 0x03 +#define GB_AUDIO_CTL_ELEM_IFACE_RAWMIDI 0x04 +#define GB_AUDIO_CTL_ELEM_IFACE_TIMER 0x05 +#define GB_AUDIO_CTL_ELEM_IFACE_SEQUENCER 0x06 + +/* SNDRV_CTL_ELEM_ACCESS_* in Linux source */ +#define GB_AUDIO_ACCESS_READ BIT(0) +#define GB_AUDIO_ACCESS_WRITE BIT(1) +#define GB_AUDIO_ACCESS_VOLATILE BIT(2) +#define GB_AUDIO_ACCESS_TIMESTAMP BIT(3) +#define GB_AUDIO_ACCESS_TLV_READ BIT(4) +#define GB_AUDIO_ACCESS_TLV_WRITE BIT(5) +#define GB_AUDIO_ACCESS_TLV_COMMAND BIT(6) +#define GB_AUDIO_ACCESS_INACTIVE BIT(7) +#define GB_AUDIO_ACCESS_LOCK BIT(8) +#define GB_AUDIO_ACCESS_OWNER BIT(9) + +/* enum snd_soc_dapm_type */ +#define GB_AUDIO_WIDGET_TYPE_INPUT 0x0 +#define GB_AUDIO_WIDGET_TYPE_OUTPUT 0x1 +#define GB_AUDIO_WIDGET_TYPE_MUX 0x2 +#define GB_AUDIO_WIDGET_TYPE_VIRT_MUX 0x3 +#define GB_AUDIO_WIDGET_TYPE_VALUE_MUX 0x4 +#define GB_AUDIO_WIDGET_TYPE_MIXER 0x5 +#define GB_AUDIO_WIDGET_TYPE_MIXER_NAMED_CTL 0x6 +#define GB_AUDIO_WIDGET_TYPE_PGA 0x7 +#define GB_AUDIO_WIDGET_TYPE_OUT_DRV 0x8 +#define GB_AUDIO_WIDGET_TYPE_ADC 0x9 +#define GB_AUDIO_WIDGET_TYPE_DAC 0xa +#define GB_AUDIO_WIDGET_TYPE_MICBIAS 0xb +#define GB_AUDIO_WIDGET_TYPE_MIC 0xc +#define GB_AUDIO_WIDGET_TYPE_HP 0xd +#define GB_AUDIO_WIDGET_TYPE_SPK 0xe +#define GB_AUDIO_WIDGET_TYPE_LINE 0xf +#define GB_AUDIO_WIDGET_TYPE_SWITCH 0x10 +#define GB_AUDIO_WIDGET_TYPE_VMID 0x11 +#define GB_AUDIO_WIDGET_TYPE_PRE 0x12 +#define GB_AUDIO_WIDGET_TYPE_POST 0x13 +#define GB_AUDIO_WIDGET_TYPE_SUPPLY 0x14 +#define GB_AUDIO_WIDGET_TYPE_REGULATOR_SUPPLY 0x15 +#define GB_AUDIO_WIDGET_TYPE_CLOCK_SUPPLY 0x16 +#define GB_AUDIO_WIDGET_TYPE_AIF_IN 0x17 +#define GB_AUDIO_WIDGET_TYPE_AIF_OUT 0x18 +#define GB_AUDIO_WIDGET_TYPE_SIGGEN 0x19 +#define GB_AUDIO_WIDGET_TYPE_DAI_IN 0x1a +#define GB_AUDIO_WIDGET_TYPE_DAI_OUT 0x1b +#define GB_AUDIO_WIDGET_TYPE_DAI_LINK 0x1c + +#define GB_AUDIO_WIDGET_STATE_DISABLED 0x01 +#define GB_AUDIO_WIDGET_STATE_ENAABLED 0x02 + +#define GB_AUDIO_JACK_EVENT_INSERTION 0x1 +#define GB_AUDIO_JACK_EVENT_REMOVAL 0x2 + +#define GB_AUDIO_BUTTON_EVENT_PRESS 0x1 +#define GB_AUDIO_BUTTON_EVENT_RELEASE 0x2 + +#define GB_AUDIO_STREAMING_EVENT_UNSPECIFIED 0x1 +#define GB_AUDIO_STREAMING_EVENT_HALT 0x2 +#define GB_AUDIO_STREAMING_EVENT_INTERNAL_ERROR 0x3 +#define GB_AUDIO_STREAMING_EVENT_PROTOCOL_ERROR 0x4 +#define GB_AUDIO_STREAMING_EVENT_FAILURE 0x5 +#define GB_AUDIO_STREAMING_EVENT_UNDERRUN 0x6 +#define GB_AUDIO_STREAMING_EVENT_OVERRUN 0x7 +#define GB_AUDIO_STREAMING_EVENT_CLOCKING 0x8 +#define GB_AUDIO_STREAMING_EVENT_DATA_LEN 0x9 + +#define GB_AUDIO_INVALID_INDEX 0xff + +/* enum snd_jack_types */ +#define GB_AUDIO_JACK_HEADPHONE 0x0000001 +#define GB_AUDIO_JACK_MICROPHONE 0x0000002 +#define GB_AUDIO_JACK_HEADSET (GB_AUDIO_JACK_HEADPHONE | \ + GB_AUDIO_JACK_MICROPHONE) +#define GB_AUDIO_JACK_LINEOUT 0x0000004 +#define GB_AUDIO_JACK_MECHANICAL 0x0000008 +#define GB_AUDIO_JACK_VIDEOOUT 0x0000010 +#define GB_AUDIO_JACK_AVOUT (GB_AUDIO_JACK_LINEOUT | \ + GB_AUDIO_JACK_VIDEOOUT) +#define GB_AUDIO_JACK_LINEIN 0x0000020 +#define GB_AUDIO_JACK_OC_HPHL 0x0000040 +#define GB_AUDIO_JACK_OC_HPHR 0x0000080 +#define GB_AUDIO_JACK_MICROPHONE2 0x0000200 +#define GB_AUDIO_JACK_ANC_HEADPHONE (GB_AUDIO_JACK_HEADPHONE | \ + GB_AUDIO_JACK_MICROPHONE | \ + GB_AUDIO_JACK_MICROPHONE2) +/* Kept separate from switches to facilitate implementation */ +#define GB_AUDIO_JACK_BTN_0 0x4000000 +#define 
GB_AUDIO_JACK_BTN_1 0x2000000 +#define GB_AUDIO_JACK_BTN_2 0x1000000 +#define GB_AUDIO_JACK_BTN_3 0x0800000 + +struct gb_audio_pcm { + __u8 stream_name[GB_AUDIO_PCM_NAME_MAX]; + __le32 formats; /* GB_AUDIO_PCM_FMT_* */ + __le32 rates; /* GB_AUDIO_PCM_RATE_* */ + __u8 chan_min; + __u8 chan_max; + __u8 sig_bits; /* number of bits of content */ +} __packed; + +struct gb_audio_dai { + __u8 name[AUDIO_DAI_NAME_MAX]; + __le16 data_cport; + struct gb_audio_pcm capture; + struct gb_audio_pcm playback; +} __packed; + +struct gb_audio_integer { + __le32 min; + __le32 max; + __le32 step; +} __packed; + +struct gb_audio_integer64 { + __le64 min; + __le64 max; + __le64 step; +} __packed; + +struct gb_audio_enumerated { + __le32 items; + __le16 names_length; + __u8 names[0]; +} __packed; + +struct gb_audio_ctl_elem_info { /* See snd_ctl_elem_info in Linux source */ + __u8 type; /* GB_AUDIO_CTL_ELEM_TYPE_* */ + __le16 dimen[4]; + union { + struct gb_audio_integer integer; + struct gb_audio_integer64 integer64; + struct gb_audio_enumerated enumerated; + } value; +} __packed; + +struct gb_audio_ctl_elem_value { /* See snd_ctl_elem_value in Linux source */ + __le64 timestamp; /* XXX needed? */ + union { + __le32 integer_value[2]; /* consider CTL_DOUBLE_xxx */ + __le64 integer64_value[2]; + __le32 enumerated_item[2]; + } value; +} __packed; + +struct gb_audio_control { + __u8 name[AUDIO_CONTROL_NAME_MAX]; + __u8 id; /* 0-63 */ + __u8 iface; /* GB_AUDIO_IFACE_* */ + __le16 data_cport; + __le32 access; /* GB_AUDIO_ACCESS_* */ + __u8 count; /* count of same elements */ + __u8 count_values; /* count of values, max=2 for CTL_DOUBLE_xxx */ + struct gb_audio_ctl_elem_info info; +} __packed; + +struct gb_audio_widget { + __u8 name[AUDIO_WIDGET_NAME_MAX]; + __u8 sname[AUDIO_WIDGET_NAME_MAX]; + __u8 id; + __u8 type; /* GB_AUDIO_WIDGET_TYPE_* */ + __u8 state; /* GB_AUDIO_WIDGET_STATE_* */ + __u8 ncontrols; + struct gb_audio_control ctl[0]; /* 'ncontrols' entries */ +} __packed; + +struct gb_audio_route { + __u8 source_id; /* widget id */ + __u8 destination_id; /* widget id */ + __u8 control_id; /* 0-63 */ + __u8 index; /* Selection within the control */ +} __packed; + +struct gb_audio_topology { + __u8 num_dais; + __u8 num_controls; + __u8 num_widgets; + __u8 num_routes; + __le32 size_dais; + __le32 size_controls; + __le32 size_widgets; + __le32 size_routes; + __le32 jack_type; + /* + * struct gb_audio_dai dai[num_dais]; + * struct gb_audio_control controls[num_controls]; + * struct gb_audio_widget widgets[num_widgets]; + * struct gb_audio_route routes[num_routes]; + */ + __u8 data[0]; +} __packed; + +struct gb_audio_get_topology_size_response { + __le16 size; +} __packed; + +struct gb_audio_get_topology_response { + struct gb_audio_topology topology; +} __packed; + +struct gb_audio_get_control_request { + __u8 control_id; + __u8 index; +} __packed; + +struct gb_audio_get_control_response { + struct gb_audio_ctl_elem_value value; +} __packed; + +struct gb_audio_set_control_request { + __u8 control_id; + __u8 index; + struct gb_audio_ctl_elem_value value; +} __packed; + +struct gb_audio_enable_widget_request { + __u8 widget_id; +} __packed; + +struct gb_audio_disable_widget_request { + __u8 widget_id; +} __packed; + +struct gb_audio_get_pcm_request { + __le16 data_cport; +} __packed; + +struct gb_audio_get_pcm_response { + __le32 format; + __le32 rate; + __u8 channels; + __u8 sig_bits; +} __packed; + +struct gb_audio_set_pcm_request { + __le16 data_cport; + __le32 format; + __le32 rate; + __u8 channels; + __u8 sig_bits; 
+} __packed; + +struct gb_audio_set_tx_data_size_request { + __le16 data_cport; + __le16 size; +} __packed; + +struct gb_audio_activate_tx_request { + __le16 data_cport; +} __packed; + +struct gb_audio_deactivate_tx_request { + __le16 data_cport; +} __packed; + +struct gb_audio_set_rx_data_size_request { + __le16 data_cport; + __le16 size; +} __packed; + +struct gb_audio_activate_rx_request { + __le16 data_cport; +} __packed; + +struct gb_audio_deactivate_rx_request { + __le16 data_cport; +} __packed; + +struct gb_audio_jack_event_request { + __u8 widget_id; + __u8 jack_attribute; + __u8 event; +} __packed; + +struct gb_audio_button_event_request { + __u8 widget_id; + __u8 button_id; + __u8 event; +} __packed; + +struct gb_audio_streaming_event_request { + __le16 data_cport; + __u8 event; +} __packed; + +struct gb_audio_send_data_request { + __le64 timestamp; + __u8 data[0]; +} __packed; + + +/* Log */ + +/* operations */ +#define GB_LOG_TYPE_SEND_LOG 0x02 + +/* length */ +#define GB_LOG_MAX_LEN 1024 + +struct gb_log_send_log_request { + __le16 len; + __u8 msg[0]; +} __packed; + +#endif /* __GREYBUS_PROTOCOLS_H */ + diff --git a/include/linux/greybus/hd.h b/include/linux/greybus/hd.h new file mode 100644 index 000000000000..d3faf0c1a569 --- /dev/null +++ b/include/linux/greybus/hd.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus Host Device + * + * Copyright 2014-2015 Google Inc. + * Copyright 2014-2015 Linaro Ltd. + */ + +#ifndef __HD_H +#define __HD_H + +#include <linux/types.h> +#include <linux/device.h> + +struct gb_host_device; +struct gb_message; + +struct gb_hd_driver { + size_t hd_priv_size; + + int (*cport_allocate)(struct gb_host_device *hd, int cport_id, + unsigned long flags); + void (*cport_release)(struct gb_host_device *hd, u16 cport_id); + int (*cport_enable)(struct gb_host_device *hd, u16 cport_id, + unsigned long flags); + int (*cport_disable)(struct gb_host_device *hd, u16 cport_id); + int (*cport_connected)(struct gb_host_device *hd, u16 cport_id); + int (*cport_flush)(struct gb_host_device *hd, u16 cport_id); + int (*cport_shutdown)(struct gb_host_device *hd, u16 cport_id, + u8 phase, unsigned int timeout); + int (*cport_quiesce)(struct gb_host_device *hd, u16 cport_id, + size_t peer_space, unsigned int timeout); + int (*cport_clear)(struct gb_host_device *hd, u16 cport_id); + + int (*message_send)(struct gb_host_device *hd, u16 dest_cport_id, + struct gb_message *message, gfp_t gfp_mask); + void (*message_cancel)(struct gb_message *message); + int (*latency_tag_enable)(struct gb_host_device *hd, u16 cport_id); + int (*latency_tag_disable)(struct gb_host_device *hd, u16 cport_id); + int (*output)(struct gb_host_device *hd, void *req, u16 size, u8 cmd, + bool async); +}; + +struct gb_host_device { + struct device dev; + int bus_id; + const struct gb_hd_driver *driver; + + struct list_head modules; + struct list_head connections; + struct ida cport_id_map; + + /* Number of CPorts supported by the UniPro IP */ + size_t num_cports; + + /* Host device buffer constraints */ + size_t buffer_size_max; + + struct gb_svc *svc; + /* Private data for the host driver */ + unsigned long hd_priv[0] __aligned(sizeof(s64)); +}; +#define to_gb_host_device(d) container_of(d, struct gb_host_device, dev) + +int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id); +void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id); +int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id, + unsigned long flags); +void 
gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id); + +struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver, + struct device *parent, + size_t buffer_size_max, + size_t num_cports); +int gb_hd_add(struct gb_host_device *hd); +void gb_hd_del(struct gb_host_device *hd); +void gb_hd_shutdown(struct gb_host_device *hd); +void gb_hd_put(struct gb_host_device *hd); +int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd, + bool in_irq); + +int gb_hd_init(void); +void gb_hd_exit(void); + +#endif /* __HD_H */ diff --git a/include/linux/greybus/interface.h b/include/linux/greybus/interface.h new file mode 100644 index 000000000000..ce4def881e6f --- /dev/null +++ b/include/linux/greybus/interface.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus Interface Block code + * + * Copyright 2014 Google Inc. + * Copyright 2014 Linaro Ltd. + */ + +#ifndef __INTERFACE_H +#define __INTERFACE_H + +#include <linux/types.h> +#include <linux/device.h> + +enum gb_interface_type { + GB_INTERFACE_TYPE_INVALID = 0, + GB_INTERFACE_TYPE_UNKNOWN, + GB_INTERFACE_TYPE_DUMMY, + GB_INTERFACE_TYPE_UNIPRO, + GB_INTERFACE_TYPE_GREYBUS, +}; + +#define GB_INTERFACE_QUIRK_NO_CPORT_FEATURES BIT(0) +#define GB_INTERFACE_QUIRK_NO_INIT_STATUS BIT(1) +#define GB_INTERFACE_QUIRK_NO_GMP_IDS BIT(2) +#define GB_INTERFACE_QUIRK_FORCED_DISABLE BIT(3) +#define GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH BIT(4) +#define GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE BIT(5) +#define GB_INTERFACE_QUIRK_NO_PM BIT(6) + +struct gb_interface { + struct device dev; + struct gb_control *control; + + struct list_head bundles; + struct list_head module_node; + struct list_head manifest_descs; + u8 interface_id; /* Physical location within the Endo */ + u8 device_id; + u8 features; /* Feature flags set in the manifest */ + + enum gb_interface_type type; + + u32 ddbl1_manufacturer_id; + u32 ddbl1_product_id; + u32 vendor_id; + u32 product_id; + u64 serial_number; + + struct gb_host_device *hd; + struct gb_module *module; + + unsigned long quirks; + + struct mutex mutex; + + bool disconnected; + + bool ejected; + bool removed; + bool active; + bool enabled; + bool mode_switch; + bool dme_read; + + struct work_struct mode_switch_work; + struct completion mode_switch_completion; +}; +#define to_gb_interface(d) container_of(d, struct gb_interface, dev) + +struct gb_interface *gb_interface_create(struct gb_module *module, + u8 interface_id); +int gb_interface_activate(struct gb_interface *intf); +void gb_interface_deactivate(struct gb_interface *intf); +int gb_interface_enable(struct gb_interface *intf); +void gb_interface_disable(struct gb_interface *intf); +int gb_interface_add(struct gb_interface *intf); +void gb_interface_del(struct gb_interface *intf); +void gb_interface_put(struct gb_interface *intf); +void gb_interface_mailbox_event(struct gb_interface *intf, u16 result, + u32 mailbox); + +int gb_interface_request_mode_switch(struct gb_interface *intf); + +#endif /* __INTERFACE_H */ diff --git a/include/linux/greybus/manifest.h b/include/linux/greybus/manifest.h new file mode 100644 index 000000000000..830301b7a8bc --- /dev/null +++ b/include/linux/greybus/manifest.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus manifest parsing + * + * Copyright 2014 Google Inc. + * Copyright 2014 Linaro Ltd. 
+ */ + +#ifndef __MANIFEST_H +#define __MANIFEST_H + +#include <linux/types.h> + +struct gb_interface; +bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size); + +#endif /* __MANIFEST_H */ diff --git a/include/linux/greybus/module.h b/include/linux/greybus/module.h new file mode 100644 index 000000000000..47b839af145d --- /dev/null +++ b/include/linux/greybus/module.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus Module code + * + * Copyright 2016 Google Inc. + * Copyright 2016 Linaro Ltd. + */ + +#ifndef __MODULE_H +#define __MODULE_H + +#include <linux/types.h> +#include <linux/device.h> + +struct gb_module { + struct device dev; + struct gb_host_device *hd; + + struct list_head hd_node; + + u8 module_id; + size_t num_interfaces; + + bool disconnected; + + struct gb_interface *interfaces[0]; +}; +#define to_gb_module(d) container_of(d, struct gb_module, dev) + +struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id, + size_t num_interfaces); +int gb_module_add(struct gb_module *module); +void gb_module_del(struct gb_module *module); +void gb_module_put(struct gb_module *module); + +#endif /* __MODULE_H */ diff --git a/include/linux/greybus/operation.h b/include/linux/greybus/operation.h new file mode 100644 index 000000000000..cb8e4ef45222 --- /dev/null +++ b/include/linux/greybus/operation.h @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus operations + * + * Copyright 2014 Google Inc. + * Copyright 2014 Linaro Ltd. + */ + +#ifndef __OPERATION_H +#define __OPERATION_H + +#include <linux/completion.h> +#include <linux/kref.h> +#include <linux/timer.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +struct gb_host_device; +struct gb_operation; + +/* The default amount of time a request is given to complete */ +#define GB_OPERATION_TIMEOUT_DEFAULT 1000 /* milliseconds */ + +/* + * The top bit of the type in an operation message header indicates + * whether the message is a request (bit clear) or response (bit set) + */ +#define GB_MESSAGE_TYPE_RESPONSE ((u8)0x80) + +enum gb_operation_result { + GB_OP_SUCCESS = 0x00, + GB_OP_INTERRUPTED = 0x01, + GB_OP_TIMEOUT = 0x02, + GB_OP_NO_MEMORY = 0x03, + GB_OP_PROTOCOL_BAD = 0x04, + GB_OP_OVERFLOW = 0x05, + GB_OP_INVALID = 0x06, + GB_OP_RETRY = 0x07, + GB_OP_NONEXISTENT = 0x08, + GB_OP_UNKNOWN_ERROR = 0xfe, + GB_OP_MALFUNCTION = 0xff, +}; + +#define GB_OPERATION_MESSAGE_SIZE_MIN sizeof(struct gb_operation_msg_hdr) +#define GB_OPERATION_MESSAGE_SIZE_MAX U16_MAX + +/* + * Protocol code should only examine the payload and payload_size fields, and + * host-controller drivers may use the hcpriv field. All other fields are + * intended to be private to the operations core code. + */ +struct gb_message { + struct gb_operation *operation; + struct gb_operation_msg_hdr *header; + + void *payload; + size_t payload_size; + + void *buffer; + + void *hcpriv; +}; + +#define GB_OPERATION_FLAG_INCOMING BIT(0) +#define GB_OPERATION_FLAG_UNIDIRECTIONAL BIT(1) +#define GB_OPERATION_FLAG_SHORT_RESPONSE BIT(2) +#define GB_OPERATION_FLAG_CORE BIT(3) + +#define GB_OPERATION_FLAG_USER_MASK (GB_OPERATION_FLAG_SHORT_RESPONSE | \ + GB_OPERATION_FLAG_UNIDIRECTIONAL) + +/* + * A Greybus operation is a remote procedure call performed over a + * connection between two UniPro interfaces. + * + * Every operation consists of a request message sent to the other + * end of the connection coupled with a reply message returned to + * the sender. 
Every operation has a type, whose interpretation is + * dependent on the protocol associated with the connection. + * + * Only four things in an operation structure are intended to be + * directly usable by protocol handlers: the operation's connection + * pointer; the operation type; the request message payload (and + * size); and the response message payload (and size). Note that a + * message with a 0-byte payload has a null message payload pointer. + * + * In addition, every operation has a result, which is an errno + * value. Protocol handlers access the operation result using + * gb_operation_result(). + */ +typedef void (*gb_operation_callback)(struct gb_operation *); +struct gb_operation { + struct gb_connection *connection; + struct gb_message *request; + struct gb_message *response; + + unsigned long flags; + u8 type; + u16 id; + int errno; /* Operation result */ + + struct work_struct work; + gb_operation_callback callback; + struct completion completion; + struct timer_list timer; + + struct kref kref; + atomic_t waiters; + + int active; + struct list_head links; /* connection->operations */ + + void *private; +}; + +static inline bool +gb_operation_is_incoming(struct gb_operation *operation) +{ + return operation->flags & GB_OPERATION_FLAG_INCOMING; +} + +static inline bool +gb_operation_is_unidirectional(struct gb_operation *operation) +{ + return operation->flags & GB_OPERATION_FLAG_UNIDIRECTIONAL; +} + +static inline bool +gb_operation_short_response_allowed(struct gb_operation *operation) +{ + return operation->flags & GB_OPERATION_FLAG_SHORT_RESPONSE; +} + +static inline bool gb_operation_is_core(struct gb_operation *operation) +{ + return operation->flags & GB_OPERATION_FLAG_CORE; +} + +void gb_connection_recv(struct gb_connection *connection, + void *data, size_t size); + +int gb_operation_result(struct gb_operation *operation); + +size_t gb_operation_get_payload_size_max(struct gb_connection *connection); +struct gb_operation * +gb_operation_create_flags(struct gb_connection *connection, + u8 type, size_t request_size, + size_t response_size, unsigned long flags, + gfp_t gfp); + +static inline struct gb_operation * +gb_operation_create(struct gb_connection *connection, + u8 type, size_t request_size, + size_t response_size, gfp_t gfp) +{ + return gb_operation_create_flags(connection, type, request_size, + response_size, 0, gfp); +} + +struct gb_operation * +gb_operation_create_core(struct gb_connection *connection, + u8 type, size_t request_size, + size_t response_size, unsigned long flags, + gfp_t gfp); + +void gb_operation_get(struct gb_operation *operation); +void gb_operation_put(struct gb_operation *operation); + +bool gb_operation_response_alloc(struct gb_operation *operation, + size_t response_size, gfp_t gfp); + +int gb_operation_request_send(struct gb_operation *operation, + gb_operation_callback callback, + unsigned int timeout, + gfp_t gfp); +int gb_operation_request_send_sync_timeout(struct gb_operation *operation, + unsigned int timeout); +static inline int +gb_operation_request_send_sync(struct gb_operation *operation) +{ + return gb_operation_request_send_sync_timeout(operation, + GB_OPERATION_TIMEOUT_DEFAULT); +} + +void gb_operation_cancel(struct gb_operation *operation, int errno); +void gb_operation_cancel_incoming(struct gb_operation *operation, int errno); + +void greybus_message_sent(struct gb_host_device *hd, + struct gb_message *message, int status); + +int gb_operation_sync_timeout(struct gb_connection *connection, int type, + void *request, 
int request_size, + void *response, int response_size, + unsigned int timeout); +int gb_operation_unidirectional_timeout(struct gb_connection *connection, + int type, void *request, int request_size, + unsigned int timeout); + +static inline int gb_operation_sync(struct gb_connection *connection, int type, + void *request, int request_size, + void *response, int response_size) +{ + return gb_operation_sync_timeout(connection, type, + request, request_size, response, response_size, + GB_OPERATION_TIMEOUT_DEFAULT); +} + +static inline int gb_operation_unidirectional(struct gb_connection *connection, + int type, void *request, int request_size) +{ + return gb_operation_unidirectional_timeout(connection, type, + request, request_size, GB_OPERATION_TIMEOUT_DEFAULT); +} + +static inline void *gb_operation_get_data(struct gb_operation *operation) +{ + return operation->private; +} + +static inline void gb_operation_set_data(struct gb_operation *operation, + void *data) +{ + operation->private = data; +} + +int gb_operation_init(void); +void gb_operation_exit(void); + +#endif /* !__OPERATION_H */ diff --git a/include/linux/greybus/svc.h b/include/linux/greybus/svc.h new file mode 100644 index 000000000000..5afaf5f06856 --- /dev/null +++ b/include/linux/greybus/svc.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus SVC code + * + * Copyright 2015 Google Inc. + * Copyright 2015 Linaro Ltd. + */ + +#ifndef __SVC_H +#define __SVC_H + +#include <linux/types.h> +#include <linux/device.h> + +struct gb_svc_l2_timer_cfg; + +#define GB_SVC_CPORT_FLAG_E2EFC BIT(0) +#define GB_SVC_CPORT_FLAG_CSD_N BIT(1) +#define GB_SVC_CPORT_FLAG_CSV_N BIT(2) + +enum gb_svc_state { + GB_SVC_STATE_RESET, + GB_SVC_STATE_PROTOCOL_VERSION, + GB_SVC_STATE_SVC_HELLO, +}; + +enum gb_svc_watchdog_bite { + GB_SVC_WATCHDOG_BITE_RESET_UNIPRO = 0, + GB_SVC_WATCHDOG_BITE_PANIC_KERNEL, +}; + +struct gb_svc_watchdog; + +struct svc_debugfs_pwrmon_rail { + u8 id; + struct gb_svc *svc; +}; + +struct gb_svc { + struct device dev; + + struct gb_host_device *hd; + struct gb_connection *connection; + enum gb_svc_state state; + struct ida device_id_map; + struct workqueue_struct *wq; + + u16 endo_id; + u8 ap_intf_id; + + u8 protocol_major; + u8 protocol_minor; + + struct gb_svc_watchdog *watchdog; + enum gb_svc_watchdog_bite action; + + struct dentry *debugfs_dentry; + struct svc_debugfs_pwrmon_rail *pwrmon_rails; +}; +#define to_gb_svc(d) container_of(d, struct gb_svc, dev) + +struct gb_svc *gb_svc_create(struct gb_host_device *hd); +int gb_svc_add(struct gb_svc *svc); +void gb_svc_del(struct gb_svc *svc); +void gb_svc_put(struct gb_svc *svc); + +int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id, + u8 measurement_type, u32 *value); +int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id); +int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id, + u8 intf2_id, u8 dev2_id); +void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id); +int gb_svc_connection_create(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, + u8 intf2_id, u16 cport2_id, u8 cport_flags); +void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, + u8 intf2_id, u16 cport2_id); +int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id); +int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable); +int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable); +int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable); +int gb_svc_intf_activate(struct 
gb_svc *svc, u8 intf_id, u8 *intf_type); +int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id); + +int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, + u32 *value); +int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, + u32 value); +int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series, + u8 tx_mode, u8 tx_gear, u8 tx_nlanes, + u8 tx_amplitude, u8 tx_hs_equalizer, + u8 rx_mode, u8 rx_gear, u8 rx_nlanes, + u8 flags, u32 quirks, + struct gb_svc_l2_timer_cfg *local, + struct gb_svc_l2_timer_cfg *remote); +int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id); +int gb_svc_ping(struct gb_svc *svc); +int gb_svc_watchdog_create(struct gb_svc *svc); +void gb_svc_watchdog_destroy(struct gb_svc *svc); +bool gb_svc_watchdog_enabled(struct gb_svc *svc); +int gb_svc_watchdog_enable(struct gb_svc *svc); +int gb_svc_watchdog_disable(struct gb_svc *svc); + +int gb_svc_protocol_init(void); +void gb_svc_protocol_exit(void); + +#endif /* __SVC_H */ diff --git a/include/linux/hid.h b/include/linux/hid.h index d770ab1a0479..cd41f209043f 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -1154,29 +1154,32 @@ int hid_pidff_init(struct hid_device *hid); #define hid_pidff_init NULL #endif -#define dbg_hid(format, arg...) \ +#define dbg_hid(fmt, ...) \ do { \ if (hid_debug) \ - printk(KERN_DEBUG "%s: " format, __FILE__, ##arg); \ + printk(KERN_DEBUG "%s: " fmt, __FILE__, ##__VA_ARGS__); \ } while (0) -#define hid_printk(level, hid, fmt, arg...) \ - dev_printk(level, &(hid)->dev, fmt, ##arg) -#define hid_emerg(hid, fmt, arg...) \ - dev_emerg(&(hid)->dev, fmt, ##arg) -#define hid_crit(hid, fmt, arg...) \ - dev_crit(&(hid)->dev, fmt, ##arg) -#define hid_alert(hid, fmt, arg...) \ - dev_alert(&(hid)->dev, fmt, ##arg) -#define hid_err(hid, fmt, arg...) \ - dev_err(&(hid)->dev, fmt, ##arg) -#define hid_notice(hid, fmt, arg...) \ - dev_notice(&(hid)->dev, fmt, ##arg) -#define hid_warn(hid, fmt, arg...) \ - dev_warn(&(hid)->dev, fmt, ##arg) -#define hid_info(hid, fmt, arg...) \ - dev_info(&(hid)->dev, fmt, ##arg) -#define hid_dbg(hid, fmt, arg...) \ - dev_dbg(&(hid)->dev, fmt, ##arg) +#define hid_err(hid, fmt, ...) \ + dev_err(&(hid)->dev, fmt, ##__VA_ARGS__) +#define hid_notice(hid, fmt, ...) \ + dev_notice(&(hid)->dev, fmt, ##__VA_ARGS__) +#define hid_warn(hid, fmt, ...) \ + dev_warn(&(hid)->dev, fmt, ##__VA_ARGS__) +#define hid_info(hid, fmt, ...) \ + dev_info(&(hid)->dev, fmt, ##__VA_ARGS__) +#define hid_dbg(hid, fmt, ...) \ + dev_dbg(&(hid)->dev, fmt, ##__VA_ARGS__) + +#define hid_err_once(hid, fmt, ...) \ + dev_err_once(&(hid)->dev, fmt, ##__VA_ARGS__) +#define hid_notice_once(hid, fmt, ...) \ + dev_notice_once(&(hid)->dev, fmt, ##__VA_ARGS__) +#define hid_warn_once(hid, fmt, ...) \ + dev_warn_once(&(hid)->dev, fmt, ##__VA_ARGS__) +#define hid_info_once(hid, fmt, ...) \ + dev_info_once(&(hid)->dev, fmt, ##__VA_ARGS__) +#define hid_dbg_once(hid, fmt, ...) 
\ + dev_dbg_once(&(hid)->dev, fmt, ##__VA_ARGS__) #endif diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 7ef56dc18050..3fec513b9c00 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -84,15 +84,12 @@ * @notifiers: count of active mmu notifiers */ struct hmm { - struct mm_struct *mm; - struct kref kref; + struct mmu_notifier mmu_notifier; spinlock_t ranges_lock; struct list_head ranges; struct list_head mirrors; - struct mmu_notifier mmu_notifier; struct rw_semaphore mirrors_sem; wait_queue_head_t wq; - struct rcu_head rcu; long notifiers; }; @@ -158,13 +155,11 @@ enum hmm_pfn_value_e { * @values: pfn value for some special case (none, special, error, ...) * @default_flags: default flags for the range (write, read, ... see hmm doc) * @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter - * @page_shift: device virtual address shift value (should be >= PAGE_SHIFT) * @pfn_shifts: pfn shift value (should be <= PAGE_SHIFT) * @valid: pfns array did not change since it has been fill by an HMM function */ struct hmm_range { struct hmm *hmm; - struct vm_area_struct *vma; struct list_head list; unsigned long start; unsigned long end; @@ -173,32 +168,11 @@ struct hmm_range { const uint64_t *values; uint64_t default_flags; uint64_t pfn_flags_mask; - uint8_t page_shift; uint8_t pfn_shift; bool valid; }; /* - * hmm_range_page_shift() - return the page shift for the range - * @range: range being queried - * Return: page shift (page size = 1 << page shift) for the range - */ -static inline unsigned hmm_range_page_shift(const struct hmm_range *range) -{ - return range->page_shift; -} - -/* - * hmm_range_page_size() - return the page size for the range - * @range: range being queried - * Return: page size for the range in bytes - */ -static inline unsigned long hmm_range_page_size(const struct hmm_range *range) -{ - return 1UL << hmm_range_page_shift(range); -} - -/* * hmm_range_wait_until_valid() - wait for range to be valid * @range: range affected by invalidation to wait on * @timeout: time out for wait in ms (ie abort wait after that period of time) @@ -291,40 +265,6 @@ static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range, } /* - * Old API: - * hmm_pfn_to_page() - * hmm_pfn_to_pfn() - * hmm_pfn_from_page() - * hmm_pfn_from_pfn() - * - * This are the OLD API please use new API, it is here to avoid cross-tree - * merge painfullness ie we convert things to new API in stages. - */ -static inline struct page *hmm_pfn_to_page(const struct hmm_range *range, - uint64_t pfn) -{ - return hmm_device_entry_to_page(range, pfn); -} - -static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range, - uint64_t pfn) -{ - return hmm_device_entry_to_pfn(range, pfn); -} - -static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range, - struct page *page) -{ - return hmm_device_entry_from_page(range, page); -} - -static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range, - unsigned long pfn) -{ - return hmm_device_entry_from_pfn(range, pfn); -} - -/* * Mirroring: how to synchronize device page table with CPU page table. 
* * A device driver that is participating in HMM mirroring must always @@ -375,29 +315,6 @@ static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range, struct hmm_mirror; /* - * enum hmm_update_event - type of update - * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why) - */ -enum hmm_update_event { - HMM_UPDATE_INVALIDATE, -}; - -/* - * struct hmm_update - HMM update information for callback - * - * @start: virtual start address of the range to update - * @end: virtual end address of the range to update - * @event: event triggering the update (what is happening) - * @blockable: can the callback block/sleep ? - */ -struct hmm_update { - unsigned long start; - unsigned long end; - enum hmm_update_event event; - bool blockable; -}; - -/* * struct hmm_mirror_ops - HMM mirror device operations callback * * @update: callback to update range on a device @@ -417,9 +334,9 @@ struct hmm_mirror_ops { /* sync_cpu_device_pagetables() - synchronize page tables * * @mirror: pointer to struct hmm_mirror - * @update: update information (see struct hmm_update) - * Return: -EAGAIN if update.blockable false and callback need to - * block, 0 otherwise. + * @update: update information (see struct mmu_notifier_range) + * Return: -EAGAIN if mmu_notifier_range_blockable(update) is false + * and callback needs to block, 0 otherwise. * * This callback ultimately originates from mmu_notifiers when the CPU * page table is updated. The device driver must update its page table @@ -430,8 +347,9 @@ struct hmm_mirror_ops { * page tables are completely updated (TLBs flushed, etc); this is a * synchronous call. */ - int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror, - const struct hmm_update *update); + int (*sync_cpu_device_pagetables)( + struct hmm_mirror *mirror, + const struct mmu_notifier_range *update); }; /* @@ -457,20 +375,24 @@ void hmm_mirror_unregister(struct hmm_mirror *mirror); /* * Please see Documentation/vm/hmm.rst for how to use the range API. */ -int hmm_range_register(struct hmm_range *range, - struct hmm_mirror *mirror, - unsigned long start, - unsigned long end, - unsigned page_shift); +int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror); void hmm_range_unregister(struct hmm_range *range); -long hmm_range_snapshot(struct hmm_range *range); -long hmm_range_fault(struct hmm_range *range, bool block); + +/* + * Retry fault if non-blocking, drop mmap_sem and return -EAGAIN in that case. + */ +#define HMM_FAULT_ALLOW_RETRY (1 << 0) + +/* Don't fault in missing PTEs, just snapshot the current state. */ +#define HMM_FAULT_SNAPSHOT (1 << 1) + +long hmm_range_fault(struct hmm_range *range, unsigned int flags); + long hmm_range_dma_map(struct hmm_range *range, struct device *device, dma_addr_t *daddrs, - bool block); + unsigned int flags); long hmm_range_dma_unmap(struct hmm_range *range, - struct vm_area_struct *vma, struct device *device, dma_addr_t *daddrs, bool dirty); @@ -484,13 +406,6 @@ long hmm_range_dma_unmap(struct hmm_range *range, */ #define HMM_RANGE_DEFAULT_TIMEOUT 1000 -/* Below are for HMM internal use only! Not to be used by device driver! 
*/ -static inline void hmm_mm_init(struct mm_struct *mm) -{ - mm->hmm = NULL; -} -#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */ -static inline void hmm_mm_init(struct mm_struct *mm) {} #endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */ #endif /* LINUX_HMM_H */ diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 45ede62aa85b..61c9ffd89b05 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -267,6 +267,15 @@ static inline bool thp_migration_supported(void) return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION); } +static inline struct list_head *page_deferred_list(struct page *page) +{ + /* + * Global or memcg deferred list in the second tail pages is + * occupied by compound_head. + */ + return &page[2].deferred_list; +} + #else /* CONFIG_TRANSPARENT_HUGEPAGE */ #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index edfca4278319..53fc34f930d0 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -454,7 +454,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, static inline struct hstate *page_hstate(struct page *page) { VM_BUG_ON_PAGE(!PageHuge(page), page); - return size_to_hstate(PAGE_SIZE << compound_order(page)); + return size_to_hstate(page_size(page)); } static inline unsigned hstate_index_to_shift(unsigned index) diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 6256cc34c4a6..b4a017093b69 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -245,7 +245,10 @@ struct vmbus_channel_offer { } pipe; } u; /* - * The sub_channel_index is defined in win8. + * The sub_channel_index is defined in Win8: a value of zero means a + * primary channel and a value of non-zero means a sub-channel. + * + * Before Win8, the field is reserved, meaning it's always zero. */ u16 sub_channel_index; u16 reserved3; @@ -423,6 +426,9 @@ enum vmbus_channel_message_type { CHANNELMSG_COUNT }; +/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */ +#define INVALID_RELID U32_MAX + struct vmbus_channel_message_header { enum vmbus_channel_message_type msgtype; u32 padding; @@ -934,6 +940,11 @@ static inline bool is_hvsock_channel(const struct vmbus_channel *c) VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER); } +static inline bool is_sub_channel(const struct vmbus_channel *c) +{ + return c->offermsg.offer.sub_channel_index != 0; +} + static inline void set_channel_affinity_state(struct vmbus_channel *c, enum hv_numa_policy policy) { @@ -1149,6 +1160,9 @@ struct hv_driver { int (*remove)(struct hv_device *); void (*shutdown)(struct hv_device *); + int (*suspend)(struct hv_device *); + int (*resume)(struct hv_device *); + }; /* Base device object */ @@ -1578,4 +1592,33 @@ hv_pkt_iter_next(struct vmbus_channel *channel, for (pkt = hv_pkt_iter_first(channel); pkt; \ pkt = hv_pkt_iter_next(channel, pkt)) +/* + * Interface for passing data between SR-IOV PF and VF drivers. The VF driver + * sends requests to read and write blocks. Each block must be 128 bytes or + * smaller. Optionally, the VF driver can register a callback function which + * will be invoked when the host says that one or more of the first 64 block + * IDs is "invalid" which means that the VF driver should reread them. 
+ */ +#define HV_CONFIG_BLOCK_SIZE_MAX 128 + +int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len, + unsigned int block_id, unsigned int *bytes_returned); +int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len, + unsigned int block_id); +int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context, + void (*block_invalidate)(void *context, + u64 block_mask)); + +struct hyperv_pci_block_ops { + int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len, + unsigned int block_id, unsigned int *bytes_returned); + int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len, + unsigned int block_id); + int (*reg_blk_invalidate)(struct pci_dev *dev, void *context, + void (*block_invalidate)(void *context, + u64 block_mask)); +}; + +extern struct hyperv_pci_block_ops hvpci_block_ops; + #endif /* _HYPERV_H */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index c0a78c069117..1361637c369d 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -473,7 +473,7 @@ extern struct i2c_client * devm_i2c_new_dummy_device(struct device *dev, struct i2c_adapter *adap, u16 address); extern struct i2c_client * -i2c_new_secondary_device(struct i2c_client *client, +i2c_new_ancillary_device(struct i2c_client *client, const char *name, u16 default_addr); diff --git a/include/linux/idle_inject.h b/include/linux/idle_inject.h index bdc0293fb6cb..a445cd1a36c5 100644 --- a/include/linux/idle_inject.h +++ b/include/linux/idle_inject.h @@ -20,10 +20,10 @@ int idle_inject_start(struct idle_inject_device *ii_dev); void idle_inject_stop(struct idle_inject_device *ii_dev); void idle_inject_set_duration(struct idle_inject_device *ii_dev, - unsigned int run_duration_ms, - unsigned int idle_duration_ms); + unsigned int run_duration_us, + unsigned int idle_duration_us); void idle_inject_get_duration(struct idle_inject_device *ii_dev, - unsigned int *run_duration_ms, - unsigned int *idle_duration_ms); + unsigned int *run_duration_us, + unsigned int *idle_duration_us); #endif /* __IDLE_INJECT_H__ */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 8511fadc0935..7d3f2ced92d1 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -881,6 +881,14 @@ struct ieee80211_tpc_report_ie { u8 link_margin; } __packed; +#define IEEE80211_ADDBA_EXT_FRAG_LEVEL_MASK GENMASK(2, 1) +#define IEEE80211_ADDBA_EXT_FRAG_LEVEL_SHIFT 1 +#define IEEE80211_ADDBA_EXT_NO_FRAG BIT(0) + +struct ieee80211_addba_ext_ie { + u8 data; +} __packed; + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; @@ -973,6 +981,8 @@ struct ieee80211_mgmt { __le16 capab; __le16 timeout; __le16 start_seq_num; + /* followed by BA Extension */ + u8 variable[0]; } __packed addba_req; struct{ u8 action_code; @@ -1626,6 +1636,18 @@ struct ieee80211_he_operation { } __packed; /** + * struct ieee80211_he_spr - HE spatial reuse element + * + * This structure is the "HE spatial reuse element" element as + * described in P802.11ax_D4.0 section 9.4.2.241 + */ +struct ieee80211_he_spr { + u8 he_sr_control; + /* Optional 0 to 19 bytes: depends on @he_sr_control */ + u8 optional[0]; +} __packed; + +/** * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field * * This structure is the "MU AC Parameter Record" fields as @@ -2033,8 +2055,8 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size * @he_oper_ie: byte data of the He Operations IE, stating from the the byte 
* after the ext ID byte. It is assumed that he_oper_ie has at least - * sizeof(struct ieee80211_he_operation) bytes, checked already in - * ieee802_11_parse_elems_crc() + * sizeof(struct ieee80211_he_operation) bytes, the caller must have + * validated this. * @return the actual size of the IE data (not including header), or 0 on error */ static inline u8 @@ -2063,6 +2085,42 @@ ieee80211_he_oper_size(const u8 *he_oper_ie) return oper_len; } +/* HE Spatial Reuse defines */ +#define IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT 0x4 +#define IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT 0x8 + +/* + * ieee80211_he_spr_size - calculate 802.11ax HE Spatial Reuse IE size + * @he_spr_ie: byte data of the He Spatial Reuse IE, starting from the byte + * after the ext ID byte. It is assumed that he_spr_ie has at least + * sizeof(struct ieee80211_he_spr) bytes, the caller must have validated + * this. + * @return the actual size of the IE data (not including header), or 0 on error + */ +static inline u8 +ieee80211_he_spr_size(const u8 *he_spr_ie) +{ + struct ieee80211_he_spr *he_spr = (void *)he_spr_ie; + u8 spr_len = sizeof(struct ieee80211_he_spr); + u32 he_spr_params; + + /* Make sure the input is not NULL */ + if (!he_spr_ie) + return 0; + + /* Calc required length */ + he_spr_params = le32_to_cpu(he_spr->he_sr_control); + if (he_spr_params & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT) + spr_len++; + if (he_spr_params & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) + spr_len += 18; + + /* Add the first byte (extension ID) to the total length */ + spr_len++; + + return spr_len; +} + /* Authentication algorithms */ #define WLAN_AUTH_OPEN 0 #define WLAN_AUTH_SHARED_KEY 1 @@ -2485,6 +2543,7 @@ enum ieee80211_eid_ext { WLAN_EID_EXT_HE_OPERATION = 36, WLAN_EID_EXT_UORA = 37, WLAN_EID_EXT_HE_MU_EDCA = 38, + WLAN_EID_EXT_HE_SPR = 39, WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME = 52, WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION = 55, WLAN_EID_EXT_NON_INHERITANCE = 56, diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index 0c636b9fe8d7..bb331e6356a9 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -10,7 +10,8 @@ #include <linux/iio/iio.h> #include <linux/irqreturn.h> -#include <linux/mfd/cros_ec.h> +#include <linux/platform_data/cros_ec_commands.h> +#include <linux/platform_data/cros_ec_proto.h> enum { CROS_EC_SENSOR_X, @@ -62,14 +63,20 @@ struct cros_ec_sensors_core_state { enum motionsensor_type type; enum motionsensor_location loc; - s16 calib[CROS_EC_SENSOR_MAX_AXIS]; - + struct calib_data { + s16 offset; + u16 scale; + } calib[CROS_EC_SENSOR_MAX_AXIS]; + s8 sign[CROS_EC_SENSOR_MAX_AXIS]; u8 samples[CROS_EC_SAMPLE_SIZE]; int (*read_ec_sensors_data)(struct iio_dev *indio_dev, unsigned long scan_mask, s16 *data); int curr_sampl_freq; + + /* Table of known available frequencies : 0, Min and Max in mHz */ + int frequencies[3]; }; /** @@ -151,6 +158,24 @@ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st, int *val, int *val2, long mask); /** + * cros_ec_sensors_core_read_avail() - get available values + * @indio_dev: pointer to state information for device + * @chan: channel specification structure table + * @vals: list of available values + * @type: type of data returned + * @length: number of data returned in the array + * @mask: specifies which values to be requested + * + * Return: an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST + */ +int cros_ec_sensors_core_read_avail(struct
iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, + int *type, + int *length, + long mask); + +/** * cros_ec_sensors_core_write() - function to write a value to the sensor * @st: pointer to state information for device * @chan: channel specification structure table diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 2948ac99e2da..686be532f4cb 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -16,11 +16,16 @@ #include <linux/iio/trigger.h> #include <linux/bitops.h> #include <linux/regulator/consumer.h> +#include <linux/regmap.h> #include <linux/platform_data/st_sensors_pdata.h> -#define ST_SENSORS_TX_MAX_LENGTH 2 -#define ST_SENSORS_RX_MAX_LENGTH 6 +/* + * Buffer size max case: 2bytes per channel, 3 channels in total + + * 8bytes timestamp channel (s64) + */ +#define ST_SENSORS_MAX_BUFFER_SIZE (ALIGN(2 * 3, sizeof(s64)) + \ + sizeof(s64)) #define ST_SENSORS_ODR_LIST_MAX 10 #define ST_SENSORS_FULLSCALE_AVL_MAX 10 @@ -170,36 +175,6 @@ struct st_sensor_data_ready_irq { }; /** - * struct st_sensor_transfer_buffer - ST sensor device I/O buffer - * @buf_lock: Mutex to protect rx and tx buffers. - * @tx_buf: Buffer used by SPI transfer function to send data to the sensors. - * This buffer is used to avoid DMA not-aligned issue. - * @rx_buf: Buffer used by SPI transfer to receive data from sensors. - * This buffer is used to avoid DMA not-aligned issue. - */ -struct st_sensor_transfer_buffer { - struct mutex buf_lock; - u8 rx_buf[ST_SENSORS_RX_MAX_LENGTH]; - u8 tx_buf[ST_SENSORS_TX_MAX_LENGTH] ____cacheline_aligned; -}; - -/** - * struct st_sensor_transfer_function - ST sensor device I/O function - * @read_byte: Function used to read one byte. - * @write_byte: Function used to write one byte. - * @read_multiple_byte: Function used to read multiple byte. - */ -struct st_sensor_transfer_function { - int (*read_byte) (struct st_sensor_transfer_buffer *tb, - struct device *dev, u8 reg_addr, u8 *res_byte); - int (*write_byte) (struct st_sensor_transfer_buffer *tb, - struct device *dev, u8 reg_addr, u8 data); - int (*read_multiple_byte) (struct st_sensor_transfer_buffer *tb, - struct device *dev, u8 reg_addr, int len, u8 *data, - bool multiread_bit); -}; - -/** * struct st_sensor_settings - ST specific sensor settings * @wai: Contents of WhoAmI register. * @wai_addr: The address of WhoAmI register. @@ -242,19 +217,17 @@ struct st_sensor_settings { * @current_fullscale: Maximum range of measure by the sensor. * @vdd: Pointer to sensor's Vdd power supply * @vdd_io: Pointer to sensor's Vdd-IO power supply + * @regmap: Pointer to specific sensor regmap configuration. * @enabled: Status of the sensor (false->off, true->on). - * @multiread_bit: Use or not particular bit for [I2C/SPI] multiread. - * @buffer_data: Data used by buffer part. * @odr: Output data rate of the sensor [Hz]. * num_data_channels: Number of data channels used in buffer. * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). * @int_pin_open_drain: Set the interrupt/DRDY to open drain. - * @get_irq_data_ready: Function to get the IRQ used for data ready signal. - * @tf: Transfer function structure used by I/O operations. - * @tb: Transfer buffers and mutex used by I/O operations. + * @irq: the IRQ number. * @edge_irq: the IRQ triggers on edges and need special handling. * @hw_irq_trigger: if we're using the hardware interrupt on the sensor. * @hw_timestamp: Latest timestamp from the interrupt handler, when in use. 
+ * @buffer_data: Data used by buffer part. */ struct st_sensor_data { struct device *dev; @@ -264,26 +237,22 @@ struct st_sensor_data { struct st_sensor_fullscale_avl *current_fullscale; struct regulator *vdd; struct regulator *vdd_io; + struct regmap *regmap; bool enabled; - bool multiread_bit; - - char *buffer_data; unsigned int odr; unsigned int num_data_channels; u8 drdy_int_pin; bool int_pin_open_drain; - - unsigned int (*get_irq_data_ready) (struct iio_dev *indio_dev); - - const struct st_sensor_transfer_function *tf; - struct st_sensor_transfer_buffer tb; + int irq; bool edge_irq; bool hw_irq_trigger; s64 hw_timestamp; + + char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned; }; #ifdef CONFIG_IIO_BUFFER @@ -334,8 +303,11 @@ int st_sensors_set_fullscale_by_gain(struct iio_dev *indio_dev, int scale); int st_sensors_read_info_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *ch, int *val); -int st_sensors_check_device_support(struct iio_dev *indio_dev, - int num_sensors_list, const struct st_sensor_settings *sensor_settings); +int st_sensors_get_settings_index(const char *name, + const struct st_sensor_settings *list, + const int list_length); + +int st_sensors_verify_id(struct iio_dev *indio_dev); ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev, struct device_attribute *attr, char *buf); diff --git a/include/linux/iio/common/st_sensors_i2c.h b/include/linux/iio/common/st_sensors_i2c.h index 5ada89944698..01e424e2af4f 100644 --- a/include/linux/iio/common/st_sensors_i2c.h +++ b/include/linux/iio/common/st_sensors_i2c.h @@ -14,8 +14,8 @@ #include <linux/iio/common/st_sensors.h> #include <linux/of.h> -void st_sensors_i2c_configure(struct iio_dev *indio_dev, - struct i2c_client *client, struct st_sensor_data *sdata); +int st_sensors_i2c_configure(struct iio_dev *indio_dev, + struct i2c_client *client); #ifdef CONFIG_ACPI int st_sensors_match_acpi_device(struct device *dev); diff --git a/include/linux/iio/common/st_sensors_spi.h b/include/linux/iio/common/st_sensors_spi.h index 6020f7167859..90b25f087f06 100644 --- a/include/linux/iio/common/st_sensors_spi.h +++ b/include/linux/iio/common/st_sensors_spi.h @@ -13,7 +13,7 @@ #include <linux/spi/spi.h> #include <linux/iio/common/st_sensors.h> -void st_sensors_spi_configure(struct iio_dev *indio_dev, - struct spi_device *spi, struct st_sensor_data *sdata); +int st_sensors_spi_configure(struct iio_dev *indio_dev, + struct spi_device *spi); #endif /* ST_SENSORS_SPI_H */ diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index 3428d06b2f44..4c53815bb729 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -26,6 +26,7 @@ struct adis_burst; * struct adis_data - ADIS chip variant specific data * @read_delay: SPI delay for read operations in us * @write_delay: SPI delay for write operations in us + * @cs_change_delay: SPI delay between CS changes in us * @glob_cmd_reg: Register address of the GLOB_CMD register * @msc_ctrl_reg: Register address of the MSC_CTRL register * @diag_stat_reg: Register address of the DIAG_STAT register @@ -35,6 +36,7 @@ struct adis_burst; struct adis_data { unsigned int read_delay; unsigned int write_delay; + unsigned int cs_change_delay; unsigned int glob_cmd_reg; unsigned int msc_ctrl_reg; diff --git a/include/linux/input.h b/include/linux/input.h index 510e78558c10..94f277cd806a 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -21,6 +21,8 @@ #include <linux/timer.h> #include <linux/mod_devicetable.h> +struct 
input_dev_poller; + /** * struct input_value - input value representation * @type: type of value (EV_KEY, EV_ABS, etc) @@ -33,6 +35,13 @@ struct input_value { __s32 value; }; +enum input_clock_type { + INPUT_CLK_REAL = 0, + INPUT_CLK_MONO, + INPUT_CLK_BOOT, + INPUT_CLK_MAX +}; + /** * struct input_dev - represents an input device * @name: name of the device @@ -64,6 +73,8 @@ struct input_value { * not sleep * @ff: force feedback structure associated with the device if device * supports force feedback effects + * @poller: poller structure associated with the device if device is + * set up to use polling mode * @repeat_key: stores key code of the last key pressed; used to implement * software autorepeat * @timer: timer for software autorepeat @@ -114,6 +125,8 @@ struct input_value { * @vals: array of values queued in the current frame * @devres_managed: indicates that devices is managed with devres framework * and needs not be explicitly unregistered or freed. + * @timestamp: storage for a timestamp set by input_set_timestamp called + * by a driver */ struct input_dev { const char *name; @@ -147,6 +160,8 @@ struct input_dev { struct ff_device *ff; + struct input_dev_poller *poller; + unsigned int repeat_key; struct timer_list timer; @@ -184,6 +199,8 @@ struct input_dev { struct input_value *vals; bool devres_managed; + + ktime_t timestamp[INPUT_CLK_MAX]; }; #define to_input_dev(d) container_of(d, struct input_dev, dev) @@ -361,6 +378,12 @@ void input_unregister_device(struct input_dev *); void input_reset_device(struct input_dev *); +int input_setup_polling(struct input_dev *dev, + void (*poll_fn)(struct input_dev *dev)); +void input_set_poll_interval(struct input_dev *dev, unsigned int interval); +void input_set_min_poll_interval(struct input_dev *dev, unsigned int interval); +void input_set_max_poll_interval(struct input_dev *dev, unsigned int interval); + int __must_check input_register_handler(struct input_handler *); void input_unregister_handler(struct input_handler *); @@ -382,6 +405,9 @@ void input_close_device(struct input_handle *); int input_flush_device(struct input_handle *handle, struct file *file); +void input_set_timestamp(struct input_dev *dev, ktime_t timestamp); +ktime_t *input_get_timestamp(struct input_dev *dev); + void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value); void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value); diff --git a/include/linux/input/bu21013.h b/include/linux/input/bu21013.h deleted file mode 100644 index 7e5b7e978e8a..000000000000 --- a/include/linux/input/bu21013.h +++ /dev/null @@ -1,34 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) ST-Ericsson SA 2010 - * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson - */ - -#ifndef _BU21013_H -#define _BU21013_H - -/** - * struct bu21013_platform_device - Handle the platform data - * @touch_x_max: touch x max - * @touch_y_max: touch y max - * @cs_pin: chip select pin - * @touch_pin: touch gpio pin - * @ext_clk: external clock flag - * @x_flip: x flip flag - * @y_flip: y flip flag - * @wakeup: wakeup flag - * - * This is used to handle the platform data - */ -struct bu21013_platform_device { - int touch_x_max; - int touch_y_max; - unsigned int cs_pin; - unsigned int touch_pin; - bool ext_clk; - bool x_flip; - bool y_flip; - bool wakeup; -}; - -#endif diff --git a/include/linux/intel_th.h b/include/linux/intel_th.h new file mode 100644 index 000000000000..9b7f4c22499c --- 
/dev/null +++ b/include/linux/intel_th.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Intel(R) Trace Hub data structures for implementing buffer sinks. + * + * Copyright (C) 2019 Intel Corporation. + */ + +#ifndef _INTEL_TH_H_ +#define _INTEL_TH_H_ + +#include <linux/scatterlist.h> + +/* MSC operating modes (MSC_MODE) */ +enum { + MSC_MODE_SINGLE = 0, + MSC_MODE_MULTI, + MSC_MODE_EXI, + MSC_MODE_DEBUG, +}; + +struct msu_buffer { + const char *name; + /* + * ->assign() called when buffer 'mode' is set to this driver + * (aka mode_store()) + * @device: struct device * of the msc + * @mode: allows the driver to set HW mode (see the enum above) + * Returns: a pointer to a private structure associated with this + * msc or NULL in case of error. This private structure + * will then be passed into all other callbacks. + */ + void *(*assign)(struct device *dev, int *mode); + /* ->unassign(): some other mode is selected, clean up */ + void (*unassign)(void *priv); + /* + * ->alloc_window(): allocate memory for the window of a given + * size + * @sgt: pointer to sg_table, can be overridden by the buffer + * driver, or kept intact + * Returns: number of sg table entries <= number of pages; + * 0 is treated as an allocation failure. + */ + int (*alloc_window)(void *priv, struct sg_table **sgt, + size_t size); + void (*free_window)(void *priv, struct sg_table *sgt); + /* ->activate(): trace has started */ + void (*activate)(void *priv); + /* ->deactivate(): trace is about to stop */ + void (*deactivate)(void *priv); + /* + * ->ready(): window @sgt is filled up to the last block OR + * tracing is stopped by the user; this window contains + * @bytes data. The window in question transitions into + * the "LOCKED" state, indicating that it can't be used + * by hardware. To clear this state and make the window + * available to the hardware again, call + * intel_th_msc_window_unlock(). 
+ */ + int (*ready)(void *priv, struct sg_table *sgt, size_t bytes); +}; + +int intel_th_msu_buffer_register(const struct msu_buffer *mbuf, + struct module *owner); +void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf); +void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt); + +#define module_intel_th_msu_buffer(__buffer) \ +static int __init __buffer##_init(void) \ +{ \ + return intel_th_msu_buffer_register(&(__buffer), THIS_MODULE); \ +} \ +module_init(__buffer##_init); \ +static void __exit __buffer##_exit(void) \ +{ \ + intel_th_msu_buffer_unregister(&(__buffer)); \ +} \ +module_exit(__buffer##_exit); + +#endif /* _INTEL_TH_H_ */ diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h index 63caccadc2db..b16f9effa555 100644 --- a/include/linux/interconnect-provider.h +++ b/include/linux/interconnect-provider.h @@ -36,6 +36,8 @@ struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec, * @nodes: internal list of the interconnect provider nodes * @set: pointer to device specific set operation function * @aggregate: pointer to device specific aggregate operation function + * @pre_aggregate: pointer to device specific function that is called + * before the aggregation begins (optional) * @xlate: provider-specific callback for mapping nodes from phandle arguments * @dev: the device this interconnect provider belongs to * @users: count of active users @@ -45,8 +47,9 @@ struct icc_provider { struct list_head provider_list; struct list_head nodes; int (*set)(struct icc_node *src, struct icc_node *dst); - int (*aggregate)(struct icc_node *node, u32 avg_bw, u32 peak_bw, - u32 *agg_avg, u32 *agg_peak); + int (*aggregate)(struct icc_node *node, u32 tag, u32 avg_bw, + u32 peak_bw, u32 *agg_avg, u32 *agg_peak); + void (*pre_aggregate)(struct icc_node *node); struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data); struct device *dev; int users; diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h index dc25864755ba..d70a914cba11 100644 --- a/include/linux/interconnect.h +++ b/include/linux/interconnect.h @@ -30,6 +30,7 @@ struct icc_path *icc_get(struct device *dev, const int src_id, struct icc_path *of_icc_get(struct device *dev, const char *name); void icc_put(struct icc_path *path); int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw); +void icc_set_tag(struct icc_path *path, u32 tag); #else @@ -54,6 +55,10 @@ static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw) return 0; } +static inline void icc_set_tag(struct icc_path *path, u32 tag) +{ +} + #endif /* CONFIG_INTERCONNECT */ #endif /* __LINUX_INTERCONNECT_H */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 07b527dca996..89fc59dab57d 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -238,6 +238,7 @@ extern void teardown_percpu_nmi(unsigned int irq); /* The following three functions are for the core kernel use only. 
*/ extern void suspend_device_irqs(void); extern void resume_device_irqs(void); +extern void rearm_wake_irq(unsigned int irq); /** * struct irq_affinity_notify - context for notification of IRQ affinity changes diff --git a/include/linux/iomap.h b/include/linux/iomap.h index bc499ceae392..7aa5d6117936 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -188,10 +188,14 @@ sector_t iomap_bmap(struct address_space *mapping, sector_t bno, */ #define IOMAP_DIO_UNWRITTEN (1 << 0) /* covers unwritten extent(s) */ #define IOMAP_DIO_COW (1 << 1) /* covers COW extent(s) */ -typedef int (iomap_dio_end_io_t)(struct kiocb *iocb, ssize_t ret, - unsigned flags); + +struct iomap_dio_ops { + int (*end_io)(struct kiocb *iocb, ssize_t size, int error, + unsigned flags); +}; + ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, - const struct iomap_ops *ops, iomap_dio_end_io_t end_io); + const struct iomap_ops *ops, const struct iomap_dio_ops *dops); int iomap_dio_iopoll(struct kiocb *kiocb, bool spin); #ifdef CONFIG_SWAP diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 5b6a7121c9f0..7bddddfc76d6 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -297,6 +297,8 @@ static inline bool resource_overlaps(struct resource *r1, struct resource *r2) struct resource *devm_request_free_mem_region(struct device *dev, struct resource *base, unsigned long size); +struct resource *request_free_mem_region(struct resource *base, + unsigned long size, const char *name); #endif /* __ASSEMBLY__ */ #endif /* _LINUX_IOPORT_H */ diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index df03825ad1a1..603fbc4e2f70 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1410,8 +1410,6 @@ extern int jbd2_journal_clear_err (journal_t *); extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *); extern int jbd2_journal_force_commit(journal_t *); extern int jbd2_journal_force_commit_nested(journal_t *); -extern int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode); -extern int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode); extern int jbd2_journal_inode_ranged_write(handle_t *handle, struct jbd2_inode *inode, loff_t start_byte, loff_t length); diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 4fa360a13c1e..d83d403dac2e 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -217,7 +217,9 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); * might_sleep - annotation for functions that can sleep * * this macro will print a stack trace if it is executed in an atomic - * context (spinlock, irq-handler, ...). + * context (spinlock, irq-handler, ...). Additional sections where blocking is + * not allowed can be annotated with non_block_start() and non_block_end() + * pairs. * * This is a useful debugging help to be able to catch problems early and not * be bitten later when the calling function happens to sleep when it is not @@ -233,6 +235,23 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); # define cant_sleep() \ do { __cant_sleep(__FILE__, __LINE__, 0); } while (0) # define sched_annotate_sleep() (current->task_state_change = 0) +/** + * non_block_start - annotate the start of section where sleeping is prohibited + * + * This is on behalf of the oom reaper, specifically when it is calling the mmu + * notifiers. 
The problem is that if the notifier were to block on, for example, + * mutex_lock() and if the process which holds that mutex were to perform a + * sleeping memory allocation, the oom reaper is now blocked on completion of + * that memory allocation. Other blocking calls like wait_event() pose similar + * issues. + */ +# define non_block_start() (current->non_block_count++) +/** + * non_block_end - annotate the end of section where sleeping is prohibited + * + * Closes a section opened by non_block_start(). + */ +# define non_block_end() WARN_ON(current->non_block_count-- == 0) #else static inline void ___might_sleep(const char *file, int line, int preempt_offset) { } @@ -241,6 +260,8 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); # define might_sleep() do { might_resched(); } while (0) # define cant_sleep() do { } while (0) # define sched_annotate_sleep() do { } while (0) +# define non_block_start() do { } while (0) +# define non_block_end() do { } while (0) #endif #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) diff --git a/include/linux/key.h b/include/linux/key.h index 50028338a4cc..6cf8e71cf8b7 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -324,7 +324,7 @@ static inline struct key *request_key(struct key_type *type, } #ifdef CONFIG_NET -/* +/** * request_key_net - Request a key for a net namespace and wait for construction * @type: Type of key. * @description: The searchable description of the key. @@ -341,6 +341,18 @@ static inline struct key *request_key(struct key_type *type, */ #define request_key_net(type, description, net, callout_info) \ request_key_tag(type, description, net->key_domain, callout_info); + +/** + * request_key_net_rcu - Request a key for a net namespace under RCU conditions + * @type: Type of key. + * @description: The searchable description of the key. + * @net: The network namespace that is the key's domain of operation. + * + * As for request_key_rcu() except that only keys that operate the specified + * network namespace are used. 
+ */ +#define request_key_net_rcu(type, description, net) \ + request_key_rcu(type, description, net->key_domain); #endif /* CONFIG_NET */ extern int wait_for_key_construction(struct key *key, bool intr); diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index 082d1d2a5216..bc45ea1efbf7 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -15,6 +15,14 @@ extern int __khugepaged_enter(struct mm_struct *mm); extern void __khugepaged_exit(struct mm_struct *mm); extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, unsigned long vm_flags); +#ifdef CONFIG_SHMEM +extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr); +#else +static inline void collapse_pte_mapped_thp(struct mm_struct *mm, + unsigned long addr) +{ +} +#endif #define khugepaged_enabled() \ (transparent_hugepage_flags & \ @@ -73,6 +81,10 @@ static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma, { return 0; } +static inline void collapse_pte_mapped_thp(struct mm_struct *mm, + unsigned long addr) +{ +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* _LINUX_KHUGEPAGED_H */ diff --git a/include/linux/lcd.h b/include/linux/lcd.h index 851eee8fff25..238fb1dfed98 100644 --- a/include/linux/lcd.h +++ b/include/linux/lcd.h @@ -41,16 +41,6 @@ struct lcd_ops { /* Get the LCD panel power status (0: full on, 1..3: controller power on, flat panel power off, 4: full off), see FB_BLANK_XXX */ int (*get_power)(struct lcd_device *); - /* - * Enable or disable power to the LCD(0: on; 4: off, see FB_BLANK_XXX) - * and this callback would be called proir to fb driver's callback. - * - * P.S. note that if early_set_power is not NULL then early fb notifier - * would be registered. - */ - int (*early_set_power)(struct lcd_device *, int power); - /* revert the effects of the early blank event. 
*/ - int (*r_early_set_power)(struct lcd_device *, int power); /* Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX) */ int (*set_power)(struct lcd_device *, int power); /* Get the current contrast setting (0-max_contrast) */ diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h index f52713f0a269..1e824963af17 100644 --- a/include/linux/led-class-flash.h +++ b/include/linux/led-class-flash.h @@ -86,15 +86,20 @@ static inline struct led_classdev_flash *lcdev_to_flcdev( } /** - * led_classdev_flash_register - register a new object of led_classdev class - * with support for flash LEDs - * @parent: the flash LED to register + * led_classdev_flash_register_ext - register a new object of LED class with + * init data and with support for flash LEDs + * @parent: LED flash controller device this flash LED is driven by * @fled_cdev: the led_classdev_flash structure for this device + * @init_data: the LED class flash device initialization data * * Returns: 0 on success or negative error value on failure */ -extern int led_classdev_flash_register(struct device *parent, - struct led_classdev_flash *fled_cdev); +extern int led_classdev_flash_register_ext(struct device *parent, + struct led_classdev_flash *fled_cdev, + struct led_init_data *init_data); + +#define led_classdev_flash_register(parent, fled_cdev) \ + led_classdev_flash_register_ext(parent, fled_cdev, NULL) /** * led_classdev_flash_unregister - unregisters an object of led_classdev class diff --git a/include/linux/leds.h b/include/linux/leds.h index 9b2bf574a17a..b8df71193329 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -8,6 +8,7 @@ #ifndef __LINUX_LEDS_H_INCLUDED #define __LINUX_LEDS_H_INCLUDED +#include <dt-bindings/leds/common.h> #include <linux/device.h> #include <linux/kernfs.h> #include <linux/list.h> @@ -30,6 +31,30 @@ enum led_brightness { LED_FULL = 255, }; +struct led_init_data { + /* device fwnode handle */ + struct fwnode_handle *fwnode; + /* + * default <color:function> tuple, for backward compatibility + * with in-driver hard-coded LED names used as a fallback when + * DT "label" property is absent; it should be set to NULL + * in new LED class drivers. 
+ */ + const char *default_label; + /* + * string to be used for devicename section of LED class device + * either for label based LED name composition path or for fwnode + * based when devname_mandatory is true + */ + const char *devicename; + /* + * indicates if LED name should always comprise devicename section; + * only LEDs exposed by drivers of hot-pluggable devices should + * set it to true + */ + bool devname_mandatory; +}; + struct led_classdev { const char *name; enum led_brightness brightness; @@ -125,16 +150,46 @@ struct led_classdev { struct mutex led_access; }; -extern int of_led_classdev_register(struct device *parent, - struct device_node *np, - struct led_classdev *led_cdev); -#define led_classdev_register(parent, led_cdev) \ - of_led_classdev_register(parent, NULL, led_cdev) -extern int devm_of_led_classdev_register(struct device *parent, - struct device_node *np, - struct led_classdev *led_cdev); -#define devm_led_classdev_register(parent, led_cdev) \ - devm_of_led_classdev_register(parent, NULL, led_cdev) +/** + * led_classdev_register_ext - register a new object of LED class with + * init data + * @parent: LED controller device this LED is driven by + * @led_cdev: the led_classdev structure for this device + * @init_data: the LED class device initialization data + * + * Register a new object of LED class, with name derived from init_data. + * + * Returns: 0 on success or negative error value on failure + */ +extern int led_classdev_register_ext(struct device *parent, + struct led_classdev *led_cdev, + struct led_init_data *init_data); + +/** + * led_classdev_register - register a new object of LED class + * @parent: LED controller device this LED is driven by + * @led_cdev: the led_classdev structure for this device + * + * Register a new object of LED class, with name derived from the name property + * of passed led_cdev argument. + * + * Returns: 0 on success or negative error value on failure + */ +static inline int led_classdev_register(struct device *parent, + struct led_classdev *led_cdev) +{ + return led_classdev_register_ext(parent, led_cdev, NULL); +} + +extern int devm_led_classdev_register_ext(struct device *parent, + struct led_classdev *led_cdev, + struct led_init_data *init_data); + +static inline int devm_led_classdev_register(struct device *parent, + struct led_classdev *led_cdev) +{ + return devm_led_classdev_register_ext(parent, led_cdev, NULL); +} extern void led_classdev_unregister(struct led_classdev *led_cdev); extern void devm_led_classdev_unregister(struct device *parent, struct led_classdev *led_cdev); @@ -244,6 +299,22 @@ extern void led_sysfs_disable(struct led_classdev *led_cdev); extern void led_sysfs_enable(struct led_classdev *led_cdev); /** + * led_compose_name - compose LED class device name + * @dev: LED controller device object + * @child: child fwnode_handle describing a LED or a group of synchronized LEDs; + * it must be provided only for fwnode based LEDs + * @led_classdev_name: composed LED class device name + * + * Create LED class device name basing on the provided init_data argument. + * The name can have <devicename:color:function> or <color:function>. + * form, depending on the init_data configuration. 
+ * + * Returns: 0 on success or negative error value on failure + */ +extern int led_compose_name(struct device *dev, struct led_init_data *init_data, + char *led_classdev_name); + +/** * led_sysfs_is_disabled - check if LED sysfs interface is disabled * @led_cdev: the LED to query * @@ -420,6 +491,15 @@ struct led_platform_data { struct led_info *leds; }; +struct led_properties { + u32 color; + bool color_present; + const char *function; + u32 func_enum; + bool func_enum_present; + const char *label; +}; + struct gpio_desc; typedef int (*gpio_blink_set_t)(struct gpio_desc *desc, int state, unsigned long *delay_on, diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 7a64b3ddb408..b6eddf912568 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -160,8 +160,11 @@ static inline struct nd_blk_region_desc *to_blk_region_desc( } -enum nvdimm_security_state { - NVDIMM_SECURITY_ERROR = -1, +/* + * Note that separate bits for locked + unlocked are defined so that + * 'flags == 0' corresponds to an error / not-supported state. + */ +enum nvdimm_security_bits { NVDIMM_SECURITY_DISABLED, NVDIMM_SECURITY_UNLOCKED, NVDIMM_SECURITY_LOCKED, @@ -182,7 +185,7 @@ enum nvdimm_passphrase_type { }; struct nvdimm_security_ops { - enum nvdimm_security_state (*state)(struct nvdimm *nvdimm, + unsigned long (*get_flags)(struct nvdimm *nvdimm, enum nvdimm_passphrase_type pass_type); int (*freeze)(struct nvdimm *nvdimm); int (*change_key)(struct nvdimm *nvdimm, diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index df1318d85f7d..3fced5824aee 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -339,6 +339,9 @@ * Check for permission to change root directory. * @path contains the path structure. * Return 0 if permission is granted. + * @path_notify: + * Check permissions before setting a watch on events as defined by @mask, + * on an object at @path, whose type is defined by @obj_type. * @inode_readlink: * Check the permission to read the symbolic link. * @dentry contains the dentry structure for the file link. 
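/*
 * Editorial sketch, not part of the patch: one way a security module could
 * implement the new path_notify hook documented above, whose member is added
 * to union security_list_options in the next hunk.  The module name, the
 * permissive policy and the init wiring here are hypothetical; only the hook
 * signature (const struct path *, u64 mask, unsigned int obj_type) comes from
 * the diff itself.
 */
#include <linux/lsm_hooks.h>
#include <linux/path.h>

static int example_path_notify(const struct path *path, u64 mask,
			       unsigned int obj_type)
{
	/*
	 * A real module would check @path (and possibly @mask and @obj_type,
	 * e.g. against FSNOTIFY_OBJ_TYPE_* values) and return -EPERM to
	 * refuse the watch; returning 0 grants permission.
	 */
	return 0;
}

static struct security_hook_list example_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(path_notify, example_path_notify),
};

static int __init example_lsm_init(void)
{
	/* Register the hook list under a hypothetical LSM name. */
	security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks), "example");
	return 0;
}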
@@ -1535,7 +1538,9 @@ union security_list_options { int (*path_chown)(const struct path *path, kuid_t uid, kgid_t gid); int (*path_chroot)(const struct path *path); #endif - + /* Needed for inode based security check */ + int (*path_notify)(const struct path *path, u64 mask, + unsigned int obj_type); int (*inode_alloc_security)(struct inode *inode); void (*inode_free_security)(struct inode *inode); int (*inode_init_security)(struct inode *inode, struct inode *dir, @@ -1860,6 +1865,8 @@ struct security_hook_heads { struct hlist_head path_chown; struct hlist_head path_chroot; #endif + /* Needed for inode based modules as well */ + struct hlist_head path_notify; struct hlist_head inode_alloc_security; struct hlist_head inode_free_security; struct hlist_head inode_init_security; diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h index ccb73422c2fa..e6f54ef6698b 100644 --- a/include/linux/mailbox/mtk-cmdq-mailbox.h +++ b/include/linux/mailbox/mtk-cmdq-mailbox.h @@ -20,6 +20,9 @@ #define CMDQ_WFE_WAIT BIT(15) #define CMDQ_WFE_WAIT_VALUE 0x1 +/** cmdq event maximum */ +#define CMDQ_MAX_EVENT 0x3ff + /* * CMDQ_CODE_MASK: * set write mask diff --git a/include/linux/mdio.h b/include/linux/mdio.h index e8242ad88c81..a7604248777b 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -68,6 +68,17 @@ struct mdio_driver { #define to_mdio_driver(d) \ container_of(to_mdio_common_driver(d), struct mdio_driver, mdiodrv) +/* device driver data */ +static inline void mdiodev_set_drvdata(struct mdio_device *mdio, void *data) +{ + dev_set_drvdata(&mdio->dev, data); +} + +static inline void *mdiodev_get_drvdata(struct mdio_device *mdio) +{ + return dev_get_drvdata(&mdio->dev); +} + void mdio_device_free(struct mdio_device *mdiodev); struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr); int mdio_device_register(struct mdio_device *mdiodev); diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h index 470bd53a89df..5c4a18a91f89 100644 --- a/include/linux/mem_encrypt.h +++ b/include/linux/mem_encrypt.h @@ -18,23 +18,10 @@ #else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */ -#define sme_me_mask 0ULL - -static inline bool sme_active(void) { return false; } -static inline bool sev_active(void) { return false; } +static inline bool mem_encrypt_active(void) { return false; } #endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */ -static inline bool mem_encrypt_active(void) -{ - return sme_me_mask; -} - -static inline u64 sme_get_me_mask(void) -{ - return sme_me_mask; -} - #ifdef CONFIG_AMD_MEM_ENCRYPT /* * The __sme_set() and __sme_clr() macros are useful for adding or removing diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ad8f1a397ae4..9b60863429cc 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -128,9 +128,8 @@ struct mem_cgroup_per_node { struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1]; -#ifdef CONFIG_MEMCG_KMEM struct memcg_shrinker_map __rcu *shrinker_map; -#endif + struct rb_node tree_node; /* RB tree node */ unsigned long usage_in_excess;/* Set to the value by which */ /* the soft limit is exceeded*/ @@ -331,6 +330,10 @@ struct mem_cgroup { struct list_head event_list; spinlock_t event_list_lock; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + struct deferred_split deferred_split_queue; +#endif + struct mem_cgroup_per_node *nodeinfo[0]; /* WARNING: nodeinfo must be the last member here */ }; @@ -1311,6 +1314,11 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) } while 
((memcg = parent_mem_cgroup(memcg))); return false; } + +extern int memcg_expand_shrinker_maps(int new_id); + +extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg, + int nid, int shrinker_id); #else #define mem_cgroup_sockets_enabled 0 static inline void mem_cgroup_sk_alloc(struct sock *sk) { }; @@ -1319,6 +1327,11 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) { return false; } + +static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg, + int nid, int shrinker_id) +{ +} #endif struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); @@ -1390,10 +1403,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) return memcg ? memcg->kmemcg_id : -1; } -extern int memcg_expand_shrinker_maps(int new_id); - -extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg, - int nid, int shrinker_id); #else static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) @@ -1435,8 +1444,6 @@ static inline void memcg_put_cache_ids(void) { } -static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg, - int nid, int shrinker_id) { } #endif /* CONFIG_MEMCG_KMEM */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/memory.h b/include/linux/memory.h index 02e633f3ede0..0ebb105eb261 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -25,7 +25,6 @@ struct memory_block { unsigned long start_section_nr; - unsigned long end_section_nr; unsigned long state; /* serialized by the dev->lock */ int section_count; /* serialized by mem_sysfs_mutex */ int online_type; /* for passing data to online routine */ @@ -80,9 +79,9 @@ struct mem_section; #define IPC_CALLBACK_PRI 10 #ifndef CONFIG_MEMORY_HOTPLUG_SPARSE -static inline int memory_dev_init(void) +static inline void memory_dev_init(void) { - return 0; + return; } static inline int register_memory_notifier(struct notifier_block *nb) { @@ -113,7 +112,7 @@ extern int register_memory_isolate_notifier(struct notifier_block *nb); extern void unregister_memory_isolate_notifier(struct notifier_block *nb); int create_memory_block_devices(unsigned long start, unsigned long size); void remove_memory_block_devices(unsigned long start, unsigned long size); -extern int memory_dev_init(void); +extern void memory_dev_init(void); extern int memory_notify(unsigned long val, void *v); extern int memory_isolate_notify(unsigned long val, void *v); extern struct memory_block *find_memory_block(struct mem_section *); diff --git a/include/linux/memremap.h b/include/linux/memremap.h index f8a5b2a19945..bef51e35d8d2 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -109,10 +109,8 @@ struct dev_pagemap { struct percpu_ref *ref; struct percpu_ref internal_ref; struct completion done; - struct device *dev; enum memory_type type; unsigned int flags; - u64 pci_p2pdma_bus_offset; const struct dev_pagemap_ops *ops; }; @@ -124,6 +122,8 @@ static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap) } #ifdef CONFIG_ZONE_DEVICE +void *memremap_pages(struct dev_pagemap *pgmap, int nid); +void memunmap_pages(struct dev_pagemap *pgmap); void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap); void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap); struct dev_pagemap *get_dev_pagemap(unsigned long pfn, diff --git a/include/linux/mfd/aat2870.h b/include/linux/mfd/aat2870.h index af7267c480ee..2445842d482d 100644 --- a/include/linux/mfd/aat2870.h +++ b/include/linux/mfd/aat2870.h @@ -136,7 +136,6 @@ struct aat2870_data { /* 
for debugfs */ struct dentry *dentry_root; - struct dentry *dentry_reg; }; struct aat2870_subdev_info { diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index 77805c3f2de7..61c2875c2a40 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -8,183 +8,11 @@ #ifndef __LINUX_MFD_CROS_EC_H #define __LINUX_MFD_CROS_EC_H -#include <linux/cdev.h> #include <linux/device.h> -#include <linux/notifier.h> -#include <linux/mfd/cros_ec_commands.h> -#include <linux/mutex.h> - -#define CROS_EC_DEV_NAME "cros_ec" -#define CROS_EC_DEV_FP_NAME "cros_fp" -#define CROS_EC_DEV_PD_NAME "cros_pd" -#define CROS_EC_DEV_TP_NAME "cros_tp" -#define CROS_EC_DEV_ISH_NAME "cros_ish" -#define CROS_EC_DEV_SCP_NAME "cros_scp" - -/* - * The EC is unresponsive for a time after a reboot command. Add a - * simple delay to make sure that the bus stays locked. - */ -#define EC_REBOOT_DELAY_MS 50 - -/* - * Max bus-specific overhead incurred by request/responses. - * I2C requires 1 additional byte for requests. - * I2C requires 2 additional bytes for responses. - * SPI requires up to 32 additional bytes for responses. - */ -#define EC_PROTO_VERSION_UNKNOWN 0 -#define EC_MAX_REQUEST_OVERHEAD 1 -#define EC_MAX_RESPONSE_OVERHEAD 32 - -/* - * Command interface between EC and AP, for LPC, I2C and SPI interfaces. - */ -enum { - EC_MSG_TX_HEADER_BYTES = 3, - EC_MSG_TX_TRAILER_BYTES = 1, - EC_MSG_TX_PROTO_BYTES = EC_MSG_TX_HEADER_BYTES + - EC_MSG_TX_TRAILER_BYTES, - EC_MSG_RX_PROTO_BYTES = 3, - - /* Max length of messages for proto 2*/ - EC_PROTO2_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE + - EC_MSG_TX_PROTO_BYTES, - - EC_MAX_MSG_BYTES = 64 * 1024, -}; - -/** - * struct cros_ec_command - Information about a ChromeOS EC command. - * @version: Command version number (often 0). - * @command: Command to send (EC_CMD_...). - * @outsize: Outgoing length in bytes. - * @insize: Max number of bytes to accept from the EC. - * @result: EC's response to the command (separate from communication failure). - * @data: Where to put the incoming data from EC and outgoing data to EC. - */ -struct cros_ec_command { - uint32_t version; - uint32_t command; - uint32_t outsize; - uint32_t insize; - uint32_t result; - uint8_t data[0]; -}; - -/** - * struct cros_ec_device - Information about a ChromeOS EC device. - * @phys_name: Name of physical comms layer (e.g. 'i2c-4'). - * @dev: Device pointer for physical comms device - * @was_wake_device: True if this device was set to wake the system from - * sleep at the last suspend. - * @cros_class: The class structure for this device. - * @cmd_readmem: Direct read of the EC memory-mapped region, if supported. - * @offset: Is within EC_LPC_ADDR_MEMMAP region. - * @bytes: Number of bytes to read. zero means "read a string" (including - * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be - * read. Caller must ensure that the buffer is large enough for the - * result when reading a string. - * @max_request: Max size of message requested. - * @max_response: Max size of message response. - * @max_passthru: Max sice of passthru message. - * @proto_version: The protocol version used for this device. - * @priv: Private data. - * @irq: Interrupt to use. - * @id: Device id. - * @din: Input buffer (for data from EC). This buffer will always be - * dword-aligned and include enough space for up to 7 word-alignment - * bytes also, so we can ensure that the body of the message is always - * dword-aligned (64-bit). We use this alignment to keep ARM and x86 - * happy. 
Probably word alignment would be OK, there might be a small - * performance advantage to using dword. - * @dout: Output buffer (for data to EC). This buffer will always be - * dword-aligned and include enough space for up to 7 word-alignment - * bytes also, so we can ensure that the body of the message is always - * dword-aligned (64-bit). We use this alignment to keep ARM and x86 - * happy. Probably word alignment would be OK, there might be a small - * performance advantage to using dword. - * @din_size: Size of din buffer to allocate (zero to use static din). - * @dout_size: Size of dout buffer to allocate (zero to use static dout). - * @wake_enabled: True if this device can wake the system from sleep. - * @suspended: True if this device had been suspended. - * @cmd_xfer: Send command to EC and get response. - * Returns the number of bytes received if the communication - * succeeded, but that doesn't mean the EC was happy with the - * command. The caller should check msg.result for the EC's result - * code. - * @pkt_xfer: Send packet to EC and get response. - * @lock: One transaction at a time. - * @mkbp_event_supported: True if this EC supports the MKBP event protocol. - * @host_sleep_v1: True if this EC supports the sleep v1 command. - * @event_notifier: Interrupt event notifier for transport devices. - * @event_data: Raw payload transferred with the MKBP event. - * @event_size: Size in bytes of the event data. - * @host_event_wake_mask: Mask of host events that cause wake from suspend. - */ -struct cros_ec_device { - /* These are used by other drivers that want to talk to the EC */ - const char *phys_name; - struct device *dev; - bool was_wake_device; - struct class *cros_class; - int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset, - unsigned int bytes, void *dest); - - /* These are used to implement the platform-specific interface */ - u16 max_request; - u16 max_response; - u16 max_passthru; - u16 proto_version; - void *priv; - int irq; - u8 *din; - u8 *dout; - int din_size; - int dout_size; - bool wake_enabled; - bool suspended; - int (*cmd_xfer)(struct cros_ec_device *ec, - struct cros_ec_command *msg); - int (*pkt_xfer)(struct cros_ec_device *ec, - struct cros_ec_command *msg); - struct mutex lock; - bool mkbp_event_supported; - bool host_sleep_v1; - struct blocking_notifier_head event_notifier; - - struct ec_response_get_next_event_v1 event_data; - int event_size; - u32 host_event_wake_mask; - u32 last_resume_result; -}; - -/** - * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information. - * @sensor_num: Id of the sensor, as reported by the EC. - */ -struct cros_ec_sensor_platform { - u8 sensor_num; -}; - -/** - * struct cros_ec_platform - ChromeOS EC platform information. - * @ec_name: Name of EC device (e.g. 'cros-ec', 'cros-pd', ...) - * used in /dev/ and sysfs. - * @cmd_offset: Offset to apply for each command. Set when - * registering a device behind another one. - */ -struct cros_ec_platform { - const char *ec_name; - u16 cmd_offset; -}; - -struct cros_ec_debugfs; /** * struct cros_ec_dev - ChromeOS EC device entry point. * @class_dev: Device structure used in sysfs. - * @cdev: Character device structure in /dev. * @ec_dev: cros_ec_device structure to talk to the physical device. * @dev: Pointer to the platform device. * @debug_info: cros_ec_debugfs structure for debugging information. 
@@ -194,7 +22,6 @@ struct cros_ec_debugfs; */ struct cros_ec_dev { struct device class_dev; - struct cdev cdev; struct cros_ec_device *ec_dev; struct device *dev; struct cros_ec_debugfs *debug_info; @@ -205,123 +32,4 @@ struct cros_ec_dev { #define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev) -/** - * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device. - * @ec_dev: Device to suspend. - * - * This can be called by drivers to handle a suspend event. - * - * Return: 0 on success or negative error code. - */ -int cros_ec_suspend(struct cros_ec_device *ec_dev); - -/** - * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device. - * @ec_dev: Device to resume. - * - * This can be called by drivers to handle a resume event. - * - * Return: 0 on success or negative error code. - */ -int cros_ec_resume(struct cros_ec_device *ec_dev); - -/** - * cros_ec_prepare_tx() - Prepare an outgoing message in the output buffer. - * @ec_dev: Device to register. - * @msg: Message to write. - * - * This is intended to be used by all ChromeOS EC drivers, but at present - * only SPI uses it. Once LPC uses the same protocol it can start using it. - * I2C could use it now, with a refactor of the existing code. - * - * Return: 0 on success or negative error code. - */ -int cros_ec_prepare_tx(struct cros_ec_device *ec_dev, - struct cros_ec_command *msg); - -/** - * cros_ec_check_result() - Check ec_msg->result. - * @ec_dev: EC device. - * @msg: Message to check. - * - * This is used by ChromeOS EC drivers to check the ec_msg->result for - * errors and to warn about them. - * - * Return: 0 on success or negative error code. - */ -int cros_ec_check_result(struct cros_ec_device *ec_dev, - struct cros_ec_command *msg); - -/** - * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC. - * @ec_dev: EC device. - * @msg: Message to write. - * - * Call this to send a command to the ChromeOS EC. This should be used - * instead of calling the EC's cmd_xfer() callback directly. - * - * Return: 0 on success or negative error code. - */ -int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, - struct cros_ec_command *msg); - -/** - * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC. - * @ec_dev: EC device. - * @msg: Message to write. - * - * This function is identical to cros_ec_cmd_xfer, except it returns success - * status only if both the command was transmitted successfully and the EC - * replied with success status. It's not necessary to check msg->result when - * using this function. - * - * Return: The number of bytes transferred on success or negative error code. - */ -int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, - struct cros_ec_command *msg); - -/** - * cros_ec_register() - Register a new ChromeOS EC, using the provided info. - * @ec_dev: Device to register. - * - * Before calling this, allocate a pointer to a new device and then fill - * in all the fields up to the --private-- marker. - * - * Return: 0 on success or negative error code. - */ -int cros_ec_register(struct cros_ec_device *ec_dev); - -/** - * cros_ec_query_all() - Query the protocol version supported by the - * ChromeOS EC. - * @ec_dev: Device to register. - * - * Return: 0 on success or negative error code. - */ -int cros_ec_query_all(struct cros_ec_device *ec_dev); - -/** - * cros_ec_get_next_event() - Fetch next event from the ChromeOS EC. - * @ec_dev: Device to fetch event from. 
- * @wake_event: Pointer to a bool set to true upon return if the event might be - * treated as a wake event. Ignored if null. - * - * Return: negative error code on errors; 0 for no data; or else number of - * bytes received (i.e., an event was retrieved successfully). Event types are - * written out to @ec_dev->event_data.event_type on success. - */ -int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event); - -/** - * cros_ec_get_host_event() - Return a mask of event set by the ChromeOS EC. - * @ec_dev: Device to fetch event from. - * - * When MKBP is supported, when the EC raises an interrupt, we collect the - * events raised and call the functions in the ec notifier. This function - * is a helper to know which events are raised. - * - * Return: 0 on error or non-zero bitmask of one or more EC_HOST_EVENT_*. - */ -u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev); - #endif /* __LINUX_MFD_CROS_EC_H */ diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h deleted file mode 100644 index 085edbf7601b..000000000000 --- a/include/linux/mfd/da9063/pdata.h +++ /dev/null @@ -1,60 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Platform configuration options for DA9063 - * - * Copyright 2012 Dialog Semiconductor Ltd. - * - * Author: Michal Hajduk, Dialog Semiconductor - * Author: Krystian Garbaciak, Dialog Semiconductor - */ - -#ifndef __MFD_DA9063_PDATA_H__ -#define __MFD_DA9063_PDATA_H__ - -/* - * RGB LED configuration - */ -/* LED IDs for flags in struct led_info. */ -enum { - DA9063_GPIO11_LED, - DA9063_GPIO14_LED, - DA9063_GPIO15_LED, - - DA9063_LED_NUM -}; -#define DA9063_LED_ID_MASK 0x3 - -/* LED polarity for flags in struct led_info. */ -#define DA9063_LED_HIGH_LEVEL_ACTIVE 0x0 -#define DA9063_LED_LOW_LEVEL_ACTIVE 0x4 - - -/* - * General PMIC configuration - */ -/* HWMON ADC channels configuration */ -#define DA9063_FLG_FORCE_IN0_MANUAL_MODE 0x0010 -#define DA9063_FLG_FORCE_IN0_AUTO_MODE 0x0020 -#define DA9063_FLG_FORCE_IN1_MANUAL_MODE 0x0040 -#define DA9063_FLG_FORCE_IN1_AUTO_MODE 0x0080 -#define DA9063_FLG_FORCE_IN2_MANUAL_MODE 0x0100 -#define DA9063_FLG_FORCE_IN2_AUTO_MODE 0x0200 -#define DA9063_FLG_FORCE_IN3_MANUAL_MODE 0x0400 -#define DA9063_FLG_FORCE_IN3_AUTO_MODE 0x0800 - -/* Disable register caching. */ -#define DA9063_FLG_NO_CACHE 0x0008 - -struct da9063; - -/* DA9063 platform data */ -struct da9063_pdata { - int (*init)(struct da9063 *da9063); - int irq_base; - bool key_power; - unsigned flags; - struct da9063_regulators_pdata *regulators_pdata; - struct led_platform_data *leds_pdata; -}; - -#endif /* __MFD_DA9063_PDATA_H__ */ diff --git a/include/linux/mfd/intel_soc_pmic_mrfld.h b/include/linux/mfd/intel_soc_pmic_mrfld.h new file mode 100644 index 000000000000..4daecd682275 --- /dev/null +++ b/include/linux/mfd/intel_soc_pmic_mrfld.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Header file for Intel Merrifield Basin Cove PMIC + * + * Copyright (C) 2019 Intel Corporation. All rights reserved. 
+ */ + +#ifndef __INTEL_SOC_PMIC_MRFLD_H__ +#define __INTEL_SOC_PMIC_MRFLD_H__ + +#include <linux/bits.h> + +#define BCOVE_ID 0x00 + +#define BCOVE_ID_MINREV0 GENMASK(2, 0) +#define BCOVE_ID_MAJREV0 GENMASK(5, 3) +#define BCOVE_ID_VENDID0 GENMASK(7, 6) + +#define BCOVE_MINOR(x) (unsigned int)(((x) & BCOVE_ID_MINREV0) >> 0) +#define BCOVE_MAJOR(x) (unsigned int)(((x) & BCOVE_ID_MAJREV0) >> 3) +#define BCOVE_VENDOR(x) (unsigned int)(((x) & BCOVE_ID_VENDID0) >> 6) + +#define BCOVE_IRQLVL1 0x01 + +#define BCOVE_PBIRQ 0x02 +#define BCOVE_TMUIRQ 0x03 +#define BCOVE_THRMIRQ 0x04 +#define BCOVE_BCUIRQ 0x05 +#define BCOVE_ADCIRQ 0x06 +#define BCOVE_CHGRIRQ0 0x07 +#define BCOVE_CHGRIRQ1 0x08 +#define BCOVE_GPIOIRQ 0x09 +#define BCOVE_CRITIRQ 0x0B + +#define BCOVE_MIRQLVL1 0x0C + +#define BCOVE_MPBIRQ 0x0D +#define BCOVE_MTMUIRQ 0x0E +#define BCOVE_MTHRMIRQ 0x0F +#define BCOVE_MBCUIRQ 0x10 +#define BCOVE_MADCIRQ 0x11 +#define BCOVE_MCHGRIRQ0 0x12 +#define BCOVE_MCHGRIRQ1 0x13 +#define BCOVE_MGPIOIRQ 0x14 +#define BCOVE_MCRITIRQ 0x16 + +#define BCOVE_SCHGRIRQ0 0x4E +#define BCOVE_SCHGRIRQ1 0x4F + +/* Level 1 IRQs */ +#define BCOVE_LVL1_PWRBTN BIT(0) /* power button */ +#define BCOVE_LVL1_TMU BIT(1) /* time management unit */ +#define BCOVE_LVL1_THRM BIT(2) /* thermal */ +#define BCOVE_LVL1_BCU BIT(3) /* burst control unit */ +#define BCOVE_LVL1_ADC BIT(4) /* ADC */ +#define BCOVE_LVL1_CHGR BIT(5) /* charger */ +#define BCOVE_LVL1_GPIO BIT(6) /* GPIO */ +#define BCOVE_LVL1_CRIT BIT(7) /* critical event */ + +/* Level 2 IRQs: power button */ +#define BCOVE_PBIRQ_PBTN BIT(0) +#define BCOVE_PBIRQ_UBTN BIT(1) + +/* Level 2 IRQs: ADC */ +#define BCOVE_ADCIRQ_BATTEMP BIT(2) +#define BCOVE_ADCIRQ_SYSTEMP BIT(3) +#define BCOVE_ADCIRQ_BATTID BIT(4) +#define BCOVE_ADCIRQ_VIBATT BIT(5) +#define BCOVE_ADCIRQ_CCTICK BIT(7) + +/* Level 2 IRQs: charger */ +#define BCOVE_CHGRIRQ_BAT0ALRT BIT(4) +#define BCOVE_CHGRIRQ_BAT1ALRT BIT(5) +#define BCOVE_CHGRIRQ_BATCRIT BIT(6) + +#define BCOVE_CHGRIRQ_VBUSDET BIT(0) +#define BCOVE_CHGRIRQ_DCDET BIT(1) +#define BCOVE_CHGRIRQ_BATTDET BIT(2) +#define BCOVE_CHGRIRQ_USBIDDET BIT(3) + +#endif /* __INTEL_SOC_PMIC_MRFLD_H__ */ diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h index 25a95e72179b..fc88d315bdde 100644 --- a/include/linux/mfd/mt6397/core.h +++ b/include/linux/mfd/mt6397/core.h @@ -7,6 +7,14 @@ #ifndef __MFD_MT6397_CORE_H__ #define __MFD_MT6397_CORE_H__ +#include <linux/mutex.h> + +enum chip_id { + MT6323_CHIP_ID = 0x23, + MT6391_CHIP_ID = 0x91, + MT6397_CHIP_ID = 0x97, +}; + enum mt6397_irq_numbers { MT6397_IRQ_SPKL_AB = 0, MT6397_IRQ_SPKR_AB, @@ -54,6 +62,9 @@ struct mt6397_chip { u16 irq_masks_cache[2]; u16 int_con[2]; u16 int_status[2]; + u16 chip_id; }; +int mt6397_irq_init(struct mt6397_chip *chip); + #endif /* __MFD_MT6397_CORE_H__ */ diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h index 8cfda0554381..112dc66262cc 100644 --- a/include/linux/mfd/syscon.h +++ b/include/linux/mfd/syscon.h @@ -17,12 +17,18 @@ struct device_node; #ifdef CONFIG_MFD_SYSCON +extern struct regmap *device_node_to_regmap(struct device_node *np); extern struct regmap *syscon_node_to_regmap(struct device_node *np); extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s); extern struct regmap *syscon_regmap_lookup_by_phandle( struct device_node *np, const char *property); #else +static inline struct regmap *device_node_to_regmap(struct device_node *np) +{ + return ERR_PTR(-ENOTSUPP); +} + static inline struct regmap 
*syscon_node_to_regmap(struct device_node *np) { return ERR_PTR(-ENOTSUPP); diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 7f04754c7f2b..72120061b7d4 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -166,8 +166,6 @@ static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm, #define MIGRATE_PFN_MIGRATE (1UL << 1) #define MIGRATE_PFN_LOCKED (1UL << 2) #define MIGRATE_PFN_WRITE (1UL << 3) -#define MIGRATE_PFN_DEVICE (1UL << 4) -#define MIGRATE_PFN_ERROR (1UL << 5) #define MIGRATE_PFN_SHIFT 6 static inline struct page *migrate_pfn_to_page(unsigned long mpfn) @@ -182,107 +180,27 @@ static inline unsigned long migrate_pfn(unsigned long pfn) return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID; } -/* - * struct migrate_vma_ops - migrate operation callback - * - * @alloc_and_copy: alloc destination memory and copy source memory to it - * @finalize_and_map: allow caller to map the successfully migrated pages - * - * - * The alloc_and_copy() callback happens once all source pages have been locked, - * unmapped and checked (checked whether pinned or not). All pages that can be - * migrated will have an entry in the src array set with the pfn value of the - * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set (other - * flags might be set but should be ignored by the callback). - * - * The alloc_and_copy() callback can then allocate destination memory and copy - * source memory to it for all those entries (ie with MIGRATE_PFN_VALID and - * MIGRATE_PFN_MIGRATE flag set). Once these are allocated and copied, the - * callback must update each corresponding entry in the dst array with the pfn - * value of the destination page and with the MIGRATE_PFN_VALID and - * MIGRATE_PFN_LOCKED flags set (destination pages must have their struct pages - * locked, via lock_page()). - * - * At this point the alloc_and_copy() callback is done and returns. - * - * Note that the callback does not have to migrate all the pages that are - * marked with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration - * from device memory to system memory (ie the MIGRATE_PFN_DEVICE flag is also - * set in the src array entry). If the device driver cannot migrate a device - * page back to system memory, then it must set the corresponding dst array - * entry to MIGRATE_PFN_ERROR. This will trigger a SIGBUS if CPU tries to - * access any of the virtual addresses originally backed by this page. Because - * a SIGBUS is such a severe result for the userspace process, the device - * driver should avoid setting MIGRATE_PFN_ERROR unless it is really in an - * unrecoverable state. - * - * For empty entry inside CPU page table (pte_none() or pmd_none() is true) we - * do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus - * allowing device driver to allocate device memory for those unback virtual - * address. For this the device driver simply have to allocate device memory - * and properly set the destination entry like for regular migration. Note that - * this can still fails and thus inside the device driver must check if the - * migration was successful for those entry inside the finalize_and_map() - * callback just like for regular migration. - * - * THE alloc_and_copy() CALLBACK MUST NOT CHANGE ANY OF THE SRC ARRAY ENTRIES - * OR BAD THINGS WILL HAPPEN ! 
- * - * - * The finalize_and_map() callback happens after struct page migration from - * source to destination (destination struct pages are the struct pages for the - * memory allocated by the alloc_and_copy() callback). Migration can fail, and - * thus the finalize_and_map() allows the driver to inspect which pages were - * successfully migrated, and which were not. Successfully migrated pages will - * have the MIGRATE_PFN_MIGRATE flag set for their src array entry. - * - * It is safe to update device page table from within the finalize_and_map() - * callback because both destination and source page are still locked, and the - * mmap_sem is held in read mode (hence no one can unmap the range being - * migrated). - * - * Once callback is done cleaning up things and updating its page table (if it - * chose to do so, this is not an obligation) then it returns. At this point, - * the HMM core will finish up the final steps, and the migration is complete. - * - * THE finalize_and_map() CALLBACK MUST NOT CHANGE ANY OF THE SRC OR DST ARRAY - * ENTRIES OR BAD THINGS WILL HAPPEN ! - */ -struct migrate_vma_ops { - void (*alloc_and_copy)(struct vm_area_struct *vma, - const unsigned long *src, - unsigned long *dst, - unsigned long start, - unsigned long end, - void *private); - void (*finalize_and_map)(struct vm_area_struct *vma, - const unsigned long *src, - const unsigned long *dst, - unsigned long start, - unsigned long end, - void *private); +struct migrate_vma { + struct vm_area_struct *vma; + /* + * Both src and dst array must be big enough for + * (end - start) >> PAGE_SHIFT entries. + * + * The src array must not be modified by the caller after + * migrate_vma_setup(), and must not change the dst array after + * migrate_vma_pages() returns. + */ + unsigned long *dst; + unsigned long *src; + unsigned long cpages; + unsigned long npages; + unsigned long start; + unsigned long end; }; -#if defined(CONFIG_MIGRATE_VMA_HELPER) -int migrate_vma(const struct migrate_vma_ops *ops, - struct vm_area_struct *vma, - unsigned long start, - unsigned long end, - unsigned long *src, - unsigned long *dst, - void *private); -#else -static inline int migrate_vma(const struct migrate_vma_ops *ops, - struct vm_area_struct *vma, - unsigned long start, - unsigned long end, - unsigned long *src, - unsigned long *dst, - void *private) -{ - return -EINVAL; -} -#endif /* IS_ENABLED(CONFIG_MIGRATE_VMA_HELPER) */ +int migrate_vma_setup(struct migrate_vma *args); +void migrate_vma_pages(struct migrate_vma *migrate); +void migrate_vma_finalize(struct migrate_vma *migrate); #endif /* CONFIG_MIGRATION */ diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index c2f056b5766d..cc1c230f10ee 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -328,6 +328,7 @@ enum mlx5_event { MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16, MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17, + MLX5_EVENT_TYPE_XRQ_ERROR = 0x18, MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22, MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24, @@ -345,6 +346,7 @@ enum mlx5_event { MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe, MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c, + MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d, MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21, @@ -584,6 +586,12 @@ struct mlx5_eqe_cq_err { u8 syndrome; }; +struct mlx5_eqe_xrq_err { + __be32 reserved1[5]; + __be32 type_xrqn; + __be32 reserved2; +}; + struct mlx5_eqe_port_state { u8 reserved0[8]; u8 port; @@ 
-698,6 +706,7 @@ union ev_data { struct mlx5_eqe_pps pps; struct mlx5_eqe_dct dct; struct mlx5_eqe_temp_warning temp_warning; + struct mlx5_eqe_xrq_err xrq_err; } __packed; struct mlx5_eqe { @@ -1162,6 +1171,9 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap) +#define MLX5_CAP64_FLOWTABLE(mdev, cap) \ + MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap) + #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap) @@ -1225,6 +1237,10 @@ enum mlx5_qcam_feature_groups { MLX5_GET(e_switch_cap, \ mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap) +#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \ + MLX5_GET64(flow_table_eswitch_cap, \ + (mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + #define MLX5_CAP_ESW_MAX(mdev, cap) \ MLX5_GET(e_switch_cap, \ mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap) @@ -1309,6 +1325,7 @@ enum { MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12, + MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP = 0x13, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16, MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, }; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 0e6da1840c7d..3e80f03a387f 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -47,6 +47,7 @@ #include <linux/interrupt.h> #include <linux/idr.h> #include <linux/notifier.h> +#include <linux/refcount.h> #include <linux/mlx5/device.h> #include <linux/mlx5/doorbell.h> @@ -189,7 +190,6 @@ enum mlx5_coredev_type { }; struct mlx5_field_desc { - struct dentry *dent; int i; }; @@ -242,11 +242,6 @@ struct mlx5_cmd_msg { struct mlx5_cmd_debug { struct dentry *dbg_root; - struct dentry *dbg_in; - struct dentry *dbg_out; - struct dentry *dbg_outlen; - struct dentry *dbg_status; - struct dentry *dbg_run; void *in_msg; void *out_msg; u8 status; @@ -271,8 +266,6 @@ struct mlx5_cmd_stats { u64 sum; u64 n; struct dentry *root; - struct dentry *avg; - struct dentry *count; /* protect command average calculations */ spinlock_t lock; }; @@ -398,7 +391,7 @@ enum mlx5_res_type { struct mlx5_core_rsc_common { enum mlx5_res_type res; - atomic_t refcount; + refcount_t refcount; struct completion free; }; @@ -477,6 +470,17 @@ struct mlx5_core_sriov { u16 max_vfs; }; +struct mlx5_fc_pool { + struct mlx5_core_dev *dev; + struct mutex pool_lock; /* protects pool lists */ + struct list_head fully_used; + struct list_head partially_used; + struct list_head unused; + int available_fcs; + int used_fcs; + int threshold; +}; + struct mlx5_fc_stats { spinlock_t counters_idr_lock; /* protects counters_idr */ struct idr counters_idr; @@ -488,6 +492,8 @@ struct mlx5_fc_stats { struct delayed_work work; unsigned long next_query; unsigned long sampling_interval; /* jiffies */ + u32 *bulk_query_out; + struct mlx5_fc_pool fc_pool; }; struct mlx5_events; @@ -620,6 +626,11 @@ struct mlx5e_resources { struct mlx5_sq_bfreg bfreg; }; +enum mlx5_sw_icm_type { + MLX5_SW_ICM_TYPE_STEERING, + MLX5_SW_ICM_TYPE_HEADER_MODIFY, +}; + #define MLX5_MAX_RESERVED_GIDS 8 struct mlx5_rsvd_gids { @@ -651,9 +662,14 @@ struct mlx5_clock { struct mlx5_pps pps_info; }; +struct mlx5_dm; struct mlx5_fw_tracer; struct mlx5_vxlan; struct mlx5_geneve; +struct mlx5_hv_vhca; + +#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity)) +#define 
MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) struct mlx5_core_dev { struct device *device; @@ -688,6 +704,7 @@ struct mlx5_core_dev { atomic_t num_qps; u32 issi; struct mlx5e_resources mlx5e_res; + struct mlx5_dm *dm; struct mlx5_vxlan *vxlan; struct mlx5_geneve *geneve; struct { @@ -701,6 +718,7 @@ struct mlx5_core_dev { struct mlx5_ib_clock_info *clock_info; struct mlx5_fw_tracer *tracer; u32 vsc_addr; + struct mlx5_hv_vhca *hv_vhca; }; struct mlx5_db { @@ -959,7 +977,7 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); -int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, @@ -971,7 +989,7 @@ int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); const char *mlx5_command_str(int command); -int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, int npsvs, u32 *sig_index); @@ -1070,6 +1088,10 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, size_t *offsets); struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); +int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, + u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id); +int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, + u64 length, u16 uid, phys_addr_t addr, u32 obj_id); #ifdef CONFIG_MLX5_CORE_IPOIB struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index 46b5ba029802..98e667b176ef 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -44,7 +44,6 @@ struct mlx5_eswitch_rep_data { struct mlx5_eswitch_rep { struct mlx5_eswitch_rep_data rep_data[NUM_REP_TYPES]; u16 vport; - u8 hw_id[ETH_ALEN]; u16 vlan; /* Only IB rep is using vport_index */ u16 vport_index; @@ -61,7 +60,6 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, u16 vport_num); void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type); -u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw); struct mlx5_flow_handle * mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport_num, u32 sqn); @@ -75,7 +73,14 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev); bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw); u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw, u16 vport_num); +u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw); #else /* CONFIG_MLX5_ESWITCH */ + +static inline u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw) +{ + return MLX5_ESWITCH_NONE; +} + static inline enum devlink_eswitch_encap_mode mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev) { diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 
f049af3f3cd8..724d276ea133 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -75,6 +75,7 @@ enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_SNIFFER_TX, MLX5_FLOW_NAMESPACE_EGRESS, MLX5_FLOW_NAMESPACE_RDMA_RX, + MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL, }; enum { @@ -83,6 +84,8 @@ enum { FDB_SLOW_PATH, }; +struct mlx5_pkt_reformat; +struct mlx5_modify_hdr; struct mlx5_flow_table; struct mlx5_flow_group; struct mlx5_flow_namespace; @@ -120,12 +123,17 @@ struct mlx5_flow_destination { struct { u16 num; u16 vhca_id; - u32 reformat_id; + struct mlx5_pkt_reformat *pkt_reformat; u8 flags; } vport; }; }; +struct mod_hdr_tbl { + struct mutex lock; /* protects hlist */ + DECLARE_HASHTABLE(hlist, 8); +}; + struct mlx5_flow_namespace * mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, int n); struct mlx5_flow_namespace * @@ -189,8 +197,8 @@ enum { struct mlx5_flow_act { u32 action; - u32 reformat_id; - u32 modify_id; + struct mlx5_modify_hdr *modify_hdr; + struct mlx5_pkt_reformat *pkt_reformat; uintptr_t esp_id; u32 flags; struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH]; @@ -199,8 +207,6 @@ struct mlx5_flow_act { #define MLX5_DECLARE_FLOW_ACT(name) \ struct mlx5_flow_act name = { .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\ - .reformat_id = 0, \ - .modify_id = 0, \ .flags = 0, } /* Single destination per rule. @@ -230,19 +236,18 @@ u32 mlx5_fc_id(struct mlx5_fc *counter); int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); -int mlx5_modify_header_alloc(struct mlx5_core_dev *dev, - u8 namespace, u8 num_actions, - void *modify_actions, u32 *modify_header_id); +struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev, + u8 ns_type, u8 num_actions, + void *modify_actions); void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, - u32 modify_header_id); - -int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev, - int reformat_type, - size_t size, - void *reformat_data, - enum mlx5_flow_namespace_type namespace, - u32 *packet_reformat_id); + struct mlx5_modify_hdr *modify_hdr); + +struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev, + int reformat_type, + size_t size, + void *reformat_data, + enum mlx5_flow_namespace_type ns_type); void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev, - u32 packet_reformat_id); + struct mlx5_pkt_reformat *reformat); #endif diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index b8b570c30b5e..a487b681b516 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -172,6 +172,8 @@ enum { MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725, MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726, MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727, + MLX5_CMD_OP_RELEASE_XRQ_ERROR = 0x729, + MLX5_CMD_OP_MODIFY_XRQ = 0x72a, MLX5_CMD_OP_QUERY_ESW_FUNCTIONS = 0x740, MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, @@ -280,6 +282,7 @@ enum { MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940, MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941, MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942, + MLX5_CMD_OP_SYNC_STEERING = 0xb00, MLX5_CMD_OP_FPGA_CREATE_QP = 0x960, MLX5_CMD_OP_FPGA_MODIFY_QP = 0x961, MLX5_CMD_OP_FPGA_QUERY_QP = 0x962, @@ -483,7 +486,11 @@ union mlx5_ifc_gre_key_bits { }; struct mlx5_ifc_fte_match_set_misc_bits { - u8 reserved_at_0[0x8]; + u8 gre_c_present[0x1]; + u8 reserved_auto1[0x1]; + u8 gre_k_present[0x1]; + u8 gre_s_present[0x1]; + u8 
source_vhca_port[0x4]; u8 source_sqn[0x18]; u8 source_eswitch_owner_vhca_id[0x10]; @@ -563,12 +570,38 @@ struct mlx5_ifc_fte_match_set_misc2_bits { u8 metadata_reg_a[0x20]; - u8 reserved_at_1a0[0x60]; + u8 metadata_reg_b[0x20]; + + u8 reserved_at_1c0[0x40]; }; struct mlx5_ifc_fte_match_set_misc3_bits { - u8 reserved_at_0[0x120]; + u8 inner_tcp_seq_num[0x20]; + + u8 outer_tcp_seq_num[0x20]; + + u8 inner_tcp_ack_num[0x20]; + + u8 outer_tcp_ack_num[0x20]; + + u8 reserved_at_80[0x8]; + u8 outer_vxlan_gpe_vni[0x18]; + + u8 outer_vxlan_gpe_next_protocol[0x8]; + u8 outer_vxlan_gpe_flags[0x8]; + u8 reserved_at_b0[0x10]; + + u8 icmp_header_data[0x20]; + + u8 icmpv6_header_data[0x20]; + + u8 icmp_type[0x8]; + u8 icmp_code[0x8]; + u8 icmpv6_type[0x8]; + u8 icmpv6_code[0x8]; + u8 geneve_tlv_option_0_data[0x20]; + u8 reserved_at_140[0xc0]; }; @@ -664,7 +697,15 @@ struct mlx5_ifc_flow_table_nic_cap_bits { struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; - u8 reserved_at_e00[0x7200]; + u8 reserved_at_e00[0x1200]; + + u8 sw_steering_nic_rx_action_drop_icm_address[0x40]; + + u8 sw_steering_nic_tx_action_drop_icm_address[0x40]; + + u8 sw_steering_nic_tx_action_allow_icm_address[0x40]; + + u8 reserved_at_20c0[0x5f40]; }; enum { @@ -696,7 +737,17 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits { struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress; - u8 reserved_at_800[0x7800]; + u8 reserved_at_800[0x1000]; + + u8 sw_steering_fdb_action_drop_icm_address_rx[0x40]; + + u8 sw_steering_fdb_action_drop_icm_address_tx[0x40]; + + u8 sw_steering_uplink_icm_address_rx[0x40]; + + u8 sw_steering_uplink_icm_address_tx[0x40]; + + u8 reserved_at_1900[0x6700]; }; enum { @@ -806,7 +857,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 swp_csum[0x1]; u8 swp_lso[0x1]; u8 cqe_checksum_full[0x1]; - u8 reserved_at_24[0xc]; + u8 reserved_at_24[0x5]; + u8 tunnel_stateless_ip_over_ip[0x1]; + u8 reserved_at_2a[0x6]; u8 max_vxlan_udp_ports[0x8]; u8 reserved_at_38[0x6]; u8 max_geneve_opt_len[0x1]; @@ -845,6 +898,25 @@ struct mlx5_ifc_roce_cap_bits { u8 reserved_at_100[0x700]; }; +struct mlx5_ifc_sync_steering_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0xc0]; +}; + +struct mlx5_ifc_sync_steering_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_device_mem_cap_bits { u8 memic[0x1]; u8 reserved_at_1[0x1f]; @@ -944,7 +1016,9 @@ struct mlx5_ifc_odp_cap_bits { struct mlx5_ifc_odp_per_transport_service_cap_bits xrc_odp_caps; - u8 reserved_at_100[0x700]; + struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps; + + u8 reserved_at_120[0x6E0]; }; struct mlx5_ifc_calc_op { @@ -1036,10 +1110,31 @@ enum { }; enum { + MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7, + MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8, + MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9, +}; + +enum { MLX5_UCTX_CAP_RAW_TX = 1UL << 0, MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1, }; +#define MLX5_FC_BULK_SIZE_FACTOR 128 + +enum mlx5_fc_bulk_alloc_bitmask { + MLX5_FC_BULK_128 = (1 << 0), + MLX5_FC_BULK_256 = (1 << 1), + MLX5_FC_BULK_512 = (1 << 2), + MLX5_FC_BULK_1024 = (1 << 3), + MLX5_FC_BULK_2048 = (1 << 4), + MLX5_FC_BULK_4096 = (1 << 5), + MLX5_FC_BULK_8192 = (1 << 6), + MLX5_FC_BULK_16384 = (1 << 7), +}; + +#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum)) + struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_0[0x30]; u8 
vhca_id[0x10]; @@ -1099,7 +1194,10 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 cache_line_128byte[0x1]; u8 reserved_at_165[0x4]; u8 rts2rts_qp_counters_set_id[0x1]; - u8 reserved_at_16a[0x5]; + u8 reserved_at_16a[0x2]; + u8 vnic_env_int_rq_oob[0x1]; + u8 sbcam_reg[0x1]; + u8 reserved_at_16e[0x1]; u8 qcam_reg[0x1]; u8 gid_table_size[0x10]; @@ -1228,7 +1326,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_263[0x8]; u8 log_bf_reg_size[0x5]; - u8 reserved_at_270[0xb]; + u8 reserved_at_270[0x8]; + u8 lag_tx_port_affinity[0x1]; + u8 reserved_at_279[0x2]; u8 lag_master[0x1]; u8 num_lag_ports[0x4]; @@ -1244,7 +1344,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_2e0[0x7]; u8 max_qp_mcg[0x19]; - u8 reserved_at_300[0x18]; + u8 reserved_at_300[0x10]; + u8 flow_counter_bulk_alloc[0x8]; u8 log_max_mcg[0x8]; u8 reserved_at_320[0x3]; @@ -1388,7 +1489,14 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_6c0[0x4]; u8 flex_parser_id_geneve_tlv_option_0[0x4]; - u8 reserved_at_6c8[0x28]; + u8 flex_parser_id_icmp_dw1[0x4]; + u8 flex_parser_id_icmp_dw0[0x4]; + u8 flex_parser_id_icmpv6_dw1[0x4]; + u8 flex_parser_id_icmpv6_dw0[0x4]; + u8 flex_parser_id_outer_first_mpls_over_gre[0x4]; + u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4]; + + u8 reserved_at_6e0[0x10]; u8 sf_base_id[0x10]; u8 reserved_at_700[0x80]; @@ -1853,12 +1961,28 @@ struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits { u8 port_xmit_wait[0x20]; }; -struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits { +struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits { u8 transmit_queue_high[0x20]; u8 transmit_queue_low[0x20]; - u8 reserved_at_40[0x780]; + u8 no_buffer_discard_uc_high[0x20]; + + u8 no_buffer_discard_uc_low[0x20]; + + u8 reserved_at_80[0x740]; +}; + +struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits { + u8 wred_discard_high[0x20]; + + u8 wred_discard_low[0x20]; + + u8 ecn_marked_tc_high[0x20]; + + u8 ecn_marked_tc_low[0x20]; + + u8 reserved_at_80[0x740]; }; struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { @@ -2626,6 +2750,7 @@ union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_debug_cap_bits debug_cap; struct mlx5_ifc_fpga_cap_bits fpga_cap; struct mlx5_ifc_tls_cap_bits tls_cap; + struct mlx5_ifc_device_mem_cap_bits device_mem_cap; u8 reserved_at_0[0x8000]; }; @@ -2754,7 +2879,11 @@ struct mlx5_ifc_vnic_diagnostic_statistics_bits { u8 transmit_discard_vport_down[0x40]; - u8 reserved_at_140[0xec0]; + u8 reserved_at_140[0xa0]; + + u8 internal_rq_out_of_buffer[0x20]; + + u8 reserved_at_200[0xe00]; }; struct mlx5_ifc_traffic_counter_bits { @@ -2766,7 +2895,7 @@ struct mlx5_ifc_traffic_counter_bits { struct mlx5_ifc_tisc_bits { u8 strict_lag_tx_port_affinity[0x1]; u8 tls_en[0x1]; - u8 reserved_at_1[0x2]; + u8 reserved_at_2[0x2]; u8 lag_tx_port_affinity[0x04]; u8 reserved_at_8[0x4]; @@ -2941,6 +3070,13 @@ enum { SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3, }; +enum { + ELEMENT_TYPE_CAP_MASK_TASR = 1 << 0, + ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1, + ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2, + ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3, +}; + struct mlx5_ifc_scheduling_context_bits { u8 element_type[0x8]; u8 reserved_at_8[0x18]; @@ -3218,7 +3354,11 @@ struct mlx5_ifc_esw_vport_context_bits { u8 cvlan_pcp[0x3]; u8 cvlan_id[0xc]; - u8 reserved_at_60[0x7a0]; + u8 reserved_at_60[0x720]; + + u8 sw_steering_vport_icm_address_rx[0x40]; + + u8 sw_steering_vport_icm_address_tx[0x40]; }; enum { @@ -3519,7 +3659,8 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits 
eth_3635_cntrs_grp_data_layout; struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; - struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; + struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits eth_per_tc_prio_grp_data_layout; + struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits eth_per_tc_congest_prio_grp_data_layout; struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout; struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs; @@ -4904,23 +5045,98 @@ struct mlx5_ifc_query_hca_cap_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 other_function[0x1]; + u8 reserved_at_41[0xf]; + u8 function_id[0x10]; + + u8 reserved_at_60[0x20]; }; -struct mlx5_ifc_query_flow_table_out_bits { +struct mlx5_ifc_other_hca_cap_bits { + u8 roce[0x1]; + u8 reserved_0[0x27f]; +}; + +struct mlx5_ifc_query_other_hca_cap_out_bits { u8 status[0x8]; - u8 reserved_at_8[0x18]; + u8 reserved_0[0x18]; u8 syndrome[0x20]; - u8 reserved_at_40[0x80]; + u8 reserved_1[0x40]; - u8 reserved_at_c0[0x8]; + struct mlx5_ifc_other_hca_cap_bits other_capability; +}; + +struct mlx5_ifc_query_other_hca_cap_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x10]; + u8 function_id[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_modify_other_hca_cap_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_other_hca_cap_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x10]; + u8 function_id[0x10]; + u8 field_select[0x20]; + + struct mlx5_ifc_other_hca_cap_bits other_capability; +}; + +struct mlx5_ifc_flow_table_context_bits { + u8 reformat_en[0x1]; + u8 decap_en[0x1]; + u8 sw_owner[0x1]; + u8 termination_table[0x1]; + u8 table_miss_action[0x4]; u8 level[0x8]; - u8 reserved_at_d0[0x8]; + u8 reserved_at_10[0x8]; u8 log_size[0x8]; - u8 reserved_at_e0[0x120]; + u8 reserved_at_20[0x8]; + u8 table_miss_id[0x18]; + + u8 reserved_at_40[0x8]; + u8 lag_master_next_table_id[0x18]; + + u8 reserved_at_60[0x60]; + + u8 sw_owner_icm_root_1[0x40]; + + u8 sw_owner_icm_root_0[0x40]; + +}; + +struct mlx5_ifc_query_flow_table_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x80]; + + struct mlx5_ifc_flow_table_context_bits flow_table_context; }; struct mlx5_ifc_query_flow_table_in_bits { @@ -5190,7 +5406,7 @@ struct mlx5_ifc_alloc_packet_reformat_context_out_bits { u8 reserved_at_60[0x20]; }; -enum { +enum mlx5_reformat_ctx_type { MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0, MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1, MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2, @@ -5286,7 +5502,16 @@ enum { MLX5_ACTION_IN_FIELD_OUT_DIPV4 = 0x16, MLX5_ACTION_IN_FIELD_OUT_FIRST_VID = 0x17, MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT = 0x47, + MLX5_ACTION_IN_FIELD_METADATA_REG_A = 0x49, + MLX5_ACTION_IN_FIELD_METADATA_REG_B = 0x50, MLX5_ACTION_IN_FIELD_METADATA_REG_C_0 = 0x51, + MLX5_ACTION_IN_FIELD_METADATA_REG_C_1 = 0x52, + MLX5_ACTION_IN_FIELD_METADATA_REG_C_2 = 0x53, + MLX5_ACTION_IN_FIELD_METADATA_REG_C_3 = 0x54, + MLX5_ACTION_IN_FIELD_METADATA_REG_C_4 = 0x55, + MLX5_ACTION_IN_FIELD_METADATA_REG_C_5 = 0x56, + 
MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59, + MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B, }; struct mlx5_ifc_alloc_modify_header_context_out_bits { @@ -7334,35 +7559,26 @@ struct mlx5_ifc_create_mkey_in_bits { u8 klm_pas_mtt[0][0x20]; }; +enum { + MLX5_FLOW_TABLE_TYPE_NIC_RX = 0x0, + MLX5_FLOW_TABLE_TYPE_NIC_TX = 0x1, + MLX5_FLOW_TABLE_TYPE_ESW_EGRESS_ACL = 0x2, + MLX5_FLOW_TABLE_TYPE_ESW_INGRESS_ACL = 0x3, + MLX5_FLOW_TABLE_TYPE_FDB = 0X4, + MLX5_FLOW_TABLE_TYPE_SNIFFER_RX = 0X5, + MLX5_FLOW_TABLE_TYPE_SNIFFER_TX = 0X6, +}; + struct mlx5_ifc_create_flow_table_out_bits { u8 status[0x8]; - u8 reserved_at_8[0x18]; + u8 icm_address_63_40[0x18]; u8 syndrome[0x20]; - u8 reserved_at_40[0x8]; + u8 icm_address_39_32[0x8]; u8 table_id[0x18]; - u8 reserved_at_60[0x20]; -}; - -struct mlx5_ifc_flow_table_context_bits { - u8 reformat_en[0x1]; - u8 decap_en[0x1]; - u8 reserved_at_2[0x1]; - u8 termination_table[0x1]; - u8 table_miss_action[0x4]; - u8 level[0x8]; - u8 reserved_at_10[0x8]; - u8 log_size[0x8]; - - u8 reserved_at_20[0x8]; - u8 table_miss_id[0x18]; - - u8 reserved_at_40[0x8]; - u8 lag_master_next_table_id[0x18]; - - u8 reserved_at_60[0xe0]; + u8 icm_address_31_0[0x20]; }; struct mlx5_ifc_create_flow_table_in_bits { @@ -7817,7 +8033,8 @@ struct mlx5_ifc_alloc_flow_counter_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x38]; + u8 flow_counter_bulk[0x8]; }; struct mlx5_ifc_add_vxlan_udp_dport_out_bits { @@ -9223,7 +9440,8 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout; struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; - struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; + struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits eth_per_tc_prio_grp_data_layout; + struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits eth_per_tc_congest_prio_grp_data_layout; struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping; struct mlx5_ifc_pamp_reg_bits pamp_reg; struct mlx5_ifc_paos_reg_bits paos_reg; @@ -9570,8 +9788,6 @@ struct mlx5_ifc_query_lag_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x40]; - struct mlx5_ifc_lagc_bits ctx; }; diff --git a/include/linux/mm.h b/include/linux/mm.h index 0334ca97c584..294a67b94147 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -805,6 +805,24 @@ static inline void set_compound_order(struct page *page, unsigned int order) page[1].compound_order = order; } +/* Returns the number of pages in this potentially compound page. */ +static inline unsigned long compound_nr(struct page *page) +{ + return 1UL << compound_order(page); +} + +/* Returns the number of bytes in this potentially compound page. 
*/ +static inline unsigned long page_size(struct page *page) +{ + return PAGE_SIZE << compound_order(page); +} + +/* Returns the number of bits needed for the number of bytes in a page */ +static inline unsigned int page_shift(struct page *page) +{ + return PAGE_SHIFT + compound_order(page); +} + void free_compound_page(struct page *page); #ifdef CONFIG_MMU @@ -1057,8 +1075,9 @@ static inline void put_user_page(struct page *page) put_page(page); } -void put_user_pages_dirty(struct page **pages, unsigned long npages); -void put_user_pages_dirty_lock(struct page **pages, unsigned long npages); +void put_user_pages_dirty_lock(struct page **pages, unsigned long npages, + bool make_dirty); + void put_user_pages(struct page **pages, unsigned long npages); #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) @@ -1405,7 +1424,11 @@ extern void pagefault_out_of_memory(void); extern void show_free_areas(unsigned int flags, nodemask_t *nodemask); +#ifdef CONFIG_MMU extern bool can_do_mlock(void); +#else +static inline bool can_do_mlock(void) { return false; } +#endif extern int user_shm_lock(size_t, struct user_struct *); extern void user_shm_unlock(size_t, struct user_struct *); @@ -1430,54 +1453,8 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address, void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, unsigned long start, unsigned long end); -/** - * mm_walk - callbacks for walk_page_range - * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry - * this handler should only handle pud_trans_huge() puds. - * the pmd_entry or pte_entry callbacks will be used for - * regular PUDs. - * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry - * this handler is required to be able to handle - * pmd_trans_huge() pmds. They may simply choose to - * split_huge_page() instead of handling it explicitly. - * @pte_entry: if set, called for each non-empty PTE (4th-level) entry - * @pte_hole: if set, called for each hole at all levels - * @hugetlb_entry: if set, called for each hugetlb entry - * @test_walk: caller specific callback function to determine whether - * we walk over the current vma or not. Returning 0 - * value means "do page table walk over the current vma," - * and a negative one means "abort current page table walk - * right now." 1 means "skip the current vma." 
- * @mm: mm_struct representing the target process of page table walk - * @vma: vma currently walked (NULL if walking outside vmas) - * @private: private data for callbacks' usage - * - * (see the comment on walk_page_range() for more details) - */ -struct mm_walk { - int (*pud_entry)(pud_t *pud, unsigned long addr, - unsigned long next, struct mm_walk *walk); - int (*pmd_entry)(pmd_t *pmd, unsigned long addr, - unsigned long next, struct mm_walk *walk); - int (*pte_entry)(pte_t *pte, unsigned long addr, - unsigned long next, struct mm_walk *walk); - int (*pte_hole)(unsigned long addr, unsigned long next, - struct mm_walk *walk); - int (*hugetlb_entry)(pte_t *pte, unsigned long hmask, - unsigned long addr, unsigned long next, - struct mm_walk *walk); - int (*test_walk)(unsigned long addr, unsigned long next, - struct mm_walk *walk); - struct mm_struct *mm; - struct vm_area_struct *vma; - void *private; -}; - struct mmu_notifier_range; -int walk_page_range(unsigned long addr, unsigned long end, - struct mm_walk *walk); -int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk); void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); int copy_page_range(struct mm_struct *dst, struct mm_struct *src, @@ -2351,6 +2328,8 @@ extern int install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long flags, struct page **pages); +unsigned long randomize_stack_top(unsigned long stack_top); + extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); extern unsigned long mmap_region(struct file *file, unsigned long addr, @@ -2614,6 +2593,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, #define FOLL_COW 0x4000 /* internal GUP flag */ #define FOLL_ANON 0x8000 /* don't do file mappings */ #define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */ +#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */ /* * NOTE on FOLL_LONGTERM: @@ -2891,5 +2871,12 @@ void __init setup_nr_node_ids(void); static inline void setup_nr_node_ids(void) {} #endif +extern int memcmp_pages(struct page *page1, struct page *page2); + +static inline int pages_identical(struct page *page1, struct page *page2) +{ + return !memcmp_pages(page1, page2); +} + #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 6a7a1083b6fb..5183e0d77dfa 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -25,7 +25,6 @@ struct address_space; struct mem_cgroup; -struct hmm; /* * Each physical page in the system has a struct page associated with @@ -139,6 +138,7 @@ struct page { struct { /* Second tail page of compound page */ unsigned long _compound_pad_1; /* compound_head */ unsigned long _compound_pad_2; + /* For both global and memcg */ struct list_head deferred_list; }; struct { /* Page table pages */ @@ -511,11 +511,6 @@ struct mm_struct { atomic_long_t hugetlb_usage; #endif struct work_struct async_put_work; - -#ifdef CONFIG_HMM_MIRROR - /* HMM needs to track a few things per mm */ - struct hmm *hmm; -#endif } __randomize_layout; /* diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 4a351cb7f20f..ba703384bea0 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -128,6 +128,7 @@ struct mmc_host_ops { int (*get_cd)(struct mmc_host *host); void (*enable_sdio_irq)(struct mmc_host *host, int 
enable); + /* Mandatory callback when using MMC_CAP2_SDIO_IRQ_NOTHREAD. */ void (*ack_sdio_irq)(struct mmc_host *host); /* optional callback for HC quirks */ @@ -367,6 +368,7 @@ struct mmc_host { #define MMC_CAP2_CQE (1 << 23) /* Has eMMC command queue engine */ #define MMC_CAP2_CQE_DCMD (1 << 24) /* CQE can issue a direct command */ #define MMC_CAP2_AVOID_3_3V (1 << 25) /* Host must negotiate down from 3.3V */ +#define MMC_CAP2_MERGE_CAPABLE (1 << 26) /* Host can merge a segment over the segment size */ int fixed_drv_type; /* fixed driver type for non-removable media */ @@ -396,6 +398,7 @@ struct mmc_host { unsigned int retune_paused:1; /* re-tuning is temporarily disabled */ unsigned int use_blk_mq:1; /* use blk-mq */ unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */ + unsigned int can_dma_map_merge:1; /* merging can be used */ int rescan_disable; /* disable card detection */ int rescan_entered; /* used with nonremovable devices */ @@ -493,6 +496,15 @@ void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq); void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq); +/* + * May be called from host driver's system/runtime suspend/resume callbacks, + * to know if SDIO IRQs has been claimed. + */ +static inline bool sdio_irq_claimed(struct mmc_host *host) +{ + return host->sdio_irqs > 0; +} + static inline void mmc_signal_sdio_irq(struct mmc_host *host) { host->ops->enable_sdio_irq(host, 0); diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index b6c004bd9f6a..1bd8e6a09a3c 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -42,6 +42,10 @@ enum mmu_notifier_event { #ifdef CONFIG_MMU_NOTIFIER +#ifdef CONFIG_LOCKDEP +extern struct lockdep_map __mmu_notifier_invalidate_range_start_map; +#endif + /* * The mmu notifier_mm structure is allocated and installed in * mm->mmu_notifier_mm inside the mm_take_all_locks() protected @@ -211,6 +215,19 @@ struct mmu_notifier_ops { */ void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end); + + /* + * These callbacks are used with the get/put interface to manage the + * lifetime of the mmu_notifier memory. alloc_notifier() returns a new + * notifier for use with the mm. + * + * free_notifier() is only called after the mmu_notifier has been + * fully put, calls to any ops callback are prevented and no ops + * callbacks are currently running. It is called from a SRCU callback + * and cannot sleep. 
+ */ + struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm); + void (*free_notifier)(struct mmu_notifier *mn); }; /* @@ -227,6 +244,9 @@ struct mmu_notifier_ops { struct mmu_notifier { struct hlist_node hlist; const struct mmu_notifier_ops *ops; + struct mm_struct *mm; + struct rcu_head rcu; + unsigned int users; }; static inline int mm_has_notifiers(struct mm_struct *mm) @@ -234,14 +254,27 @@ static inline int mm_has_notifiers(struct mm_struct *mm) return unlikely(mm->mmu_notifier_mm); } +struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops, + struct mm_struct *mm); +static inline struct mmu_notifier * +mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm) +{ + struct mmu_notifier *ret; + + down_write(&mm->mmap_sem); + ret = mmu_notifier_get_locked(ops, mm); + up_write(&mm->mmap_sem); + return ret; +} +void mmu_notifier_put(struct mmu_notifier *mn); +void mmu_notifier_synchronize(void); + extern int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm); extern int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm); extern void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm); -extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn, - struct mm_struct *mm); extern void __mmu_notifier_mm_destroy(struct mm_struct *mm); extern void __mmu_notifier_release(struct mm_struct *mm); extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, @@ -310,25 +343,36 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm, static inline void mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) { + might_sleep(); + + lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); if (mm_has_notifiers(range->mm)) { range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE; __mmu_notifier_invalidate_range_start(range); } + lock_map_release(&__mmu_notifier_invalidate_range_start_map); } static inline int mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range) { + int ret = 0; + + lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); if (mm_has_notifiers(range->mm)) { range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE; - return __mmu_notifier_invalidate_range_start(range); + ret = __mmu_notifier_invalidate_range_start(range); } - return 0; + lock_map_release(&__mmu_notifier_invalidate_range_start_map); + return ret; } static inline void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range) { + if (mmu_notifier_range_blockable(range)) + might_sleep(); + if (mm_has_notifiers(range->mm)) __mmu_notifier_invalidate_range_end(range, false); } @@ -482,9 +526,6 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range, set_pte_at(___mm, ___address, __ptep, ___pte); \ }) -extern void mmu_notifier_call_srcu(struct rcu_head *rcu, - void (*func)(struct rcu_head *rcu)); - #else /* CONFIG_MMU_NOTIFIER */ struct mmu_notifier_range { @@ -581,6 +622,10 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) #define pudp_huge_clear_flush_notify pudp_huge_clear_flush #define set_pte_at_notify set_pte_at +static inline void mmu_notifier_synchronize(void) +{ +} + #endif /* CONFIG_MMU_NOTIFIER */ #endif /* _LINUX_MMU_NOTIFIER_H */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 3f38c30d2f13..bda20282746b 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -235,6 +235,8 @@ enum node_stat_item { NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ NR_SHMEM_THPS, 
NR_SHMEM_PMDMAPPED, + NR_FILE_THPS, + NR_FILE_PMDMAPPED, NR_ANON_THPS, NR_UNSTABLE_NFS, /* NFS unstable pages */ NR_VMSCAN_WRITE, @@ -677,6 +679,14 @@ struct zonelist { extern struct page *mem_map; #endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +struct deferred_split { + spinlock_t split_queue_lock; + struct list_head split_queue; + unsigned long split_queue_len; +}; +#endif + /* * On NUMA machines, each NUMA node would have a pg_data_t to describe * it's memory layout. On UMA machines there is a single pglist_data which @@ -756,9 +766,7 @@ typedef struct pglist_data { #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE - spinlock_t split_queue_lock; - struct list_head split_queue; - unsigned long split_queue_len; + struct deferred_split deferred_split_queue; #endif /* Fields commonly accessed by the page reclaim scanner */ diff --git a/include/linux/module.h b/include/linux/module.h index 1455812dd325..b1a67352d2dc 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -276,6 +276,8 @@ extern typeof(name) __mod_##type##__##name##_device_table \ * files require multiple MODULE_FIRMWARE() specifiers */ #define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware) +#define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, #ns) + struct notifier_block; #ifdef CONFIG_MODULES diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 4ca8c1c845fb..249e8d9bfbcd 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -189,6 +189,9 @@ struct module; /* only needed for owner field in mtd_info */ */ struct mtd_debug_info { struct dentry *dfs_dir; + + const char *partname; + const char *partid; }; struct mtd_info { diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index cebc38b6d6f5..0c7483843a32 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -346,7 +346,7 @@ static inline unsigned int nanddev_ntargets(const struct nand_device *nand) } /** - * nanddev_neraseblocks() - Get the total number of erasablocks + * nanddev_neraseblocks() - Get the total number of eraseblocks * @nand: NAND device * * Return: the total number of eraseblocks exposed by @nand. 
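[Illustrative note, not part of the diff] The module.h hunk above adds MODULE_IMPORT_NS(), which records an "import_ns" modinfo tag so modpost can check that a module using a namespaced export has declared the namespace. A minimal sketch of the consumer side is shown here; the namespace name EXAMPLE_NS, the example_do_work() symbol, and the EXPORT_SYMBOL_NS() provider side are assumptions for illustration, only the import macro itself appears in this diff.

/* Hypothetical consumer module. It calls example_do_work(), which some other
 * module is assumed to export with EXPORT_SYMBOL_NS(example_do_work, EXAMPLE_NS).
 */
#include <linux/module.h>

extern int example_do_work(void);

static int __init example_init(void)
{
	return example_do_work();
}
module_init(example_init);

/* Without this declaration modpost flags the use of the namespaced symbol. */
MODULE_IMPORT_NS(EXAMPLE_NS);
MODULE_LICENSE("GPL");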
diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h index 01306ebe266d..d2c3cf29e0d1 100644 --- a/include/linux/mtd/sharpsl.h +++ b/include/linux/mtd/sharpsl.h @@ -5,6 +5,9 @@ * Copyright (C) 2008 Dmitry Baryshkov */ +#ifndef _MTD_SHARPSL_H +#define _MTD_SHARPSL_H + #include <linux/mtd/rawnand.h> #include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> @@ -16,3 +19,5 @@ struct sharpsl_nand_platform_data { unsigned int nr_partitions; const char *const *part_parsers; }; + +#endif /* _MTD_SHARPSL_H */ diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 9f57cdfcc93d..fc0b4b19c900 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -9,6 +9,7 @@ #include <linux/bitops.h> #include <linux/mtd/cfi.h> #include <linux/mtd/mtd.h> +#include <linux/spi/spi-mem.h> /* * Manufacturer IDs @@ -224,7 +225,6 @@ static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto) return spi_nor_get_protocol_data_nbits(proto); } -#define SPI_NOR_MAX_CMD_SIZE 8 enum spi_nor_ops { SPI_NOR_OPS_READ = 0, SPI_NOR_OPS_WRITE, @@ -237,12 +237,12 @@ enum spi_nor_option_flags { SNOR_F_USE_FSR = BIT(0), SNOR_F_HAS_SR_TB = BIT(1), SNOR_F_NO_OP_CHIP_ERASE = BIT(2), - SNOR_F_S3AN_ADDR_DEFAULT = BIT(3), - SNOR_F_READY_XSR_RDY = BIT(4), - SNOR_F_USE_CLSR = BIT(5), - SNOR_F_BROKEN_RESET = BIT(6), - SNOR_F_4B_OPCODES = BIT(7), - SNOR_F_HAS_4BAIT = BIT(8), + SNOR_F_READY_XSR_RDY = BIT(3), + SNOR_F_USE_CLSR = BIT(4), + SNOR_F_BROKEN_RESET = BIT(5), + SNOR_F_4B_OPCODES = BIT(6), + SNOR_F_HAS_4BAIT = BIT(7), + SNOR_F_HAS_LOCK = BIT(8), }; /** @@ -334,6 +334,195 @@ struct spi_nor_erase_map { }; /** + * struct spi_nor_hwcaps - Structure for describing the hardware capabilies + * supported by the SPI controller (bus master). + * @mask: the bitmask listing all the supported hw capabilies + */ +struct spi_nor_hwcaps { + u32 mask; +}; + +/* + *(Fast) Read capabilities. + * MUST be ordered by priority: the higher bit position, the higher priority. + * As a matter of performances, it is relevant to use Octal SPI protocols first, + * then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly + * (Slow) Read. + */ +#define SNOR_HWCAPS_READ_MASK GENMASK(14, 0) +#define SNOR_HWCAPS_READ BIT(0) +#define SNOR_HWCAPS_READ_FAST BIT(1) +#define SNOR_HWCAPS_READ_1_1_1_DTR BIT(2) + +#define SNOR_HWCAPS_READ_DUAL GENMASK(6, 3) +#define SNOR_HWCAPS_READ_1_1_2 BIT(3) +#define SNOR_HWCAPS_READ_1_2_2 BIT(4) +#define SNOR_HWCAPS_READ_2_2_2 BIT(5) +#define SNOR_HWCAPS_READ_1_2_2_DTR BIT(6) + +#define SNOR_HWCAPS_READ_QUAD GENMASK(10, 7) +#define SNOR_HWCAPS_READ_1_1_4 BIT(7) +#define SNOR_HWCAPS_READ_1_4_4 BIT(8) +#define SNOR_HWCAPS_READ_4_4_4 BIT(9) +#define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10) + +#define SNOR_HWCAPS_READ_OCTAL GENMASK(14, 11) +#define SNOR_HWCAPS_READ_1_1_8 BIT(11) +#define SNOR_HWCAPS_READ_1_8_8 BIT(12) +#define SNOR_HWCAPS_READ_8_8_8 BIT(13) +#define SNOR_HWCAPS_READ_1_8_8_DTR BIT(14) + +/* + * Page Program capabilities. + * MUST be ordered by priority: the higher bit position, the higher priority. + * Like (Fast) Read capabilities, Octal/Quad SPI protocols are preferred to the + * legacy SPI 1-1-1 protocol. + * Note that Dual Page Programs are not supported because there is no existing + * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory + * implements such commands. 
+ */ +#define SNOR_HWCAPS_PP_MASK GENMASK(22, 16) +#define SNOR_HWCAPS_PP BIT(16) + +#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17) +#define SNOR_HWCAPS_PP_1_1_4 BIT(17) +#define SNOR_HWCAPS_PP_1_4_4 BIT(18) +#define SNOR_HWCAPS_PP_4_4_4 BIT(19) + +#define SNOR_HWCAPS_PP_OCTAL GENMASK(22, 20) +#define SNOR_HWCAPS_PP_1_1_8 BIT(20) +#define SNOR_HWCAPS_PP_1_8_8 BIT(21) +#define SNOR_HWCAPS_PP_8_8_8 BIT(22) + +#define SNOR_HWCAPS_X_X_X (SNOR_HWCAPS_READ_2_2_2 | \ + SNOR_HWCAPS_READ_4_4_4 | \ + SNOR_HWCAPS_READ_8_8_8 | \ + SNOR_HWCAPS_PP_4_4_4 | \ + SNOR_HWCAPS_PP_8_8_8) + +#define SNOR_HWCAPS_DTR (SNOR_HWCAPS_READ_1_1_1_DTR | \ + SNOR_HWCAPS_READ_1_2_2_DTR | \ + SNOR_HWCAPS_READ_1_4_4_DTR | \ + SNOR_HWCAPS_READ_1_8_8_DTR) + +#define SNOR_HWCAPS_ALL (SNOR_HWCAPS_READ_MASK | \ + SNOR_HWCAPS_PP_MASK) + +struct spi_nor_read_command { + u8 num_mode_clocks; + u8 num_wait_states; + u8 opcode; + enum spi_nor_protocol proto; +}; + +struct spi_nor_pp_command { + u8 opcode; + enum spi_nor_protocol proto; +}; + +enum spi_nor_read_command_index { + SNOR_CMD_READ, + SNOR_CMD_READ_FAST, + SNOR_CMD_READ_1_1_1_DTR, + + /* Dual SPI */ + SNOR_CMD_READ_1_1_2, + SNOR_CMD_READ_1_2_2, + SNOR_CMD_READ_2_2_2, + SNOR_CMD_READ_1_2_2_DTR, + + /* Quad SPI */ + SNOR_CMD_READ_1_1_4, + SNOR_CMD_READ_1_4_4, + SNOR_CMD_READ_4_4_4, + SNOR_CMD_READ_1_4_4_DTR, + + /* Octal SPI */ + SNOR_CMD_READ_1_1_8, + SNOR_CMD_READ_1_8_8, + SNOR_CMD_READ_8_8_8, + SNOR_CMD_READ_1_8_8_DTR, + + SNOR_CMD_READ_MAX +}; + +enum spi_nor_pp_command_index { + SNOR_CMD_PP, + + /* Quad SPI */ + SNOR_CMD_PP_1_1_4, + SNOR_CMD_PP_1_4_4, + SNOR_CMD_PP_4_4_4, + + /* Octal SPI */ + SNOR_CMD_PP_1_1_8, + SNOR_CMD_PP_1_8_8, + SNOR_CMD_PP_8_8_8, + + SNOR_CMD_PP_MAX +}; + +/* Forward declaration that will be used in 'struct spi_nor_flash_parameter' */ +struct spi_nor; + +/** + * struct spi_nor_locking_ops - SPI NOR locking methods + * @lock: lock a region of the SPI NOR. + * @unlock: unlock a region of the SPI NOR. + * @is_locked: check if a region of the SPI NOR is completely locked + */ +struct spi_nor_locking_ops { + int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); +}; + +/** + * struct spi_nor_flash_parameter - SPI NOR flash parameters and settings. + * Includes legacy flash parameters and settings that can be overwritten + * by the spi_nor_fixups hooks, or dynamically when parsing the JESD216 + * Serial Flash Discoverable Parameters (SFDP) tables. + * + * @size: the flash memory density in bytes. + * @page_size: the page size of the SPI NOR flash memory. + * @hwcaps: describes the read and page program hardware + * capabilities. + * @reads: read capabilities ordered by priority: the higher index + * in the array, the higher priority. + * @page_programs: page program capabilities ordered by priority: the + * higher index in the array, the higher priority. + * @erase_map: the erase map parsed from the SFDP Sector Map Parameter + * Table. + * @quad_enable: enables SPI NOR quad mode. + * @set_4byte: puts the SPI NOR in 4 byte addressing mode. + * @convert_addr: converts an absolute address into something the flash + * will understand. Particularly useful when pagesize is + * not a power-of-2. + * @setup: configures the SPI NOR memory. Useful for SPI NOR + * flashes that have peculiarities to the SPI NOR standard + * e.g. different opcodes, specific address calculation, + * page size, etc. 
+ * @locking_ops: SPI NOR locking methods. + */ +struct spi_nor_flash_parameter { + u64 size; + u32 page_size; + + struct spi_nor_hwcaps hwcaps; + struct spi_nor_read_command reads[SNOR_CMD_READ_MAX]; + struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX]; + + struct spi_nor_erase_map erase_map; + + int (*quad_enable)(struct spi_nor *nor); + int (*set_4byte)(struct spi_nor *nor, bool enable); + u32 (*convert_addr)(struct spi_nor *nor, u32 addr); + int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps); + + const struct spi_nor_locking_ops *locking_ops; +}; + +/** * struct flash_info - Forward declaration of a structure used internally by * spi_nor_scan() */ @@ -344,6 +533,10 @@ struct flash_info; * @mtd: point to a mtd_info structure * @lock: the lock for the read/write/erase/lock/unlock operations * @dev: point to a spi device, or a spi nor controller device. + * @spimem: point to the spi mem device + * @bouncebuf: bounce buffer used when the buffer passed by the MTD + * layer is not DMA-able + * @bouncebuf_size: size of the bounce buffer * @info: spi-nor part JDEC MFR id and other info * @page_size: the page size of the SPI NOR * @addr_width: number of address bytes @@ -356,8 +549,6 @@ struct flash_info; * @read_proto: the SPI protocol for read operations * @write_proto: the SPI protocol for write operations * @reg_proto the SPI protocol for read_reg/write_reg/erase operations - * @cmd_buf: used by the write_reg - * @erase_map: the erase map of the SPI NOR * @prepare: [OPTIONAL] do some preparations for the * read/write/erase/lock/unlock operations * @unprepare: [OPTIONAL] do some post work after the @@ -369,19 +560,21 @@ struct flash_info; * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR * at the offset @offs; if not provided by the driver, * spi-nor will send the erase opcode via write_reg() - * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR - * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR - * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is - * @quad_enable: [FLASH-SPECIFIC] enables SPI NOR quad mode * @clear_sr_bp: [FLASH-SPECIFIC] clears the Block Protection Bits from * the SPI NOR Status Register. - * completely locked + * @params: [FLASH-SPECIFIC] SPI-NOR flash parameters and settings. + * The structure includes legacy flash parameters and + * settings that can be overwritten by the spi_nor_fixups + * hooks, or dynamically when parsing the SFDP tables. 
* @priv: the private data */ struct spi_nor { struct mtd_info mtd; struct mutex lock; struct device *dev; + struct spi_mem *spimem; + u8 *bouncebuf; + size_t bouncebuf_size; const struct flash_info *info; u32 page_size; u8 addr_width; @@ -394,8 +587,6 @@ struct spi_nor { enum spi_nor_protocol reg_proto; bool sst_write_second; u32 flags; - u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; - struct spi_nor_erase_map erase_map; int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops); void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops); @@ -408,11 +599,8 @@ struct spi_nor { size_t len, const u_char *write_buf); int (*erase)(struct spi_nor *nor, loff_t offs); - int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); - int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); - int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); - int (*quad_enable)(struct spi_nor *nor); int (*clear_sr_bp)(struct spi_nor *nor); + struct spi_nor_flash_parameter params; void *priv; }; @@ -443,7 +631,7 @@ spi_nor_region_mark_overlay(struct spi_nor_erase_region *region) static bool __maybe_unused spi_nor_has_uniform_erase(const struct spi_nor *nor) { - return !!nor->erase_map.uniform_erase_type; + return !!nor->params.erase_map.uniform_erase_type; } static inline void spi_nor_set_flash_node(struct spi_nor *nor, @@ -458,67 +646,6 @@ static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor) } /** - * struct spi_nor_hwcaps - Structure for describing the hardware capabilies - * supported by the SPI controller (bus master). - * @mask: the bitmask listing all the supported hw capabilies - */ -struct spi_nor_hwcaps { - u32 mask; -}; - -/* - *(Fast) Read capabilities. - * MUST be ordered by priority: the higher bit position, the higher priority. - * As a matter of performances, it is relevant to use Octal SPI protocols first, - * then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly - * (Slow) Read. - */ -#define SNOR_HWCAPS_READ_MASK GENMASK(14, 0) -#define SNOR_HWCAPS_READ BIT(0) -#define SNOR_HWCAPS_READ_FAST BIT(1) -#define SNOR_HWCAPS_READ_1_1_1_DTR BIT(2) - -#define SNOR_HWCAPS_READ_DUAL GENMASK(6, 3) -#define SNOR_HWCAPS_READ_1_1_2 BIT(3) -#define SNOR_HWCAPS_READ_1_2_2 BIT(4) -#define SNOR_HWCAPS_READ_2_2_2 BIT(5) -#define SNOR_HWCAPS_READ_1_2_2_DTR BIT(6) - -#define SNOR_HWCAPS_READ_QUAD GENMASK(10, 7) -#define SNOR_HWCAPS_READ_1_1_4 BIT(7) -#define SNOR_HWCAPS_READ_1_4_4 BIT(8) -#define SNOR_HWCAPS_READ_4_4_4 BIT(9) -#define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10) - -#define SNOR_HWCAPS_READ_OCTAL GENMASK(14, 11) -#define SNOR_HWCAPS_READ_1_1_8 BIT(11) -#define SNOR_HWCAPS_READ_1_8_8 BIT(12) -#define SNOR_HWCAPS_READ_8_8_8 BIT(13) -#define SNOR_HWCAPS_READ_1_8_8_DTR BIT(14) - -/* - * Page Program capabilities. - * MUST be ordered by priority: the higher bit position, the higher priority. - * Like (Fast) Read capabilities, Octal/Quad SPI protocols are preferred to the - * legacy SPI 1-1-1 protocol. - * Note that Dual Page Programs are not supported because there is no existing - * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory - * implements such commands. 
- */ -#define SNOR_HWCAPS_PP_MASK GENMASK(22, 16) -#define SNOR_HWCAPS_PP BIT(16) - -#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17) -#define SNOR_HWCAPS_PP_1_1_4 BIT(17) -#define SNOR_HWCAPS_PP_1_4_4 BIT(18) -#define SNOR_HWCAPS_PP_4_4_4 BIT(19) - -#define SNOR_HWCAPS_PP_OCTAL GENMASK(22, 20) -#define SNOR_HWCAPS_PP_1_1_8 BIT(20) -#define SNOR_HWCAPS_PP_1_8_8 BIT(21) -#define SNOR_HWCAPS_PP_8_8_8 BIT(22) - -/** * spi_nor_scan() - scan the SPI NOR * @nor: the spi_nor structure * @name: the chip type name diff --git a/include/linux/mtd/super.h b/include/linux/mtd/super.h index 056c9932c723..3608a6c36fac 100644 --- a/include/linux/mtd/super.h +++ b/include/linux/mtd/super.h @@ -14,9 +14,9 @@ #include <linux/fs.h> #include <linux/mount.h> -extern struct dentry *mount_mtd(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data, - int (*fill_super)(struct super_block *, void *, int)); +extern int get_tree_mtd(struct fs_context *fc, + int (*fill_super)(struct super_block *sb, + struct fs_context *fc)); extern void kill_mtd_super(struct super_block *sb); diff --git a/include/linux/namei.h b/include/linux/namei.h index 9138b4471dbf..397a08ade6a2 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -16,39 +16,28 @@ enum { MAX_NESTED_LINKS = 8 }; */ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; -/* - * The bitmask for a lookup event: - * - follow links at the end - * - require a directory - * - ending slashes ok even for nonexistent files - * - internal "there are more path components" flag - * - dentry cache is untrusted; force a real lookup - * - suppress terminal automount - * - skip revalidation - * - don't fetch xattrs on audit_inode - */ -#define LOOKUP_FOLLOW 0x0001 -#define LOOKUP_DIRECTORY 0x0002 -#define LOOKUP_AUTOMOUNT 0x0004 - +/* pathwalk mode */ +#define LOOKUP_FOLLOW 0x0001 /* follow links at the end */ +#define LOOKUP_DIRECTORY 0x0002 /* require a directory */ +#define LOOKUP_AUTOMOUNT 0x0004 /* force terminal automount */ +#define LOOKUP_EMPTY 0x4000 /* accept empty path [user_... only] */ +#define LOOKUP_DOWN 0x8000 /* follow mounts in the starting point */ + +#define LOOKUP_REVAL 0x0020 /* tell ->d_revalidate() to trust no cache */ +#define LOOKUP_RCU 0x0040 /* RCU pathwalk mode; semi-internal */ + +/* These tell filesystem methods that we are dealing with the final component... */ +#define LOOKUP_OPEN 0x0100 /* ... in open */ +#define LOOKUP_CREATE 0x0200 /* ... in object creation */ +#define LOOKUP_EXCL 0x0400 /* ... in exclusive creation */ +#define LOOKUP_RENAME_TARGET 0x0800 /* ... 
in destination of rename() */ + +/* internal use only */ #define LOOKUP_PARENT 0x0010 -#define LOOKUP_REVAL 0x0020 -#define LOOKUP_RCU 0x0040 #define LOOKUP_NO_REVAL 0x0080 -#define LOOKUP_NO_EVAL 0x0100 - -/* - * Intent data - */ -#define LOOKUP_OPEN 0x0100 -#define LOOKUP_CREATE 0x0200 -#define LOOKUP_EXCL 0x0400 -#define LOOKUP_RENAME_TARGET 0x0800 - #define LOOKUP_JUMPED 0x1000 #define LOOKUP_ROOT 0x2000 -#define LOOKUP_EMPTY 0x4000 -#define LOOKUP_DOWN 0x8000 +#define LOOKUP_ROOT_GRABBED 0x0008 extern int path_pts(struct path *path); @@ -60,22 +49,6 @@ static inline int user_path_at(int dfd, const char __user *name, unsigned flags, return user_path_at_empty(dfd, name, flags, path, NULL); } -static inline int user_path(const char __user *name, struct path *path) -{ - return user_path_at_empty(AT_FDCWD, name, LOOKUP_FOLLOW, path, NULL); -} - -static inline int user_lpath(const char __user *name, struct path *path) -{ - return user_path_at_empty(AT_FDCWD, name, 0, path, NULL); -} - -static inline int user_path_dir(const char __user *name, struct path *path) -{ - return user_path_at_empty(AT_FDCWD, name, - LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path, NULL); -} - extern int kern_path(const char *, unsigned, struct path *); extern struct dentry *kern_path_create(int, const char *, struct path *, unsigned int); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 88292953aa6f..9eda1c31d1f7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -332,6 +332,8 @@ struct napi_struct { struct net_device *dev; struct gro_list gro_hash[GRO_HASH_BUCKETS]; struct sk_buff *skb; + struct list_head rx_list; /* Pending GRO_NORMAL skbs */ + int rx_count; /* length of rx_list */ struct hrtimer timer; struct list_head dev_list; struct hlist_node napi_hash_node; @@ -845,6 +847,7 @@ enum tc_setup_type { TC_SETUP_QDISC_ETF, TC_SETUP_ROOT_QDISC, TC_SETUP_QDISC_GRED, + TC_SETUP_QDISC_TAPRIO, }; /* These structures hold the attributes of bpf state that are being passed @@ -899,6 +902,10 @@ struct netdev_bpf { }; }; +/* Flags for ndo_xsk_wakeup. */ +#define XDP_WAKEUP_RX (1 << 0) +#define XDP_WAKEUP_TX (1 << 1) + #ifdef CONFIG_XFRM_OFFLOAD struct xfrmdev_ops { int (*xdo_dev_state_add) (struct xfrm_state *x); @@ -1225,6 +1232,12 @@ struct tlsdev_ops; * that got dropped are freed/returned via xdp_return_frame(). * Returns negative number, means general error invoking ndo, meaning * no frames were xmit'ed and core-caller will free all frames. + * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); + * This function is used to wake up the softirq, ksoftirqd or kthread + * responsible for sending and/or receiving packets on a specific + * queue id bound to an AF_XDP socket. The flags field specifies if + * only RX, only Tx, or both should be woken up using the flags + * XDP_WAKEUP_RX and XDP_WAKEUP_TX. * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev); * Get devlink port instance associated with a given netdev. 
* Called with a reference on the netdevice and devlink locks only, @@ -1424,8 +1437,8 @@ struct net_device_ops { int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp, u32 flags); - int (*ndo_xsk_async_xmit)(struct net_device *dev, - u32 queue_id); + int (*ndo_xsk_wakeup)(struct net_device *dev, + u32 queue_id, u32 flags); struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev); }; @@ -3270,7 +3283,7 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, */ smp_mb(); - if (dql_avail(&dev_queue->dql) < 0) + if (unlikely(dql_avail(&dev_queue->dql) < 0)) return; if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) @@ -4239,6 +4252,7 @@ extern int dev_weight_rx_bias; extern int dev_weight_tx_bias; extern int dev_rx_weight; extern int dev_tx_weight; +extern int gro_normal_batch; bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 049aeb40fa35..77ebb61faf48 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -15,7 +15,6 @@ #include <linux/netdevice.h> #include <net/net_namespace.h> -#ifdef CONFIG_NETFILTER static inline int NF_DROP_GETERR(int verdict) { return -(verdict >> NF_VERDICT_QBITS); @@ -118,6 +117,7 @@ struct nf_hook_entries { */ }; +#ifdef CONFIG_NETFILTER static inline struct nf_hook_ops **nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e) { unsigned int n = e->num_hook_entries; @@ -422,7 +422,7 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) } #endif /*CONFIG_NETFILTER*/ -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) +#if IS_ENABLED(CONFIG_NF_CONNTRACK) #include <linux/netfilter/nf_conntrack_zones_common.h> extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu; diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 12ad9b1853b4..9bc255a8461b 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -452,10 +452,240 @@ bitmap_bytes(u32 a, u32 b) return 4 * ((((b - a + 8) / 8) + 3) / 4); } -#include <linux/netfilter/ipset/ip_set_timeout.h> -#include <linux/netfilter/ipset/ip_set_comment.h> -#include <linux/netfilter/ipset/ip_set_counter.h> -#include <linux/netfilter/ipset/ip_set_skbinfo.h> +/* How often should the gc be run by default */ +#define IPSET_GC_TIME (3 * 60) + +/* Timeout period depending on the timeout value of the given set */ +#define IPSET_GC_PERIOD(timeout) \ + ((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1) + +/* Entry is set with no timeout value */ +#define IPSET_ELEM_PERMANENT 0 + +/* Set is defined with timeout support: timeout value may be 0 */ +#define IPSET_NO_TIMEOUT UINT_MAX + +/* Max timeout value, see msecs_to_jiffies() in jiffies.h */ +#define IPSET_MAX_TIMEOUT (UINT_MAX >> 1)/MSEC_PER_SEC + +#define ip_set_adt_opt_timeout(opt, set) \ +((opt)->ext.timeout != IPSET_NO_TIMEOUT ? 
(opt)->ext.timeout : (set)->timeout) + +static inline unsigned int +ip_set_timeout_uget(struct nlattr *tb) +{ + unsigned int timeout = ip_set_get_h32(tb); + + /* Normalize to fit into jiffies */ + if (timeout > IPSET_MAX_TIMEOUT) + timeout = IPSET_MAX_TIMEOUT; + + return timeout; +} + +static inline bool +ip_set_timeout_expired(const unsigned long *t) +{ + return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t); +} + +static inline void +ip_set_timeout_set(unsigned long *timeout, u32 value) +{ + unsigned long t; + + if (!value) { + *timeout = IPSET_ELEM_PERMANENT; + return; + } + + t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies; + if (t == IPSET_ELEM_PERMANENT) + /* Bingo! :-) */ + t--; + *timeout = t; +} + +static inline u32 +ip_set_timeout_get(const unsigned long *timeout) +{ + u32 t; + + if (*timeout == IPSET_ELEM_PERMANENT) + return 0; + + t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; + /* Zero value in userspace means no timeout */ + return t == 0 ? 1 : t; +} + +static inline char* +ip_set_comment_uget(struct nlattr *tb) +{ + return nla_data(tb); +} + +/* Called from uadd only, protected by the set spinlock. + * The kadt functions don't use the comment extensions in any way. + */ +static inline void +ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment, + const struct ip_set_ext *ext) +{ + struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1); + size_t len = ext->comment ? strlen(ext->comment) : 0; + + if (unlikely(c)) { + set->ext_size -= sizeof(*c) + strlen(c->str) + 1; + kfree_rcu(c, rcu); + rcu_assign_pointer(comment->c, NULL); + } + if (!len) + return; + if (unlikely(len > IPSET_MAX_COMMENT_SIZE)) + len = IPSET_MAX_COMMENT_SIZE; + c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC); + if (unlikely(!c)) + return; + strlcpy(c->str, ext->comment, len + 1); + set->ext_size += sizeof(*c) + strlen(c->str) + 1; + rcu_assign_pointer(comment->c, c); +} + +/* Used only when dumping a set, protected by rcu_read_lock() */ +static inline int +ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment) +{ + struct ip_set_comment_rcu *c = rcu_dereference(comment->c); + + if (!c) + return 0; + return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str); +} + +/* Called from uadd/udel, flush or the garbage collectors protected + * by the set spinlock. + * Called when the set is destroyed and when there can't be any user + * of the set data anymore. 
+ */ +static inline void +ip_set_comment_free(struct ip_set *set, struct ip_set_comment *comment) +{ + struct ip_set_comment_rcu *c; + + c = rcu_dereference_protected(comment->c, 1); + if (unlikely(!c)) + return; + set->ext_size -= sizeof(*c) + strlen(c->str) + 1; + kfree_rcu(c, rcu); + rcu_assign_pointer(comment->c, NULL); +} + +static inline void +ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter) +{ + atomic64_add((long long)bytes, &(counter)->bytes); +} + +static inline void +ip_set_add_packets(u64 packets, struct ip_set_counter *counter) +{ + atomic64_add((long long)packets, &(counter)->packets); +} + +static inline u64 +ip_set_get_bytes(const struct ip_set_counter *counter) +{ + return (u64)atomic64_read(&(counter)->bytes); +} + +static inline u64 +ip_set_get_packets(const struct ip_set_counter *counter) +{ + return (u64)atomic64_read(&(counter)->packets); +} + +static inline bool +ip_set_match_counter(u64 counter, u64 match, u8 op) +{ + switch (op) { + case IPSET_COUNTER_NONE: + return true; + case IPSET_COUNTER_EQ: + return counter == match; + case IPSET_COUNTER_NE: + return counter != match; + case IPSET_COUNTER_LT: + return counter < match; + case IPSET_COUNTER_GT: + return counter > match; + } + return false; +} + +static inline void +ip_set_update_counter(struct ip_set_counter *counter, + const struct ip_set_ext *ext, u32 flags) +{ + if (ext->packets != ULLONG_MAX && + !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { + ip_set_add_bytes(ext->bytes, counter); + ip_set_add_packets(ext->packets, counter); + } +} + +static inline bool +ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter) +{ + return nla_put_net64(skb, IPSET_ATTR_BYTES, + cpu_to_be64(ip_set_get_bytes(counter)), + IPSET_ATTR_PAD) || + nla_put_net64(skb, IPSET_ATTR_PACKETS, + cpu_to_be64(ip_set_get_packets(counter)), + IPSET_ATTR_PAD); +} + +static inline void +ip_set_init_counter(struct ip_set_counter *counter, + const struct ip_set_ext *ext) +{ + if (ext->bytes != ULLONG_MAX) + atomic64_set(&(counter)->bytes, (long long)(ext->bytes)); + if (ext->packets != ULLONG_MAX) + atomic64_set(&(counter)->packets, (long long)(ext->packets)); +} + +static inline void +ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo, + const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags) +{ + mext->skbinfo = *skbinfo; +} + +static inline bool +ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo) +{ + /* Send nonzero parameters only */ + return ((skbinfo->skbmark || skbinfo->skbmarkmask) && + nla_put_net64(skb, IPSET_ATTR_SKBMARK, + cpu_to_be64((u64)skbinfo->skbmark << 32 | + skbinfo->skbmarkmask), + IPSET_ATTR_PAD)) || + (skbinfo->skbprio && + nla_put_net32(skb, IPSET_ATTR_SKBPRIO, + cpu_to_be32(skbinfo->skbprio))) || + (skbinfo->skbqueue && + nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, + cpu_to_be16(skbinfo->skbqueue))); +} + +static inline void +ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo, + const struct ip_set_ext *ext) +{ + *skbinfo = ext->skbinfo; +} #define IP_SET_INIT_KEXT(skb, opt, set) \ { .bytes = (skb)->len, .packets = 1, \ diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h deleted file mode 100644 index 0b894d81bbf2..000000000000 --- a/include/linux/netfilter/ipset/ip_set_comment.h +++ /dev/null @@ -1,73 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef _IP_SET_COMMENT_H -#define _IP_SET_COMMENT_H - -/* Copyright (C) 2013 Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa> - */ - -#ifdef 
__KERNEL__ - -static inline char* -ip_set_comment_uget(struct nlattr *tb) -{ - return nla_data(tb); -} - -/* Called from uadd only, protected by the set spinlock. - * The kadt functions don't use the comment extensions in any way. - */ -static inline void -ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment, - const struct ip_set_ext *ext) -{ - struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1); - size_t len = ext->comment ? strlen(ext->comment) : 0; - - if (unlikely(c)) { - set->ext_size -= sizeof(*c) + strlen(c->str) + 1; - kfree_rcu(c, rcu); - rcu_assign_pointer(comment->c, NULL); - } - if (!len) - return; - if (unlikely(len > IPSET_MAX_COMMENT_SIZE)) - len = IPSET_MAX_COMMENT_SIZE; - c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC); - if (unlikely(!c)) - return; - strlcpy(c->str, ext->comment, len + 1); - set->ext_size += sizeof(*c) + strlen(c->str) + 1; - rcu_assign_pointer(comment->c, c); -} - -/* Used only when dumping a set, protected by rcu_read_lock() */ -static inline int -ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment) -{ - struct ip_set_comment_rcu *c = rcu_dereference(comment->c); - - if (!c) - return 0; - return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str); -} - -/* Called from uadd/udel, flush or the garbage collectors protected - * by the set spinlock. - * Called when the set is destroyed and when there can't be any user - * of the set data anymore. - */ -static inline void -ip_set_comment_free(struct ip_set *set, struct ip_set_comment *comment) -{ - struct ip_set_comment_rcu *c; - - c = rcu_dereference_protected(comment->c, 1); - if (unlikely(!c)) - return; - set->ext_size -= sizeof(*c) + strlen(c->str) + 1; - kfree_rcu(c, rcu); - rcu_assign_pointer(comment->c, NULL); -} - -#endif -#endif diff --git a/include/linux/netfilter/ipset/ip_set_counter.h b/include/linux/netfilter/ipset/ip_set_counter.h deleted file mode 100644 index 3400958c07be..000000000000 --- a/include/linux/netfilter/ipset/ip_set_counter.h +++ /dev/null @@ -1,84 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef _IP_SET_COUNTER_H -#define _IP_SET_COUNTER_H - -/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@netfilter.org> */ - -#ifdef __KERNEL__ - -static inline void -ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter) -{ - atomic64_add((long long)bytes, &(counter)->bytes); -} - -static inline void -ip_set_add_packets(u64 packets, struct ip_set_counter *counter) -{ - atomic64_add((long long)packets, &(counter)->packets); -} - -static inline u64 -ip_set_get_bytes(const struct ip_set_counter *counter) -{ - return (u64)atomic64_read(&(counter)->bytes); -} - -static inline u64 -ip_set_get_packets(const struct ip_set_counter *counter) -{ - return (u64)atomic64_read(&(counter)->packets); -} - -static inline bool -ip_set_match_counter(u64 counter, u64 match, u8 op) -{ - switch (op) { - case IPSET_COUNTER_NONE: - return true; - case IPSET_COUNTER_EQ: - return counter == match; - case IPSET_COUNTER_NE: - return counter != match; - case IPSET_COUNTER_LT: - return counter < match; - case IPSET_COUNTER_GT: - return counter > match; - } - return false; -} - -static inline void -ip_set_update_counter(struct ip_set_counter *counter, - const struct ip_set_ext *ext, u32 flags) -{ - if (ext->packets != ULLONG_MAX && - !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { - ip_set_add_bytes(ext->bytes, counter); - ip_set_add_packets(ext->packets, counter); - } -} - -static inline bool -ip_set_put_counter(struct sk_buff *skb, const struct 
ip_set_counter *counter) -{ - return nla_put_net64(skb, IPSET_ATTR_BYTES, - cpu_to_be64(ip_set_get_bytes(counter)), - IPSET_ATTR_PAD) || - nla_put_net64(skb, IPSET_ATTR_PACKETS, - cpu_to_be64(ip_set_get_packets(counter)), - IPSET_ATTR_PAD); -} - -static inline void -ip_set_init_counter(struct ip_set_counter *counter, - const struct ip_set_ext *ext) -{ - if (ext->bytes != ULLONG_MAX) - atomic64_set(&(counter)->bytes, (long long)(ext->bytes)); - if (ext->packets != ULLONG_MAX) - atomic64_set(&(counter)->packets, (long long)(ext->packets)); -} - -#endif /* __KERNEL__ */ -#endif /* _IP_SET_COUNTER_H */ diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h index ac6a11d38a19..d74cd112b88a 100644 --- a/include/linux/netfilter/ipset/ip_set_getport.h +++ b/include/linux/netfilter/ipset/ip_set_getport.h @@ -2,10 +2,14 @@ #ifndef _IP_SET_GETPORT_H #define _IP_SET_GETPORT_H +#include <linux/skbuff.h> +#include <linux/types.h> +#include <uapi/linux/in.h> + extern bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src, __be16 *port, u8 *proto); -#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) +#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) extern bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src, __be16 *port, u8 *proto); #else diff --git a/include/linux/netfilter/ipset/ip_set_skbinfo.h b/include/linux/netfilter/ipset/ip_set_skbinfo.h deleted file mode 100644 index 3a2df02dbd55..000000000000 --- a/include/linux/netfilter/ipset/ip_set_skbinfo.h +++ /dev/null @@ -1,42 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef _IP_SET_SKBINFO_H -#define _IP_SET_SKBINFO_H - -/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@netfilter.org> */ - -#ifdef __KERNEL__ - -static inline void -ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo, - const struct ip_set_ext *ext, - struct ip_set_ext *mext, u32 flags) -{ - mext->skbinfo = *skbinfo; -} - -static inline bool -ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo) -{ - /* Send nonzero parameters only */ - return ((skbinfo->skbmark || skbinfo->skbmarkmask) && - nla_put_net64(skb, IPSET_ATTR_SKBMARK, - cpu_to_be64((u64)skbinfo->skbmark << 32 | - skbinfo->skbmarkmask), - IPSET_ATTR_PAD)) || - (skbinfo->skbprio && - nla_put_net32(skb, IPSET_ATTR_SKBPRIO, - cpu_to_be32(skbinfo->skbprio))) || - (skbinfo->skbqueue && - nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, - cpu_to_be16(skbinfo->skbqueue))); -} - -static inline void -ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo, - const struct ip_set_ext *ext) -{ - *skbinfo = ext->skbinfo; -} - -#endif /* __KERNEL__ */ -#endif /* _IP_SET_SKBINFO_H */ diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h deleted file mode 100644 index 2be60e379ecf..000000000000 --- a/include/linux/netfilter/ipset/ip_set_timeout.h +++ /dev/null @@ -1,77 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef _IP_SET_TIMEOUT_H -#define _IP_SET_TIMEOUT_H - -/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */ - -#ifdef __KERNEL__ - -/* How often should the gc be run by default */ -#define IPSET_GC_TIME (3 * 60) - -/* Timeout period depending on the timeout value of the given set */ -#define IPSET_GC_PERIOD(timeout) \ - ((timeout/3) ? 
min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1) - -/* Entry is set with no timeout value */ -#define IPSET_ELEM_PERMANENT 0 - -/* Set is defined with timeout support: timeout value may be 0 */ -#define IPSET_NO_TIMEOUT UINT_MAX - -/* Max timeout value, see msecs_to_jiffies() in jiffies.h */ -#define IPSET_MAX_TIMEOUT (UINT_MAX >> 1)/MSEC_PER_SEC - -#define ip_set_adt_opt_timeout(opt, set) \ -((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout) - -static inline unsigned int -ip_set_timeout_uget(struct nlattr *tb) -{ - unsigned int timeout = ip_set_get_h32(tb); - - /* Normalize to fit into jiffies */ - if (timeout > IPSET_MAX_TIMEOUT) - timeout = IPSET_MAX_TIMEOUT; - - return timeout; -} - -static inline bool -ip_set_timeout_expired(const unsigned long *t) -{ - return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t); -} - -static inline void -ip_set_timeout_set(unsigned long *timeout, u32 value) -{ - unsigned long t; - - if (!value) { - *timeout = IPSET_ELEM_PERMANENT; - return; - } - - t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies; - if (t == IPSET_ELEM_PERMANENT) - /* Bingo! :-) */ - t--; - *timeout = t; -} - -static inline u32 -ip_set_timeout_get(const unsigned long *timeout) -{ - u32 t; - - if (*timeout == IPSET_ELEM_PERMANENT) - return 0; - - t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; - /* Zero value in userspace means no timeout */ - return t == 0 ? 1 : t; -} - -#endif /* __KERNEL__ */ -#endif /* _IP_SET_TIMEOUT_H */ diff --git a/include/linux/netfilter/nf_conntrack_amanda.h b/include/linux/netfilter/nf_conntrack_amanda.h index 34345e543ba2..6f0ac896fcc9 100644 --- a/include/linux/netfilter/nf_conntrack_amanda.h +++ b/include/linux/netfilter/nf_conntrack_amanda.h @@ -3,6 +3,10 @@ #define _NF_CONNTRACK_AMANDA_H /* AMANDA tracking. 
*/ +#include <linux/netfilter.h> +#include <linux/skbuff.h> +#include <net/netfilter/nf_conntrack_expect.h> + extern unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb, enum ip_conntrack_info ctinfo, unsigned int protoff, diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index e142b2b5f1ea..1db83c931d9c 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -2,6 +2,7 @@ #ifndef _NF_CONNTRACK_COMMON_H #define _NF_CONNTRACK_COMMON_H +#include <linux/atomic.h> #include <uapi/linux/netfilter/nf_conntrack_common.h> struct ip_conntrack_stat { @@ -19,4 +20,23 @@ struct ip_conntrack_stat { unsigned int search_restart; }; +#define NFCT_INFOMASK 7UL +#define NFCT_PTRMASK ~(NFCT_INFOMASK) + +struct nf_conntrack { + atomic_t use; +}; + +void nf_conntrack_destroy(struct nf_conntrack *nfct); +static inline void nf_conntrack_put(struct nf_conntrack *nfct) +{ + if (nfct && atomic_dec_and_test(&nfct->use)) + nf_conntrack_destroy(nfct); +} +static inline void nf_conntrack_get(struct nf_conntrack *nfct) +{ + if (nfct) + atomic_inc(&nfct->use); +} + #endif /* _NF_CONNTRACK_COMMON_H */ diff --git a/include/linux/netfilter/nf_conntrack_dccp.h b/include/linux/netfilter/nf_conntrack_dccp.h index ace0f952d50f..c509ed76e714 100644 --- a/include/linux/netfilter/nf_conntrack_dccp.h +++ b/include/linux/netfilter/nf_conntrack_dccp.h @@ -25,7 +25,6 @@ enum ct_dccp_roles { }; #define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1) -#ifdef __KERNEL__ #include <linux/netfilter/nf_conntrack_tuple_common.h> struct nf_ct_dccp { @@ -36,6 +35,4 @@ struct nf_ct_dccp { u_int64_t handshake_seq; }; -#endif /* __KERNEL__ */ - #endif /* _NF_CONNTRACK_DCCP_H */ diff --git a/include/linux/netfilter/nf_conntrack_ftp.h b/include/linux/netfilter/nf_conntrack_ftp.h index 73a296dfd019..0e38302820b9 100644 --- a/include/linux/netfilter/nf_conntrack_ftp.h +++ b/include/linux/netfilter/nf_conntrack_ftp.h @@ -2,8 +2,12 @@ #ifndef _NF_CONNTRACK_FTP_H #define _NF_CONNTRACK_FTP_H +#include <linux/netfilter.h> +#include <linux/skbuff.h> +#include <linux/types.h> +#include <net/netfilter/nf_conntrack_expect.h> #include <uapi/linux/netfilter/nf_conntrack_ftp.h> - +#include <uapi/linux/netfilter/nf_conntrack_tuple_common.h> #define FTP_PORT 21 @@ -20,8 +24,6 @@ struct nf_ct_ftp_master { u_int16_t flags[IP_CT_DIR_MAX]; }; -struct nf_conntrack_expect; - /* For NAT to hook in when we find a packet which describes what other * connection we should expect. 
*/ extern unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb, diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h index f76ed373a2a5..4561ec0fcea4 100644 --- a/include/linux/netfilter/nf_conntrack_h323.h +++ b/include/linux/netfilter/nf_conntrack_h323.h @@ -2,9 +2,12 @@ #ifndef _NF_CONNTRACK_H323_H #define _NF_CONNTRACK_H323_H -#ifdef __KERNEL__ - +#include <linux/netfilter.h> +#include <linux/skbuff.h> +#include <linux/types.h> #include <linux/netfilter/nf_conntrack_h323_asn1.h> +#include <net/netfilter/nf_conntrack_expect.h> +#include <uapi/linux/netfilter/nf_conntrack_tuple_common.h> #define RAS_PORT 1719 #define Q931_PORT 1720 @@ -28,8 +31,6 @@ struct nf_ct_h323_master { }; }; -struct nf_conn; - int get_h225_addr(struct nf_conn *ct, unsigned char *data, TransportAddress *taddr, union nf_inet_addr *addr, __be16 *port); @@ -94,5 +95,3 @@ extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct, struct nf_conntrack_expect *exp); #endif - -#endif diff --git a/include/linux/netfilter/nf_conntrack_h323_asn1.h b/include/linux/netfilter/nf_conntrack_h323_asn1.h index 19df78341fb3..bd6797f823b2 100644 --- a/include/linux/netfilter/nf_conntrack_h323_asn1.h +++ b/include/linux/netfilter/nf_conntrack_h323_asn1.h @@ -37,6 +37,8 @@ /***************************************************************************** * H.323 Types ****************************************************************************/ + +#include <linux/types.h> #include <linux/netfilter/nf_conntrack_h323_types.h> typedef struct { diff --git a/include/linux/netfilter/nf_conntrack_irc.h b/include/linux/netfilter/nf_conntrack_irc.h index 00c2b74206e1..d02255f721e1 100644 --- a/include/linux/netfilter/nf_conntrack_irc.h +++ b/include/linux/netfilter/nf_conntrack_irc.h @@ -2,7 +2,9 @@ #ifndef _NF_CONNTRACK_IRC_H #define _NF_CONNTRACK_IRC_H -#ifdef __KERNEL__ +#include <linux/netfilter.h> +#include <linux/skbuff.h> +#include <net/netfilter/nf_conntrack_expect.h> #define IRC_PORT 6667 @@ -13,5 +15,4 @@ extern unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb, unsigned int matchlen, struct nf_conntrack_expect *exp); -#endif /* __KERNEL__ */ #endif /* _NF_CONNTRACK_IRC_H */ diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h index 833a5b2255ea..fcc409de31a4 100644 --- a/include/linux/netfilter/nf_conntrack_pptp.h +++ b/include/linux/netfilter/nf_conntrack_pptp.h @@ -3,7 +3,12 @@ #ifndef _NF_CONNTRACK_PPTP_H #define _NF_CONNTRACK_PPTP_H +#include <linux/netfilter.h> +#include <linux/skbuff.h> +#include <linux/types.h> #include <linux/netfilter/nf_conntrack_common.h> +#include <net/netfilter/nf_conntrack_expect.h> +#include <uapi/linux/netfilter/nf_conntrack_tuple_common.h> extern const char *const pptp_msg_name[]; @@ -45,8 +50,6 @@ struct nf_nat_pptp { __be16 pac_call_id; /* NAT'ed PAC call id */ }; -#ifdef __KERNEL__ - #define PPTP_CONTROL_PORT 1723 #define PPTP_PACKET_CONTROL 1 @@ -297,10 +300,6 @@ union pptp_ctrl_union { struct PptpSetLinkInfo setlink; }; -/* crap needed for nf_conntrack_compat.h */ -struct nf_conn; -struct nf_conntrack_expect; - extern int (*nf_nat_pptp_hook_outbound)(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, @@ -323,5 +322,4 @@ extern void (*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct, struct nf_conntrack_expect *exp); -#endif /* __KERNEL__ */ #endif /* _NF_CONNTRACK_PPTP_H */ diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h 
b/include/linux/netfilter/nf_conntrack_proto_gre.h index 25f9a770fb84..f33aa6021364 100644 --- a/include/linux/netfilter/nf_conntrack_proto_gre.h +++ b/include/linux/netfilter/nf_conntrack_proto_gre.h @@ -10,7 +10,6 @@ struct nf_ct_gre { unsigned int timeout; }; -#ifdef __KERNEL__ #include <net/netfilter/nf_conntrack_tuple.h> struct nf_conn; @@ -32,5 +31,4 @@ void nf_ct_gre_keymap_destroy(struct nf_conn *ct); bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, struct net *net, struct nf_conntrack_tuple *tuple); -#endif /* __KERNEL__ */ #endif /* _CONNTRACK_PROTO_GRE_H */ diff --git a/include/linux/netfilter/nf_conntrack_sane.h b/include/linux/netfilter/nf_conntrack_sane.h index 7d2de44edce3..46c7acd1b4a7 100644 --- a/include/linux/netfilter/nf_conntrack_sane.h +++ b/include/linux/netfilter/nf_conntrack_sane.h @@ -3,8 +3,6 @@ #define _NF_CONNTRACK_SANE_H /* SANE tracking. */ -#ifdef __KERNEL__ - #define SANE_PORT 6566 enum sane_state { @@ -17,6 +15,4 @@ struct nf_ct_sane_master { enum sane_state state; }; -#endif /* __KERNEL__ */ - #endif /* _NF_CONNTRACK_SANE_H */ diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h index c7fc38807a33..c620521c42bc 100644 --- a/include/linux/netfilter/nf_conntrack_sip.h +++ b/include/linux/netfilter/nf_conntrack_sip.h @@ -1,11 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __NF_CONNTRACK_SIP_H__ #define __NF_CONNTRACK_SIP_H__ -#ifdef __KERNEL__ - -#include <net/netfilter/nf_conntrack_expect.h> +#include <linux/skbuff.h> #include <linux/types.h> +#include <net/netfilter/nf_conntrack_expect.h> #define SIP_PORT 5060 #define SIP_TIMEOUT 3600 @@ -196,5 +195,4 @@ int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr, enum sdp_header_types term, unsigned int *matchoff, unsigned int *matchlen); -#endif /* __KERNEL__ */ #endif /* __NF_CONNTRACK_SIP_H__ */ diff --git a/include/linux/netfilter/nf_conntrack_snmp.h b/include/linux/netfilter/nf_conntrack_snmp.h index 818088c47475..87e4f33eb55f 100644 --- a/include/linux/netfilter/nf_conntrack_snmp.h +++ b/include/linux/netfilter/nf_conntrack_snmp.h @@ -2,6 +2,9 @@ #ifndef _NF_CONNTRACK_SNMP_H #define _NF_CONNTRACK_SNMP_H +#include <linux/netfilter.h> +#include <linux/skbuff.h> + extern int (*nf_nat_snmp_hook)(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, diff --git a/include/linux/netfilter/nf_conntrack_tftp.h b/include/linux/netfilter/nf_conntrack_tftp.h index 5769e12dd0a2..dc4c1b9beac0 100644 --- a/include/linux/netfilter/nf_conntrack_tftp.h +++ b/include/linux/netfilter/nf_conntrack_tftp.h @@ -4,6 +4,11 @@ #define TFTP_PORT 69 +#include <linux/netfilter.h> +#include <linux/skbuff.h> +#include <linux/types.h> +#include <net/netfilter/nf_conntrack_expect.h> + struct tftphdr { __be16 opcode; }; diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 1f852ef7b098..1b261c51b3a3 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -336,7 +336,7 @@ void xt_free_table_info(struct xt_table_info *info); /** * xt_recseq - recursive seqcount for netfilter use - * + * * Packet processing changes the seqcount only if no recursion happened * get_counters() can use read_seqcount_begin()/read_seqcount_retry(), * because we use the normal seqcount convention : diff --git a/include/linux/netfilter/xt_hashlimit.h b/include/linux/netfilter/xt_hashlimit.h deleted file mode 100644 index 169d03983589..000000000000 --- a/include/linux/netfilter/xt_hashlimit.h +++ 
/dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _XT_HASHLIMIT_H -#define _XT_HASHLIMIT_H - -#include <uapi/linux/netfilter/xt_hashlimit.h> - -#define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \ - XT_HASHLIMIT_HASH_SIP | XT_HASHLIMIT_HASH_SPT | \ - XT_HASHLIMIT_INVERT | XT_HASHLIMIT_BYTES |\ - XT_HASHLIMIT_RATE_MATCH) -#endif /*_XT_HASHLIMIT_H*/ diff --git a/include/linux/netfilter/xt_physdev.h b/include/linux/netfilter/xt_physdev.h deleted file mode 100644 index 4ca0593949cd..000000000000 --- a/include/linux/netfilter/xt_physdev.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _XT_PHYSDEV_H -#define _XT_PHYSDEV_H - -#include <linux/if.h> -#include <uapi/linux/netfilter/xt_physdev.h> - -#endif /*_XT_PHYSDEV_H*/ diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index 5f2614d02e03..f980edfdd278 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -5,6 +5,13 @@ #include <uapi/linux/netfilter_bridge.h> #include <linux/skbuff.h> +struct nf_bridge_frag_data { + char mac[ETH_HLEN]; + bool vlan_present; + u16 vlan_tci; + __be16 vlan_proto; +}; + #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb); diff --git a/include/linux/netfilter_bridge/ebt_802_3.h b/include/linux/netfilter_bridge/ebt_802_3.h deleted file mode 100644 index c6147f9c0d80..000000000000 --- a/include/linux/netfilter_bridge/ebt_802_3.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_BRIDGE_EBT_802_3_H -#define __LINUX_BRIDGE_EBT_802_3_H - -#include <linux/skbuff.h> -#include <uapi/linux/netfilter_bridge/ebt_802_3.h> - -static inline struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb) -{ - return (struct ebt_802_3_hdr *)skb_mac_header(skb); -} -#endif diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index c6935be7c6ca..162f59d0d17a 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -105,6 +105,7 @@ struct ebt_table { #define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \ ~(__alignof__(struct _xt_align)-1)) + extern int ebt_register_table(struct net *net, const struct ebt_table *table, const struct nf_hook_ops *ops, diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index d026e63a5aa4..e9e1ed74cdf1 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -17,14 +17,11 @@ #include <linux/if.h> #include <linux/in.h> +#include <linux/init.h> #include <linux/ip.h> #include <linux/skbuff.h> - -#include <linux/init.h> #include <uapi/linux/netfilter_ipv4/ip_tables.h> -extern void ipt_init(void) __init; - int ipt_register_table(struct net *net, const struct xt_table *table, const struct ipt_replace *repl, const struct nf_hook_ops *ops, struct xt_table **res); diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 7beb681e1ce5..aac42c28fe62 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -1,7 +1,7 @@ /* IPv6-specific defines for netfilter. * (C)1998 Rusty Russell -- This code is GPL. 
* (C)1999 David Jeffery - * this header was blatantly ripped from netfilter_ipv4.h + * this header was blatantly ripped from netfilter_ipv4.h * it's amazing what adding a bunch of 6s can do =8^) */ #ifndef __LINUX_IP6_NETFILTER_H @@ -10,6 +10,18 @@ #include <uapi/linux/netfilter_ipv6.h> #include <net/tcp.h> +/* Check for an extension */ +static inline int +nf_ip6_ext_hdr(u8 nexthdr) +{ return (nexthdr == IPPROTO_HOPOPTS) || + (nexthdr == IPPROTO_ROUTING) || + (nexthdr == IPPROTO_FRAGMENT) || + (nexthdr == IPPROTO_ESP) || + (nexthdr == IPPROTO_AH) || + (nexthdr == IPPROTO_NONE) || + (nexthdr == IPPROTO_DSTOPTS); +} + /* Extra routing may needed on local out, as the QUEUE target never returns * control to the table. */ @@ -20,7 +32,7 @@ struct ip6_rt_info { }; struct nf_queue_entry; -struct nf_ct_bridge_frag_data; +struct nf_bridge_frag_data; /* * Hook functions for ipv6 to allow xt_* modules to be built-in even @@ -49,9 +61,9 @@ struct nf_ipv6_ops { int (*br_defrag)(struct net *net, struct sk_buff *skb, u32 user); int (*br_fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, - struct nf_ct_bridge_frag_data *data, + struct nf_bridge_frag_data *data, int (*output)(struct net *, struct sock *sk, - const struct nf_ct_bridge_frag_data *data, + const struct nf_bridge_frag_data *data, struct sk_buff *)); #endif }; @@ -123,16 +135,16 @@ static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb, } int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, - struct nf_ct_bridge_frag_data *data, + struct nf_bridge_frag_data *data, int (*output)(struct net *, struct sock *sk, - const struct nf_ct_bridge_frag_data *data, + const struct nf_bridge_frag_data *data, struct sk_buff *)); static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, - struct nf_ct_bridge_frag_data *data, + struct nf_bridge_frag_data *data, int (*output)(struct net *, struct sock *sk, - const struct nf_ct_bridge_frag_data *data, + const struct nf_bridge_frag_data *data, struct sk_buff *)) { #if IS_MODULE(CONFIG_IPV6) diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 99cbfd3add40..78ab959c4575 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -17,15 +17,13 @@ #include <linux/if.h> #include <linux/in6.h> +#include <linux/init.h> #include <linux/ipv6.h> #include <linux/skbuff.h> - -#include <linux/init.h> #include <uapi/linux/netfilter_ipv6/ip6_tables.h> -extern void ip6t_init(void) __init; - extern void *ip6t_alloc_initial_table(const struct xt_table *); + int ip6t_register_table(struct net *net, const struct xt_table *table, const struct ip6t_replace *repl, const struct nf_hook_ops *ops, struct xt_table **res); @@ -35,18 +33,6 @@ extern unsigned int ip6t_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table); -/* Check for an extension */ -static inline int -ip6t_ext_hdr(u8 nexthdr) -{ return (nexthdr == IPPROTO_HOPOPTS) || - (nexthdr == IPPROTO_ROUTING) || - (nexthdr == IPPROTO_FRAGMENT) || - (nexthdr == IPPROTO_ESP) || - (nexthdr == IPPROTO_AH) || - (nexthdr == IPPROTO_NONE) || - (nexthdr == IPPROTO_DSTOPTS); -} - #ifdef CONFIG_COMPAT #include <net/compat.h> diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index 98d904961b33..10f81629b9ce 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -6,6 +6,8 @@ #ifndef _NVME_FC_DRIVER_H #define _NVME_FC_DRIVER_H 
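Editorial note: the netfilter_ipv6.h hunk above moves the extension-header test into nf_ip6_ext_hdr(), which simply classifies a nexthdr value. A hedged sketch of a caller checking the header that follows the fixed IPv6 header (the helper name is illustrative; only nf_ip6_ext_hdr() and struct ipv6hdr are taken from the kernel headers):

#include <linux/ipv6.h>
#include <linux/netfilter_ipv6.h>

/* Illustrative only: does the fixed IPv6 header point at an
 * extension header that still has to be skipped? */
static bool example_has_ext_hdr(const struct ipv6hdr *ip6h)
{
	return nf_ip6_ext_hdr(ip6h->nexthdr);
}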
1 +#include <linux/scatterlist.h> + /* * ********************** LLDD FC-NVME Host API ******************** diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h index 840ce551e773..ba3cfbb52312 100644 --- a/include/linux/omap-dma.h +++ b/include/linux/omap-dma.h @@ -1,8 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_OMAP_DMA_H #define __LINUX_OMAP_DMA_H -#include <linux/omap-dmaengine.h> - /* * Legacy OMAP DMA handling defines and functions * diff --git a/include/linux/omap-dmaengine.h b/include/linux/omap-dmaengine.h deleted file mode 100644 index b6e42f933c40..000000000000 --- a/include/linux/omap-dmaengine.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * OMAP DMA Engine support - */ -#ifndef __LINUX_OMAP_DMAENGINE_H -#define __LINUX_OMAP_DMAENGINE_H - -struct dma_chan; - -#if defined(CONFIG_DMA_OMAP) || (defined(CONFIG_DMA_OMAP_MODULE) && defined(MODULE)) -bool omap_dma_filter_fn(struct dma_chan *, void *); -#else -static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d) -{ - return false; -} -#endif -#endif /* __LINUX_OMAP_DMAENGINE_H */ diff --git a/include/linux/oxu210hp.h b/include/linux/oxu210hp.h deleted file mode 100644 index 94cd25165c08..000000000000 --- a/include/linux/oxu210hp.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* platform data for the OXU210HP HCD */ - -struct oxu210hp_platform_data { - unsigned int bus16:1; - unsigned int use_hcd_otg:1; - unsigned int use_hcd_sph:1; -}; diff --git a/include/linux/padata.h b/include/linux/padata.h index 56f09e36f770..23717eeaad23 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -12,7 +12,6 @@ #include <linux/workqueue.h> #include <linux/spinlock.h> #include <linux/list.h> -#include <linux/timer.h> #include <linux/notifier.h> #include <linux/kobject.h> @@ -36,6 +35,7 @@ struct padata_priv { struct parallel_data *pd; int cb_cpu; int cpu; + unsigned int seq_nr; int info; void (*parallel)(struct padata_priv *padata); void (*serial)(struct padata_priv *padata); @@ -73,20 +73,14 @@ struct padata_serial_queue { * @serial: List to wait for serialization after reordering. * @pwork: work struct for parallelization. * @swork: work struct for serialization. - * @pd: Backpointer to the internal control structure. * @work: work struct for parallelization. - * @reorder_work: work struct for reordering. * @num_obj: Number of objects that are processed by this cpu. - * @cpu_index: Index of the cpu. */ struct padata_parallel_queue { struct padata_list parallel; struct padata_list reorder; - struct parallel_data *pd; struct work_struct work; - struct work_struct reorder_work; atomic_t num_obj; - int cpu_index; }; /** @@ -110,10 +104,11 @@ struct padata_cpumask { * @reorder_objects: Number of objects waiting in the reorder queues. * @refcnt: Number of objects holding a reference on this parallel_data. * @max_seq_nr: Maximal used sequence number. + * @processed: Number of already processed objects. + * @cpu: Next CPU to be processed. * @cpumask: The cpumasks in use for parallel and serial workers. + * @reorder_work: work struct for reordering. * @lock: Reorder lock. - * @processed: Number of already processed objects. - * @timer: Reorder timer. 
*/ struct parallel_data { struct padata_instance *pinst; @@ -122,17 +117,19 @@ struct parallel_data { atomic_t reorder_objects; atomic_t refcnt; atomic_t seq_nr; + unsigned int processed; + int cpu; struct padata_cpumask cpumask; + struct work_struct reorder_work; spinlock_t lock ____cacheline_aligned; - unsigned int processed; - struct timer_list timer; }; /** * struct padata_instance - The overall control structure. * * @cpu_notifier: cpu hotplug notifier. - * @wq: The workqueue in use. + * @parallel_wq: The workqueue used for parallel work. + * @serial_wq: The workqueue used for serial work. * @pd: The internal control structure. * @cpumask: User supplied cpumasks for parallel and serial works. * @cpumask_change_notifier: Notifiers chain for user-defined notify @@ -144,7 +141,8 @@ struct parallel_data { */ struct padata_instance { struct hlist_node node; - struct workqueue_struct *wq; + struct workqueue_struct *parallel_wq; + struct workqueue_struct *serial_wq; struct parallel_data *pd; struct padata_cpumask cpumask; struct blocking_notifier_head cpumask_change_notifier; @@ -156,11 +154,10 @@ struct padata_instance { #define PADATA_INVALID 4 }; -extern struct padata_instance *padata_alloc_possible( - struct workqueue_struct *wq); +extern struct padata_instance *padata_alloc_possible(const char *name); extern void padata_free(struct padata_instance *pinst); extern int padata_do_parallel(struct padata_instance *pinst, - struct padata_priv *padata, int cb_cpu); + struct padata_priv *padata, int *cb_cpu); extern void padata_do_serial(struct padata_priv *padata); extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index 09592951725c..682fd465df06 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -18,6 +18,7 @@ struct page_ext_operations { enum page_ext_flags { PAGE_EXT_OWNER, + PAGE_EXT_OWNER_ACTIVE, #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT) PAGE_EXT_YOUNG, PAGE_EXT_IDLE, diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index c7552459a15f..37a4d9e32cd3 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -333,6 +333,16 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping, mapping_gfp_mask(mapping)); } +static inline struct page *find_subpage(struct page *page, pgoff_t offset) +{ + if (PageHuge(page)) + return page; + + VM_BUG_ON_PAGE(PageTail(page), page); + + return page + (offset & (compound_nr(page) - 1)); +} + struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset); unsigned find_get_entries(struct address_space *mapping, pgoff_t start, diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h new file mode 100644 index 000000000000..bddd9759bab9 --- /dev/null +++ b/include/linux/pagewalk.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PAGEWALK_H +#define _LINUX_PAGEWALK_H + +#include <linux/mm.h> + +struct mm_walk; + +/** + * mm_walk_ops - callbacks for walk_page_range + * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry + * this handler should only handle pud_trans_huge() puds. + * the pmd_entry or pte_entry callbacks will be used for + * regular PUDs. + * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry + * this handler is required to be able to handle + * pmd_trans_huge() pmds. 
They may simply choose to + * split_huge_page() instead of handling it explicitly. + * @pte_entry: if set, called for each non-empty PTE (4th-level) entry + * @pte_hole: if set, called for each hole at all levels + * @hugetlb_entry: if set, called for each hugetlb entry + * @test_walk: caller specific callback function to determine whether + * we walk over the current vma or not. Returning 0 means + * "do page table walk over the current vma", returning + * a negative value means "abort current page table walk + * right now" and returning 1 means "skip the current vma" + */ +struct mm_walk_ops { + int (*pud_entry)(pud_t *pud, unsigned long addr, + unsigned long next, struct mm_walk *walk); + int (*pmd_entry)(pmd_t *pmd, unsigned long addr, + unsigned long next, struct mm_walk *walk); + int (*pte_entry)(pte_t *pte, unsigned long addr, + unsigned long next, struct mm_walk *walk); + int (*pte_hole)(unsigned long addr, unsigned long next, + struct mm_walk *walk); + int (*hugetlb_entry)(pte_t *pte, unsigned long hmask, + unsigned long addr, unsigned long next, + struct mm_walk *walk); + int (*test_walk)(unsigned long addr, unsigned long next, + struct mm_walk *walk); +}; + +/** + * mm_walk - walk_page_range data + * @ops: operation to call during the walk + * @mm: mm_struct representing the target process of page table walk + * @vma: vma currently walked (NULL if walking outside vmas) + * @private: private data for callbacks' usage + * + * (see the comment on walk_page_range() for more details) + */ +struct mm_walk { + const struct mm_walk_ops *ops; + struct mm_struct *mm; + struct vm_area_struct *vma; + void *private; +}; + +int walk_page_range(struct mm_struct *mm, unsigned long start, + unsigned long end, const struct mm_walk_ops *ops, + void *private); +int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops, + void *private); + +#endif /* _LINUX_PAGEWALK_H */ diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h deleted file mode 100644 index 67064145d76e..000000000000 --- a/include/linux/pci-aspm.h +++ /dev/null @@ -1,36 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * aspm.h - * - * PCI Express ASPM defines and function prototypes - * - * Copyright (C) 2007 Intel Corp. 
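Editorial note: the new <linux/pagewalk.h> introduced above replaces per-call callbacks with a const mm_walk_ops table passed to walk_page_range(). A minimal sketch of that calling convention under the new API (callback body and names are illustrative; callers are expected to hold the target mm's mmap_sem, as before):

#include <linux/pagewalk.h>

/* Illustrative callback: count present PTEs via walk->private. */
static int example_pte_entry(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;
}

static const struct mm_walk_ops example_walk_ops = {
	.pte_entry = example_pte_entry,
};

static unsigned long example_count_ptes(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long count = 0;

	walk_page_range(mm, start, end, &example_walk_ops, &count);
	return count;
}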
- * Zhang Yanmin (yanmin.zhang@intel.com) - * Shaohua Li (shaohua.li@intel.com) - * - * For more information, please consult the following manuals (look at - * http://www.pcisig.com/ for how to get them): - * - * PCI Express Specification - */ - -#ifndef LINUX_ASPM_H -#define LINUX_ASPM_H - -#include <linux/pci.h> - -#define PCIE_LINK_STATE_L0S 1 -#define PCIE_LINK_STATE_L1 2 -#define PCIE_LINK_STATE_CLKPM 4 - -#ifdef CONFIG_PCIEASPM -int pci_disable_link_state(struct pci_dev *pdev, int state); -int pci_disable_link_state_locked(struct pci_dev *pdev, int state); -void pcie_no_aspm(void); -#else -static inline int pci_disable_link_state(struct pci_dev *pdev, int state) -{ return 0; } -static inline void pcie_no_aspm(void) { } -#endif - -#endif /* LINUX_ASPM_H */ diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h index bca9bc3e5be7..8318a97c9c61 100644 --- a/include/linux/pci-p2pdma.h +++ b/include/linux/pci-p2pdma.h @@ -30,8 +30,10 @@ struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev, unsigned int *nents, u32 length); void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl); void pci_p2pmem_publish(struct pci_dev *pdev, bool publish); -int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir); +int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs); +void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs); int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev, bool *use_p2pdma); ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev, @@ -81,11 +83,17 @@ static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev, static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish) { } -static inline int pci_p2pdma_map_sg(struct device *dev, - struct scatterlist *sg, int nents, enum dma_data_direction dir) +static inline int pci_p2pdma_map_sg_attrs(struct device *dev, + struct scatterlist *sg, int nents, enum dma_data_direction dir, + unsigned long attrs) { return 0; } +static inline void pci_p2pdma_unmap_sg_attrs(struct device *dev, + struct scatterlist *sg, int nents, enum dma_data_direction dir, + unsigned long attrs) +{ +} static inline int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev, bool *use_p2pdma) { @@ -111,4 +119,16 @@ static inline struct pci_dev *pci_p2pmem_find(struct device *client) return pci_p2pmem_find_many(&client, 1); } +static inline int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir) +{ + return pci_p2pdma_map_sg_attrs(dev, sg, nents, dir, 0); +} + +static inline void pci_p2pdma_unmap_sg(struct device *dev, + struct scatterlist *sg, int nents, enum dma_data_direction dir) +{ + pci_p2pdma_unmap_sg_attrs(dev, sg, nents, dir, 0); +} + #endif /* _LINUX_PCI_P2P_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 82e4cd1b7ac3..f9088c89a534 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -6,12 +6,18 @@ * Copyright 1994, Drew Eckhardt * Copyright 1997--1999 Martin Mares <mj@ucw.cz> * + * PCI Express ASPM defines and function prototypes + * Copyright (c) 2007 Intel Corp. 
+ * Zhang Yanmin (yanmin.zhang@intel.com) + * Shaohua Li (shaohua.li@intel.com) + * * For more information, please consult the following manuals (look at * http://www.pcisig.com/ for how to get them): * * PCI BIOS Specification * PCI Local Bus Specification * PCI to PCI Bridge Specification + * PCI Express Specification * PCI System Design Guide */ #ifndef LINUX_PCI_H @@ -145,11 +151,6 @@ static inline const char *pci_power_name(pci_power_t state) return pci_power_names[1 + (__force int) state]; } -#define PCI_PM_D2_DELAY 200 -#define PCI_PM_D3_WAIT 10 -#define PCI_PM_D3COLD_WAIT 100 -#define PCI_PM_BUS_WAIT 50 - /** * typedef pci_channel_state_t * @@ -418,7 +419,6 @@ struct pci_dev { unsigned int broken_intx_masking:1; /* INTx masking can't be used */ unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */ unsigned int irq_managed:1; - unsigned int has_secondary_link:1; unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */ unsigned int is_probed:1; /* Device probing in progress */ unsigned int link_active_reporting:1;/* Device capable of reporting link active */ @@ -649,9 +649,6 @@ static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) return dev->bus->self; } -struct device *pci_get_host_bridge_device(struct pci_dev *dev); -void pci_put_host_bridge_device(struct device *dev); - #ifdef CONFIG_PCI_MSI static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { @@ -925,6 +922,11 @@ enum { PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */ }; +#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */ +#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ +#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */ +#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */ + /* These external functions are only available when PCI support is enabled */ #ifdef CONFIG_PCI @@ -969,7 +971,7 @@ resource_size_t pcibios_align_resource(void *, const struct resource *, resource_size_t, resource_size_t); -/* Weak but can be overriden by arch */ +/* Weak but can be overridden by arch */ void pci_fixup_cardbus(struct pci_bus *); /* Generic PCI functions used internally */ @@ -995,7 +997,6 @@ struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge); struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr); -void pcie_update_link_speed(struct pci_bus *bus, u16 link_status); struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, const char *name, struct hotplug_slot *hotplug); @@ -1241,19 +1242,12 @@ int pci_wake_from_d3(struct pci_dev *dev, bool enable); int pci_prepare_to_sleep(struct pci_dev *dev); int pci_back_from_sleep(struct pci_dev *dev); bool pci_dev_run_wake(struct pci_dev *dev); -bool pci_check_pme_status(struct pci_dev *dev); -void pci_pme_wakeup_bus(struct pci_bus *bus); void pci_d3cold_enable(struct pci_dev *dev); void pci_d3cold_disable(struct pci_dev *dev); bool pcie_relaxed_ordering_enabled(struct pci_dev *dev); void pci_wakeup_bus(struct pci_bus *bus); void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state); -/* PCI Virtual Channel */ -int pci_save_vc_state(struct pci_dev *dev); -void pci_restore_vc_state(struct pci_dev *dev); -void pci_allocate_vc_save_buffers(struct pci_dev *dev); - /* For use by arch with custom probe code */ void set_pcie_port_type(struct pci_dev *pdev); void set_pcie_hotplug_bridge(struct pci_dev *pdev); @@ -1297,8 +1291,6 @@ int pci_request_selected_regions_exclusive(struct 
pci_dev *, int, const char *); void pci_release_selected_regions(struct pci_dev *, int); /* drivers/pci/bus.c */ -struct pci_bus *pci_bus_get(struct pci_bus *bus); -void pci_bus_put(struct pci_bus *bus); void pci_add_resource(struct list_head *resources, struct resource *res); void pci_add_resource_offset(struct list_head *resources, struct resource *res, resource_size_t offset); @@ -1408,11 +1400,6 @@ resource_size_t pcibios_window_alignment(struct pci_bus *bus, int pci_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags); -#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */ -#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ -#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */ -#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */ - /* * Virtual interrupts allow for more interrupts to be allocated * than the device has interrupts for. These are not programmed @@ -1517,14 +1504,6 @@ static inline int pci_irq_get_node(struct pci_dev *pdev, int vec) } #endif -static inline int -pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, - unsigned int max_vecs, unsigned int flags) -{ - return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags, - NULL); -} - /** * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq * @d: the INTx IRQ domain @@ -1565,10 +1544,22 @@ extern bool pcie_ports_native; #define pcie_ports_native false #endif +#define PCIE_LINK_STATE_L0S 1 +#define PCIE_LINK_STATE_L1 2 +#define PCIE_LINK_STATE_CLKPM 4 + #ifdef CONFIG_PCIEASPM +int pci_disable_link_state(struct pci_dev *pdev, int state); +int pci_disable_link_state_locked(struct pci_dev *pdev, int state); +void pcie_no_aspm(void); bool pcie_aspm_support_enabled(void); bool pcie_aspm_enabled(struct pci_dev *pdev); #else +static inline int pci_disable_link_state(struct pci_dev *pdev, int state) +{ return 0; } +static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state) +{ return 0; } +static inline void pcie_no_aspm(void) { } static inline bool pcie_aspm_support_enabled(void) { return false; } static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; } #endif @@ -1579,23 +1570,8 @@ bool pci_aer_available(void); static inline bool pci_aer_available(void) { return false; } #endif -#ifdef CONFIG_PCIE_ECRC -void pcie_set_ecrc_checking(struct pci_dev *dev); -void pcie_ecrc_get_policy(char *str); -#else -static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { } -static inline void pcie_ecrc_get_policy(char *str) { } -#endif - bool pci_ats_disabled(void); -#ifdef CONFIG_PCIE_PTM -int pci_enable_ptm(struct pci_dev *dev, u8 *granularity); -#else -static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) -{ return -EINVAL; } -#endif - void pci_cfg_access_lock(struct pci_dev *dev); bool pci_cfg_access_trylock(struct pci_dev *dev); void pci_cfg_access_unlock(struct pci_dev *dev); @@ -1749,11 +1725,6 @@ static inline void pci_release_regions(struct pci_dev *dev) { } static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; } -static inline void pci_block_cfg_access(struct pci_dev *dev) { } -static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev) -{ return 0; } -static inline void pci_unblock_cfg_access(struct pci_dev *dev) { } - static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from) { return NULL; } static inline struct pci_dev *pci_get_slot(struct pci_bus *bus, @@ -1782,17 +1753,36 @@ static inline const struct 
pci_device_id *pci_match_id(const struct pci_device_i struct pci_dev *dev) { return NULL; } static inline bool pci_ats_disabled(void) { return true; } + +static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr) +{ + return -EINVAL; +} + +static inline int +pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags, + struct irq_affinity *aff_desc) +{ + return -ENOSPC; +} #endif /* CONFIG_PCI */ +static inline int +pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags) +{ + return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags, + NULL); +} + #ifdef CONFIG_PCI_ATS /* Address Translation Service */ -void pci_ats_init(struct pci_dev *dev); int pci_enable_ats(struct pci_dev *dev, int ps); void pci_disable_ats(struct pci_dev *dev); int pci_ats_queue_depth(struct pci_dev *dev); int pci_ats_page_aligned(struct pci_dev *dev); #else -static inline void pci_ats_init(struct pci_dev *d) { } static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; } static inline void pci_disable_ats(struct pci_dev *d) { } static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; } @@ -1803,7 +1793,7 @@ static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; } #include <asm/pci.h> -/* These two functions provide almost identical functionality. Depennding +/* These two functions provide almost identical functionality. Depending * on the architecture, one will be implemented as a wrapper around the * other (in drivers/pci/mmap.c). * @@ -1872,25 +1862,9 @@ static inline const char *pci_name(const struct pci_dev *pdev) return dev_name(&pdev->dev); } - -/* - * Some archs don't want to expose struct resource to userland as-is - * in sysfs and /proc - */ -#ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER void pci_resource_to_user(const struct pci_dev *dev, int bar, const struct resource *rsrc, resource_size_t *start, resource_size_t *end); -#else -static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, - const struct resource *rsrc, resource_size_t *start, - resource_size_t *end) -{ - *start = rsrc->start; - *end = rsrc->end; -} -#endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */ - /* * The world is not perfect and supplies us with broken PCI devices. 
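Editorial note: with pci_alloc_irq_vectors() now a wrapper defined outside the CONFIG_PCI guard (and the affinity variant stubbed to -ENOSPC when PCI is disabled), the usual driver pattern is unchanged. A hedged sketch with illustrative names, using only pci_alloc_irq_vectors(), pci_irq_vector() and the PCI_IRQ_* flags shown in the hunks above:

#include <linux/pci.h>

/* Illustrative only: request 1..4 vectors, preferring MSI-X, falling
 * back to MSI or legacy INTx, then resolve the IRQ of vector 0. */
static int example_setup_irqs(struct pci_dev *pdev)
{
	int nvecs;

	nvecs = pci_alloc_irq_vectors(pdev, 1, 4,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI |
				      PCI_IRQ_LEGACY);
	if (nvecs < 0)
		return nvecs;

	return pci_irq_vector(pdev, 0);
}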
@@ -2032,10 +2006,6 @@ extern unsigned long pci_cardbus_mem_size; extern u8 pci_dfl_cache_line_size; extern u8 pci_cache_line_size; -extern unsigned long pci_hotplug_io_size; -extern unsigned long pci_hotplug_mem_size; -extern unsigned long pci_hotplug_bus_size; - /* Architecture-specific versions may override these (weak) */ void pcibios_disable_device(struct pci_dev *dev); void pcibios_set_master(struct pci_dev *dev); @@ -2305,10 +2275,6 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, #ifdef CONFIG_OF struct device_node; struct irq_domain; -void pci_set_of_node(struct pci_dev *dev); -void pci_release_of_node(struct pci_dev *dev); -void pci_set_bus_of_node(struct pci_bus *bus); -void pci_release_bus_of_node(struct pci_bus *bus); struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus); int pci_parse_request_of_pci_ranges(struct device *dev, struct list_head *resources, @@ -2318,10 +2284,6 @@ int pci_parse_request_of_pci_ranges(struct device *dev, struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); #else /* CONFIG_OF */ -static inline void pci_set_of_node(struct pci_dev *dev) { } -static inline void pci_release_of_node(struct pci_dev *dev) { } -static inline void pci_set_bus_of_node(struct pci_bus *bus) { } -static inline void pci_release_bus_of_node(struct pci_bus *bus) { } static inline struct irq_domain * pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } static inline int pci_parse_request_of_pci_ranges(struct device *dev, @@ -2435,4 +2397,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type); #define pci_notice_ratelimited(pdev, fmt, arg...) \ dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg) +#define pci_info_ratelimited(pdev, fmt, arg...) \ + dev_info_ratelimited(&(pdev)->dev, fmt, ##arg) + #endif /* LINUX_PCI_H */ diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index f694eb2ca978..b482e42d7153 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -86,114 +86,14 @@ void pci_hp_deregister(struct hotplug_slot *slot); #define pci_hp_initialize(slot, bus, nr, name) \ __pci_hp_initialize(slot, bus, nr, name, THIS_MODULE, KBUILD_MODNAME) -/* PCI Setting Record (Type 0) */ -struct hpp_type0 { - u32 revision; - u8 cache_line_size; - u8 latency_timer; - u8 enable_serr; - u8 enable_perr; -}; - -/* PCI-X Setting Record (Type 1) */ -struct hpp_type1 { - u32 revision; - u8 max_mem_read; - u8 avg_max_split; - u16 tot_max_split; -}; - -/* PCI Express Setting Record (Type 2) */ -struct hpp_type2 { - u32 revision; - u32 unc_err_mask_and; - u32 unc_err_mask_or; - u32 unc_err_sever_and; - u32 unc_err_sever_or; - u32 cor_err_mask_and; - u32 cor_err_mask_or; - u32 adv_err_cap_and; - u32 adv_err_cap_or; - u16 pci_exp_devctl_and; - u16 pci_exp_devctl_or; - u16 pci_exp_lnkctl_and; - u16 pci_exp_lnkctl_or; - u32 sec_unc_err_sever_and; - u32 sec_unc_err_sever_or; - u32 sec_unc_err_mask_and; - u32 sec_unc_err_mask_or; -}; - -/* - * _HPX PCI Express Setting Record (Type 3) - */ -struct hpx_type3 { - u16 device_type; - u16 function_type; - u16 config_space_location; - u16 pci_exp_cap_id; - u16 pci_exp_cap_ver; - u16 pci_exp_vendor_id; - u16 dvsec_id; - u16 dvsec_rev; - u16 match_offset; - u32 match_mask_and; - u32 match_value; - u16 reg_offset; - u32 reg_mask_and; - u32 reg_mask_or; -}; - -struct hotplug_program_ops { - void (*program_type0)(struct pci_dev *dev, struct hpp_type0 *hpp); - void (*program_type1)(struct pci_dev *dev, struct hpp_type1 *hpp); - void 
(*program_type2)(struct pci_dev *dev, struct hpp_type2 *hpp); - void (*program_type3)(struct pci_dev *dev, struct hpx_type3 *hpp); -}; - -enum hpx_type3_dev_type { - HPX_TYPE_ENDPOINT = BIT(0), - HPX_TYPE_LEG_END = BIT(1), - HPX_TYPE_RC_END = BIT(2), - HPX_TYPE_RC_EC = BIT(3), - HPX_TYPE_ROOT_PORT = BIT(4), - HPX_TYPE_UPSTREAM = BIT(5), - HPX_TYPE_DOWNSTREAM = BIT(6), - HPX_TYPE_PCI_BRIDGE = BIT(7), - HPX_TYPE_PCIE_BRIDGE = BIT(8), -}; - -enum hpx_type3_fn_type { - HPX_FN_NORMAL = BIT(0), - HPX_FN_SRIOV_PHYS = BIT(1), - HPX_FN_SRIOV_VIRT = BIT(2), -}; - -enum hpx_type3_cfg_loc { - HPX_CFG_PCICFG = 0, - HPX_CFG_PCIE_CAP = 1, - HPX_CFG_PCIE_CAP_EXT = 2, - HPX_CFG_VEND_CAP = 3, - HPX_CFG_DVSEC = 4, - HPX_CFG_MAX, -}; - #ifdef CONFIG_ACPI #include <linux/acpi.h> -int pci_acpi_program_hp_params(struct pci_dev *dev, - const struct hotplug_program_ops *hp_ops); bool pciehp_is_native(struct pci_dev *bridge); int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge); bool shpchp_is_native(struct pci_dev *bridge); int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle); int acpi_pci_detect_ejectable(acpi_handle handle); #else -static inline int pci_acpi_program_hp_params(struct pci_dev *dev, - const struct hotplug_program_ops *hp_ops) -{ - return -ENODEV; -} - static inline int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge) { return 0; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 24486f405f12..21a572469a4e 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1951,6 +1951,8 @@ #define PCI_VENDOR_ID_DIGIGRAM 0x1369 #define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM 0xc001 #define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM 0xc002 +#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ESE_SERIAL_SUBSYSTEM 0xc021 +#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ESE_CAE_SERIAL_SUBSYSTEM 0xc022 #define PCI_VENDOR_ID_KAWASAKI 0x136b #define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01 @@ -2132,6 +2134,7 @@ #define PCI_VENDOR_ID_MYRICOM 0x14c1 #define PCI_VENDOR_ID_MEDIATEK 0x14c3 +#define PCI_DEVICE_ID_MEDIATEK_7629 0x7629 #define PCI_VENDOR_ID_TITAN 0x14D2 #define PCI_DEVICE_ID_TITAN_010L 0x8001 @@ -2405,6 +2408,8 @@ #define PCI_DEVICE_ID_RDC_R6061 0x6061 #define PCI_DEVICE_ID_RDC_D1010 0x1010 +#define PCI_VENDOR_ID_GLI 0x17a0 + #define PCI_VENDOR_ID_LENOVO 0x17aa #define PCI_VENDOR_ID_QCOM 0x17cb @@ -2570,6 +2575,8 @@ #define PCI_VENDOR_ID_ASMEDIA 0x1b21 +#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36 + #define PCI_VENDOR_ID_CIRCUITCO 0x1cc8 #define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD 0x0001 diff --git a/include/linux/phy.h b/include/linux/phy.h index 2fb9c8ffaf10..a7ecbe0e55aa 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -403,6 +403,8 @@ struct phy_device { __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); + /* used with phy_speed_down */ + __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old); /* Energy efficient ethernet modes which should be prohibited */ u32 eee_broken_modes; @@ -665,6 +667,7 @@ size_t phy_speeds(unsigned int *speeds, size_t size, unsigned long *mask); void of_set_phy_supported(struct phy_device *phydev); void of_set_phy_eee_broken(struct phy_device *phydev); +int phy_speed_down_core(struct phy_device *phydev); /** * phy_is_started - Convenience function to check whether PHY is started @@ -984,6 +987,8 @@ int phy_select_page(struct phy_device *phydev, int page); int phy_restore_page(struct phy_device *phydev, int 
oldpage, int ret); int phy_read_paged(struct phy_device *phydev, int page, u32 regnum); int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val); +int phy_modify_paged_changed(struct phy_device *phydev, int page, u32 regnum, + u16 mask, u16 set); int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum, u16 mask, u16 set); @@ -1064,12 +1069,11 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) void phy_attached_info(struct phy_device *phydev); /* Clause 22 PHY */ -int genphy_config_init(struct phy_device *phydev); int genphy_read_abilities(struct phy_device *phydev); int genphy_setup_forced(struct phy_device *phydev); int genphy_restart_aneg(struct phy_device *phydev); int genphy_config_eee_advert(struct phy_device *phydev); -int genphy_config_aneg(struct phy_device *phydev); +int __genphy_config_aneg(struct phy_device *phydev, bool changed); int genphy_aneg_done(struct phy_device *phydev); int genphy_update_link(struct phy_device *phydev); int genphy_read_status(struct phy_device *phydev); @@ -1077,6 +1081,12 @@ int genphy_suspend(struct phy_device *phydev); int genphy_resume(struct phy_device *phydev); int genphy_loopback(struct phy_device *phydev, bool enable); int genphy_soft_reset(struct phy_device *phydev); + +static inline int genphy_config_aneg(struct phy_device *phydev) +{ + return __genphy_config_aneg(phydev, false); +} + static inline int genphy_no_soft_reset(struct phy_device *phydev) { return 0; diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h index 86720a5a384f..7f8c7d9583d3 100644 --- a/include/linux/pinctrl/consumer.h +++ b/include/linux/pinctrl/consumer.h @@ -24,6 +24,7 @@ struct device; #ifdef CONFIG_PINCTRL /* External interface to pin control */ +extern bool pinctrl_gpio_can_use_line(unsigned gpio); extern int pinctrl_gpio_request(unsigned gpio); extern void pinctrl_gpio_free(unsigned gpio); extern int pinctrl_gpio_direction_input(unsigned gpio); @@ -61,6 +62,11 @@ static inline int pinctrl_pm_select_idle_state(struct device *dev) #else /* !CONFIG_PINCTRL */ +static inline bool pinctrl_gpio_can_use_line(unsigned gpio) +{ + return true; +} + static inline int pinctrl_gpio_request(unsigned gpio) { return 0; diff --git a/include/linux/platform_data/cros_ec_chardev.h b/include/linux/platform_data/cros_ec_chardev.h new file mode 100644 index 000000000000..7de8faaf77df --- /dev/null +++ b/include/linux/platform_data/cros_ec_chardev.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ChromeOS EC device interface. + * + * Copyright (C) 2014 Google, Inc. + */ + +#ifndef _UAPI_LINUX_CROS_EC_DEV_H_ +#define _UAPI_LINUX_CROS_EC_DEV_H_ + +#include <linux/bits.h> +#include <linux/ioctl.h> +#include <linux/types.h> + +#include <linux/platform_data/cros_ec_commands.h> + +#define CROS_EC_DEV_VERSION "1.0.0" + +/** + * struct cros_ec_readmem - Struct used to read mapped memory. + * @offset: Within EC_LPC_ADDR_MEMMAP region. + * @bytes: Number of bytes to read. Zero means "read a string" (including '\0') + * At most only EC_MEMMAP_SIZE bytes can be read. + * @buffer: Where to store the result. The ioctl returns the number of bytes + * read or negative on error. 
+ */ +struct cros_ec_readmem { + uint32_t offset; + uint32_t bytes; + uint8_t buffer[EC_MEMMAP_SIZE]; +}; + +#define CROS_EC_DEV_IOC 0xEC +#define CROS_EC_DEV_IOCXCMD _IOWR(CROS_EC_DEV_IOC, 0, struct cros_ec_command) +#define CROS_EC_DEV_IOCRDMEM _IOWR(CROS_EC_DEV_IOC, 1, struct cros_ec_readmem) +#define CROS_EC_DEV_IOCEVENTMASK _IO(CROS_EC_DEV_IOC, 2) + +#endif /* _CROS_EC_DEV_H_ */ diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 7ccb8757b79d..98415686cbfa 100644 --- a/include/linux/mfd/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -5513,6 +5513,18 @@ struct ec_params_fp_seed { uint8_t seed[FP_CONTEXT_TPM_BYTES]; } __ec_align4; +#define EC_CMD_FP_ENC_STATUS 0x0409 + +/* FP TPM seed has been set or not */ +#define FP_ENC_STATUS_SEED_SET BIT(0) + +struct ec_response_fp_encryption_status { + /* Used bits in encryption engine status */ + uint32_t valid_flags; + /* Encryption engine status */ + uint32_t status; +} __ec_align4; + /*****************************************************************************/ /* Touchpad MCU commands: range 0x0500-0x05FF */ diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h new file mode 100644 index 000000000000..eab7036cda09 --- /dev/null +++ b/include/linux/platform_data/cros_ec_proto.h @@ -0,0 +1,319 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ChromeOS Embedded Controller protocol interface. + * + * Copyright (C) 2012 Google, Inc + */ + +#ifndef __LINUX_CROS_EC_PROTO_H +#define __LINUX_CROS_EC_PROTO_H + +#include <linux/device.h> +#include <linux/mutex.h> +#include <linux/notifier.h> + +#include <linux/platform_data/cros_ec_commands.h> + +#define CROS_EC_DEV_NAME "cros_ec" +#define CROS_EC_DEV_FP_NAME "cros_fp" +#define CROS_EC_DEV_ISH_NAME "cros_ish" +#define CROS_EC_DEV_PD_NAME "cros_pd" +#define CROS_EC_DEV_SCP_NAME "cros_scp" +#define CROS_EC_DEV_TP_NAME "cros_tp" + +/* + * The EC is unresponsive for a time after a reboot command. Add a + * simple delay to make sure that the bus stays locked. + */ +#define EC_REBOOT_DELAY_MS 50 + +/* + * Max bus-specific overhead incurred by request/responses. + * I2C requires 1 additional byte for requests. + * I2C requires 2 additional bytes for responses. + * SPI requires up to 32 additional bytes for responses. + */ +#define EC_PROTO_VERSION_UNKNOWN 0 +#define EC_MAX_REQUEST_OVERHEAD 1 +#define EC_MAX_RESPONSE_OVERHEAD 32 + +/* + * Command interface between EC and AP, for LPC, I2C and SPI interfaces. + */ +enum { + EC_MSG_TX_HEADER_BYTES = 3, + EC_MSG_TX_TRAILER_BYTES = 1, + EC_MSG_TX_PROTO_BYTES = EC_MSG_TX_HEADER_BYTES + + EC_MSG_TX_TRAILER_BYTES, + EC_MSG_RX_PROTO_BYTES = 3, + + /* Max length of messages for proto 2*/ + EC_PROTO2_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE + + EC_MSG_TX_PROTO_BYTES, + + EC_MAX_MSG_BYTES = 64 * 1024, +}; + +/** + * struct cros_ec_command - Information about a ChromeOS EC command. + * @version: Command version number (often 0). + * @command: Command to send (EC_CMD_...). + * @outsize: Outgoing length in bytes. + * @insize: Max number of bytes to accept from the EC. + * @result: EC's response to the command (separate from communication failure). + * @data: Where to put the incoming data from EC and outgoing data to EC. + */ +struct cros_ec_command { + uint32_t version; + uint32_t command; + uint32_t outsize; + uint32_t insize; + uint32_t result; + uint8_t data[0]; +}; + +/** + * struct cros_ec_device - Information about a ChromeOS EC device. 
+ * @phys_name: Name of physical comms layer (e.g. 'i2c-4'). + * @dev: Device pointer for physical comms device + * @was_wake_device: True if this device was set to wake the system from + * sleep at the last suspend. + * @cros_class: The class structure for this device. + * @cmd_readmem: Direct read of the EC memory-mapped region, if supported. + * @offset: Is within EC_LPC_ADDR_MEMMAP region. + * @bytes: Number of bytes to read. zero means "read a string" (including + * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be + * read. Caller must ensure that the buffer is large enough for the + * result when reading a string. + * @max_request: Max size of message requested. + * @max_response: Max size of message response. + * @max_passthru: Max sice of passthru message. + * @proto_version: The protocol version used for this device. + * @priv: Private data. + * @irq: Interrupt to use. + * @id: Device id. + * @din: Input buffer (for data from EC). This buffer will always be + * dword-aligned and include enough space for up to 7 word-alignment + * bytes also, so we can ensure that the body of the message is always + * dword-aligned (64-bit). We use this alignment to keep ARM and x86 + * happy. Probably word alignment would be OK, there might be a small + * performance advantage to using dword. + * @dout: Output buffer (for data to EC). This buffer will always be + * dword-aligned and include enough space for up to 7 word-alignment + * bytes also, so we can ensure that the body of the message is always + * dword-aligned (64-bit). We use this alignment to keep ARM and x86 + * happy. Probably word alignment would be OK, there might be a small + * performance advantage to using dword. + * @din_size: Size of din buffer to allocate (zero to use static din). + * @dout_size: Size of dout buffer to allocate (zero to use static dout). + * @wake_enabled: True if this device can wake the system from sleep. + * @suspended: True if this device had been suspended. + * @cmd_xfer: Send command to EC and get response. + * Returns the number of bytes received if the communication + * succeeded, but that doesn't mean the EC was happy with the + * command. The caller should check msg.result for the EC's result + * code. + * @pkt_xfer: Send packet to EC and get response. + * @lock: One transaction at a time. + * @mkbp_event_supported: True if this EC supports the MKBP event protocol. + * @host_sleep_v1: True if this EC supports the sleep v1 command. + * @event_notifier: Interrupt event notifier for transport devices. + * @event_data: Raw payload transferred with the MKBP event. + * @event_size: Size in bytes of the event data. + * @host_event_wake_mask: Mask of host events that cause wake from suspend. + * @ec: The platform_device used by the mfd driver to interface with the + * main EC. + * @pd: The platform_device used by the mfd driver to interface with the + * PD behind an EC. 
+ */ +struct cros_ec_device { + /* These are used by other drivers that want to talk to the EC */ + const char *phys_name; + struct device *dev; + bool was_wake_device; + struct class *cros_class; + int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset, + unsigned int bytes, void *dest); + + /* These are used to implement the platform-specific interface */ + u16 max_request; + u16 max_response; + u16 max_passthru; + u16 proto_version; + void *priv; + int irq; + u8 *din; + u8 *dout; + int din_size; + int dout_size; + bool wake_enabled; + bool suspended; + int (*cmd_xfer)(struct cros_ec_device *ec, + struct cros_ec_command *msg); + int (*pkt_xfer)(struct cros_ec_device *ec, + struct cros_ec_command *msg); + struct mutex lock; + bool mkbp_event_supported; + bool host_sleep_v1; + struct blocking_notifier_head event_notifier; + + struct ec_response_get_next_event_v1 event_data; + int event_size; + u32 host_event_wake_mask; + u32 last_resume_result; + + /* The platform devices used by the mfd driver */ + struct platform_device *ec; + struct platform_device *pd; +}; + +/** + * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information. + * @sensor_num: Id of the sensor, as reported by the EC. + */ +struct cros_ec_sensor_platform { + u8 sensor_num; +}; + +/** + * struct cros_ec_platform - ChromeOS EC platform information. + * @ec_name: Name of EC device (e.g. 'cros-ec', 'cros-pd', ...) + * used in /dev/ and sysfs. + * @cmd_offset: Offset to apply for each command. Set when + * registering a device behind another one. + */ +struct cros_ec_platform { + const char *ec_name; + u16 cmd_offset; +}; + +/** + * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device. + * @ec_dev: Device to suspend. + * + * This can be called by drivers to handle a suspend event. + * + * Return: 0 on success or negative error code. + */ +int cros_ec_suspend(struct cros_ec_device *ec_dev); + +/** + * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device. + * @ec_dev: Device to resume. + * + * This can be called by drivers to handle a resume event. + * + * Return: 0 on success or negative error code. + */ +int cros_ec_resume(struct cros_ec_device *ec_dev); + +/** + * cros_ec_prepare_tx() - Prepare an outgoing message in the output buffer. + * @ec_dev: Device to register. + * @msg: Message to write. + * + * This is intended to be used by all ChromeOS EC drivers, but at present + * only SPI uses it. Once LPC uses the same protocol it can start using it. + * I2C could use it now, with a refactor of the existing code. + * + * Return: 0 on success or negative error code. + */ +int cros_ec_prepare_tx(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + +/** + * cros_ec_check_result() - Check ec_msg->result. + * @ec_dev: EC device. + * @msg: Message to check. + * + * This is used by ChromeOS EC drivers to check the ec_msg->result for + * errors and to warn about them. + * + * Return: 0 on success or negative error code. + */ +int cros_ec_check_result(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + +/** + * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC. + * @ec_dev: EC device. + * @msg: Message to write. + * + * Call this to send a command to the ChromeOS EC. This should be used + * instead of calling the EC's cmd_xfer() callback directly. + * + * Return: 0 on success or negative error code. 
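Editorial note: the cros_ec_command layout and the transfer helpers documented above (cros_ec_cmd_xfer() here, cros_ec_cmd_xfer_status() declared a few lines further down) suggest a simple round-trip pattern. A hedged sketch; the command and response type come from cros_ec_commands.h, while the helper name and buffer handling are illustrative:

#include <linux/platform_data/cros_ec_proto.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Illustrative only: read the EC version strings via EC_CMD_GET_VERSION. */
static int example_get_ec_version(struct cros_ec_device *ec_dev,
				  struct ec_response_get_version *resp)
{
	struct cros_ec_command *msg;
	int ret;

	msg = kzalloc(sizeof(*msg) + sizeof(*resp), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->command = EC_CMD_GET_VERSION;
	msg->insize = sizeof(*resp);	/* no outgoing payload */

	ret = cros_ec_cmd_xfer_status(ec_dev, msg);
	if (ret >= 0)
		memcpy(resp, msg->data, sizeof(*resp));

	kfree(msg);
	return ret;
}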
+ */ +int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + +/** + * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC. + * @ec_dev: EC device. + * @msg: Message to write. + * + * This function is identical to cros_ec_cmd_xfer, except it returns success + * status only if both the command was transmitted successfully and the EC + * replied with success status. It's not necessary to check msg->result when + * using this function. + * + * Return: The number of bytes transferred on success or negative error code. + */ +int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + +/** + * cros_ec_register() - Register a new ChromeOS EC, using the provided info. + * @ec_dev: Device to register. + * + * Before calling this, allocate a pointer to a new device and then fill + * in all the fields up to the --private-- marker. + * + * Return: 0 on success or negative error code. + */ +int cros_ec_register(struct cros_ec_device *ec_dev); + +/** + * cros_ec_unregister() - Remove a ChromeOS EC. + * @ec_dev: Device to unregister. + * + * Call this to deregister a ChromeOS EC, then clean up any private data. + * + * Return: 0 on success or negative error code. + */ +int cros_ec_unregister(struct cros_ec_device *ec_dev); + +/** + * cros_ec_query_all() - Query the protocol version supported by the + * ChromeOS EC. + * @ec_dev: Device to register. + * + * Return: 0 on success or negative error code. + */ +int cros_ec_query_all(struct cros_ec_device *ec_dev); + +/** + * cros_ec_get_next_event() - Fetch next event from the ChromeOS EC. + * @ec_dev: Device to fetch event from. + * @wake_event: Pointer to a bool set to true upon return if the event might be + * treated as a wake event. Ignored if null. + * + * Return: negative error code on errors; 0 for no data; or else number of + * bytes received (i.e., an event was retrieved successfully). Event types are + * written out to @ec_dev->event_data.event_type on success. + */ +int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event); + +/** + * cros_ec_get_host_event() - Return a mask of event set by the ChromeOS EC. + * @ec_dev: Device to fetch event from. + * + * When MKBP is supported, when the EC raises an interrupt, we collect the + * events raised and call the functions in the ec notifier. This function + * is a helper to know which events are raised. + * + * Return: 0 on error or non-zero bitmask of one or more EC_HOST_EVENT_*. + */ +u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev); + +#endif /* __LINUX_CROS_EC_PROTO_H */ diff --git a/include/linux/platform_data/dwc3-omap.h b/include/linux/platform_data/dwc3-omap.h deleted file mode 100644 index 1d36ca874cc8..000000000000 --- a/include/linux/platform_data/dwc3-omap.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * dwc3-omap.h - OMAP Specific Glue layer, header. - * - * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com - * All rights reserved. - * - * Author: Felipe Balbi <balbi@ti.com> - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions, and the following disclaimer, - * without modification. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The names of the above-listed copyright holders may not be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2, as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS - * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, - * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -enum dwc3_omap_utmi_mode { - DWC3_OMAP_UTMI_MODE_UNKNOWN = 0, - DWC3_OMAP_UTMI_MODE_HW, - DWC3_OMAP_UTMI_MODE_SW, -}; diff --git a/include/linux/platform_data/keypad-w90p910.h b/include/linux/platform_data/keypad-w90p910.h deleted file mode 100644 index 206ca4ecd93f..000000000000 --- a/include/linux/platform_data/keypad-w90p910.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_ARCH_W90P910_KEYPAD_H -#define __ASM_ARCH_W90P910_KEYPAD_H - -#include <linux/input/matrix_keypad.h> - -extern void mfp_set_groupi(struct device *dev); - -struct w90p910_keypad_platform_data { - const struct matrix_keymap_data *keymap_data; - - unsigned int prescale; - unsigned int debounce; -}; - -#endif /* __ASM_ARCH_W90P910_KEYPAD_H */ diff --git a/include/linux/platform_data/leds-kirkwood-netxbig.h b/include/linux/platform_data/leds-kirkwood-netxbig.h deleted file mode 100644 index 3c85a735c380..000000000000 --- a/include/linux/platform_data/leds-kirkwood-netxbig.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Platform data structure for netxbig LED driver - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. 
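As an aside to the cros_ec_proto.h declarations above: cros_ec_cmd_xfer_status() folds the EC-reported status into the return value, so callers do not need to inspect msg->result themselves. A minimal caller sketch, assuming the usual flexible-array cros_ec_command allocation and the EC_CMD_GET_VERSION command/response pair from cros_ec_commands.h (the exact command chosen here is illustrative, not taken from this patch):

	/* Needs <linux/slab.h> plus the cros_ec_proto.h/cros_ec_commands.h headers. */
	static int example_ec_get_version(struct cros_ec_device *ec_dev)
	{
		struct cros_ec_command *msg;
		int ret;

		msg = kzalloc(sizeof(*msg) + sizeof(struct ec_response_get_version),
			      GFP_KERNEL);
		if (!msg)
			return -ENOMEM;

		msg->version = 0;
		msg->command = EC_CMD_GET_VERSION;
		msg->outsize = 0;
		msg->insize  = sizeof(struct ec_response_get_version);

		/* < 0 on transport error or EC-reported failure, otherwise
		 * the number of bytes received from the EC.
		 */
		ret = cros_ec_cmd_xfer_status(ec_dev, msg);

		kfree(msg);
		return ret < 0 ? ret : 0;
	}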
- */ - -#ifndef __LEDS_KIRKWOOD_NETXBIG_H -#define __LEDS_KIRKWOOD_NETXBIG_H - -struct netxbig_gpio_ext { - unsigned *addr; - int num_addr; - unsigned *data; - int num_data; - unsigned enable; -}; - -enum netxbig_led_mode { - NETXBIG_LED_OFF, - NETXBIG_LED_ON, - NETXBIG_LED_SATA, - NETXBIG_LED_TIMER1, - NETXBIG_LED_TIMER2, - NETXBIG_LED_MODE_NUM, -}; - -#define NETXBIG_LED_INVALID_MODE NETXBIG_LED_MODE_NUM - -struct netxbig_led_timer { - unsigned long delay_on; - unsigned long delay_off; - enum netxbig_led_mode mode; -}; - -struct netxbig_led { - const char *name; - const char *default_trigger; - int mode_addr; - int *mode_val; - int bright_addr; - int bright_max; -}; - -struct netxbig_led_platform_data { - struct netxbig_gpio_ext *gpio_ext; - struct netxbig_led_timer *timer; - int num_timer; - struct netxbig_led *leds; - int num_leds; -}; - -#endif /* __LEDS_KIRKWOOD_NETXBIG_H */ diff --git a/include/linux/platform_data/nxp-nci.h b/include/linux/platform_data/nxp-nci.h deleted file mode 100644 index 97827ad468e2..000000000000 --- a/include/linux/platform_data/nxp-nci.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Generic platform data for the NXP NCI NFC chips. - * - * Copyright (C) 2014 NXP Semiconductors All rights reserved. - * - * Authors: Clément Perrochaud <clement.perrochaud@nxp.com> - */ - -#ifndef _NXP_NCI_H_ -#define _NXP_NCI_H_ - -struct nxp_nci_nfc_platform_data { - unsigned int gpio_en; - unsigned int gpio_fw; - unsigned int irq; -}; - -#endif /* _NXP_NCI_H_ */ diff --git a/include/linux/platform_data/pinctrl-single.h b/include/linux/platform_data/pinctrl-single.h index 1cf36fdf9510..7473d3c4cabf 100644 --- a/include/linux/platform_data/pinctrl-single.h +++ b/include/linux/platform_data/pinctrl-single.h @@ -1,4 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _PINCTRL_SINGLE_H +#define _PINCTRL_SINGLE_H + /** * irq: optional wake-up interrupt * rearm: optional soc specific rearm function @@ -11,3 +15,5 @@ struct pcs_pdata { int irq; void (*rearm)(void); }; + +#endif /* _PINCTRL_SINGLE_H */ diff --git a/include/linux/platform_data/sgi-w1.h b/include/linux/platform_data/sgi-w1.h new file mode 100644 index 000000000000..e28c8a90ff84 --- /dev/null +++ b/include/linux/platform_data/sgi-w1.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * SGI One-Wire (W1) IP + */ + +#ifndef PLATFORM_DATA_SGI_W1_H +#define PLATFORM_DATA_SGI_W1_H + +struct sgi_w1_platform_data { + char dev_id[64]; +}; + +#endif /* PLATFORM_DATA_SGI_W1_H */ diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index 0c587d4fc718..b5b7a3423ca8 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + #ifndef __TI_SYSC_DATA_H__ #define __TI_SYSC_DATA_H__ @@ -47,6 +49,7 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_MODULE_QUIRK_SGX BIT(18) #define SYSC_MODULE_QUIRK_HDQ1W BIT(17) #define SYSC_MODULE_QUIRK_I2C BIT(16) #define SYSC_MODULE_QUIRK_WDT BIT(15) @@ -70,7 +73,7 @@ struct sysc_regbits { /** * struct sysc_capabilities - capabilities for an interconnect target module - * + * @type: sysc type identifier for the module * @sysc_mask: bitmask of supported SYSCONFIG register bits * @regbits: bitmask of SYSCONFIG register bits * @mod_quirks: bitmask of module specific quirks @@ -85,8 +88,9 @@ struct sysc_capabilities { /** * struct sysc_config - configuration for an interconnect target module * @sysc_val: 
configured value for sysc register + * @syss_mask: configured mask value for SYSSTATUS register * @midlemodes: bitmask of supported master idle modes - * @sidlemodes: bitmask of supported master idle modes + * @sidlemodes: bitmask of supported slave idle modes * @srst_udelay: optional delay needed after OCP soft reset * @quirks: bitmask of enabled quirks */ diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 9bc36b589827..1b5cec067533 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -24,6 +24,7 @@ struct platform_device { int id; bool id_auto; struct device dev; + u64 dma_mask; u32 num_resources; struct resource *resource; @@ -48,13 +49,16 @@ extern void platform_device_unregister(struct platform_device *); extern struct bus_type platform_bus_type; extern struct device platform_bus; -extern void arch_setup_pdev_archdata(struct platform_device *); extern struct resource *platform_get_resource(struct platform_device *, unsigned int, unsigned int); +extern struct device * +platform_find_device_by_driver(struct device *start, + const struct device_driver *drv); extern void __iomem * devm_platform_ioremap_resource(struct platform_device *pdev, unsigned int index); extern int platform_get_irq(struct platform_device *, unsigned int); +extern int platform_get_irq_optional(struct platform_device *, unsigned int); extern int platform_irq_count(struct platform_device *); extern struct resource *platform_get_resource_byname(struct platform_device *, unsigned int, diff --git a/include/linux/pm.h b/include/linux/pm.h index 3619a870eaa4..4c441be03079 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -712,8 +712,6 @@ struct dev_pm_domain { extern void device_pm_lock(void); extern void dpm_resume_start(pm_message_t state); extern void dpm_resume_end(pm_message_t state); -extern void dpm_noirq_resume_devices(pm_message_t state); -extern void dpm_noirq_end(void); extern void dpm_resume_noirq(pm_message_t state); extern void dpm_resume_early(pm_message_t state); extern void dpm_resume(pm_message_t state); @@ -722,8 +720,6 @@ extern void dpm_complete(pm_message_t state); extern void device_pm_unlock(void); extern int dpm_suspend_end(pm_message_t state); extern int dpm_suspend_start(pm_message_t state); -extern void dpm_noirq_begin(void); -extern int dpm_noirq_suspend_devices(pm_message_t state); extern int dpm_suspend_noirq(pm_message_t state); extern int dpm_suspend_late(pm_message_t state); extern int dpm_suspend(pm_message_t state); diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 91d9bf497071..baf02ff91a31 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -197,9 +197,9 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev); int pm_genpd_remove_device(struct device *dev); int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, - struct generic_pm_domain *new_subdomain); + struct generic_pm_domain *subdomain); int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, - struct generic_pm_domain *target); + struct generic_pm_domain *subdomain); int pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off); int pm_genpd_remove(struct generic_pm_domain *genpd); @@ -226,12 +226,12 @@ static inline int pm_genpd_remove_device(struct device *dev) return -ENOSYS; } static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, - struct 
generic_pm_domain *new_sd) + struct generic_pm_domain *subdomain) { return -ENOSYS; } static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, - struct generic_pm_domain *target) + struct generic_pm_domain *subdomain) { return -ENOSYS; } @@ -282,8 +282,8 @@ int of_genpd_add_provider_onecell(struct device_node *np, struct genpd_onecell_data *data); void of_genpd_del_provider(struct device_node *np); int of_genpd_add_device(struct of_phandle_args *args, struct device *dev); -int of_genpd_add_subdomain(struct of_phandle_args *parent, - struct of_phandle_args *new_subdomain); +int of_genpd_add_subdomain(struct of_phandle_args *parent_spec, + struct of_phandle_args *subdomain_spec); struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); int of_genpd_parse_idle_states(struct device_node *dn, struct genpd_power_state **states, int *n); @@ -316,8 +316,8 @@ static inline int of_genpd_add_device(struct of_phandle_args *args, return -ENODEV; } -static inline int of_genpd_add_subdomain(struct of_phandle_args *parent, - struct of_phandle_args *new_subdomain) +static inline int of_genpd_add_subdomain(struct of_phandle_args *parent_spec, + struct of_phandle_args *subdomain_spec) { return -ENODEV; } diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index af5021f27cb7..b8197ab014f2 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -96,6 +96,8 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev); struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, unsigned long freq, bool available); +struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, + unsigned int level); struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq); @@ -128,7 +130,7 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name); void dev_pm_opp_put_clkname(struct opp_table *opp_table); struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table); -struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names); +struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); void dev_pm_opp_detach_genpd(struct opp_table *opp_table); int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); @@ -200,6 +202,12 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, return ERR_PTR(-ENOTSUPP); } +static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, + unsigned int level) +{ + return ERR_PTR(-ENOTSUPP); +} + static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq) { @@ -292,7 +300,7 @@ static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {} -static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names) +static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs) { return ERR_PTR(-ENOTSUPP); } diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 2aebbc5b9950..222c3e01397c 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -13,9 +13,6 @@ enum { 
PM_QOS_RESERVED = 0, PM_QOS_CPU_DMA_LATENCY, - PM_QOS_NETWORK_LATENCY, - PM_QOS_NETWORK_THROUGHPUT, - PM_QOS_MEMORY_BANDWIDTH, /* insert new class ID */ PM_QOS_NUM_CLASSES, @@ -33,9 +30,6 @@ enum pm_qos_flags_status { #define PM_QOS_LATENCY_ANY_NS ((s64)PM_QOS_LATENCY_ANY * NSEC_PER_USEC) #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) -#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) -#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 -#define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0 #define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE PM_QOS_LATENCY_ANY #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 91027602d137..661efa029c96 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -21,6 +21,7 @@ struct wake_irq; * struct wakeup_source - Representation of wakeup sources * * @name: Name of the wakeup source + * @id: Wakeup source id * @entry: Wakeup source list entry * @lock: Wakeup source lock * @wakeirq: Optional device specific wakeirq @@ -35,11 +36,13 @@ struct wake_irq; * @relax_count: Number of times the wakeup source was deactivated. * @expire_count: Number of times the wakeup source's timeout has expired. * @wakeup_count: Number of times the wakeup source might abort suspend. + * @dev: Struct device for sysfs statistics about the wakeup source. * @active: Status of the wakeup source. * @autosleep_enabled: Autosleep is active, so update @prevent_sleep_time. */ struct wakeup_source { const char *name; + int id; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; @@ -55,6 +58,7 @@ struct wakeup_source { unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; + struct device *dev; bool active:1; bool autosleep_enabled:1; }; @@ -81,12 +85,12 @@ static inline void device_set_wakeup_path(struct device *dev) } /* drivers/base/power/wakeup.c */ -extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name); extern struct wakeup_source *wakeup_source_create(const char *name); extern void wakeup_source_destroy(struct wakeup_source *ws); extern void wakeup_source_add(struct wakeup_source *ws); extern void wakeup_source_remove(struct wakeup_source *ws); -extern struct wakeup_source *wakeup_source_register(const char *name); +extern struct wakeup_source *wakeup_source_register(struct device *dev, + const char *name); extern void wakeup_source_unregister(struct wakeup_source *ws); extern int device_wakeup_enable(struct device *dev); extern int device_wakeup_disable(struct device *dev); @@ -112,9 +116,6 @@ static inline bool device_can_wakeup(struct device *dev) return dev->power.can_wakeup; } -static inline void wakeup_source_prepare(struct wakeup_source *ws, - const char *name) {} - static inline struct wakeup_source *wakeup_source_create(const char *name) { return NULL; @@ -126,7 +127,8 @@ static inline void wakeup_source_add(struct wakeup_source *ws) {} static inline void wakeup_source_remove(struct wakeup_source *ws) {} -static inline struct wakeup_source *wakeup_source_register(const char *name) +static inline struct wakeup_source *wakeup_source_register(struct device *dev, + const char *name) { return NULL; } @@ -181,13 +183,6 @@ static inline void pm_wakeup_dev_event(struct device *dev, unsigned int msec, #endif /* !CONFIG_PM_SLEEP */ -static inline void wakeup_source_init(struct wakeup_source *ws, - const char *name) -{ - 
wakeup_source_prepare(ws, name); - wakeup_source_add(ws); -} - static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) { return pm_wakeup_ws_event(ws, msec, false); diff --git a/include/linux/property.h b/include/linux/property.h index 5a910ad79591..9b3d4ca3a73a 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -421,6 +421,10 @@ bool is_software_node(const struct fwnode_handle *fwnode); const struct software_node *to_software_node(struct fwnode_handle *fwnode); struct fwnode_handle *software_node_fwnode(const struct software_node *node); +const struct software_node * +software_node_find_by_name(const struct software_node *parent, + const char *name); + int software_node_register_nodes(const struct software_node *nodes); void software_node_unregister_nodes(const struct software_node *nodes); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index eef02e64b422..b5db1ee96d78 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -689,7 +689,7 @@ enum qed_link_mode_bits { QED_LM_40000baseLR4_Full_BIT = BIT(9), QED_LM_50000baseKR2_Full_BIT = BIT(10), QED_LM_100000baseKR4_Full_BIT = BIT(11), - QED_LM_2500baseX_Full_BIT = BIT(12), + QED_LM_TP_BIT = BIT(12), QED_LM_Backplane_BIT = BIT(13), QED_LM_1000baseKX_Full_BIT = BIT(14), QED_LM_10000baseKX4_Full_BIT = BIT(15), @@ -804,6 +804,7 @@ enum qed_nvm_flash_cmd { QED_NVM_FLASH_CMD_FILE_DATA = 0x2, QED_NVM_FLASH_CMD_FILE_START = 0x3, QED_NVM_FLASH_CMD_NVM_CHANGE = 0x4, + QED_NVM_FLASH_CMD_NVM_CFG_ID = 0x5, QED_NVM_FLASH_CMD_NVM_MAX, }; @@ -1131,6 +1132,34 @@ struct qed_common_ops { * @param cdev */ u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev); + +/** + * @brief read_nvm_cfg - Read NVM config attribute value. + * @param cdev + * @param buf - buffer + * @param cmd - NVM CFG command id + * @param entity_id - Entity id + * + */ + int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd, + u32 entity_id); +/** + * @brief read_nvm_cfg - Read NVM config attribute value. + * @param cdev + * @param cmd - NVM CFG command id + * + * @return config id length, 0 on error. + */ + int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd); + +/** + * @brief set_grc_config - Configure value for grc config id. + * @param cdev + * @param cfg_id - grc config id + * @param val - grc config value + * + */ + int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val); }; #define MASK_FIELD(_name, _value) \ diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index 898f595ea3d6..74efca15fde7 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -225,7 +225,7 @@ struct qed_rdma_start_in_params { struct qed_rdma_add_user_out_params { u16 dpi; - u64 dpi_addr; + void __iomem *dpi_addr; u64 dpi_phys_addr; u32 dpi_size; u16 wid_count; diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h deleted file mode 100644 index 034982c98c8b..000000000000 --- a/include/linux/quicklist.h +++ /dev/null @@ -1,94 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef LINUX_QUICKLIST_H -#define LINUX_QUICKLIST_H -/* - * Fast allocations and disposal of pages. Pages must be in the condition - * as needed after allocation when they are freed. Per cpu lists of pages - * are kept that only contain node local pages. - * - * (C) 2007, SGI. 
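Returning to the pm_wakeup.h hunk above: wakeup_source_register() now takes the associated struct device (used for the new per-source sysfs statistics), and the old wakeup_source_prepare()/wakeup_source_init() helpers are gone. A minimal driver-side sketch, where 'dev' and the "my-wakeup" name are placeholders:

	/* Requires <linux/pm_wakeup.h>. */
	struct wakeup_source *ws;

	ws = wakeup_source_register(dev, "my-wakeup");
	if (!ws)
		return -ENOMEM;

	__pm_stay_awake(ws);		/* keep the system awake while handling the event */
	/* ... process the wake event ... */
	__pm_relax(ws);

	wakeup_source_unregister(ws);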
Christoph Lameter <cl@linux.com> - */ -#include <linux/kernel.h> -#include <linux/gfp.h> -#include <linux/percpu.h> - -#ifdef CONFIG_QUICKLIST - -struct quicklist { - void *page; - int nr_pages; -}; - -DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK]; - -/* - * The two key functions quicklist_alloc and quicklist_free are inline so - * that they may be custom compiled for the platform. - * Specifying a NULL ctor can remove constructor support. Specifying - * a constant quicklist allows the determination of the exact address - * in the per cpu area. - * - * The fast patch in quicklist_alloc touched only a per cpu cacheline and - * the first cacheline of the page itself. There is minmal overhead involved. - */ -static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *)) -{ - struct quicklist *q; - void **p = NULL; - - q =&get_cpu_var(quicklist)[nr]; - p = q->page; - if (likely(p)) { - q->page = p[0]; - p[0] = NULL; - q->nr_pages--; - } - put_cpu_var(quicklist); - if (likely(p)) - return p; - - p = (void *)__get_free_page(flags | __GFP_ZERO); - if (ctor && p) - ctor(p); - return p; -} - -static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p, - struct page *page) -{ - struct quicklist *q; - - q = &get_cpu_var(quicklist)[nr]; - *(void **)p = q->page; - q->page = p; - q->nr_pages++; - put_cpu_var(quicklist); -} - -static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp) -{ - __quicklist_free(nr, dtor, pp, virt_to_page(pp)); -} - -static inline void quicklist_free_page(int nr, void (*dtor)(void *), - struct page *page) -{ - __quicklist_free(nr, dtor, page_address(page), page); -} - -void quicklist_trim(int nr, void (*dtor)(void *), - unsigned long min_pages, unsigned long max_free); - -unsigned long quicklist_total_size(void); - -#else - -static inline unsigned long quicklist_total_size(void) -{ - return 0; -} - -#endif - -#endif /* LINUX_QUICKLIST_H */ - diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index dc905a4ff8d7..185d94829701 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -22,7 +22,7 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb) /* i_mutex must being held */ static inline bool is_quota_modification(struct inode *inode, struct iattr *ia) { - return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) || + return (ia->ia_valid & ATTR_SIZE) || (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) || (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid)); } diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h index ee582bdb7fda..b806a0ff6554 100644 --- a/include/linux/ramfs.h +++ b/include/linux/ramfs.h @@ -4,8 +4,7 @@ struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir, umode_t mode, dev_t dev); -extern struct dentry *ramfs_mount(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data); +extern int ramfs_init_fs_context(struct fs_context *fc); #ifdef CONFIG_MMU static inline int @@ -17,9 +16,8 @@ ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize); #endif +extern const struct fs_parameter_description ramfs_fs_parameters; extern const struct file_operations ramfs_file_operations; extern const struct vm_operations_struct generic_file_vm_ops; -int ramfs_fill_super(struct super_block *sb, void *data, int silent); - #endif diff --git a/include/linux/root_dev.h b/include/linux/root_dev.h index 
bab671b0782f..4e78651371ba 100644 --- a/include/linux/root_dev.h +++ b/include/linux/root_dev.h @@ -8,6 +8,7 @@ enum { Root_NFS = MKDEV(UNNAMED_MAJOR, 255), + Root_CIFS = MKDEV(UNNAMED_MAJOR, 254), Root_RAM0 = MKDEV(RAMDISK_MAJOR, 0), Root_RAM1 = MKDEV(RAMDISK_MAJOR, 1), Root_FD0 = MKDEV(FLOPPY_MAJOR, 0), diff --git a/include/linux/sched.h b/include/linux/sched.h index b75b28287005..70db597d6fd4 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -958,6 +958,10 @@ struct task_struct { struct mutex_waiter *blocked_on; #endif +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP + int non_block_count; +#endif + #ifdef CONFIG_TRACE_IRQFLAGS unsigned int irq_events; unsigned long hardirq_enable_ip; diff --git a/include/linux/security.h b/include/linux/security.h index 5f7441abbf42..ace6fdb604f9 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -259,7 +259,8 @@ int security_dentry_create_files_as(struct dentry *dentry, int mode, struct qstr *name, const struct cred *old, struct cred *new); - +int security_path_notify(const struct path *path, u64 mask, + unsigned int obj_type); int security_inode_alloc(struct inode *inode); void security_inode_free(struct inode *inode); int security_inode_init_security(struct inode *inode, struct inode *dir, @@ -387,7 +388,6 @@ int security_ismaclabel(const char *name); int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); void security_release_secctx(char *secdata, u32 seclen); - void security_inode_invalidate_secctx(struct inode *inode); int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen); int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen); @@ -621,6 +621,12 @@ static inline int security_move_mount(const struct path *from_path, return 0; } +static inline int security_path_notify(const struct path *path, u64 mask, + unsigned int obj_type) +{ + return 0; +} + static inline int security_inode_alloc(struct inode *inode) { return 0; diff --git a/include/linux/sha256.h b/include/linux/sha256.h deleted file mode 100644 index 26972b9e92db..000000000000 --- a/include/linux/sha256.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2014 Red Hat Inc. - * - * Author: Vivek Goyal <vgoyal@redhat.com> - */ - -#ifndef SHA256_H -#define SHA256_H - -#include <linux/types.h> -#include <crypto/sha.h> - -/* - * Stand-alone implementation of the SHA256 algorithm. It is designed to - * have as little dependencies as possible so it can be used in the - * kexec_file purgatory. In other cases you should use the implementation in - * crypto/. 
- * - * For details see lib/sha256.c - */ - -extern int sha256_init(struct sha256_state *sctx); -extern int sha256_update(struct sha256_state *sctx, const u8 *input, - unsigned int length); -extern int sha256_final(struct sha256_state *sctx, u8 *hash); - -#endif /* SHA256_H */ diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 20d815a33145..de8e4b71e3ba 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -49,8 +49,9 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode) /* * Functions in mm/shmem.c called directly from elsewhere: */ +extern const struct fs_parameter_description shmem_fs_parameters; extern int shmem_init(void); -extern int shmem_fill_super(struct super_block *sb, void *data, int silent); +extern int shmem_init_fs_context(struct fs_context *fc); extern struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); extern struct file *shmem_kernel_file_setup(const char *name, loff_t size, diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 9443cafd1969..0f80123650e2 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -69,7 +69,7 @@ struct shrinker { /* These are for internal use */ struct list_head list; -#ifdef CONFIG_MEMCG_KMEM +#ifdef CONFIG_MEMCG /* ID in shrinker_idr */ int id; #endif @@ -81,6 +81,11 @@ struct shrinker { /* Flags */ #define SHRINKER_NUMA_AWARE (1 << 0) #define SHRINKER_MEMCG_AWARE (1 << 1) +/* + * It just makes sense when the shrinker is also MEMCG_AWARE for now, + * non-MEMCG_AWARE shrinker should not have this flag set. + */ +#define SHRINKER_NONSLAB (1 << 2) extern int prealloc_shrinker(struct shrinker *shrinker); extern void register_shrinker_prepared(struct shrinker *shrinker); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index ba5583522d24..907209c0794e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -14,6 +14,7 @@ #include <linux/compiler.h> #include <linux/time.h> #include <linux/bug.h> +#include <linux/bvec.h> #include <linux/cache.h> #include <linux/rbtree.h> #include <linux/socket.h> @@ -36,6 +37,9 @@ #include <linux/in6.h> #include <linux/if_packet.h> #include <net/flow.h> +#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#include <linux/netfilter/nf_conntrack_common.h> +#endif /* The interface for checksum offload between the stack and networking drivers * is as follows... @@ -243,12 +247,6 @@ struct bpf_prog; union bpf_attr; struct skb_ext; -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) -struct nf_conntrack { - atomic_t use; -}; -#endif - #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) struct nf_bridge_info { enum { @@ -278,6 +276,16 @@ struct nf_bridge_info { }; #endif +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) +/* Chain in tc_skb_ext will be used to share the tc chain with + * ovs recirc_id. It will be set to the current chain by tc + * and read by ovs to recirc_id. + */ +struct tc_skb_ext { + __u32 chain; +}; +#endif + struct sk_buff_head { /* These two members must be first. 
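Relating to the shrinker.h hunk above: SHRINKER_NONSLAB allows a memcg-aware shrinker to be invoked for non-slab objects as well, and the internal id field is now guarded by CONFIG_MEMCG instead of CONFIG_MEMCG_KMEM. A minimal registration sketch; the callback bodies and names are illustrative only:

	static unsigned long example_count(struct shrinker *shrink,
					   struct shrink_control *sc)
	{
		return 0;		/* report the number of reclaimable objects */
	}

	static unsigned long example_scan(struct shrinker *shrink,
					  struct shrink_control *sc)
	{
		return SHRINK_STOP;	/* free up to sc->nr_to_scan objects here */
	}

	static struct shrinker example_shrinker = {
		.count_objects	= example_count,
		.scan_objects	= example_scan,
		.seeks		= DEFAULT_SEEKS,
		/* per the comment above, NONSLAB only makes sense with MEMCG_AWARE */
		.flags		= SHRINKER_MEMCG_AWARE | SHRINKER_NONSLAB,
	};

	/* in init code: err = register_shrinker(&example_shrinker); */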
*/ struct sk_buff *next; @@ -308,58 +316,45 @@ extern int sysctl_max_skb_frags; */ #define GSO_BY_FRAGS 0xFFFF -typedef struct skb_frag_struct skb_frag_t; - -struct skb_frag_struct { - struct { - struct page *p; - } page; -#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) - __u32 page_offset; - __u32 size; -#else - __u16 page_offset; - __u16 size; -#endif -}; +typedef struct bio_vec skb_frag_t; /** - * skb_frag_size - Returns the size of a skb fragment + * skb_frag_size() - Returns the size of a skb fragment * @frag: skb fragment */ static inline unsigned int skb_frag_size(const skb_frag_t *frag) { - return frag->size; + return frag->bv_len; } /** - * skb_frag_size_set - Sets the size of a skb fragment + * skb_frag_size_set() - Sets the size of a skb fragment * @frag: skb fragment * @size: size of fragment */ static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size) { - frag->size = size; + frag->bv_len = size; } /** - * skb_frag_size_add - Incrementes the size of a skb fragment by %delta + * skb_frag_size_add() - Increments the size of a skb fragment by @delta * @frag: skb fragment * @delta: value to add */ static inline void skb_frag_size_add(skb_frag_t *frag, int delta) { - frag->size += delta; + frag->bv_len += delta; } /** - * skb_frag_size_sub - Decrements the size of a skb fragment by %delta + * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta * @frag: skb fragment * @delta: value to subtract */ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) { - frag->size -= delta; + frag->bv_len -= delta; } /** @@ -379,7 +374,7 @@ static inline bool skb_frag_must_loop(struct page *p) * skb_frag_foreach_page - loop over pages in a fragment * * @f: skb frag to operate on - * @f_off: offset from start of f->page.p + * @f_off: offset from start of f->bv_page * @f_len: length from f_off to loop over * @p: (temp var) current page * @p_off: (temp var) offset from start of current page, @@ -916,7 +911,6 @@ static inline bool skb_pfmemalloc(const struct sk_buff *skb) #define SKB_DST_NOREF 1UL #define SKB_DST_PTRMASK ~(SKB_DST_NOREF) -#define SKB_NFCT_PTRMASK ~(7UL) /** * skb_dst - returns skb dst_entry * @skb: buffer @@ -1283,7 +1277,7 @@ static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) struct bpf_flow_dissector; bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, - __be16 proto, int nhoff, int hlen); + __be16 proto, int nhoff, int hlen, unsigned int flags); bool __skb_flow_dissect(const struct net *net, const struct sk_buff *skb, @@ -2097,8 +2091,8 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, * that not all callers have unique ownership of the page but rely * on page_is_pfmemalloc doing the right thing(tm). 
*/ - frag->page.p = page; - frag->page_offset = off; + frag->bv_page = page; + frag->bv_offset = off; skb_frag_size_set(frag, size); page = compound_head(page); @@ -2878,6 +2872,46 @@ static inline void skb_propagate_pfmemalloc(struct page *page, } /** + * skb_frag_off() - Returns the offset of a skb fragment + * @frag: the paged fragment + */ +static inline unsigned int skb_frag_off(const skb_frag_t *frag) +{ + return frag->bv_offset; +} + +/** + * skb_frag_off_add() - Increments the offset of a skb fragment by @delta + * @frag: skb fragment + * @delta: value to add + */ +static inline void skb_frag_off_add(skb_frag_t *frag, int delta) +{ + frag->bv_offset += delta; +} + +/** + * skb_frag_off_set() - Sets the offset of a skb fragment + * @frag: skb fragment + * @offset: offset of fragment + */ +static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset) +{ + frag->bv_offset = offset; +} + +/** + * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment + * @fragto: skb fragment where offset is set + * @fragfrom: skb fragment offset is copied from + */ +static inline void skb_frag_off_copy(skb_frag_t *fragto, + const skb_frag_t *fragfrom) +{ + fragto->bv_offset = fragfrom->bv_offset; +} + +/** * skb_frag_page - retrieve the page referred to by a paged fragment * @frag: the paged fragment * @@ -2885,7 +2919,7 @@ static inline void skb_propagate_pfmemalloc(struct page *page, */ static inline struct page *skb_frag_page(const skb_frag_t *frag) { - return frag->page.p; + return frag->bv_page; } /** @@ -2943,7 +2977,7 @@ static inline void skb_frag_unref(struct sk_buff *skb, int f) */ static inline void *skb_frag_address(const skb_frag_t *frag) { - return page_address(skb_frag_page(frag)) + frag->page_offset; + return page_address(skb_frag_page(frag)) + skb_frag_off(frag); } /** @@ -2959,7 +2993,18 @@ static inline void *skb_frag_address_safe(const skb_frag_t *frag) if (unlikely(!ptr)) return NULL; - return ptr + frag->page_offset; + return ptr + skb_frag_off(frag); +} + +/** + * skb_frag_page_copy() - sets the page in a fragment from another fragment + * @fragto: skb fragment where page is set + * @fragfrom: skb fragment page is copied from + */ +static inline void skb_frag_page_copy(skb_frag_t *fragto, + const skb_frag_t *fragfrom) +{ + fragto->bv_page = fragfrom->bv_page; } /** @@ -2971,7 +3016,7 @@ static inline void *skb_frag_address_safe(const skb_frag_t *frag) */ static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page) { - frag->page.p = page; + frag->bv_page = page; } /** @@ -3007,7 +3052,7 @@ static inline dma_addr_t skb_frag_dma_map(struct device *dev, enum dma_data_direction dir) { return dma_map_page(dev, skb_frag_page(frag), - frag->page_offset + offset, size, dir); + skb_frag_off(frag) + offset, size, dir); } static inline struct sk_buff *pskb_copy(struct sk_buff *skb, @@ -3174,10 +3219,10 @@ static inline bool skb_can_coalesce(struct sk_buff *skb, int i, if (skb_zcopy(skb)) return false; if (i) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; return page == skb_frag_page(frag) && - off == frag->page_offset + skb_frag_size(frag); + off == skb_frag_off(frag) + skb_frag_size(frag); } return false; } @@ -3991,25 +4036,27 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr, static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb) { #if IS_ENABLED(CONFIG_NF_CONNTRACK) - return (void *)(skb->_nfct & 
SKB_NFCT_PTRMASK); + return (void *)(skb->_nfct & NFCT_PTRMASK); #else return NULL; #endif } -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) -void nf_conntrack_destroy(struct nf_conntrack *nfct); -static inline void nf_conntrack_put(struct nf_conntrack *nfct) +static inline unsigned long skb_get_nfct(const struct sk_buff *skb) { - if (nfct && atomic_dec_and_test(&nfct->use)) - nf_conntrack_destroy(nfct); +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + return skb->_nfct; +#else + return 0UL; +#endif } -static inline void nf_conntrack_get(struct nf_conntrack *nfct) + +static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct) { - if (nfct) - atomic_inc(&nfct->use); -} +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + skb->_nfct = nfct; #endif +} #ifdef CONFIG_SKB_EXTENSIONS enum skb_ext_id { @@ -4019,6 +4066,9 @@ enum skb_ext_id { #ifdef CONFIG_XFRM SKB_EXT_SEC_PATH, #endif +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + TC_SKB_EXT, +#endif SKB_EXT_NUM, /* must be last */ }; diff --git a/include/linux/slab.h b/include/linux/slab.h index 56c9c7eed34e..ab2b98ad76e1 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -595,68 +595,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) return __kmalloc_node(size, flags, node); } -struct memcg_cache_array { - struct rcu_head rcu; - struct kmem_cache *entries[0]; -}; - -/* - * This is the main placeholder for memcg-related information in kmem caches. - * Both the root cache and the child caches will have it. For the root cache, - * this will hold a dynamically allocated array large enough to hold - * information about the currently limited memcgs in the system. To allow the - * array to be accessed without taking any locks, on relocation we free the old - * version only after a grace period. - * - * Root and child caches hold different metadata. - * - * @root_cache: Common to root and child caches. NULL for root, pointer to - * the root cache for children. - * - * The following fields are specific to root caches. - * - * @memcg_caches: kmemcg ID indexed table of child caches. This table is - * used to index child cachces during allocation and cleared - * early during shutdown. - * - * @root_caches_node: List node for slab_root_caches list. - * - * @children: List of all child caches. While the child caches are also - * reachable through @memcg_caches, a child cache remains on - * this list until it is actually destroyed. - * - * The following fields are specific to child caches. - * - * @memcg: Pointer to the memcg this cache belongs to. - * - * @children_node: List node for @root_cache->children list. - * - * @kmem_caches_node: List node for @memcg->kmem_caches list. 
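Tying together the skbuff.h changes above: with skb_frag_t now a struct bio_vec, drivers are expected to go through skb_frag_off()/skb_frag_size()/skb_frag_page() (and skb_frag_dma_map()) rather than touching page_offset/size fields directly. A minimal transmit-path fragment walk; the function name and device pointer are illustrative:

	/* Needs <linux/skbuff.h> and <linux/dma-mapping.h>. */
	static int example_map_frags(struct device *dev, struct sk_buff *skb)
	{
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr;

			addr = skb_frag_dma_map(dev, frag, 0,
						skb_frag_size(frag),
						DMA_TO_DEVICE);
			if (dma_mapping_error(dev, addr))
				return -ENOMEM;
			/* hand (addr, skb_frag_size(frag)) to the hardware here */
		}
		return 0;
	}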
- */ -struct memcg_cache_params { - struct kmem_cache *root_cache; - union { - struct { - struct memcg_cache_array __rcu *memcg_caches; - struct list_head __root_caches_node; - struct list_head children; - bool dying; - }; - struct { - struct mem_cgroup *memcg; - struct list_head children_node; - struct list_head kmem_caches_node; - struct percpu_ref refcnt; - - void (*work_fn)(struct kmem_cache *); - union { - struct rcu_head rcu_head; - struct work_struct work; - }; - }; - }; -}; - int memcg_update_all_caches(int num_memcgs); /** diff --git a/include/linux/soc/amlogic/meson-canvas.h b/include/linux/soc/amlogic/meson-canvas.h index b4dde2fbeb3f..0cb2a6050d1f 100644 --- a/include/linux/soc/amlogic/meson-canvas.h +++ b/include/linux/soc/amlogic/meson-canvas.h @@ -20,6 +20,7 @@ #define MESON_CANVAS_ENDIAN_SWAP64 0x7 #define MESON_CANVAS_ENDIAN_SWAP128 0xf +struct device; struct meson_canvas; /** diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index f3ae45d02e80..9618debb9ceb 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -13,9 +13,6 @@ #define CMDQ_NO_TIMEOUT 0xffffffffu -/** cmdq event maximum */ -#define CMDQ_MAX_EVENT 0x3ff - struct cmdq_pkt; struct cmdq_client { diff --git a/include/linux/sort.h b/include/linux/sort.h index 2b99a5dd073d..61b96d0ebc44 100644 --- a/include/linux/sort.h +++ b/include/linux/sort.h @@ -4,6 +4,11 @@ #include <linux/types.h> +void sort_r(void *base, size_t num, size_t size, + int (*cmp)(const void *, const void *, const void *), + void (*swap)(void *, void *, int), + const void *priv); + void sort(void *base, size_t num, size_t size, int (*cmp)(const void *, const void *), void (*swap)(void *, void *, int)); diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index bea46bd8b6ce..ea787201c3ac 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -4,6 +4,8 @@ #ifndef __SOUNDWIRE_H #define __SOUNDWIRE_H +#include <linux/mod_devicetable.h> + struct sdw_bus; struct sdw_slave; @@ -377,6 +379,8 @@ struct sdw_slave_prop { * @dynamic_frame: Dynamic frame shape supported * @err_threshold: Number of times that software may retry sending a single * command + * @mclk_freq: clock reference passed to SoundWire Master, in Hz. + * @hw_disabled: if true, the Master is not functional, typically due to pin-mux */ struct sdw_master_prop { u32 revision; @@ -391,6 +395,8 @@ struct sdw_master_prop { u32 default_col; bool dynamic_frame; u32 err_threshold; + u32 mclk_freq; + bool hw_disabled; }; int sdw_master_read_prop(struct sdw_bus *bus); @@ -538,6 +544,7 @@ struct sdw_slave_ops { * @bus: Bus handle * @ops: Slave callback ops * @prop: Slave properties + * @debugfs: Slave debugfs * @node: node for bus list * @port_ready: Port ready completion flag for each Slave port * @dev_num: Device Number assigned by Bus @@ -549,6 +556,9 @@ struct sdw_slave { struct sdw_bus *bus; const struct sdw_slave_ops *ops; struct sdw_slave_prop prop; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs; +#endif struct list_head node; struct completion *port_ready; u16 dev_num; @@ -718,6 +728,7 @@ struct sdw_master_ops { * Bit set implies used number, bit clear implies unused number. 
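On the sort.h addition a little further up: sort_r() is the variant of sort() whose comparison callback receives an extra opaque priv pointer, so per-call context no longer has to live in a global. A small sketch with illustrative names:

	struct example_key {
		const int *weights;	/* context consulted by the comparator */
	};

	static int example_cmp(const void *a, const void *b, const void *priv)
	{
		const struct example_key *key = priv;
		int ia = *(const int *)a;
		int ib = *(const int *)b;

		if (key->weights[ia] != key->weights[ib])
			return key->weights[ia] < key->weights[ib] ? -1 : 1;
		return 0;
	}

	/* Sort an array of indices by their weight; passing NULL for the swap
	 * callback falls back to the generic swap routines:
	 *
	 *	sort_r(idx, n, sizeof(*idx), example_cmp, NULL, &key);
	 */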
* @bus_lock: bus lock * @msg_lock: message lock + * @compute_params: points to Bus resource management implementation * @ops: Master callback ops * @port_ops: Master port callback ops * @params: Current bus parameters @@ -725,6 +736,7 @@ struct sdw_master_ops { * @m_rt_list: List of Master instance of all stream(s) running on Bus. This * is used to compute and program bus bandwidth, clock, frame shape, * transport and port parameters + * @debugfs: Bus debugfs * @defer_msg: Defer message * @clk_stop_timeout: Clock stop timeout computed * @bank_switch_timeout: Bank switch timeout computed @@ -739,11 +751,15 @@ struct sdw_bus { DECLARE_BITMAP(assigned, SDW_MAX_DEVICES); struct mutex bus_lock; struct mutex msg_lock; + int (*compute_params)(struct sdw_bus *bus); const struct sdw_master_ops *ops; const struct sdw_master_port_ops *port_ops; struct sdw_bus_params params; struct sdw_master_prop prop; struct list_head m_rt_list; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs; +#endif struct sdw_defer defer_msg; unsigned int clk_stop_timeout; u32 bank_switch_timeout; @@ -828,7 +844,7 @@ struct sdw_stream_params { * @m_rt_count: Count of Master runtime(s) in this stream */ struct sdw_stream_runtime { - char *name; + const char *name; struct sdw_stream_params params; enum sdw_stream_state state; enum sdw_stream_type type; @@ -836,7 +852,7 @@ struct sdw_stream_runtime { int m_rt_count; }; -struct sdw_stream_runtime *sdw_alloc_stream(char *stream_name); +struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name); void sdw_release_stream(struct sdw_stream_runtime *stream); int sdw_stream_add_master(struct sdw_bus *bus, struct sdw_stream_config *stream_config, diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h index 4d70da45363d..c9427cb6020b 100644 --- a/include/linux/soundwire/sdw_intel.h +++ b/include/linux/soundwire/sdw_intel.h @@ -8,6 +8,7 @@ * struct sdw_intel_ops: Intel audio driver callback ops * * @config_stream: configure the stream with the hw_params + * the first argument containing the context is mandatory */ struct sdw_intel_ops { int (*config_stream)(void *arg, void *substream, diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 7d06241582dd..dc60d03c4b60 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -81,6 +81,7 @@ struct stmmac_mdio_bus_data { unsigned int phy_mask; int *irqs; int probed_phy_irq; + bool needs_reset; }; struct stmmac_dma_cfg { @@ -130,6 +131,7 @@ struct plat_stmmacenet_data { int bus_id; int phy_addr; int interface; + int phy_interface; struct stmmac_mdio_bus_data *mdio_bus_data; struct device_node *phy_node; struct device_node *phylink_node; @@ -167,11 +169,13 @@ struct plat_stmmacenet_data { struct clk *clk_ptp_ref; unsigned int clk_ptp_rate; unsigned int clk_ref_rate; + s32 ptp_max_adj; struct reset_control *stmmac_rst; struct stmmac_axi *axi; int has_gmac4; bool has_sun8i; bool tso_en; + int rss_en; int mac_port_sel_speed; bool en_tx_lpi_clockgating; int has_xgmac; diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 9c0ad1a3a727..6fc8843f1c9e 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -190,8 +190,9 @@ struct platform_suspend_ops { struct platform_s2idle_ops { int (*begin)(void); int (*prepare)(void); + int (*prepare_late)(void); void (*wake)(void); - void (*sync)(void); + void (*restore_early)(void); void (*restore)(void); void (*end)(void); }; @@ -336,6 +337,7 @@ static inline void pm_set_suspend_via_firmware(void) {} static inline void 
pm_set_resume_via_firmware(void) {} static inline bool pm_suspend_via_firmware(void) { return false; } static inline bool pm_resume_via_firmware(void) { return false; } +static inline bool pm_suspend_no_platform(void) { return false; } static inline bool pm_suspend_default_s2idle(void) { return false; } static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 965236795750..5420817ed317 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -196,6 +196,12 @@ struct bin_attribute { .size = _size, \ } +#define __BIN_ATTR_WO(_name) { \ + .attr = { .name = __stringify(_name), .mode = 0200 }, \ + .store = _name##_store, \ + .size = _size, \ +} + #define __BIN_ATTR_RW(_name, _size) \ __BIN_ATTR(_name, 0644, _name##_read, _name##_write, _size) @@ -208,6 +214,9 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR(_name, _mode, _read, \ #define BIN_ATTR_RO(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_RO(_name, _size) +#define BIN_ATTR_WO(_name, _size) \ +struct bin_attribute bin_attr_##_name = __BIN_ATTR_WO(_name, _size) + #define BIN_ATTR_RW(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index f3a85a7fb4b1..99617e528ea2 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -354,6 +354,8 @@ struct tcp_sock { #define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0 #endif + u32 rcv_ooopack; /* Received out-of-order packets, for tcpinfo */ + /* Receiver side RTT estimation */ u32 rcv_rtt_last_tsecr; struct { diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index 2d7e012db03f..ece782ef5466 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -429,6 +429,7 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc) * @lock: Must be held during ring creation/destruction. Is acquired by * interrupt_work when dispatching interrupts to individual rings. 
* @pdev: Pointer to the PCI device + * @ops: NHI specific optional ops * @iobase: MMIO space of the NHI * @tx_rings: All Tx rings available on this host controller * @rx_rings: All Rx rings available on this host controller @@ -442,6 +443,7 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc) struct tb_nhi { spinlock_t lock; struct pci_dev *pdev; + const struct tb_nhi_ops *ops; void __iomem *iobase; struct tb_ring **tx_rings; struct tb_ring **rx_rings; diff --git a/include/linux/time64.h b/include/linux/time64.h index a620ee610b9f..19125489ae94 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -30,6 +30,8 @@ struct itimerspec64 { /* Located here for timespec[64]_valid_strict */ #define TIME64_MAX ((s64)~((u64)1 << 63)) +#define TIME64_MIN (-TIME64_MAX - 1) + #define KTIME_MAX ((s64)~((u64)1 << 63)) #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) diff --git a/include/linux/timeriomem-rng.h b/include/linux/timeriomem-rng.h index fd4a6e6ec831..672df7fbf6c1 100644 --- a/include/linux/timeriomem-rng.h +++ b/include/linux/timeriomem-rng.h @@ -5,6 +5,9 @@ * Copyright (c) 2009 Alexander Clouter <alex@digriz.org.uk> */ +#ifndef _LINUX_TIMERIOMEM_RNG_H +#define _LINUX_TIMERIOMEM_RNG_H + struct timeriomem_rng_data { void __iomem *address; @@ -14,3 +17,5 @@ struct timeriomem_rng_data { /* bits of entropy per 1024 bits read */ unsigned int quality; }; + +#endif /* _LINUX_TIMERIOMEM_RNG_H */ diff --git a/include/linux/tnum.h b/include/linux/tnum.h index c7dc2b5902c0..c17af77f3fae 100644 --- a/include/linux/tnum.h +++ b/include/linux/tnum.h @@ -5,6 +5,10 @@ * propagate the unknown bits such that the tnum result represents all the * possible results for possible values of the operands. */ + +#ifndef _LINUX_TNUM_H +#define _LINUX_TNUM_H + #include <linux/types.h> struct tnum { @@ -81,3 +85,5 @@ bool tnum_in(struct tnum a, struct tnum b); int tnum_strn(char *str, size_t size, struct tnum a); /* Format a tnum as tristate binary expansion */ int tnum_sbin(char *str, size_t size, struct tnum a); + +#endif /* _LINUX_TNUM_H */ diff --git a/include/linux/usb.h b/include/linux/usb.h index e87826e23d59..e656e7b4b1e4 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -426,7 +426,6 @@ struct usb_bus { struct device *sysdev; /* as seen from firmware or bus */ int busnum; /* Bus number (in order of reg) */ const char *bus_name; /* stable id (PCI slot_name etc) */ - u8 uses_dma; /* Does the host controller use DMA? */ u8 uses_pio_for_control; /* * Does the host controller use PIO * for control transfers? @@ -1151,6 +1150,8 @@ struct usbdrv_wrap { * @id_table: USB drivers use ID table to support hotplugging. * Export this with MODULE_DEVICE_TABLE(usb,...). This must be set * or your driver's probe function will never get called. + * @dev_groups: Attributes attached to the device that will be created once it + * is bound to the driver. * @dynids: used internally to hold the list of dynamically added device * ids for this driver. * @drvwrap: Driver-model core structure wrapper. @@ -1198,6 +1199,7 @@ struct usb_driver { int (*post_reset)(struct usb_interface *intf); const struct usb_device_id *id_table; + const struct attribute_group **dev_groups; struct usb_dynids dynids; struct usbdrv_wrap drvwrap; @@ -1221,6 +1223,8 @@ struct usb_driver { * module is being unloaded. * @suspend: Called when the device is going to be suspended by the system. * @resume: Called when the device is being resumed by the system. 
+ * @dev_groups: Attributes attached to the device that will be created once it + * is bound to the driver. * @drvwrap: Driver-model core structure wrapper. * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend * for devices bound to this driver. @@ -1235,6 +1239,7 @@ struct usb_device_driver { int (*suspend) (struct usb_device *udev, pm_message_t message); int (*resume) (struct usb_device *udev, pm_message_t message); + const struct attribute_group **dev_groups; struct usbdrv_wrap drvwrap; unsigned int supports_autosuspend:1; }; diff --git a/include/linux/usb/association.h b/include/linux/usb/association.h deleted file mode 100644 index d7f3cb9b9db5..000000000000 --- a/include/linux/usb/association.h +++ /dev/null @@ -1,151 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless USB - Cable Based Association - * - * Copyright (C) 2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - */ -#ifndef __LINUX_USB_ASSOCIATION_H -#define __LINUX_USB_ASSOCIATION_H - - -/* - * Association attributes - * - * Association Models Supplement to WUSB 1.0 T[3-1] - * - * Each field in the structures has it's ID, it's length and then the - * value. This is the actual definition of the field's ID and its - * length. - */ -struct wusb_am_attr { - __u8 id; - __u8 len; -}; - -/* Different fields defined by the spec */ -#define WUSB_AR_AssociationTypeId { .id = cpu_to_le16(0x0000), .len = cpu_to_le16(2) } -#define WUSB_AR_AssociationSubTypeId { .id = cpu_to_le16(0x0001), .len = cpu_to_le16(2) } -#define WUSB_AR_Length { .id = cpu_to_le16(0x0002), .len = cpu_to_le16(4) } -#define WUSB_AR_AssociationStatus { .id = cpu_to_le16(0x0004), .len = cpu_to_le16(4) } -#define WUSB_AR_LangID { .id = cpu_to_le16(0x0008), .len = cpu_to_le16(2) } -#define WUSB_AR_DeviceFriendlyName { .id = cpu_to_le16(0x000b), .len = cpu_to_le16(64) } /* max */ -#define WUSB_AR_HostFriendlyName { .id = cpu_to_le16(0x000c), .len = cpu_to_le16(64) } /* max */ -#define WUSB_AR_CHID { .id = cpu_to_le16(0x1000), .len = cpu_to_le16(16) } -#define WUSB_AR_CDID { .id = cpu_to_le16(0x1001), .len = cpu_to_le16(16) } -#define WUSB_AR_ConnectionContext { .id = cpu_to_le16(0x1002), .len = cpu_to_le16(48) } -#define WUSB_AR_BandGroups { .id = cpu_to_le16(0x1004), .len = cpu_to_le16(2) } - -/* CBAF Control Requests (AMS1.0[T4-1] */ -enum { - CBAF_REQ_GET_ASSOCIATION_INFORMATION = 0x01, - CBAF_REQ_GET_ASSOCIATION_REQUEST, - CBAF_REQ_SET_ASSOCIATION_RESPONSE -}; - -/* - * CBAF USB-interface defitions - * - * No altsettings, one optional interrupt endpoint. 
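Referring back to the usb.h hunks above: both usb_driver and usb_device_driver gain a dev_groups pointer so that attribute groups are created automatically when a device is bound and removed when it is unbound, instead of the driver managing sysfs files by hand in probe()/disconnect(). A minimal sketch, with all names illustrative:

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 42);
	}
	static DEVICE_ATTR_RO(example);

	static struct attribute *example_attrs[] = {
		&dev_attr_example.attr,
		NULL
	};
	ATTRIBUTE_GROUPS(example);

	static struct usb_driver example_usb_driver = {
		.name		= "example",
		.probe		= example_probe,	/* not shown */
		.disconnect	= example_disconnect,	/* not shown */
		.id_table	= example_id_table,	/* not shown */
		.dev_groups	= example_groups,	/* created on bind, removed on unbind */
	};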
- */ -enum { - CBAF_IFACECLASS = 0xef, - CBAF_IFACESUBCLASS = 0x03, - CBAF_IFACEPROTOCOL = 0x01, -}; - -/* Association Information (AMS1.0[T4-3]) */ -struct wusb_cbaf_assoc_info { - __le16 Length; - __u8 NumAssociationRequests; - __le16 Flags; - __u8 AssociationRequestsArray[]; -} __attribute__((packed)); - -/* Association Request (AMS1.0[T4-4]) */ -struct wusb_cbaf_assoc_request { - __u8 AssociationDataIndex; - __u8 Reserved; - __le16 AssociationTypeId; - __le16 AssociationSubTypeId; - __le32 AssociationTypeInfoSize; -} __attribute__((packed)); - -enum { - AR_TYPE_WUSB = 0x0001, - AR_TYPE_WUSB_RETRIEVE_HOST_INFO = 0x0000, - AR_TYPE_WUSB_ASSOCIATE = 0x0001, -}; - -/* Association Attribute header (AMS1.0[3.8]) */ -struct wusb_cbaf_attr_hdr { - __le16 id; - __le16 len; -} __attribute__((packed)); - -/* Host Info (AMS1.0[T4-7]) (yeah, more headers and fields...) */ -struct wusb_cbaf_host_info { - struct wusb_cbaf_attr_hdr AssociationTypeId_hdr; - __le16 AssociationTypeId; - struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr; - __le16 AssociationSubTypeId; - struct wusb_cbaf_attr_hdr CHID_hdr; - struct wusb_ckhdid CHID; - struct wusb_cbaf_attr_hdr LangID_hdr; - __le16 LangID; - struct wusb_cbaf_attr_hdr HostFriendlyName_hdr; - __u8 HostFriendlyName[]; -} __attribute__((packed)); - -/* Device Info (AMS1.0[T4-8]) - * - * I still don't get this tag'n'header stuff for each goddamn - * field... - */ -struct wusb_cbaf_device_info { - struct wusb_cbaf_attr_hdr Length_hdr; - __le32 Length; - struct wusb_cbaf_attr_hdr CDID_hdr; - struct wusb_ckhdid CDID; - struct wusb_cbaf_attr_hdr BandGroups_hdr; - __le16 BandGroups; - struct wusb_cbaf_attr_hdr LangID_hdr; - __le16 LangID; - struct wusb_cbaf_attr_hdr DeviceFriendlyName_hdr; - __u8 DeviceFriendlyName[]; -} __attribute__((packed)); - -/* Connection Context; CC_DATA - Success case (AMS1.0[T4-9]) */ -struct wusb_cbaf_cc_data { - struct wusb_cbaf_attr_hdr AssociationTypeId_hdr; - __le16 AssociationTypeId; - struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr; - __le16 AssociationSubTypeId; - struct wusb_cbaf_attr_hdr Length_hdr; - __le32 Length; - struct wusb_cbaf_attr_hdr ConnectionContext_hdr; - struct wusb_ckhdid CHID; - struct wusb_ckhdid CDID; - struct wusb_ckhdid CK; - struct wusb_cbaf_attr_hdr BandGroups_hdr; - __le16 BandGroups; -} __attribute__((packed)); - -/* CC_DATA - Failure case (AMS1.0[T4-10]) */ -struct wusb_cbaf_cc_data_fail { - struct wusb_cbaf_attr_hdr AssociationTypeId_hdr; - __le16 AssociationTypeId; - struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr; - __le16 AssociationSubTypeId; - struct wusb_cbaf_attr_hdr Length_hdr; - __le16 Length; - struct wusb_cbaf_attr_hdr AssociationStatus_hdr; - __u32 AssociationStatus; -} __attribute__((packed)); - -#endif /* __LINUX_USB_ASSOCIATION_H */ diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index da82606be605..58b83066bea4 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h @@ -70,4 +70,31 @@ extern enum usb_device_speed usb_get_maximum_speed(struct device *dev); */ extern const char *usb_state_string(enum usb_device_state state); +#ifdef CONFIG_TRACING +/** + * usb_decode_ctrl - Returns human readable representation of control request. + * @str: buffer to return a human-readable representation of control request. + * This buffer should have about 200 bytes. + * @size: size of str buffer. 
+ * @bRequestType: matches the USB bmRequestType field + * @bRequest: matches the USB bRequest field + * @wValue: matches the USB wValue field (CPU byte order) + * @wIndex: matches the USB wIndex field (CPU byte order) + * @wLength: matches the USB wLength field (CPU byte order) + * + * Returns a decoded, formatted, human-readable description of the control + * request packet. + * + * The intended use is in tracepoints: the function returns @str (the same + * buffer passed in), so it can be used directly inside TP_printk(). + * + * Important: wValue, wIndex and wLength must be converted to CPU byte order + * with le16_to_cpu() before this function is invoked. + */ +extern const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType, + __u8 bRequest, __u16 wValue, __u16 wIndex, + __u16 wLength); +#endif + #endif /* __LINUX_USB_CH9_H */ diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index fb19141151d8..124462d65eac 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -291,6 +291,9 @@ struct usb_dcd_config_params { #define USB_DEFAULT_U1_DEV_EXIT_LAT 0x01 /* Less then 1 microsec */ __le16 bU2DevExitLat; /* U2 Device exit Latency */ #define USB_DEFAULT_U2_DEV_EXIT_LAT 0x1F4 /* Less then 500 microsec */ + __u8 besl_baseline; /* Recommended baseline BESL (0-15) */ + __u8 besl_deep; /* Recommended deep BESL (0-15) */ +#define USB_DEFAULT_BESL_UNSPECIFIED 0xFF /* No recommended value */ }; diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index a20e7815d814..712b2a603645 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -256,6 +256,7 @@ struct hc_driver { int flags; #define HCD_MEMORY 0x0001 /* HC regs use memory (else I/O) */ +#define HCD_DMA 0x0002 /* HC uses DMA */ #define HCD_SHARED 0x0004 /* Two (or more) usb_hcds share HW */ #define HCD_USB11 0x0010 /* USB 1.1 */ #define HCD_USB2 0x0020 /* USB 2.0 */ @@ -422,8 +423,10 @@ static inline bool hcd_periodic_completion_in_progress(struct usb_hcd *hcd, return hcd->high_prio_bh.completing_ep == ep; } -#define hcd_uses_dma(hcd) \ - (IS_ENABLED(CONFIG_HAS_DMA) && (hcd)->self.uses_dma) +static inline bool hcd_uses_dma(struct usb_hcd *hcd) +{ + return IS_ENABLED(CONFIG_HAS_DMA) && (hcd->driver->flags & HCD_DMA); +} extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb, @@ -594,6 +597,10 @@ extern void usb_ep0_reinit(struct usb_device *); #define GetPortStatus HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, USB_REQ_GET_STATUS) #define SetHubFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_SET_FEATURE) #define SetPortFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_SET_FEATURE) +#define ClearTTBuffer HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, HUB_CLEAR_TT_BUFFER) +#define ResetTT HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, HUB_RESET_TT) +#define GetTTState HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, HUB_GET_TT_STATE) +#define StopTT HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, HUB_STOP_TT) /*-------------------------------------------------------------------------*/ diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h index c05ffa6abda9..2d77f97df72d 100644 --- a/include/linux/usb/role.h +++ b/include/linux/usb/role.h @@ -42,14 +42,51 @@ struct usb_role_switch_desc { bool allow_userspace_control; }; + +#if IS_ENABLED(CONFIG_USB_ROLE_SWITCH) int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role); enum usb_role
usb_role_switch_get_role(struct usb_role_switch *sw); struct usb_role_switch *usb_role_switch_get(struct device *dev); +struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *node); void usb_role_switch_put(struct usb_role_switch *sw); struct usb_role_switch * usb_role_switch_register(struct device *parent, const struct usb_role_switch_desc *desc); void usb_role_switch_unregister(struct usb_role_switch *sw); +#else +static inline int usb_role_switch_set_role(struct usb_role_switch *sw, + enum usb_role role) +{ + return 0; +} + +static inline enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw) +{ + return USB_ROLE_NONE; +} + +static inline struct usb_role_switch *usb_role_switch_get(struct device *dev) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct usb_role_switch * +fwnode_usb_role_switch_get(struct fwnode_handle *node) +{ + return ERR_PTR(-ENODEV); +} + +static inline void usb_role_switch_put(struct usb_role_switch *sw) { } + +static inline struct usb_role_switch * +usb_role_switch_register(struct device *parent, + const struct usb_role_switch_desc *desc) +{ + return ERR_PTR(-ENODEV); +} + +static inline void usb_role_switch_unregister(struct usb_role_switch *sw) { } +#endif #endif /* __LINUX_USB_ROLE_H */ diff --git a/include/linux/usb/usb338x.h b/include/linux/usb/usb338x.h index 7189e3387bf9..20020c1336d5 100644 --- a/include/linux/usb/usb338x.h +++ b/include/linux/usb/usb338x.h @@ -113,7 +113,10 @@ struct usb338x_ll_regs { u32 ll_ltssm_ctrl1; u32 ll_ltssm_ctrl2; u32 ll_ltssm_ctrl3; - u32 unused[2]; + u32 unused1; + + /* 0x710 */ + u32 unused2; u32 ll_general_ctrl0; u32 ll_general_ctrl1; #define PM_U3_AUTO_EXIT 29 @@ -136,29 +139,41 @@ struct usb338x_ll_regs { u32 ll_general_ctrl2; #define SELECT_INVERT_LANE_POLARITY 7 #define FORCE_INVERT_LANE_POLARITY 6 + + /* 0x720 */ u32 ll_general_ctrl3; u32 ll_general_ctrl4; u32 ll_error_gen; -} __packed; + u32 unused3; + + /* 0x730 */ + u32 unused4[4]; -struct usb338x_ll_lfps_regs { - /* offset 0x748 */ + /* 0x740 */ + u32 unused5[2]; u32 ll_lfps_5; #define TIMER_LFPS_6US 16 u32 ll_lfps_6; #define TIMER_LFPS_80US 0 -} __packed; -struct usb338x_ll_tsn_regs { - /* offset 0x77C */ + /* 0x750 */ + u32 unused6[8]; + + /* 0x770 */ + u32 unused7[3]; u32 ll_tsn_counters_2; #define HOT_TX_NORESET_TS2 24 + + /* 0x780 */ u32 ll_tsn_counters_3; #define HOT_RX_RESET_TS2 0 -} __packed; + u32 unused8[3]; -struct usb338x_ll_chi_regs { - /* offset 0x79C */ + /* 0x790 */ + u32 unused9; + u32 ll_lfps_timers_2; +#define LFPS_TIMERS_2_WORKAROUND_VALUE 0x084d + u32 unused10; u32 ll_tsn_chicken_bit; #define RECOVERY_IDLE_TO_RECOVER_FMW 3 } __packed; diff --git a/include/linux/usb/wusb-wa.h b/include/linux/usb/wusb-wa.h deleted file mode 100644 index 64a840b5106e..000000000000 --- a/include/linux/usb/wusb-wa.h +++ /dev/null @@ -1,304 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless USB Wire Adapter constants and structures. - * - * Copyright (C) 2005-2006 Intel Corporation. - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - * - * - * FIXME: docs - * FIXME: organize properly, group logically - * - * All the event structures are defined in uwb/spec.h, as they are - * common to the WHCI and WUSB radio control interfaces. - * - * References: - * [WUSB] Wireless Universal Serial Bus Specification, revision 1.0, ch8 - */ -#ifndef __LINUX_USB_WUSB_WA_H -#define __LINUX_USB_WUSB_WA_H - -/** - * Radio Command Request for the Radio Control Interface - * - * Radio Control Interface command and event codes are the same as - * WHCI, and listed in include/linux/uwb.h:UWB_RC_{CMD,EVT}_* - */ -enum { - WA_EXEC_RC_CMD = 40, /* Radio Control command Request */ -}; - -/* Wireless Adapter Requests ([WUSB] table 8-51) */ -enum { - WUSB_REQ_ADD_MMC_IE = 20, - WUSB_REQ_REMOVE_MMC_IE = 21, - WUSB_REQ_SET_NUM_DNTS = 22, - WUSB_REQ_SET_CLUSTER_ID = 23, - WUSB_REQ_SET_DEV_INFO = 24, - WUSB_REQ_GET_TIME = 25, - WUSB_REQ_SET_STREAM_IDX = 26, - WUSB_REQ_SET_WUSB_MAS = 27, - WUSB_REQ_CHAN_STOP = 28, -}; - - -/* Wireless Adapter WUSB Channel Time types ([WUSB] table 8-52) */ -enum { - WUSB_TIME_ADJ = 0, - WUSB_TIME_BPST = 1, - WUSB_TIME_WUSB = 2, -}; - -enum { - WA_ENABLE = 0x01, - WA_RESET = 0x02, - RPIPE_PAUSE = 0x1, - RPIPE_STALL = 0x2, -}; - -/* Responses from Get Status request ([WUSB] section 8.3.1.6) */ -enum { - WA_STATUS_ENABLED = 0x01, - WA_STATUS_RESETTING = 0x02 -}; - -enum rpipe_crs { - RPIPE_CRS_CTL = 0x01, - RPIPE_CRS_ISO = 0x02, - RPIPE_CRS_BULK = 0x04, - RPIPE_CRS_INTR = 0x08 -}; - -/** - * RPipe descriptor ([WUSB] section 8.5.2.11) - * - * FIXME: explain rpipes - */ -struct usb_rpipe_descriptor { - u8 bLength; - u8 bDescriptorType; - __le16 wRPipeIndex; - __le16 wRequests; - __le16 wBlocks; /* rw if 0 */ - __le16 wMaxPacketSize; /* rw */ - union { - u8 dwa_bHSHubAddress; /* rw: DWA. */ - u8 hwa_bMaxBurst; /* rw: HWA. */ - }; - union { - u8 dwa_bHSHubPort; /* rw: DWA. */ - u8 hwa_bDeviceInfoIndex; /* rw: HWA. */ - }; - u8 bSpeed; /* rw: xfer rate 'enum uwb_phy_rate' */ - union { - u8 dwa_bDeviceAddress; /* rw: DWA Target device address. */ - u8 hwa_reserved; /* rw: HWA. */ - }; - u8 bEndpointAddress; /* rw: Target EP address */ - u8 bDataSequence; /* ro: Current Data sequence */ - __le32 dwCurrentWindow; /* ro */ - u8 bMaxDataSequence; /* ro?: max supported seq */ - u8 bInterval; /* rw: */ - u8 bOverTheAirInterval; /* rw: */ - u8 bmAttribute; /* ro? */ - u8 bmCharacteristics; /* ro? enum rpipe_attr, supported xsactions */ - u8 bmRetryOptions; /* rw? */ - __le16 wNumTransactionErrors; /* rw */ -} __attribute__ ((packed)); - -/** - * Wire Adapter Notification types ([WUSB] sections 8.4.5 & 8.5.4) - * - * These are the notifications coming on the notification endpoint of - * an HWA and a DWA. - */ -enum wa_notif_type { - DWA_NOTIF_RWAKE = 0x91, - DWA_NOTIF_PORTSTATUS = 0x92, - WA_NOTIF_TRANSFER = 0x93, - HWA_NOTIF_BPST_ADJ = 0x94, - HWA_NOTIF_DN = 0x95, -}; - -/** - * Wire Adapter notification header - * - * Notifications coming from a wire adapter use a common header - * defined in [WUSB] sections 8.4.5 & 8.5.4. - */ -struct wa_notif_hdr { - u8 bLength; - u8 bNotifyType; /* enum wa_notif_type */ -} __packed; - -/** - * HWA DN Received notification [(WUSB] section 8.5.4.2) - * - * The DNData is specified in WUSB1.0[7.6]. For each device - * notification we received, we just need to dispatch it. 
- * - * @dndata: this is really an array of notifications, but all start - * with the same header. - */ -struct hwa_notif_dn { - struct wa_notif_hdr hdr; - u8 bSourceDeviceAddr; /* from errata 2005/07 */ - u8 bmAttributes; - struct wusb_dn_hdr dndata[]; -} __packed; - -/* [WUSB] section 8.3.3 */ -enum wa_xfer_type { - WA_XFER_TYPE_CTL = 0x80, - WA_XFER_TYPE_BI = 0x81, /* bulk/interrupt */ - WA_XFER_TYPE_ISO = 0x82, - WA_XFER_RESULT = 0x83, - WA_XFER_ABORT = 0x84, - WA_XFER_ISO_PACKET_INFO = 0xA0, - WA_XFER_ISO_PACKET_STATUS = 0xA1, -}; - -/* [WUSB] section 8.3.3 */ -struct wa_xfer_hdr { - u8 bLength; /* 0x18 */ - u8 bRequestType; /* 0x80 WA_REQUEST_TYPE_CTL */ - __le16 wRPipe; /* RPipe index */ - __le32 dwTransferID; /* Host-assigned ID */ - __le32 dwTransferLength; /* Length of data to xfer */ - u8 bTransferSegment; -} __packed; - -struct wa_xfer_ctl { - struct wa_xfer_hdr hdr; - u8 bmAttribute; - __le16 wReserved; - struct usb_ctrlrequest baSetupData; -} __packed; - -struct wa_xfer_bi { - struct wa_xfer_hdr hdr; - u8 bReserved; - __le16 wReserved; -} __packed; - -/* [WUSB] section 8.5.5 */ -struct wa_xfer_hwaiso { - struct wa_xfer_hdr hdr; - u8 bReserved; - __le16 wPresentationTime; - __le32 dwNumOfPackets; -} __packed; - -struct wa_xfer_packet_info_hwaiso { - __le16 wLength; - u8 bPacketType; - u8 bReserved; - __le16 PacketLength[0]; -} __packed; - -struct wa_xfer_packet_status_len_hwaiso { - __le16 PacketLength; - __le16 PacketStatus; -} __packed; - -struct wa_xfer_packet_status_hwaiso { - __le16 wLength; - u8 bPacketType; - u8 bReserved; - struct wa_xfer_packet_status_len_hwaiso PacketStatus[0]; -} __packed; - -/* [WUSB] section 8.3.3.5 */ -struct wa_xfer_abort { - u8 bLength; - u8 bRequestType; - __le16 wRPipe; /* RPipe index */ - __le32 dwTransferID; /* Host-assigned ID */ -} __packed; - -/** - * WA Transfer Complete notification ([WUSB] section 8.3.3.3) - * - */ -struct wa_notif_xfer { - struct wa_notif_hdr hdr; - u8 bEndpoint; - u8 Reserved; -} __packed; - -/** Transfer result basic codes [WUSB] table 8-15 */ -enum { - WA_XFER_STATUS_SUCCESS, - WA_XFER_STATUS_HALTED, - WA_XFER_STATUS_DATA_BUFFER_ERROR, - WA_XFER_STATUS_BABBLE, - WA_XFER_RESERVED, - WA_XFER_STATUS_NOT_FOUND, - WA_XFER_STATUS_INSUFFICIENT_RESOURCE, - WA_XFER_STATUS_TRANSACTION_ERROR, - WA_XFER_STATUS_ABORTED, - WA_XFER_STATUS_RPIPE_NOT_READY, - WA_XFER_INVALID_FORMAT, - WA_XFER_UNEXPECTED_SEGMENT_NUMBER, - WA_XFER_STATUS_RPIPE_TYPE_MISMATCH, -}; - -/** [WUSB] section 8.3.3.4 */ -struct wa_xfer_result { - struct wa_notif_hdr hdr; - __le32 dwTransferID; - __le32 dwTransferLength; - u8 bTransferSegment; - u8 bTransferStatus; - __le32 dwNumOfPackets; -} __packed; - -/** - * Wire Adapter Class Descriptor ([WUSB] section 8.5.2.7). - * - * NOTE: u16 fields are read Little Endian from the hardware. - * - * @bNumPorts is the original max number of devices that the host can - * connect; we might chop this so the stack can handle - * it. In case you need to access it, use wusbhc->ports_max - * if it is a Wireless USB WA. - */ -struct usb_wa_descriptor { - u8 bLength; - u8 bDescriptorType; - __le16 bcdWAVersion; - u8 bNumPorts; /* don't use!! 
*/ - u8 bmAttributes; /* Reserved == 0 */ - __le16 wNumRPipes; - __le16 wRPipeMaxBlock; - u8 bRPipeBlockSize; - u8 bPwrOn2PwrGood; - u8 bNumMMCIEs; - u8 DeviceRemovable; /* FIXME: in DWA this is up to 16 bytes */ -} __packed; - -/** - * HWA Device Information Buffer (WUSB1.0[T8.54]) - */ -struct hwa_dev_info { - u8 bmDeviceAvailability[32]; /* FIXME: ignored for now */ - u8 bDeviceAddress; - __le16 wPHYRates; - u8 bmDeviceAttribute; -} __packed; - -#endif /* #ifndef __LINUX_USB_WUSB_WA_H */ diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h deleted file mode 100644 index 65adee629106..000000000000 --- a/include/linux/usb/wusb.h +++ /dev/null @@ -1,362 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless USB Standard Definitions - * Event Size Tables - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - * - * - * FIXME: docs - * FIXME: organize properly, group logically - * - * All the event structures are defined in uwb/spec.h, as they are - * common to the WHCI and WUSB radio control interfaces. - */ - -#ifndef __WUSB_H__ -#define __WUSB_H__ - -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/uwb/spec.h> -#include <linux/usb/ch9.h> -#include <linux/param.h> - -/** - * WUSB Information Element header - * - * I don't know why, they decided to make it different to the MBOA MAC - * IE Header; beats me. - */ -struct wuie_hdr { - u8 bLength; - u8 bIEIdentifier; -} __attribute__((packed)); - -enum { - WUIE_ID_WCTA = 0x80, - WUIE_ID_CONNECTACK, - WUIE_ID_HOST_INFO, - WUIE_ID_CHANGE_ANNOUNCE, - WUIE_ID_DEVICE_DISCONNECT, - WUIE_ID_HOST_DISCONNECT, - WUIE_ID_KEEP_ALIVE = 0x89, - WUIE_ID_ISOCH_DISCARD, - WUIE_ID_RESET_DEVICE, -}; - -/** - * Maximum number of array elements in a WUSB IE. - * - * WUSB1.0[7.5 before table 7-38] says that in WUSB IEs that - * are "arrays" have to limited to 4 elements. So we define it - * like that to ease up and submit only the neeed size. - */ -#define WUIE_ELT_MAX 4 - -/** - * Wrapper for the data that defines a CHID, a CDID or a CK - * - * WUSB defines that CHIDs, CDIDs and CKs are a 16 byte string of - * data. In order to avoid confusion and enforce types, we wrap it. - * - * Make it packed, as we use it in some hw definitions. - */ -struct wusb_ckhdid { - u8 data[16]; -} __attribute__((packed)); - -static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } }; - -#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1) - -/** - * WUSB IE: Host Information (WUSB1.0[7.5.2]) - * - * Used to provide information about the host to the Wireless USB - * devices in range (CHID can be used as an ASCII string). 
- */ -struct wuie_host_info { - struct wuie_hdr hdr; - __le16 attributes; - struct wusb_ckhdid CHID; -} __attribute__((packed)); - -/** - * WUSB IE: Connect Ack (WUSB1.0[7.5.1]) - * - * Used to acknowledge device connect requests. See note for - * WUIE_ELT_MAX. - */ -struct wuie_connect_ack { - struct wuie_hdr hdr; - struct { - struct wusb_ckhdid CDID; - u8 bDeviceAddress; /* 0 means unused */ - u8 bReserved; - } blk[WUIE_ELT_MAX]; -} __attribute__((packed)); - -/** - * WUSB IE Host Information Element, Connect Availability - * - * WUSB1.0[7.5.2], bmAttributes description - */ -enum { - WUIE_HI_CAP_RECONNECT = 0, - WUIE_HI_CAP_LIMITED, - WUIE_HI_CAP_RESERVED, - WUIE_HI_CAP_ALL, -}; - -/** - * WUSB IE: Channel Stop (WUSB1.0[7.5.8]) - * - * Tells devices the host is going to stop sending MMCs and will disappear. - */ -struct wuie_channel_stop { - struct wuie_hdr hdr; - u8 attributes; - u8 timestamp[3]; -} __attribute__((packed)); - -/** - * WUSB IE: Keepalive (WUSB1.0[7.5.9]) - * - * Ask device(s) to send keepalives. - */ -struct wuie_keep_alive { - struct wuie_hdr hdr; - u8 bDeviceAddress[WUIE_ELT_MAX]; -} __attribute__((packed)); - -/** - * WUSB IE: Reset device (WUSB1.0[7.5.11]) - * - * Tell device to reset; in all truth, we can fit 4 CDIDs, but we only - * use it for one at the time... - * - * In any case, this request is a wee bit silly: why don't they target - * by address?? - */ -struct wuie_reset { - struct wuie_hdr hdr; - struct wusb_ckhdid CDID; -} __attribute__((packed)); - -/** - * WUSB IE: Disconnect device (WUSB1.0[7.5.11]) - * - * Tell device to disconnect; we can fit 4 addresses, but we only use - * it for one at the time... - */ -struct wuie_disconnect { - struct wuie_hdr hdr; - u8 bDeviceAddress; - u8 padding; -} __attribute__((packed)); - -/** - * WUSB IE: Host disconnect ([WUSB] section 7.5.5) - * - * Tells all connected devices to disconnect. - */ -struct wuie_host_disconnect { - struct wuie_hdr hdr; -} __attribute__((packed)); - -/** - * WUSB Device Notification header (WUSB1.0[7.6]) - */ -struct wusb_dn_hdr { - u8 bType; - u8 notifdata[]; -} __attribute__((packed)); - -/** Device Notification codes (WUSB1.0[Table 7-54]) */ -enum WUSB_DN { - WUSB_DN_CONNECT = 0x01, - WUSB_DN_DISCONNECT = 0x02, - WUSB_DN_EPRDY = 0x03, - WUSB_DN_MASAVAILCHANGED = 0x04, - WUSB_DN_RWAKE = 0x05, - WUSB_DN_SLEEP = 0x06, - WUSB_DN_ALIVE = 0x07, -}; - -/** WUSB Device Notification Connect */ -struct wusb_dn_connect { - struct wusb_dn_hdr hdr; - __le16 attributes; - struct wusb_ckhdid CDID; -} __attribute__((packed)); - -static inline int wusb_dn_connect_prev_dev_addr(const struct wusb_dn_connect *dn) -{ - return le16_to_cpu(dn->attributes) & 0xff; -} - -static inline int wusb_dn_connect_new_connection(const struct wusb_dn_connect *dn) -{ - return (le16_to_cpu(dn->attributes) >> 8) & 0x1; -} - -static inline int wusb_dn_connect_beacon_behavior(const struct wusb_dn_connect *dn) -{ - return (le16_to_cpu(dn->attributes) >> 9) & 0x03; -} - -/** Device is alive (aka: pong) (WUSB1.0[7.6.7]) */ -struct wusb_dn_alive { - struct wusb_dn_hdr hdr; -} __attribute__((packed)); - -/** Device is disconnecting (WUSB1.0[7.6.2]) */ -struct wusb_dn_disconnect { - struct wusb_dn_hdr hdr; -} __attribute__((packed)); - -/* General constants */ -enum { - WUSB_TRUST_TIMEOUT_MS = 4000, /* [WUSB] section 4.15.1 */ -}; - -/* - * WUSB Crypto stuff (WUSB1.0[6]) - */ - -extern const char *wusb_et_name(u8); - -/** - * WUSB key index WUSB1.0[7.3.2.4], for usage when setting keys for - * the host or the device. 
- */ -static inline u8 wusb_key_index(int index, int type, int originator) -{ - return (originator << 6) | (type << 4) | index; -} - -#define WUSB_KEY_INDEX_TYPE_PTK 0 /* for HWA only */ -#define WUSB_KEY_INDEX_TYPE_ASSOC 1 -#define WUSB_KEY_INDEX_TYPE_GTK 2 -#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0 -#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1 -/* bits 0-3 used for the key index. */ -#define WUSB_KEY_INDEX_MAX 15 - -/* A CCM Nonce, defined in WUSB1.0[6.4.1] */ -struct aes_ccm_nonce { - u8 sfn[6]; /* Little Endian */ - u8 tkid[3]; /* LE */ - struct uwb_dev_addr dest_addr; - struct uwb_dev_addr src_addr; -} __attribute__((packed)); - -/* A CCM operation label, defined on WUSB1.0[6.5.x] */ -struct aes_ccm_label { - u8 data[14]; -} __attribute__((packed)); - -/* - * Input to the key derivation sequence defined in - * WUSB1.0[6.5.1]. Rest of the data is in the CCM Nonce passed to the - * PRF function. - */ -struct wusb_keydvt_in { - u8 hnonce[16]; - u8 dnonce[16]; -} __attribute__((packed)); - -/* - * Output from the key derivation sequence defined in - * WUSB1.0[6.5.1]. - */ -struct wusb_keydvt_out { - u8 kck[16]; - u8 ptk[16]; -} __attribute__((packed)); - -/* Pseudo Random Function WUSB1.0[6.5] */ -extern int wusb_crypto_init(void); -extern void wusb_crypto_exit(void); -extern ssize_t wusb_prf(void *out, size_t out_size, - const u8 key[16], const struct aes_ccm_nonce *_n, - const struct aes_ccm_label *a, - const void *b, size_t blen, size_t len); - -static inline int wusb_prf_64(void *out, size_t out_size, const u8 key[16], - const struct aes_ccm_nonce *n, - const struct aes_ccm_label *a, - const void *b, size_t blen) -{ - return wusb_prf(out, out_size, key, n, a, b, blen, 64); -} - -static inline int wusb_prf_128(void *out, size_t out_size, const u8 key[16], - const struct aes_ccm_nonce *n, - const struct aes_ccm_label *a, - const void *b, size_t blen) -{ - return wusb_prf(out, out_size, key, n, a, b, blen, 128); -} - -static inline int wusb_prf_256(void *out, size_t out_size, const u8 key[16], - const struct aes_ccm_nonce *n, - const struct aes_ccm_label *a, - const void *b, size_t blen) -{ - return wusb_prf(out, out_size, key, n, a, b, blen, 256); -} - -/* Key derivation WUSB1.0[6.5.1] */ -static inline int wusb_key_derive(struct wusb_keydvt_out *keydvt_out, - const u8 key[16], - const struct aes_ccm_nonce *n, - const struct wusb_keydvt_in *keydvt_in) -{ - const struct aes_ccm_label a = { .data = "Pair-wise keys" }; - return wusb_prf_256(keydvt_out, sizeof(*keydvt_out), key, n, &a, - keydvt_in, sizeof(*keydvt_in)); -} - -/* - * Out-of-band MIC Generation WUSB1.0[6.5.2] - * - * Compute the MIC over @key, @n and @hs and place it in @mic_out. - * - * @mic_out: Where to place the 8 byte MIC tag - * @key: KCK from the derivation process - * @n: CCM nonce, n->sfn == 0, TKID as established in the - * process. - * @hs: Handshake struct for phase 2 of the 4-way. - * hs->bStatus and hs->bReserved are zero. - * hs->bMessageNumber is 2 (WUSB1.0[7.3.2.5.2] - * hs->dest_addr is the device's USB address padded with 0 - * hs->src_addr is the hosts's UWB device address - * hs->mic is ignored (as we compute that value). 
- */ -static inline int wusb_oob_mic(u8 mic_out[8], const u8 key[16], - const struct aes_ccm_nonce *n, - const struct usb_handshake *hs) -{ - const struct aes_ccm_label a = { .data = "out-of-bandMIC" }; - return wusb_prf_64(mic_out, 8, key, n, &a, - hs, sizeof(*hs) - sizeof(hs->MIC)); -} - -#endif /* #ifndef __WUSB_H__ */ diff --git a/include/linux/uwb.h b/include/linux/uwb.h deleted file mode 100644 index 6918a61e1ac1..000000000000 --- a/include/linux/uwb.h +++ /dev/null @@ -1,817 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Ultra Wide Band - * UWB API - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * FIXME: doc: overview of the API, different parts and pointers - */ - -#ifndef __LINUX__UWB_H__ -#define __LINUX__UWB_H__ - -#include <linux/limits.h> -#include <linux/device.h> -#include <linux/mutex.h> -#include <linux/timer.h> -#include <linux/wait.h> -#include <linux/workqueue.h> -#include <linux/uwb/spec.h> -#include <asm/page.h> - -struct uwb_dev; -struct uwb_beca_e; -struct uwb_rc; -struct uwb_rsv; -struct uwb_dbg; - -/** - * struct uwb_dev - a UWB Device - * @rc: UWB Radio Controller that discovered the device (kind of its - * parent). - * @bce: a beacon cache entry for this device; or NULL if the device - * is a local radio controller. - * @mac_addr: the EUI-48 address of this device. - * @dev_addr: the current DevAddr used by this device. - * @beacon_slot: the slot number the beacon is using. - * @streams: bitmap of streams allocated to reservations targeted at - * this device. For an RC, this is the streams allocated for - * reservations targeted at DevAddrs. - * - * A UWB device may either by a neighbor or part of a local radio - * controller. - */ -struct uwb_dev { - struct mutex mutex; - struct list_head list_node; - struct device dev; - struct uwb_rc *rc; /* radio controller */ - struct uwb_beca_e *bce; /* Beacon Cache Entry */ - - struct uwb_mac_addr mac_addr; - struct uwb_dev_addr dev_addr; - int beacon_slot; - DECLARE_BITMAP(streams, UWB_NUM_STREAMS); - DECLARE_BITMAP(last_availability_bm, UWB_NUM_MAS); -}; -#define to_uwb_dev(d) container_of(d, struct uwb_dev, dev) - -/** - * UWB HWA/WHCI Radio Control {Command|Event} Block context IDs - * - * RC[CE]Bs have a 'context ID' field that matches the command with - * the event received to confirm it. - * - * Maximum number of context IDs - */ -enum { UWB_RC_CTX_MAX = 256 }; - - -/** Notification chain head for UWB generated events to listeners */ -struct uwb_notifs_chain { - struct list_head list; - struct mutex mutex; -}; - -/* Beacon cache list */ -struct uwb_beca { - struct list_head list; - size_t entries; - struct mutex mutex; -}; - -/* Event handling thread. */ -struct uwbd { - int pid; - struct task_struct *task; - wait_queue_head_t wq; - struct list_head event_list; - spinlock_t event_list_lock; -}; - -/** - * struct uwb_mas_bm - a bitmap of all MAS in a superframe - * @bm: a bitmap of length #UWB_NUM_MAS - */ -struct uwb_mas_bm { - DECLARE_BITMAP(bm, UWB_NUM_MAS); - DECLARE_BITMAP(unsafe_bm, UWB_NUM_MAS); - int safe; - int unsafe; -}; - -/** - * uwb_rsv_state - UWB Reservation state. - * - * NONE - reservation is not active (no DRP IE being transmitted). - * - * Owner reservation states: - * - * INITIATED - owner has sent an initial DRP request. - * PENDING - target responded with pending Reason Code. - * MODIFIED - reservation manager is modifying an established - * reservation with a different MAS allocation. 
- * ESTABLISHED - the reservation has been successfully negotiated. - * - * Target reservation states: - * - * DENIED - request is denied. - * ACCEPTED - request is accepted. - * PENDING - PAL has yet to make a decision to whether to accept or - * deny. - * - * FIXME: further target states TBD. - */ -enum uwb_rsv_state { - UWB_RSV_STATE_NONE = 0, - UWB_RSV_STATE_O_INITIATED, - UWB_RSV_STATE_O_PENDING, - UWB_RSV_STATE_O_MODIFIED, - UWB_RSV_STATE_O_ESTABLISHED, - UWB_RSV_STATE_O_TO_BE_MOVED, - UWB_RSV_STATE_O_MOVE_EXPANDING, - UWB_RSV_STATE_O_MOVE_COMBINING, - UWB_RSV_STATE_O_MOVE_REDUCING, - UWB_RSV_STATE_T_ACCEPTED, - UWB_RSV_STATE_T_DENIED, - UWB_RSV_STATE_T_CONFLICT, - UWB_RSV_STATE_T_PENDING, - UWB_RSV_STATE_T_EXPANDING_ACCEPTED, - UWB_RSV_STATE_T_EXPANDING_CONFLICT, - UWB_RSV_STATE_T_EXPANDING_PENDING, - UWB_RSV_STATE_T_EXPANDING_DENIED, - UWB_RSV_STATE_T_RESIZED, - - UWB_RSV_STATE_LAST, -}; - -enum uwb_rsv_target_type { - UWB_RSV_TARGET_DEV, - UWB_RSV_TARGET_DEVADDR, -}; - -/** - * struct uwb_rsv_target - the target of a reservation. - * - * Reservations unicast and targeted at a single device - * (UWB_RSV_TARGET_DEV); or (e.g., in the case of WUSB) targeted at a - * specific (private) DevAddr (UWB_RSV_TARGET_DEVADDR). - */ -struct uwb_rsv_target { - enum uwb_rsv_target_type type; - union { - struct uwb_dev *dev; - struct uwb_dev_addr devaddr; - }; -}; - -struct uwb_rsv_move { - struct uwb_mas_bm final_mas; - struct uwb_ie_drp *companion_drp_ie; - struct uwb_mas_bm companion_mas; -}; - -/* - * Number of streams reserved for reservations targeted at DevAddrs. - */ -#define UWB_NUM_GLOBAL_STREAMS 1 - -typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv); - -/** - * struct uwb_rsv - a DRP reservation - * - * Data structure management: - * - * @rc: the radio controller this reservation is for - * (as target or owner) - * @rc_node: a list node for the RC - * @pal_node: a list node for the PAL - * - * Owner and target parameters: - * - * @owner: the UWB device owning this reservation - * @target: the target UWB device - * @type: reservation type - * - * Owner parameters: - * - * @max_mas: maxiumum number of MAS - * @min_mas: minimum number of MAS - * @sparsity: owner selected sparsity - * @is_multicast: true iff multicast - * - * @callback: callback function when the reservation completes - * @pal_priv: private data for the PAL making the reservation - * - * Reservation status: - * - * @status: negotiation status - * @stream: stream index allocated for this reservation - * @tiebreaker: conflict tiebreaker for this reservation - * @mas: reserved MAS - * @drp_ie: the DRP IE - * @ie_valid: true iff the DRP IE matches the reservation parameters - * - * DRP reservations are uniquely identified by the owner, target and - * stream index. However, when using a DevAddr as a target (e.g., for - * a WUSB cluster reservation) the responses may be received from - * devices with different DevAddrs. In this case, reservations are - * uniquely identified by just the stream index. A number of stream - * indexes (UWB_NUM_GLOBAL_STREAMS) are reserved for this. 
- */ -struct uwb_rsv { - struct uwb_rc *rc; - struct list_head rc_node; - struct list_head pal_node; - struct kref kref; - - struct uwb_dev *owner; - struct uwb_rsv_target target; - enum uwb_drp_type type; - int max_mas; - int min_mas; - int max_interval; - bool is_multicast; - - uwb_rsv_cb_f callback; - void *pal_priv; - - enum uwb_rsv_state state; - bool needs_release_companion_mas; - u8 stream; - u8 tiebreaker; - struct uwb_mas_bm mas; - struct uwb_ie_drp *drp_ie; - struct uwb_rsv_move mv; - bool ie_valid; - struct timer_list timer; - struct work_struct handle_timeout_work; -}; - -static const -struct uwb_mas_bm uwb_mas_bm_zero = { .bm = { 0 } }; - -static inline void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas) -{ - bitmap_copy_le(dst, mas->bm, UWB_NUM_MAS); -} - -/** - * struct uwb_drp_avail - a radio controller's view of MAS usage - * @global: MAS unused by neighbors (excluding reservations targeted - * or owned by the local radio controller) or the beaon period - * @local: MAS unused by local established reservations - * @pending: MAS unused by local pending reservations - * @ie: DRP Availability IE to be included in the beacon - * @ie_valid: true iff @ie is valid and does not need to regenerated from - * @global and @local - * - * Each radio controller maintains a view of MAS usage or - * availability. MAS available for a new reservation are determined - * from the intersection of @global, @local, and @pending. - * - * The radio controller must transmit a DRP Availability IE that's the - * intersection of @global and @local. - * - * A set bit indicates the MAS is unused and available. - * - * rc->rsvs_mutex should be held before accessing this data structure. - * - * [ECMA-368] section 17.4.3. - */ -struct uwb_drp_avail { - DECLARE_BITMAP(global, UWB_NUM_MAS); - DECLARE_BITMAP(local, UWB_NUM_MAS); - DECLARE_BITMAP(pending, UWB_NUM_MAS); - struct uwb_ie_drp_avail ie; - bool ie_valid; -}; - -struct uwb_drp_backoff_win { - u8 window; - u8 n; - int total_expired; - struct timer_list timer; - bool can_reserve_extra_mases; -}; - -const char *uwb_rsv_state_str(enum uwb_rsv_state state); -const char *uwb_rsv_type_str(enum uwb_drp_type type); - -struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, - void *pal_priv); -void uwb_rsv_destroy(struct uwb_rsv *rsv); - -int uwb_rsv_establish(struct uwb_rsv *rsv); -int uwb_rsv_modify(struct uwb_rsv *rsv, - int max_mas, int min_mas, int sparsity); -void uwb_rsv_terminate(struct uwb_rsv *rsv); - -void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv); - -void uwb_rsv_get_usable_mas(struct uwb_rsv *orig_rsv, struct uwb_mas_bm *mas); - -/** - * Radio Control Interface instance - * - * - * Life cycle rules: those of the UWB Device. - * - * @index: an index number for this radio controller, as used in the - * device name. - * @version: version of protocol supported by this device - * @priv: Backend implementation; rw with uwb_dev.dev.sem taken. - * @cmd: Backend implementation to execute commands; rw and call - * only with uwb_dev.dev.sem taken. - * @reset: Hardware reset of radio controller and any PAL controllers. - * @filter: Backend implementation to manipulate data to and from device - * to be compliant to specification assumed by driver (WHCI - * 0.95). - * - * uwb_dev.dev.mutex is used to execute commands and update - * the corresponding structures; can't use a spinlock - * because rc->cmd() can sleep. 
- * @ies: This is a dynamically allocated array cacheing the - * IEs (settable by the host) that the beacon of this - * radio controller is currently sending. - * - * In reality, we store here the full command we set to - * the radio controller (which is basically a command - * prefix followed by all the IEs the beacon currently - * contains). This way we don't have to realloc and - * memcpy when setting it. - * - * We set this up in uwb_rc_ie_setup(), where we alloc - * this struct, call get_ie() [so we know which IEs are - * currently being sent, if any]. - * - * @ies_capacity:Amount of space (in bytes) allocated in @ies. The - * amount used is given by sizeof(*ies) plus ies->wIELength - * (which is a little endian quantity all the time). - * @ies_mutex: protect the IE cache - * @dbg: information for the debug interface - */ -struct uwb_rc { - struct uwb_dev uwb_dev; - int index; - u16 version; - - struct module *owner; - void *priv; - int (*start)(struct uwb_rc *rc); - void (*stop)(struct uwb_rc *rc); - int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t); - int (*reset)(struct uwb_rc *rc); - int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *); - int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t, - size_t *, size_t *); - - spinlock_t neh_lock; /* protects neh_* and ctx_* */ - struct list_head neh_list; /* Open NE handles */ - unsigned long ctx_bm[UWB_RC_CTX_MAX / 8 / sizeof(unsigned long)]; - u8 ctx_roll; - - int beaconing; /* Beaconing state [channel number] */ - int beaconing_forced; - int scanning; - enum uwb_scan_type scan_type:3; - unsigned ready:1; - struct uwb_notifs_chain notifs_chain; - struct uwb_beca uwb_beca; - - struct uwbd uwbd; - - struct uwb_drp_backoff_win bow; - struct uwb_drp_avail drp_avail; - struct list_head reservations; - struct list_head cnflt_alien_list; - struct uwb_mas_bm cnflt_alien_bitmap; - struct mutex rsvs_mutex; - spinlock_t rsvs_lock; - struct workqueue_struct *rsv_workq; - - struct delayed_work rsv_update_work; - struct delayed_work rsv_alien_bp_work; - int set_drp_ie_pending; - struct mutex ies_mutex; - struct uwb_rc_cmd_set_ie *ies; - size_t ies_capacity; - - struct list_head pals; - int active_pals; - - struct uwb_dbg *dbg; -}; - - -/** - * struct uwb_pal - a UWB PAL - * @name: descriptive name for this PAL (wusbhc, wlp, etc.). - * @device: a device for the PAL. Used to link the PAL and the radio - * controller in sysfs. - * @rc: the radio controller the PAL uses. - * @channel_changed: called when the channel used by the radio changes. - * A channel of -1 means the channel has been stopped. - * @new_rsv: called when a peer requests a reservation (may be NULL if - * the PAL cannot accept reservation requests). - * @channel: channel being used by the PAL; 0 if the PAL isn't using - * the radio; -1 if the PAL wishes to use the radio but - * cannot. - * @debugfs_dir: a debugfs directory which the PAL can use for its own - * debugfs files. - * - * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB - * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP). - * - * The PALs using a radio controller must register themselves to - * permit the UWB stack to coordinate usage of the radio between the - * various PALs or to allow PALs to response to certain requests from - * peers. - * - * A struct uwb_pal should be embedded in a containing structure - * belonging to the PAL and initialized with uwb_pal_init()). 
Fields - * should be set appropriately by the PAL before registering the PAL - * with uwb_pal_register(). - */ -struct uwb_pal { - struct list_head node; - const char *name; - struct device *device; - struct uwb_rc *rc; - - void (*channel_changed)(struct uwb_pal *pal, int channel); - void (*new_rsv)(struct uwb_pal *pal, struct uwb_rsv *rsv); - - int channel; - struct dentry *debugfs_dir; -}; - -void uwb_pal_init(struct uwb_pal *pal); -int uwb_pal_register(struct uwb_pal *pal); -void uwb_pal_unregister(struct uwb_pal *pal); - -int uwb_radio_start(struct uwb_pal *pal); -void uwb_radio_stop(struct uwb_pal *pal); - -/* - * General public API - * - * This API can be used by UWB device drivers or by those implementing - * UWB Radio Controllers - */ -struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, - const struct uwb_dev_addr *devaddr); -struct uwb_dev *uwb_dev_get_by_rc(struct uwb_dev *, struct uwb_rc *); -static inline void uwb_dev_get(struct uwb_dev *uwb_dev) -{ - get_device(&uwb_dev->dev); -} -static inline void uwb_dev_put(struct uwb_dev *uwb_dev) -{ - put_device(&uwb_dev->dev); -} -struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev); - -/** - * Callback function for 'uwb_{dev,rc}_foreach()'. - * - * @dev: Linux device instance - * 'uwb_dev = container_of(dev, struct uwb_dev, dev)' - * @priv: Data passed by the caller to 'uwb_{dev,rc}_foreach()'. - * - * @returns: 0 to continue the iterations, any other val to stop - * iterating and return the value to the caller of - * _foreach(). - */ -typedef int (*uwb_dev_for_each_f)(struct device *dev, void *priv); -int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f func, void *priv); - -struct uwb_rc *uwb_rc_alloc(void); -struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *); -struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *); -void uwb_rc_put(struct uwb_rc *rc); - -typedef void (*uwb_rc_cmd_cb_f)(struct uwb_rc *rc, void *arg, - struct uwb_rceb *reply, ssize_t reply_size); - -int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name, - struct uwb_rccb *cmd, size_t cmd_size, - u8 expected_type, u16 expected_event, - uwb_rc_cmd_cb_f cb, void *arg); -ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name, - struct uwb_rccb *cmd, size_t cmd_size, - struct uwb_rceb *reply, size_t reply_size); -ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name, - struct uwb_rccb *cmd, size_t cmd_size, - u8 expected_type, u16 expected_event, - struct uwb_rceb **preply); - -size_t __uwb_addr_print(char *, size_t, const unsigned char *, int); - -int uwb_rc_dev_addr_set(struct uwb_rc *, const struct uwb_dev_addr *); -int uwb_rc_dev_addr_get(struct uwb_rc *, struct uwb_dev_addr *); -int uwb_rc_mac_addr_set(struct uwb_rc *, const struct uwb_mac_addr *); -int uwb_rc_mac_addr_get(struct uwb_rc *, struct uwb_mac_addr *); -int __uwb_mac_addr_assigned_check(struct device *, void *); -int __uwb_dev_addr_assigned_check(struct device *, void *); - -/* Print in @buf a pretty repr of @addr */ -static inline size_t uwb_dev_addr_print(char *buf, size_t buf_size, - const struct uwb_dev_addr *addr) -{ - return __uwb_addr_print(buf, buf_size, addr->data, 0); -} - -/* Print in @buf a pretty repr of @addr */ -static inline size_t uwb_mac_addr_print(char *buf, size_t buf_size, - const struct uwb_mac_addr *addr) -{ - return __uwb_addr_print(buf, buf_size, addr->data, 1); -} - -/* @returns 0 if device addresses @addr2 and @addr1 are equal */ -static inline int uwb_dev_addr_cmp(const struct uwb_dev_addr *addr1, - const struct 
uwb_dev_addr *addr2) -{ - return memcmp(addr1, addr2, sizeof(*addr1)); -} - -/* @returns 0 if MAC addresses @addr2 and @addr1 are equal */ -static inline int uwb_mac_addr_cmp(const struct uwb_mac_addr *addr1, - const struct uwb_mac_addr *addr2) -{ - return memcmp(addr1, addr2, sizeof(*addr1)); -} - -/* @returns !0 if a MAC @addr is a broadcast address */ -static inline int uwb_mac_addr_bcast(const struct uwb_mac_addr *addr) -{ - struct uwb_mac_addr bcast = { - .data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } - }; - return !uwb_mac_addr_cmp(addr, &bcast); -} - -/* @returns !0 if a MAC @addr is all zeroes*/ -static inline int uwb_mac_addr_unset(const struct uwb_mac_addr *addr) -{ - struct uwb_mac_addr unset = { - .data = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } - }; - return !uwb_mac_addr_cmp(addr, &unset); -} - -/* @returns !0 if the address is in use. */ -static inline unsigned __uwb_dev_addr_assigned(struct uwb_rc *rc, - struct uwb_dev_addr *addr) -{ - return uwb_dev_for_each(rc, __uwb_dev_addr_assigned_check, addr); -} - -/* - * UWB Radio Controller API - * - * This API is used (in addition to the general API) to implement UWB - * Radio Controllers. - */ -void uwb_rc_init(struct uwb_rc *); -int uwb_rc_add(struct uwb_rc *, struct device *dev, void *rc_priv); -void uwb_rc_rm(struct uwb_rc *); -void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t); -void uwb_rc_neh_error(struct uwb_rc *, int); -void uwb_rc_reset_all(struct uwb_rc *rc); -void uwb_rc_pre_reset(struct uwb_rc *rc); -int uwb_rc_post_reset(struct uwb_rc *rc); - -/** - * uwb_rsv_is_owner - is the owner of this reservation the RC? - * @rsv: the reservation - */ -static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv) -{ - return rsv->owner == &rsv->rc->uwb_dev; -} - -/** - * enum uwb_notifs - UWB events that can be passed to any listeners - * @UWB_NOTIF_ONAIR: a new neighbour has joined the beacon group. - * @UWB_NOTIF_OFFAIR: a neighbour has left the beacon group. - * - * Higher layers can register callback functions with the radio - * controller using uwb_notifs_register(). The radio controller - * maintains a list of all registered handlers and will notify all - * nodes when an event occurs. - */ -enum uwb_notifs { - UWB_NOTIF_ONAIR, - UWB_NOTIF_OFFAIR, -}; - -/* Callback function registered with UWB */ -struct uwb_notifs_handler { - struct list_head list_node; - void (*cb)(void *, struct uwb_dev *, enum uwb_notifs); - void *data; -}; - -int uwb_notifs_register(struct uwb_rc *, struct uwb_notifs_handler *); -int uwb_notifs_deregister(struct uwb_rc *, struct uwb_notifs_handler *); - - -/** - * UWB radio controller Event Size Entry (for creating entry tables) - * - * WUSB and WHCI define events and notifications, and they might have - * fixed or variable size. - * - * Each event/notification has a size which is not necessarily known - * in advance based on the event code. As well, vendor specific - * events/notifications will have a size impossible to determine - * unless we know about the device's specific details. - * - * It was way too smart of the spec writers not to think that it would - * be impossible for a generic driver to skip over vendor specific - * events/notifications if there are no LENGTH fields in the HEADER of - * each message...the transaction size cannot be counted on as the - * spec does not forbid to pack more than one event in a single - * transaction. - * - * Thus, we guess sizes with tables (or for events, when you know the - * size ahead of time you can use uwb_rc_neh_extra_size*()). 
We - * register tables with the known events and their sizes, and then we - * traverse those tables. For those with variable length, we provide a - * way to lookup the size inside the event/notification's - * payload. This allows device-specific event size tables to be - * registered. - * - * @size: Size of the payload - * - * @offset: if != 0, at offset @offset-1 starts a field with a length - * that has to be added to @size. The format of the field is - * given by @type. - * - * @type: Type and length of the offset field. Most common is LE 16 - * bits (that's why that is zero); others are there mostly to - * cover for bugs and weirdos. - */ -struct uwb_est_entry { - size_t size; - unsigned offset; - enum { UWB_EST_16 = 0, UWB_EST_8 = 1 } type; -}; - -int uwb_est_register(u8 type, u8 code_high, u16 vendor, u16 product, - const struct uwb_est_entry *, size_t entries); -int uwb_est_unregister(u8 type, u8 code_high, u16 vendor, u16 product, - const struct uwb_est_entry *, size_t entries); -ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb, - size_t len); - -/* -- Misc */ - -enum { - EDC_MAX_ERRORS = 10, - EDC_ERROR_TIMEFRAME = HZ, -}; - -/* error density counter */ -struct edc { - unsigned long timestart; - u16 errorcount; -}; - -static inline -void edc_init(struct edc *edc) -{ - edc->timestart = jiffies; -} - -/* Called when an error occurred. - * This is way to determine if the number of acceptable errors per time - * period has been exceeded. It is not accurate as there are cases in which - * this scheme will not work, for example if there are periodic occurrences - * of errors that straddle updates to the start time. This scheme is - * sufficient for our usage. - * - * @returns 1 if maximum acceptable errors per timeframe has been exceeded. - */ -static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe) -{ - unsigned long now; - - now = jiffies; - if (now - err_hist->timestart > timeframe) { - err_hist->errorcount = 1; - err_hist->timestart = now; - } else if (++err_hist->errorcount > max_err) { - err_hist->errorcount = 0; - err_hist->timestart = now; - return 1; - } - return 0; -} - - -/* Information Element handling */ - -struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); -int uwb_rc_ie_add(struct uwb_rc *uwb_rc, const struct uwb_ie_hdr *ies, size_t size); -int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id); - -/* - * Transmission statistics - * - * UWB uses LQI and RSSI (one byte values) for reporting radio signal - * strength and line quality indication. We do quick and dirty - * averages of those. They are signed values, btw. - * - * For 8 bit quantities, we keep the min, the max, an accumulator - * (@sigma) and a # of samples. When @samples gets to 255, we compute - * the average (@sigma / @samples), place it in @sigma and reset - * @samples to 1 (so we use it as the first sample). - * - * Now, statistically speaking, probably I am kicking the kidneys of - * some books I have in my shelves collecting dust, but I just want to - * get an approx, not the Nobel. - * - * LOCKING: there is no locking per se, but we try to keep a lockless - * schema. Only _add_samples() modifies the values--as long as you - * have other locking on top that makes sure that no two calls of - * _add_sample() happen at the same time, then we are fine. Now, for - * resetting the values we just set @samples to 0 and that makes the - * next _add_sample() to start with defaults. 
Reading the values in - * _show() currently can race, so you need to make sure the calls are - * under the same lock that protects calls to _add_sample(). FIXME: - * currently unlocked (It is not ultraprecise but does the trick. Bite - * me). - */ -struct stats { - s8 min, max; - s16 sigma; - atomic_t samples; -}; - -static inline -void stats_init(struct stats *stats) -{ - atomic_set(&stats->samples, 0); - wmb(); -} - -static inline -void stats_add_sample(struct stats *stats, s8 sample) -{ - s8 min, max; - s16 sigma; - unsigned samples = atomic_read(&stats->samples); - if (samples == 0) { /* it was zero before, so we initialize */ - min = 127; - max = -128; - sigma = 0; - } else { - min = stats->min; - max = stats->max; - sigma = stats->sigma; - } - - if (sample < min) /* compute new values */ - min = sample; - else if (sample > max) - max = sample; - sigma += sample; - - stats->min = min; /* commit */ - stats->max = max; - stats->sigma = sigma; - if (atomic_add_return(1, &stats->samples) > 255) { - /* wrapped around! reset */ - stats->sigma = sigma / 256; - atomic_set(&stats->samples, 1); - } -} - -static inline ssize_t stats_show(struct stats *stats, char *buf) -{ - int min, max, avg; - int samples = atomic_read(&stats->samples); - if (samples == 0) - min = max = avg = 0; - else { - min = stats->min; - max = stats->max; - avg = stats->sigma / samples; - } - return scnprintf(buf, PAGE_SIZE, "%d %d %d\n", min, max, avg); -} - -static inline ssize_t stats_store(struct stats *stats, const char *buf, - size_t size) -{ - stats_init(stats); - return size; -} - -#endif /* #ifndef __LINUX__UWB_H__ */ diff --git a/include/linux/uwb/debug-cmd.h b/include/linux/uwb/debug-cmd.h deleted file mode 100644 index f97db6c3bcc0..000000000000 --- a/include/linux/uwb/debug-cmd.h +++ /dev/null @@ -1,57 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Ultra Wide Band - * Debug interface commands - * - * Copyright (C) 2008 Cambridge Silicon Radio Ltd. - */ -#ifndef __LINUX__UWB__DEBUG_CMD_H__ -#define __LINUX__UWB__DEBUG_CMD_H__ - -#include <linux/types.h> - -/* - * Debug interface commands - * - * UWB_DBG_CMD_RSV_ESTABLISH: Establish a new unicast reservation. - * - * UWB_DBG_CMD_RSV_TERMINATE: Terminate the Nth reservation. - */ - -enum uwb_dbg_cmd_type { - UWB_DBG_CMD_RSV_ESTABLISH = 1, - UWB_DBG_CMD_RSV_TERMINATE = 2, - UWB_DBG_CMD_IE_ADD = 3, - UWB_DBG_CMD_IE_RM = 4, - UWB_DBG_CMD_RADIO_START = 5, - UWB_DBG_CMD_RADIO_STOP = 6, -}; - -struct uwb_dbg_cmd_rsv_establish { - __u8 target[6]; - __u8 type; - __u16 max_mas; - __u16 min_mas; - __u8 max_interval; -}; - -struct uwb_dbg_cmd_rsv_terminate { - int index; -}; - -struct uwb_dbg_cmd_ie { - __u8 data[128]; - int len; -}; - -struct uwb_dbg_cmd { - __u32 type; - union { - struct uwb_dbg_cmd_rsv_establish rsv_establish; - struct uwb_dbg_cmd_rsv_terminate rsv_terminate; - struct uwb_dbg_cmd_ie ie_add; - struct uwb_dbg_cmd_ie ie_rm; - }; -}; - -#endif /* #ifndef __LINUX__UWB__DEBUG_CMD_H__ */ diff --git a/include/linux/uwb/spec.h b/include/linux/uwb/spec.h deleted file mode 100644 index 5f75caf7b4ba..000000000000 --- a/include/linux/uwb/spec.h +++ /dev/null @@ -1,767 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Ultra Wide Band - * UWB Standard definitions - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * All these definitions are based on the ECMA-368 standard. 
- * - * Note all definitions are Little Endian in the wire, and we will - * convert them to host order before operating on the bitfields (that - * yes, we use extensively). - */ - -#ifndef __LINUX__UWB_SPEC_H__ -#define __LINUX__UWB_SPEC_H__ - -#include <linux/types.h> -#include <linux/bitmap.h> -#include <linux/if_ether.h> - -#define i1480_FW 0x00000303 -/* #define i1480_FW 0x00000302 */ - -/** - * Number of Medium Access Slots in a superframe. - * - * UWB divides time in SuperFrames, each one divided in 256 pieces, or - * Medium Access Slots. See MBOA MAC[5.4.5] for details. The MAS is the - * basic bandwidth allocation unit in UWB. - */ -enum { UWB_NUM_MAS = 256 }; - -/** - * Number of Zones in superframe. - * - * UWB divides the superframe into zones with numbering starting from BPST. - * See MBOA MAC[16.8.6] - */ -enum { UWB_NUM_ZONES = 16 }; - -/* - * Number of MAS in a zone. - */ -#define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES) - -/* - * Number of MAS required before a row can be considered available. - */ -#define UWB_USABLE_MAS_PER_ROW (UWB_NUM_ZONES - 1) - -/* - * Number of streams per DRP reservation between a pair of devices. - * - * [ECMA-368] section 16.8.6. - */ -enum { UWB_NUM_STREAMS = 8 }; - -/* - * mMasLength - * - * The length of a MAS in microseconds. - * - * [ECMA-368] section 17.16. - */ -enum { UWB_MAS_LENGTH_US = 256 }; - -/* - * mBeaconSlotLength - * - * The length of the beacon slot in microseconds. - * - * [ECMA-368] section 17.16 - */ -enum { UWB_BEACON_SLOT_LENGTH_US = 85 }; - -/* - * mMaxLostBeacons - * - * The number beacons missing in consecutive superframes before a - * device can be considered as unreachable. - * - * [ECMA-368] section 17.16 - */ -enum { UWB_MAX_LOST_BEACONS = 3 }; - -/* - * mDRPBackOffWinMin - * - * The minimum number of superframes to wait before trying to reserve - * extra MAS. - * - * [ECMA-368] section 17.16 - */ -enum { UWB_DRP_BACKOFF_WIN_MIN = 2 }; - -/* - * mDRPBackOffWinMax - * - * The maximum number of superframes to wait before trying to reserve - * extra MAS. - * - * [ECMA-368] section 17.16 - */ -enum { UWB_DRP_BACKOFF_WIN_MAX = 16 }; - -/* - * Length of a superframe in microseconds. - */ -#define UWB_SUPERFRAME_LENGTH_US (UWB_MAS_LENGTH_US * UWB_NUM_MAS) - -/** - * UWB MAC address - * - * It is *imperative* that this struct is exactly 6 packed bytes (as - * it is also used to define headers sent down and up the wire/radio). - */ -struct uwb_mac_addr { - u8 data[ETH_ALEN]; -} __attribute__((packed)); - - -/** - * UWB device address - * - * It is *imperative* that this struct is exactly 6 packed bytes (as - * it is also used to define headers sent down and up the wire/radio). - */ -struct uwb_dev_addr { - u8 data[2]; -} __attribute__((packed)); - - -/** - * Types of UWB addresses - * - * Order matters (by size). - */ -enum uwb_addr_type { - UWB_ADDR_DEV = 0, - UWB_ADDR_MAC = 1, -}; - - -/** Size of a char buffer for printing a MAC/device address */ -enum { UWB_ADDR_STRSIZE = 32 }; - - -/** UWB WiMedia protocol IDs. 
*/ -enum uwb_prid { - UWB_PRID_WLP_RESERVED = 0x0000, - UWB_PRID_WLP = 0x0001, - UWB_PRID_WUSB_BOT = 0x0010, - UWB_PRID_WUSB = 0x0010, - UWB_PRID_WUSB_TOP = 0x001F, -}; - - -/** PHY Rate (MBOA MAC[7.8.12, Table 61]) */ -enum uwb_phy_rate { - UWB_PHY_RATE_53 = 0, - UWB_PHY_RATE_80, - UWB_PHY_RATE_106, - UWB_PHY_RATE_160, - UWB_PHY_RATE_200, - UWB_PHY_RATE_320, - UWB_PHY_RATE_400, - UWB_PHY_RATE_480, - UWB_PHY_RATE_INVALID -}; - - -/** - * Different ways to scan (MBOA MAC[6.2.2, Table 8], WUSB[Table 8-78]) - */ -enum uwb_scan_type { - UWB_SCAN_ONLY = 0, - UWB_SCAN_OUTSIDE_BP, - UWB_SCAN_WHILE_INACTIVE, - UWB_SCAN_DISABLED, - UWB_SCAN_ONLY_STARTTIME, - UWB_SCAN_TOP -}; - - -/** ACK Policy types (MBOA MAC[7.2.1.3]) */ -enum uwb_ack_pol { - UWB_ACK_NO = 0, - UWB_ACK_INM = 1, - UWB_ACK_B = 2, - UWB_ACK_B_REQ = 3, -}; - - -/** DRP reservation types ([ECMA-368 table 106) */ -enum uwb_drp_type { - UWB_DRP_TYPE_ALIEN_BP = 0, - UWB_DRP_TYPE_HARD, - UWB_DRP_TYPE_SOFT, - UWB_DRP_TYPE_PRIVATE, - UWB_DRP_TYPE_PCA, -}; - - -/** DRP Reason Codes ([ECMA-368] table 107) */ -enum uwb_drp_reason { - UWB_DRP_REASON_ACCEPTED = 0, - UWB_DRP_REASON_CONFLICT, - UWB_DRP_REASON_PENDING, - UWB_DRP_REASON_DENIED, - UWB_DRP_REASON_MODIFIED, -}; - -/** Relinquish Request Reason Codes ([ECMA-368] table 113) */ -enum uwb_relinquish_req_reason { - UWB_RELINQUISH_REQ_REASON_NON_SPECIFIC = 0, - UWB_RELINQUISH_REQ_REASON_OVER_ALLOCATION, -}; - -/** - * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9]) - */ -enum uwb_drp_notif_reason { - UWB_DRP_NOTIF_DRP_IE_RCVD = 0, - UWB_DRP_NOTIF_CONFLICT, - UWB_DRP_NOTIF_TERMINATE, -}; - - -/** Allocation of MAS slots in a DRP request MBOA MAC[7.8.7] */ -struct uwb_drp_alloc { - __le16 zone_bm; - __le16 mas_bm; -} __attribute__((packed)); - - -/** General MAC Header format (ECMA-368[16.2]) */ -struct uwb_mac_frame_hdr { - __le16 Frame_Control; - struct uwb_dev_addr DestAddr; - struct uwb_dev_addr SrcAddr; - __le16 Sequence_Control; - __le16 Access_Information; -} __attribute__((packed)); - - -/** - * uwb_beacon_frame - a beacon frame including MAC headers - * - * [ECMA] section 16.3. 
- */ -struct uwb_beacon_frame { - struct uwb_mac_frame_hdr hdr; - struct uwb_mac_addr Device_Identifier; /* may be a NULL EUI-48 */ - u8 Beacon_Slot_Number; - u8 Device_Control; - u8 IEData[]; -} __attribute__((packed)); - - -/** Information Element codes (MBOA MAC[T54]) */ -enum uwb_ie { - UWB_PCA_AVAILABILITY = 2, - UWB_IE_DRP_AVAILABILITY = 8, - UWB_IE_DRP = 9, - UWB_BP_SWITCH_IE = 11, - UWB_MAC_CAPABILITIES_IE = 12, - UWB_PHY_CAPABILITIES_IE = 13, - UWB_APP_SPEC_PROBE_IE = 15, - UWB_IDENTIFICATION_IE = 19, - UWB_MASTER_KEY_ID_IE = 20, - UWB_RELINQUISH_REQUEST_IE = 21, - UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */ - UWB_APP_SPEC_IE = 255, -}; - - -/** - * Header common to all Information Elements (IEs) - */ -struct uwb_ie_hdr { - u8 element_id; /* enum uwb_ie */ - u8 length; -} __attribute__((packed)); - - -/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.6]) */ -struct uwb_ie_drp { - struct uwb_ie_hdr hdr; - __le16 drp_control; - struct uwb_dev_addr dev_addr; - struct uwb_drp_alloc allocs[]; -} __attribute__((packed)); - -static inline int uwb_ie_drp_type(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 0) & 0x7; -} - -static inline int uwb_ie_drp_stream_index(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 3) & 0x7; -} - -static inline int uwb_ie_drp_reason_code(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 6) & 0x7; -} - -static inline int uwb_ie_drp_status(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 9) & 0x1; -} - -static inline int uwb_ie_drp_owner(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 10) & 0x1; -} - -static inline int uwb_ie_drp_tiebreaker(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 11) & 0x1; -} - -static inline int uwb_ie_drp_unsafe(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 12) & 0x1; -} - -static inline void uwb_ie_drp_set_type(struct uwb_ie_drp *ie, enum uwb_drp_type type) -{ - u16 drp_control = le16_to_cpu(ie->drp_control); - drp_control = (drp_control & ~(0x7 << 0)) | (type << 0); - ie->drp_control = cpu_to_le16(drp_control); -} - -static inline void uwb_ie_drp_set_stream_index(struct uwb_ie_drp *ie, int stream_index) -{ - u16 drp_control = le16_to_cpu(ie->drp_control); - drp_control = (drp_control & ~(0x7 << 3)) | (stream_index << 3); - ie->drp_control = cpu_to_le16(drp_control); -} - -static inline void uwb_ie_drp_set_reason_code(struct uwb_ie_drp *ie, - enum uwb_drp_reason reason_code) -{ - u16 drp_control = le16_to_cpu(ie->drp_control); - drp_control = (ie->drp_control & ~(0x7 << 6)) | (reason_code << 6); - ie->drp_control = cpu_to_le16(drp_control); -} - -static inline void uwb_ie_drp_set_status(struct uwb_ie_drp *ie, int status) -{ - u16 drp_control = le16_to_cpu(ie->drp_control); - drp_control = (drp_control & ~(0x1 << 9)) | (status << 9); - ie->drp_control = cpu_to_le16(drp_control); -} - -static inline void uwb_ie_drp_set_owner(struct uwb_ie_drp *ie, int owner) -{ - u16 drp_control = le16_to_cpu(ie->drp_control); - drp_control = (drp_control & ~(0x1 << 10)) | (owner << 10); - ie->drp_control = cpu_to_le16(drp_control); -} - -static inline void uwb_ie_drp_set_tiebreaker(struct uwb_ie_drp *ie, int tiebreaker) -{ - u16 drp_control = le16_to_cpu(ie->drp_control); - drp_control = (drp_control & ~(0x1 << 11)) | (tiebreaker << 11); - ie->drp_control = cpu_to_le16(drp_control); -} - -static inline void uwb_ie_drp_set_unsafe(struct uwb_ie_drp *ie, int unsafe) -{ - u16 drp_control = 
le16_to_cpu(ie->drp_control); - drp_control = (drp_control & ~(0x1 << 12)) | (unsafe << 12); - ie->drp_control = cpu_to_le16(drp_control); -} - -/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.7]) */ -struct uwb_ie_drp_avail { - struct uwb_ie_hdr hdr; - DECLARE_BITMAP(bmp, UWB_NUM_MAS); -} __attribute__((packed)); - -/* Relinqish Request IE ([ECMA-368] section 16.8.19). */ -struct uwb_relinquish_request_ie { - struct uwb_ie_hdr hdr; - __le16 relinquish_req_control; - struct uwb_dev_addr dev_addr; - struct uwb_drp_alloc allocs[]; -} __attribute__((packed)); - -static inline int uwb_ie_relinquish_req_reason_code(struct uwb_relinquish_request_ie *ie) -{ - return (le16_to_cpu(ie->relinquish_req_control) >> 0) & 0xf; -} - -static inline void uwb_ie_relinquish_req_set_reason_code(struct uwb_relinquish_request_ie *ie, - int reason_code) -{ - u16 ctrl = le16_to_cpu(ie->relinquish_req_control); - ctrl = (ctrl & ~(0xf << 0)) | (reason_code << 0); - ie->relinquish_req_control = cpu_to_le16(ctrl); -} - -/** - * The Vendor ID is set to an OUI that indicates the vendor of the device. - * ECMA-368 [16.8.10] - */ -struct uwb_vendor_id { - u8 data[3]; -} __attribute__((packed)); - -/** - * The device type ID - * FIXME: clarify what this means - * ECMA-368 [16.8.10] - */ -struct uwb_device_type_id { - u8 data[3]; -} __attribute__((packed)); - - -/** - * UWB device information types - * ECMA-368 [16.8.10] - */ -enum uwb_dev_info_type { - UWB_DEV_INFO_VENDOR_ID = 0, - UWB_DEV_INFO_VENDOR_TYPE, - UWB_DEV_INFO_NAME, -}; - -/** - * UWB device information found in Identification IE - * ECMA-368 [16.8.10] - */ -struct uwb_dev_info { - u8 type; /* enum uwb_dev_info_type */ - u8 length; - u8 data[]; -} __attribute__((packed)); - -/** - * UWB Identification IE - * ECMA-368 [16.8.10] - */ -struct uwb_identification_ie { - struct uwb_ie_hdr hdr; - struct uwb_dev_info info[]; -} __attribute__((packed)); - -/* - * UWB Radio Controller - * - * These definitions are common to the Radio Control layers as - * exported by the WUSB1.0 HWA and WHCI interfaces. 
- */ - -/** Radio Control Command Block (WUSB1.0[Table 8-65] and WHCI 0.95) */ -struct uwb_rccb { - u8 bCommandType; /* enum hwa_cet */ - __le16 wCommand; /* Command code */ - u8 bCommandContext; /* Context ID */ -} __attribute__((packed)); - - -/** Radio Control Event Block (WUSB[table 8-66], WHCI 0.95) */ -struct uwb_rceb { - u8 bEventType; /* enum hwa_cet */ - __le16 wEvent; /* Event code */ - u8 bEventContext; /* Context ID */ -} __attribute__((packed)); - - -enum { - UWB_RC_CET_GENERAL = 0, /* General Command/Event type */ - UWB_RC_CET_EX_TYPE_1 = 1, /* Extended Type 1 Command/Event type */ -}; - -/* Commands to the radio controller */ -enum uwb_rc_cmd { - UWB_RC_CMD_CHANNEL_CHANGE = 16, - UWB_RC_CMD_DEV_ADDR_MGMT = 17, /* Device Address Management */ - UWB_RC_CMD_GET_IE = 18, /* GET Information Elements */ - UWB_RC_CMD_RESET = 19, - UWB_RC_CMD_SCAN = 20, /* Scan management */ - UWB_RC_CMD_SET_BEACON_FILTER = 21, - UWB_RC_CMD_SET_DRP_IE = 22, /* Dynamic Reservation Protocol IEs */ - UWB_RC_CMD_SET_IE = 23, /* Information Element management */ - UWB_RC_CMD_SET_NOTIFICATION_FILTER = 24, - UWB_RC_CMD_SET_TX_POWER = 25, - UWB_RC_CMD_SLEEP = 26, - UWB_RC_CMD_START_BEACON = 27, - UWB_RC_CMD_STOP_BEACON = 28, - UWB_RC_CMD_BP_MERGE = 29, - UWB_RC_CMD_SEND_COMMAND_FRAME = 30, - UWB_RC_CMD_SET_ASIE_NOTIF = 31, -}; - -/* Notifications from the radio controller */ -enum uwb_rc_evt { - UWB_RC_EVT_IE_RCV = 0, - UWB_RC_EVT_BEACON = 1, - UWB_RC_EVT_BEACON_SIZE = 2, - UWB_RC_EVT_BPOIE_CHANGE = 3, - UWB_RC_EVT_BP_SLOT_CHANGE = 4, - UWB_RC_EVT_BP_SWITCH_IE_RCV = 5, - UWB_RC_EVT_DEV_ADDR_CONFLICT = 6, - UWB_RC_EVT_DRP_AVAIL = 7, - UWB_RC_EVT_DRP = 8, - UWB_RC_EVT_BP_SWITCH_STATUS = 9, - UWB_RC_EVT_CMD_FRAME_RCV = 10, - UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV = 11, - /* Events (command responses) use the same code as the command */ - UWB_RC_EVT_UNKNOWN_CMD_RCV = 65535, -}; - -enum uwb_rc_extended_type_1_cmd { - UWB_RC_SET_DAA_ENERGY_MASK = 32, - UWB_RC_SET_NOTIFICATION_FILTER_EX = 33, -}; - -enum uwb_rc_extended_type_1_evt { - UWB_RC_DAA_ENERGY_DETECTED = 0, -}; - -/* Radio Control Result Code. [WHCI] table 3-3. */ -enum { - UWB_RC_RES_SUCCESS = 0, - UWB_RC_RES_FAIL, - UWB_RC_RES_FAIL_HARDWARE, - UWB_RC_RES_FAIL_NO_SLOTS, - UWB_RC_RES_FAIL_BEACON_TOO_LARGE, - UWB_RC_RES_FAIL_INVALID_PARAMETER, - UWB_RC_RES_FAIL_UNSUPPORTED_PWR_LEVEL, - UWB_RC_RES_FAIL_INVALID_IE_DATA, - UWB_RC_RES_FAIL_BEACON_SIZE_EXCEEDED, - UWB_RC_RES_FAIL_CANCELLED, - UWB_RC_RES_FAIL_INVALID_STATE, - UWB_RC_RES_FAIL_INVALID_SIZE, - UWB_RC_RES_FAIL_ACK_NOT_RECEIVED, - UWB_RC_RES_FAIL_NO_MORE_ASIE_NOTIF, - UWB_RC_RES_FAIL_TIME_OUT = 255, -}; - -/* Confirm event. [WHCI] section 3.1.3.1 etc. */ -struct uwb_rc_evt_confirm { - struct uwb_rceb rceb; - u8 bResultCode; -} __attribute__((packed)); - -/* Device Address Management event. [WHCI] section 3.1.3.2. */ -struct uwb_rc_evt_dev_addr_mgmt { - struct uwb_rceb rceb; - u8 baAddr[ETH_ALEN]; - u8 bResultCode; -} __attribute__((packed)); - - -/* Get IE Event. [WHCI] section 3.1.3.3. */ -struct uwb_rc_evt_get_ie { - struct uwb_rceb rceb; - __le16 wIELength; - u8 IEData[]; -} __attribute__((packed)); - -/* Set DRP IE Event. [WHCI] section 3.1.3.7. */ -struct uwb_rc_evt_set_drp_ie { - struct uwb_rceb rceb; - __le16 wRemainingSpace; - u8 bResultCode; -} __attribute__((packed)); - -/* Set IE Event. [WHCI] section 3.1.3.8. */ -struct uwb_rc_evt_set_ie { - struct uwb_rceb rceb; - __le16 RemainingSpace; - u8 bResultCode; -} __attribute__((packed)); - -/* Scan command. [WHCI] 3.1.3.5. 
*/ -struct uwb_rc_cmd_scan { - struct uwb_rccb rccb; - u8 bChannelNumber; - u8 bScanState; - __le16 wStartTime; -} __attribute__((packed)); - -/* Set DRP IE command. [WHCI] section 3.1.3.7. */ -struct uwb_rc_cmd_set_drp_ie { - struct uwb_rccb rccb; - __le16 wIELength; - struct uwb_ie_drp IEData[]; -} __attribute__((packed)); - -/* Set IE command. [WHCI] section 3.1.3.8. */ -struct uwb_rc_cmd_set_ie { - struct uwb_rccb rccb; - __le16 wIELength; - u8 IEData[]; -} __attribute__((packed)); - -/* Set DAA Energy Mask event. [WHCI 0.96] section 3.1.3.17. */ -struct uwb_rc_evt_set_daa_energy_mask { - struct uwb_rceb rceb; - __le16 wLength; - u8 result; -} __attribute__((packed)); - -/* Set Notification Filter Extended event. [WHCI 0.96] section 3.1.3.18. */ -struct uwb_rc_evt_set_notification_filter_ex { - struct uwb_rceb rceb; - __le16 wLength; - u8 result; -} __attribute__((packed)); - -/* IE Received notification. [WHCI] section 3.1.4.1. */ -struct uwb_rc_evt_ie_rcv { - struct uwb_rceb rceb; - struct uwb_dev_addr SrcAddr; - __le16 wIELength; - u8 IEData[]; -} __attribute__((packed)); - -/* Type of the received beacon. [WHCI] section 3.1.4.2. */ -enum uwb_rc_beacon_type { - UWB_RC_BEACON_TYPE_SCAN = 0, - UWB_RC_BEACON_TYPE_NEIGHBOR, - UWB_RC_BEACON_TYPE_OL_ALIEN, - UWB_RC_BEACON_TYPE_NOL_ALIEN, -}; - -/* Beacon received notification. [WHCI] 3.1.4.2. */ -struct uwb_rc_evt_beacon { - struct uwb_rceb rceb; - u8 bChannelNumber; - u8 bBeaconType; - __le16 wBPSTOffset; - u8 bLQI; - u8 bRSSI; - __le16 wBeaconInfoLength; - u8 BeaconInfo[]; -} __attribute__((packed)); - - -/* Beacon Size Change notification. [WHCI] section 3.1.4.3 */ -struct uwb_rc_evt_beacon_size { - struct uwb_rceb rceb; - __le16 wNewBeaconSize; -} __attribute__((packed)); - - -/* BPOIE Change notification. [WHCI] section 3.1.4.4. */ -struct uwb_rc_evt_bpoie_change { - struct uwb_rceb rceb; - __le16 wBPOIELength; - u8 BPOIE[]; -} __attribute__((packed)); - - -/* Beacon Slot Change notification. [WHCI] section 3.1.4.5. */ -struct uwb_rc_evt_bp_slot_change { - struct uwb_rceb rceb; - u8 slot_info; -} __attribute__((packed)); - -static inline int uwb_rc_evt_bp_slot_change_slot_num( - const struct uwb_rc_evt_bp_slot_change *evt) -{ - return evt->slot_info & 0x7f; -} - -static inline int uwb_rc_evt_bp_slot_change_no_slot( - const struct uwb_rc_evt_bp_slot_change *evt) -{ - return (evt->slot_info & 0x80) >> 7; -} - -/* BP Switch IE Received notification. [WHCI] section 3.1.4.6. */ -struct uwb_rc_evt_bp_switch_ie_rcv { - struct uwb_rceb rceb; - struct uwb_dev_addr wSrcAddr; - __le16 wIELength; - u8 IEData[]; -} __attribute__((packed)); - -/* DevAddr Conflict notification. [WHCI] section 3.1.4.7. */ -struct uwb_rc_evt_dev_addr_conflict { - struct uwb_rceb rceb; -} __attribute__((packed)); - -/* DRP notification. [WHCI] section 3.1.4.9. */ -struct uwb_rc_evt_drp { - struct uwb_rceb rceb; - struct uwb_dev_addr src_addr; - u8 reason; - u8 beacon_slot_number; - __le16 ie_length; - u8 ie_data[]; -} __attribute__((packed)); - -static inline enum uwb_drp_notif_reason uwb_rc_evt_drp_reason(struct uwb_rc_evt_drp *evt) -{ - return evt->reason & 0x0f; -} - - -/* DRP Availability Change notification. [WHCI] section 3.1.4.8. */ -struct uwb_rc_evt_drp_avail { - struct uwb_rceb rceb; - DECLARE_BITMAP(bmp, UWB_NUM_MAS); -} __attribute__((packed)); - -/* BP switch status notification. [WHCI] section 3.1.4.10. 
*/ -struct uwb_rc_evt_bp_switch_status { - struct uwb_rceb rceb; - u8 status; - u8 slot_offset; - __le16 bpst_offset; - u8 move_countdown; -} __attribute__((packed)); - -/* Command Frame Received notification. [WHCI] section 3.1.4.11. */ -struct uwb_rc_evt_cmd_frame_rcv { - struct uwb_rceb rceb; - __le16 receive_time; - struct uwb_dev_addr wSrcAddr; - struct uwb_dev_addr wDstAddr; - __le16 control; - __le16 reserved; - __le16 dataLength; - u8 data[]; -} __attribute__((packed)); - -/* Channel Change IE Received notification. [WHCI] section 3.1.4.12. */ -struct uwb_rc_evt_channel_change_ie_rcv { - struct uwb_rceb rceb; - struct uwb_dev_addr wSrcAddr; - __le16 wIELength; - u8 IEData[]; -} __attribute__((packed)); - -/* DAA Energy Detected notification. [WHCI 0.96] section 3.1.4.14. */ -struct uwb_rc_evt_daa_energy_detected { - struct uwb_rceb rceb; - __le16 wLength; - u8 bandID; - u8 reserved; - u8 toneBmp[16]; -} __attribute__((packed)); - - -/** - * Radio Control Interface Class Descriptor - * - * WUSB 1.0 [8.6.1.2] - */ -struct uwb_rc_control_intf_class_desc { - u8 bLength; - u8 bDescriptorType; - __le16 bcdRCIVersion; -} __attribute__((packed)); - -#endif /* #ifndef __LINUX__UWB_SPEC_H__ */ diff --git a/include/linux/uwb/umc.h b/include/linux/uwb/umc.h deleted file mode 100644 index ddbceb39ad15..000000000000 --- a/include/linux/uwb/umc.h +++ /dev/null @@ -1,192 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * UWB Multi-interface Controller support. - * - * Copyright (C) 2007 Cambridge Silicon Radio Ltd. - * - * UMC (UWB Multi-interface Controller) capabilities (e.g., radio - * controller, host controller) are presented as devices on the "umc" - * bus. - * - * The radio controller is not strictly a UMC capability but it's - * useful to present it as such. - * - * References: - * - * [WHCI] Wireless Host Controller Interface Specification for - * Certified Wireless Universal Serial Bus, revision 0.95. - * - * How this works is kind of convoluted but simple. The whci.ko driver - * loads when WHCI devices are detected. These WHCI devices expose - * many devices in the same PCI function (they couldn't have reused - * functions, no), so for each PCI function that exposes these many - * devices, whci ceates a umc_dev [whci_probe() -> whci_add_cap()] - * with umc_device_create() and adds it to the bus with - * umc_device_register(). - * - * umc_device_register() calls device_register() which will push the - * bus management code to load your UMC driver's somehting_probe() - * that you have registered for that capability code. - * - * Now when the WHCI device is removed, whci_remove() will go over - * each umc_dev assigned to each of the PCI function's capabilities - * and through whci_del_cap() call umc_device_unregister() each - * created umc_dev. Of course, if you are bound to the device, your - * driver's something_remove() will be called. - */ - -#ifndef _LINUX_UWB_UMC_H_ -#define _LINUX_UWB_UMC_H_ - -#include <linux/device.h> -#include <linux/pci.h> - -/* - * UMC capability IDs. - * - * 0x00 is reserved so use it for the radio controller device. - * - * [WHCI] table 2-8 - */ -#define UMC_CAP_ID_WHCI_RC 0x00 /* radio controller */ -#define UMC_CAP_ID_WHCI_WUSB_HC 0x01 /* WUSB host controller */ - -/** - * struct umc_dev - UMC capability device - * - * @version: version of the specification this capability conforms to. - * @cap_id: capability ID. - * @bar: PCI Bar (64 bit) where the resource lies - * @resource: register space resource. - * @irq: interrupt line. 
- */ -struct umc_dev { - u16 version; - u8 cap_id; - u8 bar; - struct resource resource; - unsigned irq; - struct device dev; -}; - -#define to_umc_dev(d) container_of(d, struct umc_dev, dev) - -/** - * struct umc_driver - UMC capability driver - * @cap_id: supported capability ID. - * @match: driver specific capability matching function. - * @match_data: driver specific data for match() (e.g., a - * table of pci_device_id's if umc_match_pci_id() is used). - */ -struct umc_driver { - char *name; - u8 cap_id; - int (*match)(struct umc_driver *, struct umc_dev *); - const void *match_data; - - int (*probe)(struct umc_dev *); - void (*remove)(struct umc_dev *); - int (*pre_reset)(struct umc_dev *); - int (*post_reset)(struct umc_dev *); - - struct device_driver driver; -}; - -#define to_umc_driver(d) container_of(d, struct umc_driver, driver) - -extern struct bus_type umc_bus_type; - -struct umc_dev *umc_device_create(struct device *parent, int n); -int __must_check umc_device_register(struct umc_dev *umc); -void umc_device_unregister(struct umc_dev *umc); - -int __must_check __umc_driver_register(struct umc_driver *umc_drv, - struct module *mod, - const char *mod_name); - -/** - * umc_driver_register - register a UMC capabiltity driver. - * @umc_drv: pointer to the driver. - */ -#define umc_driver_register(umc_drv) \ - __umc_driver_register(umc_drv, THIS_MODULE, KBUILD_MODNAME) - -void umc_driver_unregister(struct umc_driver *umc_drv); - -/* - * Utility function you can use to match (umc_driver->match) against a - * null-terminated array of 'struct pci_device_id' in - * umc_driver->match_data. - */ -int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc); - -/** - * umc_parent_pci_dev - return the UMC's parent PCI device or NULL if none - * @umc_dev: UMC device whose parent PCI device we are looking for - * - * DIRTY!!! DON'T RELY ON THIS - * - * FIXME: This is as dirty as it gets, but we need some way to check - * the correct type of umc_dev->parent (so that for example, we can - * cast to pci_dev). Casting to pci_dev is necessary because at some - * point we need to request resources from the device. Mapping is - * easily over come (ioremap and stuff are bus agnostic), but hooking - * up to some error handlers (such as pci error handlers) might need - * this. - * - * THIS might (probably will) be removed in the future, so don't count - * on it. - */ -static inline struct pci_dev *umc_parent_pci_dev(struct umc_dev *umc_dev) -{ - struct pci_dev *pci_dev = NULL; - if (dev_is_pci(umc_dev->dev.parent)) - pci_dev = to_pci_dev(umc_dev->dev.parent); - return pci_dev; -} - -/** - * umc_dev_get() - reference a UMC device. - * @umc_dev: Pointer to UMC device. - * - * NOTE: we are assuming in this whole scheme that the parent device - * is referenced at _probe() time and unreferenced at _remove() - * time by the parent's subsystem. - */ -static inline struct umc_dev *umc_dev_get(struct umc_dev *umc_dev) -{ - get_device(&umc_dev->dev); - return umc_dev; -} - -/** - * umc_dev_put() - unreference a UMC device. - * @umc_dev: Pointer to UMC device. - */ -static inline void umc_dev_put(struct umc_dev *umc_dev) -{ - put_device(&umc_dev->dev); -} - -/** - * umc_set_drvdata - set UMC device's driver data. - * @umc_dev: Pointer to UMC device. - * @data: Data to set. - */ -static inline void umc_set_drvdata(struct umc_dev *umc_dev, void *data) -{ - dev_set_drvdata(&umc_dev->dev, data); -} - -/** - * umc_get_drvdata - recover UMC device's driver data. - * @umc_dev: Pointer to UMC device. 
- */ -static inline void *umc_get_drvdata(struct umc_dev *umc_dev) -{ - return dev_get_drvdata(&umc_dev->dev); -} - -int umc_controller_reset(struct umc_dev *umc); - -#endif /* #ifndef _LINUX_UWB_UMC_H_ */ diff --git a/include/linux/uwb/whci.h b/include/linux/uwb/whci.h deleted file mode 100644 index 1a5c2cc2b008..000000000000 --- a/include/linux/uwb/whci.h +++ /dev/null @@ -1,102 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Wireless Host Controller Interface for Ultra-Wide-Band and Wireless USB - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * References: - * [WHCI] Wireless Host Controller Interface Specification for - * Certified Wireless Universal Serial Bus, revision 0.95. - */ -#ifndef _LINUX_UWB_WHCI_H_ -#define _LINUX_UWB_WHCI_H_ - -#include <linux/pci.h> - -/* - * UWB interface capability registers (offsets from UWBBASE) - * - * [WHCI] section 2.2 - */ -#define UWBCAPINFO 0x00 /* == UWBCAPDATA(0) */ -# define UWBCAPINFO_TO_N_CAPS(c) (((c) >> 0) & 0xFull) -#define UWBCAPDATA(n) (8*(n)) -# define UWBCAPDATA_TO_VERSION(c) (((c) >> 32) & 0xFFFFull) -# define UWBCAPDATA_TO_OFFSET(c) (((c) >> 18) & 0x3FFFull) -# define UWBCAPDATA_TO_BAR(c) (((c) >> 16) & 0x3ull) -# define UWBCAPDATA_TO_SIZE(c) ((((c) >> 8) & 0xFFull) * sizeof(u32)) -# define UWBCAPDATA_TO_CAP_ID(c) (((c) >> 0) & 0xFFull) - -/* Size of the WHCI capability data (including the RC capability) for - a device with n capabilities. */ -#define UWBCAPDATA_SIZE(n) (8 + 8*(n)) - - -/* - * URC registers (offsets from URCBASE) - * - * [WHCI] section 2.3 - */ -#define URCCMD 0x00 -# define URCCMD_RESET (1 << 31) /* UMC Hardware reset */ -# define URCCMD_RS (1 << 30) /* Run/Stop */ -# define URCCMD_EARV (1 << 29) /* Event Address Register Valid */ -# define URCCMD_ACTIVE (1 << 15) /* Command is active */ -# define URCCMD_IWR (1 << 14) /* Interrupt When Ready */ -# define URCCMD_SIZE_MASK 0x00000fff /* Command size mask */ -#define URCSTS 0x04 -# define URCSTS_EPS (1 << 17) /* Event Processing Status */ -# define URCSTS_HALTED (1 << 16) /* RC halted */ -# define URCSTS_HSE (1 << 10) /* Host System Error...fried */ -# define URCSTS_ER (1 << 9) /* Event Ready */ -# define URCSTS_RCI (1 << 8) /* Ready for Command Interrupt */ -# define URCSTS_INT_MASK 0x00000700 /* URC interrupt sources */ -# define URCSTS_ISI 0x000000ff /* Interrupt Source Identification */ -#define URCINTR 0x08 -# define URCINTR_EN_ALL 0x000007ff /* Enable all interrupt sources */ -#define URCCMDADDR 0x10 -#define URCEVTADDR 0x18 -# define URCEVTADDR_OFFSET_MASK 0xfff /* Event pointer offset mask */ - - -/** Write 32 bit @value to little endian register at @addr */ -static inline -void le_writel(u32 value, void __iomem *addr) -{ - iowrite32(value, addr); -} - - -/** Read from 32 bit little endian register at @addr */ -static inline -u32 le_readl(void __iomem *addr) -{ - return ioread32(addr); -} - - -/** Write 64 bit @value to little endian register at @addr */ -static inline -void le_writeq(u64 value, void __iomem *addr) -{ - iowrite32(value, addr); - iowrite32(value >> 32, addr + 4); -} - - -/** Read from 64 bit little endian register at @addr */ -static inline -u64 le_readq(void __iomem *addr) -{ - u64 value; - value = ioread32(addr); - value |= (u64)ioread32(addr + 4) << 32; - return value; -} - -extern int whci_wait_for(struct device *dev, u32 __iomem *reg, - u32 mask, u32 result, - unsigned long max_ms, const char *tag); - -#endif /* #ifndef _LINUX_UWB_WHCI_H_ */ diff --git 
a/include/linux/vermagic.h b/include/linux/vermagic.h index bae807eb2933..9aced11e9000 100644 --- a/include/linux/vermagic.h +++ b/include/linux/vermagic.h @@ -9,6 +9,8 @@ #endif #ifdef CONFIG_PREEMPT #define MODULE_VERMAGIC_PREEMPT "preempt " +#elif defined(CONFIG_PREEMPT_RT) +#define MODULE_VERMAGIC_PREEMPT "preempt_rt " #else #define MODULE_VERMAGIC_PREEMPT "" #endif diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index e223e2632edd..4c7781f4b29b 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -35,13 +35,14 @@ struct virtio_vsock_sock { /* Protected by tx_lock */ u32 tx_cnt; - u32 buf_alloc; u32 peer_fwd_cnt; u32 peer_buf_alloc; /* Protected by rx_lock */ u32 fwd_cnt; + u32 last_fwd_cnt; u32 rx_bytes; + u32 buf_alloc; struct list_head rx_queue; }; @@ -52,6 +53,7 @@ struct virtio_vsock_pkt { /* socket refcnt not held, only use for cancellation */ struct vsock_sock *vsk; void *buf; + u32 buf_len; u32 len; u32 off; bool reply; diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 9b21d0047710..4e7809408073 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -18,6 +18,7 @@ struct notifier_block; /* in notifier.h */ #define VM_ALLOC 0x00000002 /* vmalloc() */ #define VM_MAP 0x00000004 /* vmap()ed pages */ #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ +#define VM_DMA_COHERENT 0x00000010 /* dma_alloc_coherent */ #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ #define VM_NO_GUARD 0x00000040 /* don't add guard page */ #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ @@ -26,6 +27,7 @@ struct notifier_block; /* in notifier.h */ * vfree_atomic(). */ #define VM_FLUSH_RESET_PERMS 0x00000100 /* Reset direct map and flush TLB on unmap */ + /* bits [20..32] reserved for arch specific ioremap internals */ /* @@ -51,15 +53,21 @@ struct vmap_area { unsigned long va_start; unsigned long va_end; - /* - * Largest available free size in subtree. - */ - unsigned long subtree_max_size; - unsigned long flags; struct rb_node rb_node; /* address sorted rbtree */ struct list_head list; /* address sorted list */ - struct llist_node purge_list; /* "lazy purge" list */ - struct vm_struct *vm; + + /* + * The following three variables can be packed, because + * a vmap_area object is always one of the three states: + * 1) in "free" tree (root is vmap_area_root) + * 2) in "busy" tree (root is free_vmap_area_root) + * 3) in purge list (head is vmap_purge_list) + */ + union { + unsigned long subtree_max_size; /* in "free" tree */ + struct vm_struct *vm; /* in "busy" tree */ + struct llist_node purge_list; /* in purge list */ + }; }; /* diff --git a/include/linux/w1.h b/include/linux/w1.h index e0b5156f78fd..7da0c7588e04 100644 --- a/include/linux/w1.h +++ b/include/linux/w1.h @@ -118,6 +118,9 @@ typedef void (*w1_slave_found_callback)(struct w1_master *, u64); * w1_master* is passed to the slave found callback. * u8 is search_type, W1_SEARCH or W1_ALARM_SEARCH * + * @dev_id: Optional device id string, which w1 slaves could use for + * creating names, which then give a connection to the w1 master + * * Note: read_bit and write_bit are very low level functions and should only * be used with hardware that doesn't really support 1-wire operations, * like a parallel/serial port. 
@@ -150,6 +153,8 @@ struct w1_bus_master { void (*search)(void *, struct w1_master *, u8, w1_slave_found_callback); + + char *dev_id; }; /** diff --git a/include/linux/wimax/debug.h b/include/linux/wimax/debug.h index 7cb63e4ec0ae..4dd2c1cea6a9 100644 --- a/include/linux/wimax/debug.h +++ b/include/linux/wimax/debug.h @@ -98,9 +98,7 @@ * To manipulate from user space the levels, create a debugfs dentry * and then register each submodule with: * - * result = d_level_register_debugfs("PREFIX_", submodule_X, parent); - * if (result < 0) - * goto error; + * d_level_register_debugfs("PREFIX_", submodule_X, parent); * * Where PREFIX_ is a name of your chosing. This will create debugfs * file with a single numeric value that can be use to tweak it. To @@ -408,25 +406,13 @@ do { \ * @submodule: name of submodule (not a string, just the name) * @dentry: debugfs parent dentry * - * Returns: 0 if ok, < 0 errno on error. - * * For removing, just use debugfs_remove_recursive() on the parent. */ #define d_level_register_debugfs(prefix, name, parent) \ ({ \ - int rc; \ - struct dentry *fd; \ - struct dentry *verify_parent_type = parent; \ - fd = debugfs_create_u8( \ - prefix #name, 0600, verify_parent_type, \ + debugfs_create_u8( \ + prefix #name, 0600, parent, \ &(D_LEVEL[__D_SUBMODULE_ ## name].level)); \ - rc = PTR_ERR(fd); \ - if (IS_ERR(fd) && rc != -ENODEV) \ - printk(KERN_ERR "%s: Can't create debugfs entry %s: " \ - "%d\n", __func__, prefix #name, rc); \ - else \ - rc = 0; \ - rc; \ }) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index b7c585b5ec1c..4261d1c6e87b 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -435,6 +435,10 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, extern void destroy_workqueue(struct workqueue_struct *wq); +struct workqueue_attrs *alloc_workqueue_attrs(void); +void free_workqueue_attrs(struct workqueue_attrs *attrs); +int apply_workqueue_attrs(struct workqueue_struct *wq, + const struct workqueue_attrs *attrs); int workqueue_set_unbound_cpumask(cpumask_var_t cpumask); extern bool queue_work_on(int cpu, struct workqueue_struct *wq, diff --git a/include/linux/zpool.h b/include/linux/zpool.h index 7238865e75b0..51bf43076165 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h @@ -46,6 +46,8 @@ const char *zpool_get_type(struct zpool *pool); void zpool_destroy_pool(struct zpool *pool); +bool zpool_malloc_support_movable(struct zpool *pool); + int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp, unsigned long *handle); @@ -90,6 +92,7 @@ struct zpool_driver { struct zpool *zpool); void (*destroy)(void *pool); + bool malloc_support_movable; int (*malloc)(void *pool, size_t size, gfp_t gfp, unsigned long *handle); void (*free)(void *pool, unsigned long handle); |
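A note on the workqueue.h hunk above: it exports alloc_workqueue_attrs(), free_workqueue_attrs() and apply_workqueue_attrs() outside kernel/workqueue.c. The following is an illustrative, out-of-tree sketch (not part of this series) of how a caller might use these helpers to restrict an unbound workqueue to a CPU mask; the function name and the choice of CPU 0 are invented for the example, and apply_workqueue_attrs() only applies to WQ_UNBOUND workqueues.

/* Sketch only: assumes "wq" was created with alloc_workqueue(..., WQ_UNBOUND, ...). */
#include <linux/workqueue.h>
#include <linux/cpumask.h>

static int example_pin_unbound_wq(struct workqueue_struct *wq)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs();	/* allocates and initialises the attrs */
	if (!attrs)
		return -ENOMEM;

	attrs->nice = 0;				/* default worker priority */
	cpumask_copy(attrs->cpumask, cpumask_of(0));	/* restrict to CPU 0 (example) */

	ret = apply_workqueue_attrs(wq, attrs);		/* fails for bound workqueues */

	free_workqueue_attrs(attrs);
	return ret;
}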
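Similarly, the zpool.h hunk adds zpool_malloc_support_movable() so a zpool user can ask whether the backend allocator can serve movable (migratable) pages before picking a gfp mask. A minimal sketch under that assumption, with the caller-side function name invented for the example and a gfp policy modelled on a typical zswap-style store path:

#include <linux/zpool.h>
#include <linux/gfp.h>

static int example_zpool_store(struct zpool *pool, size_t size,
			       unsigned long *handle)
{
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;

	/* Only request highmem/movable pages when the backend can migrate them. */
	if (zpool_malloc_support_movable(pool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;

	return zpool_malloc(pool, size, gfp, handle);
}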